sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
 {
-       sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;
+       sector_t s = bdev_nr_sectors(c->bdev);
        if (s >= c->start)
                s -= c->start;
        else
 
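For reference, bdev_nr_bytes() and bdev_nr_sectors() are the block layer helpers
being converted to. At the time of this conversion they were defined (then in
include/linux/genhd.h) essentially as the open-coded expressions they replace,
so each hunk below is a behavior-preserving substitution:

        static inline loff_t bdev_nr_bytes(struct block_device *bdev)
        {
                return i_size_read(bdev->bd_inode);
        }

        static inline sector_t bdev_nr_sectors(struct block_device *bdev)
        {
                return bdev_nr_bytes(bdev) >> SECTOR_SHIFT;
        }
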
        int r;
        struct dm_block *sblock;
        struct cache_disk_superblock *disk_super;
-       sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;
+       sector_t bdev_size = bdev_nr_sectors(cmd->bdev);
 
        /* FIXME: see if we can lose the max sectors limit */
        if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
 
 
 static sector_t get_dev_size(struct dm_dev *dev)
 {
-       return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+       return bdev_nr_sectors(dev->bdev);
 }
 
 /*----------------------------------------------------------------*/
 
 
 static sector_t get_dev_size(struct dm_dev *dev)
 {
-       return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+       return bdev_nr_sectors(dev->bdev);
 }
 
 /*---------------------------------------------------------------------------*/
 
                        char *result, unsigned int maxlen)
 {
        struct dust_device *dd = ti->private;
-       sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
+       sector_t size = bdev_nr_sectors(dd->dev->bdev);
        bool invalid_msg = false;
        int r = -EINVAL;
        unsigned long long tmp, block;

        /*
         * Only pass ioctls through if the device sizes match exactly.
         */
-       if (dd->start ||
-           ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+       if (dd->start || ti->len != bdev_nr_sectors(dev->bdev))
                return 1;
 
        return 0;
 
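The recurring "device sizes match exactly" checks all implement the same
->prepare_ioctl contract: a nonzero start offset or a length mismatch means the
target maps only a subset of the underlying device, and the nonzero return
value makes the dm core demand extra privilege before passing raw ioctls
through. Roughly, as a paraphrased sketch of the consumer in drivers/md/dm.c
(not verbatim):

        r = ti->type->prepare_ioctl(ti, &bdev);
        if (r > 0 && !capable(CAP_SYS_RAWIO))
                return -ENOIOCTLCMD;    /* partial mapping: no unprivileged passthrough */
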
         * Only pass ioctls through if the device sizes match exactly.
         */
        *bdev = dev->bdev;
-       return !!(ec->start || ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT);
+       return !!(ec->start || ti->len != bdev_nr_sectors(dev->bdev));
 }
 
 static void ebs_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 
 static sector_t get_dev_size(struct dm_dev *dev)
 {
-       return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+       return bdev_nr_sectors(dev->bdev);
 }
 
 static int era_iterate_devices(struct dm_target *ti,
 
  */
 static inline sector_t get_dev_size(struct block_device *bdev)
 {
-       return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+       return bdev_nr_sectors(bdev);
 }
 
 static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
 
        /*
         * Only pass ioctls through if the device sizes match exactly.
         */
-       if (fc->start ||
-           ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+       if (fc->start || ti->len != bdev_nr_sectors(*bdev))
                return 1;
        return 0;
 }
 
                }
        }
 
-       ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
+       ic->data_device_sectors = bdev_nr_sectors(ic->dev->bdev);
        if (!ic->meta_dev)
                ic->meta_device_sectors = ic->data_device_sectors;
        else
-               ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
+               ic->meta_device_sectors = bdev_nr_sectors(ic->meta_dev->bdev);
 
        if (!journal_sectors) {
                journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,

        DEBUG_print("   journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
        DEBUG_print("   journal_entries %u\n", ic->journal_entries);
        DEBUG_print("   log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
-       DEBUG_print("   data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
+       DEBUG_print("   data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev));
        DEBUG_print("   initial_sectors 0x%x\n", ic->initial_sectors);
        DEBUG_print("   metadata_run 0x%x\n", ic->metadata_run);
        DEBUG_print("   log2_metadata_run %d\n", ic->log2_metadata_run);
 
        /*
         * Only pass ioctls through if the device sizes match exactly.
         */
-       if (lc->start ||
-           ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+       if (lc->start || ti->len != bdev_nr_sectors(dev->bdev))
                return 1;
        return 0;
 }
 
 
 static inline sector_t logdev_last_sector(struct log_writes_c *lc)
 {
-       return i_size_read(lc->logdev->bdev->bd_inode) >> SECTOR_SHIFT;
+       return bdev_nr_sectors(lc->logdev->bdev);
 }
 
 static int log_writes_kthread(void *arg)

        /*
         * Only pass ioctls through if the device sizes match exactly.
         */
-       if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+       if (ti->len != bdev_nr_sectors(dev->bdev))
                return 1;
        return 0;
 }
 
                                bdev_logical_block_size(lc->header_location.
                                                            bdev));
 
-               if (buf_size > i_size_read(dev->bdev->bd_inode)) {
+               if (buf_size > bdev_nr_bytes(dev->bdev)) {
                        DMWARN("log device %s too small: need %llu bytes",
                                dev->name, (unsigned long long)buf_size);
                        kfree(lc);
 
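This call site, like the writecache one further down, deals in byte counts
rather than sector counts (buf_size is in bytes), so it converts to
bdev_nr_bytes() instead of shifting a sector count back up. The invariant
relied on throughout, true by definition of the helpers:

        bdev_nr_sectors(bdev) == bdev_nr_bytes(bdev) >> SECTOR_SHIFT
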
        /*
         * Only pass ioctls through if the device sizes match exactly.
         */
-       if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+       if (!r && ti->len != bdev_nr_sectors(*bdev))
                return 1;
        return r;
 }
 
                        md_rdev_init(jdev);
                        jdev->mddev = &rs->md;
                        jdev->bdev = rs->journal_dev.dev->bdev;
-                       jdev->sectors = to_sector(i_size_read(jdev->bdev->bd_inode));
+                       jdev->sectors = bdev_nr_sectors(jdev->bdev);
                        if (jdev->sectors < MIN_RAID456_JOURNAL_SPACE) {
                                rs->ti->error = "No space for raid4/5/6 journal";
                                return -ENOSPC;
 
        rdev_for_each(rdev, &rs->md)
                if (!test_bit(Journal, &rdev->flags) && rdev->bdev) {
-                       ds = min(ds, to_sector(i_size_read(rdev->bdev->bd_inode)));
+                       ds = min(ds, bdev_nr_sectors(rdev->bdev));
                        if (ds < rs->md.dev_sectors) {
                                rs->ti->error = "Component device(s) too small";
                                return -EINVAL;

         * Make sure we got a minimum amount of free sectors per device
         */
        if (rs->data_offset &&
-           to_sector(i_size_read(rdev->bdev->bd_inode)) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
+           bdev_nr_sectors(rdev->bdev) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
                rs->ti->error = data_offset ? "No space for forward reshape" :
                                              "No space for backward reshape";
                return -ENOSPC;
 
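The raid hunks above also drop to_sector(i_size_read(...)); to_sector() is
device-mapper's bytes-to-sectors conversion from include/linux/device-mapper.h,
so the substitution is again exact:

        static inline sector_t to_sector(unsigned long long n)
        {
                return (n >> SECTOR_SHIFT);
        }
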
         * Only pass ioctls through if the device sizes match exactly.
         */
-       if (ti->len + sctx->path_list[path_nr].start !=
-           i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+       if (ti->len + sctx->path_list[path_nr].start != bdev_nr_sectors(*bdev))
                return 1;
        return 0;
 }
 
 {
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
-       sector_t dev_size =
-               i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+       sector_t dev_size = bdev_nr_sectors(bdev);
        unsigned short logical_block_size_sectors =
                limits->logical_block_size >> SECTOR_SHIFT;
        char b[BDEVNAME_SIZE];
 
        int r;
        struct dm_block *sblock;
        struct thin_disk_superblock *disk_super;
-       sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;
+       sector_t bdev_size = bdev_nr_sectors(pmd->bdev);
 
        if (bdev_size > THIN_METADATA_MAX_SECTORS)
                bdev_size = THIN_METADATA_MAX_SECTORS;
 
 
 static sector_t get_dev_size(struct block_device *bdev)
 {
-       return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+       return bdev_nr_sectors(bdev);
 }
 
 static void warn_if_metadata_device_too_big(struct block_device *bdev)
 
 
        *bdev = v->data_dev->bdev;
 
-       if (v->data_start ||
-           ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
+       if (v->data_start || ti->len != bdev_nr_sectors(v->data_dev->bdev))
                return 1;
        return 0;
 }
 
                ti->error = "Cache data device lookup failed";
                goto bad;
        }
-       wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
+       wc->memory_map_size = bdev_nr_bytes(wc->ssd_dev->bdev);
 
        /*
         * Parse the cache block size
 
        dev->dev_idx = idx;
        (void)bdevname(dev->bdev, dev->name);
 
-       dev->capacity = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+       dev->capacity = bdev_nr_sectors(bdev);
        if (ti->begin) {
                ti->error = "Partial mapping is not supported";
                goto err;