void guard_bio_eod(struct bio *bio)
 {
        sector_t maxsector;
-       struct hd_struct *part;
+       struct block_device *part;
 
        rcu_read_lock();
        part = __disk_get_part(bio->bi_disk, bio->bi_partno);
        if (part)
-               maxsector = bdev_nr_sectors(part->bdev);
+               maxsector = bdev_nr_sectors(part);
        else
                maxsector = get_capacity(bio->bi_disk);
        rcu_read_unlock();
 
 }
 __setup("fail_make_request=", setup_fail_make_request);
 
-static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
+static bool should_fail_request(struct block_device *part, unsigned int bytes)
 {
-       return part->bdev->bd_make_it_fail &&
-               should_fail(&fail_make_request, bytes);
+       return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
 }
 
 static int __init fail_make_request_debugfs(void)
 
 #else /* CONFIG_FAIL_MAKE_REQUEST */
 
-static inline bool should_fail_request(struct hd_struct *part,
+static inline bool should_fail_request(struct block_device *part,
                                        unsigned int bytes)
 {
        return false;
 
 #endif /* CONFIG_FAIL_MAKE_REQUEST */
 
-static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
+static inline bool bio_check_ro(struct bio *bio, struct block_device *part)
 {
        const int op = bio_op(bio);
 
-       if (part->bdev->bd_read_only && op_is_write(op)) {
+       if (part->bd_read_only && op_is_write(op)) {
                char b[BDEVNAME_SIZE];
 
                if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
 
                WARN_ONCE(1,
                       "Trying to write to read-only block-device %s (partno %d)\n",
-                       bio_devname(bio, b), part->partno);
+                       bio_devname(bio, b), part->bd_partno);
                /* Older lvm-tools actually trigger this */
                return false;
        }
 
 static noinline int should_fail_bio(struct bio *bio)
 {
-       if (should_fail_request(bio->bi_disk->part0->bd_part,
-                       bio->bi_iter.bi_size))
+       if (should_fail_request(bio->bi_disk->part0, bio->bi_iter.bi_size))
                return -EIO;
        return 0;
 }
  */
 static inline int blk_partition_remap(struct bio *bio)
 {
-       struct hd_struct *p;
+       struct block_device *p;
        int ret = -EIO;
 
        rcu_read_lock();
                goto out;
 
        if (bio_sectors(bio)) {
-               if (bio_check_eod(bio, bdev_nr_sectors(p->bdev)))
+               if (bio_check_eod(bio, bdev_nr_sectors(p)))
                        goto out;
-               bio->bi_iter.bi_sector += p->bdev->bd_start_sect;
-               trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
+               bio->bi_iter.bi_sector += p->bd_start_sect;
+               trace_block_bio_remap(bio->bi_disk->queue, bio, p->bd_dev,
                                      bio->bi_iter.bi_sector -
-                                     p->bdev->bd_start_sect);
+                                     p->bd_start_sect);
        }
        bio->bi_partno = 0;
        ret = 0;
                if (unlikely(blk_partition_remap(bio)))
                        goto end_io;
        } else {
-               if (unlikely(bio_check_ro(bio, bio->bi_disk->part0->bd_part)))
+               if (unlikely(bio_check_ro(bio, bio->bi_disk->part0)))
                        goto end_io;
                if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
                        goto end_io;
                return ret;
 
        if (rq->rq_disk &&
-           should_fail_request(rq->rq_disk->part0->bd_part, blk_rq_bytes(rq)))
+           should_fail_request(rq->rq_disk->part0, blk_rq_bytes(rq)))
                return BLK_STS_IOERR;
 
        if (blk_crypto_insert_cloned_request(rq))
 }
 EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
 
-static void update_io_ticks(struct hd_struct *part, unsigned long now, bool end)
+static void update_io_ticks(struct block_device *part, unsigned long now,
+               bool end)
 {
        unsigned long stamp;
 again:
-       stamp = READ_ONCE(part->bdev->bd_stamp);
+       stamp = READ_ONCE(part->bd_stamp);
        if (unlikely(stamp != now)) {
-               if (likely(cmpxchg(&part->bdev->bd_stamp, stamp, now) == stamp))
+               if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
                        __part_stat_add(part, io_ticks, end ? now - stamp : 1);
        }
-       if (part->partno) {
-               part = part_to_disk(part)->part0->bd_part;
+       if (part->bd_partno) {
+               part = bdev_whole(part);
                goto again;
        }
 }
 {
        if (req->part && blk_do_io_stat(req)) {
                const int sgrp = op_stat_group(req_op(req));
-               struct hd_struct *part;
 
                part_stat_lock();
-               part = req->part;
-               part_stat_add(part, sectors[sgrp], bytes >> 9);
+               part_stat_add(req->part, sectors[sgrp], bytes >> 9);
                part_stat_unlock();
        }
 }
        if (req->part && blk_do_io_stat(req) &&
            !(req->rq_flags & RQF_FLUSH_SEQ)) {
                const int sgrp = op_stat_group(req_op(req));
-               struct hd_struct *part;
 
                part_stat_lock();
-               part = req->part;
-
-               update_io_ticks(part, jiffies, true);
-               part_stat_inc(part, ios[sgrp]);
-               part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
+               update_io_ticks(req->part, jiffies, true);
+               part_stat_inc(req->part, ios[sgrp]);
+               part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
                part_stat_unlock();
        }
 }
        part_stat_unlock();
 }
 
-static unsigned long __part_start_io_acct(struct hd_struct *part,
+static unsigned long __part_start_io_acct(struct block_device *part,
                                          unsigned int sectors, unsigned int op)
 {
        const int sgrp = op_stat_group(op);
        return now;
 }
 
-unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part,
+unsigned long part_start_io_acct(struct gendisk *disk, struct block_device **part,
                                 struct bio *bio)
 {
        *part = disk_map_sector_rcu(disk, bio->bi_iter.bi_sector);
 unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
                                 unsigned int op)
 {
-       return __part_start_io_acct(disk->part0->bd_part, sectors, op);
+       return __part_start_io_acct(disk->part0, sectors, op);
 }
 EXPORT_SYMBOL(disk_start_io_acct);
 
-static void __part_end_io_acct(struct hd_struct *part, unsigned int op,
+static void __part_end_io_acct(struct block_device *part, unsigned int op,
                               unsigned long start_time)
 {
        const int sgrp = op_stat_group(op);
        part_stat_unlock();
 }
 
-void part_end_io_acct(struct hd_struct *part, struct bio *bio,
+void part_end_io_acct(struct block_device *part, struct bio *bio,
                      unsigned long start_time)
 {
        __part_end_io_acct(part, bio_op(bio), start_time);
 void disk_end_io_acct(struct gendisk *disk, unsigned int op,
                      unsigned long start_time)
 {
-       __part_end_io_acct(disk->part0->bd_part, op, start_time);
+       __part_end_io_acct(disk->part0, op, start_time);
 }
 EXPORT_SYMBOL(disk_end_io_acct);
 
 
 
 static void blk_account_io_flush(struct request *rq)
 {
-       struct hd_struct *part = rq->rq_disk->part0->bd_part;
+       struct block_device *part = rq->rq_disk->part0;
 
        part_stat_lock();
        part_stat_inc(part, ios[STAT_FLUSH]);
 
 }
 
 struct mq_inflight {
-       struct hd_struct *part;
+       struct block_device *part;
        unsigned int inflight[2];
 };
 
        return true;
 }
 
-unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part)
+unsigned int blk_mq_in_flight(struct request_queue *q,
+               struct block_device *part)
 {
        struct mq_inflight mi = { .part = part };
 
        return mi.inflight[0] + mi.inflight[1];
 }
 
-void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
-                        unsigned int inflight[2])
+void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
+               unsigned int inflight[2])
 {
        struct mq_inflight mi = { .part = part };
 
 
        return hctx->nr_ctx && hctx->tags;
 }
 
-unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
-void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
-                        unsigned int inflight[2]);
+unsigned int blk_mq_in_flight(struct request_queue *q,
+               struct block_device *part);
+void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
+               unsigned int inflight[2]);
 
 static inline void blk_mq_put_dispatch_budget(struct request_queue *q)
 {
 
        __elevator_exit(q, e);
 }
 
-struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
+struct block_device *__disk_get_part(struct gendisk *disk, int partno);
 
 ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
                char *buf);
 static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
 #endif
 
-struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector);
+struct block_device *disk_map_sector_rcu(struct gendisk *disk, sector_t sector);
 
 int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
 void blk_free_devt(dev_t devt);
 
        }
 }
 
-static unsigned int part_in_flight(struct hd_struct *part)
+static unsigned int part_in_flight(struct block_device *part)
 {
        unsigned int inflight = 0;
        int cpu;
        return inflight;
 }
 
-static void part_in_flight_rw(struct hd_struct *part, unsigned int inflight[2])
+static void part_in_flight_rw(struct block_device *part,
+               unsigned int inflight[2])
 {
        int cpu;
 
                inflight[1] = 0;
 }
 
-struct hd_struct *__disk_get_part(struct gendisk *disk, int partno)
+struct block_device *__disk_get_part(struct gendisk *disk, int partno)
 {
        struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl);
 
  */
 struct hd_struct *disk_get_part(struct gendisk *disk, int partno)
 {
+       struct block_device *bdev;
        struct hd_struct *part;
 
        rcu_read_lock();
-       part = __disk_get_part(disk, partno);
-       if (part)
-               get_device(part_to_dev(part));
+       bdev = __disk_get_part(disk, partno);
+       if (!bdev)
+               goto fail;
+       part = bdev->bd_part;
+       if (!kobject_get_unless_zero(&part_to_dev(part)->kobj))
+               goto fail;
        rcu_read_unlock();
-
        return part;
+fail:
+       rcu_read_unlock();
+       return NULL;
 }
 
 /**
 
        /* iterate to the next partition */
        for (; piter->idx != end; piter->idx += inc) {
-               struct hd_struct *part;
+               struct block_device *part;
 
                part = rcu_dereference(ptbl->part[piter->idx]);
                if (!part)
                        continue;
-               if (!bdev_nr_sectors(part->bdev) &&
+               if (!bdev_nr_sectors(part) &&
                    !(piter->flags & DISK_PITER_INCL_EMPTY) &&
                    !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
                      piter->idx == 0))
                        continue;
 
-               get_device(part_to_dev(part));
-               piter->part = part;
+               get_device(part_to_dev(part->bd_part));
+               piter->part = part->bd_part;
                piter->idx += inc;
                break;
        }
 }
 EXPORT_SYMBOL_GPL(disk_part_iter_exit);
 
-static inline int sector_in_part(struct hd_struct *part, sector_t sector)
+static inline int sector_in_part(struct block_device *part, sector_t sector)
 {
-       return part->bdev->bd_start_sect <= sector &&
-               sector < part->bdev->bd_start_sect + bdev_nr_sectors(part->bdev);
+       return part->bd_start_sect <= sector &&
+               sector < part->bd_start_sect + bdev_nr_sectors(part);
 }
 
 /**
  * Found partition on success, part0 is returned if no partition matches
  * or the matched partition is being deleted.
  */
-struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
+struct block_device *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
 {
        struct disk_part_tbl *ptbl;
-       struct hd_struct *part;
+       struct block_device *part;
        int i;
 
        rcu_read_lock();
                }
        }
 
-       part = disk->part0->bd_part;
+       part = disk->part0;
 out_unlock:
        rcu_read_unlock();
        return part;
        kobject_put(disk->part0->bd_holder_dir);
        kobject_put(disk->slave_dir);
 
-       part_stat_set_all(disk->part0->bd_part, 0);
+       part_stat_set_all(disk->part0, 0);
        disk->part0->bd_stamp = 0;
        if (!sysfs_deprecated)
                sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
 
        part_stat_read_all(p, &stat);
        if (queue_is_mq(q))
-               inflight = blk_mq_in_flight(q, p);
+               inflight = blk_mq_in_flight(q, p->bdev);
        else
-               inflight = part_in_flight(p);
+               inflight = part_in_flight(p->bdev);
 
        return sprintf(buf,
                "%8lu %8lu %8llu %8u "
        unsigned int inflight[2];
 
        if (queue_is_mq(q))
-               blk_mq_in_flight_rw(q, p, inflight);
+               blk_mq_in_flight_rw(q, p->bdev, inflight);
        else
-               part_in_flight_rw(p, inflight);
+               part_in_flight_rw(p->bdev, inflight);
 
        return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]);
 }
        while ((hd = disk_part_iter_next(&piter))) {
                part_stat_read_all(hd, &stat);
                if (queue_is_mq(gp->queue))
-                       inflight = blk_mq_in_flight(gp->queue, hd);
+                       inflight = blk_mq_in_flight(gp->queue, hd->bdev);
                else
-                       inflight = part_in_flight(hd);
+                       inflight = part_in_flight(hd->bdev);
 
                seq_printf(seqf, "%4d %7d %s "
                           "%lu %lu %lu %u "
                goto out_bdput;
 
        ptbl = rcu_dereference_protected(disk->part_tbl, 1);
-       rcu_assign_pointer(ptbl->part[0], disk->part0->bd_part);
+       rcu_assign_pointer(ptbl->part[0], disk->part0);
 
        disk->minors = minors;
        rand_initialize_disk(disk);
 
        struct disk_part_tbl *ptbl =
                rcu_dereference_protected(disk->part_tbl, 1);
 
-       /*
-        * ->part_tbl is referenced in this part's release handler, so
-        *  we have to hold the disk device
-        */
        rcu_assign_pointer(ptbl->part[part->partno], NULL);
        rcu_assign_pointer(ptbl->last_lookup, NULL);
+
        kobject_put(part->bdev->bd_holder_dir);
        device_del(part_to_dev(part));
 
 
        /* everything is up and running, commence */
        bdev_add(bdev, devt);
-       rcu_assign_pointer(ptbl->part[partno], p);
+       rcu_assign_pointer(ptbl->part[partno], bdev);
 
        /* suppress uevent if the disk suppresses it */
        if (!dev_get_uevent_suppress(ddev))
 
        if (c_min_rate == 0)
                return false;
 
-       curr_events = (int)part_stat_read_accum(disk->part0->bd_part, sectors) -
+       curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
                        atomic_read(&device->rs_sect_ev);
 
        if (atomic_read(&device->ap_actlog_cnt)
 
        atomic_set(&device->rs_sect_ev, 0);
        device->rs_in_flight = 0;
        device->rs_last_events =
-               (int)part_stat_read_accum(disk->part0->bd_part, sectors);
+               (int)part_stat_read_accum(disk->part0, sectors);
 
        /* Updating the RCU protected object in place is necessary since
           this function gets called from atomic context.
 
        zram->disksize = 0;
 
        set_capacity_and_notify(zram->disk, 0);
-       part_stat_set_all(zram->disk->part0->bd_part, 0);
+       part_stat_set_all(zram->disk->part0, 0);
 
        up_write(&zram->init_lock);
        /* I/O operation under all of CPU are done so let's free */
 
        unsigned int            read_dirty_data:1;
        unsigned int            cache_missed:1;
 
-       struct hd_struct        *part;
+       struct block_device     *part;
        unsigned long           start_time;
 
        struct btree_op         op;
        unsigned long           start_time;
        bio_end_io_t            *bi_end_io;
        void                    *bi_private;
-       struct hd_struct        *part;
+       struct block_device     *part;
 };
 
 static void detached_dev_end_io(struct bio *bio)
 
                                 * (by eliminating DM's splitting and just using bio_split)
                                 */
                                part_stat_lock();
-                               __dm_part_stat_sub(dm_disk(md)->part0->bd_part,
+                               __dm_part_stat_sub(dm_disk(md)->part0,
                                                   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
                                part_stat_unlock();
 
 static bool md_in_flight_bios(struct mapped_device *md)
 {
        int cpu;
-       struct hd_struct *part = dm_disk(md)->part0->bd_part;
+       struct block_device *part = dm_disk(md)->part0;
        long sum = 0;
 
        for_each_possible_cpu(cpu) {
 
        bio_end_io_t *orig_bi_end_io;
        void *orig_bi_private;
        unsigned long start_time;
-       struct hd_struct *part;
+       struct block_device *part;
 };
 
 static void md_end_io(struct bio *bio)
        rcu_read_lock();
        rdev_for_each_rcu(rdev, mddev) {
                struct gendisk *disk = rdev->bdev->bd_disk;
-               curr_events = (int)part_stat_read_accum(disk->part0->bd_part, sectors) -
+               curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
                              atomic_read(&disk->sync_io);
                /* sync IO will cause sync_io to increase before the disk_stats
                 * as sync_io is counted when a request starts, and
 
        if (!ns->bdev)
                goto out;
 
-       host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
-       data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
-               sectors[READ]), 1000);
-       host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
-       data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
-               sectors[WRITE]), 1000);
+       host_reads = part_stat_read(ns->bdev, ios[READ]);
+       data_units_read =
+               DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[READ]), 1000);
+       host_writes = part_stat_read(ns->bdev, ios[WRITE]);
+       data_units_written =
+               DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[WRITE]), 1000);
 
        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
                /* we don't have the right data for file backed ns */
                if (!ns->bdev)
                        continue;
-               host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
+               host_reads += part_stat_read(ns->bdev, ios[READ]);
                data_units_read += DIV_ROUND_UP(
-                       part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
-               host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
+                       part_stat_read(ns->bdev, sectors[READ]), 1000);
+               host_writes += part_stat_read(ns->bdev, ios[WRITE]);
                data_units_written += DIV_ROUND_UP(
-                       part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
+                       part_stat_read(ns->bdev, sectors[WRITE]), 1000);
        }
 
        put_unaligned_le64(host_reads, &slog->host_reads[0]);
 
        sbi->s_sb = sb;
        sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
        sbi->s_sb_block = sb_block;
-       if (sb->s_bdev->bd_part)
-               sbi->s_sectors_written_start =
-                       part_stat_read(sb->s_bdev->bd_part, sectors[STAT_WRITE]);
+       sbi->s_sectors_written_start =
+               part_stat_read(sb->s_bdev, sectors[STAT_WRITE]);
 
        /* Cleanup superblock name */
        strreplace(sb->s_id, '/', '!');
         */
        if (!(sb->s_flags & SB_RDONLY))
                ext4_update_tstamp(es, s_wtime);
-       if (sb->s_bdev->bd_part)
-               es->s_kbytes_written =
-                       cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
-                           ((part_stat_read(sb->s_bdev->bd_part,
-                                            sectors[STAT_WRITE]) -
-                             EXT4_SB(sb)->s_sectors_written_start) >> 1));
-       else
-               es->s_kbytes_written =
-                       cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
+       es->s_kbytes_written =
+               cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
+                   ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
+                     EXT4_SB(sb)->s_sectors_written_start) >> 1));
        if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
                ext4_free_blocks_count_set(es,
                        EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
 
 {
        struct super_block *sb = sbi->s_buddy_cache->i_sb;
 
-       if (!sb->s_bdev->bd_part)
-               return snprintf(buf, PAGE_SIZE, "0\n");
        return snprintf(buf, PAGE_SIZE, "%lu\n",
-                       (part_stat_read(sb->s_bdev->bd_part,
-                                       sectors[STAT_WRITE]) -
+                       (part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
                         sbi->s_sectors_written_start) >> 1);
 }
 
 {
        struct super_block *sb = sbi->s_buddy_cache->i_sb;
 
-       if (!sb->s_bdev->bd_part)
-               return snprintf(buf, PAGE_SIZE, "0\n");
        return snprintf(buf, PAGE_SIZE, "%llu\n",
                        (unsigned long long)(sbi->s_kbytes_written +
-                       ((part_stat_read(sb->s_bdev->bd_part,
-                                        sectors[STAT_WRITE]) -
+                       ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
                          EXT4_SB(sb)->s_sectors_written_start) >> 1)));
 }
 
 
  * and the return value is in kbytes. s is of struct f2fs_sb_info.
  */
 #define BD_PART_WRITTEN(s)                                              \
-(((u64)part_stat_read((s)->sb->s_bdev->bd_part, sectors[STAT_WRITE]) -   \
+       (((u64)part_stat_read((s)->sb->s_bdev, sectors[STAT_WRITE]) -   \
                (s)->sectors_written_start) >> 1)
 
 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
 
        }
 
        /* For write statistics */
-       if (sb->s_bdev->bd_part)
-               sbi->sectors_written_start =
-                       (u64)part_stat_read(sb->s_bdev->bd_part,
-                                           sectors[STAT_WRITE]);
+       sbi->sectors_written_start =
+               (u64)part_stat_read(sb->s_bdev, sectors[STAT_WRITE]);
 
        /* Read accumulated write IO statistics if exists */
        seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
 
        };
 
        struct gendisk *rq_disk;
-       struct hd_struct *part;
+       struct block_device *part;
 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
        /* Time that the first bio started allocating this request. */
        u64 alloc_time_ns;
 void disk_end_io_acct(struct gendisk *disk, unsigned int op,
                unsigned long start_time);
 
-unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part,
-                                struct bio *bio);
-void part_end_io_acct(struct hd_struct *part, struct bio *bio,
+unsigned long part_start_io_acct(struct gendisk *disk,
+               struct block_device **part, struct bio *bio);
+void part_end_io_acct(struct block_device *part, struct bio *bio,
                      unsigned long start_time);
 
 /**
 
 struct disk_part_tbl {
        struct rcu_head rcu_head;
        int len;
-       struct hd_struct __rcu *last_lookup;
-       struct hd_struct __rcu *part[];
+       struct block_device __rcu *last_lookup;
+       struct block_device __rcu *part[];
 };
 
 struct disk_events;
 
 #define part_stat_unlock()     preempt_enable()
 
 #define part_stat_get_cpu(part, field, cpu)                            \
-       (per_cpu_ptr((part)->bdev->bd_stats, (cpu))->field)
+       (per_cpu_ptr((part)->bd_stats, (cpu))->field)
 
 #define part_stat_get(part, field)                                     \
        part_stat_get_cpu(part, field, smp_processor_id())
 
 #define part_stat_read(part, field)                                    \
 ({                                                                     \
-       typeof((part)->bdev->bd_stats->field) res = 0;                  \
+       typeof((part)->bd_stats->field) res = 0;                        \
        unsigned int _cpu;                                              \
        for_each_possible_cpu(_cpu)                                     \
-               res += per_cpu_ptr((part)->bdev->bd_stats, _cpu)->field; \
+               res += per_cpu_ptr((part)->bd_stats, _cpu)->field; \
        res;                                                            \
 })
 
-static inline void part_stat_set_all(struct hd_struct *part, int value)
+static inline void part_stat_set_all(struct block_device *part, int value)
 {
        int i;
 
        for_each_possible_cpu(i)
-               memset(per_cpu_ptr(part->bdev->bd_stats, i), value,
+               memset(per_cpu_ptr(part->bd_stats, i), value,
                                sizeof(struct disk_stats));
 }
 
         part_stat_read(part, field[STAT_DISCARD]))
 
 #define __part_stat_add(part, field, addnd)                            \
-       __this_cpu_add((part)->bdev->bd_stats->field, addnd)
+       __this_cpu_add((part)->bd_stats->field, addnd)
 
 #define part_stat_add(part, field, addnd)      do {                    \
        __part_stat_add((part), field, addnd);                          \
-       if ((part)->partno)                                             \
-               __part_stat_add(part_to_disk((part))->part0->bd_part,   \
-                       field, addnd); \
+       if ((part)->bd_partno)                                          \
+               __part_stat_add(bdev_whole(part), field, addnd);        \
 } while (0)
 
 #define part_stat_dec(part, field)                                     \