        if (bio_sectors(bio)) {
                if (bio_check_eod(bio, bdev_nr_sectors(p->bdev)))
                        goto out;
-               bio->bi_iter.bi_sector += p->start_sect;
+               bio->bi_iter.bi_sector += p->bdev->bd_start_sect;
                trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
-                                     bio->bi_iter.bi_sector - p->start_sect);
+                                     bio->bi_iter.bi_sector -
+                                     p->bdev->bd_start_sect);
        }
        bio->bi_partno = 0;
        ret = 0;
 
 {
        struct hd_struct *p = dev_to_part(dev);
 
-       return sprintf(buf, "%llu\n",(unsigned long long)p->start_sect);
+       return sprintf(buf, "%llu\n", p->bdev->bd_start_sect);
 }
 
 static ssize_t part_ro_show(struct device *dev,
 
        return sprintf(buf, "%u\n",
                queue_limit_alignment_offset(&part_to_disk(p)->queue->limits,
-                               p->start_sect));
+                               p->bdev->bd_start_sect));
 }
 
 static ssize_t part_discard_alignment_show(struct device *dev,
 
        return sprintf(buf, "%u\n",
                queue_limit_discard_alignment(&part_to_disk(p)->queue->limits,
-                               p->start_sect));
+                               p->bdev->bd_start_sect));
 }
 
 static DEVICE_ATTR(partition, 0444, part_partition_show, NULL);
         */
        put_device(disk_to_dev(disk));
 
-       part->start_sect = 0;
+       part->bdev->bd_start_sect = 0;
        bdev_set_nr_sectors(part->bdev, 0);
        part_stat_set_all(part, 0);
        put_device(part_to_dev(part));
 
        pdev = part_to_dev(p);
 
-       p->start_sect = start;
+       bdev->bd_start_sect = start;
        bdev_set_nr_sectors(bdev, len);
        p->partno = partno;
        p->policy = get_disk_ro(disk);
        disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
        while ((part = disk_part_iter_next(&piter))) {
                if (part->partno == skip_partno ||
-                   start >= part->start_sect + bdev_nr_sectors(part->bdev) ||
-                   start + length <= part->start_sect)
+                   start >= part->bdev->bd_start_sect +
+                       bdev_nr_sectors(part->bdev) ||
+                   start + length <= part->bdev->bd_start_sect)
                        continue;
                overlap = true;
                break;
        mutex_lock_nested(&bdev->bd_mutex, 1);
 
        ret = -EINVAL;
-       if (start != part->start_sect)
+       if (start != part->bdev->bd_start_sect)
                goto out_unlock;
 
        ret = -EBUSY;
 
                return -1;
        if (bdev_is_partition(bdev))
                return queue_limit_alignment_offset(&q->limits,
-                               bdev->bd_part->start_sect);
+                               bdev->bd_start_sect);
        return q->limits.alignment_offset;
 }
 
 
        if (bdev_is_partition(bdev))
                return queue_limit_discard_alignment(&q->limits,
-                               bdev->bd_part->start_sect);
+                               bdev->bd_start_sect);
        return q->limits.discard_alignment;
 }
 
 
 static void blk_trace_setup_lba(struct blk_trace *bt,
                                struct block_device *bdev)
 {
-       struct hd_struct *part = NULL;
-
-       if (bdev)
-               part = bdev->bd_part;
-
-       if (part) {
-               bt->start_lba = part->start_sect;
-               bt->end_lba = part->start_sect + bdev_nr_sectors(bdev);
+       if (bdev) {
+               bt->start_lba = bdev->bd_start_sect;
+               bt->end_lba = bdev->bd_start_sect + bdev_nr_sectors(bdev);
        } else {
                bt->start_lba = 0;
                bt->end_lba = -1ULL;