/**
  * fs_dax_get_by_bdev() - temporary lookup mechanism for filesystem-dax
  * @bdev: block device to find a dax_device for
+ * @start_off: returns the byte offset into the dax_device at which @bdev starts
  */
-struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
+struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off)
 {
        struct dax_device *dax_dev;
+       u64 part_size;
        int id;
 
        if (!blk_queue_dax(bdev->bd_disk->queue))
                return NULL;
 
-       if ((get_start_sect(bdev) * SECTOR_SIZE) % PAGE_SIZE ||
-           (bdev_nr_sectors(bdev) * SECTOR_SIZE) % PAGE_SIZE) {
+       *start_off = get_start_sect(bdev) * SECTOR_SIZE;
+       part_size = bdev_nr_sectors(bdev) * SECTOR_SIZE;
+       if (*start_off % PAGE_SIZE || part_size % PAGE_SIZE) {
                pr_info("%pg: error: unaligned partition for dax\n", bdev);
                return NULL;
        }
 
                             struct mapped_device *md)
 {
        struct block_device *bdev;
-
+       u64 part_off;
        int r;
 
        BUG_ON(td->dm_dev.bdev);
        }
 
        td->dm_dev.bdev = bdev;
-       td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev);
+       td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off);
        return 0;
 }
 
 
        char *path;
        struct block_device *bdev;
        struct dax_device *dax_dev;
+       u64 dax_part_off;
 
        u32 blocks;
        u32 mapped_blkaddr;
 #endif /* CONFIG_EROFS_FS_ZIP */
        struct erofs_dev_context *devs;
        struct dax_device *dax_dev;
+       u64 dax_part_off;
        u64 total_blocks;
        u32 primarydevice_blocks;
 
 
                        goto err_out;
                }
                dif->bdev = bdev;
-               dif->dax_dev = fs_dax_get_by_bdev(bdev);
+               dif->dax_dev = fs_dax_get_by_bdev(bdev, &dif->dax_part_off);
                dif->blocks = le32_to_cpu(dis->blocks);
                dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
                sbi->total_blocks += dif->blocks;
 
        sb->s_fs_info = sbi;
        sbi->opt = ctx->opt;
-       sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
+       sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->dax_part_off);
        sbi->devs = ctx->devs;
        ctx->devs = NULL;
 
 
        spinlock_t s_lock;
        struct mb_cache *s_ea_block_cache;
        struct dax_device *s_daxdev;
+       u64 s_dax_part_off;
 };
 
 static inline spinlock_t *
 
        }
        sb->s_fs_info = sbi;
        sbi->s_sb_block = sb_block;
-       sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev);
+       sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off);
 
        spin_lock_init(&sbi->s_lock);
        ret = -EINVAL;
 
         */
        struct percpu_rw_semaphore s_writepages_rwsem;
        struct dax_device *s_daxdev;
+       u64 s_dax_part_off;
 #ifdef CONFIG_EXT4_DEBUG
        unsigned long s_simulate_fail;
 #endif
 
        if (!sbi->s_blockgroup_lock)
                goto out_free_base;
 
-       sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev);
+       sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off);
        sb->s_fs_info = sbi;
        sbi->s_sb = sb;
        sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
 
        btp->bt_mount = mp;
        btp->bt_dev =  bdev->bd_dev;
        btp->bt_bdev = bdev;
-       btp->bt_daxdev = fs_dax_get_by_bdev(bdev);
+       btp->bt_daxdev = fs_dax_get_by_bdev(bdev, &btp->bt_dax_part_off);
 
        /*
         * Buffer IO error rate limiting. Limit it to no more than 10 messages
 
        dev_t                   bt_dev;
        struct block_device     *bt_bdev;
        struct dax_device       *bt_daxdev;
+       u64                     bt_dax_part_off;
        struct xfs_mount        *bt_mount;
        unsigned int            bt_meta_sectorsize;
        size_t                  bt_meta_sectormask;
 
        put_dax(dax_dev);
 }
 
-struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
+struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev,
+               u64 *start_off);
 int dax_writeback_mapping_range(struct address_space *mapping,
                struct dax_device *dax_dev, struct writeback_control *wbc);
 
 {
 }
 
-static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
+static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev,
+               u64 *start_off)
 {
        return NULL;
 }