ublk: pass queue_limits to blk_mq_alloc_disk
author: Christoph Hellwig <hch@lst.de>
Thu, 15 Feb 2024 07:02:59 +0000 (08:02 +0100)
committer: Jens Axboe <axboe@kernel.dk>
Mon, 19 Feb 2024 23:59:32 +0000 (16:59 -0700)
Pass the limits ublk imposes directly to blk_mq_alloc_disk instead of
setting them one at a time.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20240215070300.2200308-17-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/block/ublk_drv.c

index c5b6552707984bc41f55eb2c1d6d916842f98070..01afe90a47ac46e2131a5bb1e895aa75c81ed493 100644 (file)
@@ -246,21 +246,12 @@ static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
        return 0;
 }
 
-static int ublk_dev_param_zoned_apply(struct ublk_device *ub)
+static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
 {
-       const struct ublk_param_zoned *p = &ub->params.zoned;
-
-       disk_set_zoned(ub->ub_disk);
        blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
        blk_queue_required_elevator_features(ub->ub_disk->queue,
                                             ELEVATOR_F_ZBD_SEQ_WRITE);
-       disk_set_max_active_zones(ub->ub_disk, p->max_active_zones);
-       disk_set_max_open_zones(ub->ub_disk, p->max_open_zones);
-       blk_queue_max_zone_append_sectors(ub->ub_disk->queue, p->max_zone_append_sectors);
-
        ub->ub_disk->nr_zones = ublk_get_nr_zones(ub);
-
-       return 0;
 }
 
 /* Based on virtblk_alloc_report_buffer */
@@ -432,9 +423,8 @@ static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
        return -EOPNOTSUPP;
 }
 
-static int ublk_dev_param_zoned_apply(struct ublk_device *ub)
+static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
 {
-       return -EOPNOTSUPP;
 }
 
 static int ublk_revalidate_disk_zones(struct ublk_device *ub)
@@ -498,11 +488,6 @@ static void ublk_dev_param_basic_apply(struct ublk_device *ub)
        struct request_queue *q = ub->ub_disk->queue;
        const struct ublk_param_basic *p = &ub->params.basic;
 
-       blk_queue_logical_block_size(q, 1 << p->logical_bs_shift);
-       blk_queue_physical_block_size(q, 1 << p->physical_bs_shift);
-       blk_queue_io_min(q, 1 << p->io_min_shift);
-       blk_queue_io_opt(q, 1 << p->io_opt_shift);
-
        blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
                        p->attrs & UBLK_ATTR_FUA);
        if (p->attrs & UBLK_ATTR_ROTATIONAL)
@@ -510,29 +495,12 @@ static void ublk_dev_param_basic_apply(struct ublk_device *ub)
        else
                blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 
-       blk_queue_max_hw_sectors(q, p->max_sectors);
-       blk_queue_chunk_sectors(q, p->chunk_sectors);
-       blk_queue_virt_boundary(q, p->virt_boundary_mask);
-
        if (p->attrs & UBLK_ATTR_READ_ONLY)
                set_disk_ro(ub->ub_disk, true);
 
        set_capacity(ub->ub_disk, p->dev_sectors);
 }
 
-static void ublk_dev_param_discard_apply(struct ublk_device *ub)
-{
-       struct request_queue *q = ub->ub_disk->queue;
-       const struct ublk_param_discard *p = &ub->params.discard;
-
-       q->limits.discard_alignment = p->discard_alignment;
-       q->limits.discard_granularity = p->discard_granularity;
-       blk_queue_max_discard_sectors(q, p->max_discard_sectors);
-       blk_queue_max_write_zeroes_sectors(q,
-                       p->max_write_zeroes_sectors);
-       blk_queue_max_discard_segments(q, p->max_discard_segments);
-}
-
 static int ublk_validate_params(const struct ublk_device *ub)
 {
        /* basic param is the only one which must be set */
@@ -576,20 +544,12 @@ static int ublk_validate_params(const struct ublk_device *ub)
        return 0;
 }
 
-static int ublk_apply_params(struct ublk_device *ub)
+static void ublk_apply_params(struct ublk_device *ub)
 {
-       if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
-               return -EINVAL;
-
        ublk_dev_param_basic_apply(ub);
 
-       if (ub->params.types & UBLK_PARAM_TYPE_DISCARD)
-               ublk_dev_param_discard_apply(ub);
-
        if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
-               return ublk_dev_param_zoned_apply(ub);
-
-       return 0;
+               ublk_dev_param_zoned_apply(ub);
 }
 
 static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
@@ -2205,12 +2165,47 @@ static struct ublk_device *ublk_get_device_from_id(int idx)
 static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
 {
        const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
+       const struct ublk_param_basic *p = &ub->params.basic;
        int ublksrv_pid = (int)header->data[0];
+       struct queue_limits lim = {
+               .logical_block_size     = 1 << p->logical_bs_shift,
+               .physical_block_size    = 1 << p->physical_bs_shift,
+               .io_min                 = 1 << p->io_min_shift,
+               .io_opt                 = 1 << p->io_opt_shift,
+               .max_hw_sectors         = p->max_sectors,
+               .chunk_sectors          = p->chunk_sectors,
+               .virt_boundary_mask     = p->virt_boundary_mask,
+
+       };
        struct gendisk *disk;
        int ret = -EINVAL;
 
        if (ublksrv_pid <= 0)
                return -EINVAL;
+       if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
+               return -EINVAL;
+
+       if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
+               const struct ublk_param_discard *pd = &ub->params.discard;
+
+               lim.discard_alignment = pd->discard_alignment;
+               lim.discard_granularity = pd->discard_granularity;
+               lim.max_hw_discard_sectors = pd->max_discard_sectors;
+               lim.max_write_zeroes_sectors = pd->max_write_zeroes_sectors;
+               lim.max_discard_segments = pd->max_discard_segments;
+       }
+
+       if (ub->params.types & UBLK_PARAM_TYPE_ZONED) {
+               const struct ublk_param_zoned *p = &ub->params.zoned;
+
+               if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
+                       return -EOPNOTSUPP;
+
+               lim.zoned = true;
+               lim.max_active_zones = p->max_active_zones;
+               lim.max_open_zones =  p->max_open_zones;
+               lim.max_zone_append_sectors = p->max_zone_append_sectors;
+       }
 
        if (wait_for_completion_interruptible(&ub->completion) != 0)
                return -EINTR;
@@ -2222,7 +2217,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
                goto out_unlock;
        }
 
-       disk = blk_mq_alloc_disk(&ub->tag_set, NULL, NULL);
+       disk = blk_mq_alloc_disk(&ub->tag_set, &lim, NULL);
        if (IS_ERR(disk)) {
                ret = PTR_ERR(disk);
                goto out_unlock;
@@ -2234,9 +2229,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
        ub->dev_info.ublksrv_pid = ublksrv_pid;
        ub->ub_disk = disk;
 
-       ret = ublk_apply_params(ub);
-       if (ret)
-               goto out_put_disk;
+       ublk_apply_params(ub);
 
        /* don't probe partitions if any one ubq daemon is un-trusted */
        if (ub->nr_privileged_daemon != ub->nr_queues_ready)
@@ -2262,7 +2255,6 @@ out_put_cdev:
                ub->dev_info.state = UBLK_S_DEV_DEAD;
                ublk_put_device(ub);
        }
-out_put_disk:
        if (ret)
                put_disk(disk);
 out_unlock: