drbd: merge drbd_setup_queue_param into drbd_reconsider_queue_parameters
author     Christoph Hellwig <hch@lst.de>
           Wed, 6 Mar 2024 14:03:29 +0000 (15:03 +0100)
committer  Jens Axboe <axboe@kernel.dk>
           Wed, 6 Mar 2024 15:30:34 +0000 (08:30 -0700)
drbd_setup_queue_param is only called by drbd_reconsider_queue_parameters,
and there is no clear boundary of responsibilities between the two, so
merge them into a single function.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Philipp Reisner <philipp.reisner@linbit.com>
Reviewed-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Tested-by: Christoph Böhmwalder <christoph.boehmwalder@linbit.com>
Link: https://lore.kernel.org/r/20240306140332.623759-5-philipp.reisner@linbit.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/block/drbd/drbd_nl.c

index 0326b7322ceb48f566575051b1add8a2639932ce..0f40fdee0899718feb9d53c50b0015be3527ac1c 100644
@@ -1309,45 +1309,16 @@ static unsigned int drbd_backing_dev_max_segments(struct drbd_device *device)
        return max_segments;
 }
 
-static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
-                                  unsigned int max_bio_size, struct o_qlim *o)
-{
-       struct request_queue * const q = device->rq_queue;
-       unsigned int max_hw_sectors = max_bio_size >> 9;
-       unsigned int max_segments = BLK_MAX_SEGMENTS;
-       struct request_queue *b = NULL;
-
-       if (bdev) {
-               b = bdev->backing_bdev->bd_disk->queue;
-
-               max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
-               max_segments = drbd_backing_dev_max_segments(device);
-
-               blk_set_stacking_limits(&q->limits);
-       }
-
-       blk_queue_max_hw_sectors(q, max_hw_sectors);
-       blk_queue_max_segments(q, max_segments);
-       blk_queue_segment_boundary(q, PAGE_SIZE-1);
-       decide_on_discard_support(device, bdev);
-
-       if (b) {
-               blk_stack_limits(&q->limits, &b->limits, 0);
-               disk_update_readahead(device->vdisk);
-       }
-       fixup_write_zeroes(device, q);
-       fixup_discard_support(device, q);
-}
-
 void drbd_reconsider_queue_parameters(struct drbd_device *device,
                struct drbd_backing_dev *bdev, struct o_qlim *o)
 {
-       unsigned int now = queue_max_hw_sectors(device->rq_queue) <<
-                       SECTOR_SHIFT;
+       struct request_queue * const q = device->rq_queue;
+       unsigned int now = queue_max_hw_sectors(q) << 9;
+       struct request_queue *b = NULL;
        unsigned int new;
 
        if (bdev) {
-               struct request_queue *b = bdev->backing_bdev->bd_disk->queue;
+               b = bdev->backing_bdev->bd_disk->queue;
 
                device->local_max_bio_size =
                        queue_max_hw_sectors(b) << SECTOR_SHIFT;
@@ -1369,7 +1340,24 @@ void drbd_reconsider_queue_parameters(struct drbd_device *device,
                drbd_info(device, "max BIO size = %u\n", new);
        }
 
-       drbd_setup_queue_param(device, bdev, new, o);
+       if (bdev) {
+               blk_set_stacking_limits(&q->limits);
+               blk_queue_max_segments(q,
+                       drbd_backing_dev_max_segments(device));
+       } else {
+               blk_queue_max_segments(q, BLK_MAX_SEGMENTS);
+       }
+
+       blk_queue_max_hw_sectors(q, new >> SECTOR_SHIFT);
+       blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+       decide_on_discard_support(device, bdev);
+
+       if (bdev) {
+               blk_stack_limits(&q->limits, &b->limits, 0);
+               disk_update_readahead(device->vdisk);
+       }
+       fixup_write_zeroes(device, q);
+       fixup_discard_support(device, q);
 }
 
 /* Starts the worker thread */
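
For reference, after this patch the merged drbd_reconsider_queue_parameters() reads roughly as sketched below. This is a sketch reconstructed only from the hunks above; the negotiation of the new max BIO size sits in unchanged context between the two hunks and is therefore elided as a comment rather than reproduced.

void drbd_reconsider_queue_parameters(struct drbd_device *device,
                struct drbd_backing_dev *bdev, struct o_qlim *o)
{
        struct request_queue * const q = device->rq_queue;
        unsigned int now = queue_max_hw_sectors(q) << 9;
        struct request_queue *b = NULL;
        unsigned int new;

        if (bdev) {
                b = bdev->backing_bdev->bd_disk->queue;
                device->local_max_bio_size =
                        queue_max_hw_sectors(b) << SECTOR_SHIFT;
        }

        /*
         * ... unchanged context elided here: 'new' (the max BIO size agreed
         * with the peer) is computed and logged via drbd_info() ...
         */

        /* What used to be the body of drbd_setup_queue_param(): */
        if (bdev) {
                blk_set_stacking_limits(&q->limits);
                blk_queue_max_segments(q,
                        drbd_backing_dev_max_segments(device));
        } else {
                blk_queue_max_segments(q, BLK_MAX_SEGMENTS);
        }

        blk_queue_max_hw_sectors(q, new >> SECTOR_SHIFT);
        blk_queue_segment_boundary(q, PAGE_SIZE - 1);
        decide_on_discard_support(device, bdev);

        if (bdev) {
                blk_stack_limits(&q->limits, &b->limits, 0);
                disk_update_readahead(device->vdisk);
        }
        fixup_write_zeroes(device, q);
        fixup_discard_support(device, q);
}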