}
EXPORT_SYMBOL(blk_queue_io_opt);
-/**
- * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
- * @t: the stacking driver (top)
- * @b: the underlying device (bottom)
- **/
-void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
-{
- blk_stack_limits(&t->limits, &b->limits, 0);
-}
-EXPORT_SYMBOL(blk_queue_stack_limits);
-
/**
* blk_stack_limits - adjust queue_limits for stacked devices
* @t: the stacking driver limits (top device)
static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
{
- /* Fixup max_write_zeroes_sectors after blk_queue_stack_limits():
+ /* Fixup max_write_zeroes_sectors after blk_stack_limits():
* if we can handle "zeroes" efficiently on the protocol,
* we want to do that, even if our backend does not announce
* max_write_zeroes_sectors itself. */
decide_on_write_same_support(device, q, b, o, disable_write_same);
if (b) {
- blk_queue_stack_limits(q, b);
+ blk_stack_limits(&q->limits, &b->limits, 0);
if (q->backing_dev_info->ra_pages !=
b->backing_dev_info->ra_pages) {
#ifdef CONFIG_NVME_MULTIPATH
if (ns->head->disk) {
nvme_update_disk_info(ns->head->disk, ns, id);
- blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+ blk_stack_limits(&ns->head->disk->queue->limits,
+ &ns->queue->limits, 0);
nvme_mpath_update_disk_size(ns->head->disk);
}
#endif
sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
sector_t offset);
-extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
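
With the wrapper removed, stacking drivers inherit the bottom device's limits by calling blk_stack_limits() on the two queues' limits directly, as the drbd and nvme-multipath hunks above do. Below is a minimal sketch of the converted caller pattern, not part of the patch: the my_stack_queue_limits() helper and its queue names are hypothetical and only illustrate what the removed blk_queue_stack_limits() did.

#include <linux/blkdev.h>

/*
 * Hypothetical helper, for illustration only: fold the bottom (backing)
 * queue's limits into the top (stacking) queue's limits, which is all
 * the removed blk_queue_stack_limits() wrapper did.
 */
static void my_stack_queue_limits(struct request_queue *top,
				  struct request_queue *bottom)
{
	/*
	 * Equivalent of the old blk_queue_stack_limits(top, bottom): the
	 * start offset is 0, and the return value (-1 when the stacked
	 * device ends up misaligned) is ignored, just as the old wrapper
	 * ignored it.
	 */
	blk_stack_limits(&top->limits, &bottom->limits, 0);
}

Callers that care about misalignment can instead check the return value of blk_stack_limits() and warn, which the wrapper never allowed.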