block: handle bio_split_to_limits() NULL return

commit 613b14884b8595e20b9fac4126bf627313827fbe upstream.

This can't happen right now, but in preparation for allowing
bio_split_to_limits() to return NULL if it ended the bio, check for it
in all the callers.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
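---

All of the blk_queue_split()/__blk_queue_split() callers below pick up
the same check. As a minimal sketch of the caller contract this
prepares for, using a hypothetical bio-based driver (mydrv_submit_bio()
is illustrative, not part of this patch):

	static blk_qc_t mydrv_submit_bio(struct bio *bio)
	{
		/*
		 * May end the bio: if the split completes or fails it,
		 * *bio is set to NULL and must not be touched again.
		 */
		blk_queue_split(&bio);
		if (!bio)
			return BLK_QC_T_NONE;

		/* ... normal submission path, bio is valid here ... */
		return BLK_QC_T_NONE;
	}
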
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
 			break;
 		}
 		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
+		if (IS_ERR(split))
+			*bio = split = NULL;
 		break;
 	}
 
 	if (split) {
-		/* there isn't chance to merge the splitted bio */
+		/* there isn't chance to merge the split bio */
 		split->bi_opf |= REQ_NOMERGE;
 
 		bio_chain(split, *bio);
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
 	blk_queue_bounce(q, &bio);
 	__blk_queue_split(&bio, &nr_segs);
+	if (!bio)
+		goto queue_exit;
 
 	if (!bio_integrity_prep(bio))
 		goto queue_exit;
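
Note that the blk-mq caller above cannot simply return like the driver
hunks below: blk_mq_submit_bio() owns a queue usage reference at this
point, so a NULL bio takes the same queue_exit unwind as a failed
bio_integrity_prep(). Roughly, as an abbreviated sketch (the real
function's success paths return earlier):

	blk_queue_bounce(q, &bio);
	__blk_queue_split(&bio, &nr_segs);
	if (!bio)
		goto queue_exit;	/* split ended the bio, just unwind */
	if (!bio_integrity_prep(bio))
		goto queue_exit;
	/* ... allocate and issue the request ... */
queue_exit:
	blk_queue_exit(q);	/* drop the queue usage reference */
	return BLK_QC_T_NONE;
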
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
 	struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
 
 	blk_queue_split(&bio);
+	if (!bio)
+		return BLK_QC_T_NONE;
 
 	/*
 	 * what we "blindly" assume:
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
 	struct bio *split;
 
 	blk_queue_split(&bio);
+	if (!bio)
+		return BLK_QC_T_NONE;
 
 	pd = bio->bi_bdev->bd_disk->queue->queuedata;
 	if (!pd) {
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
 	dev_dbg(&dev->core, "%s\n", __func__);
 
 	blk_queue_split(&bio);
+	if (!bio)
+		return BLK_QC_T_NONE;
 
 	spin_lock_irq(&priv->lock);
 	busy = !bio_list_empty(&priv->list);
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
 	blk_status_t st = BLK_STS_IOERR;
 
 	blk_queue_split(&bio);
+	if (!bio)
+		return BLK_QC_T_NONE;
 
 	might_sleep();
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
 	}
 
 	blk_queue_split(&bio);
+	if (!bio)
+		return BLK_QC_T_NONE;
 
 	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
 		if (bio_sectors(bio) != 0)
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
 	 * pool from the original queue to allocate the bvecs from.
 	 */
 	blk_queue_split(&bio);
+	if (!bio)
+		return BLK_QC_T_NONE;
 
 	srcu_idx = srcu_read_lock(&head->srcu);
 	ns = nvme_find_path(head);
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
 	unsigned long bytes_done;
 
 	blk_queue_split(&bio);
+	if (!bio)
+		return BLK_QC_T_NONE;
 
 	bytes_done = 0;
 	dev_info = bio->bi_bdev->bd_disk->private_data;