raid5-cache: statically allocate the recovery ra bio
author     Christoph Hellwig <hch@lst.de>
           Mon, 28 Feb 2022 11:25:02 +0000 (13:25 +0200)
committer  Song Liu <song@kernel.org>
           Wed, 9 Mar 2022 06:55:09 +0000 (22:55 -0800)
There is no need to preallocate the bio and reset it on each use.  Just
allocate it on-stack and use a bvec array placed next to the pages used
for it.
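
For reference, the on-stack pattern looks roughly like this when pulled
out of the raid5-cache specifics; read_pool() and its parameters are
hypothetical, only the bio_init()/__bio_add_page()/submit_bio_wait()/
bio_uninit() sequence mirrors the change below:

	/*
	 * Minimal sketch of the on-stack bio pattern; read_pool() and its
	 * arguments are made up, only the bio_* calls match the patch.
	 */
	static int read_pool(struct block_device *bdev, sector_t sector,
			     struct page **pages, struct bio_vec *bvecs,
			     int nr_pages)
	{
		struct bio bio;
		int i, ret;

		/* Tie the stack bio to the caller-provided bvec table. */
		bio_init(&bio, bdev, bvecs, nr_pages, REQ_OP_READ);
		bio.bi_iter.bi_sector = sector;

		/* The bio can never overflow here, so the __ variant is safe. */
		for (i = 0; i < nr_pages; i++)
			__bio_add_page(&bio, pages[i], PAGE_SIZE, 0);

		ret = submit_bio_wait(&bio);
		/* On-stack bios are not put; they are uninitialized instead. */
		bio_uninit(&bio);
		return ret;
	}

Because the bio lives on the stack and its bvec table is caller-provided,
no bio_set is needed and the error paths only have to call bio_uninit().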

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Song Liu <song@kernel.org>
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 8907d18c225a932bfb052f67a081cf4557e029fb..a7d50ff9020a82140d28af088703d120cbf84e7a 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1623,10 +1623,10 @@ struct r5l_recovery_ctx {
         * just copy data from the pool.
         */
        struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
+       struct bio_vec ra_bvec[R5L_RECOVERY_PAGE_POOL_SIZE];
        sector_t pool_offset;   /* offset of first page in the pool */
        int total_pages;        /* total allocated pages */
        int valid_pages;        /* pages with valid data */
-       struct bio *ra_bio;     /* bio to do the read ahead */
 };
 
 static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
@@ -1634,11 +1634,6 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
 {
        struct page *page;
 
-       ctx->ra_bio = bio_alloc_bioset(NULL, BIO_MAX_VECS, 0, GFP_KERNEL,
-                                      &log->bs);
-       if (!ctx->ra_bio)
-               return -ENOMEM;
-
        ctx->valid_pages = 0;
        ctx->total_pages = 0;
        while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
@@ -1650,10 +1645,8 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
                ctx->total_pages += 1;
        }
 
-       if (ctx->total_pages == 0) {
-               bio_put(ctx->ra_bio);
+       if (ctx->total_pages == 0)
                return -ENOMEM;
-       }
 
        ctx->pool_offset = 0;
        return 0;
@@ -1666,7 +1659,6 @@ static void r5l_recovery_free_ra_pool(struct r5l_log *log,
 
        for (i = 0; i < ctx->total_pages; ++i)
                put_page(ctx->ra_pool[i]);
-       bio_put(ctx->ra_bio);
 }
 
 /*
@@ -1679,15 +1671,19 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
                                      struct r5l_recovery_ctx *ctx,
                                      sector_t offset)
 {
-       bio_reset(ctx->ra_bio, log->rdev->bdev, REQ_OP_READ);
-       ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
+       struct bio bio;
+       int ret;
+
+       bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
+                R5L_RECOVERY_PAGE_POOL_SIZE, REQ_OP_READ);
+       bio.bi_iter.bi_sector = log->rdev->data_offset + offset;
 
        ctx->valid_pages = 0;
        ctx->pool_offset = offset;
 
        while (ctx->valid_pages < ctx->total_pages) {
-               bio_add_page(ctx->ra_bio,
-                            ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0);
+               __bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
+                              0);
                ctx->valid_pages += 1;
 
                offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
@@ -1696,7 +1692,9 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
                        break;
        }
 
-       return submit_bio_wait(ctx->ra_bio);
+       ret = submit_bio_wait(&bio);
+       bio_uninit(&bio);
+       return ret;
 }
 
 /*