bcachefs: Kill direct access to bi_io_vec
author Kent Overstreet <kent.overstreet@gmail.com>
Wed, 3 Jul 2019 23:27:42 +0000 (19:27 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:08:23 +0000 (17:08 -0400)
Switch to always using bio_add_page(), which merges contiguous pages now
that we have multipage bvecs.
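
A minimal before/after sketch of the new calling convention (distilled from
the btree_io.c and util.c hunks below, not additional code in this patch):
callers used to set bi_iter.bi_size by hand and let bch2_bio_map() write
bi_io_vec directly; the size is now an argument, and the bvecs are built
with bio_add_page(), which merges physically contiguous pages into a single
multipage bvec.

    /* before: caller sizes the bio, helper pokes bi_io_vec */
    bio->bi_iter.bi_sector  = pick.ptr.offset;
    bio->bi_iter.bi_size    = btree_bytes(c);
    bch2_bio_map(bio, b->data);

    /* after: size is passed in, pages are added via bio_add_page() */
    bio->bi_iter.bi_sector  = pick.ptr.offset;
    bch2_bio_map(bio, b->data, btree_bytes(c));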

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_io.c
fs/bcachefs/compress.c
fs/bcachefs/debug.c
fs/bcachefs/ec.c
fs/bcachefs/fs-io.c
fs/bcachefs/io.c
fs/bcachefs/io.h
fs/bcachefs/journal_io.c
fs/bcachefs/super-io.c
fs/bcachefs/util.c
fs/bcachefs/util.h

fs/bcachefs/btree_io.c
index d4806809fc0da99a2a6b14dcc2a794fbc3791b01..c1d3e685a5f266058d8dbaa46a915b92ff8ab96c 100644
@@ -1037,10 +1037,9 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
        rb->pick                = pick;
        INIT_WORK(&rb->work, btree_node_read_work);
        bio->bi_iter.bi_sector  = pick.ptr.offset;
-       bio->bi_iter.bi_size    = btree_bytes(c);
        bio->bi_end_io          = btree_node_read_endio;
        bio->bi_private         = b;
-       bch2_bio_map(bio, b->data);
+       bch2_bio_map(bio, b->data, btree_bytes(c));
 
        set_btree_node_read_in_flight(b);
 
@@ -1502,11 +1501,10 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
        wbio->data                      = data;
        wbio->wbio.order                = order;
        wbio->wbio.used_mempool         = used_mempool;
-       wbio->wbio.bio.bi_iter.bi_size  = sectors_to_write << 9;
        wbio->wbio.bio.bi_end_io        = btree_node_write_endio;
        wbio->wbio.bio.bi_private       = b;
 
-       bch2_bio_map(&wbio->wbio.bio, data);
+       bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
 
        /*
         * If we're appending to a leaf node, we don't technically need FUA -
fs/bcachefs/compress.c
index 6b5b61f10fcbb1d2f2bb05e06b166a5819051395..3e91fa53985a401cef06ee7e6cd67ef32df6aec6 100644
@@ -244,7 +244,16 @@ int bch2_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio,
         * might have to free existing pages and retry allocation from mempool -
         * do this _after_ decompressing:
         */
-       bch2_bio_alloc_more_pages_pool(c, bio, crc->live_size << 9);
+       if (bio->bi_iter.bi_size < crc->live_size << 9) {
+               if (bch2_bio_alloc_pages(bio, (crc->live_size << 9) -
+                                        bio->bi_iter.bi_size,
+                                        GFP_NOFS)) {
+                       bch2_bio_free_pages_pool(c, bio);
+                       bio->bi_iter.bi_size = 0;
+                       bio->bi_vcnt = 0;
+                       bch2_bio_alloc_pages_pool(c, bio, crc->live_size << 9);
+               }
+       }
 
        memcpy_to_bio(bio, bio->bi_iter, data.b + (crc->offset << 9));
 
fs/bcachefs/debug.c
index 4c6fcb6f918e3368f0f1bc32a6a3efd808580b61..7adc5ae20b9f2a4b61a4ff2132dfb72b5c6668a9 100644
@@ -70,8 +70,7 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
                               GFP_NOIO,
                               &c->btree_bio);
        bio->bi_iter.bi_sector  = pick.ptr.offset;
-       bio->bi_iter.bi_size    = btree_bytes(c);
-       bch2_bio_map(bio, n_sorted);
+       bch2_bio_map(bio, n_sorted, btree_bytes(c));
 
        submit_bio_wait(bio);
 
fs/bcachefs/ec.c
index 01e85fae72d33aafa714da7b917089521862d23a..40acd1ec46452520f8588254a604142bb5c08130 100644
@@ -399,11 +399,10 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
                ec_bio->idx                     = idx;
 
                ec_bio->bio.bi_iter.bi_sector   = ptr->offset + buf->offset + (offset >> 9);
-               ec_bio->bio.bi_iter.bi_size     = b;
                ec_bio->bio.bi_end_io           = ec_block_endio;
                ec_bio->bio.bi_private          = cl;
 
-               bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset);
+               bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);
 
                closure_get(cl);
                percpu_ref_get(&ca->io_ref);
fs/bcachefs/fs-io.c
index 9d0cca0bdfa3b8c65743546efb53f9d65b47554e..54b071b9ca2c520cc4cfe5d6e9c549bb54731536 100644
@@ -775,7 +775,7 @@ static int bio_add_page_contig(struct bio *bio, struct page *page)
        else if (!bio_can_add_page_contig(bio, page))
                return -1;
 
-       __bio_add_page(bio, page, PAGE_SIZE, 0);
+       BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0));
        return 0;
 }
 
@@ -913,7 +913,7 @@ static void readpage_bio_extend(struct readpages_iter *iter,
                        put_page(page);
                }
 
-               __bio_add_page(bio, page, PAGE_SIZE, 0);
+               BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0));
        }
 }
 
@@ -1025,7 +1025,7 @@ void bch2_readahead(struct readahead_control *ractl)
 
                rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTOR_SHIFT;
                rbio->bio.bi_end_io = bch2_readpages_end_io;
-               __bio_add_page(&rbio->bio, page, PAGE_SIZE, 0);
+               BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
 
                bchfs_read(&trans, iter, rbio, inode->v.i_ino,
                           &readpages_iter);
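
(An explanatory aside on the fs-io.c hunks above, not part of the patch:
__bio_add_page() appends a fresh bvec unconditionally and returns void,
whereas bio_add_page() first tries to merge the page into the previous
bvec and returns the number of bytes it managed to add, so failure is
detectable.)

    /*
     * bio_add_page() returns PAGE_SIZE here on success, or 0 if the bio
     * is out of bvecs and the page could not be merged into the previous
     * one; the BUG_ON() wrappers above assert that this never happens for
     * bios sized for the worst case of one bvec per page.
     */
    BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
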
fs/bcachefs/io.c
index 8a090b0d9b03e7731baa1ec340b5dc1e004084a1..8c43791bfbb13bce3cf389ca5322df3dc5039482 100644
@@ -141,14 +141,13 @@ void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
        bio->bi_vcnt = 0;
 }
 
-static void bch2_bio_alloc_page_pool(struct bch_fs *c, struct bio *bio,
-                                   bool *using_mempool)
+static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
 {
-       struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt++];
+       struct page *page;
 
        if (likely(!*using_mempool)) {
-               bv->bv_page = alloc_page(GFP_NOIO);
-               if (unlikely(!bv->bv_page)) {
+               page = alloc_page(GFP_NOIO);
+               if (unlikely(!page)) {
                        mutex_lock(&c->bio_bounce_pages_lock);
                        *using_mempool = true;
                        goto pool_alloc;
@@ -156,57 +155,29 @@ static void bch2_bio_alloc_page_pool(struct bch_fs *c, struct bio *bio,
                }
        } else {
 pool_alloc:
-               bv->bv_page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
+               page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
        }
 
-       bv->bv_len = PAGE_SIZE;
-       bv->bv_offset = 0;
+       return page;
 }
 
 void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
-                              size_t bytes)
+                              size_t size)
 {
        bool using_mempool = false;
 
-       BUG_ON(DIV_ROUND_UP(bytes, PAGE_SIZE) > bio->bi_max_vecs);
+       while (size) {
+               struct page *page = __bio_alloc_page_pool(c, &using_mempool);
+               unsigned len = min(PAGE_SIZE, size);
 
-       bio->bi_iter.bi_size = bytes;
-
-       while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE))
-               bch2_bio_alloc_page_pool(c, bio, &using_mempool);
+               BUG_ON(!bio_add_page(bio, page, len, 0));
+               size -= len;
+       }
 
        if (using_mempool)
                mutex_unlock(&c->bio_bounce_pages_lock);
 }
 
-void bch2_bio_alloc_more_pages_pool(struct bch_fs *c, struct bio *bio,
-                                   size_t bytes)
-{
-       while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE)) {
-               struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
-
-               BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
-
-               bv->bv_page = alloc_page(GFP_NOIO);
-               if (!bv->bv_page) {
-                       /*
-                        * We already allocated from mempool, we can't allocate from it again
-                        * without freeing the pages we already allocated or else we could
-                        * deadlock:
-                        */
-                       bch2_bio_free_pages_pool(c, bio);
-                       bch2_bio_alloc_pages_pool(c, bio, bytes);
-                       return;
-               }
-
-               bv->bv_len = PAGE_SIZE;
-               bv->bv_offset = 0;
-               bio->bi_vcnt++;
-       }
-
-       bio->bi_iter.bi_size = bytes;
-}
-
 /* Writes */
 
 void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
@@ -491,8 +462,7 @@ static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
        wbio->bio.bi_opf        = src->bi_opf;
 
        if (buf) {
-               bio->bi_iter.bi_size = output_available;
-               bch2_bio_map(bio, buf);
+               bch2_bio_map(bio, buf, output_available);
                return bio;
        }
 
@@ -502,31 +472,17 @@ static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
         * We can't use mempool for more than c->sb.encoded_extent_max
         * worth of pages, but we'd like to allocate more if we can:
         */
-       while (bio->bi_iter.bi_size < output_available) {
-               unsigned len = min_t(unsigned, PAGE_SIZE,
-                                    output_available - bio->bi_iter.bi_size);
-               struct page *p;
-
-               p = alloc_page(GFP_NOIO);
-               if (!p) {
-                       unsigned pool_max =
-                               min_t(unsigned, output_available,
-                                     c->sb.encoded_extent_max << 9);
-
-                       if (bio_sectors(bio) < pool_max)
-                               bch2_bio_alloc_pages_pool(c, bio, pool_max);
-                       break;
-               }
+       bch2_bio_alloc_pages_pool(c, bio,
+                                 min_t(unsigned, output_available,
+                                       c->sb.encoded_extent_max << 9));
 
-               bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) {
-                       .bv_page        = p,
-                       .bv_len         = len,
-                       .bv_offset      = 0,
-               };
-               bio->bi_iter.bi_size += len;
-       }
+       if (bio->bi_iter.bi_size < output_available)
+               *page_alloc_failed =
+                       bch2_bio_alloc_pages(bio,
+                                            output_available -
+                                            bio->bi_iter.bi_size,
+                                            GFP_NOFS) != 0;
 
-       *page_alloc_failed = bio->bi_vcnt < pages;
        return bio;
 }
 
@@ -830,12 +786,6 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp)
        }
 
        dst->bi_iter.bi_size = total_output;
-
-       /* Free unneeded pages after compressing: */
-       if (to_wbio(dst)->bounce)
-               while (dst->bi_vcnt > DIV_ROUND_UP(dst->bi_iter.bi_size, PAGE_SIZE))
-                       mempool_free(dst->bi_io_vec[--dst->bi_vcnt].bv_page,
-                                    &c->bio_bounce_pages);
 do_write:
        /* might have done a realloc... */
 
fs/bcachefs/io.h
index 84070b674187e0154a8abe7f0bd59bc729e36adc..61c8b8b3a459527f9612d6312829d450360daf0d 100644
@@ -13,7 +13,6 @@
 
 void bch2_bio_free_pages_pool(struct bch_fs *, struct bio *);
 void bch2_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t);
-void bch2_bio_alloc_more_pages_pool(struct bch_fs *, struct bio *, size_t);
 
 #ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
 void bch2_latency_acct(struct bch_dev *, u64, int);
fs/bcachefs/journal_io.c
index 4e0c63f0076fc1079f7c834c6430080a8e5674f8..2531379e67c69b466e7b9e0228098a89ca592e51 100644
@@ -494,9 +494,8 @@ reread:
                        bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
                        bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);
 
-                       bio->bi_iter.bi_sector  = offset;
-                       bio->bi_iter.bi_size    = sectors_read << 9;
-                       bch2_bio_map(bio, buf->data);
+                       bio->bi_iter.bi_sector = offset;
+                       bch2_bio_map(bio, buf->data, sectors_read << 9);
 
                        ret = submit_bio_wait(bio);
                        kfree(bio);
@@ -1086,10 +1085,9 @@ void bch2_journal_write(struct closure *cl)
                bio_reset(bio, ca->disk_sb.bdev,
                          REQ_OP_WRITE|REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
                bio->bi_iter.bi_sector  = ptr->offset;
-               bio->bi_iter.bi_size    = sectors << 9;
                bio->bi_end_io          = journal_write_endio;
                bio->bi_private         = ca;
-               bch2_bio_map(bio, jset);
+               bch2_bio_map(bio, jset, sectors << 9);
 
                trace_journal_write(bio);
                closure_bio_submit(bio, cl);
fs/bcachefs/super-io.c
index b991238c5bd23fa5c94fe3589f4177218b7c97e0..af6fb90413e95cc7f2e27fb2b16ab9e911e69f94 100644
@@ -476,8 +476,7 @@ static const char *read_one_super(struct bch_sb_handle *sb, u64 offset)
 reread:
        bio_reset(sb->bio, sb->bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
        sb->bio->bi_iter.bi_sector = offset;
-       sb->bio->bi_iter.bi_size = PAGE_SIZE << sb->page_order;
-       bch2_bio_map(sb->bio, sb->sb);
+       bch2_bio_map(sb->bio, sb->sb, PAGE_SIZE << sb->page_order);
 
        if (submit_bio_wait(sb->bio))
                return "IO error";
@@ -582,12 +581,11 @@ int bch2_read_super(const char *path, struct bch_opts *opts,
         */
        bio_reset(sb->bio, sb->bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
        sb->bio->bi_iter.bi_sector = BCH_SB_LAYOUT_SECTOR;
-       sb->bio->bi_iter.bi_size = sizeof(struct bch_sb_layout);
        /*
         * use sb buffer to read layout, since sb buffer is page aligned but
         * layout won't be:
         */
-       bch2_bio_map(sb->bio, sb->sb);
+       bch2_bio_map(sb->bio, sb->sb, sizeof(struct bch_sb_layout));
 
        err = "IO error";
        if (submit_bio_wait(sb->bio))
@@ -653,10 +651,9 @@ static void read_back_super(struct bch_fs *c, struct bch_dev *ca)
 
        bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
        bio->bi_iter.bi_sector  = le64_to_cpu(sb->layout.sb_offset[0]);
-       bio->bi_iter.bi_size    = PAGE_SIZE;
        bio->bi_end_io          = write_super_endio;
        bio->bi_private         = ca;
-       bch2_bio_map(bio, ca->sb_read_scratch);
+       bch2_bio_map(bio, ca->sb_read_scratch, PAGE_SIZE);
 
        this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_SB],
                     bio_sectors(bio));
@@ -678,12 +675,11 @@ static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
 
        bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
        bio->bi_iter.bi_sector  = le64_to_cpu(sb->offset);
-       bio->bi_iter.bi_size    =
-               roundup((size_t) vstruct_bytes(sb),
-                       bdev_logical_block_size(ca->disk_sb.bdev));
        bio->bi_end_io          = write_super_endio;
        bio->bi_private         = ca;
-       bch2_bio_map(bio, sb);
+       bch2_bio_map(bio, sb,
+                    roundup((size_t) vstruct_bytes(sb),
+                            bdev_logical_block_size(ca->disk_sb.bdev)));
 
        this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_SB],
                     bio_sectors(bio));
fs/bcachefs/util.c
index 0ca1fb59f54df5ef29d6055f8237a12a7013a4cf..fc2ca798fbc3fd1ff51e8338bcdbd10df45941cf 100644
@@ -506,33 +506,18 @@ size_t bch2_pd_controller_print_debug(struct bch_pd_controller *pd, char *buf)
 
 /* misc: */
 
-void bch2_bio_map(struct bio *bio, void *base)
+void bch2_bio_map(struct bio *bio, void *base, size_t size)
 {
-       size_t size = bio->bi_iter.bi_size;
-       struct bio_vec *bv = bio->bi_io_vec;
-
-       BUG_ON(!bio->bi_iter.bi_size);
-       BUG_ON(bio->bi_vcnt);
-       BUG_ON(!bio->bi_max_vecs);
-
-       bv->bv_offset = base ? offset_in_page(base) : 0;
-       goto start;
-
-       for (; size; bio->bi_vcnt++, bv++) {
-               BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
-
-               bv->bv_offset   = 0;
-start:         bv->bv_len      = min_t(size_t, PAGE_SIZE - bv->bv_offset,
-                                       size);
-               if (base) {
-                       bv->bv_page = is_vmalloc_addr(base)
+       while (size) {
+               struct page *page = is_vmalloc_addr(base)
                                ? vmalloc_to_page(base)
                                : virt_to_page(base);
+               unsigned offset = offset_in_page(base);
+               unsigned len = min_t(size_t, PAGE_SIZE - offset, size);
 
-                       base += bv->bv_len;
-               }
-
-               size -= bv->bv_len;
+               BUG_ON(!bio_add_page(bio, page, len, offset));
+               size -= len;
+               base += len;
        }
 }
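
A usage sketch for the rewritten helper (bdev, buf, len and sector are
placeholders; the call pattern mirrors the journal_io.c hunk earlier in
this patch):

    /* worst case: one bvec per page touched by a possibly unaligned buffer */
    unsigned nr_bvecs = DIV_ROUND_UP(offset_in_page(buf) + len, PAGE_SIZE);
    struct bio *bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);

    bio_init(bio, bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);
    bio->bi_iter.bi_sector = sector;
    bch2_bio_map(bio, buf, len);

    int ret = submit_bio_wait(bio);
    kfree(bio);

The bio is still sized for one bvec per page; when the buffer happens to be
physically contiguous, bio_add_page() merges pages and consumes fewer bvecs,
but the BUG_ON() above will fire if the bio was undersized.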
 
fs/bcachefs/util.h
index c0910f230caf4adda651431842f990e33b1c3aec..baa236b4247c1cf38ece4c89653597596d2373cd 100644
@@ -503,7 +503,7 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
        return x;
 }
 
-void bch2_bio_map(struct bio *bio, void *base);
+void bch2_bio_map(struct bio *bio, void *base, size_t);
 int bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);
 
 static inline sector_t bdev_sectors(struct block_device *bdev)