struct readpages_iter *readpages_iter)
{
struct bch_fs *c = trans->c;
- struct bio *bio = &rbio->bio;
int flags = BCH_READ_RETRY_IF_STALE|
BCH_READ_MAY_PROMOTE;
while (1) {
BKEY_PADDED(k) tmp;
struct bkey_s_c k;
- unsigned bytes;
+ unsigned bytes, offset_into_extent;
- bch2_btree_iter_set_pos(iter, POS(inum, bio->bi_iter.bi_sector));
+ bch2_btree_iter_set_pos(iter,
+ POS(inum, rbio->bio.bi_iter.bi_sector));
k = bch2_btree_iter_peek_slot(iter);
BUG_ON(!k.k);
if (IS_ERR(k.k)) {
int ret = btree_iter_err(iter);
BUG_ON(!ret);
- bcache_io_error(c, bio, "btree IO error %i", ret);
- bio_endio(bio);
+ bcache_io_error(c, &rbio->bio, "btree IO error %i", ret);
+ bio_endio(&rbio->bio);
return;
}
bkey_reassemble(&tmp.k, k);
bch2_trans_unlock(trans);
k = bkey_i_to_s_c(&tmp.k);
+ offset_into_extent = iter->pos.offset -
+ bkey_start_offset(k.k);
+
if (readpages_iter) {
bool want_full_extent = false;
readpage_bio_extend(readpages_iter,
- bio, k.k->p.offset,
+ &rbio->bio, k.k->p.offset,
want_full_extent);
}
- bytes = (min_t(u64, k.k->p.offset, bio_end_sector(bio)) -
- bio->bi_iter.bi_sector) << 9;
- swap(bio->bi_iter.bi_size, bytes);
+ bytes = min_t(unsigned, bio_sectors(&rbio->bio),
+ (k.k->size - offset_into_extent)) << 9;
+ swap(rbio->bio.bi_iter.bi_size, bytes);
- if (bytes == bio->bi_iter.bi_size)
+ if (rbio->bio.bi_iter.bi_size == bytes)
flags |= BCH_READ_LAST_FRAGMENT;
if (bkey_extent_is_allocation(k.k))
- bch2_add_page_sectors(bio, k);
+ bch2_add_page_sectors(&rbio->bio, k);
- bch2_read_extent(c, rbio, k, flags);
+ bch2_read_extent(c, rbio, k, offset_into_extent, flags);
if (flags & BCH_READ_LAST_FRAGMENT)
return;
- swap(bio->bi_iter.bi_size, bytes);
- bio_advance(bio, bytes);
+ swap(rbio->bio.bi_iter.bi_size, bytes);
+ bio_advance(&rbio->bio, bytes);
}
}
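
For readers checking the new arithmetic: each pass of the loop above now computes where the bio's position falls inside the extent, then clamps the fragment to whatever is left of that extent. A minimal standalone sketch with hypothetical numbers (the stub variables stand in for the bkey/bio fields; only the expressions mirror the patch):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long long bkey_start = 100;	/* bkey_start_offset(k.k) */
	unsigned size = 64;			/* k.k->size, in sectors */
	unsigned long long pos = 120;		/* iter->pos.offset */
	unsigned bio_sectors = 32;		/* bio_sectors(&rbio->bio) */

	/* offset_into_extent = iter->pos.offset - bkey_start_offset(k.k) */
	unsigned offset_into_extent = pos - bkey_start;		/* 20 */

	/* bytes = min(bio sectors, sectors left in this extent) << 9 */
	unsigned left = size - offset_into_extent;		/* 44 */
	unsigned frag = bio_sectors < left ? bio_sectors : left;/* 32 */
	unsigned bytes = frag << 9;				/* 16384 */

	assert(offset_into_extent + frag <= size);	/* the new BUG_ON */
	printf("offset=%u bytes=%u\n", offset_into_extent, bytes);
	return 0;
}
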
goto out;
}
- ret = __bch2_read_extent(c, rbio, bvec_iter, k, failed, flags);
+ ret = __bch2_read_extent(c, rbio, bvec_iter, k, 0, failed, flags);
if (ret == READ_RETRY)
goto retry;
if (ret)
POS(inode, bvec_iter.bi_sector),
BTREE_ITER_SLOTS, k, ret) {
BKEY_PADDED(k) tmp;
- unsigned bytes;
+ unsigned bytes, offset_into_extent;
bkey_reassemble(&tmp.k, k);
k = bkey_i_to_s_c(&tmp.k);
+
bch2_trans_unlock(&trans);
- bytes = min_t(unsigned, bvec_iter.bi_size,
- (k.k->p.offset - bvec_iter.bi_sector) << 9);
+ offset_into_extent = iter->pos.offset -
+ bkey_start_offset(k.k);
+
+ bytes = min_t(unsigned, bvec_iter_sectors(bvec_iter),
+ (k.k->size - offset_into_extent)) << 9;
swap(bvec_iter.bi_size, bytes);
- ret = __bch2_read_extent(c, rbio, bvec_iter, k, failed, flags);
+ ret = __bch2_read_extent(c, rbio, bvec_iter, k,
+ offset_into_extent, failed, flags);
switch (ret) {
case READ_RETRY:
goto retry;
goto nodecode;
/* Adjust crc to point to subset of data we want: */
- crc.offset += rbio->bvec_iter.bi_sector - rbio->pos.offset;
+ crc.offset += rbio->offset_into_extent;
crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
if (crc.compression_type != BCH_COMPRESSION_NONE) {
int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
struct bvec_iter iter, struct bkey_s_c k,
+ unsigned offset_into_extent,
struct bch_io_failures *failed, unsigned flags)
{
struct extent_ptr_decoded pick;
if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS)
goto hole;
- iter.bi_sector = pos.offset;
iter.bi_size = pick.crc.compressed_size << 9;
goto noclone;
}
if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
flags |= BCH_READ_MUST_BOUNCE;
- EBUG_ON(bkey_start_offset(k.k) > iter.bi_sector ||
- k.k->p.offset < bvec_iter_end_sector(iter));
+ BUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
EBUG_ON(pick.crc.compression_type != BCH_COMPRESSION_NONE ||
(pick.crc.csum_type != BCH_CSUM_NONE &&
(bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
bvec_iter_sectors(iter) != pick.crc.live_size ||
pick.crc.offset ||
- iter.bi_sector != pos.offset)));
+ offset_into_extent)));
+ pos.offset += offset_into_extent;
pick.ptr.offset += pick.crc.offset +
- (iter.bi_sector - pos.offset);
+ offset_into_extent;
pick.crc.compressed_size = bvec_iter_sectors(iter);
pick.crc.uncompressed_size = bvec_iter_sectors(iter);
pick.crc.offset = 0;
pick.crc.live_size = bvec_iter_sectors(iter);
- pos.offset = iter.bi_sector;
+ offset_into_extent = 0;
}
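
The rebasing above is the heart of the change: instead of recovering the offset from iter.bi_sector - pos.offset, the caller hands it in. A hedged sketch of why the two formulas agree, assuming pos held bkey_start_pos(k.k) on entry, as the removed lines imply (all values hypothetical):

#include <assert.h>

int main(void)
{
	unsigned long long bkey_start = 100;	/* pos.offset on entry */
	unsigned long long bi_sector = 120;	/* iter.bi_sector */
	unsigned long long ptr_offset = 5000;	/* pick.ptr.offset on disk */
	unsigned crc_offset = 4;		/* pick.crc.offset */

	/* Removed form: pick.crc.offset + (iter.bi_sector - pos.offset) */
	unsigned long long old = ptr_offset + crc_offset + (bi_sector - bkey_start);

	/* Added form: the caller-supplied offset is the same quantity */
	unsigned offset_into_extent = bi_sector - bkey_start;	/* 20 */
	unsigned long long new = ptr_offset + crc_offset + offset_into_extent;

	assert(old == new);	/* 5024 either way */
	return 0;
}
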
if (rbio) {
else
rbio->end_io = orig->bio.bi_end_io;
rbio->bvec_iter = iter;
+ rbio->offset_into_extent = offset_into_extent;
rbio->flags = flags;
rbio->have_ioref = pick_ret > 0 && bch2_dev_get_ioref(ca, READ);
rbio->narrow_crcs = narrow_crcs;
POS(inode, rbio->bio.bi_iter.bi_sector),
BTREE_ITER_SLOTS, k, ret) {
BKEY_PADDED(k) tmp;
- unsigned bytes;
+ unsigned bytes, offset_into_extent;
/*
* Unlock the iterator while the btree node's lock is still in
* cache, before doing the IO:
*/
bkey_reassemble(&tmp.k, k);
k = bkey_i_to_s_c(&tmp.k);
bch2_trans_unlock(&trans);
- bytes = min_t(unsigned, rbio->bio.bi_iter.bi_size,
- (k.k->p.offset - rbio->bio.bi_iter.bi_sector) << 9);
+ offset_into_extent = iter->pos.offset -
+ bkey_start_offset(k.k);
+
+ bytes = min_t(unsigned, bio_sectors(&rbio->bio),
+ (k.k->size - offset_into_extent)) << 9;
swap(rbio->bio.bi_iter.bi_size, bytes);
if (rbio->bio.bi_iter.bi_size == bytes)
flags |= BCH_READ_LAST_FRAGMENT;
- bch2_read_extent(c, rbio, k, flags);
+ bch2_read_extent(c, rbio, k, offset_into_extent, flags);
if (flags & BCH_READ_LAST_FRAGMENT)
return;
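
The swap()/bio_advance() dance repeated in both read loops is easier to see in isolation. A toy model, with a bio reduced to a byte count and a sector cursor and fragment sizes made up: each pass exposes one fragment via swap(), submits it, then swaps back and advances.

#include <stdio.h>

int main(void)
{
	unsigned bi_size = 96 << 9;		/* bytes left to read */
	unsigned long long sector = 0;		/* bi_iter.bi_sector */
	unsigned frags[] = { 32 << 9, 64 << 9 };/* per-extent fragments */

	for (int i = 0; i < 2; i++) {
		unsigned bytes = frags[i], tmp;

		tmp = bi_size; bi_size = bytes; bytes = tmp;	/* swap() */

		/* bi_size == bytes: this fragment is all that remains */
		int last = (bi_size == bytes);	/* BCH_READ_LAST_FRAGMENT */
		printf("submit %u bytes at sector %llu%s\n",
		       bi_size, sector, last ? " (last)" : "");
		if (last)
			return 0;

		tmp = bi_size; bi_size = bytes; bytes = tmp;	/* swap back */
		bi_size -= frags[i];		/* bio_advance() */
		sector += frags[i] >> 9;
	}
	return 0;
}
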
struct cache_promote_op;
struct extent_ptr_decoded;
-int __bch2_read_extent(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
- struct bkey_s_c, struct bch_io_failures *, unsigned);
-void bch2_read(struct bch_fs *, struct bch_read_bio *, u64);
-
enum bch_read_flags {
BCH_READ_RETRY_IF_STALE = 1 << 0,
BCH_READ_MAY_PROMOTE = 1 << 1,
BCH_READ_IN_RETRY = 1 << 7,
};
+int __bch2_read_extent(struct bch_fs *, struct bch_read_bio *,
+ struct bvec_iter, struct bkey_s_c, unsigned,
+ struct bch_io_failures *, unsigned);
+
static inline void bch2_read_extent(struct bch_fs *c,
struct bch_read_bio *rbio,
struct bkey_s_c k,
+ unsigned offset_into_extent,
unsigned flags)
{
- __bch2_read_extent(c, rbio, rbio->bio.bi_iter, k, NULL, flags);
+ __bch2_read_extent(c, rbio, rbio->bio.bi_iter, k,
+ offset_into_extent, NULL, flags);
}
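
Note that the inline wrapper simply forwards the new parameter: callers reading a whole extent from its start pass 0, as the nodecode retry path above does, while the page-cache and normal read loops pass the offset they computed from iter->pos.
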
+void bch2_read(struct bch_fs *, struct bch_read_bio *, u64);
+
static inline struct bch_read_bio *rbio_init(struct bio *bio,
struct bch_io_opts opts)
{