goto fail;
}
+ /* Do not leak our private flag into the block layer. */
+ bio->bi_opf &= ~REQ_BTRFS_ONE_ORDERED;
+
if (!bioc) {
/* Single mirror read/write fast path */
bbio->mirror_num = mirror_num;
bbio->end_io(bbio);
}
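
REQ_BTRFS_ONE_ORDERED aliases REQ_DRV, the bit the block layer sets aside for driver-private use, so it must never escape btrfs; the hunk above strips it just before the bio leaves the filesystem. A minimal stand-alone sketch (toy bit values, not the real kernel constants) of why masking the flag out is safe for everything else in bi_opf:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Toy stand-ins for the blk_opf_t encoding: op in the low bits,
	 * flags above them. Values are illustrative only. */
	typedef uint32_t blk_opf_t;
	#define TOY_OP_MASK	0xffu		/* models REQ_OP_MASK */
	#define TOY_OP_WRITE	0x01u		/* models REQ_OP_WRITE */
	#define TOY_FLAG_SYNC	(1u << 8)	/* models a generic request flag */
	#define TOY_FLAG_PRIV	(1u << 9)	/* models REQ_DRV / REQ_BTRFS_ONE_ORDERED */

	int main(void)
	{
		blk_opf_t opf = TOY_OP_WRITE | TOY_FLAG_SYNC | TOY_FLAG_PRIV;

		/* What the hunk above does: drop only our private bit. */
		opf &= ~TOY_FLAG_PRIV;

		assert((opf & TOY_OP_MASK) == TOY_OP_WRITE);	/* op survives */
		assert(opf & TOY_FLAG_SYNC);			/* other flags survive */
		assert(!(opf & TOY_FLAG_PRIV));			/* private bit is gone */
		printf("opf after mask: 0x%x\n", (unsigned int)opf);
		return 0;
	}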
+/* Bio only refers to one ordered extent. */
+#define REQ_BTRFS_ONE_ORDERED REQ_DRV
+
void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int mirror_num);
int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
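
Because the new define is a request flag rather than an operation, a variable carrying it alongside REQ_OP_WRITE or REQ_OP_ZONE_APPEND has to be blk_opf_t, which packs the op into the low bits and flags above them; an enum req_op cannot hold the combination. The bio_op initializer in the next hunk relies on exactly this. A small compilable model (toy constants, not the kernel's):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Toy blk_opf_t encoding; values are illustrative only. */
	typedef uint32_t blk_opf_t;
	#define TOY_OP_MASK		0xffu
	#define TOY_OP_WRITE		0x01u
	#define TOY_OP_ZONE_APPEND	0x0du
	#define TOY_ONE_ORDERED		(1u << 30)	/* models REQ_BTRFS_ONE_ORDERED */

	int main(void)
	{
		int use_append = 0;

		/* Mirrors the bio_op initializer in the hunk below. */
		const blk_opf_t bio_op = TOY_ONE_ORDERED |
			(use_append ? TOY_OP_ZONE_APPEND : TOY_OP_WRITE);

		assert((bio_op & TOY_OP_MASK) == TOY_OP_WRITE);	/* op still extractable */
		assert(bio_op & TOY_ONE_ORDERED);		/* flag rides along */
		printf("bio_op = 0x%x\n", (unsigned int)bio_op);
		return 0;
	}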
blk_status_t ret = BLK_STS_OK;
int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
const bool use_append = btrfs_use_zone_append(inode, disk_start);
- const enum req_op bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;
+ const blk_opf_t bio_op = REQ_BTRFS_ONE_ORDERED |
+ (use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE);
ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
IS_ALIGNED(len, fs_info->sectorsize));
ret = errno_to_blk_status(PTR_ERR(bio));
break;
}
+ btrfs_bio(bio)->file_offset = start;
if (blkcg_css)
bio->bi_opf |= REQ_CGROUP_PUNT;
}
if (submit) {
if (!skip_sum) {
- ret = btrfs_csum_one_bio(inode, bio, start, true);
+ ret = btrfs_csum_one_bio(btrfs_bio(bio));
if (ret) {
btrfs_bio_end_io(btrfs_bio(bio), ret);
break;
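
With bbio->file_offset recorded once at bio build time, the checksum helper (see the csum hunk below) no longer needs page_offset() to find each block's file position: compressed write bios are contiguous in the file, so a single running offset is enough. A stand-alone sketch of that derivation (sizes illustrative):

	#include <stdio.h>

	/* Toy model: per-block file offsets fall out of one running counter.
	 * The real code steps by fs_info->sectorsize per checksummed block. */
	int main(void)
	{
		unsigned long long offset = 1048576;	/* models bbio->file_offset */
		const unsigned int sectorsize = 4096;
		const unsigned int seg_len[] = { 8192, 4096, 12288 };	/* bvec lengths */

		for (unsigned int i = 0; i < sizeof(seg_len) / sizeof(seg_len[0]); i++) {
			unsigned int blockcount = seg_len[i] / sectorsize;

			for (unsigned int b = 0; b < blockcount; b++) {
				printf("csum block at file offset %llu\n", offset);
				offset += sectorsize;	/* advance, as the csum loop does */
			}
		}
		return 0;
	}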
ret = btree_csum_one_bio(async->bio);
break;
case WQ_SUBMIT_DATA:
- ret = btrfs_csum_one_bio(async->inode, async->bio, (u64)-1, false);
- break;
case WQ_SUBMIT_DATA_DIO:
- ret = btrfs_csum_one_bio(async->inode, async->bio,
- async->dio_file_offset, false);
+ ret = btrfs_csum_one_bio(btrfs_bio(async->bio));
break;
default:
/* Can't happen so return something that would prevent the IO. */
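
Dropping the break after WQ_SUBMIT_DATA above is intentional: both data cases now call the same helper, so the labels are grouped over one shared body, which needs no fallthrough annotation. A trivial compilable model of the consolidated switch (enum names mirror the patch, values illustrative):

	#include <stdio.h>

	/* Toy model: two adjacent case labels, one shared body. */
	enum wq_submit_type { WQ_SUBMIT_METADATA, WQ_SUBMIT_DATA, WQ_SUBMIT_DATA_DIO };

	static const char *submit(enum wq_submit_type type)
	{
		switch (type) {
		case WQ_SUBMIT_METADATA:
			return "btree csum";
		case WQ_SUBMIT_DATA:
		case WQ_SUBMIT_DATA_DIO:
			/* Both paths call the same helper now. */
			return "btrfs_csum_one_bio";
		default:
			return "error";
		}
	}

	int main(void)
	{
		printf("%s\n", submit(WQ_SUBMIT_DATA));
		printf("%s\n", submit(WQ_SUBMIT_DATA_DIO));
		return 0;
	}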
/*
* Calculate checksums of the data contained inside a bio.
- *
- * @inode: Owner of the data inside the bio
- * @bio: Contains the data to be checksummed
- * @offset: If (u64)-1, @bio may contain discontiguous bio vecs, so the
- * file offsets are determined from the page offsets in the bio.
- * Otherwise, this is the starting file offset of the bio vecs in
- * @bio, which must be contiguous.
- * @one_ordered: If true, @bio only refers to one ordered extent.
*/
-blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
- u64 offset, bool one_ordered)
+blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio)
{
+ struct btrfs_inode *inode = bbio->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
+ struct bio *bio = &bbio->bio;
+ u64 offset = bbio->file_offset;
struct btrfs_ordered_sum *sums;
struct btrfs_ordered_extent *ordered = NULL;
- const bool use_page_offsets = (offset == (u64)-1);
char *data;
struct bvec_iter iter;
struct bio_vec bvec;
shash->tfm = fs_info->csum_shash;
bio_for_each_segment(bvec, bio, iter) {
- if (use_page_offsets)
- offset = page_offset(bvec.bv_page) + bvec.bv_offset;
-
if (!ordered) {
ordered = btrfs_lookup_ordered_extent(inode, offset);
/*
- 1);
for (i = 0; i < blockcount; i++) {
- if (!one_ordered &&
+ if (!(bio->bi_opf & REQ_BTRFS_ONE_ORDERED) &&
!in_range(offset, ordered->file_offset,
ordered->num_bytes)) {
unsigned long bytes_left;
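
The REQ_BTRFS_ONE_ORDERED test above is the payoff: when the flag is set, the bio is known to map to a single ordered extent, so the per-block in_range() check and extent switching can be skipped entirely. A stand-alone model of the unflagged slow path (toy structures, not the btrfs ones):

	#include <stdio.h>

	/* Toy model: switch ordered extents when the offset walks out of
	 * the current one, as the slow path must do without the flag. */
	struct toy_ordered { unsigned long long file_offset, num_bytes; };

	static int in_range(unsigned long long off, unsigned long long start,
			    unsigned long long len)
	{
		return off >= start && off < start + len;
	}

	int main(void)
	{
		const struct toy_ordered extents[] = { { 0, 8192 }, { 8192, 8192 } };
		const struct toy_ordered *ordered = &extents[0];
		const unsigned int sectorsize = 4096;

		for (unsigned long long offset = 0; offset < 16384; offset += sectorsize) {
			if (!in_range(offset, ordered->file_offset, ordered->num_bytes)) {
				ordered++;	/* models looking up the next ordered extent */
				printf("switched ordered extent at offset %llu\n", offset);
			}
			printf("csum block %llu -> extent @%llu\n",
			       offset, ordered->file_offset);
		}
		return 0;
	}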
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_ordered_sum *sums);
-blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
- u64 offset, bool one_ordered);
+blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio);
int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
struct list_head *list, int search_commit,
bool nowait);
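
The prototype change means every caller converts with btrfs_bio(bio), which recovers the containing struct btrfs_bio from the embedded bio via container_of; inode and file_offset then travel with the bio instead of being passed as separate arguments. A compilable illustration of that pattern (fields are illustrative, not the full btrfs_bio layout):

	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Toy model of the embedded-bio pattern. */
	struct bio {
		unsigned int bi_opf;
	};

	struct btrfs_bio {
		unsigned long long file_offset;	/* models bbio->file_offset */
		struct bio bio;			/* the bio is embedded, not pointed to */
	};

	/* Models the real accessor: container_of(bio, struct btrfs_bio, bio). */
	static struct btrfs_bio *btrfs_bio(struct bio *bio)
	{
		return (struct btrfs_bio *)((char *)bio - offsetof(struct btrfs_bio, bio));
	}

	int main(void)
	{
		struct btrfs_bio bbio = { .file_offset = 4096 };
		struct bio *bio = &bbio.bio;

		assert(btrfs_bio(bio) == &bbio);	/* round-trips to the container */
		printf("file_offset = %llu\n", btrfs_bio(bio)->file_offset);
		return 0;
	}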
btrfs_wq_submit_bio(inode, bio, mirror_num, 0, WQ_SUBMIT_DATA))
return;
- ret = btrfs_csum_one_bio(inode, bio, (u64)-1, false);
+ ret = btrfs_csum_one_bio(btrfs_bio(bio));
if (ret) {
btrfs_bio_end_io(btrfs_bio(bio), ret);
return;
* If we aren't doing async submit, calculate the csum of the
* bio now.
*/
- ret = btrfs_csum_one_bio(inode, bio, file_offset, false);
+ ret = btrfs_csum_one_bio(btrfs_bio(bio));
if (ret) {
btrfs_bio_end_io(btrfs_bio(bio), ret);
return;