return -EACCES;
}
- ret = bdrv_block_status(child->bs, offset, cur_bytes, pnum, map, file);
+ ret = bdrv_co_block_status(child->bs, offset, cur_bytes, pnum, map, file);
if (child == s->target) {
/*
* We refer to s->target only for areas that we've written to it.
local_flags = flags;
/* In case of failure, try to copy-on-read anyway */
- ret = bdrv_is_allocated(bs->file->bs, offset, bytes, &n);
+ ret = bdrv_co_is_allocated(bs->file->bs, offset, bytes, &n);
if (ret <= 0) {
- ret = bdrv_is_allocated_above(bdrv_backing_chain_next(bs->file->bs),
- state->bottom_bs, true, offset,
- n, &n);
+ ret = bdrv_co_is_allocated_above(bdrv_backing_chain_next(bs->file->bs),
+ state->bottom_bs, true, offset,
+ n, &n);
if (ret > 0 || ret < 0) {
local_flags |= BDRV_REQ_COPY_ON_READ;
}
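
The copy-on-read hunk above relies on the shared return convention of bdrv_co_is_allocated() and bdrv_co_is_allocated_above(): a positive value means (a prefix of) the range is allocated in the queried layers, zero means it is not, and a negative value is -errno; that is why "ret > 0 || ret < 0" requests copy-on-read for both the allocated and the unknown case. A minimal sketch of that convention, not part of the patch (the helper example_should_cor() is hypothetical):

static bool coroutine_fn example_should_cor(BlockDriverState *top,
                                            BlockDriverState *base,
                                            int64_t offset, int64_t bytes)
{
    int64_t n;
    /* true == also consider @base itself when looking for allocated data */
    int ret = bdrv_co_is_allocated_above(top, base, true, offset, bytes, &n);

    /*
     * ret > 0: data for the head of the range is allocated in [top, base],
     *          so copying it on read is useful;
     * ret < 0: allocation status unknown, copy on read anyway, as the
     *          filter above does ("In case of failure, try to copy-on-read
     *          anyway");
     * ret == 0: nothing allocated in the first @n bytes, skip the copy.
     */
    return ret != 0;
}
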
ret = 1; /* "already allocated", so nothing will be copied */
pnum = MIN(align_bytes, max_transfer);
} else {
- ret = bdrv_is_allocated(bs, align_offset,
- MIN(align_bytes, max_transfer), &pnum);
+ ret = bdrv_co_is_allocated(bs, align_offset,
+ MIN(align_bytes, max_transfer), &pnum);
if (ret < 0) {
/*
* Safe to treat errors in querying allocation as if
/* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
flags &= ~BDRV_REQ_COPY_ON_READ;
- ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
+ ret = bdrv_co_is_allocated(bs, offset, bytes, &pnum);
if (ret < 0) {
goto out;
}
assert(!(offset % s->granularity));
WITH_GRAPH_RDLOCK_GUARD() {
- ret = bdrv_block_status_above(source, NULL, offset,
- nb_chunks * s->granularity,
- &io_bytes, NULL, NULL);
+ ret = bdrv_co_block_status_above(source, NULL, offset,
+ nb_chunks * s->granularity,
+ &io_bytes, NULL, NULL);
}
if (ret < 0) {
io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
}
WITH_GRAPH_RDLOCK_GUARD() {
- ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset,
- bytes, &count);
+ ret = bdrv_co_is_allocated_above(bs, s->base_overlay, true, offset,
+ bytes, &count);
}
if (ret < 0) {
return ret;
}
-static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
+static bool coroutine_fn GRAPH_RDLOCK
+is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
int64_t nr;
int res;
* backing file. So, we need a loop.
*/
do {
- res = bdrv_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL);
+ res = bdrv_co_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL);
offset += nr;
bytes -= nr;
} while (res >= 0 && (res & BDRV_BLOCK_ZERO) && nr && bytes);
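
Besides the coroutine conversion, the qcow2 hunk above marks is_zero() GRAPH_RDLOCK, so every caller now has to hold the block-graph read lock. A sketch of what a conforming caller looks like, using the same WITH_GRAPH_RDLOCK_GUARD() pattern as the mirror and stream hunks in this patch; the function example_probe_zero() and its -ENOTSUP policy are hypothetical:

static int coroutine_fn example_probe_zero(BlockDriverState *bs,
                                           int64_t offset, int64_t bytes)
{
    bool zero;

    WITH_GRAPH_RDLOCK_GUARD() {
        /* is_zero() is GRAPH_RDLOCK, so it must run under the rdlock */
        zero = is_zero(bs, offset, bytes);
    }

    return zero ? 0 : -ENOTSUP;
}
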
while (remaining_sectors > 0) {
int64_t count;
- ret = bdrv_is_allocated_above(top->bs, base->bs, false,
- sector_num * BDRV_SECTOR_SIZE,
- remaining_sectors * BDRV_SECTOR_SIZE,
- &count);
+ ret = bdrv_co_is_allocated_above(top->bs, base->bs, false,
+ sector_num * BDRV_SECTOR_SIZE,
+ remaining_sectors * BDRV_SECTOR_SIZE,
+ &count);
if (ret < 0) {
goto out1;
}
copy = false;
WITH_GRAPH_RDLOCK_GUARD() {
- ret = bdrv_is_allocated(unfiltered_bs, offset, STREAM_CHUNK, &n);
+ ret = bdrv_co_is_allocated(unfiltered_bs, offset, STREAM_CHUNK, &n);
if (ret == 1) {
/* Allocated in the top, no need to copy. */
} else if (ret >= 0) {
* Copy if allocated in the intermediate images. Limit to the
* known-unallocated area [offset, offset+n*BDRV_SECTOR_SIZE).
*/
- ret = bdrv_is_allocated_above(bdrv_cow_bs(unfiltered_bs),
- s->base_overlay, true,
- offset, n, &n);
+ ret = bdrv_co_is_allocated_above(bdrv_cow_bs(unfiltered_bs),
+ s->base_overlay, true,
+ offset, n, &n);
/* Finish early if end of backing file has been reached */
if (ret == 0 && n == 0) {
n = len - offset;
if (s->qcow) {
int64_t n;
int ret;
- ret = bdrv_is_allocated(s->qcow->bs, sector_num * BDRV_SECTOR_SIZE,
- (nb_sectors - i) * BDRV_SECTOR_SIZE, &n);
+ ret = bdrv_co_is_allocated(s->qcow->bs, sector_num * BDRV_SECTOR_SIZE,
+ (nb_sectors - i) * BDRV_SECTOR_SIZE, &n);
if (ret < 0) {
return ret;
}
}
for (i = 0; !was_modified && i < s->sectors_per_cluster; i++) {
- was_modified = bdrv_is_allocated(s->qcow->bs,
- (cluster2sector(s, cluster_num) +
- i) * BDRV_SECTOR_SIZE,
- BDRV_SECTOR_SIZE, NULL);
+ was_modified = bdrv_co_is_allocated(s->qcow->bs,
+ (cluster2sector(s, cluster_num) +
+ i) * BDRV_SECTOR_SIZE,
+ BDRV_SECTOR_SIZE, NULL);
}
/*
for (i = 0; i < s->sectors_per_cluster; i++) {
int res;
- res = bdrv_is_allocated(s->qcow->bs,
- (offs + i) * BDRV_SECTOR_SIZE,
- BDRV_SECTOR_SIZE, NULL);
+ res = bdrv_co_is_allocated(s->qcow->bs,
+ (offs + i) * BDRV_SECTOR_SIZE,
+ BDRV_SECTOR_SIZE, NULL);
if (res < 0) {
return -1;
}
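
Taken together, the conversion rule in this patch is mechanical: code that already runs in coroutine context stops going through the bdrv_block_status()/bdrv_is_allocated() wrappers and calls the bdrv_co_*() variants directly, with unchanged arguments and return values. A closing sketch of the pattern, not taken from the patch (example_count_allocated() is hypothetical); it walks a range with bdrv_co_is_allocated() under the graph read lock, the way the stream hunk does:

static int64_t coroutine_fn example_count_allocated(BlockDriverState *bs,
                                                    int64_t len)
{
    int64_t offset = 0, total = 0;

    while (offset < len) {
        int64_t pnum = 0;
        int ret;

        WITH_GRAPH_RDLOCK_GUARD() {
            ret = bdrv_co_is_allocated(bs, offset, len - offset, &pnum);
        }
        if (ret < 0) {
            return ret;     /* -errno from the block layer */
        }
        if (pnum == 0) {
            break;          /* end of the image, nothing more to scan */
        }
        if (ret) {
            total += pnum;  /* first pnum bytes are allocated */
        }
        offset += pnum;
    }

    return total;
}
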