--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ ... @@ blk_co_preadv
         return ret;
     }

+    /* throttling disk I/O */
+    if (blk->public.throttle_state) {
+        throttle_group_co_io_limits_intercept(blk, bytes, false);
+    }
+
     return bdrv_co_preadv(blk_bs(blk), offset, bytes, qiov, flags);
 }
@@ ... @@ blk_co_pwritev
         return ret;
     }

+    /* throttling disk I/O */
+    if (blk->public.throttle_state) {
+        throttle_group_co_io_limits_intercept(blk, bytes, true);
+    }
+
     if (!blk->enable_write_cache) {
         flags |= BDRV_REQ_FUA;
     }
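
For reference, after this patch the BlockBackend read path ends up looking roughly as follows. This is a reconstruction from the hunks above, not verbatim source: the function signature and the blk_check_byte_request() call that sit just outside the hunk context are assumptions.

    static int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                                          unsigned int bytes, QEMUIOVector *qiov,
                                          BdrvRequestFlags flags)
    {
        int ret = blk_check_byte_request(blk, offset, bytes); /* assumed helper */
        if (ret < 0) {
            return ret;
        }

        /* throttling disk I/O: account this read against the group's limits
         * before issuing it; is_write == false selects the read bucket */
        if (blk->public.throttle_state) {
            throttle_group_co_io_limits_intercept(blk, bytes, false);
        }

        return bdrv_co_preadv(blk_bs(blk), offset, bytes, qiov, flags);
    }

The write path has the same shape, except that it passes is_write == true and then applies the BDRV_REQ_FUA handling shown above.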
--- a/block/io.c
+++ b/block/io.c
@@ ... @@ bdrv_co_preadv
         flags |= BDRV_REQ_COPY_ON_READ;
     }

-    /* throttling disk I/O */
-    if (bs->blk && blk_get_public(bs->blk)->throttle_state) {
-        throttle_group_co_io_limits_intercept(bs, bytes, false);
-    }
-
     /* Align read if necessary by padding qiov */
     if (offset & (align - 1)) {
         head_buf = qemu_blockalign(bs, align);
@@ ... @@ bdrv_co_pwritev
         return ret;
     }

-    /* throttling disk I/O */
-    if (bs->blk && blk_get_public(bs->blk)->throttle_state) {
-        throttle_group_co_io_limits_intercept(bs, bytes, true);
-    }
-
     /*
      * Align write if necessary by performing a read-modify-write cycle.
      * Pad qiov with the read parts and be sure to have a tracked request not
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ ... @@ throttle_group_co_io_limits_intercept
  * if necessary, and schedule the next request using a round robin
  * algorithm.
  *
- * @bs:        the current BlockDriverState
+ * @blk:       the current BlockBackend
  * @bytes:     the number of bytes for this I/O
  * @is_write:  the type of operation (read/write)
  */
-void coroutine_fn throttle_group_co_io_limits_intercept(BlockDriverState *bs,
+void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
                                                         unsigned int bytes,
                                                         bool is_write)
 {
     bool must_wait;
     BlockBackend *token;
-    BlockBackend *blk = bs->blk;
     BlockBackendPublic *blkp = blk_get_public(blk);
     ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);

     qemu_mutex_lock(&tg->lock);
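
Note that the function keeps its coroutine_fn marker: when the group is over its limit, the intercept suspends the calling coroutine until the scheduler wakes it again. As a minimal sketch of a caller under the new signature (the function example_co_write and its body are hypothetical; only throttle_group_co_io_limits_intercept() and blk_get_public() come from this patch):

    /* Hypothetical caller, for illustration only: must run in coroutine
     * context, because the intercept can yield while waiting for budget. */
    static void coroutine_fn example_co_write(BlockBackend *blk,
                                              unsigned int bytes)
    {
        BlockBackendPublic *blkp = blk_get_public(blk);

        if (blkp->throttle_state) {
            /* May queue the request and yield until the group's write
             * limits allow another 'bytes'-sized operation. */
            throttle_group_co_io_limits_intercept(blk, bytes, true);
        }

        /* ... issue the actual write here ... */
    }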
--- a/include/block/throttle-groups.h
+++ b/include/block/throttle-groups.h
@@ ... @@
 void throttle_group_unregister_blk(BlockBackend *blk);
 void throttle_group_restart_blk(BlockBackend *blk);
-void coroutine_fn throttle_group_co_io_limits_intercept(BlockDriverState *bs,
+void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
                                                         unsigned int bytes,
                                                         bool is_write);