/*
* Recursion phase: go through all nodes of the graph.
* Take care of checking that all nodes support changing AioContext
- * and drain them, builing a linear list of callbacks to run if everything
+ * and drain them, building a linear list of callbacks to run if everything
* is successful (the transaction itself).
*/
tran = tran_new();
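
For context, here is a minimal standalone sketch of the two-phase pattern this comment describes. The names (TranAction, tran_add, tran_finalize) are illustrative stand-ins, not QEMU's actual Transaction API: the traversal phase only records callbacks, and the caller later commits or aborts them all at once.

#include <stdbool.h>
#include <stdlib.h>

/* Illustrative stand-ins for a transaction of deferred callbacks. */
typedef struct TranAction {
    void (*commit)(void *opaque);
    void (*abort)(void *opaque);
    void *opaque;
    struct TranAction *next;
} TranAction;

typedef struct Transaction {
    TranAction *head;           /* prepended, so actions run in LIFO order */
} Transaction;

static Transaction *tran_new(void)
{
    return calloc(1, sizeof(Transaction));
}

/* Recursion phase: instead of mutating state, each step only appends the
 * callbacks that would apply or undo it. */
static void tran_add(Transaction *tran, void (*commit)(void *),
                     void (*abort_cb)(void *), void *opaque)
{
    TranAction *a = malloc(sizeof(*a));
    a->commit = commit;
    a->abort = abort_cb;
    a->opaque = opaque;
    a->next = tran->head;
    tran->head = a;
}

/* Run the linear list built above: all commits on success, all aborts on
 * failure, so the graph is never left half-modified. */
static void tran_finalize(Transaction *tran, bool success)
{
    for (TranAction *a = tran->head, *next; a; a = next) {
        next = a->next;
        if (success && a->commit) {
            a->commit(a->opaque);
        } else if (!success && a->abort) {
            a->abort(a->opaque);
        }
        free(a);
    }
    free(tran);
}
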
QLIST_ENTRY(BlockCopyCallState) list;
/*
- * Fields that report information about return values and erros.
+ * Fields that report information about return values and errors.
* Protected by lock in BlockCopyState.
*/
bool error_is_read;
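
A hedged sketch of the locking discipline stated above, with hypothetical names (the real BlockCopyState/BlockCopyCallState carry many more fields): the result fields live in the per-call state, but writers must hold the lock of the shared state.

#include <pthread.h>
#include <stdbool.h>

typedef struct CopyState {
    pthread_mutex_t lock;       /* guards result fields in every call state */
} CopyState;

typedef struct CallState {
    CopyState *s;
    bool error_is_read;         /* protected by s->lock */
    int ret;                    /* protected by s->lock */
} CallState;

static void call_state_set_error(CallState *cs, int ret, bool error_is_read)
{
    pthread_mutex_lock(&cs->s->lock);
    /* Keep only the first error, as block-copy-style code typically does. */
    if (cs->ret == 0) {
        cs->ret = ret;
        cs->error_is_read = error_is_read;
    }
    pthread_mutex_unlock(&cs->s->lock);
}
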
* Do copy of cluster-aligned chunk. Requested region is allowed to exceed
* s->len only to cover last cluster when s->len is not aligned to clusters.
*
- * No sync here: nor bitmap neighter intersecting requests handling, only copy.
+ * No sync here: neither bitmap nor intersecting-request handling, only copy.
*
* @method is an in-out argument, so that copy_range can be either extended to
* a full-size buffer or disabled if the copy_range attempt fails. The output
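
A standalone sketch of the in-out @method contract (the CopyMethod values and both helpers are invented for illustration; QEMU's real enum has more states): a failed copy_range attempt downgrades *method in place, so later chunks go straight to the bounce-buffer path.

#include <errno.h>
#include <stdint.h>

typedef enum {
    COPY_METHOD_RANGE,          /* offload via copy_range */
    COPY_METHOD_BUFFER,         /* bounce-buffer read + write */
} CopyMethod;

static int do_copy_range(int64_t off, int64_t bytes)
{
    return -ENOTSUP;            /* pretend the host cannot offload this */
}

static int do_copy_buffered(int64_t off, int64_t bytes)
{
    return 0;                   /* read into a buffer, then write */
}

/* @method is in-out: on copy_range failure it is downgraded in place so
 * that every later chunk skips the attempt that is known not to work. */
static int copy_chunk(int64_t off, int64_t bytes, CopyMethod *method)
{
    if (*method == COPY_METHOD_RANGE) {
        int ret = do_copy_range(off, bytes);
        if (ret == 0) {
            return 0;
        }
        *method = COPY_METHOD_BUFFER;   /* fall back, now and for later calls */
    }
    return do_copy_buffered(off, bytes);
}
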
aio_set_fd_handler(vblk_exp->export.ctx, vduse_queue_get_fd(vq),
on_vduse_vq_kick, NULL, NULL, NULL, vq);
- /* Make sure we don't miss any kick afer reconnecting */
+ /* Make sure we don't miss any kick after reconnecting */
eventfd_write(vduse_queue_get_fd(vq), 1);
}
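
A Linux-only demonstration of why that self-kick is safe and sufficient: writing to an eventfd makes it readable, so a poll loop attached after the fact still sees one pending notification (the handler must tolerate spurious kicks anyway).

#include <sys/eventfd.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fd = eventfd(0, EFD_NONBLOCK);

    /* A kick delivered before our handler existed would otherwise be lost;
     * self-kicking once guarantees the handler runs at least once. */
    eventfd_write(fd, 1);

    struct pollfd pfd = { .fd = fd, .events = POLLIN };
    if (poll(&pfd, 1, 0) == 1) {
        uint64_t n;
        eventfd_read(fd, &n);           /* drain the counter */
        printf("kick observed (count=%llu)\n", (unsigned long long)n);
    }
    close(fd);
    return 0;
}
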
/*
- * Sharing QEMU block devices via vhost-user protocal
+ * Sharing QEMU block devices via vhost-user protocol
*
* Parts of the code based on nbd/server.c.
*
/*
- * Sharing QEMU block devices via vhost-user protocal
+ * Sharing QEMU block devices via vhost-user protocol
*
* Copyright (c) Coiby Xu <coiby.xu@gmail.com>.
* Copyright (c) 2020 Red Hat, Inc.
- * As part of reopen prepare we also want to create new fd by
- * raw_reconfigure_getfd(). But it wants updated "perm", when in
- * bdrv_reopen_multiple() .bdrv_reopen_prepare() callback called prior to
- * permission update. Happily, permission update is always a part (a seprate
- * stage) of bdrv_reopen_multiple() so we can rely on this fact and
- * reconfigure fd in raw_check_perm().
+ * As part of reopen prepare we also want to create a new fd via
+ * raw_reconfigure_getfd(). But that wants the updated "perm", while in
+ * bdrv_reopen_multiple() the .bdrv_reopen_prepare() callback is called
+ * prior to the permission update. Happily, the permission update is always
+ * a part (a separate stage) of bdrv_reopen_multiple(), so we can rely on
+ * this fact and reconfigure the fd in raw_check_perm().
*/
s->reopen_state = state;
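
A hypothetical reduction of that ordering (all names invented, not the real file-posix.c code): prepare() merely stashes the reopen state, and the fd work is deferred to the permission-update stage, which the multi-reopen driver is guaranteed to run afterwards.

#include <stdio.h>

typedef struct RawState {
    const void *reopen_state;   /* set in prepare, consumed in check_perm */
} RawState;

static void reopen_prepare(RawState *s, const void *state)
{
    /* Too early to reconfigure the fd here: "perm" is still stale. */
    s->reopen_state = state;
}

static int check_perm(RawState *s, unsigned new_perm)
{
    if (s->reopen_state) {
        /* "perm" is final now, so this is the safe point to get a new fd. */
        printf("reconfigure fd for pending reopen, perm=%u\n", new_perm);
    }
    return 0;
}
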
* of an array of zone descriptors.
* zones is an array of zone descriptors to hold zone information on reply;
* offset can be any byte within the entire size of the device;
- * nr_zones is the maxium number of sectors the command should operate on.
+ * nr_zones is the maximum number of zones the command should operate on.
*/
#if defined(CONFIG_BLKZONED)
static int coroutine_fn raw_co_zone_report(BlockDriverState *bs, int64_t offset,
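
A caller-side sketch of the contract documented above (the descriptor type, the fixed zone size, and the device size are assumptions for illustration): nr_zones is in-out, naming the capacity of the zones array on entry and the number of descriptors actually filled on return.

#include <stdint.h>

typedef struct ZoneDescriptor {
    int64_t start;
    int64_t length;
} ZoneDescriptor;

static int zone_report(int64_t offset, unsigned int *nr_zones,
                       ZoneDescriptor *zones)
{
    unsigned int filled = 0;
    int64_t zone_size = 256 * 1024 * 1024;          /* assumed fixed zones */
    int64_t dev_size = 16LL * zone_size;
    int64_t pos = (offset / zone_size) * zone_size; /* align down */

    while (filled < *nr_zones && pos < dev_size) {
        zones[filled].start = pos;
        zones[filled].length = zone_size;
        filled++;
        pos += zone_size;
    }
    *nr_zones = filled;     /* in-out: report how many entries are valid */
    return 0;
}
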
QEMU_LOCK_GUARD(&aio_context_list_lock);
- /* rd can temporarly be negative, but the total will *always* be >= 0 */
+ /* rd can temporarily be negative, but the total will *always* be >= 0 */
rd = orphaned_reader_count;
QTAILQ_FOREACH(brdv_graph, &aio_context_list, next_aio) {
rd += qatomic_read(&brdv_graph->reader_count);
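
A reduced model of that invariant (names hypothetical): each context keeps its own reader count, a single counter may dip below zero when a reader is accounted to a different context than the one it incremented, but the sum including orphaned readers never does.

#include <stdatomic.h>

typedef struct GraphCounter {
    atomic_int reader_count;
    struct GraphCounter *next;
} GraphCounter;

static int total_readers(GraphCounter *list, int orphaned_reader_count)
{
    int rd = orphaned_reader_count; /* partial sums may dip below zero */
    for (GraphCounter *c = list; c; c = c->next) {
        rd += atomic_load(&c->reader_count);
    }
    return rd;                      /* invariant: the total is always >= 0 */
}
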
* timer callback), it is a bug in the caller that should be fixed. */
assert(data.done);
- /* Reaquire the AioContext of bs if we dropped it */
+ /* Reacquire the AioContext of bs if we dropped it */
if (ctx != co_ctx) {
aio_context_acquire(ctx);
}
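
A sketch of that acquire/release discipline with a plain mutex standing in for the AioContext lock (all names assumed): the lock is dropped only when the current context differs from the coroutine's, and reacquired under the same condition, so the caller always returns holding exactly the locks it entered with.

#include <pthread.h>

typedef struct Ctx {
    pthread_mutex_t lock;
} Ctx;

static void wait_in_other_context(Ctx *ctx, Ctx *co_ctx,
                                  void (*poll_until_done)(void))
{
    if (ctx != co_ctx) {
        pthread_mutex_unlock(&ctx->lock);   /* drop while waiting */
    }
    poll_until_done();  /* run co_ctx's event loop until the work completes */
    if (ctx != co_ctx) {
        pthread_mutex_lock(&ctx->lock);     /* reacquire before returning */
    }
}
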
- /* If we are nested we have to notify the level above that we are done
-  * by setting event_max to zero, upper level will then jump out of it's
-  * own `for` loop. If we are the last all counters droped to zero. */
+ /* If we are nested, we have to notify the level above that we are done
+  * by setting event_max to zero; the upper level will then jump out of its
+  * own `for` loop. If we are the last level, all counters have dropped to
+  * zero. */
s->event_max = 0;
s->event_idx = 0;
}
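
A standalone sketch of that unwinding trick (the surrounding structure is hypothetical; the field names follow the comment): every nesting level iterates the same shared counters, so zeroing event_max makes each enclosing `for` condition false in turn.

typedef struct Events {
    int event_max;      /* number of pending events; shared across nesting */
    int event_idx;      /* cursor, also shared */
} Events;

/* Each nesting level runs the same loop over the shared state. */
static void run_events(Events *s, void (*handle)(Events *s))
{
    for (; s->event_idx < s->event_max; s->event_idx++) {
        handle(s);      /* may recursively call run_events(s, handle) */
    }
    /* Batch drained (possibly by an inner level): reset the counters so
     * the level above jumps out of its own loop as well. */
    s->event_max = 0;
    s->event_idx = 0;
}
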
job_pause_point(&s->common.job);
- /* Find the number of consective dirty chunks following the first dirty
+ /* Find the number of consecutive dirty chunks following the first dirty
* one, and wait for in flight requests in them. */
bdrv_dirty_bitmap_lock(s->dirty_bitmap);
while (nb_chunks * s->granularity < s->buf_size) {
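
A standalone version of the scan this loop performs, with a plain bool array standing in for the dirty bitmap and all names assumed: count the dirty chunks that immediately follow the first one, capped by how much fits in the transfer buffer.

#include <stdbool.h>
#include <stdint.h>

static int count_consecutive_dirty(const bool *dirty, int nb_total,
                                   int first, int64_t granularity,
                                   int64_t buf_size)
{
    int nb_chunks = 1;  /* `first` itself is known to be dirty */
    while (nb_chunks * granularity < buf_size &&
           first + nb_chunks < nb_total &&
           dirty[first + nb_chunks]) {
        nb_chunks++;
    }
    return nb_chunks;
}
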
* repeat all this until the reftable stops growing.
*
* (This loop will terminate, because with every cluster the
- * reftable grows, it can accomodate a multitude of more refcounts,
+ * reftable grows, it can accommodate a multitude of more refcounts,
* so that at some point this must be able to cover the reftable
* and all refblocks describing it.)
*
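
To make the termination argument concrete (assuming the qcow2 defaults of 64 KiB clusters, 8-byte reftable entries, and 16-bit refcounts): one cluster added to the reftable holds 8192 refblock pointers, and each refblock covers 32768 clusters, so that single cluster of reftable growth accounts for 8192 × 32768 clusters, i.e. 16 TiB of image, while costing only one cluster itself. Because each iteration gains vastly more coverage than it consumes, the reftable stops growing after very few rounds.
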
goto fail;
}
- /* endian convert populated BAT field entires */
+ /* endian-convert the populated BAT entries */
for (i = 0; i < s->bat_entries; i++) {
s->bat[i] = le64_to_cpu(s->bat[i]);
}
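
For reference, a portable equivalent of the le64_to_cpu() conversion used here (QEMU's own implementation lives in its byte-swap headers): VHDX stores BAT entries little-endian on disk, so they are byte-swapped on big-endian hosts and left untouched on little-endian ones.

#include <stdint.h>

static uint64_t le64_to_cpu_portable(uint64_t le)
{
    /* Read the stored bytes in memory order and assemble them with byte 0
     * as the least significant, which is host-endianness-independent. */
    const uint8_t *b = (const uint8_t *)&le;
    return (uint64_t)b[0]       | (uint64_t)b[1] << 8  |
           (uint64_t)b[2] << 16 | (uint64_t)b[3] << 24 |
           (uint64_t)b[4] << 32 | (uint64_t)b[5] << 40 |
           (uint64_t)b[6] << 48 | (uint64_t)b[7] << 56;
}
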
uint32_t sequence_high; /* 4 MSB of 8 byte sequence_number */
uint8_t data[4084]; /* raw data, bytes 8-4091 (inclusive).
see the data descriptor field for the
- other mising bytes */
+ other missing bytes */
uint32_t sequence_low; /* 4 LSB of 8 byte sequence_number */
} VHDXLogDataSector;
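
A quick layout check of the sector above (assuming the standard 4096-byte VHDX log sector, with the two 4-byte fields that precede this excerpt occupying bytes 0-7):

#include <assert.h>

/* bytes 0-7: header fields above; 8-4091: data; 4092-4095: sequence_low */
static_assert(8 + 4084 + 4 == 4096, "VHDX log data sector must be 4 KiB");
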
#define VHDX_META_FLAGS_IS_USER 0x01 /* max 1024 entries */
#define VHDX_META_FLAGS_IS_VIRTUAL_DISK 0x02 /* virtual disk metadata if set,
- otherwise file metdata */
+ otherwise file metadata */
#define VHDX_META_FLAGS_IS_REQUIRED 0x04 /* parse must understand this
entry to open the file */
typedef struct QEMU_PACKED VHDXMetadataTableEntry {
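
A small sketch of how a parser might act on the IS_REQUIRED flag defined above (the helper and its calling convention are hypothetical): a required entry that the implementation does not recognize makes the file unopenable.

#include <stdbool.h>
#include <stdint.h>

#define VHDX_META_FLAGS_IS_REQUIRED 0x04

/* Per the comment above: refuse the image rather than silently ignore an
 * entry that the format author marked as mandatory to understand. */
static bool must_reject_entry(uint32_t flags, bool recognized)
{
    return (flags & VHDX_META_FLAGS_IS_REQUIRED) && !recognized;
}
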