* August 21, 1998
* Copyright 1998 Fabrice Bellard.
*
- * [Rewrote completly the code of Lance Norskog And Sundry
+ * [Rewrote completely the code of Lance Norskog And Sundry
* Contributors with a more efficient algorithm.]
*
* This source code is freely redistributable and may be used for
.name = "EXCLUSIVE",
.tag = AUD_OPT_BOOL,
.valp = &glob_conf.exclusive,
- .descr = "Open device in exclusive mode (vmix wont work)"
+ .descr = "Open device in exclusive mode (vmix won't work)"
},
#ifdef USE_DSP_POLICY
{
* unix socket. For each client, the server will create some eventfd
* (see EVENTFD(2)), one per vector. These fd are transmitted to all
* clients using the SCM_RIGHTS cmsg message. Therefore, each client is
- * able to send a notification to another client without beeing
+ * able to send a notification to another client without being
 * "proxified" by the server.
*
* We use this mechanism to send interruptions between guests.
----------
Device registers are hard-coded to little-endian (LE). The driver should
-convert to/from host endianess to LE for device register accesses.
+convert to/from host endianness to LE for device register accesses.
Descriptors are LE. Descriptor buffer TLVs will have LE type and length
fields, but the value field can either be LE or network-byte-order, depending
------------
QEMU includes a throttling module that can be used to set limits to
I/O operations. The code itself is generic and independent of the I/O
-units, but it is currenly used to limit the number of bytes per second
+units, but it is currently used to limit the number of bytes per second
and operations per second (IOPS) when performing disk I/O.
This document explains how to use the throttling code in QEMU, and how
if (s->address == ADDR_RESET) {
if (i2c_start_transfer(s->bus, extract32(s->i2dr_write, 1, 7),
extract32(s->i2dr_write, 0, 1))) {
- /* if non zero is returned, the adress is not valid */
+ /* if non-zero is returned, the address is not valid */
s->i2sr |= I2SR_RXAK;
} else {
s->address = s->i2dr_write;
#define VMXNET3_MSIX_BAR_SIZE 0x2000
#define MIN_BUF_SIZE 60
-/* Compatability flags for migration */
+/* Compatibility flags for migration */
#define VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS_BIT 0
#define VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS \
(1 << VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS_BIT)
uint32_t mcast_list_len;
uint32_t mcast_list_buff_size; /* needed for live migration. */
- /* Compatability flags for migration */
+ /* Compatibility flags for migration */
uint32_t compat_flags;
} VMXNET3State;
*
* Setting this flag to false will remove MSI/MSI-X capability from all devices.
*
- * It is preferrable for controllers to set this to true (non-broken) even if
+ * It is preferable for controllers to set this to true (non-broken) even if
* they do not actually support MSI/MSI-X: guests normally probe the controller
* type and do not attempt to enable MSI/MSI-X with interrupt controllers not
* supporting such, so removing the capability is not required, and
return base;
}
-/* accessor funciton to get bridge filtering limit */
+/* accessor function to get bridge filtering limit */
pcibus_t pci_bridge_get_limit(const PCIDevice *bridge, uint8_t type)
{
pcibus_t limit;
uint8_t resp_data[36];
int rc, len, alen;
- /* We dont do EVPD. Also check that page_code is 0 */
+ /* We don't do EVPD. Also check that page_code is 0 */
if ((cdb[1] & 0x01) || cdb[2] != 0) {
/* Send INVALID FIELD IN CDB */
vscsi_makeup_sense(s, req, ILLEGAL_REQUEST, 0x24, 0);
#define PVSCSI_DEVICE_GET_CLASS(obj) \
OBJECT_GET_CLASS(PVSCSIClass, (obj), TYPE_PVSCSI)
-/* Compatability flags for migration */
+/* Compatibility flags for migration */
#define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT 0
#define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION \
(1 << PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT)
case R_COUNTER_LO:
/*
* Keep it simple - ARM docco explicitly says to disable timer before
- * modding it, so dont bother trying to do all the difficult on the fly
+ * modding it, so don't bother trying to do all the difficult on the fly
* timer modifications - (if they even work in real hardware??).
*/
if (s->control & R_CONTROL_TIMER_ENABLE) {
}
/* Control register operations are broken out into helpers that can be
- * explictly called on aspeed_timer_reset(), but also from
+ * explicitly called on aspeed_timer_reset(), but also from
* aspeed_timer_ctrl_op().
*/
for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) {
AspeedTimer *t = &s->timers[i];
- /* Explictly call helpers to avoid any conditional behaviour through
+ /* Explicitly call helpers to avoid any conditional behaviour through
* aspeed_timer_set_ctrl().
*/
aspeed_timer_ctrl_enable(t, false);
* Fill @buf with @buflen bytes of cryptographically strong
* random data
*
- * Returns 0 on sucess, -1 on error
+ * Returns 0 on success, -1 on error
*/
int qcrypto_random_bytes(uint8_t *buf,
size_t buflen,
* We don't support Xen prior to 4.2.0.
*/
-/* Xen 4.2 thru 4.6 */
+/* Xen 4.2 through 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471
typedef xc_interface xenforeignmemory_handle;
* qio_task_complete:
* @task: the task struct
*
- * Mark the operation as succesfully completed
+ * Mark the operation as successfully completed
* and free the memory for @task.
*/
void qio_task_complete(QIOTask *task);
void qemu_set_cloexec(int fd);
/* QEMU "hardware version" setting. Used to replace code that exposed
- * QEMU_VERSION to guests in the past and need to keep compatibilty.
+ * QEMU_VERSION to guests in the past and need to keep compatibility.
* Do not use qemu_hw_version() in new code.
*/
void qemu_set_hw_version(const char *);
* userspace memory corruption (which is not detectable by valgrind
* too, in most cases).
* So for now, let's align to 64 instead of HOST_LONG_BITS here, in
- * a hope that sizeof(long) wont become >8 any time soon.
+ * a hope that sizeof(long) won't become >8 any time soon.
*/
size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
/*HOST_LONG_BITS*/ 64) / 8;
void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
ram_addr_t start, size_t len)
{
- uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname upto 256 */
+ uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
size_t msglen = 12; /* start + len */
*(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
}
/**
- * ram_save_host_page: Starting at *offset send pages upto the end
+ * ram_save_host_page: Starting at *offset send pages up to the end
* of the current host page. It's valid for the initial
* offset to point into the middle of a host page
* in which case the remainder of the hostpage is sent.
}
length = be32_to_cpu(length);
if (length != 0) {
- error_setg(errp, "Start TLS reponse was not zero %x",
+ error_setg(errp, "Start TLS response was not zero %x",
length);
return NULL;
}
}
out:
- /* dont block forever, iterate the main loop every once and a while */
+ /* don't block forever, iterate the main loop every once in a while */
*timeout_ms = 500;
/* if there's data in the read buffer, or another event is pending,
* skip polling and issue user cb.
*/
#ifdef G_OS_WIN32
/* Additionally WIN32 does not provide any additional information
- * on whetherthe child exited or terminated via signal.
- * We use this simple range check to distingish application exit code
+ * on whether the child exited or terminated via signal.
+ * We use this simple range check to distinguish application exit code
 * (usually value less than 256) and unhandled exception code with
 * ntstatus (always value greater than 0xC0000005). */
if ((uint32_t)gei->status < 0xC0000000U) {
for ($off = 1; $off < length($line); $off++) {
$c = substr($line, $off, 1);
- # Comments we are wacking completly including the begin
+ # Comments we are whacking completely including the begin
# and end, all to $;.
if ($sanitise_quote eq '' && substr($line, $off, 2) eq '/*') {
$sanitise_quote = '*/';
* We don't test for <= 0 this time, because there legitimately
* might not be any more data (since the socket is non-blocking),
* a close will be detected on next iteration.
- * A return of -1 wont (shouldn't) happen, since it didn't happen above
+ * A return of -1 won't (shouldn't) happen, since it didn't happen above
*/
if (n == 2 && nn == iov[0].iov_len) {
int ret;
*
* When the software returns from an exception, the branch will re-execute.
* On QEMU care needs to be taken when a branch+delayslot sequence is broken
- * and the branch and delayslot dont share pages.
+ * and the branch and delayslot don't share pages.
*
 * The TB containing the branch insn will set up env->btarget and evaluate
* env->btaken. When the translation loop exits we will note that the branch
}
 /* If we are re-executing a branch due to exceptions on
- delay slots dont break. */
+ delay slots don't break. */
if (!(tb->pc & 1) && cs->singlestep_enabled) {
break;
}
dc->tb_flags |= PFIX_FLAG;
tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], PFIX_FLAG);
- /* prefix insns dont clear the x flag. */
+ /* prefix insns don't clear the x flag. */
dc->clear_x = 0;
cris_lock_irq(dc);
}
/* The Linux kernel checks for the CMPLegacy bit and
* discards multiple thread information if it is set.
- * So dont set it here for Intel to make Linux guests happy.
+ * So don't set it here for Intel to make Linux guests happy.
*/
if (cs->nr_cores * cs->nr_threads > 1) {
if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
* If value is NULL, no default will be set and the original
* value from the CPU model table will be kept.
*
- * It is valid to call this funciton only for properties that
+ * It is valid to call this function only for properties that
* are already present in the kvm_default_props table.
*/
void x86_cpu_change_kvm_default(const char *prop, const char *value);
static inline void mips_vpe_wake(MIPSCPU *c)
{
- /* Dont set ->halted = 0 directly, let it be done via cpu_has_work
+ /* Don't set ->halted = 0 directly, let it be done via cpu_has_work
because there might be other conditions that state that c should
be sleeping. */
cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE);
} else if (shift_count == -32) {
/* set PSW.C */
tcg_gen_mov_tl(cpu_PSW_C, r1);
- /* fill ret completly with sign bit */
+ /* fill ret completely with sign bit */
tcg_gen_sari_tl(ret, r1, 31);
/* clear PSW.V */
tcg_gen_movi_tl(cpu_PSW_V, 0);
few specific operations must be implemented to allow it (see add2_i32,
sub2_i32, brcond2_i32).
-On a 64 bit target, the values are transfered between 32 and 64-bit
+On a 64 bit target, the values are transferred between 32 and 64-bit
registers using the following ops:
- trunc_shr_i64_i32
- ext_i32_i64
t = (unsigned char *)x;
t -= 32768;
p = (unsigned char *) &y.v1;
- mb(); /* dont reorder anything beyond here. */
+ mb(); /* don't reorder anything beyond here. */
cris_tst_cc_init();
asm volatile ("setf\tzvnc\n");
cris_addo_pi_d(p, t);
t += 32770;
- mb(); /* dont reorder anything beyond here. */
+ mb(); /* don't reorder anything beyond here. */
cris_tst_cc_init();
asm volatile ("setf\tzvnc\n");
cris_addo_pi_w(p, t);
if (*r != 0x4455aa77)
err();
- mb(); /* dont reorder anything beyond here. */
+ mb(); /* don't reorder anything beyond here. */
cris_tst_cc_init();
asm volatile ("setf\tzvnc\n");
cris_addo_d(p, r);
if (*r != 0xee19ccff)
err();
- mb(); /* dont reorder anything beyond here. */
+ mb(); /* don't reorder anything beyond here. */
cris_tst_cc_init();
asm volatile ("setf\tzvnc\n");
cris_addo_pi_b(p, t);
if (*(uint16_t*)r != 0xff22)
err();
- mb(); /* dont reorder anything beyond here. */
+ mb(); /* don't reorder anything beyond here. */
cris_tst_cc_init();
asm volatile ("setf\tzvnc\n");
cris_addo_b(p, r);
if (*r != 0x4455aa77)
err();
- mb(); /* dont reorder anything beyond here. */
+ mb(); /* don't reorder anything beyond here. */
cris_tst_cc_init();
asm volatile ("setf\tzvnc\n");
cris_addo_w(p, r);
if (*r != 0xff224455)
err();
- mb(); /* dont reorder anything beyond here. */
+ mb(); /* don't reorder anything beyond here. */
cris_tst_cc_init();
asm volatile ("setf\tzvnc\n");
cris_addo_pi_d(p, t);
smp_rmb(); /* read memory barrier before accessing record */
/* read the record header to know record length */
read_from_buffer(idx, &record, sizeof(TraceRecord));
- *recordptr = malloc(record.length); /* dont use g_malloc, can deadlock when traced */
+ *recordptr = malloc(record.length); /* don't use g_malloc, can deadlock when traced */
/* make a copy of record to avoid being overwritten */
read_from_buffer(idx, *recordptr, record.length);
smp_rmb(); /* memory barrier before clearing valid flag */
while (get_trace_record(idx, &recordptr)) {
unused = fwrite(recordptr, recordptr->length, 1, trace_fp);
writeout_idx += recordptr->length;
- free(recordptr); /* dont use g_free, can deadlock when traced */
+ free(recordptr); /* don't use g_free, can deadlock when traced */
idx = writeout_idx % TRACE_BUF_LEN;
}
[menuItem setEnabled: NO];
[menu addItem: menuItem];
- /* Loop thru all the block devices in the emulator */
+ /* Loop through all the block devices in the emulator */
while (currentDevice) {
deviceName = [[NSString stringWithFormat: @"%s", currentDevice->value->device] retain];
 * Benoît Canet <benoit.canet@nodalink.com>
* Alberto Garcia <berto@igalia.com>
*
- * This program is free sofware: you can redistribute it and/or modify
+ * This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
- * the Free Sofware Foundation, either version 2 of the License, or
+ * the Free Software Foundation, either version 2 of the License, or
* (at your option) version 3 or any later version.
*
* This program is distributed in the hope that it will be useful,