{
hvf_slot *mem;
MemoryRegion *area = section->mr;
- bool writeable = !area->readonly && !area->rom_device;
+ bool writable = !area->readonly && !area->rom_device;
hv_memory_flags_t flags;
uint64_t page_size = qemu_real_host_page_size();
if (!memory_region_is_ram(area)) {
- if (writeable) {
+ if (writable) {
return;
} else if (!memory_region_is_romd(area)) {
/*
KVMSlot *mem;
int err;
MemoryRegion *mr = section->mr;
- bool writeable = !mr->readonly && !mr->rom_device;
+ bool writable = !mr->readonly && !mr->rom_device;
hwaddr start_addr, size, slot_size, mr_offset;
ram_addr_t ram_start_offset;
void *ram;
if (!memory_region_is_ram(mr)) {
- if (writeable || !kvm_readonly_mem_allowed) {
+ if (writable || !kvm_readonly_mem_allowed) {
return;
} else if (!mr->romd_mode) {
/* If the memory device is not in romd_mode, then we actually want
* Return true if the write fault has been handled, and should be re-tried.
*
* Note that it is important that we don't call page_unprotect() unless
- * this is really a "write to nonwriteable page" fault, because
+ * this is really a "write to nonwritable page" fault, because
* page_unprotect() assumes that if it is called for an access to
- * a page that's writeable this means we had two threads racing and
- * another thread got there first and already made the page writeable;
+ * a page that's writable this means we had two threads racing and
+ * another thread got there first and already made the page writable;
* so we will retry the access. If we were to call page_unprotect()
* for some other kind of fault that should really be passed to the
* guest, we'd end up in an infinite loop of retrying the faulting access.
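For illustration, a minimal sketch of the calling pattern this comment describes, assuming a hypothetical wrapper with simplified types and return handling (a non-zero result from page_unprotect() is taken to mean the protection only existed to guard cached translations, so the access can be retried):

    /*
     * Sketch only: hypothetical helper, simplified types and return
     * handling. Call page_unprotect() only for a genuine "write to
     * non-writable page" fault; retry the access when it reports that
     * the protection was ours (or another thread already removed it).
     */
    static bool sketch_retry_write_fault(abi_ptr guest_addr, uintptr_t host_pc)
    {
        if (page_unprotect(guest_addr, host_pc) != 0) {
            return true;    /* handled: re-try the faulting access */
        }
        return false;       /* real fault: deliver it to the guest */
    }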
:size: a 32-bit configuration space access size in bytes
:flags: a 32-bit value:
- - 0: Vhost front-end messages used for writeable fields
+ - 0: Vhost front-end messages used for writable fields
- 1: Vhost front-end messages used for live migration
:payload: Size bytes array holding the contents of the virtio
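As a rough illustration of the fields listed above, one possible C representation of such a configuration-space message (the struct and field names and the maximum payload size are assumptions for the sketch, not taken from the spec text):

    /* Sketch only: names and VHOST_USER_CFG_MAX_SIZE are assumed. */
    #define VHOST_USER_CFG_MAX_SIZE 256

    struct vhost_user_config_msg {
        uint32_t size;                              /* access size in bytes */
        uint32_t flags;                             /* 0: writable fields, 1: live migration */
        uint8_t  payload[VHOST_USER_CFG_MAX_SIZE];  /* 'size' bytes of config space data */
    };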
backed-up or snapshotted image. In order to do this, QEMU must know the
address that has been allocated.
-The mechanism chosen for this memory sharing is writeable fw_cfg blobs.
+The mechanism chosen for this memory sharing is writable fw_cfg blobs.
These are data objects that are visible to both QEMU and guests, and are
addressable as sequential files.
/etc/vmgenid_guid - contains the actual VM Generation ID GUID
- read-only to the guest
/etc/vmgenid_addr - contains the address of the downloaded vmgenid blob
- - writeable by the guest
+ - writable by the guest
QEMU sends the following commands to the guest at startup:
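For illustration, the two fw_cfg items described above expressed as a guest-visible layout (a sketch only; the field types are assumed, while the names and access modes come from the text):

    /* Sketch only: types assumed, names and access modes from the text above. */
    struct vmgenid_fw_cfg_view {
        uint8_t  guid[16];   /* "/etc/vmgenid_guid": the VM Generation ID GUID,
                              * read-only to the guest */
        uint64_t guid_addr;  /* "/etc/vmgenid_addr": address of the downloaded
                              * vmgenid blob, writable by the guest */
    };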
for (i = 0; i < ACPI_GHES_ERROR_SOURCE_COUNT; i++) {
/*
* Initialize the value of read_ack_register to 1, so GHES can be
- * writeable after (re)boot.
+ * writable after (re)boot.
* ACPI 6.2: 18.3.2.8 Generic Hardware Error Source version 2
* (GHESv2 - Type 10)
*/
cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_CBPR;
}
- /* The only bit stored in icc_ctlr_el3 which is writeable is EOIMODE_EL3: */
+ /* The only bit stored in icc_ctlr_el3 which is writable is EOIMODE_EL3: */
mask = ICC_CTLR_EL3_EOIMODE_EL3;
cs->icc_ctlr_el3 &= ~mask;
if (value & mask & GICD_CTLR_DS) {
/* We just set DS, so the ARE_NS and EnG1S bits are now RES0.
* Note that this is a one-way transition because if DS is set
- * then it's not writeable, so it can only go back to 0 with a
+ * then it's not writable, so it can only go back to 0 with a
* hardware reset.
*/
s->gicd_ctlr &= ~(GICD_CTLR_EN_GRP1S | GICD_CTLR_ARE_NS);
/*
* The DIRTY bit is read-only and for us is always zero;
- * other fields are writeable.
+ * other fields are writable.
*/
newval &= R_GICR_VPENDBASER_INNERCACHE_MASK |
R_GICR_VPENDBASER_SHAREABILITY_MASK |
/* RAZ/WI for our implementation */
return MEMTX_OK;
case GICR_WAKER:
- /* Only the ProcessorSleep bit is writeable. When the guest sets
+ /* Only the ProcessorSleep bit is writable. When the guest sets
* it, it requests that we transition the channel between the
* redistributor and the cpu interface to quiescent, and that
* we set the ChildrenAsleep bit once the interface has reached the
/* Claim software interrupt bits */
for (i = 0; i < swi->num_harts; i++) {
RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(swi->hartid_base + i));
- /* We don't claim mip.SSIP because it is writeable by software */
+ /* We don't claim mip.SSIP because it is writable by software */
if (riscv_cpu_claim_interrupts(cpu, swi->sswi ? 0 : MIP_MSIP) < 0) {
error_report("MSIP already claimed");
exit(1);
}
if (addr == APLIC_DOMAINCFG) {
- /* Only IE bit writeable at the moment */
+ /* Only IE bit writable at the moment */
value &= APLIC_DOMAINCFG_IE;
aplic->domaincfg = value;
} else if ((APLIC_SOURCECFG_BASE <= addr) &&
pci_set_byte(config + SHPC_CAP_CxP, 0);
pci_set_long(config + SHPC_CAP_DWORD_DATA, 0);
d->shpc->cap = config_offset;
- /* Make dword select and data writeable. */
+ /* Make dword select and data writable. */
pci_set_byte(d->wmask + config_offset + SHPC_CAP_DWORD_SELECT, 0xff);
pci_set_long(d->wmask + config_offset + SHPC_CAP_DWORD_DATA, 0xffffffff);
return 0;
* metadata and user data
* 1=5%, 2=10%, 3=15% and so on
*/
- uint8_t viewSpace; /* snapshot writeable VIEWs
+ uint8_t viewSpace; /* snapshot writable VIEWs
* capacity as a % of source LD
* capacity. 0=READ only
* 1=5%, 2=10%, 3=15% and so on
}
if (tte & IOMMU_TTE_DATA_W) {
- /* Writeable */
+ /* Writable */
ret.perm = IOMMU_RW;
} else {
ret.perm = IOMMU_RO;
{
uint32_t old_ctl = s->cntp_aival_ctl;
- /* EN bit is writeable; CLR bit is write-0-to-clear, write-1-ignored */
+ /* EN bit is writable; CLR bit is write-0-to-clear, write-1-ignored */
s->cntp_aival_ctl &= ~R_CNTP_AIVAL_CTL_EN_MASK;
s->cntp_aival_ctl |= value & R_CNTP_AIVAL_CTL_EN_MASK;
if (!(value & R_CNTP_AIVAL_CTL_CLR_MASK)) {
"""
# If we keep the console socket open, we may deadlock waiting
# for QEMU to exit, while QEMU is waiting for the socket to
- # become writeable.
+ # become writable.
if self._console_socket is not None:
self._console_socket.close()
self._console_socket = None
/*
* Don't allow writing to XPSR.Exception as it can cause
* a transition into or out of handler mode (it's not
- * writeable via the MSR insn so this is a reasonable
+ * writable via the MSR insn so this is a reasonable
* restriction). Other fields are safe to update.
*/
xpsr_write(env, tmp, ~XPSR_EXCP);
}
}
- env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
- env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);
+ env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
+ env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);
pmu_op_finish(env);
}
}
}
- env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
- env->cp15.c9_pmcr |= (val & PMCR_WRITEABLE_MASK);
+ env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
+ env->cp15.c9_pmcr |= (val & PMCR_WRITABLE_MASK);
pmu_op_finish(env);
break;
#define PMCRP 0x2
#define PMCRE 0x1
/*
- * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
+ * Mask of PMCR bits writable by guest (not including WO bits like C, P,
* which can be written as 1 to trigger behaviour but which stay RAZ).
*/
-#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
+#define PMCR_WRITABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
#define PMXEVTYPER_P 0x80000000
#define PMXEVTYPER_U 0x40000000
/* Convert CPU model data from X86CPU object to a property dictionary
* that can recreate exactly the same CPU model, including every
- * writeable QOM property.
+ * writable QOM property.
*/
static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
{
#define EPT_VIOLATION_DATA_WRITE (1UL << 1)
#define EPT_VIOLATION_INST_FETCH (1UL << 2)
#define EPT_VIOLATION_GPA_READABLE (1UL << 3)
-#define EPT_VIOLATION_GPA_WRITEABLE (1UL << 4)
+#define EPT_VIOLATION_GPA_WRITABLE (1UL << 4)
#define EPT_VIOLATION_GPA_EXECUTABLE (1UL << 5)
#define EPT_VIOLATION_GLA_VALID (1UL << 7)
#define EPT_VIOLATION_XLAT_VALID (1UL << 8)
#define AR_TYPE_ACCESSES_MASK 1
#define AR_TYPE_READABLE_MASK (1 << 1)
-#define AR_TYPE_WRITEABLE_MASK (1 << 2)
+#define AR_TYPE_WRITABLE_MASK (1 << 2)
#define AR_TYPE_CODE_MASK (1 << 3)
#define AR_TYPE_MASK 0x0f
#define AR_TYPE_BUSY_64_TSS 11
g_assert(!s390_is_pv());
/*
* As operand exceptions have a lower priority than access exceptions,
- * we check whether the memory area is writeable (injecting the
+ * we check whether the memory area is writable (injecting the
* access exception if it is not) first.
*/
if (!s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib))) {
*
* - `ebx`: contains the physical memory address where the loader has placed
* the boot start info structure.
- * - `cr0`: bit 0 (PE) must be set. All the other writeable bits are cleared.
+ * - `cr0`: bit 0 (PE) must be set. All the other writable bits are cleared.
* - `cr4`: all bits are cleared.
* - `cs `: must be a 32-bit read/execute code segment with a base of ‘0’
* and a limit of ‘0xFFFFFFFF’. The selector value is unspecified.
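For illustration, the control-register constraints listed above expressed as the values a loader would program before jumping to the entry point (a minimal sketch; PE is bit 0 of CR0):

    /* Sketch only: entry-state values implied by the description above. */
    #define X86_CR0_PE (1u << 0)          /* Protection Enable */

    uint32_t entry_cr0 = X86_CR0_PE;      /* PE set, all other writable bits clear */
    uint32_t entry_cr4 = 0;               /* all bits clear */
    /* %ebx holds the physical address of the boot start info structure. */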