uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);
uint64_t changed_cr0 = old_cr0 ^ cr0;
- uint64_t mask = CR0_PG | CR0_CD | CR0_NW | CR0_NE | CR0_ET;
+ uint64_t mask = CR0_PG_MASK | CR0_CD_MASK | CR0_NW_MASK |
+ CR0_NE_MASK | CR0_ET_MASK;
uint64_t entry_ctls;
- if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) &&
+ if ((cr0 & CR0_PG_MASK) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE_MASK) &&
!(efer & MSR_EFER_LME)) {
address_space_read(&address_space_memory,
                   rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,
                   MEMTXATTRS_UNSPECIFIED, &pdpte, sizeof(pdpte));
wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);
if (efer & MSR_EFER_LME) {
- if (changed_cr0 & CR0_PG) {
- if (cr0 & CR0_PG) {
+ if (changed_cr0 & CR0_PG_MASK) {
+ if (cr0 & CR0_PG_MASK) {
enter_long_mode(vcpu, cr0, efer);
} else {
exit_long_mode(vcpu, cr0, efer);
}
/* Filter new CR0 after we are finished examining it above. */
- cr0 = (cr0 & ~(mask & ~CR0_PG));
- wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);
+ cr0 = (cr0 & ~(mask & ~CR0_PG_MASK));
+ wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE_MASK | CR0_ET_MASK);
hv_vcpu_invalidate_tlb(vcpu);
hv_vcpu_flush(vcpu);
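The filter at the end of macvm_set_cr0() is easy to misread: `mask` is the set of CR0 bits the hypervisor owns (it is also written to VMCS_CR0_MASK so guest writes to those bits trap), and the value actually installed keeps PG but always has NE and ET forced on. A minimal standalone sketch of that filtering, using illustrative bit definitions rather than QEMU's headers:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions; the real CR0_*_MASK constants come from
 * QEMU's shared x86 headers, not from this sketch. */
#define CR0_PE_MASK (1ULL << 0)
#define CR0_ET_MASK (1ULL << 4)
#define CR0_NE_MASK (1ULL << 5)
#define CR0_NW_MASK (1ULL << 29)
#define CR0_CD_MASK (1ULL << 30)
#define CR0_PG_MASK (1ULL << 31)

int main(void)
{
    uint64_t mask = CR0_PG_MASK | CR0_CD_MASK | CR0_NW_MASK |
                    CR0_NE_MASK | CR0_ET_MASK;
    /* A guest enabling paging while also trying to disable caching. */
    uint64_t cr0 = CR0_PE_MASK | CR0_PG_MASK | CR0_CD_MASK;

    /* Drop every hypervisor-owned bit except PG, then force NE and ET,
     * mirroring the final wvmcs(vcpu, VMCS_GUEST_CR0, ...) above. */
    cr0 &= ~(mask & ~CR0_PG_MASK);
    cr0 |= CR0_NE_MASK | CR0_ET_MASK;

    printf("effective guest CR0 = %#llx\n", (unsigned long long)cr0);
    return 0;
}
```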
static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)
{
- uint64_t guest_cr4 = cr4 | CR4_VMXE;
+ uint64_t guest_cr4 = cr4 | CR4_VMXE_MASK;
wvmcs(vcpu, VMCS_GUEST_CR4, guest_cr4);
wvmcs(vcpu, VMCS_CR4_SHADOW, cr4);
- wvmcs(vcpu, VMCS_CR4_MASK, CR4_VMXE);
+ wvmcs(vcpu, VMCS_CR4_MASK, CR4_VMXE_MASK);
hv_vcpu_invalidate_tlb(vcpu);
hv_vcpu_flush(vcpu);
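macvm_set_cr4() forces VMXE on in the live guest CR4 (the guest runs under VMX, where that bit must be set) while keeping the guest's own value in the read shadow, and VMCS_CR4_MASK makes guest writes to VMXE trap. Under VMX, a MOV from CR4 returns masked bits from the shadow and everything else from the live register; a small sketch of that composition, with hypothetical standalone defines:

```c
#include <stdint.h>
#include <stdio.h>

#define CR4_PAE_MASK  (1ULL << 5)
#define CR4_VMXE_MASK (1ULL << 13)

/* What the guest observes on MOV from CR4 under VMX: bits covered by
 * the CR4 guest/host mask come from the read shadow, the rest from the
 * real guest CR4. */
static uint64_t guest_visible_cr4(uint64_t guest_cr4, uint64_t mask,
                                  uint64_t shadow)
{
    return (guest_cr4 & ~mask) | (shadow & mask);
}

int main(void)
{
    uint64_t cr4 = CR4_PAE_MASK;              /* value the guest wrote */
    uint64_t guest_cr4 = cr4 | CR4_VMXE_MASK; /* what the VMCS holds   */

    /* The guest never sees the forced VMXE bit. */
    printf("guest reads CR4 = %#llx\n",
           (unsigned long long)guest_visible_cr4(guest_cr4,
                                                 CR4_VMXE_MASK, cr4));
    return 0;
}
```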
bool x86_is_protected(struct CPUState *cpu)
{
uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
- return cr0 & CR0_PE;
+ return cr0 & CR0_PE_MASK;
}
bool x86_is_real(struct CPUState *cpu)
bool x86_is_paging_mode(struct CPUState *cpu)
{
uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
- return cr0 & CR0_PG;
+ return cr0 & CR0_PG_MASK;
}
bool x86_is_pae_enabled(struct CPUState *cpu)
{
uint64_t cr4 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4);
- return cr4 & CR4_PAE;
+ return cr4 & CR4_PAE_MASK;
}
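Taken together, these predicates let callers classify the guest's execution mode from the control registers. A hypothetical standalone helper working on raw register values (the function name and the EFER.LMA check are assumptions of this sketch, not QEMU API):

```c
#include <stdint.h>
#include <stdio.h>

#define CR0_PE_MASK  (1ULL << 0)
#define CR0_PG_MASK  (1ULL << 31)
#define CR4_PAE_MASK (1ULL << 5)
#define MSR_EFER_LMA (1ULL << 10)

/* Classify a paging mode from raw CR0/CR4/EFER values, mirroring the
 * predicates above (hypothetical helper, illustrative only). */
static const char *paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
{
    if (!(cr0 & CR0_PE_MASK)) {
        return "real";
    }
    if (!(cr0 & CR0_PG_MASK)) {
        return "protected, no paging";
    }
    if (efer & MSR_EFER_LMA) {
        return "long mode";
    }
    return (cr4 & CR4_PAE_MASK) ? "PAE paging" : "32-bit paging";
}

int main(void)
{
    printf("%s\n", paging_mode(CR0_PE_MASK | CR0_PG_MASK,
                               CR4_PAE_MASK, 0));
    return 0;
}
```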
target_ulong linear_addr(struct CPUState *cpu, target_ulong addr, X86Seg seg)
};
} __attribute__ ((__packed__)) x86_register;
-typedef enum x86_reg_cr0 {
- CR0_PE = (1L << 0),
- CR0_MP = (1L << 1),
- CR0_EM = (1L << 2),
- CR0_TS = (1L << 3),
- CR0_ET = (1L << 4),
- CR0_NE = (1L << 5),
- CR0_WP = (1L << 16),
- CR0_AM = (1L << 18),
- CR0_NW = (1L << 29),
- CR0_CD = (1L << 30),
- CR0_PG = (1L << 31),
-} x86_reg_cr0;
-
-typedef enum x86_reg_cr4 {
- CR4_VME = (1L << 0),
- CR4_PVI = (1L << 1),
- CR4_TSD = (1L << 2),
- CR4_DE = (1L << 3),
- CR4_PSE = (1L << 4),
- CR4_PAE = (1L << 5),
- CR4_MSE = (1L << 6),
- CR4_PGE = (1L << 7),
- CR4_PCE = (1L << 8),
- CR4_OSFXSR = (1L << 9),
- CR4_OSXMMEXCPT = (1L << 10),
- CR4_VMXE = (1L << 13),
- CR4_SMXE = (1L << 14),
- CR4_FSGSBASE = (1L << 16),
- CR4_PCIDE = (1L << 17),
- CR4_OSXSAVE = (1L << 18),
- CR4_SMEP = (1L << 20),
-} x86_reg_cr4;
-
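The deleted enums duplicated architectural bit definitions locally in the HVF code; the CR0_*_MASK/CR4_*_MASK names used throughout the patch are the shared equivalents (in QEMU these live in target/i386/cpu.h). Using unsigned `#define` masks also sidesteps the old enum's `(1L << 31)`, which is negative or implementation-defined where `long` is 32 bits. A sketch of what such shared defines look like; bit positions follow the SDM, but treat the exact spelling as an assumption here:

```c
/* Shared-style CR0/CR4 bit masks (sketch; see target/i386/cpu.h for
 * the authoritative QEMU definitions). */
#define CR0_PE_MASK (1U << 0)
#define CR0_MP_MASK (1U << 1)
#define CR0_EM_MASK (1U << 2)
#define CR0_TS_MASK (1U << 3)
#define CR0_ET_MASK (1U << 4)
#define CR0_NE_MASK (1U << 5)
#define CR0_WP_MASK (1U << 16)
#define CR0_AM_MASK (1U << 18)
#define CR0_NW_MASK (1U << 29)
#define CR0_CD_MASK (1U << 30)
#define CR0_PG_MASK (1U << 31)

#define CR4_VME_MASK  (1U << 0)
#define CR4_PAE_MASK  (1U << 5)
#define CR4_VMXE_MASK (1U << 13)
```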
/* 16 bit Task State Segment */
typedef struct x86_tss_segment16 {
uint16_t link;
//ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
VM_PANIC("task_switch_16");
- macvm_set_cr0(cpu->hvf->fd, rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0) | CR0_TS);
+ macvm_set_cr0(cpu->hvf->fd, rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0) |
+ CR0_TS_MASK);
x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);
vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR);
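Setting CR0.TS here follows the architectural task-switch semantics: a hardware task switch sets TS so that the next x87/SSE instruction raises #NM, which lets the OS save and restore FPU state lazily. A tiny sketch of that trigger condition, with illustrative standalone defines:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CR0_EM_MASK (1ULL << 2)
#define CR0_TS_MASK (1ULL << 3)

/* x87 instructions fault with #NM when CR0.TS or CR0.EM is set. */
static bool x87_raises_nm(uint64_t cr0)
{
    return (cr0 & (CR0_TS_MASK | CR0_EM_MASK)) != 0;
}

int main(void)
{
    uint64_t cr0 = CR0_TS_MASK; /* TS was just set by the task switch */
    printf("#NM on next FPU insn: %d\n", x87_raises_nm(cr0));
    return 0;
}
```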