}
case EXIT_REASON_TASK_SWITCH: {
uint64_t vinfo = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_INFO);
- x68_segment_selector sel = {.sel = exit_qual & 0xffff};
+ x86_segment_selector sel = {.sel = exit_qual & 0xffff};
vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo
& VMCS_INTR_T_MASK);
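/*
 * Note on the decode above: per the Intel SDM, the task-switch exit
 * qualification carries the new TSS selector in bits 15:0 (hence the cast
 * of the low word into x86_segment_selector) and the switch source in bits
 * 31:30 (0 = CALL, 1 = IRET, 2 = JMP, 3 = task gate in the IDT), which
 * becomes the reason argument of vmx_handle_task_switch().
 */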
bool x86_read_segment_descriptor(CPUState *cpu,
struct x86_segment_descriptor *desc,
- x68_segment_selector sel)
+ x86_segment_selector sel)
{
target_ulong base;
uint32_t limit;
bool x86_write_segment_descriptor(CPUState *cpu,
struct x86_segment_descriptor *desc,
- x68_segment_selector sel)
+ x86_segment_selector sel)
{
target_ulong base;
uint32_t limit;
#define GDT_SEL 0
#define LDT_SEL 1
-typedef struct x68_segment_selector {
+typedef struct x86_segment_selector {
union {
uint16_t sel;
struct {
    uint16_t rpl:2;    /* requested privilege level, bits 1:0 */
    uint16_t ti:1;     /* table indicator: 0 = GDT, 1 = LDT, bit 2 */
    uint16_t index:13; /* descriptor table index, bits 15:3 */
};
};
-} __attribute__ ((__packed__)) x68_segment_selector;
+} __attribute__ ((__packed__)) x86_segment_selector;
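/*
 * Illustrative sketch, not part of the patch: with the low-to-high bitfield
 * allocation used by GCC/Clang on x86, a raw selector value decodes
 * directly through the union. For example, 0x002B is GDT index 5, RPL 3:
 */
static inline void selector_layout_example(void)
{
    x86_segment_selector s = { .sel = 0x002B };
    /* here s.rpl == 3, s.ti == GDT_SEL and s.index == 5 */
    (void)s;
}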
/* useful register access macros */
#define x86_reg(cpu, reg) ((x86_register *) &cpu->regs[reg])
/* deal with GDT/LDT descriptors in memory */
bool x86_read_segment_descriptor(CPUState *cpu,
struct x86_segment_descriptor *desc,
- x68_segment_selector sel);
+ x86_segment_selector sel);
bool x86_write_segment_descriptor(CPUState *cpu,
struct x86_segment_descriptor *desc,
- x68_segment_selector sel);
+ x86_segment_selector sel);
bool x86_read_call_gate(CPUState *cpu, struct x86_call_gate *idt_desc,
int gate);
return rvmcs(cpu->accel->fd, vmx_segment_fields[seg].base);
}
-x68_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg)
+x86_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg)
{
- x68_segment_selector sel;
+ x86_segment_selector sel;
sel.sel = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector);
return sel;
}
-void vmx_write_segment_selector(CPUState *cpu, x68_segment_selector selector, X86Seg seg)
+void vmx_write_segment_selector(CPUState *cpu, x86_segment_selector selector, X86Seg seg)
{
wvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector, selector.sel);
}
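/*
 * Hedged usage sketch, not from the patch: the accessor pair makes VMCS
 * selector fields read-modify-write friendly, e.g.
 *
 *     x86_segment_selector sel = vmx_read_segment_selector(cpu, R_CS);
 *     sel.rpl = 3;                        // e.g. force a user-mode RPL
 *     vmx_write_segment_selector(cpu, sel, R_CS);
 */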
wvmcs(cpu->accel->fd, sf->ar_bytes, desc->ar);
}
-void x86_segment_descriptor_to_vmx(CPUState *cpu, x68_segment_selector selector,
+void x86_segment_descriptor_to_vmx(CPUState *cpu, x86_segment_selector selector,
struct x86_segment_descriptor *desc,
struct vmx_segment *vmx_desc)
{
void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc,
enum X86Seg seg);
-x68_segment_selector vmx_read_segment_selector(CPUState *cpu,
+x86_segment_selector vmx_read_segment_selector(CPUState *cpu,
enum X86Seg seg);
void vmx_write_segment_selector(CPUState *cpu,
- x68_segment_selector selector,
+ x86_segment_selector selector,
enum X86Seg seg);
uint64_t vmx_read_segment_base(CPUState *cpu, enum X86Seg seg);
uint64_t base);
void x86_segment_descriptor_to_vmx(CPUState *cpu,
- x68_segment_selector selector,
+ x86_segment_selector selector,
struct x86_segment_descriptor *desc,
struct vmx_segment *vmx_desc);
RSI(env) = tss->esi;
RDI(env) = tss->edi;
- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ldt}}, R_LDTR);
- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->es}}, R_ES);
- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->cs}}, R_CS);
- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ss}}, R_SS);
- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ds}}, R_DS);
- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->fs}}, R_FS);
- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->gs}}, R_GS);
+ vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->ldt}}, R_LDTR);
+ vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->es}}, R_ES);
+ vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->cs}}, R_CS);
+ vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->ss}}, R_SS);
+ vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->ds}}, R_DS);
+ vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->fs}}, R_FS);
+ vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->gs}}, R_GS);
}
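/*
 * Note on the compound literals above: x86_segment_selector wraps an
 * anonymous union, so (x86_segment_selector){{tss->es}} needs both brace
 * pairs, the outer one for the struct and the inner one for the union's
 * first member, i.e. .sel = tss->es.
 */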
-static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segment_selector old_tss_sel,
+static int task_switch_32(CPUState *cpu, x86_segment_selector tss_sel, x86_segment_selector old_tss_sel,
uint64_t old_tss_base, struct x86_segment_descriptor *new_desc)
{
struct x86_tss_segment32 tss_seg;
return 0;
}
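/*
 * Hedged note: the task_switch_32 body is elided in this hunk; in context
 * it saves the outgoing register state into the 32-bit TSS at old_tss_base
 * and loads the incoming TSS described by new_desc, serving as the 32-bit
 * leg of vmx_handle_task_switch() below.
 */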
-void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)
+void vmx_handle_task_switch(CPUState *cpu, x86_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)
{
uint64_t rip = rreg(cpu->accel->fd, HV_X86_RIP);
if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&
load_regs(cpu);
struct x86_segment_descriptor curr_tss_desc, next_tss_desc;
- x68_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR);
+ x86_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR);
uint64_t old_tss_base = vmx_read_segment_base(cpu, R_TR);
uint32_t desc_limit;
struct x86_call_gate task_gate_desc;
x86_read_call_gate(cpu, &task_gate_desc, gate);
dpl = task_gate_desc.dpl;
- x68_segment_selector cs = vmx_read_segment_selector(cpu, R_CS);
+ x86_segment_selector cs = vmx_read_segment_selector(cpu, R_CS);
if (tss_sel.rpl > dpl || cs.rpl > dpl)
;//DPRINTF("emulate_gp");
}
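/*
 * Context for the privilege check above: per the Intel SDM, a task switch
 * through a task gate requires the gate DPL to be no less than both the
 * current CPL (cs.rpl here) and the gate selector's RPL; the stubbed
 * DPRINTF marks where a #GP would be injected on violation.
 */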
#ifndef HVF_X86_TASK_H
#define HVF_X86_TASK_H
-void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel,
+void vmx_handle_task_switch(CPUState *cpu, x86_segment_selector tss_sel,
int reason, bool gate_valid, uint8_t gate, uint64_t gate_type);
#endif