        VCPU_EXREG_CR3,
        VCPU_EXREG_RFLAGS,
        VCPU_EXREG_SEGMENTS,
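+       /* VM-exit qualification, read from the VMCS on demand */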
+       VCPU_EXREG_EXIT_INFO_1,
 };
 
 enum {
 
        gva_t gva;
        struct x86_exception e;
 
-       if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+       if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
                                vmcs_read32(VMX_INSTRUCTION_INFO), false,
                                sizeof(*vmpointer), &gva))
                return 1;
 {
        struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
                                                    : get_vmcs12(vcpu);
-       unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+       unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
        u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct x86_exception e;
 {
        struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
                                                    : get_vmcs12(vcpu);
-       unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+       unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
        u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct x86_exception e;
 /* Emulate the VMPTRST instruction */
 static int handle_vmptrst(struct kvm_vcpu *vcpu)
 {
-       unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
+       unsigned long exit_qual = vmx_get_exit_qual(vcpu);
        u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
        gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
        struct x86_exception e;
        /* According to the Intel VMX instruction reference, the memory
         * operand is read even if it isn't needed (e.g., for type==global)
         */
-       if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+       if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
                        vmx_instruction_info, false, sizeof(operand), &gva))
                return 1;
        if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
        /* according to the intel vmx instruction reference, the memory
         * operand is read even if it isn't needed (e.g., for type==global)
         */
-       if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+       if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
                        vmx_instruction_info, false, sizeof(operand), &gva))
                return 1;
        if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
 fail:
        nested_vmx_vmexit(vcpu, vmx->exit_reason,
                          vmcs_read32(VM_EXIT_INTR_INFO),
-                         vmcs_readl(EXIT_QUALIFICATION));
+                         vmx_get_exit_qual(vcpu));
        return 1;
 }
 
        if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
                return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
 
-       exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+       exit_qualification = vmx_get_exit_qual(vcpu);
 
        port = exit_qualification >> 16;
        size = (exit_qualification & 7) + 1;
 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
        struct vmcs12 *vmcs12)
 {
-       unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+       unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
        int cr = exit_qualification & 15;
        int reg;
        unsigned long val;
        }
 
        exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-       exit_qual = vmcs_readl(EXIT_QUALIFICATION);
+       exit_qual = vmx_get_exit_qual(vcpu);
 
        trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, exit_qual,
                                vmx->idt_vectoring_info, exit_intr_info,
 
        }
 
        if (is_page_fault(intr_info)) {
-               cr2 = vmcs_readl(EXIT_QUALIFICATION);
+               cr2 = vmx_get_exit_qual(vcpu);
                /* EPT won't cause page fault directly */
                WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);
                return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
                kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
                return 1;
        case DB_VECTOR:
-               dr6 = vmcs_readl(EXIT_QUALIFICATION);
+               dr6 = vmx_get_exit_qual(vcpu);
                if (!(vcpu->guest_debug &
                      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
                        vcpu->arch.dr6 &= ~DR_TRAP_BITS;
        int size, in, string;
        unsigned port;
 
-       exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+       exit_qualification = vmx_get_exit_qual(vcpu);
        string = (exit_qualification & 16) != 0;
 
        ++vcpu->stat.io_exits;
        int err;
        int ret;
 
-       exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+       exit_qualification = vmx_get_exit_qual(vcpu);
        cr = exit_qualification & 15;
        reg = (exit_qualification >> 8) & 15;
        switch ((exit_qualification >> 4) & 3) {
        unsigned long exit_qualification;
        int dr, dr7, reg;
 
-       exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+       exit_qualification = vmx_get_exit_qual(vcpu);
        dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
 
        /* First, if DR does not exist, trigger UD */
 
 static int handle_invlpg(struct kvm_vcpu *vcpu)
 {
-       unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+       unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
 
        kvm_mmu_invlpg(vcpu, exit_qualification);
        return kvm_skip_emulated_instruction(vcpu);
 static int handle_apic_access(struct kvm_vcpu *vcpu)
 {
        if (likely(fasteoi)) {
-               unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+               unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
                int access_type, offset;
 
                access_type = exit_qualification & APIC_ACCESS_TYPE;
 
 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
 {
-       unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+       unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
        int vector = exit_qualification & 0xff;
 
        /* EOI-induced VM exit is trap-like and thus no need to adjust IP */
 
 static int handle_apic_write(struct kvm_vcpu *vcpu)
 {
-       unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+       unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
        u32 offset = exit_qualification & 0xfff;
 
        /* APIC-write VM exit is trap-like and thus no need to adjust IP */
        idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
        type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
 
-       exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+       exit_qualification = vmx_get_exit_qual(vcpu);
 
        reason = (u32)exit_qualification >> 30;
        if (reason == TASK_SWITCH_GATE && idt_v) {
        gpa_t gpa;
        u64 error_code;
 
-       exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+       exit_qualification = vmx_get_exit_qual(vcpu);
 
        /*
         * EPT violation happened while executing iret from NMI,
        /* According to the Intel instruction reference, the memory operand
         * is read even if it isn't needed (e.g., for type==all)
         */
-       if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+       if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
                                vmx_instruction_info, false,
                                sizeof(operand), &gva))
                return 1;
 
        trace_kvm_pml_full(vcpu->vcpu_id);
 
-       exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+       exit_qualification = vmx_get_exit_qual(vcpu);
 
        /*
         * PML buffer FULL happened while executing iret from NMI,
 
 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
 {
-       *info1 = vmcs_readl(EXIT_QUALIFICATION);
+       *info1 = vmx_get_exit_qual(vcpu);
        *info2 = vmcs_read32(VM_EXIT_INTR_INFO);
 }
 
 
         */
        bool                  guest_state_loaded;
 
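+       /*
+        * Cached vmcs.EXIT_QUALIFICATION; valid only when
+        * VCPU_EXREG_EXIT_INFO_1 is marked available.
+        */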
+       unsigned long         exit_qualification;
        u32                   exit_intr_info;
        u32                   idt_vectoring_info;
        ulong                 rflags;
                                  | (1 << VCPU_EXREG_RFLAGS)
                                  | (1 << VCPU_EXREG_PDPTR)
                                  | (1 << VCPU_EXREG_SEGMENTS)
-                                 | (1 << VCPU_EXREG_CR3));
+                                 | (1 << VCPU_EXREG_CR3)
+                                 | (1 << VCPU_EXREG_EXIT_INFO_1));
        vcpu->arch.regs_dirty = 0;
 }
 
        return &(to_vmx(vcpu)->pi_desc);
 }
 
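+/*
+ * Return vmcs.EXIT_QUALIFICATION for the current VM-exit, reading it from
+ * the VMCS only on first use after the register cache has been reset.
+ */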
+static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
+               kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
+               vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+       }
+       return vmx->exit_qualification;
+}
+
 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
 void free_vmcs(struct vmcs *vmcs);
 int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);