KVM: x86/mmu: Use 64-bit address to invalidate to fix a subtle bug
author Lai Jiangshan <jiangshan.ljs@antgroup.com>
Thu, 16 Feb 2023 15:41:07 +0000 (23:41 +0800)
committer Sean Christopherson <seanjc@google.com>
Thu, 16 Mar 2023 19:41:05 +0000 (12:41 -0700)
FNAME(invlpg)() and kvm_mmu_invalidate_gva() take the address to
invalidate as a gva_t, i.e. an unsigned long.  On 32-bit kernels, an
unsigned long is only 32 bits, so the upper 32 bits of the GPA get
dropped when an L2 GPA is invalidated in the shadowed nested TDP MMU.

Convert the address to a u64 to fix the problem.

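For illustration only, a minimal user-space sketch (not kernel code;
invalidate_old()/invalidate_new() are hypothetical stand-ins for the
old and new signatures) of how the truncation bites when the snippet
is built for a 32-bit target, e.g. with gcc -m32:

  #include <stdint.h>
  #include <stdio.h>

  typedef unsigned long gva_t;  /* only 32 bits on a 32-bit build */

  /* Old shape: a 64-bit GPA is silently truncated to 32 bits. */
  static void invalidate_old(gva_t addr)
  {
          printf("old: 0x%llx\n", (unsigned long long)addr);
  }

  /* New shape: the full 64-bit address survives on any target. */
  static void invalidate_new(uint64_t addr)
  {
          printf("new: 0x%llx\n", (unsigned long long)addr);
  }

  int main(void)
  {
          uint64_t l2_gpa = 0x1234567890ULL;  /* L2 GPA above 4GiB */

          invalidate_old(l2_gpa);  /* 32-bit build: prints old: 0x34567890 */
          invalidate_new(l2_gpa);  /* prints new: 0x1234567890 everywhere */
          return 0;
  }
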
Reported-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Link: https://lore.kernel.org/r/20230216154115.710033-2-jiangshanlai@gmail.com
[sean: tweak changelog]
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/x86.c

index a45de1118a42d6976d659055363c3de86e5c4630..2146365546753677101e803226eede4b68055402 100644
@@ -441,7 +441,7 @@ struct kvm_mmu {
                            struct x86_exception *exception);
        int (*sync_page)(struct kvm_vcpu *vcpu,
                         struct kvm_mmu_page *sp);
-       void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
+       void (*invlpg)(struct kvm_vcpu *vcpu, u64 addr, hpa_t root_hpa);
        struct kvm_mmu_root_info root;
        union kvm_cpu_role cpu_role;
        union kvm_mmu_page_role root_role;
@@ -2044,8 +2044,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
                       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
-void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-                           gva_t gva, hpa_t root_hpa);
+void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+                            u64 addr, hpa_t root_hpa);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
 
index 144c5a01cd778092034ee5fd002a2f0e14e33135..edad1a4828dc72689e579143ffbd03d6176eb170 100644
@@ -5707,25 +5707,25 @@ emulate:
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
-void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-                           gva_t gva, hpa_t root_hpa)
+void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+                            u64 addr, hpa_t root_hpa)
 {
        int i;
 
        /* It's actually a GPA for vcpu->arch.guest_mmu.  */
        if (mmu != &vcpu->arch.guest_mmu) {
                /* INVLPG on a non-canonical address is a NOP according to the SDM.  */
-               if (is_noncanonical_address(gva, vcpu))
+               if (is_noncanonical_address(addr, vcpu))
                        return;
 
-               static_call(kvm_x86_flush_tlb_gva)(vcpu, gva);
+               static_call(kvm_x86_flush_tlb_gva)(vcpu, addr);
        }
 
        if (!mmu->invlpg)
                return;
 
        if (root_hpa == INVALID_PAGE) {
-               mmu->invlpg(vcpu, gva, mmu->root.hpa);
+               mmu->invlpg(vcpu, addr, mmu->root.hpa);
 
                /*
                 * INVLPG is required to invalidate any global mappings for the VA,
@@ -5740,15 +5740,15 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                 */
                for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
                        if (VALID_PAGE(mmu->prev_roots[i].hpa))
-                               mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
+                               mmu->invlpg(vcpu, addr, mmu->prev_roots[i].hpa);
        } else {
-               mmu->invlpg(vcpu, gva, root_hpa);
+               mmu->invlpg(vcpu, addr, root_hpa);
        }
 }
 
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
-       kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
+       kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
        ++vcpu->stat.invlpg;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
index a056f2773dd9014c200afdc60b354e59d8c7589d..0a9c11c24195534f09c1bf871b8ab6820961202c 100644
@@ -846,7 +846,8 @@ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
        return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
 }
 
-static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
+/* Note, @addr is a GPA when invlpg() invalidates an L2 GPA translation in shadowed TDP */
+static void FNAME(invlpg)(struct kvm_vcpu *vcpu, u64 addr, hpa_t root_hpa)
 {
        struct kvm_shadow_walk_iterator iterator;
        struct kvm_mmu_page *sp;
@@ -854,7 +855,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
        int level;
        u64 *sptep;
 
-       vcpu_clear_mmio_info(vcpu, gva);
+       vcpu_clear_mmio_info(vcpu, addr);
 
        /*
         * No need to check return value here, rmap_can_add() can
@@ -868,7 +869,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
        }
 
        write_lock(&vcpu->kvm->mmu_lock);
-       for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) {
+       for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) {
                level = iterator.level;
                sptep = iterator.sptep;
 
index 237c483b1230168e8bc4cb16e9bb1de7f392c9af..0b6b587d791409dca957d7e208594ee36a374d49 100644
@@ -802,8 +802,8 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
         */
        if ((fault->error_code & PFERR_PRESENT_MASK) &&
            !(fault->error_code & PFERR_RSVD_MASK))
-               kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address,
-                                      fault_mmu->root.hpa);
+               kvm_mmu_invalidate_addr(vcpu, fault_mmu, fault->address,
+                                       fault_mmu->root.hpa);
 
        fault_mmu->inject_page_fault(vcpu, fault);
 }