KVM: x86/mmu: Fix wrong start gfn of tlb flushing with range
author Hou Wenlong <houwenlong.hwl@antgroup.com>
Mon, 10 Oct 2022 12:19:15 +0000 (20:19 +0800)
committer Sean Christopherson <seanjc@google.com>
Tue, 24 Jan 2023 18:05:47 +0000 (10:05 -0800)
When a spte is dropped, the start gfn of tlb flushing should be the gfn
of the spte, not the base gfn of the SP which contains the spte. Also
introduce a helper function to do range-based flushing when a spte is
dropped, which would help prevent future buggy use of
kvm_flush_remote_tlbs_with_address() in such cases.
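
To illustrate the off-by-range bug, here is a standalone sketch (not part of
the patch, and not kernel code): for a direct shadow page,
kvm_mmu_page_get_gfn() effectively resolves to
sp->gfn + (index << ((level - 1) * 9)), so a dropped SPTE at a non-zero index
maps a gfn well past sp->gfn, and flushing KVM_PAGES_PER_HPAGE(level) pages
starting at sp->gfn misses the SPTE's actual range. The names below (spte_gfn,
sp_gfn) are illustrative only.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t gfn_t;

#define SPTE_LEVEL_BITS		9
#define KVM_PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * SPTE_LEVEL_BITS))

/* gfn mapped by SPTE 'index' in a direct SP whose base gfn is 'sp_gfn'. */
static gfn_t spte_gfn(gfn_t sp_gfn, int level, int index)
{
	return sp_gfn + ((gfn_t)index << ((level - 1) * SPTE_LEVEL_BITS));
}

int main(void)
{
	gfn_t sp_gfn = 0x40000;		/* base gfn of a level-2 shadow page */
	int level = 2, index = 5;	/* dropped SPTE sits at entry 5      */

	/* Old behavior: flush anchored at sp->gfn, missing the SPTE's range. */
	printf("old start gfn: 0x%llx, pages: %llu\n",
	       (unsigned long long)sp_gfn,
	       (unsigned long long)KVM_PAGES_PER_HPAGE(level));

	/* Fixed behavior: flush anchored at the gfn the SPTE actually maps. */
	printf("new start gfn: 0x%llx, pages: %llu\n",
	       (unsigned long long)spte_gfn(sp_gfn, level, index),
	       (unsigned long long)KVM_PAGES_PER_HPAGE(level));
	return 0;
}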

Fixes: c3134ce240eed ("KVM: Replace old tlb flush function with new one to flush a specified range.")
Suggested-by: David Matlack <dmatlack@google.com>
Signed-off-by: Hou Wenlong <houwenlong.hwl@antgroup.com>
Link: https://lore.kernel.org/r/72ac2169a261976f00c1703e88cda676dfb960f5.1665214747.git.houwenlong.hwl@antgroup.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h

index 52be98e0102ac0d5ebdcee03e0473901d2e6c100..5e34f07e46ba88f240f7bfb72644ef5954590529 100644 (file)
@@ -269,6 +269,17 @@ void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
        kvm_flush_remote_tlbs_with_range(kvm, &range);
 }
 
+static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
+
+/* Flush the range of guest memory mapped by the given SPTE. */
+static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep)
+{
+       struct kvm_mmu_page *sp = sptep_to_sp(sptep);
+       gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep));
+
+       kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
+}
+
 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
                           unsigned int access)
 {
@@ -1187,8 +1198,7 @@ static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)
        drop_spte(kvm, sptep);
 
        if (flush)
-               kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
-                       KVM_PAGES_PER_HPAGE(sp->role.level));
+               kvm_flush_remote_tlbs_sptep(kvm, sptep);
 }
 
 /*
@@ -1639,8 +1649,7 @@ static void __rmap_add(struct kvm *kvm,
                kvm->stat.max_mmu_rmap_size = rmap_count;
        if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
                kvm_zap_all_rmap_sptes(kvm, rmap_head);
-               kvm_flush_remote_tlbs_with_address(
-                               kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
+               kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
        }
 }
 
@@ -6521,8 +6530,7 @@ restart:
                        kvm_zap_one_rmap_spte(kvm, rmap_head, sptep);
 
                        if (kvm_available_flush_tlb_with_range())
-                               kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
-                                       KVM_PAGES_PER_HPAGE(sp->role.level));
+                               kvm_flush_remote_tlbs_sptep(kvm, sptep);
                        else
                                need_tlb_flush = 1;
 
index faf193e970614b84767621f73f1e4c396dfb97d0..57f0b75c80f9d52a8d0401e8ebadb39f2d6d1283 100644 (file)
@@ -927,8 +927,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
 
                        mmu_page_zap_pte(vcpu->kvm, sp, sptep, NULL);
                        if (is_shadow_present_pte(old_spte))
-                               kvm_flush_remote_tlbs_with_address(vcpu->kvm,
-                                       sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
+                               kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep);
 
                        if (!rmap_can_add(vcpu))
                                break;