KVM: x86/mmu: Allow zapping collapsible SPTEs to use MMU read lock
author Ben Gardon <bgardon@google.com>
Thu, 1 Apr 2021 23:37:33 +0000 (16:37 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Mon, 19 Apr 2021 13:06:04 +0000 (09:06 -0400)
To reduce the impact of disabling dirty logging, change the TDP MMU
function which zaps collapsible SPTEs to run under the MMU read lock.
This way, page faults on zapped SPTEs can proceed in parallel with
kvm_mmu_zap_collapsible_sptes.

Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20210401233736.638171-11-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_mmu.c
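
For context, the patched kvm_mmu_zap_collapsible_sptes() ends up looking roughly like this once the mmu.c hunk below is applied (condensed sketch; the prototype and local declarations are omitted, so it is not a compilable excerpt):

        /* The legacy/shadow MMU path still zaps and flushes under the write lock. */
        write_lock(&kvm->mmu_lock);
        flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
        if (flush)
                kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
        write_unlock(&kvm->mmu_lock);

        if (is_tdp_mmu_enabled(kvm)) {
                flush = false;

                /*
                 * The TDP MMU pass runs under the read lock, so vCPU page
                 * faults on the zapped SPTEs can be handled in parallel.
                 */
                read_lock(&kvm->mmu_lock);
                flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
                if (flush)
                        kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
                read_unlock(&kvm->mmu_lock);
        }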

arch/x86/kvm/mmu/mmu.c
index d29aded6e4bc2b8f3672b73cb8312fed0787d87b..2390e8a8fff139f548e52524f84d969ba17fa183 100644
@@ -5601,13 +5601,19 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
        write_lock(&kvm->mmu_lock);
        flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
 
-       if (is_tdp_mmu_enabled(kvm))
-               flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
-
        if (flush)
                kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
-
        write_unlock(&kvm->mmu_lock);
+
+       if (is_tdp_mmu_enabled(kvm)) {
+               flush = false;
+
+               read_lock(&kvm->mmu_lock);
+               flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
+               if (flush)
+                       kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+               read_unlock(&kvm->mmu_lock);
+       }
 }
 
 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
arch/x86/kvm/mmu/tdp_mmu.c
index 2fb81033aba0000ba0476bff82af11f5656379dc..6a3b7ba4aa3c1f062f6185419f82df0b91e794fd 100644
@@ -1257,7 +1257,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
        rcu_read_lock();
 
        tdp_root_for_each_pte(iter, root, start, end) {
-               if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
+retry:
+               if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
                        flush = false;
                        continue;
                }
@@ -1272,8 +1273,14 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
                                                            pfn, PG_LEVEL_NUM))
                        continue;
 
-               tdp_mmu_set_spte(kvm, &iter, 0);
-
+               if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
+                       /*
+                        * The iter must explicitly re-read the SPTE because
+                        * the atomic cmpxchg failed.
+                        */
+                       iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
+                       goto retry;
+               }
                flush = true;
        }
 
@@ -1292,7 +1299,9 @@ bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
 {
        struct kvm_mmu_page *root;
 
-       for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, false)
+       lockdep_assert_held_read(&kvm->mmu_lock);
+
+       for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
                flush = zap_collapsible_spte_range(kvm, root, slot, flush);
 
        return flush;
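
Combined with the retry introduced above, the zap loop in zap_collapsible_spte_range() now behaves roughly as sketched below (condensed from the tdp_mmu.c hunks; the SPTE level and pfn checks elided in the comment are the ones visible in the second hunk). A failed tdp_mmu_zap_spte_atomic() means another thread changed the SPTE concurrently, so the iterator re-reads it and retries; the final "true" passed to tdp_mmu_iter_cond_resched() reflects that the lock is now held for read.

        tdp_root_for_each_pte(iter, root, start, end) {
retry:
                if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
                        flush = false;
                        continue;
                }

                /* ... skip SPTEs that cannot be collapsed (see hunk above) ... */

                if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
                        /*
                         * The iter must explicitly re-read the SPTE because
                         * the atomic cmpxchg failed.
                         */
                        iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
                        goto retry;
                }
                flush = true;
        }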