KVM: MIPS: rework flush_shadow_* callbacks into one that prepares the flush
author		Paolo Bonzini <pbonzini@redhat.com>
		Wed, 31 Mar 2021 07:38:16 +0000 (09:38 +0200)
committer	Paolo Bonzini <pbonzini@redhat.com>
		Sat, 17 Apr 2021 12:31:05 +0000 (08:31 -0400)
In both trap-and-emulate and VZ, a single implementation backs
.flush_shadow_all and .flush_shadow_memslot, and in each case it ends
with a call to kvm_flush_remote_tlbs.

Unify the callbacks into one and extract the call to kvm_flush_remote_tlbs.
The next patches will pull it further out of the architecture-specific
MMU notifier functions kvm_unmap_hva_range and kvm_set_spte_hva.
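
As a minimal sketch of the resulting call pattern (simplified from the
hunks below; flush_shadow() is only an illustrative name, not a function
added by this patch):

	/*
	 * The architecture-specific callback only prepares the flush,
	 * e.g. marking ASIDs stale on VZ without GuestID; common code
	 * then issues the actual remote TLB flush.
	 */
	static void flush_shadow(struct kvm *kvm)
	{
		kvm_mips_callbacks->prepare_flush_shadow(kvm);
		kvm_flush_remote_tlbs(kvm);
	}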

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/mips/include/asm/kvm_host.h
arch/mips/kvm/mips.c
arch/mips/kvm/mmu.c
arch/mips/kvm/trap_emul.c
arch/mips/kvm/vz.c

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index feaa77036b67f3db641502999f8e3f3e007a45d0..6c8c0ab53be28861859e86e0ae938a5ff609772c 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -815,14 +815,7 @@ struct kvm_mips_callbacks {
        int (*vcpu_init)(struct kvm_vcpu *vcpu);
        void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
        int (*vcpu_setup)(struct kvm_vcpu *vcpu);
-       void (*flush_shadow_all)(struct kvm *kvm);
-       /*
-        * Must take care of flushing any cached GPA PTEs (e.g. guest entries in
-        * VZ root TLB, or T&E GVA page tables and corresponding root TLB
-        * mappings).
-        */
-       void (*flush_shadow_memslot)(struct kvm *kvm,
-                                    const struct kvm_memory_slot *slot);
+       void (*prepare_flush_shadow)(struct kvm *kvm);
        gpa_t (*gva_to_gpa)(gva_t gva);
        void (*queue_timer_int)(struct kvm_vcpu *vcpu);
        void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 7db8234a44072d14a2d1a47959ce46ada06624f1..867b8de0fc07395c86b83ef6ff13da17bccb6efc 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -206,7 +206,8 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
        kvm_mips_flush_gpa_pt(kvm, 0, ~0);
 
        /* Let implementation do the rest */
-       kvm_mips_callbacks->flush_shadow_all(kvm);
+       kvm_mips_callbacks->prepare_flush_shadow(kvm);
+       kvm_flush_remote_tlbs(kvm);
 }
 
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
@@ -221,8 +222,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
        /* Flush slot from GPA */
        kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
                              slot->base_gfn + slot->npages - 1);
-       /* Let implementation do the rest */
-       kvm_mips_callbacks->flush_shadow_memslot(kvm, slot);
+       kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
        spin_unlock(&kvm->mmu_lock);
 }
 
@@ -262,9 +262,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
                /* Write protect GPA page table entries */
                needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
                                        new->base_gfn + new->npages - 1);
-               /* Let implementation do the rest */
                if (needs_flush)
-                       kvm_mips_callbacks->flush_shadow_memslot(kvm, new);
+                       kvm_arch_flush_remote_tlbs_memslot(kvm, new);
                spin_unlock(&kvm->mmu_lock);
        }
 }
@@ -1000,7 +999,8 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
                                        const struct kvm_memory_slot *memslot)
 {
        /* Let implementation handle TLB/GVA invalidation */
-       kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
+       kvm_mips_callbacks->prepare_flush_shadow(kvm);
+       kvm_flush_remote_tlbs(kvm);
 }
 
 long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 3dabeda82458678c8995d3ad84ad4c7270c0f346..7e055e5dfd3cf4e63eeeef407af5ef0ac352430e 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -491,7 +491,8 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
 {
        handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
 
-       kvm_mips_callbacks->flush_shadow_all(kvm);
+       kvm_mips_callbacks->prepare_flush_shadow(kvm);
+       kvm_flush_remote_tlbs(kvm);
        return 0;
 }
 
@@ -532,8 +533,10 @@ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
        int ret;
 
        ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
-       if (ret)
-               kvm_mips_callbacks->flush_shadow_all(kvm);
+       if (ret) {
+               kvm_mips_callbacks->prepare_flush_shadow(kvm);
+               kvm_flush_remote_tlbs(kvm);
+       }
        return 0;
 }
 
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 0788c00d7e94b4c54d25fd03e5f8d290de8c2e1a..5f2df497599c2db86adc97580c648ab3eccfd410 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -687,16 +687,8 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
+static void kvm_trap_emul_prepare_flush_shadow(struct kvm *kvm)
 {
-       /* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */
-       kvm_flush_remote_tlbs(kvm);
-}
-
-static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
-                                       const struct kvm_memory_slot *slot)
-{
-       kvm_trap_emul_flush_shadow_all(kvm);
 }
 
 static u64 kvm_trap_emul_get_one_regs[] = {
@@ -1280,8 +1272,7 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
        .vcpu_init = kvm_trap_emul_vcpu_init,
        .vcpu_uninit = kvm_trap_emul_vcpu_uninit,
        .vcpu_setup = kvm_trap_emul_vcpu_setup,
-       .flush_shadow_all = kvm_trap_emul_flush_shadow_all,
-       .flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
+       .prepare_flush_shadow = kvm_trap_emul_prepare_flush_shadow,
        .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
        .queue_timer_int = kvm_mips_queue_timer_int_cb,
        .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index 2ffbe9264a316e48224bd02793bb5608caebd4f5..2c75571dc4a25cc6865b611972e16f5c10c25b3a 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -3211,32 +3211,22 @@ static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static void kvm_vz_flush_shadow_all(struct kvm *kvm)
+static void kvm_vz_prepare_flush_shadow(struct kvm *kvm)
 {
-       if (cpu_has_guestid) {
-               /* Flush GuestID for each VCPU individually */
-               kvm_flush_remote_tlbs(kvm);
-       } else {
+       if (!cpu_has_guestid) {
                /*
                 * For each CPU there is a single GPA ASID used by all VCPUs in
                 * the VM, so it doesn't make sense for the VCPUs to handle
                 * invalidation of these ASIDs individually.
                 *
                 * Instead mark all CPUs as needing ASID invalidation in
-                * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
+                * asid_flush_mask, and kvm_flush_remote_tlbs(kvm) will
                 * kick any running VCPUs so they check asid_flush_mask.
                 */
                cpumask_setall(&kvm->arch.asid_flush_mask);
-               kvm_flush_remote_tlbs(kvm);
        }
 }
 
-static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
-                                       const struct kvm_memory_slot *slot)
-{
-       kvm_vz_flush_shadow_all(kvm);
-}
-
 static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu)
 {
        int cpu = smp_processor_id();
@@ -3292,8 +3282,7 @@ static struct kvm_mips_callbacks kvm_vz_callbacks = {
        .vcpu_init = kvm_vz_vcpu_init,
        .vcpu_uninit = kvm_vz_vcpu_uninit,
        .vcpu_setup = kvm_vz_vcpu_setup,
-       .flush_shadow_all = kvm_vz_flush_shadow_all,
-       .flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
+       .prepare_flush_shadow = kvm_vz_prepare_flush_shadow,
        .gva_to_gpa = kvm_vz_gva_to_gpa_cb,
        .queue_timer_int = kvm_vz_queue_timer_int_cb,
        .dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,