kvm: Add interruptible flag to __gfn_to_pfn_memslot()
author     Peter Xu <peterx@redhat.com>          Tue, 11 Oct 2022 19:58:08 +0000 (15:58 -0400)
committer  Paolo Bonzini <pbonzini@redhat.com>   Wed, 9 Nov 2022 17:31:27 +0000 (12:31 -0500)
Add a new "interruptible" flag indicating that the caller is willing to
be interrupted by signals during the __gfn_to_pfn_memslot() request.
Wire it up to the FOLL_INTERRUPTIBLE flag introduced earlier in this
series.

This prepares KVM to respond to SIGUSR1 (which QEMU uses as its SIG_IPI)
even while, e.g., handling a userfaultfd page fault.

No functional change intended.
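
For illustration, a hypothetical caller that opts in would pass the new
fourth argument as true and handle the resulting error pfn.  Nothing in
this patch does so yet (every call site below passes false); the function
name in this sketch is made up, and KVM_PFN_ERR_SIGPENDING is the error
pfn added by the previous patch in this series:

    /*
     * Hypothetical caller sketch (not part of this patch): allow
     * non-fatal signals, e.g. QEMU's SIG_IPI, to interrupt the slow
     * pfn lookup instead of waiting for the fault to resolve.
     */
    static int example_faultin_pfn(const struct kvm_memory_slot *slot,
                                   gfn_t gfn, bool write_fault,
                                   kvm_pfn_t *pfn)
    {
            bool writable;

            /* atomic = false, interruptible = true, async = NULL */
            *pfn = __gfn_to_pfn_memslot(slot, gfn, false, true, NULL,
                                        write_fault, &writable, NULL);
            if (*pfn == KVM_PFN_ERR_SIGPENDING)
                    return -EINTR;  /* exit to userspace, take the signal */
            if (is_error_noslot_pfn(*pfn))
                    return -EFAULT;
            return 0;
    }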

Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20221011195809.557016-4-peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/arm64/kvm/mmu.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/x86/kvm/mmu/mmu.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c
virt/kvm/kvm_mm.h
virt/kvm/pfncache.c

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 60ee3d9f01f8c198b0dd90a35f32594e1cfff2d1..f154d4a7fae0b89a14b2f8cee3c7d72da175bda0 100644
@@ -1239,7 +1239,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         */
        smp_rmb();
 
-       pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
+       pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
                                   write_fault, &writable, NULL);
        if (pfn == KVM_PFN_ERR_HWPOISON) {
                kvm_send_hwpoison_signal(hva, vma_shift);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index e9744b41a226ca9a13b89f524d24f057dbfea9d0..4939f57b6f6a2256ede3bba3e9af0789eed06e9e 100644
@@ -598,7 +598,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
                write_ok = true;
        } else {
                /* Call KVM generic code to do the slow-path check */
-               pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
+               pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
                                           writing, &write_ok, NULL);
                if (is_error_noslot_pfn(pfn))
                        return -EFAULT;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 5d5e12f3bf864a89ad9985788a4bb26b78a87745..9d3743ca16d53e6baf9083bee97cd58101acd695 100644
@@ -846,7 +846,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
                unsigned long pfn;
 
                /* Call KVM generic code to do the slow-path check */
-               pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
+               pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
                                           writing, upgrade_p, NULL);
                if (is_error_noslot_pfn(pfn))
                        return -EFAULT;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f8c92a4a35faa754434d891512e871c53c49b201..0bbfb33fa7355c0dc7da2bdde0500d35bcfa0a0c 100644
@@ -4170,7 +4170,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
        }
 
        async = false;
-       fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, &async,
+       fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, false, &async,
                                          fault->write, &fault->map_writable,
                                          &fault->hva);
        if (!async)
@@ -4187,7 +4187,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
                }
        }
 
-       fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, NULL,
+       fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, false, NULL,
                                          fault->write, &fault->map_writable,
                                          &fault->hva);
        return RET_PF_CONTINUE;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 911b064878dfa3a372234a0ff0d73b8295ccb646..8fe4665bd020bda21d7f44bddee4c890bdb64e6e 100644
@@ -1150,8 +1150,8 @@ kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
 kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
-                              bool atomic, bool *async, bool write_fault,
-                              bool *writable, hva_t *hva);
+                              bool atomic, bool interruptible, bool *async,
+                              bool write_fault, bool *writable, hva_t *hva);
 
 void kvm_release_pfn_clean(kvm_pfn_t pfn);
 void kvm_release_pfn_dirty(kvm_pfn_t pfn);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 558f52dbebbde8f3abcbea9e864f1ec5010b4655..43bbe4fde078fbf6445d4c8e23e5cd6fd800afcd 100644
@@ -2514,7 +2514,7 @@ static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
  * 1 indicates success, -errno is returned if error is detected.
  */
 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
-                          bool *writable, kvm_pfn_t *pfn)
+                          bool interruptible, bool *writable, kvm_pfn_t *pfn)
 {
        unsigned int flags = FOLL_HWPOISON;
        struct page *page;
@@ -2529,6 +2529,8 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
                flags |= FOLL_WRITE;
        if (async)
                flags |= FOLL_NOWAIT;
+       if (interruptible)
+               flags |= FOLL_INTERRUPTIBLE;
 
        npages = get_user_pages_unlocked(addr, 1, &page, flags);
        if (npages != 1)
@@ -2638,6 +2640,7 @@ out:
  * Pin guest page in memory and return its pfn.
  * @addr: host virtual address which maps memory to the guest
  * @atomic: whether this function can sleep
+ * @interruptible: whether the process can be interrupted by non-fatal signals
  * @async: whether this function need to wait IO complete if the
  *         host page is not in the memory
  * @write_fault: whether we should get a writable host page
@@ -2648,8 +2651,8 @@ out:
  * 2): @write_fault = false && @writable, @writable will tell the caller
  *     whether the mapping is writable.
  */
-kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
-                    bool write_fault, bool *writable)
+kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
+                    bool *async, bool write_fault, bool *writable)
 {
        struct vm_area_struct *vma;
        kvm_pfn_t pfn;
@@ -2664,7 +2667,8 @@ kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
        if (atomic)
                return KVM_PFN_ERR_FAULT;
 
-       npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
+       npages = hva_to_pfn_slow(addr, async, write_fault, interruptible,
+                                writable, &pfn);
        if (npages == 1)
                return pfn;
        if (npages == -EINTR)
@@ -2699,8 +2703,8 @@ exit:
 }
 
 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
-                              bool atomic, bool *async, bool write_fault,
-                              bool *writable, hva_t *hva)
+                              bool atomic, bool interruptible, bool *async,
+                              bool write_fault, bool *writable, hva_t *hva)
 {
        unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
 
@@ -2725,7 +2729,7 @@ kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
                writable = NULL;
        }
 
-       return hva_to_pfn(addr, atomic, async, write_fault,
+       return hva_to_pfn(addr, atomic, interruptible, async, write_fault,
                          writable);
 }
 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
@@ -2733,20 +2737,22 @@ EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable)
 {
-       return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
-                                   write_fault, writable, NULL);
+       return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false,
+                                   NULL, write_fault, writable, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
 
 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
 {
-       return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL);
+       return __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, true,
+                                   NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
 
 kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
 {
-       return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL);
+       return __gfn_to_pfn_memslot(slot, gfn, true, false, NULL, true,
+                                   NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
 
diff --git a/virt/kvm/kvm_mm.h b/virt/kvm/kvm_mm.h
index 41da467d99c95ef31412ac82d00111b88bb4f40a..a1ab15006af34e81956590e2f3ddb6c7891c5711 100644
@@ -24,8 +24,8 @@
 #define KVM_MMU_READ_UNLOCK(kvm)       spin_unlock(&(kvm)->mmu_lock)
 #endif /* KVM_HAVE_MMU_RWLOCK */
 
-kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
-                    bool write_fault, bool *writable);
+kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
+                    bool *async, bool write_fault, bool *writable);
 
 #ifdef CONFIG_HAVE_KVM_PFNCACHE
 void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 346e47f15572484b91d7b72be62a65cef1481936..bd4a46aee384bba3eb90ff5d88c9e962621ca29e 100644
@@ -185,7 +185,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
                }
 
                /* We always request a writeable mapping */
-               new_pfn = hva_to_pfn(gpc->uhva, false, NULL, true, NULL);
+               new_pfn = hva_to_pfn(gpc->uhva, false, false, NULL, true, NULL);
                if (is_error_noslot_pfn(new_pfn))
                        goto out_error;
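
Taken together with the previous patch, the slow path now behaves as in
this condensed sketch (not verbatim kernel code): when the caller opted
in, get_user_pages_unlocked() may return -EINTR on a pending non-fatal
signal, and hva_to_pfn() surfaces that as a dedicated error pfn instead
of retrying:

    /* Condensed sketch of hva_to_pfn()'s slow path after this patch */
    npages = hva_to_pfn_slow(addr, async, write_fault, interruptible,
                             writable, &pfn);
    if (npages == 1)
            return pfn;                     /* page pinned successfully */
    if (npages == -EINTR)
            return KVM_PFN_ERR_SIGPENDING;  /* interrupted by a signal */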