KVM: x86/mmu: Move transparent_hugepage_adjust() above __direct_map()
author Sean Christopherson <sean.j.christopherson@intel.com>
Fri, 6 Dec 2019 23:57:25 +0000 (15:57 -0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Wed, 8 Jan 2020 17:16:07 +0000 (18:16 +0100)
Move thp_adjust() above __direct_map() in preparation for calling
thp_adjust() from __direct_map() and FNAME(fetch).

No functional change intended.
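
For reference, the eventual call pattern is expected to look roughly like
the sketch below.  The guard condition and exact placement are hypothetical
and belong to the follow-up patches, not to this one:

	/*
	 * Hypothetical sketch of a future caller, e.g. in __direct_map()
	 * or FNAME(fetch); the actual condition is decided later.
	 */
	if (likely(!force_pt_level))
		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);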

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c

index 313f77a342624e661353f5401a5b057d46577233..f8da3965c3e98dc7cb3b951d76e49fcbd4149edf 100644
@@ -3328,6 +3328,44 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
        __direct_pte_prefetch(vcpu, sp, sptep);
 }
 
+static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
+                                       gfn_t gfn, kvm_pfn_t *pfnp,
+                                       int *levelp)
+{
+       kvm_pfn_t pfn = *pfnp;
+       int level = *levelp;
+
+       /*
+        * Check if it's a transparent hugepage. If this would be an
+        * hugetlbfs page, level wouldn't be set to
+        * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
+        * here.
+        */
+       if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
+           !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
+           PageTransCompoundMap(pfn_to_page(pfn))) {
+               unsigned long mask;
+               /*
+                * mmu_notifier_retry was successful and we hold the
+                * mmu_lock here, so the pmd can't become splitting
+                * from under us, and in turn
+                * __split_huge_page_refcount() can't run from under
+                * us and we can safely transfer the refcount from
+                * PG_tail to PG_head as we switch the pfn to tail to
+                * head.
+                */
+               *levelp = level = PT_DIRECTORY_LEVEL;
+               mask = KVM_PAGES_PER_HPAGE(level) - 1;
+               VM_BUG_ON((gfn & mask) != (pfn & mask));
+               if (pfn & mask) {
+                       kvm_release_pfn_clean(pfn);
+                       pfn &= ~mask;
+                       kvm_get_pfn(pfn);
+                       *pfnp = pfn;
+               }
+       }
+}
+
 static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
                                       gfn_t gfn, kvm_pfn_t *pfnp, int *levelp)
 {
@@ -3418,44 +3456,6 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
        return -EFAULT;
 }
 
-static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
-                                       gfn_t gfn, kvm_pfn_t *pfnp,
-                                       int *levelp)
-{
-       kvm_pfn_t pfn = *pfnp;
-       int level = *levelp;
-
-       /*
-        * Check if it's a transparent hugepage. If this would be an
-        * hugetlbfs page, level wouldn't be set to
-        * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
-        * here.
-        */
-       if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
-           !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
-           PageTransCompoundMap(pfn_to_page(pfn))) {
-               unsigned long mask;
-               /*
-                * mmu_notifier_retry was successful and we hold the
-                * mmu_lock here, so the pmd can't become splitting
-                * from under us, and in turn
-                * __split_huge_page_refcount() can't run from under
-                * us and we can safely transfer the refcount from
-                * PG_tail to PG_head as we switch the pfn to tail to
-                * head.
-                */
-               *levelp = level = PT_DIRECTORY_LEVEL;
-               mask = KVM_PAGES_PER_HPAGE(level) - 1;
-               VM_BUG_ON((gfn & mask) != (pfn & mask));
-               if (pfn & mask) {
-                       kvm_release_pfn_clean(pfn);
-                       pfn &= ~mask;
-                       kvm_get_pfn(pfn);
-                       *pfnp = pfn;
-               }
-       }
-}
-
 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                                kvm_pfn_t pfn, unsigned access, int *ret_val)
 {