{
        struct kvm_rmap_head *rmap_head;
 
-       if (kvm->arch.tdp_mmu_enabled)
+       if (is_tdp_mmu_enabled(kvm))
                kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
                                slot->base_gfn + gfn_offset, mask, true);
        while (mask) {
 {
        struct kvm_rmap_head *rmap_head;
 
-       if (kvm->arch.tdp_mmu_enabled)
+       if (is_tdp_mmu_enabled(kvm))
                kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
                                slot->base_gfn + gfn_offset, mask, false);
        while (mask) {
                write_protected |= __rmap_write_protect(kvm, rmap_head, true);
        }
 
-       if (kvm->arch.tdp_mmu_enabled)
+       if (is_tdp_mmu_enabled(kvm))
                write_protected |=
                        kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);
 
 
        r = kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
 
-       if (kvm->arch.tdp_mmu_enabled)
+       if (is_tdp_mmu_enabled(kvm))
                r |= kvm_tdp_mmu_zap_hva_range(kvm, start, end);
 
        return r;
 
        r = kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
 
-       if (kvm->arch.tdp_mmu_enabled)
+       if (is_tdp_mmu_enabled(kvm))
                r |= kvm_tdp_mmu_set_spte_hva(kvm, hva, &pte);
 
        return r;
        int young = false;
 
        young = kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
-       if (kvm->arch.tdp_mmu_enabled)
+       if (is_tdp_mmu_enabled(kvm))
                young |= kvm_tdp_mmu_age_hva_range(kvm, start, end);
 
        return young;
        int young = false;
 
        young = kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
-       if (kvm->arch.tdp_mmu_enabled)
+       if (is_tdp_mmu_enabled(kvm))
                young |= kvm_tdp_mmu_test_age_hva(kvm, hva);
 
        return young;
        sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
 
        if (kvm_mmu_put_root(kvm, sp)) {
-               if (sp->tdp_mmu_page)
+               if (is_tdp_mmu_page(sp))
                        kvm_tdp_mmu_free_root(kvm, sp);
                else if (sp->role.invalid)
                        kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
        hpa_t root;
        unsigned i;
 
-       if (vcpu->kvm->arch.tdp_mmu_enabled) {
+       if (is_tdp_mmu_enabled(vcpu->kvm)) {
                root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
 
                if (!VALID_PAGE(root))
 
        kvm_zap_obsolete_pages(kvm);
 
-       if (kvm->arch.tdp_mmu_enabled)
+       if (is_tdp_mmu_enabled(kvm))
                kvm_tdp_mmu_zap_all(kvm);
 
        write_unlock(&kvm->mmu_lock);
                }
        }
 
-       if (kvm->arch.tdp_mmu_enabled) {
+       if (is_tdp_mmu_enabled(kvm)) {
                flush = kvm_tdp_mmu_zap_gfn_range(kvm, gfn_start, gfn_end);
                if (flush)
                        kvm_flush_remote_tlbs(kvm);
        write_lock(&kvm->mmu_lock);
        flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
                                start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
-       if (kvm->arch.tdp_mmu_enabled)
+       if (is_tdp_mmu_enabled(kvm))
                flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_4K);
        write_unlock(&kvm->mmu_lock);
 
        slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
                         kvm_mmu_zap_collapsible_spte, true);
 
-       if (kvm->arch.tdp_mmu_enabled)
+       if (is_tdp_mmu_enabled(kvm))
                kvm_tdp_mmu_zap_collapsible_sptes(kvm, memslot);
        write_unlock(&kvm->mmu_lock);
 }
 
        write_lock(&kvm->mmu_lock);
        flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
-       if (kvm->arch.tdp_mmu_enabled)
+       if (is_tdp_mmu_enabled(kvm))
                flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
        write_unlock(&kvm->mmu_lock);
 
        write_lock(&kvm->mmu_lock);
        flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
                                        false);
-       if (kvm->arch.tdp_mmu_enabled)
+       if (is_tdp_mmu_enabled(kvm))
                flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_2M);
        write_unlock(&kvm->mmu_lock);
 
 
        write_lock(&kvm->mmu_lock);
        flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
-       if (kvm->arch.tdp_mmu_enabled)
+       if (is_tdp_mmu_enabled(kvm))
                flush |= kvm_tdp_mmu_slot_set_dirty(kvm, memslot);
        write_unlock(&kvm->mmu_lock);
 
 
        kvm_mmu_commit_zap_page(kvm, &invalid_list);
 
-       if (kvm->arch.tdp_mmu_enabled)
+       if (is_tdp_mmu_enabled(kvm))
                kvm_tdp_mmu_zap_all(kvm);
 
        write_unlock(&kvm->mmu_lock);
                                      struct kvm_mmu_page,
                                      lpage_disallowed_link);
                WARN_ON_ONCE(!sp->lpage_disallowed);
-               if (sp->tdp_mmu_page) {
+               if (is_tdp_mmu_page(sp)) {
                        kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn,
                                sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level));
                } else {
 
 #include <asm/cmpxchg.h>
 #include <trace/events/kvm.h>
 
-#ifdef CONFIG_X86_64
 static bool __read_mostly tdp_mmu_enabled = false;
 module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
-#endif
-
-static bool is_tdp_mmu_enabled(void)
-{
-#ifdef CONFIG_X86_64
-       return tdp_enabled && READ_ONCE(tdp_mmu_enabled);
-#else
-       return false;
-#endif /* CONFIG_X86_64 */
-}
 
 /* Initializes the TDP MMU for the VM, if enabled. */
 void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
 {
-       if (!is_tdp_mmu_enabled())
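+       /*
+        * Nothing to do unless TDP is enabled and the tdp_mmu module
+        * param is set.
+        */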
+       if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
                return;
 
        /* This should not be changed for the lifetime of the VM. */
 #define for_each_tdp_mmu_root(_kvm, _root)                             \
        list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
 
-bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
-{
-       struct kvm_mmu_page *sp;
-
-       if (!kvm->arch.tdp_mmu_enabled)
-               return false;
-       if (WARN_ON(!VALID_PAGE(hpa)))
-               return false;
-
-       sp = to_shadow_page(hpa);
-       if (WARN_ON(!sp))
-               return false;
-
-       return sp->tdp_mmu_page && sp->root_count;
-}
-
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                          gfn_t start, gfn_t end, bool can_yield);
 
 
 
 #include <linux/kvm_host.h>
 
-void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
-void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
-
-bool is_tdp_mmu_root(struct kvm *kvm, hpa_t root);
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
                         int *root_level);
 
+#ifdef CONFIG_X86_64
+void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
+static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
+static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
+#else
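+/*
+ * The TDP MMU is not used on 32-bit hosts; these stubs let callers avoid
+ * #ifdefs and allow the compiler to elide the TDP-MMU-only paths.
+ */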
+static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {}
+static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
+static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
+static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
+#endif
+
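+/* Return true if @hpa is the root of a live TDP MMU paging structure. */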
+static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
+{
+       struct kvm_mmu_page *sp;
+
+       if (!is_tdp_mmu_enabled(kvm))
+               return false;
+       if (WARN_ON(!VALID_PAGE(hpa)))
+               return false;
+
+       sp = to_shadow_page(hpa);
+       if (WARN_ON(!sp))
+               return false;
+
+       return is_tdp_mmu_page(sp) && sp->root_count;
+}
+
 #endif /* __KVM_X86_MMU_TDP_MMU_H */