TDX support will need the "suppress #VE" bit (bit 63) set as the
initial value for SPTEs.  To reduce the size of the code change,
introduce a new macro SHADOW_NONPRESENT_VALUE for the initial value of
a shadow page table entry (SPTE) and replace the hard-coded value 0
with it.  Initialize shadow page tables with this value.
The plan is to unconditionally set the "suppress #VE" bit for both AMD
and Intel because: 1) AMD hardware uses bit 63 as NX for present SPTEs
and ignores it for non-present SPTEs; 2) for conventional VMX guests,
KVM never enables "EPT-violation #VE" in the VMCS controls, so the
"suppress #VE" bit is ignored by hardware.
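
As a rough sketch of where this is headed (not part of this patch, and
the exact definition in the eventual TDX-enabling change may differ),
the macro can later become a real bit value while still being
guaranteed to look non-present:

  /*
   * Sketch only: set the "suppress #VE" bit (bit 63) in every
   * non-present SPTE so that an "EPT-violation #VE"-enabled
   * configuration never delivers a #VE for a non-present entry.
   */
  #define SHADOW_NONPRESENT_VALUE	BIT_ULL(63)

  /* Non-present values must never be mistaken for present SPTEs. */
  static_assert(!(SHADOW_NONPRESENT_VALUE & SPTE_MMU_PRESENT_MASK));

With a definition along those lines, the REMOVED_SPTE change below
keeps the removed-SPTE marker non-present as well, which is exactly
what the existing static_assert next to it checks.
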
No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Message-Id: <acdf09bf60cad12c495005bf3495c54f6b3069c9.1705965635.git.isaku.yamahata@intel.com>
[Remove unnecessary CONFIG_X86_64 check. - Paolo]
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Reviewed-by: Binbin Wu <binbin.wu@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
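
For reference, a minimal sketch of what the shadow-page cache hunk
below relies on, assuming the init_value support added earlier in this
series: when a non-zero init_value is configured, a page-sized cache
object is expected to be filled with that 64-bit pattern instead of
being zeroed via __GFP_ZERO.  The helper name here is illustrative,
not the in-tree kvm_main.c code:

  /* Illustrative helper, not the actual implementation. */
  static void *mmu_alloc_shadow_page(struct kvm_mmu_memory_cache *mc)
  {
          u64 *page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | mc->gfp_zero);

          if (page && mc->init_value)
                  memset64(page, mc->init_value, PAGE_SIZE / sizeof(u64));

          return page;
  }

This is also why gfp_zero is now set only when init_value is zero:
zeroing the page first would be redundant once a non-zero pattern has
to be written anyway.
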
 
        if (!is_shadow_present_pte(old_spte) ||
            !spte_has_volatile_bits(old_spte))
-               __update_clear_spte_fast(sptep, 0ull);
+               __update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
        else
-               old_spte = __update_clear_spte_slow(sptep, 0ull);
+               old_spte = __update_clear_spte_slow(sptep, SHADOW_NONPRESENT_VALUE);
 
        if (!is_shadow_present_pte(old_spte))
                return old_spte;
  */
 static void mmu_spte_clear_no_track(u64 *sptep)
 {
-       __update_clear_spte_fast(sptep, 0ull);
+       __update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
 }
 
 static u64 mmu_spte_get_lockless(u64 *sptep)
 
 static int kvm_sync_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
 {
-       if (!sp->spt[i])
+       /* sp->spt[i] still has the initial value from shadow page table allocation */
+       if (sp->spt[i] == SHADOW_NONPRESENT_VALUE)
                return 0;
 
        return vcpu->arch.mmu->sync_spte(vcpu, sp, i);
        vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
        vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
 
-       vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
+       vcpu->arch.mmu_shadow_page_cache.init_value =
+               SHADOW_NONPRESENT_VALUE;
+       if (!vcpu->arch.mmu_shadow_page_cache.init_value)
+               vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
 
        vcpu->arch.mmu = &vcpu->arch.root_mmu;
        vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
 
        gpa_t pte_gpa;
        gfn_t gfn;
 
-       if (WARN_ON_ONCE(!sp->spt[i]))
+       if (WARN_ON_ONCE(sp->spt[i] == SHADOW_NONPRESENT_VALUE))
                return 0;
 
        first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 
 
 #define MMIO_SPTE_GEN_MASK             GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0)
 
+#define SHADOW_NONPRESENT_VALUE        0ULL
+
 extern u64 __read_mostly shadow_host_writable_mask;
 extern u64 __read_mostly shadow_mmu_writable_mask;
 extern u64 __read_mostly shadow_nx_mask;
  *
  * Only used by the TDP MMU.
  */
-#define REMOVED_SPTE   0x5a0ULL
+#define REMOVED_SPTE   (SHADOW_NONPRESENT_VALUE | 0x5a0ULL)
 
 /* Removed SPTEs must not be misconstrued as shadow present PTEs. */
 static_assert(!(REMOVED_SPTE & SPTE_MMU_PRESENT_MASK));
 
         * here since the SPTE is going from non-present to non-present.  Use
         * the raw write helper to avoid an unnecessary check on volatile bits.
         */
-       __kvm_tdp_mmu_write_spte(iter->sptep, 0);
+       __kvm_tdp_mmu_write_spte(iter->sptep, SHADOW_NONPRESENT_VALUE);
 
        return 0;
 }
                        continue;
 
                if (!shared)
-                       tdp_mmu_iter_set_spte(kvm, &iter, 0);
-               else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
+                       tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
+               else if (tdp_mmu_set_spte_atomic(kvm, &iter, SHADOW_NONPRESENT_VALUE))
                        goto retry;
        }
 }
        if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
                return false;
 
-       tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
-                        sp->gfn, sp->role.level + 1);
+       tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte,
+                        SHADOW_NONPRESENT_VALUE, sp->gfn, sp->role.level + 1);
 
        return true;
 }
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;
 
-               tdp_mmu_iter_set_spte(kvm, &iter, 0);
+               tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
 
                /*
                 * Zapping SPTEs in invalid roots doesn't require a TLB flush,