TDX will use a different shadow PTE entry value for MMIO than VMX does.
Add a member to kvm_arch and track the MMIO SPTE value per-VM instead of
in a global variable.  Using a per-VM EPT entry value for MMIO keeps the
existing VMX logic working.  Introduce a separate setter function so
that a guest TD can use a different value later.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Message-Id: <229a18434e5d83f45b1fcd7bf1544d79db1becb6.1705965635.git.isaku.yamahata@intel.com>
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Reviewed-by: Binbin Wu <binbin.wu@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
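---
Note: the setter mentioned in the changelog is not shown in the hunks
below.  A minimal sketch of what it could look like, mirroring the
existing kvm_mmu_set_mmio_spte_mask() naming (the function name and its
placement are assumptions, not confirmed by this excerpt):

void kvm_mmu_set_mmio_spte_value(struct kvm *kvm, u64 mmio_value)
{
	/* Hypothetical per-VM setter; lets TDX override the VMX default. */
	kvm->arch.shadow_mmio_value = mmio_value;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_value);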
         */
        spinlock_t mmu_unsync_pages_lock;
 
+       u64 shadow_mmio_value;
+
        struct iommu_domain *iommu_domain;
        bool iommu_noncoherent;
 #define __KVM_HAVE_ARCH_NONCOHERENT_DMA
 
                                return kvm_mmu_prepare_zap_page(kvm, child,
                                                                invalid_list);
                }
-       } else if (is_mmio_spte(pte)) {
+       } else if (is_mmio_spte(kvm, pte)) {
                mmu_spte_clear_no_track(spte);
        }
        return 0;
        if (WARN_ON_ONCE(reserved))
                return -EINVAL;
 
-       if (is_mmio_spte(spte)) {
+       if (is_mmio_spte(vcpu->kvm, spte)) {
                gfn_t gfn = get_mmio_spte_gfn(spte);
                unsigned int access = get_mmio_spte_access(spte);
 
 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
                           unsigned int access)
 {
-       if (unlikely(is_mmio_spte(*sptep))) {
+       if (unlikely(is_mmio_spte(vcpu->kvm, *sptep))) {
                if (gfn != get_mmio_spte_gfn(*sptep)) {
                        mmu_spte_clear_no_track(sptep);
                        return true;
 
 void kvm_mmu_init_vm(struct kvm *kvm)
 {
+       kvm->arch.shadow_mmio_value = shadow_mmio_value;
        INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
        INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
        INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
 
        u64 spte = generation_mmio_spte_mask(gen);
        u64 gpa = gfn << PAGE_SHIFT;
 
-       WARN_ON_ONCE(!shadow_mmio_value);
+       WARN_ON_ONCE(!vcpu->kvm->arch.shadow_mmio_value);
 
        access &= shadow_mmio_access_mask;
-       spte |= shadow_mmio_value | access;
+       spte |= vcpu->kvm->arch.shadow_mmio_value | access;
        spte |= gpa | shadow_nonpresent_or_rsvd_mask;
        spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
                << SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
 
        return spte_to_child_sp(root);
 }
 
-static inline bool is_mmio_spte(u64 spte)
+static inline bool is_mmio_spte(struct kvm *kvm, u64 spte)
 {
-       return (spte & shadow_mmio_mask) == shadow_mmio_value &&
+       return (spte & shadow_mmio_mask) == kvm->arch.shadow_mmio_value &&
               likely(enable_mmio_caching);
 }
 
 
                 * impact the guest since both the former and current SPTEs
                 * are nonpresent.
                 */
-               if (WARN_ON_ONCE(!is_mmio_spte(old_spte) &&
-                                !is_mmio_spte(new_spte) &&
+               if (WARN_ON_ONCE(!is_mmio_spte(kvm, old_spte) &&
+                                !is_mmio_spte(kvm, new_spte) &&
                                 !is_removed_spte(new_spte)))
                        pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
                               "should not be replaced with another,\n"
        }
 
        /* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
-       if (unlikely(is_mmio_spte(new_spte))) {
+       if (unlikely(is_mmio_spte(vcpu->kvm, new_spte))) {
                vcpu->stat.pf_mmio_spte_created++;
                trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
                                     new_spte);
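
For illustration only, a TD-enabling path could later override the
per-VM default during VM initialization, e.g. (hypothetical call site
and value; the actual TDX value is established by later patches):

	/*
	 * Hypothetical: pick a TD-specific MMIO SPTE value instead of the
	 * VMX-wide default installed by kvm_mmu_init_vm().
	 */
	kvm_mmu_set_mmio_spte_value(kvm, 0);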