return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
 }
 
-static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-                                   gfn_t gfn)
+static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu,
+                                   struct kvm_page_fault *fault)
 {
        struct kvm_arch_async_pf arch;
 
        arch.token = alloc_apf_token(vcpu);
-       arch.gfn = gfn;
+       arch.gfn = fault->gfn;
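+       /*
+        * Stash the original error code so that kvm_arch_async_page_ready()
+        * can replay the fault with the same information once the page has
+        * been brought in.
+        */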
+       arch.error_code = fault->error_code;
        arch.direct_map = vcpu->arch.mmu->root_role.direct;
        arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu);
 
-       return kvm_setup_async_pf(vcpu, cr2_or_gpa,
-                                 kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
+       return kvm_setup_async_pf(vcpu, fault->addr,
+                                 kvm_vcpu_gfn_to_hva(vcpu, fault->gfn), &arch);
 }
 
 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 {
        int r;
 
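+       /*
+        * The stashed error code should never have the private-access bit
+        * set, as async #PFs are not set up for faults on private memory;
+        * warn and bail rather than replaying the fault.
+        */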
+       if (WARN_ON_ONCE(work->arch.error_code & PFERR_PRIVATE_ACCESS))
+               return;
+
        if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
              work->wakeup_all)
                return;
 
        r = kvm_mmu_reload(vcpu);
        if (unlikely(r))
                return;
 
        if (!vcpu->arch.mmu->root_role.direct &&
              work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
                return;
 
-       kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true, NULL);
+       kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, work->arch.error_code, true, NULL);
 }
 
 static inline u8 kvm_max_level_for_order(int order)
                        trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn);
                        kvm_make_request(KVM_REQ_APF_HALT, vcpu);
                        return RET_PF_RETRY;
-               } else if (kvm_arch_setup_async_pf(vcpu, fault->addr, fault->gfn)) {
+               } else if (kvm_arch_setup_async_pf(vcpu, fault)) {
                        return RET_PF_RETRY;
                }
        }