struct kvm_mmu_page *sp);
        void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
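        /*
         * Paging-mode hook that folds an emulated guest PTE write into
         * the shadow page tables; bound to nonpaging_update_pte or to
         * the paging_tmpl.h FNAME(update_pte) instantiations.
         */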
        void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-                       u64 *spte, const void *pte, unsigned long mmu_seq);
+                          u64 *spte, const void *pte);
        hpa_t root_hpa;
        int root_level;
        int shadow_root_level;
 
 
 static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
                                 struct kvm_mmu_page *sp, u64 *spte,
-                                const void *pte, unsigned long mmu_seq)
+                                const void *pte)
 {
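        /*
         * With paging disabled in the guest there are no guest page
         * tables to shadow, so this hook must never be invoked.
         */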
        WARN_ON(1);
 }
 
 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
-                                 struct kvm_mmu_page *sp,
-                                 u64 *spte,
-                                 const void *new, unsigned long mmu_seq)
+                                 struct kvm_mmu_page *sp, u64 *spte,
+                                 const void *new)
 {
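        /*
         * Only last-level sptes are updated in place; a write to a
         * higher-level entry is just accounted as a zapped PDE.
         */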
        if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
                ++vcpu->kvm->stat.mmu_pde_zapped;
                return;
        }
 
        ++vcpu->kvm->stat.mmu_pte_updated;
-       vcpu->arch.mmu.update_pte(vcpu, sp, spte, new, mmu_seq);
+       vcpu->arch.mmu.update_pte(vcpu, sp, spte, new);
 }
 
 static bool need_remote_flush(u64 old, u64 new)
        struct kvm_mmu_page *sp;
        struct hlist_node *node;
        LIST_HEAD(invalid_list);
-       unsigned long mmu_seq;
        u64 entry, gentry, *spte;
        unsigned pte_size, page_offset, misaligned, quadrant, offset;
        int level, npte, invlpg_counter, r, flooded = 0;
                break;
        }
 
-       mmu_seq = vcpu->kvm->mmu_notifier_seq;
-       smp_rmb();
-
        spin_lock(&vcpu->kvm->mmu_lock);
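        /*
         * If a guest invlpg ran since invlpg_counter was sampled, the
         * decoded gpte may no longer be valid; zero it so no spte is
         * updated from it below.
         */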
        if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
                gentry = 0;
                        if (gentry &&
                              !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
                              & mask.word))
-                               mmu_pte_write_new_pte(vcpu, sp, spte, &gentry,
-                                                     mmu_seq);
+                               mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
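                        /*
                         * A change to a live spte may leave stale
                         * translations in other vcpus' TLBs; note that
                         * a remote flush is needed.
                         */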
                        if (!remote_flush && need_remote_flush(entry, *spte))
                                remote_flush = true;
                        ++spte;
 
 }
 
 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-                             u64 *spte, const void *pte, unsigned long mmu_seq)
+                             u64 *spte, const void *pte)
 {
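        /*
         * Reflect the emulated write of a guest PTE straight into the
         * matching spte, avoiding a later page fault on that gva.
         */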
        pt_element_t gpte;
        unsigned pte_access;
                kvm_release_pfn_clean(pfn);
                return;
        }
-       if (mmu_notifier_retry(vcpu, mmu_seq))
-               return;
 
        /*
         * we call mmu_set_spte() with host_writable = true because that