static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                    struct list_head *invalid_list);
 
+static bool sp_has_gptes(struct kvm_mmu_page *sp)
+{
+       if (sp->role.direct)
+               return false;
+
+       return true;
+}
+
 #define for_each_valid_sp(_kvm, _sp, _list)                            \
        hlist_for_each_entry(_sp, _list, hash_link)                     \
                if (is_obsolete_sp((_kvm), (_sp))) {                    \
                } else
 
-#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)                        \
+#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn)              \
        for_each_valid_sp(_kvm, _sp,                                    \
          &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])     \
-               if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
+               if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
 
 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                         struct list_head *invalid_list)
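
For readers skimming the hunk above: sp_has_gptes() simply reports whether a shadow page mirrors guest page-table entries, and the renamed iterator filters a hash bucket on both the gfn and that property. Below is a minimal, self-contained userspace sketch of the same "if (filter) {} else" iteration idiom; struct mock_sp, its hand-rolled list and the macro name are simplified stand-ins invented for illustration, not KVM's real types.

#include <stdbool.h>
#include <stdio.h>

struct mock_sp {
        unsigned long gfn;
        bool direct;            /* stand-in for sp->role.direct */
        struct mock_sp *next;   /* stand-in for the hash_link list */
};

static bool mock_sp_has_gptes(struct mock_sp *sp)
{
        /* Direct pages aren't backed by a guest page table: no gptes. */
        if (sp->direct)
                return false;

        return true;
}

/*
 * Same dangling-else trick as the kernel macro: the caller's statement
 * binds to the "else", so mismatching entries fall into the empty "{}".
 */
#define for_each_gfn_sp_with_gptes(_head, _sp, _gfn)                    \
        for ((_sp) = (_head); (_sp); (_sp) = (_sp)->next)               \
                if ((_sp)->gfn != (_gfn) || !mock_sp_has_gptes(_sp)) {} else

int main(void)
{
        struct mock_sp c = { .gfn = 2, .direct = false, .next = NULL };
        struct mock_sp b = { .gfn = 1, .direct = true,  .next = &c   };
        struct mock_sp a = { .gfn = 1, .direct = false, .next = &b   };
        struct mock_sp *sp;

        /* Only 'a' is printed: its gfn matches and it has gptes. */
        for_each_gfn_sp_with_gptes(&a, sp, 1)
                printf("sp with gptes at gfn %lu\n", sp->gfn);

        return 0;
}

The "{} else" construction is what keeps the macro usable as a plain loop header: whatever statement or block the caller writes after it attaches to the else, so filtered-out entries only hit the empty braces and no extra nesting is forced on the call sites.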

[...]

        sp->gfn = gfn;
        sp->role = role;
        hlist_add_head(&sp->hash_link, sp_list);
-       if (!direct) {
+       if (sp_has_gptes(sp)) {
                account_shadowed(vcpu->kvm, sp);
                if (level == PG_LEVEL_4K && kvm_vcpu_write_protect_gfn(vcpu, gfn))
                        kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);

[...]

        /* Zapping children means active_mmu_pages has become unstable. */
        list_unstable = *nr_zapped;
 
-       if (!sp->role.invalid && !sp->role.direct)
+       if (!sp->role.invalid && sp_has_gptes(sp))
                unaccount_shadowed(kvm, sp);
 
        if (sp->unsync)

[...]

        pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
        r = 0;
        write_lock(&kvm->mmu_lock);
-       for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
+       for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
                pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
                         sp->role.word);
                r = 1;

[...]

         * that case, KVM must complete emulation of the guest TLB flush before
         * allowing shadow pages to become unsync (writable by the guest).
         */
-       for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
+       for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
                if (!can_unsync)
                        return -EPERM;
 
 
[...]

        ++vcpu->kvm->stat.mmu_pte_write;
 
-       for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
+       for_each_gfn_valid_sp_with_gptes(vcpu->kvm, sp, gfn) {
                if (detect_write_misaligned(sp, gpa, bytes) ||
                      detect_write_flooding(sp)) {
                        kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
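
Taken together, the call-site hunks above stop open-coding the role.direct test and route the "does this shadow page have guest PTEs?" question through sp_has_gptes(). The standalone sketch below (invented demo_* names, and a hypothetical passthrough role bit that is only an assumption here) illustrates the payoff: a shadow-page type that is not direct yet still shadows no gptes would only need the helper updated, while the iteration macro and every caller shown above stay untouched.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical role layout, for illustration only. */
struct demo_role {
        unsigned int direct      : 1;
        unsigned int passthrough : 1;   /* assumed: !direct but shadows no gptes */
};

struct demo_sp {
        struct demo_role role;
};

static bool demo_sp_has_gptes(struct demo_sp *sp)
{
        if (sp->role.direct)
                return false;

        if (sp->role.passthrough)       /* the single place the new case lives */
                return false;

        return true;
}

int main(void)
{
        struct demo_sp indirect_sp    = { .role = { .direct = 0 } };
        struct demo_sp direct_sp      = { .role = { .direct = 1 } };
        struct demo_sp passthrough_sp = { .role = { .passthrough = 1 } };

        printf("indirect:    %d\n", demo_sp_has_gptes(&indirect_sp));    /* 1 */
        printf("direct:      %d\n", demo_sp_has_gptes(&direct_sp));      /* 0 */
        printf("passthrough: %d\n", demo_sp_has_gptes(&passthrough_sp)); /* 0 */
        return 0;
}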