KVM: arm64: Move setting the page as dirty out of the critical section
author: Fuad Tabba <tabba@google.com>
Tue, 23 Apr 2024 15:05:23 +0000 (16:05 +0100)
committer: Marc Zyngier <maz@kernel.org>
Wed, 1 May 2024 15:48:14 +0000 (16:48 +0100)
Move the unlock earlier in user_mem_abort() to shorten the
critical section. This also helps for future refactoring and
reuse of similar code.

This moves marking the page as dirty out of the critical
section. That code does not interact with the stage-2 page
tables, which the read lock in the critical section protects.

Signed-off-by: Fuad Tabba <tabba@google.com>
Acked-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240423150538.2103045-16-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/mmu.c

index 18680771cdb0ea4c9ee2fcea29d1219189fda752..3afc42d8833eabb14493e0fa4fa11f443d402fd4 100644 (file)
@@ -1522,8 +1522,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
        read_lock(&kvm->mmu_lock);
        pgt = vcpu->arch.hw_mmu->pgt;
-       if (mmu_invalidate_retry(kvm, mmu_seq))
+       if (mmu_invalidate_retry(kvm, mmu_seq)) {
+               ret = -EAGAIN;
                goto out_unlock;
+       }
 
        /*
         * If we are not forced to use page mapping, check if we are
@@ -1581,6 +1583,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                                             memcache,
                                             KVM_PGTABLE_WALK_HANDLE_FAULT |
                                             KVM_PGTABLE_WALK_SHARED);
+out_unlock:
+       read_unlock(&kvm->mmu_lock);
 
        /* Mark the page dirty only if the fault is handled successfully */
        if (writable && !ret) {
@@ -1588,8 +1592,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                mark_page_dirty_in_slot(kvm, memslot, gfn);
        }
 
-out_unlock:
-       read_unlock(&kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return ret != -EAGAIN ? ret : 0;
 }