KVM: arm64: Remove the creation-time mapping of MMIO regions
author Keqian Zhu <zhukeqian1@huawei.com>
Fri, 7 May 2021 11:03:21 +0000 (19:03 +0800)
committer Marc Zyngier <maz@kernel.org>
Tue, 1 Jun 2021 11:01:40 +0000 (12:01 +0100)
MMIO regions may be unmapped for many reasons and can be remapped by the
stage 2 fault path. Mapping MMIO regions at creation time is therefore only
a minor optimization, and it makes these two mapping paths hard to keep in
sync.

Remove the mapping code while keeping the useful sanity check.
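
With the creation-time mapping gone, the VMA walk in
kvm_arch_prepare_memory_region() reduces to the PFNMAP sanity check below
(a sketch of the resulting loop, reconstructed from the hunks in this patch;
declarations, locking and the rest of the function are elided):

	do {
		struct vm_area_struct *vma;

		vma = find_vma_intersection(current->mm, hva, reg_end);
		if (!vma)
			break;

		/* Only the sanity check remains; no mapping is done here */
		if (vma->vm_flags & VM_PFNMAP) {
			/* IO region dirty page logging not allowed */
			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
				ret = -EINVAL;
				break;
			}
		}
		hva = min(reg_end, vma->vm_end);
	} while (hva < reg_end);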

Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210507110322.23348-2-zhukeqian1@huawei.com
arch/arm64/kvm/mmu.c

index c10207fed2f36fe6a46970912e302e7528d4b196..e982178c8c72b25d32c3f8b84faf3cdd9a5240e9 100644 (file)
@@ -1346,7 +1346,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 {
        hva_t hva = mem->userspace_addr;
        hva_t reg_end = hva + mem->memory_size;
-       bool writable = !(mem->flags & KVM_MEM_READONLY);
        int ret = 0;
 
        if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
@@ -1363,8 +1362,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
        mmap_read_lock(current->mm);
        /*
         * A memory region could potentially cover multiple VMAs, and any holes
-        * between them, so iterate over all of them to find out if we can map
-        * any of them right now.
+        * between them, so iterate over all of them.
         *
         *     +--------------------------------------------+
         * +---------------+----------------+   +----------------+
@@ -1375,51 +1373,21 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
         */
        do {
                struct vm_area_struct *vma;
-               hva_t vm_start, vm_end;
 
                vma = find_vma_intersection(current->mm, hva, reg_end);
                if (!vma)
                        break;
 
-               /*
-                * Take the intersection of this VMA with the memory region
-                */
-               vm_start = max(hva, vma->vm_start);
-               vm_end = min(reg_end, vma->vm_end);
-
                if (vma->vm_flags & VM_PFNMAP) {
-                       gpa_t gpa = mem->guest_phys_addr +
-                                   (vm_start - mem->userspace_addr);
-                       phys_addr_t pa;
-
-                       pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
-                       pa += vm_start - vma->vm_start;
-
                        /* IO region dirty page logging not allowed */
                        if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                                ret = -EINVAL;
-                               goto out;
-                       }
-
-                       ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
-                                                   vm_end - vm_start,
-                                                   writable);
-                       if (ret)
                                break;
+                       }
                }
-               hva = vm_end;
+               hva = min(reg_end, vma->vm_end);
        } while (hva < reg_end);
 
-       if (change == KVM_MR_FLAGS_ONLY)
-               goto out;
-
-       spin_lock(&kvm->mmu_lock);
-       if (ret)
-               unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size);
-       else if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
-               stage2_flush_memslot(kvm, memslot);
-       spin_unlock(&kvm->mmu_lock);
-out:
        mmap_read_unlock(current->mm);
        return ret;
 }
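
For reference, the on-demand path that now installs these mappings is the
stage 2 fault handler, user_mem_abort() in arch/arm64/kvm/mmu.c. A condensed,
approximate sketch of the relevant steps (not part of this patch; variable
names and surrounding logic are simplified from the code of this era):

	/*
	 * In user_mem_abort(), roughly: a faulting pfn that resolves to
	 * device (VM_PFNMAP) memory is forced to a single page and mapped
	 * into the stage 2 tables with the DEVICE attribute.
	 */
	if (kvm_is_device_pfn(pfn)) {
		device = true;
		force_pte = true;
	}

	if (device)
		prot |= KVM_PGTABLE_PROT_DEVICE;

	ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
				     __pfn_to_phys(pfn), prot, memcache);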