From: Paolo Bonzini
Date: Sun, 9 Aug 2020 17:24:02 +0000 (-0400)
Subject: Merge tag 'kvm-ppc-next-5.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/paulu...
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=3ff0327899800a190782005483c96f4d7862ecd9;p=linux.git

Merge tag 'kvm-ppc-next-5.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into kvm-next-5.6

PPC KVM update for 5.9

- Improvements and bug fixes for secure VM support, giving reduced startup time and memory hotplug support.
- Locking fixes in nested KVM code
- Increase number of guests supported by HV KVM to 4094
- Preliminary POWER10 support
---

3ff0327899800a190782005483c96f4d7862ecd9
diff --cc arch/powerpc/kvm/book3s_hv_uvmem.c
index 6850bd04bcb94,0d49e3425a125..7705d55572395
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@@ -253,14 -496,94 +496,95 @@@ unsigned long kvmppc_h_svm_init_start(s
  	return ret;
  }
  
- unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
+ /*
+  * Provision a new page on HV side and copy over the contents
+  * from secure memory using UV_PAGE_OUT uvcall.
+  * Caller must hold kvm->arch.uvmem_lock.
+  */
+ static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
+ 		unsigned long start,
+ 		unsigned long end, unsigned long page_shift,
+ 		struct kvm *kvm, unsigned long gpa)
  {
- 	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
- 		return H_UNSUPPORTED;
+ 	unsigned long src_pfn, dst_pfn = 0;
+ 	struct migrate_vma mig;
+ 	struct page *dpage, *spage;
+ 	struct kvmppc_uvmem_page_pvt *pvt;
+ 	unsigned long pfn;
+ 	int ret = U_SUCCESS;
  
- 	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
- 	pr_info("LPID %d went secure\n", kvm->arch.lpid);
- 	return H_SUCCESS;
+ 	memset(&mig, 0, sizeof(mig));
+ 	mig.vma = vma;
+ 	mig.start = start;
+ 	mig.end = end;
+ 	mig.src = &src_pfn;
+ 	mig.dst = &dst_pfn;
- 	mig.src_owner = &kvmppc_uvmem_pgmap;
++	mig.pgmap_owner = &kvmppc_uvmem_pgmap;
++	mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
+ 
+ 	/* The requested page is already paged-out, nothing to do */
+ 	if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
+ 		return ret;
+ 
+ 	ret = migrate_vma_setup(&mig);
+ 	if (ret)
+ 		return -1;
+ 
+ 	spage = migrate_pfn_to_page(*mig.src);
+ 	if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
+ 		goto out_finalize;
+ 
+ 	if (!is_zone_device_page(spage))
+ 		goto out_finalize;
+ 
+ 	dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
+ 	if (!dpage) {
+ 		ret = -1;
+ 		goto out_finalize;
+ 	}
+ 
+ 	lock_page(dpage);
+ 	pvt = spage->zone_device_data;
+ 	pfn = page_to_pfn(dpage);
+ 
+ 	/*
+ 	 * This function is used in two cases:
+ 	 * - When HV touches a secure page, for which we do UV_PAGE_OUT
+ 	 * - When a secure page is converted to shared page, we *get*
+ 	 *   the page to essentially unmap the device page. In this
+ 	 *   case we skip page-out.
+ 	 */
+ 	if (!pvt->skip_page_out)
+ 		ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
+ 				  gpa, 0, page_shift);
+ 
+ 	if (ret == U_SUCCESS)
+ 		*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
+ 	else {
+ 		unlock_page(dpage);
+ 		__free_page(dpage);
+ 		goto out_finalize;
+ 	}
+ 
+ 	migrate_vma_pages(&mig);
+ 
+ out_finalize:
+ 	migrate_vma_finalize(&mig);
+ 	return ret;
+ }
+ 
+ static inline int kvmppc_svm_page_out(struct vm_area_struct *vma,
+ 		unsigned long start, unsigned long end,
+ 		unsigned long page_shift,
+ 		struct kvm *kvm, unsigned long gpa)
+ {
+ 	int ret;
+ 
+ 	mutex_lock(&kvm->arch.uvmem_lock);
+ 	ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa);
+ 	mutex_unlock(&kvm->arch.uvmem_lock);
+ 
+ 	return ret;
  }
  
  /*
@@@ -400,20 -744,7 +745,8 @@@ static int kvmppc_svm_page_in(struct vm
  	mig.end = end;
  	mig.src = &src_pfn;
  	mig.dst = &dst_pfn;
 +	mig.flags = MIGRATE_VMA_SELECT_SYSTEM;
  
- 	/*
- 	 * We come here with mmap_lock write lock held just for
- 	 * ksm_madvise(), otherwise we only need read mmap_lock.
- 	 * Hence downgrade to read lock once ksm_madvise() is done.
- 	 */
- 	ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
- 			  MADV_UNMERGEABLE, &vma->vm_flags);
- 	mmap_write_downgrade(kvm->mm);
- 	*downgrade = true;
- 	if (ret)
- 		return ret;
- 
  	ret = migrate_vma_setup(&mig);
  	if (ret)
  		return ret;
diff --cc arch/powerpc/kvm/book3s_pr.c
index ed12dfbf9bb5a,01c8fe5abe0dd..88fac22fbf094
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@@ -1826,8 -1826,10 +1826,7 @@@ static void kvmppc_core_vcpu_free_pr(st
  
  static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
  {
- 	struct kvm_run *run = vcpu->run;
  	int ret;
 -#ifdef CONFIG_ALTIVEC
 -	unsigned long uninitialized_var(vrsave);
 -#endif
  
  	/* Check if we can run the vcpu at all */
  	if (!vcpu->arch.sane) {
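
Note on the conflict resolved in book3s_hv_uvmem.c above: the base branch reworked the migrate_vma API, renaming the src_owner field to pgmap_owner and adding an explicit flags selector, so callers now state which class of pages they want collected. Below is a minimal sketch of the resulting page-out calling convention, under the assumption of a tree containing that rework; sketch_page_out and my_pgmap are illustrative names, not symbols from this merge.

	#include <linux/migrate.h>
	#include <linux/memremap.h>
	#include <linux/mm.h>

	/*
	 * Hedged sketch: migrate one device-private page back to system
	 * RAM using the post-rework migrate_vma API. Error codes and the
	 * data-copy step are placeholders; the merged KVM code does its
	 * copy via the uv_page_out() ultracall at that point.
	 */
	static int sketch_page_out(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   struct dev_pagemap *my_pgmap)
	{
		unsigned long src_pfn = 0, dst_pfn = 0;
		struct migrate_vma mig = {
			.vma		= vma,
			.start		= start,
			.end		= end,
			.src		= &src_pfn,
			.dst		= &dst_pfn,
			/* Only collect pages owned by this pagemap. */
			.pgmap_owner	= my_pgmap,
			/* Select device-private pages, not system RAM. */
			.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
		};
		struct page *dpage;

		if (migrate_vma_setup(&mig))
			return -EFAULT;

		/* Nothing collected: page was not ours to migrate. */
		if (!(src_pfn & MIGRATE_PFN_MIGRATE)) {
			migrate_vma_finalize(&mig);
			return 0;
		}

		/* Provision a system page as the destination. */
		dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
		if (!dpage) {
			migrate_vma_finalize(&mig);
			return -ENOMEM;
		}
		lock_page(dpage);

		/* ... copy the device page's contents into dpage ... */

		dst_pfn = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
		migrate_vma_pages(&mig);
		migrate_vma_finalize(&mig);
		return 0;
	}

The flags selector is the design point the conflict turns on: the page-out path passes MIGRATE_VMA_SELECT_DEVICE_PRIVATE so only device-private pages matching pgmap_owner are collected, while the page-in hunk above passes MIGRATE_VMA_SELECT_SYSTEM to pick up ordinary system pages instead.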