From: Marc Zyngier <maz@kernel.org>
Date: Wed, 30 Sep 2020 08:48:30 +0000 (+0100)
Subject: Merge remote-tracking branch 'arm64/for-next/ghostbusters' into kvm-arm64/hyp-pcpu
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=816c347f3a48fb15370b23664760ea61286fea05;p=linux.git

Merge remote-tracking branch 'arm64/for-next/ghostbusters' into kvm-arm64/hyp-pcpu

Signed-off-by: Marc Zyngier <maz@kernel.org>
---

816c347f3a48fb15370b23664760ea61286fea05
diff --cc arch/arm64/include/asm/kvm_asm.h
index 863f669d4dc86,7f7072f6cb45f..3438e85e1df6d
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@@ -7,12 -7,8 +7,9 @@@
  #ifndef __ARM_KVM_ASM_H__
  #define __ARM_KVM_ASM_H__
  
 +#include <asm/hyp_image.h>
  #include <asm/virt.h>
  
- #define VCPU_WORKAROUND_2_FLAG_SHIFT	0
- #define VCPU_WORKAROUND_2_FLAG		(_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
- 
  #define ARM_EXIT_WITH_SERROR_BIT  31
  #define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
  #define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
@@@ -128,11 -99,6 +125,10 @@@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector)
  #define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
  #define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)
  
 +extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
 +DECLARE_KVM_NVHE_SYM(__per_cpu_start);
 +DECLARE_KVM_NVHE_SYM(__per_cpu_end);
 +
- #ifdef CONFIG_KVM_INDIRECT_VECTORS
  extern atomic_t arm64_el2_vector_last_slot;
  DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
  #define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
diff --cc arch/arm64/kernel/image-vars.h
index 80da861b8180c,d0f3f35dd0d70..fbd4b6b1fde5d
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@@ -61,8 -61,9 +61,7 @@@ __efistub__ctype = _ctype
   * memory mappings.
   */
  
 -#define KVM_NVHE_ALIAS(sym) __kvm_nvhe_##sym = sym;
 -
  /* Alternative callbacks for init-time patching of nVHE hyp code. */
- KVM_NVHE_ALIAS(arm64_enable_wa2_handling);
  KVM_NVHE_ALIAS(kvm_patch_vector_branch);
  KVM_NVHE_ALIAS(kvm_update_va_mask);
diff --cc arch/arm64/kvm/arm.c
index 92c88deea3575,13e559ac7235b..f8388da6f3c76
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@@ -1256,19 -1256,40 +1256,53 @@@ long kvm_arch_vm_ioctl(struct file *fil
  	}
  }
  
 +static unsigned long nvhe_percpu_size(void)
 +{
 +	return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
 +		(unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
 +}
 +
 +static unsigned long nvhe_percpu_order(void)
 +{
 +	unsigned long size = nvhe_percpu_size();
 +
 +	return size ? get_order(size) : 0;
 +}
 +
+ static int kvm_map_vectors(void)
+ {
+ 	/*
+ 	 * SV2  = ARM64_SPECTRE_V2
+ 	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
+ 	 *
+ 	 * !SV2 + !HEL2 -> use direct vectors
+ 	 *  SV2 + !HEL2 -> use hardened vectors in place
+ 	 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
+ 	 *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
+ 	 */
+ 	if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
+ 		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
+ 		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+ 	}
+ 
+ 	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
+ 		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+ 		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
+ 
+ 		/*
+ 		 * Always allocate a spare vector slot, as we don't
+ 		 * know yet which CPUs have a BP hardening slot that
+ 		 * we can reuse.
+ 		 */
+ 		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+ 		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
+ 		return create_hyp_exec_mappings(vect_pa, size,
+ 						&__kvm_bp_vect_base);
+ 	}
+ 
+ 	return 0;
+ }
+ 
  static void cpu_init_hyp_mode(void)
  {
  	phys_addr_t pgd_ptr;