From: Marc Zyngier Date: Wed, 30 Sep 2020 13:05:35 +0000 (+0100) Subject: Merge branch 'kvm-arm64/hyp-pcpu' into kvmarm-master/next X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=14ef9d04928b61d699fd0dd858b14b5d8150113e;p=linux.git Merge branch 'kvm-arm64/hyp-pcpu' into kvmarm-master/next Signed-off-by: Marc Zyngier --- 14ef9d04928b61d699fd0dd858b14b5d8150113e diff --cc arch/arm64/include/asm/kvm_asm.h index 3e4577013d33c,3438e85e1df6d..54387ccd1ab26 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@@ -84,24 -51,33 +75,53 @@@ DECLARE_KVM_VHE_SYM(sym); \ DECLARE_KVM_NVHE_SYM(sym) + #define DECLARE_KVM_VHE_PER_CPU(type, sym) \ + DECLARE_PER_CPU(type, sym) + #define DECLARE_KVM_NVHE_PER_CPU(type, sym) \ + DECLARE_PER_CPU(type, kvm_nvhe_sym(sym)) + + #define DECLARE_KVM_HYP_PER_CPU(type, sym) \ + DECLARE_KVM_VHE_PER_CPU(type, sym); \ + DECLARE_KVM_NVHE_PER_CPU(type, sym) + -#define CHOOSE_VHE_SYM(sym) sym -#define CHOOSE_NVHE_SYM(sym) kvm_nvhe_sym(sym) - + /* + * Compute pointer to a symbol defined in nVHE percpu region. + * Returns NULL if percpu memory has not been allocated yet. + */ + #define this_cpu_ptr_nvhe_sym(sym) per_cpu_ptr_nvhe_sym(sym, smp_processor_id()) + #define per_cpu_ptr_nvhe_sym(sym, cpu) \ + ({ \ + unsigned long base, off; \ + base = kvm_arm_hyp_percpu_base[cpu]; \ + off = (unsigned long)&CHOOSE_NVHE_SYM(sym) - \ + (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start); \ + base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL; \ + }) + -#ifndef __KVM_NVHE_HYPERVISOR__ +#if defined(__KVM_NVHE_HYPERVISOR__) + - #define CHOOSE_HYP_SYM(sym) CHOOSE_NVHE_SYM(sym) +#define CHOOSE_NVHE_SYM(sym) sym ++#define CHOOSE_HYP_SYM(sym) CHOOSE_NVHE_SYM(sym) ++ +/* The nVHE hypervisor shouldn't even try to access VHE symbols */ +extern void *__nvhe_undefined_symbol; - #define CHOOSE_VHE_SYM(sym) __nvhe_undefined_symbol ++#define CHOOSE_VHE_SYM(sym) __nvhe_undefined_symbol ++#define this_cpu_ptr_hyp_sym(sym) (&__nvhe_undefined_symbol) ++#define per_cpu_ptr_hyp_sym(sym, cpu) (&__nvhe_undefined_symbol) + - #elif defined(__KVM_VHE_HYPERVISOR) ++#elif defined(__KVM_VHE_HYPERVISOR__) + - #define CHOOSE_HYP_SYM(sym) CHOOSE_VHE_SYM(sym) +#define CHOOSE_VHE_SYM(sym) sym ++#define CHOOSE_HYP_SYM(sym) CHOOSE_VHE_SYM(sym) ++ +/* The VHE hypervisor shouldn't even try to access nVHE symbols */ +extern void *__vhe_undefined_symbol; - #define CHOOSE_NVHE_SYM(sym) __vhe_undefined_symbol ++#define CHOOSE_NVHE_SYM(sym) __vhe_undefined_symbol ++#define this_cpu_ptr_hyp_sym(sym) (&__vhe_undefined_symbol) ++#define per_cpu_ptr_hyp_sym(sym, cpu) (&__vhe_undefined_symbol) + +#else + /* * BIG FAT WARNINGS: * @@@ -113,11 -89,21 +133,21 @@@ * - Don't let the nVHE hypervisor have access to this, as it will * pick the *wrong* symbol (yes, it runs at EL2...). */ - #define CHOOSE_HYP_SYM(sym) (is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) \ + #define CHOOSE_HYP_SYM(sym) (is_kernel_in_hyp_mode() \ + ? CHOOSE_VHE_SYM(sym) \ : CHOOSE_NVHE_SYM(sym)) ++ + #define this_cpu_ptr_hyp_sym(sym) (is_kernel_in_hyp_mode() \ + ? this_cpu_ptr(&sym) \ + : this_cpu_ptr_nvhe_sym(sym)) ++ + #define per_cpu_ptr_hyp_sym(sym, cpu) (is_kernel_in_hyp_mode() \ + ? 
per_cpu_ptr(&sym, cpu) \ + : per_cpu_ptr_nvhe_sym(sym, cpu)) -#else -/* The nVHE hypervisor shouldn't even try to access anything */ -extern void *__nvhe_undefined_symbol; -#define CHOOSE_HYP_SYM(sym) __nvhe_undefined_symbol -#define this_cpu_ptr_hyp_sym(sym) (&__nvhe_undefined_symbol) -#define per_cpu_ptr_hyp_sym(sym, cpu) (&__nvhe_undefined_symbol) ++ +#define CHOOSE_VHE_SYM(sym) sym +#define CHOOSE_NVHE_SYM(sym) kvm_nvhe_sym(sym) + #endif /* Translate a kernel address @ptr into its equivalent linear mapping */ @@@ -135,13 -121,14 +165,16 @@@ struct kvm_vcpu struct kvm_s2_mmu; DECLARE_KVM_NVHE_SYM(__kvm_hyp_init); +DECLARE_KVM_NVHE_SYM(__kvm_hyp_host_vector); DECLARE_KVM_HYP_SYM(__kvm_hyp_vector); #define __kvm_hyp_init CHOOSE_NVHE_SYM(__kvm_hyp_init) +#define __kvm_hyp_host_vector CHOOSE_NVHE_SYM(__kvm_hyp_host_vector) #define __kvm_hyp_vector CHOOSE_HYP_SYM(__kvm_hyp_vector) - #ifdef CONFIG_KVM_INDIRECT_VECTORS + extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS]; + DECLARE_KVM_NVHE_SYM(__per_cpu_start); + DECLARE_KVM_NVHE_SYM(__per_cpu_end); + extern atomic_t arm64_el2_vector_last_slot; DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs); #define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs) @@@ -260,16 -214,6 +260,16 @@@ extern char __smccc_workaround_1_smc[__ ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] .endm +.macro get_loaded_vcpu vcpu, ctxt - hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu ++ adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu + ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] +.endm + +.macro set_loaded_vcpu vcpu, ctxt, tmp - hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp ++ adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp + str \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] +.endm + /* * KVM extable for unexpected exceptions. * In the same format _asm_extable, but output to a different section so that diff --cc arch/arm64/include/asm/kvm_host.h index d56d67c3787e3,1247d1f30cb3a..0aecbab6a7fb3 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@@ -568,7 -565,7 +568,7 @@@ void kvm_set_sei_esr(struct kvm_vcpu *v struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); - DECLARE_PER_CPU(struct kvm_host_data, kvm_host_data); -DECLARE_KVM_HYP_PER_CPU(kvm_host_data_t, kvm_host_data); ++DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data); static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt) { diff --cc arch/arm64/include/asm/kvm_mmu.h index c490fe8089b3e,cff1cebc75904..331394306ccee --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@@ -258,78 -477,32 +254,8 @@@ static inline void *kvm_get_hyp_vector( return vect; } - /* This is only called on a !VHE system */ - static inline int kvm_map_vectors(void) - { - /* - * HBP = ARM64_HARDEN_BRANCH_PREDICTOR - * HEL2 = ARM64_HARDEN_EL2_VECTORS - * - * !HBP + !HEL2 -> use direct vectors - * HBP + !HEL2 -> use hardened vectors in place - * !HBP + HEL2 -> allocate one vector slot and use exec mapping - * HBP + HEL2 -> use hardened vertors and use exec mapping - */ - if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) { - __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs); - __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base); - } - - if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) { - phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs); - unsigned long size = __BP_HARDEN_HYP_VECS_SZ; - - /* - * Always allocate a spare vector slot, as we don't - * know yet which CPUs have a BP hardening slot that - * we can reuse. 
- */ - __kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot); - BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS); - return create_hyp_exec_mappings(vect_pa, size, - &__kvm_bp_vect_base); - } - - return 0; - } - #else - static inline void *kvm_get_hyp_vector(void) - { - return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector)); - } - - static inline int kvm_map_vectors(void) - { - return 0; - } - #endif - - #ifdef CONFIG_ARM64_SSBD - DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); - - static inline int hyp_map_aux_data(void) - { - int cpu, err; - - for_each_possible_cpu(cpu) { - u64 *ptr; - - ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu); - err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP); - if (err) - return err; - } - return 0; - } - #else - static inline int hyp_map_aux_data(void) - { - return 0; - } - #endif - #define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr) -/* - * Get the magic number 'x' for VTTBR:BADDR of this KVM instance. - * With v8.2 LVA extensions, 'x' should be a minimum of 6 with - * 52bit IPS. - */ -static inline int arm64_vttbr_x(u32 ipa_shift, u32 levels) -{ - int x = ARM64_VTTBR_X(ipa_shift, levels); - - return (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && x < 6) ? 6 : x; -} - -static inline u64 vttbr_baddr_mask(u32 ipa_shift, u32 levels) -{ - unsigned int x = arm64_vttbr_x(ipa_shift, levels); - - return GENMASK_ULL(PHYS_MASK_SHIFT - 1, x); -} - -static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm) -{ - return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm)); -} - static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu) { struct kvm_vmid *vmid = &mmu->vmid; diff --cc arch/arm64/kvm/arm.c index e49189012af11,f8388da6f3c76..f56122eedffc8 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@@ -46,10 -46,8 +46,10 @@@ __asm__(".arch_extension virt"); #endif - DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data); - DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt); - DEFINE_PER_CPU(unsigned long, kvm_hyp_vector); ++DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector); ++ static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); + unsigned long kvm_arm_hyp_percpu_base[NR_CPUS]; /* The VMID used in the VTTBR */ static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); @@@ -1320,8 -1356,6 +1367,8 @@@ static void cpu_hyp_reinit(void cpu_hyp_reset(); - __this_cpu_write(kvm_hyp_vector, (unsigned long)kvm_get_hyp_vector()); ++ *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)kvm_get_hyp_vector(); + if (is_kernel_in_hyp_mode()) kvm_timer_init_vhe(); else diff --cc arch/arm64/kvm/hyp/Makefile index 607b8a8988266,d898f0da5802e..4a81eddabcd83 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@@ -10,5 -10,4 +10,4 @@@ subdir-ccflags-y := -I$(incdir) -DDISABLE_BRANCH_PROFILING \ $(DISABLE_STACKLEAK_PLUGIN) - obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o - obj-$(CONFIG_KVM_INDIRECT_VECTORS) += smccc_wa.o -obj-$(CONFIG_KVM) += vhe/ nvhe/ smccc_wa.o ++obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o smccc_wa.o diff --cc arch/arm64/kvm/hyp/entry.S index afaa8d1f24854,76e7eaf4675eb..b0afad7a99c6e --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@@ -22,21 -62,20 +22,21 @@@ */ SYM_FUNC_START(__guest_enter) // x0: vcpu - // x1: host context - // x2-x17: clobbered by macros + // x1-x17: clobbered by macros // x29: guest context - hyp_adr_this_cpu x1, kvm_hyp_ctxt, x2 - // Store the host regs ++ adr_this_cpu x1, kvm_hyp_ctxt, x2 + + // Store the hyp regs save_callee_saved_regs x1 - // Save the 
host's sp_el0 + // Save hyp's sp_el0 save_sp_el0 x1, x2 - // Now the host state is stored if we have a pending RAS SError it must - // affect the host. If any asynchronous exception is pending we defer - // the guest entry. The DSB isn't necessary before v8.2 as any SError - // would be fatal. + // Now the hyp state is stored if we have a pending RAS SError it must + // affect the host or hyp. If any asynchronous exception is pending we + // defer the guest entry. The DSB isn't necessary before v8.2 as any + // SError would be fatal. alternative_if ARM64_HAS_RAS_EXTN dsb nshst isb @@@ -79,26 -116,6 +79,26 @@@ alternative_else_nop_endi eret sb +SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL) + // x2-x29,lr: vcpu regs + // vcpu x0-x1 on the stack + + // If the hyp context is loaded, go straight to hyp_panic + get_loaded_vcpu x0, x1 + cbz x0, hyp_panic + + // The hyp context is saved so make sure it is restored to allow + // hyp_panic to run at hyp and, subsequently, panic to run in the host. + // This makes use of __guest_exit to avoid duplication but sets the + // return address to tail call into hyp_panic. As a side effect, the + // current state is saved to the guest context but it will only be + // accurate if the guest had been completely restored. - hyp_adr_this_cpu x0, kvm_hyp_ctxt, x1 ++ adr_this_cpu x0, kvm_hyp_ctxt, x1 + adr x1, hyp_panic + str x1, [x0, #CPU_XREG_OFFSET(30)] + + get_vcpu_ptr x1, x0 + SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL) // x0: return code // x1: vcpu @@@ -131,10 -148,10 +131,10 @@@ // Store the guest's sp_el0 save_sp_el0 x1, x2 - hyp_adr_this_cpu x2, kvm_hyp_ctxt, x3 - get_host_ctxt x2, x3 ++ adr_this_cpu x2, kvm_hyp_ctxt, x3 - // Macro ptrauth_switch_to_guest format: - // ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3) + // Macro ptrauth_switch_to_hyp format: + // ptrauth_switch_to_hyp(guest cxt, host cxt, tmp1, tmp2, tmp3) // The below macro to save/restore keys is not implemented in C code // as it may cause Pointer Authentication key signing mismatch errors // when this feature is enabled for kernel code. diff --cc arch/arm64/kvm/hyp/include/hyp/switch.h index 4536b50ddc06c,0d656914f4210..eeac62b685a9e --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@@ -383,7 -386,7 +383,7 @@@ static inline bool __hyp_handle_ptrauth !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu))) return false; - ctxt = __hyp_this_cpu_ptr(kvm_hyp_ctxt); - ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; ++ ctxt = this_cpu_ptr(&kvm_hyp_ctxt); __ptrauth_save_key(ctxt, APIA); __ptrauth_save_key(ctxt, APIB); __ptrauth_save_key(ctxt, APDA); @@@ -476,43 -479,10 +476,10 @@@ exit return false; } - static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu) - { - if (!cpus_have_final_cap(ARM64_SSBD)) - return false; - - return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG); - } - - static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu) - { - #ifdef CONFIG_ARM64_SSBD - /* - * The host runs with the workaround always present. If the - * guest wants it disabled, so be it... - */ - if (__needs_ssbd_off(vcpu) && - __hyp_this_cpu_read(arm64_ssbd_callback_required)) - arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL); - #endif - } - - static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu) - { - #ifdef CONFIG_ARM64_SSBD - /* - * If the guest has disabled the workaround, bring it back on. 
- */ - if (__needs_ssbd_off(vcpu) && - __hyp_this_cpu_read(arm64_ssbd_callback_required)) - arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL); - #endif - } - static inline void __kvm_unexpected_el2_exception(void) { + extern char __guest_exit_panic[]; unsigned long addr, fixup; - struct kvm_cpu_context *host_ctxt; struct exception_table_entry *entry, *end; unsigned long elr_el2 = read_sysreg(elr_el2); diff --cc arch/arm64/kvm/hyp/nvhe/switch.c index a29f247f35e3a,4472558cbdd95..a457a0306e031 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@@ -27,6 -27,12 +27,11 @@@ #include #include -/* Non-VHE copy of the kernel symbol. */ -DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); - -/* Non-VHE instance of kvm_host_data. */ -DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data); ++/* Non-VHE specific context */ ++DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data); ++DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt); ++DEFINE_PER_CPU(unsigned long, kvm_hyp_vector); + static void __activate_traps(struct kvm_vcpu *vcpu) { u64 val; @@@ -42,7 -48,6 +47,7 @@@ } write_sysreg(val, cptr_el2); - write_sysreg(__hyp_this_cpu_read(kvm_hyp_vector), vbar_el2); ++ write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2); if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) { struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt; @@@ -176,7 -179,9 +181,7 @@@ int __kvm_vcpu_run(struct kvm_vcpu *vcp pmr_sync(); } - host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; - vcpu = kern_hyp_va(vcpu); - + host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; host_ctxt->__hyp_running_vcpu = vcpu; guest_ctxt = &vcpu->arch.ctxt; @@@ -203,11 -208,9 +208,9 @@@ __debug_switch_to_guest(vcpu); - __set_guest_arch_workaround_state(vcpu); - do { /* Jump in the fire! */ - exit_code = __guest_enter(vcpu, host_ctxt); + exit_code = __guest_enter(vcpu); /* And we're baaack! */ } while (fixup_guest_exit(vcpu, &exit_code)); @@@ -250,17 -249,13 +251,17 @@@ void __noreturn hyp_panic(void u64 spsr = read_sysreg_el2(SYS_SPSR); u64 elr = read_sysreg_el2(SYS_ELR); u64 par = read_sysreg(par_el1); - struct kvm_vcpu *vcpu = host_ctxt->__hyp_running_vcpu; - unsigned long str_va; + bool restore_host = true; + struct kvm_cpu_context *host_ctxt; + struct kvm_vcpu *vcpu; - host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; - if (read_sysreg(vttbr_el2)) { ++ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; + vcpu = host_ctxt->__hyp_running_vcpu; + + if (vcpu) { __timer_disable_traps(vcpu); __deactivate_traps(vcpu); - __deactivate_vm(vcpu); + __load_host_stage2(); __sysreg_restore_state_nvhe(host_ctxt); } diff --cc arch/arm64/kvm/hyp/vhe/switch.c index cf477f856e51b,a8d4075327985..fe69de16dadc6 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@@ -28,6 -28,9 +28,11 @@@ const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n"; -/* VHE instance of kvm_host_data. */ -DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data); ++/* VHE specific context */ ++DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data); ++DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt); ++DEFINE_PER_CPU(unsigned long, kvm_hyp_vector); + static void __activate_traps(struct kvm_vcpu *vcpu) { u64 val; @@@ -131,11 -134,9 +136,9 @@@ static int __kvm_vcpu_run_vhe(struct kv sysreg_restore_guest_state_vhe(guest_ctxt); __debug_switch_to_guest(vcpu); - __set_guest_arch_workaround_state(vcpu); - do { /* Jump in the fire! 
*/ - exit_code = __guest_enter(vcpu, host_ctxt); + exit_code = __guest_enter(vcpu); /* And we're baaack! */ } while (fixup_guest_exit(vcpu, &exit_code)); @@@ -192,12 -191,10 +193,12 @@@ int __kvm_vcpu_run(struct kvm_vcpu *vcp return ret; } -static void __hyp_call_panic(u64 spsr, u64 elr, u64 par, - struct kvm_cpu_context *host_ctxt) +static void __hyp_call_panic(u64 spsr, u64 elr, u64 par) { + struct kvm_cpu_context *host_ctxt; struct kvm_vcpu *vcpu; + - host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; ++ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; vcpu = host_ctxt->__hyp_running_vcpu; __deactivate_traps(vcpu);
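
Note on the per-CPU scheme this resolution converges on (an illustrative sketch only: the example_* wrappers below are hypothetical, but every macro and call inside them is taken verbatim from the hunks above). A hyp per-CPU variable such as kvm_hyp_vector is declared once with DECLARE_KVM_HYP_PER_CPU(), instantiated separately by the VHE and nVHE hyp objects with DEFINE_PER_CPU(), and reached from the host via this_cpu_ptr_hyp_sym(), which selects the kernel copy when running VHE and otherwise the nVHE copy relocated through kvm_arm_hyp_percpu_base[]. Inside the hypervisor itself, the ordinary this-cpu accessors resolve to the local instance.

/* Shared declaration: covers the kernel, VHE and nVHE views of the symbol. */
DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);

/* Each hyp object defines its own instance (vhe/switch.c and nvhe/switch.c). */
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);

/* Host side (hypothetical wrapper): write through whichever copy EL2 will use,
 * as cpu_hyp_reinit() does in arch/arm64/kvm/arm.c. */
static void example_set_hyp_vector(void)
{
	*this_cpu_ptr_hyp_sym(kvm_hyp_vector) =
		(unsigned long)kvm_get_hyp_vector();
}

/* Hyp side (hypothetical wrapper): plain local per-CPU read, as in the nVHE
 * __activate_traps(). */
static void example_install_hyp_vector(void)
{
	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
}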