KVM: arm64: Move __get_fault_info() and co into their own include file
author Marc Zyngier <maz@kernel.org>
Sun, 10 Oct 2021 14:56:26 +0000 (15:56 +0100)
committer Marc Zyngier <maz@kernel.org>
Mon, 11 Oct 2021 13:57:27 +0000 (14:57 +0100)
In order to avoid including the whole of the switching helpers
in unrelated files, move the __get_fault_info() and related helpers
into their own include file.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20211010145636.1950948-2-tabba@google.com
arch/arm64/kvm/hyp/include/hyp/fault.h [new file with mode: 0644]
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/nvhe/mem_protect.c

diff --git a/arch/arm64/kvm/hyp/include/hyp/fault.h b/arch/arm64/kvm/hyp/include/hyp/fault.h
new file mode 100644 (file)
index 0000000..1b8a2dc
--- /dev/null
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ */
+
+#ifndef __ARM64_KVM_HYP_FAULT_H__
+#define __ARM64_KVM_HYP_FAULT_H__
+
+#include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
+
+static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
+{
+       u64 par, tmp;
+
+       /*
+        * Resolve the IPA the hard way using the guest VA.
+        *
+        * Stage-1 translation already validated the memory access
+        * rights. As such, we can use the EL1 translation regime, and
+        * don't have to distinguish between EL0 and EL1 access.
+        *
+        * We do need to save/restore PAR_EL1 though, as we haven't
+        * saved the guest context yet, and we may return early...
+        */
+       par = read_sysreg_par();
+       if (!__kvm_at("s1e1r", far))
+               tmp = read_sysreg_par();
+       else
+               tmp = SYS_PAR_EL1_F; /* back to the guest */
+       write_sysreg(par, par_el1);
+
+       if (unlikely(tmp & SYS_PAR_EL1_F))
+               return false; /* Translation failed, back to guest */
+
+       /* Convert PAR to HPFAR format */
+       *hpfar = PAR_TO_HPFAR(tmp);
+       return true;
+}
+
+static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
+{
+       u64 hpfar, far;
+
+       far = read_sysreg_el2(SYS_FAR);
+
+       /*
+        * The HPFAR can be invalid if the stage 2 fault did not
+        * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
+        * bit is clear) and one of the two following cases are true:
+        *   1. The fault was due to a permission fault
+        *   2. The processor carries errata 834220
+        *
+        * Therefore, for all non S1PTW faults where we either have a
+        * permission fault or the errata workaround is enabled, we
+        * resolve the IPA using the AT instruction.
+        */
+       if (!(esr & ESR_ELx_S1PTW) &&
+           (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
+            (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
+               if (!__translate_far_to_hpfar(far, &hpfar))
+                       return false;
+       } else {
+               hpfar = read_sysreg(hpfar_el2);
+       }
+
+       fault->far_el2 = far;
+       fault->hpfar_el2 = hpfar;
+       return true;
+}
+
+#endif
index a0e78a6027be0d1f5a7a1cd4f74fa1d38feeece5..54abc8298ec382ec0cca95f171dea482d92f303a 100644 (file)
@@ -8,6 +8,7 @@
 #define __ARM64_KVM_HYP_SWITCH_H__
 
 #include <hyp/adjust_pc.h>
+#include <hyp/fault.h>
 
 #include <linux/arm-smccc.h>
 #include <linux/kvm_host.h>
@@ -133,66 +134,6 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
        }
 }
 
-static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
-{
-       u64 par, tmp;
-
-       /*
-        * Resolve the IPA the hard way using the guest VA.
-        *
-        * Stage-1 translation already validated the memory access
-        * rights. As such, we can use the EL1 translation regime, and
-        * don't have to distinguish between EL0 and EL1 access.
-        *
-        * We do need to save/restore PAR_EL1 though, as we haven't
-        * saved the guest context yet, and we may return early...
-        */
-       par = read_sysreg_par();
-       if (!__kvm_at("s1e1r", far))
-               tmp = read_sysreg_par();
-       else
-               tmp = SYS_PAR_EL1_F; /* back to the guest */
-       write_sysreg(par, par_el1);
-
-       if (unlikely(tmp & SYS_PAR_EL1_F))
-               return false; /* Translation failed, back to guest */
-
-       /* Convert PAR to HPFAR format */
-       *hpfar = PAR_TO_HPFAR(tmp);
-       return true;
-}
-
-static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
-{
-       u64 hpfar, far;
-
-       far = read_sysreg_el2(SYS_FAR);
-
-       /*
-        * The HPFAR can be invalid if the stage 2 fault did not
-        * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
-        * bit is clear) and one of the two following cases are true:
-        *   1. The fault was due to a permission fault
-        *   2. The processor carries errata 834220
-        *
-        * Therefore, for all non S1PTW faults where we either have a
-        * permission fault or the errata workaround is enabled, we
-        * resolve the IPA using the AT instruction.
-        */
-       if (!(esr & ESR_ELx_S1PTW) &&
-           (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
-            (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
-               if (!__translate_far_to_hpfar(far, &hpfar))
-                       return false;
-       } else {
-               hpfar = read_sysreg(hpfar_el2);
-       }
-
-       fault->far_el2 = far;
-       fault->hpfar_el2 = hpfar;
-       return true;
-}
-
 static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
 {
        u8 ec;
index bacd493a4eacdefc1af400ec1a97309f02ac3510..2a07d63b849868bac6a3bbe25aff6658a218e9e5 100644 (file)
@@ -11,7 +11,7 @@
 #include <asm/kvm_pgtable.h>
 #include <asm/stage2_pgtable.h>
 
-#include <hyp/switch.h>
+#include <hyp/fault.h>
 
 #include <nvhe/gfp.h>
 #include <nvhe/memory.h>