KVM: arm64: Introduce and use predicates that check for protected VMs
authorFuad Tabba <tabba@google.com>
Tue, 23 Apr 2024 15:05:26 +0000 (16:05 +0100)
committerMarc Zyngier <maz@kernel.org>
Wed, 1 May 2024 15:48:14 +0000 (16:48 +0100)
In order to determine whether or not a VM or vcpu is protected,
introduce helpers to query this state. While at it, use the vcpu
helper to check a vcpu's protected state instead of the kvm one.

Co-authored-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Fuad Tabba <tabba@google.com>
Acked-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240423150538.2103045-19-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/hyp/include/nvhe/pkvm.h
arch/arm64/kvm/hyp/nvhe/switch.c

index 74dc5a60f17185ee2e7ca1084d903171f19398ea..0e6c186a6d6c0c4a37d0d318740340eeb474febe 100644 (file)
@@ -211,6 +211,7 @@ typedef unsigned int pkvm_handle_t;
 struct kvm_protected_vm {
        pkvm_handle_t handle;
        struct kvm_hyp_memcache teardown_mc;
+       bool enabled;
 };
 
 struct kvm_mpidr_data {
@@ -1295,10 +1296,9 @@ struct kvm *kvm_arch_alloc_vm(void);
 
 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
 
-static inline bool kvm_vm_is_protected(struct kvm *kvm)
-{
-       return false;
-}
+#define kvm_vm_is_protected(kvm)       (is_protected_kvm_enabled() && (kvm)->arch.pkvm.enabled)
+
+#define vcpu_is_protected(vcpu)                kvm_vm_is_protected((vcpu)->kvm)
 
 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
index 20c3f6e13b99f4750fdaf1c9b157c8f6ef62ebba..22f374e9f532968937fd179c6732e458cffeceee 100644 (file)
@@ -53,6 +53,11 @@ pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
        return container_of(hyp_vcpu->vcpu.kvm, struct pkvm_hyp_vm, kvm);
 }
 
+static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+       return vcpu_is_protected(&hyp_vcpu->vcpu);
+}
+
 void pkvm_hyp_vm_table_init(void *tbl);
 void pkvm_host_fpsimd_state_init(void);
 
index 5d2d4d6465e8ca0b753c2433e42b2e0e9a6ec0c4..41d1ba6de41a28fd194fa0861fcf384e07b64d73 100644 (file)
@@ -209,7 +209,7 @@ static const exit_handler_fn pvm_exit_handlers[] = {
 
 static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
 {
-       if (unlikely(kvm_vm_is_protected(kern_hyp_va(vcpu->kvm))))
+       if (unlikely(vcpu_is_protected(vcpu)))
                return pvm_exit_handlers;
 
        return hyp_exit_handlers;
@@ -228,9 +228,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
  */
 static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
-       struct kvm *kvm = kern_hyp_va(vcpu->kvm);
-
-       if (kvm_vm_is_protected(kvm) && vcpu_mode_is_32bit(vcpu)) {
+       if (unlikely(vcpu_is_protected(vcpu) && vcpu_mode_is_32bit(vcpu))) {
                /*
                 * As we have caught the guest red-handed, decide that it isn't
                 * fit for purpose anymore by making the vcpu invalid. The VMM