KVM: arm64: nv: Add sanitising to VNCR-backed sysregs
authorMarc Zyngier <maz@kernel.org>
Wed, 14 Feb 2024 13:18:04 +0000 (13:18 +0000)
committerOliver Upton <oliver.upton@linux.dev>
Mon, 19 Feb 2024 17:13:00 +0000 (17:13 +0000)
VNCR-backed "registers" are actually only memory. Which means that
there is zero control over what the guest can write, and that it
is the hypervisor's job to actually sanitise the content of the
backing store. Yeah, this is fun.

In order to preserve some form of sanity, add a repainting mechanism
that makes use of a per-VM set of RES0/RES1 masks, one pair per VNCR
register. These masks get applied on access to the backing store via
__vcpu_sys_reg(), ensuring that the state that is consumed by KVM is
correct.

So far, nothing populates these masks, but stay tuned.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Link: https://lore.kernel.org/r/20240214131827.2856277-4-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/nested.c

index 527da3d2cedb6a2a4264431bd0251370a9d9903e..8cda003d6267ab72f4d66a25428cdbd43bb0ad85 100644 (file)
@@ -238,6 +238,8 @@ static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
        return index;
 }
 
+struct kvm_sysreg_masks;
+
 struct kvm_arch {
        struct kvm_s2_mmu mmu;
 
@@ -312,6 +314,9 @@ struct kvm_arch {
 #define KVM_ARM_ID_REG_NUM     (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
        u64 id_regs[KVM_ARM_ID_REG_NUM];
 
+       /* Masks for VNCR-backed sysregs */
+       struct kvm_sysreg_masks *sysreg_masks;
+
        /*
         * For an untrusted host VM, 'pkvm.handle' is used to lookup
         * the associated pKVM instance in the hypervisor.
@@ -474,6 +479,13 @@ enum vcpu_sysreg {
        NR_SYS_REGS     /* Nothing after this line! */
 };
 
+struct kvm_sysreg_masks {
+       struct {
+               u64     res0;
+               u64     res1;
+       } mask[NR_SYS_REGS - __VNCR_START__];
+};
+
 struct kvm_cpu_context {
        struct user_pt_regs regs;       /* sp = sp_el0 */
 
@@ -868,7 +880,15 @@ static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
 
 #define ctxt_sys_reg(c,r)      (*__ctxt_sys_reg(c,r))
 
-#define __vcpu_sys_reg(v,r)    (ctxt_sys_reg(&(v)->arch.ctxt, (r)))
+u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
+#define __vcpu_sys_reg(v,r)                                            \
+       (*({                                                            \
+               const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;   \
+               u64 *__r = __ctxt_sys_reg(ctxt, (r));                   \
+               if (vcpu_has_nv((v)) && (r) >= __VNCR_START__)          \
+                       *__r = kvm_vcpu_sanitise_vncr_reg((v), (r));    \
+               __r;                                                    \
+       }))
 
 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
index a25265aca4324e490298794f5418aaa85b64480c..c063e84fc72c763409d4202d3d556620277e41d9 100644 (file)
@@ -206,6 +206,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
                pkvm_destroy_hyp_vm(kvm);
 
        kfree(kvm->arch.mpidr_data);
+       kfree(kvm->arch.sysreg_masks);
        kvm_destroy_vcpus(kvm);
 
        kvm_unshare_hyp(kvm, kvm + 1);
index d55e809e26cb377e736621c681619b37df66d69f..c976cd4b83793bbce787a2cc65f4d5492108f0a1 100644 (file)
@@ -163,15 +163,54 @@ static u64 limit_nv_id_reg(u32 id, u64 val)
 
        return val;
 }
+
+u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg sr)
+{
+       u64 v = ctxt_sys_reg(&vcpu->arch.ctxt, sr);
+       struct kvm_sysreg_masks *masks;
+
+       masks = vcpu->kvm->arch.sysreg_masks;
+
+       if (masks) {
+               sr -= __VNCR_START__;
+
+               v &= ~masks->mask[sr].res0;
+               v |= masks->mask[sr].res1;
+       }
+
+       return v;
+}
+
+static void __maybe_unused set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
+{
+       int i = sr - __VNCR_START__;
+
+       kvm->arch.sysreg_masks->mask[i].res0 = res0;
+       kvm->arch.sysreg_masks->mask[i].res1 = res1;
+}
+
 int kvm_init_nv_sysregs(struct kvm *kvm)
 {
+       int ret = 0;
+
        mutex_lock(&kvm->arch.config_lock);
 
+       if (kvm->arch.sysreg_masks)
+               goto out;
+
+       kvm->arch.sysreg_masks = kzalloc(sizeof(*(kvm->arch.sysreg_masks)),
+                                        GFP_KERNEL);
+       if (!kvm->arch.sysreg_masks) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
        for (int i = 0; i < KVM_ARM_ID_REG_NUM; i++)
                kvm->arch.id_regs[i] = limit_nv_id_reg(IDX_IDREG(i),
                                                       kvm->arch.id_regs[i]);
 
+out:
        mutex_unlock(&kvm->arch.config_lock);
 
-       return 0;
+       return ret;
 }