KVM: arm64: Introduce vcpu_sve_max_vq() helper
author     Marc Zyngier <maz@kernel.org>
           Fri, 12 Mar 2021 14:38:43 +0000 (14:38 +0000)
committer  Marc Zyngier <maz@kernel.org>
           Thu, 18 Mar 2021 11:24:10 +0000 (11:24 +0000)
The KVM code contains a number of "sve_vq_from_vl(vcpu->arch.sve_max_vl)"
instances, and we are about to add more.

Introduce vcpu_sve_max_vq() as a shorthand for this expression.

Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
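
For background: SVE vector lengths such as sve_max_vl are tracked in bytes,
while a "VQ" counts 128-bit quadwords, so the conversion is a division by
SVE_VQ_BYTES (16). A minimal standalone sketch of what the new helper
computes (the *_sketch types are illustrative stand-ins, not the kernel's
structures):

    #include <assert.h>

    #define SVE_VQ_BYTES		16	/* one quadword is 128 bits */
    #define sve_vq_from_vl(vl)	((vl) / SVE_VQ_BYTES)

    /* Illustrative stand-ins for the kernel's vcpu structures. */
    struct kvm_vcpu_arch_sketch { unsigned int sve_max_vl; };
    struct kvm_vcpu_sketch { struct kvm_vcpu_arch_sketch arch; };

    #define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

    int main(void)
    {
    	/* A guest limited to 512-bit (64-byte) vectors... */
    	struct kvm_vcpu_sketch v = { .arch = { .sve_max_vl = 64 } };

    	/* ...has at most four 128-bit quadwords per vector. */
    	assert(vcpu_sve_max_vq(&v) == 4);
    	return 0;
    }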
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/guest.c
arch/arm64/kvm/hyp/include/hyp/switch.h

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index fb1d78299ba024d77d4e36fcf826cd6e47de1e69..e59b16008868ab0cf43729c5c6846b0f7be23e95 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -375,6 +375,8 @@ struct kvm_vcpu_arch {
 #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +     \
                             sve_ffr_offset((vcpu)->arch.sve_max_vl))
 
+#define vcpu_sve_max_vq(vcpu)  sve_vq_from_vl((vcpu)->arch.sve_max_vl)
+
 #define vcpu_sve_state_size(vcpu) ({                                   \
        size_t __size_ret;                                              \
        unsigned int __vcpu_vq;                                         \
@@ -382,7 +384,7 @@ struct kvm_vcpu_arch {
        if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {          \
                __size_ret = 0;                                         \
        } else {                                                        \
-               __vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl);    \
+               __vcpu_vq = vcpu_sve_max_vq(vcpu);                      \
                __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);              \
        }                                                               \
                                                                        \
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 9bbd30e62799a27da6e8030abeb25ed9ebe73f7c..c763808cacdfe961e91609064cffbc2f2210a75a 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -299,7 +299,7 @@ static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 
        memset(vqs, 0, sizeof(vqs));
 
-       max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+       max_vq = vcpu_sve_max_vq(vcpu);
        for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
                if (sve_vq_available(vq))
                        vqs[vq_word(vq)] |= vq_mask(vq);
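
For context (not part of this patch): get_sve_vls() publishes the supported
vector lengths to userspace as a bitmap with one bit per VQ, starting at
SVE_VQ_MIN. A standalone sketch of the packing, mirroring the
vq_word()/vq_mask() helpers already defined in guest.c:

    #include <stdint.h>

    #define SVE_VQ_MIN	1	/* smallest VQ: one 128-bit quadword */

    /* Bit (vq - SVE_VQ_MIN) of the u64 array marks VQ as available. */
    #define vq_word(vq)	(((vq) - SVE_VQ_MIN) / 64)
    #define vq_mask(vq)	((uint64_t)1 << (((vq) - SVE_VQ_MIN) % 64))

    /* Query helper in the style of guest.c's vq_present(). */
    static inline int vq_present(const uint64_t *vqs, unsigned int vq)
    {
    	return !!(vqs[vq_word(vq)] & vq_mask(vq));
    }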
@@ -427,7 +427,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
                if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
                        return -ENOENT;
 
-               vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+               vq = vcpu_sve_max_vq(vcpu);
 
                reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
                                SVE_SIG_REGS_OFFSET;
@@ -437,7 +437,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
                if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
                        return -ENOENT;
 
-               vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+               vq = vcpu_sve_max_vq(vcpu);
 
                reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
                                SVE_SIG_REGS_OFFSET;
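
For context (hypothetical helper names, not from the patch): the uapi
SVE_SIG_*_OFFSET macros give offsets relative to the start of the signal
frame, and sve_reg_to_region() subtracts SVE_SIG_REGS_OFFSET to rebase them
onto vcpu->arch.sve_state, which holds only the register payload. The
rebased arithmetic reduces to:

    #include <stddef.h>

    /* n-th Z register: the 32 Z registers are laid out back to back,
     * each vq * 16 bytes long. */
    static size_t zreg_off_sketch(unsigned int vq, unsigned int n)
    {
    	return (size_t)n * vq * 16;
    }

    /* n-th P register: predicates follow the Z block, at 2 bytes per
     * quadword each. */
    static size_t preg_off_sketch(unsigned int vq, unsigned int n)
    {
    	return 32 * (size_t)vq * 16 + (size_t)n * vq * 2;
    }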
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index d762d5bdc2d56d9700620530178cf619fdac9070..fb68271c1a0f3022f31ca6808211d899bc578714 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -268,7 +268,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
        if (sve_guest) {
                __sve_restore_state(vcpu_sve_pffr(vcpu),
                                    &vcpu->arch.ctxt.fp_regs.fpsr,
-                                   sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
+                                   vcpu_sve_max_vq(vcpu) - 1);
                write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
        } else {
                __fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
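
A note on the "- 1" (background, not from the commit message): the
architectural ZCR_ELx.LEN field encodes the vector length as a number of
128-bit quadwords minus one, which is why the hyp code passes
vcpu_sve_max_vq(vcpu) - 1. A trivial sketch of the encoding
(zcr_len_from_vq() is a hypothetical name):

    /* ZCR_ELx.LEN holds (vector length in quadwords) - 1. */
    static inline unsigned int zcr_len_from_vq(unsigned int vq)
    {
    	return vq - 1;	/* e.g. 512-bit vectors: vq == 4 -> LEN == 3 */
    }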