KVM: arm64: Get rid of vCPU-scoped feature bitmap
author Oliver Upton <oliver.upton@linux.dev>
Wed, 20 Sep 2023 19:50:36 +0000 (19:50 +0000)
committer Oliver Upton <oliver.upton@linux.dev>
Thu, 21 Sep 2023 18:13:29 +0000 (18:13 +0000)
The vCPU-scoped feature bitmap was left in place a couple of releases
ago in case the change to VM-scoped vCPU features broke anyone. Nobody
has complained, and the interop between VM and vCPU bitmaps is pretty
gross. Throw it out.
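
Concretely, feature checks are now answered from the VM-wide bitmap. The
helper and a representative caller below are lifted from the kvm_emulate.h
hunk in this patch, shown here only for orientation; the diff itself is
authoritative:

  static inline bool vcpu_has_feature(const struct kvm_vcpu *vcpu, int feature)
  {
          return test_bit(feature, vcpu->kvm->arch.vcpu_features);
  }

  static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
  {
          /* was: test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features) */
          return vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
  }

Since kvm_reset_vcpu() now reads the VM-scoped bitmap, __kvm_vcpu_set_target()
copies the requested features into kvm->arch.vcpu_features before the reset
rather than after it.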

Link: https://lore.kernel.org/r/20230920195036.1169791-9-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_nested.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/hypercalls.c
arch/arm64/kvm/reset.c
include/kvm/arm_pmu.h
include/kvm/arm_psci.h

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 3d6725ff0bf6d24577c84c6814fcc814b0681fb8..965b4cd8c24703c7993808b8d7ab6857a4bf9073 100644
@@ -54,6 +54,11 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
 int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
 int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
 
+static inline bool vcpu_has_feature(const struct kvm_vcpu *vcpu, int feature)
+{
+       return test_bit(feature, vcpu->kvm->arch.vcpu_features);
+}
+
 #if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
 static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
@@ -62,7 +67,7 @@ static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 #else
 static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
-       return test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features);
+       return vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
 }
 #endif
 
@@ -565,12 +570,6 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
                vcpu_set_flag((v), e);                                  \
        } while (0)
 
-
-static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
-{
-       return test_bit(feature, vcpu->arch.features);
-}
-
 static __always_inline void kvm_write_cptr_el2(u64 val)
 {
        if (has_vhe() || has_hvhe())
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index cb2cde7b2682a523ed9880531a33ab97d2e6a26e..c3a17888f183de5af47f495dc1275856b3470803 100644
@@ -574,9 +574,6 @@ struct kvm_vcpu_arch {
        /* Cache some mmu pages needed inside spinlock regions */
        struct kvm_mmu_memory_cache mmu_page_cache;
 
-       /* feature flags */
-       DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
-
        /* Virtual SError ESR to restore when HCR_EL2.VSE is set */
        u64 vsesr_el2;
 
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index fa23cc9c2adc77fb278128eb50a401e7eb3d9201..6cec8e9c6c9126b1f9b01ba0eed4a2f156deb334 100644
@@ -2,13 +2,14 @@
 #ifndef __ARM64_KVM_NESTED_H
 #define __ARM64_KVM_NESTED_H
 
+#include <asm/kvm_emulate.h>
 #include <linux/kvm_host.h>
 
 static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
 {
        return (!__is_defined(__KVM_NVHE_HYPERVISOR__) &&
                cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
-               test_bit(KVM_ARM_VCPU_HAS_EL2, vcpu->arch.features));
+               vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2));
 }
 
 extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index ac30c4f3d55cadf5506d0c43bc93e3d986c79bfc..1bfdd583b261d461ead8cf4ae3843181dbfc9c7c 100644
@@ -367,7 +367,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 
        /* Force users to call KVM_ARM_VCPU_INIT */
        vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
-       bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 
        vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
 
@@ -1263,7 +1262,8 @@ static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
 {
        unsigned long features = init->features[0];
 
-       return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);
+       return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features,
+                            KVM_VCPU_MAX_FEATURES);
 }
 
 static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
@@ -1276,15 +1276,14 @@ static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
        mutex_lock(&kvm->arch.config_lock);
 
        if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) &&
-           !bitmap_equal(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES))
+           kvm_vcpu_init_changed(vcpu, init))
                goto out_unlock;
 
-       bitmap_copy(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);
+       bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
 
        /* Now we know what it is, we can reset it. */
        kvm_reset_vcpu(vcpu);
 
-       bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
        set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);
        vcpu_set_flag(vcpu, VCPU_INITIALIZED);
        ret = 0;
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index 7fb4df0456dea53f9cdeccdd28c2ff47a8ce6ff3..1b79219c590c144dcdca435a67583241c121c95b 100644
@@ -554,7 +554,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        {
                bool wants_02;
 
-               wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
+               wants_02 = vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2);
 
                switch (val) {
                case KVM_ARM_PSCI_0_1:
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 96ef9b7e74d4455403d19f9610bae92f24feb90b..5bb4de162cab5df9ff443e3c37b3917f444944a3 100644
@@ -208,14 +208,14 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
                kvm_arch_vcpu_put(vcpu);
 
        if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
-               if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features))
+               if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
                        kvm_vcpu_enable_sve(vcpu);
        } else {
                kvm_vcpu_reset_sve(vcpu);
        }
 
-       if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
-           test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features))
+       if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) ||
+           vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC))
                kvm_vcpu_enable_ptrauth(vcpu);
 
        if (vcpu_el1_is_32bit(vcpu))
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 31029f4f7be851d684c9500c243b59e979e6df67..3546ebc469ad77459ac9d711ed347cfb8835d652 100644
@@ -77,7 +77,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
 void kvm_vcpu_pmu_resync_el0(void);
 
 #define kvm_vcpu_has_pmu(vcpu)                                 \
-       (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
+       (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))
 
 /*
  * Updates the vcpu's view of the pmu events for this cpu.
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
index 6e55b9283789b148f76030e63de34de03fc35cc0..e8fb624013d15c275fdfff258826ca90b3bb35da 100644
@@ -26,7 +26,7 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu)
         * revisions. It is thus safe to return the latest, unless
         * userspace has instructed us otherwise.
         */
-       if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
+       if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2)) {
                if (vcpu->kvm->arch.psci_version)
                        return vcpu->kvm->arch.psci_version;