KVM: x86: Move MSR_IA32_PRED_CMD WRMSR emulation to common code
author Sean Christopherson <seanjc@google.com>
Wed, 22 Mar 2023 01:14:38 +0000 (18:14 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 6 Apr 2023 17:37:36 +0000 (13:37 -0400)
Dedup the handling of MSR_IA32_PRED_CMD across VMX and SVM by moving the
logic to kvm_set_msr_common().  Now that the MSR interception toggling is
handled as part of setting guest CPUID, the VMX and SVM paths are
identical.

Opportunistically massage the code to make it a wee bit denser.
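
For reference, the consolidated handler in kvm_set_msr_common() boils down
to the checks below (a sketch of the x86.c hunk further down, with
explanatory comments added; it is a case fragment of the MSR switch, not
standalone code):

	case MSR_IA32_PRED_CMD:
		/* Reject guest writes if the MSR isn't exposed via CPUID. */
		if (!msr_info->host_initiated && !guest_has_pred_cmd_msr(vcpu))
			return 1;

		/* Only IBPB is defined; reject reserved bits or a host without IBPB. */
		if (!boot_cpu_has(X86_FEATURE_IBPB) || (data & ~PRED_CMD_IBPB))
			return 1;

		/* Writing '0' requests no barrier, so there's nothing to do. */
		if (!data)
			break;

		/* Emulate the guest's write by issuing an IBPB on the host. */
		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
		break;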

Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Message-Id: <20230322011440.2195485-5-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f757b436ffae436c650dccbe2f9efbde7302b050..85bb535fc3213e9b9d4ce6c1434781f965318043 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2942,20 +2942,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                 */
                set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
                break;
-       case MSR_IA32_PRED_CMD:
-               if (!msr->host_initiated &&
-                   !guest_has_pred_cmd_msr(vcpu))
-                       return 1;
-
-               if (data & ~PRED_CMD_IBPB)
-                       return 1;
-               if (!boot_cpu_has(X86_FEATURE_IBPB))
-                       return 1;
-               if (!data)
-                       break;
-
-               wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
-               break;
        case MSR_AMD64_VIRT_SPEC_CTRL:
                if (!msr->host_initiated &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 5c01c76c0d452fa6873af466bc8d49846019cbc7..29807be219b971de9df09a3ff98e398a7d5b9df9 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2285,20 +2285,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR))
                        return 1;
                goto find_uret_msr;
-       case MSR_IA32_PRED_CMD:
-               if (!msr_info->host_initiated &&
-                   !guest_has_pred_cmd_msr(vcpu))
-                       return 1;
-
-               if (data & ~PRED_CMD_IBPB)
-                       return 1;
-               if (!boot_cpu_has(X86_FEATURE_IBPB))
-                       return 1;
-               if (!data)
-                       break;
-
-               wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
-               break;
        case MSR_IA32_CR_PAT:
                if (!kvm_pat_valid(data))
                        return 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 237c483b1230168e8bc4cb16e9bb1de7f392c9af..c83ec88da0434162785d4a8eb39863f61ff48b05 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3617,6 +3617,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                vcpu->arch.perf_capabilities = data;
                kvm_pmu_refresh(vcpu);
                return 0;
+       case MSR_IA32_PRED_CMD:
+               if (!msr_info->host_initiated && !guest_has_pred_cmd_msr(vcpu))
+                       return 1;
+
+               if (!boot_cpu_has(X86_FEATURE_IBPB) || (data & ~PRED_CMD_IBPB))
+                       return 1;
+               if (!data)
+                       break;
+
+               wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
+               break;
        case MSR_EFER:
                return set_efer(vcpu, msr_info);
        case MSR_K7_HWCR: