        }
    }
+    if (cpu->ucode_rev == 0) {
+        /* The default is the same as KVM's. */
+        if (IS_AMD_CPU(env)) {
+            cpu->ucode_rev = 0x01000065;
+        } else {
+            cpu->ucode_rev = 0x100000000ULL;
+        }
+    }
+
    /* mwait extended info: needed for Core compatibility */
    /* We always wake on interrupt even if host does not have the capability */
    cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
+ DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0),
DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
#define MSR_IA32_SPEC_CTRL 0x48
#define MSR_VIRT_SSBD 0xc001011f
#define MSR_IA32_PRED_CMD 0x49
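+/* Microcode update signature/revision MSR (0x8b on both Intel and AMD). */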
+#define MSR_IA32_UCODE_REV 0x8b
#define MSR_IA32_CORE_CAPABILITY 0xcf
#define MSR_IA32_ARCH_CAPABILITIES 0x10a
    CPUNegativeOffsetState neg;
    CPUX86State env;
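+    /* Returned by RDMSR(MSR_IA32_UCODE_REV); 0 = pick a default at realize. */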
+    uint64_t ucode_rev;
+
    uint32_t hyperv_spinlock_attempts;
    char *hyperv_vendor_id;
    bool hyperv_synic_kvm_only;
    RIP(env) += decode->len;
}
-#define MSR_IA32_UCODE_REV 0x00000017
-
void simulate_rdmsr(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
        val = cpu_get_apic_base(X86_CPU(cpu)->apic_state);
        break;
    case MSR_IA32_UCODE_REV:
-        val = (0x100000000ULL << 32) | 0x100000000ULL;
+        val = x86_cpu->ucode_rev;
        break;
    case MSR_EFER:
        val = rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER);
#else
void helper_rdmsr(CPUX86State *env)
{
+    X86CPU *x86_cpu = env_archcpu(env);
    uint64_t val;
    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC());
        val = env->msr_bndcfgs;
        break;
+    case MSR_IA32_UCODE_REV:
+        val = x86_cpu->ucode_rev;
+        break;
    default:
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +