#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
-#define TCG_SVM_FEATURES CPUID_SVM_NPT
+#define TCG_SVM_FEATURES (CPUID_SVM_NPT | CPUID_SVM_VGIF | \
+          CPUID_SVM_SVME_ADDR_CHK)
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
#define V_IRQ_SHIFT 8
#define V_IRQ_MASK (1 << V_IRQ_SHIFT)
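+/*
+ * VMCB int_ctl bits for vGIF: bit 25 (V_GIF_ENABLED) indicates that vGIF
+ * is in use for the guest, and bit 9 (V_GIF) then holds the guest's
+ * virtual global interrupt flag (AMD APM vol. 2, VMCB control area).
+ */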
+#define V_GIF_ENABLED_SHIFT 25
+#define V_GIF_ENABLED_MASK (1 << V_GIF_ENABLED_SHIFT)
+
+#define V_GIF_SHIFT 9
+#define V_GIF_MASK (1 << V_GIF_SHIFT)
+
#define V_INTR_PRIO_SHIFT 16
#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT)
return false;
}
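+
+/*
+ * True when the vCPU is in guest mode and the current VMCB has vGIF
+ * enabled; in that case *int_ctl is filled with the guest's int_ctl
+ * field so callers can update the V_GIF bit in place.  *int_ctl is
+ * left untouched when this returns false.
+ */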
+static inline bool virtual_gif_enabled(CPUX86State *env, uint32_t *int_ctl)
+{
+    if (likely(env->hflags & HF_GUEST_MASK)) {
+        *int_ctl = x86_ldl_phys(env_cpu(env),
+                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
+        return (env->features[FEAT_SVM] & CPUID_SVM_VGIF)
+                && (*int_ctl & V_GIF_ENABLED_MASK);
+    }
+    return false;
+}
+
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
CPUState *cs = env_cpu(env);
void helper_stgi(CPUX86State *env)
{
+    CPUState *cs = env_cpu(env);
+    uint32_t int_ctl;
+
cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
-    env->hflags2 |= HF2_GIF_MASK;
+
+    if (virtual_gif_enabled(env, &int_ctl)) {
+        /* vGIF in use: set the virtual GIF bit in the VMCB */
+        x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
+                     int_ctl | V_GIF_MASK);
+    } else {
+        env->hflags2 |= HF2_GIF_MASK;
+    }
}
void helper_clgi(CPUX86State *env)
{
+    CPUState *cs = env_cpu(env);
+    uint32_t int_ctl;
+
cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
-    env->hflags2 &= ~HF2_GIF_MASK;
+
+    if (virtual_gif_enabled(env, &int_ctl)) {
+        /* vGIF in use: clear the virtual GIF bit in the VMCB */
+        x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
+                     int_ctl & ~V_GIF_MASK);
+    } else {
+        env->hflags2 &= ~HF2_GIF_MASK;
+    }
}
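+
+/*
+ * Sketch (not part of this patch): given the helpers above, a reader of
+ * the effective GIF would consult the VMCB when vGIF is active and fall
+ * back to hflags2 otherwise:
+ *
+ *     uint32_t int_ctl;
+ *     bool gif = virtual_gif_enabled(env, &int_ctl)
+ *                ? (int_ctl & V_GIF_MASK) != 0
+ *                : (env->hflags2 & HF2_GIF_MASK) != 0;
+ */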
bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type)