Linux-5.8 introduced an interrupt-based mechanism for 'page ready' event
delivery and disabled the old, #PF based one (see commit 2635b5c4a0e4
"KVM: x86: interrupt based APF 'page ready' event delivery"). The Linux
guest switches to using it in 5.9 (see commit b1d405751cd5 "KVM: x86:
Switch KVM guest to using interrupts for page ready APF delivery").
The feature has a new KVM_FEATURE_ASYNC_PF_INT bit assigned and
the interrupt vector is set in MSR_KVM_ASYNC_PF_INT MSR. Support this
in QEMU.
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20200908141206.357450-1-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
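
For reference, the guest-visible interface this patch exposes works roughly
as sketched below (a minimal sketch, not part of this patch; the MSR and bit
constants are taken from Linux's asm/kvm_para.h, while the helper name, the
extern MSR accessor and its use are illustrative assumptions):

#include <stdint.h>

/* MSR and bit values as defined in Linux's asm/kvm_para.h. */
#define MSR_KVM_ASYNC_PF_EN          0x4b564d02
#define MSR_KVM_ASYNC_PF_INT         0x4b564d06
#define KVM_ASYNC_PF_ENABLED         (1 << 0)
#define KVM_ASYNC_PF_DELIVERY_AS_INT (1 << 3)

/* Assumed guest-side MSR write primitive (e.g. the kernel's wrmsrl()). */
extern void wrmsrl(uint32_t msr, uint64_t val);

/* Sketch: enable async PF with interrupt based 'page ready' delivery. */
static void enable_async_pf_int(uint64_t apf_area_gpa, uint8_t vector)
{
    /* Program the vector used for 'page ready' notifications. */
    wrmsrl(MSR_KVM_ASYNC_PF_INT, vector);
    /* Enable APF and request interrupt (not #PF) based 'page ready' events. */
    wrmsrl(MSR_KVM_ASYNC_PF_EN,
           apf_area_gpa | KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT);
}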
"kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
"kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
- "kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL,
+ "kvm-poll-control", "kvm-pv-sched-yield", "kvm-asyncpf-int", NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
"kvmclock-stable-bit", NULL, NULL, NULL,
object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay");
object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu");
object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf");
+ object_property_add_alias(obj, "kvm_asyncpf_int", obj, "kvm-asyncpf-int");
object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time");
object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi");
object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt");
uint64_t wall_clock_msr;
uint64_t steal_time_msr;
uint64_t async_pf_en_msr;
+ uint64_t async_pf_int_msr;
uint64_t pv_eoi_en_msr;
uint64_t poll_control_msr;
{ KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
{ KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
{ KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
+ { KVM_CAP_ASYNC_PF_INT, KVM_FEATURE_ASYNC_PF_INT },
};
static int get_para_features(KVMState *s)
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
}
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, env->async_pf_int_msr);
+ }
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
}
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
}
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, 0);
+ }
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
}
case MSR_KVM_ASYNC_PF_EN:
env->async_pf_en_msr = msrs[i].data;
break;
+ case MSR_KVM_ASYNC_PF_INT:
+ env->async_pf_int_msr = msrs[i].data;
+ break;
case MSR_KVM_PV_EOI_EN:
env->pv_eoi_en_msr = msrs[i].data;
break;
return cpu->env.async_pf_en_msr != 0;
}
+static bool async_pf_int_msr_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+
+ return cpu->env.async_pf_int_msr != 0;
+}
+
static bool pv_eoi_msr_needed(void *opaque)
{
X86CPU *cpu = opaque;
}
};
+static const VMStateDescription vmstate_async_pf_int_msr = {
+ .name = "cpu/async_pf_int_msr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = async_pf_int_msr_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.async_pf_int_msr, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_pv_eoi_msr = {
.name = "cpu/async_pv_eoi_msr",
.version_id = 1,
.subsections = (const VMStateDescription*[]) {
&vmstate_exception_info,
&vmstate_async_pf_msr,
+ &vmstate_async_pf_int_msr,
&vmstate_pv_eoi_msr,
&vmstate_steal_time_msr,
&vmstate_poll_control_msr,