KVM: x86/xen: allow vcpu_info to be mapped by fixed HVA
author     Paul Durrant <pdurrant@amazon.com>
           Thu, 15 Feb 2024 15:29:08 +0000 (15:29 +0000)
committer  Sean Christopherson <seanjc@google.com>
           Thu, 22 Feb 2024 15:01:17 +0000 (07:01 -0800)
If the guest does not explicitly set the GPA of the vcpu_info structure in
guest memory then, for guests with 32 vCPUs or fewer, the vcpu_info embedded
in the shared_info page may be used. As described in a previous commit, the
shared_info page is an overlay at a fixed HVA within the VMM, so in this
case it is also preferable to activate the vcpu_info cache with a fixed HVA,
avoiding unnecessary invalidation if the guest memory layout is modified.

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
Link: https://lore.kernel.org/r/20240215152916.1158-14-paul@xen.org
[sean: use kvm_gpc_is_{gpa,hva}_active()]
Signed-off-by: Sean Christopherson <seanjc@google.com>
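
For illustration only, a minimal userspace sketch (not part of this patch) of
how a VMM might use the new attribute, assuming it already maps the
shared_info overlay page itself. The helper name, the 'vcpu_fd' parameter and
the way the address is derived are assumptions; KVM must advertise
KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA for this attribute to be accepted.

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /*
     * Hypothetical helper: activate the vcpu_info cache with a fixed HVA for
     * a vCPU whose vcpu_info sits at the "default" location inside the VMM's
     * own mapping of the shared_info overlay page, e.g.
     * &shinfo->vcpu_info[xen_vcpu_id] for a struct shared_info *shinfo and a
     * Xen vCPU id below 32.
     */
    static int set_vcpu_info_hva(int vcpu_fd, void *vcpu_info_hva)
    {
            struct kvm_xen_vcpu_attr attr = {
                    .type  = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA,
                    .u.hva = (__u64)(unsigned long)vcpu_info_hva,
            };

            if (ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &attr)) {
                    perror("KVM_XEN_VCPU_SET_ATTR");
                    return -1;
            }
            return 0;
    }

Because the overlay stays at a fixed host address, KVM can keep its internal
vcpu_info cache valid across changes to the guest memory layout, which is the
optimisation the commit message describes.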
Documentation/virt/kvm/api.rst
arch/x86/include/uapi/asm/kvm.h
arch/x86/kvm/xen.c

index 3372be85b33557ea3e5a14c5e76dfcdd6363aaa3..bd93cafd3e4e3e5f69be921a2ebe3c9258af77ab 100644
@@ -5523,11 +5523,12 @@ KVM_XEN_ATTR_TYPE_SHARED_INFO
   Sets the guest physical frame number at which the Xen shared_info
   page resides. Note that although Xen places vcpu_info for the first
   32 vCPUs in the shared_info page, KVM does not automatically do so
-  and instead requires that KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO be used
-  explicitly even when the vcpu_info for a given vCPU resides at the
-  "default" location in the shared_info page. This is because KVM may
-  not be aware of the Xen CPU id which is used as the index into the
-  vcpu_info[] array, so may know the correct default location.
+  and instead requires that KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO or
+  KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA be used explicitly even when
+  the vcpu_info for a given vCPU resides at the "default" location
+  in the shared_info page. This is because KVM may not be aware of
+  the Xen CPU id which is used as the index into the vcpu_info[]
+  array, so may not know the correct default location.
 
   Note that the shared_info page may be constantly written to by KVM;
   it contains the event channel bitmap used to deliver interrupts to
@@ -5649,6 +5650,21 @@ KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO
   on dirty logging. Setting the gpa to KVM_XEN_INVALID_GPA will disable
   the vcpu_info.
 
+KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA
+  If the KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA flag is also set in the
+  Xen capabilities, then this attribute may be used to set the
+  userspace address of the vcpu_info for a given vCPU. It should
+  only be used when the vcpu_info resides at the "default" location
+  in the shared_info page. In this case it is safe to assume the
+  userspace address will not change, because the shared_info page is
+  an overlay on guest memory and remains at a fixed host address
+  regardless of where it is mapped in guest physical address space;
+  hence unnecessary invalidation of an internal cache may be avoided
+  if the guest memory layout is modified.
+  If the vcpu_info does not reside at the "default" location then
+  it is not guaranteed to remain at the same host address and
+  hence the aforementioned cache invalidation is required.
+
 KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO
   Sets the guest physical address of an additional pvclock structure
   for a given vCPU. This is typically used for guest vsyscall support.
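
By way of contrast, a similarly hedged sketch (not from the patch; same
headers as the earlier sketch) of the pre-existing GPA-based attribute, which
remains the appropriate choice when the guest has placed its vcpu_info
somewhere other than the default slot in the shared_info page:

    /*
     * Hypothetical helper: register a vcpu_info that lives at an arbitrary
     * guest physical address. Passing KVM_XEN_INVALID_GPA instead
     * deactivates the vcpu_info, as documented above.
     */
    static int set_vcpu_info_gpa(int vcpu_fd, __u64 vcpu_info_gpa)
    {
            struct kvm_xen_vcpu_attr attr = {
                    .type  = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
                    .u.gpa = vcpu_info_gpa,
            };

            return ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &attr);
    }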
index 9482f22333c8804b7d449878eb10029a26353d07..ad29984d5e398da425c0516f14b5cf538a023696 100644
@@ -622,6 +622,7 @@ struct kvm_xen_vcpu_attr {
        union {
                __u64 gpa;
 #define KVM_XEN_INVALID_GPA ((__u64)-1)
+               __u64 hva;
                __u64 pad[8];
                struct {
                        __u64 state;
@@ -652,6 +653,8 @@ struct kvm_xen_vcpu_attr {
 #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID         0x6
 #define KVM_XEN_VCPU_ATTR_TYPE_TIMER           0x7
 #define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR   0x8
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA */
+#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA   0x9
 
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
index 45a74307cc254767f670a855bcb4da6a7738518e..cd05faa193082467396b2dfce45bdb24bad60c41 100644
@@ -782,20 +782,33 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 
        switch (data->type) {
        case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
+       case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA:
                /* No compat necessary here. */
                BUILD_BUG_ON(sizeof(struct vcpu_info) !=
                             sizeof(struct compat_vcpu_info));
                BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
                             offsetof(struct compat_vcpu_info, time));
 
-               if (data->u.gpa == KVM_XEN_INVALID_GPA) {
-                       kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
-                       r = 0;
-                       break;
+               if (data->type == KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO) {
+                       if (data->u.gpa == KVM_XEN_INVALID_GPA) {
+                               kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
+                               r = 0;
+                               break;
+                       }
+
+                       r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_info_cache,
+                                            data->u.gpa, sizeof(struct vcpu_info));
+               } else {
+                       if (data->u.hva == 0) {
+                               kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
+                               r = 0;
+                               break;
+                       }
+
+                       r = kvm_gpc_activate_hva(&vcpu->arch.xen.vcpu_info_cache,
+                                                data->u.hva, sizeof(struct vcpu_info));
                }
 
-               r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_info_cache,
-                                    data->u.gpa, sizeof(struct vcpu_info));
                if (!r)
                        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
@@ -1017,13 +1030,21 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 
        switch (data->type) {
        case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
-               if (vcpu->arch.xen.vcpu_info_cache.active)
+               if (kvm_gpc_is_gpa_active(&vcpu->arch.xen.vcpu_info_cache))
                        data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
                else
                        data->u.gpa = KVM_XEN_INVALID_GPA;
                r = 0;
                break;
 
+       case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA:
+               if (kvm_gpc_is_hva_active(&vcpu->arch.xen.vcpu_info_cache))
+                       data->u.hva = vcpu->arch.xen.vcpu_info_cache.uhva;
+               else
+                       data->u.hva = 0;
+               r = 0;
+               break;
+
        case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
                if (vcpu->arch.xen.vcpu_time_info_cache.active)
                        data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
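
To close the loop, a hedged sketch (not part of the patch; same headers as
the earlier sketches) of userspace querying the new attribute via the
KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA case in kvm_xen_vcpu_get_attr() shown
above. Per that code, a returned hva of 0 means the vcpu_info cache is not
active with a fixed HVA; the helper name and error handling are assumptions.

    /*
     * Hypothetical helper: read back the userspace address of the vcpu_info.
     * KVM reports 0 if the cache is inactive or was activated by GPA rather
     * than by HVA.
     */
    static int get_vcpu_info_hva(int vcpu_fd, __u64 *hva)
    {
            struct kvm_xen_vcpu_attr attr = {
                    .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA,
            };
            int ret = ioctl(vcpu_fd, KVM_XEN_VCPU_GET_ATTR, &attr);

            if (!ret)
                    *hva = attr.u.hva;
            return ret;
    }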