KVM: riscv: Add KVM_GET_REG_LIST API support
authorHaibo Xu <haibo1.xu@intel.com>
Tue, 25 Jul 2023 08:41:38 +0000 (16:41 +0800)
committerAnup Patel <anup@brainfault.org>
Wed, 9 Aug 2023 06:45:25 +0000 (12:15 +0530)
The KVM_GET_REG_LIST API returns all registers that are available to
the KVM_GET/SET_ONE_REG APIs. It is very useful for identifying
platform regression issues during VM migration.

Since this API is already supported on arm64, it is straightforward
to enable it on riscv with a similar code structure.
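
For reference, below is a minimal userspace sketch (not part of this
patch) of how the new ioctl is typically consumed: a first call with
n = 0 fails with E2BIG but reports the real register count, and a
second call with a large enough array returns all register IDs. The
vcpu fd and the helper name are illustrative only.

  #include <stdlib.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static struct kvm_reg_list *vcpu_get_reg_list(int vcpu_fd)
  {
          struct kvm_reg_list probe, *list;

          /* First call: the kernel fails with E2BIG but writes back the count. */
          memset(&probe, 0, sizeof(probe));
          ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

          /* Second call: n is now large enough, so reg[] is filled with all IDs. */
          list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
          if (!list)
                  return NULL;
          list->n = probe.n;
          if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
                  free(list);
                  return NULL;
          }

          return list;
  }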

Signed-off-by: Haibo Xu <haibo1.xu@intel.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Signed-off-by: Anup Patel <anup@brainfault.org>
Documentation/virt/kvm/api.rst
arch/riscv/include/asm/kvm_host.h
arch/riscv/kvm/vcpu.c
arch/riscv/kvm/vcpu_onereg.c

index 3249fb56cc69688d8040d80bd14ee66a8aa80959..660d9ca7a251dbe5a3b1053cc5bafce774c0d879 100644 (file)
@@ -3501,7 +3501,7 @@ VCPU matching underlying host.
 ---------------------
 
 :Capability: basic
-:Architectures: arm64, mips
+:Architectures: arm64, mips, riscv
 :Type: vcpu ioctl
 :Parameters: struct kvm_reg_list (in/out)
 :Returns: 0 on success; -1 on error
index 55bc7bdbff4869a856096b73afde23a557bd3ba6..1ebf20dfbaa6982c32777af62ad30fba7581a223 100644 (file)
@@ -338,6 +338,9 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);
 
 void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu);
+unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
+                                   u64 __user *uindices);
 int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
                           const struct kvm_one_reg *reg);
 int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
index 452d6548e9512f2b0c92183f8aba284d44ec7c1e..82229db1ce73f3b42de478e2af6cbc9a0756fca1 100644 (file)
@@ -254,6 +254,24 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                        r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
                break;
        }
+       case KVM_GET_REG_LIST: {
+               struct kvm_reg_list __user *user_list = argp;
+               struct kvm_reg_list reg_list;
+               unsigned int n;
+
+               r = -EFAULT;
+               if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+                       break;
+               n = reg_list.n;
+               reg_list.n = kvm_riscv_vcpu_num_regs(vcpu);
+               if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+                       break;
+               r = -E2BIG;
+               if (n < reg_list.n)
+                       break;
+               r = kvm_riscv_vcpu_copy_reg_indices(vcpu, user_list->reg);
+               break;
+       }
        default:
                break;
        }
index 9fee1c176fbb7ad7f36dbb2a4459005e3df3b62a..1b7e9fa265cbb8dd68b30cc06ed8b7b00dc03fba 100644 (file)
@@ -622,6 +622,372 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
        return 0;
 }
 
+static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
+                               u64 __user *uindices)
+{
+       int n = 0;
+
+       for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
+                i++) {
+               u64 size;
+               u64 reg;
+
+               /*
+                * Avoid reporting config reg if the corresponding extension
+                * is not available.
+                */
+               if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
+                       !riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
+                       continue;
+               else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
+                       !riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
+                       continue;
+
+               size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+               reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+
+               n++;
+       }
+
+       return n;
+}
+
+static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
+{
+       return copy_config_reg_indices(vcpu, NULL);
+}
+
+static inline unsigned long num_core_regs(void)
+{
+       return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
+}
+
+static int copy_core_reg_indices(u64 __user *uindices)
+{
+       int n = num_core_regs();
+
+       for (int i = 0; i < n; i++) {
+               u64 size = IS_ENABLED(CONFIG_32BIT) ?
+                          KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+               u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+       }
+
+       return n;
+}
+
+static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
+{
+       unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
+
+       if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
+               n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
+
+       return n;
+}
+
+static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
+                               u64 __user *uindices)
+{
+       int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
+       int n2 = 0;
+
+       /* copy general csr regs */
+       for (int i = 0; i < n1; i++) {
+               u64 size = IS_ENABLED(CONFIG_32BIT) ?
+                          KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+               u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
+                                 KVM_REG_RISCV_CSR_GENERAL | i;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+       }
+
+       /* copy AIA csr regs */
+       if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
+               n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
+
+               for (int i = 0; i < n2; i++) {
+                       u64 size = IS_ENABLED(CONFIG_32BIT) ?
+                                  KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+                       u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
+                                         KVM_REG_RISCV_CSR_AIA | i;
+
+                       if (uindices) {
+                               if (put_user(reg, uindices))
+                                       return -EFAULT;
+                               uindices++;
+                       }
+               }
+       }
+
+       return n1 + n2;
+}
+
+static inline unsigned long num_timer_regs(void)
+{
+       return sizeof(struct kvm_riscv_timer) / sizeof(u64);
+}
+
+static int copy_timer_reg_indices(u64 __user *uindices)
+{
+       int n = num_timer_regs();
+
+       for (int i = 0; i < n; i++) {
+               u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
+                         KVM_REG_RISCV_TIMER | i;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+       }
+
+       return n;
+}
+
+static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
+{
+       const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+
+       if (riscv_isa_extension_available(vcpu->arch.isa, f))
+               return sizeof(cntx->fp.f) / sizeof(u32);
+       else
+               return 0;
+}
+
+static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
+                               u64 __user *uindices)
+{
+       int n = num_fp_f_regs(vcpu);
+
+       for (int i = 0; i < n; i++) {
+               u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
+                         KVM_REG_RISCV_FP_F | i;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+       }
+
+       return n;
+}
+
+static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
+{
+       const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+
+       if (riscv_isa_extension_available(vcpu->arch.isa, d))
+               return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
+       else
+               return 0;
+}
+
+static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
+                               u64 __user *uindices)
+{
+       int i;
+       int n = num_fp_d_regs(vcpu);
+       u64 reg;
+
+       /* copy fp.d.f indices */
+       for (i = 0; i < n - 1; i++) {
+               reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
+                     KVM_REG_RISCV_FP_D | i;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+       }
+
+       /* copy fp.d.fcsr indices */
+       reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
+       if (uindices) {
+               if (put_user(reg, uindices))
+                       return -EFAULT;
+               uindices++;
+       }
+
+       return n;
+}
+
+static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
+                               u64 __user *uindices)
+{
+       unsigned int n = 0;
+       unsigned long isa_ext;
+
+       for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
+               u64 size = IS_ENABLED(CONFIG_32BIT) ?
+                          KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+               u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;
+
+               isa_ext = kvm_isa_ext_arr[i];
+               if (!__riscv_isa_extension_available(vcpu->arch.isa, isa_ext))
+                       continue;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+
+               n++;
+       }
+
+       return n;
+}
+
+static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
+{
+       return copy_isa_ext_reg_indices(vcpu, NULL);
+}
+
+static inline unsigned long num_sbi_ext_regs(void)
+{
+       /*
+        * number of KVM_REG_RISCV_SBI_SINGLE +
+        * 2 x (number of KVM_REG_RISCV_SBI_MULTI)
+        */
+       return KVM_RISCV_SBI_EXT_MAX + 2 * (KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1);
+}
+
+static int copy_sbi_ext_reg_indices(u64 __user *uindices)
+{
+       int n;
+
+       /* copy KVM_REG_RISCV_SBI_SINGLE */
+       n = KVM_RISCV_SBI_EXT_MAX;
+       for (int i = 0; i < n; i++) {
+               u64 size = IS_ENABLED(CONFIG_32BIT) ?
+                          KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+               u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+                         KVM_REG_RISCV_SBI_SINGLE | i;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+       }
+
+       /* copy KVM_REG_RISCV_SBI_MULTI */
+       n = KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1;
+       for (int i = 0; i < n; i++) {
+               u64 size = IS_ENABLED(CONFIG_32BIT) ?
+                          KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+               u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+                         KVM_REG_RISCV_SBI_MULTI_EN | i;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+
+               reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+                         KVM_REG_RISCV_SBI_MULTI_DIS | i;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+       }
+
+       return num_sbi_ext_regs();
+}
+
+/*
+ * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
+ *
+ * This is for all registers.
+ */
+unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
+{
+       unsigned long res = 0;
+
+       res += num_config_regs(vcpu);
+       res += num_core_regs();
+       res += num_csr_regs(vcpu);
+       res += num_timer_regs();
+       res += num_fp_f_regs(vcpu);
+       res += num_fp_d_regs(vcpu);
+       res += num_isa_ext_regs(vcpu);
+       res += num_sbi_ext_regs();
+
+       return res;
+}
+
+/*
+ * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
+ */
+int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
+                                   u64 __user *uindices)
+{
+       int ret;
+
+       ret = copy_config_reg_indices(vcpu, uindices);
+       if (ret < 0)
+               return ret;
+       uindices += ret;
+
+       ret = copy_core_reg_indices(uindices);
+       if (ret < 0)
+               return ret;
+       uindices += ret;
+
+       ret = copy_csr_reg_indices(vcpu, uindices);
+       if (ret < 0)
+               return ret;
+       uindices += ret;
+
+       ret = copy_timer_reg_indices(uindices);
+       if (ret < 0)
+               return ret;
+       uindices += ret;
+
+       ret = copy_fp_f_reg_indices(vcpu, uindices);
+       if (ret < 0)
+               return ret;
+       uindices += ret;
+
+       ret = copy_fp_d_reg_indices(vcpu, uindices);
+       if (ret < 0)
+               return ret;
+       uindices += ret;
+
+       ret = copy_isa_ext_reg_indices(vcpu, uindices);
+       if (ret < 0)
+               return ret;
+       uindices += ret;
+
+       ret = copy_sbi_ext_reg_indices(uindices);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
 int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
                           const struct kvm_one_reg *reg)
 {