RISCV: KVM: Add sstateen0 to ONE_REG
author	Mayuresh Chitale <mchitale@ventanamicro.com>
Wed, 13 Sep 2023 16:39:05 +0000 (22:09 +0530)
committer	Anup Patel <anup@brainfault.org>
Thu, 12 Oct 2023 13:14:13 +0000 (18:44 +0530)
Add support for the sstateen0 CSR to the ONE_REG interface so that it can be
accessed from user space.

Signed-off-by: Mayuresh Chitale <mchitale@ventanamicro.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Signed-off-by: Anup Patel <anup@brainfault.org>
arch/riscv/include/uapi/asm/kvm.h
arch/riscv/kvm/vcpu_onereg.c
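
For reference, a minimal user-space sketch (not part of the patch) of how a VMM
could fetch the new register through KVM_GET_ONE_REG once Smstateen is enabled
for the vCPU. The read_sstateen0() helper and the vcpu_fd parameter are
illustrative names, and the KVM_REG_SIZE_U64 encoding assumes an rv64 host
(rv32 would use KVM_REG_SIZE_U32):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int read_sstateen0(int vcpu_fd, unsigned long *val)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                            KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_SMSTATEEN |
                            KVM_REG_RISCV_CSR_SMSTATEEN_REG(sstateen0),
                    .addr = (unsigned long)val,
            };

            /*
             * Per the patch, the kernel returns -EINVAL for this register
             * when the vCPU does not have the Smstateen extension enabled,
             * so the ioctl() fails with errno == EINVAL in that case.
             */
            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }

Writing the register is symmetric: the same id is passed to KVM_SET_ONE_REG
with .addr pointing at the value to be written.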

index c161791f55cb4af7c8620dd9b90c9f61f4255e97..b1baf6f096a3502f6ac669b4d5bcad94c5e631a7 100644 (file)
@@ -94,6 +94,11 @@ struct kvm_riscv_aia_csr {
        unsigned long iprio2h;
 };
 
+/* Smstateen CSR for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
+struct kvm_riscv_smstateen_csr {
+       unsigned long sstateen0;
+};
+
 /* TIMER registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
 struct kvm_riscv_timer {
        __u64 frequency;
@@ -180,10 +185,13 @@ enum KVM_RISCV_SBI_EXT_ID {
 #define KVM_REG_RISCV_CSR              (0x03 << KVM_REG_RISCV_TYPE_SHIFT)
 #define KVM_REG_RISCV_CSR_GENERAL      (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT)
 #define KVM_REG_RISCV_CSR_AIA          (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_CSR_SMSTATEEN    (0x2 << KVM_REG_RISCV_SUBTYPE_SHIFT)
 #define KVM_REG_RISCV_CSR_REG(name)    \
                (offsetof(struct kvm_riscv_csr, name) / sizeof(unsigned long))
 #define KVM_REG_RISCV_CSR_AIA_REG(name)        \
        (offsetof(struct kvm_riscv_aia_csr, name) / sizeof(unsigned long))
+#define KVM_REG_RISCV_CSR_SMSTATEEN_REG(name)  \
+       (offsetof(struct kvm_riscv_smstateen_csr, name) / sizeof(unsigned long))
 
 /* Timer registers are mapped as type 4 */
 #define KVM_REG_RISCV_TIMER            (0x04 << KVM_REG_RISCV_TYPE_SHIFT)
index 9086e3abb52f8731a0c41c5d67f177fc936ff50b..388599fcf684cebfaace6bed9fb5520d0bacfb34 100644 (file)
@@ -382,6 +382,34 @@ static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
        return 0;
 }
 
+static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
+                                                  unsigned long reg_num,
+                                                  unsigned long reg_val)
+{
+       struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
+
+       if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
+               sizeof(unsigned long))
+               return -EINVAL;
+
+       ((unsigned long *)csr)[reg_num] = reg_val;
+       return 0;
+}
+
+static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
+                                           unsigned long reg_num,
+                                           unsigned long *out_val)
+{
+       struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
+
+       if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
+               sizeof(unsigned long))
+               return -EINVAL;
+
+       *out_val = ((unsigned long *)csr)[reg_num];
+       return 0;
+}
+
 static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
                                      const struct kvm_one_reg *reg)
 {
@@ -405,6 +433,12 @@ static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
        case KVM_REG_RISCV_CSR_AIA:
                rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
                break;
+       case KVM_REG_RISCV_CSR_SMSTATEEN:
+               rc = -EINVAL;
+               if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
+                       rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
+                                                             &reg_val);
+               break;
        default:
                rc = -ENOENT;
                break;
@@ -444,6 +478,12 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
        case KVM_REG_RISCV_CSR_AIA:
                rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
                break;
+       case KVM_REG_RISCV_CSR_SMSTATEEN:
+               rc = -EINVAL;
+               if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
+                       rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
+                                                             reg_val);
+               break;
        default:
                rc = -ENOENT;
                break;
@@ -700,6 +740,8 @@ static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
 
        if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
                n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
+       if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
+               n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);
 
        return n;
 }
@@ -708,7 +750,7 @@ static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
                                u64 __user *uindices)
 {
        int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
-       int n2 = 0;
+       int n2 = 0, n3 = 0;
 
        /* copy general csr regs */
        for (int i = 0; i < n1; i++) {
@@ -742,7 +784,25 @@ static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
                }
        }
 
-       return n1 + n2;
+       /* copy Smstateen csr regs */
+       if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
+               n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);
+
+               for (int i = 0; i < n3; i++) {
+                       u64 size = IS_ENABLED(CONFIG_32BIT) ?
+                                  KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+                       u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
+                                         KVM_REG_RISCV_CSR_SMSTATEEN | i;
+
+                       if (uindices) {
+                               if (put_user(reg, uindices))
+                                       return -EFAULT;
+                               uindices++;
+                       }
+               }
+       }
+
+       return n1 + n2 + n3;
 }
 
 static inline unsigned long num_timer_regs(void)