/* KVM one-reg ID for a double-precision FP register (f0..f31). */
#define RISCV_FP_D_REG(idx)  kvm_riscv_reg_id_u64(KVM_REG_RISCV_FP_D, idx)

/*
 * KVM one-reg ID for a Vector extension CSR (vstart, vl, vtype, ...),
 * sized to the guest's XLEN via kvm_riscv_reg_id_ulong().
 */
#define RISCV_VECTOR_CSR_REG(env, name) \
    kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_VECTOR, \
                           KVM_REG_RISCV_VECTOR_CSR_REG(name))

#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \
do { \
int _ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), ®); \
KVM_MISA_CFG(RVH, KVM_RISCV_ISA_EXT_H),
KVM_MISA_CFG(RVI, KVM_RISCV_ISA_EXT_I),
KVM_MISA_CFG(RVM, KVM_RISCV_ISA_EXT_M),
+ KVM_MISA_CFG(RVV, KVM_RISCV_ISA_EXT_V),
};
static void kvm_cpu_get_misa_ext_cfg(Object *obj, Visitor *v,
env->kvm_timer_dirty = false;
}
+static int kvm_riscv_get_regs_vector(CPUState *cs)
+{
+ CPURISCVState *env = &RISCV_CPU(cs)->env;
+ target_ulong reg;
+ int ret = 0;
+
+ if (!riscv_has_ext(env, RVV)) {
+ return 0;
+ }
+
+ ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), ®);
+ if (ret) {
+ return ret;
+ }
+ env->vstart = reg;
+
+ ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), ®);
+ if (ret) {
+ return ret;
+ }
+ env->vl = reg;
+
+ ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), ®);
+ if (ret) {
+ return ret;
+ }
+ env->vtype = reg;
+
+ return 0;
+}
+
+static int kvm_riscv_put_regs_vector(CPUState *cs)
+{
+ CPURISCVState *env = &RISCV_CPU(cs)->env;
+ target_ulong reg;
+ int ret = 0;
+
+ if (!riscv_has_ext(env, RVV)) {
+ return 0;
+ }
+
+ reg = env->vstart;
+ ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), ®);
+ if (ret) {
+ return ret;
+ }
+
+ reg = env->vl;
+ ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), ®);
+ if (ret) {
+ return ret;
+ }
+
+ reg = env->vtype;
+ ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), ®);
+
+ return ret;
+}
+
typedef struct KVMScratchCPU {
int kvmfd;
int vmfd;
return ret;
}
+ ret = kvm_riscv_get_regs_vector(cs);
+ if (ret) {
+ return ret;
+ }
+
return ret;
}
return ret;
}
+ ret = kvm_riscv_put_regs_vector(cs);
+ if (ret) {
+ return ret;
+ }
+
if (KVM_PUT_RESET_STATE == level) {
RISCVCPU *cpu = RISCV_CPU(cs);
if (cs->cpu_index == 0) {