unsigned long hvip;
unsigned long vsatp;
unsigned long scounteren;
+ unsigned long senvcfg;
};
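
For reference, senvcfg holds per-hart environment controls that S-mode applies to the next-less-privileged mode. Per the RISC-V privileged spec it carries at least the fields sketched below; the define names here are this note's own shorthand, while the kernel's equivalents are the ENVCFG_* macros in arch/riscv/include/asm/csr.h:

/* Illustrative senvcfg field layout (names are not kernel macros): */
#define SENVCFG_FIOM	(1UL << 0)	/* Fence of I/O implies Memory */
#define SENVCFG_CBIE	(3UL << 4)	/* Zicbom: cbo.inval behaviour */
#define SENVCFG_CBCFE	(1UL << 6)	/* Zicbom: cbo.clean/cbo.flush enable */
#define SENVCFG_CBZE	(1UL << 7)	/* Zicboz: cbo.zero enable */
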
struct kvm_vcpu_arch {
unsigned long host_sscratch;
unsigned long host_stvec;
unsigned long host_scounteren;
+ unsigned long host_senvcfg;

/* CPU context of Host */
struct kvm_cpu_context host_context;
kvm_riscv_vcpu_aia_update_hvip(vcpu);
}
+static __always_inline void kvm_riscv_vcpu_swap_in_guest_state(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+
+ vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg);
+}
+
+static __always_inline void kvm_riscv_vcpu_swap_in_host_state(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+
+ csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg);
+}
+
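Both helpers lean on csr_swap(), which compiles to a single csrrw: the CSR takes the new value and the previous value is returned in the same instruction, so there is no window in which the register holds neither the host's nor the guest's state. For reference, arch/riscv/include/asm/csr.h defines it roughly as:

#define csr_swap(csr, val)						\
({									\
	unsigned long __v = (unsigned long)(val);			\
	__asm__ __volatile__ ("csrrw %0, " __ASM_STR(csr) ", %1"	\
			      : "=r" (__v) : "rK" (__v)			\
			      : "memory");				\
	__v;								\
})
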
/*
* Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
* the vCPU is running.
*/
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
+ kvm_riscv_vcpu_swap_in_guest_state(vcpu);
guest_state_enter_irqoff();
__kvm_riscv_switch_to(&vcpu->arch);
vcpu->arch.last_exit_cpu = vcpu->cpu;
guest_state_exit_irqoff();
+ kvm_riscv_vcpu_swap_in_host_state(vcpu);
}
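
Unlike most S-level CSRs, senvcfg has no VS-level shadow, so a guest running in VS-mode reads and writes the hardware register directly without trapping; the only way to give guest and host independent values is to exchange them around every guest run, as done above. A minimal user-space model of that round trip, with csr_swap() mocked as a plain variable exchange (everything below is illustrative, not kernel code):

#include <assert.h>

static unsigned long senvcfg_hw;	/* stand-in for the real CSR */

static unsigned long fake_csr_swap(unsigned long val)
{
	unsigned long old = senvcfg_hw;

	senvcfg_hw = val;
	return old;
}

int main(void)
{
	unsigned long host_senvcfg, guest_senvcfg = 0xb0;

	senvcfg_hw = 0x1;	/* host's live senvcfg */

	/* VM entry: as in kvm_riscv_vcpu_swap_in_guest_state() */
	host_senvcfg = fake_csr_swap(guest_senvcfg);

	senvcfg_hw = 0xb1;	/* guest runs and rewrites senvcfg */

	/* VM exit: as in kvm_riscv_vcpu_swap_in_host_state() */
	guest_senvcfg = fake_csr_swap(host_senvcfg);

	/* Host value restored; guest's update preserved for next entry. */
	assert(senvcfg_hw == 0x1 && guest_senvcfg == 0xb1);
	return 0;
}
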
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)