RISCV: KVM: Introduce vcpu->reset_cntx_lock
author    Yong-Xuan Wang <yongxuan.wang@sifive.com>
Wed, 17 Apr 2024 07:45:26 +0000 (15:45 +0800)
committer Anup Patel <anup@brainfault.org>
Mon, 22 Apr 2024 05:09:03 +0000 (10:39 +0530)
Originally, the use of kvm->lock in SBI_EXT_HSM_HART_START also avoided
simultaneous updates to the reset context of the target VCPU. Since
that lock has been replaced with vcpu->mp_state_lock, and this new lock
protects only vcpu->mp_state, we have to add a separate lock for
vcpu->reset_cntx.
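
To make the race concrete, below is a minimal userspace sketch (an
analogue, not the kernel code): pthread_spinlock_t stands in for
spinlock_t, and struct vcpu, hsm_vcpu_start(), and reset_vcpu() are
simplified stand-ins for the structures and paths touched by this
patch. Without the lock, the reset path could memcpy() a half-written
sepc/a0/a1 triple while another VCPU concurrently services
SBI_EXT_HSM_HART_START.

    #include <pthread.h>
    #include <string.h>

    /* Simplified stand-ins for the kernel structures involved. */
    struct cpu_context {
            unsigned long sepc, a0, a1;
    };

    struct vcpu {
            pthread_spinlock_t reset_cntx_lock; /* stands in for spinlock_t */
            struct cpu_context guest_reset_context;
            struct cpu_context guest_context;
    };

    static void vcpu_init(struct vcpu *vcpu)
    {
            /* Mirrors spin_lock_init() in kvm_arch_vcpu_create(). */
            pthread_spin_init(&vcpu->reset_cntx_lock, PTHREAD_PROCESS_PRIVATE);
    }

    /* Writer side: HART_START fills the target VCPU's reset context. */
    static void hsm_vcpu_start(struct vcpu *target, unsigned long start_addr,
                               unsigned long vcpuid, unsigned long priv)
    {
            pthread_spin_lock(&target->reset_cntx_lock);
            target->guest_reset_context.sepc = start_addr; /* start address */
            target->guest_reset_context.a0 = vcpuid;       /* target hartid */
            target->guest_reset_context.a1 = priv;         /* private data  */
            pthread_spin_unlock(&target->reset_cntx_lock);
    }

    /* Reader side: VCPU reset copies the reset context into the live one. */
    static void reset_vcpu(struct vcpu *vcpu)
    {
            pthread_spin_lock(&vcpu->reset_cntx_lock);
            memcpy(&vcpu->guest_context, &vcpu->guest_reset_context,
                   sizeof(vcpu->guest_context));
            pthread_spin_unlock(&vcpu->reset_cntx_lock);
    }

The hunks below apply the same pattern: the SBI HSM start path fills
vcpu->arch.guest_reset_context under the new lock, and
kvm_riscv_reset_vcpu() copies it out under the same lock.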

Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Link: https://lore.kernel.org/r/20240417074528.16506-3-yongxuan.wang@sifive.com
Signed-off-by: Anup Patel <anup@brainfault.org>
arch/riscv/include/asm/kvm_host.h
arch/riscv/kvm/vcpu.c
arch/riscv/kvm/vcpu_sbi_hsm.c

index 48691f55d1a57dea133832a64d8a6f93167648ca..d962812785868bb09d39fa01eff5c918f23bbdaa 100644
@@ -223,6 +223,7 @@ struct kvm_vcpu_arch {
 
        /* CPU context upon Guest VCPU reset */
        struct kvm_cpu_context guest_reset_context;
+       spinlock_t reset_cntx_lock;
 
        /* CPU CSR context upon Guest VCPU reset */
        struct kvm_vcpu_csr guest_reset_csr;
index 57d78be4e6ad74b49a0b81c2a01bbfedd7b8914b..1cef82047eeadbfebc306b53b88b1a4bf4f86ebc 100644
@@ -64,7 +64,9 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 
        memcpy(csr, reset_csr, sizeof(*csr));
 
+       spin_lock(&vcpu->arch.reset_cntx_lock);
        memcpy(cntx, reset_cntx, sizeof(*cntx));
+       spin_unlock(&vcpu->arch.reset_cntx_lock);
 
        kvm_riscv_vcpu_fp_reset(vcpu);
 
@@ -121,12 +123,16 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        spin_lock_init(&vcpu->arch.hfence_lock);
 
        /* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
+       spin_lock_init(&vcpu->arch.reset_cntx_lock);
+
+       spin_lock(&vcpu->arch.reset_cntx_lock);
        cntx = &vcpu->arch.guest_reset_context;
        cntx->sstatus = SR_SPP | SR_SPIE;
        cntx->hstatus = 0;
        cntx->hstatus |= HSTATUS_VTW;
        cntx->hstatus |= HSTATUS_SPVP;
        cntx->hstatus |= HSTATUS_SPV;
+       spin_unlock(&vcpu->arch.reset_cntx_lock);
 
        if (kvm_riscv_vcpu_alloc_vector_context(vcpu, cntx))
                return -ENOMEM;
index 827d946ab8714043de664f000ab4314436b29e8a..dce667f4b6ab08d023f54ac64cf072b65118bc1b 100644
@@ -31,6 +31,7 @@ static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
                goto out;
        }
 
+       spin_lock(&target_vcpu->arch.reset_cntx_lock);
        reset_cntx = &target_vcpu->arch.guest_reset_context;
        /* start address */
        reset_cntx->sepc = cp->a1;
@@ -38,6 +39,8 @@ static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
        reset_cntx->a0 = target_vcpuid;
        /* private data passed from kernel */
        reset_cntx->a1 = cp->a2;
+       spin_unlock(&target_vcpu->arch.reset_cntx_lock);
+
        kvm_make_request(KVM_REQ_VCPU_RESET, target_vcpu);
 
        __kvm_riscv_vcpu_power_on(target_vcpu);