RISC-V: KVM: Reorganize SBI code by moving SBI v0.1 to its own file
authorAtish Patra <atish.patra@wdc.com>
Thu, 18 Nov 2021 08:39:09 +0000 (00:39 -0800)
committerAnup Patel <anup@brainfault.org>
Thu, 6 Jan 2022 09:27:16 +0000 (14:57 +0530)
With SBI v0.2, there may be more SBI extensions in the future. It makes more
sense to group related extensions in separate files. The guest kernel will
choose the appropriate SBI version dynamically.

Move the existing implementation to a separate file so that it can be
removed in future without much conflict.

Signed-off-by: Atish Patra <atish.patra@wdc.com>
Signed-off-by: Atish Patra <atishp@rivosinc.com>
Signed-off-by: Anup Patel <anup.patel@wdc.com>
arch/riscv/include/asm/kvm_vcpu_sbi.h
arch/riscv/kvm/Makefile
arch/riscv/kvm/vcpu_sbi.c
arch/riscv/kvm/vcpu_sbi_v01.c [new file with mode: 0644]

index 1a4cb0db2d0b9541b1e04da1b2a313474395b3ad..704151969ceb1be6330bf81a46d1ba0a2e633db2 100644 (file)
@@ -25,5 +25,7 @@ struct kvm_vcpu_sbi_extension {
                       bool *exit);
 };
 
+void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
 const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid);
+
 #endif /* __RISCV_KVM_VCPU_SBI_H__ */
index 3005902253487160bb85b87c039c8bcfeb2ac89c..892c60b07823da4aa846f236d26c67d023f4d67f 100644 (file)
@@ -19,4 +19,5 @@ kvm-y += vcpu_exit.o
 kvm-y += vcpu_fp.o
 kvm-y += vcpu_switch.o
 kvm-y += vcpu_sbi.o
+kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o
 kvm-y += vcpu_timer.o
index 8c5b50d2b27e8a8a98d13279b2aa8996b48f27cb..a8e0191cd9fcc484d1e5c3844e6f577b32254288 100644 (file)
@@ -9,9 +9,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kvm_host.h>
-#include <asm/csr.h>
 #include <asm/sbi.h>
-#include <asm/kvm_vcpu_timer.h>
 #include <asm/kvm_vcpu_sbi.h>
 
 static int kvm_linux_err_map_sbi(int err)
@@ -32,8 +30,21 @@ static int kvm_linux_err_map_sbi(int err)
        };
 }
 
-static void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu,
-                                      struct kvm_run *run)
+#ifdef CONFIG_RISCV_SBI_V01
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
+#else
+static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
+       .extid_start = -1UL,
+       .extid_end = -1UL,
+       .handler = NULL,
+};
+#endif
+
+static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
+       &vcpu_sbi_ext_v01,
+};
+
+void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
 
@@ -71,123 +82,6 @@ int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
        return 0;
 }
 
-#ifdef CONFIG_RISCV_SBI_V01
-
-static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu,
-                                   struct kvm_run *run, u32 type)
-{
-       unsigned long i;
-       struct kvm_vcpu *tmp;
-
-       kvm_for_each_vcpu(i, tmp, vcpu->kvm)
-               tmp->arch.power_off = true;
-       kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
-
-       memset(&run->system_event, 0, sizeof(run->system_event));
-       run->system_event.type = type;
-       run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
-}
-
-static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
-                                     unsigned long *out_val,
-                                     struct kvm_cpu_trap *utrap,
-                                     bool *exit)
-{
-       ulong hmask;
-       int i, ret = 0;
-       u64 next_cycle;
-       struct kvm_vcpu *rvcpu;
-       struct cpumask cm, hm;
-       struct kvm *kvm = vcpu->kvm;
-       struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
-
-       switch (cp->a7) {
-       case SBI_EXT_0_1_CONSOLE_GETCHAR:
-       case SBI_EXT_0_1_CONSOLE_PUTCHAR:
-               /*
-                * The CONSOLE_GETCHAR/CONSOLE_PUTCHAR SBI calls cannot be
-                * handled in kernel so we forward these to user-space
-                */
-               kvm_riscv_vcpu_sbi_forward(vcpu, run);
-               *exit = true;
-               break;
-       case SBI_EXT_0_1_SET_TIMER:
-#if __riscv_xlen == 32
-               next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
-#else
-               next_cycle = (u64)cp->a0;
-#endif
-               ret = kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
-               break;
-       case SBI_EXT_0_1_CLEAR_IPI:
-               ret = kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_SOFT);
-               break;
-       case SBI_EXT_0_1_SEND_IPI:
-               if (cp->a0)
-                       hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
-                                                          utrap);
-               else
-                       hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
-               if (utrap->scause)
-                       break;
-
-               for_each_set_bit(i, &hmask, BITS_PER_LONG) {
-                       rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
-                       ret = kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_VS_SOFT);
-                       if (ret < 0)
-                               break;
-               }
-               break;
-       case SBI_EXT_0_1_SHUTDOWN:
-               kvm_sbi_system_shutdown(vcpu, run, KVM_SYSTEM_EVENT_SHUTDOWN);
-               *exit = true;
-               break;
-       case SBI_EXT_0_1_REMOTE_FENCE_I:
-       case SBI_EXT_0_1_REMOTE_SFENCE_VMA:
-       case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID:
-               if (cp->a0)
-                       hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
-                                                          utrap);
-               else
-                       hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
-               if (utrap->scause)
-                       break;
-
-               cpumask_clear(&cm);
-               for_each_set_bit(i, &hmask, BITS_PER_LONG) {
-                       rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
-                       if (rvcpu->cpu < 0)
-                               continue;
-                       cpumask_set_cpu(rvcpu->cpu, &cm);
-               }
-               riscv_cpuid_to_hartid_mask(&cm, &hm);
-               if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I)
-                       ret = sbi_remote_fence_i(cpumask_bits(&hm));
-               else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA)
-                       ret = sbi_remote_hfence_vvma(cpumask_bits(&hm),
-                                               cp->a1, cp->a2);
-               else
-                       ret = sbi_remote_hfence_vvma_asid(cpumask_bits(&hm),
-                                               cp->a1, cp->a2, cp->a3);
-               break;
-       default:
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
-}
-
-const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
-       .extid_start = SBI_EXT_0_1_SET_TIMER,
-       .extid_end = SBI_EXT_0_1_SHUTDOWN,
-       .handler = kvm_sbi_ext_v01_handler,
-};
-
-static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
-       &vcpu_sbi_ext_v01,
-};
-
 const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid)
 {
        int i = 0;
@@ -214,9 +108,11 @@ int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
        sbi_ext = kvm_vcpu_sbi_find_ext(cp->a7);
        if (sbi_ext && sbi_ext->handler) {
+#ifdef CONFIG_RISCV_SBI_V01
                if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
                    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
                        ext_is_v01 = true;
+#endif
                ret = sbi_ext->handler(vcpu, run, &out_val, &utrap, &userspace_exit);
        } else {
                /* Return error for unsupported SBI calls */
@@ -256,13 +152,3 @@ ecall_done:
 
        return ret;
 }
-
-#else
-
-int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-       kvm_riscv_vcpu_sbi_forward(vcpu, run);
-       return 0;
-}
-
-#endif
diff --git a/arch/riscv/kvm/vcpu_sbi_v01.c b/arch/riscv/kvm/vcpu_sbi_v01.c
new file mode 100644 (file)
index 0000000..4c7e13e
--- /dev/null
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ *     Atish Patra <atish.patra@wdc.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <asm/csr.h>
+#include <asm/sbi.h>
+#include <asm/kvm_vcpu_timer.h>
+#include <asm/kvm_vcpu_sbi.h>
+
+static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu,
+                                   struct kvm_run *run, u32 type)
+{
+       unsigned long i;
+       struct kvm_vcpu *tmp;
+
+       kvm_for_each_vcpu(i, tmp, vcpu->kvm)
+               tmp->arch.power_off = true;
+       kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
+
+       memset(&run->system_event, 0, sizeof(run->system_event));
+       run->system_event.type = type;
+       run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+}
+
+static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                                     unsigned long *out_val,
+                                     struct kvm_cpu_trap *utrap,
+                                     bool *exit)
+{
+       ulong hmask;
+       int i, ret = 0;
+       u64 next_cycle;
+       struct kvm_vcpu *rvcpu;
+       struct cpumask cm, hm;
+       struct kvm *kvm = vcpu->kvm;
+       struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+
+       switch (cp->a7) {
+       case SBI_EXT_0_1_CONSOLE_GETCHAR:
+       case SBI_EXT_0_1_CONSOLE_PUTCHAR:
+               /*
+                * The CONSOLE_GETCHAR/CONSOLE_PUTCHAR SBI calls cannot be
+                * handled in kernel so we forward these to user-space
+                */
+               kvm_riscv_vcpu_sbi_forward(vcpu, run);
+               *exit = true;
+               break;
+       case SBI_EXT_0_1_SET_TIMER:
+#if __riscv_xlen == 32
+               next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
+#else
+               next_cycle = (u64)cp->a0;
+#endif
+               ret = kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
+               break;
+       case SBI_EXT_0_1_CLEAR_IPI:
+               ret = kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_SOFT);
+               break;
+       case SBI_EXT_0_1_SEND_IPI:
+               if (cp->a0)
+                       hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
+                                                          utrap);
+               else
+                       hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
+               if (utrap->scause)
+                       break;
+
+               for_each_set_bit(i, &hmask, BITS_PER_LONG) {
+                       rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
+                       ret = kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_VS_SOFT);
+                       if (ret < 0)
+                               break;
+               }
+               break;
+       case SBI_EXT_0_1_SHUTDOWN:
+               kvm_sbi_system_shutdown(vcpu, run, KVM_SYSTEM_EVENT_SHUTDOWN);
+               *exit = true;
+               break;
+       case SBI_EXT_0_1_REMOTE_FENCE_I:
+       case SBI_EXT_0_1_REMOTE_SFENCE_VMA:
+       case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID:
+               if (cp->a0)
+                       hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
+                                                          utrap);
+               else
+                       hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
+               if (utrap->scause)
+                       break;
+
+               cpumask_clear(&cm);
+               for_each_set_bit(i, &hmask, BITS_PER_LONG) {
+                       rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
+                       if (rvcpu->cpu < 0)
+                               continue;
+                       cpumask_set_cpu(rvcpu->cpu, &cm);
+               }
+               riscv_cpuid_to_hartid_mask(&cm, &hm);
+               if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I)
+                       ret = sbi_remote_fence_i(cpumask_bits(&hm));
+               else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA)
+                       ret = sbi_remote_hfence_vvma(cpumask_bits(&hm),
+                                               cp->a1, cp->a2);
+               else
+                       ret = sbi_remote_hfence_vvma_asid(cpumask_bits(&hm),
+                                               cp->a1, cp->a2, cp->a3);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       };
+
+       return ret;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
+       .extid_start = SBI_EXT_0_1_SET_TIMER,
+       .extid_end = SBI_EXT_0_1_SHUTDOWN,
+       .handler = kvm_sbi_ext_v01_handler,
+};