RISC-V: KVM: Initial skeletal support for AIA
authorAnup Patel <apatel@ventanamicro.com>
Tue, 10 Jan 2023 11:14:25 +0000 (16:44 +0530)
committerAnup Patel <anup@brainfault.org>
Fri, 21 Apr 2023 12:15:48 +0000 (17:45 +0530)
To incrementally implement AIA support, we first add minimal skeletal
support which only compiles and detects AIA hardware support at
boot time but does not provide any functionality.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Signed-off-by: Anup Patel <anup@brainfault.org>
arch/riscv/include/asm/hwcap.h
arch/riscv/include/asm/kvm_aia.h [new file with mode: 0644]
arch/riscv/include/asm/kvm_host.h
arch/riscv/kvm/Makefile
arch/riscv/kvm/aia.c [new file with mode: 0644]
arch/riscv/kvm/main.c
arch/riscv/kvm/vcpu.c
arch/riscv/kvm/vcpu_insn.c
arch/riscv/kvm/vm.c

index 74f5dab2148f93a4d4ec2789a0e9cc9be04681f8..ab2abf5615208689ac8c684d584e6b815baef26d 100644 (file)
 #define RISCV_ISA_EXT_MAX              64
 #define RISCV_ISA_EXT_NAME_LEN_MAX     32
 
+#ifdef CONFIG_RISCV_M_MODE
+#define RISCV_ISA_EXT_SxAIA            RISCV_ISA_EXT_SMAIA
+#else
+#define RISCV_ISA_EXT_SxAIA            RISCV_ISA_EXT_SSAIA
+#endif
+
 #ifndef __ASSEMBLY__
 
 #include <linux/jump_label.h>
diff --git a/arch/riscv/include/asm/kvm_aia.h b/arch/riscv/include/asm/kvm_aia.h
new file mode 100644 (file)
index 0000000..258a835
--- /dev/null
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ *
+ * Authors:
+ *     Anup Patel <apatel@ventanamicro.com>
+ */
+
+#ifndef __KVM_RISCV_AIA_H
+#define __KVM_RISCV_AIA_H
+
+#include <linux/jump_label.h>
+#include <linux/kvm_types.h>
+
+struct kvm_aia {
+       /* In-kernel irqchip created */
+       bool            in_kernel;
+
+       /* In-kernel irqchip initialized */
+       bool            initialized;
+};
+
+struct kvm_vcpu_aia {
+};
+
+#define kvm_riscv_aia_initialized(k)   ((k)->arch.aia.initialized)
+
+#define irqchip_in_kernel(k)           ((k)->arch.aia.in_kernel)
+
+DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
+#define kvm_riscv_aia_available() \
+       static_branch_unlikely(&kvm_riscv_aia_available)
+
+static inline void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu,
+                                                    u64 mask)
+{
+       return false;
+}
+
+static inline void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
+{
+}
+
+static inline void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
+                                            unsigned long reg_num,
+                                            unsigned long *out_val)
+{
+       *out_val = 0;
+       return 0;
+}
+
+static inline int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
+                                            unsigned long reg_num,
+                                            unsigned long val)
+{
+       return 0;
+}
+
+#define KVM_RISCV_VCPU_AIA_CSR_FUNCS
+
+static inline int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
+{
+       return 1;
+}
+
+static inline void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+static inline void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline void kvm_riscv_aia_init_vm(struct kvm *kvm)
+{
+}
+
+static inline void kvm_riscv_aia_destroy_vm(struct kvm *kvm)
+{
+}
+
+void kvm_riscv_aia_enable(void);
+void kvm_riscv_aia_disable(void);
+int kvm_riscv_aia_init(void);
+void kvm_riscv_aia_exit(void);
+
+#endif
index cc7da66ee0c0be982d0a880b7180c149c7414486..3157cf748df1674474614ba7b71bfa7f42f81e7a 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/kvm_types.h>
 #include <linux/spinlock.h>
 #include <asm/hwcap.h>
+#include <asm/kvm_aia.h>
 #include <asm/kvm_vcpu_fp.h>
 #include <asm/kvm_vcpu_insn.h>
 #include <asm/kvm_vcpu_sbi.h>
@@ -94,6 +95,9 @@ struct kvm_arch {
 
        /* Guest Timer */
        struct kvm_guest_timer timer;
+
+       /* AIA Guest/VM context */
+       struct kvm_aia aia;
 };
 
 struct kvm_cpu_trap {
@@ -221,6 +225,9 @@ struct kvm_vcpu_arch {
        /* SBI context */
        struct kvm_vcpu_sbi_context sbi_context;
 
+       /* AIA VCPU context */
+       struct kvm_vcpu_aia aia_context;
+
        /* Cache pages needed to program page tables with spinlock held */
        struct kvm_mmu_memory_cache mmu_page_cache;
 
index 278e97c06e0a7f28fa0fb6faa4b1ed7127365732..8031b8912a0d0bb8bc3a640f0d8ec43fcd39cdf5 100644 (file)
@@ -26,3 +26,4 @@ kvm-y += vcpu_sbi_replace.o
 kvm-y += vcpu_sbi_hsm.o
 kvm-y += vcpu_timer.o
 kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_pmu.o vcpu_sbi_pmu.o
+kvm-y += aia.o
diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c
new file mode 100644 (file)
index 0000000..7a63333
--- /dev/null
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ *
+ * Authors:
+ *     Anup Patel <apatel@ventanamicro.com>
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/hwcap.h>
+
+DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
+
+static void aia_set_hvictl(bool ext_irq_pending)
+{
+       unsigned long hvictl;
+
+       /*
+        * HVICTL.IID == 9 and HVICTL.IPRIO == 0 represents
+        * no interrupt in HVICTL.
+        */
+
+       hvictl = (IRQ_S_EXT << HVICTL_IID_SHIFT) & HVICTL_IID;
+       hvictl |= ext_irq_pending;
+       csr_write(CSR_HVICTL, hvictl);
+}
+
+void kvm_riscv_aia_enable(void)
+{
+       if (!kvm_riscv_aia_available())
+               return;
+
+       aia_set_hvictl(false);
+       csr_write(CSR_HVIPRIO1, 0x0);
+       csr_write(CSR_HVIPRIO2, 0x0);
+#ifdef CONFIG_32BIT
+       csr_write(CSR_HVIPH, 0x0);
+       csr_write(CSR_HIDELEGH, 0x0);
+       csr_write(CSR_HVIPRIO1H, 0x0);
+       csr_write(CSR_HVIPRIO2H, 0x0);
+#endif
+}
+
+void kvm_riscv_aia_disable(void)
+{
+       if (!kvm_riscv_aia_available())
+               return;
+
+       aia_set_hvictl(false);
+}
+
+int kvm_riscv_aia_init(void)
+{
+       if (!riscv_isa_extension_available(NULL, SxAIA))
+               return -ENODEV;
+
+       /* Enable KVM AIA support */
+       static_branch_enable(&kvm_riscv_aia_available);
+
+       return 0;
+}
+
+void kvm_riscv_aia_exit(void)
+{
+}
index 41ad7639a17bfca59b39cafd0248b55d3772885c..6396352b4e4db2a1d229f966fd1955b311e4b1df 100644 (file)
@@ -44,11 +44,15 @@ int kvm_arch_hardware_enable(void)
 
        csr_write(CSR_HVIP, 0);
 
+       kvm_riscv_aia_enable();
+
        return 0;
 }
 
 void kvm_arch_hardware_disable(void)
 {
+       kvm_riscv_aia_disable();
+
        /*
         * After clearing the hideleg CSR, the host kernel will receive
         * spurious interrupts if hvip CSR has pending interrupts and the
@@ -63,6 +67,7 @@ void kvm_arch_hardware_disable(void)
 
 static int __init riscv_kvm_init(void)
 {
+       int rc;
        const char *str;
 
        if (!riscv_isa_extension_available(NULL, h)) {
@@ -84,6 +89,10 @@ static int __init riscv_kvm_init(void)
 
        kvm_riscv_gstage_vmid_detect();
 
+       rc = kvm_riscv_aia_init();
+       if (rc && rc != -ENODEV)
+               return rc;
+
        kvm_info("hypervisor extension available\n");
 
        switch (kvm_riscv_gstage_mode()) {
@@ -106,12 +115,23 @@ static int __init riscv_kvm_init(void)
 
        kvm_info("VMID %ld bits available\n", kvm_riscv_gstage_vmid_bits());
 
-       return kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+       if (kvm_riscv_aia_available())
+               kvm_info("AIA available\n");
+
+       rc = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+       if (rc) {
+               kvm_riscv_aia_exit();
+               return rc;
+       }
+
+       return 0;
 }
 module_init(riscv_kvm_init);
 
 static void __exit riscv_kvm_exit(void)
 {
+       kvm_riscv_aia_exit();
+
        kvm_exit();
 }
 module_exit(riscv_kvm_exit);
index 02b49cb9456198d351ed2b15a0f24c418b689ece..b46e9cc929389d6e159d379f7d6abbf614194e41 100644 (file)
@@ -137,6 +137,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 
        kvm_riscv_vcpu_timer_reset(vcpu);
 
+       kvm_riscv_vcpu_aia_reset(vcpu);
+
        WRITE_ONCE(vcpu->arch.irqs_pending, 0);
        WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);
 
@@ -159,6 +161,7 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 
 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
+       int rc;
        struct kvm_cpu_context *cntx;
        struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
        unsigned long host_isa, i;
@@ -201,6 +204,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        /* setup performance monitoring */
        kvm_riscv_vcpu_pmu_init(vcpu);
 
+       /* Setup VCPU AIA */
+       rc = kvm_riscv_vcpu_aia_init(vcpu);
+       if (rc)
+               return rc;
+
        /* Reset VCPU */
        kvm_riscv_reset_vcpu(vcpu);
 
@@ -220,6 +228,9 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
+       /* Cleanup VCPU AIA context */
+       kvm_riscv_vcpu_aia_deinit(vcpu);
+
        /* Cleanup VCPU timer */
        kvm_riscv_vcpu_timer_deinit(vcpu);
 
@@ -741,6 +752,9 @@ void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
                csr->hvip &= ~mask;
                csr->hvip |= val;
        }
+
+       /* Flush AIA high interrupts */
+       kvm_riscv_vcpu_aia_flush_interrupts(vcpu);
 }
 
 void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
@@ -766,6 +780,9 @@ void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
                }
        }
 
+       /* Sync-up AIA high interrupts */
+       kvm_riscv_vcpu_aia_sync_interrupts(vcpu);
+
        /* Sync-up timer CSRs */
        kvm_riscv_vcpu_timer_sync(vcpu);
 }
@@ -802,10 +819,15 @@ int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
 
 bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask)
 {
-       unsigned long ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
-                           << VSIP_TO_HVIP_SHIFT) & mask;
+       unsigned long ie;
+
+       ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
+               << VSIP_TO_HVIP_SHIFT) & mask;
+       if (READ_ONCE(vcpu->arch.irqs_pending) & ie)
+               return true;
 
-       return (READ_ONCE(vcpu->arch.irqs_pending) & ie) ? true : false;
+       /* Check AIA high interrupts */
+       return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
 }
 
 void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
@@ -901,6 +923,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
                                        vcpu->arch.isa);
 
+       kvm_riscv_vcpu_aia_load(vcpu, cpu);
+
        vcpu->cpu = cpu;
 }
 
@@ -910,6 +934,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 
        vcpu->cpu = -1;
 
+       kvm_riscv_vcpu_aia_put(vcpu);
+
        kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
                                     vcpu->arch.isa);
        kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
@@ -977,6 +1003,7 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
 
        csr_write(CSR_HVIP, csr->hvip);
+       kvm_riscv_vcpu_aia_update_hvip(vcpu);
 }
 
 /*
@@ -1049,6 +1076,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
                kvm_riscv_check_vcpu_requests(vcpu);
 
+               preempt_disable();
+
+               /* Update AIA HW state before entering guest */
+               ret = kvm_riscv_vcpu_aia_update(vcpu);
+               if (ret <= 0) {
+                       preempt_enable();
+                       continue;
+               }
+
                local_irq_disable();
 
                /*
@@ -1077,6 +1113,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                    xfer_to_guest_mode_work_pending()) {
                        vcpu->mode = OUTSIDE_GUEST_MODE;
                        local_irq_enable();
+                       preempt_enable();
                        kvm_vcpu_srcu_read_lock(vcpu);
                        continue;
                }
@@ -1110,8 +1147,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                /* Syncup interrupts state with HW */
                kvm_riscv_vcpu_sync_interrupts(vcpu);
 
-               preempt_disable();
-
                /*
                 * We must ensure that any pending interrupts are taken before
                 * we exit guest timing so that timer ticks are accounted as
index f689337b78ff9c01a51c250c777fd501dbb11ab4..7a6abed41bc170b9545662c3c9387b109ca2db3f 100644 (file)
@@ -214,6 +214,7 @@ struct csr_func {
 };
 
 static const struct csr_func csr_funcs[] = {
+       KVM_RISCV_VCPU_AIA_CSR_FUNCS
        KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS
 };
 
index 65a964d7e70d89ec988b1e9b7392abd8cdfc7317..bc03d2ddcb51b7d7cec99bfec6a2c4be112f9a7a 100644 (file)
@@ -41,6 +41,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
                return r;
        }
 
+       kvm_riscv_aia_init_vm(kvm);
+
        kvm_riscv_guest_timer_init(kvm);
 
        return 0;
@@ -49,6 +51,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
        kvm_destroy_vcpus(kvm);
+
+       kvm_riscv_aia_destroy_vm(kvm);
 }
 
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)