KVM: arm64: Move host EL1 code out of hyp/ directory
authorWill Deacon <will@kernel.org>
Thu, 2 Dec 2021 17:10:48 +0000 (17:10 +0000)
committerMarc Zyngier <maz@kernel.org>
Mon, 6 Dec 2021 08:37:03 +0000 (08:37 +0000)
kvm/hyp/reserved_mem.c contains host code executing at EL1 and is not
linked into the hypervisor object. Move the file into kvm/pkvm.c and
rework the headers so that the definitions shared between the host and
the hypervisor live in asm/kvm_pkvm.h.

Signed-off-by: Will Deacon <will@kernel.org>
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20211202171048.26924-4-will@kernel.org
arch/arm64/include/asm/kvm_pkvm.h [new file with mode: 0644]
arch/arm64/kvm/Makefile
arch/arm64/kvm/hyp/Makefile
arch/arm64/kvm/hyp/include/nvhe/mm.h
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/hyp/nvhe/mm.c
arch/arm64/kvm/hyp/nvhe/setup.c
arch/arm64/kvm/hyp/reserved_mem.c [deleted file]
arch/arm64/kvm/pkvm.c [new file with mode: 0644]

diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
new file mode 100644 (file)
index 0000000..9f4ad2a
--- /dev/null
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 - Google LLC
+ * Author: Quentin Perret <qperret@google.com>
+ */
+#ifndef __ARM64_KVM_PKVM_H__
+#define __ARM64_KVM_PKVM_H__
+
+#include <linux/memblock.h>
+#include <asm/kvm_pgtable.h>
+
+#define HYP_MEMBLOCK_REGIONS 128 /* capacity of the hyp_memory[] array */
+
+extern struct memblock_region kvm_nvhe_sym(hyp_memory)[]; /* hyp's copy of the host memblocks */
+extern unsigned int kvm_nvhe_sym(hyp_memblock_nr); /* number of valid entries in hyp_memory[] */
+
+static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
+{
+       unsigned long total = 0, i;
+
+       /* Provision the worst case scenario */
+       for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
+               nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE); /* tables needed at this level */
+               total += nr_pages;
+       }
+
+       return total;
+}
+
+static inline unsigned long __hyp_pgtable_total_pages(void)
+{
+       unsigned long res = 0, i;
+
+       /* Cover all of memory with page-granularity */
+       for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
+               struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
+               res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
+       }
+
+       return res;
+}
+
+static inline unsigned long hyp_s1_pgtable_pages(void)
+{
+       unsigned long res;
+
+       res = __hyp_pgtable_total_pages(); /* enough tables to map all of memory */
+
+       /* Allow 1 GiB for private mappings */
+       res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+
+       return res;
+}
+
+static inline unsigned long host_s2_pgtable_pages(void)
+{
+       unsigned long res;
+
+       /*
+        * Include an extra 16 pages to safely upper-bound the worst case of
+        * concatenated pgds.
+        */
+       res = __hyp_pgtable_total_pages() + 16;
+
+       /* Allow 1 GiB for MMIO mappings */
+       res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+
+       return res;
+}
+
+#endif /* __ARM64_KVM_PKVM_H__ */
index 0b561752f8d8f291c73d2a89c333378722add34c..8ca8cf6f5619acba7b6ef3c35d3c155732aa2b03 100644 (file)
@@ -15,7 +15,7 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
         arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \
         inject_fault.o va_layout.o handle_exit.o \
         guest.o debug.o reset.o sys_regs.o \
-        vgic-sys-reg-v3.o fpsimd.o pmu.o \
+        vgic-sys-reg-v3.o fpsimd.o pmu.o pkvm.o \
         arch_timer.o trng.o\
         vgic/vgic.o vgic/vgic-init.o \
         vgic/vgic-irqfd.o vgic/vgic-v2.o \
index b726332eec49eb2a254a68c82b713d5945f7b43d..687598e41b21f68c76479aac298507af58dd7221 100644 (file)
@@ -10,4 +10,4 @@ subdir-ccflags-y := -I$(incdir)                               \
                    -DDISABLE_BRANCH_PROFILING          \
                    $(DISABLE_STACKLEAK_PLUGIN)
 
-obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o reserved_mem.o
+obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o
index c9a8f535212ea277a70d6875124a4241099d726f..ef6a58a042352c3d0b2d251a5015823873bc1a02 100644 (file)
@@ -10,9 +10,6 @@
 #include <nvhe/memory.h>
 #include <nvhe/spinlock.h>
 
-#define HYP_MEMBLOCK_REGIONS 128
-extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
-extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
 extern struct kvm_pgtable pkvm_pgtable;
 extern hyp_spinlock_t pkvm_pgd_lock;
 extern struct hyp_pool hpool;
@@ -39,58 +36,4 @@ static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size,
        *end = ALIGN(*end, PAGE_SIZE);
 }
 
-static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
-{
-       unsigned long total = 0, i;
-
-       /* Provision the worst case scenario */
-       for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
-               nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
-               total += nr_pages;
-       }
-
-       return total;
-}
-
-static inline unsigned long __hyp_pgtable_total_pages(void)
-{
-       unsigned long res = 0, i;
-
-       /* Cover all of memory with page-granularity */
-       for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
-               struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
-               res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
-       }
-
-       return res;
-}
-
-static inline unsigned long hyp_s1_pgtable_pages(void)
-{
-       unsigned long res;
-
-       res = __hyp_pgtable_total_pages();
-
-       /* Allow 1 GiB for private mappings */
-       res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
-
-       return res;
-}
-
-static inline unsigned long host_s2_pgtable_pages(void)
-{
-       unsigned long res;
-
-       /*
-        * Include an extra 16 pages to safely upper-bound the worst case of
-        * concatenated pgds.
-        */
-       res = __hyp_pgtable_total_pages() + 16;
-
-       /* Allow 1 GiB for MMIO mappings */
-       res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
-
-       return res;
-}
-
 #endif /* __KVM_HYP_MM_H */
index c1a90dd022b8c151bf30152e7ddbfccf12676d19..92262e89672d48ee61dd99a5140885d080c92a2b 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_pgtable.h>
+#include <asm/kvm_pkvm.h>
 #include <asm/stage2_pgtable.h>
 
 #include <hyp/fault.h>
index 2fabeceb889a96e4cb15bf87328be1f4de2d323c..9e0ff5a700dddd2c9fc3934d4b901aefb4076566 100644 (file)
@@ -8,6 +8,7 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_pgtable.h>
+#include <asm/kvm_pkvm.h>
 #include <asm/spectre.h>
 
 #include <nvhe/early_alloc.h>
index 578f71798c2e0b81c358053a9f486fac33b5d374..51e68a040d8ab7c269dc732abcad32d3ae674bdf 100644 (file)
@@ -8,6 +8,7 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_pgtable.h>
+#include <asm/kvm_pkvm.h>
 
 #include <nvhe/early_alloc.h>
 #include <nvhe/fixed_config.h>
diff --git a/arch/arm64/kvm/hyp/reserved_mem.c b/arch/arm64/kvm/hyp/reserved_mem.c
deleted file mode 100644 (file)
index 578670e..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2020 - Google LLC
- * Author: Quentin Perret <qperret@google.com>
- */
-
-#include <linux/kvm_host.h>
-#include <linux/memblock.h>
-#include <linux/sort.h>
-
-#include <asm/kvm_host.h>
-
-#include <nvhe/memory.h>
-#include <nvhe/mm.h>
-
-static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory);
-static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr);
-
-phys_addr_t hyp_mem_base;
-phys_addr_t hyp_mem_size;
-
-static int cmp_hyp_memblock(const void *p1, const void *p2)
-{
-       const struct memblock_region *r1 = p1;
-       const struct memblock_region *r2 = p2;
-
-       return r1->base < r2->base ? -1 : (r1->base > r2->base);
-}
-
-static void __init sort_memblock_regions(void)
-{
-       sort(hyp_memory,
-            *hyp_memblock_nr_ptr,
-            sizeof(struct memblock_region),
-            cmp_hyp_memblock,
-            NULL);
-}
-
-static int __init register_memblock_regions(void)
-{
-       struct memblock_region *reg;
-
-       for_each_mem_region(reg) {
-               if (*hyp_memblock_nr_ptr >= HYP_MEMBLOCK_REGIONS)
-                       return -ENOMEM;
-
-               hyp_memory[*hyp_memblock_nr_ptr] = *reg;
-               (*hyp_memblock_nr_ptr)++;
-       }
-       sort_memblock_regions();
-
-       return 0;
-}
-
-void __init kvm_hyp_reserve(void)
-{
-       u64 nr_pages, prev, hyp_mem_pages = 0;
-       int ret;
-
-       if (!is_hyp_mode_available() || is_kernel_in_hyp_mode())
-               return;
-
-       if (kvm_get_mode() != KVM_MODE_PROTECTED)
-               return;
-
-       ret = register_memblock_regions();
-       if (ret) {
-               *hyp_memblock_nr_ptr = 0;
-               kvm_err("Failed to register hyp memblocks: %d\n", ret);
-               return;
-       }
-
-       hyp_mem_pages += hyp_s1_pgtable_pages();
-       hyp_mem_pages += host_s2_pgtable_pages();
-
-       /*
-        * The hyp_vmemmap needs to be backed by pages, but these pages
-        * themselves need to be present in the vmemmap, so compute the number
-        * of pages needed by looking for a fixed point.
-        */
-       nr_pages = 0;
-       do {
-               prev = nr_pages;
-               nr_pages = hyp_mem_pages + prev;
-               nr_pages = DIV_ROUND_UP(nr_pages * sizeof(struct hyp_page), PAGE_SIZE);
-               nr_pages += __hyp_pgtable_max_pages(nr_pages);
-       } while (nr_pages != prev);
-       hyp_mem_pages += nr_pages;
-
-       /*
-        * Try to allocate a PMD-aligned region to reduce TLB pressure once
-        * this is unmapped from the host stage-2, and fallback to PAGE_SIZE.
-        */
-       hyp_mem_size = hyp_mem_pages << PAGE_SHIFT;
-       hyp_mem_base = memblock_phys_alloc(ALIGN(hyp_mem_size, PMD_SIZE),
-                                          PMD_SIZE);
-       if (!hyp_mem_base)
-               hyp_mem_base = memblock_phys_alloc(hyp_mem_size, PAGE_SIZE);
-       else
-               hyp_mem_size = ALIGN(hyp_mem_size, PMD_SIZE);
-
-       if (!hyp_mem_base) {
-               kvm_err("Failed to reserve hyp memory\n");
-               return;
-       }
-
-       kvm_info("Reserved %lld MiB at 0x%llx\n", hyp_mem_size >> 20,
-                hyp_mem_base);
-}
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
new file mode 100644 (file)
index 0000000..ebecb7c
--- /dev/null
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 - Google LLC
+ * Author: Quentin Perret <qperret@google.com>
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/memblock.h>
+#include <linux/sort.h>
+
+#include <asm/kvm_pkvm.h>
+
+#include "hyp_constants.h"
+
+static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory); /* host-side alias of hyp's memblock copy */
+static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr);
+
+phys_addr_t hyp_mem_base; /* base of the memory carveout reserved for the hypervisor */
+phys_addr_t hyp_mem_size; /* size of the carveout, in bytes */
+
+static int cmp_hyp_memblock(const void *p1, const void *p2) /* sort() comparator: ascending ->base */
+{
+       const struct memblock_region *r1 = p1;
+       const struct memblock_region *r2 = p2;
+
+       return r1->base < r2->base ? -1 : (r1->base > r2->base);
+}
+
+static void __init sort_memblock_regions(void)
+{
+       sort(hyp_memory,
+            *hyp_memblock_nr_ptr,
+            sizeof(struct memblock_region),
+            cmp_hyp_memblock,
+            NULL);
+}
+
+static int __init register_memblock_regions(void) /* mirror host memblocks into hyp_memory[], sorted */
+{
+       struct memblock_region *reg;
+
+       for_each_mem_region(reg) {
+               if (*hyp_memblock_nr_ptr >= HYP_MEMBLOCK_REGIONS)
+                       return -ENOMEM; /* hyp_memory[] is full */
+
+               hyp_memory[*hyp_memblock_nr_ptr] = *reg;
+               (*hyp_memblock_nr_ptr)++;
+       }
+       sort_memblock_regions();
+
+       return 0;
+}
+
+void __init kvm_hyp_reserve(void) /* reserve the pKVM hypervisor carveout during early boot */
+{
+       u64 nr_pages, prev, hyp_mem_pages = 0;
+       int ret;
+
+       if (!is_hyp_mode_available() || is_kernel_in_hyp_mode())
+               return; /* no usable EL2, or kernel already runs at EL2 (VHE) */
+
+       if (kvm_get_mode() != KVM_MODE_PROTECTED)
+               return; /* carveout only needed in protected mode */
+
+       ret = register_memblock_regions();
+       if (ret) {
+               *hyp_memblock_nr_ptr = 0; /* discard the partial copy on failure */
+               kvm_err("Failed to register hyp memblocks: %d\n", ret);
+               return;
+       }
+
+       hyp_mem_pages += hyp_s1_pgtable_pages();
+       hyp_mem_pages += host_s2_pgtable_pages();
+
+       /*
+        * The hyp_vmemmap needs to be backed by pages, but these pages
+        * themselves need to be present in the vmemmap, so compute the number
+        * of pages needed by looking for a fixed point.
+        */
+       nr_pages = 0;
+       do {
+               prev = nr_pages;
+               nr_pages = hyp_mem_pages + prev;
+               nr_pages = DIV_ROUND_UP(nr_pages * STRUCT_HYP_PAGE_SIZE, /* sizeof(struct hyp_page), via hyp_constants.h */
+                                       PAGE_SIZE);
+               nr_pages += __hyp_pgtable_max_pages(nr_pages); /* tables to map the vmemmap itself */
+       } while (nr_pages != prev);
+       hyp_mem_pages += nr_pages;
+
+       /*
+        * Try to allocate a PMD-aligned region to reduce TLB pressure once
+        * this is unmapped from the host stage-2, and fallback to PAGE_SIZE.
+        */
+       hyp_mem_size = hyp_mem_pages << PAGE_SHIFT;
+       hyp_mem_base = memblock_phys_alloc(ALIGN(hyp_mem_size, PMD_SIZE),
+                                          PMD_SIZE);
+       if (!hyp_mem_base)
+               hyp_mem_base = memblock_phys_alloc(hyp_mem_size, PAGE_SIZE); /* PMD-aligned alloc failed; retry page-aligned */
+       else
+               hyp_mem_size = ALIGN(hyp_mem_size, PMD_SIZE);
+
+       if (!hyp_mem_base) {
+               kvm_err("Failed to reserve hyp memory\n");
+               return;
+       }
+
+       kvm_info("Reserved %lld MiB at 0x%llx\n", hyp_mem_size >> 20,
+                hyp_mem_base);
+}