riscv: Enable HAVE_ARCH_HUGE_VMAP for 64BIT
author Liu Shixin <liushixin2@huawei.com>
Wed, 12 Oct 2022 12:00:37 +0000 (20:00 +0800)
committer Palmer Dabbelt <palmer@rivosinc.com>
Sat, 29 Oct 2022 00:10:01 +0000 (17:10 -0700)
This sets the HAVE_ARCH_HUGE_VMAP option and defines the required page
table functions. With this feature, ioremap areas are mapped with
huge-page granularity according to their actual size. The feature can be
disabled with the kernel parameter "nohugeiomap".
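
For context, the arch hooks added below are consumed by the generic
vmap/ioremap code, which only installs a PUD- or PMD-sized leaf when the
request is large enough and suitably aligned. The following is a minimal
user-space sketch of that size-selection logic, under the assumption of
Sv39/Sv48 leaf sizes; pick_leaf_size(), pud_leaf_supported() and
pmd_leaf_supported() are illustrative stand-ins, not the kernel's
mm/vmalloc.c implementation.

/*
 * Illustrative, user-space-only sketch (not kernel code). It models the
 * decision the generic vmap path makes once arch_vmap_pud_supported() /
 * arch_vmap_pmd_supported() return true: map with the largest leaf whose
 * size and alignment constraints the request satisfies.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE (1ULL << 12)   /* 4 KiB base page */
#define PMD_SIZE  (1ULL << 21)   /* 2 MiB leaf */
#define PUD_SIZE  (1ULL << 30)   /* 1 GiB leaf */

/* Stand-ins for the arch_vmap_*_supported() hooks added by this patch. */
static bool pud_leaf_supported(void) { return true; }
static bool pmd_leaf_supported(void) { return true; }

/* Pick the largest leaf size usable for [phys, phys + size) mapped at addr. */
static unsigned long long pick_leaf_size(unsigned long long addr,
					 unsigned long long phys,
					 unsigned long long size)
{
	if (pud_leaf_supported() && size >= PUD_SIZE &&
	    !(addr % PUD_SIZE) && !(phys % PUD_SIZE))
		return PUD_SIZE;
	if (pmd_leaf_supported() && size >= PMD_SIZE &&
	    !(addr % PMD_SIZE) && !(phys % PMD_SIZE))
		return PMD_SIZE;
	return PAGE_SIZE;	/* fall back to base pages */
}

int main(void)
{
	/* A 4 MiB, 2 MiB-aligned request maps with 2 MiB leaves... */
	printf("%llu\n", pick_leaf_size(0xffffffc800200000ULL,
					0x40200000ULL, 4ULL << 20));
	/* ...while an unaligned 64 KiB request falls back to 4 KiB pages. */
	printf("%llu\n", pick_leaf_size(0xffffffc800001000ULL,
					0x40001000ULL, 64ULL << 10));
	return 0;
}

Roughly, booting with "nohugeiomap" has the generic code cap ioremap at
base-page granularity regardless of what these hooks report.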

Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reviewed-by: Björn Töpel <bjorn@kernel.org>
Tested-by: Björn Töpel <bjorn@kernel.org>
Link: https://lore.kernel.org/r/20221012120038.1034354-2-liushixin2@huawei.com
[Palmer: minor formatting]
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Documentation/features/vm/huge-vmap/arch-support.txt
arch/riscv/Kconfig
arch/riscv/include/asm/vmalloc.h
arch/riscv/mm/Makefile
arch/riscv/mm/pgtable.c [new file with mode: 0644]

index 13b4940e0c3a7190f60e3a3fdbe54f2e938db43c..7274a4b15bcc81faa347dc91b0bac88374a18abe 100644 (file)
@@ -21,7 +21,7 @@
     |    openrisc: | TODO |
     |      parisc: | TODO |
     |     powerpc: |  ok  |
-    |       riscv: | TODO |
+    |       riscv: |  ok  |
     |        s390: | TODO |
     |          sh: | TODO |
     |       sparc: | TODO |
index 6b48a3ae984394016f8fbd3ec60bc76763baba5d..db2082dd456d64f3b957f17cce2f8a5ecbfadefa 100644 (file)
@@ -72,6 +72,7 @@ config RISCV
        select GENERIC_VDSO_TIME_NS if HAVE_GENERIC_VDSO
        select HARDIRQS_SW_RESEND
        select HAVE_ARCH_AUDITSYSCALL
+       select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT && !XIP_KERNEL
        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
        select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
        select HAVE_ARCH_KASAN if MMU && 64BIT
index ff9abc00d1394f58933cb4f90d9b773c795b0f0d..48da5371f1e9acf96a5637df7baadf2d4a67ad20 100644 (file)
@@ -1,4 +1,22 @@
 #ifndef _ASM_RISCV_VMALLOC_H
 #define _ASM_RISCV_VMALLOC_H
 
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+#define IOREMAP_MAX_ORDER (PUD_SHIFT)
+
+#define arch_vmap_pud_supported arch_vmap_pud_supported
+static inline bool arch_vmap_pud_supported(pgprot_t prot)
+{
+       return true;
+}
+
+#define arch_vmap_pmd_supported arch_vmap_pmd_supported
+static inline bool arch_vmap_pmd_supported(pgprot_t prot)
+{
+       return true;
+}
+
+#endif
+
 #endif /* _ASM_RISCV_VMALLOC_H */
index d76aabf4b94d6a8bf4ef20eab680de4d4be16ff9..ce7f121ad2dc50c37a4f489e478c55a555fb660d 100644 (file)
@@ -13,6 +13,7 @@ obj-y += extable.o
 obj-$(CONFIG_MMU) += fault.o pageattr.o
 obj-y += cacheflush.o
 obj-y += context.o
+obj-y += pgtable.o
 
 ifeq ($(CONFIG_MMU),y)
 obj-$(CONFIG_SMP) += tlbflush.o
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
new file mode 100644 (file)
index 0000000..6645ead
--- /dev/null
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <asm/pgalloc.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/pgtable.h>
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+{
+       return 0;
+}
+
+void p4d_clear_huge(p4d_t *p4d)
+{
+}
+
+int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
+{
+       pud_t new_pud = pfn_pud(__phys_to_pfn(phys), prot);
+
+       set_pud(pud, new_pud);
+       return 1;
+}
+
+int pud_clear_huge(pud_t *pud)
+{
+       if (!pud_leaf(READ_ONCE(*pud)))
+               return 0;
+       pud_clear(pud);
+       return 1;
+}
+
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+       pmd_t *pmd = pud_pgtable(*pud);
+       int i;
+
+       pud_clear(pud);
+
+       flush_tlb_kernel_range(addr, addr + PUD_SIZE);
+
+       for (i = 0; i < PTRS_PER_PMD; i++) {
+               if (!pmd_none(pmd[i])) {
+                       pte_t *pte = (pte_t *)pmd_page_vaddr(pmd[i]);
+
+                       pte_free_kernel(NULL, pte);
+               }
+       }
+
+       pmd_free(NULL, pmd);
+
+       return 1;
+}
+
+int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
+{
+       pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), prot);
+
+       set_pmd(pmd, new_pmd);
+       return 1;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+       if (!pmd_leaf(READ_ONCE(*pmd)))
+               return 0;
+       pmd_clear(pmd);
+       return 1;
+}
+
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+       pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
+
+       pmd_clear(pmd);
+
+       flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+       pte_free_kernel(NULL, pte);
+       return 1;
+}
+
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */