powerpc/modules: Make module_alloc() Strict Module RWX aware
authorJordan Niethe <jniethe5@gmail.com>
Wed, 9 Jun 2021 01:34:25 +0000 (11:34 +1000)
committerMichael Ellerman <mpe@ellerman.id.au>
Mon, 21 Jun 2021 11:13:20 +0000 (21:13 +1000)
Make module_alloc() use PAGE_KERNEL protections instead of
PAGE_KERNEL_EXEC if Strict Module RWX is enabled.

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210609013431.9805-4-jniethe5@gmail.com
arch/powerpc/include/asm/mmu.h
arch/powerpc/kernel/module.c

index 998fe01dd1a85bade6ff79b2dc19ad9b4c13b16f..27016b98ecb2bc24aa82d9392330007307e0313d 100644 (file)
@@ -345,6 +345,11 @@ static inline bool strict_kernel_rwx_enabled(void)
        return false;
 }
 #endif
+
+static inline bool strict_module_rwx_enabled(void)
+{
+       return IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && strict_kernel_rwx_enabled();
+}
 #endif /* !__ASSEMBLY__ */
 
 /* The kernel use the constants below to index in the page sizes array.
index 3f35c8d20be75ffeea1bda50b20f72c0f29034ca..ed04a3ba66fe8b33184be16357bb6e8d7188e161 100644 (file)
@@ -92,12 +92,14 @@ int module_finalize(const Elf_Ehdr *hdr,
 static __always_inline void *
 __module_alloc(unsigned long size, unsigned long start, unsigned long end)
 {
+       pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
+
        /*
         * Don't do huge page allocations for modules yet until more testing
         * is done. STRICT_MODULE_RWX may require extra work to support this
         * too.
         */
-       return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, PAGE_KERNEL_EXEC,
+       return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, prot,
                                    VM_FLUSH_RESET_PERMS | VM_NO_HUGE_VMAP,
                                    NUMA_NO_NODE, __builtin_return_address(0));
 }
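
For context, a minimal sketch of the protection selection introduced above. The helper name module_alloc_prot() is hypothetical and not part of the patch; the real change folds the check directly into __module_alloc() as shown in the hunk. With Strict Module RWX enabled the allocation comes back writable but non-executable, the expectation being that execute permission is granted to the module text later, once section layout is final.

	/*
	 * Illustrative sketch only: the patch inlines this check in
	 * __module_alloc(); module_alloc_prot() is a hypothetical helper.
	 */
	static inline pgprot_t module_alloc_prot(void)
	{
		/* Strict Module RWX: allocate RW, non-executable pages. */
		if (strict_module_rwx_enabled())
			return PAGE_KERNEL;

		/* Otherwise keep the historical writable+executable mapping. */
		return PAGE_KERNEL_EXEC;
	}

Note that strict_module_rwx_enabled() (added in the mmu.h hunk) only reports true when CONFIG_STRICT_MODULE_RWX is built in and strict kernel RWX is active at runtime, so configurations without it keep the previous PAGE_KERNEL_EXEC behaviour.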