#endif
 extern unsigned long va_kernel_xip_pa_offset;
 extern unsigned long pfn_base;
+extern uintptr_t load_sz;
 #define ARCH_PFN_OFFSET                (pfn_base)
 #else
 #define va_pa_offset           0
 extern unsigned long kernel_virt_addr;
 
 #ifdef CONFIG_64BIT
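+/*
+ * On 64-bit the kernel image is mapped at kernel_virt_addr, above the
+ * linear mapping of RAM: is_kernel_mapping() matches addresses inside the
+ * kernel image, is_linear_mapping() matches the linear mapping below it.
+ */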
+#define is_kernel_mapping(x)   \
+       ((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x)   \
+       ((x) >= PAGE_OFFSET && (x) < kernel_virt_addr)
+
 #define linear_mapping_pa_to_va(x)     ((void *)((unsigned long)(x) + va_pa_offset))
 #define kernel_mapping_pa_to_va(y)     ({                                              \
        unsigned long _y = y;                                                           \
 
 #define __va_to_pa_nodebug(x)  ({                                              \
        unsigned long _x = x;                                                   \
-       (_x < kernel_virt_addr) ?                                               \
+       is_linear_mapping(_x) ?                                                 \
                linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x);      \
        })
 #else
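+/*
+ * On 32-bit the kernel image lives inside the linear mapping, so any
+ * address at or above PAGE_OFFSET belongs to the linear mapping.
+ */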
+#define is_kernel_mapping(x)   \
+       ((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x)   \
+       ((x) >= PAGE_OFFSET)
+
 #define __pa_to_va_nodebug(x)  ((void *)((unsigned long) (x) + va_pa_offset))
 #define __va_to_pa_nodebug(x)  ((unsigned long)(x) - va_pa_offset)
 #endif /* CONFIG_64BIT */
 
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 int set_memory_rw_nx(unsigned long addr, int numpages);
-void protect_kernel_text_data(void);
 static __always_inline int set_kernel_memory(char *startp, char *endp,
                                             int (*set_memory)(unsigned long start,
                                                               int num_pages))
 static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
-static inline void protect_kernel_text_data(void) {}
 static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
 static inline int set_kernel_memory(char *startp, char *endp,
                                    int (*set_memory)(unsigned long start,
 }
 #endif
 
-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void protect_kernel_linear_mapping_text_rodata(void);
-#else
-static inline void protect_kernel_linear_mapping_text_rodata(void) {}
-#endif
-
 int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
 bool kernel_page_present(struct page *page);
 
 }
 #endif
 
+#ifdef CONFIG_STRICT_KERNEL_RWX
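+/*
+ * Pick the protection flags for a kernel virtual address: kernel text is
+ * mapped read-execute, the 64-bit linear mapping alias of kernel text is
+ * mapped read-only, and everything else read-write.
+ */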
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+       if (is_va_kernel_text(va))
+               return PAGE_KERNEL_READ_EXEC;
+
+       /*
+        * In a 64-bit kernel, the kernel mapping lies outside the linear
+        * mapping, so we must protect its linear mapping alias from being
+        * executed or written. The rodata section itself is made read-only
+        * later, in mark_rodata_ro().
+        */
+       if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
+               return PAGE_KERNEL_READ;
+
+       return PAGE_KERNEL;
+}
+
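+/*
+ * Called by the generic init code once boot is done: make rodata (and, on
+ * 64-bit, its linear mapping alias) read-only, then let debug_checkwx()
+ * report any mappings left both writable and executable.
+ */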
+void mark_rodata_ro(void)
+{
+       set_kernel_memory(__start_rodata, _data, set_memory_ro);
+       if (IS_ENABLED(CONFIG_64BIT))
+               set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
+                                 set_memory_ro);
+
+       debug_checkwx();
+}
+#else
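+/* Without STRICT_KERNEL_RWX, only the 64-bit linear mapping loses exec. */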
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+       if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
+               return PAGE_KERNEL;
+
+       return PAGE_KERNEL_EXEC;
+}
+#endif /* CONFIG_STRICT_KERNEL_RWX */
+
 /*
  * setup_vm() is called from head.S with MMU-off.
  *
 #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
 #endif
 
-uintptr_t load_pa, load_sz;
+static uintptr_t load_pa __initdata;
+uintptr_t load_sz;
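+/* load_sz stays global: is_kernel_mapping() in asm/page.h relies on it. */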
 #ifdef CONFIG_XIP_KERNEL
 #define load_pa        (*((uintptr_t *)XIP_FIXUP(&load_pa)))
 #define load_sz        (*((uintptr_t *)XIP_FIXUP(&load_sz)))
 #define xiprom_sz      (*((uintptr_t *)XIP_FIXUP(&xiprom_sz)))
 #define xiprom         (*((uintptr_t *)XIP_FIXUP(&xiprom)))
 
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+                                           __always_unused bool early)
 {
        uintptr_t va, end_va;
 
                                   map_size, PAGE_KERNEL);
 }
 #else
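+/*
+ * "early" is true for the boot page table built from setup_vm(), where the
+ * whole kernel must still be mapped executable; the final mapping instead
+ * derives per-region permissions from pgprot_from_va().
+ */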
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+                                           bool early)
 {
        uintptr_t va, end_va;
 
        for (va = kernel_virt_addr; va < end_va; va += map_size)
                create_pgd_mapping(pgdir, va,
                                   load_pa + (va - kernel_virt_addr),
-                                  map_size, PAGE_KERNEL_EXEC);
+                                  map_size,
+                                  early ?
+                                       PAGE_KERNEL_EXEC : pgprot_from_va(va));
 }
 #endif
 
         * us to reach paging_init(). We map all memory banks later
         * in setup_vm_final() below.
         */
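+       /* Early boot mapping: keep the whole kernel executable for now. */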
-       create_kernel_page_table(early_pg_dir, map_size);
+       create_kernel_page_table(early_pg_dir, map_size, true);
 
 #ifndef __PAGETABLE_PMD_FOLDED
        /* Setup early PMD for DTB */
 #endif
 }
 
-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void protect_kernel_linear_mapping_text_rodata(void)
-{
-       unsigned long text_start = (unsigned long)lm_alias(_start);
-       unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
-       unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata);
-       unsigned long data_start = (unsigned long)lm_alias(_data);
-
-       set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-       set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-
-       set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-       set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-}
-#endif
-
 static void __init setup_vm_final(void)
 {
        uintptr_t va, map_size;
                map_size = best_map_size(start, end - start);
                for (pa = start; pa < end; pa += map_size) {
                        va = (uintptr_t)__va(pa);
-                       create_pgd_mapping(swapper_pg_dir, va, pa,
-                                          map_size,
-#ifdef CONFIG_64BIT
-                                          PAGE_KERNEL
-#else
-                                          PAGE_KERNEL_EXEC
-#endif
-                                       );
 
+                       create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
+                                          pgprot_from_va(va));
                }
        }
 
 #ifdef CONFIG_64BIT
        /* Map the kernel */
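+       /* Not early anymore: pgprot_from_va() sets per-region permissions. */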
-       create_kernel_page_table(swapper_pg_dir, PMD_SIZE);
+       create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false);
 #endif
 
        /* Clear fixmap PTE and PMD mappings */
 }
 #endif /* CONFIG_MMU */
 
-#ifdef CONFIG_STRICT_KERNEL_RWX
-void __init protect_kernel_text_data(void)
-{
-       unsigned long text_start = (unsigned long)_start;
-       unsigned long init_text_start = (unsigned long)__init_text_begin;
-       unsigned long init_data_start = (unsigned long)__init_data_begin;
-       unsigned long rodata_start = (unsigned long)__start_rodata;
-       unsigned long data_start = (unsigned long)_data;
-#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
-       unsigned long end_va = kernel_virt_addr + load_sz;
-#else
-       unsigned long end_va = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
-#endif
-
-       set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-       set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
-       set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
-       /* rodata section is marked readonly in mark_rodata_ro */
-       set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-       set_memory_nx(data_start, (end_va - data_start) >> PAGE_SHIFT);
-}
-
-void mark_rodata_ro(void)
-{
-       unsigned long rodata_start = (unsigned long)__start_rodata;
-       unsigned long data_start = (unsigned long)_data;
-
-       set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-
-       debug_checkwx();
-}
-#endif
-
 #ifdef CONFIG_KEXEC_CORE
 /*
  * reserve_crashkernel() - reserves memory for crash kernel