arm64: kexec: use ld script for relocation function
Author: Pasha Tatashin <pasha.tatashin@soleen.com>
Thu, 30 Sep 2021 14:31:08 +0000 (14:31 +0000)
Committer: Will Deacon <will@kernel.org>
Fri, 1 Oct 2021 12:31:00 +0000 (13:31 +0100)
Currently, relocation code declares start and end variables
which are used to compute its size.

A better way to do this is to use an ld script, and put the relocation
function in its own section.

Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20210930143113.1502553-11-pasha.tatashin@soleen.com
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/include/asm/sections.h
arch/arm64/kernel/machine_kexec.c
arch/arm64/kernel/relocate_kernel.S
arch/arm64/kernel/vmlinux.lds.S

index e4ad9db53af1d7ad4a32c999bcbfcfdbf70a3b7b..152cb35bf9df71421d73b20e94c52fb6ad59d692 100644 (file)
@@ -21,5 +21,6 @@ extern char __exittext_begin[], __exittext_end[];
 extern char __irqentry_text_start[], __irqentry_text_end[];
 extern char __mmuoff_data_start[], __mmuoff_data_end[];
 extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
+extern char __relocate_new_kernel_start[], __relocate_new_kernel_end[];
 
 #endif /* __ASM_SECTIONS_H */
index cf5d6f22a041be64224520420aaa00763c80ad0f..320442d35811a537544ced50f18e290f23fc106f 100644 (file)
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/page.h>
+#include <asm/sections.h>
 #include <asm/trans_pgd.h>
 
 #include "cpu-reset.h"
 
-/* Global variables for the arm64_relocate_new_kernel routine. */
-extern const unsigned char arm64_relocate_new_kernel[];
-extern const unsigned long arm64_relocate_new_kernel_size;
-
 /**
  * kexec_image_info - For debugging output.
  */
@@ -163,6 +160,7 @@ static void *kexec_page_alloc(void *arg)
 int machine_kexec_post_load(struct kimage *kimage)
 {
        void *reloc_code = page_to_virt(kimage->control_code_page);
+       long reloc_size;
        struct trans_pgd_info info = {
                .trans_alloc_page       = kexec_page_alloc,
                .trans_alloc_arg        = kimage,
@@ -183,17 +181,15 @@ int machine_kexec_post_load(struct kimage *kimage)
                        return rc;
        }
 
-       memcpy(reloc_code, arm64_relocate_new_kernel,
-              arm64_relocate_new_kernel_size);
+       reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
+       memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);
        kimage->arch.kern_reloc = __pa(reloc_code);
 
        /* Flush the reloc_code in preparation for its execution. */
        dcache_clean_inval_poc((unsigned long)reloc_code,
-                              (unsigned long)reloc_code +
-                              arm64_relocate_new_kernel_size);
+                              (unsigned long)reloc_code + reloc_size);
        icache_inval_pou((uintptr_t)reloc_code,
-                        (uintptr_t)reloc_code +
-                        arm64_relocate_new_kernel_size);
+                        (uintptr_t)reloc_code + reloc_size);
        kexec_list_flush(kimage);
        kexec_image_info(kimage);
 
index b4fb97312a801b3250e2fcc212d5dc62462c9d29..2227741b96fa91a7018bfd301ecad583e00eb5d6 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/sysreg.h>
 #include <asm/virt.h>
 
+.section    ".kexec_relocate.text", "ax"
 /*
  * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
  *
@@ -77,16 +78,3 @@ SYM_CODE_START(arm64_relocate_new_kernel)
        mov     x3, xzr
        br      x4                              /* Jumps from el1 */
 SYM_CODE_END(arm64_relocate_new_kernel)
-
-.align 3       /* To keep the 64-bit values below naturally aligned. */
-
-.Lcopy_end:
-.org   KEXEC_CONTROL_PAGE_SIZE
-
-/*
- * arm64_relocate_new_kernel_size - Number of bytes to copy to the
- * control_code_page.
- */
-.globl arm64_relocate_new_kernel_size
-arm64_relocate_new_kernel_size:
-       .quad   .Lcopy_end - arm64_relocate_new_kernel
index f6b1a88245db2f01ad596ffb342db3e024d3a868..0760331af85c4e031148a960f5099e2eebdbde22 100644 (file)
@@ -63,6 +63,7 @@
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/cache.h>
 #include <asm/kernel-pgtable.h>
+#include <asm/kexec.h>
 #include <asm/memory.h>
 #include <asm/page.h>
 
@@ -100,6 +101,16 @@ jiffies = jiffies_64;
 #define HIBERNATE_TEXT
 #endif
 
+#ifdef CONFIG_KEXEC_CORE
+#define KEXEC_TEXT                                     \
+       . = ALIGN(SZ_4K);                               \
+       __relocate_new_kernel_start = .;                \
+       *(.kexec_relocate.text)                         \
+       __relocate_new_kernel_end = .;
+#else
+#define KEXEC_TEXT
+#endif
+
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 #define TRAMP_TEXT                                     \
        . = ALIGN(PAGE_SIZE);                           \
@@ -160,6 +171,7 @@ SECTIONS
                        HYPERVISOR_TEXT
                        IDMAP_TEXT
                        HIBERNATE_TEXT
+                       KEXEC_TEXT
                        TRAMP_TEXT
                        *(.fixup)
                        *(.gnu.warning)
@@ -348,3 +360,10 @@ ASSERT(swapper_pg_dir - reserved_pg_dir == RESERVED_SWAPPER_OFFSET,
 ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET,
        "TRAMP_SWAPPER_OFFSET is wrong!")
 #endif
+
+#ifdef CONFIG_KEXEC_CORE
+/* kexec relocation code should fit into one KEXEC_CONTROL_PAGE_SIZE */
+ASSERT(__relocate_new_kernel_end - (__relocate_new_kernel_start & ~(SZ_4K - 1))
+       <= SZ_4K, "kexec relocation code is too big or misaligned")
+ASSERT(KEXEC_CONTROL_PAGE_SIZE >= SZ_4K, "KEXEC_CONTROL_PAGE_SIZE is broken")
+#endif