 extern char __irqentry_text_start[], __irqentry_text_end[];
 extern char __mmuoff_data_start[], __mmuoff_data_end[];
 extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
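+/* Bounds of the kexec relocation code, emitted by the linker script. */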
+extern char __relocate_new_kernel_start[], __relocate_new_kernel_end[];
 
 #endif /* __ASM_SECTIONS_H */
 
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/page.h>
+#include <asm/sections.h>
 #include <asm/trans_pgd.h>
 
 #include "cpu-reset.h"
 
-/* Global variables for the arm64_relocate_new_kernel routine. */
-extern const unsigned char arm64_relocate_new_kernel[];
-extern const unsigned long arm64_relocate_new_kernel_size;
-
 /**
  * kexec_image_info - For debugging output.
  */
 int machine_kexec_post_load(struct kimage *kimage)
 {
        void *reloc_code = page_to_virt(kimage->control_code_page);
+       long reloc_size;
        struct trans_pgd_info info = {
                .trans_alloc_page       = kexec_page_alloc,
                .trans_alloc_arg        = kimage,
                        return rc;
        }
 
-       memcpy(reloc_code, arm64_relocate_new_kernel,
-              arm64_relocate_new_kernel_size);
+       reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
+       memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);
        kimage->arch.kern_reloc = __pa(reloc_code);
 
        /* Flush the reloc_code in preparation for its execution. */
        dcache_clean_inval_poc((unsigned long)reloc_code,
-                              (unsigned long)reloc_code +
-                              arm64_relocate_new_kernel_size);
+                              (unsigned long)reloc_code + reloc_size);
        icache_inval_pou((uintptr_t)reloc_code,
-                        (uintptr_t)reloc_code +
-                        arm64_relocate_new_kernel_size);
+                        (uintptr_t)reloc_code + reloc_size);
        kexec_list_flush(kimage);
        kexec_image_info(kimage);
 
 
 #include <asm/sysreg.h>
 #include <asm/virt.h>
 
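+/*
+ * Keep the relocation code in its own section so the linker script can
+ * gather it (see KEXEC_TEXT in vmlinux.lds.S) and assert that it fits
+ * within a single control page.
+ */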
+.section    ".kexec_relocate.text", "ax"
 /*
  * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
  *
        mov     x3, xzr
        br      x4                              /* Jumps from el1 */
 SYM_CODE_END(arm64_relocate_new_kernel)
-
-.align 3       /* To keep the 64-bit values below naturally aligned. */
-
-.Lcopy_end:
-.org   KEXEC_CONTROL_PAGE_SIZE
-
-/*
- * arm64_relocate_new_kernel_size - Number of bytes to copy to the
- * control_code_page.
- */
-.globl arm64_relocate_new_kernel_size
-arm64_relocate_new_kernel_size:
-       .quad   .Lcopy_end - arm64_relocate_new_kernel
 
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/cache.h>
 #include <asm/kernel-pgtable.h>
+#include <asm/kexec.h>
 #include <asm/memory.h>
 #include <asm/page.h>
 
 #define HIBERNATE_TEXT
 #endif
 
+#ifdef CONFIG_KEXEC_CORE
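+/*
+ * Gather the kexec relocation code into one block and export its bounds
+ * so machine_kexec_post_load() can copy it to the control page.
+ */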
+#define KEXEC_TEXT                                     \
+       . = ALIGN(SZ_4K);                               \
+       __relocate_new_kernel_start = .;                \
+       *(.kexec_relocate.text)                         \
+       __relocate_new_kernel_end = .;
+#else
+#define KEXEC_TEXT
+#endif
+
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 #define TRAMP_TEXT                                     \
        . = ALIGN(PAGE_SIZE);                           \
                        HYPERVISOR_TEXT
                        IDMAP_TEXT
                        HIBERNATE_TEXT
+                       KEXEC_TEXT
                        TRAMP_TEXT
                        *(.fixup)
                        *(.gnu.warning)
 ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET,
        "TRAMP_SWAPPER_OFFSET is wrong!")
 #endif
+
+#ifdef CONFIG_KEXEC_CORE
+/* The kexec relocation code must fit within one page of KEXEC_CONTROL_PAGE_SIZE bytes. */
+ASSERT(__relocate_new_kernel_end - (__relocate_new_kernel_start & ~(SZ_4K - 1))
+       <= SZ_4K, "kexec relocation code is too big or misaligned")
+ASSERT(KEXEC_CONTROL_PAGE_SIZE >= SZ_4K, "KEXEC_CONTROL_PAGE_SIZE is broken")
+#endif