DEFINE(KIMAGE_ARCH_DTB_MEM,          offsetof(struct kimage, arch.dtb_mem));
   DEFINE(KIMAGE_ARCH_EL2_VECTORS,      offsetof(struct kimage, arch.el2_vectors));
   DEFINE(KIMAGE_ARCH_ZERO_PAGE,                offsetof(struct kimage, arch.zero_page));
+  DEFINE(KIMAGE_ARCH_PHYS_OFFSET,      offsetof(struct kimage, arch.phys_offset));
   DEFINE(KIMAGE_ARCH_TTBR1,            offsetof(struct kimage, arch.ttbr1));
   DEFINE(KIMAGE_HEAD,                  offsetof(struct kimage, head));
   DEFINE(KIMAGE_START,                 offsetof(struct kimage, start));
 
        reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
        memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);
        kimage->arch.kern_reloc = __pa(reloc_code);
+       rc = trans_pgd_idmap_page(&info, &kimage->arch.ttbr0,
+                                 &kimage->arch.t0sz, reloc_code);
+       if (rc)
+               return rc;
+       kimage->arch.phys_offset = virt_to_phys(kimage) - (long)kimage;
 
        /* Flush the reloc_code in preparation for its execution. */
        dcache_clean_inval_poc((unsigned long)reloc_code,
        local_daif_mask();
 
        /*
-        * Both restart and cpu_soft_restart will shutdown the MMU, disable data
+        * Both restart and kernel_reloc will shut down the MMU, disable data
         * caches. However, restart will start new kernel or purgatory directly,
-        * cpu_soft_restart will transfer control to arm64_relocate_new_kernel
+        * kernel_reloc contains the body of arm64_relocate_new_kernel.
         * In kexec case, kimage->start points to purgatory assuming that
         * kernel entry and dtb address are embedded in purgatory by
         * userspace (kexec-tools).
                restart(is_hyp_nvhe(), kimage->start, kimage->arch.dtb_mem,
                        0, 0);
        } else {
+               void (*kernel_reloc)(struct kimage *kimage);
+
                if (is_hyp_nvhe())
                        __hyp_set_vectors(kimage->arch.el2_vectors);
-               cpu_soft_restart(kimage->arch.kern_reloc,
-                                virt_to_phys(kimage), 0, 0);
+               cpu_install_ttbr0(kimage->arch.ttbr0, kimage->arch.t0sz);
+               kernel_reloc = (void *)kimage->arch.kern_reloc;
+               kernel_reloc(kimage);
        }
 
        BUG(); /* Should never get here. */
 
  *
  * Copyright (C) Linaro.
  * Copyright (C) Huawei Futurewei Technologies.
+ * Copyright (C) 2021, Microsoft Corporation.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
  */
 
 #include <linux/kexec.h>
 #include <asm/sysreg.h>
 #include <asm/virt.h>
 
+/*
+ * turn_off_mmu: write INIT_SCTLR_EL1_MMU_OFF to SCTLR_EL1 to turn the
+ * MMU off at EL1, applying pre_disable_mmu_workaround first and ending
+ * with an isb so the new SCTLR_EL1 value takes effect before the next
+ * instruction.
+ *
+ * \tmp1 is clobbered as scratch for the SCTLR value.
+ * \tmp2 is accepted but not referenced by the body — presumably kept
+ * for callers/earlier revisions; TODO confirm it can be dropped.
+ */
+.macro turn_off_mmu tmp1, tmp2
+       mov_q   \tmp1, INIT_SCTLR_EL1_MMU_OFF
+       pre_disable_mmu_workaround
+       msr     sctlr_el1, \tmp1
+       isb
+.endm
+
 .section    ".kexec_relocate.text", "ax"
 /*
  * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
        ldr     x18, [x0, #KIMAGE_ARCH_ZERO_PAGE] /* x18 = zero page for BBM */
        ldr     x17, [x0, #KIMAGE_ARCH_TTBR1]   /* x17 = linear map copy */
        ldr     x16, [x0, #KIMAGE_HEAD]         /* x16 = kimage_head */
-       mov     x14, xzr                        /* x14 = entry ptr */
-       mov     x13, xzr                        /* x13 = copy dest */
+       ldr     x22, [x0, #KIMAGE_ARCH_PHYS_OFFSET]     /* x22 phys_offset */
        raw_dcache_line_size x15, x1            /* x15 = dcache line size */
        break_before_make_ttbr_switch   x18, x17, x1, x2 /* set linear map */
 .Lloop:
        and     x12, x16, PAGE_MASK             /* x12 = addr */
-
+       sub     x12, x12, x22                   /* Convert x12 to virt */
        /* Test the entry flags. */
 .Ltest_source:
        tbz     x16, IND_SOURCE_BIT, .Ltest_indirection
 
        /* Invalidate dest page to PoC. */
-       mov     x2, x13
-       add     x1, x2, #PAGE_SIZE
-       dcache_by_myline_op ivac, sy, x2, x1, x15, x20
+       mov     x19, x13
        copy_page x13, x12, x1, x2, x3, x4, x5, x6, x7, x8
+       add     x1, x19, #PAGE_SIZE
+       dcache_by_myline_op civac, sy, x19, x1, x15, x20
        b       .Lnext
 .Ltest_indirection:
        tbz     x16, IND_INDIRECTION_BIT, .Ltest_destination
        ic      iallu
        dsb     nsh
        isb
+       ldr     x4, [x0, #KIMAGE_START]                 /* relocation start */
+       ldr     x1, [x0, #KIMAGE_ARCH_EL2_VECTORS]      /* el2 vectors */
+       ldr     x0, [x0, #KIMAGE_ARCH_DTB_MEM]          /* dtb address */
+       turn_off_mmu x12, x13
 
        /* Start new image. */
-       ldr     x1, [x0, #KIMAGE_ARCH_EL2_VECTORS]      /* relocation start */
        cbz     x1, .Lel1
-       ldr     x1, [x0, #KIMAGE_START]         /* relocation start */
-       ldr     x2, [x0, #KIMAGE_ARCH_DTB_MEM]  /* dtb address */
+       mov     x1, x4                          /* relocation start */
+       mov     x2, x0                          /* dtb address */
        mov     x3, xzr
        mov     x4, xzr
        mov     x0, #HVC_SOFT_RESTART
        hvc     #0                              /* Jumps from el2 */
 .Lel1:
-       ldr     x4, [x0, #KIMAGE_START]         /* relocation start */
-       ldr     x0, [x0, #KIMAGE_ARCH_DTB_MEM]  /* dtb address */
        mov     x2, xzr
        mov     x3, xzr
        br      x4                              /* Jumps from el1 */