--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_POWERPC_CRASHDUMP_PPC64_H
+#define _ASM_POWERPC_CRASHDUMP_PPC64_H
+
+/*
+ * Backup region - first 64KB of System RAM
+ *
+ * If the macros below are ever changed, please be judicious.
+ * The implicit assumptions are:
+ * - start, end & size are less than UINT32_MAX.
+ * - start & size are at least 8 byte aligned.
+ *
+ * For implementation details: arch/powerpc/purgatory/trampoline_64.S
+ */
+#define BACKUP_SRC_START 0
+#define BACKUP_SRC_END 0xffff
+#define BACKUP_SRC_SIZE (BACKUP_SRC_END - BACKUP_SRC_START + 1)
+
+#endif /* _ASM_POWERPC_CRASHDUMP_PPC64_H */
#include <linux/of_device.h>
#include <linux/memblock.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <asm/drmem.h>
#include <asm/kexec_ranges.h>
+#include <asm/crashdump-ppc64.h>
struct umem_info {
u64 *buf; /* data buffer for usable-memory property */
return ret;
}
+/**
+ * load_backup_segment - Locate a memory hole to place the backup region.
+ * @image: Kexec image.
+ * @kbuf: Buffer contents and memory parameters.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int load_backup_segment(struct kimage *image, struct kexec_buf *kbuf)
+{
+ void *buf;
+ int ret;
+
+ /*
+ * Setup a source buffer for backup segment.
+ *
+ * A source buffer has no meaning for the backup region as the data
+ * is copied from the backup source, after the crash, in purgatory.
+ * But as the segment loading code doesn't recognize such segments,
+ * set up a dummy source buffer to keep it happy for now.
+ */
+ buf = vzalloc(BACKUP_SRC_SIZE);
+ if (!buf)
+ return -ENOMEM;
+
+ kbuf->buffer = buf;
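+ /* Let kexec_add_buffer() locate a suitable memory hole for this segment */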
+ kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf->bufsz = kbuf->memsz = BACKUP_SRC_SIZE;
+ kbuf->top_down = false;
+
+ ret = kexec_add_buffer(kbuf);
+ if (ret) {
+ vfree(buf);
+ return ret;
+ }
+
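+ /* Record the dummy buffer and its placement for purgatory setup and cleanup */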
+ image->arch.backup_buf = buf;
+ image->arch.backup_start = kbuf->mem;
+ return 0;
+}
+
+/**
+ * load_crashdump_segments_ppc64 - Initialize the additional segments needed
+ *                                 to load the kdump kernel.
+ * @image: Kexec image.
+ * @kbuf: Buffer contents and memory parameters.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int load_crashdump_segments_ppc64(struct kimage *image,
+ struct kexec_buf *kbuf)
+{
+ int ret;
+
+ /* Load backup segment - first 64KB of the crashing kernel's memory */
+ ret = load_backup_segment(image, kbuf);
+ if (ret) {
+ pr_err("Failed to load backup segment\n");
+ return ret;
+ }
+ pr_debug("Loaded the backup region at 0x%lx\n", kbuf->mem);
+
+ return 0;
+}
+
/**
* setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global
* variables and call setup_purgatory() to initialize
goto out;
}
+ /* Tell purgatory where to look for backup region */
+ ret = kexec_purgatory_get_set_symbol(image, "backup_start",
+ &image->arch.backup_start,
+ sizeof(image->arch.backup_start),
+ false);
out:
if (ret)
pr_err("Failed to setup purgatory symbols");
/*
* Restrict memory usage for kdump kernel by setting up
- * usable memory ranges.
+ * usable memory ranges and memory reserve map.
*/
if (image->type == KEXEC_TYPE_CRASH) {
ret = get_usable_memory_ranges(&umem);
goto out;
}
- /* Ensure we don't touch crashed kernel's memory */
- ret = fdt_add_mem_rsv(fdt, 0, crashk_res.start);
+ /*
+ * Ensure we don't touch crashed kernel's memory except the
+ * first 64K of RAM, which will be backed up.
+ */
+ ret = fdt_add_mem_rsv(fdt, BACKUP_SRC_END + 1,
+ crashk_res.start - BACKUP_SRC_SIZE);
if (ret) {
pr_err("Error reserving crash memory: %s\n",
fdt_strerror(ret));
goto out;
}
+
+ /* Ensure backup region is not used by kdump/capture kernel */
+ ret = fdt_add_mem_rsv(fdt, image->arch.backup_start,
+ BACKUP_SRC_SIZE);
+ if (ret) {
+ pr_err("Error reserving memory for backup: %s\n",
+ fdt_strerror(ret));
+ goto out;
+ }
}
out:
kfree(image->arch.exclude_ranges);
image->arch.exclude_ranges = NULL;
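+ /* Free the dummy source buffer set up for the backup segment */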
+ vfree(image->arch.backup_buf);
+ image->arch.backup_buf = NULL;
+
return kexec_image_post_load_cleanup_default(image);
}
*/
#include <asm/asm-compat.h>
+#include <asm/crashdump-ppc64.h>
.machine ppc64
.balign 256
mr %r17,%r3 /* save cpu id to r17 */
mr %r15,%r4 /* save physical address in reg15 */
+ /* Work out where we're running */
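+ /* bcl 20,31 to the next instruction is "branch always"; unlike bl, it doesn't unbalance the link stack */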
+ bcl 20, 31, 0f
+0: mflr %r18
+
+ /*
+ * Copy BACKUP_SRC_SIZE bytes from BACKUP_SRC_START to
+ * backup_start, 8 bytes at a time.
+ *
+ * Use r3 = dest, r4 = src, r5 = size, r6 = current offset
+ */
+ ld %r3, (backup_start - 0b)(%r18)
+ cmpdi %cr0, %r3, 0
+ beq .Lskip_copy /* skip if there is no backup region */
+ lis %r5, BACKUP_SRC_SIZE@h
+ ori %r5, %r5, BACKUP_SRC_SIZE@l
+ cmpdi %cr0, %r5, 0
+ beq .Lskip_copy /* skip if copy size is zero */
+ lis %r4, BACKUP_SRC_START@h
+ ori %r4, %r4, BACKUP_SRC_START@l
+ li %r6, 0
+.Lcopy_loop:
+ ldx %r0, %r6, %r4
+ stdx %r0, %r6, %r3
+ addi %r6, %r6, 8
+ cmpld %cr0, %r6, %r5
+ blt .Lcopy_loop
+
+.Lskip_copy:
or %r3,%r3,%r3 /* ok now to high priority, lets boot */
lis %r6,0x1
mtctr %r6 /* delay a bit for slaves to catch up */
bdnz . /* before we overwrite 0-100 again */
- bl 0f /* Work out where we're running */
-0: mflr %r18
-
/* load device-tree address */
ld %r3, (dt_offset - 0b)(%r18)
mr %r16,%r3 /* save dt address in reg16 */
rfid /* update MSR and start kernel */
-
.balign 8
.globl kernel
kernel:
.8byte 0x0
.size dt_offset, . - dt_offset
+ .balign 8
+ .globl backup_start
+backup_start:
+ .8byte 0x0
+ .size backup_start, . - backup_start
.data
.balign 8