void startup_kernel(void);
unsigned long detect_memory(unsigned long *safe_addr);
+void mem_detect_truncate(unsigned long limit);
bool is_ipl_block_dump(void);
void store_ipl_parmblock(void);
unsigned long read_ipl_report(unsigned long safe_addr);
void sclp_early_setup_buffer(void);
void print_pgm_check_info(void);
unsigned long get_random_base(unsigned long safe_addr);
-void setup_vmem(unsigned long ident_map_size, unsigned long asce_limit);
+void setup_vmem(unsigned long asce_limit);
void __printf(1, 2) decompressor_printk(const char *fmt, ...);
void error(char *m);
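For reference, __printf(1, 2) expands to GCC's format attribute, so callers of decompressor_printk() get compile-time format-string checking. A minimal stand-alone illustration (stdio stands in for the SCLP-based output of the real decompressor):

#include <stdarg.h>
#include <stdio.h>

/* What the kernel's __printf(a, b) annotation expands to. */
#define __printf(a, b)	__attribute__((format(printf, a, b)))

/* The attribute lets the compiler verify arguments against fmt,
 * so e.g. decompressor_printk("%s", 42) warns at build time. */
void __printf(1, 2) decompressor_printk(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vprintf(fmt, args);	/* the real decompressor writes via SCLP */
	va_end(args);
}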
unsigned long base_pos, max_pos, kernel_size;
int i;
- memory_limit = min(memory_limit, ident_map_size);
-
/*
 * Avoid putting kernel in the end of physical memory
 * which kasan will use for shadow memory and early pgtable
 * pages allocation.
 */
return max_physmem_end;
}
+
+void mem_detect_truncate(unsigned long limit)
+{
+ struct mem_detect_block *block;
+ int i;
+
+ for (i = 0; i < mem_detect.count; i++) {
+ block = __get_mem_detect_block_ptr(i);
+ if (block->start >= limit) {
+ mem_detect.count = i;
+ break;
+ } else if (block->end > limit) {
+ block->end = (u64)limit;
+ mem_detect.count = i + 1;
+ break;
+ }
+ }
+}
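A self-contained sketch of the truncation behavior above, using mock types and values in place of the real mem_detect structures (all names and addresses below are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the s390 mem_detect block list. */
struct block { uint64_t start, end; };

static struct block blocks[] = {
	{ 0x00000000,  0x40000000 },		/* 0G..1G */
	{ 0x80000000,  0xc0000000 },		/* 2G..3G */
	{ 0xc0000000, 0x100000000ULL },		/* 3G..4G */
};
static int count = 3;

/* Same truncation logic as mem_detect_truncate() above. */
static void truncate_blocks(uint64_t limit)
{
	for (int i = 0; i < count; i++) {
		if (blocks[i].start >= limit) {
			count = i;		/* drop blocks entirely above the limit */
			break;
		} else if (blocks[i].end > limit) {
			blocks[i].end = limit;	/* clamp the block straddling the limit */
			count = i + 1;
			break;
		}
	}
}

int main(void)
{
	truncate_blocks(0xa0000000);		/* limit at 2.5G */
	for (int i = 0; i < count; i++)
		printf("block %d: %#llx-%#llx\n", i,
		       (unsigned long long)blocks[i].start,
		       (unsigned long long)blocks[i].end);
	return 0;				/* prints 0G..1G and 2G..2.5G */
}

Note that the loop relies on the blocks being sorted in ascending address order, which the detection code is expected to maintain; blocks entirely below the limit are left untouched.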
setup_ident_map_size(max_physmem_end);
setup_vmalloc_size();
asce_limit = setup_kernel_memory_layout();
+ mem_detect_truncate(ident_map_size);
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
random_lma = get_random_base(safe_addr);
*/
clear_bss_section();
handle_relocs(__kaslr_offset);
- setup_vmem(ident_map_size, asce_limit);
+ setup_vmem(asce_limit);
copy_bootdata();
if (__kaslr_offset) {
error("out of memory on boot\n");
}
-static void pgtable_populate_init(unsigned long ident_map_size)
+static void pgtable_populate_init(void)
{
unsigned long initrd_end;
unsigned long kernel_end;
pgalloc_low = max(pgalloc_low, initrd_end);
}
- pgalloc_end = round_down(min(ident_map_size, get_mem_detect_end()), PAGE_SIZE);
+ pgalloc_end = round_down(get_mem_detect_end(), PAGE_SIZE);
pgalloc_pos = pgalloc_end;
boot_check_oom();
}
}
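The allocator behind pgalloc_pos/pgalloc_end hands out boot page-table pages from the top of usable memory downwards; with the clamping against ident_map_size gone, the top is simply the end of detected memory. A simplified, hypothetical model of that bump-down scheme (not the actual vmem.c allocator):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE		4096UL
#define round_down(x, a)	((x) & ~((a) - 1))

static uintptr_t pgalloc_pos, pgalloc_low;

/* Hand out pages from the top downwards; dropping below pgalloc_low
 * is the condition boot_check_oom() traps in the real code. */
static void *alloc_pages_down(size_t pages)
{
	size_t size = pages * PAGE_SIZE;

	if (pgalloc_pos - pgalloc_low < size)
		return NULL;
	pgalloc_pos -= size;
	return (void *)pgalloc_pos;
}

int main(void)
{
	/* Pretend detected memory ends at 16M, low watermark at 1M. */
	pgalloc_low = 1UL << 20;
	pgalloc_pos = round_down(16UL << 20, PAGE_SIZE);

	printf("%#lx\n", (unsigned long)alloc_pages_down(4));	/* 0xffc000 */
	printf("%#lx\n", (unsigned long)alloc_pages_down(4));	/* 0xff8000 */
	return 0;
}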
-void setup_vmem(unsigned long ident_map_size, unsigned long asce_limit)
+void setup_vmem(unsigned long asce_limit)
{
unsigned long start, end;
unsigned long asce_type;
* To prevent creation of a large page at address 0 first map
* the lowcore and create the identity mapping only afterwards.
*/
- pgtable_populate_init(ident_map_size);
+ pgtable_populate_init();
pgtable_populate(0, sizeof(struct lowcore), POPULATE_ONE2ONE);
- for_each_mem_detect_block(i, &start, &end) {
- if (start >= ident_map_size)
- break;
- pgtable_populate(start, min(end, ident_map_size), POPULATE_ONE2ONE);
- }
+ for_each_mem_detect_block(i, &start, &end)
+ pgtable_populate(start, end, POPULATE_ONE2ONE);
pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
POPULATE_ABS_LOWCORE);
pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,
static void __init setup_memory_end(void)
{
- memblock_remove(ident_map_size, PHYS_ADDR_MAX - ident_map_size);
max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
}
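PFN_DOWN() here is plain shift arithmetic; a quick worked example of the values setup_memory_end() derives (the 3G limit is hypothetical):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long ident_map_size = 3UL << 30;	/* hypothetical 3G limit */

	printf("max_pfn = %lu\n", PFN_DOWN(ident_map_size));		/* 786432 */
	printf("The maximum memory size is %luMB\n", ident_map_size >> 20); /* 3072 */
	return 0;
}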
 * The remaining range [memsize, ident_map_size], if memsize < ident_map_size,
 * can be mapped/unmapped dynamically later during memory hotplug.
*/
- memsize = min(memsize, ident_map_size);
-
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
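The BUILD_BUG_ON() checks above are compile-time assertions; outside the kernel the same alignment check can be written with C11 _Static_assert. A sketch with stand-in constants (the KASAN_SHADOW_START value is hypothetical; P4D_SIZE mirrors s390's 2^42-byte region-2 entry size):

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define P4D_SIZE		(1UL << 42)	/* illustrative: s390 region-2 entry size */
#define KASAN_SHADOW_START	(1UL << 44)	/* hypothetical placement for the sketch */

_Static_assert(IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE),
	       "KASAN shadow start must be P4D-aligned");

int main(void)
{
	return 0;	/* nothing to do at runtime; the check happens at compile time */
}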