/*
 * Walk the top-level (pgd) kernel page table and mark every page that
 * backs a kernel page table with PG_arch_1, descending into lower
 * levels via mark_kernel_p4d().
 */
static void mark_kernel_pgd(void)
{
	unsigned long addr, next, max_addr;
	struct page *page;
	pgd_t *pgd;
	int i;

	addr = 0;
	/*
	 * Figure out maximum virtual address accessible with the
	 * kernel ASCE. This is required to keep the page table walker
	 * from accessing non-existent entries.
	 */
	max_addr = (S390_lowcore.kernel_asce.val & _ASCE_TYPE_MASK) >> 2;
	max_addr = 1UL << (max_addr * 11 + 31);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, max_addr);
		if (pgd_none(*pgd))
			continue;
		if (!pgd_folded(*pgd)) {
			/*
			 * Resolve the pages holding the lower-level table
			 * before marking them: the original used 'page' and
			 * 'i' uninitialized here (undefined behavior).
			 * A region table spans four consecutive pages on
			 * s390 — NOTE(review): confirm the count of 4
			 * against the architecture's table size.
			 */
			page = phys_to_page(pgd_val(*pgd));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_p4d(pgd, addr, next);
	} while (pgd++, addr = next, addr != max_addr);
}
void __init cmma_init_nodat(void)