From: Will Deacon <will@kernel.org>
Date: Thu, 20 Apr 2023 10:22:33 +0000 (+0100)
Subject: Merge branch 'for-next/mm' into for-next/core
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=1bb31cc7afe6b1524d1cd74a1ed53c1add16fc73;p=linux.git

Merge branch 'for-next/mm' into for-next/core

* for-next/mm:
  arm64: mm: always map fixmap at page granularity
  arm64: mm: move fixmap code to its own file
  arm64: add FIXADDR_TOT_{START,SIZE}
  Revert "Revert "arm64: dma: Drop cache invalidation from arch_dma_prep_coherent()""
  arm64: uaccess: Remove memcpy_page_flushcache()
  mm,kfence: decouple kfence from page granularity mapping judgement
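
The kfence change reserves the KFENCE pool from memblock before the
linear map is created, so only the pool itself needs page-granular
mappings and the rest of the linear map can keep block mappings. As
the diff below shows, the early reservation honours the usual
sample-interval switch on the kernel command line, for example:

  kfence.sample_interval=100    # reserve and map the pool at boot
  kfence.sample_interval=0      # skip the early pool reservation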
---

1bb31cc7afe6b1524d1cd74a1ed53c1add16fc73
diff --cc arch/arm64/mm/mmu.c
index 7556020a27b76,ef59c18fbf8c5..af6bc8403ee46
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@@ -510,6 -508,75 +508,60 @@@ void __init mark_linear_text_alias_ro(v
  			    PAGE_KERNEL_RO);
  }
  
 -static bool crash_mem_map __initdata;
 -
 -static int __init enable_crash_mem_map(char *arg)
 -{
 -	/*
 -	 * Proper parameter parsing is done by reserve_crashkernel(). We only
 -	 * need to know if the linear map has to avoid block mappings so that
 -	 * the crashkernel reservations can be unmapped later.
 -	 */
 -	crash_mem_map = true;
 -
 -	return 0;
 -}
 -early_param("crashkernel", enable_crash_mem_map);
 -
+ #ifdef CONFIG_KFENCE
+ 
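+ /*
+  * Default to reserving the pool at boot whenever the configured
+  * sample interval is non-zero; this can be overridden on the kernel
+  * command line via kfence.sample_interval below.
+  */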
+ bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
+ 
+ /* early_param() will be parsed before map_mem() below. */
+ static int __init parse_kfence_early_init(char *arg)
+ {
+ 	int val;
+ 
+ 	if (get_option(&arg, &val))
+ 		kfence_early_init = !!val;
+ 	return 0;
+ }
+ early_param("kfence.sample_interval", parse_kfence_early_init);
+ 
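+ /*
+  * Reserve the pool from memblock and hide it behind NOMAP so that
+  * map_mem() skips it; arm64_kfence_map_pool() then remaps just the
+  * pool at page granularity after the rest of the linear map has
+  * been created.
+  */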
+ static phys_addr_t __init arm64_kfence_alloc_pool(void)
+ {
+ 	phys_addr_t kfence_pool;
+ 
+ 	if (!kfence_early_init)
+ 		return 0;
+ 
+ 	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+ 	if (!kfence_pool) {
+ 		pr_err("failed to allocate kfence pool\n");
+ 		kfence_early_init = false;
+ 		return 0;
+ 	}
+ 
+ 	/* Temporarily mark as NOMAP. */
+ 	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
+ 
+ 	return kfence_pool;
+ }
+ 
+ static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
+ {
+ 	if (!kfence_pool)
+ 		return;
+ 
+ 	/* KFENCE pool needs page-level mapping. */
+ 	__map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
+ 			pgprot_tagged(PAGE_KERNEL),
+ 			NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
+ 	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
+ 	__kfence_pool = phys_to_virt(kfence_pool);
+ }
+ #else /* CONFIG_KFENCE */
+ 
+ static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; }
+ static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { }
+ 
+ #endif /* CONFIG_KFENCE */
+ 
  static void __init map_mem(pgd_t *pgdp)
  {
  	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
@@@ -565,6 -645,26 +620,7 @@@
  	__map_memblock(pgdp, kernel_start, kernel_end,
  		       PAGE_KERNEL, NO_CONT_MAPPINGS);
  	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
 -
 -	/*
 -	 * Use page-level mappings here so that we can shrink the region
 -	 * in page granularity and put back unused memory to buddy system
 -	 * through /sys/kernel/kexec_crash_size interface.
 -	 */
 -#ifdef CONFIG_KEXEC_CORE
 -	if (crash_mem_map && !defer_reserve_crashkernel()) {
 -		if (crashk_res.end) {
 -			__map_memblock(pgdp, crashk_res.start,
 -				       crashk_res.end + 1,
 -				       PAGE_KERNEL,
 -				       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
 -			memblock_clear_nomap(crashk_res.start,
 -					     resource_size(&crashk_res));
 -		}
 -	}
 -#endif
 -
+ 	arm64_kfence_map_pool(early_kfence_pool, pgdp);
  }
  
  void mark_rodata_ro(void)
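
A minimal sketch of how the two helpers are expected to slot into
map_mem(); the declaration of early_kfence_pool is not visible in the
hunks above, so its exact form and placement here are an assumption:

	static void __init map_mem(pgd_t *pgdp)
	{
		/* Reserve the pool and mark it NOMAP before the memblock
		 * walk, so the bulk of the linear map can still use block
		 * mappings (assumed placement). */
		const phys_addr_t early_kfence_pool = arm64_kfence_alloc_pool();

		/* ... existing linear and kernel mappings elided ... */

		/* Remap just the pool at page granularity, clear NOMAP and
		 * publish __kfence_pool (shown in the final hunk above). */
		arm64_kfence_map_pool(early_kfence_pool, pgdp);
	}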