extern bool hugetlb_disabled;
-void hugetlbpage_init_default(void);
+void __init hugetlbpage_init_default(void);
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
unsigned long len);
}
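
Every hunk in this patch follows the same pattern: functions that are only reachable during boot gain the __init annotation, which places their text in .init.text so it can be discarded when the kernel frees init memory after boot. A minimal sketch of the pattern, for illustration only (not taken from this patch; the function name is hypothetical):

#include <linux/init.h>

/* Only called from boot-time code paths, so its text can live in
 * .init.text and be reclaimed by free_initmem() once boot finishes. */
static int __init example_boot_setup(void)
{
        return 0;
}

The annotation is only correct when every caller also runs at init time; modpost reports a section mismatch warning if regular kernel text ends up referencing an __init function, which acts as the safety net for annotations like the ones below.
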
extern int hash__alloc_context_id(void);
-extern void hash__reserve_context_id(int id);
+void __init hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
return 0;
}
-static int find_free_bat(void)
+static int __init find_free_bat(void)
{
int b;
int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
}
#endif /* CONFIG_HUGETLB_PAGE */
-static void mmu_psize_set_default_penc(void)
+static void __init mmu_psize_set_default_penc(void)
{
int bpsize, apsize;
for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
#ifdef CONFIG_PPC_64K_PAGES
-static bool might_have_hea(void)
+static bool __init might_have_hea(void)
{
/*
* The HEA ethernet adapter requires awareness of the
* low-order N bits as the encoding for the 2^(12+N) byte page size
* (if it exists).
*/
-static void init_hpte_page_sizes(void)
+static void __init init_hpte_page_sizes(void)
{
long int ap, bp;
long int shift, penc;
set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
-void hugetlbpage_init_default(void)
+void __init hugetlbpage_init_default(void)
{
/* Set default large page size. Currently, we pick 16M or 1M
* depending on what is available
}
#ifdef CONFIG_PPC_64S_HASH_MMU
-void hash__reserve_context_id(int id)
+void __init hash__reserve_context_id(int id)
{
int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);
return 1;
}
-static int scan_pkey_feature(void)
+static int __init scan_pkey_feature(void)
{
int ret;
int pkeys_total = 0;
return 1;
}
-static unsigned long radix_memory_block_size(void)
+static unsigned long __init radix_memory_block_size(void)
{
unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;
#else /* CONFIG_MEMORY_HOTPLUG */
-static unsigned long radix_memory_block_size(void)
+static unsigned long __init radix_memory_block_size(void)
{
return 1UL * 1024 * 1024 * 1024;
}
unsigned long tlb_47x_boltmap[1024/8];
-static void ppc44x_update_tlb_hwater(void)
+static void __init ppc44x_update_tlb_hwater(void)
{
/* The TLB miss handlers hard codes the watermark in a cmpli
* instruction to improve performances rather than loading it
/*
* "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU
*/
-static void ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
+static void __init ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
{
unsigned int rA;
int bolted;
flush_instruction_cache();
}
-static unsigned long tlbcam_sz(int idx)
+static unsigned long __init tlbcam_sz(int idx)
{
return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1;
}
}
}
-static void setup_page_sizes(void)
+static void __init setup_page_sizes(void)
{
unsigned int tlb0cfg;
unsigned int tlb0ps;
}
}
-static void setup_mmu_htw(void)
+static void __init setup_mmu_htw(void)
{
/*
* If we want to use HW tablewalk, enable it by patching the TLB miss
return 0;
}
-static void reset_numa_cpu_lookup_table(void)
+static void __init reset_numa_cpu_lookup_table(void)
{
unsigned int cpu;
* ibm,numa-lookup-index-table= {N, domainid1, domainid2, ..... domainidN}
* ibm,numa-distance-table = { N, 1, 2, 4, 5, 1, 6, .... N elements}
*/
-static void initialize_form2_numa_distance_lookup_table(void)
+static void __init initialize_form2_numa_distance_lookup_table(void)
{
int i, j;
struct device_node *root;
return 0;
}
-static int get_nid_and_numa_distance(struct drmem_lmb *lmb)
+static int __init get_nid_and_numa_distance(struct drmem_lmb *lmb)
{
struct assoc_arrays aa = { .arrays = NULL };
int default_nid = NUMA_NO_NODE;
DEFINE_SHOW_ATTRIBUTE(ptdump);
-static void build_pgtable_complete_mask(void)
+static void __init build_pgtable_complete_mask(void)
{
unsigned int i, j;