select ARCH_HAS_KCOV
        select ARCH_HAS_MEMBARRIER_CALLBACKS
        select ARCH_HAS_MEMBARRIER_SYNC_CORE
-       select ARCH_HAS_MEMREMAP_COMPAT_ALIGN   if PPC_BOOK3S_64
+       select ARCH_HAS_MEMREMAP_COMPAT_ALIGN   if PPC_64S_HASH_MMU
        select ARCH_HAS_MMIOWB                  if PPC64
        select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
        select ARCH_HAS_PHYS_TO_DMA
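
The hunks below all gate code on a new PPC_64S_HASH_MMU symbol whose definition is not part of this excerpt. As an orientation aid only (an assumption of this note, not taken from the patch), the symbol is presumably a Book3S-64 bool that defaults to y, along these lines:

config PPC_64S_HASH_MMU
	bool "Hash MMU Support"
	depends on PPC_BOOK3S_64
	default y
	help
	  Support for the hash-table MMU on Book3S 64-bit processors.
	  If unset, the kernel supports only the Radix MMU.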
 
 void slb_dump_contents(struct slb_entry *slb_ptr);
 
 extern void slb_vmalloc_update(void);
-extern void slb_set_size(u16 size);
 void preload_new_slb_context(unsigned long start, unsigned long sp);
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+void slb_set_size(u16 size);
+#else
+static inline void slb_set_size(u16 size) { }
+#endif
+
 #endif /* __ASSEMBLY__ */
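
The hunk above uses the standard config-stub idiom: when the feature is configured out, the header supplies an empty static inline with the same signature, so call sites compile unchanged with no #ifdef of their own. A minimal standalone C sketch of the pattern (the caller is hypothetical; names are reused from the hunk):

#include <stdint.h>

typedef uint16_t u16;	/* kernel type stand-in */

#ifdef CONFIG_PPC_64S_HASH_MMU
void slb_set_size(u16 size);			/* real definition lives in slb.c */
#else
static inline void slb_set_size(u16 size) { }	/* vanishes at compile time */
#endif

/* Hypothetical caller: identical source in both configurations; when the
 * hash MMU is configured out the call inlines to nothing. */
void shrink_slb(void)
{
	slb_set_size(32);
}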
 
 /*
 
                 * from EA and new context ids to build the new VAs.
                 */
                mm_context_id_t id;
+#ifdef CONFIG_PPC_64S_HASH_MMU
                mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE];
+#endif
        };
 
        /* Number of bits in the mm_cpumask */
        /* Number of user space windows opened in process mm_context */
        atomic_t vas_windows;
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
        struct hash_mm_context *hash_context;
+#endif
 
        void __user *vdso;
        /*
 #endif
 } mm_context_t;
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
 {
        return ctx->hash_context->user_psize;
 extern int mmu_linear_psize;
 extern int mmu_virtual_psize;
 extern int mmu_vmalloc_psize;
-extern int mmu_vmemmap_psize;
 extern int mmu_io_psize;
+#else /* CONFIG_PPC_64S_HASH_MMU */
+#ifdef CONFIG_PPC_64K_PAGES
+#define mmu_virtual_psize MMU_PAGE_64K
+#else
+#define mmu_virtual_psize MMU_PAGE_4K
+#endif
+#endif
+extern int mmu_vmemmap_psize;
 
 /* MMU initialization */
 void mmu_early_init_devtree(void);
         * know which translations we will pick. Hence go with hash
         * restrictions.
         */
-       return hash__setup_initial_memory_limit(first_memblock_base,
-                                          first_memblock_size);
+       if (!early_radix_enabled())
+               hash__setup_initial_memory_limit(first_memblock_base,
+                                                first_memblock_size);
 }
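
Note that setup_initial_memory_limit() now calls hash__setup_initial_memory_limit() with no #ifdef even though that function has no definition in hash-disabled builds. This works only because early_radix_enabled() constant-folds to true in that configuration (a later hunk's BUILD_BUG() comment states the same property for radix_enabled()), so the compiler deletes the branch before the linker ever sees the reference. A standalone sketch of the mechanism, with a hypothetical runtime flag standing in for the MMU feature test; it relies on optimization being enabled, as kernel builds always are:

#include <stdbool.h>

/* Declared unconditionally; *defined* only when CONFIG_PPC_64S_HASH_MMU=y. */
void hash__setup_initial_memory_limit(unsigned long base, unsigned long size);

static inline bool early_radix_enabled(void)
{
#ifdef CONFIG_PPC_64S_HASH_MMU
	extern bool mmu_is_radix;	/* hypothetical runtime flag */
	return mmu_is_radix;		/* genuinely a runtime decision */
#else
	return true;			/* radix is the only choice: constant */
#endif
}

void setup_initial_memory_limit(unsigned long base, unsigned long size)
{
	if (!early_radix_enabled())
		/* With hash configured out this branch is provably dead, so
		 * no reference to the missing hash__ symbol is emitted and
		 * the kernel still links. */
		hash__setup_initial_memory_limit(base, size);
}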
 
 #ifdef CONFIG_PPC_PSERIES
 void cleanup_cpu_mmu_context(void);
 #endif
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
 {
        int index = ea >> MAX_EA_BITS_PER_CONTEXT;
 
        return get_vsid(context, ea, ssize);
 }
+#endif
 
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */
 
 
 struct mmu_gather;
 extern void hash__tlb_flush(struct mmu_gather *tlb);
+void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr);
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
 /* Private function for use by PCI IO mapping code */
 extern void __flush_hash_table_range(unsigned long start, unsigned long end);
-extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
-				unsigned long addr);
+#else
+static inline void __flush_hash_table_range(unsigned long start, unsigned long end) { }
+#endif
 #endif /*  _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */
 
                                     unsigned long size, pgprot_t vma_prot);
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 
+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC_64S_HASH_MMU)
 /*
  * This gets called at the end of handling a page fault, when
  * the kernel has put a new PTE into the page table for the process.
  * waiting for the inevitable extra hash-table miss exception.
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+#else
+static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
+#endif
 
 #endif /* __ASSEMBLY__ */
 #endif
 
 extern void __destroy_context(int context_id);
 static inline void mmu_context_init(void) { }
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 static inline int alloc_extended_context(struct mm_struct *mm,
                                         unsigned long ea)
 {
                return true;
        return false;
 }
+#endif
 
 #else
 extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
 
                                        /* this becomes non-zero. */
        u8 kexec_state;         /* set when kexec down has irqs off */
 #ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
        struct slb_shadow *slb_shadow_ptr;
+#endif
        struct dtl_entry *dispatch_log;
        struct dtl_entry *dispatch_log_end;
 #endif
        /* used for most interrupts/exceptions */
        u64 exgen[EX_SIZE] __attribute__((aligned(0x80)));
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
        /* SLB related definitions */
        u16 vmalloc_sllp;
        u8 slb_cache_ptr;
        u32 slb_used_bitmap;            /* Bitmaps for first 32 SLB entries. */
        u32 slb_kern_bitmap;
        u32 slb_cache[SLB_CACHE_ENTRIES];
+#endif
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 #ifdef CONFIG_PPC_BOOK3E
 #endif /* CONFIG_PPC_BOOK3E */
 
 #ifdef CONFIG_PPC_BOOK3S
+#ifdef CONFIG_PPC_64S_HASH_MMU
 #ifdef CONFIG_PPC_MM_SLICES
        unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
        unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
        u16 mm_ctx_user_psize;
        u16 mm_ctx_sllp;
 #endif
+#endif
 #endif
 
        /*
 #endif /* CONFIG_PPC_PSERIES */
 
 #ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
        /* Capture SLB related old contents in MCE handler. */
        struct slb_entry *mce_faulty_slbs;
        u16 slb_save_cache_ptr;
+#endif
 #endif /* CONFIG_PPC_BOOK3S_64 */
 #ifdef CONFIG_STACKPROTECTOR
        unsigned long canary;
 
        OFFSET(PACA_EXGEN, paca_struct, exgen);
        OFFSET(PACA_EXMC, paca_struct, exmc);
        OFFSET(PACA_EXNMI, paca_struct, exnmi);
+#ifdef CONFIG_PPC_64S_HASH_MMU
        OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr);
        OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid);
        OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid);
        OFFSET(SLBSHADOW_SAVEAREA, slb_shadow, save_area);
+#endif
        OFFSET(LPPACA_PMCINUSE, lppaca, pmcregs_in_use);
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        OFFSET(PACA_PMCINUSE, paca_struct, pmcregs_in_use);
 
 #endif
 
        ld      r8,KSP(r4)      /* new stack pointer */
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 BEGIN_MMU_FTR_SECTION
        b       2f
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
        slbmte  r7,r0
        isync
 2:
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_64S_HASH_MMU */
 
        clrrdi  r7, r8, THREAD_SHIFT    /* base of new stack */
        /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
 
        addi    r3,r1,STACK_FRAME_OVERHEAD
        andis.  r0,r4,DSISR_DABRMATCH@h
        bne-    1f
+#ifdef CONFIG_PPC_64S_HASH_MMU
 BEGIN_MMU_FTR_SECTION
        bl      do_hash_fault
 MMU_FTR_SECTION_ELSE
        bl      do_page_fault
 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+#else
+       bl      do_page_fault
+#endif
        b       interrupt_return_srr
 
 1:     bl      do_break
 EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
 EXC_COMMON_BEGIN(data_access_slb_common)
        GEN_COMMON data_access_slb
+#ifdef CONFIG_PPC_64S_HASH_MMU
 BEGIN_MMU_FTR_SECTION
        /* HPT case, do SLB fault */
        addi    r3,r1,STACK_FRAME_OVERHEAD
        /* Radix case, access is outside page table range */
        li      r3,-EFAULT
 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+#else
+       li      r3,-EFAULT
+#endif
        std     r3,RESULT(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_bad_segment_interrupt
 EXC_COMMON_BEGIN(instruction_access_common)
        GEN_COMMON instruction_access
        addi    r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_PPC_64S_HASH_MMU
 BEGIN_MMU_FTR_SECTION
        bl      do_hash_fault
 MMU_FTR_SECTION_ELSE
        bl      do_page_fault
 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+#else
+       bl      do_page_fault
+#endif
        b       interrupt_return_srr
 
 
 EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
 EXC_COMMON_BEGIN(instruction_access_slb_common)
        GEN_COMMON instruction_access_slb
+#ifdef CONFIG_PPC_64S_HASH_MMU
 BEGIN_MMU_FTR_SECTION
        /* HPT case, do SLB fault */
        addi    r3,r1,STACK_FRAME_OVERHEAD
        /* Radix case, access is outside page table range */
        li      r3,-EFAULT
 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+#else
+       li      r3,-EFAULT
+#endif
        std     r3,RESULT(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_bad_segment_interrupt
 
                mc_error_class[evt->error_class] : "Unknown";
        printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
        /* Display faulty slb contents for SLB errors. */
        if (evt->error_type == MCE_ERROR_TYPE_SLB && !in_guest)
                slb_dump_contents(local_paca->mce_faulty_slbs);
 
 }
 
 /* flush SLBs and reload */
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 void flush_and_reload_slb(void)
 {
        if (early_radix_enabled())
 
 void flush_erat(void)
 {
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
        if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) {
                flush_and_reload_slb();
                return;
 
 static int mce_flush(int what)
 {
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
        if (what == MCE_FLUSH_SLB) {
                flush_and_reload_slb();
                return 1;
                        /* attempt to correct the error */
                        switch (table[i].error_type) {
                        case MCE_ERROR_TYPE_SLB:
+#ifdef CONFIG_PPC_64S_HASH_MMU
                                if (local_paca->in_mce == 1)
                                        slb_save_contents(local_paca->mce_faulty_slbs);
+#endif
                                handled = mce_flush(MCE_FLUSH_SLB);
                                break;
                        case MCE_ERROR_TYPE_ERAT:
                        /* attempt to correct the error */
                        switch (table[i].error_type) {
                        case MCE_ERROR_TYPE_SLB:
+#ifdef CONFIG_PPC_64S_HASH_MMU
                                if (local_paca->in_mce == 1)
                                        slb_save_contents(local_paca->mce_faulty_slbs);
+#endif
                                if (mce_flush(MCE_FLUSH_SLB))
                                        handled = 1;
                                break;
 
 }
 #endif /* CONFIG_PPC_PSERIES */
 
-#ifdef CONFIG_PPC_BOOK3S_64
-
+#ifdef CONFIG_PPC_64S_HASH_MMU
 /*
  * 3 persistent SLBs are allocated here.  The buffer will be zero
  * initially, hence will all be invalid until we actually write them.
 
        return s;
 }
-
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_64S_HASH_MMU */
 
 #ifdef CONFIG_PPC_PSERIES
 /**
        new_paca->kexec_state = KEXEC_STATE_NONE;
        new_paca->__current = &init_task;
        new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
        new_paca->slb_shadow_ptr = NULL;
 #endif
 
 #ifdef CONFIG_PPC_PSERIES
        paca->lppaca_ptr = new_lppaca(cpu, limit);
 #endif
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
        paca->slb_shadow_ptr = new_slb_shadow(cpu, limit);
 #endif
 #ifdef CONFIG_PPC_PSERIES
        paca_nr_cpu_ids = nr_cpu_ids;
        paca_ptrs_size = new_ptrs_size;
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
        if (early_radix_enabled()) {
                /* Ugly fixup, see new_slb_shadow() */
                memblock_phys_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr),
                        paca_ptrs_size + paca_struct_size, nr_cpu_ids);
 }
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 void copy_mm_to_paca(struct mm_struct *mm)
 {
-#ifdef CONFIG_PPC_BOOK3S
        mm_context_t *context = &mm->context;
 
 #ifdef CONFIG_PPC_MM_SLICES
        get_paca()->mm_ctx_user_psize = context->user_psize;
        get_paca()->mm_ctx_sllp = context->sllp;
 #endif
-#else /* !CONFIG_PPC_BOOK3S */
-       return;
-#endif
 }
+#endif /* CONFIG_PPC_64S_HASH_MMU */
 
 {
        struct thread_struct *new_thread, *old_thread;
        struct task_struct *last;
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
        struct ppc64_tlb_batch *batch;
 #endif
 
 
        WARN_ON(!irqs_disabled());
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
        batch = this_cpu_ptr(&ppc64_tlb_batch);
        if (batch->active) {
                current_thread_info()->local_flags |= _TLF_LAZY_MMU;
         */
 
 #ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
        /*
         * This applies to a process that was context switched while inside
         * arch_enter_lazy_mmu_mode(), to re-activate the batch that was
                batch = this_cpu_ptr(&ppc64_tlb_batch);
                batch->active = 1;
        }
+#endif
 
        /*
         * Math facilities are masked out of the child MSR in copy_thread.
 
 static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
 {
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
        unsigned long sp_vsid;
        unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
 
         * the heap, we can put it above 1TB so it is backed by a 1TB
         * segment. Otherwise the heap will be in the bottom 1TB
         * which always uses 256MB segments and this may result in a
-        * performance penalty. We don't need to worry about radix. For
-        * radix, mmu_highuser_ssize remains unchanged from 256MB.
+        * performance penalty.
         */
-       if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
+       if (!radix_enabled() && !is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
                base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
 #endif
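
For reference, the clamp in this hunk works because SID_SHIFT_1T is 40 on the hash MMU, so 1UL << SID_SHIFT_1T is exactly 1 TiB. A tiny standalone restatement of the arithmetic (the helper name is hypothetical):

#define SID_SHIFT_1T	40	/* 1T segment shift, per the hash MMU headers */

/* Hypothetical helper restating the clamp: 1UL << 40 is 0x10000000000,
 * i.e. exactly 1 TiB, so any brk below that is raised into a 1T segment
 * rather than the 256MB-segment region beneath it. */
static unsigned long heap_base_min_1t(unsigned long brk)
{
	unsigned long one_tb = 1UL << SID_SHIFT_1T;

	return brk > one_tb ? brk : one_tb;
}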
 
 
                      ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
 }
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 static void __init init_mmu_slb_size(unsigned long node)
 {
        const __be32 *slb_size_ptr;
 
                atom_size = SZ_1M;
        } else if (radix_enabled()) {
                atom_size = PAGE_SIZE;
-       } else {
+       } else if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU)) {
                /*
                 * Linear mapping is one of 4K, 1M and 16M.  For 4K, no need
                 * to group units.  For larger mappings, use 1M atom which
 
        /* NOTREACHED */
 }
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 /* Values we need to export to the second kernel via the device tree. */
 static unsigned long htab_base;
 static unsigned long htab_size;
        return 0;
 }
 late_initcall(export_htab_values);
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_64S_HASH_MMU */
 
        return ret;
 }
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 /**
  * add_htab_mem_range - Adds htab range to the given memory ranges list,
  *                      if it exists
 
 
 ccflags-y      := $(NO_MINIMAL_TOC)
 
+obj-y                          += mmu_context.o pgtable.o trace.o
+ifdef CONFIG_PPC_64S_HASH_MMU
 CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE)
-
-obj-y                          += hash_pgtable.o hash_utils.o slb.o \
-                                  mmu_context.o pgtable.o hash_tlb.o trace.o
+obj-y                          += hash_pgtable.o hash_utils.o hash_tlb.o slb.o
 obj-$(CONFIG_PPC_HASH_MMU_NATIVE)      += hash_native.o
-obj-$(CONFIG_PPC_RADIX_MMU)    += radix_pgtable.o radix_tlb.o
 obj-$(CONFIG_PPC_4K_PAGES)     += hash_4k.o
 obj-$(CONFIG_PPC_64K_PAGES)    += hash_64k.o
+obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hash_hugepage.o
+obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage_prot.o
+endif
+
 obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
+
+obj-$(CONFIG_PPC_RADIX_MMU)    += radix_pgtable.o radix_tlb.o
 ifdef CONFIG_HUGETLB_PAGE
 obj-$(CONFIG_PPC_RADIX_MMU)    += radix_hugetlbpage.o
 endif
-obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hash_hugepage.o
-obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage_prot.o
 obj-$(CONFIG_SPAPR_TCE_IOMMU)  += iommu_api.o
 obj-$(CONFIG_PPC_PKEY) += pkeys.o
 
 
 unsigned int hpage_shift;
 EXPORT_SYMBOL(hpage_shift);
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                     pte_t *ptep, unsigned long trap, unsigned long flags,
                     int ssize, unsigned int shift, unsigned int mmu_psize)
        *ptep = __pte(new_pte & ~H_PAGE_BUSY);
        return 0;
 }
+#endif
 
 pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
                                  unsigned long addr, pte_t *ptep)
 
        return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
 }
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 void hash__reserve_context_id(int id)
 {
        int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);
        return alloc_context_id(MIN_USER_CONTEXT, max);
 }
 EXPORT_SYMBOL_GPL(hash__alloc_context_id);
+#endif
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 static int realloc_context_ids(mm_context_t *ctx)
 {
        int i, id;
 
        slb_setup_new_exec();
 }
+#else
+static inline int hash__init_new_context(struct mm_struct *mm)
+{
+       BUILD_BUG();
+       return 0;
+}
+#endif
 
 static int radix__init_new_context(struct mm_struct *mm)
 {
         */
        asm volatile("ptesync;isync" : : : "memory");
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
        mm->context.hash_context = NULL;
+#endif
 
        return index;
 }
 
 static void destroy_contexts(mm_context_t *ctx)
 {
-       int index, context_id;
+       if (radix_enabled()) {
+               ida_free(&mmu_context_ida, ctx->id);
+       } else {
+#ifdef CONFIG_PPC_64S_HASH_MMU
+               int index, context_id;
 
-       for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
-               context_id = ctx->extended_id[index];
-               if (context_id)
-                       ida_free(&mmu_context_ida, context_id);
+               for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
+                       context_id = ctx->extended_id[index];
+                       if (context_id)
+                               ida_free(&mmu_context_ida, context_id);
+               }
+               kfree(ctx->hash_context);
+#else
+               BUILD_BUG(); // radix_enabled() should be constant true
+#endif
        }
-       kfree(ctx->hash_context);
 }
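
The BUILD_BUG() in the #else branch is both documentation and proof: it expands to a call the compiler rejects unless the optimizer eliminates it, so the build succeeds only if radix_enabled() really is constant-true in hash-disabled kernels. A compilable sketch of that mechanism using the GCC/clang error attribute, which the kernel's BUILD_BUG() is also built on (requires optimization, as kernel builds always use):

#include <stdbool.h>

/* Stand-in for the kernel's BUILD_BUG(): the call is a hard compile error
 * unless the optimizer proves it unreachable and deletes it. */
static void __attribute__((error("BUILD_BUG hit"))) __build_bug(void);
#define BUILD_BUG()	__build_bug()

/* Constant-true stand-in for radix_enabled() with the hash MMU off. */
static inline bool radix_enabled(void) { return true; }

void destroy(void)
{
	if (radix_enabled()) {
		/* ... radix context teardown ... */
	} else {
		BUILD_BUG();	/* compiles only because this branch folds away */
	}
}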
 
 static void pmd_frag_destroy(void *pmd_frag)
 
 }
 arch_initcall(pgtable_debugfs_setup);
 
-#ifdef CONFIG_ZONE_DEVICE
+#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN)
 /*
  * Override the generic version in mm/memremap.c.
  *
 
        u64 i;
 
        /* We don't support slb for radix */
-       mmu_slb_size = 0;
+       slb_set_size(0);
 
        /*
         * Create the linear mapping
 {
        unsigned long lpcr;
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 #ifdef CONFIG_PPC_64K_PAGES
        /* PAGE_SIZE mappings */
        mmu_virtual_psize = MMU_PAGE_64K;
                mmu_vmemmap_psize = MMU_PAGE_2M;
        } else
                mmu_vmemmap_psize = mmu_virtual_psize;
+#endif
 #endif
        /*
         * initialize page table size
 
 }
 EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
 {
        u64 vsid, vsidkey;
        cxl_slbia(mm);
 }
 EXPORT_SYMBOL_GPL(copro_flush_all_slbs);
+#endif
 
 
 ifdef CONFIG_PTDUMP_DEBUGFS
 obj-$(CONFIG_PPC_BOOK3S_32)    += bats.o segment_regs.o
-obj-$(CONFIG_PPC_BOOK3S_64)    += hashpagetable.o
+obj-$(CONFIG_PPC_64S_HASH_MMU) += hashpagetable.o
 endif
 
 
        mtspr(SPRN_SPRG3,       local_paca->sprg_vdso);
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
        /*
         * The SLB has to be restored here, but it sometimes still
         * contains entries, so the __ variant must be used to prevent
         * multi hits.
         */
        __slb_restore_bolted_realmode();
+#endif
 
        return srr1;
 }
 
 #endif
                add_preferred_console("hvc", 0, NULL);
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
        if (!radix_enabled()) {
                size_t size = sizeof(struct slb_entry) * mmu_slb_size;
                int i;
                                                cpu_to_node(i));
                }
        }
+#endif
 }
 
 static void __init pnv_init_IRQ(void)
 
 EXPORT_SYMBOL(plpar_hcall9);
 EXPORT_SYMBOL(plpar_hcall_norets);
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 /*
  * H_BLOCK_REMOVE supported block size for this page size in segment whose base
  * page size is that page size.
  * page size.
  */
 static int hblkrm_size[MMU_PAGE_COUNT][MMU_PAGE_COUNT] __ro_after_init;
+#endif
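
The comment describes a two-dimensional table: for each segment base page size, the block size H_BLOCK_REMOVE supports for each actual page size. A sketch of the lookup shape (MMU_PAGE_COUNT value and the accessor are assumptions of this note):

#define MMU_PAGE_COUNT	16	/* assumption: number of MMU_PAGE_* enums */

/* Sketch: hblkrm_size[base][actual] is the block size H_BLOCK_REMOVE
 * supports for pages of size 'actual' in segments whose base page size
 * is 'base'; filled at boot from hypervisor-reported characteristics. */
static int hblkrm_size[MMU_PAGE_COUNT][MMU_PAGE_COUNT];

static int hblkrm_block_size(int base_psize, int actual_psize)
{
	return hblkrm_size[base_psize][actual_psize];
}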
 
 /*
  * Due to the involved complexity, and that the current hypervisor is only
                return;
        }
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
        /*
         * PAPR says this feature is SLB-Buffer but firmware never
  * reports that.  All SPLPARs support the SLB shadow buffer.
                               "cpu %d (hw %d) of area %lx failed with %ld\n",
                               cpu, hwcpu, addr, ret);
        }
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_64S_HASH_MMU */
 
        /*
         * Register dispatch trace log, if one has been allocated.
        return rc;
 }
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
+
 static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
                                     unsigned long vpn, unsigned long pa,
                                     unsigned long rflags, unsigned long vflags,
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                pseries_lpar_register_process_table(0, 0, 0);
 }
+#endif /* CONFIG_PPC_64S_HASH_MMU */
 
 #ifdef CONFIG_PPC_RADIX_MMU
 void radix_init_pseries(void)
        return rc;
 }
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 static unsigned long vsid_unscramble(unsigned long vsid, int ssize)
 {
        unsigned long protovsid;
        return 0;
 }
 machine_device_initcall(pseries, reserve_vrma_context_id);
+#endif
 
 #ifdef CONFIG_DEBUG_FS
 /* debugfs file interface for vpa data */
 
        seq_printf(m, "shared_processor_mode=%d\n",
                   lppaca_shared_proc(get_lppaca()));
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
        if (!radix_enabled())
                seq_printf(m, "slb_size=%d\n", mmu_slb_size);
 #endif
 
 
 static u16 clamp_slb_size(void)
 {
+#ifdef CONFIG_PPC_64S_HASH_MMU
        u16 prev = mmu_slb_size;
 
        slb_set_size(SLB_MIN_SIZE);
 
        return prev;
+#else
+       return 0;
+#endif
 }
 
 static int do_suspend(void)
 
 
 extern u32 pseries_security_flavor;
 void pseries_setup_security_mitigations(void);
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
 void pseries_lpar_read_hblkrm_characteristics(void);
+#else
+static inline void pseries_lpar_read_hblkrm_characteristics(void) { }
+#endif
 
 #endif /* _PSERIES_PSERIES_H */
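
This stub exists precisely so that call sites need no guard of their own; the call in the pseries setup path later in this excerpt can stay unconditional. A sketch, assuming the declarations above (the caller is hypothetical):

/* With the #else stub, this is an ordinary unconditional call; in
 * hash-disabled builds it inlines to a no-op. */
static void example_setup(void)
{
	pseries_lpar_read_hblkrm_characteristics();
}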
 
                        disposition = RTAS_DISP_FULLY_RECOVERED;
                        break;
                case    MC_ERROR_TYPE_SLB:
+#ifdef CONFIG_PPC_64S_HASH_MMU
                        /*
                         * Store the old slb content in paca before flushing.
                         * Print this when we go to virtual mode.
                                slb_save_contents(local_paca->mce_faulty_slbs);
                        flush_and_reload_slb();
                        disposition = RTAS_DISP_FULLY_RECOVERED;
+#endif
                        break;
                default:
                        break;
 
        u8 *mce_data_buf;
        unsigned int i;
        int nr_cpus = num_possible_cpus();
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
        struct slb_entry *slb_ptr;
        size_t size;
 #endif
                                                (RTAS_ERROR_LOG_MAX * i);
        }
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
        if (!radix_enabled()) {
                /* Allocate per cpu area to save old slb contents during MCE */
                size = sizeof(struct slb_entry) * mmu_slb_size * nr_cpus;
        fwnmi_init();
 
        pseries_setup_security_mitigations();
 	pseries_lpar_read_hblkrm_characteristics();
 
        /* By default, only probe PCI (can be overridden by rtas_pci) */
        pci_add_flags(PCI_PROBE_ONLY);
 
                case 'P':
                        show_tasks();
                        break;
-#ifdef CONFIG_PPC_BOOK3S
+#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_64S_HASH_MMU)
                case 'u':
                        dump_segments();
                        break;
 static void dump_one_paca(int cpu)
 {
        struct paca_struct *p;
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
        int i = 0;
 #endif
 
        DUMP(p, cpu_start, "%#-*x");
        DUMP(p, kexec_state, "%#-*x");
 #ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
        if (!early_radix_enabled()) {
                for (i = 0; i < SLB_NUM_BOLTED; i++) {
                        u64 esid, vsid;
                                       22, "slb_cache", i, p->slb_cache[i]);
                }
        }
+#endif
 
        DUMP(p, rfi_flush_fallback_area, "%-*px");
 #endif
        printf("%s", after);
 }
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 void dump_segments(void)
 {
        int i;
 
        CRASHTYPE(FORTIFIED_SUBOBJECT),
        CRASHTYPE(FORTIFIED_STRSCPY),
        CRASHTYPE(DOUBLE_FAULT),
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
        CRASHTYPE(PPC_SLB_MULTIHIT),
 #endif
 };