 #include <linux/jump_label.h>
 #include <asm/firmware.h>
 
-DECLARE_STATIC_KEY_FALSE(pkey_disabled);
 extern int num_pkey;
 extern u32 reserved_allocation_mask; /* bits set for reserved keys */
 
 
 static inline int vma_pkey(struct vm_area_struct *vma)
 {
-       if (static_branch_likely(&pkey_disabled))
+       if (!mmu_has_feature(MMU_FTR_PKEY))
                return 0;
        return (vma->vm_flags & ARCH_VM_PKEY_FLAGS) >> VM_PKEY_SHIFT;
 }
        u32 all_pkeys_mask = (u32)(~(0x0));
        int ret;
 
-       if (static_branch_likely(&pkey_disabled))
+       if (!mmu_has_feature(MMU_FTR_PKEY))
                return -1;
-
        /*
         * Are we out of pkeys? We must handle this specially because ffz()
         * behavior is undefined if there are no zeros.
 
 static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
 {
-       if (static_branch_likely(&pkey_disabled))
+       if (!mmu_has_feature(MMU_FTR_PKEY))
                return -1;
 
        if (!mm_pkey_is_allocated(mm, pkey))
 static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma,
                                              int prot, int pkey)
 {
-       if (static_branch_likely(&pkey_disabled))
+       if (!mmu_has_feature(MMU_FTR_PKEY))
                return 0;
 
        /*
 static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
                                            unsigned long init_val)
 {
-       if (static_branch_likely(&pkey_disabled))
+       if (!mmu_has_feature(MMU_FTR_PKEY))
                return -EINVAL;
 
        /*
 
 static inline bool arch_pkeys_enabled(void)
 {
-       return !static_branch_likely(&pkey_disabled);
+       return mmu_has_feature(MMU_FTR_PKEY);
 }
 
 extern void pkey_mm_init(struct mm_struct *mm);
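
The header changes above all follow one pattern: each pkey helper bails out early unless the MMU advertises MMU_FTR_PKEY, replacing the old pkey_disabled static key. Below is a minimal standalone sketch of that gating pattern; it is not kernel code. mmu_has_feature() is modelled as a plain bit test on a mock feature mask, and the mock_* names, MOCK_MMU_FTR_PKEY, and the bit layout are illustrative assumptions.

/* pkey_gate_demo.c -- standalone userspace mock, not kernel code.
 * Models mmu_has_feature(MMU_FTR_PKEY) as a bit test on a plain
 * feature mask so the early-return pattern used in the header above
 * can be compiled and run in isolation. All names are stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MOCK_MMU_FTR_PKEY	(1u << 0)	/* stand-in for MMU_FTR_PKEY */

static uint32_t mock_mmu_features;		/* stand-in for the MMU feature mask */

static bool mock_mmu_has_feature(uint32_t feature)
{
	return mock_mmu_features & feature;
}

/* Mirrors the shape of vma_pkey(): report "no key" when the hardware
 * feature is absent, otherwise extract the key bits from the flags.
 */
static int mock_vma_pkey(uint32_t vm_flags)
{
	const uint32_t pkey_mask = 0xfu << 4;	/* illustrative bit layout */

	if (!mock_mmu_has_feature(MOCK_MMU_FTR_PKEY))
		return 0;
	return (vm_flags & pkey_mask) >> 4;
}

int main(void)
{
	uint32_t flags = 0x3u << 4;		/* pretend key 3 is set */

	printf("pkeys off: %d\n", mock_vma_pkey(flags));	/* 0 */

	mock_mmu_features |= MOCK_MMU_FTR_PKEY;
	printf("pkeys on:  %d\n", mock_vma_pkey(flags));	/* 3 */
	return 0;
}

On powerpc, mmu_has_feature() is itself backed by jump labels when CONFIG_JUMP_LABEL is enabled and the feature keys have been initialized, so dropping the dedicated pkey_disabled static key should not reintroduce a hot-path branch cost.
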
 
 #include <linux/pkeys.h>
 #include <linux/of_fdt.h>
 
-
-DEFINE_STATIC_KEY_FALSE(pkey_disabled);
 int  num_pkey;         /* Max number of pkeys supported */
 /*
  *  Keys marked in the reservation list cannot be allocated by  userspace
        pkeys_total = scan_pkey_feature();
        if (!pkeys_total) {
                /* No support for pkey. Mark it disabled */
-               static_branch_enable(&pkey_disabled);
                return;
        }
 
 
 void pkey_mm_init(struct mm_struct *mm)
 {
-       if (static_branch_likely(&pkey_disabled))
+       if (!mmu_has_feature(MMU_FTR_PKEY))
                return;
        mm_pkey_allocation_map(mm) = initial_allocation_mask;
        mm->context.execute_only_pkey = execute_only_key;
 
 void thread_pkey_regs_save(struct thread_struct *thread)
 {
-       if (static_branch_likely(&pkey_disabled))
+       if (!mmu_has_feature(MMU_FTR_PKEY))
                return;
 
        /*
 void thread_pkey_regs_restore(struct thread_struct *new_thread,
                              struct thread_struct *old_thread)
 {
-       if (static_branch_likely(&pkey_disabled))
+       if (!mmu_has_feature(MMU_FTR_PKEY))
                return;
 
        if (old_thread->amr != new_thread->amr)
 
 void thread_pkey_regs_init(struct thread_struct *thread)
 {
-       if (static_branch_likely(&pkey_disabled))
+       if (!mmu_has_feature(MMU_FTR_PKEY))
                return;
 
        thread->amr   = default_amr;
 
 bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
 {
-       if (static_branch_likely(&pkey_disabled))
+       if (!mmu_has_feature(MMU_FTR_PKEY))
                return true;
 
        return pkey_access_permitted(pte_to_pkey_bits(pte), write, execute);
 bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
                               bool execute, bool foreign)
 {
-       if (static_branch_likely(&pkey_disabled))
+       if (!mmu_has_feature(MMU_FTR_PKEY))
                return true;
        /*
         * Do not enforce our key-permissions on a foreign vma.
 
 void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm)
 {
-       if (static_branch_likely(&pkey_disabled))
+       if (!mmu_has_feature(MMU_FTR_PKEY))
                return;
 
        /* Duplicate the oldmm pkey state in mm: */
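
The pkeys.c hunks show the other half of the scheme: when scan_pkey_feature() reports no keys, initialization simply returns, and every runtime helper that tests MMU_FTR_PKEY degrades to a no-op. The standalone mock below sketches that init-time flow under the same caveats as the previous example: feature detection is faked with a plain mask, all mock_* names and values are illustrative, and the real kernel manages MMU_FTR_PKEY through its MMU feature machinery rather than setting a flag in this function.

/* pkey_init_demo.c -- standalone userspace mock, not kernel code.
 * Sketches the init-time behaviour the pkeys.c hunks rely on: if the
 * scan finds no protection keys, the (mocked) feature bit is never
 * set, so later helpers that test it become no-ops.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MOCK_MMU_FTR_PKEY	(1u << 0)

static uint32_t mock_mmu_features;
static uint32_t initial_allocation_mask = 0x1;	/* key 0 reserved, illustrative */

static bool mock_mmu_has_feature(uint32_t feature)
{
	return mock_mmu_features & feature;
}

/* Stand-in for scan_pkey_feature(): pretend firmware / the device tree
 * reported `total` keys. The real routine reads firmware properties.
 */
static int mock_scan_pkey_feature(int total)
{
	return total;
}

static void mock_pkey_early_init(int keys_from_firmware)
{
	if (!mock_scan_pkey_feature(keys_from_firmware))
		return;			/* no keys: feature bit stays clear */
	mock_mmu_features |= MOCK_MMU_FTR_PKEY;
}

/* Mirrors pkey_mm_init(): a no-op unless the feature was detected. */
static void mock_pkey_mm_init(uint32_t *allocation_map)
{
	if (!mock_mmu_has_feature(MOCK_MMU_FTR_PKEY))
		return;
	*allocation_map = initial_allocation_mask;
}

int main(void)
{
	uint32_t map = 0xdead;

	mock_pkey_early_init(0);	/* firmware reports no pkeys */
	mock_pkey_mm_init(&map);
	printf("no pkeys:  map=0x%x\n", (unsigned)map);	/* unchanged: 0xdead */

	mock_pkey_early_init(32);	/* firmware reports 32 pkeys */
	mock_pkey_mm_init(&map);
	printf("has pkeys: map=0x%x\n", (unsigned)map);	/* 0x1 */
	return 0;
}

The design point the mock illustrates is that one boot-time decision (is MMU_FTR_PKEY present?) replaces a separately maintained pkey_disabled flag, so the enable/disable state lives in the same place as every other MMU feature.
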