                unsigned long cap, max_cap = 0;
                int cpu, max_cpu = -1;
 
-               if (!static_branch_unlikely(&sched_asym_cpucapacity))
+               if (!sched_asym_cpucap_active())
                        return 1;
 
                /* Ensure the capacity of the CPUs fits the task. */
 
  */
 static inline unsigned long dl_bw_capacity(int i)
 {
-       if (!static_branch_unlikely(&sched_asym_cpucapacity) &&
+       if (!sched_asym_cpucap_active() &&
            capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
                return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
        } else {
         * Take the capacity of the CPU into account to
         * ensure it fits the requirement of the task.
         */
-       if (static_branch_unlikely(&sched_asym_cpucapacity))
+       if (sched_asym_cpucap_active())
                select_rq |= !dl_task_fits_capacity(p, cpu);
 
        if (select_rq) {
 
 
 static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
 {
-       if (!static_branch_unlikely(&sched_asym_cpucapacity))
+       if (!sched_asym_cpucap_active())
                return;
 
        if (!p || p->nr_cpus_allowed == 1) {
 
 static inline bool asym_fits_capacity(unsigned long task_util, int cpu)
 {
-       if (static_branch_unlikely(&sched_asym_cpucapacity))
+       if (sched_asym_cpucap_active())
                return fits_capacity(task_util, capacity_of(cpu));
 
        return true;
         * On asymmetric system, update task utilization because we will check
         * that the task fits with cpu's capacity.
         */
-       if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+       if (sched_asym_cpucap_active()) {
                sync_entity_load_avg(&p->se);
                task_util = uclamp_task_util(p);
        }
         * For asymmetric CPU capacity systems, our domain of interest is
         * sd_asym_cpucapacity rather than sd_llc.
         */
-       if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+       if (sched_asym_cpucap_active()) {
                sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
                /*
                 * On an asymmetric CPU capacity system where an exclusive
 
        unsigned int cpu_cap;
 
        /* Only heterogeneous systems can benefit from this check */
-       if (!static_branch_unlikely(&sched_asym_cpucapacity))
+       if (!sched_asym_cpucap_active())
                return true;
 
        min_cap = uclamp_eff_value(p, UCLAMP_MIN);
         * If we're on asym system ensure we consider the different capacities
         * of the CPUs when searching for the lowest_mask.
         */
-       if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+       if (sched_asym_cpucap_active()) {
 
                ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
                                          task, lowest_mask,
 
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
 extern struct static_key_false sched_asym_cpucapacity;
 
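+/*
+ * Wrapper around the sched_asym_cpucapacity static key: returns true only
+ * on asymmetric CPU capacity systems. The static branch keeps the check
+ * effectively free on symmetric systems.
+ */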
+static __always_inline bool sched_asym_cpucap_active(void)
+{
+       return static_branch_unlikely(&sched_asym_cpucapacity);
+}
+
 struct sched_group_capacity {
        atomic_t                ref;
        /*