#include <linux/ftrace.h>
 #include <linux/slab.h>
 #include <linux/init_task.h>
+#include <linux/jump_label.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
        hrtimer_cancel(&cfs_b->period_timer);
        hrtimer_cancel(&cfs_b->slack_timer);
 }
-#else
+
+#ifdef HAVE_JUMP_LABEL
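+/*
+ * Counts the task groups that currently have CFS bandwidth enabled; the
+ * static branch below stays patched out until at least one group sets a
+ * quota, so unthrottled configurations pay (almost) nothing.
+ */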
+static struct jump_label_key __cfs_bandwidth_used;
+
+static inline bool cfs_bandwidth_used(void)
+{
+       return static_branch(&__cfs_bandwidth_used);
+}
+
+static void account_cfs_bandwidth_used(int enabled, int was_enabled)
+{
+       /* only need to count groups transitioning between enabled/!enabled */
+       if (enabled && !was_enabled)
+               jump_label_inc(&__cfs_bandwidth_used);
+       else if (!enabled && was_enabled)
+               jump_label_dec(&__cfs_bandwidth_used);
+}
+#else /* !HAVE_JUMP_LABEL */
+/* a static_branch buys nothing without arch jump label support */
+static inline bool cfs_bandwidth_used(void)
+{
+       return true;
+}
+
+static void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
+#endif /* HAVE_JUMP_LABEL */
+#else /* !CONFIG_CFS_BANDWIDTH */
 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
 
 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 {
-       int i, ret = 0, runtime_enabled;
+       int i, ret = 0, runtime_enabled, runtime_was_enabled;
        struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
 
        if (tg == &root_task_group)
                goto out_unlock;
 
        runtime_enabled = quota != RUNTIME_INF;
+       runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
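+       /*
+        * jump_label_inc()/dec() may block while patching kernel text, so
+        * adjust the key before taking cfs_b->lock.
+        */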
+       account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
+
        raw_spin_lock_irq(&cfs_b->lock);
        cfs_b->period = ns_to_ktime(period);
        cfs_b->quota = quota;
 
 static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
                                                   unsigned long delta_exec)
 {
-       if (!cfs_rq->runtime_enabled)
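+       /* cheap early-out while no task group has bandwidth enabled */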
+       if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
                return;
 
        __account_cfs_rq_runtime(cfs_rq, delta_exec);
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
 {
-       return cfs_rq->throttled;
+       return cfs_bandwidth_used() && cfs_rq->throttled;
 }
 
 /* check whether cfs_rq, or any parent, is throttled */
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
 {
-       return cfs_rq->throttle_count;
+       return cfs_bandwidth_used() && cfs_rq->throttle_count;
 }
 
 /*
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
+       if (!cfs_bandwidth_used())
+               return;
+
        if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
                return;
 
  */
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 {
+       if (!cfs_bandwidth_used())
+               return;
+
        /* an active group must be handled by the update_curr()->put() path */
        if (!cfs_rq->runtime_enabled || cfs_rq->curr)
                return;
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
+       if (!cfs_bandwidth_used())
+               return;
+
        if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
                return;