        dst_bstat->cputime.utime += src_bstat->cputime.utime;
        dst_bstat->cputime.stime += src_bstat->cputime.stime;
        dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
+#ifdef CONFIG_SCHED_CORE
+       dst_bstat->forceidle_sum += src_bstat->forceidle_sum;
+#endif
 }
 
 static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
        dst_bstat->cputime.utime -= src_bstat->cputime.utime;
        dst_bstat->cputime.stime -= src_bstat->cputime.stime;
        dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime;
+#ifdef CONFIG_SCHED_CORE
+       dst_bstat->forceidle_sum -= src_bstat->forceidle_sum;
+#endif
 }
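
For reference, the two helpers above manipulate a forceidle_sum field that this
series adds to struct cgroup_base_stat (include/linux/cgroup-defs.h). A minimal
sketch of the resulting definition, for orientation only:

        struct cgroup_base_stat {
                struct task_cputime cputime;
        #ifdef CONFIG_SCHED_CORE
                /* forced-idle time due to core scheduling, in ns */
                u64 forceidle_sum;
        #endif
        };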
 
void __cgroup_account_cputime_field(struct cgroup *cgrp, int index,
        case CPUTIME_SOFTIRQ:
                rstatc->bstat.cputime.stime += delta_exec;
                break;
+#ifdef CONFIG_SCHED_CORE
+       case CPUTIME_FORCEIDLE:
+               rstatc->bstat.forceidle_sum += delta_exec;
+               break;
+#endif
        default:
                break;
        }
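
The CPUTIME_FORCEIDLE index handled above is not pre-existing; the same series
extends enum cpu_usage_stat in include/linux/kernel_stat.h. A sketch of the
expected shape:

        enum cpu_usage_stat {
                ...
                CPUTIME_GUEST_NICE,
        #ifdef CONFIG_SCHED_CORE
                CPUTIME_FORCEIDLE,
        #endif
                NR_STATS,
        };
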
  * with how it is done by __cgroup_account_cputime_field for each bit of
  * cpu time attributed to a cgroup.
  */
-static void root_cgroup_cputime(struct task_cputime *cputime)
+static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
 {
+       struct task_cputime *cputime = &bstat->cputime;
        int i;
 
-       cputime->stime = 0;
-       cputime->utime = 0;
-       cputime->sum_exec_runtime = 0;
+       memset(bstat, 0, sizeof(*bstat));
                cputime->sum_exec_runtime += user;
                cputime->sum_exec_runtime += sys;
                cputime->sum_exec_runtime += cpustat[CPUTIME_STEAL];
+
+#ifdef CONFIG_SCHED_CORE
+               bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE];
+#endif
        }
 }
 
void cgroup_base_stat_cputime_show(struct seq_file *seq)
 {
        struct cgroup *cgrp = seq_css(seq)->cgroup;
        u64 usage, utime, stime;
-       struct task_cputime cputime;
+       struct cgroup_base_stat bstat;
+#ifdef CONFIG_SCHED_CORE
+       u64 forceidle_time;
+#endif
 
        if (cgroup_parent(cgrp)) {
                cgroup_rstat_flush_hold(cgrp);
                usage = cgrp->bstat.cputime.sum_exec_runtime;
                cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
                               &utime, &stime);
+#ifdef CONFIG_SCHED_CORE
+               forceidle_time = cgrp->bstat.forceidle_sum;
+#endif
                cgroup_rstat_flush_release();
        } else {
-               root_cgroup_cputime(&cputime);
-               usage = cputime.sum_exec_runtime;
-               utime = cputime.utime;
-               stime = cputime.stime;
+               root_cgroup_cputime(&bstat);
+               usage = bstat.cputime.sum_exec_runtime;
+               utime = bstat.cputime.utime;
+               stime = bstat.cputime.stime;
+#ifdef CONFIG_SCHED_CORE
+               forceidle_time = bstat.forceidle_sum;
+#endif
        }
 
        do_div(usage, NSEC_PER_USEC);
        do_div(utime, NSEC_PER_USEC);
        do_div(stime, NSEC_PER_USEC);
+#ifdef CONFIG_SCHED_CORE
+       do_div(forceidle_time, NSEC_PER_USEC);
+#endif
 
        seq_printf(seq, "usage_usec %llu\n"
                   "user_usec %llu\n"
                   "system_usec %llu\n",
                   usage, utime, stime);
+
+#ifdef CONFIG_SCHED_CORE
+       seq_printf(seq, "core_sched.force_idle_usec %llu\n", forceidle_time);
+#endif
 }
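
With CONFIG_SCHED_CORE, the base stat output gains exactly one new key.
Illustrative output (hypothetical group name and made-up values):

        $ cat /sys/fs/cgroup/mygroup/cpu.stat
        usage_usec 512340
        user_usec 310200
        system_usec 202140
        core_sched.force_idle_usec 4875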
 
void account_idle_time(u64 cputime)
                cpustat[CPUTIME_IDLE] += cputime;
 }
 
+#ifdef CONFIG_SCHED_CORE
+/*
+ * Account for forceidle time due to core scheduling.
+ *
+ * REQUIRES: schedstat is enabled.
+ */
+void __account_forceidle_time(struct task_struct *p, u64 delta)
+{
+       __schedstat_add(p->stats.core_forceidle_sum, delta);
+
+       task_group_account_field(p, CPUTIME_FORCEIDLE, delta);
+}
+#endif
+
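__account_forceidle_time() lives in kernel/sched/cputime.c and is meant to be
driven from the scheduler's core-scheduling forced-idle path. A hypothetical
caller sketch, honoring the REQUIRES note above (only __account_forceidle_time
and schedstat_enabled() are real names here; p and delta stand for the task
being charged and the forced-idle delta in ns):

        /* Hypothetical call site: only account when schedstat is enabled. */
        if (schedstat_enabled())
                __account_forceidle_time(p, delta);
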
 /*
  * When a guest is interrupted for a longer amount of time, missed clock
  * ticks are not redelivered later. Due to that, this function may on