                cfs_rq->tg_load_contrib += tg_contrib;
        }
 }
+
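+/*
+ * A group entity's load contribution is the task_group's shares, scaled
+ * by the fraction of the group's total load carried by this cfs_rq:
+ *
+ *   contrib = tg->shares * cfs_rq->tg_load_contrib / tg->load_avg
+ *
+ * The +1 in the divisor avoids a divide-by-zero while the group has no
+ * accumulated load.
+ */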
+static inline void __update_group_entity_contrib(struct sched_entity *se)
+{
+       struct cfs_rq *cfs_rq = group_cfs_rq(se);
+       struct task_group *tg = cfs_rq->tg;
+       u64 contrib;
+
+       contrib = cfs_rq->tg_load_contrib * tg->shares;
+       se->avg.load_avg_contrib = div64_u64(contrib,
+                                            atomic64_read(&tg->load_avg) + 1);
+}
 #else
 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
                                                 int force_update) {}
+static inline void __update_group_entity_contrib(struct sched_entity *se) {}
 #endif
 
+static inline void __update_task_entity_contrib(struct sched_entity *se)
+{
+       u32 contrib;
+
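+       /*
+        * contrib = se->load.weight * runnable_avg_sum /
+        *           (runnable_avg_period + 1); the +1 keeps the divisor
+        *           non-zero for a freshly created entity.
+        */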
+       /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
+       contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
+       contrib /= (se->avg.runnable_avg_period + 1);
+       se->avg.load_avg_contrib = scale_load(contrib);
+}
+
 /* Compute the current contribution to load_avg by se, return any delta */
 static long __update_entity_load_avg_contrib(struct sched_entity *se)
 {
        long old_contrib = se->avg.load_avg_contrib;
 
-       if (!entity_is_task(se))
-               return 0;
-
-       se->avg.load_avg_contrib = div64_u64(se->avg.runnable_avg_sum *
-                                            se->load.weight,
-                                            se->avg.runnable_avg_period + 1);
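+       /*
+        * Task entities carry their own weight; group entities derive
+        * theirs from the owning task_group's shares, so the two cases
+        * are computed separately.
+        */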
+       if (entity_is_task(se)) {
+               __update_task_entity_contrib(se);
+       } else {
+               __update_group_entity_contrib(se);
+       }
 
        return se->avg.load_avg_contrib - old_contrib;
 }