}
 
        /* ensure we never gain time by being placed backwards. */
-       cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
-#ifndef CONFIG_64BIT
-       smp_wmb();
-       cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
-#endif
+       u64_u32_store(cfs_rq->min_vruntime,
+                     max_vruntime(cfs_rq->min_vruntime, vruntime));
 }
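On a 32-bit architecture a u64 store compiles to two 32-bit stores, so a
concurrent reader can observe a torn min_vruntime. The removed lines above
are exactly the pattern the new helper encapsulates; as a sketch (using the
macro definitions added in sched.h below), on !CONFIG_64BIT the
u64_u32_store() call expands to roughly:

        u64 __val = max_vruntime(cfs_rq->min_vruntime, vruntime);

        cfs_rq->min_vruntime = __val;
        smp_wmb();                      /* order the value before its copy */
        cfs_rq->min_vruntime_copy = __val;

while on 64-bit it degenerates to the plain assignment.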
 
 static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
 }
 
 #ifdef CONFIG_SMP
+static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
+{
+       return u64_u32_load_copy(cfs_rq->avg.last_update_time,
+                                cfs_rq->last_update_time_copy);
+}
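The read side mirrors the store: on !CONFIG_64BIT, u64_u32_load_copy()
retries until the value and its copy agree, i.e. roughly:

        u64 val, copy;

        do {
                copy = cfs_rq->last_update_time_copy;
                smp_rmb();              /* pairs with the writer's smp_wmb() */
                val = cfs_rq->avg.last_update_time;
        } while (val != copy);

The explicit-copy variant is needed here because the value lives in
cfs_rq->avg while its copy lives directly in cfs_rq.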
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /*
  * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list
        if (!(se->avg.last_update_time && prev))
                return;
 
-#ifndef CONFIG_64BIT
-       {
-               u64 p_last_update_time_copy;
-               u64 n_last_update_time_copy;
-
-               do {
-                       p_last_update_time_copy = prev->load_last_update_time_copy;
-                       n_last_update_time_copy = next->load_last_update_time_copy;
-
-                       smp_rmb();
-
-                       p_last_update_time = prev->avg.last_update_time;
-                       n_last_update_time = next->avg.last_update_time;
+       p_last_update_time = cfs_rq_last_update_time(prev);
+       n_last_update_time = cfs_rq_last_update_time(next);
 
-               } while (p_last_update_time != p_last_update_time_copy ||
-                        n_last_update_time != n_last_update_time_copy);
-       }
-#else
-       p_last_update_time = prev->avg.last_update_time;
-       n_last_update_time = next->avg.last_update_time;
-#endif
        __update_load_avg_blocked_se(p_last_update_time, se);
        se->avg.last_update_time = n_last_update_time;
 }
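The combined retry loop that validated prev and next together collapses
into two independent tear-free reads. Checking each value/copy pair on its
own appears sufficient: the two timestamps belong to different cfs_rq's and
were never required to be mutually consistent, only individually untorn.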
        }
 
        decayed |= __update_load_avg_cfs_rq(now, cfs_rq);
-
-#ifndef CONFIG_64BIT
-       smp_wmb();
-       cfs_rq->load_last_update_time_copy = sa->last_update_time;
-#endif
-
+       u64_u32_store_copy(sa->last_update_time,
+                          cfs_rq->last_update_time_copy,
+                          sa->last_update_time);
        return decayed;
 }
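This is the writer paired with cfs_rq_last_update_time() above. Note that
sa->last_update_time is passed as both the value and the destination:
__update_load_avg_cfs_rq() has already advanced it, so the macro's store to
the value is effectively a no-op and the real work is publishing
last_update_time_copy behind the implied smp_wmb().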
 
        }
 }
 
-#ifndef CONFIG_64BIT
-static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
-{
-       u64 last_update_time_copy;
-       u64 last_update_time;
-
-       do {
-               last_update_time_copy = cfs_rq->load_last_update_time_copy;
-               smp_rmb();
-               last_update_time = cfs_rq->avg.last_update_time;
-       } while (last_update_time != last_update_time_copy);
-
-       return last_update_time;
-}
-#else
-static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
-{
-       return cfs_rq->avg.last_update_time;
-}
-#endif
-
 /*
  * Synchronize entity load avg of dequeued entity without locking
  * the previous rq.
        if (READ_ONCE(p->__state) == TASK_WAKING) {
                struct sched_entity *se = &p->se;
                struct cfs_rq *cfs_rq = cfs_rq_of(se);
-               u64 min_vruntime;
-
-#ifndef CONFIG_64BIT
-               u64 min_vruntime_copy;
-
-               do {
-                       min_vruntime_copy = cfs_rq->min_vruntime_copy;
-                       smp_rmb();
-                       min_vruntime = cfs_rq->min_vruntime;
-               } while (min_vruntime != min_vruntime_copy);
-#else
-               min_vruntime = cfs_rq->min_vruntime;
-#endif
 
-               se->vruntime -= min_vruntime;
+               se->vruntime -= u64_u32_load(cfs_rq->min_vruntime);
        }
 
        if (p->on_rq == TASK_ON_RQ_MIGRATING) {
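migrate_task_rq_fair() can run without the previous rq's lock in the
TASK_WAKING case, so this 64-bit read can race with update_min_vruntime()
on another CPU; u64_u32_load() pastes in min_vruntime_copy to make the read
tear-free on 32-bit, and degenerates to a plain load on 64-bit.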
 void init_cfs_rq(struct cfs_rq *cfs_rq)
 {
        cfs_rq->tasks_timeline = RB_ROOT_CACHED;
-       cfs_rq->min_vruntime = (u64)(-(1LL << 20));
-#ifndef CONFIG_64BIT
-       cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
-#endif
+       u64_u32_store(cfs_rq->min_vruntime, (u64)(-(1LL << 20)));
 #ifdef CONFIG_SMP
        raw_spin_lock_init(&cfs_rq->removed.lock);
 #endif
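At init time there are no concurrent readers (the open-coded sequence this
replaces had no smp_wmb() at all), so the barrier implied by
u64_u32_store() is not strictly needed here; using the macro anyway routes
every writer of the min_vruntime/min_vruntime_copy pair through one helper.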
 
 
 #endif /* CONFIG_CGROUP_SCHED */
 
+/*
+ * u64_u32_load/u64_u32_store
+ *
+ * Use a copy of a u64 value to protect against data races. This is
+ * only applicable to 32-bit architectures.
+ */
+#ifdef CONFIG_64BIT
+# define u64_u32_load_copy(var, copy)       var
+# define u64_u32_store_copy(var, copy, val) (var = val)
+#else
+# define u64_u32_load_copy(var, copy)                                  \
+({                                                                     \
+       u64 __val, __val_copy;                                          \
+       do {                                                            \
+               __val_copy = copy;                                      \
+               /*                                                      \
+                * paired with u64_u32_store_copy(), ordering access    \
+                * to var and copy.                                     \
+                */                                                     \
+               smp_rmb();                                              \
+               __val = var;                                            \
+       } while (__val != __val_copy);                                  \
+       __val;                                                          \
+})
+# define u64_u32_store_copy(var, copy, val)                            \
+do {                                                                   \
+       typeof(val) __val = (val);                                      \
+       var = __val;                                                    \
+       /*                                                              \
+        * paired with u64_u32_load_copy(), ordering access to var and  \
+        * copy.                                                        \
+        */                                                             \
+       smp_wmb();                                                      \
+       copy = __val;                                                   \
+} while (0)
+#endif
+# define u64_u32_load(var)      u64_u32_load_copy(var, var##_copy)
+# define u64_u32_store(var, val) u64_u32_store_copy(var, var##_copy, val)
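The pattern is essentially a single-writer seqcount: the writer publishes
the value and then a copy behind a write barrier, and the reader retries
until the two agree. Below is a minimal user-space sketch of the same idea
(an illustration, not kernel code): the struct and function names are
invented, C11 fences stand in for smp_wmb()/smp_rmb(), and a faithful
version would use relaxed atomic accesses (the kernel's
READ_ONCE()/WRITE_ONCE()) rather than the plain loads and stores kept here
to mirror the macros' shape.

        #include <stdatomic.h>
        #include <stdint.h>
        #include <stdio.h>

        struct u64_pair {
                uint64_t val;
                uint64_t val_copy;      /* plays the role of var##_copy */
        };

        /* writer side: the value first, then the copy, ordered by a fence */
        static void pair_store(struct u64_pair *p, uint64_t v)
        {
                p->val = v;
                atomic_thread_fence(memory_order_release);  /* ~smp_wmb() */
                p->val_copy = v;
        }

        /* reader side: retry until value and copy agree */
        static uint64_t pair_load(const struct u64_pair *p)
        {
                uint64_t v, c;

                do {
                        c = p->val_copy;
                        atomic_thread_fence(memory_order_acquire);  /* ~smp_rmb() */
                        v = p->val;
                } while (v != c);       /* a torn or in-flight update mismatches */

                return v;
        }

        int main(void)
        {
                struct u64_pair p = { 0, 0 };

                pair_store(&p, 1ULL << 33);     /* wider than 32 bits, may tear */
                printf("%llu\n", (unsigned long long)pair_load(&p));
                return 0;
        }

The short u64_u32_load()/u64_u32_store() forms also impose a naming
contract through var##_copy: the copy must sit next to the value under a
_copy suffix, as min_vruntime/min_vruntime_copy does; values whose copy
lives in a different structure, like cfs_rq->avg.last_update_time, must use
the explicit *_copy() forms.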
+
 /* CFS-related fields in a runqueue */
 struct cfs_rq {
        struct load_weight      load;
         */
        struct sched_avg        avg;
 #ifndef CONFIG_64BIT
-       u64                     load_last_update_time_copy;
+       u64                     last_update_time_copy;
 #endif
        struct {
                raw_spinlock_t  lock ____cacheline_aligned;