From: Frederic Weisbecker <fweisbec@gmail.com>
Date: Thu, 2 May 2013 15:37:49 +0000 (+0200)
Subject: Merge commit '8700c95adb03' into timers/nohz
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=c032862fba51a3ca504752d3a25186b324c5ce83;p=linux.git

Merge commit '8700c95adb03' into timers/nohz

The full dynticks tree needs the latest RCU and sched
upstream updates in order to resolve some dependencies.

Merge in a common upstream merge point that contains
these updates.

Conflicts:
	include/linux/perf_event.h
	kernel/rcutree.h
	kernel/rcutree_plugin.h

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
---

c032862fba51a3ca504752d3a25186b324c5ce83
diff --cc include/linux/perf_event.h
index 0140830225e21,e0373d26c2445..f463a46424e24
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@@ -799,12 -788,12 +788,18 @@@ static inline int __perf_event_disable(
  static inline void perf_event_task_tick(void)				{ }
  #endif
  
 +#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
 +extern bool perf_event_can_stop_tick(void);
 +#else
 +static inline bool perf_event_can_stop_tick(void)			{ return true; }
 +#endif
 +
+ #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
+ extern void perf_restore_debug_store(void);
+ #else
+ static inline void perf_restore_debug_store(void)			{ }
+ #endif
+ 
  #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
  
  /*
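
The perf_event_can_stop_tick() declaration added above follows the usual
compile-time stub pattern: when CONFIG_PERF_EVENTS or CONFIG_NO_HZ_FULL is
not set, the function collapses to a static inline returning true, so a
full-dynticks caller can combine the verdicts of several subsystems without
any #ifdefs of its own. A minimal standalone sketch of that pattern follows;
the names and the simplified caller are illustrative, not the kernel's.

    /* stub_pattern.c - build with or without -DHAVE_PERF */
    #include <stdbool.h>
    #include <stdio.h>

    #ifdef HAVE_PERF
    static bool perf_can_stop_tick(void)
    {
        /* pretend perf still has an active event that needs the tick */
        return false;
    }
    #else
    /* stub: with perf compiled out, it never vetoes stopping the tick */
    static inline bool perf_can_stop_tick(void) { return true; }
    #endif

    static bool sched_can_stop_tick(void)
    {
        return true;    /* assume a single runnable task */
    }

    /* the caller needs no #ifdef: each subsystem contributes a predicate */
    static bool can_stop_tick(void)
    {
        return sched_can_stop_tick() && perf_can_stop_tick();
    }

    int main(void)
    {
        printf("can stop tick: %s\n", can_stop_tick() ? "yes" : "no");
        return 0;
    }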
diff --cc init/Kconfig
index 8f97a74077147,4367e1379002d..66f67afad4fad
--- a/init/Kconfig
+++ b/init/Kconfig
@@@ -580,16 -576,19 +576,19 @@@ config RCU_FANOUT_EXAC
  
  config RCU_FAST_NO_HZ
  	bool "Accelerate last non-dyntick-idle CPU's grace periods"
 -	depends on NO_HZ && SMP
 +	depends on NO_HZ_COMMON && SMP
  	default n
  	help
- 	  This option causes RCU to attempt to accelerate grace periods in
- 	  order to allow CPUs to enter dynticks-idle state more quickly.
- 	  On the other hand, this option increases the overhead of the
- 	  dynticks-idle checking, thus degrading scheduling latency.
+ 	  This option permits CPUs to enter dynticks-idle state even if
+ 	  they have RCU callbacks queued, and prevents RCU from waking
+ 	  these CPUs up more than roughly once every four jiffies (by
+ 	  default, you can adjust this using the rcutree.rcu_idle_gp_delay
+ 	  parameter), thus improving energy efficiency.  On the other
+ 	  hand, this option increases the duration of RCU grace periods,
+ 	  for example, slowing down synchronize_rcu().
  
- 	  Say Y if energy efficiency is critically important, and you don't
- 	  	care about real-time response.
+ 	  Say Y if energy efficiency is critically important, and you
+ 	  	don't care about increased grace-period durations.
  
  	  Say N if you are unsure.
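
The updated help text names a runtime knob for the wakeup interval.
Assuming rcutree.rcu_idle_gp_delay is accepted as an ordinary module
parameter on the kernel command line, raising it from the default of
roughly four jiffies would look something like:

    rcutree.rcu_idle_gp_delay=16

Larger values mean fewer wakeups of dynticks-idle CPUs at the cost of even
longer grace periods, the same trade-off the help text already describes.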
  
diff --cc kernel/rcutree.h
index 38acc49da2c6c,14ee40795d6fe..da77a8f57ff95
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@@ -529,16 -526,18 +526,18 @@@ static void print_cpu_stall_info(struc
  static void print_cpu_stall_info_end(void);
  static void zero_cpu_stall_ticks(struct rcu_data *rdp);
  static void increment_cpu_stall_ticks(void);
+ static int rcu_nocb_needs_gp(struct rcu_state *rsp);
+ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
+ static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
+ static void rcu_init_one_nocb(struct rcu_node *rnp);
 -static bool is_nocb_cpu(int cpu);
  static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
  			    bool lazy);
  static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
  				      struct rcu_data *rdp);
- static bool nocb_cpu_expendable(int cpu);
  static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
  static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
- static void init_nocb_callback_list(struct rcu_data *rdp);
- static void __init rcu_init_nocb(void);
 +static void rcu_kick_nohz_cpu(int cpu);
+ static bool init_nocb_callback_list(struct rcu_data *rdp);
  
  #endif /* #ifndef RCU_TREE_NONCORE */
  
diff --cc kernel/rcutree_plugin.h
index 0cd91cc18db41,d084ae3f281c2..71bd7337d0ccf
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@@ -2166,8 -2010,49 +2011,49 @@@ static int __init parse_rcu_nocb_poll(c
  }
  early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
  
+ /*
+  * Do any no-CBs CPUs need another grace period?
+  *
+  * Interrupts must be disabled.  If the caller does not hold the root
+  * rcu_node structure's ->lock, the results are advisory only.
+  */
+ static int rcu_nocb_needs_gp(struct rcu_state *rsp)
+ {
+ 	struct rcu_node *rnp = rcu_get_root(rsp);
+ 
+ 	return rnp->need_future_gp[(ACCESS_ONCE(rnp->completed) + 1) & 0x1];
+ }
+ 
+ /*
+  * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
+  * grace period.
+  */
+ static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
+ {
+ 	wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
+ }
+ 
+ /*
+  * Set the root rcu_node structure's ->need_future_gp field
+  * based on the sum of those of all rcu_node structures.  This does
+  * double-count the root rcu_node structure's requests, but this
+  * is necessary to handle the possibility of a rcu_nocb_kthread()
+  * having awakened during the time that the rcu_node structures
+  * were being updated for the end of the previous grace period.
+  */
+ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
+ {
+ 	rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
+ }
+ 
+ static void rcu_init_one_nocb(struct rcu_node *rnp)
+ {
+ 	init_waitqueue_head(&rnp->nocb_gp_wq[0]);
+ 	init_waitqueue_head(&rnp->nocb_gp_wq[1]);
+ }
+ 
  /* Is the specified CPU a no-CBs CPU? */
 -static bool is_nocb_cpu(int cpu)
 +bool rcu_is_nocb_cpu(int cpu)
  {
  	if (have_rcu_nocb_mask)
  		return cpumask_test_cpu(cpu, rcu_nocb_mask);
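
The helpers added above key their two-slot arrays off grace-period parity:
requests for the next grace period accumulate in slot (completed + 1) & 0x1,
and once that period ends ->completed has advanced, so rcu_nocb_gp_cleanup()
reaches the very same slot as completed & 0x1 when waking the waiters. A
small standalone sketch of the scheme; the single global counter and the
names are illustrative, not the kernel's per-node data.

    #include <stdio.h>

    static unsigned long completed;     /* number of the last finished GP */
    static int need_future_gp[2];       /* requests, indexed by GP parity */

    static void request_next_gp(void)
    {
        need_future_gp[(completed + 1) & 0x1]++;    /* slot for the next GP */
    }

    static void finish_gp(void)
    {
        completed++;    /* the "next" grace period has now completed */
        /* the slot that collected its requests is now (completed & 0x1) */
        printf("GP %lu done, served %d request(s)\n",
               completed, need_future_gp[completed & 0x1]);
        need_future_gp[completed & 0x1] = 0;
    }

    int main(void)
    {
        request_next_gp();
        request_next_gp();
        finish_gp();    /* GP 1 done, served 2 request(s) */
        request_next_gp();
        finish_gp();    /* GP 2 done, served 1 request(s) */
        return 0;
    }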
@@@ -2225,9 -2110,16 +2111,16 @@@ static bool __call_rcu_nocb(struct rcu_
  			    bool lazy)
  {
  
 -	if (!is_nocb_cpu(rdp->cpu))
 +	if (!rcu_is_nocb_cpu(rdp->cpu))
  		return 0;
  	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
+ 	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
+ 		trace_rcu_kfree_callback(rdp->rsp->name, rhp,
+ 					 (unsigned long)rhp->func,
+ 					 rdp->qlen_lazy, rdp->qlen);
+ 	else
+ 		trace_rcu_callback(rdp->rsp->name, rhp,
+ 				   rdp->qlen_lazy, rdp->qlen);
  	return 1;
  }
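
The tracing added above tells kfree_rcu() callbacks apart by inspecting the
callback pointer itself: by convention kfree_rcu() stores the offset of the
rcu_head inside the enclosing object in ->func, and because such offsets are
small, __is_kfree_rcu_offset() can distinguish them from real function
addresses. A standalone sketch of that encoding; the local struct
definitions and the 4096 cutoff are an assumed illustration of the
convention, not taken from this diff.

    #include <stdio.h>
    #include <stddef.h>

    struct rcu_head {
        struct rcu_head *next;
        void (*func)(struct rcu_head *);
    };

    struct foo {
        int payload;
        struct rcu_head rcu;
    };

    /* small "function pointer" values are really rcu_head offsets */
    static int is_kfree_offset(unsigned long f)
    {
        return f < 4096;
    }

    int main(void)
    {
        /* what a kfree_rcu()-style caller would store in ->func */
        unsigned long enc = offsetof(struct foo, rcu);

        printf("offset %lu is a %s\n", enc,
               is_kfree_offset(enc) ? "kfree-style callback"
                                    : "real function address");
        return 0;
    }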
  
@@@ -2448,22 -2282,35 +2283,30 @@@ static bool init_nocb_callback_list(str
  {
  	if (rcu_nocb_mask == NULL ||
  	    !cpumask_test_cpu(rdp->cpu, rcu_nocb_mask))
- 		return;
+ 		return false;
  	rdp->nxttail[RCU_NEXT_TAIL] = NULL;
+ 	return true;
+ }
+ 
+ #else /* #ifdef CONFIG_RCU_NOCB_CPU */
+ 
+ static int rcu_nocb_needs_gp(struct rcu_state *rsp)
+ {
+ 	return 0;
  }
  
- /* Initialize the ->call_remote fields in the rcu_state structures. */
- static void __init rcu_init_nocb(void)
+ static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
  {
- #ifdef CONFIG_PREEMPT_RCU
- 	rcu_preempt_state.call_remote = call_rcu_preempt_remote;
- #endif /* #ifdef CONFIG_PREEMPT_RCU */
- 	rcu_bh_state.call_remote = call_rcu_bh_remote;
- 	rcu_sched_state.call_remote = call_rcu_sched_remote;
  }
  
- #else /* #ifdef CONFIG_RCU_NOCB_CPU */
+ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
+ {
+ }
+ 
+ static void rcu_init_one_nocb(struct rcu_node *rnp)
+ {
+ }
  
 -static bool is_nocb_cpu(int cpu)
 -{
 -	return false;
 -}
 -
  static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
  			    bool lazy)
  {
diff --cc kernel/sched/sched.h
index eb363aa5d83cf,4c225c4c7111d..24dc298977493
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@@ -5,9 -5,9 +5,10 @@@
  #include <linux/mutex.h>
  #include <linux/spinlock.h>
  #include <linux/stop_machine.h>
 +#include <linux/tick.h>
  
  #include "cpupri.h"
+ #include "cpuacct.h"
  
  extern __read_mostly int scheduler_running;
  
diff --cc kernel/softirq.c
index 8b1446d4a4dbb,14d7758074aad..51a09d56e78b8
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@@ -323,33 -323,12 +323,25 @@@ void irq_enter(void
  
  static inline void invoke_softirq(void)
  {
- 	if (!force_irqthreads) {
- #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
+ 	if (!force_irqthreads)
  		__do_softirq();
- #else
- 		do_softirq();
- #endif
- 	} else {
- 		__local_bh_disable((unsigned long)__builtin_return_address(0),
- 				SOFTIRQ_OFFSET);
+ 	else
  		wakeup_softirqd();
- 		__local_bh_enable(SOFTIRQ_OFFSET);
- 	}
  }
  
 +static inline void tick_irq_exit(void)
 +{
 +#ifdef CONFIG_NO_HZ_COMMON
 +	int cpu = smp_processor_id();
 +
 +	/* Make sure that timer wheel updates are propagated */
 +	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
 +		if (!in_interrupt())
 +			tick_nohz_irq_exit();
 +	}
 +#endif
 +}
 +
  /*
   * Exit an interrupt context. Process softirqs if needed and possible:
   */
@@@ -361,9 -346,12 +359,8 @@@ void irq_exit(void
  	if (!in_interrupt() && local_softirq_pending())
  		invoke_softirq();
  
 -#ifdef CONFIG_NO_HZ
 -	/* Make sure that timer wheel updates are propagated */
 -	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
 -		tick_nohz_irq_exit();
 -#endif
 +	tick_irq_exit();
  	rcu_irq_exit();
- 	sched_preempt_enable_no_resched();
  }
  
  /*