static inline void tracepoint_synchronize_unregister(void)
{
synchronize_srcu(&tracepoint_srcu);
- synchronize_sched();
+ synchronize_rcu();
}
#else
static inline void tracepoint_synchronize_unregister(void)
{
/*
* This function is just a stub to implement a hard force
- * of synchronize_sched(). This requires synchronizing
+ * of synchronize_rcu(). This requires synchronizing
* tasks even in userspace and idle.
*
* Yes, function tracing is rude.
ftrace_profile_enabled = 0;
/*
* unregister_ftrace_profiler calls stop_machine
- * so this acts like an synchronize_sched.
+ * so this acts like a synchronize_rcu.
*/
unregister_ftrace_profiler();
}
/*
* Some of the ops may be dynamically allocated,
- * they are freed after a synchronize_sched().
+ * they are freed after a synchronize_rcu().
*/
preempt_disable_notrace();
{
if (!hash || hash == EMPTY_HASH)
return;
- call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
+ call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}
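/*
 * Illustrative sketch, not part of this patch: the deferred-free pattern
 * used for the hash above, with hypothetical "example_" names.  The object
 * embeds a struct rcu_head; the callback recovers it with container_of()
 * and frees it only after a full RCU grace period, so lookups done under
 * preempt_disable_notrace() can never see freed memory.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_hash {
	unsigned long	count;
	struct rcu_head	rcu;
};

static void example_hash_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct example_hash, rcu));
}

static void example_hash_put(struct example_hash *hash)
{
	if (!hash)
		return;
	/* Queue the free; safe to call from atomic context. */
	call_rcu(&hash->rcu, example_hash_free_rcu);
}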
void ftrace_free_filter(struct ftrace_ops *ops)
* the ip is not in the ops->notrace_hash.
*
* This needs to be called with preemption disabled as
- * the hashes are freed with call_rcu_sched().
+ * the hashes are freed with call_rcu().
*/
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
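/*
 * Illustrative sketch, not part of this patch: the reader side that the
 * comment above depends on.  After the RCU flavor consolidation a
 * preempt-disabled region is a full RCU read-side critical section, so
 * the call_rcu() free above cannot run while this executes.  Builds on
 * the hypothetical struct example_hash from the previous sketch.
 */
static struct example_hash __rcu *example_current_hash;

static unsigned long example_hash_count(void)
{
	struct example_hash *hash;
	unsigned long count = 0;

	preempt_disable_notrace();
	hash = rcu_dereference_sched(example_current_hash);
	if (hash)
		count = hash->count;
	preempt_enable_notrace();

	return count;
}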
if (ftrace_enabled && !ftrace_hash_empty(hash))
ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
&old_hash_ops);
- synchronize_sched();
+ synchronize_rcu();
hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
hlist_del(&entry->hlist);
mutex_unlock(&graph_lock);
/* Wait till all users are no longer using the old hash */
- synchronize_sched();
+ synchronize_rcu();
free_ftrace_hash(old_hash);
}
list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
if (mod_map->mod == mod) {
list_del_rcu(&mod_map->list);
- call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map);
+ call_rcu(&mod_map->rcu, ftrace_free_mod_map);
break;
}
}
struct ftrace_mod_map *mod_map;
const char *ret = NULL;
- /* mod_map is freed via call_rcu_sched() */
+ /* mod_map is freed via call_rcu() */
preempt_disable();
list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
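/*
 * Illustrative sketch, not part of this patch: the RCU-protected list
 * pattern used for the mod_map entries above, with hypothetical
 * "example_" names.  Removal unlinks with list_del_rcu() and defers the
 * free with call_rcu(); lookups walk the list with
 * list_for_each_entry_rcu() inside a preempt-off (RCU read-side) section.
 */
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_map {
	struct list_head	list;
	unsigned long		start, end;
	struct rcu_head		rcu;
};

static LIST_HEAD(example_maps);

static void example_map_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct example_map, rcu));
}

/* Caller holds the mutex that serializes updates to example_maps. */
static void example_map_remove(struct example_map *map)
{
	list_del_rcu(&map->list);
	call_rcu(&map->rcu, example_map_free_rcu);
}

static bool example_map_contains(unsigned long addr)
{
	struct example_map *map;
	bool hit = false;

	preempt_disable();
	list_for_each_entry_rcu(map, &example_maps, list) {
		if (addr >= map->start && addr < map->end) {
			hit = true;
			break;
		}
	}
	preempt_enable();
	return hit;
}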
/*
* Some of the ops may be dynamically allocated,
- * they must be freed after a synchronize_sched().
+ * they must be freed after a synchronize_rcu().
*/
preempt_disable_notrace();
rcu_assign_pointer(tr->function_pids, NULL);
/* Wait till all users are no longer using pid filtering */
- synchronize_sched();
+ synchronize_rcu();
trace_free_pid_list(pid_list);
}
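/*
 * Illustrative sketch, not part of this patch: the publish/retire sequence
 * the two pid-filter hunks above follow, with hypothetical "example_"
 * names.  The new list is published with rcu_assign_pointer(); the old one
 * is freed only after synchronize_rcu(), once every task that might still
 * be walking it under a preempt-off region has finished.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_pid_list {
	unsigned long	nr_pids;
};

static struct example_pid_list __rcu *example_pids;

/* Caller serializes updaters (e.g. holds the relevant mutex). */
static void example_set_pid_list(struct example_pid_list *new_list)
{
	struct example_pid_list *old;

	old = rcu_dereference_protected(example_pids, 1);
	rcu_assign_pointer(example_pids, new_list);
	if (old) {
		/* Wait till all users are no longer using the old list. */
		synchronize_rcu();
		kfree(old);
	}
}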
rcu_assign_pointer(tr->function_pids, pid_list);
if (filtered_pids) {
- synchronize_sched();
+ synchronize_rcu();
trace_free_pid_list(filtered_pids);
} else if (pid_list) {
/* Register a probe to set whether to ignore the tracing of a task */
* There could have been a race between checking
* record_disable and incrementing it.
*/
- synchronize_sched();
+ synchronize_rcu();
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
rb_check_pages(cpu_buffer);
* This prevents all writes to the buffer. Any attempt to write
* to the buffer after this will fail and return NULL.
*
- * The caller should call synchronize_sched() after this.
+ * The caller should call synchronize_rcu() after this.
*/
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
* This prevents all writes to the buffer. Any attempt to write
* to the buffer after this will fail and return NULL.
*
- * The caller should call synchronize_sched() after this.
+ * The caller should call synchronize_rcu() after this.
*/
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
void
ring_buffer_read_prepare_sync(void)
{
- synchronize_sched();
+ synchronize_rcu();
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
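/*
 * Illustrative sketch, not part of this patch: the disable/synchronize
 * pattern the record_disable comments above describe, with hypothetical
 * "example_" names.  Writers check the counter inside a preempt-off
 * region and bail out if it is set, so one synchronize_rcu() after
 * bumping the counter guarantees no writer is still in flight.
 */
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>

struct example_buf {
	atomic_t	record_disabled;
	/* ... buffer pages ... */
};

static int example_buf_write(struct example_buf *buf, int val)
{
	int ret = -EBUSY;

	preempt_disable_notrace();
	if (!atomic_read(&buf->record_disabled)) {
		/* ... commit val to the buffer ... */
		ret = 0;
	}
	preempt_enable_notrace();
	return ret;
}

static void example_buf_quiesce(struct example_buf *buf)
{
	atomic_inc(&buf->record_disabled);
	/* Wait for any writer that saw the counter as zero. */
	synchronize_rcu();
	/* Now the buffer can be reset or torn down safely. */
}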
atomic_inc(&cpu_buffer->record_disabled);
/* Make sure all commits have finished */
- synchronize_sched();
+ synchronize_rcu();
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
goto out;
/*
- * We can't do a synchronize_sched here because this
+ * We can't do a synchronize_rcu here because this
* function can be called in atomic context.
* Normally this will be called from the same CPU as cpu.
* If not it's up to the caller to protect this.
ring_buffer_record_disable(buffer);
/* Make sure all commits have finished */
- synchronize_sched();
+ synchronize_rcu();
ring_buffer_reset_cpu(buffer, cpu);
ring_buffer_record_enable(buffer);
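/*
 * Illustrative sketch, not part of this patch: synchronize_rcu() may
 * sleep, so code running in atomic context (as the comment above notes)
 * must defer instead of waiting, e.g. with call_rcu() or kfree_rcu().
 * "example_" names are hypothetical.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj {
	int		data;
	struct rcu_head	rcu;
};

static void example_retire_obj_atomic(struct example_obj *obj)
{
	/* Never sleeps: just queues the object for freeing after a grace period. */
	kfree_rcu(obj, rcu);
}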
ring_buffer_record_disable(buffer);
/* Make sure all commits have finished */
- synchronize_sched();
+ synchronize_rcu();
buf->time_start = buffer_ftrace_now(buf, buf->cpu);
preempt_enable();
/* Wait for all current users to finish */
- synchronize_sched();
+ synchronize_rcu();
for_each_tracing_cpu(cpu) {
free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
if (tr->current_trace->reset)
tr->current_trace->reset(tr);
- /* Current trace needs to be nop_trace before synchronize_sched */
+ /* Current trace needs to be nop_trace before synchronize_rcu */
tr->current_trace = &nop_trace;
#ifdef CONFIG_TRACER_MAX_TRACE
* The update_max_tr is called from interrupts disabled
- * so a synchronized_sched() is sufficient.
+ * so a synchronize_rcu() is sufficient.
*/
- synchronize_sched();
+ synchronize_rcu();
free_snapshot(tr);
}
#endif
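/*
 * Illustrative sketch, not part of this patch: after the RCU flavor
 * consolidation, a region that runs with interrupts disabled is also a
 * full RCU read-side critical section, which is why a single
 * synchronize_rcu() is sufficient above.  "example_" names are
 * hypothetical.
 */
#include <linux/irqflags.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_snap {
	void *pages;
};

static struct example_snap __rcu *example_snapshot;

static void example_touch_snapshot(void)
{
	struct example_snap *snap;
	unsigned long flags;

	local_irq_save(flags);		/* counts as an RCU reader */
	snap = rcu_dereference_sched(example_snapshot);
	if (snap) {
		/* ... swap buffers into the snapshot ... */
	}
	local_irq_restore(flags);
}

/* Caller serializes updaters. */
static void example_free_snapshot(void)
{
	struct example_snap *old = rcu_dereference_protected(example_snapshot, 1);

	rcu_assign_pointer(example_snapshot, NULL);
	synchronize_rcu();		/* wait out irq-disabled users */
	kfree(old);
}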
/*
* The calls can still be using the old filters.
- * Do a synchronize_sched() and to ensure all calls are
+ * Do a synchronize_rcu() to ensure all calls are
* done with them before we free them.
*/
tracepoint_synchronize_unregister();
if (filter) {
/*
* No event actually uses the system filter
- * we can free it without synchronize_sched().
+ * we can free it without synchronize_rcu().
*/
__free_filter(system->filter);
system->filter = filter;
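/*
 * Illustrative sketch, not part of this patch: the unpublish-then-free
 * sequence that tracepoint_synchronize_unregister() supports above, with
 * hypothetical "example_" names.  Because tracepoint probes may run under
 * either srcu_read_lock() or a preempt-off region, the helper waits for
 * both an SRCU and a regular RCU grace period before the old filter can
 * be freed.
 */
#include <linux/slab.h>
#include <linux/tracepoint.h>

struct example_filter {
	char *pattern;
};

static struct example_filter __rcu *example_active_filter;

/* Caller serializes updaters. */
static void example_replace_filter(struct example_filter *new_filter)
{
	struct example_filter *old;

	old = rcu_dereference_protected(example_active_filter, 1);
	rcu_assign_pointer(example_active_filter, new_filter);
	/* Wait for every probe that might still use the old filter. */
	tracepoint_synchronize_unregister();
	if (old) {
		kfree(old->pattern);
		kfree(old);
	}
}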
* event_call related objects, which will be accessed in
* the kprobe_trace_func/kretprobe_trace_func.
*/
- synchronize_sched();
+ synchronize_rcu();
kfree(link); /* Ignored if link == NULL */
}
while (early_probes) {
tmp = early_probes;
early_probes = tmp->next;
- call_rcu_sched(tmp, rcu_free_old_probes);
+ call_rcu(tmp, rcu_free_old_probes);
}
return 0;
* cover both cases. So let us chain the SRCU and sched RCU
* callbacks to wait for both grace periods.
*/
- call_rcu_sched(&tp_probes->rcu, rcu_free_old_probes);
+ call_rcu(&tp_probes->rcu, rcu_free_old_probes);
}
}
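/*
 * Illustrative sketch, not part of this patch: chaining grace periods as
 * the comment above describes.  The RCU callback does not free the
 * object; it hands the same rcu_head to SRCU, so the memory is released
 * only after both a regular RCU and an SRCU grace period have elapsed.
 * "example_" names and example_srcu are hypothetical.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(example_srcu);

struct example_probes {
	struct rcu_head rcu;
	/* ... probe array ... */
};

static void example_srcu_free(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct example_probes, rcu));
}

static void example_rcu_free(struct rcu_head *rcu)
{
	/* Re-queue on SRCU; the rcu_head can be reused once this callback runs. */
	call_srcu(&example_srcu, rcu, example_srcu_free);
}

static void example_release_probes(struct example_probes *p)
{
	call_rcu(&p->rcu, example_rcu_free);
}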