Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b1169cc6 authored by Steven Rostedt (Red Hat), committed by Steven Rostedt
Browse files

tracing: Remove mock up poll wait function



The ring buffer now has a built-in way to wake up readers
when there's data, using irq_work so that it is safe to do
in any context. But the tracing code was still using the old
"poor man's" wait polling that checks every 1/10 of a second
to see if it should wake up a waiter. This makes the latency
for a wake up excruciatingly long. No need to do that anymore.

Completely remove the different wait_poll types from the tracers
and have them all use the default one now.

Reported-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent f4874261
Loading
Loading
Loading
Loading
+4 −25
Original line number Diff line number Diff line
@@ -1085,7 +1085,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
static void wait_on_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
@@ -1202,8 +1202,6 @@ int register_tracer(struct tracer *type)
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

	ret = run_tracer_selftest(type);
	if (ret < 0)
@@ -4207,25 +4205,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
	return trace_poll(iter, filp, poll_table);
}

/*
 * This is a make-shift waitqueue.
 * A tracer might use this callback on some rare cases:
 *
 *  1) the current tracer might hold the runqueue lock when it wakes up
 *     a reader, hence a deadlock (sched, function, and function graph tracers)
 *  2) the function tracers, trace all functions, we don't want
 *     the overhead of calling wake_up and friends
 *     (and tracing them too)
 *
 *     Anyway, this is really very primitive wakeup.
 */
void poll_wait_pipe(struct trace_iterator *iter)
{
	set_current_state(TASK_INTERRUPTIBLE);
	/* sleep for 100 msecs, and try again. */
	schedule_timeout(HZ / 10);
}

/* Must be called with trace_types_lock mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
@@ -4251,7 +4230,7 @@ static int tracing_wait_pipe(struct file *filp)

		mutex_unlock(&iter->mutex);

		iter->trace->wait_pipe(iter);
		wait_on_pipe(iter);

		mutex_lock(&iter->mutex);

@@ -5179,7 +5158,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
				goto out_unlock;
			}
			mutex_unlock(&trace_types_lock);
			iter->trace->wait_pipe(iter);
			wait_on_pipe(iter);
			mutex_lock(&trace_types_lock);
			if (signal_pending(current)) {
				size = -EINTR;
@@ -5390,7 +5369,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		iter->trace->wait_pipe(iter);
		wait_on_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
+0 −4
Original line number Diff line number Diff line
@@ -338,7 +338,6 @@ struct tracer_flags {
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
@@ -357,7 +356,6 @@ struct tracer {
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*wait_pipe)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
@@ -566,8 +564,6 @@ void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void poll_wait_pipe(struct trace_iterator *iter);

void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
+0 −1
Original line number Diff line number Diff line
@@ -252,7 +252,6 @@ static struct tracer function_trace __tracer_data =
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
+0 −1
Original line number Diff line number Diff line
@@ -1505,7 +1505,6 @@ static struct tracer graph_trace __tracer_data = {
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.wait_pipe	= poll_wait_pipe,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
+0 −1
Original line number Diff line number Diff line
@@ -91,7 +91,6 @@ struct tracer nop_trace __read_mostly =
	.name		= "nop",
	.init		= nop_trace_init,
	.reset		= nop_trace_reset,
	.wait_pipe	= poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_nop,
#endif
Loading