Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0a987751 authored by Arnaldo Carvalho de Melo, committed by Ingo Molnar
Browse files

ring_buffer: remove unused flags parameter



Impact: API change, cleanup

From ring_buffer_{lock_reserve,unlock_commit}.

$ codiff /tmp/vmlinux.before /tmp/vmlinux.after
linux-2.6-tip/kernel/trace/trace.c:
  trace_vprintk              |  -14
  trace_graph_return         |  -14
  trace_graph_entry          |  -10
  trace_function             |   -8
  __ftrace_trace_stack       |   -8
  ftrace_trace_userstack     |   -8
  tracing_sched_switch_trace |   -8
  ftrace_trace_special       |  -12
  tracing_sched_wakeup_trace |   -8
 9 functions changed, 90 bytes removed, diff: -90

linux-2.6-tip/block/blktrace.c:
  __blk_add_trace |   -1
 1 function changed, 1 bytes removed, diff: -1

/tmp/vmlinux.after:
 10 functions changed, 91 bytes removed, diff: -91

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Acked-by: Frédéric Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent dac74940
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -165,7 +165,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct blk_io_trace *t;
	unsigned long flags;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
@@ -191,7 +191,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		tracing_record_cmdline(current);

		event = ring_buffer_lock_reserve(blk_tr->buffer,
						 sizeof(*t) + pdu_len, &flags);
						 sizeof(*t) + pdu_len);
		if (!event)
			return;

@@ -241,11 +241,11 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tr) {
			ring_buffer_unlock_commit(blk_tr->buffer, event, flags);
			ring_buffer_unlock_commit(blk_tr->buffer, event);
			if (pid != 0 &&
			    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC) &&
			    (trace_flags & TRACE_ITER_STACKTRACE) != 0)
				__trace_stack(blk_tr, flags, 5, pc);
				__trace_stack(blk_tr, 0, 5, pc);
			trace_wake_up();
			return;
		}
+3 −6
Original line number Diff line number Diff line
@@ -74,13 +74,10 @@ void ring_buffer_free(struct ring_buffer *buffer);

int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);

struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer,
			 unsigned long length,
			 unsigned long *flags);
struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
						   unsigned long length);
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event,
			      unsigned long flags);
			      struct ring_buffer_event *event);
int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length, void *data);

+4 −8
Original line number Diff line number Diff line
@@ -272,13 +272,11 @@ void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
	struct ring_buffer_event *event;
	struct kmemtrace_alloc_entry *entry;
	struct trace_array *tr = kmemtrace_array;
	unsigned long irq_flags;

	if (!kmem_tracing_enabled)
		return;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
@@ -292,7 +290,7 @@ void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
	entry->gfp_flags = gfp_flags;
	entry->node	=	node;

	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ring_buffer_unlock_commit(tr->buffer, event);

	trace_wake_up();
}
@@ -305,13 +303,11 @@ void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
	struct ring_buffer_event *event;
	struct kmemtrace_free_entry *entry;
	struct trace_array *tr = kmemtrace_array;
	unsigned long irq_flags;

	if (!kmem_tracing_enabled)
		return;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
@@ -322,7 +318,7 @@ void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
	entry->call_site = call_site;
	entry->ptr = ptr;

	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ring_buffer_unlock_commit(tr->buffer, event);

	trace_wake_up();
}
+2 −7
Original line number Diff line number Diff line
@@ -1257,7 +1257,6 @@ static DEFINE_PER_CPU(int, rb_need_resched);
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 * @flags: a pointer to save the interrupt flags
 *
 * Returns a reseverd event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
@@ -1270,9 +1269,7 @@ static DEFINE_PER_CPU(int, rb_need_resched);
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer,
			 unsigned long length,
			 unsigned long *flags)
ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
@@ -1339,15 +1336,13 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 * ring_buffer_unlock_commit - commit a reserved
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event,
			      unsigned long flags)
			      struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();
+20 −36
Original line number Diff line number Diff line
@@ -783,14 +783,12 @@ trace_function(struct trace_array *tr,
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long irq_flags;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
@@ -798,7 +796,7 @@ trace_function(struct trace_array *tr,
	entry->ent.type			= TRACE_FN;
	entry->ip			= ip;
	entry->parent_ip		= parent_ip;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ring_buffer_unlock_commit(tr->buffer, event);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -809,20 +807,18 @@ static void __trace_graph_entry(struct trace_array *tr,
{
	struct ring_buffer_event *event;
	struct ftrace_graph_ent_entry *entry;
	unsigned long irq_flags;

	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
					 &irq_flags);
	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry));
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type			= TRACE_GRAPH_ENT;
	entry->graph_ent			= *trace;
	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
	ring_buffer_unlock_commit(global_trace.buffer, event);
}

static void __trace_graph_return(struct trace_array *tr,
@@ -832,20 +828,18 @@ static void __trace_graph_return(struct trace_array *tr,
{
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *entry;
	unsigned long irq_flags;

	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
					 &irq_flags);
	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry));
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type			= TRACE_GRAPH_RET;
	entry->ret				= *trace;
	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
	ring_buffer_unlock_commit(global_trace.buffer, event);
}
#endif

@@ -866,10 +860,8 @@ static void __ftrace_trace_stack(struct trace_array *tr,
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
@@ -884,7 +876,7 @@ static void __ftrace_trace_stack(struct trace_array *tr,
	trace.entries		= entry->caller;

	save_stack_trace(&trace);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ring_buffer_unlock_commit(tr->buffer, event);
#endif
}

@@ -912,13 +904,11 @@ static void ftrace_trace_userstack(struct trace_array *tr,
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;
	unsigned long irq_flags;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
@@ -933,7 +923,7 @@ static void ftrace_trace_userstack(struct trace_array *tr,
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ring_buffer_unlock_commit(tr->buffer, event);
#endif
}

@@ -950,10 +940,8 @@ ftrace_trace_special(void *__tr,
	struct ring_buffer_event *event;
	struct trace_array *tr = __tr;
	struct special_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
@@ -962,9 +950,9 @@ ftrace_trace_special(void *__tr,
	entry->arg1			= arg1;
	entry->arg2			= arg2;
	entry->arg3			= arg3;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, irq_flags, 4, pc);
	ftrace_trace_userstack(tr, irq_flags, pc);
	ring_buffer_unlock_commit(tr->buffer, event);
	ftrace_trace_stack(tr, 0, 4, pc);
	ftrace_trace_userstack(tr, 0, pc);

	trace_wake_up();
}
@@ -984,10 +972,8 @@ tracing_sched_switch_trace(struct trace_array *tr,
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					   &irq_flags);
	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
@@ -1000,7 +986,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
	entry->next_prio		= next->prio;
	entry->next_state		= next->state;
	entry->next_cpu	= task_cpu(next);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ring_buffer_unlock_commit(tr->buffer, event);
	ftrace_trace_stack(tr, flags, 5, pc);
	ftrace_trace_userstack(tr, flags, pc);
}
@@ -1013,10 +999,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					   &irq_flags);
	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
@@ -1029,7 +1013,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
	entry->next_prio		= wakee->prio;
	entry->next_state		= wakee->state;
	entry->next_cpu			= task_cpu(wakee);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ring_buffer_unlock_commit(tr->buffer, event);
	ftrace_trace_stack(tr, flags, 6, pc);
	ftrace_trace_userstack(tr, flags, pc);

@@ -2841,7 +2825,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
	trace_buf[len] = 0;

	size = sizeof(*entry) + len + 1;
	event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
	event = ring_buffer_lock_reserve(tr->buffer, size);
	if (!event)
		goto out_unlock;
	entry = ring_buffer_event_data(event);
@@ -2852,7 +2836,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)

	memcpy(&entry->buf, trace_buf, len);
	entry->buf[len] = 0;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ring_buffer_unlock_commit(tr->buffer, event);

 out_unlock:
	spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
Loading