
Commit 6113e45f authored by Ingo Molnar

Merge branch 'tip/perf/core-3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
parents 84bb671d 5168ae50
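
For context: the merged commits replace the tracing-specific ftrace_preempt_disable()/ftrace_preempt_enable() helpers with the plain notrace preempt primitives. A minimal sketch of the call-site pattern being removed and its replacement (illustrative only; all identifiers are taken from the hunks below):

	/* old pattern (removed): stash the need_resched state before disabling preemption */
	int resched;

	resched = ftrace_preempt_disable();
	/* ... tracing work that must not recurse into the scheduler ... */
	ftrace_preempt_enable(resched);

	/* new pattern: a plain notrace disable/enable pair */
	preempt_disable_notrace();
	/* ... tracing work ... */
	preempt_enable_notrace();

The removed kernel-doc in the last hunk explains why the old helpers existed: a tracer calling preempt_enable() with need_resched already set could schedule and recurse into itself. The first hunk marks preempt_schedule() and its preempt-count updates notrace, which closes that recursion path and is what makes the simpler pattern safe.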
+3 −3
@@ -3730,7 +3730,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
  * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
-asmlinkage void __sched preempt_schedule(void)
+asmlinkage void __sched notrace preempt_schedule(void)
 {
 	struct thread_info *ti = current_thread_info();
 
@@ -3742,9 +3742,9 @@ asmlinkage void __sched preempt_schedule(void)
 		return;
 
 	do {
-		add_preempt_count(PREEMPT_ACTIVE);
+		add_preempt_count_notrace(PREEMPT_ACTIVE);
 		schedule();
-		sub_preempt_count(PREEMPT_ACTIVE);
+		sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
 		/*
 		 * Check again in case we missed a preemption opportunity
+2 −3
@@ -1883,7 +1883,6 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
 	struct hlist_head *hhd;
 	struct hlist_node *n;
 	unsigned long key;
-	int resched;
 
 	key = hash_long(ip, FTRACE_HASH_BITS);
 
@@ -1897,12 +1896,12 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
 	 * period. This syncs the hash iteration and freeing of items
 	 * on the hash. rcu_read_lock is too dangerous here.
 	 */
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 	hlist_for_each_entry_rcu(entry, n, hhd, node) {
 		if (entry->ip == ip)
 			entry->ops->func(ip, parent_ip, &entry->data);
 	}
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_probe_ops __read_mostly =
+8 −30
@@ -2242,8 +2242,6 @@ static void trace_recursive_unlock(void)
 
 #endif
 
-static DEFINE_PER_CPU(int, rb_need_resched);
-
 /**
  * ring_buffer_lock_reserve - reserve a part of the buffer
  * @buffer: the ring buffer to reserve from
@@ -2264,13 +2262,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
-	int cpu, resched;
+	int cpu;
 
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return NULL;
 
 	/* If we are tracing schedule, we don't want to recurse */
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 
 	if (atomic_read(&buffer->record_disabled))
 		goto out_nocheck;
@@ -2295,21 +2293,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	if (!event)
 		goto out;
 
-	/*
-	 * Need to store resched state on this cpu.
-	 * Only the first needs to.
-	 */
-
-	if (preempt_count() == 1)
-		per_cpu(rb_need_resched, cpu) = resched;
-
 	return event;
 
  out:
 	trace_recursive_unlock();
 
  out_nocheck:
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
@@ -2355,13 +2345,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
 	trace_recursive_unlock();
 
-	/*
-	 * Only the last preempt count needs to restore preemption.
-	 */
-	if (preempt_count() == 1)
-		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
-	else
-		preempt_enable_no_resched_notrace();
+	preempt_enable_notrace();
 
 	return 0;
 }
@@ -2469,13 +2453,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 
 	trace_recursive_unlock();
 
-	/*
-	 * Only the last preempt count needs to restore preemption.
-	 */
-	if (preempt_count() == 1)
-		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
-	else
-		preempt_enable_no_resched_notrace();
+	preempt_enable_notrace();
 
 }
 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
@@ -2501,12 +2479,12 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	void *body;
 	int ret = -EBUSY;
-	int cpu, resched;
+	int cpu;
 
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return -EBUSY;
 
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 
 	if (atomic_read(&buffer->record_disabled))
 		goto out;
@@ -2536,7 +2514,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	ret = 0;
  out:
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 
 	return ret;
 }
+2 −3
@@ -1404,7 +1404,6 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	struct bprint_entry *entry;
 	unsigned long flags;
 	int disable;
-	int resched;
 	int cpu, len = 0, size, pc;
 
 	if (unlikely(tracing_selftest_running || tracing_disabled))
@@ -1414,7 +1413,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	pause_graph_tracing();
 
 	pc = preempt_count();
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 
@@ -1452,7 +1451,7 @@ out_unlock:
 
 out:
 	atomic_dec_return(&data->disabled);
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 	unpause_graph_tracing();
 
 	return len;
+0 −48
@@ -628,54 +628,6 @@ enum trace_iterator_flags
 
 extern struct tracer nop_trace;
 
-/**
- * ftrace_preempt_disable - disable preemption scheduler safe
- *
- * When tracing can happen inside the scheduler, there exists
- * cases that the tracing might happen before the need_resched
- * flag is checked. If this happens and the tracer calls
- * preempt_enable (after a disable), a schedule might take place
- * causing an infinite recursion.
- *
- * To prevent this, we read the need_resched flag before
- * disabling preemption. When we want to enable preemption we
- * check the flag, if it is set, then we call preempt_enable_no_resched.
- * Otherwise, we call preempt_enable.
- *
- * The rational for doing the above is that if need_resched is set
- * and we have yet to reschedule, we are either in an atomic location
- * (where we do not need to check for scheduling) or we are inside
- * the scheduler and do not want to resched.
- */
-static inline int ftrace_preempt_disable(void)
-{
-	int resched;
-
-	resched = need_resched();
-	preempt_disable_notrace();
-
-	return resched;
-}
-
-/**
- * ftrace_preempt_enable - enable preemption scheduler safe
- * @resched: the return value from ftrace_preempt_disable
- *
- * This is a scheduler safe way to enable preemption and not miss
- * any preemption checks. The disabled saved the state of preemption.
- * If resched is set, then we are either inside an atomic or
- * are inside the scheduler (we would have already scheduled
- * otherwise). In this case, we do not want to call normal
- * preempt_enable, but preempt_enable_no_resched instead.
- */
-static inline void ftrace_preempt_enable(int resched)
-{
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
-}
-
 #ifdef CONFIG_BRANCH_TRACER
 extern int enable_branch_tracing(struct trace_array *tr);
 extern void disable_branch_tracing(void);