
Commit a81bd80a authored by Steven Rostedt

ring-buffer: use generic version of in_nmi



Impact: clean up

Now that a generic in_nmi() is available, this patch removes the
special-case NMI code from the ring buffer and uses the generic
in_nmi() check instead.

With this change, I was also able to rename "arch_ftrace_nmi_enter"
back to "ftrace_nmi_enter" and remove the wrapper code from the
ring buffer.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
parent 9a5fd902
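
For context, the "generic in_nmi" mentioned above is the in_nmi() test from <linux/hardirq.h> (which the ring-buffer hunk below starts including): nmi_enter()/nmi_exit() mark NMI context in the preemption count and in_nmi() simply tests that mark, so the ring buffer no longer needs a private per-CPU rb_in_nmi counter. A rough sketch of the idea follows; the bit values and mock names are illustrative, not the verbatim header of this kernel release.

/*
 * Schematic model of the generic in_nmi() test -- not the verbatim
 * <linux/hardirq.h> of this kernel release.  nmi_enter() sets an NMI
 * bit in the preemption count, nmi_exit() clears it, and in_nmi()
 * simply tests that bit.
 */
#define NMI_SHIFT	20			/* illustrative bit position */
#define NMI_OFFSET	(1UL << NMI_SHIFT)
#define NMI_MASK	NMI_OFFSET

static unsigned long preempt_count;	/* stand-in for the real per-task count */

#define in_nmi()	(preempt_count & NMI_MASK)

static void mock_nmi_enter(void) { preempt_count += NMI_OFFSET; }
static void mock_nmi_exit(void)  { preempt_count -= NMI_OFFSET; }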
+2 −2
@@ -113,7 +113,7 @@ static void ftrace_mod_code(void)
 					     MCOUNT_INSN_SIZE);
 }
 
-void arch_ftrace_nmi_enter(void)
+void ftrace_nmi_enter(void)
 {
 	atomic_inc(&nmi_running);
 	/* Must have nmi_running seen before reading write flag */
@@ -124,7 +124,7 @@ void arch_ftrace_nmi_enter(void)
 	}
 }
 
-void arch_ftrace_nmi_exit(void)
+void ftrace_nmi_exit(void)
 {
 	/* Finish all executions before clearing nmi_running */
 	smp_wmb();
+0 −8
@@ -3,14 +3,6 @@
 
 
 #ifdef CONFIG_FTRACE_NMI_ENTER
-extern void arch_ftrace_nmi_enter(void);
-extern void arch_ftrace_nmi_exit(void);
-#else
-static inline void arch_ftrace_nmi_enter(void) { }
-static inline void arch_ftrace_nmi_exit(void) { }
-#endif
-
-#ifdef CONFIG_RING_BUFFER
 extern void ftrace_nmi_enter(void);
 extern void ftrace_nmi_exit(void);
 #else
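
The header hunk above drops the arch_ftrace_nmi_* indirection entirely, so the declarations that used to sit behind CONFIG_RING_BUFFER now live directly under CONFIG_FTRACE_NMI_ENTER. Reconstructed from the removed lines and the surrounding context, the resulting header presumably reduces to something like:

/* The ftrace NMI header after the patch (reconstructed sketch). */
#ifdef CONFIG_FTRACE_NMI_ENTER
extern void ftrace_nmi_enter(void);
extern void ftrace_nmi_exit(void);
#else
/* The stub branch is inferred; the diff context shown stops at "#else". */
static inline void ftrace_nmi_enter(void) { }
static inline void ftrace_nmi_exit(void) { }
#endif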
+13 −30
@@ -8,6 +8,7 @@
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
+#include <linux/hardirq.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
@@ -19,35 +20,6 @@
 
 #include "trace.h"
 
-/*
- * Since the write to the buffer is still not fully lockless,
- * we must be careful with NMIs. The locks in the writers
- * are taken when a write crosses to a new page. The locks
- * protect against races with the readers (this will soon
- * be fixed with a lockless solution).
- *
- * Because we can not protect against NMIs, and we want to
- * keep traces reentrant, we need to manage what happens
- * when we are in an NMI.
- */
-static DEFINE_PER_CPU(int, rb_in_nmi);
-
-void ftrace_nmi_enter(void)
-{
-	__get_cpu_var(rb_in_nmi)++;
-	/* call arch specific handler too */
-	arch_ftrace_nmi_enter();
-}
-
-void ftrace_nmi_exit(void)
-{
-	arch_ftrace_nmi_exit();
-	__get_cpu_var(rb_in_nmi)--;
-	/* NMIs are not recursive */
-	WARN_ON_ONCE(__get_cpu_var(rb_in_nmi));
-}
-
-
 /*
  * A fast way to enable or disable all ring buffers is to
  * call tracing_on or tracing_off. Turning off the ring buffers
@@ -1027,12 +999,23 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
 		local_irq_save(flags);
 		/*
+		 * Since the write to the buffer is still not
+		 * fully lockless, we must be careful with NMIs.
+		 * The locks in the writers are taken when a write
+		 * crosses to a new page. The locks protect against
+		 * races with the readers (this will soon be fixed
+		 * with a lockless solution).
+		 *
+		 * Because we can not protect against NMIs, and we
+		 * want to keep traces reentrant, we need to manage
+		 * what happens when we are in an NMI.
+		 *
 		 * NMIs can happen after we take the lock.
 		 * If we are in an NMI, only take the lock
 		 * if it is not already taken. Otherwise
 		 * simply fail.
 		 */
-		if (unlikely(__get_cpu_var(rb_in_nmi))) {
+		if (unlikely(in_nmi())) {
 			if (!__raw_spin_trylock(&cpu_buffer->lock))
 				goto out_unlock;
 		} else
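
The last hunk is the functional core of the change: on the write path, the buffer now asks the generic in_nmi() whether it is running in NMI context and, if so, only trylocks the per-CPU lock rather than spinning on it, since the NMI may have interrupted the very writer that already holds it. A minimal userspace sketch of that pattern, with hypothetical names and a pthread spinlock standing in for the kernel's raw spinlock and per-CPU buffer state:

/*
 * Minimal sketch of the "only trylock when in NMI" pattern used in
 * __rb_reserve_next().  All names here are illustrative stand-ins,
 * not the kernel API.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_spinlock_t buffer_lock;
static _Thread_local bool fake_in_nmi;	/* stand-in for the generic in_nmi() */

static void buffer_lock_init(void)
{
	pthread_spin_init(&buffer_lock, PTHREAD_PROCESS_PRIVATE);
}

/* Returns true if the lock was taken and the caller may cross to a new page. */
static bool lock_for_page_cross(void)
{
	if (fake_in_nmi) {
		/*
		 * The NMI may have interrupted a writer that already holds
		 * the lock; spinning here would deadlock, so fail instead
		 * (the caller drops the event, mirroring "goto out_unlock").
		 */
		return pthread_spin_trylock(&buffer_lock) == 0;
	}
	pthread_spin_lock(&buffer_lock);
	return true;
}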