Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 925d519a authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar
Browse files

perf_counter: unify and fix delayed counter wakeup



While going over the wakeup code I noticed delayed wakeups only work
for hardware counters but basically all software counters rely on
them.

This patch unifies and generalizes the delayed wakeup to fix this
issue.

Since we're dealing with NMI context bits here, use a cmpxchg() based
single link list implementation to track counters that have pending
wakeups.

[ This should really be generic code for delayed wakeups, but since we
  cannot use cmpxchg()/xchg() in generic code, I've let it live in the
  perf_counter code. -- Eric Dumazet could use it to aggregate the
  network wakeups. ]

Furthermore, the x86 method of using TIF flags was flawed in that it's
quite possible to end up setting the bit on the idle task, losing the
wakeup.

The powerpc method uses per-cpu storage and does appear to be
sufficient.

Signed-off-by: default avatarPeter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: default avatarPaul Mackerras <paulus@samba.org>
Orig-LKML-Reference: <20090330171023.153932974@chello.nl>
Signed-off-by: default avatarIngo Molnar <mingo@elte.hu>
parent 53cfbf59
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -132,7 +132,7 @@ static inline int irqs_disabled_flags(unsigned long flags)
struct irq_chip;

#ifdef CONFIG_PERF_COUNTERS
static inline unsigned long get_perf_counter_pending(void)
static inline unsigned long test_perf_counter_pending(void)
{
	unsigned long x;

@@ -160,7 +160,7 @@ extern void perf_counter_do_pending(void);

#else

static inline unsigned long get_perf_counter_pending(void)
static inline unsigned long test_perf_counter_pending(void)
{
	return 0;
}
+1 −1
Original line number Diff line number Diff line
@@ -135,7 +135,7 @@ notrace void raw_local_irq_restore(unsigned long en)
			iseries_handle_interrupts();
	}

	if (get_perf_counter_pending()) {
	if (test_perf_counter_pending()) {
		clear_perf_counter_pending();
		perf_counter_do_pending();
	}
+2 −20
Original line number Diff line number Diff line
@@ -649,24 +649,6 @@ hw_perf_counter_init(struct perf_counter *counter)
	return &power_perf_ops;
}

/*
 * Handle wakeups.
 *
 * Walks this CPU's hardware-counter slots and issues the wait-queue
 * wakeup for every counter that flagged one from NMI/interrupt
 * context (where wake_up() itself cannot safely be called).
 * NOTE(review): assumes it runs with this CPU's counters stable
 * (interrupts soft-disabled or from the soft-enable path) — confirm
 * against the callers in raw_local_irq_restore()/perf_counter_interrupt().
 */
void perf_counter_do_pending(void)
{
	int i;
	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
	struct perf_counter *counter;

	/* Scan every active counter slot on this CPU. */
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter && counter->wakeup_pending) {
			/* Clear the flag before waking so a new overflow
			 * can re-arm it without being lost. */
			counter->wakeup_pending = 0;
			wake_up(&counter->waitq);
		}
	}
}

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
@@ -720,7 +702,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
	struct perf_counter *counter;
	long val;
	int need_wakeup = 0, found = 0;
	int found = 0;

	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
@@ -761,7 +743,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
	 * immediately; otherwise we'll have do the wakeup when interrupts
	 * get soft-enabled.
	 */
	if (get_perf_counter_pending() && regs->softe) {
	if (test_perf_counter_pending() && regs->softe) {
		irq_enter();
		clear_perf_counter_pending();
		perf_counter_do_pending();
+3 −2
Original line number Diff line number Diff line
@@ -84,8 +84,9 @@ union cpuid10_edx {
#define MSR_ARCH_PERFMON_FIXED_CTR2			0x30b
#define X86_PMC_IDX_FIXED_BUS_CYCLES			(X86_PMC_IDX_FIXED + 2)

#define set_perf_counter_pending()	\
		set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
#define set_perf_counter_pending()	do { } while (0)
#define clear_perf_counter_pending()	do { } while (0)
#define test_perf_counter_pending()	(0)

#ifdef CONFIG_PERF_COUNTERS
extern void init_hw_perf_counters(void);
+1 −3
Original line number Diff line number Diff line
@@ -83,7 +83,6 @@ struct thread_info {
#define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
#define TIF_SECCOMP		8	/* secure computing */
#define TIF_MCE_NOTIFY		10	/* notify userspace of an MCE */
#define TIF_PERF_COUNTERS	11	/* notify perf counter work */
#define TIF_NOTSC		16	/* TSC is not accessible in userland */
#define TIF_IA32		17	/* 32bit process */
#define TIF_FORK		18	/* ret_from_fork */
@@ -107,7 +106,6 @@ struct thread_info {
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
#define _TIF_MCE_NOTIFY		(1 << TIF_MCE_NOTIFY)
#define _TIF_PERF_COUNTERS	(1 << TIF_PERF_COUNTERS)
#define _TIF_NOTSC		(1 << TIF_NOTSC)
#define _TIF_IA32		(1 << TIF_IA32)
#define _TIF_FORK		(1 << TIF_FORK)
@@ -141,7 +139,7 @@ struct thread_info {

/* Only used for 64 bit */
#define _TIF_DO_NOTIFY_MASK						\
	(_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_PERF_COUNTERS|_TIF_NOTIFY_RESUME)
	(_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_NOTIFY_RESUME)

/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW							\
Loading