
Commit e360adbe authored by Peter Zijlstra, committed by Ingo Molnar

irq_work: Add generic hardirq context callbacks



Provide a mechanism that allows running code in IRQ context. It is
most useful for NMI code that needs to interact with the rest of the
system -- such as waking up a task to drain buffers.
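
As a usage illustration, here is a minimal sketch of a consumer of this
facility. Only irq_work_run() is visible in the diff below, so treat the
init_irq_work()/irq_work_queue() entry points, the PMU-style NMI handler
and the drain task as assumptions:

	#include <linux/irq_work.h>
	#include <linux/sched.h>

	static struct task_struct *drain_task;	/* hypothetical consumer */
	static struct irq_work drain_work;

	/* Runs later, in hardirq context, where waking a task is safe. */
	static void drain_buffers(struct irq_work *work)
	{
		wake_up_process(drain_task);
	}

	/* NMI context: too restricted to wake a task directly, so queue
	 * the callback and let the self-IPI (or timer tick) run it. */
	static void my_nmi_overflow_handler(void)
	{
		irq_work_queue(&drain_work);
	}

	static int __init drain_setup(void)
	{
		init_irq_work(&drain_work, drain_buffers);
		return 0;
	}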

Perf already has such a mechanism; extract it and provide it as a
generic facility, independent of perf, so that other users can benefit
as well.

The IRQ context callback is generated through self-IPIs where possible;
on architectures such as powerpc, the decrementer (the built-in timer
facility) is instead programmed to raise an interrupt immediately.
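
To make that dispatch concrete, a sketch of the per-architecture "raise"
hook follows. The name arch_irq_work_raise() is how later kernels spell
this hook, and set_dec() is powerpc's decrementer write; treat both as
assumptions rather than part of the hunks shown below:

	#include <linux/percpu.h>

	DEFINE_PER_CPU(u8, irq_work_pending);

	/* powerpc-flavoured raise: flag the work for the timer interrupt
	 * handler, then force the decrementer to expire right away. */
	void arch_irq_work_raise(void)
	{
		__get_cpu_var(irq_work_pending) = 1;
		set_dec(1);	/* interrupt fires almost immediately */
	}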

Architectures that have neither must make do with a callback from the
timer tick. These architectures can call irq_work_run() at the tail of
any IRQ handler that might enqueue such work (such as the perf IRQ
handler) to avoid undue latency in processing the work.
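
A sketch of that fallback arrangement (the handler and its helper are
hypothetical): the architecture drains the queue before leaving any
interrupt handler that may have enqueued work.

	#include <linux/interrupt.h>
	#include <linux/irq_work.h>

	static irqreturn_t my_pmu_interrupt(int irq, void *dev)
	{
		handle_counter_overflow(dev);	/* may call irq_work_queue() */
		irq_work_run();			/* drain pending callbacks */
		return IRQ_HANDLED;
	}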

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
[ various fixes ]
Signed-off-by: Huang Ying <ying.huang@intel.com>
LKML-Reference: <1287036094.7768.291.camel@yhuang-dev>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 8e5fc1a7
arch/alpha/Kconfig  +1 −0

@@ -9,6 +9,7 @@ config ALPHA
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_SYSCALL_WRAPPERS
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_DMA_ATTRS
 	help
arch/alpha/include/asm/perf_event.h  +0 −5

 #ifndef __ASM_ALPHA_PERF_EVENT_H
 #define __ASM_ALPHA_PERF_EVENT_H
 
-/* Alpha only supports software events through this interface. */
-extern void set_perf_event_pending(void);
-
-#define PERF_EVENT_INDEX_OFFSET 0
-
 #ifdef CONFIG_PERF_EVENTS
 extern void init_hw_perf_events(void);
 #else
arch/alpha/kernel/time.c  +15 −15

@@ -41,7 +41,7 @@
 #include <linux/init.h>
 #include <linux/bcd.h>
 #include <linux/profile.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -83,25 +83,25 @@ static struct {
 
 unsigned long est_cycle_freq;
 
-#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_IRQ_WORK
 
-DEFINE_PER_CPU(u8, perf_event_pending);
+DEFINE_PER_CPU(u8, irq_work_pending);
 
-#define set_perf_event_pending_flag()  __get_cpu_var(perf_event_pending) = 1
-#define test_perf_event_pending()      __get_cpu_var(perf_event_pending)
-#define clear_perf_event_pending()     __get_cpu_var(perf_event_pending) = 0
+#define set_irq_work_pending_flag()  __get_cpu_var(irq_work_pending) = 1
+#define test_irq_work_pending()      __get_cpu_var(irq_work_pending)
+#define clear_irq_work_pending()     __get_cpu_var(irq_work_pending) = 0
 
-void set_perf_event_pending(void)
+void set_irq_work_pending(void)
 {
-	set_perf_event_pending_flag();
+	set_irq_work_pending_flag();
 }
 
-#else  /* CONFIG_PERF_EVENTS */
+#else  /* CONFIG_IRQ_WORK */
 
-#define test_perf_event_pending()      0
-#define clear_perf_event_pending()
+#define test_irq_work_pending()      0
+#define clear_irq_work_pending()
 
-#endif /* CONFIG_PERF_EVENTS */
+#endif /* CONFIG_IRQ_WORK */
 
 
 static inline __u32 rpcc(void)
@@ -191,9 +191,9 @@ irqreturn_t timer_interrupt(int irq, void *dev)
 
 	write_sequnlock(&xtime_lock);
 
-	if (test_perf_event_pending()) {
-		clear_perf_event_pending();
-		perf_event_do_pending();
+	if (test_irq_work_pending()) {
+		clear_irq_work_pending();
+		irq_work_run();
 	}
 
 #ifndef CONFIG_SMP
arch/arm/Kconfig  +1 −0

@@ -23,6 +23,7 @@ config ARM
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZO
 	select HAVE_KERNEL_LZMA
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_REGS_AND_STACK_ACCESS_API
arch/arm/include/asm/perf_event.h  +0 −12

@@ -12,18 +12,6 @@
 #ifndef __ARM_PERF_EVENT_H__
 #define __ARM_PERF_EVENT_H__
 
-/*
- * NOP: on *most* (read: all supported) ARM platforms, the performance
- * counter interrupts are regular interrupts and not an NMI. This
- * means that when we receive the interrupt we can call
- * perf_event_do_pending() that handles all of the work with
- * interrupts disabled.
- */
-static inline void
-set_perf_event_pending(void)
-{
-}
-
 /* ARM performance counters start from 1 (in the cp15 accesses) so use the
  * same indexes here for consistency. */
 #define PERF_EVENT_INDEX_OFFSET 1