
Commit 4a0deca6 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: generic context switch event



Impact: cleanup

Use the generic software events for context switches.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Orig-LKML-Reference: <20090319194233.283522645@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 01ef09d9
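
In short: after this patch a context switch is reported by calling the generic software-counter hook from the scheduler's sched-out path, and the bespoke perf_ops_context_switches implementation (which sampled curr->nvcsw + curr->nivcsw) goes away. Below is a condensed sketch of the resulting perf_counter_task_sched_out(), assembled from the diff hunks that follow rather than a verbatim listing:

void perf_counter_task_sched_out(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &task->perf_counter_ctx;
	struct pt_regs *regs;

	if (likely(!cpuctx->task_ctx))
		return;

	/* Emit one generic software event per context switch. */
	regs = task_pt_regs(task);
	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs);

	__perf_counter_sched_out(ctx, cpuctx);
	cpuctx->task_ctx = NULL;
}

Counters opened as PERF_COUNT_CONTEXT_SWITCHES are then serviced by perf_ops_generic, the same path the page-fault software counters already use.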
include/linux/sched.h  +0 −1
@@ -138,7 +138,6 @@ extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
-extern u64 cpu_nr_switches(int cpu);
 extern u64 cpu_nr_migrations(int cpu);
 
 extern unsigned long get_parent_ip(unsigned long addr);
kernel/perf_counter.c  +4 −56
@@ -710,10 +710,13 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu)
 {
 	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
 	struct perf_counter_context *ctx = &task->perf_counter_ctx;
+	struct pt_regs *regs;
 
 	if (likely(!cpuctx->task_ctx))
 		return;
 
+	regs = task_pt_regs(task);
+	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs);
 	__perf_counter_sched_out(ctx, cpuctx);
 
 	cpuctx->task_ctx = NULL;
@@ -1667,58 +1670,6 @@ static const struct hw_perf_counter_ops perf_ops_task_clock = {
 	.read		= task_clock_perf_counter_read,
 };
 
-/*
- * Software counter: context switches
- */
-
-static u64 get_context_switches(struct perf_counter *counter)
-{
-	struct task_struct *curr = counter->ctx->task;
-
-	if (curr)
-		return curr->nvcsw + curr->nivcsw;
-	return cpu_nr_switches(smp_processor_id());
-}
-
-static void context_switches_perf_counter_update(struct perf_counter *counter)
-{
-	u64 prev, now;
-	s64 delta;
-
-	prev = atomic64_read(&counter->hw.prev_count);
-	now = get_context_switches(counter);
-
-	atomic64_set(&counter->hw.prev_count, now);
-
-	delta = now - prev;
-
-	atomic64_add(delta, &counter->count);
-}
-
-static void context_switches_perf_counter_read(struct perf_counter *counter)
-{
-	context_switches_perf_counter_update(counter);
-}
-
-static int context_switches_perf_counter_enable(struct perf_counter *counter)
-{
-	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
-		atomic64_set(&counter->hw.prev_count,
-			     get_context_switches(counter));
-	return 0;
-}
-
-static void context_switches_perf_counter_disable(struct perf_counter *counter)
-{
-	context_switches_perf_counter_update(counter);
-}
-
-static const struct hw_perf_counter_ops perf_ops_context_switches = {
-	.enable		= context_switches_perf_counter_enable,
-	.disable	= context_switches_perf_counter_disable,
-	.read		= context_switches_perf_counter_read,
-};
-
 /*
  * Software counter: cpu migrations
  */
@@ -1808,11 +1759,8 @@ sw_perf_counter_init(struct perf_counter *counter)
 	case PERF_COUNT_PAGE_FAULTS:
 	case PERF_COUNT_PAGE_FAULTS_MIN:
 	case PERF_COUNT_PAGE_FAULTS_MAJ:
-		hw_ops = &perf_ops_generic;
-		break;
 	case PERF_COUNT_CONTEXT_SWITCHES:
-		if (!counter->hw_event.exclude_kernel)
-			hw_ops = &perf_ops_context_switches;
+		hw_ops = &perf_ops_generic;
 		break;
 	case PERF_COUNT_CPU_MIGRATIONS:
 		if (!counter->hw_event.exclude_kernel)
kernel/sched.c  +0 −6
@@ -2900,14 +2900,8 @@ unsigned long nr_active(void)
 
 /*
  * Externally visible per-cpu scheduler statistics:
- * cpu_nr_switches(cpu) - number of context switches on that cpu
  * cpu_nr_migrations(cpu) - number of migrations into that cpu
  */
-u64 cpu_nr_switches(int cpu)
-{
-	return cpu_rq(cpu)->nr_switches;
-}
-
 u64 cpu_nr_migrations(int cpu)
 {
 	return cpu_rq(cpu)->nr_migrations_in;