
Commit 7dd1fcc2 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: provide pagefault software events



We use the generic software counter infrastructure to provide
page fault events.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 15dbf27c
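The per-architecture side of the change is a single hook: each fault handler reports the event to the generic software counter layer introduced by the parent commit. A minimal sketch of the pattern, with the perf_swcounter_event() signature inferred from the call sites in the hunks below (the wrapper name is hypothetical):

#include <linux/perf_counter.h>

/*
 * Sketch only. Signature inferred from the calls this commit adds:
 *   void perf_swcounter_event(u32 event, u64 nr, int nmi,
 *                             struct pt_regs *regs);
 * Arguments: event id, number of occurrences, an in-NMI flag, and the
 * register state at the time of the event.
 */
static void report_page_fault(struct pt_regs *regs)	/* hypothetical helper */
{
	/* one PERF_COUNT_PAGE_FAULTS event, not from NMI context */
	perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs);
}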
arch/powerpc/mm/fault.c +3 −0
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
+#include <linux/perf_counter.h>
 
 #include <asm/firmware.h>
 #include <asm/page.h>
@@ -170,6 +171,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 		die("Weird page fault", regs, SIGSEGV);
 	}
 
+	perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs);
+
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space.  All other faults represent errors in the
 	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
arch/x86/mm/fault.c +3 −0
@@ -27,6 +27,7 @@
 #include <linux/tty.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
+#include <linux/perf_counter.h>
 
 #include <asm-generic/sections.h>
 
@@ -1044,6 +1045,8 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (unlikely(error_code & PF_RSVD))
 		pgtable_bad(regs, error_code, address);
 
+	perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs);
+
 	/*
 	 * If we're in an interrupt, have no user context or are running
 	 * in an atomic region then we must not take the fault:
kernel/perf_counter.c +3 −50
@@ -1607,57 +1607,10 @@ static const struct hw_perf_counter_ops perf_ops_task_clock = {
  * Software counter: page faults
  */
 
-#ifdef CONFIG_VM_EVENT_COUNTERS
-#define cpu_page_faults()	__get_cpu_var(vm_event_states).event[PGFAULT]
-#else
-#define cpu_page_faults()	0
-#endif
-
-static u64 get_page_faults(struct perf_counter *counter)
-{
-	struct task_struct *curr = counter->ctx->task;
-
-	if (curr)
-		return curr->maj_flt + curr->min_flt;
-	return cpu_page_faults();
-}
-
-static void page_faults_perf_counter_update(struct perf_counter *counter)
-{
-	u64 prev, now;
-	s64 delta;
-
-	prev = atomic64_read(&counter->hw.prev_count);
-	now = get_page_faults(counter);
-
-	atomic64_set(&counter->hw.prev_count, now);
-
-	delta = now - prev;
-
-	atomic64_add(delta, &counter->count);
-}
-
-static void page_faults_perf_counter_read(struct perf_counter *counter)
-{
-	page_faults_perf_counter_update(counter);
-}
-
-static int page_faults_perf_counter_enable(struct perf_counter *counter)
-{
-	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
-		atomic64_set(&counter->hw.prev_count, get_page_faults(counter));
-	return 0;
-}
-
-static void page_faults_perf_counter_disable(struct perf_counter *counter)
-{
-	page_faults_perf_counter_update(counter);
-}
-
 static const struct hw_perf_counter_ops perf_ops_page_faults = {
-	.enable		= page_faults_perf_counter_enable,
-	.disable	= page_faults_perf_counter_disable,
-	.read		= page_faults_perf_counter_read,
+	.enable		= perf_swcounter_enable,
+	.disable	= perf_swcounter_disable,
+	.read		= perf_swcounter_read,
 };
 
 /*
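The kernel/perf_counter.c hunk is the point of the patch: the bespoke get_page_faults()/page_faults_perf_counter_*() machinery goes away, and perf_ops_page_faults is wired to the shared perf_swcounter_* callbacks from parent commit 15dbf27c. Judging by the deleted code, the shared callbacks keep the same snapshot-and-delta shape without hardcoding the event source; a hedged sketch of that shape (only the struct wiring above comes from this diff, the body below is assumed, and read_sw_event() is hypothetical):

/*
 * Sketch, not the 15dbf27c implementation: a generic update would
 * mirror the deleted page_faults_perf_counter_update(), but read
 * whichever software event the counter was opened for.
 */
static void swcounter_update_sketch(struct perf_counter *counter)
{
	u64 prev, now;

	prev = atomic64_read(&counter->hw.prev_count);
	now = read_sw_event(counter);	/* hypothetical per-event read */
	atomic64_set(&counter->hw.prev_count, now);

	/* fold the delta since the last snapshot into the counter */
	atomic64_add(now - prev, &counter->count);
}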