Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2023b359 authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

perf_counter: inheritable sample counters



Redirect the output to the parent counter and put in some sanity checks.

[ Impact: new perfcounter feature - inherited sampling counters ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090505155437.331556171@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 22c1558e
Loading
Loading
Loading
Loading
+30 −2
Original line number Diff line number Diff line
@@ -738,10 +738,18 @@ static void perf_counter_enable(struct perf_counter *counter)
	spin_unlock_irq(&ctx->lock);
}

static void perf_counter_refresh(struct perf_counter *counter, int refresh)
/*
 * Add @refresh to the counter's event_limit budget and (re-)enable the
 * counter.
 *
 * Returns 0 on success, or -EINVAL for inherited counters: their output
 * is redirected to the parent counter, so a per-child refresh budget is
 * not supported.
 */
static int perf_counter_refresh(struct perf_counter *counter, int refresh)
{
	/*
	 * not supported on inherited counters
	 */
	if (counter->hw_event.inherit)
		return -EINVAL;

	atomic_add(refresh, &counter->event_limit);
	perf_counter_enable(counter);

	return 0;
}

/*
@@ -1307,7 +1315,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
		perf_counter_disable_family(counter);
		break;
	case PERF_COUNTER_IOC_REFRESH:
		perf_counter_refresh(counter, arg);
		err = perf_counter_refresh(counter, arg);
		break;
	case PERF_COUNTER_IOC_RESET:
		perf_counter_reset(counter);
@@ -1814,6 +1822,12 @@ static int perf_output_begin(struct perf_output_handle *handle,
	struct perf_mmap_data *data;
	unsigned int offset, head;

	/*
	 * For inherited counters we send all the output towards the parent.
	 */
	if (counter->parent)
		counter = counter->parent;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
@@ -1995,6 +2009,9 @@ static void perf_counter_output(struct perf_counter *counter,
	if (record_type & PERF_RECORD_ADDR)
		perf_output_put(&handle, addr);

	/*
	 * XXX PERF_RECORD_GROUP vs inherited counters seems difficult.
	 */
	if (record_type & PERF_RECORD_GROUP) {
		struct perf_counter *leader, *sub;
		u64 nr = counter->nr_siblings;
@@ -2281,6 +2298,11 @@ int perf_counter_overflow(struct perf_counter *counter,
	int events = atomic_read(&counter->event_limit);
	int ret = 0;

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * counters
	 */

	counter->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&counter->event_limit)) {
		ret = 1;
@@ -2801,6 +2823,12 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,

	pmu = NULL;

	/*
	 * we currently do not support PERF_RECORD_GROUP on inherited counters
	 */
	if (hw_event->inherit && (hw_event->record_type & PERF_RECORD_GROUP))
		goto done;

	if (perf_event_raw(hw_event)) {
		pmu = hw_perf_counter_init(counter);
		goto done;