Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ad5133b7 authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

perf: Default PMU ops



Provide default implementations for the pmu txn methods, this
allows us to remove some conditional code.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 33696fc0
Loading
Loading
Loading
Loading
+5 −5
Original line number Diff line number Diff line
@@ -565,8 +565,8 @@ struct pmu {

	int				*pmu_disable_count;

	void (*pmu_enable)		(struct pmu *pmu);
	void (*pmu_disable)		(struct pmu *pmu);
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Should return -ENOENT when the @event doesn't match this PMU.
@@ -590,19 +590,19 @@ struct pmu {
	 * Start the transaction, after this ->enable() doesn't need to
	 * do schedulability tests.
	 */
	void (*start_txn)	(struct pmu *pmu);
	void (*start_txn)	(struct pmu *pmu); /* optional */
	/*
	 * If ->start_txn() disabled the ->enable() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int  (*commit_txn)	(struct pmu *pmu);
	int  (*commit_txn)	(struct pmu *pmu); /* optional */
	/*
	 * Will cancel the transaction, assumes ->disable() is called
	 * for each successful ->enable() during the transaction.
	 */
	void (*cancel_txn)	(struct pmu *pmu);
	void (*cancel_txn)	(struct pmu *pmu); /* optional */
};

/**
+52 −12
Original line number Diff line number Diff line
@@ -674,20 +674,13 @@ group_sched_in(struct perf_event *group_event,
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = group_event->pmu;
	bool txn = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	/* Check if group transaction available */
	if (pmu->start_txn)
		txn = true;

	if (txn)
	pmu->start_txn(pmu);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		if (txn)
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}
@@ -702,7 +695,7 @@ group_sched_in(struct perf_event *group_event,
		}
	}

	if (!txn || !pmu->commit_txn(pmu))
	if (!pmu->commit_txn(pmu))
		return 0;

group_error:
@@ -717,7 +710,6 @@ group_sched_in(struct perf_event *group_event,
	}
	event_sched_out(group_event, cpuctx, ctx);

	if (txn)
	pmu->cancel_txn(pmu);

	return -EAGAIN;
@@ -4965,6 +4957,31 @@ static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * Default no-op for the optional void-returning pmu callbacks.
 * perf_pmu_register() installs this for ->pmu_enable/->pmu_disable and for
 * ->start_txn/->cancel_txn when the driver supplies neither start_txn nor
 * pmu_enable, so callers can invoke the ops unconditionally.
 */
static void perf_pmu_nop_void(struct pmu *pmu)
{
}

/*
 * Default no-op for ->commit_txn; always reports success (0) so that
 * group_sched_in()'s unconditional "if (!pmu->commit_txn(pmu))" check
 * works for PMUs that provide no transaction support.
 */
static int perf_pmu_nop_int(struct pmu *pmu)
{
	return 0;
}

/*
 * Default ->start_txn stub, installed when the driver has pmu_enable/
 * pmu_disable but no transaction ops: disable the PMU for the duration of
 * the transaction to try and batch the hardware accesses.
 */
static void perf_pmu_start_txn(struct pmu *pmu)
{
	perf_pmu_disable(pmu);
}

/*
 * Default ->commit_txn stub, paired with perf_pmu_start_txn(): re-enable
 * the PMU and report success (0) — no schedulability test is performed.
 */
static int perf_pmu_commit_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
	return 0;
}

/*
 * Default ->cancel_txn stub, paired with perf_pmu_start_txn(): just
 * re-enable the PMU (the disable done at start_txn time is undone here).
 */
static void perf_pmu_cancel_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
}

int perf_pmu_register(struct pmu *pmu)
{
	int ret;
@@ -4974,6 +4991,29 @@ int perf_pmu_register(struct pmu *pmu)
	pmu->pmu_disable_count = alloc_percpu(int);
	if (!pmu->pmu_disable_count)
		goto unlock;

	if (!pmu->start_txn) {
		if (pmu->pmu_enable) {
			/*
			 * If we have pmu_enable/pmu_disable calls, install
			 * transaction stubs that use that to try and batch
			 * hardware accesses.
			 */
			pmu->start_txn  = perf_pmu_start_txn;
			pmu->commit_txn = perf_pmu_commit_txn;
			pmu->cancel_txn = perf_pmu_cancel_txn;
		} else {
			pmu->start_txn  = perf_pmu_nop_void;
			pmu->commit_txn = perf_pmu_nop_int;
			pmu->cancel_txn = perf_pmu_nop_void;
		}
	}

	if (!pmu->pmu_enable) {
		pmu->pmu_enable  = perf_pmu_nop_void;
		pmu->pmu_disable = perf_pmu_nop_void;
	}

	list_add_rcu(&pmu->entry, &pmus);
	ret = 0;
unlock: