Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b0a873eb authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

perf: Register PMU implementations



Simple registration interface for struct pmu, this provides the
infrastructure for removing all the weak functions.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 51b0fe39
Loading
Loading
Loading
Loading
+22 −15
Original line number Diff line number Diff line
@@ -642,34 +642,39 @@ static int __hw_perf_event_init(struct perf_event *event)
	return 0;
}

static struct pmu pmu = {
	.enable		= alpha_pmu_enable,
	.disable	= alpha_pmu_disable,
	.read		= alpha_pmu_read,
	.unthrottle	= alpha_pmu_unthrottle,
};


/*
 * Main entry point to initialise a HW performance event.
 */
struct pmu *hw_perf_event_init(struct perf_event *event)
static int alpha_pmu_event_init(struct perf_event *event)
{
	int err;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (!alpha_pmu)
		return ERR_PTR(-ENODEV);
		return -ENODEV;

	/* Do the real initialisation work. */
	err = __hw_perf_event_init(event);

	if (err)
		return ERR_PTR(err);

	return &pmu;
	return err;
}


static struct pmu pmu = {
	.event_init	= alpha_pmu_event_init,
	.enable		= alpha_pmu_enable,
	.disable	= alpha_pmu_disable,
	.read		= alpha_pmu_read,
	.unthrottle	= alpha_pmu_unthrottle,
};

/*
 * Main entry point - enable HW performance counters.
@@ -838,5 +843,7 @@ void __init init_hw_perf_events(void)
	/* And set up PMU specification */
	alpha_pmu = &ev67_pmu;
	perf_max_events = alpha_pmu->num_pmcs;

	perf_pmu_register(&pmu);
}
+26 −12
Original line number Diff line number Diff line
@@ -306,12 +306,7 @@ out:
	return err;
}

static struct pmu pmu = {
	.enable	    = armpmu_enable,
	.disable    = armpmu_disable,
	.unthrottle = armpmu_unthrottle,
	.read	    = armpmu_read,
};
static struct pmu pmu;

static int
validate_event(struct cpu_hw_events *cpuc,
@@ -491,20 +486,29 @@ __hw_perf_event_init(struct perf_event *event)
	return err;
}

struct pmu *
hw_perf_event_init(struct perf_event *event)
static int armpmu_event_init(struct perf_event *event)
{
	int err = 0;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (!armpmu)
		return ERR_PTR(-ENODEV);
		return -ENODEV;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(&active_events)) {
		if (atomic_read(&active_events) > perf_max_events) {
			atomic_dec(&active_events);
			return ERR_PTR(-ENOSPC);
			return -ENOSPC;
		}

		mutex_lock(&pmu_reserve_mutex);
@@ -518,15 +522,23 @@ hw_perf_event_init(struct perf_event *event)
	}

	if (err)
		return ERR_PTR(err);
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err ? ERR_PTR(err) : &pmu;
	return err;
}

static struct pmu pmu = {
	.event_init = armpmu_event_init,
	.enable	    = armpmu_enable,
	.disable    = armpmu_disable,
	.unthrottle = armpmu_unthrottle,
	.read	    = armpmu_read,
};

void
hw_perf_enable(void)
{
@@ -2994,6 +3006,8 @@ init_hw_perf_events(void)
		perf_max_events = -1;
	}

	perf_pmu_register(&pmu);

	return 0;
}
arch_initcall(init_hw_perf_events);
+24 −22
Original line number Diff line number Diff line
@@ -904,16 +904,6 @@ int power_pmu_commit_txn(struct pmu *pmu)
	return 0;
}

struct pmu power_pmu = {
	.enable		= power_pmu_enable,
	.disable	= power_pmu_disable,
	.read		= power_pmu_read,
	.unthrottle	= power_pmu_unthrottle,
	.start_txn	= power_pmu_start_txn,
	.cancel_txn	= power_pmu_cancel_txn,
	.commit_txn	= power_pmu_commit_txn,
};

/*
 * Return 1 if we might be able to put event on a limited PMC,
 * or 0 if not.
@@ -1014,7 +1004,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
	return 0;
}

struct pmu *hw_perf_event_init(struct perf_event *event)
static int power_pmu_event_init(struct perf_event *event)
{
	u64 ev;
	unsigned long flags;
@@ -1026,25 +1016,27 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
	struct cpu_hw_events *cpuhw;

	if (!ppmu)
		return ERR_PTR(-ENXIO);
		return -ENOENT;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = event->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return ERR_PTR(-EOPNOTSUPP);
			return -EOPNOTSUPP;
		ev = ppmu->generic_events[ev];
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(event->attr.config, &ev);
		if (err)
			return ERR_PTR(err);
			return err;
		break;
	case PERF_TYPE_RAW:
		ev = event->attr.config;
		break;
	default:
		return ERR_PTR(-EINVAL);
		return -ENOENT;
	}

	event->hw.config_base = ev;
	event->hw.idx = 0;

@@ -1081,7 +1073,7 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
			 */
			ev = normal_pmc_alternative(ev, flags);
			if (!ev)
				return ERR_PTR(-EINVAL);
				return -EINVAL;
		}
	}

@@ -1095,19 +1087,19 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
		n = collect_events(event->group_leader, ppmu->n_counter - 1,
				   ctrs, events, cflags);
		if (n < 0)
			return ERR_PTR(-EINVAL);
			return -EINVAL;
	}
	events[n] = ev;
	ctrs[n] = event;
	cflags[n] = flags;
	if (check_excludes(ctrs, cflags, n, 1))
		return ERR_PTR(-EINVAL);
		return -EINVAL;

	cpuhw = &get_cpu_var(cpu_hw_events);
	err = power_check_constraints(cpuhw, events, cflags, n + 1);
	put_cpu_var(cpu_hw_events);
	if (err)
		return ERR_PTR(-EINVAL);
		return -EINVAL;

	event->hw.config = events[n];
	event->hw.event_base = cflags[n];
@@ -1132,11 +1124,20 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
	}
	event->destroy = hw_perf_event_destroy;

	if (err)
		return ERR_PTR(err);
	return &power_pmu;
	return err;
}

struct pmu power_pmu = {
	.event_init	= power_pmu_event_init,
	.enable		= power_pmu_enable,
	.disable	= power_pmu_disable,
	.read		= power_pmu_read,
	.unthrottle	= power_pmu_unthrottle,
	.start_txn	= power_pmu_start_txn,
	.cancel_txn	= power_pmu_cancel_txn,
	.commit_txn	= power_pmu_commit_txn,
};

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
@@ -1342,6 +1343,7 @@ int register_power_pmu(struct power_pmu *pmu)
		freeze_events_kernel = MMCR0_FCHV;
#endif /* CONFIG_PPC64 */

	perf_pmu_register(&power_pmu);
	perf_cpu_notifier(power_pmu_notifier);

	return 0;
+19 −18
Original line number Diff line number Diff line
@@ -378,13 +378,6 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
	local_irq_restore(flags);
}

static struct pmu fsl_emb_pmu = {
	.enable		= fsl_emb_pmu_enable,
	.disable	= fsl_emb_pmu_disable,
	.read		= fsl_emb_pmu_read,
	.unthrottle	= fsl_emb_pmu_unthrottle,
};

/*
 * Release the PMU if this is the last perf_event.
 */
@@ -428,7 +421,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
	return 0;
}

struct pmu *hw_perf_event_init(struct perf_event *event)
static int fsl_emb_pmu_event_init(struct perf_event *event)
{
	u64 ev;
	struct perf_event *events[MAX_HWEVENTS];
@@ -441,14 +434,14 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
	case PERF_TYPE_HARDWARE:
		ev = event->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return ERR_PTR(-EOPNOTSUPP);
			return -EOPNOTSUPP;
		ev = ppmu->generic_events[ev];
		break;

	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(event->attr.config, &ev);
		if (err)
			return ERR_PTR(err);
			return err;
		break;

	case PERF_TYPE_RAW:
@@ -456,12 +449,12 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
		break;

	default:
		return ERR_PTR(-EINVAL);
		return -ENOENT;
	}

	event->hw.config = ppmu->xlate_event(ev);
	if (!(event->hw.config & FSL_EMB_EVENT_VALID))
		return ERR_PTR(-EINVAL);
		return -EINVAL;

	/*
	 * If this is in a group, check if it can go on with all the
@@ -473,7 +466,7 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
		n = collect_events(event->group_leader,
		                   ppmu->n_counter - 1, events);
		if (n < 0)
			return ERR_PTR(-EINVAL);
			return -EINVAL;
	}

	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
@@ -484,7 +477,7 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
		}

		if (num_restricted >= ppmu->n_restricted)
			return ERR_PTR(-EINVAL);
			return -EINVAL;
	}

	event->hw.idx = -1;
@@ -497,7 +490,7 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
	if (event->attr.exclude_kernel)
		event->hw.config_base |= PMLCA_FCS;
	if (event->attr.exclude_idle)
		return ERR_PTR(-ENOTSUPP);
		return -ENOTSUPP;

	event->hw.last_period = event->hw.sample_period;
	local64_set(&event->hw.period_left, event->hw.last_period);
@@ -523,11 +516,17 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
	}
	event->destroy = hw_perf_event_destroy;

	if (err)
		return ERR_PTR(err);
	return &fsl_emb_pmu;
	return err;
}

static struct pmu fsl_emb_pmu = {
	.event_init	= fsl_emb_pmu_event_init,
	.enable		= fsl_emb_pmu_enable,
	.disable	= fsl_emb_pmu_disable,
	.read		= fsl_emb_pmu_read,
	.unthrottle	= fsl_emb_pmu_unthrottle,
};

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
@@ -651,5 +650,7 @@ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

	perf_pmu_register(&fsl_emb_pmu);

	return 0;
}
+24 −11
Original line number Diff line number Diff line
@@ -257,26 +257,38 @@ static void sh_pmu_read(struct perf_event *event)
	sh_perf_event_update(event, &event->hw, event->hw.idx);
}

static struct pmu pmu = {
	.enable		= sh_pmu_enable,
	.disable	= sh_pmu_disable,
	.read		= sh_pmu_read,
};

struct pmu *hw_perf_event_init(struct perf_event *event)
static int sh_pmu_event_init(struct perf_event *event)
{
	int err = __hw_perf_event_init(event);
	int err;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HW_CACHE:
	case PERF_TYPE_HARDWARE:
		err = __hw_perf_event_init(event);
		break;

	default:
		return -ENOENT;
	}

	if (unlikely(err)) {
		if (event->destroy)
			event->destroy(event);
		return ERR_PTR(err);
	}

	return &pmu;
	return err;
}

static struct pmu pmu = {
	.event_init	= sh_pmu_event_init,
	.enable		= sh_pmu_enable,
	.disable	= sh_pmu_disable,
	.read		= sh_pmu_read,
};

static void sh_pmu_setup(int cpu)
{

	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	memset(cpuhw, 0, sizeof(struct cpu_hw_events));
@@ -325,6 +337,7 @@ int __cpuinit register_sh_pmu(struct sh_pmu *pmu)

	WARN_ON(pmu->num_events > MAX_HWEVENTS);

	perf_pmu_register(&pmu);
	perf_cpu_notifier(sh_pmu_notifier);
	return 0;
}
Loading