Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3bf101ba authored by Matt Fleming's avatar Matt Fleming Committed by Robert Richter
Browse files

perf: Add helper function to return number of counters



The number of counters for the registered PMU is needed in a few places,
so provide a helper function that returns this number.

Signed-off-by: Matt Fleming <matt@console-pimps.org>
Tested-by: Will Deacon <will.deacon@arm.com>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Robert Richter <robert.richter@amd.com>
parent 4cbe75be
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -123,6 +123,12 @@ armpmu_get_max_events(void)
}
EXPORT_SYMBOL_GPL(armpmu_get_max_events);

/*
 * Generic perf helper: report how many hardware counters the
 * registered ARM PMU exposes (delegates to armpmu_get_max_events()).
 */
int perf_num_counters(void)
{
	int nr_counters = armpmu_get_max_events();

	return nr_counters;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

#define HW_OP_UNSUPPORTED		0xFFFF

#define C(_x) \
+18 −13
Original line number Diff line number Diff line
@@ -43,7 +43,7 @@ static DEFINE_MUTEX(op_arm_mutex);

static struct op_counter_config *counter_config;
static struct perf_event **perf_events[nr_cpumask_bits];
static int perf_num_counters;
static int num_counters;

/*
 * Overflow callback for oprofile.
@@ -54,11 +54,11 @@ static void op_overflow_handler(struct perf_event *event, int unused,
	int id;
	u32 cpu = smp_processor_id();

	for (id = 0; id < perf_num_counters; ++id)
	for (id = 0; id < num_counters; ++id)
		if (perf_events[cpu][id] == event)
			break;

	if (id != perf_num_counters)
	if (id != num_counters)
		oprofile_add_sample(regs, id);
	else
		pr_warning("oprofile: ignoring spurious overflow "
@@ -76,7 +76,7 @@ static void op_perf_setup(void)
	u32 size = sizeof(struct perf_event_attr);
	struct perf_event_attr *attr;

	for (i = 0; i < perf_num_counters; ++i) {
	for (i = 0; i < num_counters; ++i) {
		attr = &counter_config[i].attr;
		memset(attr, 0, size);
		attr->type		= PERF_TYPE_RAW;
@@ -131,7 +131,7 @@ static int op_perf_start(void)
	int cpu, event, ret = 0;

	for_each_online_cpu(cpu) {
		for (event = 0; event < perf_num_counters; ++event) {
		for (event = 0; event < num_counters; ++event) {
			ret = op_create_counter(cpu, event);
			if (ret)
				goto out;
@@ -150,7 +150,7 @@ static void op_perf_stop(void)
	int cpu, event;

	for_each_online_cpu(cpu)
		for (event = 0; event < perf_num_counters; ++event)
		for (event = 0; event < num_counters; ++event)
			op_destroy_counter(cpu, event);
}

@@ -179,7 +179,7 @@ static int op_arm_create_files(struct super_block *sb, struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < perf_num_counters; i++) {
	for (i = 0; i < num_counters; i++) {
		struct dentry *dir;
		char buf[4];

@@ -353,14 +353,19 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)

	memset(&perf_events, 0, sizeof(perf_events));

	perf_num_counters = armpmu_get_max_events();
	num_counters = perf_num_counters();
	if (num_counters <= 0) {
		pr_info("oprofile: no performance counters\n");
		ret = -ENODEV;
		goto out;
	}

	counter_config = kcalloc(perf_num_counters,
	counter_config = kcalloc(num_counters,
			sizeof(struct op_counter_config), GFP_KERNEL);

	if (!counter_config) {
		pr_info("oprofile: failed to allocate %d "
				"counters\n", perf_num_counters);
				"counters\n", num_counters);
		ret = -ENOMEM;
		goto out;
	}
@@ -370,11 +375,11 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
		goto out;

	for_each_possible_cpu(cpu) {
		perf_events[cpu] = kcalloc(perf_num_counters,
		perf_events[cpu] = kcalloc(num_counters,
				sizeof(struct perf_event *), GFP_KERNEL);
		if (!perf_events[cpu]) {
			pr_info("oprofile: failed to allocate %d perf events "
					"for cpu %d\n", perf_num_counters, cpu);
					"for cpu %d\n", num_counters, cpu);
			ret = -ENOMEM;
			goto out;
		}
@@ -409,7 +414,7 @@ void __exit oprofile_arch_exit(void)
	struct perf_event *event;

	for_each_possible_cpu(cpu) {
		for (id = 0; id < perf_num_counters; ++id) {
		for (id = 0; id < num_counters; ++id) {
			event = perf_events[cpu][id];
			if (event)
				perf_event_release_kernel(event);
+9 −0
Original line number Diff line number Diff line
@@ -59,6 +59,15 @@ static inline int sh_pmu_initialized(void)
	return !!sh_pmu;
}

/*
 * Generic perf helper: number of hardware counters provided by the
 * current SH PMU, or 0 when no PMU has been registered yet.
 */
int perf_num_counters(void)
{
	return sh_pmu ? sh_pmu->num_events : 0;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

/*
 * Release the PMU if this is the last perf_event.
 */
+1 −0
Original line number Diff line number Diff line
@@ -849,6 +849,7 @@ extern int perf_max_events;

extern const struct pmu *hw_perf_event_init(struct perf_event *event);

extern int perf_num_counters(void);
extern void perf_event_task_sched_in(struct task_struct *task);
extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
extern void perf_event_task_tick(struct task_struct *task);