
Commit 95cdd2e7 authored by Ingo Molnar

perfcounters: enable lowlevel pmc code to schedule counters



Allow the lowlevel ->enable() op to return an error if a counter cannot be
added. This can be used to handle counter constraints.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 78b6084c
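
For illustration only (not part of the commit, and using hypothetical names): the contract this change introduces is that a low-level enable operation may now refuse a counter, e.g. with -EAGAIN when no PMC slot is free or a constraint is violated, and group scheduling must then roll back any group members it has already enabled. A minimal user-space C sketch of that pattern:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for a counter plus its hw ops; not the kernel types. */
struct counter {
	const char *name;
	int (*enable)(struct counter *);	/* may now return an error */
	void (*disable)(struct counter *);
};

static int free_slots = 1;	/* pretend the PMU has one free generic slot */

static int demo_enable(struct counter *c)
{
	if (!free_slots)
		return -EAGAIN;	/* constraint: no slot for this counter */
	free_slots--;
	printf("enabled %s\n", c->name);
	return 0;
}

static void demo_disable(struct counter *c)
{
	free_slots++;
	printf("disabled %s\n", c->name);
}

/* Enable every counter of a group, or roll the whole group back. */
static int group_enable(struct counter *group, int nr)
{
	int i, err;

	for (i = 0; i < nr; i++) {
		err = group[i].enable(&group[i]);
		if (err) {
			/* Undo the partial group before returning: */
			while (--i >= 0)
				group[i].disable(&group[i]);
			return err;
		}
	}
	return 0;
}

int main(void)
{
	struct counter group[] = {
		{ "cycles",       demo_enable, demo_disable },
		{ "instructions", demo_enable, demo_disable },
	};

	if (group_enable(group, 2))
		printf("group not schedulable, try again later\n");
	return 0;
}

This mirrors the group_error path added to group_sched_in() in the diff below: a group can only be scheduled in as one unit, so a partially enabled group is undone before the error is propagated.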
+5 −1
@@ -244,7 +244,7 @@ static int fixed_mode_idx(struct hw_perf_counter *hwc)
 /*
  * Find a PMC slot for the freshly enabled / scheduled in counter:
  */
-static void pmc_generic_enable(struct perf_counter *counter)
+static int pmc_generic_enable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 	struct hw_perf_counter *hwc = &counter->hw;
@@ -253,6 +253,8 @@ static void pmc_generic_enable(struct perf_counter *counter)
 	/* Try to get the previous counter again */
 	if (test_and_set_bit(idx, cpuc->used)) {
 		idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
+		if (idx == nr_counters_generic)
+			return -EAGAIN;
 		set_bit(idx, cpuc->used);
 		hwc->idx = idx;
 	}
@@ -265,6 +267,8 @@ static void pmc_generic_enable(struct perf_counter *counter)
 
 	__hw_perf_counter_set_period(counter, hwc, idx);
 	__pmc_generic_enable(counter, hwc, idx);
+
+	return 0;
 }
 
 void perf_counter_print_debug(void)
+1 −1
@@ -128,7 +128,7 @@ struct perf_counter;
  * struct hw_perf_counter_ops - performance counter hw ops
  */
 struct hw_perf_counter_ops {
-	void (*enable)			(struct perf_counter *counter);
+	int (*enable)			(struct perf_counter *counter);
 	void (*disable)			(struct perf_counter *counter);
 	void (*read)			(struct perf_counter *counter);
 };
+45 −17
@@ -355,21 +355,25 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu)
 	cpuctx->task_ctx = NULL;
 }
 
-static void
+static int
 counter_sched_in(struct perf_counter *counter,
 		 struct perf_cpu_context *cpuctx,
 		 struct perf_counter_context *ctx,
 		 int cpu)
 {
 	if (counter->state == PERF_COUNTER_STATE_OFF)
-		return;
+		return 0;
+
+	if (counter->hw_ops->enable(counter))
+		return -EAGAIN;
 
-	counter->hw_ops->enable(counter);
 	counter->state = PERF_COUNTER_STATE_ACTIVE;
 	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
 
 	cpuctx->active_oncpu++;
 	ctx->nr_active++;
+
+	return 0;
 }
 
 static int
@@ -378,20 +382,38 @@ group_sched_in(struct perf_counter *group_counter,
 	       struct perf_counter_context *ctx,
 	       int cpu)
 {
-	struct perf_counter *counter;
-	int was_group = 0;
+	struct perf_counter *counter, *partial_group;
+	int ret = 0;
 
-	counter_sched_in(group_counter, cpuctx, ctx, cpu);
+	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
+		return -EAGAIN;
 
 	/*
 	 * Schedule in siblings as one group (if any):
 	 */
 	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
-		counter_sched_in(counter, cpuctx, ctx, cpu);
-		was_group = 1;
+		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
+			partial_group = counter;
+			goto group_error;
+		}
+		ret = -EAGAIN;
 	}
+
+	return ret;
+
+group_error:
+	/*
+	 * Groups can be scheduled in as one unit only, so undo any
+	 * partial group before returning:
+	 */
+	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+		if (counter == partial_group)
+			break;
+		counter_sched_out(counter, cpuctx, ctx);
+	}
+	counter_sched_out(group_counter, cpuctx, ctx);
 
-	return was_group;
+	return -EAGAIN;
 }
 
 /*
@@ -416,9 +438,6 @@ void perf_counter_task_sched_in(struct task_struct *task, int cpu)
 
 	spin_lock(&ctx->lock);
 	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-		if (ctx->nr_active == cpuctx->max_pertask)
-			break;
-
 		/*
 		 * Listen to the 'cpu' scheduling filter constraint
 		 * of counters:
@@ -856,8 +875,9 @@ static const struct file_operations perf_fops = {
 	.poll			= perf_poll,
 };
 
-static void cpu_clock_perf_counter_enable(struct perf_counter *counter)
+static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
 {
+	return 0;
 }
 
 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
@@ -913,11 +933,13 @@ static void task_clock_perf_counter_read(struct perf_counter *counter)
 	task_clock_perf_counter_update(counter, now);
 }
 
-static void task_clock_perf_counter_enable(struct perf_counter *counter)
+static int task_clock_perf_counter_enable(struct perf_counter *counter)
 {
 	u64 now = task_clock_perf_counter_val(counter, 0);
 
 	atomic64_set(&counter->hw.prev_count, now);
+
+	return 0;
 }
 
 static void task_clock_perf_counter_disable(struct perf_counter *counter)
@@ -960,12 +982,14 @@ static void page_faults_perf_counter_read(struct perf_counter *counter)
 	page_faults_perf_counter_update(counter);
 }
 
-static void page_faults_perf_counter_enable(struct perf_counter *counter)
+static int page_faults_perf_counter_enable(struct perf_counter *counter)
 {
 	/*
 	 * page-faults is a per-task value already,
 	 * so we dont have to clear it on switch-in.
 	 */
+
+	return 0;
 }
 
 static void page_faults_perf_counter_disable(struct perf_counter *counter)
@@ -1006,12 +1030,14 @@ static void context_switches_perf_counter_read(struct perf_counter *counter)
 	context_switches_perf_counter_update(counter);
 }
 
-static void context_switches_perf_counter_enable(struct perf_counter *counter)
+static int context_switches_perf_counter_enable(struct perf_counter *counter)
 {
 	/*
 	 * ->nvcsw + curr->nivcsw is a per-task value already,
 	 * so we dont have to clear it on switch-in.
	 */
+
+	return 0;
 }
 
 static void context_switches_perf_counter_disable(struct perf_counter *counter)
@@ -1050,12 +1076,14 @@ static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
 	cpu_migrations_perf_counter_update(counter);
 }
 
-static void cpu_migrations_perf_counter_enable(struct perf_counter *counter)
+static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
 {
 	/*
 	 * se.nr_migrations is a per-task value already,
 	 * so we dont have to clear it on switch-in.
 	 */
+
+	return 0;
 }
 
 static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)