Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a13c3afd authored by Lin Ming, committed by Ingo Molnar
Browse files

perf, sparc: Implement group scheduling transactional APIs



Convert to the transactional PMU API and remove the duplication of
group_sched_in().

[cross build only]

Signed-off-by: Lin Ming <ming.m.lin@intel.com>
Acked-by: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1272002193.5707.65.camel@minggr.sh.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 6d1acfd5
Loading
Loading
Loading
Loading
+61 −47
Original line number Original line Diff line number Diff line
@@ -91,6 +91,8 @@ struct cpu_hw_events {


	/* Enabled/disable state.  */
	/* Enabled/disable state.  */
	int			enabled;
	int			enabled;

	unsigned int		group_flag;
};
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };


@@ -980,53 +982,6 @@ static int collect_events(struct perf_event *group, int max_count,
	return n;
	return n;
}
}


static void event_sched_in(struct perf_event *event)
{
	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();
	event->tstamp_running += event->ctx->time - event->tstamp_stopped;
	if (is_software_event(event))
		event->pmu->enable(event);
}

/*
 * Schedule a whole event group (leader plus siblings) onto this CPU's
 * hardware counters in one shot.
 *
 * Returns 1 when the group was scheduled, 0 when there is no sparc PMU
 * (caller should fall back to generic scheduling), or a negative errno
 * (-EAGAIN / -EINVAL) when the group cannot be scheduled.
 */
int hw_perf_group_sched_in(struct perf_event *group_leader,
			   struct perf_cpu_context *cpuctx,
			   struct perf_event_context *ctx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *sub;
	int n0, n;

	if (!sparc_pmu)
		return 0;

	/* Append the group's countable events after the n0 already queued. */
	n0 = cpuc->n_events;
	n = collect_events(group_leader, perf_max_events - n0,
			   &cpuc->event[n0], &cpuc->events[n0],
			   &cpuc->current_idx[n0]);
	if (n < 0)
		return -EAGAIN;
	/* Exclude-bit and hardware-constraint checks run over ALL queued events. */
	if (check_excludes(cpuc->event, n0, n))
		return -EINVAL;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n + n0))
		return -EAGAIN;
	/* Checks passed: commit the new events to the per-cpu bookkeeping. */
	cpuc->n_events = n0 + n;
	cpuc->n_added += n;

	cpuctx->active_oncpu += n;
	/* Re-use n to count events actually marked active (leader counts as 1). */
	n = 1;
	event_sched_in(group_leader);
	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
		if (sub->state != PERF_EVENT_STATE_OFF) {
			event_sched_in(sub);
			n++;
		}
	}
	ctx->nr_active += n;

	return 1;
}

static int sparc_pmu_enable(struct perf_event *event)
static int sparc_pmu_enable(struct perf_event *event)
{
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -1044,11 +999,20 @@ static int sparc_pmu_enable(struct perf_event *event)
	cpuc->events[n0] = event->hw.event_base;
	cpuc->events[n0] = event->hw.event_base;
	cpuc->current_idx[n0] = PIC_NO_INDEX;
	cpuc->current_idx[n0] = PIC_NO_INDEX;


	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time(->commit_txn) as a whole
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
		goto nocheck;

	if (check_excludes(cpuc->event, n0, 1))
	if (check_excludes(cpuc->event, n0, 1))
		goto out;
		goto out;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
		goto out;
		goto out;


nocheck:
	cpuc->n_events++;
	cpuc->n_events++;
	cpuc->n_added++;
	cpuc->n_added++;


@@ -1128,11 +1092,61 @@ static int __hw_perf_event_init(struct perf_event *event)
	return 0;
	return 0;
}
}


/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
/*
 * Open a group scheduling transaction: flag the per-cpu state so that
 * sparc_pmu_enable() skips the per-event schedulability test; the test
 * runs once for the whole group in sparc_pmu_commit_txn().
 */
static void sparc_pmu_start_txn(const struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	cpuc->group_flag |= PERF_EVENT_TXN_STARTED;
}

/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
/*
 * Abort a group scheduling transaction: drop the flag again so that
 * sparc_pmu_enable() goes back to testing schedulability per event.
 */
static void sparc_pmu_cancel_txn(const struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	cpuc->group_flag &= ~PERF_EVENT_TXN_STARTED;
}

/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 if success
 */
/*
 * Commit a group scheduling transaction: perform the exclude-bit and
 * hardware-constraint checks over all collected events as a whole.
 *
 * Returns 0 on success, -EINVAL when there is no sparc PMU or the
 * exclude settings conflict, -EAGAIN when the constraints cannot be
 * satisfied.
 */
static int sparc_pmu_commit_txn(const struct pmu *pmu)
{
	struct cpu_hw_events *cpuc;
	int n;

	if (!sparc_pmu)
		return -EINVAL;

	/*
	 * Fetch the per-cpu state once; the original fetched it both at
	 * the declaration and again here, redundantly.
	 */
	cpuc = &__get_cpu_var(cpu_hw_events);
	n = cpuc->n_events;
	if (check_excludes(cpuc->event, 0, n))
		return -EINVAL;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
		return -EAGAIN;

	return 0;
}

static const struct pmu pmu = {
static const struct pmu pmu = {
	.enable		= sparc_pmu_enable,
	.enable		= sparc_pmu_enable,
	.disable	= sparc_pmu_disable,
	.disable	= sparc_pmu_disable,
	.read		= sparc_pmu_read,
	.read		= sparc_pmu_read,
	.unthrottle	= sparc_pmu_unthrottle,
	.unthrottle	= sparc_pmu_unthrottle,
	.start_txn	= sparc_pmu_start_txn,
	.cancel_txn	= sparc_pmu_cancel_txn,
	.commit_txn	= sparc_pmu_commit_txn,
};
};


const struct pmu *hw_perf_event_init(struct perf_event *event)
const struct pmu *hw_perf_event_init(struct perf_event *event)