Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 33696fc0 authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

perf: Per PMU disable



Changes perf_disable() into perf_pmu_disable().

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 24cd7f54
Loading
Loading
Loading
Loading
+16 −14
Original line number Diff line number Diff line
@@ -435,7 +435,7 @@ static int alpha_pmu_enable(struct perf_event *event)
	 * nevertheless we disable the PMCs first to enable a potential
	 * final PMI to occur before we disable interrupts.
	 */
	perf_disable();
	perf_pmu_disable(event->pmu);
	local_irq_save(flags);

	/* Default to error to be returned */
@@ -456,7 +456,7 @@ static int alpha_pmu_enable(struct perf_event *event)
	}

	local_irq_restore(flags);
	perf_enable();
	perf_pmu_enable(event->pmu);

	return ret;
}
@@ -474,7 +474,7 @@ static void alpha_pmu_disable(struct perf_event *event)
	unsigned long flags;
	int j;

	perf_disable();
	perf_pmu_disable(event->pmu);
	local_irq_save(flags);

	for (j = 0; j < cpuc->n_events; j++) {
@@ -502,7 +502,7 @@ static void alpha_pmu_disable(struct perf_event *event)
	}

	local_irq_restore(flags);
	perf_enable();
	perf_pmu_enable(event->pmu);
}


@@ -668,18 +668,10 @@ static int alpha_pmu_event_init(struct perf_event *event)
	return err;
}

static struct pmu pmu = {
	.event_init	= alpha_pmu_event_init,
	.enable		= alpha_pmu_enable,
	.disable	= alpha_pmu_disable,
	.read		= alpha_pmu_read,
	.unthrottle	= alpha_pmu_unthrottle,
};

/*
 * Main entry point - enable HW performance counters.
 */
void hw_perf_enable(void)
static void alpha_pmu_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

@@ -705,7 +697,7 @@ void hw_perf_enable(void)
 * Main entry point - disable HW performance counters.
 */

void hw_perf_disable(void)
static void alpha_pmu_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

@@ -718,6 +710,16 @@ void hw_perf_disable(void)
	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
}

static struct pmu pmu = {
	.pmu_enable	= alpha_pmu_pmu_enable,
	.pmu_disable	= alpha_pmu_pmu_disable,
	.event_init	= alpha_pmu_event_init,
	.enable		= alpha_pmu_enable,
	.disable	= alpha_pmu_disable,
	.read		= alpha_pmu_read,
	.unthrottle	= alpha_pmu_unthrottle,
};


/*
 * Main entry point - don't know when this is called but it
+14 −14
Original line number Diff line number Diff line
@@ -277,7 +277,7 @@ armpmu_enable(struct perf_event *event)
	int idx;
	int err = 0;

	perf_disable();
	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(cpuc, hwc);
@@ -305,7 +305,7 @@ armpmu_enable(struct perf_event *event)
	perf_event_update_userpage(event);

out:
	perf_enable();
	perf_pmu_enable(event->pmu);
	return err;
}

@@ -534,16 +534,7 @@ static int armpmu_event_init(struct perf_event *event)
	return err;
}

static struct pmu pmu = {
	.event_init = armpmu_event_init,
	.enable	    = armpmu_enable,
	.disable    = armpmu_disable,
	.unthrottle = armpmu_unthrottle,
	.read	    = armpmu_read,
};

void
hw_perf_enable(void)
static void armpmu_pmu_enable(struct pmu *pmu)
{
	/* Enable all of the perf events on hardware. */
	int idx;
@@ -564,13 +555,22 @@ hw_perf_enable(void)
	armpmu->start();
}

void
hw_perf_disable(void)
static void armpmu_pmu_disable(struct pmu *pmu)
{
	if (armpmu)
		armpmu->stop();
}

static struct pmu pmu = {
	.pmu_enable = armpmu_pmu_enable,
	.pmu_disable= armpmu_pmu_disable,
	.event_init = armpmu_event_init,
	.enable	    = armpmu_enable,
	.disable    = armpmu_disable,
	.unthrottle = armpmu_unthrottle,
	.read	    = armpmu_read,
};

/*
 * ARMv6 Performance counter handling code.
 *
+13 −11
Original line number Diff line number Diff line
@@ -517,7 +517,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */
void hw_perf_disable(void)
static void power_pmu_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
@@ -565,7 +565,7 @@ void hw_perf_disable(void)
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */
void hw_perf_enable(void)
static void power_pmu_pmu_enable(struct pmu *pmu)
{
	struct perf_event *event;
	struct cpu_hw_events *cpuhw;
@@ -735,7 +735,7 @@ static int power_pmu_enable(struct perf_event *event)
	int ret = -EAGAIN;

	local_irq_save(flags);
	perf_disable();
	perf_pmu_disable(event->pmu);

	/*
	 * Add the event to the list (if there is room)
@@ -769,7 +769,7 @@ nocheck:

	ret = 0;
 out:
	perf_enable();
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
	return ret;
}
@@ -784,7 +784,7 @@ static void power_pmu_disable(struct perf_event *event)
	unsigned long flags;

	local_irq_save(flags);
	perf_disable();
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);

@@ -821,7 +821,7 @@ static void power_pmu_disable(struct perf_event *event)
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	perf_enable();
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

@@ -837,7 +837,7 @@ static void power_pmu_unthrottle(struct perf_event *event)
	if (!event->hw.idx || !event->hw.sample_period)
		return;
	local_irq_save(flags);
	perf_disable();
	perf_pmu_disable(event->pmu);
	power_pmu_read(event);
	left = event->hw.sample_period;
	event->hw.last_period = left;
@@ -848,7 +848,7 @@ static void power_pmu_unthrottle(struct perf_event *event)
	local64_set(&event->hw.prev_count, val);
	local64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);
	perf_enable();
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

@@ -861,7 +861,7 @@ void power_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_disable();
	perf_pmu_disable(pmu);
	cpuhw->group_flag |= PERF_EVENT_TXN;
	cpuhw->n_txn_start = cpuhw->n_events;
}
@@ -876,7 +876,7 @@ void power_pmu_cancel_txn(struct pmu *pmu)
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_enable();
	perf_pmu_enable(pmu);
}

/*
@@ -903,7 +903,7 @@ int power_pmu_commit_txn(struct pmu *pmu)
		cpuhw->event[i]->hw.config = cpuhw->events[i];

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_enable();
	perf_pmu_enable(pmu);
	return 0;
}

@@ -1131,6 +1131,8 @@ static int power_pmu_event_init(struct perf_event *event)
}

struct pmu power_pmu = {
	.pmu_enable	= power_pmu_pmu_enable,
	.pmu_disable	= power_pmu_pmu_disable,
	.event_init	= power_pmu_event_init,
	.enable		= power_pmu_enable,
	.disable	= power_pmu_disable,
+10 −8
Original line number Diff line number Diff line
@@ -177,7 +177,7 @@ static void fsl_emb_pmu_read(struct perf_event *event)
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */
void hw_perf_disable(void)
static void fsl_emb_pmu_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
@@ -216,7 +216,7 @@ void hw_perf_disable(void)
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */
void hw_perf_enable(void)
static void fsl_emb_pmu_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
@@ -271,7 +271,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
	u64 val;
	int i;

	perf_disable();
	perf_pmu_disable(event->pmu);
	cpuhw = &get_cpu_var(cpu_hw_events);

	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -311,7 +311,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
	ret = 0;
 out:
	put_cpu_var(cpu_hw_events);
	perf_enable();
	perf_pmu_enable(event->pmu);
	return ret;
}

@@ -321,7 +321,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
	struct cpu_hw_events *cpuhw;
	int i = event->hw.idx;

	perf_disable();
	perf_pmu_disable(event->pmu);
	if (i < 0)
		goto out;

@@ -349,7 +349,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
	cpuhw->n_events--;

 out:
	perf_enable();
	perf_pmu_enable(event->pmu);
	put_cpu_var(cpu_hw_events);
}

@@ -367,7 +367,7 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
	if (event->hw.idx < 0 || !event->hw.sample_period)
		return;
	local_irq_save(flags);
	perf_disable();
	perf_pmu_disable(event->pmu);
	fsl_emb_pmu_read(event);
	left = event->hw.sample_period;
	event->hw.last_period = left;
@@ -378,7 +378,7 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
	local64_set(&event->hw.prev_count, val);
	local64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);
	perf_enable();
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

@@ -524,6 +524,8 @@ static int fsl_emb_pmu_event_init(struct perf_event *event)
}

static struct pmu fsl_emb_pmu = {
	.pmu_enable	= fsl_emb_pmu_pmu_enable,
	.pmu_disable	= fsl_emb_pmu_pmu_disable,
	.event_init	= fsl_emb_pmu_event_init,
	.enable		= fsl_emb_pmu_enable,
	.disable	= fsl_emb_pmu_disable,
+20 −18
Original line number Diff line number Diff line
@@ -232,7 +232,7 @@ static int sh_pmu_enable(struct perf_event *event)
	int idx = hwc->idx;
	int ret = -EAGAIN;

	perf_disable();
	perf_pmu_disable(event->pmu);

	if (test_and_set_bit(idx, cpuc->used_mask)) {
		idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
@@ -253,7 +253,7 @@ static int sh_pmu_enable(struct perf_event *event)
	perf_event_update_userpage(event);
	ret = 0;
out:
	perf_enable();
	perf_pmu_enable(event->pmu);
	return ret;
}

@@ -285,7 +285,25 @@ static int sh_pmu_event_init(struct perf_event *event)
	return err;
}

static void sh_pmu_pmu_enable(struct pmu *pmu)
{
	if (!sh_pmu_initialized())
		return;

	sh_pmu->enable_all();
}

static void sh_pmu_pmu_disable(struct pmu *pmu)
{
	if (!sh_pmu_initialized())
		return;

	sh_pmu->disable_all();
}

static struct pmu pmu = {
	.pmu_enable	= sh_pmu_pmu_enable,
	.pmu_disable	= sh_pmu_pmu_disable,
	.event_init	= sh_pmu_event_init,
	.enable		= sh_pmu_enable,
	.disable	= sh_pmu_disable,
@@ -316,22 +334,6 @@ sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
	return NOTIFY_OK;
}

void hw_perf_enable(void)
{
	if (!sh_pmu_initialized())
		return;

	sh_pmu->enable_all();
}

void hw_perf_disable(void)
{
	if (!sh_pmu_initialized())
		return;

	sh_pmu->disable_all();
}

int __cpuinit register_sh_pmu(struct sh_pmu *pmu)
{
	if (sh_pmu)
Loading