Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 24cd7f54 authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

perf: Reduce perf_disable() usage



Since the current perf_disable() usage is only an optimization,
remove it for now. This eases the removal of the __weak
hw_perf_enable() interface.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 9ed6060d
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -277,6 +277,8 @@ armpmu_enable(struct perf_event *event)
	int idx;
	int err = 0;

	perf_disable();

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(cpuc, hwc);
	if (idx < 0) {
@@ -303,6 +305,7 @@ armpmu_enable(struct perf_event *event)
	perf_event_update_userpage(event);

out:
	perf_enable();
	return err;
}

+3 −0
Original line number Diff line number Diff line
@@ -861,6 +861,7 @@ void power_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_disable();
	cpuhw->group_flag |= PERF_EVENT_TXN;
	cpuhw->n_txn_start = cpuhw->n_events;
}
@@ -875,6 +876,7 @@ void power_pmu_cancel_txn(struct pmu *pmu)
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_enable();
}

/*
@@ -901,6 +903,7 @@ int power_pmu_commit_txn(struct pmu *pmu)
		cpuhw->event[i]->hw.config = cpuhw->events[i];

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_enable();
	return 0;
}

+6 −2
Original line number Diff line number Diff line
@@ -262,7 +262,7 @@ static int collect_events(struct perf_event *group, int max_count,
	return n;
}

/* perf must be disabled, context locked on entry */
/* context locked on entry */
static int fsl_emb_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw;
@@ -271,6 +271,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
	u64 val;
	int i;

	perf_disable();
	cpuhw = &get_cpu_var(cpu_hw_events);

	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -310,15 +311,17 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
	ret = 0;
 out:
	put_cpu_var(cpu_hw_events);
	perf_enable();
	return ret;
}

/* perf must be disabled, context locked on entry */
/* context locked on entry */
static void fsl_emb_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw;
	int i = event->hw.idx;

	perf_disable();
	if (i < 0)
		goto out;

@@ -346,6 +349,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
	cpuhw->n_events--;

 out:
	perf_enable();
	put_cpu_var(cpu_hw_events);
}

+8 −3
Original line number Diff line number Diff line
@@ -230,11 +230,14 @@ static int sh_pmu_enable(struct perf_event *event)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	int ret = -EAGAIN;

	perf_disable();

	if (test_and_set_bit(idx, cpuc->used_mask)) {
		idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
		if (idx == sh_pmu->num_events)
			return -EAGAIN;
			goto out;

		set_bit(idx, cpuc->used_mask);
		hwc->idx = idx;
@@ -248,8 +251,10 @@ static int sh_pmu_enable(struct perf_event *event)
	sh_pmu->enable(hwc, idx);

	perf_event_update_userpage(event);

	return 0;
	ret = 0;
out:
	perf_enable();
	return ret;
}

static void sh_pmu_read(struct perf_event *event)
+3 −0
Original line number Diff line number Diff line
@@ -1113,6 +1113,7 @@ static void sparc_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_disable();
	cpuhw->group_flag |= PERF_EVENT_TXN;
}

@@ -1126,6 +1127,7 @@ static void sparc_pmu_cancel_txn(struct pmu *pmu)
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_enable();
}

/*
@@ -1149,6 +1151,7 @@ static int sparc_pmu_commit_txn(struct pmu *pmu)
		return -EAGAIN;

	cpuc->group_flag &= ~PERF_EVENT_TXN;
	perf_enable();
	return 0;
}

Loading