Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 51b0fe39 authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

perf: Deconstify struct pmu



sed -ie 's/const struct pmu\>/struct pmu/g' `git grep -l "const struct pmu\>"`

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 2aa61274
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -642,7 +642,7 @@ static int __hw_perf_event_init(struct perf_event *event)
	return 0;
}

static const struct pmu pmu = {
static struct pmu pmu = {
	.enable		= alpha_pmu_enable,
	.disable	= alpha_pmu_disable,
	.read		= alpha_pmu_read,
@@ -653,7 +653,7 @@ static const struct pmu pmu = {
/*
 * Main entry point to initialise a HW performance event.
 */
const struct pmu *hw_perf_event_init(struct perf_event *event)
struct pmu *hw_perf_event_init(struct perf_event *event)
{
	int err;

+1 −1
Original line number Diff line number Diff line
@@ -491,7 +491,7 @@ __hw_perf_event_init(struct perf_event *event)
	return err;
}

const struct pmu *
struct pmu *
hw_perf_event_init(struct perf_event *event)
{
	int err = 0;
+4 −4
Original line number Diff line number Diff line
@@ -857,7 +857,7 @@ static void power_pmu_unthrottle(struct perf_event *event)
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
void power_pmu_start_txn(const struct pmu *pmu)
void power_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

@@ -870,7 +870,7 @@ void power_pmu_start_txn(const struct pmu *pmu)
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
void power_pmu_cancel_txn(const struct pmu *pmu)
void power_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

@@ -882,7 +882,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu)
 * Perform the group schedulability test as a whole
 * Return 0 if success
 */
int power_pmu_commit_txn(const struct pmu *pmu)
int power_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	long i, n;
@@ -1014,7 +1014,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
	return 0;
}

const struct pmu *hw_perf_event_init(struct perf_event *event)
struct pmu *hw_perf_event_init(struct perf_event *event)
{
	u64 ev;
	unsigned long flags;
+1 −1
Original line number Diff line number Diff line
@@ -428,7 +428,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
	return 0;
}

const struct pmu *hw_perf_event_init(struct perf_event *event)
struct pmu *hw_perf_event_init(struct perf_event *event)
{
	u64 ev;
	struct perf_event *events[MAX_HWEVENTS];
+2 −2
Original line number Diff line number Diff line
@@ -257,13 +257,13 @@ static void sh_pmu_read(struct perf_event *event)
	sh_perf_event_update(event, &event->hw, event->hw.idx);
}

static const struct pmu pmu = {
static struct pmu pmu = {
	.enable		= sh_pmu_enable,
	.disable	= sh_pmu_disable,
	.read		= sh_pmu_read,
};

const struct pmu *hw_perf_event_init(struct perf_event *event)
struct pmu *hw_perf_event_init(struct perf_event *event)
{
	int err = __hw_perf_event_init(event);
	if (unlikely(err)) {
Loading