Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e7850595 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar
Browse files

perf: Convert perf_event to local_t



Since now all modifications to event->count (and ->prev_count
and ->period_left) are local to a cpu, change them to local64_t so we
avoid the LOCK'ed ops.

Signed-off-by: default avatarPeter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: default avatarIngo Molnar <mingo@elte.hu>
parent a6e6dea6
Loading
Loading
Loading
Loading
+9 −9
Original line number Diff line number Diff line
@@ -164,20 +164,20 @@ armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
@@ -185,7 +185,7 @@ armpmu_event_set_period(struct perf_event *event,
	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	atomic64_set(&hwc->prev_count, (u64)-left);
	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

@@ -204,18 +204,18 @@ armpmu_event_update(struct perf_event *event,
	s64 delta;

again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
@@ -478,7 +478,7 @@ __hw_perf_event_init(struct perf_event *event)
	if (!hwc->sample_period) {
		hwc->sample_period  = armpmu->max_period;
		hwc->last_period    = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
+17 −17
Original line number Diff line number Diff line
@@ -410,15 +410,15 @@ static void power_pmu_read(struct perf_event *event)
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = atomic64_read(&event->hw.prev_count);
		prev = local64_read(&event->hw.prev_count);
		barrier();
		val = read_pmc(event->hw.idx);
	} while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	/* The counters are only 32 bits wide */
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &event->hw.period_left);
	local64_add(delta, &event->count);
	local64_sub(delta, &event->hw.period_left);
}

/*
@@ -444,10 +444,10 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
		if (!event->hw.idx)
			continue;
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = atomic64_read(&event->hw.prev_count);
		prev = local64_read(&event->hw.prev_count);
		event->hw.idx = 0;
		delta = (val - prev) & 0xfffffffful;
		atomic64_add(delta, &event->count);
		local64_add(delta, &event->count);
	}
}

@@ -462,7 +462,7 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
		event = cpuhw->limited_counter[i];
		event->hw.idx = cpuhw->limited_hwidx[i];
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		atomic64_set(&event->hw.prev_count, val);
		local64_set(&event->hw.prev_count, val);
		perf_event_update_userpage(event);
	}
}
@@ -666,11 +666,11 @@ void hw_perf_enable(void)
		}
		val = 0;
		if (event->hw.sample_period) {
			left = atomic64_read(&event->hw.period_left);
			left = local64_read(&event->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		atomic64_set(&event->hw.prev_count, val);
		local64_set(&event->hw.prev_count, val);
		event->hw.idx = idx;
		write_pmc(idx, val);
		perf_event_update_userpage(event);
@@ -842,8 +842,8 @@ static void power_pmu_unthrottle(struct perf_event *event)
	if (left < 0x80000000L)
		val = 0x80000000L - left;
	write_pmc(event->hw.idx, val);
	atomic64_set(&event->hw.prev_count, val);
	atomic64_set(&event->hw.period_left, left);
	local64_set(&event->hw.prev_count, val);
	local64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);
	perf_enable();
	local_irq_restore(flags);
@@ -1109,7 +1109,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
	event->hw.config = events[n];
	event->hw.event_base = cflags[n];
	event->hw.last_period = event->hw.sample_period;
	atomic64_set(&event->hw.period_left, event->hw.last_period);
	local64_set(&event->hw.period_left, event->hw.last_period);

	/*
	 * See if we need to reserve the PMU.
@@ -1147,16 +1147,16 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
	int record = 0;

	/* we don't have to worry about interrupts here */
	prev = atomic64_read(&event->hw.prev_count);
	prev = local64_read(&event->hw.prev_count);
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &event->count);
	local64_add(delta, &event->count);

	/*
	 * See if the total period for this event has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = atomic64_read(&event->hw.period_left) - delta;
	left = local64_read(&event->hw.period_left) - delta;
	if (period) {
		if (left <= 0) {
			left += period;
@@ -1194,8 +1194,8 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
	}

	write_pmc(event->hw.idx, val);
	atomic64_set(&event->hw.prev_count, val);
	atomic64_set(&event->hw.period_left, left);
	local64_set(&event->hw.prev_count, val);
	local64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);
}

+3 −3
Original line number Diff line number Diff line
@@ -185,10 +185,10 @@ static void sh_perf_event_update(struct perf_event *event,
	 * this is the simplest approach for maintaining consistency.
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = sh_pmu->read(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

@@ -203,7 +203,7 @@ static void sh_perf_event_update(struct perf_event *event,
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	local64_add(delta, &event->count);
}

static void sh_pmu_disable(struct perf_event *event)
+9 −9
Original line number Diff line number Diff line
@@ -572,18 +572,18 @@ static u64 sparc_perf_event_update(struct perf_event *event,
	s64 delta;

again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = read_pmc(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
@@ -591,27 +591,27 @@ static u64 sparc_perf_event_update(struct perf_event *event,
static int sparc_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	if (left > MAX_PERIOD)
		left = MAX_PERIOD;

	atomic64_set(&hwc->prev_count, (u64)-left);
	local64_set(&hwc->prev_count, (u64)-left);

	write_pmc(idx, (u64)(-left) & 0xffffffff);

@@ -1087,7 +1087,7 @@ static int __hw_perf_event_init(struct perf_event *event)
	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
+9 −9
Original line number Diff line number Diff line
@@ -296,10 +296,10 @@ x86_perf_event_update(struct perf_event *event)
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	prev_raw_count = local64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

@@ -314,8 +314,8 @@ x86_perf_event_update(struct perf_event *event)
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
@@ -439,7 +439,7 @@ static int x86_setup_perfctr(struct perf_event *event)
	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
		local64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
@@ -886,7 +886,7 @@ static int
x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = atomic64_read(&hwc->period_left);
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0, idx = hwc->idx;

@@ -898,14 +898,14 @@ x86_perf_event_set_period(struct perf_event *event)
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
@@ -924,7 +924,7 @@ x86_perf_event_set_period(struct perf_event *event)
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extra future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);
	local64_set(&hwc->prev_count, (u64)-left);

	wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);

Loading