
Commit 4b39fd96 authored by Mike Galbraith, committed by Ingo Molnar

perfcounters: ratelimit performance counter interrupts



Ratelimit performance counter interrupts to 100KHz per CPU.

This replaces the irq-delta-time based method.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 1b023a96
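
The change in mechanism, in short: the old code timestamped each PMU interrupt with sched_clock() and set a per-CPU throttled flag whenever two interrupts arrived less than PERFMON_MIN_PERIOD_NS apart; the new code simply counts interrupts, stops re-enabling the PMU once the per-CPU count reaches PERFMON_MAX_INTERRUPTS, and lets perf_counter_unthrottle() re-enable it and zero the count on the next timer tick. A minimal user-space sketch of that count-and-reset scheme (illustrative only: the HZ value, the pmu_enabled flag and the main() driver are assumptions standing in for the MSR writes in the real code):

#include <stdio.h>

#define HZ			1000		/* assumed timer tick rate */
#define PERFMON_MAX_INTERRUPTS	(100000 / HZ)	/* PMU interrupt budget per tick */

static unsigned long interrupts;	/* per-CPU in the kernel code */
static int pmu_enabled = 1;		/* stands in for MSR_CORE_PERF_GLOBAL_CTRL */

/* Overflow handler: spend budget; stop re-enabling once it runs out. */
static void perf_interrupt(void)
{
	if (++interrupts >= PERFMON_MAX_INTERRUPTS)
		pmu_enabled = 0;	/* throttled until the next tick */
}

/* Per-tick work, like perf_counter_unthrottle(): warn, restore, reset. */
static void timer_tick(void)
{
	if (interrupts >= PERFMON_MAX_INTERRUPTS)
		printf("PERFMON: max interrupts exceeded!\n");
	pmu_enabled = 1;
	interrupts = 0;
}

int main(void)
{
	int i;

	/* Simulate an interrupt storm: 150 overflows within one tick. */
	for (i = 0; i < 150; i++)
		if (pmu_enabled)
			perf_interrupt();	/* only the first 100 are serviced */

	timer_tick();	/* prints the warning, re-enables the PMU, resets the budget */
	return 0;
}

Counting per tick bounds the average interrupt rate directly and drops the sched_clock() call from the NMI path.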
+15 −12
@@ -33,9 +33,8 @@ static int nr_counters_fixed __read_mostly;
 struct cpu_hw_counters {
 	struct perf_counter	*counters[X86_PMC_IDX_MAX];
 	unsigned long		used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
-	u64			last_interrupt;
+	unsigned long		interrupts;
 	u64			global_enable;
-	int			throttled;
 };
 
 /*
@@ -470,6 +469,11 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
 	}
 }
 
+/*
+ * Maximum interrupt frequency of 100KHz per CPU
+ */
+#define PERFMON_MAX_INTERRUPTS 100000/HZ
+
 /*
  * This handler is triggered by the local APIC, so the APIC IRQ handling
  * rules apply:
@@ -477,7 +481,7 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
 static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
 {
 	int bit, cpu = smp_processor_id();
-	u64 ack, status, now;
+	u64 ack, status;
 	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
 
 	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
@@ -486,11 +490,6 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 	ack_APIC_irq();
 
-	now = sched_clock();
-	if (now - cpuc->last_interrupt < PERFMON_MIN_PERIOD_NS)
-		cpuc->throttled = 1;
-	cpuc->last_interrupt = now;
-
 	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
 	if (!status)
 		goto out;
@@ -541,13 +540,14 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
 	/*
 	 * Restore - do not reenable when global enable is off or throttled:
 	 */
-	if (!cpuc->throttled)
+	if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
 		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
 }
 
 void perf_counter_unthrottle(void)
 {
 	struct cpu_hw_counters *cpuc;
+	u64 global_enable;
 
 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
 		return;
@@ -556,12 +556,15 @@ void perf_counter_unthrottle(void)
 		return;
 
 	cpuc = &per_cpu(cpu_hw_counters, smp_processor_id());
-	if (cpuc->throttled) {
+	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
 		if (printk_ratelimit())
-			printk(KERN_WARNING "PERFMON: max event frequency exceeded!\n");
+			printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n");
 		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
-		cpuc->throttled = 0;
 	}
+	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global_enable);
+	if (unlikely(cpuc->global_enable && !global_enable))
+		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
+	cpuc->interrupts = 0;
 }
 
 void smp_perf_counter_interrupt(struct pt_regs *regs)
+0 −2
@@ -271,8 +271,6 @@ static inline int is_software_counter(struct perf_counter *counter)
 	return !counter->hw_event.raw && counter->hw_event.type < 0;
 }
 
-#define PERFMON_MIN_PERIOD_NS 10000
-
 #else
 static inline void
 perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
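
For reference, assuming perf_counter_unthrottle() runs once per timer tick (which is what the 100KHz figure in the changelog implies): a budget of 100000/HZ interrupts per tick times HZ ticks per second gives 100,000 interrupts per second per CPU, independent of the configured HZ. The old PERFMON_MIN_PERIOD_NS of 10000ns encoded the same 100KHz ceiling as a minimum spacing between consecutive interrupts; the new code enforces it as a per-tick average instead.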