Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8617f98c authored by Robert Richter
Browse files

oprofile/x86: return -EBUSY if counters are already reserved



In case a counter is already reserved by the watchdog or perf_event
subsystem, oprofile silently ignored this counter. This case is
handled now and oprofile_setup() now reports an error.

Signed-off-by: Robert Richter <robert.richter@amd.com>
parent 83300ce0
Loading
Loading
Loading
Loading
+4 −1
Original line number Diff line number Diff line
@@ -357,7 +357,10 @@ static int nmi_setup(void)
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;
+13 −11
Original line number Diff line number Diff line
@@ -138,21 +138,30 @@ static void op_amd_shutdown(struct op_msrs const * const msrs)
	}
}

static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; i++) {
		if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
			continue;
			goto fail;
		if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
			continue;
			goto fail;
		}
		/* both registers must be reserved */
		msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
		msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
		continue;
	fail:
		if (!counter_config[i].enabled)
			continue;
		op_x86_warn_reserved(i);
		op_amd_shutdown(msrs);
		return -EBUSY;
	}

	return 0;
}

static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
@@ -172,15 +181,8 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,

	/* clear all counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (unlikely(!msrs->controls[i].addr)) {
			if (counter_config[i].enabled && !smp_processor_id())
				/*
				 * counter is reserved, this is on all
				 * cpus, so report only for cpu #0
				 */
				op_x86_warn_reserved(i);
		if (!msrs->controls[i].addr)
			continue;
		}
		rdmsrl(msrs->controls[i].addr, val);
		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
			op_x86_warn_in_use(i);
+13 −1
Original line number Diff line number Diff line
@@ -404,7 +404,7 @@ static void p4_shutdown(struct op_msrs const * const msrs)
	}
}

static void p4_fill_in_addresses(struct op_msrs * const msrs)
static int p4_fill_in_addresses(struct op_msrs * const msrs)
{
	unsigned int i;
	unsigned int addr, cccraddr, stag;
@@ -486,6 +486,18 @@ static void p4_fill_in_addresses(struct op_msrs * const msrs)
			msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
		}
	}

	for (i = 0; i < num_counters; ++i) {
		if (!counter_config[i].enabled)
			continue;
		if (msrs->controls[i].addr)
			continue;
		op_x86_warn_reserved(i);
		p4_shutdown(msrs);
		return -EBUSY;
	}

	return 0;
}


+13 −11
Original line number Diff line number Diff line
@@ -46,21 +46,30 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
	}
}

static void ppro_fill_in_addresses(struct op_msrs * const msrs)
static int ppro_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < num_counters; i++) {
		if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
			continue;
			goto fail;
		if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
			release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
			continue;
			goto fail;
		}
		/* both registers must be reserved */
		msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
		msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
		continue;
	fail:
		if (!counter_config[i].enabled)
			continue;
		op_x86_warn_reserved(i);
		ppro_shutdown(msrs);
		return -EBUSY;
	}

	return 0;
}


@@ -96,15 +105,8 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,

	/* clear all counters */
	for (i = 0; i < num_counters; ++i) {
		if (unlikely(!msrs->controls[i].addr)) {
			if (counter_config[i].enabled && !smp_processor_id())
				/*
				 * counter is reserved, this is on all
				 * cpus, so report only for cpu #0
				 */
				op_x86_warn_reserved(i);
		if (!msrs->controls[i].addr)
			continue;
		}
		rdmsrl(msrs->controls[i].addr, val);
		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
			op_x86_warn_in_use(i);
+1 −1
Original line number Diff line number Diff line
@@ -41,7 +41,7 @@ struct op_x86_model_spec {
	u16		event_mask;
	int		(*init)(struct oprofile_operations *ops);
	void		(*exit)(void);
	void		(*fill_in_addresses)(struct op_msrs * const msrs);
	int		(*fill_in_addresses)(struct op_msrs * const msrs);
	void		(*setup_ctrs)(struct op_x86_model_spec const *model,
				      struct op_msrs const * const msrs);
	int		(*check_ctrs)(struct pt_regs * const regs,