Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 03086359 authored by Ingo Molnar
Browse files

Merge branch 'perf/x86' into perf/core



Merge reason: The new P4 driver is stable and ready now for more
              testing.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parents 3997d377 0b861225
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -5,7 +5,7 @@
 * Performance event hw details:
 */

#define X86_PMC_MAX_GENERIC					8
#define X86_PMC_MAX_GENERIC				       32
#define X86_PMC_MAX_FIXED					3

#define X86_PMC_IDX_GENERIC				        0
+707 −0

File added.

Preview size limit exceeded, changes collapsed.

+32 −17
Original line number Diff line number Diff line
@@ -190,6 +190,8 @@ struct x86_pmu {
	void		(*enable_all)(void);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	int		(*hw_config)(struct perf_event_attr *attr, struct hw_perf_event *hwc);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
@@ -415,6 +417,25 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
	return 0;
}

/*
 * Default ->hw_config() callback: build the base EVNTSEL value for a
 * generic x86 performance event from the requested attributes.
 * Returns 0 (this default implementation cannot fail).
 */
static int x86_hw_config(struct perf_event_attr *attr, struct hw_perf_event *hwc)
{
	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	return 0;
}

/*
 * Setup the hardware configuration for a given attr_type
 */
@@ -446,23 +467,13 @@ static int __hw_perf_event_init(struct perf_event *event)

	event->destroy = hw_perf_event_destroy;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	hwc->idx = -1;
	hwc->last_cpu = -1;
	hwc->last_tag = ~0ULL;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
	/* Processor specifics */
	if (x86_pmu.hw_config(attr, hwc))
		return -EOPNOTSUPP;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
@@ -517,7 +528,7 @@ static int __hw_perf_event_init(struct perf_event *event)
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		if (!attr->exclude_kernel)
			return -EOPNOTSUPP;
	}

@@ -925,7 +936,7 @@ static int x86_pmu_enable(struct perf_event *event)
	if (n < 0)
		return n;

	ret = x86_schedule_events(cpuc, n, assign);
	ret = x86_pmu.schedule_events(cpuc, n, assign);
	if (ret)
		return ret;
	/*
@@ -1252,12 +1263,15 @@ int hw_perf_group_sched_in(struct perf_event *leader,
	int assign[X86_PMC_IDX_MAX];
	int n0, n1, ret;

	if (!x86_pmu_initialized())
		return 0;

	/* n0 = total number of events */
	n0 = collect_events(cpuc, leader, true);
	if (n0 < 0)
		return n0;

	ret = x86_schedule_events(cpuc, n0, assign);
	ret = x86_pmu.schedule_events(cpuc, n0, assign);
	if (ret)
		return ret;

@@ -1307,6 +1321,7 @@ int hw_perf_group_sched_in(struct perf_event *leader,

#include "perf_event_amd.c"
#include "perf_event_p6.c"
#include "perf_event_p4.c"
#include "perf_event_intel_lbr.c"
#include "perf_event_intel_ds.c"
#include "perf_event_intel.c"
@@ -1509,7 +1524,7 @@ static int validate_group(struct perf_event *event)

	fake_cpuc->n_events = n;

	ret = x86_schedule_events(fake_cpuc, n, NULL);
	ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);

out_free:
	kfree(fake_cpuc);
+2 −0
Original line number Diff line number Diff line
@@ -363,6 +363,8 @@ static __initconst struct x86_pmu amd_pmu = {
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
+10 −5
Original line number Diff line number Diff line
@@ -749,6 +749,8 @@ static __initconst struct x86_pmu core_pmu = {
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
@@ -786,6 +788,8 @@ static __initconst struct x86_pmu intel_pmu = {
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= x86_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
@@ -839,12 +843,13 @@ static __init int intel_pmu_init(void)
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		/* check for P6 processor family */
	   if (boot_cpu_data.x86 == 6) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
	   } else {
		return -ENODEV;
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
Loading