
Commit dd6f29da authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "Two fixes on the kernel side: fix an over-eager condition that failed
  larger perf ring-buffer sizes, plus fix crashes in the Intel BTS code
  for a corner case, found by fuzzing"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: Fix impossible ring-buffer sizes warning
  perf/x86: Add check_period PMU callback
parents c5f1ac5e 528871b4
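
The BTS corner case is easiest to see from the ioctl path this merge hardens. The sketch below is illustrative, not part of the commit: it assumes an x86 CPU with BTS, the standard perf headers, and sufficient perf_event_paranoid permissions. It opens an ordinary branch-instructions sampling event, then tries to lower its period to 1 via PERF_EVENT_IOC_PERIOD, the exact combination that selects BTS; with the check_period callback wired in, the ioctl fails with EINVAL instead of reconfiguring the event underneath the driver.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	__u64 period = 1;		/* period == 1 is what selects BTS */
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
	attr.sample_period = 1000;	/* a plain sampling event at open time */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* Patched kernels reject this with EINVAL via check_period(). */
	if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period) < 0)
		perror("PERF_EVENT_IOC_PERIOD");

	close(fd);
	return 0;
}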
arch/x86/events/core.c  +14 −0
@@ -2278,6 +2278,19 @@ void perf_check_microcode(void)
 		x86_pmu.check_microcode();
 }
 
+static int x86_pmu_check_period(struct perf_event *event, u64 value)
+{
+	if (x86_pmu.check_period && x86_pmu.check_period(event, value))
+		return -EINVAL;
+
+	if (value && x86_pmu.limit_period) {
+		if (x86_pmu.limit_period(event, value) > value)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 static struct pmu pmu = {
 	.pmu_enable		= x86_pmu_enable,
 	.pmu_disable		= x86_pmu_disable,
@@ -2302,6 +2315,7 @@ static struct pmu pmu = {
 	.event_idx		= x86_pmu_event_idx,
 	.sched_task		= x86_pmu_sched_task,
 	.task_ctx_size          = sizeof(struct x86_perf_task_context),
+	.check_period		= x86_pmu_check_period,
 };
 
 void arch_perf_update_userpage(struct perf_event *event,
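
The second clause of x86_pmu_check_period() above deserves a note: a PMU's limit_period hook returns the closest period the hardware can actually program, so if it would round the requested value up, the request cannot be honored exactly and is refused. A hypothetical hook (illustrative only, not from the patch) makes the logic concrete:

/*
 * Hypothetical limit_period hook for illustration: hardware that
 * cannot count periods shorter than 128 rounds them up, so for a
 * requested value of 64 this returns 128 > 64 and
 * x86_pmu_check_period() fails the request with -EINVAL.
 */
static u64 example_limit_period(struct perf_event *event, u64 period)
{
	return period < 128 ? 128 : period;
}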
arch/x86/events/intel/core.c  +9 −0
@@ -3587,6 +3587,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
 	intel_pmu_lbr_sched_task(ctx, sched_in);
 }
 
+static int intel_pmu_check_period(struct perf_event *event, u64 value)
+{
+	return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
+}
+
 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
 
 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
@@ -3667,6 +3672,8 @@ static __initconst const struct x86_pmu core_pmu = {
 	.cpu_starting		= intel_pmu_cpu_starting,
 	.cpu_dying		= intel_pmu_cpu_dying,
 	.cpu_dead		= intel_pmu_cpu_dead,
+
+	.check_period		= intel_pmu_check_period,
 };
 
 static struct attribute *intel_pmu_attrs[];
@@ -3711,6 +3718,8 @@ static __initconst const struct x86_pmu intel_pmu = {
 
 	.guest_get_msrs		= intel_guest_get_msrs,
 	.sched_task		= intel_pmu_sched_task,
+
+	.check_period		= intel_pmu_check_period,
 };
 
 static __init void intel_clovertown_quirk(void)
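
For context on the condition intel_pmu_check_period() delegates to intel_pmu_has_bts_period() (defined in the next file): BTS is selected at perf_event_open() time by exactly one attribute combination, sketched below as an illustrative userspace declaration (not part of the commit; requires <linux/perf_event.h>). The new callback simply refuses to let a later PERF_EVENT_IOC_PERIOD call morph a plain branch-counting event into that combination behind the driver's back.

/* Illustrative only: the attribute combination that maps to BTS. */
struct perf_event_attr bts_attr = {
	.size		= sizeof(struct perf_event_attr),
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
	.sample_period	= 1,	/* a period of exactly 1 selects BTS */
};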
arch/x86/events/perf_event.h  +14 −2
@@ -646,6 +646,11 @@ struct x86_pmu {
 	 * Intel host/guest support (KVM)
 	 */
 	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
+
+	/*
+	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+	 */
+	int (*check_period) (struct perf_event *event, u64 period);
 };
 
 struct x86_perf_task_context {
@@ -857,7 +862,7 @@ static inline int amd_pmu_init(void)
 
 #ifdef CONFIG_CPU_SUP_INTEL
 
-static inline bool intel_pmu_has_bts(struct perf_event *event)
+static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned int hw_event, bts_event;
@@ -868,7 +873,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
 	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
 	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
 
-	return hw_event == bts_event && hwc->sample_period == 1;
+	return hw_event == bts_event && period == 1;
 }
+
+static inline bool intel_pmu_has_bts(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	return intel_pmu_has_bts_period(event, hwc->sample_period);
+}
 
 int intel_pmu_save_and_restart(struct perf_event *event);
include/linux/perf_event.h  +5 −0
@@ -447,6 +447,11 @@ struct pmu {
 	 * Filter events for PMU-specific reasons.
 	 */
 	int (*filter_match)		(struct perf_event *event); /* optional */
+
+	/*
+	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+	 */
+	int (*check_period)		(struct perf_event *event, u64 value); /* optional */
 };
 
 enum perf_addr_filter_action_t {
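
The callback is deliberately optional, like filter_match above it: a driver opts in just by filling the new member. In the sketch below only the check_period signature comes from the patch; every mydrv_* name and the 48-bit counter width are hypothetical:

static int mydrv_check_period(struct perf_event *event, u64 value)
{
	/* Refuse periods the (hypothetical) 48-bit counter cannot hold. */
	return value > ((1ULL << 48) - 1) ? -EINVAL : 0;
}

static struct pmu mydrv_pmu = {
	/* ... the usual mandatory callbacks ... */
	.check_period	= mydrv_check_period,	/* optional */
};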
kernel/events/core.c  +16 −0
@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event,
 	}
 }
 
+static int perf_event_check_period(struct perf_event *event, u64 value)
+{
+	return event->pmu->check_period(event, value);
+}
+
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
 	u64 value;
@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
 		return -EINVAL;
 
+	if (perf_event_check_period(event, value))
+		return -EINVAL;
+
 	event_function_call(event, __perf_event_period, &value);
 
 	return 0;
@@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
 	return 0;
 }
 
+static int perf_event_nop_int(struct perf_event *event, u64 value)
+{
+	return 0;
+}
+
 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
 
 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@ -9691,6 +9704,9 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
 		pmu->pmu_disable = perf_pmu_nop_void;
 	}
 
+	if (!pmu->check_period)
+		pmu->check_period = perf_event_nop_int;
+
 	if (!pmu->event_idx)
 		pmu->event_idx = perf_event_idx_default;
 
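A small design point in the last hunk: rather than NULL-checking the new member at every call site, perf_pmu_register() installs perf_event_nop_int for drivers that leave check_period unset, so perf_event_check_period() can call through event->pmu->check_period unconditionally. The resulting invariant, sketched with the hypothetical mydrv_pmu from above:

	/* Driver left the hook unset. */
	mydrv_pmu.check_period = NULL;

	perf_pmu_register(&mydrv_pmu, "mydrv", -1);

	/* The core filled in the nop default: the pointer is now
	 * non-NULL, and a PMU with no restrictions accepts every
	 * period (the nop returns 0). */
	WARN_ON(!mydrv_pmu.check_period);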