
Commit 42a0789b authored by Ingo Molnar

Merge branch 'perf/urgent' into perf/core, to pick up fixes



Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents ac675d0d 4e93ad60
arch/x86/kernel/cpu/perf_event.h  +1 −1
@@ -391,7 +391,7 @@ struct cpu_hw_events {
 /* Check flags and event code/umask, and set the HSW N/A flag */
 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
 	__EVENT_CONSTRAINT(code, n, 			\
-			  INTEL_ARCH_EVENT_MASK|INTEL_ARCH_EVENT_MASK, \
+			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
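
Note on the fix above: OR-ing INTEL_ARCH_EVENT_MASK with itself is a no-op, so the constraint's comparison mask never included X86_ALL_EVENT_FLAGS and the flag bits (such as the PEBS N/A flag this macro exists to set) were never compared when matching events. A standalone sketch of the failure mode, using illustrative mask values rather than the kernel's real definitions:

/*
 * Sketch of the masking bug; the two mask values below are illustrative,
 * not the kernel's actual definitions.
 */
#include <stdio.h>
#include <stdint.h>

#define INTEL_ARCH_EVENT_MASK	0x0000ffffULL	/* event code + umask bits */
#define X86_ALL_EVENT_FLAGS	0x000f0000ULL	/* internal flag bits */

int main(void)
{
	uint64_t buggy = INTEL_ARCH_EVENT_MASK | INTEL_ARCH_EVENT_MASK;
	uint64_t fixed = INTEL_ARCH_EVENT_MASK | X86_ALL_EVENT_FLAGS;

	uint64_t cmask = 0x01cd;		/* constraint: code+umask, no flags */
	uint64_t event = 0x01cd | 0x00010000;	/* same code+umask, extra flag set */

	/* Under the buggy mask the flag bit is masked out, so the event
	 * wrongly matches; under the fixed mask it correctly does not. */
	printf("buggy: %d\n", (event & buggy) == (cmask & buggy));	/* prints 1 */
	printf("fixed: %d\n", (event & fixed) == (cmask & fixed));	/* prints 0 */
	return 0;
}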


arch/x86/kernel/cpu/perf_event_intel.c  +1 −1
@@ -232,7 +232,7 @@ static struct event_constraint intel_hsw_event_constraints[] = {
 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
-	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
+	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
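
Note on the constraint change above: the old entry pinned every L1D_PEND_MISS umask (event 0x48) to counter 2 (counter mask 0x4); the new entry constrains only L1D_PEND_MISS.PENDING (umask 0x01). Intel raw event encoding packs the event select into bits 0-7 and the unit mask into bits 8-15, which is how 0x148 reads as umask 0x01 on event 0x48. A tiny sketch of that decomposition:

/* How the code+umask values in the table above decompose: event select in
 * bits 0-7, unit mask in bits 8-15 (standard Intel raw event encoding). */
#include <stdio.h>

#define UEVENT(event, umask)	(((unsigned)(umask) << 8) | (unsigned)(event))

int main(void)
{
	printf("0x%03x\n", UEVENT(0x48, 0x01));	/* 0x148: L1D_PEND_MISS.PENDING */
	printf("0x%03x\n", UEVENT(0xc0, 0x01));	/* 0x1c0: INST_RETIRED.PREC_DIST */
	return 0;
}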
kernel/events/core.c  +39 −12
@@ -4225,7 +4225,14 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 		goto retry;
 	}
 
-	__perf_event_period(&pe);
+	if (event->attr.freq) {
+		event->attr.sample_freq = value;
+	} else {
+		event->attr.sample_period = value;
+		event->hw.sample_period = value;
+	}
+
+	local64_set(&event->hw.period_left, 0);
 	raw_spin_unlock_irq(&ctx->lock);
 
 	return 0;
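
Note on the perf_event_period() change above: the period update is now applied inline under ctx->lock rather than through the removed __perf_event_period() call. The user-space interface is unchanged; a minimal usage sketch of the PERF_EVENT_IOC_PERIOD ioctl this function backs (Linux only, error handling kept short):

/* User-space sketch: update a running event's sample period via the
 * PERF_EVENT_IOC_PERIOD ioctl, whose kernel-side handler is patched above. */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.size = sizeof(attr),
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.sample_period = 100000,
		.disabled = 1,
	};
	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	uint64_t new_period = 1000000;	/* handler reads this u64 via get_user() */
	if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &new_period))
		perror("PERF_EVENT_IOC_PERIOD");

	close(fd);
	return 0;
}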
@@ -5675,6 +5682,17 @@ perf_event_aux_ctx(struct perf_event_context *ctx,
 	}
 }
 
+static void
+perf_event_aux_task_ctx(perf_event_aux_output_cb output, void *data,
+			struct perf_event_context *task_ctx)
+{
+	rcu_read_lock();
+	preempt_disable();
+	perf_event_aux_ctx(task_ctx, output, data);
+	preempt_enable();
+	rcu_read_unlock();
+}
+
 static void
 perf_event_aux(perf_event_aux_output_cb output, void *data,
 	       struct perf_event_context *task_ctx)
@@ -5684,14 +5702,23 @@ perf_event_aux(perf_event_aux_output_cb output, void *data,
 	struct pmu *pmu;
 	int ctxn;
 
+	/*
+	 * If we have task_ctx != NULL we only notify
+	 * the task context itself. The task_ctx is set
+	 * only for EXIT events before releasing task
+	 * context.
+	 */
+	if (task_ctx) {
+		perf_event_aux_task_ctx(output, data, task_ctx);
+		return;
+	}
+
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
 		if (cpuctx->unique_pmu != pmu)
 			goto next;
 		perf_event_aux_ctx(&cpuctx->ctx, output, data);
-		if (task_ctx)
-			goto next;
 		ctxn = pmu->task_ctx_nr;
 		if (ctxn < 0)
 			goto next;
@@ -5701,12 +5728,6 @@ perf_event_aux(perf_event_aux_output_cb output, void *data,
 next:
 		put_cpu_ptr(pmu->pmu_cpu_context);
 	}
 
-	if (task_ctx) {
-		preempt_disable();
-		perf_event_aux_ctx(task_ctx, output, data);
-		preempt_enable();
-	}
 	rcu_read_unlock();
 }

@@ -8796,10 +8817,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	struct perf_event_context *child_ctx, *clone_ctx = NULL;
 	unsigned long flags;
 
-	if (likely(!child->perf_event_ctxp[ctxn])) {
-		perf_event_task(child, NULL, 0);
+	if (likely(!child->perf_event_ctxp[ctxn]))
 		return;
-	}
 
 	local_irq_save(flags);
 	/*
@@ -8883,6 +8902,14 @@ void perf_event_exit_task(struct task_struct *child)
 
 	for_each_task_context_nr(ctxn)
 		perf_event_exit_task_context(child, ctxn);
+
+	/*
+	 * The perf_event_exit_task_context calls perf_event_task
+	 * with child's task_ctx, which generates EXIT events for
+	 * child contexts and sets child->perf_event_ctxp[] to NULL.
+	 * At this point we need to send EXIT events to cpu contexts.
+	 */
+	perf_event_task(child, NULL, 0);
 }
 
 static void perf_free_event(struct perf_event *event,
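
Taken together, the kernel/events/core.c hunks ensure a dying task's PERF_RECORD_EXIT is emitted exactly once: per-task contexts are notified through the new perf_event_aux_task_ctx() path while task_ctx is set, and the single perf_event_task(child, NULL, 0) call added to perf_event_exit_task() covers the cpu contexts, where the old call site inside the per-ctxn loop could fire more than once. For reference, a sketch of the EXIT record a ring-buffer reader receives, mirroring the PERF_RECORD_EXIT layout documented in include/uapi/linux/perf_event.h:

/* PERF_RECORD_EXIT payload as documented in include/uapi/linux/perf_event.h;
 * the trailing sample_id fields are present only when attr.sample_id_all
 * is set on the event. */
#include <linux/perf_event.h>
#include <stdint.h>

struct exit_record {
	struct perf_event_header header;	/* header.type == PERF_RECORD_EXIT */
	uint32_t pid, ppid;
	uint32_t tid, ptid;
	uint64_t time;
	/* struct sample_id follows when attr.sample_id_all is set */
};

With this merge applied, a reader polling the mmap'd ring buffer should observe one such record per exiting task per event, where previously a task without a perf context could produce duplicates.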