
Commit 3d325bf0 authored by Ingo Molnar

Merge branch 'perf/urgent' into perf/core, to pick up fixes before applying new changes



Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents f1d800bf d7a702f0
+16 −7
@@ -2758,7 +2758,7 @@ static int intel_pmu_cpu_prepare(int cpu)
 	if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
 		cpuc->shared_regs = allocate_shared_regs(cpu);
 		if (!cpuc->shared_regs)
-			return NOTIFY_BAD;
+			goto err;
 	}
 
 	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
@@ -2766,18 +2766,27 @@ static int intel_pmu_cpu_prepare(int cpu)
 
 		cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
 		if (!cpuc->constraint_list)
-			return NOTIFY_BAD;
+			goto err_shared_regs;
 
 		cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
-		if (!cpuc->excl_cntrs) {
-			kfree(cpuc->constraint_list);
-			kfree(cpuc->shared_regs);
-			return NOTIFY_BAD;
-		}
+		if (!cpuc->excl_cntrs)
+			goto err_constraint_list;
+
 		cpuc->excl_thread_id = 0;
 	}
 
 	return NOTIFY_OK;
+
+err_constraint_list:
+	kfree(cpuc->constraint_list);
+	cpuc->constraint_list = NULL;
+
+err_shared_regs:
+	kfree(cpuc->shared_regs);
+	cpuc->shared_regs = NULL;
+
+err:
+	return NOTIFY_BAD;
 }
 
 static void intel_pmu_cpu_starting(int cpu)
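Note: this hunk replaces ad-hoc kfree() calls at each failure site with a single unwind path at the bottom of the function, and it also NULLs the freed pointers so a later teardown of the same per-CPU state cannot free them a second time. A minimal userspace sketch of the same unwind pattern (hypothetical names, not from the patch):

#include <stdlib.h>

struct state {
	void *shared_regs;
	void *constraint_list;
};

/* Allocate in order; on failure, jump to the label that frees
 * everything allocated so far, in reverse order. */
static int state_prepare(struct state *s)
{
	s->shared_regs = malloc(64);
	if (!s->shared_regs)
		goto err;

	s->constraint_list = malloc(64);
	if (!s->constraint_list)
		goto err_shared_regs;

	return 0;

err_shared_regs:
	free(s->shared_regs);
	s->shared_regs = NULL;	/* a repeated teardown is now a no-op */
err:
	return -1;
}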
+3 −5
@@ -1255,7 +1255,7 @@ static inline void cqm_pick_event_reader(int cpu)
 	cpumask_set_cpu(cpu, &cqm_cpumask);
 }
 
-static void intel_cqm_cpu_prepare(unsigned int cpu)
+static void intel_cqm_cpu_starting(unsigned int cpu)
 {
 	struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1296,13 +1296,11 @@ static int intel_cqm_cpu_notifier(struct notifier_block *nb,
 	unsigned int cpu  = (unsigned long)hcpu;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		intel_cqm_cpu_prepare(cpu);
-		break;
 	case CPU_DOWN_PREPARE:
 		intel_cqm_cpu_exit(cpu);
 		break;
 	case CPU_STARTING:
+		intel_cqm_cpu_starting(cpu);
 		cqm_pick_event_reader(cpu);
 		break;
 	}
@@ -1373,7 +1371,7 @@ static int __init intel_cqm_init(void)
 		goto out;
 
 	for_each_online_cpu(i) {
-		intel_cqm_cpu_prepare(i);
+		intel_cqm_cpu_starting(i);
 		cqm_pick_event_reader(i);
 	}
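The point of the rename: CPU_UP_PREPARE notifiers run on a control CPU before the incoming CPU executes anything, whereas CPU_STARTING runs on the incoming CPU itself with interrupts disabled, which is the right context for initializing that CPU's own PQR state. A sketch of the legacy (pre-4.10) hotplug-notifier shape this code uses; the example_* names are hypothetical:

static int example_cpu_notifier(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:	/* runs on the incoming CPU, IRQs off */
		example_cpu_starting(cpu);
		break;
	case CPU_DOWN_PREPARE:	/* runs on a control CPU */
		example_cpu_exit(cpu);
		break;
	}

	return NOTIFY_OK;
}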
+65 −22
@@ -3972,28 +3972,21 @@ static void perf_event_for_each(struct perf_event *event,
 		perf_event_for_each_child(sibling, func);
 }
 
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
-	struct perf_event_context *ctx = event->ctx;
-	int ret = 0, active;
+struct period_event {
+	struct perf_event *event;
 	u64 value;
+};
 
-	if (!is_sampling_event(event))
-		return -EINVAL;
-
-	if (copy_from_user(&value, arg, sizeof(value)))
-		return -EFAULT;
-
-	if (!value)
-		return -EINVAL;
+static int __perf_event_period(void *info)
+{
+	struct period_event *pe = info;
+	struct perf_event *event = pe->event;
+	struct perf_event_context *ctx = event->ctx;
+	u64 value = pe->value;
+	bool active;
 
-	raw_spin_lock_irq(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	if (event->attr.freq) {
-		if (value > sysctl_perf_event_sample_rate) {
-			ret = -EINVAL;
-			goto unlock;
-		}
-
 		event->attr.sample_freq = value;
 	} else {
 		event->attr.sample_period = value;
@@ -4012,11 +4005,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 		event->pmu->start(event, PERF_EF_RELOAD);
 		perf_pmu_enable(ctx->pmu);
 	}
+	raw_spin_unlock(&ctx->lock);
 
-unlock:
+	return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+	struct period_event pe = { .event = event, };
+	struct perf_event_context *ctx = event->ctx;
+	struct task_struct *task;
+	u64 value;
+
+	if (!is_sampling_event(event))
+		return -EINVAL;
+
+	if (copy_from_user(&value, arg, sizeof(value)))
+		return -EFAULT;
+
+	if (!value)
+		return -EINVAL;
+
+	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+		return -EINVAL;
+
+	task = ctx->task;
+	pe.value = value;
+
+	if (!task) {
+		cpu_function_call(event->cpu, __perf_event_period, &pe);
+		return 0;
+	}
+
+retry:
+	if (!task_function_call(task, __perf_event_period, &pe))
+		return 0;
+
+	raw_spin_lock_irq(&ctx->lock);
+	if (ctx->is_active) {
+		raw_spin_unlock_irq(&ctx->lock);
+		task = ctx->task;
+		goto retry;
+	}
+
+	__perf_event_period(&pe);
 	raw_spin_unlock_irq(&ctx->lock);
 
-	return ret;
+	return 0;
 }
 
 static const struct file_operations perf_fops;
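This hunk closes a migration race in the PERF_EVENT_IOC_PERIOD ioctl: instead of updating an active event from whatever CPU the ioctl happened to run on, the update is shipped to the CPU where the event is scheduled in, via cpu_function_call() for CPU-bound events and task_function_call() plus a retry loop for task-bound ones, falling back to a locked update once the context is inactive. Both helpers build on an IPI-based cross-call. A minimal sketch of that underlying idea, using the exported smp_call_function_single(); remote_read() and read_on() are illustrative names, not from the patch:

#include <linux/smp.h>

static void remote_read(void *info)
{
	unsigned int *out = info;

	/* Runs on the target CPU, in IPI context. */
	*out = smp_processor_id();
}

static unsigned int read_on(int cpu)
{
	unsigned int where = 0;

	/* wait=1: block until the remote function has completed. */
	smp_call_function_single(cpu, remote_read, &where, 1);

	return where;
}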
@@ -4754,12 +4789,20 @@ static const struct file_operations perf_fops = {
  * to user-space before waking everybody up.
  */
 
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+	/* only the parent has fasync state */
+	if (event->parent)
+		event = event->parent;
+	return &event->fasync;
+}
+
 void perf_event_wakeup(struct perf_event *event)
 {
 	ring_buffer_wakeup(event);
 
 	if (event->pending_kill) {
-		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+		kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
 		event->pending_kill = 0;
 	}
 }
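Background for the new helper: inherited per-child events never have their own file, so their ->fasync list is always empty; signal-driven I/O state lives only on the parent event that owns the file descriptor. The helper redirects both kill_fasync() and the emptiness check in the next hunk to the parent. For reference, the usual driver-side fasync pattern that the perf code mirrors (the my_* names are hypothetical):

static struct fasync_struct *my_fasync;

static int my_fasync_fn(int fd, struct file *file, int on)
{
	/* Called for fcntl(fd, F_SETFL, ... | O_ASYNC); maintains the list. */
	return fasync_helper(fd, file, on, &my_fasync);
}

static void my_data_ready(void)
{
	/* Signal every registered owner that input is available. */
	kill_fasync(&my_fasync, SIGIO, POLL_IN);
}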
@@ -6221,7 +6264,7 @@ static int __perf_event_overflow(struct perf_event *event,
 	else
 		perf_event_output(event, data, regs);
 
-	if (event->fasync && event->pending_kill) {
+	if (*perf_event_fasync(event) && event->pending_kill) {
 		event->pending_wakeup = 1;
 		irq_work_queue(&event->pending);
 	}
+6 −4
@@ -559,12 +559,14 @@ static void __rb_free_aux(struct ring_buffer *rb)
 		rb->aux_priv = NULL;
 	}
 
-	for (pg = 0; pg < rb->aux_nr_pages; pg++)
-		rb_free_aux_page(rb, pg);
+	if (rb->aux_nr_pages) {
+		for (pg = 0; pg < rb->aux_nr_pages; pg++)
+			rb_free_aux_page(rb, pg);
 
-	kfree(rb->aux_pages);
+		kfree(rb->aux_pages);
+		rb->aux_nr_pages = 0;
+	}
 }
 
 void rb_free_aux(struct ring_buffer *rb)
 {
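The fix makes __rb_free_aux() idempotent: when the AUX refcount and the ring-buffer refcount drop in an unlucky order, this teardown path can be reached twice, and without the guard the AUX pages and the rb->aux_pages array were freed twice. Guarding on the count and zeroing it turns a second call into a no-op. A userspace sketch of the same idea (struct buf and buf_free_pages() are illustrative):

#include <stdlib.h>

struct buf {
	void **pages;
	int nr_pages;
};

static void buf_free_pages(struct buf *b)
{
	int pg;

	if (b->nr_pages) {
		for (pg = 0; pg < b->nr_pages; pg++)
			free(b->pages[pg]);

		free(b->pages);
		b->pages = NULL;
		b->nr_pages = 0;	/* a second call becomes a no-op */
	}
}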