Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2d1400d6 authored by Neil Leeder, committed by Matt Wagantall
Browse files

perf: support hotplug



Add support for hotplugged cpu cores.

Change-Id: I0538ed67f1ad90bbd0510a7ba137cb6d1ad42172
Signed-off-by: Neil Leeder <nleeder@codeaurora.org>
parent a7a3cd4f
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -193,6 +193,8 @@ struct pmu {
	struct perf_cpu_context * __percpu pmu_cpu_context;
	int				task_ctx_nr;
	int				hrtimer_interval_ms;
	u32                             events_across_hotplug:1,
					reserved:31;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
+2 −1
Original line number Diff line number Diff line
@@ -303,7 +303,8 @@ struct perf_event_attr {
				exclude_callchain_user   : 1, /* exclude user callchains */
				mmap2          :  1, /* include mmap with inode data     */
				comm_exec      :  1, /* flag comm events that are due to an exec */
				__reserved_1   : 39;
				constraint_duplicate : 1,
				__reserved_1   : 38;

	union {
		__u32		wakeup_events;	  /* wakeup every n events */
+104 −5
Original line number Diff line number Diff line
@@ -171,7 +171,11 @@ static struct srcu_struct pmus_srcu;
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
/*
 * With CONFIG_PERF_EVENTS_USERMODE the default is relaxed to -1
 * (presumably "not paranoid at all" -- every check above is skipped;
 * confirm against the full comment block preceding this hunk).
 */
#ifdef CONFIG_PERF_EVENTS_USERMODE
int sysctl_perf_event_paranoid __read_mostly = -1;
#else
int sysctl_perf_event_paranoid __read_mostly = 1;
#endif

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
@@ -1536,6 +1540,29 @@ static int __perf_remove_from_context(void *info)
}


#ifdef CONFIG_SMP
/*
 * Last-resort removal: the event's target CPU went offline before the
 * event could be removed from its context.  Force the CPU back online
 * just long enough to rerun __perf_remove_from_context on it.
 *
 * NOTE(review): this retry passes 'event' directly as the info argument,
 * but the caller in perf_remove_from_context passes a wrapper (&re, with
 * .event/.detach_group fields) to the same __perf_remove_from_context.
 * The two call sites disagree on the info type -- confirm which one
 * __perf_remove_from_context actually dereferences.
 */
static void perf_retry_remove(struct perf_event *event)
{
	int up_ret;
	/*
	 * CPU was offline. Bring it online so we can
	 * gracefully exit a perf context.
	 */
	up_ret = cpu_up(event->cpu);
	if (!up_ret)
		/* Try the remove call once again. */
		cpu_function_call(event->cpu, __perf_remove_from_context,
				  event);
	else
		pr_err("Failed to bring up CPU: %d, ret: %d\n",
		       event->cpu, up_ret);
}
#else
/* !SMP: the sole CPU can never be hotplugged, so there is nothing to retry. */
static void perf_retry_remove(struct perf_event *event)
{
}
#endif

 /*
 * Remove the event from a task's (or a CPU's) list of events.
 *
@@ -1549,7 +1576,8 @@ static int __perf_remove_from_context(void *info)
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event, bool detach_group)
static void __ref perf_remove_from_context(struct perf_event *event,
					   bool detach_group)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;
@@ -1557,6 +1585,7 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group
		.event = event,
		.detach_group = detach_group,
	};
	int ret;

	lockdep_assert_held(&ctx->mutex);

@@ -1567,7 +1596,11 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group
		 * already called __perf_remove_from_context from
		 * perf_event_exit_cpu.
		 */
		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
		ret = cpu_function_call(event->cpu, __perf_remove_from_context,
					&re);
		if (ret == -ENXIO)
			perf_retry_remove(event);

		return;
	}

@@ -3477,6 +3510,15 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);

static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_event *event = file->private_data;

	/*
	 * An event parked in OFF by a duplicate-constraint check would
	 * otherwise be torn down as if it never ran; promote it back to
	 * ACTIVE first so the release path cleans it up fully.
	 */
	if (event->attr.constraint_duplicate &&
	    event->state == PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_ACTIVE;

	put_event(event);
	return 0;
}
@@ -6082,6 +6124,7 @@ static struct pmu perf_swevent = {
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
	.events_across_hotplug = 1,
};

#ifdef CONFIG_EVENT_TRACING
@@ -6199,6 +6242,7 @@ static struct pmu perf_tracepoint = {
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
	.events_across_hotplug = 1,
};

static inline void perf_tp_register(void)
@@ -6424,6 +6468,7 @@ static struct pmu perf_cpu_clock = {
	.start		= cpu_clock_event_start,
	.stop		= cpu_clock_event_stop,
	.read		= cpu_clock_event_read,
	.events_across_hotplug = 1,
};

/*
@@ -6502,6 +6547,7 @@ static struct pmu perf_task_clock = {
	.start		= task_clock_event_start,
	.stop		= task_clock_event_stop,
	.read		= task_clock_event_read,
	.events_across_hotplug = 1,
};

static void perf_pmu_nop_void(struct pmu *pmu)
@@ -8130,6 +8176,18 @@ static void __perf_event_exit_context(void *__info)
	rcu_read_unlock();
}

/*
 * Cross-call helper: stop the hrtimer behind every software cpu-clock
 * event in @__info's context (runs on the CPU being taken down).
 */
static void __perf_event_stop_swclock(void *__info)
{
	struct perf_event_context *ctx = __info;
	struct perf_event *event, *next;

	list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) {
		if (event->attr.type == PERF_TYPE_SOFTWARE &&
		    event->attr.config == PERF_COUNT_SW_CPU_CLOCK)
			cpu_clock_event_stop(event, 0);
	}
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
@@ -8139,14 +8197,49 @@ static void perf_event_exit_cpu_context(int cpu)
	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		/*
		 * If keeping events across hotplugging is supported, do not
		 * remove the event list, but keep it alive across CPU hotplug.
		 * The context is exited via an fd close path when userspace
		 * is done and the target CPU is online. If software clock
		 * event is active, then stop hrtimer associated with it.
		 * Start the timer when the CPU comes back online.
		 */
		if (!pmu->events_across_hotplug)
			smp_call_function_single(cpu, __perf_event_exit_context,
						 ctx, 1);
		else
			smp_call_function_single(cpu, __perf_event_stop_swclock,
						 ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

/*
 * Restart the software cpu-clock events that were left on @cpu's
 * per-CPU contexts across a hotplug cycle (their hrtimers were stopped
 * in perf_event_exit_cpu_context when the CPU went down).
 */
static void perf_event_start_swclock(int cpu)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *next;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		/* Only PMUs that kept their events alive while offline. */
		if (!pmu->events_across_hotplug)
			continue;

		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
		list_for_each_entry_safe(event, next, &ctx->event_list,
					 event_entry) {
			if (event->attr.type == PERF_TYPE_SOFTWARE &&
			    event->attr.config == PERF_COUNT_SW_CPU_CLOCK)
				cpu_clock_event_start(event, 0);
		}
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
@@ -8160,6 +8253,7 @@ static void perf_event_exit_cpu(int cpu)
}
#else
/*
 * CPU hotplug support compiled out (the matching #if is outside this
 * view -- presumably CONFIG_HOTPLUG_CPU): CPUs never go offline, so
 * both teardown and swclock-restart are no-ops.
 */
static inline void perf_event_exit_cpu(int cpu) { }
static inline void perf_event_start_swclock(int cpu) { }
#endif

static int
@@ -8198,6 +8292,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;

	case CPU_STARTING:
		perf_event_start_swclock(cpu);
		break;

	default:
		break;
	}
+1 −0
Original line number Diff line number Diff line
@@ -614,6 +614,7 @@ static struct pmu perf_breakpoint = {
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
	.read		= hw_breakpoint_pmu_read,
	.events_across_hotplug = 1,
};

int __init init_hw_breakpoint(void)