Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f5aea0b9 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "Perf: Fix hrtimer operation across hotplug"

parents 93511045 fef52d93
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -43,6 +43,7 @@ static char *descriptions =
	"18 ARM: dts: msm: add perf-events support for msm8916\n"
	"19 Perf: Enable pmuv3 on 32 bit kernel\n"
	"20 msm: perf: set filter bits for cycle counter on krait\n"
	"21 Perf: Fix hrtimer operation across hotplug\n"
;

static ssize_t desc_read(struct file *fp, char __user *buf,
+47 −6
Original line number Diff line number Diff line
@@ -7471,6 +7471,17 @@ static void __perf_event_exit_context(void *__info)
	rcu_read_unlock();
}

/*
 * Stop the hrtimer behind every CPU-clock software event in @__info's
 * context.  Invoked on the target CPU (via smp_call_function_single()
 * from perf_event_exit_cpu_context()) when that CPU goes offline; the
 * events themselves stay on ctx->event_list so they survive hotplug.
 */
static void __perf_event_stop_swclock(void *__info)
{
	struct perf_event_context *ctx = __info;
	struct perf_event *evt, *next;

	list_for_each_entry_safe(evt, next, &ctx->event_list, event_entry) {
		if (evt->attr.config != PERF_COUNT_SW_CPU_CLOCK)
			continue;
		cpu_clock_event_stop(evt, 0);
	}
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
@@ -7479,20 +7490,45 @@ static void perf_event_exit_cpu_context(int cpu)

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
		mutex_lock(&ctx->mutex);
		/*
		 * If keeping events across hotplugging is supported, do not
		 * remove the event list, but keep it alive across CPU hotplug.
		 * The context is exited via an fd close path when userspace
		 * is done and the target CPU is online.
		 * is done and the target CPU is online. If software clock
		 * event is active, then stop hrtimer associated with it.
		 * Start the timer when the CPU comes back online.
		 */
		if (!pmu->events_across_hotplug) {
			ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

			mutex_lock(&ctx->mutex);
		if (!pmu->events_across_hotplug)
			smp_call_function_single(cpu, __perf_event_exit_context,
						 ctx, 1);
		else
			smp_call_function_single(cpu, __perf_event_stop_swclock,
						 ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

/*
 * Restart the hrtimer of every CPU-clock software event on @cpu's
 * contexts, for PMUs whose events were kept alive across hotplug
 * (the timers were stopped by __perf_event_stop_swclock() when the
 * CPU went down).  Called from the CPU_STARTING notifier.
 */
static void perf_event_start_swclock(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;
	struct perf_event *evt, *next;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		if (!pmu->events_across_hotplug)
			continue;

		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
		list_for_each_entry_safe(evt, next, &ctx->event_list,
					 event_entry) {
			if (evt->attr.config == PERF_COUNT_SW_CPU_CLOCK)
				cpu_clock_event_start(evt, 0);
		}
	}
	srcu_read_unlock(&pmus_srcu, idx);
}
@@ -7509,6 +7545,7 @@ static void perf_event_exit_cpu(int cpu)
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
static inline void perf_event_start_swclock(int cpu) { }
#endif

static int
@@ -7548,6 +7585,10 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
		perf_event_exit_cpu(cpu);
		break;

	case CPU_STARTING:
		perf_event_start_swclock(cpu);
		break;

	default:
		break;
	}