
Commit 1ae6e1e8 authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server

Merge "perf/core: Add support for PMUs that can be read from more than 1 CPU"

parents 68aa09e9 f994cb43
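
This change adds a readable_on_cpus cpumask to struct perf_event and teaches the perf-core read paths (__perf_event_read_cpu(), perf_event_read_local() and perf_event_read()) to read a counter from the local CPU whenever that CPU is in the mask, instead of insisting on event->cpu/event->oncpu or issuing a cross-CPU call. The hunks below only add and test the mask; where a driver fills it in is not shown, so the following sketch assumes it happens in the PMU's event_init() callback. example_pmu_event_init and the choice of cpu_possible_mask are illustrative, not part of this commit.

/*
 * Sketch only: a PMU whose counters live in a shared, memory-mapped block
 * (such as a system-cache PMU) can declare its events readable from every
 * possible CPU, letting perf core skip the IPI to event->oncpu.
 */
static int example_pmu_event_init(struct perf_event *event)
{
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* The counter registers are coherent, so any CPU may read them. */
	cpumask_copy(&event->readable_on_cpus, cpu_possible_mask);

	return 0;
}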
File 1 of 3 · +2 −13
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2018, 2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/of.h>
@@ -42,7 +42,6 @@ static ktime_t last_read;
static int qcom_llcc_event_init(struct perf_event *event)
{
	u64 config = event->attr.config;
	u64 type = event->attr.type;

	if (config == LLCC_RD_EV) {
		event->hw.config_base = event->attr.config;
@@ -80,8 +79,6 @@ static void qcom_llcc_event_read(struct perf_event *event)

static void qcom_llcc_event_start(struct perf_event *event, int flags)
{
	struct llcc_pmu *llccpmu = to_llcc_pmu(event->pmu);

	if (flags & PERF_EF_RELOAD)
		WARN_ON(!(event->hw.state & PERF_HES_UPTODATE));
	event->hw.state = 0;
@@ -89,17 +86,12 @@ static void qcom_llcc_event_start(struct perf_event *event, int flags)

static void qcom_llcc_event_stop(struct perf_event *event, int flags)
{
	struct llcc_pmu *llccpmu = to_llcc_pmu(event->pmu);

	qcom_llcc_event_read(event);
	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int qcom_llcc_event_add(struct perf_event *event, int flags)
{
	int i;
	unsigned int cpu = event->cpu;
	unsigned long irq_flags;
	struct llcc_pmu *llccpmu = to_llcc_pmu(event->pmu);

	raw_spin_lock(&users_lock);
@@ -118,9 +110,6 @@ static int qcom_llcc_event_add(struct perf_event *event, int flags)

static void qcom_llcc_event_del(struct perf_event *event, int flags)
{
	int i;
	unsigned int cpu = event->cpu;
	unsigned long irq_flags;
	struct llcc_pmu *llccpmu = to_llcc_pmu(event->pmu);

	raw_spin_lock(&users_lock);
@@ -134,7 +123,7 @@ static int qcom_llcc_pmu_probe(struct platform_device *pdev)
{
	struct llcc_pmu *llccpmu;
	struct resource *res;
	int ret, i;
	int ret;

	llccpmu = devm_kzalloc(&pdev->dev, sizeof(struct llcc_pmu), GFP_KERNEL);
	if (!llccpmu)
File 2 of 3 · +1 −0
@@ -646,6 +646,7 @@ struct perf_event {

	int				oncpu;
	int				cpu;
	cpumask_t			readable_on_cpus;

	struct list_head		owner_entry;
	struct task_struct		*owner;
File 3 of 3 · +19 −10
@@ -3901,10 +3901,12 @@ struct perf_read_data {
static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
{
	u16 local_pkg, event_pkg;

	if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
	int local_cpu = smp_processor_id();

	if (cpumask_test_cpu(local_cpu, &event->readable_on_cpus))
		return local_cpu;

	if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
		event_pkg = topology_physical_package_id(event_cpu);
		local_pkg = topology_physical_package_id(local_cpu);

@@ -3996,7 +3998,8 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
{
	unsigned long flags;
	int ret = 0;

	int local_cpu = smp_processor_id();
	bool readable = cpumask_test_cpu(local_cpu, &event->readable_on_cpus);
	/*
	 * Disabling interrupts avoids all counter scheduling (context
	 * switches, timer based rotation and IPIs).
@@ -4021,7 +4024,8 @@ int perf_event_read_local(struct perf_event *event, u64 *value,

	/* If this is a per-CPU event, it must be for this CPU */
	if (!(event->attach_state & PERF_ATTACH_TASK) &&
	    event->cpu != smp_processor_id()) {
	    event->cpu != local_cpu &&
	    !readable) {
		ret = -EINVAL;
		goto out;
	}
@@ -4037,7 +4041,7 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
	 * or local to this CPU. Furthermore it means its ACTIVE (otherwise
	 * oncpu == -1).
	 */
	if (event->oncpu == smp_processor_id())
	if (event->oncpu == smp_processor_id() || readable)
		event->pmu->read(event);

	*value = local64_read(&event->count);
@@ -4062,11 +4066,13 @@ static int perf_event_read(struct perf_event *event, bool group)
	enum perf_event_state state = READ_ONCE(event->state);
	int event_cpu, ret = 0;
	bool active_event_skip_read = false;
	bool readable;

	/*
	 * If event is enabled and currently active on a CPU, update the
	 * value in the event structure:
	 */
	preempt_disable();
again:
	if (state == PERF_EVENT_STATE_ACTIVE) {

@@ -4077,13 +4083,16 @@ static int perf_event_read(struct perf_event *event, bool group)
		 * Matches the smp_wmb() from event_sched_in().
		 */
		smp_rmb();

		event_cpu = READ_ONCE(event->oncpu);
		if ((unsigned)event_cpu >= nr_cpu_ids)
		readable = cpumask_test_cpu(smp_processor_id(),
				    &event->readable_on_cpus);
		if ((unsigned int)event_cpu >= nr_cpu_ids) {
			preempt_enable();
			return 0;
		}
		if (cpu_isolated(event_cpu) ||
			(event->attr.exclude_idle &&
				per_cpu(is_idle, event_cpu)) ||
				per_cpu(is_idle, event_cpu) && !readable) ||
				per_cpu(is_hotplugging, event_cpu))
			active_event_skip_read = true;
	}
@@ -4095,7 +4104,6 @@ static int perf_event_read(struct perf_event *event, bool group)
			.ret = 0,
		};

		preempt_disable();
		event_cpu = __perf_event_read_cpu(event, event_cpu);

		/*
@@ -4110,7 +4118,6 @@ static int perf_event_read(struct perf_event *event, bool group)
		 */
		(void)smp_call_function_single(event_cpu,
				__perf_event_read, &data, 1);
		preempt_enable();
		ret = data.ret;
	} else if (state == PERF_EVENT_STATE_INACTIVE ||
			(active_event_skip_read &&
@@ -4139,6 +4146,8 @@ static int perf_event_read(struct perf_event *event, bool group)
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}

	preempt_enable();

	return ret;
}
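
With the mask populated, perf_event_read_local() no longer fails with -EINVAL when a per-CPU event is read from a CPU other than event->cpu, as long as the reading CPU is in readable_on_cpus. A kernel-side caller can therefore read the count from whichever CPU it happens to run on; the sketch below is assumed usage, with example_read_count and the surrounding setup not taken from this commit.

/*
 * Sketch: read the current count of a per-CPU event from the running CPU.
 * The event is assumed to have been created elsewhere (for example with
 * perf_event_create_kernel_counter()) by a PMU that fills in
 * readable_on_cpus.
 */
static u64 example_read_count(struct perf_event *event)
{
	u64 value = 0;

	/*
	 * Reads locally when this CPU is in event->readable_on_cpus;
	 * still returns -EINVAL for a per-CPU event on a CPU outside the mask.
	 */
	if (perf_event_read_local(event, &value, NULL, NULL))
		return 0;

	return value;
}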