diff --git a/drivers/perf/qcom_llcc_pmu.c b/drivers/perf/qcom_llcc_pmu.c
index d4003cc950b0486b6de2b3dc44d1ba991f3bcf52..59f95e2fdd14cea15f5fdcb2c8e57db90f831f41 100644
--- a/drivers/perf/qcom_llcc_pmu.c
+++ b/drivers/perf/qcom_llcc_pmu.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include
@@ -42,7 +42,6 @@ static ktime_t last_read;
 static int qcom_llcc_event_init(struct perf_event *event)
 {
 	u64 config = event->attr.config;
-	u64 type = event->attr.type;
 
 	if (config == LLCC_RD_EV) {
 		event->hw.config_base = event->attr.config;
@@ -80,8 +79,6 @@ static void qcom_llcc_event_read(struct perf_event *event)
 
 static void qcom_llcc_event_start(struct perf_event *event, int flags)
 {
-	struct llcc_pmu *llccpmu = to_llcc_pmu(event->pmu);
-
 	if (flags & PERF_EF_RELOAD)
 		WARN_ON(!(event->hw.state & PERF_HES_UPTODATE));
 	event->hw.state = 0;
@@ -89,17 +86,12 @@ static void qcom_llcc_event_start(struct perf_event *event, int flags)
 
 static void qcom_llcc_event_stop(struct perf_event *event, int flags)
 {
-	struct llcc_pmu *llccpmu = to_llcc_pmu(event->pmu);
-
 	qcom_llcc_event_read(event);
 	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 }
 
 static int qcom_llcc_event_add(struct perf_event *event, int flags)
 {
-	int i;
-	unsigned int cpu = event->cpu;
-	unsigned long irq_flags;
 	struct llcc_pmu *llccpmu = to_llcc_pmu(event->pmu);
 
 	raw_spin_lock(&users_lock);
@@ -118,9 +110,6 @@ static int qcom_llcc_event_add(struct perf_event *event, int flags)
 
 static void qcom_llcc_event_del(struct perf_event *event, int flags)
 {
-	int i;
-	unsigned int cpu = event->cpu;
-	unsigned long irq_flags;
 	struct llcc_pmu *llccpmu = to_llcc_pmu(event->pmu);
 
 	raw_spin_lock(&users_lock);
@@ -134,7 +123,7 @@ static int qcom_llcc_pmu_probe(struct platform_device *pdev)
 {
 	struct llcc_pmu *llccpmu;
 	struct resource *res;
-	int ret, i;
+	int ret;
 
 	llccpmu = devm_kzalloc(&pdev->dev, sizeof(struct llcc_pmu), GFP_KERNEL);
 	if (!llccpmu)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index b1eeb8a751b1dfb6f7ccc467305d5668ba494f16..4f3baecccb0b5bdbdc3397abea79852ec2637725 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -646,6 +646,7 @@ struct perf_event {
 	int				oncpu;
 	int				cpu;
+	cpumask_t			readable_on_cpus;
 
 	struct list_head		owner_entry;
 	struct task_struct		*owner;
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1ed727c799b4ec7b694b1a5848b6880fa93735a1..6693615e769862c825b3df9ed507a5900ba2c32e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3901,10 +3901,12 @@ struct perf_read_data {
 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
 {
 	u16 local_pkg, event_pkg;
+	int local_cpu = smp_processor_id();
 
-	if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
-		int local_cpu = smp_processor_id();
+	if (cpumask_test_cpu(local_cpu, &event->readable_on_cpus))
+		return local_cpu;
 
+	if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
 		event_pkg = topology_physical_package_id(event_cpu);
 		local_pkg = topology_physical_package_id(local_cpu);
 
@@ -3996,7 +3998,8 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
 {
 	unsigned long flags;
 	int ret = 0;
-
+	int local_cpu = smp_processor_id();
+	bool readable = cpumask_test_cpu(local_cpu, &event->readable_on_cpus);
 	/*
 	 * Disabling interrupts avoids all counter scheduling (context
 	 * switches, timer based rotation and IPIs).
@@ -4021,7 +4024,8 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
 
 	/* If this is a per-CPU event, it must be for this CPU */
 	if (!(event->attach_state & PERF_ATTACH_TASK) &&
-	    event->cpu != smp_processor_id()) {
+	    event->cpu != local_cpu &&
+	    !readable) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -4037,7 +4041,7 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
 	 * or local to this CPU. Furthermore it means its ACTIVE (otherwise
 	 * oncpu == -1).
 	 */
-	if (event->oncpu == smp_processor_id())
+	if (event->oncpu == smp_processor_id() || readable)
 		event->pmu->read(event);
 
 	*value = local64_read(&event->count);
@@ -4062,11 +4066,13 @@ static int perf_event_read(struct perf_event *event, bool group)
 	enum perf_event_state state = READ_ONCE(event->state);
 	int event_cpu, ret = 0;
 	bool active_event_skip_read = false;
+	bool readable;
 
 	/*
	 * If event is enabled and currently active on a CPU, update the
	 * value in the event structure:
	 */
 
+	preempt_disable();
 again:
 	if (state == PERF_EVENT_STATE_ACTIVE) {
@@ -4077,13 +4083,16 @@ static int perf_event_read(struct perf_event *event, bool group)
		 * Matches the smp_wmb() from event_sched_in().
		 */
		smp_rmb();
-		event_cpu = READ_ONCE(event->oncpu);
-		if ((unsigned)event_cpu >= nr_cpu_ids)
+		readable = cpumask_test_cpu(smp_processor_id(),
+					    &event->readable_on_cpus);
+		if ((unsigned int)event_cpu >= nr_cpu_ids) {
+			preempt_enable();
			return 0;
+		}
 
		if (cpu_isolated(event_cpu) ||
		    (event->attr.exclude_idle &&
-		     per_cpu(is_idle, event_cpu)) ||
+		     per_cpu(is_idle, event_cpu) && !readable) ||
		    per_cpu(is_hotplugging, event_cpu))
			active_event_skip_read = true;
	}
@@ -4095,7 +4104,6 @@ static int perf_event_read(struct perf_event *event, bool group)
			.ret = 0,
		};
 
-		preempt_disable();
		event_cpu = __perf_event_read_cpu(event, event_cpu);
 
		/*
@@ -4110,7 +4118,6 @@
		 */
		(void)smp_call_function_single(event_cpu,
					       __perf_event_read, &data, 1);
-		preempt_enable();
		ret = data.ret;
	} else if (state == PERF_EVENT_STATE_INACTIVE ||
		   (active_event_skip_read &&
@@ -4139,6 +4146,8 @@ static int perf_event_read(struct perf_event *event, bool group)
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
 
+	preempt_enable();
+
	return ret;
 }
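
Note: below is a minimal sketch, not part of the patch, of how a PMU driver could opt in to the new cross-CPU read path by populating event->readable_on_cpus at event init. The function name example_pmu_event_init and the choice of cpu_possible_mask are illustrative assumptions for a device whose counters are memory-mapped and readable from any CPU (as with the LLCC PMU above):

/*
 * Illustrative only -- not from this patch. A driver whose counters
 * live in memory-mapped device registers (rather than in per-CPU
 * hardware) can mark its events as readable from every CPU, letting
 * perf_event_read()/perf_event_read_local() take the local read path
 * instead of issuing a cross-CPU IPI.
 */
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/perf_event.h>

static int example_pmu_event_init(struct perf_event *event)
{
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Hypothetical policy: any possible CPU may read this counter. */
	cpumask_copy(&event->readable_on_cpus, cpu_possible_mask);

	return 0;
}

A driver with more restrictive hardware could instead set only the CPUs that can safely access the counter; core code tests the mask with cpumask_test_cpu() in __perf_event_read_cpu(), perf_event_read_local(), and perf_event_read(), so an empty mask preserves the old behavior.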