kernel/events/core.c +9 −4

@@ -3437,22 +3437,27 @@ u64 perf_event_read_local(struct perf_event *event)
 static int perf_event_read(struct perf_event *event, bool group)
 {
-	int ret = 0;
+	int event_cpu, ret = 0;
 
 	/*
 	 * If event is enabled and currently active on a CPU, update the
 	 * value in the event structure:
 	 */
+	event_cpu = READ_ONCE(event->oncpu);
 	if (event->state == PERF_EVENT_STATE_ACTIVE &&
-						!cpu_isolated(event->oncpu)) {
+						!cpu_isolated(event_cpu)) {
 		struct perf_read_data data = {
 			.event = event,
 			.group = group,
 			.ret = 0,
 		};
+
+		if ((unsigned int)event_cpu >= nr_cpu_ids)
+			return 0;
+
 		if (!event->attr.exclude_idle ||
-		    !per_cpu(is_idle, event->oncpu)) {
-			smp_call_function_single(event->oncpu,
+		    !per_cpu(is_idle, event_cpu)) {
+			smp_call_function_single(event_cpu,
				__perf_event_read, &data, 1);
 			ret = data.ret;
 		}
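The change snapshots event->oncpu exactly once with READ_ONCE() and validates the snapshot before handing it to smp_call_function_single(). Without the snapshot, a concurrent context switch can set event->oncpu to -1 between the PERF_EVENT_STATE_ACTIVE check and the IPI, and each re-read of event->oncpu may observe a different value; passing a stale -1 to smp_call_function_single() indexes per-CPU data out of bounds. The (unsigned int) cast makes -1 compare as UINT_MAX, so a single comparison against nr_cpu_ids rejects both "not on any CPU" and any out-of-range value.

Below is a minimal userspace sketch of the same snapshot-and-validate pattern. fake_read_once(), send_ipi(), struct event, and NR_CPUS are illustrative stand-ins for this sketch, not kernel API; the real READ_ONCE() also handles sizes other than int.

#include <stdio.h>

#define NR_CPUS 8

/* Userspace stand-in for the kernel's READ_ONCE(): one volatile load,
 * so the compiler cannot silently re-read the field later. */
#define fake_read_once(x) (*(volatile int *)&(x))

struct event {
	int oncpu;	/* CPU the event is running on, or -1 when off-CPU */
};

/* Stand-in for smp_call_function_single(); must never see a bad CPU. */
static void send_ipi(int cpu)
{
	printf("IPI to CPU %d\n", cpu);
}

static int read_event(struct event *ev)
{
	/* Snapshot once; a concurrent deschedule can flip ev->oncpu to -1
	 * at any moment, so every later use goes through the snapshot. */
	int cpu = fake_read_once(ev->oncpu);

	/* The unsigned cast turns -1 into UINT_MAX, so one comparison
	 * rejects both "not on any CPU" and out-of-range values. */
	if ((unsigned int)cpu >= NR_CPUS)
		return 0;

	send_ipi(cpu);
	return 0;
}

int main(void)
{
	struct event ev = { .oncpu = 3 };

	read_event(&ev);	/* sends the IPI to CPU 3 */
	ev.oncpu = -1;		/* event was descheduled */
	read_event(&ev);	/* safely skipped, no crash */
	return 0;
}

Note that validating the snapshot rather than the live field is the whole point: after the check, only event_cpu (the local copy) is ever used, so a concurrent update to event->oncpu can no longer invalidate the already-performed bounds check.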