
Commit 315c0a1f authored by Jiri Olsa, committed by Arnaldo Carvalho de Melo

libperf: Move perf's cpu_map__empty() to perf_cpu_map__empty()



So it becomes part of the libperf library, as one of the basic functions
operating on the perf_cpu_map class.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20190822111141.25823-4-jolsa@kernel.org


Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 6549cd8f
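
For context, here is a minimal, simplified sketch of what the moved helper amounts to. The struct layout and function body below are illustrative assumptions based on the historical tools/perf cpu_map__empty() helper, not the verbatim libperf source: a cpu map whose first entry is the -1 "dummy" CPU (the per-thread, no-per-cpu-mmaps case) is treated as empty.

#include <stdbool.h>

/* Simplified illustration only -- field names are assumptions. */
struct perf_cpu_map {
	int nr;		/* number of entries in map[] */
	int map[];	/* CPU numbers; -1 means "any CPU" (per-thread mode) */
};

static bool perf_cpu_map__empty(const struct perf_cpu_map *map)
{
	/* NULL, or a map holding only the -1 dummy entry, counts as empty */
	return map ? map->map[0] == -1 : true;
}

Callers in the diff below use the renamed function exactly as they used the old one, e.g. if (!perf_cpu_map__empty(cpus)) to decide whether per-cpu sample bits such as CPU and TIME are needed.
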
+4 −4
@@ -396,7 +396,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
 	 * AUX event.  We also need the contextID in order to be notified
 	 * when a context switch happened.
 	 */
-	if (!cpu_map__empty(cpus)) {
+	if (!perf_cpu_map__empty(cpus)) {
 		perf_evsel__set_sample_bit(cs_etm_evsel, CPU);
 
 		err = cs_etm_set_option(itr, cs_etm_evsel,
@@ -420,7 +420,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
 		tracking_evsel->core.attr.sample_period = 1;
 
 		/* In per-cpu case, always need the time of mmap events etc */
-		if (!cpu_map__empty(cpus))
+		if (!perf_cpu_map__empty(cpus))
 			perf_evsel__set_sample_bit(tracking_evsel, TIME);
 	}
 
@@ -493,7 +493,7 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
 	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
 
 	/* cpu map is not empty, we have specific CPUs to work with */
-	if (!cpu_map__empty(event_cpus)) {
+	if (!perf_cpu_map__empty(event_cpus)) {
 		for (i = 0; i < cpu__max_cpu(); i++) {
 			if (!cpu_map__has(event_cpus, i) ||
 			    !cpu_map__has(online_cpus, i))
@@ -649,7 +649,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
 		return -EINVAL;
 
 	/* If the cpu_map is empty all online CPUs are involved */
-	if (cpu_map__empty(event_cpus)) {
+	if (perf_cpu_map__empty(event_cpus)) {
 		cpu_map = online_cpus;
 	} else {
 		/* Make sure all specified CPUs are online */
+2 −2
@@ -133,7 +133,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
 	if (!opts->full_auxtrace)
 		return 0;
 
-	if (opts->full_auxtrace && !cpu_map__empty(cpus)) {
+	if (opts->full_auxtrace && !perf_cpu_map__empty(cpus)) {
 		pr_err(INTEL_BTS_PMU_NAME " does not support per-cpu recording\n");
 		return -EINVAL;
 	}
@@ -214,7 +214,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
 		 * In the case of per-cpu mmaps, we need the CPU on the
 		 * AUX event.
 		 */
-		if (!cpu_map__empty(cpus))
+		if (!perf_cpu_map__empty(cpus))
 			perf_evsel__set_sample_bit(intel_bts_evsel, CPU);
 	}
 
+5 −5
@@ -365,7 +365,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
 			ui__warning("Intel Processor Trace: TSC not available\n");
 	}
 
-	per_cpu_mmaps = !cpu_map__empty(session->evlist->core.cpus);
+	per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.cpus);
 
 	auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
 	auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
@@ -702,7 +702,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 	 * Per-cpu recording needs sched_switch events to distinguish different
 	 * threads.
 	 */
-	if (have_timing_info && !cpu_map__empty(cpus)) {
+	if (have_timing_info && !perf_cpu_map__empty(cpus)) {
 		if (perf_can_record_switch_events()) {
 			bool cpu_wide = !target__none(&opts->target) &&
 					!target__has_task(&opts->target);
@@ -760,7 +760,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 		 * In the case of per-cpu mmaps, we need the CPU on the
 		 * AUX event.
 		 */
-		if (!cpu_map__empty(cpus))
+		if (!perf_cpu_map__empty(cpus))
 			perf_evsel__set_sample_bit(intel_pt_evsel, CPU);
 	}
 
@@ -784,7 +784,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 			tracking_evsel->immediate = true;
 
 		/* In per-cpu case, always need the time of mmap events etc */
-		if (!cpu_map__empty(cpus)) {
+		if (!perf_cpu_map__empty(cpus)) {
 			perf_evsel__set_sample_bit(tracking_evsel, TIME);
 			/* And the CPU for switch events */
 			perf_evsel__set_sample_bit(tracking_evsel, CPU);
@@ -796,7 +796,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 	 * Warn the user when we do not have enough information to decode i.e.
 	 * per-cpu with no sched_switch (except workload-only).
 	 */
-	if (!ptr->have_sched_switch && !cpu_map__empty(cpus) &&
+	if (!ptr->have_sched_switch && !perf_cpu_map__empty(cpus) &&
 	    !target__none(&opts->target))
 		ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");
 
+1 −1
@@ -2059,7 +2059,7 @@ static int setup_nodes(struct perf_session *session)
 		nodes[node] = set;
 
 		/* empty node, skip */
-		if (cpu_map__empty(map))
+		if (perf_cpu_map__empty(map))
 			continue;
 
 		for (cpu = 0; cpu < map->nr; cpu++) {
+2 −2
@@ -928,7 +928,7 @@ static int perf_stat_init_aggr_mode(void)
 	 * the aggregation translate cpumap.
 	 */
 	nr = cpu_map__get_max(evsel_list->core.cpus);
-	stat_config.cpus_aggr_map = cpu_map__empty_new(nr + 1);
+	stat_config.cpus_aggr_map = perf_cpu_map__empty_new(nr + 1);
 	return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
 }
 
@@ -1493,7 +1493,7 @@ int process_stat_config_event(struct perf_session *session,
 
 	perf_event__read_stat_config(&stat_config, &event->stat_config);
 
-	if (cpu_map__empty(st->cpus)) {
+	if (perf_cpu_map__empty(st->cpus)) {
 		if (st->aggr_mode != AGGR_UNSET)
 			pr_warning("warning: processing task data, aggregation mode not set\n");
 		return 0;
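
Note that the builtin-stat hunk above also switches cpu_map__empty_new() to perf_cpu_map__empty_new(). Despite the similar name, that one is a constructor rather than the emptiness predicate. A rough sketch, assuming the historical behaviour (allocate nr entries, all initialised to the -1 "unset" CPU, to be filled in later, e.g. as the aggregation translation map in perf stat) and omitting reference counting:

#include <stdlib.h>

/* Simplified illustration only -- not the verbatim perf source. */
struct perf_cpu_map {
	int nr;
	int map[];
};

static struct perf_cpu_map *perf_cpu_map__empty_new(int nr)
{
	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + nr * sizeof(int));

	if (cpus != NULL) {
		cpus->nr = nr;
		for (int i = 0; i < nr; i++)
			cpus->map[i] = -1;	/* no CPU assigned yet */
	}

	return cpus;
}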