Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 803ca418 authored by Ingo Molnar
Browse files

Merge tag 'perf-core-for-mingo' of...

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

User visible changes:

  - Fixes and improvements for supporting annotating ARM binaries, support ARM
    call and jump instructions, more work needed to have arch specific stuff
    separated into tools/perf/arch/*/annotate/ (Russell King)

  - Fix several 'perf test' entries broken by recent perf/core changes (Jiri Olsa)

Infrastructure changes:

  - Consolidate perf_ev{list,sel}__{enable,disable}() calls (Jiri Olsa)

  - Pass correct string to dso__adjust_kmod_long_name() (Wang Nan)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents f1ad4488 cfef25b8
Loading
Loading
Loading
Loading
+27 −17
Original line number Diff line number Diff line
@@ -168,15 +168,25 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
	attr->sample_period = 0;
	attr->sample_type   = 0;

	if (target__has_cpu(&target))
		return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));

	if (!target__has_task(&target) && perf_evsel__is_group_leader(evsel)) {
	/*
	 * Disabling all counters initially, they will be enabled
	 * either manually by us or by kernel via enable_on_exec
	 * set later.
	 */
	if (perf_evsel__is_group_leader(evsel)) {
		attr->disabled = 1;
		if (!initial_delay)

		/*
		 * In case of initial_delay we enable tracee
		 * events manually.
		 */
		if (target__none(&target) && !initial_delay)
			attr->enable_on_exec = 1;
	}

	if (target__has_cpu(&target))
		return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));

	return perf_evsel__open_per_thread(evsel, evsel_list->threads);
}

@@ -251,18 +261,18 @@ static void process_interval(void)
	print_counters(&rs, 0, NULL);
}

static void handle_initial_delay(void)
static void enable_counters(void)
{
	struct perf_evsel *counter;

	if (initial_delay) {
		const int ncpus = cpu_map__nr(evsel_list->cpus),
			nthreads = thread_map__nr(evsel_list->threads);

	if (initial_delay)
		usleep(initial_delay * 1000);
		evlist__for_each(evsel_list, counter)
			perf_evsel__enable(counter, ncpus, nthreads);
	}

	/*
	 * We need to enable counters only if:
	 * - we don't have tracee (attaching to task or cpu)
	 * - we have initial delay configured
	 */
	if (!target__none(&target) || initial_delay)
		perf_evlist__enable(evsel_list);
}

static volatile int workload_exec_errno;
@@ -359,7 +369,7 @@ static int __run_perf_stat(int argc, const char **argv)

	if (forks) {
		perf_evlist__start_workload(evsel_list);
		handle_initial_delay();
		enable_counters();

		if (interval) {
			while (!waitpid(child_pid, &status, WNOHANG)) {
@@ -378,7 +388,7 @@ static int __run_perf_stat(int argc, const char **argv)
		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), argv[0]);
	} else {
		handle_initial_delay();
		enable_counters();
		while (!done) {
			nanosleep(&ts, NULL);
			if (interval)
+9 −5
Original line number Diff line number Diff line
@@ -433,7 +433,6 @@ enum {

static int do_test_code_reading(bool try_kcore)
{
	struct machines machines;
	struct machine *machine;
	struct thread *thread;
	struct record_opts opts = {
@@ -459,8 +458,7 @@ static int do_test_code_reading(bool try_kcore)

	pid = getpid();

	machines__init(&machines);
	machine = &machines.host;
	machine = machine__new_host();

	ret = machine__create_kernel_maps(machine);
	if (ret < 0) {
@@ -549,6 +547,13 @@ static int do_test_code_reading(bool try_kcore)
		if (ret < 0) {
			if (!excl_kernel) {
				excl_kernel = true;
				/*
				 * Both cpus and threads are now owned by evlist
				 * and will be freed by following perf_evlist__set_maps
				 * call. Getting reference to keep them alive.
				 */
				cpu_map__get(cpus);
				thread_map__get(threads);
				perf_evlist__set_maps(evlist, NULL, NULL);
				perf_evlist__delete(evlist);
				evlist = NULL;
@@ -594,9 +599,8 @@ static int do_test_code_reading(bool try_kcore)
		cpu_map__put(cpus);
		thread_map__put(threads);
	}
	machines__destroy_kernel_maps(&machines);
	machine__delete_threads(machine);
	machines__exit(&machines);
	machine__delete(machine);

	return err;
}
+2 −6
Original line number Diff line number Diff line
@@ -160,14 +160,11 @@ static int krava_1(struct thread *thread)

int test__dwarf_unwind(int subtest __maybe_unused)
{
	struct machines machines;
	struct machine *machine;
	struct thread *thread;
	int err = -1;

	machines__init(&machines);

	machine = machines__find(&machines, HOST_KERNEL_ID);
	machine = machine__new_host();
	if (!machine) {
		pr_err("Could not get machine\n");
		return -1;
@@ -199,7 +196,6 @@ int test__dwarf_unwind(int subtest __maybe_unused)

 out:
	machine__delete_threads(machine);
	machine__exit(machine);
	machines__exit(&machines);
	machine__delete(machine);
	return err;
}
+2 −1
Original line number Diff line number Diff line
@@ -103,7 +103,8 @@ int test__perf_evsel__roundtrip_name_test(int subtest __maybe_unused)
	if (err)
		ret = err;

	err = perf_evsel__name_array_test(perf_evsel__sw_names);
	err = __perf_evsel__name_array_test(perf_evsel__sw_names,
					    PERF_COUNT_SW_DUMMY + 1);
	if (err)
		ret = err;

+5 −0
Original line number Diff line number Diff line
@@ -87,6 +87,11 @@ struct machine *setup_fake_machine(struct machines *machines)
		return NULL;
	}

	if (machine__create_kernel_maps(machine)) {
		pr_debug("Not enough memory for machine setup\n");
		goto out;
	}

	for (i = 0; i < ARRAY_SIZE(fake_threads); i++) {
		struct thread *thread;

Loading