Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f6cf87f7 authored by Ingo Molnar
Browse files

Merge tag 'perf-urgent-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/urgent

Pull perf/urgent fixes from Arnaldo Carvalho de Melo:

- Fix segfault pressing -> in 'perf top' with no hist entries. (Wang Nan)

   E.g:
	perf top -e page-faults --pid 11400 # 11400 generates no page-fault

- Fix propagation of thread and cpu maps, that got broken when doing incomplete
  changes to better support events with a PMU cpu mask, leading to Intel PT to
  fail with an error like:

    $ perf record -e intel_pt//u uname
    Error: The sys_perf_event_open() syscall returned with
              22 (Invalid argument) for event (sched:sched_switch).

  Because intel_pt adds that sched:sched_switch evsel to the evlist after the
  thread/cpu maps were propagated to the evsels, fix it. (Adrian Hunter)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 91a4dc9f c5e6bd2e
Loading
Loading
Loading
Loading
+14 −4
Original line number Original line Diff line number Diff line
@@ -34,6 +34,8 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
		.disabled = 1,
		.disabled = 1,
		.freq = 1,
		.freq = 1,
	};
	};
	struct cpu_map *cpus;
	struct thread_map *threads;


	attr.sample_freq = 500;
	attr.sample_freq = 500;


@@ -50,14 +52,19 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
	}
	}
	perf_evlist__add(evlist, evsel);
	perf_evlist__add(evlist, evsel);


	evlist->cpus = cpu_map__dummy_new();
	cpus = cpu_map__dummy_new();
	evlist->threads = thread_map__new_by_tid(getpid());
	threads = thread_map__new_by_tid(getpid());
	if (!evlist->cpus || !evlist->threads) {
	if (!cpus || !threads) {
		err = -ENOMEM;
		err = -ENOMEM;
		pr_debug("Not enough memory to create thread/cpu maps\n");
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
		goto out_free_maps;
	}
	}


	perf_evlist__set_maps(evlist, cpus, threads);

	cpus	= NULL;
	threads = NULL;

	if (perf_evlist__open(evlist)) {
	if (perf_evlist__open(evlist)) {
		const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate";
		const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate";


@@ -107,6 +114,9 @@ next_event:
		err = -1;
		err = -1;
	}
	}


out_free_maps:
	cpu_map__put(cpus);
	thread_map__put(threads);
out_delete_evlist:
out_delete_evlist:
	perf_evlist__delete(evlist);
	perf_evlist__delete(evlist);
	return err;
	return err;
+14 −4
Original line number Original line Diff line number Diff line
@@ -43,6 +43,8 @@ int test__task_exit(void)
	};
	};
	const char *argv[] = { "true", NULL };
	const char *argv[] = { "true", NULL };
	char sbuf[STRERR_BUFSIZE];
	char sbuf[STRERR_BUFSIZE];
	struct cpu_map *cpus;
	struct thread_map *threads;


	signal(SIGCHLD, sig_handler);
	signal(SIGCHLD, sig_handler);


@@ -58,14 +60,19 @@ int test__task_exit(void)
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 * we're monitoring, the one forked there.
	 */
	 */
	evlist->cpus = cpu_map__dummy_new();
	cpus = cpu_map__dummy_new();
	evlist->threads = thread_map__new_by_tid(-1);
	threads = thread_map__new_by_tid(-1);
	if (!evlist->cpus || !evlist->threads) {
	if (!cpus || !threads) {
		err = -ENOMEM;
		err = -ENOMEM;
		pr_debug("Not enough memory to create thread/cpu maps\n");
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
		goto out_free_maps;
	}
	}


	perf_evlist__set_maps(evlist, cpus, threads);

	cpus	= NULL;
	threads = NULL;

	err = perf_evlist__prepare_workload(evlist, &target, argv, false,
	err = perf_evlist__prepare_workload(evlist, &target, argv, false,
					    workload_exec_failed_signal);
					    workload_exec_failed_signal);
	if (err < 0) {
	if (err < 0) {
@@ -114,6 +121,9 @@ retry:
		err = -1;
		err = -1;
	}
	}


out_free_maps:
	cpu_map__put(cpus);
	thread_map__put(threads);
out_delete_evlist:
out_delete_evlist:
	perf_evlist__delete(evlist);
	perf_evlist__delete(evlist);
	return err;
	return err;
+11 −1
Original line number Original line Diff line number Diff line
@@ -1968,7 +1968,8 @@ skip_annotation:
					  &options[nr_options], dso);
					  &options[nr_options], dso);
		nr_options += add_map_opt(browser, &actions[nr_options],
		nr_options += add_map_opt(browser, &actions[nr_options],
					  &options[nr_options],
					  &options[nr_options],
					  browser->selection->map);
					  browser->selection ?
						browser->selection->map : NULL);


		/* perf script support */
		/* perf script support */
		if (browser->he_selection) {
		if (browser->he_selection) {
@@ -1976,6 +1977,15 @@ skip_annotation:
						     &actions[nr_options],
						     &actions[nr_options],
						     &options[nr_options],
						     &options[nr_options],
						     thread, NULL);
						     thread, NULL);
			/*
			 * Note that browser->selection != NULL
			 * when browser->he_selection is not NULL,
			 * so we don't need to check browser->selection
			 * before fetching browser->selection->sym like what
			 * we do before fetching browser->selection->map.
			 *
			 * See hist_browser__show_entry.
			 */
			nr_options += add_script_opt(browser,
			nr_options += add_script_opt(browser,
						     &actions[nr_options],
						     &actions[nr_options],
						     &options[nr_options],
						     &options[nr_options],
+76 −62
Original line number Original line Diff line number Diff line
@@ -124,6 +124,33 @@ void perf_evlist__delete(struct perf_evlist *evlist)
	free(evlist);
	free(evlist);
}
}


static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs) so
	 * keep it, if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evsel->own_cpus);
	}

	thread_map__put(evsel->threads);
	evsel->threads = thread_map__get(evlist->threads);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
{
	entry->evlist = evlist;
	entry->evlist = evlist;
@@ -133,18 +160,19 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)


	if (!evlist->nr_entries++)
	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
		perf_evlist__set_id_pos(evlist);

	__perf_evlist__propagate_maps(evlist, entry);
}
}


void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   struct list_head *list)
				   int nr_entries)
{
{
	bool set_id_pos = !evlist->nr_entries;
	struct perf_evsel *evsel, *temp;


	list_splice_tail(list, &evlist->entries);
	__evlist__for_each_safe(list, temp, evsel) {
	evlist->nr_entries += nr_entries;
		list_del_init(&evsel->node);
	if (set_id_pos)
		perf_evlist__add(evlist, evsel);
		perf_evlist__set_id_pos(evlist);
	}
}
}


void __perf_evlist__set_leader(struct list_head *list)
void __perf_evlist__set_leader(struct list_head *list)
@@ -210,7 +238,7 @@ static int perf_evlist__add_attrs(struct perf_evlist *evlist,
		list_add_tail(&evsel->node, &head);
		list_add_tail(&evsel->node, &head);
	}
	}


	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
	perf_evlist__splice_list_tail(evlist, &head);


	return 0;
	return 0;


@@ -1103,71 +1131,56 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
	return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
	return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
}
}


static int perf_evlist__propagate_maps(struct perf_evlist *evlist,
				       bool has_user_cpus)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		/*
		 * We already have cpus for evsel (via PMU sysfs) so
		 * keep it, if there's no target cpu list defined.
		 */
		if (evsel->cpus && has_user_cpus)
			cpu_map__put(evsel->cpus);

		if (!evsel->cpus || has_user_cpus)
			evsel->cpus = cpu_map__get(evlist->cpus);

		evsel->threads = thread_map__get(evlist->threads);

		if ((evlist->cpus && !evsel->cpus) ||
		    (evlist->threads && !evsel->threads))
			return -ENOMEM;
	}

	return 0;
}

int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
	struct cpu_map *cpus;
					      target->uid);
	struct thread_map *threads;

	threads = thread_map__new_str(target->pid, target->tid, target->uid);


	if (evlist->threads == NULL)
	if (!threads)
		return -1;
		return -1;


	if (target__uses_dummy_map(target))
	if (target__uses_dummy_map(target))
		evlist->cpus = cpu_map__dummy_new();
		cpus = cpu_map__dummy_new();
	else
	else
		evlist->cpus = cpu_map__new(target->cpu_list);
		cpus = cpu_map__new(target->cpu_list);


	if (evlist->cpus == NULL)
	if (!cpus)
		goto out_delete_threads;
		goto out_delete_threads;


	return perf_evlist__propagate_maps(evlist, !!target->cpu_list);
	evlist->has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(evlist, cpus, threads);

	return 0;


out_delete_threads:
out_delete_threads:
	thread_map__put(evlist->threads);
	thread_map__put(threads);
	evlist->threads = NULL;
	return -1;
	return -1;
}
}


int perf_evlist__set_maps(struct perf_evlist *evlist,
void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
			  struct cpu_map *cpus,
			   struct thread_map *threads)
			   struct thread_map *threads)
{
{
	if (evlist->cpus)
	/*
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed i.e. don't put it.  Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1.  If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->cpus) {
		cpu_map__put(evlist->cpus);
		cpu_map__put(evlist->cpus);

		evlist->cpus = cpus;
		evlist->cpus = cpus;
	}


	if (evlist->threads)
	if (threads != evlist->threads) {
		thread_map__put(evlist->threads);
		thread_map__put(evlist->threads);

		evlist->threads = threads;
		evlist->threads = threads;
	}


	return perf_evlist__propagate_maps(evlist, false);
	perf_evlist__propagate_maps(evlist);
}
}


int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
@@ -1387,6 +1400,8 @@ void perf_evlist__close(struct perf_evlist *evlist)


static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
{
{
	struct cpu_map	  *cpus;
	struct thread_map *threads;
	int err = -ENOMEM;
	int err = -ENOMEM;


	/*
	/*
@@ -1398,20 +1413,19 @@ static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
	 * error, and we may not want to do that fallback to a
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 * default cpu identity map :-\
	 */
	 */
	evlist->cpus = cpu_map__new(NULL);
	cpus = cpu_map__new(NULL);
	if (evlist->cpus == NULL)
	if (!cpus)
		goto out;
		goto out;


	evlist->threads = thread_map__new_dummy();
	threads = thread_map__new_dummy();
	if (evlist->threads == NULL)
	if (!threads)
		goto out_free_cpus;
		goto out_put;


	err = 0;
	perf_evlist__set_maps(evlist, cpus, threads);
out:
out:
	return err;
	return err;
out_free_cpus:
out_put:
	cpu_map__put(evlist->cpus);
	cpu_map__put(cpus);
	evlist->cpus = NULL;
	goto out;
	goto out;
}
}


+4 −5
Original line number Original line Diff line number Diff line
@@ -42,6 +42,7 @@ struct perf_evlist {
	int		 nr_mmaps;
	int		 nr_mmaps;
	bool		 overwrite;
	bool		 overwrite;
	bool		 enabled;
	bool		 enabled;
	bool		 has_user_cpus;
	size_t		 mmap_len;
	size_t		 mmap_len;
	int		 id_pos;
	int		 id_pos;
	int		 is_pos;
	int		 is_pos;
@@ -155,8 +156,7 @@ int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
void perf_evlist__set_selected(struct perf_evlist *evlist,
void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel);
			       struct perf_evsel *evsel);


int perf_evlist__set_maps(struct perf_evlist *evlist,
void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
			  struct cpu_map *cpus,
			   struct thread_map *threads);
			   struct thread_map *threads);
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel);
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel);
@@ -179,8 +179,7 @@ bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
bool perf_evlist__valid_read_format(struct perf_evlist *evlist);
bool perf_evlist__valid_read_format(struct perf_evlist *evlist);


void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   struct list_head *list);
				   int nr_entries);


static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
{
{
Loading