
Commit d5b76bef authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "A kernel crash fix plus three tooling fixes"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: Fix crash in perf_event_read()
  perf callchain: Reference count maps
  perf diff: Fix -o/--order option behavior (again)
  perf diff: Fix segfault on 'perf diff -o N' option
parents 4e4f74a7 451d24d1
+15 −10
@@ -3487,12 +3487,13 @@ struct perf_read_data {
	int ret;
};

-static int find_cpu_to_read(struct perf_event *event, int local_cpu)
+static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
{
-	int event_cpu = event->oncpu;
	u16 local_pkg, event_pkg;

	if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
+		int local_cpu = smp_processor_id();
+
		event_pkg = topology_physical_package_id(event_cpu);
		local_pkg = topology_physical_package_id(local_cpu);

@@ -3624,7 +3625,7 @@ u64 perf_event_read_local(struct perf_event *event)

static int perf_event_read(struct perf_event *event, bool group)
{
-	int ret = 0, cpu_to_read, local_cpu;
+	int event_cpu, ret = 0;

	/*
	 * If event is enabled and currently active on a CPU, update the
@@ -3637,21 +3638,25 @@ static int perf_event_read(struct perf_event *event, bool group)
			.ret = 0,
		};

-		local_cpu = get_cpu();
-		cpu_to_read = find_cpu_to_read(event, local_cpu);
-		put_cpu();
+		event_cpu = READ_ONCE(event->oncpu);
+		if ((unsigned)event_cpu >= nr_cpu_ids)
+			return 0;
+
+		preempt_disable();
+		event_cpu = __perf_event_read_cpu(event, event_cpu);

		/*
		 * Purposely ignore the smp_call_function_single() return
		 * value.
		 *
-		 * If event->oncpu isn't a valid CPU it means the event got
+		 * If event_cpu isn't a valid CPU it means the event got
		 * scheduled out and that will have updated the event count.
		 *
		 * Therefore, either way, we'll have an up-to-date event count
		 * after this.
		 */
-		(void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
+		(void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
+		preempt_enable();
		ret = data.ret;
	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
		struct perf_event_context *ctx = event->ctx;
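
Note on the perf core change above: the fix replaces the get_cpu()/find_cpu_to_read() dance with a defensive pattern, snapshot the racy event->oncpu field once with READ_ONCE(), range-check the snapshot against nr_cpu_ids, and keep preemption disabled while that CPU number is used as the IPI target for __perf_event_read(). The fragment below is a minimal userspace sketch of that pattern, not the kernel code: READ_ONCE() is approximated with a volatile access, and send_ipi_to() is a made-up stand-in for smp_call_function_single().

    #include <stdio.h>

    #define NR_CPU_IDS 8                           /* stand-in for nr_cpu_ids */
    #define READ_ONCE(x) (*(volatile int *)&(x))   /* crude stand-in for the kernel macro */

    static int event_oncpu = 3;        /* may be set to -1 by another context at any time */

    /* Hypothetical stand-in for smp_call_function_single(). */
    static void send_ipi_to(int cpu)
    {
        printf("IPI to CPU %d\n", cpu);
    }

    static int read_event_count(void)
    {
        int cpu = READ_ONCE(event_oncpu);      /* snapshot once; the field can change under us */

        if ((unsigned int)cpu >= NR_CPU_IDS)   /* the unsigned compare also catches -1 */
            return 0;                          /* event was scheduled out; count is already stable */

        /* In the kernel this region is bracketed by preempt_disable()/preempt_enable(). */
        send_ipi_to(cpu);
        return 1;
    }

    int main(void)
    {
        return read_event_count() ? 0 : 1;
    }
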
+1 −1
@@ -1199,7 +1199,7 @@ static int ui_init(void)
		BUG_ON(1);
	}

-	perf_hpp__register_sort_field(fmt);
+	perf_hpp__prepend_sort_field(fmt);
	return 0;
}

+10 −0
@@ -521,6 +521,12 @@ void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
	list_add_tail(&format->sort_list, &list->sorts);
}

+void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
+				       struct perf_hpp_fmt *format)
+{
+	list_add(&format->sort_list, &list->sorts);
+}
+
void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del(&format->list);
@@ -560,6 +566,10 @@ void perf_hpp__setup_output_field(struct perf_hpp_list *list)
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

+		/* skip sort-only fields ("sort_compute" in perf diff) */
+		if (!fmt->entry && !fmt->color)
+			continue;
+
		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
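
Note on the two tooling changes above: perf_hpp_list__register_sort_field() appends with list_add_tail(), while the new perf_hpp_list__prepend_sort_field() inserts at the head with list_add(), so the key requested via 'perf diff -o N' becomes the first sort field instead of the last. The sketch below re-implements just enough of the kernel-style circular list to show the append/prepend difference; struct item and the field names are illustrative, not perf code.

    #include <stdio.h>

    /* Tiny re-implementation of the kernel's circular doubly linked list. */
    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void __list_add(struct list_head *new,
                           struct list_head *prev, struct list_head *next)
    {
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
    }

    /* list_add(): insert right after the head, i.e. prepend. */
    static void list_add(struct list_head *new, struct list_head *head)
    {
        __list_add(new, head, head->next);
    }

    /* list_add_tail(): insert right before the head, i.e. append. */
    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
        __list_add(new, head->prev, head);
    }

    struct item { struct list_head node; const char *name; };

    int main(void)
    {
        struct list_head sorts;
        struct item a = { .name = "baseline" }, b = { .name = "delta" };
        struct list_head *p;

        INIT_LIST_HEAD(&sorts);
        list_add_tail(&a.node, &sorts);   /* register: append */
        list_add(&b.node, &sorts);        /* prepend: becomes the primary sort key */

        for (p = sorts.next; p != &sorts; p = p->next)
            printf("%s\n", ((struct item *)p)->name);   /* prints: delta, then baseline */
        return 0;
    }
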
+9 −2
@@ -437,7 +437,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
		}
		call->ip = cursor_node->ip;
		call->ms.sym = cursor_node->sym;
-		call->ms.map = cursor_node->map;
+		call->ms.map = map__get(cursor_node->map);

		if (cursor_node->branch) {
			call->branch_count = 1;
@@ -477,6 +477,7 @@ add_child(struct callchain_node *parent,

		list_for_each_entry_safe(call, tmp, &new->val, list) {
			list_del(&call->list);
+			map__zput(call->ms.map);
			free(call);
		}
		free(new);
@@ -761,6 +762,7 @@ merge_chain_branch(struct callchain_cursor *cursor,
					list->ms.map, list->ms.sym,
					false, NULL, 0, 0);
		list_del(&list->list);
+		map__zput(list->ms.map);
		free(list);
	}

@@ -811,7 +813,8 @@ int callchain_cursor_append(struct callchain_cursor *cursor,
	}

	node->ip = ip;
-	node->map = map;
+	map__zput(node->map);
+	node->map = map__get(map);
	node->sym = sym;
	node->branch = branch;
	node->nr_loop_iter = nr_loop_iter;
@@ -1142,11 +1145,13 @@ static void free_callchain_node(struct callchain_node *node)

	list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
		list_del(&list->list);
+		map__zput(list->ms.map);
		free(list);
	}

	list_for_each_entry_safe(list, tmp, &node->val, list) {
		list_del(&list->list);
+		map__zput(list->ms.map);
		free(list);
	}

@@ -1210,6 +1215,7 @@ int callchain_node__make_parent_list(struct callchain_node *node)
				goto out;
			*new = *chain;
			new->has_children = false;
+			map__get(new->ms.map);
			list_add_tail(&new->list, &head);
		}
		parent = parent->parent;
@@ -1230,6 +1236,7 @@ int callchain_node__make_parent_list(struct callchain_node *node)
out:
	list_for_each_entry_safe(chain, new, &head, list) {
		list_del(&chain->list);
+		map__zput(chain->ms.map);
		free(chain);
	}
	return -ENOMEM;
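
Note on the callchain change above: every map pointer stored in a callchain node now carries a reference, taken with map__get() when the pointer is copied in and dropped with map__zput() (put plus NULL-ing the field) when the node is freed or the slot is overwritten. Below is a minimal sketch of that get/zput discipline; struct obj and the obj__* helpers are illustrative stand-ins, not perf's struct map, which uses a proper refcount type.

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative refcounted object. */
    struct obj {
        int refcnt;
        const char *name;
    };

    static struct obj *obj__get(struct obj *o)
    {
        if (o)
            o->refcnt++;
        return o;
    }

    static void obj__put(struct obj *o)
    {
        if (o && --o->refcnt == 0) {
            printf("freeing %s\n", o->name);
            free(o);
        }
    }

    /* zput: drop the reference and clear the pointer so it cannot be reused. */
    #define obj__zput(ptr) do { obj__put(ptr); (ptr) = NULL; } while (0)

    struct node {
        struct obj *map;   /* owned reference, mirrors a callchain node's ms.map */
    };

    int main(void)
    {
        struct obj *m = malloc(sizeof(*m));
        struct node n = { NULL };

        m->refcnt = 1;
        m->name = "map";

        n.map = obj__get(m);  /* store: take a reference (cf. map__get()) */
        obj__zput(n.map);     /* tear down: drop it and NULL the field (cf. map__zput()) */
        obj__put(m);          /* release the creator's reference: object is freed */
        return 0;
    }
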
+6 −0
@@ -5,6 +5,7 @@
#include <linux/list.h>
#include <linux/rbtree.h>
#include "event.h"
#include "map.h"
#include "symbol.h"

#define HELP_PAD "\t\t\t\t"
@@ -184,8 +185,13 @@ int callchain_merge(struct callchain_cursor *cursor,
 */
static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
{
+	struct callchain_cursor_node *node;
+
	cursor->nr = 0;
	cursor->last = &cursor->first;
+
+	for (node = cursor->first; node != NULL; node = node->next)
+		map__zput(node->map);
}

int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
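
Note on the header change above: callchain_cursor_reset() only rewinds the cursor; the nodes themselves are kept and reused by the next callchain_cursor_append(), so the reset path must now walk the chain and drop the map reference each node still holds. A small illustrative sketch of that reuse-and-release pattern, with made-up struct cnode/cursor types rather than perf's:

    #include <stdio.h>
    #include <stdlib.h>

    struct cnode {
        char *map;            /* stands in for a counted map reference */
        struct cnode *next;
    };

    struct cursor {
        struct cnode *first;
        size_t nr;            /* nodes currently in use; their storage is reused */
    };

    /*
     * Rewinding only resets the in-use count; the nodes stay allocated so the
     * next append can reuse them.  Any reference a node still holds therefore
     * has to be dropped here, which is what the new loop in
     * callchain_cursor_reset() does with map__zput().
     */
    static void cursor_reset(struct cursor *c)
    {
        struct cnode *n;

        c->nr = 0;
        for (n = c->first; n != NULL; n = n->next) {
            free(n->map);     /* stand-in for map__zput(node->map) */
            n->map = NULL;
        }
    }

    int main(void)
    {
        struct cnode n1 = { malloc(16), NULL };
        struct cursor c = { &n1, 1 };

        cursor_reset(&c);
        printf("in use: %zu, map: %p\n", c.nr, (void *)n1.map);
        return 0;
    }
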