Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a34cc1dc authored by Greg Kroah-Hartman's avatar Greg Kroah-Hartman
Browse files

Reapply "perf: Disallow mis-matched inherited group reads"



This reverts commit 4934e8f7.

Keeps the ABI stable by taking advantage of a hole in the structure!

Bug: 307236803
Change-Id: Ic5f7ebeb3a9b13afdb3bfff7e54c4a93b863dab6
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@google.com>
parent aea71020
Loading
Loading
Loading
Loading
+102604 −106062

File changed.

Preview size limit exceeded, changes collapsed.

+3 −0
Original line number Diff line number Diff line
@@ -608,6 +608,9 @@ struct perf_event {
	/* The cumulative AND of all event_caps for events in this group. */
	int				group_caps;

#ifndef __GENKSYMS__
	unsigned int			group_generation;
#endif
	struct perf_event		*group_leader;
	struct pmu			*pmu;
	void				*pmu_private;
+33 −6
Original line number Diff line number Diff line
@@ -1849,6 +1849,7 @@ static void perf_group_attach(struct perf_event *event)

	list_add_tail(&event->sibling_list, &group_leader->sibling_list);
	group_leader->nr_siblings++;
	group_leader->group_generation++;

	perf_event__header_size(group_leader);

@@ -2004,6 +2005,7 @@ static void perf_group_detach(struct perf_event *event)
	if (event->group_leader != event) {
		list_del_init(&event->sibling_list);
		event->group_leader->nr_siblings--;
		event->group_leader->group_generation++;
		goto out;
	}

@@ -4857,7 +4859,7 @@ static int __perf_read_group_add(struct perf_event *leader,
					u64 read_format, u64 *values)
{
	struct perf_event_context *ctx = leader->ctx;
	struct perf_event *sub;
	struct perf_event *sub, *parent;
	unsigned long flags;
	int n = 1; /* skip @nr */
	int ret;
@@ -4867,6 +4869,33 @@ static int __perf_read_group_add(struct perf_event *leader,
		return ret;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	/*
	 * Verify the grouping between the parent and child (inherited)
	 * events is still intact.
	 *
	 * Specifically:
	 *  - leader->ctx->lock pins leader->sibling_list
	 *  - parent->child_mutex pins parent->child_list
	 *  - parent->ctx->mutex pins parent->sibling_list
	 *
	 * Because parent->ctx != leader->ctx (and child_list nests inside
	 * ctx->mutex), group destruction is not atomic between children, also
	 * see perf_event_release_kernel(). Additionally, parent can grow the
	 * group.
	 *
	 * Therefore it is possible to have parent and child groups in a
	 * different configuration and summing over such a beast makes no sense
	 * whatsoever.
	 *
	 * Reject this.
	 */
	parent = leader->parent;
	if (parent &&
	    (parent->group_generation != leader->group_generation ||
	     parent->nr_siblings != leader->nr_siblings)) {
		ret = -ECHILD;
		goto unlock;
	}

	/*
	 * Since we co-schedule groups, {enabled,running} times of siblings
@@ -4896,8 +4925,9 @@ static int __perf_read_group_add(struct perf_event *leader,
			values[n++] = primary_event_id(sub);
	}

unlock:
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
	return 0;
	return ret;
}

static int perf_read_group(struct perf_event *event,
@@ -4916,10 +4946,6 @@ static int perf_read_group(struct perf_event *event,

	values[0] = 1 + leader->nr_siblings;

	/*
	 * By locking the child_mutex of the leader we effectively
	 * lock the child list of all siblings.. XXX explain how.
	 */
	mutex_lock(&leader->child_mutex);

	ret = __perf_read_group_add(leader, read_format, values);
@@ -12024,6 +12050,7 @@ static int inherit_group(struct perf_event *parent_event,
		    !perf_get_aux_event(child_ctr, leader))
			return -EINVAL;
	}
	leader->group_generation = parent_event->group_generation;
	return 0;
}