Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 41cd2e0f authored by Linux Build Service Account; committed by Gerrit - the friendly Code Review server
Browse files

Merge changes Ie740315c,Ided097bd into msm-next

* changes:
  Perf: arm64: Add additional CPUHP state for perf events
  Perf: core: create/delete shared kernel events
parents 4ae66ebe 28ffa0c8
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -1197,8 +1197,8 @@ static int perf_event_cpu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ONLINE,
				"PERF_EVENT/CPUHP_AP_PERF_ONLINE",
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_NOTIFY_PERF_ONLINE,
				"PERF_EVENT/CPUHP_AP_NOTIFY_PERF_ONLINE",
				perf_event_hotplug_coming_up,
				perf_event_hotplug_going_down);
	if (ret)
+1 −0
Original line number Diff line number Diff line
@@ -164,6 +164,7 @@ enum cpuhp_state {
	CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
	CPUHP_AP_WORKQUEUE_ONLINE,
	CPUHP_AP_RCUTREE_ONLINE,
	CPUHP_AP_NOTIFY_PERF_ONLINE,
	CPUHP_AP_ONLINE_DYN,
	CPUHP_AP_ONLINE_DYN_END		= CPUHP_AP_ONLINE_DYN + 30,
	CPUHP_AP_X86_HPET_ONLINE,
+182 −2
Original line number Diff line number Diff line
@@ -364,6 +364,32 @@ enum event_type_t {
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * Per-cpu bookkeeping for kernel perf events that may be shared between
 * multiple in-kernel users. Each cpu gets its own instance (see the
 * __percpu `shared_events` pointer below); a slot pairs a live event with
 * the attr it was created from and a reference count of its users.
 */
#define SHARED_EVENTS_MAX 7

struct shared_events_str {
	/*
	 * Mutex to serialize access to shared list. Needed for the
	 * read/modify/write sequences.
	 */
	struct mutex		list_mutex;

	/*
	 * A 1 bit for an index indicates that the slot is being used for
	 * an event. A 0 means that the slot can be used.
	 */
	DECLARE_BITMAP(used_mask, SHARED_EVENTS_MAX);

	/*
	 * The kernel events that are shared for this cpu, the attrs they
	 * were created with (matched by memcmp on lookup), and how many
	 * users currently hold each event.
	 */
	struct perf_event	*events[SHARED_EVENTS_MAX];
	struct perf_event_attr	attr[SHARED_EVENTS_MAX];
	atomic_t		refcount[SHARED_EVENTS_MAX];
};

static struct shared_events_str __percpu *shared_events;

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
@@ -4185,6 +4211,35 @@ static bool exclusive_event_installable(struct perf_event *event,
static void perf_addr_filters_splice(struct perf_event *event,
				       struct list_head *head);

static int
perf_event_delete_kernel_shared(struct perf_event *event)
{
	int rc = -1, cpu = event->cpu;
	struct shared_events_str *shrd_events;
	unsigned long idx;

	if (!shared_events || (u32)cpu >= nr_cpu_ids)
		return 0;

	shrd_events = per_cpu_ptr(shared_events, cpu);

	mutex_lock(&shrd_events->list_mutex);

	for_each_set_bit(idx, shrd_events->used_mask, SHARED_EVENTS_MAX) {
		if (shrd_events->events[idx] == event) {
			if (atomic_dec_and_test(&shrd_events->refcount[idx])) {
				clear_bit(idx, shrd_events->used_mask);
				shrd_events->events[idx] = NULL;
			}
			rc = (int)atomic_read(&shrd_events->refcount[idx]);
			break;
		}
	}

	mutex_unlock(&shrd_events->list_mutex);
	return rc;
}

static void _free_event(struct perf_event *event)
{
	irq_work_sync(&event->pending);
@@ -4322,8 +4377,12 @@ int perf_event_release_kernel(struct perf_event *event)
		goto no_ctx;
	}

	if (!is_kernel_event(event))
	if (!is_kernel_event(event)) {
		perf_remove_from_owner(event);
	} else {
		if (perf_event_delete_kernel_shared(event) > 0)
			return 0;
	}

	ctx = perf_event_ctx_lock(event);
	WARN_ON_ONCE(ctx->parent_ctx);
@@ -10368,6 +10427,103 @@ SYSCALL_DEFINE5(perf_event_open,
	return err;
}

/*
 * Look for an existing shared kernel event on @cpu whose attr matches
 * @attr exactly (byte-wise memcmp). On a hit, take an extra reference
 * and hand the existing event back; otherwise return NULL so the caller
 * creates a fresh event.
 *
 * Only plain per-cpu hardware/raw counters — no task, no context, no
 * overflow handler — are candidates for sharing right now.
 */
static struct perf_event *
perf_event_create_kernel_shared_check(struct perf_event_attr *attr, int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t overflow_handler,
				 void *context)
{
	struct shared_events_str *shrd;
	struct perf_event *found = NULL;
	unsigned long i;

	/* Sharing only works for initialized, valid per-cpu events. */
	if (!shared_events || (u32)cpu >= nr_cpu_ids)
		return NULL;

	/* Request types we cannot share yet. */
	if (task || context || overflow_handler)
		return NULL;
	if (attr->type != PERF_TYPE_HARDWARE && attr->type != PERF_TYPE_RAW)
		return NULL;

	/*
	 * per_cpu_ptr is fine here; a cross-cpu call (as most of perf
	 * uses for per-cpu data) is not required under the list mutex.
	 */
	shrd = per_cpu_ptr(shared_events, cpu);

	mutex_lock(&shrd->list_mutex);
	for_each_set_bit(i, shrd->used_mask, SHARED_EVENTS_MAX) {
		if (!memcmp(attr, &shrd->attr[i], sizeof(shrd->attr[i]))) {
			atomic_inc(&shrd->refcount[i]);
			found = shrd->events[i];
			break;
		}
	}
	mutex_unlock(&shrd->list_mutex);

	return found;
}

/*
 * Publish a freshly created kernel event on @cpu's shared list so later
 * identical requests can reuse it. Silently does nothing when the event
 * is not shareable or every slot is taken.
 */
static void
perf_event_create_kernel_shared_add(struct perf_event_attr *attr, int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t overflow_handler,
				 void *context,
				 struct perf_event *event)
{
	struct shared_events_str *shrd;
	unsigned long slot;

	/* Sharing only works for initialized, valid per-cpu events. */
	if (!shared_events || (u32)cpu >= nr_cpu_ids)
		return;

	/* Request types we cannot share yet. */
	if (task || context || overflow_handler)
		return;
	if (attr->type != PERF_TYPE_HARDWARE && attr->type != PERF_TYPE_RAW)
		return;

	/*
	 * per_cpu_ptr is fine here; a cross-cpu call (as most of perf
	 * uses for per-cpu data) is not required under the list mutex.
	 */
	shrd = per_cpu_ptr(shared_events, cpu);

	mutex_lock(&shrd->list_mutex);

	/*
	 * The caller already missed in the shared list, so the event is
	 * known not to be present — only a free slot is needed.
	 */
	slot = find_first_zero_bit(shrd->used_mask, SHARED_EVENTS_MAX);
	if (slot < SHARED_EVENTS_MAX) {
		shrd->attr[slot]   = *attr;
		shrd->events[slot] = event;
		set_bit(slot, shrd->used_mask);
		atomic_set(&shrd->refcount[slot], 1);
	}

	mutex_unlock(&shrd->list_mutex);
}

/**
 * perf_event_create_kernel_counter
 *
@@ -10385,6 +10541,14 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
	struct perf_event *event;
	int err;

	/*
	 * Check if the requested attributes match a shared event
	 */
	event = perf_event_create_kernel_shared_check(attr, cpu,
				 task, overflow_handler, context);
	if (event)
		return event;

	/*
	 * Get the target context (task or percpu):
	 */
@@ -10436,6 +10600,11 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
	perf_unpin_context(ctx);
	mutex_unlock(&ctx->mutex);

	/*
	 * Check if can add event to shared list
	 */
	perf_event_create_kernel_shared_add(attr, cpu,
			 task, overflow_handler, context, event);
	return event;

err_unlock:
@@ -11309,10 +11478,21 @@ static int perf_cpu_hp_init(void) { return 0; }

void __init perf_event_init(void)
{
	int ret;
	int ret, cpu;

	idr_init(&pmu_idr);

	shared_events = alloc_percpu(struct shared_events_str);
	if (!shared_events) {
		WARN(1, "alloc_percpu failed for shared_events struct");
	} else {
		for_each_possible_cpu(cpu) {
			struct shared_events_str *shrd_events =
				per_cpu_ptr(shared_events, cpu);

			mutex_init(&shrd_events->list_mutex);
		}
	}
	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);