Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a11227dc authored by David S. Miller
Browse files

Merge branch 'bpf-Add-BPF-support-to-all-perf_event'



Merge branch 'bpf-Add-BPF-support-to-all-perf_event'

Alexei Starovoitov says:

====================
bpf: Add BPF support to all perf_event

v3->v4: one more tweak to reject unsupported events at map
update time as Peter suggested

v2->v3: more refactoring to address Peter's feedback.
Now all perf_events are attachable and readable

v1->v2: address Peter's feedback. Refactor patch 1 to allow attaching
bpf programs to all event types and reading counters from all of them as well
patch 2 - more tests
patch 3 - address Dave's feedback and document bpf_perf_event_read()
and bpf_perf_event_output() properly
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5071034e b7d3ed5b
Loading
Loading
Loading
Loading
+5 −2
Original line number Diff line number Diff line
@@ -896,7 +896,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
				void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				int src_cpu, int dst_cpu);
extern u64 perf_event_read_local(struct perf_event *event);
int perf_event_read_local(struct perf_event *event, u64 *value);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);

@@ -1301,7 +1301,10 @@ static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *
{
	return ERR_PTR(-EINVAL);
}
static inline u64 perf_event_read_local(struct perf_event *event)	{ return -EINVAL; }
static inline int perf_event_read_local(struct perf_event *event, u64 *value)
{
	return -EINVAL;
}
static inline void perf_event_print_debug(void)				{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
+7 −4
Original line number Diff line number Diff line
@@ -313,8 +313,11 @@ union bpf_attr {
 *     @flags: room for future extensions
 *     Return: 0 on success or negative error
 *
 * u64 bpf_perf_event_read(&map, index)
 *     Return: Number events read or error code
 * u64 bpf_perf_event_read(map, flags)
 *     read perf event counter value
 *     @map: pointer to perf_event_array map
 *     @flags: index of event in the map or bitmask flags
 *     Return: value of perf event counter read or error code
 *
 * int bpf_redirect(ifindex, flags)
 *     redirect to another netdev
@@ -328,11 +331,11 @@ union bpf_attr {
 *     @skb: pointer to skb
 *     Return: realm if != 0
 *
 * int bpf_perf_event_output(ctx, map, index, data, size)
 * int bpf_perf_event_output(ctx, map, flags, data, size)
 *     output perf raw sample
 *     @ctx: struct pt_regs*
 *     @map: pointer to perf_event_array map
 *     @index: index of event in the map
 *     @flags: index of event in the map or bitmask flags
 *     @data: data on stack to be output as raw data
 *     @size: size of data
 *     Return: 0 on success or negative error
+7 −21
Original line number Diff line number Diff line
@@ -452,38 +452,24 @@ static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	const struct perf_event_attr *attr;
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	ee = ERR_PTR(-EINVAL);

	attr = perf_event_attrs(event);
	if (IS_ERR(attr) || attr->inherit)
	if (perf_event_read_local(event, &value) == -EOPNOTSUPP)
		goto err_out;

	switch (attr->type) {
	case PERF_TYPE_SOFTWARE:
		if (attr->config != PERF_COUNT_SW_BPF_OUTPUT)
			goto err_out;
		/* fall-through */
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
		/* fall-through */
	default:
		break;
	}

err_out:
	fput(perf_file);
	return ee;
+28 −19
Original line number Diff line number Diff line
@@ -3636,10 +3636,10 @@ static inline u64 perf_event_count(struct perf_event *event)
 *     will not be local and we cannot read them atomically
 *   - must not have a pmu::count method
 */
u64 perf_event_read_local(struct perf_event *event)
int perf_event_read_local(struct perf_event *event, u64 *value)
{
	unsigned long flags;
	u64 val;
	int ret = 0;

	/*
	 * Disabling interrupts avoids all counter scheduling (context
@@ -3647,25 +3647,37 @@ u64 perf_event_read_local(struct perf_event *event)
	 */
	local_irq_save(flags);

	/* If this is a per-task event, it must be for current */
	WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
		     event->hw.target != current);

	/* If this is a per-CPU event, it must be for this CPU */
	WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
		     event->cpu != smp_processor_id());

	/*
	 * It must not be an event with inherit set, we cannot read
	 * all child counters from atomic context.
	 */
	WARN_ON_ONCE(event->attr.inherit);
	if (event->attr.inherit) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	/*
	 * It must not have a pmu::count method, those are not
	 * NMI safe.
	 */
	WARN_ON_ONCE(event->pmu->count);
	if (event->pmu->count) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* If this is a per-task event, it must be for current */
	if ((event->attach_state & PERF_ATTACH_TASK) &&
	    event->hw.target != current) {
		ret = -EINVAL;
		goto out;
	}

	/* If this is a per-CPU event, it must be for this CPU */
	if (!(event->attach_state & PERF_ATTACH_TASK) &&
	    event->cpu != smp_processor_id()) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * If the event is currently on this CPU, its either a per-task event,
@@ -3675,10 +3687,11 @@ u64 perf_event_read_local(struct perf_event *event)
	if (event->oncpu == smp_processor_id())
		event->pmu->read(event);

	val = local64_read(&event->count);
	*value = local64_read(&event->count);
out:
	local_irq_restore(flags);

	return val;
	return ret;
}

static int perf_event_read(struct perf_event *event, bool group)
@@ -8037,12 +8050,8 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
	bool is_kprobe, is_tracepoint;
	struct bpf_prog *prog;

	if (event->attr.type == PERF_TYPE_HARDWARE ||
	    event->attr.type == PERF_TYPE_SOFTWARE)
		return perf_event_set_bpf_handler(event, prog_fd);

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
		return perf_event_set_bpf_handler(event, prog_fd);

	if (event->tp_event->prog)
		return -EEXIST;
+8 −14
Original line number Diff line number Diff line
@@ -234,7 +234,8 @@ BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;
	u64 value = 0;
	int err;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
@@ -247,21 +248,14 @@ BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
		     event->attr.type != PERF_TYPE_RAW))
		return -EINVAL;

	/* make sure event is local and doesn't have pmu::count */
	if (unlikely(event->oncpu != cpu || event->pmu->count))
		return -EINVAL;

	err = perf_event_read_local(ee->event, &value);
	/*
	 * we don't know if the function is run successfully by the
	 * return value. It can be judged in other places, such as
	 * eBPF programs.
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	return perf_event_read_local(event);
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
Loading