Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ab01f4ac authored by Jakub Kicinski, committed by Daniel Borkmann
Browse files

nfp: bpf: remember maps by ID



Record perf maps by map ID, not raw kernel pointer.  This helps
with debug messages, because printing pointers to logs is frowned
upon, and makes debugging easier for the users, as map ID is something
they should be more familiar with.  Note that perf maps are offload
neutral, therefore IDs won't be orphaned.

While at it use a rate limited print helper for the error message.

Reported-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent 09587627
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -43,8 +43,6 @@
#include "fw.h"
#include "main.h"

#define cmsg_warn(bpf, msg...)	nn_dp_warn(&(bpf)->app->ctrl->dp, msg)

#define NFP_BPF_TAG_ALLOC_SPAN	(U16_MAX / 4)

static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
+8 −4
Original line number Diff line number Diff line
@@ -3883,6 +3883,7 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
	struct nfp_insn_meta *meta1, *meta2;
	struct nfp_bpf_map *nfp_map;
	struct bpf_map *map;
	u32 id;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		if (meta1->skip || meta2->skip)
@@ -3894,11 +3895,14 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)

		map = (void *)(unsigned long)((u32)meta1->insn.imm |
					      (u64)meta2->insn.imm << 32);
		if (bpf_map_offload_neutral(map))
			continue;
		if (bpf_map_offload_neutral(map)) {
			id = map->id;
		} else {
			nfp_map = map_to_offmap(map)->dev_priv;
			id = nfp_map->tid;
		}

		meta1->insn.imm = nfp_map->tid;
		meta1->insn.imm = id;
		meta2->insn.imm = 0;
	}

+2 −2
Original line number Diff line number Diff line
@@ -45,8 +45,8 @@

const struct rhashtable_params nfp_bpf_maps_neutral_params = {
	.nelem_hint		= 4,
	.key_len		= FIELD_SIZEOF(struct nfp_bpf_neutral_map, ptr),
	.key_offset		= offsetof(struct nfp_bpf_neutral_map, ptr),
	.key_len		= FIELD_SIZEOF(struct bpf_map, id),
	.key_offset		= offsetof(struct nfp_bpf_neutral_map, map_id),
	.head_offset		= offsetof(struct nfp_bpf_neutral_map, l),
	.automatic_shrinking	= true,
};
+3 −0
Original line number Diff line number Diff line
@@ -47,6 +47,8 @@
#include "../nfp_asm.h"
#include "fw.h"

#define cmsg_warn(bpf, msg...)	nn_dp_warn(&(bpf)->app->ctrl->dp, msg)

/* For relocation logic use up-most byte of branch instruction as scratch
 * area.  Remember to clear this before sending instructions to HW!
 */
@@ -221,6 +223,7 @@ struct nfp_bpf_map {
struct nfp_bpf_neutral_map {
	struct rhash_head l;
	struct bpf_map *ptr;
	u32 map_id;
	u32 count;
};

+13 −9
Original line number Diff line number Diff line
@@ -67,7 +67,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
	ASSERT_RTNL();

	/* Reuse path - other offloaded program is already tracking this map. */
	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map,
	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
					nfp_bpf_maps_neutral_params);
	if (record) {
		nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
@@ -89,6 +89,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
	}

	record->ptr = map;
	record->map_id = map->id;
	record->count = 1;

	err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l,
@@ -457,15 +458,17 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
			 unsigned int len)
{
	struct cmsg_bpf_event *cbe = (void *)data;
	u32 pkt_size, data_size;
	struct bpf_map *map;
	struct nfp_bpf_neutral_map *record;
	u32 pkt_size, data_size, map_id;
	u64 map_id_full;

	if (len < sizeof(struct cmsg_bpf_event))
		return -EINVAL;

	pkt_size = be32_to_cpu(cbe->pkt_size);
	data_size = be32_to_cpu(cbe->data_size);
	map = (void *)(unsigned long)be64_to_cpu(cbe->map_ptr);
	map_id_full = be64_to_cpu(cbe->map_ptr);
	map_id = map_id_full;

	if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
		return -EINVAL;
@@ -473,15 +476,16 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
		return -EINVAL;

	rcu_read_lock();
	if (!rhashtable_lookup_fast(&bpf->maps_neutral, &map,
				    nfp_bpf_maps_neutral_params)) {
	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map_id,
					nfp_bpf_maps_neutral_params);
	if (!record || map_id_full > U32_MAX) {
		rcu_read_unlock();
		pr_warn("perf event: dest map pointer %px not recognized, dropping event\n",
			map);
		cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n",
			  map_id_full, map_id_full);
		return -EINVAL;
	}

	bpf_event_output(map, be32_to_cpu(cbe->cpu_id),
	bpf_event_output(record->ptr, be32_to_cpu(cbe->cpu_id),
			 &cbe->data[round_up(pkt_size, 4)], data_size,
			 cbe->data, pkt_size, nfp_bpf_perf_event_copy);
	rcu_read_unlock();