
Commit e3b073c7 authored by Daniel Borkmann

Merge branch 'bpf-nfp-map-offload'



Jakub Kicinski says:

====================
This set adds support for creating maps on networking devices.  BPF is
programs + maps; pure program offload has been around for quite some
time, and this patch set adds the map part of the equation.

Maps are allocated on the target device from the start.  There is no
host copy when a map is created on the device.  Device maps are
represented by struct bpf_offloaded_map, regardless of type.  Host
programs can't access such maps; access is only possible from a program
also loaded to the same device, or via the BPF syscall.

Offloaded programs are currently only allowed to perform lookups; the
control plane is responsible for populating the maps.

For brevity, only the infrastructure and basic NFP patches are included.
Target device reporting, netdevsim support, and tests will follow, as
will some further optimizations to the NFP code.

v2:
 - leave out the array maps; we will add them trivially later to avoid
   merge conflicts with the ongoing Spectre & Meltdown mitigations.
====================
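
As an illustration of the control-plane flow described above, here is a
minimal user-space sketch that creates a device-bound map by setting
map_ifindex in the BPF_MAP_CREATE attributes.  The interface name "nfp0"
and the map dimensions are placeholders, and the sketch assumes a kernel
with BPF map offload support (map_ifindex in union bpf_attr):

#include <linux/bpf.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_HASH;
	attr.key_size = 4;	/* placeholder sizes - must fit device limits */
	attr.value_size = 8;
	attr.max_entries = 128;
	attr.map_ifindex = if_nametoindex("nfp0");	/* placeholder netdev */

	/* No host copy is allocated - the map lives on the device */
	fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (fd < 0) {
		perror("BPF_MAP_CREATE");
		return 1;
	}
	printf("offloaded map fd: %d\n", fd);
	close(fd);
	return 0;
}

Since no host copy exists, every lookup or update on the returned fd is
serviced over the FW control-message channel added below.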

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents fdde5f3b 1bba4c41
+1 −0
@@ -44,6 +44,7 @@ endif

ifeq ($(CONFIG_BPF_SYSCALL),y)
nfp-objs += \
	    bpf/cmsg.o \
	    bpf/main.o \
	    bpf/offload.o \
	    bpf/verifier.o \
+446 −0
/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/wait.h>

#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"

#define cmsg_warn(bpf, msg...)	nn_dp_warn(&(bpf)->app->ctrl->dp, msg)

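/* Only allow a quarter of the 16-bit tag space to be in flight at once,
 * so a tag is not recycled while a late reply to it may still arrive.
 */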
#define NFP_BPF_TAG_ALLOC_SPAN	(U16_MAX / 4)

static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
{
	u16 used_tags;

	used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;

	return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
}

static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
{
	/* All FW communication for BPF is request-reply.  To make sure we
	 * don't reuse the message ID too early after timeout - limit the
	 * number of requests in flight.
	 */
	if (nfp_bpf_all_tags_busy(bpf)) {
		cmsg_warn(bpf, "all FW request contexts busy!\n");
		return -EAGAIN;
	}

	WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
	return bpf->tag_alloc_next++;
}

static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
{
	WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));

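	/* Tags are freed out of order; advance the reclaim pointer past
	 * any contiguous run of already-freed tags.
	 */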
	while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
	       bpf->tag_alloc_last != bpf->tag_alloc_next)
		bpf->tag_alloc_last++;
}

static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{
	struct sk_buff *skb;

	skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
	if (!skb)
		return NULL;
	skb_put(skb, size);

	return skb;
}

static struct sk_buff *
nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
{
	unsigned int size;

	size = sizeof(struct cmsg_req_map_op);
	size += sizeof(struct cmsg_key_value_pair) * n;

	return nfp_bpf_cmsg_alloc(bpf, size);
}

static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
{
	struct cmsg_hdr *hdr;

	hdr = (struct cmsg_hdr *)skb->data;

	return be16_to_cpu(hdr->tag);
}

static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
	unsigned int msg_tag;
	struct sk_buff *skb;

	skb_queue_walk(&bpf->cmsg_replies, skb) {
		msg_tag = nfp_bpf_cmsg_get_tag(skb);
		if (msg_tag == tag) {
			nfp_bpf_free_tag(bpf, tag);
			__skb_unlink(skb, &bpf->cmsg_replies);
			return skb;
		}
	}

	return NULL;
}

static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(bpf->app->ctrl);
	skb = __nfp_bpf_reply(bpf, tag);
	nfp_ctrl_unlock(bpf->app->ctrl);

	return skb;
}

static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(bpf->app->ctrl);
	skb = __nfp_bpf_reply(bpf, tag);
	if (!skb)
		nfp_bpf_free_tag(bpf, tag);
	nfp_ctrl_unlock(bpf->app->ctrl);

	return skb;
}

static struct sk_buff *
nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
			int tag)
{
	struct sk_buff *skb;
	int err;

	err = wait_event_interruptible_timeout(bpf->cmsg_wq,
					       skb = nfp_bpf_reply(bpf, tag),
					       msecs_to_jiffies(5000));
	/* We didn't get a response - try one last time and atomically drop
	 * the tag even if no response is matched.
	 */
	if (!skb)
		skb = nfp_bpf_reply_drop_tag(bpf, tag);
	if (err < 0) {
		cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
			  err == -ERESTARTSYS ? "interrupted" : "error",
			  type, err);
		return ERR_PTR(err);
	}
	if (!skb) {
		cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
			  type);
		return ERR_PTR(-ETIMEDOUT);
	}

	return skb;
}

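/* Stamp the request header with a fresh tag, send it to the FW and sleep
 * until the matching reply arrives (or the 5 second timeout expires),
 * then sanity-check the reply's size and type.
 */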
static struct sk_buff *
nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
			 enum nfp_bpf_cmsg_type type, unsigned int reply_size)
{
	struct cmsg_hdr *hdr;
	int tag;

	nfp_ctrl_lock(bpf->app->ctrl);
	tag = nfp_bpf_alloc_tag(bpf);
	if (tag < 0) {
		nfp_ctrl_unlock(bpf->app->ctrl);
		dev_kfree_skb_any(skb);
		return ERR_PTR(tag);
	}

	hdr = (void *)skb->data;
	hdr->ver = CMSG_MAP_ABI_VERSION;
	hdr->type = type;
	hdr->tag = cpu_to_be16(tag);

	__nfp_app_ctrl_tx(bpf->app, skb);

	nfp_ctrl_unlock(bpf->app->ctrl);

	skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
	if (IS_ERR(skb))
		return skb;

	hdr = (struct cmsg_hdr *)skb->data;
	/* 0 reply_size means caller will do the validation */
	if (reply_size && skb->len != reply_size) {
		cmsg_warn(bpf, "cmsg drop - wrong size %d != %d!\n",
			  skb->len, reply_size);
		goto err_free;
	}
	if (hdr->type != __CMSG_REPLY(type)) {
		cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
			  hdr->type, __CMSG_REPLY(type));
		goto err_free;
	}

	return skb;
err_free:
	dev_kfree_skb_any(skb);
	return ERR_PTR(-EIO);
}

static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
			 struct cmsg_reply_map_simple *reply)
{
	static const int res_table[] = {
		[CMSG_RC_SUCCESS]	= 0,
		[CMSG_RC_ERR_MAP_FD]	= -EBADFD,
		[CMSG_RC_ERR_MAP_NOENT]	= -ENOENT,
		[CMSG_RC_ERR_MAP_ERR]	= -EINVAL,
		[CMSG_RC_ERR_MAP_PARSE]	= -EIO,
		[CMSG_RC_ERR_MAP_EXIST]	= -EEXIST,
		[CMSG_RC_ERR_MAP_NOMEM]	= -ENOMEM,
		[CMSG_RC_ERR_MAP_E2BIG]	= -E2BIG,
	};
	u32 rc;

	rc = be32_to_cpu(reply->rc);
	if (rc >= ARRAY_SIZE(res_table)) {
		cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
		return -EIO;
	}

	return res_table[rc];
}

long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
{
	struct cmsg_reply_map_alloc_tbl *reply;
	struct cmsg_req_map_alloc_tbl *req;
	struct sk_buff *skb;
	u32 tid;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->key_size = cpu_to_be32(map->key_size);
	req->value_size = cpu_to_be32(map->value_size);
	req->max_entries = cpu_to_be32(map->max_entries);
	req->map_type = cpu_to_be32(map->map_type);
	req->map_flags = 0;

	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
				       sizeof(*reply));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		goto err_free;

	tid = be32_to_cpu(reply->tid);
	dev_consume_skb_any(skb);

	return tid;
err_free:
	dev_kfree_skb_any(skb);
	return err;
}

void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
{
	struct cmsg_reply_map_free_tbl *reply;
	struct cmsg_req_map_free_tbl *req;
	struct sk_buff *skb;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb) {
		cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
		return;
	}

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);

	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
				       sizeof(*reply));
	if (IS_ERR(skb)) {
		cmsg_warn(bpf, "leaking map - I/O error\n");
		return;
	}

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);

	dev_consume_skb_any(skb);
}

static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
		      enum nfp_bpf_cmsg_type op,
		      u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
	struct nfp_bpf_map *nfp_map = offmap->dev_priv;
	struct nfp_app_bpf *bpf = nfp_map->bpf;
	struct bpf_map *map = &offmap->map;
	struct cmsg_reply_map_op *reply;
	struct cmsg_req_map_op *req;
	struct sk_buff *skb;
	int err;

	/* FW messages have no space for more than 32 bits of flags */
	if (flags >> 32)
		return -EOPNOTSUPP;

	skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);
	req->count = cpu_to_be32(1);
	req->flags = cpu_to_be32(flags);

	/* Copy inputs */
	if (key)
		memcpy(&req->elem[0].key, key, map->key_size);
	if (value)
		memcpy(&req->elem[0].value, value, map->value_size);

	skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
				       sizeof(*reply) + sizeof(*reply->elem));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		goto err_free;

	/* Copy outputs */
	if (out_key)
		memcpy(out_key, &reply->elem[0].key, map->key_size);
	if (out_value)
		memcpy(out_value, &reply->elem[0].value, map->value_size);

	dev_consume_skb_any(skb);

	return 0;
err_free:
	dev_kfree_skb_any(skb);
	return err;
}

int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
				     key, value, flags, NULL, NULL);
}

int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
				     key, NULL, 0, NULL, NULL);
}

int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
				     key, NULL, 0, NULL, value);
}

int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
				     NULL, NULL, 0, next_key, NULL);
}

int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
				     key, NULL, 0, next_key, NULL);
}

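/* RX handler for the control vNIC: queue the FW reply and wake up the
 * thread waiting on its tag; replies to tags no one allocated are dropped.
 */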
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_app_bpf *bpf = app->priv;
	unsigned int tag;

	if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
		cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
		goto err_free;
	}

	nfp_ctrl_lock(bpf->app->ctrl);

	tag = nfp_bpf_cmsg_get_tag(skb);
	if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
		cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
			  tag);
		goto err_unlock;
	}

	__skb_queue_tail(&bpf->cmsg_replies, skb);
	wake_up_interruptible_all(&bpf->cmsg_wq);

	nfp_ctrl_unlock(bpf->app->ctrl);

	return;
err_unlock:
	nfp_ctrl_unlock(bpf->app->ctrl);
err_free:
	dev_kfree_skb_any(skb);
}
+103 −0
@@ -38,7 +38,14 @@
#include <linux/types.h>

enum bpf_cap_tlv_type {
	NFP_BPF_CAP_TYPE_FUNC		= 1,
	NFP_BPF_CAP_TYPE_ADJUST_HEAD	= 2,
	NFP_BPF_CAP_TYPE_MAPS		= 3,
};

struct nfp_bpf_cap_tlv_func {
	__le32 func_id;
	__le32 func_addr;
};

struct nfp_bpf_cap_tlv_adjust_head {
@@ -51,4 +58,100 @@ struct nfp_bpf_cap_tlv_adjust_head {

#define NFP_BPF_ADJUST_HEAD_NO_META	BIT(0)

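/* Map capabilities advertised by the FW: a mask of supported map types
 * and global limits on map count, element count, and key/value sizes.
 */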
struct nfp_bpf_cap_tlv_maps {
	__le32 types;
	__le32 max_maps;
	__le32 max_elems;
	__le32 max_key_sz;
	__le32 max_val_sz;
	__le32 max_elem_sz;
};

/*
 * Types defined for map related control messages
 */
#define CMSG_MAP_ABI_VERSION		1

enum nfp_bpf_cmsg_type {
	CMSG_TYPE_MAP_ALLOC	= 1,
	CMSG_TYPE_MAP_FREE	= 2,
	CMSG_TYPE_MAP_LOOKUP	= 3,
	CMSG_TYPE_MAP_UPDATE	= 4,
	CMSG_TYPE_MAP_DELETE	= 5,
	CMSG_TYPE_MAP_GETNEXT	= 6,
	CMSG_TYPE_MAP_GETFIRST	= 7,
	__CMSG_TYPE_MAP_MAX,
};

#define CMSG_TYPE_MAP_REPLY_BIT		7
#define __CMSG_REPLY(req)		(BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req))
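/* e.g. the reply to a CMSG_TYPE_MAP_LOOKUP (0x3) request has type 0x83 */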

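/* Key and value sizes on the wire, in 32-bit long-words: 16 LW = 64 bytes */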
#define CMSG_MAP_KEY_LW			16
#define CMSG_MAP_VALUE_LW		16

enum nfp_bpf_cmsg_status {
	CMSG_RC_SUCCESS			= 0,
	CMSG_RC_ERR_MAP_FD		= 1,
	CMSG_RC_ERR_MAP_NOENT		= 2,
	CMSG_RC_ERR_MAP_ERR		= 3,
	CMSG_RC_ERR_MAP_PARSE		= 4,
	CMSG_RC_ERR_MAP_EXIST		= 5,
	CMSG_RC_ERR_MAP_NOMEM		= 6,
	CMSG_RC_ERR_MAP_E2BIG		= 7,
};

struct cmsg_hdr {
	u8 type;
	u8 ver;
	__be16 tag;
};

struct cmsg_reply_map_simple {
	struct cmsg_hdr hdr;
	__be32 rc;
};

struct cmsg_req_map_alloc_tbl {
	struct cmsg_hdr hdr;
	__be32 key_size;		/* in bytes */
	__be32 value_size;		/* in bytes */
	__be32 max_entries;
	__be32 map_type;
	__be32 map_flags;		/* reserved */
};

struct cmsg_reply_map_alloc_tbl {
	struct cmsg_reply_map_simple reply_hdr;
	__be32 tid;
};

struct cmsg_req_map_free_tbl {
	struct cmsg_hdr hdr;
	__be32 tid;
};

struct cmsg_reply_map_free_tbl {
	struct cmsg_reply_map_simple reply_hdr;
	__be32 count;
};

struct cmsg_key_value_pair {
	__be32 key[CMSG_MAP_KEY_LW];
	__be32 value[CMSG_MAP_VALUE_LW];
};

struct cmsg_req_map_op {
	struct cmsg_hdr hdr;
	__be32 tid;
	__be32 count;
	__be32 flags;
	struct cmsg_key_value_pair elem[0];
};

struct cmsg_reply_map_op {
	struct cmsg_reply_map_simple reply_hdr;
	__be32 count;
	__be32 resv;
	struct cmsg_key_value_pair elem[0];
};
#endif
+154 −9
@@ -483,6 +483,21 @@ static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
	}
}

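/* Load a 16-bit immediate and mark the instruction word with a relocation
 * type so the actual value can be patched in at program load time.
 */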
static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
	       enum nfp_relo_type relo)
{
	if (imm > 0xffff) {
		pr_err("relocation of a large immediate!\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If the @imm is small enough encode it directly in operand and return
 * otherwise load @imm to a spare register and return its encoding.
@@ -538,27 +553,51 @@ wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}

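/* A 40-bit address lives in a GPR pair: low 32 bits in @src_gpr, high bits
 * in @src_gpr + 1.  Add @offset to the low word and propagate the carry.
 */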
static void
addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
	      swreg *rega, swreg *regb)
{
	if (offset == reg_imm(0)) {
		*rega = reg_a(src_gpr);
		*regb = reg_b(src_gpr + 1);
		return;
	}

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
		 reg_imm(0));
	*rega = imm_a(nfp_prog);
	*regb = imm_b(nfp_prog);
}

/* NFP has a Command Push Pull bus which supports bulk memory operations. */
static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool descending_seq = meta->ldst_gather_len < 0;
	s16 len = abs(meta->ldst_gather_len);
	swreg src_base, off;
	bool src_40bit_addr;
	unsigned int i;
	u8 xfer_num;

	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
	src_base = reg_a(meta->insn.src_reg * 2);
	xfer_num = round_up(len, 4) / 4;

	if (src_40bit_addr)
		addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
			      &off);

	/* Setup PREV_ALU fields to override memory read length. */
	if (len > 32)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Memory read from source addr into transfer-in registers. */
-	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base,
-		     off, xfer_num - 1, true, len > 32);
+	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
+		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
+		     src_base, off, xfer_num - 1, true, len > 32);

	/* Move from transfer-in to transfer-out. */
	for (i = 0; i < xfer_num; i++)
@@ -696,20 +735,20 @@ data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
}

static int
-data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
-		   u8 dst_gpr, int size)
+data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
+		   swreg lreg, swreg rreg, int size, enum cmd_mode mode)
{
	unsigned int i;
	u8 mask, sz;

-	/* We load the value from the address indicated in @offset and then
+	/* We load the value from the address indicated in rreg + lreg and then
	 * mask out the data we don't need.  Note: this is little endian!
	 */
	sz = max(size, 4);
	mask = size < 4 ? GENMASK(size - 1, 0) : 0;

-	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0,
-		 reg_a(src_gpr), offset, sz / 4 - 1, true);
+	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
+		 lreg, rreg, sz / 4 - 1, true);

	i = 0;
	if (mask)
@@ -725,6 +764,26 @@ data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
	return 0;
}

static int
data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
				  size, CMD_MODE_32b);
}

static int
data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	swreg rega, regb;

	addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);

	return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
				  size, CMD_MODE_40b_BA);
}

static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
@@ -1279,6 +1338,56 @@ static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
	return 0;
}

static int
map_lookup_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	struct bpf_offloaded_map *offmap;
	struct nfp_bpf_map *nfp_map;
	bool load_lm_ptr;
	u32 ret_tgt;
	s64 lm_off;
	swreg tid;

	offmap = (struct bpf_offloaded_map *)meta->arg1.map_ptr;
	nfp_map = offmap->dev_priv;

	/* We only have to reload LM0 if the key is not at start of stack */
	lm_off = nfp_prog->stack_depth;
	lm_off += meta->arg2.var_off.value + meta->arg2.off;
	load_lm_ptr = meta->arg2_var_off || lm_off;

	/* Set LM0 to start of key */
	if (load_lm_ptr)
		emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);

	/* Load map ID into a register, it should actually fit as an immediate
	 * but in case it doesn't deal with it here, not in the delay slots.
	 */
	tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog));

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + BPF_FUNC_map_lookup_elem,
		     2, RELO_BR_HELPER);
	ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;

	/* Load map ID into A0 */
	wrp_mov(nfp_prog, reg_a(0), tid);

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	/* Reset the LM0 pointer */
	if (!load_lm_ptr)
		return 0;

	emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
	wrp_nops(nfp_prog, 3);

	return 0;
}

/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
@@ -1713,8 +1822,20 @@ mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,

	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

-	return data_ld_host_order(nfp_prog, meta->insn.src_reg * 2, tmp_reg,
-				  meta->insn.dst_reg * 2, size);
+	return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
+					 tmp_reg, meta->insn.dst_reg * 2, size);
}

static int
mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{
	swreg tmp_reg;

	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
					 tmp_reg, meta->insn.dst_reg * 2, size);
}

static int
@@ -1738,6 +1859,9 @@ mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		return mem_ldx_stack(nfp_prog, meta, size,
				     meta->ptr.off + meta->ptr.var_off.value);

	if (meta->ptr.type == PTR_TO_MAP_VALUE)
		return mem_ldx_emem(nfp_prog, meta, size);

	return -EOPNOTSUPP;
}

@@ -2058,6 +2182,8 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
	switch (meta->insn.imm) {
	case BPF_FUNC_xdp_adjust_head:
		return adjust_head(nfp_prog, meta);
	case BPF_FUNC_map_lookup_elem:
		return map_lookup_stack(nfp_prog, meta);
	default:
		WARN_ONCE(1, "verifier allowed unsupported function\n");
		return -EOPNOTSUPP;
@@ -2794,6 +2920,7 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)

	for (i = 0; i < nfp_prog->prog_len; i++) {
		enum nfp_relo_type special;
		u32 val;

		special = FIELD_GET(OP_RELO_TYPE, prog[i]);
		switch (special) {
@@ -2813,6 +2940,24 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
		case RELO_BR_NEXT_PKT:
			br_set_offset(&prog[i], bv->tgt_done);
			break;
		case RELO_BR_HELPER:
			val = br_get_offset(prog[i]);
			val -= BR_OFF_RELO;
			switch (val) {
			case BPF_FUNC_map_lookup_elem:
				val = nfp_prog->bpf->helpers.map_lookup;
				break;
			default:
				pr_err("relocation of unknown helper %d\n",
				       val);
				err = -EINVAL;
				goto err_free_prog;
			}
			br_set_offset(&prog[i], val);
			break;
		case RELO_IMMED_REL:
			immed_add_value(&prog[i], bv->start_off);
			break;
		}

		prog[i] &= ~OP_RELO_TYPE;
+59 −1
@@ -251,6 +251,45 @@ nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
	return 0;
}

static int
nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	struct nfp_bpf_cap_tlv_func __iomem *cap = value;

	if (length < sizeof(*cap)) {
		nfp_err(bpf->app->cpp, "truncated function TLV: %d\n", length);
		return -EINVAL;
	}

	switch (readl(&cap->func_id)) {
	case BPF_FUNC_map_lookup_elem:
		bpf->helpers.map_lookup = readl(&cap->func_addr);
		break;
	}

	return 0;
}

static int
nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	struct nfp_bpf_cap_tlv_maps __iomem *cap = value;

	if (length < sizeof(*cap)) {
		nfp_err(bpf->app->cpp, "truncated maps TLV: %d\n", length);
		return -EINVAL;
	}

	bpf->maps.types = readl(&cap->types);
	bpf->maps.max_maps = readl(&cap->max_maps);
	bpf->maps.max_elems = readl(&cap->max_elems);
	bpf->maps.max_key_sz = readl(&cap->max_key_sz);
	bpf->maps.max_val_sz = readl(&cap->max_val_sz);
	bpf->maps.max_elem_sz = readl(&cap->max_elem_sz);

	return 0;
}

static int nfp_bpf_parse_capabilities(struct nfp_app *app)
{
	struct nfp_cpp *cpp = app->pf->cpp;
@@ -276,11 +315,19 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
			goto err_release_free;

		switch (type) {
		case NFP_BPF_CAP_TYPE_FUNC:
			if (nfp_bpf_parse_cap_func(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_ADJUST_HEAD:
			if (nfp_bpf_parse_cap_adjust_head(app->priv, value,
							  length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_MAPS:
			if (nfp_bpf_parse_cap_maps(app->priv, value, length))
				goto err_release_free;
			break;
		default:
			nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
			break;
@@ -313,6 +360,10 @@ static int nfp_bpf_init(struct nfp_app *app)
	bpf->app = app;
	app->priv = bpf;

	skb_queue_head_init(&bpf->cmsg_replies);
	init_waitqueue_head(&bpf->cmsg_wq);
	INIT_LIST_HEAD(&bpf->map_list);

	err = nfp_bpf_parse_capabilities(app);
	if (err)
		goto err_free_bpf;
@@ -326,7 +377,12 @@ static int nfp_bpf_init(struct nfp_app *app)

static void nfp_bpf_clean(struct nfp_app *app)
{
-	kfree(app->priv);
+	struct nfp_app_bpf *bpf = app->priv;
+
+	WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
+	WARN_ON(!list_empty(&bpf->map_list));
+	WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
+	kfree(bpf);
}

const struct nfp_app_type app_bpf = {
@@ -343,6 +399,8 @@ const struct nfp_app_type app_bpf = {
	.vnic_alloc	= nfp_bpf_vnic_alloc,
	.vnic_free	= nfp_bpf_vnic_free,

	.ctrl_msg_rx	= nfp_bpf_ctrl_msg_rx,

	.setup_tc	= nfp_bpf_setup_tc,
	.tc_busy	= nfp_bpf_tc_busy,
	.bpf		= nfp_ndo_bpf,