Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1ca94d79 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'nfp-flower-vxlan-tunnel-offload'



Simon Horman says:

====================
nfp: flower vxlan tunnel offload

John says:

This patch set allows offloading of TC flower match and set tunnel fields
to the NFP. The initial focus is on VXLAN traffic. Due to the current
state of the NFP firmware, only VXLAN traffic on well known port 4789 is
handled. The match and action fields must explicitly set this value to be
supported. Tunnel end point information is also offloaded to the NFP for
both encapsulation and decapsulation. The NFP expects 3 separate data sets
to be supplied.

For decapsulation, 2 separate lists exist; a list of MAC addresses
referenced by an index comprised of the port number, and a list of IP
addresses. These IP addresses are not connected to a MAC or port. The MAC
addresses can be written as a block or one at a time (because they have an
index, previous values can be overwritten) while the IP addresses are
always written as a list of all the available IPs. Because the MAC address
used as a tunnel end point may be associated with a physical port or may
be a virtual netdev like an OVS bridge, we do not know which addresses
should be offloaded. For this reason, all MAC addresses of active netdevs
are offloaded to the NFP. A notifier checks for changes to any currently
offloaded MACs or any new netdevs that may occur. For IP addresses, the
tunnel end point used in the rules is known, as the destination IP address
must be specified in the flower classifier rule. When a new IP address
appears in a rule, the IP address is offloaded. The IP is removed from the
offloaded list when all rules matching on that IP are deleted.

For encapsulation, a next hop table is updated on the NFP that contains
the source/dest IPs, MACs and egress port. These are written individually
when requested. If the NFP tries to encapsulate a packet but does not know
the next hop, then it sends a request to the host. The host carries out a
route lookup and populates the given entry on the NFP table. A notifier
also exists to check for any links changing or going down in the kernel
next hop table. If an offloaded next hop entry is removed from the kernel
then it is also removed on the NFP.

The NFP periodically sends a message to the host telling it which tunnel
ports have packets egressing the system. The host uses this information to
update the used value in the neighbour entry. This means that, rather than
expire when it times out, the kernel will send an ARP to check if the link
is still live. From an NFP perspective, this means that valid entries will
not be removed from its next hop table.
====================

Acked-by: default avatarJakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 14a0d032 856f5b13
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -37,7 +37,8 @@ nfp-objs += \
	    flower/main.o \
	    flower/match.o \
	    flower/metadata.o \
	    flower/offload.o
	    flower/offload.o \
	    flower/tunnel_conf.o
endif

ifeq ($(CONFIG_BPF_SYSCALL),y)
+152 −17
Original line number Diff line number Diff line
@@ -37,6 +37,7 @@
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
@@ -80,14 +81,27 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}

static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
					 enum nfp_flower_tun_type tun_type)
{
	if (!out_dev->rtnl_link_ops)
		return false;

	if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
		return tun_type == NFP_FL_TUNNEL_VXLAN;

	return false;
}

static int
nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
	      struct nfp_fl_payload *nfp_flow, bool last,
	      struct net_device *in_dev)
	      struct net_device *in_dev, enum nfp_flower_tun_type tun_type,
	      int *tun_out_cnt)
{
	size_t act_size = sizeof(struct nfp_fl_output);
	u16 tmp_output_op, tmp_flags;
	struct net_device *out_dev;
	u16 tmp_output_op;
	int ifindex;

	/* Set action opcode to output action. */
@@ -97,16 +111,31 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,

	output->a_op = cpu_to_be16(tmp_output_op);

	/* Set action output parameters. */
	output->flags = cpu_to_be16(last ? NFP_FL_OUT_FLAGS_LAST : 0);

	ifindex = tcf_mirred_ifindex(action);
	out_dev = __dev_get_by_index(dev_net(in_dev), ifindex);
	if (!out_dev)
		return -EOPNOTSUPP;

	/* Only offload egress ports are on the same device as the ingress
	 * port.
	tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

	if (tun_type) {
		/* Verify the egress netdev matches the tunnel type. */
		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
			return -EOPNOTSUPP;

		if (*tun_out_cnt)
			return -EOPNOTSUPP;
		(*tun_out_cnt)++;

		output->flags = cpu_to_be16(tmp_flags |
					    NFP_FL_OUT_FLAGS_USE_TUN);
		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else {
		/* Set action output parameters. */
		output->flags = cpu_to_be16(tmp_flags);

		/* Only offload if egress ports are on the same device as the
		 * ingress port.
		 */
		if (!switchdev_port_same_parent_id(in_dev, out_dev))
			return -EOPNOTSUPP;
@@ -114,17 +143,94 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
		output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
		if (!output->port)
			return -EOPNOTSUPP;

	}
	nfp_flow->meta.shortcut = output->port;

	return 0;
}

/* Only tunnel-set actions targeting the well-known VXLAN UDP destination
 * port (NFP_FL_VXLAN_PORT) can be offloaded by the firmware.
 */
static bool nfp_fl_supported_tun_port(const struct tc_action *action)
{
	struct ip_tunnel_info *info = tcf_tunnel_info(action);

	return htons(NFP_FL_VXLAN_PORT) == info->key.tp_dst;
}

/* Insert a pre-tunnel action at the front of the action list.
 * @act_data: start of the flow's action data buffer
 * @act_len:  bytes of actions already written into @act_data
 *
 * Returns a pointer to the newly placed (zeroed, opcode-filled) action so
 * the caller can complete its fields later (e.g. ipv4_dst).
 */
static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
	struct nfp_fl_pre_tunnel *pre_tun;
	size_t pre_tun_sz = sizeof(*pre_tun);
	u16 a_op;

	/* The pre-tunnel action must come first in the action list; shift
	 * any actions already emitted to make room at the front.
	 */
	if (act_len)
		memmove(act_data + pre_tun_sz, act_data, act_len);

	pre_tun = (struct nfp_fl_pre_tunnel *)act_data;
	memset(pre_tun, 0, pre_tun_sz);

	a_op = FIELD_PREP(NFP_FL_ACT_LEN_LW, pre_tun_sz >> NFP_FL_LW_SIZ) |
	       FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_PRE_TUNNEL);
	pre_tun->a_op = cpu_to_be16(a_op);

	return pre_tun;
}

/* Fill a set-IPv4-tunnel action from a TC tunnel-set action and complete
 * the matching pre-tunnel action with the tunnel destination address.
 *
 * Returns 0 on success or -EOPNOTSUPP if the tunnel carries options
 * (e.g. VXLAN-GPE), which the firmware cannot handle.
 */
static int
nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan,
		 const struct tc_action *action,
		 struct nfp_fl_pre_tunnel *pre_tun)
{
	struct ip_tunnel_info *tun_info = tcf_tunnel_info(action);
	size_t act_size = sizeof(*set_vxlan);
	u32 type_index;
	u16 a_op;
	/* Currently support one pre-tunnel so index is always 0. */
	int pretun_idx = 0;

	/* Do not support options e.g. vxlan gpe. */
	if (tun_info->options_len)
		return -EOPNOTSUPP;

	a_op = FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
	       FIELD_PREP(NFP_FL_ACT_JMP_ID,
			  NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL);
	set_vxlan->a_op = cpu_to_be16(a_op);

	/* Encode tunnel type together with the pre-tunnel index. */
	type_index = FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, NFP_FL_TUNNEL_VXLAN) |
		     FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
	set_vxlan->tun_type_index = cpu_to_be32(type_index);

	/* Copy the tunnel key fields the firmware needs for encap. */
	set_vxlan->tun_id = tun_info->key.tun_id;
	set_vxlan->tun_flags = tun_info->key.tun_flags;
	set_vxlan->ipv4_ttl = tun_info->key.ttl;
	set_vxlan->ipv4_tos = tun_info->key.tos;

	/* Complete pre_tunnel action. */
	pre_tun->ipv4_dst = tun_info->key.u.ipv4.dst;

	return 0;
}

static int
nfp_flower_loop_action(const struct tc_action *a,
		       struct nfp_fl_payload *nfp_fl, int *a_len,
		       struct net_device *netdev)
		       struct net_device *netdev,
		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt)
{
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_set_vxlan *s_vxl;
	struct nfp_fl_push_vlan *psh_v;
	struct nfp_fl_pop_vlan *pop_v;
	struct nfp_fl_output *output;
@@ -137,7 +243,8 @@ nfp_flower_loop_action(const struct tc_action *a,
			return -EOPNOTSUPP;

		output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
		err = nfp_fl_output(output, a, nfp_fl, true, netdev);
		err = nfp_fl_output(output, a, nfp_fl, true, netdev, *tun_type,
				    tun_out_cnt);
		if (err)
			return err;

@@ -147,7 +254,8 @@ nfp_flower_loop_action(const struct tc_action *a,
			return -EOPNOTSUPP;

		output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
		err = nfp_fl_output(output, a, nfp_fl, false, netdev);
		err = nfp_fl_output(output, a, nfp_fl, false, netdev, *tun_type,
				    tun_out_cnt);
		if (err)
			return err;

@@ -170,6 +278,29 @@ nfp_flower_loop_action(const struct tc_action *a,

		nfp_fl_push_vlan(psh_v, a);
		*a_len += sizeof(struct nfp_fl_push_vlan);
	} else if (is_tcf_tunnel_set(a) && nfp_fl_supported_tun_port(a)) {
		/* Pre-tunnel action is required for tunnel encap.
		 * This checks for next hop entries on NFP.
		 * If none, the packet falls back before applying other actions.
		 */
		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
		    sizeof(struct nfp_fl_set_vxlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		*tun_type = NFP_FL_TUNNEL_VXLAN;
		pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
		*a_len += sizeof(struct nfp_fl_pre_tunnel);

		s_vxl = (struct nfp_fl_set_vxlan *)&nfp_fl->action_data[*a_len];
		err = nfp_fl_set_vxlan(s_vxl, a, pre_tun);
		if (err)
			return err;

		*a_len += sizeof(struct nfp_fl_set_vxlan);
	} else if (is_tcf_tunnel_release(a)) {
		/* Tunnel decap is handled by default so accept action. */
		return 0;
	} else {
		/* Currently we do not handle any other actions. */
		return -EOPNOTSUPP;
@@ -182,18 +313,22 @@ int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
			      struct net_device *netdev,
			      struct nfp_fl_payload *nfp_flow)
{
	int act_len, act_cnt, err;
	int act_len, act_cnt, err, tun_out_cnt;
	enum nfp_flower_tun_type tun_type;
	const struct tc_action *a;
	LIST_HEAD(actions);

	memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
	nfp_flow->meta.act_len = 0;
	tun_type = NFP_FL_TUNNEL_NONE;
	act_len = 0;
	act_cnt = 0;
	tun_out_cnt = 0;

	tcf_exts_to_list(flow->exts, &actions);
	list_for_each_entry(a, &actions, list) {
		err = nfp_flower_loop_action(a, nfp_flow, &act_len, netdev);
		err = nfp_flower_loop_action(a, nfp_flow, &act_len, netdev,
					     &tun_type, &tun_out_cnt);
		if (err)
			return err;
		act_cnt++;
+9 −7
Original line number Diff line number Diff line
@@ -38,17 +38,10 @@
#include <net/dst_metadata.h>

#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "./cmsg.h"

#define nfp_flower_cmsg_warn(app, fmt, args...)				\
	do {								\
		if (net_ratelimit())					\
			nfp_warn((app)->cpp, fmt, ## args);		\
	} while (0)

static struct nfp_flower_cmsg_hdr *
nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
{
@@ -188,6 +181,15 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
	case NFP_FLOWER_CMSG_TYPE_FLOW_STATS:
		nfp_flower_rx_flow_stats(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
		nfp_tunnel_request_route(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
		nfp_tunnel_keep_alive(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_TUN_NEIGH:
		/* Acks from the NFP that the route is added - ignore. */
		break;
	default:
		nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
				     type);
+83 −4
Original line number Diff line number Diff line
@@ -39,6 +39,7 @@
#include <linux/types.h>

#include "../nfp_app.h"
#include "../nfpcore/nfp_cpp.h"

#define NFP_FLOWER_LAYER_META		BIT(0)
#define NFP_FLOWER_LAYER_PORT		BIT(1)
@@ -70,6 +71,8 @@
#define NFP_FL_ACTION_OPCODE_OUTPUT		0
#define NFP_FL_ACTION_OPCODE_PUSH_VLAN		1
#define NFP_FL_ACTION_OPCODE_POP_VLAN		2
#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL	6
#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL		17
#define NFP_FL_ACTION_OPCODE_NUM		32

#define NFP_FL_ACT_JMP_ID		GENMASK(15, 8)
@@ -83,6 +86,22 @@
#define NFP_FL_PUSH_VLAN_CFI		BIT(12)
#define NFP_FL_PUSH_VLAN_VID		GENMASK(11, 0)

/* Tunnel ports */
#define NFP_FL_PORT_TYPE_TUN		0x50000000
#define NFP_FL_IPV4_TUNNEL_TYPE		GENMASK(7, 4)
#define NFP_FL_IPV4_PRE_TUN_INDEX	GENMASK(2, 0)

/* Rate-limited warning printed against the app's CPP handle. */
#define nfp_flower_cmsg_warn(app, fmt, args...)                         \
	do {                                                            \
		if (net_ratelimit())                                    \
			nfp_warn((app)->cpp, fmt, ## args);             \
	} while (0)

/* Tunnel types used in action/port encodings sent to the NFP.
 * NOTE(review): the numeric values appear to be part of the firmware
 * interface (VXLAN is fixed at 2) — do not renumber.
 */
enum nfp_flower_tun_type {
	NFP_FL_TUNNEL_NONE =	0,
	NFP_FL_TUNNEL_VXLAN =	2,
};

struct nfp_fl_output {
	__be16 a_op;
	__be16 flags;
@@ -115,6 +134,25 @@ struct nfp_flower_meta_one {
	u16 reserved;
};

/* Pre-tunnel action: placed first in an action list so the NFP can check
 * for a next-hop entry before applying the remaining actions (the packet
 * falls back to the host if none exists).
 */
struct nfp_fl_pre_tunnel {
	__be16 a_op;		/* action opcode/length word */
	__be16 reserved;
	__be32 ipv4_dst;	/* tunnel endpoint destination IP */
	/* reserved for use with IPv6 addresses */
	__be32 extra[3];
};

/* Set-IPv4-tunnel action payload; fields are copied from the TC
 * ip_tunnel_info key in nfp_fl_set_vxlan().
 */
struct nfp_fl_set_vxlan {
	__be16 a_op;		/* action opcode/length word */
	__be16 reserved;
	__be64 tun_id;		/* tunnel key id */
	__be32 tun_type_index;	/* tunnel type | pre-tunnel table index */
	__be16 tun_flags;
	u8 ipv4_ttl;
	u8 ipv4_tos;
	__be32 extra[2];
} __packed;

/* Metadata with L2 (1W/4B)
 * ----------------------------------------------------------------
 *    3                   2                   1
@@ -230,6 +268,36 @@ struct nfp_flower_ipv6 {
	struct in6_addr ipv6_dst;
};

/* Flow Frame VXLAN --> Tunnel details (4W/16B)
 * -----------------------------------------------------------------
 *    3                   2                   1
 *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                         ipv4_addr_src                         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                         ipv4_addr_dst                         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |           tun_flags           |       tos     |       ttl     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |   gpe_flags   |            Reserved           | Next Protocol |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     VNI                       |   Reserved    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
struct nfp_flower_vxlan {
	__be32 ip_src;		/* tunnel ipv4_addr_src (see diagram above) */
	__be32 ip_dst;		/* tunnel ipv4_addr_dst */
	__be16 tun_flags;
	u8 tos;
	u8 ttl;
	u8 gpe_flags;
	u8 reserved[2];
	u8 nxt_proto;
	__be32 tun_id;		/* VNI; presumably shifted by NFP_FL_TUN_VNI_OFFSET — confirm in match code */
};

#define NFP_FL_TUN_VNI_OFFSET 8

/* The base header for a control message packet.
 * Defines an 8-bit version, and an 8-bit type, padded
 * to a 32-bit word. Rest of the packet is type-specific.
@@ -249,6 +317,11 @@ enum nfp_flower_cmsg_type_port {
	NFP_FLOWER_CMSG_TYPE_FLOW_DEL =		2,
	NFP_FLOWER_CMSG_TYPE_MAC_REPR =		7,
	NFP_FLOWER_CMSG_TYPE_PORT_MOD =		8,
	NFP_FLOWER_CMSG_TYPE_NO_NEIGH =		10,
	NFP_FLOWER_CMSG_TYPE_TUN_MAC =		11,
	NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS =	12,
	NFP_FLOWER_CMSG_TYPE_TUN_NEIGH =	13,
	NFP_FLOWER_CMSG_TYPE_TUN_IPS =		14,
	NFP_FLOWER_CMSG_TYPE_FLOW_STATS =	15,
	NFP_FLOWER_CMSG_TYPE_PORT_ECHO =	16,
	NFP_FLOWER_CMSG_TYPE_MAX =		32,
@@ -282,6 +355,7 @@ enum nfp_flower_cmsg_port_type {
	NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC =	0x0,
	NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT =	0x1,
	NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT =	0x2,
	NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT =  0x3,
};

enum nfp_flower_cmsg_port_vnic_type {
@@ -323,6 +397,11 @@ static inline void *nfp_flower_cmsg_get_data(struct sk_buff *skb)
	return (unsigned char *)skb->data + NFP_FLOWER_CMSG_HLEN;
}

/* Length of a control message's payload: total skb length minus the
 * fixed cmsg header.
 */
static inline int nfp_flower_cmsg_get_data_len(struct sk_buff *skb)
{
	return skb->len - NFP_FLOWER_CMSG_HLEN;
}

struct sk_buff *
nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports);
void
+13 −0
Original line number Diff line number Diff line
@@ -436,6 +436,16 @@ static void nfp_flower_clean(struct nfp_app *app)
	app->priv = NULL;
}

/* App start callback: initialise tunnel offload configuration. */
static int nfp_flower_start(struct nfp_app *app)
{
	return nfp_tunnel_config_start(app);
}

/* App stop callback: tear down tunnel offload configuration. */
static void nfp_flower_stop(struct nfp_app *app)
{
	nfp_tunnel_config_stop(app);
}

const struct nfp_app_type app_flower = {
	.id		= NFP_APP_FLOWER_NIC,
	.name		= "flower",
@@ -453,6 +463,9 @@ const struct nfp_app_type app_flower = {
	.repr_open	= nfp_flower_repr_netdev_open,
	.repr_stop	= nfp_flower_repr_netdev_stop,

	.start		= nfp_flower_start,
	.stop		= nfp_flower_stop,

	.ctrl_msg_rx	= nfp_flower_cmsg_rx,

	.sriov_enable	= nfp_flower_sriov_enable,
Loading