Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bb5e6a82 authored by David S. Miller
Browse files

Merge branch 'net-sched-indirect-tc-block-cb-registration'



Jakub Kicinski says:

====================
net: sched: indirect tc block cb registration

John says:

This patchset introduces an alternative to egdev offload by allowing a
driver to register for block updates when an external device (e.g. tunnel
netdev) is bound to a TC block. Drivers can track new netdevs or register
to existing ones to receive information on such events. Based on this,
they may register for block offload rules using already existing
functions.

The patchset also implements this new indirect block registration in the
NFP driver to allow the offloading of tunnel rules. The use of egdev
offload (which is currently only used for tunnel offload) is subsequently
removed.

RFC v2 -> PATCH
 - removed embedded tracking function from indir block register (now up to
   driver to clean up after itself)
 - refactored NFP code due to recent submissions
 - removed priv list clean function in NFP (list should be cleared by
   indirect block unregisters)

RFC v1 -> v2:
 - free allocated owner struct in block_owner_clean function
 - add geneve type helper function
 - move test stub in NFP (v1 patch 2) to full tunnel offload
   implementation via indirect blocks (v2 patches 3-8)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents d1ce0114 d4b69bad
Loading
Loading
Loading
Loading
+7 −21
Original line number Diff line number Diff line
@@ -2,7 +2,6 @@
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/geneve.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_csum.h>
@@ -11,7 +10,6 @@
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>

#include "cmsg.h"
#include "main.h"
@@ -92,18 +90,6 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
	return act_size;
}

/* Return true if @out_dev is a tunnel netdev whose kind matches the
 * flower tunnel type @tun_type.  Only VXLAN and Geneve netdevs can
 * match; every other netdev kind yields false.
 */
static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
					 enum nfp_flower_tun_type tun_type)
{
	if (netif_is_vxlan(out_dev))
		return tun_type == NFP_FL_TUNNEL_VXLAN;

	if (netif_is_geneve(out_dev))
		return tun_type == NFP_FL_TUNNEL_GENEVE;

	return false;
}

static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
	      const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
@@ -149,11 +135,12 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
		/* Set action output parameters. */
		output->flags = cpu_to_be16(tmp_flags);

		/* Only offload if egress ports are on the same device as the
		 * ingress port.
		 */
		if (nfp_netdev_is_nfp_repr(in_dev)) {
			/* Confirm ingress and egress are on same device. */
			if (!switchdev_port_same_parent_id(in_dev, out_dev))
				return -EOPNOTSUPP;
		}

		if (!nfp_netdev_is_nfp_repr(out_dev))
			return -EOPNOTSUPP;

@@ -840,9 +827,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
		*a_len += sizeof(struct nfp_fl_push_vlan);
	} else if (is_tcf_tunnel_set(a)) {
		struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
		struct nfp_repr *repr = netdev_priv(netdev);

		*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
		*tun_type = nfp_fl_get_tun_from_act_l4_port(app, a);
		if (*tun_type == NFP_FL_TUNNEL_NONE)
			return -EOPNOTSUPP;

+27 −0
Original line number Diff line number Diff line
@@ -8,6 +8,7 @@
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/geneve.h>
#include <net/vxlan.h>

#include "../nfp_app.h"
#include "../nfpcore/nfp_cpp.h"
@@ -499,6 +500,32 @@ static inline int nfp_flower_cmsg_get_data_len(struct sk_buff *skb)
	return skb->len - NFP_FLOWER_CMSG_HLEN;
}

/* Header-inline variant: return true if @netdev is a tunnel netdev
 * whose kind matches the flower tunnel type @tun_type.  Only VXLAN and
 * Geneve devices can match; anything else returns false.
 */
static inline bool
nfp_fl_netdev_is_tunnel_type(struct net_device *netdev,
			     enum nfp_flower_tun_type tun_type)
{
	if (netif_is_vxlan(netdev))
		return tun_type == NFP_FL_TUNNEL_VXLAN;
	if (netif_is_geneve(netdev))
		return tun_type == NFP_FL_TUNNEL_GENEVE;

	return false;
}

/* Decide whether @netdev is a device type whose TC blocks the driver
 * wants to track for indirect offload.  True for Open vSwitch internal
 * ports (matched by rtnl_link_ops kind string), VXLAN and Geneve
 * netdevs; devices without rtnl_link_ops are rejected outright since
 * the kind string cannot be inspected.
 */
static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
{
	if (!netdev->rtnl_link_ops)
		return false;
	if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
		return true;
	if (netif_is_vxlan(netdev))
		return true;
	if (netif_is_geneve(netdev))
		return true;

	return false;
}

struct sk_buff *
nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports);
void
+6 −12
Original line number Diff line number Diff line
@@ -146,23 +146,12 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
	return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
}

/* repr_init callback (removed by this commit): register the egdev TC
 * callback for a representor netdev, passing the repr private data
 * (netdev_priv) as cb_priv.  Returns the registration result.
 */
static int
nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev)
{
	return tc_setup_cb_egdev_register(netdev,
					  nfp_flower_setup_tc_egress_cb,
					  netdev_priv(netdev));
}

/* repr_clean counterpart: free the representor's app-private data and
 * unregister the egdev TC callback registered at init time.  The
 * cb/cb_priv pair must match the register call for the unregister to
 * find the entry.  kfree(NULL) is a no-op, so an unset app_priv is safe.
 */
static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	kfree(repr->app_priv);

	tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb,
				     netdev_priv(netdev));
}

static void
@@ -568,6 +557,8 @@ static int nfp_flower_init(struct nfp_app *app)
		goto err_cleanup_metadata;
	}

	INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);

	return 0;

err_cleanup_metadata:
@@ -684,6 +675,10 @@ nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
			return ret;
	}

	ret = nfp_flower_reg_indir_block_handler(app, netdev, event);
	if (ret & NOTIFY_STOP_MASK)
		return ret;

	return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}

@@ -705,7 +700,6 @@ const struct nfp_app_type app_flower = {
	.vnic_init	= nfp_flower_vnic_init,
	.vnic_clean	= nfp_flower_vnic_clean,

	.repr_init	= nfp_flower_repr_netdev_init,
	.repr_preclean	= nfp_flower_repr_netdev_preclean,
	.repr_clean	= nfp_flower_repr_netdev_clean,

+8 −6
Original line number Diff line number Diff line
@@ -20,7 +20,6 @@ struct nfp_fl_pre_lag;
struct net_device;
struct nfp_app;

#define NFP_FL_STATS_CTX_DONT_CARE	cpu_to_be32(0xffffffff)
#define NFP_FL_STATS_ELEM_RS		FIELD_SIZEOF(struct nfp_fl_stats_id, \
						     init_unalloc)
#define NFP_FLOWER_MASK_ENTRY_RS	256
@@ -130,6 +129,7 @@ struct nfp_fl_lag {
 * @reify_wait_queue:	wait queue for repr reify response counting
 * @mtu_conf:		Configuration of repr MTU value
 * @nfp_lag:		Link aggregation data block
 * @indr_block_cb_priv:	List of priv data passed to indirect block cbs
 */
struct nfp_flower_priv {
	struct nfp_app *app;
@@ -162,6 +162,7 @@ struct nfp_flower_priv {
	wait_queue_head_t reify_wait_queue;
	struct nfp_mtu_conf mtu_conf;
	struct nfp_fl_lag nfp_lag;
	struct list_head indr_block_cb_priv;
};

/**
@@ -205,7 +206,6 @@ struct nfp_fl_payload {
	char *unmasked_data;
	char *mask_data;
	char *action_data;
	bool ingress_offload;
};

extern const struct rhashtable_params nfp_flower_table_params;
@@ -222,7 +222,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app);

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data);
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
int nfp_flower_compile_flow_match(struct nfp_app *app,
				  struct tc_cls_flower_offload *flow,
				  struct nfp_fl_key_ls *key_ls,
				  struct net_device *netdev,
				  struct nfp_fl_payload *nfp_flow,
@@ -240,7 +241,7 @@ int nfp_modify_flow_metadata(struct nfp_app *app,

struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
			   struct net_device *netdev, __be32 host_ctx);
			   struct net_device *netdev);
struct nfp_fl_payload *
nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);

@@ -256,8 +257,6 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv);
void nfp_flower_lag_init(struct nfp_fl_lag *lag);
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag);
int nfp_flower_lag_reset(struct nfp_fl_lag *lag);
@@ -270,5 +269,8 @@ int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
				       struct nfp_fl_pre_lag *pre_act);
int nfp_flower_lag_get_output_id(struct nfp_app *app,
				 struct net_device *master);
int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event);

#endif
+20 −18
Original line number Diff line number Diff line
@@ -52,10 +52,13 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
		return 0;
	}

	if (tun_type)
	if (tun_type) {
		frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	else
	} else {
		if (!cmsg_port)
			return -EOPNOTSUPP;
		frame->in_port = cpu_to_be32(cmsg_port);
	}

	return 0;
}
@@ -289,17 +292,21 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
	}
}

int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
int nfp_flower_compile_flow_match(struct nfp_app *app,
				  struct tc_cls_flower_offload *flow,
				  struct nfp_fl_key_ls *key_ls,
				  struct net_device *netdev,
				  struct nfp_fl_payload *nfp_flow,
				  enum nfp_flower_tun_type tun_type)
{
	struct nfp_repr *netdev_repr;
	u32 cmsg_port = 0;
	int err;
	u8 *ext;
	u8 *msk;

	if (nfp_netdev_is_nfp_repr(netdev))
		cmsg_port = nfp_repr_get_port_id(netdev);

	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
	memset(nfp_flow->mask_data, 0, key_ls->key_size);

@@ -327,15 +334,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,

	/* Populate Exact Port data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
				      nfp_repr_get_port_id(netdev),
				      false, tun_type);
				      cmsg_port, false, tun_type);
	if (err)
		return err;

	/* Populate Mask Port Data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
				      nfp_repr_get_port_id(netdev),
				      true, tun_type);
				      cmsg_port, true, tun_type);
	if (err)
		return err;

@@ -399,16 +404,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
		msk += sizeof(struct nfp_flower_ipv4_udp_tun);

		/* Configure tunnel end point MAC. */
		if (nfp_netdev_is_nfp_repr(netdev)) {
			netdev_repr = netdev_priv(netdev);
			nfp_tunnel_write_macs(netdev_repr->app);
		nfp_tunnel_write_macs(app);

		/* Store the tunnel destination in the rule data.
		 * This must be present and be an exact match.
		 */
		nfp_flow->nfp_tun_ipv4_addr = tun_dst;
			nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
		}
		nfp_tunnel_add_ipv4_off(app, tun_dst);

		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
			err = nfp_flower_compile_geneve_opt(ext, flow, false);
Loading