Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 336f2c03 authored by David S. Miller
Browse files

Merge branch 'mlxsw-Offload-IPv6-multicast-routes'



Ido Schimmel says:

====================
mlxsw: Offload IPv6 multicast routes

Yuval says:

The series is intended to allow offloading IPv6 multicast routes
and is split into two parts:

  - First half of the patches continue extending ip6mr [& refactor ipmr]
    with missing bits necessary for the offloading - fib-notifications,
    mfc refcounting and default rule identification.

  - Second half of the patches extend functionality inside mlxsw,
    beginning with extending lower-parts to support IPv6 mroutes
    to host and later extending the router/mr internal APIs within
    the driver to accommodate support in ipv6 configurations.
    Lastly it adds support in the RTNL_FAMILY_IP6MR notifications,
    allowing driver to react and offload related routes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 02a21de9 6a170d32
Loading
Loading
Loading
Loading
+60 −14
Original line number Diff line number Diff line
@@ -4225,6 +4225,12 @@ MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1);
 */
MLXSW_ITEM32(reg, ritr, ipv4_mc, 0x00, 27, 1);

/* reg_ritr_ipv6_mc
 * IPv6 multicast routing enable.
 * Access: RW
 */
MLXSW_ITEM32(reg, ritr, ipv6_mc, 0x00, 26, 1);

enum mlxsw_reg_ritr_if_type {
	/* VLAN interface. */
	MLXSW_REG_RITR_VLAN_IF,
@@ -4290,6 +4296,14 @@ MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
 */
MLXSW_ITEM32(reg, ritr, ipv4_mc_fe, 0x04, 27, 1);

/* reg_ritr_ipv6_mc_fe
 * IPv6 Multicast Forwarding Enable.
 * When disabled, forwarding is blocked but local traffic (traps and IP to me)
 * will be enabled.
 * Access: RW
 */
MLXSW_ITEM32(reg, ritr, ipv6_mc_fe, 0x04, 26, 1);

/* reg_ritr_lb_en
 * Loop-back filter enable for unicast packets.
 * If the flag is set then loop-back filter for unicast packets is
@@ -4513,12 +4527,14 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
	mlxsw_reg_ritr_ipv4_set(payload, 1);
	mlxsw_reg_ritr_ipv6_set(payload, 1);
	mlxsw_reg_ritr_ipv4_mc_set(payload, 1);
	mlxsw_reg_ritr_ipv6_mc_set(payload, 1);
	mlxsw_reg_ritr_type_set(payload, type);
	mlxsw_reg_ritr_op_set(payload, op);
	mlxsw_reg_ritr_rif_set(payload, rif);
	mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
	mlxsw_reg_ritr_ipv6_fe_set(payload, 1);
	mlxsw_reg_ritr_ipv4_mc_fe_set(payload, 1);
	mlxsw_reg_ritr_ipv6_mc_fe_set(payload, 1);
	mlxsw_reg_ritr_lb_en_set(payload, 1);
	mlxsw_reg_ritr_virtual_router_set(payload, vr_id);
	mlxsw_reg_ritr_mtu_set(payload, mtu);
@@ -6302,30 +6318,34 @@ MLXSW_ITEM32(reg, rmft2, irif_mask, 0x08, 24, 1);
 */
MLXSW_ITEM32(reg, rmft2, irif, 0x08, 0, 16);

/* reg_rmft2_dip4
 * Destination IPv4 address
/* reg_rmft2_dip{4,6}
 * Destination IPv4/6 address
 * Access: RW
 */
MLXSW_ITEM_BUF(reg, rmft2, dip6, 0x10, 16);
MLXSW_ITEM32(reg, rmft2, dip4, 0x1C, 0, 32);

/* reg_rmft2_dip4_mask
/* reg_rmft2_dip{4,6}_mask
 * A bit that is set directs the TCAM to compare the corresponding bit in key. A
 * bit that is clear directs the TCAM to ignore the corresponding bit in key.
 * Access: RW
 */
MLXSW_ITEM_BUF(reg, rmft2, dip6_mask, 0x20, 16);
MLXSW_ITEM32(reg, rmft2, dip4_mask, 0x2C, 0, 32);

/* reg_rmft2_sip4
 * Source IPv4 address
/* reg_rmft2_sip{4,6}
 * Source IPv4/6 address
 * Access: RW
 */
MLXSW_ITEM_BUF(reg, rmft2, sip6, 0x30, 16);
MLXSW_ITEM32(reg, rmft2, sip4, 0x3C, 0, 32);

/* reg_rmft2_sip4_mask
/* reg_rmft2_sip{4,6}_mask
 * A bit that is set directs the TCAM to compare the corresponding bit in key. A
 * bit that is clear directs the TCAM to ignore the corresponding bit in key.
 * Access: RW
 */
MLXSW_ITEM_BUF(reg, rmft2, sip6_mask, 0x40, 16);
MLXSW_ITEM32(reg, rmft2, sip4_mask, 0x4C, 0, 32);

/* reg_rmft2_flexible_action_set
@@ -6343,26 +6363,52 @@ MLXSW_ITEM_BUF(reg, rmft2, flexible_action_set, 0x80,
	       MLXSW_REG_FLEX_ACTION_SET_LEN);

static inline void
mlxsw_reg_rmft2_ipv4_pack(char *payload, bool v, u16 offset, u16 virtual_router,
mlxsw_reg_rmft2_common_pack(char *payload, bool v, u16 offset,
			    u16 virtual_router,
			    enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
			  u32 dip4, u32 dip4_mask, u32 sip4, u32 sip4_mask,
			  const char *flexible_action_set)
			    const char *flex_action_set)
{
	MLXSW_REG_ZERO(rmft2, payload);
	mlxsw_reg_rmft2_v_set(payload, v);
	mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV4);
	mlxsw_reg_rmft2_op_set(payload, MLXSW_REG_RMFT2_OP_READ_WRITE);
	mlxsw_reg_rmft2_offset_set(payload, offset);
	mlxsw_reg_rmft2_virtual_router_set(payload, virtual_router);
	mlxsw_reg_rmft2_irif_mask_set(payload, irif_mask);
	mlxsw_reg_rmft2_irif_set(payload, irif);
	if (flex_action_set)
		mlxsw_reg_rmft2_flexible_action_set_memcpy_to(payload,
							      flex_action_set);
}

/* Pack an IPv4 entry of the RMFT2 (Router Multicast Forwarding Table
 * Version 2) register. Fills the common fields via
 * mlxsw_reg_rmft2_common_pack() (which also copies the flexible action
 * set when one is given), then sets the IPv4 type and the destination/
 * source addresses together with their TCAM masks.
 */
static inline void
mlxsw_reg_rmft2_ipv4_pack(char *payload, bool v, u16 offset, u16 virtual_router,
			  enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
			  u32 dip4, u32 dip4_mask, u32 sip4, u32 sip4_mask,
			  const char *flexible_action_set)
{
	mlxsw_reg_rmft2_common_pack(payload, v, offset, virtual_router,
				    irif_mask, irif, flexible_action_set);
	mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV4);
	mlxsw_reg_rmft2_dip4_set(payload, dip4);
	mlxsw_reg_rmft2_dip4_mask_set(payload, dip4_mask);
	mlxsw_reg_rmft2_sip4_set(payload, sip4);
	mlxsw_reg_rmft2_sip4_mask_set(payload, sip4_mask);
	/* NOTE: the flexible action set is already copied by
	 * mlxsw_reg_rmft2_common_pack(); the former duplicate copy here
	 * was a leftover and has been removed.
	 */
}

/* Pack an IPv6 entry of the RMFT2 register. Fills the common fields via
 * mlxsw_reg_rmft2_common_pack(), sets the IPv6 type and copies the
 * group (dip6) and source (sip6) addresses together with their TCAM
 * masks into the payload. The in6_addr arguments are passed by value
 * and copied into the register buffer.
 */
static inline void
mlxsw_reg_rmft2_ipv6_pack(char *payload, bool v, u16 offset, u16 virtual_router,
			  enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
			  struct in6_addr dip6, struct in6_addr dip6_mask,
			  struct in6_addr sip6, struct in6_addr sip6_mask,
			  const char *flexible_action_set)
{
	mlxsw_reg_rmft2_common_pack(payload, v, offset, virtual_router,
				    irif_mask, irif, flexible_action_set);
	mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV6);
	mlxsw_reg_rmft2_dip6_memcpy_to(payload, (void *)&dip6);
	mlxsw_reg_rmft2_dip6_mask_memcpy_to(payload, (void *)&dip6_mask);
	mlxsw_reg_rmft2_sip6_memcpy_to(payload, (void *)&sip6);
	mlxsw_reg_rmft2_sip6_mask_memcpy_to(payload, (void *)&sip6_mask);
}

/* MFCR - Management Fan Control Register
+1 −0
Original line number Diff line number Diff line
@@ -3380,6 +3380,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
+180 −113
Original line number Diff line number Diff line
@@ -33,6 +33,7 @@
 */

#include <linux/rhashtable.h>
#include <net/ipv6.h>

#include "spectrum_mr.h"
#include "spectrum_router.h"
@@ -47,6 +48,11 @@ struct mlxsw_sp_mr {
	/* priv has to be always the last item */
};

struct mlxsw_sp_mr_vif;
struct mlxsw_sp_mr_vif_ops {
	bool (*is_regular)(const struct mlxsw_sp_mr_vif *vif);
};

struct mlxsw_sp_mr_vif {
	struct net_device *dev;
	const struct mlxsw_sp_rif *rif;
@@ -61,6 +67,9 @@ struct mlxsw_sp_mr_vif {
	 * instance is used as an ingress VIF
	 */
	struct list_head route_ivif_list;

	/* Protocol specific operations for a VIF */
	const struct mlxsw_sp_mr_vif_ops *ops;
};

struct mlxsw_sp_mr_route_vif_entry {
@@ -70,6 +79,17 @@ struct mlxsw_sp_mr_route_vif_entry {
	struct mlxsw_sp_mr_route *mr_route;
};

struct mlxsw_sp_mr_table;
struct mlxsw_sp_mr_table_ops {
	bool (*is_route_valid)(const struct mlxsw_sp_mr_table *mr_table,
			       const struct mr_mfc *mfc);
	void (*key_create)(struct mlxsw_sp_mr_table *mr_table,
			   struct mlxsw_sp_mr_route_key *key,
			   struct mr_mfc *mfc);
	bool (*is_route_starg)(const struct mlxsw_sp_mr_table *mr_table,
			       const struct mlxsw_sp_mr_route *mr_route);
};

struct mlxsw_sp_mr_table {
	struct list_head node;
	enum mlxsw_sp_l3proto proto;
@@ -78,6 +98,7 @@ struct mlxsw_sp_mr_table {
	struct mlxsw_sp_mr_vif vifs[MAXVIFS];
	struct list_head route_list;
	struct rhashtable route_ht;
	const struct mlxsw_sp_mr_table_ops *ops;
	char catchall_route_priv[0];
	/* catchall_route_priv has to be always the last item */
};
@@ -88,7 +109,7 @@ struct mlxsw_sp_mr_route {
	struct mlxsw_sp_mr_route_key key;
	enum mlxsw_sp_mr_route_action route_action;
	u16 min_mtu;
	struct mfc_cache *mfc4;
	struct mr_mfc *mfc;
	void *route_priv;
	const struct mlxsw_sp_mr_table *mr_table;
	/* A list of route_vif_entry structs that point to the egress VIFs */
@@ -104,14 +125,9 @@ static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = {
	.automatic_shrinking = true,
};

static bool mlxsw_sp_mr_vif_regular(const struct mlxsw_sp_mr_vif *vif)
{
	return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
}

static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif)
{
	return mlxsw_sp_mr_vif_regular(vif) && vif->dev && vif->rif;
	return vif->ops->is_regular(vif) && vif->dev && vif->rif;
}

static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif)
@@ -122,18 +138,9 @@ static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif)
static bool
mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
{
	vifi_t ivif;
	vifi_t ivif = mr_route->mfc->mfc_parent;

	switch (mr_route->mr_table->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		ivif = mr_route->mfc4->_c.mfc_parent;
		return mr_route->mfc4->_c.mfc_un.res.ttls[ivif] != 255;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* fall through */
	default:
		WARN_ON_ONCE(1);
	}
	return false;
	return mr_route->mfc->mfc_un.res.ttls[ivif] != 255;
}

static int
@@ -149,19 +156,6 @@ mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route)
	return valid_evifs;
}

static bool mlxsw_sp_mr_route_starg(const struct mlxsw_sp_mr_route *mr_route)
{
	switch (mr_route->mr_table->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY);
	case MLXSW_SP_L3_PROTO_IPV6:
		/* fall through */
	default:
		WARN_ON_ONCE(1);
	}
	return false;
}

static enum mlxsw_sp_mr_route_action
mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
{
@@ -174,7 +168,8 @@ mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
	/* The kernel does not match a (*,G) route that the ingress interface is
	 * not one of the egress interfaces, so trap these kind of routes.
	 */
	if (mlxsw_sp_mr_route_starg(mr_route) &&
	if (mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
						    mr_route) &&
	    !mlxsw_sp_mr_route_ivif_in_evifs(mr_route))
		return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

@@ -195,25 +190,11 @@ mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
static enum mlxsw_sp_mr_route_prio
mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route)
{
	return mlxsw_sp_mr_route_starg(mr_route) ?
	return mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
						       mr_route) ?
		MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG;
}

static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
				   struct mlxsw_sp_mr_route_key *key,
				   const struct mfc_cache *mfc)
{
	bool starg = (mfc->mfc_origin == htonl(INADDR_ANY));

	memset(key, 0, sizeof(*key));
	key->vrid = mr_table->vr_id;
	key->proto = mr_table->proto;
	key->group.addr4 = mfc->mfc_mcastgrp;
	key->group_mask.addr4 = htonl(0xffffffff);
	key->source.addr4 = mfc->mfc_origin;
	key->source_mask.addr4 = htonl(starg ? 0 : 0xffffffff);
}

static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route,
				       struct mlxsw_sp_mr_vif *mr_vif)
{
@@ -343,8 +324,8 @@ static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table,
}

static struct mlxsw_sp_mr_route *
mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
			  struct mfc_cache *mfc)
mlxsw_sp_mr_route_create(struct mlxsw_sp_mr_table *mr_table,
			 struct mr_mfc *mfc)
{
	struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
	struct mlxsw_sp_mr_route *mr_route;
@@ -356,15 +337,16 @@ mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
	if (!mr_route)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&mr_route->evif_list);
	mlxsw_sp_mr_route4_key(mr_table, &mr_route->key, mfc);

	/* Find min_mtu and link iVIF and eVIFs */
	mr_route->min_mtu = ETH_MAX_MTU;
	ipmr_cache_hold(mfc);
	mr_route->mfc4 = mfc;
	mr_cache_hold(mfc);
	mr_route->mfc = mfc;
	mr_table->ops->key_create(mr_table, &mr_route->key, mr_route->mfc);

	mr_route->mr_table = mr_table;
	for (i = 0; i < MAXVIFS; i++) {
		if (mfc->_c.mfc_un.res.ttls[i] != 255) {
		if (mfc->mfc_un.res.ttls[i] != 255) {
			err = mlxsw_sp_mr_route_evif_link(mr_route,
							  &mr_table->vifs[i]);
			if (err)
@@ -375,59 +357,37 @@ mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
		}
	}
	mlxsw_sp_mr_route_ivif_link(mr_route,
				    &mr_table->vifs[mfc->_c.mfc_parent]);
				    &mr_table->vifs[mfc->mfc_parent]);

	mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
	return mr_route;
err:
	ipmr_cache_put(mfc);
	mr_cache_put(mfc);
	list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
		mlxsw_sp_mr_route_evif_unlink(rve);
	kfree(mr_route);
	return ERR_PTR(err);
}

static void mlxsw_sp_mr_route4_destroy(struct mlxsw_sp_mr_table *mr_table,
static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
				      struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;

	mlxsw_sp_mr_route_ivif_unlink(mr_route);
	ipmr_cache_put(mr_route->mfc4);
	mr_cache_put(mr_route->mfc);
	list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
		mlxsw_sp_mr_route_evif_unlink(rve);
	kfree(mr_route);
}

static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
				      struct mlxsw_sp_mr_route *mr_route)
{
	switch (mr_table->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* fall through */
	default:
		WARN_ON_ONCE(1);
	}
}

static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
					bool offload)
{
	switch (mr_route->mr_table->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
	if (offload)
			mr_route->mfc4->_c.mfc_flags |= MFC_OFFLOAD;
		mr_route->mfc->mfc_flags |= MFC_OFFLOAD;
	else
			mr_route->mfc4->_c.mfc_flags &= ~MFC_OFFLOAD;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* fall through */
	default:
		WARN_ON_ONCE(1);
	}
		mr_route->mfc->mfc_flags &= ~MFC_OFFLOAD;
}

static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route)
@@ -449,25 +409,18 @@ static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
	mlxsw_sp_mr_route_destroy(mr_table, mr_route);
}

int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
			   struct mfc_cache *mfc, bool replace)
int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
			  struct mr_mfc *mfc, bool replace)
{
	struct mlxsw_sp_mr_route *mr_orig_route = NULL;
	struct mlxsw_sp_mr_route *mr_route;
	int err;

	/* If the route is a (*,*) route, abort, as these kind of routes are
	 * used for proxy routes.
	 */
	if (mfc->mfc_origin == htonl(INADDR_ANY) &&
	    mfc->mfc_mcastgrp == htonl(INADDR_ANY)) {
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
	if (!mr_table->ops->is_route_valid(mr_table, mfc))
		return -EINVAL;
	}

	/* Create a new route */
	mr_route = mlxsw_sp_mr_route4_create(mr_table, mfc);
	mr_route = mlxsw_sp_mr_route_create(mr_table, mfc);
	if (IS_ERR(mr_route))
		return PTR_ERR(mr_route);

@@ -512,7 +465,7 @@ int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
				       &mr_orig_route->ht_node,
				       mlxsw_sp_mr_route_ht_params);
		list_del(&mr_orig_route->node);
		mlxsw_sp_mr_route4_destroy(mr_table, mr_orig_route);
		mlxsw_sp_mr_route_destroy(mr_table, mr_orig_route);
	}

	mlxsw_sp_mr_mfc_offload_update(mr_route);
@@ -525,17 +478,17 @@ int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
	list_del(&mr_route->node);
err_no_orig_route:
err_duplicate_route:
	mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
	mlxsw_sp_mr_route_destroy(mr_table, mr_route);
	return err;
}

void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table,
			    struct mfc_cache *mfc)
void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
			   struct mr_mfc *mfc)
{
	struct mlxsw_sp_mr_route *mr_route;
	struct mlxsw_sp_mr_route_key key;

	mlxsw_sp_mr_route4_key(mr_table, &key, mfc);
	mr_table->ops->key_create(mr_table, &key, mfc);
	mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
					  mlxsw_sp_mr_route_ht_params);
	if (mr_route)
@@ -840,6 +793,125 @@ void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
	}
}

/* Protocol specific functions */
static bool
mlxsw_sp_mr_route4_validate(const struct mlxsw_sp_mr_table *mr_table,
			    const struct mr_mfc *c)
{
	struct mfc_cache *mfc = (struct mfc_cache *) c;

	/* If the route is a (*,*) route, abort, as these kind of routes are
	 * used for proxy routes.
	 */
	if (mfc->mfc_origin == htonl(INADDR_ANY) &&
	    mfc->mfc_mcastgrp == htonl(INADDR_ANY)) {
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
		return false;
	}
	return true;
}

/* IPv4 implementation of mlxsw_sp_mr_table_ops::key_create.
 * Builds a hash/TCAM lookup key from an IPv4 MFC entry: full masks for
 * the group, and a zero source mask for (*,G) routes so any source
 * matches.
 */
static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
				   struct mlxsw_sp_mr_route_key *key,
				   struct mr_mfc *c)
{
	const struct mfc_cache *mfc4 = (struct mfc_cache *) c;
	bool any_source = mfc4->mfc_origin == htonl(INADDR_ANY);

	memset(key, 0, sizeof(*key));
	key->vrid = mr_table->vr_id;
	key->proto = MLXSW_SP_L3_PROTO_IPV4;
	key->source.addr4 = mfc4->mfc_origin;
	key->source_mask.addr4 = htonl(any_source ? 0 : 0xffffffff);
	key->group.addr4 = mfc4->mfc_mcastgrp;
	key->group_mask.addr4 = htonl(0xffffffff);
}

/* IPv4 implementation of mlxsw_sp_mr_table_ops::is_route_starg.
 * A (*,G) route is identified by an all-zero source mask in the route
 * key, as set up by mlxsw_sp_mr_route4_key().
 */
static bool mlxsw_sp_mr_route4_starg(const struct mlxsw_sp_mr_table *mr_table,
				     const struct mlxsw_sp_mr_route *mr_route)
{
	return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY);
}

/* IPv4 implementation of mlxsw_sp_mr_vif_ops::is_regular.
 * A regular VIF is neither a tunnel nor a PIM register interface; only
 * regular VIFs can be considered valid for offload (see
 * mlxsw_sp_mr_vif_valid()).
 */
static bool mlxsw_sp_mr_vif4_is_regular(const struct mlxsw_sp_mr_vif *vif)
{
	return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
}

static bool
mlxsw_sp_mr_route6_validate(const struct mlxsw_sp_mr_table *mr_table,
			    const struct mr_mfc *c)
{
	struct mfc6_cache *mfc = (struct mfc6_cache *) c;

	/* If the route is a (*,*) route, abort, as these kind of routes are
	 * used for proxy routes.
	 */
	if (ipv6_addr_any(&mfc->mf6c_origin) &&
	    ipv6_addr_any(&mfc->mf6c_mcastgrp)) {
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
		return false;
	}
	return true;
}

static void mlxsw_sp_mr_route6_key(struct mlxsw_sp_mr_table *mr_table,
				   struct mlxsw_sp_mr_route_key *key,
				   struct mr_mfc *c)
{
	const struct mfc6_cache *mfc = (struct mfc6_cache *) c;

	memset(key, 0, sizeof(*key));
	key->vrid = mr_table->vr_id;
	key->proto = MLXSW_SP_L3_PROTO_IPV6;
	key->group.addr6 = mfc->mf6c_mcastgrp;
	memset(&key->group_mask.addr6, 0xff, sizeof(key->group_mask.addr6));
	key->source.addr6 = mfc->mf6c_origin;
	if (!ipv6_addr_any(&mfc->mf6c_origin))
		memset(&key->source_mask.addr6, 0xff,
		       sizeof(key->source_mask.addr6));
}

/* IPv6 implementation of mlxsw_sp_mr_table_ops::is_route_starg.
 * A (*,G) route is identified by an all-zero source mask in the route
 * key, as set up by mlxsw_sp_mr_route6_key().
 */
static bool mlxsw_sp_mr_route6_starg(const struct mlxsw_sp_mr_table *mr_table,
				     const struct mlxsw_sp_mr_route *mr_route)
{
	return ipv6_addr_any(&mr_route->key.source_mask.addr6);
}

/* IPv6 implementation of mlxsw_sp_mr_vif_ops::is_regular.
 * A regular MIF is any interface that is not the PIM register device
 * (IPv6 multicast has no VIFF_TUNNEL equivalent); only regular VIFs can
 * be considered valid for offload (see mlxsw_sp_mr_vif_valid()).
 */
static bool mlxsw_sp_mr_vif6_is_regular(const struct mlxsw_sp_mr_vif *vif)
{
	return !(vif->vif_flags & MIFF_REGISTER);
}

static struct
mlxsw_sp_mr_vif_ops mlxsw_sp_mr_vif_ops_arr[] = {
	{
		.is_regular = mlxsw_sp_mr_vif4_is_regular,
	},
	{
		.is_regular = mlxsw_sp_mr_vif6_is_regular,
	},
};

static struct
mlxsw_sp_mr_table_ops mlxsw_sp_mr_table_ops_arr[] = {
	{
		.is_route_valid = mlxsw_sp_mr_route4_validate,
		.key_create = mlxsw_sp_mr_route4_key,
		.is_route_starg = mlxsw_sp_mr_route4_starg,
	},
	{
		.is_route_valid = mlxsw_sp_mr_route6_validate,
		.key_create = mlxsw_sp_mr_route6_key,
		.is_route_starg = mlxsw_sp_mr_route6_starg,
	},

};

struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
						   u32 vr_id,
						   enum mlxsw_sp_l3proto proto)
@@ -848,6 +920,7 @@ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
		.prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
		.key = {
			.vrid = vr_id,
			.proto = proto,
		},
		.value = {
			.route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP,
@@ -866,6 +939,7 @@ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
	mr_table->vr_id = vr_id;
	mr_table->mlxsw_sp = mlxsw_sp;
	mr_table->proto = proto;
	mr_table->ops = &mlxsw_sp_mr_table_ops_arr[proto];
	INIT_LIST_HEAD(&mr_table->route_list);

	err = rhashtable_init(&mr_table->route_ht,
@@ -876,6 +950,7 @@ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
	for (i = 0; i < MAXVIFS; i++) {
		INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list);
		INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list);
		mr_table->vifs[i].ops = &mlxsw_sp_mr_vif_ops_arr[proto];
	}

	err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
@@ -942,18 +1017,10 @@ static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
	mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
				&bytes);

	switch (mr_route->mr_table->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		if (mr_route->mfc4->_c.mfc_un.res.pkt != packets)
			mr_route->mfc4->_c.mfc_un.res.lastuse = jiffies;
		mr_route->mfc4->_c.mfc_un.res.pkt = packets;
		mr_route->mfc4->_c.mfc_un.res.bytes = bytes;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* fall through */
	default:
		WARN_ON_ONCE(1);
	}
	if (mr_route->mfc->mfc_un.res.pkt != packets)
		mr_route->mfc->mfc_un.res.lastuse = jiffies;
	mr_route->mfc->mfc_un.res.pkt = packets;
	mr_route->mfc->mfc_un.res.bytes = bytes;
}

static void mlxsw_sp_mr_stats_update(struct work_struct *work)
+5 −4
Original line number Diff line number Diff line
@@ -36,6 +36,7 @@
#define _MLXSW_SPECTRUM_MCROUTER_H

#include <linux/mroute.h>
#include <linux/mroute6.h>
#include "spectrum_router.h"
#include "spectrum.h"

@@ -109,10 +110,10 @@ struct mlxsw_sp_mr_table;
int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
		     const struct mlxsw_sp_mr_ops *mr_ops);
void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
			   struct mfc_cache *mfc, bool replace);
void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table,
			    struct mfc_cache *mfc);
int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
			  struct mr_mfc *mfc, bool replace);
void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
			   struct mr_mfc *mfc);
int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
			struct net_device *dev, vifi_t vif_index,
			unsigned long vif_flags,
+72 −33
Original line number Diff line number Diff line
@@ -51,7 +51,7 @@ struct mlxsw_sp_mr_tcam_region {
};

struct mlxsw_sp_mr_tcam {
	struct mlxsw_sp_mr_tcam_region ipv4_tcam_region;
	struct mlxsw_sp_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX];
};

/* This struct maps to one RIGR2 register entry */
@@ -316,20 +316,37 @@ static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
					  mlxsw_afa_block_first_set(afa_block));
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
	default:
		WARN_ON_ONCE(1);
		mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index,
					  key->vrid,
					  MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
					  key->group.addr6,
					  key->group_mask.addr6,
					  key->source.addr6,
					  key->source_mask.addr6,
					  mlxsw_afa_block_first_set(afa_block));
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}

/* Invalidate the RMFT2 entry that backs @key: pack a non-valid (v=false)
 * register according to the key's protocol and write it to the device.
 * The former unconditional IPv4 pack preceding the switch was a
 * redundant leftover and has been removed.
 */
static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid,
					 struct mlxsw_sp_mr_route_key *key,
					 struct parman_item *parman_item)
{
	struct in6_addr zero_addr = IN6ADDR_ANY_INIT;
	char rmft2_pl[MLXSW_REG_RMFT2_LEN];

	switch (key->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index,
					  vrid, 0, 0, 0, 0, 0, 0, NULL);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index,
					  vrid, 0, 0, zero_addr, zero_addr,
					  zero_addr, zero_addr, NULL);
		break;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}
@@ -353,27 +370,30 @@ mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
	return 0;
}

/* Return the TCAM region that serves the given L3 protocol; the
 * tcam_regions array is indexed by enum mlxsw_sp_l3proto.
 */
static struct mlxsw_sp_mr_tcam_region *
mlxsw_sp_mr_tcam_protocol_region(struct mlxsw_sp_mr_tcam *mr_tcam,
				 enum mlxsw_sp_l3proto proto)
{
	return &mr_tcam->tcam_regions[proto];
}

static int
mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam,
				       struct mlxsw_sp_mr_tcam_route *route,
				       enum mlxsw_sp_mr_route_prio prio)
{
	struct parman_prio *parman_prio = NULL;
	struct mlxsw_sp_mr_tcam_region *tcam_region;
	int err;

	switch (route->key.proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		parman_prio = &mr_tcam->ipv4_tcam_region.parman_prios[prio];
		err = parman_item_add(mr_tcam->ipv4_tcam_region.parman,
				      parman_prio, &route->parman_item);
	tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
						       route->key.proto);
	err = parman_item_add(tcam_region->parman,
			      &tcam_region->parman_prios[prio],
			      &route->parman_item);
	if (err)
		return err;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
	default:
		WARN_ON_ONCE(1);
	}
	route->parman_prio = parman_prio;

	route->parman_prio = &tcam_region->parman_prios[prio];
	return 0;
}

@@ -381,15 +401,13 @@ static void
mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam,
					  struct mlxsw_sp_mr_tcam_route *route)
{
	switch (route->key.proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		parman_item_remove(mr_tcam->ipv4_tcam_region.parman,
	struct mlxsw_sp_mr_tcam_region *tcam_region;

	tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
						       route->key.proto);

	parman_item_remove(tcam_region->parman,
			   route->parman_prio, &route->parman_item);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
	default:
		WARN_ON_ONCE(1);
	}
}

static int
@@ -462,7 +480,7 @@ static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;

	mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid,
				      &route->parman_item);
				      &route->key, &route->parman_item);
	mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
@@ -806,21 +824,42 @@ mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;
	struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
	u32 rtar_key;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
		return -EIO;

	return mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
					    &mr_tcam->ipv4_tcam_region,
					    MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST);
	rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST;
	err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
					   &region[MLXSW_SP_L3_PROTO_IPV4],
					   rtar_key);
	if (err)
		return err;

	rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST;
	err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
					   &region[MLXSW_SP_L3_PROTO_IPV6],
					   rtar_key);
	if (err)
		goto err_ipv6_region_init;

	return 0;

err_ipv6_region_init:
	mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
	return err;
}

static void mlxsw_sp_mr_tcam_fini(void *priv)
{
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;
	struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];

	mlxsw_sp_mr_tcam_region_fini(&mr_tcam->ipv4_tcam_region);
	mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]);
	mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
}

const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
Loading