Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6ecfdd28 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'mlx5e-next'



Amir Vadai says:

====================
ConnectX-4 driver update 2015-07-23

This patchset introduces some performance enhancements to the ConnectX-4 driver.
1. Improving RSS distribution, and making the RSS hash function controllable using ethtool.
2. Allocate memory that is written by the NIC and read by the host CPU on the
   NUMA node local to the processing CPU
3. Support tx copybreak
4. Using hardware feature called blueflame to save DMA reads when possible

Another patch by Achiad fixes some cosmetic issues in the driver.

Patchset was applied and tested on top of commit 045a0fa0 ("ip_tunnel: Call
ip_tunnel_core_init() from inet_init()")
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents fda19e83 a741749f
Loading
Loading
Loading
Loading
+40 −8
Original line number Diff line number Diff line
@@ -45,15 +45,34 @@
 * register it in a memory region at HCA virtual address 0.
 */

int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
/* Allocate zeroed DMA-coherent memory, steering the allocation to @node.
 *
 * The device's NUMA node is temporarily overridden so that
 * dma_zalloc_coherent() picks pages local to @node, then restored.
 * priv->alloc_mutex serializes callers — presumably so concurrent
 * allocations never observe each other's temporary node setting
 * (NOTE(review): confirm against other alloc_mutex users).
 *
 * Returns the CPU virtual address, or NULL on failure; the bus address
 * is stored through @dma_handle.
 */
static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
					   size_t size, dma_addr_t *dma_handle,
					   int node)
{
	struct mlx5_priv *priv = &dev->priv;
	int saved_node;
	void *buf;

	mutex_lock(&priv->alloc_mutex);
	saved_node = dev_to_node(&dev->pdev->dev);
	set_dev_node(&dev->pdev->dev, node);
	buf = dma_zalloc_coherent(&dev->pdev->dev, size, dma_handle,
				  GFP_KERNEL);
	set_dev_node(&dev->pdev->dev, saved_node);
	mutex_unlock(&priv->alloc_mutex);

	return buf;
}

int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			struct mlx5_buf *buf, int node)
{
	dma_addr_t t;

	buf->size = size;
	buf->npages       = 1;
	buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
	buf->direct.buf   = dma_zalloc_coherent(&dev->pdev->dev,
						size, &t, GFP_KERNEL);
	buf->direct.buf   = mlx5_dma_zalloc_coherent_node(dev, size,
							  &t, node);
	if (!buf->direct.buf)
		return -ENOMEM;

@@ -66,6 +85,11 @@ int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)

	return 0;
}

/* Allocate @buf on the device's default NUMA node.
 * Thin wrapper around mlx5_buf_alloc_node(); see it for details.
 */
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
{
	int node = dev->priv.numa_node;

	return mlx5_buf_alloc_node(dev, size, buf, node);
}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);

void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
@@ -75,7 +99,8 @@ void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);

static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
						 int node)
{
	struct mlx5_db_pgdir *pgdir;

@@ -84,8 +109,9 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
		return NULL;

	bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
					    &pgdir->db_dma, GFP_KERNEL);

	pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
						       &pgdir->db_dma, node);
	if (!pgdir->db_page) {
		kfree(pgdir);
		return NULL;
@@ -118,7 +144,7 @@ static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
	return 0;
}

int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
{
	struct mlx5_db_pgdir *pgdir;
	int ret = 0;
@@ -129,7 +155,7 @@ int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
		if (!mlx5_alloc_db_from_pgdir(pgdir, db))
			goto out;

	pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev));
	pgdir = mlx5_alloc_db_pgdir(dev, node);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
@@ -145,6 +171,12 @@ int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)

	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);

/* Allocate a doorbell record on the device's default NUMA node.
 * Thin wrapper around mlx5_db_alloc_node(); see it for details.
 */
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	int node = dev->priv.numa_node;

	return mlx5_db_alloc_node(dev, db, node);
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);

void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
+33 −14
Original line number Diff line number Diff line
@@ -60,6 +60,7 @@

#define MLX5E_TX_CQ_POLL_BUDGET        128
#define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
#define MLX5E_SQ_BF_BUDGET             16

static const char vport_strings[][ETH_GSTRING_LEN] = {
	/* vport statistics */
@@ -195,6 +196,8 @@ struct mlx5e_params {
	u16 rx_hash_log_tbl_sz;
	bool lro_en;
	u32 lro_wqe_sz;
	u8  rss_hfunc;
	u16 tx_max_inline;
};

enum {
@@ -266,7 +269,9 @@ struct mlx5e_sq {
	/* dirtied @xmit */
	u16                        pc ____cacheline_aligned_in_smp;
	u32                        dma_fifo_pc;
	u32                        bf_offset;
	u16                        bf_offset;
	u16                        prev_cc;
	u8                         bf_budget;
	struct mlx5e_sq_stats      stats;

	struct mlx5e_cq            cq;
@@ -279,9 +284,10 @@ struct mlx5e_sq {
	struct mlx5_wq_cyc         wq;
	u32                        dma_fifo_mask;
	void __iomem              *uar_map;
	void __iomem              *uar_bf_map;
	struct netdev_queue       *txq;
	u32                        sqn;
	u32                        bf_buf_size;
	u16                        bf_buf_size;
	u16                        max_inline;
	u16                        edge;
	struct device             *pdev;
@@ -324,14 +330,18 @@ struct mlx5e_channel {
};

enum mlx5e_traffic_types {
	MLX5E_TT_IPV4_TCP = 0,
	MLX5E_TT_IPV6_TCP = 1,
	MLX5E_TT_IPV4_UDP = 2,
	MLX5E_TT_IPV6_UDP = 3,
	MLX5E_TT_IPV4     = 4,
	MLX5E_TT_IPV6     = 5,
	MLX5E_TT_ANY      = 6,
	MLX5E_NUM_TT      = 7,
	MLX5E_TT_IPV4_TCP,
	MLX5E_TT_IPV6_TCP,
	MLX5E_TT_IPV4_UDP,
	MLX5E_TT_IPV6_UDP,
	MLX5E_TT_IPV4_IPSEC_AH,
	MLX5E_TT_IPV6_IPSEC_AH,
	MLX5E_TT_IPV4_IPSEC_ESP,
	MLX5E_TT_IPV6_IPSEC_ESP,
	MLX5E_TT_IPV4,
	MLX5E_TT_IPV6,
	MLX5E_TT_ANY,
	MLX5E_NUM_TT,
};

enum {
@@ -491,8 +501,10 @@ int mlx5e_update_priv_params(struct mlx5e_priv *priv,
			     struct mlx5e_params *new_params);

static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
				      struct mlx5e_tx_wqe *wqe)
				      struct mlx5e_tx_wqe *wqe, int bf_sz)
{
	u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;

	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

@@ -503,9 +515,15 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
	 */
	wmb();

	mlx5_write64((__be32 *)&wqe->ctrl,
		     sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
		     NULL);
	if (bf_sz) {
		__iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);

		/* flush the write-combining mapped buffer */
		wmb();

	} else {
		mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
	}

	sq->bf_offset ^= sq->bf_buf_size;
}
@@ -519,3 +537,4 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
}

extern const struct ethtool_ops mlx5e_ethtool_ops;
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
+92 −0
Original line number Diff line number Diff line
@@ -662,6 +662,94 @@ static int mlx5e_set_settings(struct net_device *netdev,
	return err;
}

/* ethtool get_rxfh: report the configured RSS hash function.
 * Only @hfunc is filled in; @indir and @key are not reported here.
 */
static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			  u8 *hfunc)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!hfunc)
		return 0;

	*hfunc = priv->params.rss_hfunc;
	return 0;
}

/* ethtool set_rxfh: change the RSS hash function.
 * Accepts only ETH_RSS_HASH_XOR and ETH_RSS_HASH_TOP; a NO_CHANGE
 * request succeeds without touching anything. If the netdev is open,
 * it is closed and reopened so the new hash function takes effect.
 */
static int mlx5e_set_rxfh(struct net_device *netdev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err = 0;

	switch (hfunc) {
	case ETH_RSS_HASH_NO_CHANGE:
		return 0;
	case ETH_RSS_HASH_XOR:
	case ETH_RSS_HASH_TOP:
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&priv->state_lock);

	priv->params.rss_hfunc = hfunc;
	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		mlx5e_close_locked(priv->netdev);
		err = mlx5e_open_locked(priv->netdev);
	}

	mutex_unlock(&priv->state_lock);

	return err;
}

/* ethtool get_tunable: ETHTOOL_TX_COPYBREAK reports the driver's
 * tx_max_inline threshold; all other tunables are unsupported.
 */
static int mlx5e_get_tunable(struct net_device *dev,
			     const struct ethtool_tunable *tuna,
			     void *data)
{
	const struct mlx5e_priv *priv = netdev_priv(dev);

	if (tuna->id != ETHTOOL_TX_COPYBREAK)
		return -EINVAL;

	*(u32 *)data = priv->params.tx_max_inline;
	return 0;
}

/* ethtool set_tunable: ETHTOOL_TX_COPYBREAK sets tx_max_inline, bounded
 * by the device's inline capability; the change is applied through
 * mlx5e_update_priv_params() under the state lock. All other tunables
 * return -EINVAL.
 */
static int mlx5e_set_tunable(struct net_device *dev,
			     const struct ethtool_tunable *tuna,
			     const void *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_params new_params;
	u32 val;
	int err;

	if (tuna->id != ETHTOOL_TX_COPYBREAK)
		return -EINVAL;

	val = *(u32 *)data;
	/* Reject values beyond what the HCA can inline. */
	if (val > mlx5e_get_max_inline_cap(mdev))
		return -EINVAL;

	mutex_lock(&priv->state_lock);
	new_params = priv->params;
	new_params.tx_max_inline = val;
	err = mlx5e_update_priv_params(priv, &new_params);
	mutex_unlock(&priv->state_lock);

	return err;
}

const struct ethtool_ops mlx5e_ethtool_ops = {
	.get_drvinfo       = mlx5e_get_drvinfo,
	.get_link          = ethtool_op_get_link,
@@ -676,4 +764,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
	.set_coalesce      = mlx5e_set_coalesce,
	.get_settings      = mlx5e_get_settings,
	.set_settings      = mlx5e_set_settings,
	.get_rxfh          = mlx5e_get_rxfh,
	.set_rxfh          = mlx5e_set_rxfh,
	.get_tunable       = mlx5e_get_tunable,
	.set_tunable       = mlx5e_set_tunable,
};
+177 −81
Original line number Diff line number Diff line
@@ -105,25 +105,41 @@ static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
{
	void *ft = priv->ft.main;

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
		mlx5_del_flow_table_entry(ft,
					  ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
		mlx5_del_flow_table_entry(ft,
					  ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
		mlx5_del_flow_table_entry(ft,
					  ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
		mlx5_del_flow_table_entry(ft,
					  ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
	if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
	if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);

	if (ai->tt_vec & (1 << MLX5E_TT_ANY))
	if (ai->tt_vec & BIT(MLX5E_TT_ANY))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
}

@@ -156,33 +172,37 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
		switch (eth_addr_type) {
		case MLX5E_UC:
			ret =
				(1 << MLX5E_TT_IPV4_TCP) |
				(1 << MLX5E_TT_IPV6_TCP) |
				(1 << MLX5E_TT_IPV4_UDP) |
				(1 << MLX5E_TT_IPV6_UDP) |
				(1 << MLX5E_TT_IPV4)     |
				(1 << MLX5E_TT_IPV6)     |
				(1 << MLX5E_TT_ANY)      |
				BIT(MLX5E_TT_IPV4_TCP)       |
				BIT(MLX5E_TT_IPV6_TCP)       |
				BIT(MLX5E_TT_IPV4_UDP)       |
				BIT(MLX5E_TT_IPV6_UDP)       |
				BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
				BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
				BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
				BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
				BIT(MLX5E_TT_IPV4)           |
				BIT(MLX5E_TT_IPV6)           |
				BIT(MLX5E_TT_ANY)            |
				0;
			break;

		case MLX5E_MC_IPV4:
			ret =
				(1 << MLX5E_TT_IPV4_UDP) |
				(1 << MLX5E_TT_IPV4)     |
				BIT(MLX5E_TT_IPV4_UDP)       |
				BIT(MLX5E_TT_IPV4)           |
				0;
			break;

		case MLX5E_MC_IPV6:
			ret =
				(1 << MLX5E_TT_IPV6_UDP) |
				(1 << MLX5E_TT_IPV6)     |
				BIT(MLX5E_TT_IPV6_UDP)       |
				BIT(MLX5E_TT_IPV6)           |
				0;
			break;

		case MLX5E_MC_OTHER:
			ret =
				(1 << MLX5E_TT_ANY)      |
				BIT(MLX5E_TT_ANY)            |
				0;
			break;
		}
@@ -191,23 +211,27 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)

	case MLX5E_ALLMULTI:
		ret =
			(1 << MLX5E_TT_IPV4_UDP) |
			(1 << MLX5E_TT_IPV6_UDP) |
			(1 << MLX5E_TT_IPV4)     |
			(1 << MLX5E_TT_IPV6)     |
			(1 << MLX5E_TT_ANY)      |
			BIT(MLX5E_TT_IPV4_UDP) |
			BIT(MLX5E_TT_IPV6_UDP) |
			BIT(MLX5E_TT_IPV4)     |
			BIT(MLX5E_TT_IPV6)     |
			BIT(MLX5E_TT_ANY)      |
			0;
		break;

	default: /* MLX5E_PROMISC */
		ret =
			(1 << MLX5E_TT_IPV4_TCP) |
			(1 << MLX5E_TT_IPV6_TCP) |
			(1 << MLX5E_TT_IPV4_UDP) |
			(1 << MLX5E_TT_IPV6_UDP) |
			(1 << MLX5E_TT_IPV4)     |
			(1 << MLX5E_TT_IPV6)     |
			(1 << MLX5E_TT_ANY)      |
			BIT(MLX5E_TT_IPV4_TCP)       |
			BIT(MLX5E_TT_IPV6_TCP)       |
			BIT(MLX5E_TT_IPV4_UDP)       |
			BIT(MLX5E_TT_IPV6_UDP)       |
			BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
			BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
			BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
			BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
			BIT(MLX5E_TT_IPV4)           |
			BIT(MLX5E_TT_IPV6)           |
			BIT(MLX5E_TT_ANY)            |
			0;
		break;
	}
@@ -226,6 +250,7 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
	u8   *match_criteria_dmac;
	void *ft   = priv->ft.main;
	u32  *tirn = priv->tirn;
	u32  *ft_ix;
	u32  tt_vec;
	int  err;

@@ -261,51 +286,51 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,

	tt_vec = mlx5e_get_tt_vec(ai, type);

	if (tt_vec & (1 << MLX5E_TT_ANY)) {
	ft_ix = &ai->ft_ix[MLX5E_TT_ANY];
	if (tt_vec & BIT(MLX5E_TT_ANY)) {
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_ANY]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						&ai->ft_ix[MLX5E_TT_ANY]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return err;
		}
		ai->tt_vec |= (1 << MLX5E_TT_ANY);
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_ANY);
	}

	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 outer_headers.ethertype);

	if (tt_vec & (1 << MLX5E_TT_IPV4)) {
	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4];
	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						&ai->ft_ix[MLX5E_TT_IPV4]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return err;
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV4);
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
	}

	if (tt_vec & (1 << MLX5E_TT_IPV6)) {
	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6];
	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						&ai->ft_ix[MLX5E_TT_IPV6]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return err;
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV6);
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
	}

	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
@@ -313,70 +338,141 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
		 IPPROTO_UDP);

	if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP];
	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4_UDP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						&ai->ft_ix[MLX5E_TT_IPV4_UDP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return err;
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
	}

	if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP];
	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6_UDP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						&ai->ft_ix[MLX5E_TT_IPV6_UDP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return err;
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
	}

	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
		 IPPROTO_TCP);

	if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP];
	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4_TCP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						&ai->ft_ix[MLX5E_TT_IPV4_TCP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return err;
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
	}

	if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP];
	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6_TCP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						&ai->ft_ix[MLX5E_TT_IPV6_TCP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return err;
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
	}

	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
		 IPPROTO_AH);

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH];
	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4_IPSEC_AH]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
	}
		ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH];
	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6_IPSEC_AH]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
	}

	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
		 IPPROTO_ESP);

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP];
	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4_IPSEC_ESP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
	}

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP];
	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6_IPSEC_ESP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
	}

	return 0;

err_del_ai:
	mlx5e_del_eth_addr_from_flow_table(priv, ai);

	return err;
}

static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
@@ -725,7 +821,7 @@ static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
	if (!g)
		return -ENOMEM;

	g[0].log_sz = 2;
	g[0].log_sz = 3;
	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
			 outer_headers.ethertype);
+106 −28

File changed.

Preview size limit exceeded, changes collapsed.

Loading