Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 12d4ae9d authored by David S. Miller
Browse files

Merge branch 'mlx-next'



Or Gerlitz says:

====================
Mellanox NIC drivers update, June 23 2015

This series has two fixes from Eran to his recent SRIOV counters work in
mlx4 and a few more updates from Saeed and Achiad to the mlx5 Ethernet
code. All fixes here relate to net-next code, so no need for -stable.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0a51f76e 99611ba1
Loading
Loading
Loading
Loading
+14 −0
Original line number Original line Diff line number Diff line
@@ -203,6 +203,20 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
		priv->port_stats.tso_packets       += ring->tso_packets;
		priv->port_stats.tso_packets       += ring->tso_packets;
		priv->port_stats.xmit_more         += ring->xmit_more;
		priv->port_stats.xmit_more         += ring->xmit_more;
	}
	}
	if (mlx4_is_master(mdev->dev)) {
		stats->rx_packets = en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
						   &mlx4_en_stats->RTOT_prio_1,
						   NUM_PRIORITIES);
		stats->tx_packets = en_stats_adder(&mlx4_en_stats->TTOT_prio_0,
						   &mlx4_en_stats->TTOT_prio_1,
						   NUM_PRIORITIES);
		stats->rx_bytes = en_stats_adder(&mlx4_en_stats->ROCT_prio_0,
						 &mlx4_en_stats->ROCT_prio_1,
						 NUM_PRIORITIES);
		stats->tx_bytes = en_stats_adder(&mlx4_en_stats->TOCT_prio_0,
						 &mlx4_en_stats->TOCT_prio_1,
						 NUM_PRIORITIES);
	}


	/* net device stats */
	/* net device stats */
	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
+2 −3
Original line number Original line Diff line number Diff line
@@ -79,8 +79,7 @@ struct mlx4_en_flow_stats_tx {


#define NUM_FLOW_STATS (NUM_FLOW_STATS_RX + NUM_FLOW_STATS_TX + \
#define NUM_FLOW_STATS (NUM_FLOW_STATS_RX + NUM_FLOW_STATS_TX + \
			NUM_FLOW_PRIORITY_STATS_TX + \
			NUM_FLOW_PRIORITY_STATS_TX + \
			NUM_FLOW_PRIORITY_STATS_RX + \
			NUM_FLOW_PRIORITY_STATS_RX)
			NUM_PF_STATS)


struct mlx4_en_stat_out_flow_control_mbox {
struct mlx4_en_stat_out_flow_control_mbox {
	/* Total number of PAUSE frames received from the far-end port */
	/* Total number of PAUSE frames received from the far-end port */
@@ -108,7 +107,7 @@ enum {
};
};


#define NUM_ALL_STATS	(NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \
#define NUM_ALL_STATS	(NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \
			 NUM_FLOW_STATS + NUM_PERF_STATS)
			 NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS)


#define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \
#define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \
				  sizeof(((struct net_device_stats *)0)->n))
				  sizeof(((struct net_device_stats *)0)->n))
+2 −4
Original line number Original line Diff line number Diff line
@@ -208,7 +208,6 @@ enum cq_flags {
struct mlx5e_cq {
struct mlx5e_cq {
	/* data path - accessed per cqe */
	/* data path - accessed per cqe */
	struct mlx5_cqwq           wq;
	struct mlx5_cqwq           wq;
	void                      *sqrq;
	unsigned long              flags;
	unsigned long              flags;


	/* data path - accessed per napi poll */
	/* data path - accessed per napi poll */
@@ -316,6 +315,7 @@ struct mlx5e_channel {
	__be32                     mkey_be;
	__be32                     mkey_be;
	u8                         num_tc;
	u8                         num_tc;
	unsigned long              flags;
	unsigned long              flags;
	int                        tc_to_txq_map[MLX5E_MAX_NUM_TC];


	/* control */
	/* control */
	struct mlx5e_priv         *priv;
	struct mlx5e_priv         *priv;
@@ -379,10 +379,9 @@ struct mlx5e_flow_table {


struct mlx5e_priv {
struct mlx5e_priv {
	/* priv data path fields - start */
	/* priv data path fields - start */
	int                        order_base_2_num_channels;
	int                        queue_mapping_channel_mask;
	int                        num_tc;
	int                        num_tc;
	int                        default_vlan_prio;
	int                        default_vlan_prio;
	struct mlx5e_sq            **txq_to_sq_map;
	/* priv data path fields - end */
	/* priv data path fields - end */


	unsigned long              state;
	unsigned long              state;
@@ -460,7 +459,6 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback);
		       void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev);


void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
+27 −18
Original line number Original line Diff line number Diff line
@@ -345,7 +345,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
	MLX5_SET(rqc,  rqc, cqn,		c->rq.cq.mcq.cqn);
	MLX5_SET(rqc,  rqc, cqn,		c->rq.cq.mcq.cqn);
	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
	MLX5_SET(rqc,  rqc, flush_in_error_en,	1);
	MLX5_SET(rqc,  rqc, flush_in_error_en,	1);
	MLX5_SET(wq,   wq,  wq_type,		MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
						PAGE_SHIFT);
						PAGE_SHIFT);
	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);
	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);
@@ -496,6 +495,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,


	void *sqc = param->sqc;
	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int txq_ix;
	int err;
	int err;


	err = mlx5_alloc_map_uar(mdev, &sq->uar);
	err = mlx5_alloc_map_uar(mdev, &sq->uar);
@@ -515,14 +515,15 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
	if (err)
	if (err)
		goto err_sq_wq_destroy;
		goto err_sq_wq_destroy;


	sq->txq = netdev_get_tx_queue(priv->netdev,
	txq_ix = c->ix + tc * priv->params.num_channels;
				      c->ix + tc * priv->params.num_channels);
	sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);


	sq->pdev    = c->pdev;
	sq->pdev    = c->pdev;
	sq->mkey_be = c->mkey_be;
	sq->mkey_be = c->mkey_be;
	sq->channel = c;
	sq->channel = c;
	sq->tc      = tc;
	sq->tc      = tc;
	sq->edge    = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
	sq->edge    = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
	priv->txq_to_sq_map[txq_ix] = sq;


	return 0;
	return 0;


@@ -852,8 +853,6 @@ static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
				    priv->params.tx_cq_moderation_pkts);
				    priv->params.tx_cq_moderation_pkts);
		if (err)
		if (err)
			goto err_close_tx_cqs;
			goto err_close_tx_cqs;

		c->sq[tc].cq.sqrq = &c->sq[tc];
	}
	}


	return 0;
	return 0;
@@ -902,6 +901,15 @@ static void mlx5e_close_sqs(struct mlx5e_channel *c)
		mlx5e_close_sq(&c->sq[tc]);
		mlx5e_close_sq(&c->sq[tc]);
}
}


/* Populate the per-channel TC -> netdev TX queue index map.
 * Queues are laid out as num_channels consecutive queues per traffic
 * class, so TC t of channel c->ix maps to queue c->ix + t * num_channels.
 */
static void mlx5e_build_tc_to_txq_map(struct mlx5e_channel *c,
				      int num_channels)
{
	int tc;

	for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++)
		c->tc_to_txq_map[tc] = c->ix + tc * num_channels;
}

static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
			      struct mlx5e_channel **cp)
@@ -923,6 +931,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
	c->mkey_be  = cpu_to_be32(priv->mr.key);
	c->mkey_be  = cpu_to_be32(priv->mr.key);
	c->num_tc   = priv->num_tc;
	c->num_tc   = priv->num_tc;


	mlx5e_build_tc_to_txq_map(c, priv->params.num_channels);

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);


	err = mlx5e_open_tx_cqs(c, cparam);
	err = mlx5e_open_tx_cqs(c, cparam);
@@ -934,7 +944,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			    priv->params.rx_cq_moderation_pkts);
			    priv->params.rx_cq_moderation_pkts);
	if (err)
	if (err)
		goto err_close_tx_cqs;
		goto err_close_tx_cqs;
	c->rq.cq.sqrq = &c->rq;


	napi_enable(&c->napi);
	napi_enable(&c->napi);


@@ -1050,14 +1059,18 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
static int mlx5e_open_channels(struct mlx5e_priv *priv)
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
{
	struct mlx5e_channel_param cparam;
	struct mlx5e_channel_param cparam;
	int err;
	int err = -ENOMEM;
	int i;
	int i;
	int j;
	int j;


	priv->channel = kcalloc(priv->params.num_channels,
	priv->channel = kcalloc(priv->params.num_channels,
				sizeof(struct mlx5e_channel *), GFP_KERNEL);
				sizeof(struct mlx5e_channel *), GFP_KERNEL);
	if (!priv->channel)

		return -ENOMEM;
	priv->txq_to_sq_map = kcalloc(priv->params.num_channels * priv->num_tc,
				      sizeof(struct mlx5e_sq *), GFP_KERNEL);

	if (!priv->channel || !priv->txq_to_sq_map)
		goto err_free_txq_to_sq_map;


	mlx5e_build_channel_param(priv, &cparam);
	mlx5e_build_channel_param(priv, &cparam);
	for (i = 0; i < priv->params.num_channels; i++) {
	for (i = 0; i < priv->params.num_channels; i++) {
@@ -1078,6 +1091,8 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv)
	for (i--; i >= 0; i--)
	for (i--; i >= 0; i--)
		mlx5e_close_channel(priv->channel[i]);
		mlx5e_close_channel(priv->channel[i]);


err_free_txq_to_sq_map:
	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);
	kfree(priv->channel);


	return err;
	return err;
@@ -1090,6 +1105,7 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv)
	for (i = 0; i < priv->params.num_channels; i++)
	for (i = 0; i < priv->params.num_channels; i++)
		mlx5e_close_channel(priv->channel[i]);
		mlx5e_close_channel(priv->channel[i]);


	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);
	kfree(priv->channel);
}
}


@@ -1384,8 +1400,7 @@ int mlx5e_open_locked(struct net_device *netdev)
	int num_txqs;
	int num_txqs;
	int err;
	int err;


	num_txqs = roundup_pow_of_two(priv->params.num_channels) *
	num_txqs = priv->params.num_channels * priv->params.num_tc;
		   priv->params.num_tc;
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);


@@ -1693,9 +1708,6 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
	priv->mdev                         = mdev;
	priv->mdev                         = mdev;
	priv->netdev                       = netdev;
	priv->netdev                       = netdev;
	priv->params.num_channels          = num_comp_vectors;
	priv->params.num_channels          = num_comp_vectors;
	priv->order_base_2_num_channels    = order_base_2(num_comp_vectors);
	priv->queue_mapping_channel_mask   =
		roundup_pow_of_two(num_comp_vectors) - 1;
	priv->num_tc                       = priv->params.num_tc;
	priv->num_tc                       = priv->params.num_tc;
	priv->default_vlan_prio            = priv->params.default_vlan_prio;
	priv->default_vlan_prio            = priv->params.default_vlan_prio;


@@ -1723,7 +1735,6 @@ static void mlx5e_build_netdev(struct net_device *netdev)


	if (priv->num_tc > 1) {
	if (priv->num_tc > 1) {
		mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
		mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
		mlx5e_netdev_ops.ndo_start_xmit   = mlx5e_xmit_multi_tc;
	}
	}


	netdev->netdev_ops        = &mlx5e_netdev_ops;
	netdev->netdev_ops        = &mlx5e_netdev_ops;
@@ -1793,9 +1804,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
	if (mlx5e_check_required_hca_cap(mdev))
	if (mlx5e_check_required_hca_cap(mdev))
		return NULL;
		return NULL;


	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv);
				    roundup_pow_of_two(ncv) * MLX5E_MAX_NUM_TC,
				    ncv);
	if (!netdev) {
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
		return NULL;
+4 −1
Original line number Original line Diff line number Diff line
@@ -191,7 +191,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,


bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
{
	struct mlx5e_rq *rq = cq->sqrq;
	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
	int i;
	int i;


	/* avoid accessing cq (dma coherent memory) if not needed */
	/* avoid accessing cq (dma coherent memory) if not needed */
@@ -209,10 +209,13 @@ bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
		if (!cqe)
		if (!cqe)
			break;
			break;


		mlx5_cqwq_pop(&cq->wq);

		wqe_counter_be = cqe->wqe_counter;
		wqe_counter_be = cqe->wqe_counter;
		wqe_counter    = be16_to_cpu(wqe_counter_be);
		wqe_counter    = be16_to_cpu(wqe_counter_be);
		wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
		wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
		skb            = rq->skb[wqe_counter];
		skb            = rq->skb[wqe_counter];
		prefetch(skb->data);
		rq->skb[wqe_counter] = NULL;
		rq->skb[wqe_counter] = NULL;


		dma_unmap_single(rq->pdev,
		dma_unmap_single(rq->pdev,
Loading