Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 32f16e14 authored by David S. Miller
Browse files

Merge branch 'mlx5-fixes'



Saeed Mahameed says:

====================
Mellanox 100G mlx5 fixes 2016-12-04

Some bug fixes for mlx5 core and mlx5e driver.

v1->v2:
 - replace "uint" with "unsigned int"
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents f85de666 c0f1147d
Loading
Loading
Loading
Loading
+0 −5
Original line number Diff line number Diff line
@@ -268,11 +268,6 @@ static void dump_buf(void *buf, int size, int data_only, int offset)
		pr_debug("\n");
}

enum {
	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
	MLX5_DRIVER_SYND = 0xbadd00de,
};

static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
+2 −2
Original line number Diff line number Diff line
@@ -241,7 +241,7 @@ struct mlx5e_tstamp {
};

enum {
	MLX5E_RQ_STATE_FLUSH,
	MLX5E_RQ_STATE_ENABLED,
	MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
	MLX5E_RQ_STATE_AM,
};
@@ -394,7 +394,7 @@ struct mlx5e_sq_dma {
};

enum {
	MLX5E_SQ_STATE_FLUSH,
	MLX5E_SQ_STATE_ENABLED,
	MLX5E_SQ_STATE_BF_ENABLE,
};

+9 −6
Original line number Diff line number Diff line
@@ -759,6 +759,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
	if (err)
		goto err_destroy_rq;

	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;
@@ -773,6 +774,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
	return 0;

err_disable_rq:
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	mlx5e_disable_rq(rq);
err_destroy_rq:
	mlx5e_destroy_rq(rq);
@@ -782,7 +784,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
	cancel_work_sync(&rq->am.work);

@@ -1006,7 +1008,6 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
	MLX5_SET(sqc,  sqc, min_wqe_inline_mode, sq->min_inline_mode);
	MLX5_SET(sqc,  sqc, state,		MLX5_SQC_STATE_RST);
	MLX5_SET(sqc,  sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);
	MLX5_SET(sqc,  sqc, flush_in_error_en,	1);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      sq->uar.index);
@@ -1083,6 +1084,7 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
	if (err)
		goto err_destroy_sq;

	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
			      false, 0);
	if (err)
@@ -1096,6 +1098,7 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
	return 0;

err_disable_sq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_destroy_sq(sq);
@@ -1112,7 +1115,7 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)

static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
	set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	/* prevent netif_tx_wake_queue */
	napi_synchronize(&sq->channel->napi);

@@ -3092,7 +3095,7 @@ static void mlx5e_tx_timeout(struct net_device *dev)
		if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		sched_work = true;
		set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
		clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
		netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
	}
@@ -3147,13 +3150,13 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
	for (i = 0; i < priv->params.num_channels; i++) {
		struct mlx5e_channel *c = priv->channel[i];

		set_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
		clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		napi_synchronize(&c->napi);
		/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */

		old_prog = xchg(&c->rq.xdp_prog, prog);

		clear_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		/* napi_schedule in case we have missed anything */
		set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
		napi_schedule(&c->napi);
+4 −4
Original line number Diff line number Diff line
@@ -340,7 +340,7 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
		sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
		sq->db.ico_wqe[pi].num_wqebbs = 1;
		mlx5e_send_nop(sq, true);
		mlx5e_send_nop(sq, false);
	}

	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
@@ -412,7 +412,7 @@ void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)

	clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);

	if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) {
	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) {
		mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
		return;
	}
@@ -445,7 +445,7 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
}

#define RQ_CANNOT_POST(rq) \
	(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state) || \
	(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state) || \
	 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))

bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
@@ -924,7 +924,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
	struct mlx5e_sq *xdp_sq = &rq->channel->xdp_sq;
	int work_done = 0;

	if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state)))
	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
		return 0;

	if (cq->decmprs_left)
+1 −1
Original line number Diff line number Diff line
@@ -409,7 +409,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)

	sq = container_of(cq, struct mlx5e_sq, cq);

	if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	npkts = 0;
Loading