
Commit 8b4837c8 authored by David S. Miller

Merge branch 'mlx5-next'



Saeed Mahameed says:

====================
mlx5 driver updates

This series includes some bug fixes and updates for the mlx5 core
and ethernet driver.

From Gal, two fixes that protect the CQ moderation update flows
when updating is not allowed.

From Moshe, two fixes for the core and ethernet driver covering
non-cached (NC) and write-combining (WC) buffer mappings,
which prevent the driver from double-mapping memory.

From Or, reduce the firmware command completion timeout.

From Tariq, several trivial fixes.

Changes from v0:
	- "Fix global UAR mapping" commit messages updated to explain ARCH_HAS_IOREMAP_WC usage.
	- rebased to commit 8d3f2806 'Merge branch ethtool-ksettings'

Changes from v1:
	- Removed ARCH_HAS_IOREMAP_WC config flag from "Fix global UAR mapping" commit, as it was not accurate to use it.
	- Squashed "Fix global UAR mapping" and "net/mlx5: Avoid double mapping of io mapped memory"
	- Added more info for "Fix global UAR mapping" in commit message

Changes from v2:
	- None. Resubmission per Dave's request due to two parallel submissions to the mlx5 driver.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4f25a111 0ba42241
+12 −15
@@ -260,26 +260,28 @@ static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
 	"tso_bytes",
 	"tso_inner_packets",
 	"tso_inner_bytes",
-	"csum_offload_none",
 	"csum_offload_inner",
+	"nop",
+	"csum_offload_none",
 	"stopped",
 	"wake",
 	"dropped",
-	"nop"
 };

 struct mlx5e_sq_stats {
+	/* commonly accessed in data path */
 	u64 packets;
 	u64 tso_packets;
 	u64 tso_bytes;
 	u64 tso_inner_packets;
 	u64 tso_inner_bytes;
-	u64 csum_offload_none;
 	u64 csum_offload_inner;
+	u64 nop;
+	/* less likely accessed in data path */
+	u64 csum_offload_none;
 	u64 stopped;
 	u64 wake;
 	u64 dropped;
-	u64 nop;
 #define NUM_SQ_STATS 11
 };

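The reorder above is a cacheline-locality change: the counters the TX fast path bumps on every packet now sit at the front of the struct, ahead of the rarely-written ones. A minimal, runnable sketch of the idea, using a userspace mirror of the struct (the 64-byte cacheline size is an assumption here, and this is not the driver's code):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* userspace mirror of mlx5e_sq_stats after the reorder */
struct sq_stats {
	/* commonly accessed in data path */
	uint64_t packets;
	uint64_t tso_packets;
	uint64_t tso_bytes;
	uint64_t tso_inner_packets;
	uint64_t tso_inner_bytes;
	uint64_t csum_offload_inner;
	uint64_t nop;
	/* less likely accessed in data path */
	uint64_t csum_offload_none;
	uint64_t stopped;
	uint64_t wake;
	uint64_t dropped;
};

int main(void)
{
	/* hot counters end at byte 56: all inside one 64-byte line */
	printf("last hot field (nop) at offset %zu\n",
	       offsetof(struct sq_stats, nop));
	printf("first cold field (csum_offload_none) at offset %zu\n",
	       offsetof(struct sq_stats, csum_offload_none));
	return 0;
}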
@@ -386,6 +388,7 @@ struct mlx5e_sq_dma {

 enum {
 	MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+	MLX5E_SQ_STATE_BF_ENABLE,
 };

 struct mlx5e_sq {
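For context, MLX5E_SQ_STATE_BF_ENABLE is a bit number, not a mask: the SQ packs its flags into a single unsigned long manipulated with the kernel's set_bit()/test_bit(). A small userspace stand-in for those helpers (the helper names below are illustrative, not the kernel's):

#include <stdio.h>

enum {
	SQ_STATE_WAKE_TXQ_ENABLE,	/* bit 0 */
	SQ_STATE_BF_ENABLE,		/* bit 1 */
};

static void set_bit_ul(int nr, unsigned long *word)
{
	*word |= 1UL << nr;
}

static int test_bit_ul(int nr, const unsigned long *word)
{
	return (*word >> nr) & 1;
}

int main(void)
{
	unsigned long state = 0;

	set_bit_ul(SQ_STATE_BF_ENABLE, &state);
	printf("BF enabled: %d\n", test_bit_ul(SQ_STATE_BF_ENABLE, &state));
	return 0;
}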
@@ -414,7 +417,6 @@ struct mlx5e_sq {
 	struct mlx5_wq_cyc         wq;
 	u32                        dma_fifo_mask;
 	void __iomem              *uar_map;
-	void __iomem              *uar_bf_map;
 	struct netdev_queue       *txq;
 	u32                        sqn;
 	u16                        bf_buf_size;
@@ -555,7 +557,6 @@ struct mlx5e_priv {
 	struct mlx5e_vxlan_db      vxlan;

 	struct mlx5e_params        params;
-	spinlock_t                 async_events_spinlock; /* sync hw events */
 	struct work_struct         update_carrier_work;
 	struct work_struct         set_rx_mode_work;
 	struct delayed_work        update_stats_work;
@@ -663,17 +664,13 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
 	 * doorbell
 	 */
 	wmb();
-
-	if (bf_sz) {
-		__iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
-
+	if (bf_sz)
+		__iowrite64_copy(sq->uar_map + ofst, &wqe->ctrl, bf_sz);
+	else
+		mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
 	/* flush the write-combining mapped buffer */
 	wmb();
-
-	} else {
-		mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
-	}

 	sq->bf_offset ^= sq->bf_buf_size;
 }

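The restructured function keeps the final `sq->bf_offset ^= sq->bf_buf_size;` toggle. Why XOR works as the BlueFlame bounce: the register is split into two halves of bf_buf_size bytes each, and XOR-ing the offset with the half size alternates between them, so back-to-back doorbells never overwrite a half the NIC may still be reading. A runnable illustration (log_bf_reg_size = 9 is an assumed example value, not a claim about any particular device):

#include <stdio.h>

int main(void)
{
	unsigned int bf_buf_size = (1 << 9) / 2;	/* 256B per half */
	unsigned int bf_offset = 0;
	int i;

	for (i = 0; i < 4; i++) {
		printf("doorbell %d -> BF offset %u\n", i, bf_offset);
		bf_offset ^= bf_buf_size;	/* flip 0 <-> 256 */
	}
	return 0;
}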
+20 −7
@@ -211,12 +211,13 @@ static void mlx5e_get_strings(struct net_device *dev,
 				sprintf(data + (idx++) * ETH_GSTRING_LEN,
 					"rx%d_%s", i, rq_stats_strings[j]);

-		for (i = 0; i < priv->params.num_channels; i++)
 		for (tc = 0; tc < priv->params.num_tc; tc++)
+			for (i = 0; i < priv->params.num_channels; i++)
 				for (j = 0; j < NUM_SQ_STATS; j++)
 					sprintf(data +
 					      (idx++) * ETH_GSTRING_LEN,
-						"tx%d_%d_%s", i, tc,
+					      "tx%d_%s",
+					      priv->channeltc_to_txq_map[i][tc],
 					      sq_stats_strings[j]);
 		break;
 	}
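The loop swap changes more than iteration order: the stat prefix drops from "tx<channel>_<tc>_" to "tx<txq>_", where the txq id comes from channeltc_to_txq_map, so each string names the actual netdev TX queue. A runnable sketch of the resulting naming; the flattening formula txq = i + tc * num_channels is an assumption about how mlx5e fills that map, not something shown in this diff:

#include <stdio.h>

int main(void)
{
	int num_channels = 4, num_tc = 2;
	int tc, i;

	for (tc = 0; tc < num_tc; tc++)
		for (i = 0; i < num_channels; i++)
			printf("channel %d, tc %d -> tx%d_<stat>\n",
			       i, tc, i + tc * num_channels);
	return 0;
}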
@@ -249,8 +250,8 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
 						&priv->state) ? 0 :
 				       ((u64 *)&priv->channel[i]->rq.stats)[j];

-	for (i = 0; i < priv->params.num_channels; i++)
 	for (tc = 0; tc < priv->params.num_tc; tc++)
+		for (i = 0; i < priv->params.num_channels; i++)
 			for (j = 0; j < NUM_SQ_STATS; j++)
 				data[idx++] = !test_bit(MLX5E_STATE_OPENED,
 							&priv->state) ? 0 :
@@ -399,6 +400,9 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);

+	if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
+		return -ENOTSUPP;
+
 	coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation_usec;
 	coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
 	coal->tx_coalesce_usecs       = priv->params.tx_cq_moderation_usec;
@@ -416,11 +420,18 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
 	int tc;
 	int i;

+	if (!MLX5_CAP_GEN(mdev, cq_moderation))
+		return -ENOTSUPP;
+
+	mutex_lock(&priv->state_lock);
 	priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
 	priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
 	priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
 	priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;

+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+		goto out;
+
 	for (i = 0; i < priv->params.num_channels; ++i) {
 		c = priv->channel[i];

@@ -436,6 +447,8 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
 					       coal->rx_max_coalesced_frames);
 	}

+out:
+	mutex_unlock(&priv->state_lock);
 	return 0;
 }

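Taken together, the two set_coalesce hunks establish a pattern: always cache the requested parameters under state_lock, but only program live CQs when the netdev is actually open. A userspace model of that pattern (all names are illustrative, not the driver's):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static bool opened;
static int rx_usecs;

static void apply_to_hw(int usecs)
{
	printf("programming CQ moderation: %d usecs\n", usecs);
}

static int set_coalesce(int usecs)
{
	pthread_mutex_lock(&state_lock);
	rx_usecs = usecs;	/* always cache the requested value */
	if (opened)		/* ...but only touch CQs that exist */
		apply_to_hw(rx_usecs);
	pthread_mutex_unlock(&state_lock);
	return 0;
}

int main(void)
{
	set_coalesce(16);	/* closed: cached only */
	pthread_mutex_lock(&state_lock);
	opened = true;		/* what open would flip under the lock */
	pthread_mutex_unlock(&state_lock);
	set_coalesce(32);	/* open: programmed to "hardware" */
	return 0;
}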
+34 −30
@@ -275,9 +275,14 @@ static void mlx5e_update_stats_work(struct work_struct *work)
 	mutex_unlock(&priv->state_lock);
 }

-static void __mlx5e_async_event(struct mlx5e_priv *priv,
-				enum mlx5_dev_event event)
+static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
+			      enum mlx5_dev_event event, unsigned long param)
 {
+	struct mlx5e_priv *priv = vpriv;
+
+	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+		return;
+
 	switch (event) {
 	case MLX5_DEV_EVENT_PORT_UP:
 	case MLX5_DEV_EVENT_PORT_DOWN:
@@ -289,17 +294,6 @@ static void __mlx5e_async_event(struct mlx5e_priv *priv,
 	}
 }

-static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
-			      enum mlx5_dev_event event, unsigned long param)
-{
-	struct mlx5e_priv *priv = vpriv;
-
-	spin_lock(&priv->async_events_spinlock);
-	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
-		__mlx5e_async_event(priv, event);
-	spin_unlock(&priv->async_events_spinlock);
-}
-
 static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
 {
 	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
@@ -307,9 +301,8 @@ static void mlx5e_enable_async_events(struct mlx5e_priv *priv)

 static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
 {
-	spin_lock_irq(&priv->async_events_spinlock);
 	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
-	spin_unlock_irq(&priv->async_events_spinlock);
+	synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
 }

 #define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
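The removed spinlock pair is replaced by a cheaper quiescence scheme: the event handler tests the bit locklessly, and the disable path clears the bit and then waits out any in-flight handler via synchronize_irq() on the async EQ vector. A userspace model of the same idea, with a thread standing in for the IRQ handler and pthread_join() standing in for synchronize_irq() (a sketch of the pattern only):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic bool events_enabled = true;

/* stands in for mlx5e_async_event(): test the flag, no lock taken */
static void *event_handler(void *arg)
{
	if (events_enabled)
		puts("handling async event");
	return NULL;
}

int main(void)
{
	pthread_t irq;

	pthread_create(&irq, NULL, event_handler, NULL);
	events_enabled = false;		/* the clear_bit() step */
	pthread_join(irq, NULL);	/* the synchronize_irq() step */
	puts("after the join, no handler can still be running");
	return 0;
}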
@@ -555,7 +548,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 	int txq_ix;
 	int err;

-	err = mlx5_alloc_map_uar(mdev, &sq->uar);
+	err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
 	if (err)
 		return err;

@@ -567,8 +560,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 		goto err_unmap_free_uar;

 	sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
+	if (sq->uar.bf_map) {
+		set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
+		sq->uar_map = sq->uar.bf_map;
+	} else {
 		sq->uar_map = sq->uar.map;
-	sq->uar_bf_map  = sq->uar.bf_map;
+	}
 	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
 	sq->max_inline  = param->max_inline;

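This hunk encodes the fallback from Moshe's UAR fix: the new boolean passed to mlx5_alloc_map_uar() requests a write-combining (BlueFlame) mapping, and when that mapping is unavailable bf_map stays NULL, so the SQ quietly falls back to plain doorbells through the regular non-cached mapping. A tiny sketch of that decision (struct layout and addresses here are illustrative, not the driver's):

#include <stdio.h>

struct uar {
	void *map;	/* non-cached mapping, always present */
	void *bf_map;	/* write-combining mapping, NULL on failure */
};

int main(void)
{
	struct uar uar = { .map = (void *)0x1000, .bf_map = NULL };
	int bf = uar.bf_map != NULL;

	printf("BlueFlame %s, doorbells via %p\n",
	       bf ? "enabled" : "disabled", bf ? uar.bf_map : uar.map);
	return 0;
}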
@@ -877,12 +874,10 @@ static int mlx5e_open_cq(struct mlx5e_channel *c,
 	if (err)
 		goto err_destroy_cq;

-	err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
+	if (MLX5_CAP_GEN(mdev, cq_moderation))
+		mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
 					       moderation_usecs,
 					       moderation_frames);
-	if (err)
-		goto err_destroy_cq;
-
 	return 0;

 err_destroy_cq:
@@ -1071,6 +1066,15 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
 	param->wq.linear = 1;
 }

+static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
+{
+	void *rqc = param->rqc;
+	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+	MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
+}
+
 static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
 				 struct mlx5e_sq_param *param)
 {
@@ -1458,8 +1462,8 @@ int mlx5e_open_locked(struct net_device *netdev)
 		goto err_close_channels;
 	}

-	mlx5e_update_carrier(priv);
 	mlx5e_redirect_rqts(priv);
+	mlx5e_update_carrier(priv);
 	mlx5e_timestamp_init(priv);

 	schedule_delayed_work(&priv->update_stats_work, 0);
@@ -1498,8 +1502,8 @@ int mlx5e_close_locked(struct net_device *netdev)
 	clear_bit(MLX5E_STATE_OPENED, &priv->state);

 	mlx5e_timestamp_cleanup(priv);
-	mlx5e_redirect_rqts(priv);
 	netif_carrier_off(priv->netdev);
+	mlx5e_redirect_rqts(priv);
 	mlx5e_close_channels(priv);

 	return 0;
@@ -1581,8 +1585,7 @@ static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)

 	memset(&cq_param, 0, sizeof(cq_param));
 	memset(&rq_param, 0, sizeof(rq_param));
-	mlx5e_build_rx_cq_param(priv, &cq_param);
-	mlx5e_build_rq_param(priv, &rq_param);
+	mlx5e_build_drop_rq_param(&rq_param);

 	err = mlx5e_create_drop_cq(priv, cq, &cq_param);
 	if (err)
@@ -2217,6 +2220,8 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 	}
 	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
 		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
+	if (!MLX5_CAP_GEN(mdev, cq_moderation))
+		mlx5_core_warn(mdev, "CQ modiration is not supported\n");

 	return 0;
 }
@@ -2290,7 +2295,6 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
 	mlx5e_ets_init(priv);
 #endif

-	spin_lock_init(&priv->async_events_spinlock);
 	mutex_init(&priv->state_lock);

 	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
@@ -2418,7 +2422,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)

 	priv = netdev_priv(netdev);

-	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
+	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
 	if (err) {
 		mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
 		goto err_free_netdev;
+1 −1
@@ -303,7 +303,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
 		int bf_sz = 0;

-		if (bf && sq->uar_bf_map)
+		if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
 			bf_sz = wi->num_wqebbs << 3;

 		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
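One line worth decoding here: `bf_sz = wi->num_wqebbs << 3`. __iowrite64_copy() takes its length as a count of 64-bit words, and each WQE basic block (WQEBB) is 64 bytes, i.e. eight quadwords, hence the shift by three. A runnable check of the arithmetic (the count is illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int num_wqebbs = 2;		/* illustrative count */
	unsigned int bf_sz = num_wqebbs << 3;	/* blocks -> quadwords */

	printf("%u WQEBB(s) = %u bytes = %u 64-bit words\n",
	       num_wqebbs, num_wqebbs * 64, bf_sz);
	return 0;
}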
+5 −0
@@ -442,6 +442,11 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 }
 EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);

+u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx)
+{
+	return dev->priv.msix_arr[MLX5_EQ_VEC_ASYNC].vector;
+}
+
 int mlx5_eq_init(struct mlx5_core_dev *dev)
 {
 	int err;