
Commit e8e96081 authored by David S. Miller

Merge tag 'mlx5-fixes-2018-04-25' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux



Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2018-04-26

This pull request includes fixes for mlx5 core and netdev driver.

Please pull and let me know if there are any problems.

For -stable v4.12
    net/mlx5e: TX, Use correct counter in dma_map error flow
For -stable v4.13
    net/mlx5: Avoid cleaning flow steering table twice during error flow
For -stable v4.14
    net/mlx5e: Allow offloading ipv4 header re-write for icmp
For -stable v4.15
    net/mlx5e: DCBNL fix min inline header size for dscp
For -stable v4.16
    net/mlx5: Fix mlx5_get_vector_affinity function
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1da9a586 202854e9
+1 −1
@@ -4757,7 +4757,7 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 
-	return mlx5_get_vector_affinity(dev->mdev, comp_vector);
+	return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector);
 }
 
 /* The mlx5_ib_multiport_mutex should be held when calling this function */
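For context, the only change here is the call to the renamed helper; presumably mlx5_get_vector_affinity_hint returns the affinity hint the driver itself recorded for the completion vector rather than asking the IRQ layer. A minimal sketch of that kind of helper, with made-up type and field names (not necessarily the driver's actual layout):

#include <linux/cpumask.h>

/* Illustrative only: a driver that records an affinity hint per completion
 * vector can hand that mask back directly instead of querying the IRQ layer.
 * All names in this sketch are made up.
 */
struct example_irq_info {
	struct cpumask mask;			/* hint recorded at IRQ setup time */
};

struct example_core_dev {
	struct example_irq_info *irq_info;	/* one entry per completion vector */
};

static inline const struct cpumask *
example_get_vector_affinity_hint(struct example_core_dev *dev, int vector)
{
	return &dev->irq_info[vector].mask;
}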
+5 −3
@@ -1007,12 +1007,14 @@ static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
 
 	mutex_lock(&priv->state_lock);
 
-	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
-		goto out;
-
 	new_channels.params = priv->channels.params;
 	mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params);
 
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		priv->channels.params = new_channels.params;
+		goto out;
+	}
+
 	/* Skip if tx_min_inline is the same */
 	if (new_channels.params.tx_min_inline_mode ==
 	    priv->channels.params.tx_min_inline_mode)
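The reordering computes the new parameters before checking whether the channels are open, so a DSCP-driven change to the minimum inline mode is recorded even while the interface is down instead of being dropped by the early goto out. A stand-alone sketch of that pattern, with illustrative names rather than the driver's:

#include <stdbool.h>

struct example_params {
	int tx_min_inline_mode;
};

struct example_priv {
	bool opened;			/* are channels currently running? */
	struct example_params params;	/* active (or last stored) parameters */
};

/* Recompute parameters first, then either apply them live or just store them
 * so they take effect on the next open. Illustration only.
 */
static void example_update_params(struct example_priv *priv,
				  struct example_params new_params)
{
	if (!priv->opened) {
		priv->params = new_params;	/* remember for the next open */
		return;
	}

	if (new_params.tx_min_inline_mode == priv->params.tx_min_inline_mode)
		return;				/* unchanged, skip the switch */

	/* ... reconfigure the running channels here ... */
	priv->params = new_params;
}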
+3 −2
@@ -877,13 +877,14 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
 };
 
 static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
-				   struct mlx5e_params *params)
+				   struct mlx5e_params *params, u16 mtu)
 {
 	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
 					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
 					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
 
 	params->hard_mtu    = MLX5E_ETH_HARD_MTU;
+	params->sw_mtu      = mtu;
 	params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
 	params->rq_wq_type  = MLX5_WQ_TYPE_LINKED_LIST;
 	params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE;
@@ -931,7 +932,7 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
 
 	priv->channels.params.num_channels = profile->max_nch(mdev);
 
-	mlx5e_build_rep_params(mdev, &priv->channels.params);
+	mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
 	mlx5e_build_rep_netdev(netdev);
 
 	mlx5e_timestamp_init(priv);
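The representor path now seeds params->sw_mtu from the netdev MTU at init time; previously it was left unset. A minimal sketch of the idea with hypothetical names (the driver's actual consumers of sw_mtu are not shown, and the overhead constant is invented):

struct example_rep_params {
	unsigned int hard_mtu;	/* fixed overhead the hardware always needs */
	unsigned int sw_mtu;	/* MTU configured on the net_device */
};

/* Seed the parameter struct from the netdev MTU at init time so anything that
 * later sizes buffers from sw_mtu sees a real value instead of zero.
 */
static void example_build_rep_params(struct example_rep_params *params,
				     unsigned int netdev_mtu)
{
	params->hard_mtu = 22;		/* hypothetical header + FCS overhead */
	params->sw_mtu   = netdev_mtu;
}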
+2 −1
@@ -1864,7 +1864,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
 	}
 
 	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
-	if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
+	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
 		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
 		return false;
 	}
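With ICMP added to the allowed protocols, an IPv4 header rewrite on ICMP traffic is no longer rejected for offload. A self-contained sketch of the same gating check (everything except the IPPROTO_* constants is illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <netinet/in.h>		/* IPPROTO_TCP, IPPROTO_UDP, IPPROTO_ICMP */

/* Reject offload when a rule rewrites IP header fields for an inner protocol
 * that is not on the allowed list; ICMP is now permitted alongside TCP/UDP.
 */
static bool example_ip_rewrite_offloadable(bool modify_ip_header, int ip_proto)
{
	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
		printf("can't offload re-write of ip proto %d\n", ip_proto);
		return false;
	}
	return true;
}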
+10 −10
@@ -255,7 +255,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
 					  DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
-			return -ENOMEM;
+			goto dma_unmap_wqe_err;
 
 		dseg->addr       = cpu_to_be64(dma_addr);
 		dseg->lkey       = sq->mkey_be;
@@ -273,7 +273,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
 					    DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
-			return -ENOMEM;
+			goto dma_unmap_wqe_err;
 
 		dseg->addr       = cpu_to_be64(dma_addr);
 		dseg->lkey       = sq->mkey_be;
@@ -285,6 +285,10 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	}
 
 	return num_dma;
+
+dma_unmap_wqe_err:
+	mlx5e_dma_unmap_wqe_err(sq, num_dma);
+	return -ENOMEM;
 }
 
 static inline void
@@ -380,17 +384,15 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
 					  (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
 	if (unlikely(num_dma < 0))
-		goto dma_unmap_wqe_err;
+		goto err_drop;
 
 	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
 			     num_bytes, num_dma, wi, cseg);
 
 	return NETDEV_TX_OK;
 
-dma_unmap_wqe_err:
+err_drop:
 	sq->stats.dropped++;
-	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
-
 	dev_kfree_skb_any(skb);
 
 	return NETDEV_TX_OK;
@@ -645,17 +647,15 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
 					  (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
 	if (unlikely(num_dma < 0))
-		goto dma_unmap_wqe_err;
+		goto err_drop;
 
 	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
 			     num_bytes, num_dma, wi, cseg);
 
 	return NETDEV_TX_OK;
 
-dma_unmap_wqe_err:
+err_drop:
 	sq->stats.dropped++;
-	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
-
 	dev_kfree_skb_any(skb);
 
 	return NETDEV_TX_OK;
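Taken together, the en_tx.c hunks move the unmapping of already-mapped segments into mlx5e_txwqe_build_dsegs, where the local num_dma counter is accurate at the point of failure; the callers had been unmapping with wi->num_dma, which is not yet updated when the helper fails (hence "use correct counter" in the fix title). A self-contained sketch of the unwind-on-partial-failure pattern, with made-up map/unmap helpers:

#include <stdbool.h>

#define EXAMPLE_MAX_SEGS 8

/* Stand-ins for mapping/unmapping one DMA segment; illustration only. */
static bool example_map_seg(int idx)
{
	return idx != 3;		/* pretend the 4th mapping fails */
}

static void example_unmap_seg(int idx)
{
	(void)idx;
}

/* Map a run of segments; on failure, unmap exactly the segments this function
 * mapped (tracked by the local counter) before reporting the error, so the
 * caller only has to drop the packet and bump its drop counter.
 */
static int example_map_segs(int nsegs)
{
	int num_mapped = 0;
	int i;

	for (i = 0; i < nsegs && i < EXAMPLE_MAX_SEGS; i++) {
		if (!example_map_seg(i))
			goto unmap_err;
		num_mapped++;
	}
	return num_mapped;

unmap_err:
	while (num_mapped--)
		example_unmap_seg(num_mapped);
	return -1;
}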