Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 716dcaeb authored by David S. Miller's avatar David S. Miller
Browse files

Merge tag 'mlx5-updates-2017-01-24' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux



Saeed Mahameed says:

====================
mlx5-updates-2017-01-24

The first seven patches from Or Gerlitz in this series further enhance
the mlx5 SRIOV switchdev mode to support offloading IPv6 tunnels using the
TC tunnel key set (encap) and unset (decap) actions.

Or Gerlitz says:
========================
As part of doing this change, few cleanups are done in the IPv4 code,
later we move to use the full tunnel key info provided to the driver as
the key for our internal hashing which is used to identify cases where
the same tunnel is used for encapsulating multiple flows. As done in the
IPv4 case, the control path for offloading IPv6 tunnels uses route/neigh
lookups and construction of the IPv6 tunnel headers on the encap path and
matching on the outer headers in the decap path.

The last patch of the series enlarges the HW FDB size for the switchdev mode,
so it has now room to contain offloaded flows as many as min(max number
of HW flow counters supported, max HW table size supported).
========================

Following Or's series, you can find several patches handling various topics.

From Mohamad, add support for SRIOV VF min rate guarantee by using the
TSAR BW share weights mechanism.

From Or, two patches to enable Eth VFs to query their min-inline value from
user-space.
For that, we move a mlx5 low-level min-inline helper function from the mlx5
ethernet driver into the core driver and then use it in mlx5_ib to expose
the inline mode to RDMA applications through libmlx5.

From Kamal Heib, Reduce memory consumption on kdump kernel.

From Shaker Daibes, code reuse in the CQE compression control logic.
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents a08ef476 5eb0249b
Loading
Loading
Loading
Loading
+9 −0
Original line number Original line Diff line number Diff line
@@ -53,6 +53,7 @@
#include <linux/in.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"
#include "mlx5_ib.h"


#define DRIVER_NAME "mlx5_ib"
#define DRIVER_NAME "mlx5_ib"
@@ -1202,6 +1203,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
		resp.response_length += sizeof(resp.cmds_supp_uhw);
		resp.response_length += sizeof(resp.cmds_supp_uhw);
	}
	}


	if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
		if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
			mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
			resp.eth_min_inline++;
		}
		resp.response_length += sizeof(resp.eth_min_inline);
	}

	/*
	/*
	 * We don't want to expose information from the PCI bar that is located
	 * We don't want to expose information from the PCI bar that is located
	 * after 4096 bytes, so if the arch only supports larger pages, let's
	 * after 4096 bytes, so if the arch only supports larger pages, let's
+2 −7
Original line number Original line Diff line number Diff line
@@ -101,6 +101,7 @@


#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS         0x1
#define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET        128
#define MLX5E_TX_CQ_POLL_BUDGET        128
@@ -786,7 +787,7 @@ void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
			     struct ptp_clock_event *event);
			     struct ptp_clock_event *event);
int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val);
void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);


int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid);
			  u16 vid);
@@ -847,12 +848,6 @@ static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
	return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
	return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
}
}


static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
	return min_t(int, mdev->priv.eq_table.num_comp_vectors,
		     MLX5E_MAX_NUM_CHANNELS);
}

extern const struct ethtool_ops mlx5e_ethtool_ops;
extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
+5 −2
Original line number Original line Diff line number Diff line
@@ -106,11 +106,12 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
		return -ERANGE;
		return -ERANGE;
	}
	}


	mutex_lock(&priv->state_lock);
	/* RX HW timestamp */
	/* RX HW timestamp */
	switch (config.rx_filter) {
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_NONE:
		/* Reset CQE compression to Admin default */
		/* Reset CQE compression to Admin default */
		mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_def);
		mlx5e_modify_rx_cqe_compression_locked(priv, priv->params.rx_cqe_compress_def);
		break;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_SOME:
@@ -128,14 +129,16 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		/* Disable CQE compression */
		/* Disable CQE compression */
		netdev_warn(dev, "Disabling cqe compression");
		netdev_warn(dev, "Disabling cqe compression");
		mlx5e_modify_rx_cqe_compression(priv, false);
		mlx5e_modify_rx_cqe_compression_locked(priv, false);
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
		break;
	default:
	default:
		mutex_unlock(&priv->state_lock);
		return -ERANGE;
		return -ERANGE;
	}
	}


	memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config));
	memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config));
	mutex_unlock(&priv->state_lock);


	return copy_to_user(ifr->ifr_data, &config,
	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
			    sizeof(config)) ? -EFAULT : 0;
+4 −13
Original line number Original line Diff line number Diff line
@@ -552,7 +552,7 @@ static void mlx5e_get_channels(struct net_device *dev,
{
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_priv *priv = netdev_priv(dev);


	ch->max_combined   = mlx5e_get_max_num_channels(priv->mdev);
	ch->max_combined   = priv->profile->max_nch(priv->mdev);
	ch->combined_count = priv->params.num_channels;
	ch->combined_count = priv->params.num_channels;
}
}


@@ -560,7 +560,7 @@ static int mlx5e_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
			      struct ethtool_channels *ch)
{
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_priv *priv = netdev_priv(dev);
	int ncv = mlx5e_get_max_num_channels(priv->mdev);
	int ncv = priv->profile->max_nch(priv->mdev);
	unsigned int count = ch->combined_count;
	unsigned int count = ch->combined_count;
	bool arfs_enabled;
	bool arfs_enabled;
	bool was_opened;
	bool was_opened;
@@ -1476,8 +1476,6 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
{
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	int err = 0;
	bool reset;


	if (!MLX5_CAP_GEN(mdev, cqe_compression))
	if (!MLX5_CAP_GEN(mdev, cqe_compression))
		return -ENOTSUPP;
		return -ENOTSUPP;
@@ -1487,17 +1485,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
		return -EINVAL;
		return -EINVAL;
	}
	}


	reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
	mlx5e_modify_rx_cqe_compression_locked(priv, enable);

	if (reset)
		mlx5e_close_locked(netdev);

	MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, enable);
	priv->params.rx_cqe_compress_def = enable;
	priv->params.rx_cqe_compress_def = enable;


	if (reset)
	return 0;
		err = mlx5e_open_locked(netdev);
	return err;
}
}


static int mlx5e_handle_pflag(struct net_device *netdev,
static int mlx5e_handle_pflag(struct net_device *netdev,
+20 −24
Original line number Original line Diff line number Diff line
@@ -31,6 +31,7 @@
 */
 */


#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_gact.h>
#include <linux/crash_dump.h>
#include <net/pkt_cls.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <net/vxlan.h>
@@ -83,7 +84,9 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
	priv->params.rq_wq_type = rq_type;
	priv->params.rq_wq_type = rq_type;
	switch (priv->params.rq_wq_type) {
	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
		priv->params.log_rq_size = is_kdump_kernel() ?
			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
		priv->params.mpwqe_log_stride_sz =
		priv->params.mpwqe_log_stride_sz =
			MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
			MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
			MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
			MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
@@ -92,7 +95,9 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
			priv->params.mpwqe_log_stride_sz;
			priv->params.mpwqe_log_stride_sz;
		break;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
		priv->params.log_rq_size = is_kdump_kernel() ?
			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
	}
	}
	priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
	priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
					       BIT(priv->params.log_rq_size));
					       BIT(priv->params.log_rq_size));
@@ -1508,6 +1513,14 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
	return err;
	return err;
}
}


static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
	return is_kdump_kernel() ?
		MLX5E_MIN_NUM_CHANNELS :
		min_t(int, mdev->priv.eq_table.num_comp_vectors,
		      MLX5E_MAX_NUM_CHANNELS);
}

static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
			      struct mlx5e_channel **cp)
@@ -3021,11 +3034,8 @@ static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_dev *mdev = priv->mdev;


	if (min_tx_rate)
		return -EOPNOTSUPP;

	return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
	return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
					   max_tx_rate);
					   max_tx_rate, min_tx_rate);
}
}


static int mlx5_vport_link2ifla(u8 esw_link)
static int mlx5_vport_link2ifla(u8 esw_link)
@@ -3461,22 +3471,6 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
			MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
			MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
}
}


static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
				   u8 *min_inline_mode)
{
	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_L2:
		*min_inline_mode = MLX5_INLINE_MODE_L2;
		break;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
		break;
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		*min_inline_mode = MLX5_INLINE_MODE_NONE;
		break;
	}
}

u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
{
	int i;
	int i;
@@ -3510,7 +3504,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
	priv->params.lro_timeout =
	priv->params.lro_timeout =
		mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
		mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);


	priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	priv->params.log_sq_size = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;


	/* set CQE compression */
	/* set CQE compression */
	priv->params.rx_cqe_compress_def = false;
	priv->params.rx_cqe_compress_def = false;
@@ -3536,7 +3532,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
	priv->params.tx_cq_moderation.pkts =
	priv->params.tx_cq_moderation.pkts =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
	priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
	mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
	mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
	priv->params.num_tc                = 1;
	priv->params.num_tc                = 1;
	priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;
	priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;


Loading