Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6a9764ef authored by Saeed Mahameed
Browse files

net/mlx5e: Isolate open_channels from priv->params



In order to have a clean separation between channels resources creation
flows and current active mlx5e netdev parameters, make sure each
resource creation function does not access priv->params, and only works
on a new, fresh set of parameters.

For this we add a new "mlx5e_params" field to the mlx5e_channels structure
and use it down the road to mlx5e_open_{cq,rq,sq} and so on.

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
parent acc6c595
Loading
Loading
Loading
Loading
+11 −11
Original line number Diff line number Diff line
@@ -182,15 +182,15 @@ enum mlx5e_priv_flag {
	MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 1),
};

#define MLX5E_SET_PFLAG(priv, pflag, enable)			\
#define MLX5E_SET_PFLAG(params, pflag, enable)			\
	do {							\
		if (enable)					\
			(priv)->params.pflags |= (pflag);	\
			(params)->pflags |= (pflag);		\
		else						\
			(priv)->params.pflags &= ~(pflag);	\
			(params)->pflags &= ~(pflag);		\
	} while (0)

#define MLX5E_GET_PFLAG(priv, pflag) (!!((priv)->params.pflags & (pflag)))
#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag)))

#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
@@ -213,7 +213,6 @@ struct mlx5e_params {
	bool rx_cqe_compress_def;
	struct mlx5e_cq_moder rx_cq_moderation;
	struct mlx5e_cq_moder tx_cq_moderation;
	u16 min_rx_wqes;
	bool lro_en;
	u32 lro_wqe_sz;
	u16 tx_max_inline;
@@ -225,6 +224,7 @@ struct mlx5e_params {
	bool rx_am_enabled;
	u32 lro_timeout;
	u32 pflags;
	struct bpf_prog *xdp_prog;
};

#ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -357,7 +357,6 @@ struct mlx5e_txqsq {
	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
	int                        tc;
	int                        txq_ix;
	u32                        rate_limit;
} ____cacheline_aligned_in_smp;
@@ -564,6 +563,7 @@ struct mlx5e_channel {
struct mlx5e_channels {
	struct mlx5e_channel **c;
	unsigned int           num;
	struct mlx5e_params    params;
};

enum mlx5e_traffic_types {
@@ -735,7 +735,6 @@ struct mlx5e_priv {
	/* priv data path fields - start */
	struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
	int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
	struct bpf_prog *xdp_prog;
	/* priv data path fields - end */

	unsigned long              state;
@@ -752,7 +751,6 @@ struct mlx5e_priv {
	struct mlx5e_flow_steering fs;
	struct mlx5e_vxlan_db      vxlan;

	struct mlx5e_params        params;
	struct workqueue_struct    *wq;
	struct work_struct         update_carrier_work;
	struct work_struct         set_rx_mode_work;
@@ -857,8 +855,9 @@ struct mlx5e_redirect_rqt_param {

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp);
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
				    enum mlx5e_traffic_types tt);
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
				    enum mlx5e_traffic_types tt,
				    void *tirc);

int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
@@ -869,7 +868,8 @@ int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
				 u8 cq_period_mode);
void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type);
void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
			      struct mlx5e_params *params, u8 rq_type);

static inline
struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
+1 −1
Original line number Diff line number Diff line
@@ -111,7 +111,7 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		/* Reset CQE compression to Admin default */
		mlx5e_modify_rx_cqe_compression_locked(priv, priv->params.rx_cqe_compress_def);
		mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
+58 −61
Original line number Diff line number Diff line
@@ -154,7 +154,7 @@ static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
#define MLX5E_NUM_RQ_STATS(priv) (NUM_RQ_STATS * (priv)->channels.num)
#define MLX5E_NUM_SQ_STATS(priv) \
	(NUM_SQ_STATS * (priv)->channels.num * (priv)->params.num_tc)
	(NUM_SQ_STATS * (priv)->channels.num * (priv)->channels.params.num_tc)
#define MLX5E_NUM_PFC_COUNTERS(priv) \
	((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
	  NUM_PPORT_PER_PRIO_PFC_COUNTERS)
@@ -264,7 +264,7 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_stats_desc[j].format, i);

	for (tc = 0; tc < priv->params.num_tc; tc++)
	for (tc = 0; tc < priv->channels.params.num_tc; tc++)
		for (i = 0; i < priv->channels.num; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
@@ -387,7 +387,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
			       MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
						    rq_stats_desc, j);

	for (tc = 0; tc < priv->params.num_tc; tc++)
	for (tc = 0; tc < priv->channels.params.num_tc; tc++)
		for (i = 0; i < channels->num; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				data[idx++] = MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats,
@@ -405,8 +405,8 @@ static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
	if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		return num_wqe;

	stride_size = 1 << priv->params.mpwqe_log_stride_sz;
	num_strides = 1 << priv->params.mpwqe_log_num_strides;
	stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz;
	num_strides = 1 << priv->channels.params.mpwqe_log_num_strides;
	wqe_size = stride_size * num_strides;

	packets_per_wqe = wqe_size /
@@ -426,8 +426,8 @@ static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
	if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		return num_packets;

	stride_size = 1 << priv->params.mpwqe_log_stride_sz;
	num_strides = 1 << priv->params.mpwqe_log_num_strides;
	stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz;
	num_strides = 1 << priv->channels.params.mpwqe_log_num_strides;
	wqe_size = stride_size * num_strides;

	num_packets = (1 << order_base_2(num_packets));
@@ -442,26 +442,25 @@ static void mlx5e_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *param)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int rq_wq_type = priv->params.rq_wq_type;
	int rq_wq_type = priv->channels.params.rq_wq_type;

	param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
							 1 << mlx5_max_log_rq_size(rq_wq_type));
	param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
	param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
						     1 << priv->params.log_rq_size);
	param->tx_pending     = 1 << priv->params.log_sq_size;
						     1 << priv->channels.params.log_rq_size);
	param->tx_pending     = 1 << priv->channels.params.log_sq_size;
}

static int mlx5e_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *param)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	bool was_opened;
	int rq_wq_type = priv->params.rq_wq_type;
	int rq_wq_type = priv->channels.params.rq_wq_type;
	u32 rx_pending_wqes;
	bool was_opened;
	u32 min_rq_size;
	u32 max_rq_size;
	u16 min_rx_wqes;
	u8 log_rq_size;
	u8 log_sq_size;
	u32 num_mtts;
@@ -499,7 +498,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
	}

	num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
	if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
	if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
	    !MLX5E_VALID_NUM_MTTS(num_mtts)) {
		netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
			    __func__, param->rx_pending);
@@ -521,11 +520,9 @@ static int mlx5e_set_ringparam(struct net_device *dev,

	log_rq_size = order_base_2(rx_pending_wqes);
	log_sq_size = order_base_2(param->tx_pending);
	min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes);

	if (log_rq_size == priv->params.log_rq_size &&
	    log_sq_size == priv->params.log_sq_size &&
	    min_rx_wqes == priv->params.min_rx_wqes)
	if (log_rq_size == priv->channels.params.log_rq_size &&
	    log_sq_size == priv->channels.params.log_sq_size)
		return 0;

	mutex_lock(&priv->state_lock);
@@ -534,9 +531,8 @@ static int mlx5e_set_ringparam(struct net_device *dev,
	if (was_opened)
		mlx5e_close_locked(dev);

	priv->params.log_rq_size = log_rq_size;
	priv->params.log_sq_size = log_sq_size;
	priv->params.min_rx_wqes = min_rx_wqes;
	priv->channels.params.log_rq_size = log_rq_size;
	priv->channels.params.log_sq_size = log_sq_size;

	if (was_opened)
		err = mlx5e_open_locked(dev);
@@ -552,7 +548,7 @@ static void mlx5e_get_channels(struct net_device *dev,
	struct mlx5e_priv *priv = netdev_priv(dev);

	ch->max_combined   = priv->profile->max_nch(priv->mdev);
	ch->combined_count = priv->params.num_channels;
	ch->combined_count = priv->channels.params.num_channels;
}

static int mlx5e_set_channels(struct net_device *dev,
@@ -570,7 +566,7 @@ static int mlx5e_set_channels(struct net_device *dev,
		return -EINVAL;
	}

	if (priv->params.num_channels == count)
	if (priv->channels.params.num_channels == count)
		return 0;

	mutex_lock(&priv->state_lock);
@@ -583,8 +579,8 @@ static int mlx5e_set_channels(struct net_device *dev,
	if (arfs_enabled)
		mlx5e_arfs_disable(priv);

	priv->params.num_channels = count;
	mlx5e_build_default_indir_rqt(priv->mdev, priv->params.indirection_rqt,
	priv->channels.params.num_channels = count;
	mlx5e_build_default_indir_rqt(priv->mdev, priv->channels.params.indirection_rqt,
				      MLX5E_INDIR_RQT_SIZE, count);

	if (was_opened)
@@ -613,11 +609,11 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
	if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
		return -EOPNOTSUPP;

	coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation.usec;
	coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
	coal->tx_coalesce_usecs       = priv->params.tx_cq_moderation.usec;
	coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation.pkts;
	coal->use_adaptive_rx_coalesce = priv->params.rx_am_enabled;
	coal->rx_coalesce_usecs       = priv->channels.params.rx_cq_moderation.usec;
	coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts;
	coal->tx_coalesce_usecs       = priv->channels.params.tx_cq_moderation.usec;
	coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts;
	coal->use_adaptive_rx_coalesce = priv->channels.params.rx_am_enabled;

	return 0;
}
@@ -628,7 +624,7 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
	struct mlx5e_priv *priv    = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool restart =
		!!coal->use_adaptive_rx_coalesce != priv->params.rx_am_enabled;
		!!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_am_enabled;
	bool was_opened;
	int err = 0;
	int tc;
@@ -642,13 +638,13 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened && restart) {
		mlx5e_close_locked(netdev);
		priv->params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
		priv->channels.params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
	}

	priv->params.tx_cq_moderation.usec = coal->tx_coalesce_usecs;
	priv->params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
	priv->params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
	priv->params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
	priv->channels.params.tx_cq_moderation.usec = coal->tx_coalesce_usecs;
	priv->channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
	priv->channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
	priv->channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;

	if (!was_opened || restart)
		goto out;
@@ -965,7 +961,7 @@ static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return sizeof(priv->params.toeplitz_hash_key);
	return sizeof(priv->channels.params.toeplitz_hash_key);
}

static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
@@ -979,15 +975,15 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (indir)
		memcpy(indir, priv->params.indirection_rqt,
		       sizeof(priv->params.indirection_rqt));
		memcpy(indir, priv->channels.params.indirection_rqt,
		       sizeof(priv->channels.params.indirection_rqt));

	if (key)
		memcpy(key, priv->params.toeplitz_hash_key,
		       sizeof(priv->params.toeplitz_hash_key));
		memcpy(key, priv->channels.params.toeplitz_hash_key,
		       sizeof(priv->channels.params.toeplitz_hash_key));

	if (hfunc)
		*hfunc = priv->params.rss_hfunc;
		*hfunc = priv->channels.params.rss_hfunc;

	return 0;
}
@@ -1003,7 +999,7 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(tirc, 0, ctxlen);
		mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
		mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc);
		mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
	}
}
@@ -1028,20 +1024,20 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
	mutex_lock(&priv->state_lock);

	if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    hfunc != priv->params.rss_hfunc) {
		priv->params.rss_hfunc = hfunc;
	    hfunc != priv->channels.params.rss_hfunc) {
		priv->channels.params.rss_hfunc = hfunc;
		hash_changed = true;
	}

	if (indir) {
		memcpy(priv->params.indirection_rqt, indir,
		       sizeof(priv->params.indirection_rqt));
		memcpy(priv->channels.params.indirection_rqt, indir,
		       sizeof(priv->channels.params.indirection_rqt));

		if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
			u32 rqtn = priv->indir_rqt.rqtn;
			struct mlx5e_redirect_rqt_param rrp = {
				.is_rss = true,
				.rss.hfunc = priv->params.rss_hfunc,
				.rss.hfunc = priv->channels.params.rss_hfunc,
				.rss.channels  = &priv->channels
			};

@@ -1050,10 +1046,10 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
	}

	if (key) {
		memcpy(priv->params.toeplitz_hash_key, key,
		       sizeof(priv->params.toeplitz_hash_key));
		memcpy(priv->channels.params.toeplitz_hash_key, key,
		       sizeof(priv->channels.params.toeplitz_hash_key));
		hash_changed = hash_changed ||
			       priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
			       priv->channels.params.rss_hfunc == ETH_RSS_HASH_TOP;
	}

	if (hash_changed)
@@ -1074,7 +1070,7 @@ static int mlx5e_get_rxnfc(struct net_device *netdev,

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = priv->params.num_channels;
		info->data = priv->channels.params.num_channels;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		info->rule_cnt = priv->fs.ethtool.tot_num_rules;
@@ -1102,7 +1098,7 @@ static int mlx5e_get_tunable(struct net_device *dev,

	switch (tuna->id) {
	case ETHTOOL_TX_COPYBREAK:
		*(u32 *)data = priv->params.tx_max_inline;
		*(u32 *)data = priv->channels.params.tx_max_inline;
		break;
	default:
		err = -EINVAL;
@@ -1136,7 +1132,7 @@ static int mlx5e_set_tunable(struct net_device *dev,
		if (was_opened)
			mlx5e_close_locked(dev);

		priv->params.tx_max_inline = val;
		priv->channels.params.tx_max_inline = val;

		if (was_opened)
			err = mlx5e_open_locked(dev);
@@ -1455,7 +1451,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
	rx_cq_period_mode = enable ?
		MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
		MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
	rx_mode_changed = rx_cq_period_mode != priv->params.rx_cq_period_mode;
	rx_mode_changed = rx_cq_period_mode != priv->channels.params.rx_cq_period_mode;

	if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
	    !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
@@ -1468,7 +1464,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
	if (reset)
		mlx5e_close_locked(netdev);

	mlx5e_set_rx_cq_mode_params(&priv->params, rx_cq_period_mode);
	mlx5e_set_rx_cq_mode_params(&priv->channels.params, rx_cq_period_mode);

	if (reset)
		err = mlx5e_open_locked(netdev);
@@ -1491,8 +1487,9 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
	}

	mlx5e_modify_rx_cqe_compression_locked(priv, enable);
	priv->params.rx_cqe_compress_def = enable;
	mlx5e_set_rq_type_params(priv, priv->params.rq_wq_type);
	priv->channels.params.rx_cqe_compress_def = enable;
	mlx5e_set_rq_type_params(priv->mdev, &priv->channels.params,
				 priv->channels.params.rq_wq_type);

	return 0;
}
@@ -1504,7 +1501,7 @@ static int mlx5e_handle_pflag(struct net_device *netdev,
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	bool enable = !!(wanted_flags & flag);
	u32 changes = wanted_flags ^ priv->params.pflags;
	u32 changes = wanted_flags ^ priv->channels.params.pflags;
	int err;

	if (!(changes & flag))
@@ -1517,7 +1514,7 @@ static int mlx5e_handle_pflag(struct net_device *netdev,
		return err;
	}

	MLX5E_SET_PFLAG(priv, flag, enable);
	MLX5E_SET_PFLAG(&priv->channels.params, flag, enable);
	return 0;
}

@@ -1546,7 +1543,7 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return priv->params.pflags;
	return priv->channels.params.pflags;
}

static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+1 −1
Original line number Diff line number Diff line
@@ -390,7 +390,7 @@ static int validate_flow(struct mlx5e_priv *priv,
	if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -EINVAL;

	if (fs->ring_cookie >= priv->params.num_channels &&
	if (fs->ring_cookie >= priv->channels.params.num_channels &&
	    fs->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

+222 −226

File changed.

Preview size limit exceeded, changes collapsed.

Loading