
Commit a3b1933d authored by David S. Miller

Merge tag 'mlx5-fixes-2019-03-11' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux



Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2019-03-11

For -stable v5.0:
('net/mlx5e: Fix access to non-existing receive queue')
('net/mlx5e: Properly get the PF number phys port name ndo')
('net/mlx5: Fix multiple updates of steering rules in parallel')
('net/mlx5: Avoid panic when setting vport mac, getting vport config')
('net/mlx5: Avoid panic when setting vport rate')
('net/mlx5e: IPoIB, Fix RX checksum statistics update')
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents c6873d18 24319258
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c (+3 −0)
@@ -424,6 +424,9 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
 
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
 		priv->channels.params = new_channels.params;
+		if (!netif_is_rxfh_configured(priv->netdev))
+			mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
+						      MLX5E_INDIR_RQT_SIZE, count);
 		goto out;
 	}
 
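For context on the en_ethtool.c fix ('net/mlx5e: Fix access to non-existing receive queue'): after the channel count shrinks, any RSS indirection entry pointing past the new count would steer packets to a receive queue that no longer exists, so the table is rebuilt unless the user pinned a custom one through ethtool. Below is a minimal standalone model of the round-robin fill that mlx5e_build_default_indir_rqt performs; the table size and channel count are illustrative.

/* Standalone model of the default indirection-table rebuild: spread
 * table entries round-robin across the active channels so every entry
 * references a live RX queue. Sizes here are illustrative.
 */
#include <stdio.h>

#define INDIR_RQT_SIZE 256	/* stands in for MLX5E_INDIR_RQT_SIZE */

static void build_default_indir_rqt(unsigned int *indirection_rqt,
				    int len, int num_channels)
{
	int i;

	for (i = 0; i < len; i++)
		indirection_rqt[i] = i % num_channels;
}

int main(void)
{
	unsigned int rqt[INDIR_RQT_SIZE];
	int count = 6;	/* new channel count requested via ethtool -L */

	build_default_indir_rqt(rqt, INDIR_RQT_SIZE, count);

	/* No entry can exceed count - 1, so no stale queue is referenced. */
	printf("entry 0 -> ch %u, entry 255 -> ch %u\n", rqt[0], rqt[255]);
	return 0;
}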
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c (+7 −6)
@@ -1129,16 +1129,17 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *rep = rpriv->rep;
-	int ret, pf_num;
+	unsigned int fn;
+	int ret;
 
-	ret = mlx5_lag_get_pf_num(priv->mdev, &pf_num);
-	if (ret)
-		return ret;
+	fn = PCI_FUNC(priv->mdev->pdev->devfn);
+	if (fn >= MLX5_MAX_PORTS)
+		return -EOPNOTSUPP;
 
 	if (rep->vport == MLX5_VPORT_UPLINK)
-		ret = snprintf(buf, len, "p%d", pf_num);
+		ret = snprintf(buf, len, "p%d", fn);
 	else
-		ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1);
+		ret = snprintf(buf, len, "pf%dvf%d", fn, rep->vport - 1);
 
 	if (ret >= len)
 		return -EOPNOTSUPP;
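The en_rep.c fix ('net/mlx5e: Properly get the PF number phys port name ndo') stops asking the LAG code for the PF number and reads it straight from the PCI function bits of devfn. A minimal userspace model follows; the devfn value and VF number are made up, and PCI_FUNC matches the kernel's low-three-bits definition.

/* Model of the new naming scheme: PF index taken from PCI_FUNC(devfn).
 * Uplink representors are named "p<pf>", VF representors "pf<pf>vf<vf>"
 * (the kernel derives the VF index from rep->vport - 1).
 */
#include <stdio.h>

#define PCI_FUNC(devfn) ((devfn) & 0x07)	/* as in <linux/pci.h> */
#define MLX5_MAX_PORTS  2			/* mlx5 port limit at the time */

int main(void)
{
	unsigned int devfn = 0x01;	/* illustrative: device 0, function 1 */
	unsigned int fn = PCI_FUNC(devfn);
	char buf[16];

	if (fn >= MLX5_MAX_PORTS)
		return 1;		/* the kernel returns -EOPNOTSUPP */

	snprintf(buf, sizeof(buf), "pf%uvf%u", fn, 3u);	/* e.g. VF 3 */
	printf("%s\n", buf);		/* -> pf1vf3 */
	return 0;
}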
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c (+8 −3)
@@ -1295,8 +1295,14 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
 
 	skb->protocol = *((__be16 *)(skb->data));
 
-	skb->ip_summed = CHECKSUM_COMPLETE;
-	skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+	if (netdev->features & NETIF_F_RXCSUM) {
+		skb->ip_summed = CHECKSUM_COMPLETE;
+		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+		stats->csum_complete++;
+	} else {
+		skb->ip_summed = CHECKSUM_NONE;
+		stats->csum_none++;
+	}
 
 	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
 		skb_hwtstamps(skb)->hwtstamp =
@@ -1315,7 +1321,6 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
 
 	skb->dev = netdev;
 
-	stats->csum_complete++;
 	stats->packets++;
 	stats->bytes += cqe_bcnt;
 }
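The en_rx.c fix ('net/mlx5e: IPoIB, Fix RX checksum statistics update') makes the two counters mutually exclusive per completion: previously csum_complete was incremented unconditionally at the end of the function, even when RX checksum offload was off. A toy model of the corrected accounting, with simplified stand-ins for the kernel types:

/* Each completed CQE now bumps exactly one of the two counters,
 * depending on whether NETIF_F_RXCSUM is enabled.
 */
#include <stdbool.h>
#include <stdio.h>

struct rq_stats { unsigned long csum_complete, csum_none; };

static void count_rx_csum(struct rq_stats *stats, bool rxcsum)
{
	if (rxcsum)
		stats->csum_complete++;	/* skb marked CHECKSUM_COMPLETE */
	else
		stats->csum_none++;	/* skb marked CHECKSUM_NONE */
}

int main(void)
{
	struct rq_stats stats = { 0, 0 };

	count_rx_csum(&stats, true);
	count_rx_csum(&stats, false);
	printf("complete=%lu none=%lu\n",
	       stats.csum_complete, stats.csum_none);	/* 1 and 1 */
	return 0;
}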
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c (+11 −6)
@@ -1931,7 +1931,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 	u64 node_guid;
 	int err = 0;
 
-	if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
+	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
 		return -EPERM;
 	if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
 		return -EINVAL;
@@ -2005,7 +2005,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 {
 	struct mlx5_vport *evport;
 
-	if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
+	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
 		return -EPERM;
 	if (!LEGAL_VPORT(esw, vport))
 		return -EINVAL;
@@ -2297,19 +2297,24 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
 int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
 				u32 max_rate, u32 min_rate)
 {
-	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
-	bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
-					fw_max_bw_share >= MLX5_MIN_BW_SHARE;
-	bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
 	struct mlx5_vport *evport;
+	u32 fw_max_bw_share;
 	u32 previous_min_rate;
 	u32 divider;
+	bool min_rate_supported;
+	bool max_rate_supported;
 	int err = 0;
 
 	if (!ESW_ALLOWED(esw))
 		return -EPERM;
 	if (!LEGAL_VPORT(esw, vport))
 		return -EINVAL;
 
+	fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+	min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
+				fw_max_bw_share >= MLX5_MIN_BW_SHARE;
+	max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
+
 	if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
 		return -EOPNOTSUPP;
 
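Both eswitch guards rely on C's short-circuit evaluation: !esw is tested first, so MLX5_CAP_GEN never dereferences esw->dev when the eswitch was never allocated (for example with SR-IOV disabled); the vport-rate hunk likewise defers every MLX5_CAP_QOS read until after ESW_ALLOWED has vouched for esw. A minimal sketch of the guard pattern; cap_check is a hypothetical stand-in for MLX5_CAP_GEN.

/* The '!esw ||' guard short-circuits before the capability check can
 * dereference a NULL pointer, turning a panic into -EPERM.
 */
#include <errno.h>
#include <stdio.h>

struct eswitch { int dev_caps; };

static int cap_check(const struct eswitch *esw)
{
	return esw->dev_caps;	/* would crash if esw were NULL */
}

static int set_vport_mac(struct eswitch *esw)
{
	if (!esw || !cap_check(esw))	/* no NULL dereference */
		return -EPERM;
	return 0;
}

int main(void)
{
	printf("%d\n", set_vport_mac(NULL));	/* -EPERM, no panic */
	return 0;
}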
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c (+98 −63)
@@ -263,9 +263,10 @@ static void nested_down_write_ref_node(struct fs_node *node,
 	}
 }
 
-static void down_write_ref_node(struct fs_node *node)
+static void down_write_ref_node(struct fs_node *node, bool locked)
 {
 	if (node) {
-		down_write(&node->lock);
+		if (!locked)
+			down_write(&node->lock);
 		refcount_inc(&node->refcount);
 	}
@@ -277,13 +278,14 @@ static void up_read_ref_node(struct fs_node *node)
 	up_read(&node->lock);
 }
 
-static void up_write_ref_node(struct fs_node *node)
+static void up_write_ref_node(struct fs_node *node, bool locked)
 {
 	refcount_dec(&node->refcount);
-	up_write(&node->lock);
+	if (!locked)
+		up_write(&node->lock);
 }
 
-static void tree_put_node(struct fs_node *node)
+static void tree_put_node(struct fs_node *node, bool locked)
 {
 	struct fs_node *parent_node = node->parent;
 
@@ -294,27 +296,27 @@ static void tree_put_node(struct fs_node *node)
 			/* Only root namespace doesn't have parent and we just
 			 * need to free its node.
 			 */
-			down_write_ref_node(parent_node);
+			down_write_ref_node(parent_node, locked);
 			list_del_init(&node->list);
 			if (node->del_sw_func)
 				node->del_sw_func(node);
-			up_write_ref_node(parent_node);
+			up_write_ref_node(parent_node, locked);
 		} else {
 			kfree(node);
 		}
 		node = NULL;
 	}
 	if (!node && parent_node)
-		tree_put_node(parent_node);
+		tree_put_node(parent_node, locked);
 }
 
-static int tree_remove_node(struct fs_node *node)
+static int tree_remove_node(struct fs_node *node, bool locked)
 {
 	if (refcount_read(&node->refcount) > 1) {
 		refcount_dec(&node->refcount);
 		return -EEXIST;
 	}
-	tree_put_node(node);
+	tree_put_node(node, locked);
 	return 0;
 }

@@ -420,22 +422,34 @@ static void del_sw_flow_table(struct fs_node *node)
 	kfree(ft);
 }
 
-static void del_sw_hw_rule(struct fs_node *node)
+static void modify_fte(struct fs_fte *fte)
 {
 	struct mlx5_flow_root_namespace *root;
-	struct mlx5_flow_rule *rule;
 	struct mlx5_flow_table *ft;
 	struct mlx5_flow_group *fg;
-	struct fs_fte *fte;
-	int modify_mask;
-	struct mlx5_core_dev *dev = get_dev(node);
+	struct mlx5_core_dev *dev;
 	int err;
-	bool update_fte = false;
 
-	fs_get_obj(rule, node);
-	fs_get_obj(fte, rule->node.parent);
 	fs_get_obj(fg, fte->node.parent);
 	fs_get_obj(ft, fg->node.parent);
+	dev = get_dev(&fte->node);
+
+	root = find_root(&ft->node);
+	err = root->cmds->update_fte(dev, ft, fg->id, fte->modify_mask, fte);
+	if (err)
+		mlx5_core_warn(dev,
+			       "%s can't del rule fg id=%d fte_index=%d\n",
+			       __func__, fg->id, fte->index);
+	fte->modify_mask = 0;
+}
+
+static void del_sw_hw_rule(struct fs_node *node)
+{
+	struct mlx5_flow_rule *rule;
+	struct fs_fte *fte;
+
+	fs_get_obj(rule, node);
+	fs_get_obj(fte, rule->node.parent);
 	trace_mlx5_fs_del_rule(rule);
 	if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
 		mutex_lock(&rule->dest_attr.ft->lock);
@@ -445,27 +459,19 @@ static void del_sw_hw_rule(struct fs_node *node)
 
 	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER  &&
 	    --fte->dests_size) {
-		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
-			      BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+		fte->modify_mask |=
+			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
+			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
 		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
-		update_fte = true;
 		goto out;
 	}
 
 	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
 	    --fte->dests_size) {
-		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
-		update_fte = true;
+		fte->modify_mask |=
+			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
 	}
 out:
-	root = find_root(&ft->node);
-	if (update_fte && fte->dests_size) {
-		err = root->cmds->update_fte(dev, ft, fg->id, modify_mask, fte);
-		if (err)
-			mlx5_core_warn(dev,
-				       "%s can't del rule fg id=%d fte_index=%d\n",
-				       __func__, fg->id, fte->index);
-	}
 	kfree(rule);
 }

@@ -491,6 +497,7 @@ static void del_hw_fte(struct fs_node *node)
 			mlx5_core_warn(dev,
 				       "flow steering can't delete fte in index %d of flow group id %d\n",
 				       fte->index, fg->id);
+		node->active = 0;
 	}
 }

@@ -591,7 +598,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
 	fte->node.type =  FS_TYPE_FLOW_ENTRY;
 	fte->action = *flow_act;
 
-	tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
+	tree_init_node(&fte->node, NULL, del_sw_fte);
 
 	return fte;
 }
@@ -858,7 +865,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
 	fs_get_obj(fte, rule->node.parent);
 	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
 		return -EINVAL;
-	down_write_ref_node(&fte->node);
+	down_write_ref_node(&fte->node, false);
 	fs_get_obj(fg, fte->node.parent);
 	fs_get_obj(ft, fg->node.parent);
 
@@ -866,7 +873,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
 	root = find_root(&ft->node);
 	err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
 				     modify_mask, fte);
-	up_write_ref_node(&fte->node);
+	up_write_ref_node(&fte->node, false);
 
 	return err;
 }
@@ -1016,11 +1023,11 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
 	if (err)
 		goto destroy_ft;
 	ft->node.active = true;
-	down_write_ref_node(&fs_prio->node);
+	down_write_ref_node(&fs_prio->node, false);
 	tree_add_node(&ft->node, &fs_prio->node);
 	list_add_flow_table(ft, fs_prio);
 	fs_prio->num_ft++;
-	up_write_ref_node(&fs_prio->node);
+	up_write_ref_node(&fs_prio->node, false);
 	mutex_unlock(&root->chain_lock);
 	trace_mlx5_fs_add_ft(ft);
 	return ft;
@@ -1114,17 +1121,17 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
 	if (ft->autogroup.active)
 		return ERR_PTR(-EPERM);
 
-	down_write_ref_node(&ft->node);
+	down_write_ref_node(&ft->node, false);
 	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
 				     start_index, end_index,
 				     ft->node.children.prev);
-	up_write_ref_node(&ft->node);
+	up_write_ref_node(&ft->node, false);
 	if (IS_ERR(fg))
 		return fg;
 
 	err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id);
 	if (err) {
-		tree_put_node(&fg->node);
+		tree_put_node(&fg->node, false);
 		return ERR_PTR(err);
 	}
 	trace_mlx5_fs_add_fg(fg);
@@ -1521,10 +1528,10 @@ static void free_match_list(struct match_list_head *head)
 		struct match_list *iter, *match_tmp;
 
 		list_del(&head->first.list);
-		tree_put_node(&head->first.g->node);
+		tree_put_node(&head->first.g->node, false);
 		list_for_each_entry_safe(iter, match_tmp, &head->list,
 					 list) {
-			tree_put_node(&iter->g->node);
+			tree_put_node(&iter->g->node, false);
 			list_del(&iter->list);
 			kfree(iter);
 		}
@@ -1601,11 +1608,16 @@ lookup_fte_locked(struct mlx5_flow_group *g,
 		fte_tmp = NULL;
 		goto out;
 	}
+	if (!fte_tmp->node.active) {
+		tree_put_node(&fte_tmp->node, false);
+		fte_tmp = NULL;
+		goto out;
+	}
 
 	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
 out:
 	if (take_write)
-		up_write_ref_node(&g->node);
+		up_write_ref_node(&g->node, false);
 	else
 		up_read_ref_node(&g->node);
 	return fte_tmp;
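Two of the fs_core.c hunks work as a pair: del_hw_fte now clears node->active once the FTE is deleted in firmware, and lookup_fte_locked refuses to hand back an FTE whose active flag is clear, so a racing add cannot attach a rule to an entry that is already gone. A toy model of that reuse check, with simplified structures and no real locking:

/* A deleter marks the node inactive; a concurrent lookup declines to
 * reuse it and lets the caller allocate a fresh FTE instead.
 */
#include <stdbool.h>
#include <stdio.h>

struct node { bool active; int refcount; };

static struct node *lookup_for_reuse(struct node *fte)
{
	fte->refcount++;		/* tree_get_node() equivalent */
	if (!fte->active) {		/* the check this fix adds */
		fte->refcount--;	/* tree_put_node() equivalent */
		return NULL;
	}
	return fte;
}

int main(void)
{
	struct node fte = { true, 1 };

	printf("reuse live: %s\n", lookup_for_reuse(&fte) ? "yes" : "no");
	fte.active = false;		/* as after del_hw_fte() */
	printf("reuse dead: %s\n", lookup_for_reuse(&fte) ? "yes" : "no");
	return 0;
}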
@@ -1647,8 +1659,8 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 			continue;
 		rule = add_rule_fg(g, spec->match_value,
 				   flow_act, dest, dest_num, fte_tmp);
-		up_write_ref_node(&fte_tmp->node);
-		tree_put_node(&fte_tmp->node);
+		up_write_ref_node(&fte_tmp->node, false);
+		tree_put_node(&fte_tmp->node, false);
 		kmem_cache_free(steering->ftes_cache, fte);
 		return rule;
 	}
@@ -1684,7 +1696,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 
 		err = insert_fte(g, fte);
 		if (err) {
-			up_write_ref_node(&g->node);
+			up_write_ref_node(&g->node, false);
 			if (err == -ENOSPC)
 				continue;
 			kmem_cache_free(steering->ftes_cache, fte);
@@ -1692,11 +1704,11 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 		}
 
 		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
-		up_write_ref_node(&g->node);
+		up_write_ref_node(&g->node, false);
 		rule = add_rule_fg(g, spec->match_value,
 				   flow_act, dest, dest_num, fte);
-		up_write_ref_node(&fte->node);
-		tree_put_node(&fte->node);
+		up_write_ref_node(&fte->node, false);
+		tree_put_node(&fte->node, false);
 		return rule;
 	}
 	rule = ERR_PTR(-ENOENT);
@@ -1738,7 +1750,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 	err = build_match_list(&match_head, ft, spec);
 	if (err) {
 		if (take_write)
-			up_write_ref_node(&ft->node);
+			up_write_ref_node(&ft->node, false);
 		else
 			up_read_ref_node(&ft->node);
 		return ERR_PTR(err);
@@ -1753,7 +1765,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 	if (!IS_ERR(rule) ||
 	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
 		if (take_write)
-			up_write_ref_node(&ft->node);
+			up_write_ref_node(&ft->node, false);
 		return rule;
 	}
 
@@ -1769,12 +1781,12 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 	g = alloc_auto_flow_group(ft, spec);
 	if (IS_ERR(g)) {
 		rule = ERR_CAST(g);
-		up_write_ref_node(&ft->node);
+		up_write_ref_node(&ft->node, false);
 		return rule;
 	}
 
 	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
-	up_write_ref_node(&ft->node);
+	up_write_ref_node(&ft->node, false);
 
 	err = create_auto_flow_group(ft, g);
 	if (err)
@@ -1793,17 +1805,17 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 	}
 
 	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
-	up_write_ref_node(&g->node);
+	up_write_ref_node(&g->node, false);
 	rule = add_rule_fg(g, spec->match_value, flow_act, dest,
 			   dest_num, fte);
-	up_write_ref_node(&fte->node);
-	tree_put_node(&fte->node);
-	tree_put_node(&g->node);
+	up_write_ref_node(&fte->node, false);
+	tree_put_node(&fte->node, false);
+	tree_put_node(&g->node, false);
 	return rule;
 
 err_release_fg:
-	up_write_ref_node(&g->node);
-	tree_put_node(&g->node);
+	up_write_ref_node(&g->node, false);
+	tree_put_node(&g->node, false);
 	return ERR_PTR(err);
 }
 
@@ -1866,10 +1878,33 @@ EXPORT_SYMBOL(mlx5_add_flow_rules);
 
 void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
 {
+	struct fs_fte *fte;
 	int i;
 
+	/* In order to consolidate the HW changes we lock the FTE for other
+	 * changes, and increase its refcount, in order not to perform the
+	 * "del" functions of the FTE. Will handle them here.
+	 * The removal of the rules is done under locked FTE.
+	 * After removing all the handle's rules, if there are remaining
+	 * rules, it means we just need to modify the FTE in FW, and
+	 * unlock/decrease the refcount we increased before.
+	 * Otherwise, it means the FTE should be deleted. First delete the
+	 * FTE in FW. Then, unlock the FTE, and proceed the tree_put_node of
+	 * the FTE, which will handle the last decrease of the refcount, as
+	 * well as required handling of its parent.
+	 */
+	fs_get_obj(fte, handle->rule[0]->node.parent);
+	down_write_ref_node(&fte->node, false);
 	for (i = handle->num_rules - 1; i >= 0; i--)
-		tree_remove_node(&handle->rule[i]->node);
+		tree_remove_node(&handle->rule[i]->node, true);
+	if (fte->modify_mask && fte->dests_size) {
+		modify_fte(fte);
+		up_write_ref_node(&fte->node, false);
+	} else {
+		del_hw_fte(&fte->node);
+		up_write(&fte->node.lock);
+		tree_put_node(&fte->node, false);
+	}
 	kfree(handle);
 }
 EXPORT_SYMBOL(mlx5_del_flow_rules);
@@ -1972,7 +2007,7 @@ int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
 		mutex_unlock(&root->chain_lock);
 		return err;
 	}
-	if (tree_remove_node(&ft->node))
+	if (tree_remove_node(&ft->node, false))
 		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
 			       ft->id);
 	mutex_unlock(&root->chain_lock);
@@ -1983,7 +2018,7 @@ EXPORT_SYMBOL(mlx5_destroy_flow_table);
 
 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
 {
-	if (tree_remove_node(&fg->node))
+	if (tree_remove_node(&fg->node, false))
 		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
 			       fg->id);
 }
@@ -2367,8 +2402,8 @@ static void clean_tree(struct fs_node *node)
 		tree_get_node(node);
 		list_for_each_entry_safe(iter, temp, &node->children, list)
 			clean_tree(iter);
-		tree_put_node(node);
-		tree_remove_node(node);
+		tree_put_node(node, false);
+		tree_remove_node(node, false);
 	}
 }
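Taken together, the fs_core.c changes thread a locked flag through the node helpers so mlx5_del_flow_rules can take the FTE write lock once, remove all of a handle's rules under it, and push a single consolidated firmware update through modify_fte instead of one update per rule racing with other writers. A toy model of the locked convention, using a pthread rwlock in place of the kernel's rw_semaphore (names and structure are illustrative):

/* The same helper works whether or not the caller already holds the
 * lock: with locked=true it only adjusts the refcount.
 * Build with: cc -pthread model.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fs_node {
	pthread_rwlock_t lock;
	int refcount;
};

static void down_write_ref_node(struct fs_node *node, bool locked)
{
	if (!locked)
		pthread_rwlock_wrlock(&node->lock);
	node->refcount++;
}

static void up_write_ref_node(struct fs_node *node, bool locked)
{
	node->refcount--;
	if (!locked)
		pthread_rwlock_unlock(&node->lock);
}

int main(void)
{
	struct fs_node fte = { PTHREAD_RWLOCK_INITIALIZER, 1 };

	down_write_ref_node(&fte, false);	/* outer caller locks once */
	down_write_ref_node(&fte, true);	/* inner helper: refcount only */
	up_write_ref_node(&fte, true);
	up_write_ref_node(&fte, false);

	printf("refcount=%d\n", fte.refcount);	/* back to 1 */
	return 0;
}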
