
Commit 6c711c86 authored by David S. Miller

Merge branch 'mlx5-fixes'



Saeed Mahameed says:

====================
Mellanox mlx5 fixes and cleanups 2017-01-10

This series includes some mlx5e general cleanups from Daniel, Gil, Hadar,
and myself.
It also includes some critical mlx5e TC offload fixes from Or Gerlitz.

For -stable:
 - net/mlx5e: Remove WARN_ONCE from adaptive moderation code

   Although this fix doesn't affect any functionality, I thought it would be
   better to clean this WARN_ONCE up for -stable in case someone hits such
   a corner case.

Please apply and let me know if there's any problem.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 57ea52a8 5e44fca5
+6 −7
@@ -3675,14 +3675,8 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,

static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5e_vxlan_cleanup(priv);

	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		mlx5_eswitch_unregister_vport_rep(esw, 0);

	if (priv->xdp_prog)
		bpf_prog_put(priv->xdp_prog);
}
@@ -3807,9 +3801,14 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)

static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	queue_work(priv->wq, &priv->set_rx_mode_work);
	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		mlx5_eswitch_unregister_vport_rep(esw, 0);
	mlx5e_disable_async_events(priv);
	mlx5_lag_remove(priv->mdev);
	mlx5_lag_remove(mdev);
}

static const struct mlx5e_profile mlx5e_nic_profile = {
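
Taken together, the two hunks above move the uplink (vport 0) representor unregister out of the nic profile's cleanup callback and into its disable callback, so it happens earlier in teardown. Reassembled from the diff context, the two callbacks should end up roughly as follows; this is an illustrative reconstruction, not a verbatim copy of the source file.

/* Illustrative reconstruction from the hunks above; names come from the diff
 * context and are not independently verified against the source file.
 */
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
	/* the uplink representor is no longer unregistered here */
	mlx5e_vxlan_cleanup(priv);

	if (priv->xdp_prog)
		bpf_prog_put(priv->xdp_prog);
}

static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	queue_work(priv->wq, &priv->set_rx_mode_work);
	/* unregister the uplink (vport 0) representor during disable instead */
	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		mlx5_eswitch_unregister_vport_rep(esw, 0);
	mlx5e_disable_async_events(priv);
	mlx5_lag_remove(mdev);
}
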
+1 −6
@@ -109,7 +109,6 @@ static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
	switch (am->tune_state) {
	case MLX5E_AM_PARKING_ON_TOP:
	case MLX5E_AM_PARKING_TIRED:
		WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n");
		return true;
	case MLX5E_AM_GOING_RIGHT:
		return (am->steps_left > 1) && (am->steps_right == 1);
@@ -123,7 +122,6 @@ static void mlx5e_am_turn(struct mlx5e_rx_am *am)
	switch (am->tune_state) {
	case MLX5E_AM_PARKING_ON_TOP:
	case MLX5E_AM_PARKING_TIRED:
		WARN_ONCE(true, "mlx5e_am_turn: PARKING\n");
		break;
	case MLX5E_AM_GOING_RIGHT:
		am->tune_state = MLX5E_AM_GOING_LEFT;
@@ -144,7 +142,6 @@ static int mlx5e_am_step(struct mlx5e_rx_am *am)
	switch (am->tune_state) {
	case MLX5E_AM_PARKING_ON_TOP:
	case MLX5E_AM_PARKING_TIRED:
		WARN_ONCE(true, "mlx5e_am_step: PARKING\n");
		break;
	case MLX5E_AM_GOING_RIGHT:
		if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
@@ -282,10 +279,8 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
	u32 delta_us = ktime_us_delta(end->time, start->time);
	unsigned int npkts = end->pkt_ctr - start->pkt_ctr;

	if (!delta_us) {
		WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n");
	if (!delta_us)
		return;
	}

	curr_stats->ppms =            (npkts * USEC_PER_MSEC) / delta_us;
	curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
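
The dropped WARN_ONCE calls covered corner cases the adaptive-moderation state machine can apparently reach in practice (see the cover letter), so they are now silently tolerated. The surviving arithmetic turns a sample window into per-millisecond rates; below is a small standalone sketch of that calculation and its zero-delta guard. The struct, the MLX5E_AM_NEVENTS value and the main() harness are assumptions for illustration, not driver code.

/* Standalone sketch of the guarded per-millisecond rate calculation kept by
 * the hunk above.  The struct and the MLX5E_AM_NEVENTS value are assumptions.
 */
#include <stdio.h>

#define USEC_PER_MSEC    1000U
#define MLX5E_AM_NEVENTS 64U	/* assumed sample length per measurement */

struct am_stats {
	unsigned int ppms;	/* packets per millisecond */
	unsigned int epms;	/* events per millisecond */
};

static void am_calc_stats(unsigned int npkts, unsigned int delta_us,
			  struct am_stats *curr)
{
	/* an empty time window is simply skipped, no warning (as in the fix) */
	if (!delta_us)
		return;

	curr->ppms = (npkts * USEC_PER_MSEC) / delta_us;
	curr->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
}

int main(void)
{
	struct am_stats s = { 0, 0 };

	am_calc_stats(1500, 500, &s);	/* 1500 packets over 500us -> 3000 pkt/ms */
	printf("ppms=%u epms=%u\n", s.ppms, s.epms);
	return 0;
}
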
+64 −29
@@ -161,15 +161,21 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
	}
}

/* we get here also when setting rule to the FW failed, etc. It means that the
 * flow rule itself might not exist, but some offloading related to the actions
 * should be cleaned.
 */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_fc *counter = NULL;

	if (!IS_ERR(flow->rule)) {
		counter = mlx5_flow_rule_counter(flow->rule);

		mlx5_del_flow_rules(flow->rule);
		mlx5_fc_destroy(priv->mdev, counter);
	}

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		mlx5_eswitch_del_vlan_action(esw, flow->attr);
@@ -177,8 +183,6 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			mlx5e_detach_encap(priv, flow);
	}

	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
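
The new comment plus the IS_ERR() guard make mlx5e_tc_del_flow safe to call from the error path of rule creation, where flow->rule carries an encoded error instead of a valid handle. Below is a minimal userspace sketch of the ERR_PTR()/IS_ERR() idiom this relies on; the stub type and helpers are invented for illustration.

/* Minimal userspace re-implementation of the kernel's ERR_PTR()/IS_ERR()
 * idiom used by the guard above; everything here is illustrative.
 */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct flow_stub {
	void *rule;	/* either a real handle or ERR_PTR(-errno) */
};

static void del_flow_stub(struct flow_stub *flow)
{
	/* hardware rule and counter exist only if creation succeeded */
	if (!IS_ERR(flow->rule))
		printf("destroying rule %p and its counter\n", flow->rule);

	/* action-related state (vlan, encap, flow table) is cleaned regardless,
	 * which is exactly why the error path can reuse this function */
	printf("cleaning action offload state\n");
}

int main(void)
{
	int dummy;
	struct flow_stub ok = { .rule = &dummy };
	struct flow_stub failed = { .rule = ERR_PTR(-ENOMEM) };

	del_flow_stub(&ok);
	del_flow_stub(&failed);
	return 0;
}
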
@@ -225,6 +229,11 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
@@ -237,28 +246,34 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			return -EOPNOTSUPP;

		/* udp src port isn't supported */
		if (memchr_inv(&mask->src, 0, sizeof(mask->src)))
			return -EOPNOTSUPP;
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else
		else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
@@ -280,10 +295,10 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));
	}

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
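
parse_tunnel_attr leans on memchr_inv() for its mask checks: the UDP destination-port mask has to be all-ones (an exact dport is mandatory), while any bits set in the source-port mask push the filter onto an unsupported path. A small userspace sketch of those checks follows; memchr_inv() is re-implemented here purely for illustration, the kernel provides its own.

/* Userspace sketch of the mask checks above; memchr_inv() is re-implemented
 * only for illustration.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* return pointer to first byte that differs from 'c', or NULL if none do */
static void *memchr_inv(const void *start, int c, size_t bytes)
{
	const unsigned char *p = start;
	size_t i;

	for (i = 0; i < bytes; i++)
		if (p[i] != (unsigned char)c)
			return (void *)&p[i];
	return NULL;
}

int main(void)
{
	uint16_t dst_mask = 0xffff;	/* full match on UDP dport: accepted */
	uint16_t src_mask = 0x0000;	/* no match on UDP sport: accepted */

	/* Full udp dst port must be given */
	if (memchr_inv(&dst_mask, 0xff, sizeof(dst_mask)))
		printf("reject: dst port not fully masked\n");

	/* setting udp src port isn't supported */
	if (memchr_inv(&src_mask, 0, sizeof(src_mask)))
		printf("reject: src port match requested\n");

	printf("masks acceptable for vxlan decap offload\n");
	return 0;
}
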
@@ -346,6 +361,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			netdev_warn(priv->netdev,
				    "IPv6 tunnel decap offload isn't supported\n");
		default:
			return -EOPNOTSUPP;
		}
@@ -375,6 +393,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

@@ -647,17 +669,14 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,

#if IS_ENABLED(CONFIG_INET)
	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	if (IS_ERR(rt)) {
		pr_warn("%s: no route to %pI4\n", __func__, &fl4->daddr);
		return -EOPNOTSUPP;
	}
	if (IS_ERR(rt))
		return PTR_ERR(rt);
#else
	return -EOPNOTSUPP;
#endif

	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
		pr_warn("%s: Can't offload the flow, netdevices aren't on the same HW e-switch\n",
			__func__);
		pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
		ip_rt_put(rt);
		return -EOPNOTSUPP;
	}
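
Two small changes in this hunk: a route-lookup failure now propagates the real error via PTR_ERR(rt) instead of being flattened to -EOPNOTSUPP with a warning, and the e-switch mismatch warning is shortened to one line. Below is a tiny sketch of the propagation idea; route_output_stub and route_lookup_stub are invented for illustration.

/* Sketch of the error-propagation change above: the original routing error
 * is handed back to the caller instead of being rewritten.
 */
#include <stdio.h>
#include <errno.h>

static int route_output_stub(unsigned int daddr)
{
	if (!daddr)
		return -ENETUNREACH;	/* the real routing error */
	return 0;
}

static int route_lookup_stub(unsigned int daddr)
{
	int err = route_output_stub(daddr);

	if (err)
		return err;		/* propagate, don't rewrite to -EOPNOTSUPP */
	return 0;
}

int main(void)
{
	printf("%d %d\n", route_lookup_stub(0x0a000001), route_lookup_stub(0));
	return 0;
}
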
@@ -718,12 +737,12 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	struct neighbour *n;
	char *encap_header;
	int encap_size;
	__be32 saddr;
	int ttl;
	__be32 saddr = 0;
	int ttl = 0;
	int err;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
@@ -750,7 +769,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
	e->out_dev = *out_dev;

	if (!(n->nud_state & NUD_VALID)) {
		err = -ENOTSUPP;
		pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

@@ -772,6 +792,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}
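
These hunks tighten the IPv4 encap-header error path: locals the cleanup code may read are now zero-initialized, the invalid-neighbour case warns and returns -EOPNOTSUPP rather than -ENOTSUPP, and the neighbour reference is dropped only when the function is about to fail. Below is a userspace sketch of that goto-out pattern; every name in it is invented.

/* Userspace sketch of the error-path pattern the hunks above tighten up.
 * All names are invented; only the shape of the cleanup mirrors the diff.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct ref_stub { int refcnt; };

static struct ref_stub *lookup_ref_stub(int found)
{
	struct ref_stub *r = found ? calloc(1, sizeof(*r)) : NULL;

	if (r)
		r->refcnt = 1;
	return r;
}

static void put_ref_stub(struct ref_stub *r)
{
	if (r && --r->refcnt == 0)
		free(r);
}

static int build_header_stub(int neigh_found, int neigh_valid)
{
	struct ref_stub *n = NULL;	/* NULL so the out: path is always safe */
	char *header;
	int err = 0;

	header = calloc(1, 128);
	if (!header)
		return -ENOMEM;

	n = lookup_ref_stub(neigh_found);
	if (!n) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (!neigh_valid) {
		err = -EOPNOTSUPP;	/* the hunk also switches -ENOTSUPP to -EOPNOTSUPP */
		goto out;
	}

	/* ... build and program the encap header here ... */

out:
	if (err && n)
		put_ref_stub(n);	/* drop the reference only on failure, as in
					 * the hunk above; on success it is kept */
	free(header);
	return err;
}

int main(void)
{
	printf("%d %d %d\n",
	       build_header_stub(1, 1),
	       build_header_stub(1, 0),
	       build_header_stub(0, 0));
	return 0;
}
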
@@ -792,9 +814,17 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
	int tunnel_type;
	int err;

	/* udp dst port must be given */
	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
@@ -802,6 +832,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
		info.tun_id = tunnel_id_to_key32(key->tun_id);
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

@@ -809,6 +841,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
	case AF_INET:
		info.daddr = key->u.ipv4.dst;
		break;
	case AF_INET6:
		netdev_warn(priv->netdev,
			    "IPv6 tunnel encap offload isn't supported\n");
	default:
		return -EOPNOTSUPP;
	}
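
The encap checks reuse one warning-and-return via a label placed inside the body of the second if, so the missing-dport and the set-sport cases share a single error path. That is valid C, since labels have function scope; here is a minimal sketch of the idiom with invented names.

/* Userspace sketch of the shared-error-path idiom used above. */
#include <stdio.h>
#include <errno.h>

static int check_ports_stub(unsigned short dst, unsigned short src)
{
	/* dst port must be set */
	if (!dst)
		goto encap_offload_err;

	/* setting the src port isn't supported */
	if (src) {
encap_offload_err:
		fprintf(stderr,
			"must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       check_ports_stub(4789, 0),	/* ok */
	       check_ports_stub(0, 0),		/* missing dst port */
	       check_ports_stub(4789, 1234));	/* src port set */
	return 0;
}
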
@@ -986,7 +1021,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_free;
		goto err_del_rule;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
@@ -997,7 +1032,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
	goto out;

err_del_rule:
	mlx5_del_flow_rules(flow->rule);
	mlx5e_tc_del_flow(priv, flow);

err_free:
	kfree(flow);
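
With rule creation failures now routed to err_del_rule, the unwind path calls the full mlx5e_tc_del_flow helper (safe thanks to the IS_ERR guard above) instead of a bare rule delete, so partially set up action state is also cleaned. A compact sketch of that unwind ordering; the stubs are invented for illustration.

/* Sketch of the reworked unwind order in mlx5e_configure_flower. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct flow_stub { int rule_ok; };

static int add_rule_stub(struct flow_stub *flow, int fail)
{
	flow->rule_ok = !fail;
	return fail ? -EINVAL : 0;
}

static void del_flow_stub(struct flow_stub *flow)
{
	/* mirrors mlx5e_tc_del_flow: HW rule and counter are torn down only
	 * if they exist, action-related offload state is cleaned either way */
	if (flow->rule_ok)
		printf("  delete rule and counter\n");
	printf("  clean vlan/encap/flow-table state\n");
}

static int configure_stub(int fail_add, int fail_insert)
{
	struct flow_stub *flow;
	int err;

	flow = calloc(1, sizeof(*flow));
	if (!flow)
		return -ENOMEM;

	err = add_rule_stub(flow, fail_add);
	if (err)
		goto err_del_rule;	/* previously jumped to the plain free */

	if (fail_insert) {		/* e.g. the hashtable insert failed */
		err = -EEXIST;
		goto err_del_rule;
	}

	free(flow);			/* the real driver keeps the flow tracked */
	return 0;

err_del_rule:
	del_flow_stub(flow);		/* full teardown helper, not a bare rule delete */
	free(flow);			/* matches the separate err_free: kfree(flow) */
	return err;
}

int main(void)
{
	printf("success     -> %d\n", configure_stub(0, 0));
	printf("add failed  -> %d\n", configure_stub(1, 0));
	printf("insert fail -> %d\n", configure_stub(0, 1));
	return 0;
}
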
+4 −2
@@ -1195,6 +1195,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
{
	int err = 0;

	if (cleanup)
		mlx5_drain_health_wq(dev);

	mutex_lock(&dev->intf_state_mutex);
@@ -1359,9 +1360,10 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,

	mlx5_enter_error_state(dev);
	mlx5_unload_one(dev, priv, false);
	/* In case of kernel call save the pci state */
	/* In case of kernel call save the pci state and drain the health wq */
	if (state) {
		pci_save_state(pdev);
		mlx5_drain_health_wq(dev);
		mlx5_pci_disable_device(dev);
	}