Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 187335cd authored by David S. Miller
Browse files

Merge branch 'mlx5-fixes'



Saeed Mahameed says:

====================
Mellanox 100G mlx5 fixes 2016-08-16

This series includes some bug fixes for mlx5e driver.

From Saeed and Tariq, Optimize MTU change to not reset when it is not required.

From Paul, Command interface message length check to speedup firmware
command preparation.

From Mohamad, Save pci state when pci error is detected.

From Amir, Flow counters "lastuse" update fix.

From Hadar, Use correct flow dissector key on flower offloading.
Plus a small optimization for switchdev hardware id query.

From Or, three patches to address some E-Switch offloads issues.

For -stable of 4.6.y and 4.7.y:
    net/mlx5e: Use correct flow dissector key on flower offloading
    net/mlx5: Fix pci error recovery flow
    net/mlx5: Added missing check of msg length in verifying its signature
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 98a384ec f96750f8
Loading
Loading
Loading
Loading
+54 −31
Original line number Diff line number Diff line
@@ -143,13 +143,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

static u8 xor8_buf(void *buf, int len)
static u8 xor8_buf(void *buf, size_t offset, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;
	int end = len + offset;

	for (i = 0; i < len; i++)
	for (i = offset; i < end; i++)
		sum ^= ptr[i];

	return sum;
@@ -157,41 +158,49 @@ static u8 xor8_buf(void *buf, int len)

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
	int xor_len = sizeof(*block) - sizeof(block->data) - 1;

	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, sizeof(*block)) != 0xff)
	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
			   int csum)
static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
	block->token = token;
	if (csum) {
		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
					    sizeof(block->data) - 2);
		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
	}
	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);

	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}

static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *next = msg->next;

	while (next) {
		calc_block_sig(next->buf, token, csum);
	int size = msg->len;
	int blen = size - min_t(int, sizeof(msg->first.data), size);
	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
		/ MLX5_CMD_DATA_BLOCK_SIZE;
	int i = 0;

	for (i = 0; i < n && next; i++)  {
		calc_block_sig(next->buf);
		next = next->next;
	}
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token, csum);
	calc_chain_sig(ent->out, ent->token, csum);
	ent->lay->sig = ~xor8_buf(ent->lay, 0,  sizeof(*ent->lay));
	if (csum) {
		calc_chain_sig(ent->in);
		calc_chain_sig(ent->out);
	}
}

static void poll_timeout(struct mlx5_cmd_work_ent *ent)
@@ -222,12 +231,17 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int err;
	u8 sig;
	int size = ent->out->len;
	int blen = size - min_t(int, sizeof(ent->out->first.data), size);
	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
		/ MLX5_CMD_DATA_BLOCK_SIZE;
	int i = 0;

	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	while (next) {
	for (i = 0; i < n && next; i++) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;
@@ -656,7 +670,6 @@ static void cmd_work_handler(struct work_struct *work)
		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
	}

	ent->token = alloc_token(cmd);
	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
@@ -766,7 +779,8 @@ static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status)
			   void *context, int page_queue, u8 *status,
			   u8 token)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
@@ -783,6 +797,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	ent->token = token;

	if (!callback)
		init_completion(&ent->done);

@@ -854,7 +870,8 @@ static const struct file_operations fops = {
	.write	= dbg_write,
};

static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
			    u8 token)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
@@ -880,6 +897,7 @@ static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		block->token = token;
		next = next->next;
	}

@@ -949,7 +967,8 @@ static void free_cmd_box(struct mlx5_core_dev *dev,
}

static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size)
					       gfp_t flags, int size,
					       u8 token)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
@@ -978,6 +997,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		block->token = token;
		head = tmp;
	}
	msg->next = head;
@@ -1352,7 +1372,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);

	return msg;
}
@@ -1377,6 +1397,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
	int err;
	u8 status = 0;
	u32 drv_synd;
	u8 token;

	if (pci_channel_offline(dev->pdev) ||
	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -1395,20 +1416,22 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	token = alloc_token(&dev->cmd);

	err = mlx5_copy_to_msg(inb, in, in_size, token);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status);
			      pages_queue, &status, token);
	if (err)
		goto out_out;

@@ -1476,7 +1499,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
@@ -1486,7 +1509,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
+15 −6
Original line number Diff line number Diff line
@@ -1826,10 +1826,6 @@ int mlx5e_open_locked(struct net_device *netdev)
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

	err = mlx5e_set_dev_port_mtu(netdev);
	if (err)
		goto err_clear_state_opened_flag;

	err = mlx5e_open_channels(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
@@ -2573,6 +2569,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
	u16 max_mtu;
	u16 min_mtu;
	int err = 0;
	bool reset;

	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);

@@ -2588,13 +2585,18 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)

	mutex_lock(&priv->state_lock);

	reset = !priv->params.lro_en &&
		(priv->params.rq_wq_type !=
		 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened)
	if (was_opened && reset)
		mlx5e_close_locked(netdev);

	netdev->mtu = new_mtu;
	mlx5e_set_dev_port_mtu(netdev);

	if (was_opened)
	if (was_opened && reset)
		err = mlx5e_open_locked(netdev);

	mutex_unlock(&priv->state_lock);
@@ -3385,6 +3387,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
	queue_work(priv->wq, &priv->set_rx_mode_work);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
		rep.load = mlx5e_nic_rep_load;
		rep.unload = mlx5e_nic_rep_unload;
		rep.vport = 0;
@@ -3463,6 +3466,8 @@ void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,

	mlx5e_init_l2_addr(priv);

	mlx5e_set_dev_port_mtu(netdev);

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
@@ -3501,16 +3506,20 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	int vport;
	u8 mac[ETH_ALEN];

	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return;

	mlx5_query_nic_vport_mac_address(mdev, 0, mac);

	for (vport = 1; vport < total_vfs; vport++) {
		struct mlx5_eswitch_rep rep;

		rep.load = mlx5e_vport_rep_load;
		rep.unload = mlx5e_vport_rep_unload;
		rep.vport = vport;
		ether_addr_copy(rep.hw_id, mac);
		mlx5_eswitch_register_vport_rep(esw, &rep);
	}
}
+2 −3
Original line number Diff line number Diff line
@@ -135,17 +135,16 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_eswitch_rep *rep = priv->ppriv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	u8 mac[ETH_ALEN];

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		mlx5_query_nic_vport_mac_address(priv->mdev, 0, mac);
		attr->u.ppid.id_len = ETH_ALEN;
		memcpy(&attr->u.ppid.id, &mac, ETH_ALEN);
		ether_addr_copy(attr->u.ppid.id, rep->hw_id);
		break;
	default:
		return -EOPNOTSUPP;
+1 −1
Original line number Diff line number Diff line
@@ -170,7 +170,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}
+9 −7
Original line number Diff line number Diff line
@@ -1451,7 +1451,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */
	/* Only VFs need ACLs for VST and spoofchk filtering */
	if (vport_num && esw->mode == SRIOV_LEGACY) {
		esw_vport_ingress_config(esw, vport);
		esw_vport_egress_config(esw, vport);
	}
@@ -1502,7 +1503,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
	 */
	esw_vport_change_handle_locked(vport);
	vport->enabled_events = 0;
	if (vport_num) {
	if (vport_num && esw->mode == SRIOV_LEGACY) {
		esw_vport_disable_egress_acl(esw, vport);
		esw_vport_disable_ingress_acl(esw, vport);
	}
@@ -1767,7 +1768,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       vport, err);

	mutex_lock(&esw->state_lock);
	if (evport->enabled)
	if (evport->enabled && esw->mode == SRIOV_LEGACY)
		err = esw_vport_ingress_config(esw, evport);
	mutex_unlock(&esw->state_lock);
	return err;
@@ -1839,7 +1840,7 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
	mutex_lock(&esw->state_lock);
	evport->vlan = vlan;
	evport->qos = qos;
	if (evport->enabled) {
	if (evport->enabled && esw->mode == SRIOV_LEGACY) {
		err = esw_vport_ingress_config(esw, evport);
		if (err)
			goto out;
@@ -1868,10 +1869,11 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
	mutex_lock(&esw->state_lock);
	pschk = evport->spoofchk;
	evport->spoofchk = spoofchk;
	if (evport->enabled)
	if (evport->enabled && esw->mode == SRIOV_LEGACY) {
		err = esw_vport_ingress_config(esw, evport);
		if (err)
			evport->spoofchk = pschk;
	}
	mutex_unlock(&esw->state_lock);

	return err;
Loading