
Commit 9ddffb8a authored by David S. Miller

Merge branch 'mlx4'



Amir Vadai says:

====================
net/mlx4: Mellanox driver update 08-12-2013

This patchset contains:
1. Support for ndo_get_phys_port_id, added by Hadar.
2. Change the driver to use a CQE/EQE size of 64 bytes by default, done by
   Eyal. This doubles the packet rate of the NIC.
3. Configure the XPS queue mapping on driver load - added by Ido.
4. Fixes for some small bugs, done by Jenny and Matan.

The patchset was applied and tested against commit "23721754 lib: hash:
follow-up fixups for arch hash".

Changes from V1:
- Removed patch 10, "net/mlx4_en: Fix Supported/Advertised link mode reported
  by ethtool". This patch needs to be rewritten from scratch, and I wouldn't
  want it to block the rest of the patches in this set.
  Also, the Kconfig fix suggested by Ben will be sent in the next patchset.

Changes from V0:
- Found some issues in the "Reuse memory in RX flow" patch from V0.
  Removed this patch from the patchset until it is analyzed and fixed.
- Fixed some coding style issues in patch 6 "Configure the XPS queue mapping
  on driver load".
- Changed patch 9 "Add NAPI support for transmit side" to use NAPI_POLL_WEIGHT
  instead of MLX4_EN_TX_BUDGET.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5b59d467 982290a7
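
For context on the ndo_get_phys_port_id hook wired up below: the networking core invokes it through dev_get_phys_port_id(), which exposes the ID to userspace via rtnetlink's IFLA_PHYS_PORT_ID attribute. A minimal sketch of the caller side, close to the 3.12-era net/core/dev.c helper (reproduced from memory, so treat it as illustrative rather than authoritative):

/* Illustrative sketch of the core helper that invokes the new ndo;
 * not part of this commit. */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_port_id *ppid)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* Drivers that don't implement the hook report "not supported". */
	if (!ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_id(dev, ppid);
}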
+8 −4
@@ -161,12 +161,16 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 	cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
 	cq->mcq.event = mlx4_en_cq_event;

-	if (!cq->is_tx) {
+	if (cq->is_tx) {
+		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
+			       NAPI_POLL_WEIGHT);
+	} else {
 		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
 		napi_hash_add(&cq->napi);
-		napi_enable(&cq->napi);
 	}

+	napi_enable(&cq->napi);
+
 	return 0;
 }

@@ -188,12 +192,12 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)

 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 {
-	if (!cq->is_tx) {
+	napi_disable(&cq->napi);
+	if (!cq->is_tx) {
 		napi_hash_del(&cq->napi);
 		synchronize_rcu();
-		netif_napi_del(&cq->napi);
 	}
+	netif_napi_del(&cq->napi);

 	mlx4_cq_free(priv->mdev->dev, &cq->mcq);
 }
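
The net effect of the two hunks above is that TX CQs now follow the same NAPI lifecycle as RX CQs; only the poll callback and weight differ. A generic sketch of that lifecycle, assuming the 3.13-era NAPI API (the example_* names are not driver functions):

#include <linux/netdevice.h>

static void example_napi_setup(struct net_device *dev, struct napi_struct *napi,
			       int (*poll)(struct napi_struct *, int))
{
	/* Register the poll callback; TX uses NAPI_POLL_WEIGHT, RX uses 64. */
	netif_napi_add(dev, napi, poll, NAPI_POLL_WEIGHT);
	napi_enable(napi);	/* from here on, napi_schedule() may run poll */
}

static void example_napi_teardown(struct napi_struct *napi)
{
	napi_disable(napi);	/* waits for any in-flight poll to finish */
	netif_napi_del(napi);	/* unregister */
}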
+3 −0
@@ -174,6 +174,9 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
 		mlx4_err(mdev, "Internal error detected, restarting device\n");
 		break;

+	case MLX4_DEV_EVENT_SLAVE_INIT:
+	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
+		break;
 	default:
 		if (port < 1 || port > dev->caps.num_ports ||
 		    !mdev->pndev[port])
+27 −2
@@ -1910,8 +1910,10 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 				      prof->tx_ring_size, i, TX, node))
 			goto err;

-		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
-					   prof->tx_ring_size, TXBB_SIZE, node))
+		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
+					   priv->base_tx_qpn + i,
+					   prof->tx_ring_size, TXBB_SIZE,
+					   node, i))
 			goto err;
 	}

@@ -2164,6 +2166,27 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st

 	return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
 }

+#define PORT_ID_BYTE_LEN 8
+static int mlx4_en_get_phys_port_id(struct net_device *dev,
+				    struct netdev_phys_port_id *ppid)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_dev *mdev = priv->mdev->dev;
+	int i;
+	u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
+
+	if (!phys_port_id)
+		return -EOPNOTSUPP;
+
+	ppid->id_len = sizeof(phys_port_id);
+	for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
+		ppid->id[i] = phys_port_id & 0xff;
+		phys_port_id >>= 8;
+	}
+	return 0;
+}
+
 static const struct net_device_ops mlx4_netdev_ops = {
 	.ndo_open		= mlx4_en_open,
 	.ndo_stop		= mlx4_en_close,
@@ -2189,6 +2212,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	.ndo_busy_poll		= mlx4_en_low_latency_recv,
 #endif
+	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
 };

 static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2217,6 +2241,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
 #endif
+	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
 };

 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
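
mlx4_en_get_phys_port_id above serializes the 64-bit GUID into ppid->id most-significant byte first. A standalone userspace illustration of that loop (the GUID value is a made-up example, not real data):

#include <stdio.h>
#include <stdint.h>

#define PORT_ID_BYTE_LEN 8

int main(void)
{
	uint64_t phys_port_id = 0x0002c90300fed670ULL;	/* made-up example GUID */
	uint8_t id[PORT_ID_BYTE_LEN];
	int i;

	/* Walk from the last byte back, peeling off the low byte each time,
	 * so id[] ends up in big-endian (network) order. */
	for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
		id[i] = phys_port_id & 0xff;
		phys_port_id >>= 8;
	}

	for (i = 0; i < PORT_ID_BYTE_LEN; i++)
		printf("%02x", id[i]);	/* prints 0002c90300fed670 */
	printf("\n");
	return 0;
}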
+40 −8
@@ -55,7 +55,7 @@ MODULE_PARM_DESC(inline_thold, "threshold for using inline data");

 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_tx_ring **pring, int qpn, u32 size,
-			   u16 stride, int node)
+			   u16 stride, int node, int queue_index)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_tx_ring *ring;
@@ -140,6 +140,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 		ring->bf_enabled = true;

 	ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
+	ring->queue_index = queue_index;
+
+	if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
+		cpumask_set_cpu(queue_index, &ring->affinity_mask);

 	*pring = ring;
 	return 0;
@@ -206,6 +210,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,

 	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
 			       &ring->qp, &ring->qp_state);
+	if (!user_prio && cpu_online(ring->queue_index))
+		netif_set_xps_queue(priv->dev, &ring->affinity_mask,
+				    ring->queue_index);

 	return err;
 }
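
The two hunks above implement the XPS wiring from patch 6: each TX ring remembers its queue index, pins the same-numbered CPU in its affinity mask, and publishes that mapping with netif_set_xps_queue() when the ring is activated (unless user priorities are in use). A condensed sketch of the same idea, assuming the driver's 1:1 ring-to-CPU layout (example_* is not a driver function):

#include <linux/netdevice.h>
#include <linux/cpumask.h>

static int example_map_tx_ring_to_cpu(struct net_device *dev, u16 queue_index)
{
	cpumask_t mask;

	if (!cpu_online(queue_index))
		return -EINVAL;

	cpumask_clear(&mask);
	cpumask_set_cpu(queue_index, &mask);
	/* Transmits originating on this CPU now prefer this queue. */
	return netif_set_xps_queue(dev, &mask, queue_index);
}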
@@ -317,7 +324,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
 			}
 		}
 	}
-	dev_kfree_skb_any(skb);
+	dev_kfree_skb(skb);
 	return tx_info->nr_txbb;
 }

@@ -354,7 +361,9 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
 	return cnt;
 }

-static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+static int mlx4_en_process_tx_cq(struct net_device *dev,
+				 struct mlx4_en_cq *cq,
+				 int budget)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_cq *mcq = &cq->mcq;
@@ -372,9 +381,10 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 	u32 bytes = 0;
 	int factor = priv->cqe_factor;
 	u64 timestamp = 0;
+	int done = 0;

 	if (!priv->port_up)
-		return;
+		return 0;

 	index = cons_index & size_mask;
 	cqe = &buf[(index << factor) + factor];
@@ -383,7 +393,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)

 	/* Process all completed CQEs */
 	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
-			cons_index & size)) {
+			cons_index & size) && (done < budget)) {
 		/*
 		 * make sure we read the CQE after we read the
 		 * ownership bit
@@ -421,7 +431,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 			txbbs_stamp = txbbs_skipped;
 			packets++;
 			bytes += ring->tx_info[ring_index].nr_bytes;
-		} while (ring_index != new_index);
+		} while ((++done < budget) && (ring_index != new_index));

 		++cons_index;
 		index = cons_index & size_mask;
@@ -447,6 +457,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 		netif_tx_wake_queue(ring->tx_queue);
 		priv->port_stats.wake_queue++;
 	}
+	return done;
 }

 void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -454,10 +465,31 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
 	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
 	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

-	mlx4_en_process_tx_cq(cq->dev, cq);
+	if (priv->port_up)
+		napi_schedule(&cq->napi);
+	else
+		mlx4_en_arm_cq(priv, cq);
 }

+/* TX CQ polling - called by NAPI */
+int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
+{
+	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
+	struct net_device *dev = cq->dev;
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	int done;
+
+	done = mlx4_en_process_tx_cq(dev, cq, budget);
+
+	/* If we used up all the quota - we're probably not done yet... */
+	if (done < budget) {
+		/* Done for now */
+		napi_complete(napi);
+		mlx4_en_arm_cq(priv, cq);
+		return done;
+	}
+	return budget;
+}
+
 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
 						      struct mlx4_en_tx_ring *ring,
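
mlx4_en_poll_tx_cq above follows the standard NAPI poll contract: consume at most budget completions, report how many were handled, and only leave polling mode and re-arm the CQ interrupt when the queue drained before the budget ran out. A sketch of the skeleton every such callback reduces to (process_completions() and rearm_irq() are hypothetical stand-ins for mlx4_en_process_tx_cq() and mlx4_en_arm_cq()):

#include <linux/netdevice.h>

/* Hypothetical stand-ins; declared only so the sketch is self-contained. */
static int process_completions(struct napi_struct *napi, int budget);
static void rearm_irq(struct napi_struct *napi);

static int example_poll(struct napi_struct *napi, int budget)
{
	int done = process_completions(napi, budget);

	if (done < budget) {
		/* Queue drained before the budget ran out: leave polling
		 * mode and go back to interrupt-driven operation. */
		napi_complete(napi);
		rearm_irq(napi);
		return done;
	}
	/* Budget exhausted: return it so NAPI schedules us again. */
	return budget;
}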
+59 −15
@@ -207,25 +207,25 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,

 /* when opcode modifier = 1 */
 #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
-#define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET	0x8
-#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET		0xc
+#define QUERY_FUNC_CAP_FLAGS0_OFFSET		0x8
+#define QUERY_FUNC_CAP_FLAGS1_OFFSET		0xc

 #define QUERY_FUNC_CAP_QP0_TUNNEL		0x10
 #define QUERY_FUNC_CAP_QP0_PROXY		0x14
 #define QUERY_FUNC_CAP_QP1_TUNNEL		0x18
 #define QUERY_FUNC_CAP_QP1_PROXY		0x1c
+#define QUERY_FUNC_CAP_PHYS_PORT_ID		0x28

-#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC	0x40
-#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN	0x80
+#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC		0x40
+#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN	0x80
+#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO		0x10

-#define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80
+#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80

 	if (vhcr->op_modifier == 1) {
-		field = 0;
-		/* ensure force vlan and force mac bits are not set */
-		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
-		/* ensure that phy_wqe_gid bit is not set */
-		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
+		/* Set nic_info bit to mark new fields support */
+		field  = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
+		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);

 		field = vhcr->in_modifier; /* phys-port = logical-port */
 		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
@@ -243,6 +243,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 		size += 2;
 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);

+		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
+			 QUERY_FUNC_CAP_PHYS_PORT_ID);
+
 	} else if (vhcr->op_modifier == 0) {
 		/* enable rdma and ethernet interfaces, and new quota locations */
 		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
@@ -391,22 +394,22 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
 		goto out;
 	}

+	MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
 	if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
-		MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
-		if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
+		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
 			mlx4_err(dev, "VLAN is enforced on this port\n");
 			err = -EPROTONOSUPPORT;
 			goto out;
 		}

-		if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
+		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
 			mlx4_err(dev, "Force mac is enabled on this port\n");
 			err = -EPROTONOSUPPORT;
 			goto out;
 		}
 	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
-		MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
-		if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
+		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
+		if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
 			mlx4_err(dev, "phy_wqe_gid is "
 				 "enforced on this ib port\n");
 			err = -EPROTONOSUPPORT;
@@ -433,6 +436,10 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
 	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
 	func_cap->qp1_proxy_qpn = size & 0xFFFFFF;

+	if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
+		MLX4_GET(func_cap->phys_port_id, outbox,
+			 QUERY_FUNC_CAP_PHYS_PORT_ID);
+
 	/* All other resources are allocated by the master, but we still report
 	 * 'num' and 'reserved' capabilities as follows:
 	 * - num remains the maximum resource index
@@ -1713,6 +1720,43 @@ int mlx4_NOP(struct mlx4_dev *dev)
 	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
 }

+int mlx4_get_phys_port_id(struct mlx4_dev *dev)
+{
+	u8 port;
+	u32 *outbox;
+	struct mlx4_cmd_mailbox *mailbox;
+	u32 in_mod;
+	u32 guid_hi, guid_lo;
+	int err, ret = 0;
+#define MOD_STAT_CFG_PORT_OFFSET 8
+#define MOD_STAT_CFG_GUID_H	 0X14
+#define MOD_STAT_CFG_GUID_L	 0X1c
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	outbox = mailbox->buf;
+
+	for (port = 1; port <= dev->caps.num_ports; port++) {
+		in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
+		err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
+				   MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
+				   MLX4_CMD_NATIVE);
+		if (err) {
+			mlx4_err(dev, "Fail to get port %d uplink guid\n",
+				 port);
+			ret = err;
+		} else {
+			MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
+			MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
+			dev->caps.phys_port_id[port] = (u64)guid_lo |
+						       (u64)guid_hi << 32;
+		}
+	}
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return ret;
+}
+
 #define MLX4_WOL_SETUP_MODE (5 << 28)
 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
 {
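
mlx4_get_phys_port_id above reads the uplink GUID as two 32-bit words and splices them into one u64. A standalone userspace check of that arithmetic (the word values are made-up examples):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t guid_hi = 0x0002c903;	/* made-up upper word */
	uint32_t guid_lo = 0x00fed670;	/* made-up lower word */

	/* Same expression as the driver: low word in bits 0..31,
	 * high word shifted into bits 32..63. */
	uint64_t phys_port_id = (uint64_t)guid_lo | (uint64_t)guid_hi << 32;

	printf("0x%016llx\n", (unsigned long long)phys_port_id);
	/* prints 0x0002c90300fed670 */
	return 0;
}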