Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ab9f2faf authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull rdma updates from Doug Ledford:
 "This is my initial round of 4.4 merge window patches.  There are a few
  other things I wish to get in for 4.4 that aren't in this pull, as
  this represents what has gone through merge/build/run testing and not
  what is the last few items for which testing is not yet complete.

   - "Checksum offload support in user space" enablement
   - Misc cxgb4 fixes, add T6 support
   - Misc usnic fixes
   - 32 bit build warning fixes
   - Misc ocrdma fixes
   - Multicast loopback prevention extension
   - Extend the GID cache to store and return attributes of GIDs
   - Misc iSER updates
   - iSER clustering update
   - Network NameSpace support for rdma CM
   - Work Request cleanup series
   - New Memory Registration API"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (76 commits)
  IB/core, cma: Make __attribute_const__ declarations sparse-friendly
  IB/core: Remove old fast registration API
  IB/ipath: Remove fast registration from the code
  IB/hfi1: Remove fast registration from the code
  RDMA/nes: Remove old FRWR API
  IB/qib: Remove old FRWR API
  iw_cxgb4: Remove old FRWR API
  RDMA/cxgb3: Remove old FRWR API
  RDMA/ocrdma: Remove old FRWR API
  IB/mlx4: Remove old FRWR API support
  IB/mlx5: Remove old FRWR API support
  IB/srp: Don't allocate a page vector when using fast_reg
  IB/srp: Remove srp_finish_mapping
  IB/srp: Convert to new registration API
  IB/srp: Split srp_map_sg
  RDS/IW: Convert to new memory registration API
  svcrdma: Port to new memory registration API
  xprtrdma: Port to new memory registration API
  iser-target: Port to new memory registration API
  IB/iser: Port to new fast registration API
  ...
parents 75021d28 db7489e0
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -2757,9 +2757,10 @@ S: Supported
F:	drivers/net/ethernet/cisco/enic/

CISCO VIC LOW LATENCY NIC DRIVER
M:	Upinder Malhi <umalhi@cisco.com>
M:	Christian Benvenuti <benve@cisco.com>
M:	Dave Goodell <dgoodell@cisco.com>
S:	Supported
F:	drivers/infiniband/hw/usnic
F:	drivers/infiniband/hw/usnic/

CIRRUS LOGIC EP93XX ETHERNET DRIVER
M:	Hartley Sweeten <hsweeten@visionengravers.com>
+11 −9
Original line number Diff line number Diff line
@@ -128,7 +128,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr,
	int ret = -EADDRNOTAVAIL;

	if (dev_addr->bound_dev_if) {
		dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
		dev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
		if (!dev)
			return -ENODEV;
		ret = rdma_copy_addr(dev_addr, dev, NULL);
@@ -138,7 +138,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr,

	switch (addr->sa_family) {
	case AF_INET:
		dev = ip_dev_find(&init_net,
		dev = ip_dev_find(dev_addr->net,
			((struct sockaddr_in *) addr)->sin_addr.s_addr);

		if (!dev)
@@ -149,12 +149,11 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr,
			*vlan_id = rdma_vlan_dev_vlan_id(dev);
		dev_put(dev);
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		rcu_read_lock();
		for_each_netdev_rcu(&init_net, dev) {
			if (ipv6_chk_addr(&init_net,
		for_each_netdev_rcu(dev_addr->net, dev) {
			if (ipv6_chk_addr(dev_addr->net,
					  &((struct sockaddr_in6 *) addr)->sin6_addr,
					  dev, 1)) {
				ret = rdma_copy_addr(dev_addr, dev, NULL);
@@ -236,7 +235,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
	fl4.daddr = dst_ip;
	fl4.saddr = src_ip;
	fl4.flowi4_oif = addr->bound_dev_if;
	rt = ip_route_output_key(&init_net, &fl4);
	rt = ip_route_output_key(addr->net, &fl4);
	if (IS_ERR(rt)) {
		ret = PTR_ERR(rt);
		goto out;
@@ -278,12 +277,12 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
	fl6.saddr = src_in->sin6_addr;
	fl6.flowi6_oif = addr->bound_dev_if;

	dst = ip6_route_output(&init_net, NULL, &fl6);
	dst = ip6_route_output(addr->net, NULL, &fl6);
	if ((ret = dst->error))
		goto put;

	if (ipv6_addr_any(&fl6.saddr)) {
		ret = ipv6_dev_get_saddr(&init_net, ip6_dst_idev(dst)->dev,
		ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev,
					 &fl6.daddr, 0, &fl6.saddr);
		if (ret)
			goto put;
@@ -458,7 +457,7 @@ static void resolve_cb(int status, struct sockaddr *src_addr,
}

int rdma_addr_find_dmac_by_grh(const union ib_gid *sgid, const union ib_gid *dgid,
			       u8 *dmac, u16 *vlan_id)
			       u8 *dmac, u16 *vlan_id, int if_index)
{
	int ret = 0;
	struct rdma_dev_addr dev_addr;
@@ -476,6 +475,8 @@ int rdma_addr_find_dmac_by_grh(const union ib_gid *sgid, const union ib_gid *dgi
	rdma_gid2ip(&dgid_addr._sockaddr, dgid);

	memset(&dev_addr, 0, sizeof(dev_addr));
	dev_addr.bound_dev_if = if_index;
	dev_addr.net = &init_net;

	ctx.addr = &dev_addr;
	init_completion(&ctx.comp);
@@ -510,6 +511,7 @@ int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
	rdma_gid2ip(&gid_addr._sockaddr, sgid);

	memset(&dev_addr, 0, sizeof(dev_addr));
	dev_addr.net = &init_net;
	ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
	if (ret)
		return ret;
+1 −1
Original line number Diff line number Diff line
@@ -126,7 +126,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_send_wr->send_wr.wr.ud.port_num = port_num;
		mad_send_wr->send_wr.port_num = port_num;
	}

	if (ib_post_send_mad(send_buf, NULL)) {
+104 −8
Original line number Diff line number Diff line
@@ -409,7 +409,7 @@ static int ib_cache_gid_find(struct ib_device *ib_dev,
					mask, port, index);
}

int ib_cache_gid_find_by_port(struct ib_device *ib_dev,
int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
			       const union ib_gid *gid,
			       u8 port, struct net_device *ndev,
			       u16 *index)
@@ -438,6 +438,82 @@ int ib_cache_gid_find_by_port(struct ib_device *ib_dev,

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);

/**
 * ib_cache_gid_find_by_filter - Returns the GID table index where a specified
 * GID value occurs
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @index: The index into the cached GID table where the GID was found.  This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * of which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 *
 */
/*
 * Scan the RoCE GID table of @port for an entry equal to @gid that also
 * satisfies @filter.  On success the matching table index is stored in
 * *@index (if @index is non-NULL) and 0 is returned.  Returns
 * -EOPNOTSUPP if the device has no GID cache, -EPROTONOSUPPORT if the
 * port is out of range or is not a RoCE port, and -ENOENT if no entry
 * matches.  @filter runs under a per-entry read lock with IRQs disabled,
 * so it must not sleep (see the kernel-doc comment above).
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
				       const union ib_gid *gid,
				       u8 port,
				       bool (*filter)(const union ib_gid *,
						      const struct ib_gid_attr *,
						      void *),
				       void *context,
				       u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned int i;
	bool found = false;

	if (!ports_table)
		return -EOPNOTSUPP;

	/* Filtered lookups are only defined for in-range RoCE ports. */
	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev) ||
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;

	/* Port tables are indexed from the device's first port number. */
	table = ports_table[port - rdma_start_port(ib_dev)];

	for (i = 0; i < table->sz; i++) {
		struct ib_gid_attr attr;
		unsigned long flags;

		/*
		 * Each entry has its own lock; hold it across the GID
		 * compare and the filter call so the entry (including its
		 * ndev field) cannot change underneath us.
		 */
		read_lock_irqsave(&table->data_vec[i].lock, flags);
		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			goto next;

		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			goto next;

		/* Snapshot the attributes before handing them to filter. */
		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

		if (filter(gid, &attr, context))
			found = true;

next:
		read_unlock_irqrestore(&table->data_vec[i].lock, flags);

		/* Break only after dropping the entry lock. */
		if (found)
			break;
	}

	if (!found)
		return -ENOENT;

	if (index)
		*index = i;
	return 0;
}

static struct ib_gid_table *alloc_gid_table(int sz)
{
@@ -649,24 +725,44 @@ static int gid_table_setup_one(struct ib_device *ib_dev)
int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid)
		      union ib_gid     *gid,
		      struct ib_gid_attr *gid_attr)
{
	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	return __ib_cache_gid_get(device, port_num, index, gid, NULL);
	return __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
}
EXPORT_SYMBOL(ib_get_cached_gid);

int ib_find_cached_gid(struct ib_device *device,
		       const union ib_gid *gid,
		       struct net_device *ndev,
		       u8               *port_num,
		       u16              *index)
{
	return ib_cache_gid_find(device, gid, NULL, port_num, index);
	return ib_cache_gid_find(device, gid, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

/*
 * Public entry point for a filtered GID-table lookup: validates that the
 * port supports filtering, then delegates to the cache implementation.
 */
int ib_find_gid_by_filter(struct ib_device *device,
			  const union ib_gid *gid,
			  u8 port_num,
			  bool (*filter)(const union ib_gid *gid,
					 const struct ib_gid_attr *,
					 void *),
			  void *context, u16 *index)
{
	/*
	 * A filter callback is only meaningful on a RoCE GID table;
	 * reject filtered lookups on any other port type.
	 */
	if (filter && !rdma_cap_roce_gid_table(device, port_num))
		return -EPROTONOSUPPORT;

	return ib_cache_gid_find_by_filter(device, gid, port_num,
					   filter, context, index);
}
EXPORT_SYMBOL(ib_find_gid_by_filter);

int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
@@ -845,7 +941,7 @@ static void ib_cache_update(struct ib_device *device,
	if (!use_roce_gid_table) {
		for (i = 0;  i < gid_cache->table_len; ++i) {
			ret = ib_query_gid(device, port, i,
					   gid_cache->table + i);
					   gid_cache->table + i, NULL);
			if (ret) {
				printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
				       ret, device->name, i);
+7 −33
Original line number Diff line number Diff line
@@ -179,8 +179,6 @@ struct cm_av {
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 timeout;
	u8  valid;
	u8  smac[ETH_ALEN];
};

struct cm_work {
@@ -361,17 +359,21 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
	unsigned long flags;
	int ret;
	u8 p;
	struct net_device *ndev = ib_get_ndev_from_path(path);

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
					&p, NULL)) {
					ndev, &p, NULL)) {
			port = cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (ndev)
		dev_put(ndev);

	if (!port)
		return -EINVAL;

@@ -384,9 +386,7 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
	ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
			     &av->ah_attr);
	av->timeout = path->packet_life_time + 1;
	memcpy(av->smac, path->smac, sizeof(av->smac));

	av->valid = 1;
	return 0;
}

@@ -1639,11 +1639,11 @@ static int cm_req_handler(struct cm_work *work)
	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);

	memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
	work->path[0].vlan_id = cm_id_priv->av.ah_attr.vlan_id;
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret) {
		ib_get_cached_gid(work->port->cm_dev->ib_device,
				  work->port->port_num, 0, &work->path[0].sgid);
				  work->port->port_num, 0, &work->path[0].sgid,
				  NULL);
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
@@ -3618,32 +3618,6 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		if (!cm_id_priv->av.valid) {
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
			return -EINVAL;
		}
		if (cm_id_priv->av.ah_attr.vlan_id != 0xffff) {
			qp_attr->vlan_id = cm_id_priv->av.ah_attr.vlan_id;
			*qp_attr_mask |= IB_QP_VID;
		}
		if (!is_zero_ether_addr(cm_id_priv->av.smac)) {
			memcpy(qp_attr->smac, cm_id_priv->av.smac,
			       sizeof(qp_attr->smac));
			*qp_attr_mask |= IB_QP_SMAC;
		}
		if (cm_id_priv->alt_av.valid) {
			if (cm_id_priv->alt_av.ah_attr.vlan_id != 0xffff) {
				qp_attr->alt_vlan_id =
					cm_id_priv->alt_av.ah_attr.vlan_id;
				*qp_attr_mask |= IB_QP_ALT_VID;
			}
			if (!is_zero_ether_addr(cm_id_priv->alt_av.smac)) {
				memcpy(qp_attr->alt_smac,
				       cm_id_priv->alt_av.smac,
				       sizeof(qp_attr->alt_smac));
				*qp_attr_mask |= IB_QP_ALT_SMAC;
			}
		}
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
Loading