Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7b1cd95d authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull RDMA subsystem updates from Jason Gunthorpe:
 "Overall this cycle did not have any major excitement, and did not
  require any shared branch with netdev.

  Lots of driver updates, particularly of the scale-up and performance
  variety. The largest body of core work was Parav's patches fixing and
  restructuring some of the core code to make way for future RDMA
  containerization.

  Summary:

   - misc small driver fixups to
     bnxt_re/hfi1/qib/hns/ocrdma/rdmavt/vmw_pvrdma/nes

   - several major feature adds to bnxt_re driver: SRIOV VF RoCE
     support, HugePages support, extended hardware stats support, and
     SRQ support

   - a notable number of fixes to the i40iw driver from debugging scale
     up testing

   - more work to enable the new hip08 chip in the hns driver

   - misc small ULP fixups to srp/srpt/ipoib

   - preparation for srp initiator and target to support the RDMA-CM
     protocol for connections

   - add RDMA-CM support to srp initiator, srp target is still a WIP

   - fixes for a couple of places where ipoib could spam the dmesg log

   - fix encode/decode of FDR/EDR data rates in the core

   - many patches from Parav with ongoing work to clean up
     inconsistencies and bugs in RoCE support around the rdma_cm

   - mlx5 driver support for the userspace features 'thread domain',
     'wallclock timestamps' and 'DV Direct Connected transport'. Support
     for the firmware dual port RoCE capability

   - core support for more than 32 rdma devices in the char dev
     allocation

   - kernel doc updates from Randy Dunlap

   - new netlink uAPI for inspecting RDMA objects similar in spirit to 'ss'

   - one minor change to the kobject code acked by Greg KH"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (259 commits)
  RDMA/nldev: Provide detailed QP information
  RDMA/nldev: Provide global resource utilization
  RDMA/core: Add resource tracking for create and destroy PDs
  RDMA/core: Add resource tracking for create and destroy CQs
  RDMA/core: Add resource tracking for create and destroy QPs
  RDMA/restrack: Add general infrastructure to track RDMA resources
  RDMA/core: Save kernel caller name when creating PD and CQ objects
  RDMA/core: Use the MODNAME instead of the function name for pd callers
  RDMA: Move enum ib_cq_creation_flags to uapi headers
  IB/rxe: Change RDMA_RXE kconfig to use select
  IB/qib: remove qib_keys.c
  IB/mthca: remove mthca_user.h
  RDMA/cm: Fix access to uninitialized variable
  RDMA/cma: Use existing netif_is_bond_master function
  IB/core: Avoid SGID attributes query while converting GID from OPA to IB
  RDMA/mlx5: Avoid memory leak in case of XRCD dealloc failure
  IB/umad: Fix use of unprotected device pointer
  IB/iser: Combine substrings for three messages
  IB/iser: Delete an unnecessary variable initialisation in iser_send_data_out()
  IB/iser: Delete an error message for a failed memory allocation in iser_send_data_out()
  ...
parents 2155e69a e7996a9a
Loading
Loading
Loading
Loading
+4 −3
Original line number Diff line number Diff line
@@ -6892,7 +6892,7 @@ M: Jason Gunthorpe <jgg@mellanox.com>
L:	linux-rdma@vger.kernel.org
W:	http://www.openfabrics.org/
Q:	http://patchwork.kernel.org/project/linux-rdma/list/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma.git
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
S:	Supported
F:	Documentation/devicetree/bindings/infiniband/
F:	Documentation/infiniband/
@@ -11218,7 +11218,8 @@ S: Maintained
F:	drivers/firmware/qemu_fw_cfg.c

QIB DRIVER
M:	Mike Marciniszyn <infinipath@intel.com>
M:	Dennis Dalessandro <dennis.dalessandro@intel.com>
M:	Mike Marciniszyn <mike.marciniszyn@intel.com>
L:	linux-rdma@vger.kernel.org
S:	Supported
F:	drivers/infiniband/hw/qib/
@@ -11245,7 +11246,6 @@ F: include/linux/qed/
F:	drivers/net/ethernet/qlogic/qede/

QLOGIC QL4xxx RDMA DRIVER
M:	Ram Amrani <Ram.Amrani@cavium.com>
M:	Michal Kalderon <Michal.Kalderon@cavium.com>
M:	Ariel Elior <Ariel.Elior@cavium.com>
L:	linux-rdma@vger.kernel.org
@@ -11507,6 +11507,7 @@ F: drivers/net/ethernet/rdc/r6040.c

RDMAVT - RDMA verbs software
M:	Dennis Dalessandro <dennis.dalessandro@intel.com>
M:	Mike Marciniszyn <mike.marciniszyn@intel.com>
L:	linux-rdma@vger.kernel.org
S:	Supported
F:	drivers/infiniband/sw/rdmavt
+1 −1
Original line number Diff line number Diff line
@@ -12,7 +12,7 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
				device.o fmr_pool.o cache.o netlink.o \
				roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
				multicast.o mad.o smi.o agent.o mad_rmpp.o \
				security.o nldev.o
				security.o nldev.o restrack.o

ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
+8 −57
Original line number Diff line number Diff line
@@ -243,8 +243,7 @@ void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
EXPORT_SYMBOL(rdma_copy_addr);

int rdma_translate_ip(const struct sockaddr *addr,
		      struct rdma_dev_addr *dev_addr,
		      u16 *vlan_id)
		      struct rdma_dev_addr *dev_addr)
{
	struct net_device *dev;

@@ -266,9 +265,6 @@ int rdma_translate_ip(const struct sockaddr *addr,
			return -EADDRNOTAVAIL;

		rdma_copy_addr(dev_addr, dev, NULL);
		dev_addr->bound_dev_if = dev->ifindex;
		if (vlan_id)
			*vlan_id = rdma_vlan_dev_vlan_id(dev);
		dev_put(dev);
		break;
#if IS_ENABLED(CONFIG_IPV6)
@@ -279,9 +275,6 @@ int rdma_translate_ip(const struct sockaddr *addr,
					  &((const struct sockaddr_in6 *)addr)->sin6_addr,
					  dev, 1)) {
				rdma_copy_addr(dev_addr, dev, NULL);
				dev_addr->bound_dev_if = dev->ifindex;
				if (vlan_id)
					*vlan_id = rdma_vlan_dev_vlan_id(dev);
				break;
			}
		}
@@ -481,7 +474,7 @@ static int addr_resolve_neigh(struct dst_entry *dst,
	if (dst->dev->flags & IFF_LOOPBACK) {
		int ret;

		ret = rdma_translate_ip(dst_in, addr, NULL);
		ret = rdma_translate_ip(dst_in, addr);
		if (!ret)
			memcpy(addr->dst_dev_addr, addr->src_dev_addr,
			       MAX_ADDR_LEN);
@@ -558,7 +551,7 @@ static int addr_resolve(struct sockaddr *src_in,
	}

	if (ndev->flags & IFF_LOOPBACK) {
		ret = rdma_translate_ip(dst_in, addr, NULL);
		ret = rdma_translate_ip(dst_in, addr);
		/*
		 * Put the loopback device and get the translated
		 * device instead.
@@ -744,7 +737,6 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
EXPORT_SYMBOL(rdma_addr_cancel);

struct resolve_cb_context {
	struct rdma_dev_addr *addr;
	struct completion comp;
	int status;
};
@@ -752,39 +744,31 @@ struct resolve_cb_context {
static void resolve_cb(int status, struct sockaddr *src_addr,
	     struct rdma_dev_addr *addr, void *context)
{
	if (!status)
		memcpy(((struct resolve_cb_context *)context)->addr,
		       addr, sizeof(struct rdma_dev_addr));
	((struct resolve_cb_context *)context)->status = status;
	complete(&((struct resolve_cb_context *)context)->comp);
}

int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
				 const union ib_gid *dgid,
				 u8 *dmac, u16 *vlan_id, int *if_index,
				 u8 *dmac, const struct net_device *ndev,
				 int *hoplimit)
{
	int ret = 0;
	struct rdma_dev_addr dev_addr;
	struct resolve_cb_context ctx;
	struct net_device *dev;

	union {
		struct sockaddr     _sockaddr;
		struct sockaddr_in  _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid_addr, dgid_addr;

	int ret;

	rdma_gid2ip(&sgid_addr._sockaddr, sgid);
	rdma_gid2ip(&dgid_addr._sockaddr, dgid);

	memset(&dev_addr, 0, sizeof(dev_addr));
	if (if_index)
		dev_addr.bound_dev_if = *if_index;
	dev_addr.bound_dev_if = ndev->ifindex;
	dev_addr.net = &init_net;

	ctx.addr = &dev_addr;
	init_completion(&ctx.comp);
	ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
			&dev_addr, 1000, resolve_cb, &ctx);
@@ -798,42 +782,9 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
		return ret;

	memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
	dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
	if (!dev)
		return -ENODEV;
	if (if_index)
		*if_index = dev_addr.bound_dev_if;
	if (vlan_id)
		*vlan_id = rdma_vlan_dev_vlan_id(dev);
	if (hoplimit)
	*hoplimit = dev_addr.hoplimit;
	dev_put(dev);
	return ret;
}
EXPORT_SYMBOL(rdma_addr_find_l2_eth_by_grh);

int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
{
	int ret = 0;
	struct rdma_dev_addr dev_addr;
	union {
		struct sockaddr     _sockaddr;
		struct sockaddr_in  _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} gid_addr;

	rdma_gid2ip(&gid_addr._sockaddr, sgid);

	memset(&dev_addr, 0, sizeof(dev_addr));
	dev_addr.net = &init_net;
	ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
	if (ret)
		return ret;

	memcpy(smac, dev_addr.src_dev_addr, ETH_ALEN);
	return ret;
	return 0;
}
EXPORT_SYMBOL(rdma_addr_find_smac_by_sgid);

static int netevent_callback(struct notifier_block *self, unsigned long event,
	void *ctx)
+7 −16
Original line number Diff line number Diff line
@@ -573,27 +573,24 @@ static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
		struct ib_gid_attr attr;

		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			goto next;
			continue;

		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			goto next;
			continue;

		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

		if (filter(gid, &attr, context))
		if (filter(gid, &attr, context)) {
			found = true;

next:
		if (found)
			if (index)
				*index = i;
			break;
		}
	}
	read_unlock_irqrestore(&table->rwlock, flags);

	if (!found)
		return -ENOENT;

	if (index)
		*index = i;
	return 0;
}

@@ -824,12 +821,7 @@ static int gid_table_setup_one(struct ib_device *ib_dev)
	if (err)
		return err;

	err = roce_rescan_device(ib_dev);

	if (err) {
		gid_table_cleanup_one(ib_dev);
		gid_table_release_one(ib_dev);
	}
	rdma_roce_rescan_device(ib_dev);

	return err;
}
@@ -883,7 +875,6 @@ int ib_find_gid_by_filter(struct ib_device *device,
					   port_num, filter,
					   context, index);
}
EXPORT_SYMBOL(ib_find_gid_by_filter);

int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
+159 −68
Original line number Diff line number Diff line
@@ -452,12 +452,13 @@ static void cm_set_private_data(struct cm_id_private *cm_id_priv,
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				   struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
	return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
				       port->port_num, wc,
				       grh, &av->ah_attr);
}

@@ -494,8 +495,11 @@ static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
	ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
					&av->ah_attr);
	if (ret)
		return ret;

	av->timeout = path->packet_life_time + 1;

	spin_lock_irqsave(&cm.lock, flags);
@@ -1560,6 +1564,35 @@ static u16 cm_get_bth_pkey(struct cm_work *work)
	return pkey;
}

/**
 * Convert OPA SGID to IB SGID
 * ULPs (such as IPoIB) do not understand OPA GIDs and will
 * reject them as the local_gid will not match the sgid. Therefore,
 * change the pathrec's SGID to an IB SGID.
 *
 * @work: Work completion
 * @path: Path record
 */
static void cm_opa_to_ib_sgid(struct cm_work *work,
			      struct sa_path_rec *path)
{
	struct ib_device *dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;

	if (rdma_cap_opa_ah(dev, port_num) &&
	    (ib_is_opa_gid(&path->sgid))) {
		union ib_gid sgid;

		if (ib_get_cached_gid(dev, port_num, 0, &sgid, NULL)) {
			dev_warn(&dev->dev,
				 "Error updating sgid in CM request\n");
			return;
		}

		path->sgid = sgid;
	}
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
@@ -1573,10 +1606,13 @@ static void cm_format_req_event(struct cm_work *work,
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (cm_req_has_alt_path(req_msg))
	cm_opa_to_ib_sgid(work, param->primary_path);
	if (cm_req_has_alt_path(req_msg)) {
		param->alternate_path = &work->path[1];
	else
		cm_opa_to_ib_sgid(work, param->alternate_path);
	} else {
		param->alternate_path = NULL;
	}
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
@@ -1826,9 +1862,11 @@ static int cm_req_handler(struct cm_work *work)

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				      work->mad_recv_wc->recv_buf.grh,
				      &cm_id_priv->av);
	if (ret)
		goto destroy;
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
@@ -1841,9 +1879,10 @@ static int cm_req_handler(struct cm_work *work)

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
			 be32_to_cpu(cm_id->local_id));
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
		goto free_timeinfo;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
@@ -1861,7 +1900,11 @@ static int cm_req_handler(struct cm_work *work)
				work->port->port_num,
				grh->sgid_index,
				&gid, &gid_attr);
	if (!ret) {
	if (ret) {
		ib_send_cm_rej(cm_id, IB_CM_REJ_UNSUPPORTED, NULL, 0, NULL, 0);
		goto rejected;
	}

	if (gid_attr.ndev) {
		work->path[0].rec_type =
			sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
@@ -1886,30 +1929,20 @@ static int cm_req_handler(struct cm_work *work)
	work->path[0].hop_limit = grh->hop_limit;
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
				 cm_id_priv);
	}
	if (ret) {
		int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
		int err;

		err = ib_get_cached_gid(work->port->cm_dev->ib_device,
					work->port->port_num, 0,
					&work->path[0].sgid,
					    &gid_attr);
		if (!err && gid_attr.ndev) {
			work->path[0].rec_type =
				sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
			sa_path_set_ifindex(&work->path[0],
					    gid_attr.ndev->ifindex);
			sa_path_set_ndev(&work->path[0],
					 dev_net(gid_attr.ndev));
			dev_put(gid_attr.ndev);
		} else {
			cm_path_set_rec_type(work->port->cm_dev->ib_device,
					     work->port->port_num,
					     &work->path[0],
					     &req_msg->primary_local_gid);
		}
		if (cm_req_has_alt_path(req_msg))
			work->path[1].rec_type = work->path[0].rec_type;
					NULL);
		if (err)
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
				       NULL, 0, NULL, 0);
		else
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
				       &work->path[0].sgid,
				       sizeof(work->path[0].sgid),
				       NULL, 0);
		goto rejected;
	}
@@ -1919,7 +1952,7 @@ static int cm_req_handler(struct cm_work *work)
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
				       sizeof(work->path[0].sgid), NULL, 0);
			goto rejected;
		}
	}
@@ -1945,6 +1978,8 @@ static int cm_req_handler(struct cm_work *work)
rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
free_timeinfo:
	kfree(cm_id_priv->timewait_info);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
@@ -1997,6 +2032,8 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		pr_debug("%s: local_comm_id %d, cm_id->state: %d\n", __func__,
			 be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
		ret = -EINVAL;
		goto out;
	}
@@ -2063,6 +2100,8 @@ int ib_send_cm_rtu(struct ib_cm_id *cm_id,
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		pr_debug("%s: local_id %d, cm_id->state %d\n", __func__,
			 be32_to_cpu(cm_id->local_id), cm_id->state);
		ret = -EINVAL;
		goto error;
	}
@@ -2170,6 +2209,8 @@ static int cm_rep_handler(struct cm_work *work)
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		pr_debug("%s: remote_comm_id %d, no cm_id_priv\n", __func__,
			 be32_to_cpu(rep_msg->remote_comm_id));
		return -EINVAL;
	}

@@ -2183,6 +2224,10 @@ static int cm_rep_handler(struct cm_work *work)
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		pr_debug("%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
			 __func__, cm_id_priv->id.state,
			 be32_to_cpu(rep_msg->local_comm_id),
			 be32_to_cpu(rep_msg->remote_comm_id));
		goto error;
	}

@@ -2196,6 +2241,8 @@ static int cm_rep_handler(struct cm_work *work)
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		pr_debug("%s: Failed to insert remote id %d\n", __func__,
			 be32_to_cpu(rep_msg->remote_comm_id));
		goto error;
	}
	/* Check for a stale connection. */
@@ -2213,6 +2260,10 @@ static int cm_rep_handler(struct cm_work *work)
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		pr_debug("%s: Stale connection. local_comm_id %d, remote_comm_id %d\n",
			 __func__, be32_to_cpu(rep_msg->local_comm_id),
			 be32_to_cpu(rep_msg->remote_comm_id));

		if (cur_cm_id_priv) {
			cm_id = &cur_cm_id_priv->id;
			ib_send_cm_dreq(cm_id, NULL, 0);
@@ -2359,6 +2410,8 @@ int ib_send_cm_dreq(struct ib_cm_id *cm_id,
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
			 be32_to_cpu(cm_id->local_id), cm_id->state);
		ret = -EINVAL;
		goto out;
	}
@@ -2428,6 +2481,8 @@ int ib_send_cm_drep(struct ib_cm_id *cm_id,
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		pr_debug("%s: local_id %d, cm_idcm_id->state(%d) != IB_CM_DREQ_RCVD\n",
			 __func__, be32_to_cpu(cm_id->local_id), cm_id->state);
		return -EINVAL;
	}

@@ -2493,6 +2548,9 @@ static int cm_dreq_handler(struct cm_work *work)
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		cm_issue_drep(work->port, work->mad_recv_wc);
		pr_debug("%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n",
			 __func__, be32_to_cpu(dreq_msg->local_comm_id),
			 be32_to_cpu(dreq_msg->remote_comm_id));
		return -EINVAL;
	}

@@ -2535,6 +2593,9 @@ static int cm_dreq_handler(struct cm_work *work)
				counter[CM_DREQ_COUNTER]);
		goto unlock;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
@@ -2638,6 +2699,8 @@ int ib_send_cm_rej(struct ib_cm_id *cm_id,
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
			 be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
		ret = -EINVAL;
		goto out;
	}
@@ -2748,6 +2811,9 @@ static int cm_rej_handler(struct cm_work *work)
		/* fall through */
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		ret = -EINVAL;
		goto out;
	}
@@ -2811,6 +2877,9 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
		}
		/* fall through */
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		ret = -EINVAL;
		goto error1;
	}
@@ -2912,6 +2981,9 @@ static int cm_mra_handler(struct cm_work *work)
				counter[CM_MRA_COUNTER]);
		/* fall through */
	default:
		pr_debug("%s local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		goto out;
	}

@@ -3085,6 +3157,12 @@ static int cm_lap_handler(struct cm_work *work)
	if (!cm_id_priv)
		return -EINVAL;

	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				      work->mad_recv_wc->recv_buf.grh,
				      &cm_id_priv->av);
	if (ret)
		goto deref;

	param = &work->cm_event.param.lap_rcvd;
	memset(&work->path[0], 0, sizeof(work->path[1]));
	cm_path_set_rec_type(work->port->cm_dev->ib_device,
@@ -3131,9 +3209,6 @@ static int cm_lap_handler(struct cm_work *work)

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
			   cm_id_priv);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
@@ -3386,6 +3461,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	int ret;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
@@ -3398,9 +3474,12 @@ static int cm_sidr_req_handler(struct cm_work *work)
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				      work->mad_recv_wc->recv_buf.grh,
				      &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);
@@ -3692,6 +3771,7 @@ static void cm_work_handler(struct work_struct *_work)
		ret = cm_timewait_handler(work);
		break;
	default:
		pr_debug("cm_event.event: 0x%x\n", work->cm_event.event);
		ret = -EINVAL;
		break;
	}
@@ -3727,6 +3807,8 @@ static int cm_establish(struct ib_cm_id *cm_id)
		ret = -EISCONN;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
			 be32_to_cpu(cm_id->local_id), cm_id->state);
		ret = -EINVAL;
		break;
	}
@@ -3924,6 +4006,9 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
		ret = 0;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		ret = -EINVAL;
		break;
	}
@@ -3971,6 +4056,9 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
		ret = 0;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		ret = -EINVAL;
		break;
	}
@@ -4030,6 +4118,9 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
		ret = 0;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		ret = -EINVAL;
		break;
	}
Loading