
Commit 4c171acc authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/cma: Save PID of ID's owner
  RDMA/cma: Add support for netlink statistics export
  RDMA/cma: Pass QP type into rdma_create_id()
  RDMA: Update exported headers list
  RDMA/cma: Export enum cma_state in <rdma/rdma_cm.h>
  RDMA/nes: Add a check for strict_strtoul()
  RDMA/cxgb3: Don't post zero-byte read if endpoint is going away
  RDMA/cxgb4: Use completion objects for event blocking
  IB/srp: Fix integer -> pointer cast warnings
  IB: Add devnode methods to cm_class and umad_class
  IB/mad: Return EPROTONOSUPPORT when an RDMA device lacks the QP required
  IB/uverbs: Add devnode method to set path/mode
  RDMA/ucma: Add .nodename/.mode to tell userspace where to create device node
  RDMA: Add netlink infrastructure
  RDMA: Add error handling to ib_core_init()
parents 20e0ec11 8dc4abdf
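The rdma_create_id() change listed above is the user-visible API change in this pull: callers now state the QP type explicitly instead of having it inferred from the RDMA_PS_* port space. Below is a minimal sketch of an in-kernel caller against the merged signature; the ULP function names are hypothetical and only illustrate the new parameter.

/*
 * Hypothetical ULP snippet illustrating the rdma_create_id() signature as
 * merged here: the QP type (IB_QPT_RC, IB_QPT_UD, ...) is passed explicitly
 * rather than being derived from the RDMA_PS_* port space.
 */
#include <rdma/rdma_cm.h>

static int my_ulp_event_handler(struct rdma_cm_id *id,
				struct rdma_cm_event *event)
{
	/* ... dispatch on event->event ... */
	return 0;
}

static struct rdma_cm_id *my_ulp_create_id(void *ctx)
{
	/* Before this merge: rdma_create_id(my_ulp_event_handler, ctx, RDMA_PS_TCP); */
	return rdma_create_id(my_ulp_event_handler, ctx, RDMA_PS_TCP,
			      IB_QPT_RC);
}

The in-tree callers converted in cma.c below follow the same pattern, simply appending the QP type argument.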
drivers/infiniband/Kconfig (+1 −0)
@@ -2,6 +2,7 @@ menuconfig INFINIBAND
 	tristate "InfiniBand support"
 	depends on PCI || BROKEN
 	depends on HAS_IOMEM
+	depends on NET
 	---help---
 	  Core support for InfiniBand (IB).  Make sure to also select
 	  any protocols you wish to use as well as drivers for your
drivers/infiniband/core/Makefile (+1 −1)
@@ -8,7 +8,7 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
 					$(user_access-y)
 
 ib_core-y :=			packer.o ud_header.o verbs.o sysfs.o \
-				device.o fmr_pool.o cache.o
+				device.o fmr_pool.o cache.o netlink.o
 ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
 
 ib_mad-y :=			mad.o smi.o agent.o mad_rmpp.o
drivers/infiniband/core/cm.c (+8 −0)
@@ -3639,8 +3639,16 @@ static struct kobj_type cm_port_obj_type = {
 	.release = cm_release_port_obj
 };
 
+static char *cm_devnode(struct device *dev, mode_t *mode)
+{
+	*mode = 0666;
+	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+}
+
 struct class cm_class = {
+	.owner   = THIS_MODULE,
 	.name    = "infiniband_cm",
+	.devnode = cm_devnode,
 };
 EXPORT_SYMBOL(cm_class);
 
drivers/infiniband/core/cma.c (+200 −108)
@@ -47,6 +47,7 @@
 
 #include <rdma/rdma_cm.h>
 #include <rdma/rdma_cm_ib.h>
+#include <rdma/rdma_netlink.h>
 #include <rdma/ib_cache.h>
 #include <rdma/ib_cm.h>
 #include <rdma/ib_sa.h>
@@ -89,20 +90,6 @@ struct cma_device {
 	struct list_head	id_list;
 };
 
-enum cma_state {
-	CMA_IDLE,
-	CMA_ADDR_QUERY,
-	CMA_ADDR_RESOLVED,
-	CMA_ROUTE_QUERY,
-	CMA_ROUTE_RESOLVED,
-	CMA_CONNECT,
-	CMA_DISCONNECT,
-	CMA_ADDR_BOUND,
-	CMA_LISTEN,
-	CMA_DEVICE_REMOVAL,
-	CMA_DESTROYING
-};
-
 struct rdma_bind_list {
 	struct idr		*ps;
 	struct hlist_head	owners;
@@ -126,7 +113,7 @@ struct rdma_id_private {
 	struct list_head	mc_list;
 
 	int			internal_id;
-	enum cma_state		state;
+	enum rdma_cm_state	state;
 	spinlock_t		lock;
 	struct mutex		qp_mutex;
 
@@ -146,6 +133,7 @@ struct rdma_id_private {
 	u32			seq_num;
 	u32			qkey;
 	u32			qp_num;
+	pid_t			owner;
 	u8			srq;
 	u8			tos;
 	u8			reuseaddr;
@@ -165,8 +153,8 @@ struct cma_multicast {
 struct cma_work {
 	struct work_struct	work;
 	struct rdma_id_private	*id;
-	enum cma_state		old_state;
-	enum cma_state		new_state;
+	enum rdma_cm_state	old_state;
+	enum rdma_cm_state	new_state;
 	struct rdma_cm_event	event;
 };
 
@@ -217,7 +205,7 @@ struct sdp_hah {
 #define CMA_VERSION 0x00
 #define SDP_MAJ_VERSION 0x2
 
-static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
+static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
 {
 	unsigned long flags;
 	int ret;
@@ -229,7 +217,7 @@ static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
 }
 
 static int cma_comp_exch(struct rdma_id_private *id_priv,
-			 enum cma_state comp, enum cma_state exch)
+			 enum rdma_cm_state comp, enum rdma_cm_state exch)
 {
 	unsigned long flags;
 	int ret;
@@ -241,11 +229,11 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
 	return ret;
 }
 
-static enum cma_state cma_exch(struct rdma_id_private *id_priv,
-			       enum cma_state exch)
+static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
+				   enum rdma_cm_state exch)
 {
 	unsigned long flags;
-	enum cma_state old;
+	enum rdma_cm_state old;
 
 	spin_lock_irqsave(&id_priv->lock, flags);
 	old = id_priv->state;
@@ -279,11 +267,6 @@ static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
 	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
 }
 
-static inline int cma_is_ud_ps(enum rdma_port_space ps)
-{
-	return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
-}
-
 static void cma_attach_to_dev(struct rdma_id_private *id_priv,
 			      struct cma_device *cma_dev)
 {
@@ -413,7 +396,7 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
 }
 
 static int cma_disable_callback(struct rdma_id_private *id_priv,
-			      enum cma_state state)
+				enum rdma_cm_state state)
 {
 	mutex_lock(&id_priv->handler_mutex);
 	if (id_priv->state != state) {
@@ -429,7 +412,8 @@ static int cma_has_cm_dev(struct rdma_id_private *id_priv)
 }
 
 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
-				  void *context, enum rdma_port_space ps)
+				  void *context, enum rdma_port_space ps,
+				  enum ib_qp_type qp_type)
 {
 	struct rdma_id_private *id_priv;
 
@@ -437,10 +421,12 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
 	if (!id_priv)
 		return ERR_PTR(-ENOMEM);
 
-	id_priv->state = CMA_IDLE;
+	id_priv->owner = task_pid_nr(current);
+	id_priv->state = RDMA_CM_IDLE;
 	id_priv->id.context = context;
 	id_priv->id.event_handler = event_handler;
 	id_priv->id.ps = ps;
+	id_priv->id.qp_type = qp_type;
 	spin_lock_init(&id_priv->lock);
 	mutex_init(&id_priv->qp_mutex);
 	init_completion(&id_priv->comp);
@@ -508,7 +494,7 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
 	if (IS_ERR(qp))
 		return PTR_ERR(qp);
 
-	if (cma_is_ud_ps(id_priv->id.ps))
+	if (id->qp_type == IB_QPT_UD)
 		ret = cma_init_ud_qp(id_priv, qp);
 	else
 		ret = cma_init_conn_qp(id_priv, qp);
@@ -636,7 +622,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
 	qp_attr->port_num = id_priv->id.port_num;
 	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
 
-	if (cma_is_ud_ps(id_priv->id.ps)) {
+	if (id_priv->id.qp_type == IB_QPT_UD) {
 		ret = cma_set_qkey(id_priv);
 		if (ret)
 			return ret;
@@ -659,7 +645,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 	id_priv = container_of(id, struct rdma_id_private, id);
 	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
+		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
 			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
 		else
 			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
@@ -858,16 +844,16 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
 }
 
 static void cma_cancel_operation(struct rdma_id_private *id_priv,
-				 enum cma_state state)
+				 enum rdma_cm_state state)
 {
 	switch (state) {
-	case CMA_ADDR_QUERY:
+	case RDMA_CM_ADDR_QUERY:
 		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
 		break;
-	case CMA_ROUTE_QUERY:
+	case RDMA_CM_ROUTE_QUERY:
 		cma_cancel_route(id_priv);
 		break;
-	case CMA_LISTEN:
+	case RDMA_CM_LISTEN:
 		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
 				&& !id_priv->cma_dev)
 			cma_cancel_listens(id_priv);
@@ -918,10 +904,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
 void rdma_destroy_id(struct rdma_cm_id *id)
 {
 	struct rdma_id_private *id_priv;
-	enum cma_state state;
+	enum rdma_cm_state state;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	state = cma_exch(id_priv, CMA_DESTROYING);
+	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
 	cma_cancel_operation(id_priv, state);
 
 	/*
@@ -1015,9 +1001,9 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int ret = 0;
 
 	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
-		cma_disable_callback(id_priv, CMA_CONNECT)) ||
+		cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
 	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
-		cma_disable_callback(id_priv, CMA_DISCONNECT)))
+		cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -1048,7 +1034,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		event.status = -ETIMEDOUT; /* fall through */
 	case IB_CM_DREQ_RECEIVED:
 	case IB_CM_DREP_RECEIVED:
-		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
+		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
+				   RDMA_CM_DISCONNECT))
 			goto out;
 		event.event = RDMA_CM_EVENT_DISCONNECTED;
 		break;
@@ -1075,7 +1062,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
@@ -1101,7 +1088,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 		goto err;
 
 	id = rdma_create_id(listen_id->event_handler, listen_id->context,
-			    listen_id->ps);
+			    listen_id->ps, ib_event->param.req_rcvd.qp_type);
 	if (IS_ERR(id))
 		goto err;
 
@@ -1132,7 +1119,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	id_priv->state = CMA_CONNECT;
+	id_priv->state = RDMA_CM_CONNECT;
 	return id_priv;
 
 destroy_id:
@@ -1152,7 +1139,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
 	int ret;
 
 	id = rdma_create_id(listen_id->event_handler, listen_id->context,
-			    listen_id->ps);
+			    listen_id->ps, IB_QPT_UD);
 	if (IS_ERR(id))
 		return NULL;
 
@@ -1172,7 +1159,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
 	}
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	id_priv->state = CMA_CONNECT;
+	id_priv->state = RDMA_CM_CONNECT;
 	return id_priv;
 err:
 	rdma_destroy_id(id);
@@ -1201,13 +1188,13 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int offset, ret;
 
 	listen_id = cm_id->context;
-	if (cma_disable_callback(listen_id, CMA_LISTEN))
+	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
 		return -ECONNABORTED;
 
 	memset(&event, 0, sizeof event);
 	offset = cma_user_data_offset(listen_id->id.ps);
 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
-	if (cma_is_ud_ps(listen_id->id.ps)) {
+	if (listen_id->id.qp_type == IB_QPT_UD) {
 		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
 		event.param.ud.private_data = ib_event->private_data + offset;
 		event.param.ud.private_data_len =
@@ -1243,8 +1230,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		 * while we're accessing the cm_id.
 		 */
 		mutex_lock(&lock);
-		if (cma_comp(conn_id, CMA_CONNECT) &&
-		    !cma_is_ud_ps(conn_id->id.ps))
+		if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
 			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
 		mutex_unlock(&lock);
 		mutex_unlock(&conn_id->handler_mutex);
@@ -1257,7 +1243,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	conn_id->cm_id.ib = NULL;
 
 release_conn_id:
-	cma_exch(conn_id, CMA_DESTROYING);
+	cma_exch(conn_id, RDMA_CM_DESTROYING);
 	mutex_unlock(&conn_id->handler_mutex);
 	rdma_destroy_id(&conn_id->id);
 
@@ -1328,7 +1314,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	struct sockaddr_in *sin;
 	int ret = 0;
 
-	if (cma_disable_callback(id_priv, CMA_CONNECT))
+	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -1371,7 +1357,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.iw = NULL;
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
@@ -1393,20 +1379,20 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	struct ib_device_attr attr;
 
 	listen_id = cm_id->context;
-	if (cma_disable_callback(listen_id, CMA_LISTEN))
+	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
 		return -ECONNABORTED;
 
 	/* Create a new RDMA id for the new IW CM ID */
 	new_cm_id = rdma_create_id(listen_id->id.event_handler,
 				   listen_id->id.context,
-				   RDMA_PS_TCP);
+				   RDMA_PS_TCP, IB_QPT_RC);
 	if (IS_ERR(new_cm_id)) {
 		ret = -ENOMEM;
 		goto out;
 	}
 	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
-	conn_id->state = CMA_CONNECT;
+	conn_id->state = RDMA_CM_CONNECT;
 
 	dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
 	if (!dev) {
@@ -1461,7 +1447,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	if (ret) {
 		/* User wants to destroy the CM ID */
 		conn_id->cm_id.iw = NULL;
-		cma_exch(conn_id, CMA_DESTROYING);
+		cma_exch(conn_id, RDMA_CM_DESTROYING);
 		mutex_unlock(&conn_id->handler_mutex);
 		cma_deref_id(conn_id);
 		rdma_destroy_id(&conn_id->id);
@@ -1548,13 +1534,14 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 	struct rdma_cm_id *id;
 	int ret;
 
-	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
+	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
+			    id_priv->id.qp_type);
 	if (IS_ERR(id))
 		return;
 
 	dev_id_priv = container_of(id, struct rdma_id_private, id);
 
-	dev_id_priv->state = CMA_ADDR_BOUND;
+	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
 	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
 	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
 
@@ -1601,8 +1588,8 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
 		route->num_paths = 1;
 		*route->path_rec = *path_rec;
 	} else {
-		work->old_state = CMA_ROUTE_QUERY;
-		work->new_state = CMA_ADDR_RESOLVED;
+		work->old_state = RDMA_CM_ROUTE_QUERY;
+		work->new_state = RDMA_CM_ADDR_RESOLVED;
 		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
 		work->event.status = status;
 	}
@@ -1660,7 +1647,7 @@ static void cma_work_handler(struct work_struct *_work)
 		goto out;
 
 	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		destroy = 1;
 	}
 out:
@@ -1678,12 +1665,12 @@ static void cma_ndev_work_handler(struct work_struct *_work)
 	int destroy = 0;
 
 	mutex_lock(&id_priv->handler_mutex);
-	if (id_priv->state == CMA_DESTROYING ||
-	    id_priv->state == CMA_DEVICE_REMOVAL)
+	if (id_priv->state == RDMA_CM_DESTROYING ||
+	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
 		goto out;
 
 	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		destroy = 1;
 	}
 
@@ -1707,8 +1694,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
 
 	work->id = id_priv;
 	INIT_WORK(&work->work, cma_work_handler);
-	work->old_state = CMA_ROUTE_QUERY;
-	work->new_state = CMA_ROUTE_RESOLVED;
+	work->old_state = RDMA_CM_ROUTE_QUERY;
+	work->new_state = RDMA_CM_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 
 	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
@@ -1737,7 +1724,8 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+			   RDMA_CM_ROUTE_RESOLVED))
		return -EINVAL;
 
 	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
@@ -1750,7 +1738,7 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
 	id->route.num_paths = num_paths;
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
+	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_set_ib_paths);
@@ -1765,8 +1753,8 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
 
 	work->id = id_priv;
 	INIT_WORK(&work->work, cma_work_handler);
-	work->old_state = CMA_ROUTE_QUERY;
-	work->new_state = CMA_ROUTE_RESOLVED;
+	work->old_state = RDMA_CM_ROUTE_QUERY;
+	work->new_state = RDMA_CM_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 	queue_work(cma_wq, &work->work);
 	return 0;
@@ -1830,8 +1818,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 		goto err2;
 	}
 
-	work->old_state = CMA_ROUTE_QUERY;
-	work->new_state = CMA_ROUTE_RESOLVED;
+	work->old_state = RDMA_CM_ROUTE_QUERY;
+	work->new_state = RDMA_CM_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 	work->event.status = 0;
 
@@ -1853,7 +1841,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
 		return -EINVAL;
 
 	atomic_inc(&id_priv->refcount);
@@ -1882,7 +1870,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
+	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
 	cma_deref_id(id_priv);
 	return ret;
 }
@@ -1941,14 +1929,16 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 
 	memset(&event, 0, sizeof event);
 	mutex_lock(&id_priv->handler_mutex);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
+			   RDMA_CM_ADDR_RESOLVED))
 		goto out;
 
 	if (!status && !id_priv->cma_dev)
 		status = cma_acquire_dev(id_priv);
 
 	if (status) {
-		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
+		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+				   RDMA_CM_ADDR_BOUND))
 			goto out;
 		event.event = RDMA_CM_EVENT_ADDR_ERROR;
 		event.status = status;
@@ -1959,7 +1949,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 	}
 
 	if (id_priv->id.event_handler(&id_priv->id, &event)) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		cma_deref_id(id_priv);
 		rdma_destroy_id(&id_priv->id);
@@ -2004,8 +1994,8 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
 
 	work->id = id_priv;
 	INIT_WORK(&work->work, cma_work_handler);
-	work->old_state = CMA_ADDR_QUERY;
-	work->new_state = CMA_ADDR_RESOLVED;
+	work->old_state = RDMA_CM_ADDR_QUERY;
+	work->new_state = RDMA_CM_ADDR_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
 	queue_work(cma_wq, &work->work);
 	return 0;
@@ -2034,13 +2024,13 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (id_priv->state == CMA_IDLE) {
+	if (id_priv->state == RDMA_CM_IDLE) {
 		ret = cma_bind_addr(id, src_addr, dst_addr);
 		if (ret)
 			return ret;
 	}
 
-	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
 		return -EINVAL;
 
 	atomic_inc(&id_priv->refcount);
@@ -2056,7 +2046,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
+	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
 	cma_deref_id(id_priv);
 	return ret;
 }
@@ -2070,7 +2060,7 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
 
 	id_priv = container_of(id, struct rdma_id_private, id);
 	spin_lock_irqsave(&id_priv->lock, flags);
-	if (id_priv->state == CMA_IDLE) {
+	if (id_priv->state == RDMA_CM_IDLE) {
 		id_priv->reuseaddr = reuse;
 		ret = 0;
 	} else {
@@ -2177,7 +2167,7 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
 		if (id_priv == cur_id)
 			continue;
 
-		if ((cur_id->state == CMA_LISTEN) ||
+		if ((cur_id->state == RDMA_CM_LISTEN) ||
 		    !reuseaddr || !cur_id->reuseaddr) {
 			cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
 			if (cma_any_addr(cur_addr))
@@ -2280,14 +2270,14 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (id_priv->state == CMA_IDLE) {
+	if (id_priv->state == RDMA_CM_IDLE) {
 		((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
 		ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
 		if (ret)
 			return ret;
 	}
 
-	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
 		return -EINVAL;
 
 	if (id_priv->reuseaddr) {
@@ -2319,7 +2309,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 	return 0;
 err:
 	id_priv->backlog = 0;
-	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
+	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_listen);
@@ -2333,7 +2323,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 		return -EAFNOSUPPORT;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
+	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
 		return -EINVAL;
 
 	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
@@ -2360,7 +2350,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 	if (id_priv->cma_dev)
 		cma_release_dev(id_priv);
 err1:
-	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
+	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_bind_addr);
@@ -2433,7 +2423,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
 	int ret = 0;
 
-	if (cma_disable_callback(id_priv, CMA_CONNECT))
+	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -2479,7 +2469,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
@@ -2645,7 +2635,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
 		return -EINVAL;
 
 	if (!id->qp) {
@@ -2655,7 +2645,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (cma_is_ud_ps(id->ps))
+		if (id->qp_type == IB_QPT_UD)
 			ret = cma_resolve_ib_udp(id_priv, conn_param);
 		else
 			ret = cma_connect_ib(id_priv, conn_param);
@@ -2672,7 +2662,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
+	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_connect);
@@ -2758,7 +2748,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_CONNECT))
+
+	id_priv->owner = task_pid_nr(current);
+
+	if (!cma_comp(id_priv, RDMA_CM_CONNECT))
 		return -EINVAL;
 
 	if (!id->qp && conn_param) {
@@ -2768,7 +2761,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (cma_is_ud_ps(id->ps))
+		if (id->qp_type == IB_QPT_UD)
 			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
 						conn_param->private_data,
 						conn_param->private_data_len);
@@ -2829,7 +2822,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (cma_is_ud_ps(id->ps))
+		if (id->qp_type == IB_QPT_UD)
 			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
 						private_data, private_data_len);
 		else
@@ -2887,8 +2880,8 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	int ret;
 
 	id_priv = mc->id_priv;
-	if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
-	    cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
+	if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
+	    cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
 		return 0;
 
 	mutex_lock(&id_priv->qp_mutex);
@@ -2912,7 +2905,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 
 	ret = id_priv->id.event_handler(&id_priv->id, &event);
 	if (ret) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return 0;
@@ -3095,8 +3088,8 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
-	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
+	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
+	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
 		return -EINVAL;
 
 	mc = kmalloc(sizeof *mc, GFP_KERNEL);
@@ -3261,19 +3254,19 @@ static void cma_add_one(struct ib_device *device)
 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 {
 	struct rdma_cm_event event;
-	enum cma_state state;
+	enum rdma_cm_state state;
 	int ret = 0;
 
 	/* Record that we want to remove the device */
-	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
-	if (state == CMA_DESTROYING)
+	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
+	if (state == RDMA_CM_DESTROYING)
 		return 0;
 
 	cma_cancel_operation(id_priv, state);
 	mutex_lock(&id_priv->handler_mutex);
 
 	/* Check for destruction from another callback. */
-	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
+	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
 		goto out;
 
 	memset(&event, 0, sizeof event);
@@ -3328,6 +3321,100 @@ static void cma_remove_one(struct ib_device *device)
 	kfree(cma_dev);
 }
 
+static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct nlmsghdr *nlh;
+	struct rdma_cm_id_stats *id_stats;
+	struct rdma_id_private *id_priv;
+	struct rdma_cm_id *id = NULL;
+	struct cma_device *cma_dev;
+	int i_dev = 0, i_id = 0;
+
+	/*
+	 * We export all of the IDs as a sequence of messages.  Each
+	 * ID gets its own netlink message.
+	 */
+	mutex_lock(&lock);
+
+	list_for_each_entry(cma_dev, &dev_list, list) {
+		if (i_dev < cb->args[0]) {
+			i_dev++;
+			continue;
+		}
+
+		i_id = 0;
+		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
+			if (i_id < cb->args[1]) {
+				i_id++;
+				continue;
+			}
+
+			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
+						sizeof *id_stats, RDMA_NL_RDMA_CM,
+						RDMA_NL_RDMA_CM_ID_STATS);
+			if (!id_stats)
+				goto out;
+
+			memset(id_stats, 0, sizeof *id_stats);
+			id = &id_priv->id;
+			id_stats->node_type = id->route.addr.dev_addr.dev_type;
+			id_stats->port_num = id->port_num;
+			id_stats->bound_dev_if =
+				id->route.addr.dev_addr.bound_dev_if;
+
+			if (id->route.addr.src_addr.ss_family == AF_INET) {
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in),
+						  &id->route.addr.src_addr,
+						  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
+					goto out;
+				}
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in),
+						  &id->route.addr.dst_addr,
+						  RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
+					goto out;
+				}
+			} else if (id->route.addr.src_addr.ss_family == AF_INET6) {
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in6),
+						  &id->route.addr.src_addr,
+						  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
+					goto out;
+				}
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in6),
+						  &id->route.addr.dst_addr,
+						  RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
+					goto out;
+				}
+			}
+
+			id_stats->pid		= id_priv->owner;
+			id_stats->port_space	= id->ps;
+			id_stats->cm_state	= id_priv->state;
+			id_stats->qp_num	= id_priv->qp_num;
+			id_stats->qp_type	= id->qp_type;
+
+			i_id++;
+		}
+
+		cb->args[1] = 0;
+		i_dev++;
+	}
+
+out:
+	mutex_unlock(&lock);
+	cb->args[0] = i_dev;
+	cb->args[1] = i_id;
+
+	return skb->len;
+}
+
+static const struct ibnl_client_cbs cma_cb_table[] = {
+	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats },
+};
+
 static int __init cma_init(void)
 {
 	int ret;
@@ -3343,6 +3430,10 @@ static int __init cma_init(void)
 	ret = ib_register_client(&cma_client);
 	if (ret)
 		goto err;
+
+	if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
+		printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");
+
 	return 0;
 
 err:
@@ -3355,6 +3446,7 @@ static int __init cma_init(void)
 
 static void __exit cma_cleanup(void)
 {
+	ibnl_remove_client(RDMA_NL_RDMA_CM);
 	ib_unregister_client(&cma_client);
 	unregister_netdevice_notifier(&cma_nb);
 	rdma_addr_unregister_client(&addr_client);
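The cma_get_id_stats() dump above is what the "netlink statistics export" item in the merge message refers to: one netlink message per rdma_cm_id, paginated through cb->args[]. A hypothetical userspace sketch of driving that dump follows; the NETLINK_RDMA protocol number and the RDMA_NL_* constants mirror what this series adds to <linux/netlink.h> and <rdma/rdma_netlink.h>, but the single recv() and the printed output are illustrative assumptions, not a reference client.

/*
 * Hypothetical userspace sketch: request the RDMA CM ID statistics dump
 * over the RDMA netlink family added by this merge.  Error handling and
 * multi-part reassembly are deliberately minimal.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef NETLINK_RDMA
#define NETLINK_RDMA 20				/* value added by this series (assumed) */
#endif
#define RDMA_NL_RDMA_CM			1
#define RDMA_NL_RDMA_CM_ID_STATS	0
#define RDMA_NL_GET_TYPE(client, op)	(((client) << 10) + (op))

int main(void)
{
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	struct nlmsghdr req = {
		.nlmsg_len   = NLMSG_LENGTH(0),
		.nlmsg_type  = RDMA_NL_GET_TYPE(RDMA_NL_RDMA_CM,
						RDMA_NL_RDMA_CM_ID_STATS),
		.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		.nlmsg_seq   = 1,
	};
	char buf[8192];
	struct nlmsghdr *nlh;
	int fd, len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);
	if (fd < 0 || bind(fd, (struct sockaddr *) &sa, sizeof(sa)) < 0)
		return 1;
	if (send(fd, &req, req.nlmsg_len, 0) < 0)
		return 1;

	/* Each rdma_cm_id comes back as one message; a real client loops on recv(). */
	len = recv(fd, buf, sizeof(buf), 0);
	for (nlh = (struct nlmsghdr *) buf;
	     len > 0 && NLMSG_OK(nlh, len) && nlh->nlmsg_type != NLMSG_DONE;
	     nlh = NLMSG_NEXT(nlh, len))
		printf("rdma_cm_id stats message, payload %d bytes\n",
		       (int) NLMSG_PAYLOAD(nlh, 0));

	close(fd);
	return 0;
}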
drivers/infiniband/core/device.c (+22 −3)
@@ -38,6 +38,7 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
+#include <rdma/rdma_netlink.h>
 
 #include "core_priv.h"
 
@@ -725,22 +726,40 @@ static int __init ib_core_init(void)
 		return -ENOMEM;
 
 	ret = ib_sysfs_setup();
-	if (ret)
+	if (ret) {
 		printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
+		goto err;
+	}
+
+	ret = ibnl_init();
+	if (ret) {
+		printk(KERN_WARNING "Couldn't init IB netlink interface\n");
+		goto err_sysfs;
+	}
 
 	ret = ib_cache_setup();
 	if (ret) {
 		printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
-		ib_sysfs_cleanup();
-		destroy_workqueue(ib_wq);
+		goto err_nl;
 	}
 
+	return 0;
+
+err_nl:
+	ibnl_cleanup();
+
+err_sysfs:
+	ib_sysfs_cleanup();
+
+err:
+	destroy_workqueue(ib_wq);
 	return ret;
 }
 
 static void __exit ib_core_cleanup(void)
 {
 	ib_cache_cleanup();
+	ibnl_cleanup();
 	ib_sysfs_cleanup();
 	/* Make sure that any pending umem accounting work is done. */
 	destroy_workqueue(ib_wq);