
Commit 5f56bbdf authored by Linus Torvalds
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (25 commits)
  IB/ucm: Fix deadlock in cleanup
  IB/cm: Fix automatic path migration support
  IPoIB: Fix skb leak when freeing neighbour
  IB/srp: Fix memory leak on reconnect
  RDMA/addr: list_move() cleanups
  RDMA/addr: Fix some cancellation problems in process_req()
  RDMA/amso1100: Prevent deadlock in destroy QP
  IB/mthca: Fix initial SRQ logsize for mem-free HCAs
  IB/ehca: Use WQE offset instead of WQE addr for pending work reqs
  RDMA/iwcm: Fix comment for iwcm_deref_id() to match code
  RDMA/iwcm: Remove unnecessary function argument
  RDMA/iwcm: Remove unnecessary initializations
  RDMA/iwcm: Fix memory leak
  RDMA/iwcm: Fix memory corruption bug in cm_work_handler()
  IB: Convert kmem_cache_t -> struct kmem_cache
  IB/ipath: Fix typo in pma_counter_select subscript
  RDMA/amso1100: Fix section mismatches
  IB/mthca: Fix section mismatches
  IB/srp: Increase supported CDB size
  RDMA/cm: Remove setting local write as part of QP access flags
  ...
parents 96412198 f469b262
drivers/infiniband/core/addr.c +8 −11
@@ -139,7 +139,7 @@ static void queue_req(struct addr_req *req)
 
 	mutex_lock(&lock);
 	list_for_each_entry_reverse(temp_req, &req_list, list) {
-		if (time_after(req->timeout, temp_req->timeout))
+		if (time_after_eq(req->timeout, temp_req->timeout))
 			break;
 	}

@@ -225,19 +225,17 @@ static void process_req(void *data)
 
 	mutex_lock(&lock);
 	list_for_each_entry_safe(req, temp_req, &req_list, list) {
-		if (req->status) {
+		if (req->status == -ENODATA) {
 			src_in = (struct sockaddr_in *) &req->src_addr;
 			dst_in = (struct sockaddr_in *) &req->dst_addr;
 			req->status = addr_resolve_remote(src_in, dst_in,
 							  req->addr);
+			if (req->status && time_after_eq(jiffies, req->timeout))
+				req->status = -ETIMEDOUT;
+			else if (req->status == -ENODATA)
+				continue;
 		}
-		if (req->status && time_after(jiffies, req->timeout))
-			req->status = -ETIMEDOUT;
-		else if (req->status == -ENODATA)
-			continue;
-
-		list_del(&req->list);
-		list_add_tail(&req->list, &done_list);
+		list_move_tail(&req->list, &done_list);
 	}
 
 	if (!list_empty(&req_list)) {
@@ -347,8 +345,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
 		if (req->addr == addr) {
 			req->status = -ECANCELED;
 			req->timeout = jiffies;
-			list_del(&req->list);
-			list_add(&req->list, &req_list);
+			list_move(&req->list, &req_list);
 			set_timeout(req->timeout);
 			break;
 		}
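
The list_move()/list_move_tail() cleanups above replace an open-coded list_del() + list_add()/list_add_tail() pair with the single helper from <linux/list.h>. A minimal sketch of what the helper does (simplified; the real kernel implementation unlinks via an internal __list_del() rather than a full list_del()):

	/* Move an entry from whatever list it is on to the tail of another:
	 * unlink, then relink -- exactly the pair the cleanup collapses. */
	static inline void example_list_move_tail(struct list_head *entry,
						  struct list_head *head)
	{
		list_del(entry);		/* unlink from current list */
		list_add_tail(entry, head);	/* append to destination    */
	}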
drivers/infiniband/core/cm.c +90 −31
@@ -147,12 +147,12 @@ struct cm_id_private {
 	__be32 rq_psn;
 	int timeout_ms;
 	enum ib_mtu path_mtu;
+	__be16 pkey;
 	u8 private_data_len;
 	u8 max_cm_retries;
 	u8 peer_to_peer;
 	u8 responder_resources;
 	u8 initiator_depth;
-	u8 local_ack_timeout;
 	u8 retry_count;
 	u8 rnr_retry_count;
 	u8 service_timeout;
@@ -240,11 +240,10 @@ static void * cm_copy_private_data(const void *private_data,
 	if (!private_data || !private_data_len)
 		return NULL;
 
-	data = kmalloc(private_data_len, GFP_KERNEL);
+	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
 	if (!data)
 		return ERR_PTR(-ENOMEM);
 
-	memcpy(data, private_data, private_data_len);
 	return data;
 }

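The kmalloc() + memcpy() pairs here (and in iwcm.c further down) collapse into kmemdup(), which had just been added to the slab API. Its behavior, open-coded for reference:

	/* Approximately what kmemdup(src, len, gfp) does: allocate len bytes
	 * with the given GFP flags, copy src into the new buffer, and return
	 * NULL if the allocation fails. */
	void *example_kmemdup(const void *src, size_t len, gfp_t gfp)
	{
		void *p = kmalloc(len, gfp);

		if (p)
			memcpy(p, src, len);
		return p;
	}
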
@@ -691,7 +690,7 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
 	 * timewait before notifying the user that we've exited timewait.
 	 */
 	cm_id_priv->id.state = IB_CM_TIMEWAIT;
-	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
+	wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
 	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
 			   msecs_to_jiffies(wait_time));
 	cm_id_priv->timewait_info = NULL;
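
Both the old and new arguments to cm_convert_to_ms() are IBA-encoded times, where an encoded value t represents 4.096 µs * 2^t; using av.packet_life_time + 1 bases the timewait period on the path's packet lifetime rather than the now-removed local_ack_timeout field. A hedged sketch of such a conversion (the kernel's actual helper may differ in rounding):

	/* 4.096 us = 2^12 ns and 1 ms ~= 2^20 ns, so an IBA-encoded time t
	 * is roughly 2^(t-8) ms; clamp small encodings to 1 ms. */
	static inline int example_iba_time_to_ms(int t)
	{
		return 1 << (t > 8 ? t - 8 : 0);
	}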
@@ -1010,6 +1009,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 	cm_id_priv->responder_resources = param->responder_resources;
 	cm_id_priv->retry_count = param->retry_count;
 	cm_id_priv->path_mtu = param->primary_path->mtu;
+	cm_id_priv->pkey = param->primary_path->pkey;
 	cm_id_priv->qp_type = param->qp_type;
 
 	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
@@ -1024,8 +1024,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 
 	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
 	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
-	cm_id_priv->local_ack_timeout =
-				cm_req_get_primary_local_ack_timeout(req_msg);
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
@@ -1410,9 +1408,8 @@ static int cm_req_handler(struct cm_work *work)
 	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
 	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
 	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
+	cm_id_priv->pkey = req_msg->pkey;
 	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
-	cm_id_priv->local_ack_timeout =
-				cm_req_get_primary_local_ack_timeout(req_msg);
 	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
 	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
 	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
@@ -1716,7 +1713,7 @@ static int cm_establish_handler(struct cm_work *work)
 	unsigned long flags;
 	int ret;
 
-	/* See comment in ib_cm_establish about lookup. */
+	/* See comment in cm_establish about lookup. */
 	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
 	if (!cm_id_priv)
 		return -EINVAL;
@@ -2402,11 +2399,16 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	if (cm_id->state != IB_CM_ESTABLISHED ||
-	    cm_id->lap_state != IB_CM_LAP_IDLE) {
+	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
+	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
 		ret = -EINVAL;
 		goto out;
 	}
 
+	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
+	if (ret)
+		goto out;
+
 	ret = cm_alloc_msg(cm_id_priv, &msg);
 	if (ret)
 		goto out;
@@ -2431,7 +2433,8 @@ out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 }
 EXPORT_SYMBOL(ib_send_cm_lap);
 
-static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
+static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
+				    struct ib_sa_path_rec *path,
 				    struct cm_lap_msg *lap_msg)
 {
 	memset(path, 0, sizeof *path);
@@ -2443,10 +2446,10 @@ static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
 	path->hop_limit = lap_msg->alt_hop_limit;
 	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
 	path->reversible = 1;
-	/* pkey is same as in REQ */
+	path->pkey = cm_id_priv->pkey;
 	path->sl = cm_lap_get_sl(lap_msg);
 	path->mtu_selector = IB_SA_EQ;
-	/* mtu is same as in REQ */
+	path->mtu = cm_id_priv->path_mtu;
 	path->rate_selector = IB_SA_EQ;
 	path->rate = cm_lap_get_packet_rate(lap_msg);
 	path->packet_life_time_selector = IB_SA_EQ;
@@ -2472,7 +2475,7 @@ static int cm_lap_handler(struct cm_work *work)
 
 	param = &work->cm_event.param.lap_rcvd;
 	param->alternate_path = &work->path[0];
-	cm_format_path_from_lap(param->alternate_path, lap_msg);
+	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
 	work->cm_event.private_data = &lap_msg->private_data;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -2480,6 +2483,7 @@ static int cm_lap_handler(struct cm_work *work)
 		goto unlock;
 
 	switch (cm_id_priv->id.lap_state) {
+	case IB_CM_LAP_UNINIT:
 	case IB_CM_LAP_IDLE:
 		break;
 	case IB_CM_MRA_LAP_SENT:
@@ -2502,6 +2506,10 @@ static int cm_lap_handler(struct cm_work *work)
 
 	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
 	cm_id_priv->tid = lap_msg->hdr.tid;
+	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+				work->mad_recv_wc->recv_buf.grh,
+				&cm_id_priv->av);
+	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
@@ -3040,7 +3048,7 @@ static void cm_work_handler(void *data)
 		cm_free_work(work);
 }
 
-int ib_cm_establish(struct ib_cm_id *cm_id)
+static int cm_establish(struct ib_cm_id *cm_id)
 {
 	struct cm_id_private *cm_id_priv;
 	struct cm_work *work;
@@ -3088,7 +3096,44 @@ int ib_cm_establish(struct ib_cm_id *cm_id)
 out:
 	return ret;
 }
-EXPORT_SYMBOL(ib_cm_establish);
+
+static int cm_migrate(struct ib_cm_id *cm_id)
+{
+	struct cm_id_private *cm_id_priv;
+	unsigned long flags;
+	int ret = 0;
+
+	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	if (cm_id->state == IB_CM_ESTABLISHED &&
+	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
+	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
+		cm_id->lap_state = IB_CM_LAP_IDLE;
+		cm_id_priv->av = cm_id_priv->alt_av;
+	} else
+		ret = -EINVAL;
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+	return ret;
+}
+
+int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
+{
+	int ret;
+
+	switch (event) {
+	case IB_EVENT_COMM_EST:
+		ret = cm_establish(cm_id);
+		break;
+	case IB_EVENT_PATH_MIG:
+		ret = cm_migrate(cm_id);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(ib_cm_notify);
 
 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
 			    struct ib_mad_recv_wc *mad_recv_wc)
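
The new ib_cm_notify() entry point subsumes the old ib_cm_establish() export: consumers now report both communication-established and path-migration QP events through one call, which is what lets the CM switch over to the alternate path. A hedged usage sketch (the handler and the cm_id-in-context convention are illustrative, not part of this patch):

	/* Hypothetical QP event handler in a CM consumer: forward the two
	 * event types the CM acts on, ignore the rest. */
	static void example_qp_event_handler(struct ib_event *event, void *context)
	{
		struct ib_cm_id *cm_id = context;	/* assumed: stashed at QP creation */

		switch (event->event) {
		case IB_EVENT_COMM_EST:		/* data arrived before the RTU */
		case IB_EVENT_PATH_MIG:		/* HW finished migrating paths */
			ib_cm_notify(cm_id, event->event);
			break;
		default:
			break;
		}
	}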
@@ -3173,8 +3218,7 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
 	case IB_CM_ESTABLISHED:
 		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
 				IB_QP_PKEY_INDEX | IB_QP_PORT;
-		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
-					   IB_ACCESS_REMOTE_WRITE;
+		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
 		if (cm_id_priv->responder_resources)
 			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
 						    IB_ACCESS_REMOTE_ATOMIC;
@@ -3222,6 +3266,9 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
 		if (cm_id_priv->alt_av.ah_attr.dlid) {
 			*qp_attr_mask |= IB_QP_ALT_PATH;
 			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+			qp_attr->alt_timeout =
+					cm_id_priv->alt_av.packet_life_time + 1;
 			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
 		}
 		ret = 0;
@@ -3248,21 +3295,33 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
 	case IB_CM_REP_SENT:
 	case IB_CM_MRA_REP_RCVD:
 	case IB_CM_ESTABLISHED:
-		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
-		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
-		if (cm_id_priv->qp_type == IB_QPT_RC) {
-			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
-					 IB_QP_RNR_RETRY |
-					 IB_QP_MAX_QP_RD_ATOMIC;
-			qp_attr->timeout = cm_id_priv->local_ack_timeout;
-			qp_attr->retry_cnt = cm_id_priv->retry_count;
-			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
-			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
-		}
-		if (cm_id_priv->alt_av.ah_attr.dlid) {
-			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
-			qp_attr->path_mig_state = IB_MIG_REARM;
-		}
+		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
+			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
+			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
+			if (cm_id_priv->qp_type == IB_QPT_RC) {
+				*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
+						 IB_QP_RNR_RETRY |
+						 IB_QP_MAX_QP_RD_ATOMIC;
+				qp_attr->timeout =
+					cm_id_priv->av.packet_life_time + 1;
+				qp_attr->retry_cnt = cm_id_priv->retry_count;
+				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
+				qp_attr->max_rd_atomic =
+					cm_id_priv->initiator_depth;
+			}
+			if (cm_id_priv->alt_av.ah_attr.dlid) {
+				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+				qp_attr->path_mig_state = IB_MIG_REARM;
+			}
+		} else {
+			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
+			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+			qp_attr->alt_timeout =
+				cm_id_priv->alt_av.packet_life_time + 1;
+			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
+			qp_attr->path_mig_state = IB_MIG_REARM;
+		}
 		ret = 0;
 		break;
 	default:
drivers/infiniband/core/cma.c +21 −28
@@ -344,7 +344,7 @@ static int cma_init_ib_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
 		return ret;
 
 	qp_attr.qp_state = IB_QPS_INIT;
-	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+	qp_attr.qp_access_flags = 0;
 	qp_attr.port_num = id_priv->id.port_num;
 	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
 					  IB_QP_PKEY_INDEX | IB_QP_PORT);
@@ -935,13 +935,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	mutex_lock(&lock);
 	ret = cma_acquire_dev(conn_id);
 	mutex_unlock(&lock);
-	if (ret) {
-		ret = -ENODEV;
-		cma_exch(conn_id, CMA_DESTROYING);
-		cma_release_remove(conn_id);
-		rdma_destroy_id(&conn_id->id);
-		goto out;
-	}
+	if (ret)
+		goto release_conn_id;
 
 	conn_id->cm_id.ib = cm_id;
 	cm_id->context = conn_id;
@@ -951,13 +946,17 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
 			      ib_event->private_data + offset,
 			      IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
-	if (ret) {
-		cma_exch(conn_id, CMA_DESTROYING);
-		cma_release_remove(conn_id);
-		rdma_destroy_id(&conn_id->id);
-	}
+	if (!ret)
+		goto out;
+
+	/* Destroy the CM ID by returning a non-zero value. */
+	conn_id->cm_id.ib = NULL;
+
+release_conn_id:
+	cma_exch(conn_id, CMA_DESTROYING);
+	cma_release_remove(conn_id);
+	rdma_destroy_id(&conn_id->id);
 
 out:
 	cma_release_remove(listen_id);
 	return ret;
@@ -1481,19 +1480,18 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
 	u8 p;
 
 	mutex_lock(&lock);
+	if (list_empty(&dev_list)) {
+		ret = -ENODEV;
+		goto out;
+	}
 	list_for_each_entry(cma_dev, &dev_list, list)
 		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
 			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
 			    port_attr.state == IB_PORT_ACTIVE)
 				goto port_found;
 
-	if (!list_empty(&dev_list)) {
-		p = 1;
-		cma_dev = list_entry(dev_list.next, struct cma_device, list);
-	} else {
-		ret = -ENODEV;
-		goto out;
-	}
+	p = 1;
+	cma_dev = list_entry(dev_list.next, struct cma_device, list);
 
 port_found:
 	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
@@ -2123,8 +2121,6 @@ static void cma_add_one(struct ib_device *device)
 
 	cma_dev->device = device;
 	cma_dev->node_guid = device->node_guid;
-	if (!cma_dev->node_guid)
-		goto err;
 
 	init_completion(&cma_dev->comp);
 	atomic_set(&cma_dev->refcount, 1);
@@ -2136,9 +2132,6 @@ static void cma_add_one(struct ib_device *device)
 	list_for_each_entry(id_priv, &listen_any_list, list)
 		cma_listen_on_dev(id_priv, cma_dev);
 	mutex_unlock(&lock);
-	return;
-err:
-	kfree(cma_dev);
 }
 
 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
drivers/infiniband/core/iwcm.c +22 −21
@@ -80,7 +80,7 @@ struct iwcm_work {
  * 1) in the event upcall, cm_event_handler(), for a listening cm_id.  If
  *    the backlog is exceeded, then no more connection request events will
  *    be processed.  cm_event_handler() returns -ENOMEM in this case.  Its up
- *    to the provider to reject the connectino request.
+ *    to the provider to reject the connection request.
  * 2) in the connection request workqueue handler, cm_conn_req_handler().
  *    If work elements cannot be allocated for the new connect request cm_id,
  *    then IWCM will call the provider reject method.  This is ok since
@@ -131,26 +131,25 @@ static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
 }
 
 /*
- * Save private data from incoming connection requests in the
- * cm_id_priv so the low level driver doesn't have to.  Adjust
+ * Save private data from incoming connection requests to
+ * iw_cm_event, so the low level driver doesn't have to. Adjust
  * the event ptr to point to the local copy.
  */
-static int copy_private_data(struct iwcm_id_private *cm_id_priv,
-		       struct iw_cm_event *event)
+static int copy_private_data(struct iw_cm_event *event)
 {
 	void *p;
 
-	p = kmalloc(event->private_data_len, GFP_ATOMIC);
+	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
 	if (!p)
 		return -ENOMEM;
-	memcpy(p, event->private_data, event->private_data_len);
 	event->private_data = p;
 	return 0;
 }
 
 /*
- * Release a reference on cm_id. If the last reference is being removed
- * and iw_destroy_cm_id is waiting, wake up the waiting thread.
+ * Release a reference on cm_id. If the last reference is being
+ * released, enable the waiting thread (in iw_destroy_cm_id) to
+ * get woken up, and return 1 if a thread is already waiting.
  */
 static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
 {
@@ -243,7 +242,7 @@ static int iwcm_modify_qp_sqd(struct ib_qp *qp)
 /*
  * CM_ID <-- CLOSING
  *
- * Block if a passive or active connection is currenlty being processed. Then
+ * Block if a passive or active connection is currently being processed. Then
  * process the event as follows:
  * - If we are ESTABLISHED, move to CLOSING and modify the QP state
  *   based on the abrupt flag
@@ -408,7 +407,7 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
 {
 	struct iwcm_id_private *cm_id_priv;
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
@@ -535,7 +534,7 @@ EXPORT_SYMBOL(iw_cm_accept);
 int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 {
 	struct iwcm_id_private *cm_id_priv;
-	int ret = 0;
+	int ret;
 	unsigned long flags;
 	struct ib_qp *qp;
 
@@ -620,7 +619,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 	spin_lock_irqsave(&listen_id_priv->lock, flags);
 	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
 		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
-		return;
+		goto out;
 	}
 	spin_unlock_irqrestore(&listen_id_priv->lock, flags);

@@ -629,7 +628,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 				listen_id_priv->id.context);
 	/* If the cm_id could not be created, ignore the request */
 	if (IS_ERR(cm_id))
-		return;
+		goto out;
 
 	cm_id->provider_data = iw_event->provider_data;
 	cm_id->local_addr = iw_event->local_addr;
@@ -642,7 +641,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 	if (ret) {
 		iw_cm_reject(cm_id, NULL, 0);
 		iw_destroy_cm_id(cm_id);
-		return;
+		goto out;
 	}
 
 	/* Call the client CM handler */
@@ -654,6 +653,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 			kfree(cm_id);
 	}
 
+out:
 	if (iw_event->private_data_len)
 		kfree(iw_event->private_data);
 }
@@ -674,7 +674,7 @@ static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
 			       struct iw_cm_event *iw_event)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 
@@ -704,7 +704,7 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
 			       struct iw_cm_event *iw_event)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	/*
@@ -830,7 +830,8 @@ static int process_event(struct iwcm_id_private *cm_id_priv,
  */
 static void cm_work_handler(void *arg)
 {
-	struct iwcm_work *work = arg, lwork;
+	struct iwcm_work *work = arg;
+	struct iw_cm_event levent;
 	struct iwcm_id_private *cm_id_priv = work->cm_id;
 	unsigned long flags;
 	int empty;
@@ -843,11 +844,11 @@ static void cm_work_handler(void *arg)
 				  struct iwcm_work, list);
 		list_del_init(&work->list);
 		empty = list_empty(&cm_id_priv->work_list);
-		lwork = *work;
+		levent = work->event;
 		put_work(work);
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
-		ret = process_event(cm_id_priv, &work->event);
+		ret = process_event(cm_id_priv, &levent);
 		if (ret) {
 			set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
 			destroy_cm_id(&cm_id_priv->id);
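
The memory-corruption fix above is a copy-before-free: put_work() hands the work element back to the free pool once the lock is dropped, so the event has to be snapshotted onto the stack first (the old code copied the whole iwcm_work but still passed &work->event, a dangling pointer, to process_event()). The pattern in generic form (names illustrative, not iwcm.c API):

	struct example_event { int type; };
	struct example_work { struct example_event ev; };

	/* stand-ins for put_work() and process_event() in iwcm.c */
	extern void example_put_to_pool(struct example_work *w);
	extern void example_process(struct example_event *ev);

	static void example_handle(struct example_work *w)
	{
		struct example_event ev = w->ev;  /* copy while w is valid */

		example_put_to_pool(w);           /* w may be reused now   */
		example_process(&ev);             /* touch only the copy   */
	}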
@@ -906,7 +907,7 @@ static int cm_event_handler(struct iw_cm_id *cm_id,
 	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
 	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
 	    work->event.private_data_len) {
-		ret = copy_private_data(cm_id_priv, &work->event);
+		ret = copy_private_data(&work->event);
 		if (ret) {
 			put_work(work);
 			goto out;
drivers/infiniband/core/mad.c +1 −1
@@ -46,7 +46,7 @@ MODULE_DESCRIPTION("kernel IB MAD API");
 MODULE_AUTHOR("Hal Rosenstock");
 MODULE_AUTHOR("Sean Hefty");
 
-static kmem_cache_t *ib_mad_cache;
+static struct kmem_cache *ib_mad_cache;
 
 static struct list_head ib_mad_port_list;
 static u32 ib_mad_client_id = 0;
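
The kmem_cache_t typedef was being removed tree-wide around this release in favor of the explicit struct kmem_cache; only declarations change, the slab calls themselves do not. A hedged sketch of the surrounding lifecycle (object type and cache name are illustrative; the two trailing NULLs reflect the six-argument 2.6.19-era kmem_cache_create() with ctor/dtor, which later kernels dropped):

	struct example_obj { int x; };

	static struct kmem_cache *example_cache;

	static int example_init(void)
	{
		example_cache = kmem_cache_create("example_cache",
						  sizeof(struct example_obj),
						  0, 0, NULL, NULL);
		return example_cache ? 0 : -ENOMEM;
	}

	static void example_exit(void)
	{
		kmem_cache_destroy(example_cache);
	}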