Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 94d7f1a2 authored by Doug Ledford
Browse files

Merge branches 'hfi1' and 'iw_cxgb4' into k.o/for-4.7

Loading
Loading
Loading
Loading
+175 −77
Original line number Diff line number Diff line
@@ -145,6 +145,7 @@ static struct sk_buff_head rxq;
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;
@@ -295,7 +296,7 @@ void _c4iw_free_ep(struct kref *kref)
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	PDBG("%s ep %p state %s\n", __func__, ep, states[ep->com.state]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
@@ -432,10 +433,57 @@ static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	pr_err(MOD "ARP failure\n");
	kfree_skb(skb);
}

enum {
	NUM_FAKE_CPLS = 1,
	FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0,
};

static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	release_ep_resources(ep);
	return 0;
}

/*
 * Fake up a special CPL opcode and call sched() so process_work() will call
 * _put_ep_safe() in a safe context to free the ep resources.  This is needed
 * because ARP error handlers are called in an ATOMIC context, and
 * _c4iw_free_ep() needs to block.
 */
static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct cpl_act_establish *rpl = cplhdr(skb);

	/* Set our special ARP_FAILURE opcode */
	rpl->ot.opcode = FAKE_CPL_PUT_EP_SAFE;

	/*
	 * Save ep in the skb->cb area, after where sched() will save the dev
	 * ptr.
	 */
	*((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
	sched(ep->com.dev, skb);
}

/* Handle an ARP failure for an accept */
static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	pr_err(MOD "ARP failure during accept - tid %u -dropping connection\n",
	       ep->hwtid);

	__state_set(&ep->com, DEAD);
	queue_arp_failure_cpl(ep, skb);
}

/*
 * Handle an ARP failure for an active open.
 */
@@ -444,9 +492,8 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
	struct c4iw_ep *ep = handle;

	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
	connect_reply_upcall(ep, -EHOSTUNREACH);
	state_set(&ep->com, DEAD);
	__state_set(&ep->com, DEAD);
	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
@@ -455,9 +502,7 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
	}
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);
	queue_arp_failure_cpl(ep, skb);
}

/*
@@ -474,7 +519,7 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
	c4iw_ofld_send(rdev, skb);
}

static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
static int send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
@@ -530,7 +575,7 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
@@ -1074,6 +1119,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;
	int ret;

	ep = lookup_atid(t, atid);

@@ -1099,13 +1145,20 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	send_flowc(ep, NULL);
	ret = send_flowc(ep, NULL);
	if (ret)
		goto err;
	if (ep->retry_with_mpa_v1)
		send_mpa_req(ep, skb, 1);
	else
		send_mpa_req(ep, skb, mpa_rev);
	mutex_unlock(&ep->com.mutex);
	return 0;
err:
	mutex_unlock(&ep->com.mutex);
	connect_reply_upcall(ep, -ENOMEM);
	c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	return 0;
}

static void close_complete_upcall(struct c4iw_ep *ep, int status)
@@ -1126,14 +1179,6 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
	}
}

static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	__state_set(&ep->com, ABORTING);
	set_bit(ABORT_CONN, &ep->com.history);
	return send_abort(ep, skb, gfp);
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
@@ -1301,6 +1346,18 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)

#define RELAXED_IRD_NEGOTIATION 1

/*
 * process_mpa_reply - process streaming mode MPA reply
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
@@ -1530,14 +1587,25 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
	}
	goto out;
err:
	__state_set(&ep->com, ABORTING);
	send_abort(ep, skb, GFP_KERNEL);
	disconnect = 2;
out:
	connect_reply_upcall(ep, err);
	return disconnect;
}

static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
/*
 * process_mpa_request - process streaming mode MPA request
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
@@ -1549,11 +1617,8 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
		goto err_stop_timer;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

@@ -1569,7 +1634,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
		return 0;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;
@@ -1580,43 +1645,32 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
		goto err_stop_timer;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
		goto err_stop_timer;

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	if (plen > MPA_MAX_PRIVATE_DATA)
		goto err_stop_timer;

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
		goto err_stop_timer;
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;
		return 0;

	/*
	 * If we get here we have accumulated the entire mpa
@@ -1678,13 +1732,21 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
				  SINGLE_DEPTH_NESTING);
		if (ep->parent_ep->com.state != DEAD) {
			if (connect_request_upcall(ep))
				abort_connection(ep, skb, GFP_KERNEL);
				goto err_unlock_parent;
		} else {
			abort_connection(ep, skb, GFP_KERNEL);
			goto err_unlock_parent;
		}
		mutex_unlock(&ep->parent_ep->com.mutex);
	}
	return;
	return 0;

err_unlock_parent:
	mutex_unlock(&ep->parent_ep->com.mutex);
	goto err_out;
err_stop_timer:
	(void)stop_ep_timer(ep);
err_out:
	return 2;
}

static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
@@ -2198,7 +2260,7 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
	return 0;
}

static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
		     struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
@@ -2287,10 +2349,9 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure);

	return;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
@@ -2469,8 +2530,12 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
	init_timer(&child_ep->timer);
	cxgb4_insert_tid(t, child_ep, hwtid);
	insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
	accept_cr(child_ep, skb, req);
	if (accept_cr(child_ep, skb, req)) {
		c4iw_put_ep(&parent_ep->com);
		release_ep_resources(child_ep);
	} else {
		set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
	}
	if (iptype == 6) {
		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
@@ -2489,6 +2554,7 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);
	int ret;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@@ -2501,10 +2567,14 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	mutex_lock(&ep->com.mutex);
	ep->com.state = MPA_REQ_WAIT;
	start_ep_timer(ep);
	send_flowc(ep, skb);
	set_bit(PASS_ESTAB, &ep->com.history);
	ret = send_flowc(ep, skb);
	mutex_unlock(&ep->com.mutex);
	if (ret)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);

	return 0;
}
@@ -2633,6 +2703,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CONNECTING:
		c4iw_put_ep(&ep->parent_ep->com);
		break;
	case MPA_REQ_WAIT:
		(void)stop_ep_timer(ep);
@@ -2849,14 +2920,14 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
	set_bit(ULP_REJECT, &ep->com.history);
	BUG_ON(ep->com.state != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
		disconnect = 2;
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		disconnect = 1;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
		err = c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return 0;
}
@@ -2869,13 +2940,14 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
	int abort = 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state == DEAD) {
		err = -ECONNRESET;
		goto err;
		goto err_out;
	}

	BUG_ON(ep->com.state != MPA_REQ_RCVD);
@@ -2884,9 +2956,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
	set_bit(ULP_ACCEPT, &ep->com.history);
	if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
	    (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
		goto err_abort;
	}

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
@@ -2898,9 +2969,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
				ep->ord = conn_param->ord;
				send_mpa_reject(ep, conn_param->private_data,
						conn_param->private_data_len);
				abort_connection(ep, NULL, GFP_KERNEL);
				err = -ENOMEM;
				goto err;
				goto err_abort;
			}
		}
		if (conn_param->ird < ep->ord) {
@@ -2908,9 +2978,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
			    ep->ord <= h->rdev.lldi.max_ordird_qp) {
				conn_param->ird = ep->ord;
			} else {
				abort_connection(ep, NULL, GFP_KERNEL);
				err = -ENOMEM;
				goto err;
				goto err_abort;
			}
		}
	}
@@ -2951,23 +3020,26 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err1;
		goto err_deref_cm_id;
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err1;
		goto err_deref_cm_id;

	__state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	return 0;
err1:
err_deref_cm_id:
	ep->com.cm_id = NULL;
	abort_connection(ep, NULL, GFP_KERNEL);
	cm_id->rem_ref(cm_id);
err:
err_abort:
	abort = 1;
err_out:
	mutex_unlock(&ep->com.mutex);
	if (abort)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return err;
}
@@ -3367,6 +3439,12 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	/*
	 * Ref the ep here in case we have fatal errors causing the
	 * ep to be released and freed.
	 */
	c4iw_get_ep(&ep->com);

	rdev = &ep->com.dev->rdev;
	if (c4iw_fatal_error(rdev)) {
		fatal = 1;
@@ -3418,10 +3496,29 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
			set_bit(EP_DISC_CLOSE, &ep->com.history);
			ret = send_halfclose(ep, gfp);
		}
		if (ret) {
			if (!abrupt) {
				stop_ep_timer(ep);
				close_complete_upcall(ep, -EIO);
			}
			if (ep->com.qp) {
				struct c4iw_qp_attributes attrs;

				attrs.next_state = C4IW_QP_STATE_ERROR;
				ret = c4iw_modify_qp(ep->com.qp->rhp,
						     ep->com.qp,
						     C4IW_QP_ATTR_NEXT_STATE,
						     &attrs, 1);
				if (ret)
					pr_err(MOD
					       "%s - qp <- error failed!\n",
					       __func__);
			}
			fatal = 1;
		}
	}
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	if (fatal)
		release_ep_resources(ep);
	return ret;
@@ -3809,7 +3906,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
@@ -3825,7 +3922,8 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack,
	[CPL_FW6_MSG] = deferred_fw6_msg,
	[CPL_RX_PKT] = rx_pkt
	[CPL_RX_PKT] = rx_pkt,
	[FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe
};

static void process_timeout(struct c4iw_ep *ep)
@@ -3871,9 +3969,9 @@ static void process_timeout(struct c4iw_ep *ep)
			__func__, ep, ep->hwtid, ep->com.state);
		abort = 0;
	}
	if (abort)
		abort_connection(ep, NULL, GFP_KERNEL);
	mutex_unlock(&ep->com.mutex);
	if (abort)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
}

+0 −3
Original line number Diff line number Diff line
@@ -500,9 +500,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
	 *		skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
	 */

	if (!netif_carrier_ok(netdev))
		return NETDEV_TX_OK;

	if (netif_queue_stopped(netdev))
		return NETDEV_TX_BUSY;

+2 −2
Original line number Diff line number Diff line
@@ -1090,7 +1090,7 @@ void qib_free_devdata(struct qib_devdata *dd)
	qib_dbg_ibdev_exit(&dd->verbs_dev);
#endif
	free_percpu(dd->int_counter);
	ib_dealloc_device(&dd->verbs_dev.rdi.ibdev);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

u64 qib_int_counter(struct qib_devdata *dd)
@@ -1183,7 +1183,7 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
bail:
	if (!list_empty(&dd->list))
		list_del_init(&dd->list);
	ib_dealloc_device(&dd->verbs_dev.rdi.ibdev);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
	return ERR_PTR(ret);
}

+1 −1
Original line number Diff line number Diff line
@@ -230,7 +230,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_rc_req(struct rvt_qp *qp)
int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+2 −2
Original line number Diff line number Diff line
@@ -739,7 +739,7 @@ void qib_do_send(struct rvt_qp *qp)
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int (*make_req)(struct rvt_qp *qp);
	int (*make_req)(struct rvt_qp *qp, unsigned long *flags);
	unsigned long flags;

	if ((qp->ibqp.qp_type == IB_QPT_RC ||
@@ -781,7 +781,7 @@ void qib_do_send(struct rvt_qp *qp)
			qp->s_hdrwords = 0;
			spin_lock_irqsave(&qp->s_lock, flags);
		}
	} while (make_req(qp));
	} while (make_req(qp, &flags));

	spin_unlock_irqrestore(&qp->s_lock, flags);
}
Loading