
Commit eb9c4f2e authored by Linus Torvalds

Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB: Turn on interface's carrier after broadcast group is joined
  RDMA/ucma: Avoid sending reject if backlog is full
  RDMA/cxgb3: Fix MR permission problems
  RDMA/cxgb3: Don't reuse skbs that are non-linear or cloned
  RDMA/cxgb3: Squelch logging AE errors
  RDMA/cxgb3: Stop EP timer when MPA exchange is aborted by peer
  RDMA/cxgb3: Move QP to error on destroy if the state is IDLE
  RDMA/cxgb3: Fixes for "normal close" failures
  RDMA/cxgb3: Fix build on sparc64
  RDMA/cma: Initialize rdma_bind_list in cma_alloc_any_port()
  RDMA/cxgb3: Don't use mm after it's freed in iwch_mmap()
  RDMA/cxgb3: Start ep timer on a MPA reject
  IB/mthca: Fix error path in mthca_alloc_memfree()
  IB/ehca: Fix sync between completion handler and destroy cq
  IPoIB: Only handle async events for one port
parents c5bfdb72 55c9adde
+1 −1
@@ -1821,7 +1821,7 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
 	struct rdma_bind_list *bind_list;
 	int port, ret;
 
-	bind_list = kmalloc(sizeof *bind_list, GFP_KERNEL);
+	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
 	if (!bind_list)
 		return -ENOMEM;
 
+1 −1
@@ -266,7 +266,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
 	mutex_lock(&ctx->file->mut);
 	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
 		if (!ctx->backlog) {
-			ret = -EDQUOT;
+			ret = -ENOMEM;
 			kfree(uevent);
 			goto out;
 		}
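
For context, ucma tracks a per-listener backlog of connect requests that user space has not yet retrieved; once it reaches zero, the handler above fails the incoming request instead of queueing it. A hedged sketch of that accounting pattern (the struct and function names here are illustrative, not the ucma code):

#include <linux/mutex.h>
#include <linux/errno.h>

struct listener_ctx {
	struct mutex mut;
	int backlog;		/* connect requests we may still queue */
};

/* Illustrative only: consume one backlog slot per incoming connect
 * request and fail once the listener's quota is exhausted. */
static int queue_connect_request(struct listener_ctx *ctx)
{
	int ret = 0;

	mutex_lock(&ctx->mut);
	if (!ctx->backlog)
		ret = -ENOMEM;	/* listener backlog is full */
	else
		ctx->backlog--;
	mutex_unlock(&ctx->mut);
	return ret;
}
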
+1 −0
@@ -36,6 +36,7 @@
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/pci.h>
+#include <linux/dma-mapping.h>
 
 #include "cxio_resource.h"
 #include "cxio_hal.h"
+11 −8
@@ -305,8 +305,7 @@ static int status2errno(int status)
  */
 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
 {
-	if (skb) {
-		BUG_ON(skb_cloned(skb));
+	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
 		skb_trim(skb, 0);
 		skb_get(skb);
 	} else {
@@ -1415,6 +1414,7 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 		wake_up(&ep->com.waitq);
 		break;
 	case FPDU_MODE:
+		start_ep_timer(ep);
 		__state_set(&ep->com, CLOSING);
 		attrs.next_state = IWCH_QP_STATE_CLOSING;
 		iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
@@ -1425,7 +1425,6 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 		disconnect = 0;
 		break;
 	case CLOSING:
-		start_ep_timer(ep);
 		__state_set(&ep->com, MORIBUND);
 		disconnect = 0;
 		break;
@@ -1487,8 +1486,10 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	case CONNECTING:
 		break;
 	case MPA_REQ_WAIT:
+		stop_ep_timer(ep);
 		break;
 	case MPA_REQ_SENT:
+		stop_ep_timer(ep);
 		connect_reply_upcall(ep, -ECONNRESET);
 		break;
 	case MPA_REP_SENT:
@@ -1507,9 +1508,10 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 		get_ep(&ep->com);
 		break;
 	case MORIBUND:
+	case CLOSING:
 		stop_ep_timer(ep);
+		/*FALLTHROUGH*/
 	case FPDU_MODE:
-	case CLOSING:
 		if (ep->com.cm_id && ep->com.qp) {
 			attrs.next_state = IWCH_QP_STATE_ERROR;
 			ret = iwch_modify_qp(ep->com.qp->rhp,
@@ -1570,7 +1572,6 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	spin_lock_irqsave(&ep->com.lock, flags);
 	switch (ep->com.state) {
 	case CLOSING:
-		start_ep_timer(ep);
 		__state_set(&ep->com, MORIBUND);
 		break;
 	case MORIBUND:
@@ -1586,6 +1587,8 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 		__state_set(&ep->com, DEAD);
 		release = 1;
 		break;
+	case ABORTING:
+		break;
 	case DEAD:
 	default:
 		BUG_ON(1);
@@ -1659,6 +1662,7 @@ static void ep_timeout(unsigned long arg)
 		break;
 	case MPA_REQ_WAIT:
 		break;
+	case CLOSING:
 	case MORIBUND:
 		if (ep->com.cm_id && ep->com.qp) {
 			attrs.next_state = IWCH_QP_STATE_ERROR;
@@ -1687,12 +1691,11 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 		return -ECONNRESET;
 	}
 	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
-	state_set(&ep->com, CLOSING);
 	if (mpa_rev == 0)
 		abort_connection(ep, NULL, GFP_KERNEL);
 	else {
 		err = send_mpa_reject(ep, pdata, pdata_len);
-		err = send_halfclose(ep, GFP_KERNEL);
+		err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
 	}
 	return 0;
 }
@@ -1957,11 +1960,11 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
 	case MPA_REQ_RCVD:
 	case MPA_REP_SENT:
 	case FPDU_MODE:
+		start_ep_timer(ep);
 		ep->com.state = CLOSING;
 		close = 1;
 		break;
 	case CLOSING:
-		start_ep_timer(ep);
 		ep->com.state = MORIBUND;
 		close = 1;
 		break;
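
Among the hunks above, the get_skb() change is the "don't reuse skbs that are non-linear or cloned" fix: a cloned skb shares its data with another reference and a non-linear skb keeps part of its payload in fragments, so trimming and rewriting such a buffer in place could corrupt data another user still sees. A hedged sketch of the same defensive reuse pattern (reuse_or_alloc_skb is an illustrative name, not the driver's function):

#include <linux/skbuff.h>

/* Illustrative only: recycle a caller-supplied skb for a new message
 * only when it is linear and unshared; otherwise allocate a fresh one. */
static struct sk_buff *reuse_or_alloc_skb(struct sk_buff *skb,
					  unsigned int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);	/* reset the payload to empty */
		skb_get(skb);		/* hold our own reference */
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}

The remaining hunks rebalance the endpoint timer so it is started exactly once when a connection enters CLOSING (whether via local disconnect or a peer close) and stopped when the MPA exchange is aborted, matching the "normal close" and EP-timer items in the merge summary.
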
+6 −6
@@ -47,12 +47,6 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 	struct iwch_qp_attributes attrs;
 	struct iwch_qp *qhp;
 
-	printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
-	       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
-	       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
-	       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
-	       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
-
 	spin_lock(&rnicp->lock);
 	qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
 
@@ -73,6 +67,12 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 		return;
 	}
 
+	printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
+	       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
+	       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
+	       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
+	       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
+
 	atomic_inc(&qhp->refcnt);
 	spin_unlock(&rnicp->lock);
 