
Commit a1530636 authored by Linus Torvalds
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  mlx4_core: Support creation of FMRs with pages smaller than 4K
  IB/ehca: Fix function return types
  RDMA/cxgb3: Bump up the MPA connection setup timeout.
  RDMA/cxgb3: Silently ignore close reply after abort.
  RDMA/cxgb3: QP flush fixes
  IB/ipoib: Fix transmit queue stalling forever
  IB/mlx4: Fix off-by-one errors in calls to mlx4_ib_free_cq_buf()
parents bb896afe c5057ddc
+10 −3
@@ -359,9 +359,10 @@ static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
 	cq->sw_wptr++;
 }
 
-void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
+int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
 {
 	u32 ptr;
+	int flushed = 0;
 
 	PDBG("%s wq %p cq %p\n", __func__, wq, cq);
 
@@ -369,8 +370,11 @@ void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
 	PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
 	    wq->rq_rptr, wq->rq_wptr, count);
 	ptr = wq->rq_rptr + count;
-	while (ptr++ != wq->rq_wptr)
+	while (ptr++ != wq->rq_wptr) {
 		insert_recv_cqe(wq, cq);
+		flushed++;
+	}
+	return flushed;
 }
 
 static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
@@ -394,9 +398,10 @@ static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
 	cq->sw_wptr++;
 }
 
-void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
+int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
 {
 	__u32 ptr;
+	int flushed = 0;
 	struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);
 
 	ptr = wq->sq_rptr + count;
@@ -405,7 +410,9 @@ void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
 		insert_sq_cqe(wq, cq, sqp);
 		sqp++;
 		ptr++;
+		flushed++;
 	}
+	return flushed;
 }
 
 /*
+2 −2
@@ -173,8 +173,8 @@ u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp);
 void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid);
 int __init cxio_hal_init(void);
 void __exit cxio_hal_exit(void);
-void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
-void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
+int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
+int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
 void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
 void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
 void cxio_flush_hw_cq(struct t3_cq *cq);
+3 −3
@@ -67,10 +67,10 @@ int peer2peer = 0;
 module_param(peer2peer, int, 0644);
 MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
 
-static int ep_timeout_secs = 10;
+static int ep_timeout_secs = 60;
 module_param(ep_timeout_secs, int, 0644);
 MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
-				   "in seconds (default=10)");
+				   "in seconds (default=60)");
 
 static int mpa_rev = 1;
 module_param(mpa_rev, int, 0644);
@@ -1650,8 +1650,8 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 		release = 1;
 		break;
 	case ABORTING:
-		break;
 	case DEAD:
+		break;
 	default:
 		BUG_ON(1);
 		break;
+8 −5
@@ -655,6 +655,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 {
 	struct iwch_cq *rchp, *schp;
 	int count;
+	int flushed;
 
 	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
 	schp = get_chp(qhp->rhp, qhp->attr.scq);
@@ -669,9 +670,10 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 	spin_lock(&qhp->lock);
 	cxio_flush_hw_cq(&rchp->cq);
 	cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
-	cxio_flush_rq(&qhp->wq, &rchp->cq, count);
+	flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&rchp->lock, *flag);
-	(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+	if (flushed)
+		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
 
 	/* locking heirarchy: cq lock first, then qp lock. */
@@ -679,9 +681,10 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 	spin_lock(&qhp->lock);
 	cxio_flush_hw_cq(&schp->cq);
 	cxio_count_scqes(&schp->cq, &qhp->wq, &count);
-	cxio_flush_sq(&qhp->wq, &schp->cq, count);
+	flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&schp->lock, *flag);
-	(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
+	if (flushed)
+		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
 
 	/* deref */
@@ -880,7 +883,6 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 				ep = qhp->ep;
 				get_ep(&ep->com);
 			}
-			flush_qp(qhp, &flag);
 			break;
 		case IWCH_QP_STATE_TERMINATE:
 			qhp->attr.state = IWCH_QP_STATE_TERMINATE;
@@ -911,6 +913,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 		}
 		switch (attrs->next_state) {
 			case IWCH_QP_STATE_IDLE:
+				flush_qp(qhp, &flag);
 				qhp->attr.state = IWCH_QP_STATE_IDLE;
 				qhp->attr.llp_stream_handle = NULL;
 				put_ep(&qhp->ep->com);
+3 −4
@@ -101,7 +101,6 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
 	props->max_ee          = limit_uint(rblock->max_rd_ee_context);
 	props->max_rdd         = limit_uint(rblock->max_rd_domain);
 	props->max_fmr         = limit_uint(rblock->max_mr);
-	props->local_ca_ack_delay  = limit_uint(rblock->local_ca_ack_delay);
 	props->max_qp_rd_atom  = limit_uint(rblock->max_rr_qp);
 	props->max_ee_rd_atom  = limit_uint(rblock->max_rr_ee_context);
 	props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
@@ -115,7 +114,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
 	}
 
 	props->max_pkeys           = 16;
-	props->local_ca_ack_delay  = limit_uint(rblock->local_ca_ack_delay);
+	props->local_ca_ack_delay  = min_t(u8, rblock->local_ca_ack_delay, 255);
 	props->max_raw_ipv6_qp     = limit_uint(rblock->max_raw_ipv6_qp);
 	props->max_raw_ethy_qp     = limit_uint(rblock->max_raw_ethy_qp);
 	props->max_mcast_grp       = limit_uint(rblock->max_mcast_grp);
@@ -136,7 +135,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
 	return ret;
 }
 
-static int map_mtu(struct ehca_shca *shca, u32 fw_mtu)
+static enum ib_mtu map_mtu(struct ehca_shca *shca, u32 fw_mtu)
 {
 	switch (fw_mtu) {
 	case 0x1:
@@ -156,7 +155,7 @@ static int map_mtu(struct ehca_shca *shca, u32 fw_mtu)
 	}
 }
 
-static int map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
+static u8 map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
 {
 	switch (vl_cap) {
 	case 0x1: