
Commit dd286422 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/cxgb3: Wrap the software send queue pointer as needed on flush
  IB/ipath: Change ipath_devdata.ipath_sdma_status to be unsigned long
  IB/ipath: Make ipath_portdata work with struct pid * not pid_t
  IB/ipath: Fix RDMA read response sequence checking
  IB/ipath: Fix many locking issues when switching to error state
  IB/ipath: Fix RC and UC error handling
  RDMA/nes: Fix up nes_lro_max_aggr module parameter
parents 4717df58 a58e58fa
drivers/infiniband/hw/cxgb3/cxio_hal.c  +2 −2

@@ -405,11 +405,11 @@ int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
 	struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);
 
 	ptr = wq->sq_rptr + count;
-	sqp += count;
+	sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
 	while (ptr != wq->sq_wptr) {
 		insert_sq_cqe(wq, cq, sqp);
-		sqp++;
 		ptr++;
+		sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
 		flushed++;
 	}
 	return flushed;
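
The cxgb3 change above replaces bare pointer arithmetic on the software send queue with an index recomputed through Q_PTR2IDX(), so the cursor wraps around the queue instead of walking past its end during a flush. A minimal user-space sketch of that wrap behaviour, assuming (as the macro name suggests) that Q_PTR2IDX() masks a free-running pointer down to a power-of-two queue size; ptr2idx() below is a hypothetical stand-in, not the driver's macro:

#include <stdio.h>

/* Hypothetical stand-in for Q_PTR2IDX(): reduce a free-running pointer to
 * an index into a ring of 2^size_log2 entries. */
static unsigned int ptr2idx(unsigned int ptr, unsigned int size_log2)
{
	return ptr & ((1U << size_log2) - 1);
}

int main(void)
{
	unsigned int size_log2 = 6;		/* 64-entry send queue */
	unsigned int rptr = 60, count = 10;	/* flush spans the wrap point */

	/* "sqp += count" style arithmetic would address entry 70 of a
	 * 64-entry array; recomputing the index wraps it back to 6. */
	printf("unwrapped: %u\n", rptr + count);
	printf("wrapped:   %u\n", ptr2idx(rptr + count, size_log2));
	return 0;
}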
drivers/infiniband/hw/ipath/ipath_driver.c  +11 −9

@@ -1894,7 +1894,7 @@ void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
 	 */
 	if (dd->ipath_flags & IPATH_HAS_SEND_DMA) {
 		int skip_cancel;
-		u64 *statp = &dd->ipath_sdma_status;
+		unsigned long *statp = &dd->ipath_sdma_status;
 
 		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
 		skip_cancel =
@@ -2616,7 +2616,7 @@ int ipath_reset_device(int unit)
 				ipath_dbg("unit %u port %d is in use "
 					  "(PID %u cmd %s), can't reset\n",
 					  unit, i,
-					  dd->ipath_pd[i]->port_pid,
+					  pid_nr(dd->ipath_pd[i]->port_pid),
 					  dd->ipath_pd[i]->port_comm);
 				ret = -EBUSY;
 				goto bail;
@@ -2654,19 +2654,21 @@ bail:
 static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
 {
 	int i, sub, any = 0;
-	pid_t pid;
+	struct pid *pid;
 
 	if (!dd->ipath_pd)
 		return 0;
 	for (i = 1; i < dd->ipath_cfgports; i++) {
-		if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt ||
-		    !dd->ipath_pd[i]->port_pid)
+		if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
 			continue;
 		pid = dd->ipath_pd[i]->port_pid;
+		if (!pid)
+			continue;
+
 		dev_info(&dd->pcidev->dev, "context %d in use "
 			  "(PID %u), sending signal %d\n",
-			  i, pid, sig);
-		kill_proc(pid, sig, 1);
+			  i, pid_nr(pid), sig);
+		kill_pid(pid, sig, 1);
 		any++;
 		for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) {
 			pid = dd->ipath_pd[i]->port_subpid[sub];
@@ -2674,8 +2676,8 @@ static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
 				continue;
 			dev_info(&dd->pcidev->dev, "sub-context "
 				"%d:%d in use (PID %u), sending "
-				"signal %d\n", i, sub, pid, sig);
-			kill_proc(pid, sig, 1);
+				"signal %d\n", i, sub, pid_nr(pid), sig);
+			kill_pid(pid, sig, 1);
 			any++;
 		}
 	}
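
The ipath changes above and in the next file swap raw pid_t values for reference-counted struct pid pointers, so the driver can never signal a numeric PID that was recycled after its owner exited. A condensed sketch of that kernel pattern, with a hypothetical foo_port structure standing in for the driver's per-port data:

#include <linux/kernel.h>
#include <linux/pid.h>
#include <linux/sched.h>

/* Hypothetical per-port structure; only the pid handling mirrors the patch. */
struct foo_port {
	struct pid *owner;
};

static void foo_port_open(struct foo_port *p)
{
	/* Take a counted reference to the opener's struct pid; it stays
	 * valid even if the task exits and its numeric PID is reused. */
	p->owner = get_pid(task_pid(current));
}

static void foo_port_signal(struct foo_port *p, int sig)
{
	if (!p->owner)
		return;
	/* pid_nr() is used only for logging; kill_pid() replaces the old
	 * kill_proc(pid_t, ...) call. */
	pr_info("signalling PID %u\n", pid_nr(p->owner));
	kill_pid(p->owner, sig, 1);
}

static void foo_port_close(struct foo_port *p)
{
	put_pid(p->owner);	/* balances get_pid(); NULL is a no-op */
	p->owner = NULL;
}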
drivers/infiniband/hw/ipath/ipath_file_ops.c  +11 −8

@@ -555,7 +555,7 @@ static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport,
 			p = dd->ipath_pageshadow[porttid + tid];
 			dd->ipath_pageshadow[porttid + tid] = NULL;
 			ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
-				   pd->port_pid, tid);
+				   pid_nr(pd->port_pid), tid);
 			dd->ipath_f_put_tid(dd, &tidbase[tid],
 					    RCVHQ_RCV_TYPE_EXPECTED,
 					    dd->ipath_tidinvalid);
@@ -1609,7 +1609,7 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
 			   port);
 		pd->port_cnt = 1;
 		port_fp(fp) = pd;
-		pd->port_pid = current->pid;
+		pd->port_pid = get_pid(task_pid(current));
 		strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
 		ipath_stats.sps_ports++;
 		ret = 0;
@@ -1793,14 +1793,15 @@ static int find_shared_port(struct file *fp,
 			}
 			port_fp(fp) = pd;
 			subport_fp(fp) = pd->port_cnt++;
-			pd->port_subpid[subport_fp(fp)] = current->pid;
+			pd->port_subpid[subport_fp(fp)] =
+				get_pid(task_pid(current));
 			tidcursor_fp(fp) = 0;
 			pd->active_slaves |= 1 << subport_fp(fp);
 			ipath_cdbg(PROC,
 				   "%s[%u] %u sharing %s[%u] unit:port %u:%u\n",
 				   current->comm, current->pid,
 				   subport_fp(fp),
-				   pd->port_comm, pd->port_pid,
+				   pd->port_comm, pid_nr(pd->port_pid),
 				   dd->ipath_unit, pd->port_port);
 			ret = 1;
 			goto done;
@@ -2066,7 +2067,8 @@ static int ipath_close(struct inode *in, struct file *fp)
 		 * the slave(s) don't wait for receive data forever.
 		 */
 		pd->active_slaves &= ~(1 << fd->subport);
-		pd->port_subpid[fd->subport] = 0;
+		put_pid(pd->port_subpid[fd->subport]);
+		pd->port_subpid[fd->subport] = NULL;
 		mutex_unlock(&ipath_mutex);
 		goto bail;
 	}
@@ -2074,7 +2076,7 @@ static int ipath_close(struct inode *in, struct file *fp)
 
 	if (pd->port_hdrqfull) {
 		ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
-			   "during run\n", pd->port_comm, pd->port_pid,
+			   "during run\n", pd->port_comm, pid_nr(pd->port_pid),
 			   pd->port_hdrqfull);
 		pd->port_hdrqfull = 0;
 	}
@@ -2134,11 +2136,12 @@ static int ipath_close(struct inode *in, struct file *fp)
 			unlock_expected_tids(pd);
 		ipath_stats.sps_ports--;
 		ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
-			   pd->port_comm, pd->port_pid,
+			   pd->port_comm, pid_nr(pd->port_pid),
 			   dd->ipath_unit, port);
 	}
 
-	pd->port_pid = 0;
+	put_pid(pd->port_pid);
+	pd->port_pid = NULL;
 	dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */
 	mutex_unlock(&ipath_mutex);
 	ipath_free_pddata(dd, pd); /* after releasing the mutex */
drivers/infiniband/hw/ipath/ipath_kernel.h  +5 −5

@@ -159,8 +159,8 @@ struct ipath_portdata {
 	/* saved total number of polled urgent packets for poll edge trigger */
 	u32 port_urgent_poll;
 	/* pid of process using this port */
-	pid_t port_pid;
-	pid_t port_subpid[INFINIPATH_MAX_SUBPORT];
+	struct pid *port_pid;
+	struct pid *port_subpid[INFINIPATH_MAX_SUBPORT];
 	/* same size as task_struct .comm[] */
 	char port_comm[16];
 	/* pkeys set by this use of this port */
@@ -483,7 +483,7 @@ struct ipath_devdata {
 
 	/* SendDMA related entries */
 	spinlock_t            ipath_sdma_lock;
-	u64                   ipath_sdma_status;
+	unsigned long         ipath_sdma_status;
 	unsigned long         ipath_sdma_abort_jiffies;
 	unsigned long         ipath_sdma_abort_intr_timeout;
 	unsigned long         ipath_sdma_buf_jiffies;
@@ -822,8 +822,8 @@ struct ipath_devdata {
 #define IPATH_SDMA_DISARMED  1
 #define IPATH_SDMA_DISABLED  2
 #define IPATH_SDMA_LAYERBUF  3
-#define IPATH_SDMA_RUNNING  62
-#define IPATH_SDMA_SHUTDOWN 63
+#define IPATH_SDMA_RUNNING  30
+#define IPATH_SDMA_SHUTDOWN 31
 
 /* bit combinations that correspond to abort states */
 #define IPATH_SDMA_ABORT_NONE 0
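
The ipath_sdma_status change pairs with the renumbered IPATH_SDMA_RUNNING/IPATH_SDMA_SHUTDOWN bits: the generic bitops (set_bit(), test_and_clear_bit(), and friends) operate on an unsigned long, so on a 32-bit build a u64 status word and bit numbers 62/63 are out of range. Keeping the word unsigned long and the bits at 30/31 works for either word size. A small illustrative sketch (foo_sdma_status and the FOO_* bits are hypothetical, not the driver's symbols):

#include <linux/bitops.h>

/* Hypothetical status word; mirrors why ipath_sdma_status became
 * unsigned long: the generic bitops take "volatile unsigned long *", so
 * valid bit numbers must stay below BITS_PER_LONG (32 on 32-bit builds). */
#define FOO_SDMA_RUNNING	30
#define FOO_SDMA_SHUTDOWN	31

static unsigned long foo_sdma_status;

static void foo_sdma_start(void)
{
	set_bit(FOO_SDMA_RUNNING, &foo_sdma_status);
}

static int foo_sdma_stop(void)
{
	/* Atomic read-and-clear of the RUNNING bit. */
	return test_and_clear_bit(FOO_SDMA_RUNNING, &foo_sdma_status);
}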
drivers/infiniband/hw/ipath/ipath_qp.c  +99 −138

@@ -242,7 +242,6 @@ static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
 {
 	struct ipath_qp *q, **qpp;
 	unsigned long flags;
-	int fnd = 0;
 
 	spin_lock_irqsave(&qpt->lock, flags);
 
@@ -253,51 +252,40 @@ static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
 			*qpp = qp->next;
 			qp->next = NULL;
 			atomic_dec(&qp->refcount);
-			fnd = 1;
 			break;
 		}
 	}
 
 	spin_unlock_irqrestore(&qpt->lock, flags);
-
-	if (!fnd)
-		return;
-
-	free_qpn(qpt, qp->ibqp.qp_num);
-
-	wait_event(qp->wait, !atomic_read(&qp->refcount));
 }
 
 /**
- * ipath_free_all_qps - remove all QPs from the table
+ * ipath_free_all_qps - check for QPs still in use
  * @qpt: the QP table to empty
+ *
+ * There should not be any QPs still in use.
+ * Free memory for table.
  */
-void ipath_free_all_qps(struct ipath_qp_table *qpt)
+unsigned ipath_free_all_qps(struct ipath_qp_table *qpt)
 {
 	unsigned long flags;
-	struct ipath_qp *qp, *nqp;
-	u32 n;
+	struct ipath_qp *qp;
+	u32 n, qp_inuse = 0;
 
+	spin_lock_irqsave(&qpt->lock, flags);
 	for (n = 0; n < qpt->max; n++) {
-		spin_lock_irqsave(&qpt->lock, flags);
 		qp = qpt->table[n];
 		qpt->table[n] = NULL;
-		spin_unlock_irqrestore(&qpt->lock, flags);
 
-		while (qp) {
-			nqp = qp->next;
-			free_qpn(qpt, qp->ibqp.qp_num);
-			if (!atomic_dec_and_test(&qp->refcount) ||
-			    !ipath_destroy_qp(&qp->ibqp))
-				ipath_dbg("QP memory leak!\n");
-			qp = nqp;
-		}
+		for (; qp; qp = qp->next)
+			qp_inuse++;
 	}
+	spin_unlock_irqrestore(&qpt->lock, flags);
 
-	for (n = 0; n < ARRAY_SIZE(qpt->map); n++) {
+	for (n = 0; n < ARRAY_SIZE(qpt->map); n++)
 		if (qpt->map[n].page)
 			free_page((unsigned long) qpt->map[n].page);
-	}
+	return qp_inuse;
 }
 
 /**
@@ -336,11 +324,12 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
 	qp->remote_qpn = 0;
 	qp->qkey = 0;
 	qp->qp_access_flags = 0;
-	qp->s_busy = 0;
+	atomic_set(&qp->s_dma_busy, 0);
 	qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
 	qp->s_hdrwords = 0;
 	qp->s_wqe = NULL;
 	qp->s_pkt_delay = 0;
+	qp->s_draining = 0;
 	qp->s_psn = 0;
 	qp->r_psn = 0;
 	qp->r_msn = 0;
@@ -353,7 +342,8 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
 	}
 	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
 	qp->r_nak_state = 0;
-	qp->r_wrid_valid = 0;
+	qp->r_aflags = 0;
+	qp->r_flags = 0;
 	qp->s_rnr_timeout = 0;
 	qp->s_head = 0;
 	qp->s_tail = 0;
@@ -361,7 +351,6 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
 	qp->s_last = 0;
 	qp->s_ssn = 1;
 	qp->s_lsn = 0;
-	qp->s_wait_credit = 0;
 	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
 	qp->r_head_ack_queue = 0;
 	qp->s_tail_ack_queue = 0;
@@ -370,17 +359,17 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
 		qp->r_rq.wq->head = 0;
 		qp->r_rq.wq->tail = 0;
 	}
-	qp->r_reuse_sge = 0;
 }
 
 /**
- * ipath_error_qp - put a QP into an error state
- * @qp: the QP to put into an error state
+ * ipath_error_qp - put a QP into the error state
+ * @qp: the QP to put into the error state
  * @err: the receive completion error to signal if a RWQE is active
  *
  * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP s_lock should be held and interrupts disabled.
+ * If we are already in error state, just return.
 */
 
 int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
@@ -389,8 +378,10 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 	struct ib_wc wc;
 	int ret = 0;
 
-	ipath_dbg("QP%d/%d in error state (%d)\n",
-		  qp->ibqp.qp_num, qp->remote_qpn, err);
+	if (qp->state == IB_QPS_ERR)
+		goto bail;
+
+	qp->state = IB_QPS_ERR;
 
 	spin_lock(&dev->pending_lock);
 	if (!list_empty(&qp->timerwait))
@@ -399,39 +390,21 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 		list_del_init(&qp->piowait);
 	spin_unlock(&dev->pending_lock);
 
-	wc.vendor_err = 0;
-	wc.byte_len = 0;
-	wc.imm_data = 0;
+	/* Schedule the sending tasklet to drain the send work queue. */
+	if (qp->s_last != qp->s_head)
+		ipath_schedule_send(qp);
+
+	memset(&wc, 0, sizeof(wc));
 	wc.qp = &qp->ibqp;
-	wc.src_qp = 0;
-	wc.wc_flags = 0;
-	wc.pkey_index = 0;
-	wc.slid = 0;
-	wc.sl = 0;
-	wc.dlid_path_bits = 0;
-	wc.port_num = 0;
-	if (qp->r_wrid_valid) {
-		qp->r_wrid_valid = 0;
-		wc.wr_id = qp->r_wr_id;
-		wc.opcode = IB_WC_RECV;
+	wc.opcode = IB_WC_RECV;
+
+	if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
+		wc.wr_id = qp->r_wr_id;
 		wc.status = err;
 		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
 	}
 	wc.status = IB_WC_WR_FLUSH_ERR;
 
-	while (qp->s_last != qp->s_head) {
-		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
-
-		wc.wr_id = wqe->wr.wr_id;
-		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-		if (++qp->s_last >= qp->s_size)
-			qp->s_last = 0;
-		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
-	}
-	qp->s_cur = qp->s_tail = qp->s_head;
-	qp->s_hdrwords = 0;
-	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
-
 	if (qp->r_rq.wq) {
 		struct ipath_rwq *wq;
 		u32 head;
@@ -447,7 +420,6 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 		tail = wq->tail;
 		if (tail >= qp->r_rq.size)
 			tail = 0;
-		wc.opcode = IB_WC_RECV;
 		while (tail != head) {
 			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
 			if (++tail >= qp->r_rq.size)
@@ -460,6 +432,7 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 	} else if (qp->ibqp.event_handler)
 		ret = 1;
 
+bail:
 	return ret;
 }
 
@@ -478,11 +451,10 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct ipath_ibdev *dev = to_idev(ibqp->device);
 	struct ipath_qp *qp = to_iqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
-	unsigned long flags;
 	int lastwqe = 0;
 	int ret;
 
-	spin_lock_irqsave(&qp->s_lock, flags);
+	spin_lock_irq(&qp->s_lock);
 
 	cur_state = attr_mask & IB_QP_CUR_STATE ?
 		attr->cur_qp_state : qp->state;
@@ -535,16 +507,42 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
 	switch (new_state) {
 	case IB_QPS_RESET:
+		if (qp->state != IB_QPS_RESET) {
+			qp->state = IB_QPS_RESET;
+			spin_lock(&dev->pending_lock);
+			if (!list_empty(&qp->timerwait))
+				list_del_init(&qp->timerwait);
+			if (!list_empty(&qp->piowait))
+				list_del_init(&qp->piowait);
+			spin_unlock(&dev->pending_lock);
+			qp->s_flags &= ~IPATH_S_ANY_WAIT;
+			spin_unlock_irq(&qp->s_lock);
+			/* Stop the sending tasklet */
+			tasklet_kill(&qp->s_task);
+			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
+			spin_lock_irq(&qp->s_lock);
+		}
 		ipath_reset_qp(qp, ibqp->qp_type);
 		break;
 
+	case IB_QPS_SQD:
+		qp->s_draining = qp->s_last != qp->s_cur;
+		qp->state = new_state;
+		break;
+
+	case IB_QPS_SQE:
+		if (qp->ibqp.qp_type == IB_QPT_RC)
+			goto inval;
+		qp->state = new_state;
+		break;
+
 	case IB_QPS_ERR:
 		lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
 		break;
 
 	default:
+		qp->state = new_state;
 		break;
-
 	}
 
 	if (attr_mask & IB_QP_PKEY_INDEX)
@@ -597,8 +595,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
 		qp->s_max_rd_atomic = attr->max_rd_atomic;
 
-	qp->state = new_state;
-	spin_unlock_irqrestore(&qp->s_lock, flags);
+	spin_unlock_irq(&qp->s_lock);
 
 	if (lastwqe) {
 		struct ib_event ev;
@@ -612,7 +609,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	goto bail;
 
 inval:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
+	spin_unlock_irq(&qp->s_lock);
 	ret = -EINVAL;
 
 bail:
@@ -643,7 +640,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	attr->pkey_index = qp->s_pkey_index;
 	attr->alt_pkey_index = 0;
 	attr->en_sqd_async_notify = 0;
-	attr->sq_draining = 0;
+	attr->sq_draining = qp->s_draining;
 	attr->max_rd_atomic = qp->s_max_rd_atomic;
 	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
 	attr->min_rnr_timer = qp->r_min_rnr_timer;
@@ -833,6 +830,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 		spin_lock_init(&qp->r_rq.lock);
 		atomic_set(&qp->refcount, 0);
 		init_waitqueue_head(&qp->wait);
+		init_waitqueue_head(&qp->wait_dma);
 		tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
 		INIT_LIST_HEAD(&qp->piowait);
 		INIT_LIST_HEAD(&qp->timerwait);
@@ -926,6 +924,7 @@ bail_ip:
 	else
 		vfree(qp->r_rq.wq);
 	ipath_free_qp(&dev->qp_table, qp);
+	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
bail_qp:
 	kfree(qp);
bail_swq:
@@ -947,41 +946,44 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
 {
 	struct ipath_qp *qp = to_iqp(ibqp);
 	struct ipath_ibdev *dev = to_idev(ibqp->device);
-	unsigned long flags;
 
-	spin_lock_irqsave(&qp->s_lock, flags);
-	qp->state = IB_QPS_ERR;
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-	spin_lock(&dev->n_qps_lock);
-	dev->n_qps_allocated--;
-	spin_unlock(&dev->n_qps_lock);
-
-	/* Stop the sending tasklet. */
-	tasklet_kill(&qp->s_task);
+	/* Make sure HW and driver activity is stopped. */
+	spin_lock_irq(&qp->s_lock);
+	if (qp->state != IB_QPS_RESET) {
+		qp->state = IB_QPS_RESET;
+		spin_lock(&dev->pending_lock);
+		if (!list_empty(&qp->timerwait))
+			list_del_init(&qp->timerwait);
+		if (!list_empty(&qp->piowait))
+			list_del_init(&qp->piowait);
+		spin_unlock(&dev->pending_lock);
+		qp->s_flags &= ~IPATH_S_ANY_WAIT;
+		spin_unlock_irq(&qp->s_lock);
+		/* Stop the sending tasklet */
+		tasklet_kill(&qp->s_task);
+		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
+	} else
+		spin_unlock_irq(&qp->s_lock);
+
+	ipath_free_qp(&dev->qp_table, qp);
 
 	if (qp->s_tx) {
 		atomic_dec(&qp->refcount);
 		if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
 			kfree(qp->s_tx->txreq.map_addr);
+		spin_lock_irq(&dev->pending_lock);
+		list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
+		spin_unlock_irq(&dev->pending_lock);
+		qp->s_tx = NULL;
 	}
 
-	/* Make sure the QP isn't on the timeout list. */
-	spin_lock_irqsave(&dev->pending_lock, flags);
-	if (!list_empty(&qp->timerwait))
-		list_del_init(&qp->timerwait);
-	if (!list_empty(&qp->piowait))
-		list_del_init(&qp->piowait);
-	if (qp->s_tx)
-		list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
-	spin_unlock_irqrestore(&dev->pending_lock, flags);
+	wait_event(qp->wait, !atomic_read(&qp->refcount));
 
-	/*
-	 * Make sure that the QP is not in the QPN table so receive
-	 * interrupts will discard packets for this QP.  XXX Also remove QP
-	 * from multicast table.
-	 */
-	if (atomic_read(&qp->refcount) != 0)
-		ipath_free_qp(&dev->qp_table, qp);
+	/* all user's cleaned up, mark it available */
+	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
+	spin_lock(&dev->n_qps_lock);
+	dev->n_qps_allocated--;
+	spin_unlock(&dev->n_qps_lock);
 
 	if (qp->ip)
 		kref_put(&qp->ip->ref, ipath_release_mmap_info);
@@ -1025,48 +1027,6 @@ bail:
 	return ret;
 }
 
-/**
- * ipath_sqerror_qp - put a QP's send queue into an error state
- * @qp: QP who's send queue will be put into an error state
- * @wc: the WC responsible for putting the QP in this state
- *
- * Flushes the send work queue.
- * The QP s_lock should be held and interrupts disabled.
- */
-
-void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
-{
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
-
-	ipath_dbg("Send queue error on QP%d/%d: err: %d\n",
-		  qp->ibqp.qp_num, qp->remote_qpn, wc->status);
-
-	spin_lock(&dev->pending_lock);
-	if (!list_empty(&qp->timerwait))
-		list_del_init(&qp->timerwait);
-	if (!list_empty(&qp->piowait))
-		list_del_init(&qp->piowait);
-	spin_unlock(&dev->pending_lock);
-
-	ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
-	if (++qp->s_last >= qp->s_size)
-		qp->s_last = 0;
-
-	wc->status = IB_WC_WR_FLUSH_ERR;
-
-	while (qp->s_last != qp->s_head) {
-		wqe = get_swqe_ptr(qp, qp->s_last);
-		wc->wr_id = wqe->wr.wr_id;
-		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-		ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
-		if (++qp->s_last >= qp->s_size)
-			qp->s_last = 0;
-	}
-	qp->s_cur = qp->s_tail = qp->s_head;
-	qp->state = IB_QPS_SQE;
-}
-
 /**
  * ipath_get_credit - flush the send work queue of a QP
  * @qp: the qp who's send work queue to flush
@@ -1093,9 +1053,10 @@ void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
 	}
 
 	/* Restart sending if it was blocked due to lack of credits. */
-	if (qp->s_cur != qp->s_head &&
+	if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) &&
+	    qp->s_cur != qp->s_head &&
 	    (qp->s_lsn == (u32) -1 ||
 	     ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
 			 qp->s_lsn + 1) <= 0))
-		tasklet_hi_schedule(&qp->s_task);
+		ipath_schedule_send(qp);
 }
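
Much of the ipath_qp.c rework follows one quiesce-before-free ordering: mark the QP dead under its lock, pull it off the wait lists, drop the lock, stop the send tasklet, then sleep until outstanding DMA and references drain, and only then recycle the QPN. A stripped-down sketch of that ordering, using a hypothetical foo_qp object rather than the driver's struct ipath_qp:

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/atomic.h>

/* Hypothetical QP-like object; only the teardown ordering is the point. */
struct foo_qp {
	spinlock_t lock;
	int dead;
	struct tasklet_struct task;
	atomic_t dma_busy;		/* sends still owned by hardware */
	atomic_t refcount;		/* packet/verbs references */
	wait_queue_head_t dma_wait;
	wait_queue_head_t wait;
};

static void foo_qp_destroy(struct foo_qp *qp)
{
	/* Refuse new work under the lock... */
	spin_lock_irq(&qp->lock);
	qp->dead = 1;
	spin_unlock_irq(&qp->lock);

	/* ...then stop deferred sends; tasklet_kill() returns only once the
	 * tasklet is neither running nor scheduled. */
	tasklet_kill(&qp->task);

	/* Sleep until in-flight DMA and all references have drained. */
	wait_event(qp->dma_wait, !atomic_read(&qp->dma_busy));
	wait_event(qp->wait, !atomic_read(&qp->refcount));

	/* Only now is it safe to free the object and recycle its number. */
}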