Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit be4c9bad authored by Roland Dreier
Browse files

MAINTAINERS: Add cxgb4 and iw_cxgb4 entries



Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent cfdda9d7
Loading
Loading
Loading
Loading
+14 −0
Original line number Diff line number Diff line
@@ -1719,6 +1719,20 @@ W: http://www.openfabrics.org
S:	Supported
F:	drivers/infiniband/hw/cxgb3/

CXGB4 ETHERNET DRIVER (CXGB4)
M:	Dimitris Michailidis <dm@chelsio.com>
L:	netdev@vger.kernel.org
W:	http://www.chelsio.com
S:	Supported
F:	drivers/net/cxgb4/

CXGB4 IWARP RNIC DRIVER (IW_CXGB4)
M:	Steve Wise <swise@chelsio.com>
L:	linux-rdma@vger.kernel.org
W:	http://www.openfabrics.org
S:	Supported
F:	drivers/infiniband/hw/cxgb4/

CYBERPRO FB DRIVER
M:	Russell King <linux@arm.linux.org.uk>
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+180 −136
Original line number Diff line number Diff line
@@ -61,6 +61,10 @@ static char *states[] = {
	NULL,
};

int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
@@ -113,18 +117,17 @@ static int snd_win = 32 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");

static void process_work(struct work_struct *work);
static struct workqueue_struct *workq;
static DECLARE_WORK(skb_work, process_work);

static struct sk_buff_head rxq;
static c4iw_handler_func work_handlers[NUM_CPL_CMDS];
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
@@ -271,26 +274,6 @@ static void release_ep_resources(struct c4iw_ep *ep)
	c4iw_put_ep(&ep->com);
}

/*
 * Work-queue handler: drain the global rx queue and hand each CPL
 * message to the work handler registered for its opcode.
 */
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	/*
	 * NOTE(review): cplhdr() is evaluated here while skb is still NULL;
	 * the value is overwritten on every loop iteration below, so this
	 * initializer looks like a latent NULL dereference — confirm against
	 * the cplhdr() definition.
	 */
	struct cpl_act_establish *rpl = cplhdr(skb);
	unsigned int opcode;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		/* The target device pointer was stashed in skb->cb by the enqueuer. */
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		/* A non-zero return means the handler kept ownership of the skb. */
		if (!ret)
			kfree_skb(skb);
	}
}

static int status2errno(int status)
{
	switch (status) {
@@ -1795,76 +1778,6 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
	return 0;
}

/*
 * Handle a CPL_FW6_MSG from the firmware.
 *
 * type 1: completion of a posted firmware work request — decode the
 *         status byte and wake the waiter recorded in the payload.
 * type 2: asynchronous error event — forward the embedded CQE to the
 *         event dispatcher.
 * Unknown types are logged and ignored.  Always returns 0.
 */
static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case 1:
		/* Byte 1 of data[0] carries the completion status. */
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		/*
		 * NOTE(review): data[1] is cast straight to a pointer with no
		 * byte-swap — presumably the posting side wrote it host-endian;
		 * confirm against the WR submission path.
		 */
		wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp) {
			wr_waitp->ret = ret;
			wr_waitp->done = 1;
			wake_up(&wr_waitp->wait);
		}
		break;
	case 2:
		/* Asynchronous error: the payload is a CQE to dispatch. */
		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
		       rpl->type);
		break;
	}
	return 0;
}

/*
 * Connection setup/teardown timer expired.  Runs in timer (atomic)
 * context, hence the irqsave locking and GFP_ATOMIC on the abort path.
 * Moves the endpoint toward ABORTING according to its current state
 * (notifying the consumer or erroring the QP where required), aborts
 * the connection, and drops the endpoint reference.
 */
static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
	struct c4iw_qp_attributes attrs;
	unsigned long flags;
	int abort = 1;

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		/* Our MPA request went unanswered — tell the consumer. */
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		/* Graceful close timed out — force the QP into ERROR. */
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		break;
	default:
		/* Timer should not be armed in any other state. */
		printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
			__func__, ep, ep->hwtid, ep->com.state);
		WARN_ON(1);
		abort = 0;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (abort)
		abort_connection(ep, NULL, GFP_ATOMIC);
	c4iw_put_ep(&ep->com);
}

int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
@@ -1904,8 +1817,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > T4_MAX_READ_DEPTH) ||
	    (conn_param->ird > T4_MAX_READ_DEPTH)) {
	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
@@ -1968,6 +1881,11 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
	struct net_device *pdev;
	int step;

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
@@ -2115,7 +2033,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
	 */
	ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
@@ -2243,6 +2161,116 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
	return ret;
}

/*
 * These are the real handlers that are called from a
 * work queue (process context), indexed by CPL opcode.  An opcode
 * left NULL here trips the BUG_ON() in process_work().
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack
};

/*
 * Process-context half of endpoint timeout handling, called from the
 * work queue via process_timedout_eps().  Because it runs in process
 * context it can use plain spin_lock_irq and GFP_KERNEL, unlike the
 * timer callback that queued the endpoint.  Drops the endpoint
 * reference when done.
 */
static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	spin_lock_irq(&ep->com.lock);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		/* Our MPA request went unanswered — tell the consumer. */
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		/* Graceful close timed out — force the QP into ERROR. */
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		break;
	default:
		/* Timer should not be armed in any other state. */
		printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
			__func__, ep, ep->hwtid, ep->com.state);
		WARN_ON(1);
		abort = 0;
	}
	spin_unlock_irq(&ep->com.lock);
	if (abort)
		abort_connection(ep, NULL, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
}

/*
 * Drain the global timeout_list, processing each timed-out endpoint.
 * The lock is dropped around each process_timeout() call since that
 * path may block (it uses GFP_KERNEL); list_del() under the lock
 * keeps the unlocked window safe against concurrent ep_timeout()
 * insertions.
 */
static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}

/*
 * Work-queue handler: drain the global rx queue, dispatching each CPL
 * message to the work handler registered for its opcode, then process
 * any endpoints whose timers have fired.
 *
 * Fix: the original initialized rpl with cplhdr(skb) while skb was
 * still NULL — at best pointless (the value is overwritten on every
 * loop iteration), at worst a NULL dereference inside cplhdr().
 */
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		/* The target device pointer was stashed in skb->cb by the enqueuer. */
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		/* A non-zero return means the handler kept ownership of the skb. */
		if (!ret)
			kfree_skb(skb);
	}
	process_timedout_eps();
}

/* Single work item that drains both rxq and the timeout list. */
static DECLARE_WORK(skb_work, process_work);

/*
 * Timer callback (atomic context): defer all real timeout handling to
 * process context by queueing the endpoint on timeout_list and kicking
 * the work queue, which calls process_timeout() for it.
 */
static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;

	spin_lock(&timeout_lock);
	list_add_tail(&ep->entry, &timeout_list);
	spin_unlock(&timeout_lock);
	queue_work(workq, &skb_work);
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
@@ -2273,58 +2301,74 @@ static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
	return 0;
}

int __init c4iw_cm_init(void)
static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	skb_queue_head_init(&rxq);
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	workq = create_singlethread_workqueue("iw_cxgb4");
	if (!workq)
		return -ENOMEM;
	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case 1:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp) {
			wr_waitp->ret = ret;
			wr_waitp->done = 1;
			wake_up(&wr_waitp->wait);
		}
		break;
	case 2:
		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
		       rpl->type);
		break;
	}
	return 0;
}

/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
	c4iw_handlers[CPL_ACT_ESTABLISH] = sched;
	c4iw_handlers[CPL_ACT_OPEN_RPL] = sched;
	c4iw_handlers[CPL_RX_DATA] = sched;
	c4iw_handlers[CPL_ABORT_RPL_RSS] = sched;
	c4iw_handlers[CPL_ABORT_RPL] = sched;
	c4iw_handlers[CPL_PASS_OPEN_RPL] = sched;
	c4iw_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
	c4iw_handlers[CPL_PASS_ACCEPT_REQ] = sched;
	c4iw_handlers[CPL_PASS_ESTABLISH] = sched;
	c4iw_handlers[CPL_PEER_CLOSE] = sched;
	c4iw_handlers[CPL_CLOSE_CON_RPL] = sched;
	c4iw_handlers[CPL_ABORT_REQ_RSS] = sched;
	c4iw_handlers[CPL_RDMA_TERMINATE] = sched;
	c4iw_handlers[CPL_FW4_ACK] = sched;
	c4iw_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
	c4iw_handlers[CPL_FW6_MSG] = fw6_msg;
/*
 * Upcall dispatch table, indexed by CPL opcode.  Most messages are
 * deferred to the work queue via sched(); CPL_SET_TCB_RPL and
 * CPL_FW6_MSG are handled directly in the caller's context.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = sched,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg
};

/*
 * Module-init setup for the CM: initialize the timeout lock and rx
 * queue, create the single-threaded work queue, and populate the
 * work-queue handler table.  Returns 0 on success, -ENOMEM if the
 * work queue cannot be created.
 */
int __init c4iw_cm_init(void)
{
	spin_lock_init(&timeout_lock);
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb4");
	if (!workq)
		return -ENOMEM;

	/*
	 * These are the real handlers that are called from a
	 * work queue.
	 */
	work_handlers[CPL_ACT_ESTABLISH] = act_establish;
	work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
	work_handlers[CPL_RX_DATA] = rx_data;
	work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
	work_handlers[CPL_ABORT_RPL] = abort_rpl;
	work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
	work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
	work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
	work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
	work_handlers[CPL_PEER_CLOSE] = peer_close;
	work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
	work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
	work_handlers[CPL_RDMA_TERMINATE] = terminate;
	work_handlers[CPL_FW4_ACK] = fw4_ack;
	return 0;
}

/*
 * Module-exit teardown.  A non-empty timeout_list here means endpoints
 * with pending timeouts were never processed, hence the WARN_ON; then
 * flush outstanding work and destroy the work queue.
 */
void __exit c4iw_cm_term(void)
{
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(workq);
	destroy_workqueue(workq);
}
+3 −3
Original line number Diff line number Diff line
@@ -51,8 +51,8 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
		return;
	}

	printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
	       "type %d wrid.hi 0x%x wrid.lo 0x%x\n", __func__,
	printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x "
	       "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
	       CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
	       CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
	       CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
@@ -60,7 +60,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
	if (qhp->attr.state == C4IW_QP_STATE_RTS) {
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
			       &attrs, 0);
			       &attrs, 1);
	}

	event.event = ib_event;
+2 −0
Original line number Diff line number Diff line
@@ -597,6 +597,7 @@ struct c4iw_ep {
	struct c4iw_ep_common com;
	struct c4iw_ep *parent_ep;
	struct timer_list timer;
	struct list_head entry;
	unsigned int atid;
	u32 hwtid;
	u32 snd_seq;
@@ -739,5 +740,6 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);

extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
extern int c4iw_max_read_depth;

#endif
+2 −2
Original line number Diff line number Diff line
@@ -267,8 +267,8 @@ static int c4iw_query_device(struct ib_device *ibdev,
	props->max_qp_wr = T4_MAX_QP_DEPTH;
	props->max_sge = T4_MAX_RECV_SGE;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = T4_MAX_READ_DEPTH;
	props->max_qp_init_rd_atom = T4_MAX_READ_DEPTH;
	props->max_qp_rd_atom = c4iw_max_read_depth;
	props->max_qp_init_rd_atom = c4iw_max_read_depth;
	props->max_cq = T4_MAX_NUM_CQ;
	props->max_cqe = T4_MAX_CQ_DEPTH;
	props->max_mr = c4iw_num_stags(&dev->rdev);
Loading