
Commit 1c79a5a8 authored by David S. Miller

Merge branch 'cxgb4-next'



Hariprasad Shenai says:

====================
Doorbell drop Avoidance Bug fix for iw_cxgb4

This patch series provides fixes for Chelsio T4/T5 adapters related to
doorbell (DB) drop avoidance, along with a small fix for keepalive
negative-advice handling in iw_cxgb4.

The patch series is created against David Miller's 'net-next' tree and
includes patches for the cxgb4 and iw_cxgb4 drivers.

We would like this patch series to be merged via David Miller's
'net-next' tree.

We have included all the maintainers of the respective drivers. Kindly
review the changes and let us know if you have any comments.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 57a7744e 05eb2389
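The heart of the series is a new doorbell flow-control state machine in iw_cxgb4, replacing the old qpcnt/db_fc_threshold scheme. As a reading aid, here is the state enum from the iw_cxgb4.h hunk below, annotated with transition notes summarized from the device.c hunks (the annotations are editorial, not comments from the patch):

enum db_state {
	NORMAL = 0,		/* QPs ring hardware doorbells directly */
	FLOW_CONTROL = 1,	/* deferred doorbells on db_fc_list are drained in chunks */
	RECOVERY = 2,		/* dropped doorbells are re-synced via cxgb4_sync_txq_pidx() */
	STOPPED = 3		/* DB FULL seen: kernel rings are queued, user QPs see db_off = 1 */
};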
+12 −12
@@ -1647,6 +1647,15 @@ static inline int act_open_has_tid(int status)
 	       status != CPL_ERR_ARP_MISS;
 }
 
+/* Returns whether a CPL status conveys negative advice.
+ */
+static int is_neg_adv(unsigned int status)
+{
+	return status == CPL_ERR_RTX_NEG_ADVICE ||
+	       status == CPL_ERR_PERSIST_NEG_ADVICE ||
+	       status == CPL_ERR_KEEPALV_NEG_ADVICE;
+}
+
 #define ACT_OPEN_RETRY_COUNT 2
 
 static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
@@ -1835,7 +1844,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
 	     status, status2errno(status));
 
-	if (status == CPL_ERR_RTX_NEG_ADVICE) {
+	if (is_neg_adv(status)) {
 		printk(KERN_WARNING MOD "Connection problems for atid %u\n",
 			atid);
 		return 0;
@@ -2265,15 +2274,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 	return 0;
 }
 
-/*
- * Returns whether an ABORT_REQ_RSS message is a negative advice.
- */
-static int is_neg_adv_abort(unsigned int status)
-{
-	return status == CPL_ERR_RTX_NEG_ADVICE ||
-	       status == CPL_ERR_PERSIST_NEG_ADVICE;
-}
-
 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct cpl_abort_req_rss *req = cplhdr(skb);
@@ -2287,7 +2287,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	unsigned int tid = GET_TID(req);
 
 	ep = lookup_tid(t, tid);
-	if (is_neg_adv_abort(req->status)) {
+	if (is_neg_adv(req->status)) {
 		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
 		     ep->hwtid);
 		return 0;
@@ -3570,7 +3570,7 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
 		kfree_skb(skb);
 		return 0;
 	}
-	if (is_neg_adv_abort(req->status)) {
+	if (is_neg_adv(req->status)) {
 		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
 		     ep->hwtid);
 		kfree_skb(skb);
+107 −70
@@ -64,6 +64,10 @@ struct uld_ctx {
 static LIST_HEAD(uld_ctx_list);
 static DEFINE_MUTEX(dev_mutex);
 
+#define DB_FC_RESUME_SIZE 64
+#define DB_FC_RESUME_DELAY 1
+#define DB_FC_DRAIN_THRESH 0
+
 static struct dentry *c4iw_debugfs_root;
 
 struct c4iw_debugfs_data {
@@ -282,7 +286,7 @@ static const struct file_operations stag_debugfs_fops = {
 	.llseek  = default_llseek,
 };
 
-static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY"};
+static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};
 
 static int stats_show(struct seq_file *seq, void *v)
 {
@@ -311,9 +315,10 @@ static int stats_show(struct seq_file *seq, void *v)
 	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
 	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
 	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
-	seq_printf(seq, " DB State: %s Transitions %llu\n",
+	seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
 		   db_state_str[dev->db_state],
-		   dev->rdev.stats.db_state_transitions);
+		   dev->rdev.stats.db_state_transitions,
+		   dev->rdev.stats.db_fc_interruptions);
 	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
 	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
 		   dev->rdev.stats.act_ofld_conn_fails);
@@ -643,6 +648,12 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
 		goto err4;
 	}
+	rdev->status_page = (struct t4_dev_status_page *)
+			    __get_free_page(GFP_KERNEL);
+	if (!rdev->status_page) {
+		pr_err(MOD "error allocating status page\n");
+		goto err4;
+	}
 	return 0;
 err4:
 	c4iw_rqtpool_destroy(rdev);
@@ -656,6 +667,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 
 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
+	free_page((unsigned long)rdev->status_page);
 	c4iw_pblpool_destroy(rdev);
 	c4iw_rqtpool_destroy(rdev);
 	c4iw_destroy_resource(&rdev->resource);
@@ -703,18 +715,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 		pr_info("%s: On-Chip Queues not supported on this device.\n",
 			pci_name(infop->pdev));
 
-	if (!is_t4(infop->adapter_type)) {
-		if (!allow_db_fc_on_t5) {
-			db_fc_threshold = 100000;
-			pr_info("DB Flow Control Disabled.\n");
-		}
-
-		if (!allow_db_coalescing_on_t5) {
-			db_coalescing_threshold = -1;
-			pr_info("DB Coalescing Disabled.\n");
-		}
-	}
-
 	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
 	if (!devp) {
 		printk(KERN_ERR MOD "Cannot allocate ib device\n");
@@ -749,6 +749,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	spin_lock_init(&devp->lock);
 	mutex_init(&devp->rdev.stats.lock);
 	mutex_init(&devp->db_mutex);
+	INIT_LIST_HEAD(&devp->db_fc_list);
 
 	if (c4iw_debugfs_root) {
 		devp->debugfs_root = debugfs_create_dir(
@@ -977,13 +978,16 @@ static int disable_qp_db(int id, void *p, void *data)
 
 static void stop_queues(struct uld_ctx *ctx)
 {
-	spin_lock_irq(&ctx->dev->lock);
-	if (ctx->dev->db_state == NORMAL) {
-		ctx->dev->rdev.stats.db_state_transitions++;
-		ctx->dev->db_state = FLOW_CONTROL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->dev->lock, flags);
+	ctx->dev->rdev.stats.db_state_transitions++;
+	ctx->dev->db_state = STOPPED;
+	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
 		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
-	}
-	spin_unlock_irq(&ctx->dev->lock);
+	else
+		ctx->dev->rdev.status_page->db_off = 1;
+	spin_unlock_irqrestore(&ctx->dev->lock, flags);
 }
 
 static int enable_qp_db(int id, void *p, void *data)
@@ -994,15 +998,70 @@ static int enable_qp_db(int id, void *p, void *data)
 	return 0;
 }
 
+static void resume_rc_qp(struct c4iw_qp *qp)
+{
+	spin_lock(&qp->lock);
+	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc);
+	qp->wq.sq.wq_pidx_inc = 0;
+	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc);
+	qp->wq.rq.wq_pidx_inc = 0;
+	spin_unlock(&qp->lock);
+}
+
+static void resume_a_chunk(struct uld_ctx *ctx)
+{
+	int i;
+	struct c4iw_qp *qp;
+
+	for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
+		qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
+				      db_fc_entry);
+		list_del_init(&qp->db_fc_entry);
+		resume_rc_qp(qp);
+		if (list_empty(&ctx->dev->db_fc_list))
+			break;
+	}
+}
+
 static void resume_queues(struct uld_ctx *ctx)
 {
 	spin_lock_irq(&ctx->dev->lock);
-	if (ctx->dev->qpcnt <= db_fc_threshold &&
-	    ctx->dev->db_state == FLOW_CONTROL) {
-		ctx->dev->db_state = NORMAL;
-		ctx->dev->rdev.stats.db_state_transitions++;
-		idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
+	if (ctx->dev->db_state != STOPPED)
+		goto out;
+	ctx->dev->db_state = FLOW_CONTROL;
+	while (1) {
+		if (list_empty(&ctx->dev->db_fc_list)) {
+			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
+			ctx->dev->db_state = NORMAL;
+			ctx->dev->rdev.stats.db_state_transitions++;
+			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
+				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
+					     NULL);
+			} else {
+				ctx->dev->rdev.status_page->db_off = 0;
+			}
+			break;
+		} else {
+			if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
+			    < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
+			       DB_FC_DRAIN_THRESH)) {
+				resume_a_chunk(ctx);
+			}
+			if (!list_empty(&ctx->dev->db_fc_list)) {
+				spin_unlock_irq(&ctx->dev->lock);
+				if (DB_FC_RESUME_DELAY) {
+					set_current_state(TASK_UNINTERRUPTIBLE);
+					schedule_timeout(DB_FC_RESUME_DELAY);
+				}
+				spin_lock_irq(&ctx->dev->lock);
+				if (ctx->dev->db_state != FLOW_CONTROL)
+					break;
+			}
+		}
 	}
+out:
+	if (ctx->dev->db_state != NORMAL)
+		ctx->dev->rdev.stats.db_fc_interruptions++;
 	spin_unlock_irq(&ctx->dev->lock);
 }
 
@@ -1028,12 +1087,12 @@ static int count_qps(int id, void *p, void *data)
 	return 0;
 }
 
-static void deref_qps(struct qp_list qp_list)
+static void deref_qps(struct qp_list *qp_list)
 {
 	int idx;
 
-	for (idx = 0; idx < qp_list.idx; idx++)
-		c4iw_qp_rem_ref(&qp_list.qps[idx]->ibqp);
+	for (idx = 0; idx < qp_list->idx; idx++)
+		c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
 }
 
 static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
@@ -1044,17 +1103,22 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
 	for (idx = 0; idx < qp_list->idx; idx++) {
 		struct c4iw_qp *qp = qp_list->qps[idx];
 
+		spin_lock_irq(&qp->rhp->lock);
+		spin_lock(&qp->lock);
 		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
 					  qp->wq.sq.qid,
 					  t4_sq_host_wq_pidx(&qp->wq),
 					  t4_sq_wq_size(&qp->wq));
 		if (ret) {
-			printk(KERN_ERR MOD "%s: Fatal error - "
+			pr_err(KERN_ERR MOD "%s: Fatal error - "
 			       "DB overflow recovery failed - "
 			       "error syncing SQ qid %u\n",
 			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
+			spin_unlock(&qp->lock);
+			spin_unlock_irq(&qp->rhp->lock);
 			return;
 		}
+		qp->wq.sq.wq_pidx_inc = 0;
 
 		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
 					  qp->wq.rq.qid,
@@ -1062,12 +1126,17 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
 					  t4_rq_wq_size(&qp->wq));
 
 		if (ret) {
-			printk(KERN_ERR MOD "%s: Fatal error - "
+			pr_err(KERN_ERR MOD "%s: Fatal error - "
 			       "DB overflow recovery failed - "
 			       "error syncing RQ qid %u\n",
 			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
+			spin_unlock(&qp->lock);
+			spin_unlock_irq(&qp->rhp->lock);
 			return;
 		}
+		qp->wq.rq.wq_pidx_inc = 0;
+		spin_unlock(&qp->lock);
+		spin_unlock_irq(&qp->rhp->lock);
 
 		/* Wait for the dbfifo to drain */
 		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
@@ -1083,36 +1152,22 @@ static void recover_queues(struct uld_ctx *ctx)
 	struct qp_list qp_list;
 	int ret;
 
-	/* lock out kernel db ringers */
-	mutex_lock(&ctx->dev->db_mutex);
-
-	/* put all queues in to recovery mode */
-	spin_lock_irq(&ctx->dev->lock);
-	ctx->dev->db_state = RECOVERY;
-	ctx->dev->rdev.stats.db_state_transitions++;
-	idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
-	spin_unlock_irq(&ctx->dev->lock);
-
 	/* slow everybody down */
 	set_current_state(TASK_UNINTERRUPTIBLE);
 	schedule_timeout(usecs_to_jiffies(1000));
 
-	/* Wait for the dbfifo to completely drain. */
-	while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(usecs_to_jiffies(10));
-	}
-
 	/* flush the SGE contexts */
 	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
 	if (ret) {
 		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
 		       pci_name(ctx->lldi.pdev));
-		goto out;
+		return;
 	}
 
 	/* Count active queues so we can build a list of queues to recover */
 	spin_lock_irq(&ctx->dev->lock);
+	WARN_ON(ctx->dev->db_state != STOPPED);
+	ctx->dev->db_state = RECOVERY;
 	idr_for_each(&ctx->dev->qpidr, count_qps, &count);
 
 	qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
@@ -1120,7 +1175,7 @@ static void recover_queues(struct uld_ctx *ctx)
 		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
 		       pci_name(ctx->lldi.pdev));
 		spin_unlock_irq(&ctx->dev->lock);
-		goto out;
+		return;
 	}
 	qp_list.idx = 0;
 
@@ -1133,29 +1188,13 @@ static void recover_queues(struct uld_ctx *ctx)
 	recover_lost_dbs(ctx, &qp_list);
 
 	/* we're almost done!  deref the qps and clean up */
-	deref_qps(qp_list);
+	deref_qps(&qp_list);
 	kfree(qp_list.qps);
 
-	/* Wait for the dbfifo to completely drain again */
-	while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(usecs_to_jiffies(10));
-	}
-
-	/* resume the queues */
 	spin_lock_irq(&ctx->dev->lock);
-	if (ctx->dev->qpcnt > db_fc_threshold)
-		ctx->dev->db_state = FLOW_CONTROL;
-	else {
-		ctx->dev->db_state = NORMAL;
-		idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
-	}
-	ctx->dev->rdev.stats.db_state_transitions++;
+	WARN_ON(ctx->dev->db_state != RECOVERY);
+	ctx->dev->db_state = STOPPED;
 	spin_unlock_irq(&ctx->dev->lock);
-
-out:
-	/* start up kernel db ringers again */
-	mutex_unlock(&ctx->dev->db_mutex);
 }
 
 static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
@@ -1165,9 +1204,7 @@ static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
 	switch (control) {
 	case CXGB4_CONTROL_DB_FULL:
 		stop_queues(ctx);
-		mutex_lock(&ctx->dev->rdev.stats.lock);
 		ctx->dev->rdev.stats.db_full++;
-		mutex_unlock(&ctx->dev->rdev.stats.lock);
 		break;
 	case CXGB4_CONTROL_DB_EMPTY:
 		resume_queues(ctx);
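The stop/resume/recover helpers above are driven from c4iw_uld_control(), the ULD control callback shown in the last hunk. A condensed sketch of how the control events map onto the new state machine (the DB_FULL and DB_EMPTY cases appear in the hunk above; the DB_DROP case is assumed from the rest of the driver and is not part of these hunks):

/* Sketch only: how cxgb4's doorbell events drive the helpers above. */
static void db_event_sketch(struct uld_ctx *ctx, enum cxgb4_control control)
{
	switch (control) {
	case CXGB4_CONTROL_DB_FULL:	/* doorbell FIFO is filling up */
		stop_queues(ctx);	/* -> STOPPED; status_page->db_off = 1 */
		break;
	case CXGB4_CONTROL_DB_EMPTY:	/* FIFO has drained */
		resume_queues(ctx);	/* STOPPED -> FLOW_CONTROL -> NORMAL */
		break;
	case CXGB4_CONTROL_DB_DROP:	/* doorbells were dropped (assumed case) */
		recover_queues(ctx);	/* STOPPED -> RECOVERY -> STOPPED */
		break;
	default:
		break;
	}
}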
+7 −2
@@ -109,6 +109,7 @@ struct c4iw_dev_ucontext {
 
 enum c4iw_rdev_flags {
 	T4_FATAL_ERROR = (1<<0),
+	T4_STATUS_PAGE_DISABLED = (1<<1),
 };
 
 struct c4iw_stat {
@@ -130,6 +131,7 @@ struct c4iw_stats {
 	u64  db_empty;
 	u64  db_drop;
 	u64  db_state_transitions;
+	u64  db_fc_interruptions;
 	u64  tcam_full;
 	u64  act_ofld_conn_fails;
 	u64  pas_ofld_conn_fails;
@@ -150,6 +152,7 @@ struct c4iw_rdev {
 	unsigned long oc_mw_pa;
 	void __iomem *oc_mw_kva;
 	struct c4iw_stats stats;
+	struct t4_dev_status_page *status_page;
 };
 
 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -211,7 +214,8 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
 enum db_state {
 	NORMAL = 0,
 	FLOW_CONTROL = 1,
-	RECOVERY = 2
+	RECOVERY = 2,
+	STOPPED = 3
 };
 
 struct c4iw_dev {
@@ -225,10 +229,10 @@ struct c4iw_dev {
 	struct mutex db_mutex;
 	struct dentry *debugfs_root;
 	enum db_state db_state;
-	int qpcnt;
 	struct idr hwtid_idr;
 	struct idr atid_idr;
 	struct idr stid_idr;
+	struct list_head db_fc_list;
 };
 
 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -432,6 +436,7 @@ struct c4iw_qp_attributes {
 
 struct c4iw_qp {
 	struct ib_qp ibqp;
+	struct list_head db_fc_entry;
 	struct c4iw_dev *rhp;
 	struct c4iw_ep *ep;
 	struct c4iw_qp_attributes attr;
+41 −2
@@ -106,15 +106,54 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
 {
 	struct c4iw_ucontext *context;
 	struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
+	static int warned;
+	struct c4iw_alloc_ucontext_resp uresp;
+	int ret = 0;
+	struct c4iw_mm_entry *mm = NULL;
 
 	PDBG("%s ibdev %p\n", __func__, ibdev);
 	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
+	if (!context) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
 	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
 	INIT_LIST_HEAD(&context->mmaps);
 	spin_lock_init(&context->mmap_lock);
+
+	if (udata->outlen < sizeof(uresp)) {
+		if (!warned++)
+			pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.");
+		rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
+	} else {
+		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
+		if (!mm)
+			goto err_free;
+
+		uresp.status_page_size = PAGE_SIZE;
+
+		spin_lock(&context->mmap_lock);
+		uresp.status_page_key = context->key;
+		context->key += PAGE_SIZE;
+		spin_unlock(&context->mmap_lock);
+
+		ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+		if (ret)
+			goto err_mm;
+
+		mm->key = uresp.status_page_key;
+		mm->addr = virt_to_phys(rhp->rdev.status_page);
+		mm->len = PAGE_SIZE;
+		insert_mmap(context, mm);
+	}
 	return &context->ibucontext;
+err_mm:
+	kfree(mm);
+err_free:
+	kfree(context);
+err:
+	return ERR_PTR(ret);
 }
 
 static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
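The alloc_ucontext change above exports the new status page to user space: the response carries status_page_key and status_page_size, and c4iw_mmap() can then map the page so libcxgb4 can poll db_off before ringing user doorbells. A minimal user-space sketch of that flow (hypothetical helper, assuming the usual cxgb4 convention that the mmap offset is the returned key; this is not code from the patch or from libcxgb4):

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* Sketch: map the device status page advertised by c4iw_alloc_ucontext(). */
static void *map_status_page(int uverbs_fd, uint64_t status_page_key,
			     size_t status_page_size)
{
	void *page = mmap(NULL, status_page_size, PROT_READ, MAP_SHARED,
			  uverbs_fd, status_page_key);

	return page == MAP_FAILED ? NULL : page;
}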
+62 −78
@@ -638,6 +638,46 @@ void c4iw_qp_rem_ref(struct ib_qp *qp)
 		wake_up(&(to_c4iw_qp(qp)->wait));
 }
 
+static void add_to_fc_list(struct list_head *head, struct list_head *entry)
+{
+	if (list_empty(entry))
+		list_add_tail(entry, head);
+}
+
+static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&qhp->rhp->lock, flags);
+	spin_lock(&qhp->lock);
+	if (qhp->rhp->db_state == NORMAL) {
+		t4_ring_sq_db(&qhp->wq, inc);
+	} else {
+		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
+		qhp->wq.sq.wq_pidx_inc += inc;
+	}
+	spin_unlock(&qhp->lock);
+	spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+	return 0;
+}
+
+static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&qhp->rhp->lock, flags);
+	spin_lock(&qhp->lock);
+	if (qhp->rhp->db_state == NORMAL) {
+		t4_ring_rq_db(&qhp->wq, inc);
+	} else {
+		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
+		qhp->wq.rq.wq_pidx_inc += inc;
+	}
+	spin_unlock(&qhp->lock);
+	spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+	return 0;
+}
+
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		   struct ib_send_wr **bad_wr)
 {
@@ -750,9 +790,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		t4_sq_produce(&qhp->wq, len16);
 		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
 	}
-	if (t4_wq_db_enabled(&qhp->wq))
+	if (!qhp->rhp->rdev.status_page->db_off) {
 		t4_ring_sq_db(&qhp->wq, idx);
-	spin_unlock_irqrestore(&qhp->lock, flag);
+		spin_unlock_irqrestore(&qhp->lock, flag);
+	} else {
+		spin_unlock_irqrestore(&qhp->lock, flag);
+		ring_kernel_sq_db(qhp, idx);
+	}
 	return err;
 }
 
@@ -812,9 +856,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		wr = wr->next;
 		num_wrs--;
 	}
-	if (t4_wq_db_enabled(&qhp->wq))
+	if (!qhp->rhp->rdev.status_page->db_off) {
 		t4_ring_rq_db(&qhp->wq, idx);
-	spin_unlock_irqrestore(&qhp->lock, flag);
+		spin_unlock_irqrestore(&qhp->lock, flag);
+	} else {
+		spin_unlock_irqrestore(&qhp->lock, flag);
+		ring_kernel_rq_db(qhp, idx);
+	}
 	return err;
 }
 
@@ -1200,35 +1248,6 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	return ret;
 }
 
-/*
- * Called by the library when the qp has user dbs disabled due to
- * a DB_FULL condition.  This function will single-thread all user
- * DB rings to avoid overflowing the hw db-fifo.
- */
-static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
-{
-	int delay = db_delay_usecs;
-
-	mutex_lock(&qhp->rhp->db_mutex);
-	do {
-
-		/*
-		 * The interrupt threshold is dbfifo_int_thresh << 6. So
-		 * make sure we don't cross that and generate an interrupt.
-		 */
-		if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
-		    (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
-			writel(QID(qid) | PIDX(inc), qhp->wq.db);
-			break;
-		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(usecs_to_jiffies(delay));
-		delay = min(delay << 1, 2000);
-	} while (1);
-	mutex_unlock(&qhp->rhp->db_mutex);
-	return 0;
-}
-
 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 		   enum c4iw_qp_attr_mask mask,
 		   struct c4iw_qp_attributes *attrs,
@@ -1278,11 +1297,11 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	}
 
 	if (mask & C4IW_QP_ATTR_SQ_DB) {
-		ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc);
+		ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
 		goto out;
 	}
 	if (mask & C4IW_QP_ATTR_RQ_DB) {
-		ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc);
+		ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
 		goto out;
 	}
 
@@ -1465,14 +1484,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	return ret;
 }
 
-static int enable_qp_db(int id, void *p, void *data)
-{
-	struct c4iw_qp *qp = p;
-
-	t4_enable_wq_db(&qp->wq);
-	return 0;
-}
-
 int c4iw_destroy_qp(struct ib_qp *ib_qp)
 {
 	struct c4iw_dev *rhp;
@@ -1490,22 +1501,15 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
 	wait_event(qhp->wait, !qhp->ep);
 
-	spin_lock_irq(&rhp->lock);
-	remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
-	rhp->qpcnt--;
-	BUG_ON(rhp->qpcnt < 0);
-	if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) {
-		rhp->rdev.stats.db_state_transitions++;
-		rhp->db_state = NORMAL;
-		idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
-	}
-	if (db_coalescing_threshold >= 0)
-		if (rhp->qpcnt <= db_coalescing_threshold)
-			cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
-	spin_unlock_irq(&rhp->lock);
+	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
 	atomic_dec(&qhp->refcnt);
 	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
 
+	spin_lock_irq(&rhp->lock);
+	if (!list_empty(&qhp->db_fc_entry))
+		list_del_init(&qhp->db_fc_entry);
+	spin_unlock_irq(&rhp->lock);
+
 	ucontext = ib_qp->uobject ?
 		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
 	destroy_qp(&rhp->rdev, &qhp->wq,
@@ -1516,14 +1520,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 	return 0;
 }
 
-static int disable_qp_db(int id, void *p, void *data)
-{
-	struct c4iw_qp *qp = p;
-
-	t4_disable_wq_db(&qp->wq);
-	return 0;
-}
-
 struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 			     struct ib_udata *udata)
 {
@@ -1610,20 +1606,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	init_waitqueue_head(&qhp->wait);
 	atomic_set(&qhp->refcnt, 1);
 
-	spin_lock_irq(&rhp->lock);
-	if (rhp->db_state != NORMAL)
-		t4_disable_wq_db(&qhp->wq);
-	rhp->qpcnt++;
-	if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
-		rhp->rdev.stats.db_state_transitions++;
-		rhp->db_state = FLOW_CONTROL;
-		idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
-	}
-	if (db_coalescing_threshold >= 0)
-		if (rhp->qpcnt > db_coalescing_threshold)
-			cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
-	ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
-	spin_unlock_irq(&rhp->lock);
+	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
 	if (ret)
 		goto err2;
 
@@ -1709,6 +1692,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	}
 	qhp->ibqp.qp_num = qhp->wq.sq.qid;
 	init_timer(&(qhp->timer));
+	INIT_LIST_HEAD(&qhp->db_fc_entry);
 	PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
 	     __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
 	     qhp->wq.sq.qid);