
Commit 75f5076b authored by Linus Torvalds
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/qib: Use pci_dev->revision
  RDMA/iwcm: Get rid of enum iw_cm_event_status
  IB/ipath: Use pci_dev->revision, again
  IB/qib: Prevent driver hang with unprogrammed boards
  RDMA/cxgb4: EEH errors can hang the driver
  RDMA/cxgb4: Reset wait condition atomically
  RDMA/cxgb4: Fix missing parentheses
  RDMA/cxgb4: Initialization errors can cause crash
  RDMA/cxgb4: Don't change QP state outside EP lock
  RDMA/cma: Add an ID_REUSEADDR option
  RDMA/cma: Fix handling of IPv6 addressing in cma_use_port
parents 83d7e948 1df9fad1
drivers/infiniband/core/cma.c: +139 −68
@@ -148,6 +148,7 @@ struct rdma_id_private {
	u32			qp_num;
	u8			srq;
	u8			tos;
+	u8			reuseaddr;
};

struct cma_multicast {
@@ -712,6 +713,21 @@ static inline int cma_any_addr(struct sockaddr *addr)
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

+static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
+{
+	if (src->sa_family != dst->sa_family)
+		return -1;
+
+	switch (src->sa_family) {
+	case AF_INET:
+		return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
+		       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
+	default:
+		return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
+				     &((struct sockaddr_in6 *) dst)->sin6_addr);
+	}
+}
+
static inline __be16 cma_port(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
@@ -1564,50 +1580,6 @@ static void cma_listen_on_all(struct rdma_id_private *id_priv)
	mutex_unlock(&lock);
}

-int rdma_listen(struct rdma_cm_id *id, int backlog)
-{
-	struct rdma_id_private *id_priv;
-	int ret;
-
-	id_priv = container_of(id, struct rdma_id_private, id);
-	if (id_priv->state == CMA_IDLE) {
-		((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
-		ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
-		if (ret)
-			return ret;
-	}
-
-	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
-		return -EINVAL;
-
-	id_priv->backlog = backlog;
-	if (id->device) {
-		switch (rdma_node_get_transport(id->device->node_type)) {
-		case RDMA_TRANSPORT_IB:
-			ret = cma_ib_listen(id_priv);
-			if (ret)
-				goto err;
-			break;
-		case RDMA_TRANSPORT_IWARP:
-			ret = cma_iw_listen(id_priv, backlog);
-			if (ret)
-				goto err;
-			break;
-		default:
-			ret = -ENOSYS;
-			goto err;
-		}
-	} else
-		cma_listen_on_all(id_priv);
-
-	return 0;
-err:
-	id_priv->backlog = 0;
-	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
-	return ret;
-}
-EXPORT_SYMBOL(rdma_listen);
-
void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;
@@ -2090,6 +2062,25 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
}
EXPORT_SYMBOL(rdma_resolve_addr);

+int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
+{
+	struct rdma_id_private *id_priv;
+	unsigned long flags;
+	int ret;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	spin_lock_irqsave(&id_priv->lock, flags);
+	if (id_priv->state == CMA_IDLE) {
+		id_priv->reuseaddr = reuse;
+		ret = 0;
+	} else {
+		ret = -EINVAL;
+	}
+	spin_unlock_irqrestore(&id_priv->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(rdma_set_reuseaddr);
+
static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
@@ -2165,41 +2156,71 @@ static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
	return -EADDRNOTAVAIL;
}

-static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
+/*
+ * Check that the requested port is available.  This is called when trying to
+ * bind to a specific port, or when trying to listen on a bound port.  In
+ * the latter case, the provided id_priv may already be on the bind_list, but
+ * we still need to check that it's okay to start listening.
+ */
+static int cma_check_port(struct rdma_bind_list *bind_list,
+			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
{
	struct rdma_id_private *cur_id;
-	struct sockaddr_in *sin, *cur_sin;
-	struct rdma_bind_list *bind_list;
+	struct sockaddr *addr, *cur_addr;
	struct hlist_node *node;
-	unsigned short snum;

-	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
-	snum = ntohs(sin->sin_port);
-	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
-		return -EACCES;
-
-	bind_list = idr_find(ps, snum);
-	if (!bind_list)
-		return cma_alloc_port(ps, id_priv, snum);
-
-	/*
-	 * We don't support binding to any address if anyone is bound to
-	 * a specific address on the same port.
-	 */
-	if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
+	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
+	if (cma_any_addr(addr) && !reuseaddr)
		return -EADDRNOTAVAIL;

	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
-		if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
-			return -EADDRNOTAVAIL;
-
-		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
-		if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
-			return -EADDRINUSE;
+		if (id_priv == cur_id)
+			continue;
+
+		if ((cur_id->state == CMA_LISTEN) ||
+		    !reuseaddr || !cur_id->reuseaddr) {
+			cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
+			if (cma_any_addr(cur_addr))
+				return -EADDRNOTAVAIL;
+
+			if (!cma_addr_cmp(addr, cur_addr))
+				return -EADDRINUSE;
+		}
	}
	return 0;
}

+static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
+{
+	struct rdma_bind_list *bind_list;
+	unsigned short snum;
+	int ret;
+
+	snum = ntohs(cma_port((struct sockaddr *) &id_priv->id.route.addr.src_addr));
+	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
+		return -EACCES;
+
+	bind_list = idr_find(ps, snum);
+	if (!bind_list) {
+		ret = cma_alloc_port(ps, id_priv, snum);
+	} else {
+		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
+		if (!ret)
+			cma_bind_port(bind_list, id_priv);
+	}
+	return ret;
+}
+
+static int cma_bind_listen(struct rdma_id_private *id_priv)
+{
+	struct rdma_bind_list *bind_list = id_priv->bind_list;
+	int ret = 0;
+
+	mutex_lock(&lock);
+	if (bind_list->owners.first->next)
+		ret = cma_check_port(bind_list, id_priv, 0);
+	mutex_unlock(&lock);
+	return ret;
+}

static int cma_get_port(struct rdma_id_private *id_priv)
@@ -2253,6 +2274,56 @@ static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
	return 0;
}

+int rdma_listen(struct rdma_cm_id *id, int backlog)
+{
+	struct rdma_id_private *id_priv;
+	int ret;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	if (id_priv->state == CMA_IDLE) {
+		((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
+		ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
+		if (ret)
+			return ret;
+	}
+
+	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
+		return -EINVAL;
+
+	if (id_priv->reuseaddr) {
+		ret = cma_bind_listen(id_priv);
+		if (ret)
+			goto err;
+	}
+
+	id_priv->backlog = backlog;
+	if (id->device) {
+		switch (rdma_node_get_transport(id->device->node_type)) {
+		case RDMA_TRANSPORT_IB:
+			ret = cma_ib_listen(id_priv);
+			if (ret)
+				goto err;
+			break;
+		case RDMA_TRANSPORT_IWARP:
+			ret = cma_iw_listen(id_priv, backlog);
+			if (ret)
+				goto err;
+			break;
+		default:
+			ret = -ENOSYS;
+			goto err;
+		}
+	} else
+		cma_listen_on_all(id_priv);
+
+	return 0;
+err:
+	id_priv->backlog = 0;
+	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
+	return ret;
+}
+EXPORT_SYMBOL(rdma_listen);
+
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
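
Taken together, the cma.c changes give an rdma_cm ID opt-in SO_REUSEADDR-style semantics: rdma_set_reuseaddr() records the flag while the ID is still idle, and cma_check_port() then tolerates other reuseaddr IDs on the same port until one of them actually listens. A minimal userspace sketch of the intended use via librdmacm follows; it assumes the matching RDMA_OPTION_ID_REUSEADDR plumbing on the library side and trims all cleanup, so treat it as an illustration rather than the library's documented recipe.

	#include <string.h>
	#include <netinet/in.h>
	#include <rdma/rdma_cma.h>

	/* Sketch: bind a listener with port reuse enabled.  The option must
	 * be set while the ID is still idle (before rdma_bind_addr()),
	 * matching the CMA_IDLE check in rdma_set_reuseaddr() above. */
	static int listen_with_reuse(struct rdma_event_channel *ch, uint16_t port)
	{
		struct rdma_cm_id *id;
		struct sockaddr_in sin;
		int reuse = 1;

		if (rdma_create_id(ch, &id, NULL, RDMA_PS_TCP))
			return -1;
		if (rdma_set_option(id, RDMA_OPTION_ID, RDMA_OPTION_ID_REUSEADDR,
				    &reuse, sizeof reuse))
			return -1;

		memset(&sin, 0, sizeof sin);
		sin.sin_family = AF_INET;
		sin.sin_port = htons(port);
		if (rdma_bind_addr(id, (struct sockaddr *) &sin))
			return -1;
		return rdma_listen(id, 1);
	}
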
drivers/infiniband/core/iwcm.c: +1 −1
@@ -725,7 +725,7 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
-	if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) {
+	if (iw_event->status == 0) {
		cm_id_priv->id.local_addr = iw_event->local_addr;
		cm_id_priv->id.remote_addr = iw_event->remote_addr;
		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
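
This one-line hunk is the consumer side of "RDMA/iwcm: Get rid of enum iw_cm_event_status": event status becomes a plain errno-style int, with 0 meaning success. A hedged sketch of how a provider would now fill in a connect-reply event (hypothetical helper, not part of this diff):

	#include <linux/string.h>
	#include <rdma/iw_cm.h>

	/* Hypothetical provider-side helper: with the enum gone, status is
	 * just 0 on success or a negative errno such as -ECONNRESET. */
	static void fill_connect_reply(struct iw_cm_event *event, int err)
	{
		memset(event, 0, sizeof *event);
		event->event = IW_CM_EVENT_CONNECT_REPLY;
		event->status = err;
	}
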
drivers/infiniband/core/ucma.c: +7 −0
@@ -883,6 +883,13 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname,
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
+	case RDMA_OPTION_ID_REUSEADDR:
+		if (optlen != sizeof(int)) {
+			ret = -EINVAL;
+			break;
+		}
+		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
+		break;
	default:
		ret = -ENOSYS;
	}
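
Note the strict validation in the new ucma case: userspace must pass exactly sizeof(int), and any nonzero value is normalized to 1 before reaching rdma_set_reuseaddr(). A quick hedged userspace probe of both paths (same hypothetical librdmacm usage as in the sketch above; the exact error surfaced to the caller is the library's business):

	#include <assert.h>
	#include <rdma/rdma_cma.h>

	/* Sketch: exercise the optlen check above.  `id` must still be
	 * idle, i.e. not yet bound or listening. */
	static void check_reuseaddr_optlen(struct rdma_cm_id *id)
	{
		char bad = 1;
		int good = 1;

		/* wrong size: the kernel handler rejects it (EINVAL) */
		assert(rdma_set_option(id, RDMA_OPTION_ID, RDMA_OPTION_ID_REUSEADDR,
				       &bad, sizeof bad) != 0);
		/* exactly sizeof(int): accepted, nonzero normalized to 1 */
		assert(rdma_set_option(id, RDMA_OPTION_ID, RDMA_OPTION_ID_REUSEADDR,
				       &good, sizeof good) == 0);
	}
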
drivers/infiniband/hw/cxgb4/cm.c: +14 −32
@@ -1198,9 +1198,7 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
-	ep->com.wr_wait.ret = status2errno(rpl->status);
-	ep->com.wr_wait.done = 1;
-	wake_up(&ep->com.wr_wait.wait);
+	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));

	return 0;
}
@@ -1234,9 +1232,7 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	PDBG("%s ep %p\n", __func__, ep);
-	ep->com.wr_wait.ret = status2errno(rpl->status);
-	ep->com.wr_wait.done = 1;
-	wake_up(&ep->com.wr_wait.wait);
+	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
	return 0;
}

@@ -1466,7 +1462,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;
-	int closing = 0;
+	int abort = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(hdr);

@@ -1492,23 +1488,22 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
-		ep->com.wr_wait.done = 1;
-		ep->com.wr_wait.ret = -ECONNRESET;
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
-		wake_up(&ep->com.wr_wait.wait);
+		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
-		ep->com.wr_wait.done = 1;
-		ep->com.wr_wait.ret = -ECONNRESET;
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
-		wake_up(&ep->com.wr_wait.wait);
+		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
-		closing = 1;
+		attrs.next_state = C4IW_QP_STATE_CLOSING;
+		abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);
+		disconnect = 1;
		break;
	case ABORTING:
		disconnect = 0;
@@ -1536,11 +1531,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
		BUG_ON(1);
	}
	mutex_unlock(&ep->com.mutex);
-	if (closing) {
-		attrs.next_state = C4IW_QP_STATE_CLOSING;
-		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-	}
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
@@ -1581,9 +1571,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 */
-	ep->com.wr_wait.done = 1;
-	ep->com.wr_wait.ret = -ECONNRESET;
-	wake_up(&ep->com.wr_wait.wait);
+	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
@@ -1710,14 +1698,14 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
	ep = lookup_tid(t, tid);
	BUG_ON(!ep);

-	if (ep->com.qp) {
+	if (ep && ep->com.qp) {
		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
		       ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	} else
-		printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid);
+		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);

	return 0;
}
@@ -2296,14 +2284,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
-		if (wr_waitp) {
-			if (ret)
-				wr_waitp->ret = -ret;
-			else
-				wr_waitp->ret = 0;
-			wr_waitp->done = 1;
-			wake_up(&wr_waitp->wait);
-		}
+		if (wr_waitp)
+			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
		kfree_skb(skb);
		break;
	case 2:
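
The cm.c hunks above all collapse the open-coded "set ret, set done, wake_up()" triple into a single c4iw_wake_up() call. The helper itself lives in the driver's header and is not part of this file's diff; presumably it is close to the sketch below, though the companion "RDMA/cxgb4: Reset wait condition atomically" patch in this same merge owns its exact final form, so take the shape as an assumption.

	#include <linux/wait.h>

	/* Assumed shape only; see iw_cxgb4.h for the real definitions. */
	struct c4iw_wr_wait {
		wait_queue_head_t wait;
		int done;
		int ret;
	};

	static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
	{
		wr_waitp->ret = ret;		/* record the result for the sleeper */
		wr_waitp->done = 1;		/* mark the reply as arrived */
		wake_up(&wr_waitp->wait);	/* wake c4iw_wait_for_reply() */
	}
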
drivers/infiniband/hw/cxgb4/device.c: +64 −51
@@ -44,7 +44,7 @@ MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

-static LIST_HEAD(dev_list);
+static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

static struct dentry *c4iw_debugfs_root;
@@ -370,18 +370,23 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
	c4iw_destroy_resource(&rdev->resource);
}

-static void c4iw_remove(struct c4iw_dev *dev)
+struct uld_ctx {
+	struct list_head entry;
+	struct cxgb4_lld_info lldi;
+	struct c4iw_dev *dev;
+};
+
+static void c4iw_remove(struct uld_ctx *ctx)
{
-	PDBG("%s c4iw_dev %p\n", __func__,  dev);
-	list_del(&dev->entry);
-	if (dev->registered)
-		c4iw_unregister_device(dev);
-	c4iw_rdev_close(&dev->rdev);
-	idr_destroy(&dev->cqidr);
-	idr_destroy(&dev->qpidr);
-	idr_destroy(&dev->mmidr);
-	iounmap(dev->rdev.oc_mw_kva);
-	ib_dealloc_device(&dev->ibdev);
+	PDBG("%s c4iw_dev %p\n", __func__,  ctx->dev);
+	c4iw_unregister_device(ctx->dev);
+	c4iw_rdev_close(&ctx->dev->rdev);
+	idr_destroy(&ctx->dev->cqidr);
+	idr_destroy(&ctx->dev->qpidr);
+	idr_destroy(&ctx->dev->mmidr);
+	iounmap(ctx->dev->rdev.oc_mw_kva);
+	ib_dealloc_device(&ctx->dev->ibdev);
+	ctx->dev = NULL;
}

static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
@@ -392,7 +397,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
-		return NULL;
+		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

@@ -402,27 +407,23 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
	devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
					       devp->rdev.lldi.vr->ocq.size);

-	printk(KERN_INFO MOD "ocq memory: "
+	PDBG(KERN_INFO MOD "ocq memory: "
	       "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	       devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	       devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

-	mutex_lock(&dev_mutex);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
-		mutex_unlock(&dev_mutex);
		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
-		return NULL;
+		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	spin_lock_init(&devp->lock);
-	list_add_tail(&devp->entry, &dev_list);
-	mutex_unlock(&dev_mutex);

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
@@ -435,7 +436,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)

static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
-	struct c4iw_dev *dev;
+	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

@@ -443,25 +444,33 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
		printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
		       DRV_VERSION);

-	dev = c4iw_alloc(infop);
-	if (!dev)
+	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+	if (!ctx) {
+		ctx = ERR_PTR(-ENOMEM);
		goto out;
+	}
+	ctx->lldi = *infop;

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
-	     __func__, pci_name(dev->rdev.lldi.pdev),
-	     dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq,
-	     dev->rdev.lldi.ntxq, dev->rdev.lldi.nports);
+	     __func__, pci_name(ctx->lldi.pdev),
+	     ctx->lldi.nchan, ctx->lldi.nrxq,
+	     ctx->lldi.ntxq, ctx->lldi.nports);

-	for (i = 0; i < dev->rdev.lldi.nrxq; i++)
-		PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
+	mutex_lock(&dev_mutex);
+	list_add_tail(&ctx->entry, &uld_ctx_list);
+	mutex_unlock(&dev_mutex);
+
+	for (i = 0; i < ctx->lldi.nrxq; i++)
+		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
-	return dev;
+	return ctx;
}

static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			const struct pkt_gl *gl)
{
-	struct c4iw_dev *dev = handle;
+	struct uld_ctx *ctx = handle;
+	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	const struct cpl_act_establish *rpl;
	unsigned int opcode;
@@ -503,47 +512,49 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,

static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
-	struct c4iw_dev *dev = handle;
+	struct uld_ctx *ctx = handle;

	PDBG("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
-		printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev));
-		if (!dev->registered) {
-			int ret;
-			ret = c4iw_register_device(dev);
-			if (ret)
+		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
+		if (!ctx->dev) {
+			int ret = 0;
+
+			ctx->dev = c4iw_alloc(&ctx->lldi);
+			if (!IS_ERR(ctx->dev))
+				ret = c4iw_register_device(ctx->dev);
+			if (IS_ERR(ctx->dev) || ret)
				printk(KERN_ERR MOD
				       "%s: RDMA registration failed: %d\n",
-				       pci_name(dev->rdev.lldi.pdev), ret);
+				       pci_name(ctx->lldi.pdev), ret);
		}
		break;
	case CXGB4_STATE_DOWN:
		printk(KERN_INFO MOD "%s: Down\n",
-		       pci_name(dev->rdev.lldi.pdev));
-		if (dev->registered)
-			c4iw_unregister_device(dev);
+		       pci_name(ctx->lldi.pdev));
+		if (ctx->dev)
+			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		printk(KERN_INFO MOD "%s: Fatal Error\n",
-		       pci_name(dev->rdev.lldi.pdev));
-		dev->rdev.flags |= T4_FATAL_ERROR;
-		if (dev->registered) {
+		       pci_name(ctx->lldi.pdev));
+		if (ctx->dev) {
			struct ib_event event;

+			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof event);
			event.event  = IB_EVENT_DEVICE_FATAL;
-			event.device = &dev->ibdev;
+			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
-			c4iw_unregister_device(dev);
+			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		printk(KERN_INFO MOD "%s: Detach\n",
-		       pci_name(dev->rdev.lldi.pdev));
-		mutex_lock(&dev_mutex);
-		c4iw_remove(dev);
-		mutex_unlock(&dev_mutex);
+		       pci_name(ctx->lldi.pdev));
+		if (ctx->dev)
+			c4iw_remove(ctx);
		break;
	}
	return 0;
@@ -576,11 +587,13 @@ static int __init c4iw_init_module(void)

static void __exit c4iw_exit_module(void)
{
-	struct c4iw_dev *dev, *tmp;
+	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
-	list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
-		c4iw_remove(dev);
+	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
+		if (ctx->dev)
+			c4iw_remove(ctx);
+		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
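
The device.c rework decouples the ULD handle from the ib_device: the handle cxgb4 keeps is now a small, long-lived uld_ctx, while the c4iw_dev underneath it is created on the first CXGB4_STATE_UP and can be torn down and rebuilt across EEH recovery. A condensed, hypothetical sketch of that lifecycle (names from the diff, control flow deliberately simplified):

	#include <linux/err.h>

	/* Sketch only, not the literal driver code: the per-adapter context
	 * survives while the RDMA device comes and goes with link/EEH state. */
	static void example_state_change(struct uld_ctx *ctx, enum cxgb4_state state)
	{
		switch (state) {
		case CXGB4_STATE_UP:
			if (!ctx->dev) {
				ctx->dev = c4iw_alloc(&ctx->lldi);	/* (re)create */
				if (IS_ERR(ctx->dev))
					ctx->dev = NULL;	/* dormant until next UP */
			}
			break;
		case CXGB4_STATE_DOWN:
		case CXGB4_STATE_DETACH:
			if (ctx->dev)
				c4iw_remove(ctx);	/* frees the ib_device, keeps ctx */
			break;
		default:
			break;
		}
	}
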