Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6c47d773 authored by Linus Torvalds
Browse files
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/iwcm: Don't access a cm_id after dropping reference
  IB/iser: Handle iser_device allocation error gracefully
  IB/iser: Fix list iteration bug
  RDMA/cxgb3: Fix iwch_create_cq() off-by-one error
  RDMA/cxgb3: Return correct max_inline_data when creating a QP
  IB/fmr_pool: Flush all dirty FMRs from ib_fmr_pool_flush()
  Revert "IB/fmr_pool: ib_fmr_pool_flush() should flush all dirty FMRs"
  IB/cm: Flush workqueue when removing device
  MAINTAINERS: update ipath owner
parents 2f44bbb4 d7c1fbd6
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -2156,7 +2156,7 @@ L: netdev@vger.kernel.org
S:	Maintained

IPATH DRIVER:
P:	Arthur Jones
P:	Ralph Campbell
M:	infinipath@qlogic.com
L:	general@lists.openfabrics.org
T:	git git://git.qlogic.com/ipath-linux-2.6
+2 −1
Original line number Diff line number Diff line
@@ -3759,6 +3759,7 @@ static void cm_remove_one(struct ib_device *device)
		port = cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		flush_workqueue(cm.wq);
		cm_remove_port_fs(port);
	}
	kobject_put(&cm_dev->dev_obj);
@@ -3813,6 +3814,7 @@ static void __exit ib_cm_cleanup(void)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
@@ -3820,7 +3822,6 @@ static void __exit ib_cm_cleanup(void)
		kfree(timewait_info);
	}

	ib_unregister_client(&cm_client);
	class_unregister(&cm_class);
	idr_destroy(&cm.local_id_table);
}
+22 −16
Original line number Diff line number Diff line
@@ -139,7 +139,7 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
	int                 ret;
	struct ib_pool_fmr *fmr, *next;
	struct ib_pool_fmr *fmr;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);

@@ -158,20 +158,6 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
#endif
	}

	/*
	 * The free_list may hold FMRs that have been put there
	 * because they haven't reached the max_remap count.
	 * Invalidate their mapping as well.
	 */
	list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
		if (fmr->remap_count == 0)
			continue;
		hlist_del_init(&fmr->cache_node);
		fmr->remap_count = 0;
		list_add_tail(&fmr->fmr->list, &fmr_list);
		list_move(&fmr->list, &unmap_list);
	}

	list_splice(&pool->dirty_list, &unmap_list);
	INIT_LIST_HEAD(&pool->dirty_list);
	pool->dirty_len = 0;
@@ -384,6 +370,11 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)

	i = 0;
	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
		if (fmr->remap_count) {
			INIT_LIST_HEAD(&fmr_list);
			list_add_tail(&fmr->fmr->list, &fmr_list);
			ib_unmap_fmr(&fmr_list);
		}
		ib_dealloc_fmr(fmr->fmr);
		list_del(&fmr->list);
		kfree(fmr);
@@ -407,8 +398,23 @@ EXPORT_SYMBOL(ib_destroy_fmr_pool);
 */
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
	int serial = atomic_inc_return(&pool->req_ser);
	int serial;
	struct ib_pool_fmr *fmr, *next;

	/*
	 * The free_list holds FMRs that may have been used
	 * but have not been remapped enough times to be dirty.
	 * Put them on the dirty list now so that the cleanup
	 * thread will reap them too.
	 */
	spin_lock_irq(&pool->pool_lock);
	list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
		if (fmr->remap_count > 0)
			list_move(&fmr->list, &pool->dirty_list);
	}
	spin_unlock_irq(&pool->pool_lock);

	serial = atomic_inc_return(&pool->req_ser);
	wake_up_process(pool->thread);

	if (wait_event_interruptible(pool->force_wait,
+3 −2
Original line number Diff line number Diff line
@@ -839,6 +839,7 @@ static void cm_work_handler(struct work_struct *_work)
	unsigned long flags;
	int empty;
	int ret = 0;
	int destroy_id;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	empty = list_empty(&cm_id_priv->work_list);
@@ -857,9 +858,9 @@ static void cm_work_handler(struct work_struct *_work)
			destroy_cm_id(&cm_id_priv->id);
		}
		BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
		destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
		if (iwcm_deref_id(cm_id_priv)) {
			if (test_bit(IWCM_F_CALLBACK_DESTROY,
				     &cm_id_priv->flags)) {
			if (destroy_id) {
				BUG_ON(!list_empty(&cm_id_priv->work_list));
				free_cm_id(cm_id_priv);
			}
+4 −1
Original line number Diff line number Diff line
@@ -189,7 +189,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
@@ -819,8 +819,11 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
Loading