Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c367074b authored by Jason Gunthorpe
Browse files

RDMA/rxe: Use driver_unregister and new unregistration API



rxe does not have correct locking for its registration/unregistration
paths, use the core code to handle it instead. In this mode
ib_unregister_device will also do the dealloc, so rxe is required to do
clean up from a callback.

The core code ensures that unregistration is done only once, and generally
takes care of locking and concurrency problems for rxe.

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent d0899892
Loading
Loading
Loading
Loading
+9 −31
Original line number Diff line number Diff line
@@ -50,8 +50,10 @@ static void rxe_cleanup_ports(struct rxe_dev *rxe)
/* free resources for a rxe device all objects created for this device must
 * have been destroyed
 */
static void rxe_cleanup(struct rxe_dev *rxe)
void rxe_dealloc(struct ib_device *ib_dev)
{
	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

	rxe_pool_cleanup(&rxe->uc_pool);
	rxe_pool_cleanup(&rxe->pd_pool);
	rxe_pool_cleanup(&rxe->ah_pool);
@@ -65,16 +67,10 @@ static void rxe_cleanup(struct rxe_dev *rxe)

	rxe_cleanup_ports(rxe);

	if (rxe->tfm)
		crypto_free_shash(rxe->tfm);
}

/* called when all references have been dropped */
void rxe_release(struct kref *kref)
{
	/* Recover the rxe_dev from its embedded refcount. */
	struct rxe_dev *rxe = container_of(kref, struct rxe_dev, ref_cnt);

	/* Tear down the device's pools/ports and free its crypto state. */
	rxe_cleanup(rxe);
	/* Release the embedded ib_device allocation. */
	ib_dealloc_device(&rxe->ib_dev);
	/* NOTE(review): rxe is reached through ib_dev, so touching rxe->list
	 * after ib_dealloc_device() looks like a use-after-free — verify the
	 * intended teardown ordering (this path is removed by this commit in
	 * favor of the core unregistration API).
	 */
	list_del(&rxe->list);
}

/* initialize rxe device parameters */
@@ -312,31 +308,13 @@ int rxe_add(struct rxe_dev *rxe, unsigned int mtu)
{
	int err;

	kref_init(&rxe->ref_cnt);

	err = rxe_init(rxe);
	if (err)
		goto err1;

	rxe_set_mtu(rxe, mtu);

	err = rxe_register_device(rxe);
	if (err)
		goto err1;

	return 0;

err1:
	rxe_dev_put(rxe);
		return err;
}

/* called by the ifc layer to remove a device */
void rxe_remove(struct rxe_dev *rxe)
{
	rxe_unregister_device(rxe);
	rxe_set_mtu(rxe, mtu);

	rxe_dev_put(rxe);
	return rxe_register_device(rxe);
}

static int __init rxe_module_init(void)
@@ -360,7 +338,7 @@ static int __init rxe_module_init(void)

static void __exit rxe_module_exit(void)
{
	rxe_remove_all();
	ib_unregister_driver(RDMA_DRIVER_RXE);
	rxe_net_exit();
	rxe_cache_exit();

+0 −6
Original line number Diff line number Diff line
@@ -96,15 +96,9 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe,
void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);

int rxe_add(struct rxe_dev *rxe, unsigned int mtu);
void rxe_remove(struct rxe_dev *rxe);
void rxe_remove_all(void);

void rxe_rcv(struct sk_buff *skb);

static inline void rxe_dev_put(struct rxe_dev *rxe)
{
	kref_put(&rxe->ref_cnt, rxe_release);
}
struct rxe_dev *get_rxe_by_name(const char *name);

/* The caller must do a matching ib_device_put(&dev->ib_dev) */
+1 −1
Original line number Diff line number Diff line
@@ -232,7 +232,7 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
		      struct rxe_modify_srq_cmd *ucmd);

void rxe_release(struct kref *kref);
void rxe_dealloc(struct ib_device *ib_dev);

int rxe_completer(void *arg);
int rxe_requester(void *arg);
+2 −19
Original line number Diff line number Diff line
@@ -559,21 +559,6 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev)
	return rxe;
}

/* Remove every rxe device on the global list (module-exit path).
 *
 * The lock is dropped around rxe_remove() because removal may sleep and
 * may itself take dev_list_lock; each entry is unlinked under the lock
 * first so re-scanning from the head after relocking is safe.
 */
void rxe_remove_all(void)
{
	spin_lock_bh(&dev_list_lock);
	while (!list_empty(&rxe_dev_list)) {
		struct rxe_dev *rxe =
			list_first_entry(&rxe_dev_list, struct rxe_dev, list);

		/* Unlink under the lock, then drop it for the removal. */
		list_del(&rxe->list);
		spin_unlock_bh(&dev_list_lock);
		rxe_remove(rxe);
		spin_lock_bh(&dev_list_lock);
	}
	spin_unlock_bh(&dev_list_lock);
}

static void rxe_port_event(struct rxe_dev *rxe,
			   enum ib_event_type event)
{
@@ -631,10 +616,8 @@ static int rxe_notify(struct notifier_block *not_blk,

	switch (event) {
	case NETDEV_UNREGISTER:
		list_del(&rxe->list);
		ib_device_put(&rxe->ib_dev);
		rxe_remove(rxe);
		return NOTIFY_OK;
		ib_unregister_device_queued(&rxe->ib_dev);
		break;
	case NETDEV_UP:
		rxe_port_up(rxe);
		break;
+14 −10
Original line number Diff line number Diff line
@@ -398,25 +398,27 @@ void *rxe_alloc(struct rxe_pool *pool)
	kref_get(&pool->ref_cnt);
	read_unlock_irqrestore(&pool->pool_lock, flags);

	kref_get(&pool->rxe->ref_cnt);
	if (!ib_device_try_get(&pool->rxe->ib_dev))
		goto out_put_pool;

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_put_pool;
		goto out_cnt;

	elem = kmem_cache_zalloc(pool_cache(pool),
				 (pool->flags & RXE_POOL_ATOMIC) ?
				 GFP_ATOMIC : GFP_KERNEL);
	if (!elem)
		goto out_put_pool;
		goto out_cnt;

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return elem;

out_put_pool:
out_cnt:
	atomic_dec(&pool->num_elem);
	rxe_dev_put(pool->rxe);
	ib_device_put(&pool->rxe->ib_dev);
out_put_pool:
	rxe_pool_put(pool);
	return NULL;
}
@@ -435,19 +437,21 @@ int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
	kref_get(&pool->ref_cnt);
	read_unlock_irqrestore(&pool->pool_lock, flags);

	kref_get(&pool->rxe->ref_cnt);
	if (!ib_device_try_get(&pool->rxe->ib_dev))
		goto out_put_pool;

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_put_pool;
		goto out_cnt;

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return 0;

out_put_pool:
out_cnt:
	atomic_dec(&pool->num_elem);
	rxe_dev_put(pool->rxe);
	ib_device_put(&pool->rxe->ib_dev);
out_put_pool:
	rxe_pool_put(pool);
	return -EINVAL;
}
@@ -464,7 +468,7 @@ void rxe_elem_release(struct kref *kref)
	if (!(pool->flags & RXE_POOL_NO_ALLOC))
		kmem_cache_free(pool_cache(pool), elem);
	atomic_dec(&pool->num_elem);
	rxe_dev_put(pool->rxe);
	ib_device_put(&pool->rxe->ib_dev);
	rxe_pool_put(pool);
}

Loading