Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6b6f302c authored by Thomas Graf's avatar Thomas Graf Committed by David S. Miller
Browse files

rhashtable: Add rhashtable_free_and_destroy()



rhashtable_destroy() variant which stops rehashes, iterates over
the table and calls a callback to release resources.

Avoids need for nft_hash to embed rhashtable internals and allows to
get rid of the being_destroyed flag. It also saves a 2nd mutex
lock upon destruction.

Also fixes an RCU lockdep splat on nft set destruction due to
calling rht_for_each_entry_safe() without holding bucket locks.
Open-code this loop, as we need to know that no mutations may occur
in parallel.

Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b5e2c150
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -136,12 +136,10 @@ struct rhashtable_params {
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 * @lock: Spin lock to protect walker list
 * @being_destroyed: True if table is set up for destruction
 */
struct rhashtable {
	struct bucket_table __rcu	*tbl;
	atomic_t			nelems;
	bool                            being_destroyed;
	unsigned int			key_len;
	unsigned int			elasticity;
	struct rhashtable_params	p;
@@ -334,6 +332,9 @@ int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);

void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);

#define rht_dereference(p, ht) \
+39 −10
Original line number Diff line number Diff line
@@ -359,8 +359,6 @@ static void rht_deferred_worker(struct work_struct *work)

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);
@@ -372,7 +370,6 @@ static void rht_deferred_worker(struct work_struct *work)

	err = rhashtable_rehash_table(ht);

unlock:
	mutex_unlock(&ht->mutex);

	if (err)
@@ -783,21 +780,53 @@ int rhashtable_init(struct rhashtable *ht,
EXPORT_SYMBOL_GPL(rhashtable_init);

/**
 * rhashtable_destroy - destroy hash table
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU-protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible that no further write operations
 * occurs in parallel.
 */
void rhashtable_destroy(struct rhashtable *ht)
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	ht->being_destroyed = true;
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				free_fn(rht_obj(ht, pos), arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Equivalent to rhashtable_free_and_destroy() with no per-element
 * callback: stops an eventual async resize and frees the bucket
 * array without touching the elements themselves.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	/* Fixed: 'return expr;' in a void function is an ISO C constraint
	 * violation (C11 6.8.6.4p1); call the helper without 'return'.
	 */
	rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
+7 −18
Original line number Diff line number Diff line
@@ -188,26 +188,15 @@ static int nft_hash_init(const struct nft_set *set,
	return rhashtable_init(priv, &params);
}

static void nft_hash_destroy(const struct nft_set *set)
static void nft_free_element(void *ptr, void *arg)
{
	struct rhashtable *priv = nft_set_priv(set);
	const struct bucket_table *tbl;
	struct nft_hash_elem *he;
	struct rhash_head *pos, *next;
	unsigned int i;

	/* Stop an eventual async resizing */
	priv->being_destroyed = true;
	mutex_lock(&priv->mutex);

	tbl = rht_dereference(priv->tbl, priv);
	for (i = 0; i < tbl->size; i++) {
		rht_for_each_entry_safe(he, pos, next, tbl, i, node)
			nft_hash_elem_destroy(set, he);
	nft_hash_elem_destroy((const struct nft_set *)arg, ptr);
}
	mutex_unlock(&priv->mutex);

	rhashtable_destroy(priv);
/* Tear down the set's backing rhashtable, invoking nft_free_element()
 * for every stored element.  The const qualifier is cast away only
 * because the rhashtable free_fn callback takes a plain void *arg;
 * nft_free_element() casts it straight back to const struct nft_set *.
 */
static void nft_hash_destroy(const struct nft_set *set)
{
	rhashtable_free_and_destroy(nft_set_priv(set), nft_free_element,
				    (void *)set);
}

static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,