Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bae2e81a authored by David S. Miller
Browse files

Merge branch 'concurrent_hash_tables'



Thomas Graf says:

====================
Lockless netlink_lookup() with new concurrent hash table

Netlink sockets are maintained in a hash table to allow efficient lookup
via the port ID for unicast messages. However, lookups currently require
a read lock to be taken. This series adds a new generic, resizable,
scalable, concurrent hash table based on the paper referenced in the first
patch. It then makes use of the new data type to implement lockless
netlink_lookup().

Patch 3/3 to convert nft_hash is included for reference but should be
merged via the netfilter tree. Inclusion in this series is to provide
context for the suggested API.

Against net-next since the initial user of the new hash table is in net/

Changes:
v4-v5:
 - use GFP_KERNEL to alloc Netlink buckets as suggested by Nikolay
   Aleksandrov
 - free nft hash element on removal as spotted by Nikolay Aleksandrov
   and Patrick McHardy
v3-v4:
 - fixed wrong shift assignment placement as spotted by Nikolay Aleksandrov
 - reverted default size of nft_hash to 4 as requested by Patrick McHardy,
   default size for other hash tables remains at 64 if no hint is given
 - fixed copyright as requested by Patrick McHardy
v2-v3:
 - fixed typo in nft_hash_destroy() when passing rhashtable handle
v1-v2:
 - fixed traversal off-by-one as spotted by Tobias Klauser
 - removed unlikely() from BUG_ON() as spotted by Josh Triplett
 - new 3rd patch to convert nft_hash to rhashtable
 - make rhashtable_insert() return void
 - nl_sk_hash_lock must be a mutex
 - fixed wrong name of rht_shrink_below_30()
 - exported symbols rht_grow_above_75() and rht_shrink_below_30()
 - allow table freeing with RCU callback
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents d39a9ffc cfe4a9dd
Loading
Loading
Loading
Loading
+213 −0
Original line number Diff line number Diff line
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper by Josh Triplett, Paul E. McKenney
 * and Jonathan Walpole:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/rculist.h>

/*
 * Hash chain node. Embed one of these in each object to be stored in the
 * table; the containing object is recovered via container_of() (see the
 * rht_entry*() macros below).
 */
struct rhash_head {
	struct rhash_head		*next;
};

/* Mark a node as unlinked (not on any hash chain). */
#define INIT_HASH_HEAD(ptr) ((ptr)->next = NULL)

/*
 * A bucket array of @size chains, allocated with the flexible array
 * member trailing the header. Replaced wholesale (and published via RCU,
 * see struct rhashtable below) when the table is resized.
 */
struct bucket_table {
	size_t				size;
	struct rhash_head __rcu		*buckets[];
};

/* Hash a key of @len bytes with the given @seed (e.g. jhash). */
typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
/* Hash a whole object with the given @seed; used when no fixed-size key exists. */
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed);

struct rhashtable;

/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @hash_rnd: Seed to use while hashing
 * @max_shift: Maximum number of shifts while expanding
 * @hashfn: Function to hash key
 * @obj_hashfn: Function to hash object
 * @grow_decision: If defined, may return true if table should expand
 * @shrink_decision: If defined, may return true if table should shrink
 * @mutex_is_held: Must return true if protecting mutex is held
 *
 * NOTE(review): presumably either (@key_len, @key_offset, @hashfn) or
 * @obj_hashfn must be supplied so objects can be hashed -- confirm against
 * lib/rhashtable.c. @grow_decision/@shrink_decision appear optional (see
 * rht_grow_above_75()/rht_shrink_below_30() declared below as candidates).
 */
struct rhashtable_params {
	size_t			nelem_hint;
	size_t			key_len;
	size_t			key_offset;
	size_t			head_offset;
	u32			hash_rnd;
	size_t			max_shift;
	rht_hashfn_t		hashfn;
	rht_obj_hashfn_t	obj_hashfn;
	bool			(*grow_decision)(const struct rhashtable *ht,
						 size_t new_size);
	bool			(*shrink_decision)(const struct rhashtable *ht,
						   size_t new_size);
	int			(*mutex_is_held)(void);
};

/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table (RCU-protected; readers use rht_dereference_rcu())
 * @nelems: Number of elements in table
 * @shift: Current size (1 << shift)
 * @p: Configuration parameters
 *
 * NOTE(review): @nelems and @shift carry no locking annotations here;
 * presumably they are only updated under the mutex checked by
 * @p.mutex_is_held() -- confirm in lib/rhashtable.c.
 */
struct rhashtable {
	struct bucket_table __rcu	*tbl;
	size_t				nelems;
	size_t				shift;
	struct rhashtable_params	p;
};

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(const struct rhashtable *ht);
#else
static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

/* Initialise @ht from @params; returns 0 on success, negative on error. */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);

/* Compute the bucket hash for a fixed-size key / for a whole object. */
u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len);
u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr);

/*
 * Mutation API. Callers must hold the protecting mutex; the gfp_t flags
 * cover the allocation needed if an insert/remove triggers a resize.
 * remove_pprev unlinks via a caller-supplied previous-pointer slot,
 * avoiding a second chain walk.
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node, gfp_t);
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node, gfp_t);
void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
			     struct rhash_head **pprev, gfp_t flags);

/* Stock grow/shrink policies for rhashtable_params decision callbacks. */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);

/* Explicitly double / halve the bucket table. */
int rhashtable_expand(struct rhashtable *ht, gfp_t flags);
int rhashtable_shrink(struct rhashtable *ht, gfp_t flags);

/*
 * Lookup API: safe under rcu_read_lock(). The _compare variant lets the
 * caller match on arbitrary criteria within the @hash chain.
 */
void *rhashtable_lookup(const struct rhashtable *ht, const void *key);
void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
				bool (*compare)(void *, void *), void *arg);

/* Free the bucket table; does not free the elements themselves. */
void rhashtable_destroy(const struct rhashtable *ht);

/*
 * Dereference an RCU pointer while holding the protecting mutex;
 * lockdep (when enabled) verifies the mutex via lockdep_rht_mutex_is_held().
 */
#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

/* Dereference under rcu_read_lock() OR while holding the protecting mutex. */
#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

/* Internal, use rht_obj() instead */
#define rht_entry(ptr, type, member) container_of(ptr, type, member)
/* As rht_entry(), but maps a NULL @ptr to NULL. */
#define rht_entry_safe(ptr, type, member) \
({ \
	typeof(ptr) __ptr = (ptr); \
	   __ptr ? rht_entry(__ptr, type, member) : NULL; \
})
/* NULL-safe container_of() for an __rcu pointer; uses rcu_dereference_raw(). */
#define rht_entry_safe_rcu(ptr, type, member) \
({ \
	typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \
	__ptr ? container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member) : NULL; \
})

/* Advance to the next containing object on the chain; NULL-safe on @pos. */
#define rht_next_entry_safe(pos, ht, member) \
({ \
	pos ? rht_entry_safe(rht_dereference((pos)->member.next, ht), \
			     typeof(*(pos)), member) : NULL; \
})

/**
 * rht_for_each - iterate over hash chain
 * @pos:	&struct rhash_head to use as a loop cursor.
 * @head:	head of the hash chain (struct rhash_head *)
 * @ht:		pointer to your struct rhashtable
 *
 * Caller must hold the protecting mutex (uses rht_dereference()).
 */
#define rht_for_each(pos, head, ht) \
	for (pos = rht_dereference(head, ht); \
	     pos; \
	     pos = rht_dereference((pos)->next, ht))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @pos:	type * to use as a loop cursor.
 * @head:	head of the hash chain (struct rhash_head *)
 * @ht:		pointer to your struct rhashtable
 * @member:	name of the rhash_head within the hashable struct.
 *
 * Caller must hold the protecting mutex (uses rht_dereference()).
 */
#define rht_for_each_entry(pos, head, ht, member) \
	for (pos = rht_entry_safe(rht_dereference(head, ht), \
				   typeof(*(pos)), member); \
	     pos; \
	     pos = rht_next_entry_safe(pos, ht, member))

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @pos:	type * to use as a loop cursor.
 * @n:		type * to use for temporary next object storage
 * @head:	head of the hash chain (struct rhash_head *)
 * @ht:		pointer to your struct rhashtable
 * @member:	name of the rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(pos, n, head, ht, member)		\
	for (pos = rht_entry_safe(rht_dereference(head, ht), \
				  typeof(*(pos)), member), \
	     n = rht_next_entry_safe(pos, ht, member); \
	     pos; \
	     pos = n, \
	     n = rht_next_entry_safe(pos, ht, member))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos:	&struct rhash_head to use as a loop cursor.
 * @head:	head of the hash chain (struct rhash_head *)
 * @ht:		pointer to your struct rhashtable
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, head, ht) \
	for (pos = rht_dereference_rcu(head, ht); \
	     pos; \
	     pos = rht_dereference_rcu((pos)->next, ht))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @pos:	type * to use as a loop cursor.
 * @head:	head of the hash chain (struct rhash_head *)
 * @member:	name of the rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 *
 * Note: unlike the other iterators this one takes no @ht argument; it
 * dereferences via rcu_dereference_raw() inside rht_entry_safe_rcu().
 */
#define rht_for_each_entry_rcu(pos, head, member) \
	for (pos = rht_entry_safe_rcu(head, typeof(*(pos)), member); \
	     pos; \
	     pos = rht_entry_safe_rcu((pos)->member.next, \
				      typeof(*(pos)), member))

#endif /* _LINUX_RHASHTABLE_H */
+8 −0
Original line number Diff line number Diff line
@@ -1550,6 +1550,14 @@ config TEST_STRING_HELPERS
config TEST_KSTRTOX
	tristate "Test kstrto*() family of functions at runtime"

config TEST_RHASHTABLE
	bool "Perform selftest on resizable hash table"
	default n
	help
	  Enable this option to test the rhashtable functions at boot.

	  If unsure, say N.

endmenu # runtime tests

config PROVIDE_OHCI1394_DMA_INIT
+1 −1
Original line number Diff line number Diff line
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
	 percpu-refcount.o percpu_ida.o hash.o
	 percpu-refcount.o percpu_ida.o hash.o rhashtable.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += kstrtox.o

lib/rhashtable.c

0 → 100644
+797 −0

File added.

Preview size limit exceeded, changes collapsed.

+55 −236
Original line number Diff line number Diff line
@@ -15,209 +15,40 @@
#include <linux/log2.h>
#include <linux/jhash.h>
#include <linux/netlink.h>
#include <linux/vmalloc.h>
#include <linux/rhashtable.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>

#define NFT_HASH_MIN_SIZE	4UL

struct nft_hash {
	struct nft_hash_table __rcu	*tbl;
};

struct nft_hash_table {
	unsigned int			size;
	struct nft_hash_elem __rcu	*buckets[];
};
/* We target a hash table size of 4, element hint is 75% of final size */
#define NFT_HASH_ELEMENT_HINT 3

struct nft_hash_elem {
	struct nft_hash_elem __rcu	*next;
	struct rhash_head		node;
	struct nft_data			key;
	struct nft_data			data[];
};

#define nft_hash_for_each_entry(i, head) \
	for (i = nft_dereference(head); i != NULL; i = nft_dereference(i->next))
#define nft_hash_for_each_entry_rcu(i, head) \
	for (i = rcu_dereference(head); i != NULL; i = rcu_dereference(i->next))

static u32 nft_hash_rnd __read_mostly;
static bool nft_hash_rnd_initted __read_mostly;

static unsigned int nft_hash_data(const struct nft_data *data,
				  unsigned int hsize, unsigned int len)
{
	unsigned int h;

	h = jhash(data->data, len, nft_hash_rnd);
	return h & (hsize - 1);
}

static bool nft_hash_lookup(const struct nft_set *set,
			    const struct nft_data *key,
			    struct nft_data *data)
{
	const struct nft_hash *priv = nft_set_priv(set);
	const struct nft_hash_table *tbl = rcu_dereference(priv->tbl);
	const struct rhashtable *priv = nft_set_priv(set);
	const struct nft_hash_elem *he;
	unsigned int h;

	h = nft_hash_data(key, tbl->size, set->klen);
	nft_hash_for_each_entry_rcu(he, tbl->buckets[h]) {
		if (nft_data_cmp(&he->key, key, set->klen))
			continue;
		if (set->flags & NFT_SET_MAP)
	he = rhashtable_lookup(priv, key);
	if (he && set->flags & NFT_SET_MAP)
		nft_data_copy(data, he->data);
		return true;
	}
	return false;
}

static void nft_hash_tbl_free(const struct nft_hash_table *tbl)
{
	kvfree(tbl);
}

static unsigned int nft_hash_tbl_size(unsigned int nelem)
{
	return max(roundup_pow_of_two(nelem * 4 / 3), NFT_HASH_MIN_SIZE);
}

static struct nft_hash_table *nft_hash_tbl_alloc(unsigned int nbuckets)
{
	struct nft_hash_table *tbl;
	size_t size;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kzalloc(size, GFP_KERNEL | __GFP_REPEAT | __GFP_NOWARN);
	if (tbl == NULL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;
	tbl->size = nbuckets;

	return tbl;
}

static void nft_hash_chain_unzip(const struct nft_set *set,
				 const struct nft_hash_table *ntbl,
				 struct nft_hash_table *tbl, unsigned int n)
{
	struct nft_hash_elem *he, *last, *next;
	unsigned int h;

	he = nft_dereference(tbl->buckets[n]);
	if (he == NULL)
		return;
	h = nft_hash_data(&he->key, ntbl->size, set->klen);

	/* Find last element of first chain hashing to bucket h */
	last = he;
	nft_hash_for_each_entry(he, he->next) {
		if (nft_hash_data(&he->key, ntbl->size, set->klen) != h)
			break;
		last = he;
	}

	/* Unlink first chain from the old table */
	RCU_INIT_POINTER(tbl->buckets[n], last->next);

	/* If end of chain reached, done */
	if (he == NULL)
		return;

	/* Find first element of second chain hashing to bucket h */
	next = NULL;
	nft_hash_for_each_entry(he, he->next) {
		if (nft_hash_data(&he->key, ntbl->size, set->klen) != h)
			continue;
		next = he;
		break;
	}

	/* Link the two chains */
	RCU_INIT_POINTER(last->next, next);
}

static int nft_hash_tbl_expand(const struct nft_set *set, struct nft_hash *priv)
{
	struct nft_hash_table *tbl = nft_dereference(priv->tbl), *ntbl;
	struct nft_hash_elem *he;
	unsigned int i, h;
	bool complete;

	ntbl = nft_hash_tbl_alloc(tbl->size * 2);
	if (ntbl == NULL)
		return -ENOMEM;

	/* Link new table's buckets to first element in the old table
	 * hashing to the new bucket.
	 */
	for (i = 0; i < ntbl->size; i++) {
		h = i < tbl->size ? i : i - tbl->size;
		nft_hash_for_each_entry(he, tbl->buckets[h]) {
			if (nft_hash_data(&he->key, ntbl->size, set->klen) != i)
				continue;
			RCU_INIT_POINTER(ntbl->buckets[i], he);
			break;
		}
	}

	/* Publish new table */
	rcu_assign_pointer(priv->tbl, ntbl);

	/* Unzip interleaved hash chains */
	do {
		/* Wait for readers to use new table/unzipped chains */
		synchronize_rcu();

		complete = true;
		for (i = 0; i < tbl->size; i++) {
			nft_hash_chain_unzip(set, ntbl, tbl, i);
			if (tbl->buckets[i] != NULL)
				complete = false;
		}
	} while (!complete);

	nft_hash_tbl_free(tbl);
	return 0;
}

static int nft_hash_tbl_shrink(const struct nft_set *set, struct nft_hash *priv)
{
	struct nft_hash_table *tbl = nft_dereference(priv->tbl), *ntbl;
	struct nft_hash_elem __rcu **pprev;
	unsigned int i;

	ntbl = nft_hash_tbl_alloc(tbl->size / 2);
	if (ntbl == NULL)
		return -ENOMEM;

	for (i = 0; i < ntbl->size; i++) {
		ntbl->buckets[i] = tbl->buckets[i];

		for (pprev = &ntbl->buckets[i]; *pprev != NULL;
		     pprev = &nft_dereference(*pprev)->next)
			;
		RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
	}

	/* Publish new table */
	rcu_assign_pointer(priv->tbl, ntbl);
	synchronize_rcu();

	nft_hash_tbl_free(tbl);
	return 0;
	return !!he;
}

static int nft_hash_insert(const struct nft_set *set,
			   const struct nft_set_elem *elem)
{
	struct nft_hash *priv = nft_set_priv(set);
	struct nft_hash_table *tbl = nft_dereference(priv->tbl);
	struct rhashtable *priv = nft_set_priv(set);
	struct nft_hash_elem *he;
	unsigned int size, h;
	unsigned int size;

	if (elem->flags != 0)
		return -EINVAL;
@@ -234,13 +65,7 @@ static int nft_hash_insert(const struct nft_set *set,
	if (set->flags & NFT_SET_MAP)
		nft_data_copy(he->data, &elem->data);

	h = nft_hash_data(&he->key, tbl->size, set->klen);
	RCU_INIT_POINTER(he->next, tbl->buckets[h]);
	rcu_assign_pointer(tbl->buckets[h], he);

	/* Expand table when exceeding 75% load */
	if (set->nelems + 1 > tbl->size / 4 * 3)
		nft_hash_tbl_expand(set, priv);
	rhashtable_insert(priv, &he->node, GFP_KERNEL);

	return 0;
}
@@ -257,36 +82,31 @@ static void nft_hash_elem_destroy(const struct nft_set *set,
static void nft_hash_remove(const struct nft_set *set,
			    const struct nft_set_elem *elem)
{
	struct nft_hash *priv = nft_set_priv(set);
	struct nft_hash_table *tbl = nft_dereference(priv->tbl);
	struct nft_hash_elem *he, __rcu **pprev;
	struct rhashtable *priv = nft_set_priv(set);
	struct rhash_head *he, __rcu **pprev;

	pprev = elem->cookie;
	he = nft_dereference((*pprev));
	he = rht_dereference((*pprev), priv);

	rhashtable_remove_pprev(priv, he, pprev, GFP_KERNEL);

	RCU_INIT_POINTER(*pprev, he->next);
	synchronize_rcu();
	kfree(he);

	/* Shrink table beneath 30% load */
	if (set->nelems - 1 < tbl->size * 3 / 10 &&
	    tbl->size > NFT_HASH_MIN_SIZE)
		nft_hash_tbl_shrink(set, priv);
}

static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem)
{
	const struct nft_hash *priv = nft_set_priv(set);
	const struct nft_hash_table *tbl = nft_dereference(priv->tbl);
	struct nft_hash_elem __rcu * const *pprev;
	const struct rhashtable *priv = nft_set_priv(set);
	const struct bucket_table *tbl = rht_dereference_rcu(priv->tbl, priv);
	struct rhash_head __rcu * const *pprev;
	struct nft_hash_elem *he;
	unsigned int h;
	u32 h;

	h = nft_hash_data(&elem->key, tbl->size, set->klen);
	h = rhashtable_hashfn(priv, &elem->key, set->klen);
	pprev = &tbl->buckets[h];
	nft_hash_for_each_entry(he, tbl->buckets[h]) {
	rht_for_each_entry_rcu(he, tbl->buckets[h], node) {
		if (nft_data_cmp(&he->key, &elem->key, set->klen)) {
			pprev = &he->next;
			pprev = &he->node.next;
			continue;
		}

@@ -302,14 +122,15 @@ static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem)
static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
			  struct nft_set_iter *iter)
{
	const struct nft_hash *priv = nft_set_priv(set);
	const struct nft_hash_table *tbl = nft_dereference(priv->tbl);
	const struct rhashtable *priv = nft_set_priv(set);
	const struct bucket_table *tbl;
	const struct nft_hash_elem *he;
	struct nft_set_elem elem;
	unsigned int i;

	tbl = rht_dereference_rcu(priv->tbl, priv);
	for (i = 0; i < tbl->size; i++) {
		nft_hash_for_each_entry(he, tbl->buckets[i]) {
		rht_for_each_entry_rcu(he, tbl->buckets[i], node) {
			if (iter->count < iter->skip)
				goto cont;

@@ -329,48 +150,46 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,

static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
{
	return sizeof(struct nft_hash);
	return sizeof(struct rhashtable);
}

static int lockdep_nfnl_lock_is_held(void)
{
	return lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES);
}

static int nft_hash_init(const struct nft_set *set,
			 const struct nft_set_desc *desc,
			 const struct nlattr * const tb[])
{
	struct nft_hash *priv = nft_set_priv(set);
	struct nft_hash_table *tbl;
	unsigned int size;
	struct rhashtable *priv = nft_set_priv(set);
	struct rhashtable_params params = {
		.nelem_hint = desc->size ? : NFT_HASH_ELEMENT_HINT,
		.head_offset = offsetof(struct nft_hash_elem, node),
		.key_offset = offsetof(struct nft_hash_elem, key),
		.key_len = set->klen,
		.hashfn = jhash,
		.grow_decision = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
		.mutex_is_held = lockdep_nfnl_lock_is_held,
	};

	if (unlikely(!nft_hash_rnd_initted)) {
		get_random_bytes(&nft_hash_rnd, 4);
		nft_hash_rnd_initted = true;
	}

	size = NFT_HASH_MIN_SIZE;
	if (desc->size)
		size = nft_hash_tbl_size(desc->size);

	tbl = nft_hash_tbl_alloc(size);
	if (tbl == NULL)
		return -ENOMEM;
	RCU_INIT_POINTER(priv->tbl, tbl);
	return 0;
	return rhashtable_init(priv, &params);
}

static void nft_hash_destroy(const struct nft_set *set)
{
	const struct nft_hash *priv = nft_set_priv(set);
	const struct nft_hash_table *tbl = nft_dereference(priv->tbl);
	const struct rhashtable *priv = nft_set_priv(set);
	const struct bucket_table *tbl;
	struct nft_hash_elem *he, *next;
	unsigned int i;

	for (i = 0; i < tbl->size; i++) {
		for (he = nft_dereference(tbl->buckets[i]); he != NULL;
		     he = next) {
			next = nft_dereference(he->next);
	tbl = rht_dereference(priv->tbl, priv);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_safe(he, next, tbl->buckets[i], priv, node)
			nft_hash_elem_destroy(set, he);
		}
	}
	kfree(tbl);

	rhashtable_destroy(priv);
}

static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
@@ -383,8 +202,8 @@ static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
		esize += FIELD_SIZEOF(struct nft_hash_elem, data[0]);

	if (desc->size) {
		est->size = sizeof(struct nft_hash) +
			    nft_hash_tbl_size(desc->size) *
		est->size = sizeof(struct rhashtable) +
			    roundup_pow_of_two(desc->size * 4 / 3) *
			    sizeof(struct nft_hash_elem *) +
			    desc->size * esize;
	} else {
Loading