
Commit 6aebd940 authored by Herbert Xu, committed by David S. Miller

rhashtable: Remove shift from bucket_table



Keeping both size and shift is silly.  We only need one.
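Since every table is allocated with a power-of-two number of buckets, shift is always recoverable as ilog2(size), and any bound check on shift maps directly onto a bound check on size. A minimal userspace sketch of that equivalence (not part of the patch; ilog2() here is a stand-in for the kernel helper of the same name):

	#include <assert.h>
	#include <stdio.h>

	/* Userspace stand-in for the kernel's ilog2(): floor(log2(n)). */
	static unsigned int ilog2(unsigned int n)
	{
		unsigned int shift = 0;

		while (n >>= 1)
			shift++;
		return shift;
	}

	int main(void)
	{
		unsigned int max_shift = 16;	/* illustrative bound */

		for (unsigned int shift = 1; shift < 24; shift++) {
			unsigned int size = 1U << shift;	/* bucket counts are powers of two */

			/* shift is redundant: it can always be recomputed from size... */
			assert(ilog2(size) == shift);
			/* ...and bound checks on shift translate directly to checks on size. */
			assert((shift < max_shift) == (size < (1U << max_shift)));
		}
		printf("shift == ilog2(size) for all power-of-two sizes\n");
		return 0;
	}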

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a61bfa65
include/linux/rhashtable.h  +0 −2
@@ -51,7 +51,6 @@ struct rhash_head {
  * @size: Number of hash buckets
  * @rehash: Current bucket being rehashed
  * @hash_rnd: Random seed to fold into hash
- * @shift: Current size (1 << shift)
  * @locks_mask: Mask to apply before accessing locks[]
  * @locks: Array of spinlocks protecting individual buckets
  * @walkers: List of active walkers
@@ -63,7 +62,6 @@ struct bucket_table {
 	unsigned int		size;
 	unsigned int		rehash;
 	u32			hash_rnd;
-	u32			shift;
 	unsigned int		locks_mask;
 	spinlock_t		*locks;
 	struct list_head	walkers;
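For illustration, here is how the trimmed struct reads after this hunk, as a compilable userspace sketch (kernel types replaced with stand-ins, trailing fields elided):

	/* Userspace stand-ins so the trimmed struct compiles outside the kernel. */
	typedef unsigned int u32;
	typedef struct { int dummy; } spinlock_t;
	struct list_head { struct list_head *next, *prev; };

	/* bucket_table as it reads after this patch: table geometry is
	 * carried by @size alone, which is always a power of two. */
	struct bucket_table {
		unsigned int		size;
		unsigned int		rehash;
		u32			hash_rnd;
		unsigned int		locks_mask;
		spinlock_t		*locks;
		struct list_head	walkers;
		/* remaining fields elided; unchanged by this patch */
	};

	int main(void) { return 0; }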
lib/rhashtable.c  +2 −3
@@ -162,7 +162,6 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 		return NULL;
 
 	tbl->size = nbuckets;
-	tbl->shift = ilog2(nbuckets);
 
 	if (alloc_bucket_locks(ht, tbl) < 0) {
 		bucket_table_free(tbl);
@@ -189,7 +188,7 @@ static bool rht_grow_above_75(const struct rhashtable *ht,
 {
 	/* Expand table when exceeding 75% load */
 	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
-	       (!ht->p.max_shift || tbl->shift < ht->p.max_shift);
+	       (!ht->p.max_shift || tbl->size < (1 << ht->p.max_shift));
 }
 
 /**
@@ -202,7 +201,7 @@ static bool rht_shrink_below_30(const struct rhashtable *ht,
 {
 	/* Shrink table beneath 30% load */
 	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
-	       tbl->shift > ht->p.min_shift;
+	       tbl->size > (1 << ht->p.min_shift);
 }
 
 static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
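To see the rewritten predicates in action, here is a hedged userspace sketch of the two load-factor checks as they read after this patch (atomic_read() and the rhashtable structs replaced with plain integers for illustration; the parameter names mirror ht->p):

	#include <stdbool.h>
	#include <stdio.h>

	/* Plain-integer stand-ins for the rhashtable parameters the checks use. */
	struct params { unsigned int min_shift, max_shift; };

	/* Expand when load exceeds 75%, unless size already hit 1 << max_shift. */
	static bool grow_above_75(unsigned int nelems, unsigned int size,
				  const struct params *p)
	{
		return nelems > (size / 4 * 3) &&
		       (!p->max_shift || size < (1U << p->max_shift));
	}

	/* Shrink when load drops below 30%, unless size is already at 1 << min_shift. */
	static bool shrink_below_30(unsigned int nelems, unsigned int size,
				    const struct params *p)
	{
		return nelems < (size * 3 / 10) &&
		       size > (1U << p->min_shift);
	}

	int main(void)
	{
		struct params p = { .min_shift = 4, .max_shift = 16 };

		/* 64 buckets: 49 elements crosses the 75% threshold (64 / 4 * 3 == 48). */
		printf("grow at 49/64:   %d\n", grow_above_75(49, 64, &p));
		/* 64 buckets: 18 elements is beneath the 30% threshold (64 * 3 / 10 == 19). */
		printf("shrink at 18/64: %d\n", shrink_below_30(18, 64, &p));
		return 0;
	}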