Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 34498825 authored by Patrick McHardy's avatar Patrick McHardy Committed by David S. Miller
Browse files

[NETFILTER]: non-power-of-two jhash optimizations



Apply Eric Dumazet's jhash optimizations where applicable. Quoting Eric:

Thanks to jhash, the hash value uses the full 32 bits. Instead of returning
hash % size (implying a divide) we return the high 32 bits of the 64-bit
product (hash * size), which gives a result in the range [0, size-1] with
the same hash distribution.

On most cpus, a multiply is less expensive than a divide, by an order
of magnitude.

Signed-off-by: default avatarPatrick McHardy <kaber@trash.net>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 7b21e09d
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -273,7 +273,7 @@ clusterip_hashfn(const struct sk_buff *skb,
	}

	/* node numbers are 1..n, not 0..n */
	return (hashval % config->num_total_nodes) + 1;
	return (((u64)hashval * config->num_total_nodes) >> 32) + 1;
}

static inline int
+7 −3
Original line number Diff line number Diff line
@@ -77,10 +77,13 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);
/*
 * Hash a conntrack tuple by its original source address, port and
 * protocol number to pick a bucket in the NAT by-source hash table.
 *
 * The 32-bit jhash value is mapped onto [0, nf_nat_htable_size - 1]
 * by taking the high 32 bits of the 64-bit product (hash * size)
 * instead of hash % size: a multiply is much cheaper than a divide
 * on most CPUs and preserves the hash distribution for
 * non-power-of-two table sizes.
 */
static inline unsigned int
hash_by_src(const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	/* Original src, to ensure we map it consistently if poss. */
	hash = jhash_3words((__force u32)tuple->src.u3.ip,
			    (__force u32)tuple->src.u.all,
			    tuple->dst.protonum, 0);
	return ((u64)hash * nf_nat_htable_size) >> 32;
}

/* Is this tuple already taken? (not by us) */
@@ -211,7 +214,8 @@ find_best_ips_proto(struct nf_conntrack_tuple *tuple,
	maxip = ntohl(range->max_ip);
	j = jhash_2words((__force u32)tuple->src.u3.ip,
			 (__force u32)tuple->dst.u3.ip, 0);
	*var_ipp = htonl(minip + j % (maxip - minip + 1));
	j = ((u64)j * (maxip - minip + 1)) >> 32;
	*var_ipp = htonl(minip + j);
}

/* Manipulate the tuple into the range given.  For NF_INET_POST_ROUTING,
+1 −1
Original line number Diff line number Diff line
@@ -81,7 +81,7 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
		   ((__force __u16)tuple->src.u.all << 16) |
		    (__force __u16)tuple->dst.u.all);

	return jhash_2words(a, b, rnd) % size;
	return ((u64)jhash_2words(a, b, rnd) * size) >> 32;
}

static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
+5 −3
Original line number Diff line number Diff line
@@ -73,15 +73,17 @@ static void nf_ct_expectation_timed_out(unsigned long ul_expect)

/*
 * Hash an expectation's destination tuple (address, l3/l4 protocol
 * numbers and destination port) into the expectation hash table.
 *
 * The random seed is initialized lazily on first use. The 32-bit
 * jhash2 value is scaled onto [0, nf_ct_expect_hsize - 1] via the
 * high 32 bits of the 64-bit product (hash * size) rather than a
 * modulo, avoiding a divide while keeping the same distribution for
 * non-power-of-two table sizes.
 */
static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	if (unlikely(!nf_ct_expect_hash_rnd_initted)) {
		get_random_bytes(&nf_ct_expect_hash_rnd, 4);
		nf_ct_expect_hash_rnd_initted = 1;
	}

	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hash_rnd);
	return ((u64)hash * nf_ct_expect_hsize) >> 32;
}

struct nf_conntrack_expect *