Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7f7e23df authored by Eric Dumazet, committed by Sasha Levin
Browse files

inet: use bigger hash table for IP ID generation



commit aa6dd211e4b1dde9d5dc25d699d35f789ae7eeba upstream.

In commit 73f156a6 ("inetpeer: get rid of ip_id_count")
I used a very small hash table that could be abused
by patient attackers to reveal sensitive information.

Switch to a dynamic sizing, depending on RAM size.

Typical big hosts will now use 128x more storage (2 MB)
to get a similar increase in security and reduction
of hash collisions.

As a bonus, use of alloc_large_system_hash() spreads
allocated memory among all NUMA nodes.

Fixes: 73f156a6 ("inetpeer: get rid of ip_id_count")
Reported-by: Amit Klein <aksecurity@gmail.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Willy Tarreau <w@1wt.eu>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent d42c3ebb
Loading
Loading
Loading
Loading
+28 −14
Original line number Diff line number Diff line
@@ -70,6 +70,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
@@ -470,8 +471,10 @@ static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}

#define IP_IDENTS_SZ 2048u

/* Hash tables of size 2048..262144 depending on RAM size.
 * Each bucket uses 8 bytes.
 */
static u32 ip_idents_mask __read_mostly;
static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;

@@ -481,12 +484,16 @@ static u32 *ip_tstamps __read_mostly;
 */
u32 ip_idents_reserve(u32 hash, int segs)
{
	u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
	atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
	u32 old = READ_ONCE(*p_tstamp);
	u32 now = (u32)jiffies;
	u32 bucket, old, now = (u32)jiffies;
	atomic_t *p_id;
	u32 *p_tstamp;
	u32 delta = 0;

	bucket = hash & ip_idents_mask;
	p_tstamp = ip_tstamps + bucket;
	p_id = ip_idents + bucket;
	old = READ_ONCE(*p_tstamp);

	if (old != now && cmpxchg(p_tstamp, old, now) == old)
		delta = prandom_u32_max(now - old);

@@ -3197,18 +3204,25 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;

int __init ip_rt_init(void)
{
	void *idents_hash;
	int cpu;

	ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
				  GFP_KERNEL);
	if (!ip_idents)
		panic("IP: failed to allocate ip_idents\n");
	/* For modern hosts, this will use 2 MB of memory */
	idents_hash = alloc_large_system_hash("IP idents",
					      sizeof(*ip_idents) + sizeof(*ip_tstamps),
					      0,
					      16, /* one bucket per 64 KB */
					      HASH_ZERO,
					      NULL,
					      &ip_idents_mask,
					      2048,
					      256*1024);

	ip_idents = idents_hash;

	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
	prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));

	ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
	if (!ip_tstamps)
		panic("IP: failed to allocate ip_tstamps\n");
	ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);