
Commit 3b3a5b0a authored by Willem de Bruijn, committed by David S. Miller

packet: rollover huge flows before small flows



Migrate flows from one socket to another in the fanout group not
only when the socket is full. Start migrating huge flows early, to
divert possible 4-tuple attacks without affecting normal traffic.

Introduce fanout_flow_is_huge(). This detects huge flows, defined as
flows taking up more than half the load. It does so cheaply, by
storing the rxhashes of the N most recent packets. If over half of
these match the rxhash of the current packet, the flow is classified
as huge and is rolled over as soon as its socket runs low on room,
rather than only when it is completely full. Because detection keys
on the flow hash, this only protects against 4-tuple attacks. N is
chosen to fit all data in a single cache line.
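
To make the sampling trick concrete, here is a minimal userspace C
sketch of the same idea (illustrative only, not the kernel code; HLEN,
flow_is_huge() and the simulated traffic mix are made up for the demo):

/* Keep the hashes of recent packets in a small fixed array, count how
 * many match the current packet's hash, and overwrite a random slot.
 * A flow is "huge" when it matches more than half of the samples.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define HLEN 16			/* e.g. 64-byte cache line / sizeof(u32) */

static uint32_t history[HLEN];

static bool flow_is_huge(uint32_t rxhash)
{
	int i, count = 0;

	for (i = 0; i < HLEN; i++)
		if (history[i] == rxhash)
			count++;

	/* Random replacement keeps the sample cheap and stateless
	 * beyond the fixed array itself. */
	history[rand() % HLEN] = rxhash;

	return count > HLEN / 2;
}

int main(void)
{
	int i, flagged = 0;

	/* 90% of packets share one hash; the rest are scattered. */
	for (i = 0; i < 100000; i++) {
		uint32_t h = (i % 10) ? 0xdeadbeef : (uint32_t)rand();

		if (flow_is_huge(h))
			flagged++;
	}
	printf("flagged %d of 100000 packets as part of a huge flow\n",
	       flagged);
	return 0;
}

The dominant flow quickly fills most history slots, so its packets get
flagged, while the scattered packets rarely see more than a few
matching samples.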

Tested:
  Ran bench_rollover for 10 sec with 1.5 Mpps of single flow input.

    lpbb5:/export/hda3/willemb# ./bench_rollover -l 1000 -r -s
    cpu         rx       rx.k     drop.k   rollover     r.huge   r.failed
      0         14         14          0          0          0          0
      1         20         20          0          0          0          0
      2         16         16          0          0          0          0
      3    6168824    6168824          0    4867721    4867721          0
      4    4867741    4867741          0          0          0          0
      5         12         12          0          0          0          0
      6         15         15          0          0          0          0
      7         17         17          0          0          0          0

Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2ccdbaa6
net/packet/af_packet.c  +22 −3
@@ -1341,6 +1341,20 @@ static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
 	return x;
 }
 
+static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
+{
+	u32 rxhash;
+	int i, count = 0;
+
+	rxhash = skb_get_hash(skb);
+	for (i = 0; i < ROLLOVER_HLEN; i++)
+		if (po->rollover->history[i] == rxhash)
+			count++;
+
+	po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
+	return count > (ROLLOVER_HLEN >> 1);
+}
+
 static unsigned int fanout_demux_hash(struct packet_fanout *f,
 				      struct sk_buff *skb,
 				      unsigned int num)
@@ -1381,11 +1395,16 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
 					  unsigned int num)
 {
 	struct packet_sock *po, *po_next;
-	unsigned int i, j;
+	unsigned int i, j, room;
 
 	po = pkt_sk(f->arr[idx]);
-	if (try_self && packet_rcv_has_room(po, skb) != ROOM_NONE)
-		return idx;
+
+	if (try_self) {
+		room = packet_rcv_has_room(po, skb);
+		if (room == ROOM_NORMAL ||
+		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
+			return idx;
+	}
 
 	i = j = min_t(int, po->rollover->sock, num - 1);
 	do {
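
The rewritten branch keeps a packet on its mapped socket in exactly two
cases: the queue has normal room, or room is low but the flow is small.
A standalone C sketch of that decision table (should_stay() is an
illustrative name, not a kernel identifier; the enum mirrors the
kernel's ROOM_* values):

#include <stdbool.h>
#include <stdio.h>

enum room { ROOM_NONE, ROOM_LOW, ROOM_NORMAL };

/* Mirror of the try_self test: stay unless room ran out, or it is
 * getting low and this flow is the one hogging it. */
static bool should_stay(enum room room, bool huge)
{
	return room == ROOM_NORMAL || (room == ROOM_LOW && !huge);
}

int main(void)
{
	static const char * const names[] = {
		"ROOM_NONE", "ROOM_LOW", "ROOM_NORMAL"
	};
	int r, huge;

	for (r = ROOM_NONE; r <= ROOM_NORMAL; r++)
		for (huge = 0; huge <= 1; huge++)
			printf("%-11s huge=%d -> %s\n", names[r], huge,
			       should_stay(r, huge) ? "stay" : "rollover");
	return 0;
}

Note that a huge flow with ROOM_NORMAL still stays put: rollover kicks
in only once the queue actually starts to fill.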
net/packet/internal.h  +2 −0
@@ -89,6 +89,8 @@ struct packet_fanout {
 
 struct packet_rollover {
 	int			sock;
+#define ROLLOVER_HLEN	(L1_CACHE_BYTES / sizeof(u32))
+	u32			history[ROLLOVER_HLEN] ____cacheline_aligned;
 } ____cacheline_aligned_in_smp;
 
 struct packet_sock {
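
ROLLOVER_HLEN is sized so the whole history array occupies a single
cache line, keeping the per-packet scan cheap. Assuming a 64-byte L1
cache line (a common value; the kernel's L1_CACHE_BYTES is
per-architecture), the array holds 16 u32 hashes and the huge-flow
threshold works out to more than 8 matches:

#include <stdint.h>
#include <stdio.h>

#define L1_CACHE_BYTES	64	/* assumption; per-arch in the kernel */
#define ROLLOVER_HLEN	(L1_CACHE_BYTES / sizeof(uint32_t))

int main(void)
{
	/* 64 / 4 = 16 samples; huge when count > 16 >> 1 = 8 */
	printf("ROLLOVER_HLEN = %zu, huge when count > %zu\n",
	       (size_t)ROLLOVER_HLEN, (size_t)(ROLLOVER_HLEN >> 1));
	return 0;
}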