
Commit 186142d8 authored by Jason A. Donenfeld, committed by Lee Jones

UPSTREAM: wireguard: queueing: use saner cpu selection wrapping



Using `% nr_cpumask_bits` is slow and complicated, and not totally
robust toward dynamic changes to CPU topologies. Rather than storing the
next CPU in the round-robin, just store the last one, and also return
that value. This simplifies the loop drastically into a much more common
pattern.

Bug: 254441685
Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
Cc: stable@vger.kernel.org
Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Tested-by: Manuel Leiner <manuel.leiner@gmx.de>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 7387943fa35516f6f8017a3b0e9ce48a3bef9faa)
Signed-off-by: Lee Jones <joneslee@google.com>
Change-Id: If8ee62e0fd2aa01333b63c905734059ac2bf3c6f
parent 2ed479d6
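
For readers skimming the diff below, the "much more common pattern" the commit message refers to is: take `cpumask_next()` starting from the last CPU used, and wrap back to `cpumask_first()` once the scan runs past `nr_cpu_ids`, instead of retrying with `% nr_cpumask_bits`. The following is a rough, self-contained userspace sketch of that idiom (illustration only, not the driver code; `cpu_online[]`, `next_online()` and `next_online_wrapped()` are made-up stand-ins for `cpu_online_mask`, `cpumask_next()` and `wg_cpumask_next_online()`):

/*
 * Toy illustration of the wrap-around round-robin idiom (NOT kernel code):
 * cpu_online[] stands in for cpu_online_mask, next_online() for cpumask_next().
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

static const bool cpu_online[NR_CPUS] = {
	true, true, false, true, false, true, true, false,
};

/* First online CPU strictly after 'prev', or NR_CPUS if there is none. */
static int next_online(int prev)
{
	for (int cpu = prev + 1; cpu < NR_CPUS; cpu++)
		if (cpu_online[cpu])
			return cpu;
	return NR_CPUS;
}

/* The new pattern: scan once, wrap once, remember the CPU we handed out. */
static int next_online_wrapped(int *last_cpu)
{
	int cpu = next_online(*last_cpu);

	if (cpu >= NR_CPUS)
		cpu = next_online(-1);	/* wrap back to the first online CPU */
	*last_cpu = cpu;
	return cpu;
}

int main(void)
{
	int last_cpu = -1;	/* matches the new initialisation in queueing.c */

	for (int i = 0; i < 10; i++)
		printf("%d ", next_online_wrapped(&last_cpu));
	putchar('\n');	/* prints: 0 1 3 5 6 0 1 3 5 6 */
	return 0;
}

Because `cpumask_next(-1, mask)` yields the first set bit, initialising `last_cpu` to -1 in the queueing.c hunk below makes the very first enqueue land on the lowest-numbered online CPU.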
drivers/net/wireguard/queueing.c  +1 −0
@@ -28,6 +28,7 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
 	int ret;
 
 	memset(queue, 0, sizeof(*queue));
+	queue->last_cpu = -1;
 	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
 	if (ret)
 		return ret;
drivers/net/wireguard/queueing.h  +11 −14
@@ -119,20 +119,17 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
 	return cpu;
 }
 
-/* This function is racy, in the sense that next is unlocked, so it could return
- * the same CPU twice. A race-free version of this would be to instead store an
- * atomic sequence number, do an increment-and-return, and then iterate through
- * every possible CPU until we get to that index -- choose_cpu. However that's
- * a bit slower, and it doesn't seem like this potential race actually
- * introduces any performance loss, so we live with it.
+/* This function is racy, in the sense that it's called while last_cpu is
+ * unlocked, so it could return the same CPU twice. Adding locking or using
+ * atomic sequence numbers is slower though, and the consequences of racing are
+ * harmless, so live with it.
  */
-static inline int wg_cpumask_next_online(int *next)
+static inline int wg_cpumask_next_online(int *last_cpu)
 {
-	int cpu = *next;
-
-	while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
-		cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
-	*next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
+	int cpu = cpumask_next(*last_cpu, cpu_online_mask);
+	if (cpu >= nr_cpu_ids)
+		cpu = cpumask_first(cpu_online_mask);
+	*last_cpu = cpu;
 	return cpu;
 }
 
@@ -161,7 +158,7 @@ static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
 
 static inline int wg_queue_enqueue_per_device_and_peer(
 	struct crypt_queue *device_queue, struct prev_queue *peer_queue,
-	struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
+	struct sk_buff *skb, struct workqueue_struct *wq)
 {
 	int cpu;
 
@@ -175,7 +172,7 @@ static inline int wg_queue_enqueue_per_device_and_peer(
 	/* Then we queue it up in the device queue, which consumes the
 	 * packet as soon as it can.
 	 */
-	cpu = wg_cpumask_next_online(next_cpu);
+	cpu = wg_cpumask_next_online(&device_queue->last_cpu);
 	if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
 		return -EPIPE;
 	queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
drivers/net/wireguard/receive.c  +1 −1
@@ -531,7 +531,7 @@ static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
 		goto err;
 
 	ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb,
-						   wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu);
+						   wg->packet_crypt_wq);
 	if (unlikely(ret == -EPIPE))
 		wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD);
 	if (likely(!ret || ret == -EPIPE)) {
drivers/net/wireguard/send.c  +1 −1
@@ -318,7 +318,7 @@ static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first)
 		goto err;
 
 	ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first,
-						   wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu);
+						   wg->packet_crypt_wq);
 	if (unlikely(ret == -EPIPE))
 		wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD);
 err: