Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d199fab6 authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller
Browse files

packet: fix races in fanout_add()



Multiple threads can call fanout_add() at the same time.

We need to grab fanout_mutex earlier to avoid races that could
lead to one thread freeing po->rollover that was set by another thread.

Do the same in fanout_release(), for peace of mind, and to help us
find lockdep issues earlier.

Fixes: dc99f600 ("packet: Add fanout support.")
Fixes: 0648ab70 ("packet: rollover prepare: per-socket state")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f39f0d1e
Loading
Loading
Loading
Loading
+30 −25
Original line number Diff line number Diff line
@@ -1619,6 +1619,7 @@ static void fanout_release_data(struct packet_fanout *f)

static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_rollover *rollover = NULL;
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
@@ -1641,23 +1642,28 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
		return -EINVAL;
	}

	mutex_lock(&fanout_mutex);

	err = -EINVAL;
	if (!po->running)
		return -EINVAL;
		goto out;

	err = -EALREADY;
	if (po->fanout)
		return -EALREADY;
		goto out;

	if (type == PACKET_FANOUT_ROLLOVER ||
	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
		po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
		if (!po->rollover)
			return -ENOMEM;
		atomic_long_set(&po->rollover->num, 0);
		atomic_long_set(&po->rollover->num_huge, 0);
		atomic_long_set(&po->rollover->num_failed, 0);
		err = -ENOMEM;
		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
		if (!rollover)
			goto out;
		atomic_long_set(&rollover->num, 0);
		atomic_long_set(&rollover->num_huge, 0);
		atomic_long_set(&rollover->num_failed, 0);
		po->rollover = rollover;
	}

	mutex_lock(&fanout_mutex);
	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
@@ -1704,11 +1710,11 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
		}
	}
out:
	mutex_unlock(&fanout_mutex);
	if (err) {
		kfree(po->rollover);
	if (err && rollover) {
		kfree(rollover);
		po->rollover = NULL;
	}
	mutex_unlock(&fanout_mutex);
	return err;
}

@@ -1717,11 +1723,9 @@ static void fanout_release(struct sock *sk)
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	f = po->fanout;
	if (!f)
		return;

	mutex_lock(&fanout_mutex);
	f = po->fanout;
	if (f) {
		po->fanout = NULL;

		if (atomic_dec_and_test(&f->sk_ref)) {
@@ -1730,11 +1734,12 @@ static void fanout_release(struct sock *sk)
			fanout_release_data(f);
			kfree(f);
		}
	mutex_unlock(&fanout_mutex);

		if (po->rollover)
			kfree_rcu(po->rollover, rcu);
	}
	mutex_unlock(&fanout_mutex);
}

static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
					  struct sk_buff *skb)