
Commit 94886c86 authored by Cong Wang, committed by Greg Kroah-Hartman

cgroup: fix cgroup_sk_alloc() for sk_clone_lock()



[ Upstream commit ad0f75e5f57ccbceec13274e1e242f2b5a6397ed ]

When we clone a socket in sk_clone_lock(), its sk_cgrp_data is
copied, so the cgroup refcnt must be taken too. And, unlike the
sk_alloc() path, sock_update_netprioidx() is not called here.
Therefore, it is safe and necessary to grab the cgroup refcnt
even when cgroup_sk_alloc is disabled.

sk_clone_lock() runs in BH context anyway, so the in_interrupt() check
would terminate this function early if it were called from there. And for
sk_alloc() skcd->val is always zero, so it is safe to factor the code out
to make it more readable.
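
To make the reasoning above concrete, the following stand-alone user-space model sketches the two paths; every name in it (group, sock_data, sock_data_alloc, sock_data_clone) is illustrative rather than a kernel API, and the real changes are in the hunks below. A fresh allocation starts from a zeroed sock_cgroup_data, while a clone copies a live pointer and must therefore take its own reference:

/*
 * Stand-alone user-space model (not kernel code) of the alloc and clone
 * paths described above.  Compile with: cc -o model model.c
 */
#include <assert.h>
#include <stdlib.h>
#include <string.h>

struct group {			/* stands in for struct cgroup */
	int refcnt;
};

struct sock_data {		/* stands in for sock_cgroup_data */
	struct group *grp;
};

static void group_get(struct group *g) { g->refcnt++; }

static void group_put(struct group *g)
{
	if (--g->refcnt == 0)
		free(g);
}

/* alloc path: nothing has been copied yet, so there is nothing to pin */
static void sock_data_alloc(struct sock_data *skcd)
{
	memset(skcd, 0, sizeof(*skcd));	/* "skcd->val is always zero" */
}

/* clone path: the container is copied, so the copy needs its own reference */
static void sock_data_clone(struct sock_data *dst, const struct sock_data *src)
{
	*dst = *src;			/* sk_cgrp_data is copied like this */
	if (dst->grp)
		group_get(dst->grp);	/* the reference this fix adds */
}

int main(void)
{
	struct group *g = calloc(1, sizeof(*g));
	struct sock_data parent, child;

	sock_data_alloc(&parent);	/* fresh socket: empty data */
	parent.grp = g;
	group_get(g);			/* parent's own reference */

	sock_data_clone(&child, &parent);
	group_put(parent.grp);		/* parent socket goes away first */
	assert(child.grp->refcnt == 1);	/* clone still safely owns the group */
	group_put(child.grp);
	return 0;
}

Dropping the group_get() call in sock_data_clone() reproduces the class of bug being fixed here: the parent's final put frees the object while the clone still points at it.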

The global variable 'cgroup_sk_alloc_disabled' is used to determine
whether to take these reference counts. It is impossible to make
the reference counting correct unless we save this bit of information
in skcd->val. So, add a new bit there to record whether the socket
has already taken the reference counts. This obviously relies on
kmalloc() to align cgroup pointers to at least 4 bytes;
ARCH_KMALLOC_MINALIGN is certainly larger than that.
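
The alignment assumption in the previous paragraph can be checked with another stand-alone user-space sketch (IS_DATA and NO_REFCNT are illustrative macro names, not kernel symbols): because allocations are aligned to at least 4 bytes, the two low bits of a stored pointer are always zero, so they are free to carry the is_data and no_refcnt flags, and a test like the "if (v & 3)" check in sock_cgroup_ptr() below reliably tells a plain pointer apart from a value with flag bits set:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define IS_DATA		0x1UL	/* plays the role of is_data (bit 0) */
#define NO_REFCNT	0x2UL	/* plays the role of no_refcnt (bit 1) */

int main(void)
{
	void *obj = malloc(32);		/* stands in for a kmalloc'd cgroup */
	uintptr_t val = (uintptr_t)obj;

	/* allocator alignment guarantees the two low bits start out clear */
	assert((val & 3) == 0);

	/* setting a flag makes the word no longer look like a plain pointer */
	uintptr_t tagged = val | NO_REFCNT;
	assert((tagged & 3) != 0);	/* this is what "if (v & 3)" detects */

	/* the pointer is still recoverable by masking the flag bits off */
	assert((void *)(tagged & ~(IS_DATA | NO_REFCNT)) == obj);

	printf("low two bits of an aligned pointer are free for flags\n");
	free(obj);
	return 0;
}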

This bug seems to have been present since the beginning; commit
d979a39d ("cgroup: duplicate cgroup reference when cloning sockets")
tried to fix it, but not completely. It was apparently not easy to trigger
until the recent commit 090e28b229af
("netprio_cgroup: Fix unlimited memory leak of v2 cgroups") was merged.

Fixes: bd1060a1 ("sock, cgroup: add sock->sk_cgroup")
Reported-by: Cameron Berkenpas <cam@neo-zeon.de>
Reported-by: Peter Geis <pgwipeout@gmail.com>
Reported-by: Lu Fengqi <lufq.fnst@cn.fujitsu.com>
Reported-by: Daniël Sonck <dsonck92@gmail.com>
Reported-by: Zhang Qiang <qiang.zhang@windriver.com>
Tested-by: Cameron Berkenpas <cam@neo-zeon.de>
Tested-by: Peter Geis <pgwipeout@gmail.com>
Tested-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Zefan Li <lizefan@huawei.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Roman Gushchin <guro@fb.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 17164472
include/linux/cgroup-defs.h (+4 −2)
@@ -797,7 +797,8 @@ struct sock_cgroup_data {
 	union {
 #ifdef __LITTLE_ENDIAN
 		struct {
-			u8	is_data;
+			u8	is_data : 1;
+			u8	no_refcnt : 1;
 			u8	padding;
 			u16	prioidx;
 			u32	classid;
@@ -807,7 +808,8 @@ struct sock_cgroup_data {
 			u32	classid;
 			u16	prioidx;
 			u8	padding;
-			u8	is_data;
+			u8	no_refcnt : 1;
+			u8	is_data : 1;
 		} __packed;
 #endif
 		u64		val;
include/linux/cgroup.h (+3 −1)
@@ -822,6 +822,7 @@ extern spinlock_t cgroup_sk_update_lock;
 
 void cgroup_sk_alloc_disable(void);
 void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
+void cgroup_sk_clone(struct sock_cgroup_data *skcd);
 void cgroup_sk_free(struct sock_cgroup_data *skcd);
 
 static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
@@ -835,7 +836,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
 	 */
 	v = READ_ONCE(skcd->val);
 
-	if (v & 1)
+	if (v & 3)
 		return &cgrp_dfl_root.cgrp;
 
 	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
@@ -847,6 +848,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
 #else	/* CONFIG_CGROUP_DATA */
 
 static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
+static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
 static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
 
 #endif	/* CONFIG_CGROUP_DATA */
kernel/cgroup/cgroup.c (+19 −12)
@@ -6379,18 +6379,8 @@ void cgroup_sk_alloc_disable(void)
 
 void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
 {
-	if (cgroup_sk_alloc_disabled)
-		return;
-
-	/* Socket clone path */
-	if (skcd->val) {
-		/*
-		 * We might be cloning a socket which is left in an empty
-		 * cgroup and the cgroup might have already been rmdir'd.
-		 * Don't use cgroup_get_live().
-		 */
-		cgroup_get(sock_cgroup_ptr(skcd));
-		cgroup_bpf_get(sock_cgroup_ptr(skcd));
+	if (cgroup_sk_alloc_disabled) {
+		skcd->no_refcnt = 1;
 		return;
 	}
 
@@ -6415,10 +6405,27 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
 	rcu_read_unlock();
 }
 
+void cgroup_sk_clone(struct sock_cgroup_data *skcd)
+{
+	if (skcd->val) {
+		if (skcd->no_refcnt)
+			return;
+		/*
+		 * We might be cloning a socket which is left in an empty
+		 * cgroup and the cgroup might have already been rmdir'd.
+		 * Don't use cgroup_get_live().
+		 */
+		cgroup_get(sock_cgroup_ptr(skcd));
+		cgroup_bpf_get(sock_cgroup_ptr(skcd));
+	}
+}
+
 void cgroup_sk_free(struct sock_cgroup_data *skcd)
 {
 	struct cgroup *cgrp = sock_cgroup_ptr(skcd);
 
+	if (skcd->no_refcnt)
+		return;
 	cgroup_bpf_put(cgrp);
 	cgroup_put(cgrp);
 }
net/core/sock.c (+1 −1)
@@ -1837,7 +1837,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		/* sk->sk_memcg will be populated at accept() time */
 		newsk->sk_memcg = NULL;
 
-		cgroup_sk_alloc(&newsk->sk_cgrp_data);
+		cgroup_sk_clone(&newsk->sk_cgrp_data);
 
 		rcu_read_lock();
 		filter = rcu_dereference(sk->sk_filter);