
Commit baac50bb authored by Johannes Weiner, committed by Linus Torvalds

net: tcp_memcontrol: simplify linkage between socket and page counter



There won't be any separate counters for socket memory consumed by
protocols other than TCP in the future.  Remove the indirection and link
sockets directly to their owning memory cgroup.
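
To illustrate the change for callers, a minimal sketch (a hypothetical caller, not part of this diff) of the charge path before and after; the signatures and field names come from the hunks below:

	unsigned int nr_pages = 1;	/* example amount */

	/* Before: the socket points at a per-protocol cg_proto, which in
	 * turn points back at its owning memcg via cg_proto->memcg.
	 */
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		mem_cgroup_charge_skmem(sk->sk_cgrp, nr_pages);

	/* After: the cg_proto hop is gone; the socket holds a
	 * css-referenced pointer straight to its memory cgroup.
	 */
	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages);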

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e805605c
include/linux/memcontrol.h +5 −15
@@ -89,16 +89,6 @@ struct cg_proto {
 	struct page_counter	memory_allocated;	/* Current allocated memory. */
 	int			memory_pressure;
 	bool			active;
-	/*
-	 * memcg field is used to find which memcg we belong directly
-	 * Each memcg struct can hold more than one cg_proto, so container_of
-	 * won't really cut.
-	 *
-	 * The elegant solution would be having an inverse function to
-	 * proto_cgroup in struct proto, but that means polluting the structure
-	 * for everybody, instead of just for memcg users.
-	 */
-	struct mem_cgroup	*memcg;
 };
 
 #ifdef CONFIG_MEMCG
@@ -688,15 +678,15 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
 struct sock;
 void sock_update_memcg(struct sock *sk);
 void sock_release_memcg(struct sock *sk);
-bool mem_cgroup_charge_skmem(struct cg_proto *proto, unsigned int nr_pages);
-void mem_cgroup_uncharge_skmem(struct cg_proto *proto, unsigned int nr_pages);
+bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
+void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
-static inline bool mem_cgroup_under_socket_pressure(struct cg_proto *proto)
+static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 {
-	return proto->memory_pressure;
+	return memcg->tcp_mem.memory_pressure;
 }
 #else
-static inline bool mem_cgroup_under_pressure(struct cg_proto *proto)
+static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 {
 	return false;
 }
include/net/sock.h +4 −21
@@ -71,22 +71,6 @@
 #include <net/tcp_states.h>
 #include <linux/net_tstamp.h>
 
-struct cgroup;
-struct cgroup_subsys;
-#ifdef CONFIG_NET
-int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
-void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg);
-#else
-static inline
-int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
-{
-	return 0;
-}
-static inline
-void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
-{
-}
-#endif
 /*
  * This structure really needs to be cleaned up.
  * Most of it is for TCP, and not used by any of
@@ -245,7 +229,6 @@ struct sock_common {
 	/* public: */
 };
 
-struct cg_proto;
 /**
  *	struct sock - network layer representation of sockets
  *	@__sk_common: shared layout with inet_timewait_sock
@@ -310,7 +293,7 @@ struct cg_proto;
  *	@sk_security: used by security modules
  *	@sk_mark: generic packet mark
  *	@sk_cgrp_data: cgroup data for this cgroup
- *	@sk_cgrp: this socket's cgroup-specific proto data
+ *	@sk_memcg: this socket's memory cgroup association
  *	@sk_write_pending: a write to stream socket waits to start
  *	@sk_state_change: callback to indicate change in the state of the sock
  *	@sk_data_ready: callback to indicate there is data to be processed
@@ -446,7 +429,7 @@ struct sock {
 	void			*sk_security;
 #endif
 	struct sock_cgroup_data	sk_cgrp_data;
-	struct cg_proto		*sk_cgrp;
+	struct mem_cgroup	*sk_memcg;
 	void			(*sk_state_change)(struct sock *sk);
 	void			(*sk_data_ready)(struct sock *sk);
 	void			(*sk_write_space)(struct sock *sk);
@@ -1129,8 +1112,8 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
 	if (!sk->sk_prot->memory_pressure)
 		return false;
 
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
-	    mem_cgroup_under_socket_pressure(sk->sk_cgrp))
+	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
+	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 		return true;
 
 	return !!*sk->sk_prot->memory_pressure;
include/net/tcp.h +2 −2
@@ -289,8 +289,8 @@ extern int tcp_memory_pressure;
 /* optimized version of sk_under_memory_pressure() for TCP sockets */
 static inline bool tcp_under_memory_pressure(const struct sock *sk)
 {
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
-	    mem_cgroup_under_socket_pressure(sk->sk_cgrp))
+	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
+	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 		return true;
 
 	return tcp_memory_pressure;
include/net/tcp_memcontrol.h +0 −1
@@ -1,7 +1,6 @@
 #ifndef _TCP_MEMCG_H
 #define _TCP_MEMCG_H
 
-struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg);
 int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
 void tcp_destroy_cgroup(struct mem_cgroup *memcg);
 #endif /* _TCP_MEMCG_H */
mm/memcontrol.c +22 −35
@@ -294,9 +294,6 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
 void sock_update_memcg(struct sock *sk)
 {
 	struct mem_cgroup *memcg;
-	struct cg_proto *cg_proto;
-
-	BUG_ON(!sk->sk_prot->proto_cgroup);
 
 	/* Socket cloning can throw us here with sk_cgrp already
 	 * filled. It won't however, necessarily happen from
@@ -306,68 +303,58 @@ void sock_update_memcg(struct sock *sk)
 	 * Respecting the original socket's memcg is a better
 	 * decision in this case.
 	 */
-	if (sk->sk_cgrp) {
-		BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
-		css_get(&sk->sk_cgrp->memcg->css);
+	if (sk->sk_memcg) {
+		BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
+		css_get(&sk->sk_memcg->css);
 		return;
 	}
 
 	rcu_read_lock();
 	memcg = mem_cgroup_from_task(current);
-	cg_proto = sk->sk_prot->proto_cgroup(memcg);
-	if (cg_proto && cg_proto->active &&
-	    css_tryget_online(&memcg->css)) {
-		sk->sk_cgrp = cg_proto;
-	}
+	if (memcg != root_mem_cgroup &&
+	    memcg->tcp_mem.active &&
+	    css_tryget_online(&memcg->css))
+		sk->sk_memcg = memcg;
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL(sock_update_memcg);
 
 void sock_release_memcg(struct sock *sk)
 {
-	WARN_ON(!sk->sk_cgrp->memcg);
-	css_put(&sk->sk_cgrp->memcg->css);
-}
-
-struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
-{
-	if (!memcg || mem_cgroup_is_root(memcg))
-		return NULL;
-
-	return &memcg->tcp_mem;
+	WARN_ON(!sk->sk_memcg);
+	css_put(&sk->sk_memcg->css);
 }
-EXPORT_SYMBOL(tcp_proto_cgroup);
 
 /**
  * mem_cgroup_charge_skmem - charge socket memory
- * @proto: proto to charge
+ * @memcg: memcg to charge
  * @nr_pages: number of pages to charge
  *
- * Charges @nr_pages to @proto. Returns %true if the charge fit within
- * @proto's configured limit, %false if the charge had to be forced.
+ * Charges @nr_pages to @memcg. Returns %true if the charge fit within
+ * @memcg's configured limit, %false if the charge had to be forced.
  */
-bool mem_cgroup_charge_skmem(struct cg_proto *proto, unsigned int nr_pages)
+bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
 	struct page_counter *counter;
 
-	if (page_counter_try_charge(&proto->memory_allocated,
+	if (page_counter_try_charge(&memcg->tcp_mem.memory_allocated,
 				    nr_pages, &counter)) {
-		proto->memory_pressure = 0;
+		memcg->tcp_mem.memory_pressure = 0;
 		return true;
 	}
-	page_counter_charge(&proto->memory_allocated, nr_pages);
-	proto->memory_pressure = 1;
+	page_counter_charge(&memcg->tcp_mem.memory_allocated, nr_pages);
+	memcg->tcp_mem.memory_pressure = 1;
 	return false;
 }
 
 /**
  * mem_cgroup_uncharge_skmem - uncharge socket memory
- * @proto - proto to uncharge
+ * @memcg - memcg to uncharge
  * @nr_pages - number of pages to uncharge
  */
-void mem_cgroup_uncharge_skmem(struct cg_proto *proto, unsigned int nr_pages)
+void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
-	page_counter_uncharge(&proto->memory_allocated, nr_pages);
+	page_counter_uncharge(&memcg->tcp_mem.memory_allocated, nr_pages);
 }
 
 #endif
@@ -3653,7 +3640,7 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 	if (ret)
 		return ret;
 
-	return mem_cgroup_sockets_init(memcg, ss);
+	return tcp_init_cgroup(memcg, ss);
 }
 
 static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
@@ -3709,7 +3696,7 @@ static void memcg_destroy_kmem(struct mem_cgroup *memcg)
 		static_key_slow_dec(&memcg_kmem_enabled_key);
 		WARN_ON(page_counter_read(&memcg->kmem));
 	}
-	mem_cgroup_sockets_destroy(memcg);
+	tcp_destroy_cgroup(memcg);
 }
 #else
 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)