#include <net/tcp.h>
#include <net/tcp_memcontrol.h>
#include <net/sock.h>
#include <linux/nsproxy.h>
#include <linux/memcontrol.h>
#include <linux/module.h>
static inline struct tcp_memcontrol *tcp_from_cgproto(struct cg_proto *cg_proto)
{
	return container_of(cg_proto, struct tcp_memcontrol, cg_proto);
}
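/*
 * Per-memcg memory pressure callback, installed in tcp_init_cgroup()
 * below as cg_proto->enter_memory_pressure. It is only reached for
 * sockets with a valid sk->sk_cgrp, so the flag flipped here is the
 * per-memcg pressure flag rather than the global tcp_memory_pressure.
 */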
static void memcg_tcp_enter_memory_pressure(struct sock *sk)
{
	if (sk->sk_cgrp->memory_pressure)
		*sk->sk_cgrp->memory_pressure = 1;
}
EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
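/*
 * Initialize per-memcg TCP accounting: seed the per-memcg limits from
 * the global sysctl_tcp_mem[], chain our res_counter to the parent
 * memcg's counter so charges propagate up the hierarchy, and point the
 * cg_proto fields at this state so the socket core consults it instead
 * of the global counters.
 */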
int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	/*
	 * The root cgroup does not use res_counters, but rather
	 * relies on the data already collected by the network
	 * subsystem.
	 */
	struct res_counter *res_parent = NULL;
	struct cg_proto *cg_proto, *parent_cg;
	struct tcp_memcontrol *tcp;
	struct mem_cgroup *parent = parent_mem_cgroup(memcg);

	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return 0;

	tcp = tcp_from_cgproto(cg_proto);

	tcp->tcp_prot_mem[0] = sysctl_tcp_mem[0];
	tcp->tcp_prot_mem[1] = sysctl_tcp_mem[1];
	tcp->tcp_prot_mem[2] = sysctl_tcp_mem[2];
	tcp->tcp_memory_pressure = 0;

	parent_cg = tcp_prot.proto_cgroup(parent);
	if (parent_cg)
		res_parent = parent_cg->memory_allocated;

	res_counter_init(&tcp->tcp_memory_allocated, res_parent);
	percpu_counter_init(&tcp->tcp_sockets_allocated, 0);

	cg_proto->enter_memory_pressure = memcg_tcp_enter_memory_pressure;
	cg_proto->memory_pressure = &tcp->tcp_memory_pressure;
	cg_proto->sysctl_mem = tcp->tcp_prot_mem;
	cg_proto->memory_allocated = &tcp->tcp_memory_allocated;
	cg_proto->sockets_allocated = &tcp->tcp_sockets_allocated;
	cg_proto->memcg = memcg;

	return 0;
}
EXPORT_SYMBOL(tcp_init_cgroup);
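/*
 * Undo tcp_init_cgroup(). Only the percpu counter needs explicit
 * teardown; res_counters have no destructor.
 */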
void tcp_destroy_cgroup(struct mem_cgroup *memcg)
{
	struct cg_proto *cg_proto;
	struct tcp_memcontrol *tcp;

	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return;

	tcp = tcp_from_cgproto(cg_proto);
	percpu_counter_destroy(&tcp->tcp_sockets_allocated);
}
EXPORT_SYMBOL(tcp_destroy_cgroup);
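/*
 * Apply a new byte limit: store it in the res_counter and derive the
 * page-granular tcp_prot_mem[] thresholds, capped at the global
 * sysctl_tcp_mem[]. RES_COUNTER_MAX means "unlimited" and deactivates
 * accounting for new sockets; any finite value activates it via the
 * memcg_socket_limit_enabled static key.
 */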
static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
{
	struct tcp_memcontrol *tcp;
	struct cg_proto *cg_proto;
	u64 old_lim;
	int i;
	int ret;

	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return -EINVAL;

	if (val > RES_COUNTER_MAX)
		val = RES_COUNTER_MAX;

	tcp = tcp_from_cgproto(cg_proto);

	old_lim = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
	ret = res_counter_set_limit(&tcp->tcp_memory_allocated, val);
	if (ret)
		return ret;

	for (i = 0; i < 3; i++)
		tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
					     sysctl_tcp_mem[i]);

	if (val == RES_COUNTER_MAX)
		clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
	else {
		/*
		 * The active bit needs to be written after the static_key
		 * update. This is what guarantees that the socket activation
		 * function is the last one to run. See sock_update_memcg() for
		 * details, and note that we don't mark any socket as belonging
		 * to this memcg until that flag is up.
		 *
		 * We need to do this, because static_keys will span multiple
		 * sites, but we can't control their order. If we mark a socket
		 * as accounted, but the accounting functions are not patched in
		 * yet, we'll lose accounting.
		 *
		 * We never race with the readers in sock_update_memcg(),
		 * because when this value changes, the code to process it is
		 * not patched in yet.
		 *
		 * The activated bit is used to guarantee that no two writers
		 * will do the update in the same memcg. Without that, we can't
		 * properly shutdown the static key.
		 */
		if (!test_and_set_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
			static_key_slow_inc(&memcg_socket_limit_enabled);
		set_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
	}

	return 0;
}
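/*
 * Illustrative userspace usage (the mount point is an assumption;
 * use wherever the memory controller is actually mounted):
 *
 *   echo 16M > /sys/fs/cgroup/memory/grp/memory.kmem.tcp.limit_in_bytes
 *
 * The write lands in tcp_update_limit() through tcp_cgroup_write()
 * below, after res_counter_memparse_write_strategy() parses the size
 * suffix.
 */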
static int tcp_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
			    const char *buffer)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	unsigned long long val;
	int ret = 0;

	switch (cft->private) {
	case RES_LIMIT:
		/* see memcontrol.c */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		ret = tcp_update_limit(memcg, val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
static u64 tcp_read_stat(struct mem_cgroup *memcg, int type, u64 default_val)
{
	struct tcp_memcontrol *tcp;
	struct cg_proto *cg_proto;

	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return default_val;

	tcp = tcp_from_cgproto(cg_proto);
	return res_counter_read_u64(&tcp->tcp_memory_allocated, type);
}
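/*
 * For the root memcg (no cg_proto) usage is the protocol-global
 * tcp_memory_allocated page count converted to bytes; for children it
 * is the memcg-local res_counter usage.
 */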
static u64 tcp_read_usage(struct mem_cgroup *memcg)
{
	struct tcp_memcontrol *tcp;
	struct cg_proto *cg_proto;

	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return atomic_long_read(&tcp_memory_allocated) << PAGE_SHIFT;

	tcp = tcp_from_cgproto(cg_proto);
	return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
}
static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	u64 val;

	switch (cft->private) {
	case RES_LIMIT:
		val = tcp_read_stat(memcg, RES_LIMIT, RES_COUNTER_MAX);
		break;
	case RES_USAGE:
		val = tcp_read_usage(memcg);
		break;
	case RES_FAILCNT:
	case RES_MAX_USAGE:
		val = tcp_read_stat(memcg, cft->private, 0);
		break;
	default:
		BUG();
	}
	return val;
}
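/*
 * Trigger handler for the reset files: writing to max_usage_in_bytes
 * or failcnt clears the res_counter watermark or failure count,
 * leaving the limit and current usage untouched.
 */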
static int tcp_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
{
	struct mem_cgroup *memcg;
	struct tcp_memcontrol *tcp;
	struct cg_proto *cg_proto;

	memcg = mem_cgroup_from_css(css);
	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return 0;
	tcp = tcp_from_cgproto(cg_proto);

	switch (event) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&tcp->tcp_memory_allocated);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&tcp->tcp_memory_allocated);
		break;
	}

	return 0;
}
static struct cftype tcp_files[] = {
	{
		.name = "kmem.tcp.limit_in_bytes",
		.write_string = tcp_cgroup_write,
		.read_u64 = tcp_cgroup_read,
		.private = RES_LIMIT,
	},
	{
		.name = "kmem.tcp.usage_in_bytes",
		.read_u64 = tcp_cgroup_read,
		.private = RES_USAGE,
	},
	{
		.name = "kmem.tcp.failcnt",
		.private = RES_FAILCNT,
		.trigger = tcp_cgroup_reset,
		.read_u64 = tcp_cgroup_read,
	},
	{
		.name = "kmem.tcp.max_usage_in_bytes",
		.private = RES_MAX_USAGE,
		.trigger = tcp_cgroup_reset,
		.read_u64 = tcp_cgroup_read,
	},
	{ }	/* terminate */
};
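/*
 * The cgroup core prefixes these names with the subsystem name, so
 * they appear in each memory cgroup directory as, e.g.,
 * memory.kmem.tcp.limit_in_bytes.
 */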
static int __init tcp_memcontrol_init(void)
{
	WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, tcp_files));
	return 0;
}
__initcall(tcp_memcontrol_init);