kernel/sched/cpuacct.c
sched/cpuacct: Add cpuacct_init()
#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <linux/kernel_stat.h>

#include "sched.h"

/*
 * CPU accounting code for task groups.
 *
 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
 * (balbir@in.ibm.com).
 */

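/*
 * Accounting group for the root cgroup. It is allocated statically here
 * and its per-cpu buffers are set up in cpuacct_init(); child groups are
 * allocated on demand in cpuacct_css_alloc().
 */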
struct cpuacct root_cpuacct;

/* create a new cpu accounting group */
static struct cgroup_subsys_state *cpuacct_css_alloc(struct cgroup *cgrp)
{
        struct cpuacct *ca;

        if (!cgrp->parent)
                return &root_cpuacct.css;

        ca = kzalloc(sizeof(*ca), GFP_KERNEL);
        if (!ca)
                goto out;

        ca->cpuusage = alloc_percpu(u64);
        if (!ca->cpuusage)
                goto out_free_ca;

        ca->cpustat = alloc_percpu(struct kernel_cpustat);
        if (!ca->cpustat)
                goto out_free_cpuusage;

        return &ca->css;

out_free_cpuusage:
        free_percpu(ca->cpuusage);
out_free_ca:
        kfree(ca);
out:
        return ERR_PTR(-ENOMEM);
}

/* destroy an existing cpu accounting group */
static void cpuacct_css_free(struct cgroup *cgrp)
{
        struct cpuacct *ca = cgroup_ca(cgrp);

        free_percpu(ca->cpustat);
        free_percpu(ca->cpuusage);
        kfree(ca);
}

static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
{
        u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
        u64 data;

#ifndef CONFIG_64BIT
        /*
         * Take rq->lock to make 64-bit read safe on 32-bit platforms.
         */
        raw_spin_lock_irq(&cpu_rq(cpu)->lock);
        data = *cpuusage;
        raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
        data = *cpuusage;
#endif

        return data;
}

static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
{
        u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);

#ifndef CONFIG_64BIT
        /*
         * Take rq->lock to make 64-bit write safe on 32-bit platforms.
         */
        raw_spin_lock_irq(&cpu_rq(cpu)->lock);
        *cpuusage = val;
        raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
        *cpuusage = val;
#endif
}

/* return total cpu usage (in nanoseconds) of a group */
static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
{
        struct cpuacct *ca = cgroup_ca(cgrp);
        u64 totalcpuusage = 0;
        int i;

        for_each_present_cpu(i)
                totalcpuusage += cpuacct_cpuusage_read(ca, i);

        return totalcpuusage;
}

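/*
 * Reset the group's usage counters. Only a write of 0 is accepted; any
 * other value is rejected with -EINVAL.
 */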
static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
                                                                u64 reset)
{
        struct cpuacct *ca = cgroup_ca(cgrp);
        int err = 0;
        int i;

        if (reset) {
                err = -EINVAL;
                goto out;
        }

        for_each_present_cpu(i)
                cpuacct_cpuusage_write(ca, i, 0);

out:
        return err;
}

static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
                                   struct seq_file *m)
{
        struct cpuacct *ca = cgroup_ca(cgroup);
        u64 percpu;
        int i;

        for_each_present_cpu(i) {
                percpu = cpuacct_cpuusage_read(ca, i);
                seq_printf(m, "%llu ", (unsigned long long) percpu);
        }
        seq_printf(m, "\n");
        return 0;
}

static const char * const cpuacct_stat_desc[] = {
        [CPUACCT_STAT_USER] = "user",
        [CPUACCT_STAT_SYSTEM] = "system",
};

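/*
 * Show accumulated user and system time for the group, in USER_HZ ticks.
 * "user" covers USER + NICE time; "system" covers SYSTEM + IRQ + SOFTIRQ.
 */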
static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
                              struct cgroup_map_cb *cb)
{
        struct cpuacct *ca = cgroup_ca(cgrp);
        int cpu;
        s64 val = 0;

        for_each_online_cpu(cpu) {
                struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
                val += kcpustat->cpustat[CPUTIME_USER];
                val += kcpustat->cpustat[CPUTIME_NICE];
        }
        val = cputime64_to_clock_t(val);
        cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_USER], val);

        val = 0;
        for_each_online_cpu(cpu) {
                struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
                val += kcpustat->cpustat[CPUTIME_SYSTEM];
                val += kcpustat->cpustat[CPUTIME_IRQ];
                val += kcpustat->cpustat[CPUTIME_SOFTIRQ];
        }

        val = cputime64_to_clock_t(val);
        cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_SYSTEM], val);

        return 0;
}

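/*
 * Control files exposed in every cgroup directory. The cgroup core prefixes
 * each name with the subsystem name, so they appear as cpuacct.usage,
 * cpuacct.usage_percpu and cpuacct.stat.
 *
 * Example (assuming the controller is mounted at /sys/fs/cgroup/cpuacct):
 *   # cat /sys/fs/cgroup/cpuacct/cpuacct.usage
 *   # echo 0 > /sys/fs/cgroup/cpuacct/cpuacct.usage    (reset the counter)
 */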
static struct cftype files[] = {
        {
                .name = "usage",
                .read_u64 = cpuusage_read,
                .write_u64 = cpuusage_write,
        },
        {
                .name = "usage_percpu",
                .read_seq_string = cpuacct_percpu_seq_read,
        },
        {
                .name = "stat",
                .read_map = cpuacct_stats_show,
        },
        { }     /* terminate */
};

/*
 * Charge this task's execution time to its accounting group, and to every
 * ancestor group up to the root.
 *
 * Called with rq->lock held.
 */
void cpuacct_charge(struct task_struct *tsk, u64 cputime)
{
        struct cpuacct *ca;
        int cpu;

        if (unlikely(!cpuacct_subsys.active))
                return;

        cpu = task_cpu(tsk);

        rcu_read_lock();

        ca = task_ca(tsk);

        for (; ca; ca = parent_ca(ca)) {
                u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
                *cpuusage += cputime;
        }

        rcu_read_unlock();
}

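/*
 * Set up the root group's per-cpu buffers. Expected to run early in boot,
 * from the scheduler init code, before any child accounting group can be
 * created; the root group reuses the global kernel_cpustat counters.
 */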
void __init cpuacct_init(void)
{
        root_cpuacct.cpustat = &kernel_cpustat;
        root_cpuacct.cpuusage = alloc_percpu(u64);
        BUG_ON(!root_cpuacct.cpuusage); /* Too early, not expected to fail */
}

struct cgroup_subsys cpuacct_subsys = {
        .name = "cpuacct",
        .css_alloc = cpuacct_css_alloc,
        .css_free = cpuacct_css_free,
        .subsys_id = cpuacct_subsys_id,
        .base_cftypes = files,
};