/*
 * CPU subsystem support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/acpi.h>

#include "base.h"

static DEFINE_PER_CPU(struct device *, cpu_sys_devices);

static int cpu_subsys_match(struct device *dev, struct device_driver *drv)
{
        /* ACPI style match is the only one that may succeed. */
        if (acpi_driver_match_device(dev, drv))
                return 1;

        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void change_cpu_under_node(struct cpu *cpu,
                        unsigned int from_nid, unsigned int to_nid)
{
        int cpuid = cpu->dev.id;
        unregister_cpu_under_node(cpuid, from_nid);
        register_cpu_under_node(cpuid, to_nid);
        cpu->node_id = to_nid;
}

static int __ref cpu_subsys_online(struct device *dev)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        int cpuid = dev->id;
        int from_nid, to_nid;
        int ret;

        cpu_hotplug_driver_lock();

        from_nid = cpu_to_node(cpuid);
        ret = cpu_up(cpuid);
        /*
         * When hot adding memory to a memoryless node and enabling a cpu
         * on that node, the node number of the cpu may change internally.
         */
        to_nid = cpu_to_node(cpuid);
        if (from_nid != to_nid)
                change_cpu_under_node(cpu, from_nid, to_nid);

        cpu_hotplug_driver_unlock();
        return ret;
}

static int cpu_subsys_offline(struct device *dev)
{
        int ret;

        cpu_hotplug_driver_lock();
        ret = cpu_down(dev->id);
        cpu_hotplug_driver_unlock();
        return ret;
}

void unregister_cpu(struct cpu *cpu)
{
        int logical_cpu = cpu->dev.id;

        unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));

        device_unregister(&cpu->dev);
        per_cpu(cpu_sys_devices, logical_cpu) = NULL;
        return;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
static ssize_t cpu_probe_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf,
                               size_t count)
{
        return arch_cpu_probe(buf, count);
}

static ssize_t cpu_release_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t count)
{
        return arch_cpu_release(buf, count);
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
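
/*
 * Note (illustrative): on architectures that implement arch_cpu_probe() and
 * arch_cpu_release() (CONFIG_ARCH_CPU_PROBE_RELEASE, e.g. powerpc/pseries),
 * the two attributes above appear as write-only control files, roughly:
 *
 *   # echo <arch-specific argument> > /sys/devices/system/cpu/probe
 *   # echo <arch-specific argument> > /sys/devices/system/cpu/release
 *
 * The meaning of the written string is entirely up to the architecture code.
 */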
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#endif /* CONFIG_HOTPLUG_CPU */

struct bus_type cpu_subsys = {
        .name = "cpu",
        .dev_name = "cpu",
        .match = cpu_subsys_match,
#ifdef CONFIG_HOTPLUG_CPU
        .online = cpu_subsys_online,
        .offline = cpu_subsys_offline,
#endif
};
EXPORT_SYMBOL_GPL(cpu_subsys);
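
/*
 * Note (illustrative): the ->online/->offline callbacks above are not wired
 * to a sysfs attribute in this file.  The driver core creates a generic
 * "online" attribute for devices whose bus provides these callbacks, so with
 * CONFIG_HOTPLUG_CPU a hotpluggable CPU can be taken down and brought back
 * roughly like this:
 *
 *   # echo 0 > /sys/devices/system/cpu/cpu2/online   (-> cpu_subsys_offline)
 *   # echo 1 > /sys/devices/system/cpu/cpu2/online   (-> cpu_subsys_online)
 */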

#ifdef CONFIG_KEXEC
#include <linux/kexec.h>

static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
                                char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        ssize_t rc;
        unsigned long long addr;
        int cpunum;

        cpunum = cpu->dev.id;

        /*
         * We might be reading another cpu's data, depending on which cpu the
         * reading thread was scheduled on. But the per-cpu crash_notes memory
         * is allocated once during boot and does not change thereafter, so
         * this operation is safe and no locking is required.
         */
        addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
        rc = sprintf(buf, "%Lx\n", addr);
        return rc;
}
static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);

static ssize_t show_crash_notes_size(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        ssize_t rc;

        rc = sprintf(buf, "%zu\n", sizeof(note_buf_t));
        return rc;
}
static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL);

static struct attribute *crash_note_cpu_attrs[] = {
        &dev_attr_crash_notes.attr,
        &dev_attr_crash_notes_size.attr,
        NULL
};

static struct attribute_group crash_note_cpu_attr_group = {
        .attrs = crash_note_cpu_attrs,
};
#endif
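
/*
 * Note (illustrative): the per-cpu crash_notes/crash_notes_size attributes
 * are consumed by kdump userspace (e.g. kexec-tools), which reads the
 * physical address and size from
 *
 *   /sys/devices/system/cpu/cpuN/crash_notes
 *   /sys/devices/system/cpu/cpuN/crash_notes_size
 *
 * when building the ELF headers that describe the crashed kernel's per-cpu
 * register state for /proc/vmcore.
 */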

static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
        &crash_note_cpu_attr_group,
#endif
        NULL
};

static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
        &crash_note_cpu_attr_group,
#endif
        NULL
};

/*
 * Print cpu online, possible, present, and system maps
 */

struct cpu_attr {
        struct device_attribute attr;
        const struct cpumask *const * const map;
};

static ssize_t show_cpus_attr(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
{
        struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
        int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map));

        buf[n++] = '\n';
        buf[n] = '\0';
        return n;
}

#define _CPU_ATTR(name, map) \
        { __ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
        _CPU_ATTR(online, &cpu_online_mask),
        _CPU_ATTR(possible, &cpu_possible_mask),
        _CPU_ATTR(present, &cpu_present_mask),
};
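
/*
 * Note (illustrative): these attributes appear as read-only files directly
 * under /sys/devices/system/cpu/ and print the corresponding cpumask in
 * list format, e.g. on an 8-cpu box with cpu5 offline:
 *
 *   $ cat /sys/devices/system/cpu/online
 *   0-4,6-7
 *   $ cat /sys/devices/system/cpu/present
 *   0-7
 */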

/*
 * Print values for NR_CPUS and offlined cpus
 */
static ssize_t print_cpus_kernel_max(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
        return n;
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);

/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

static ssize_t print_cpus_offline(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        int n = 0, len = PAGE_SIZE-2;
        cpumask_var_t offline;

        /* display offline cpus < nr_cpu_ids */
        if (!alloc_cpumask_var(&offline, GFP_KERNEL))
                return -ENOMEM;
        cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
        n = cpulist_scnprintf(buf, len, offline);
        free_cpumask_var(offline);

        /* display offline cpus >= nr_cpu_ids */
        if (total_cpus && nr_cpu_ids < total_cpus) {
                if (n && n < len)
                        buf[n++] = ',';

                if (nr_cpu_ids == total_cpus-1)
                        n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
                else
                        n += snprintf(&buf[n], len - n, "%d-%d",
                                      nr_cpu_ids, total_cpus-1);
        }

        n += snprintf(&buf[n], len - n, "\n");
        return n;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
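
/*
 * Note (illustrative): kernel_max reports the highest CPU index this kernel
 * was built for (NR_CPUS - 1), while offline lists possible-but-offline CPUs
 * plus, if the architecture sets total_cpus, CPUs the platform has but this
 * kernel cannot use (>= nr_cpu_ids).  For example, on a 4-cpu box with cpu3
 * offlined and NR_CPUS=256:
 *
 *   $ cat /sys/devices/system/cpu/kernel_max
 *   255
 *   $ cat /sys/devices/system/cpu/offline
 *   3
 */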

static void cpu_device_release(struct device *dev)
{
        /*
         * This is an empty function to prevent the driver core from spitting a
         * warning at us.  Yes, I know this is directly opposite of what the
         * documentation for the driver core and kobjects says, and the author
         * of this code has already been publicly ridiculed for doing
         * something as foolish as this.  However, at this point in time, it is
         * the only way to handle the issue of statically allocated cpu
         * devices.  The different architectures will have their cpu device
         * code reworked to properly handle this in the near future, so this
         * function will then be changed to correctly free up the memory held
         * by the cpu device.
         *
         * Never copy this way of doing things, or you too will be made fun of
         * on the linux-kernel list, you have been warned.
         */
}

/**
 * register_cpu - Set up a sysfs device for a CPU.
 * @cpu: the CPU to register; setting cpu->hotpluggable to 1 generates a
 *       hotplug control file in sysfs for this CPU.
 * @num: CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int register_cpu(struct cpu *cpu, int num)
{
        int error;

        cpu->node_id = cpu_to_node(num);
        memset(&cpu->dev, 0x00, sizeof(struct device));
        cpu->dev.id = num;
        cpu->dev.bus = &cpu_subsys;
        cpu->dev.release = cpu_device_release;
        cpu->dev.offline_disabled = !cpu->hotpluggable;
        cpu->dev.offline = !cpu_online(num);
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
        cpu->dev.bus->uevent = arch_cpu_uevent;
#endif
        cpu->dev.groups = common_cpu_attr_groups;
        if (cpu->hotpluggable)
                cpu->dev.groups = hotplugable_cpu_attr_groups;
        error = device_register(&cpu->dev);
        if (!error)
                per_cpu(cpu_sys_devices, num) = &cpu->dev;
        if (!error)
                register_cpu_under_node(num, cpu_to_node(num));

        return error;
}
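
/*
 * Usage sketch (illustrative only; the storage and function names below are
 * hypothetical and not taken from any particular architecture):
 *
 *   static struct cpu arch_cpu_devices[NR_CPUS];
 *
 *   static int __init arch_topology_init(void)
 *   {
 *           int i;
 *
 *           for_each_present_cpu(i) {
 *                   arch_cpu_devices[i].hotpluggable = 1;
 *                   register_cpu(&arch_cpu_devices[i], i);
 *           }
 *           return 0;
 *   }
 *   subsys_initcall(arch_topology_init);
 */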

struct device *get_cpu_device(unsigned cpu)
{
        if (cpu < nr_cpu_ids && cpu_possible(cpu))
                return per_cpu(cpu_sys_devices, cpu);
        else
                return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_device);
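
/*
 * Example (illustrative): a driver that wants to attach an extra sysfs
 * attribute to an existing CPU device could do something like the following
 * (dev_attr_my_attr is a made-up attribute, not defined in this file):
 *
 *   struct device *dev = get_cpu_device(cpu);
 *
 *   if (dev)
 *           err = device_create_file(dev, &dev_attr_my_attr);
 */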

#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
#endif

static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
        &dev_attr_probe.attr,
        &dev_attr_release.attr,
#endif
        &cpu_attrs[0].attr.attr,
        &cpu_attrs[1].attr.attr,
        &cpu_attrs[2].attr.attr,
        &dev_attr_kernel_max.attr,
        &dev_attr_offline.attr,
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
        &dev_attr_modalias.attr,
#endif
        NULL
};

static struct attribute_group cpu_root_attr_group = {
        .attrs = cpu_root_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
        &cpu_root_attr_group,
        NULL,
};

bool cpu_is_hotpluggable(unsigned cpu)
{
        struct device *dev = get_cpu_device(cpu);
        return dev && container_of(dev, struct cpu, dev)->hotpluggable;
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
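
/*
 * Illustration: callers typically use this to skip CPUs that can never be
 * taken offline, e.g.:
 *
 *   if (!cpu_is_hotpluggable(cpu))
 *           continue;
 */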

#ifdef CONFIG_GENERIC_CPU_DEVICES
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif

static void __init cpu_dev_register_generic(void)
{
#ifdef CONFIG_GENERIC_CPU_DEVICES
        int i;

        for_each_possible_cpu(i) {
                if (register_cpu(&per_cpu(cpu_devices, i), i))
                        panic("Failed to register CPU device");
        }
#endif
}

void __init cpu_dev_init(void)
{
        if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
                panic("Failed to register CPU subsystem");

        cpu_dev_register_generic();
}
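
/*
 * Note (illustrative): cpu_dev_init() is expected to run early in driver core
 * bring-up (it is called from driver_init() in drivers/base/init.c), so that
 * /sys/devices/system/cpu and the per-cpu devices exist before architecture
 * and ACPI code start attaching additional attributes to them.
 */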