#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/cpumask.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/* Map cpu index to physical APIC ID */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);

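/*
 * DEFINE_EARLY_PER_CPU gives each of these variables two backing
 * stores: a static NR_CPUS-sized __initdata array that is usable
 * before the per-cpu areas exist, and a real per-cpu variable that
 * takes over once setup_per_cpu_areas() has run.  setup_per_cpu_maps()
 * below copies the former into the latter.
 */
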
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define	X86_64_NUMA	1	/* (used later) */

/* Map cpu index to node index */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/* Which logical CPUs are on which nodes */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/* Setup node_to_cpumask_map (defined in the X86_64_NUMA block below) */
static void __init setup_node_to_cpumask_map(void);
#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

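/*
 * On !NUMA or 32-bit configurations the empty stub above lets
 * setup_per_cpu_areas() call setup_node_to_cpumask_map()
 * unconditionally, with no #ifdef at the call site.
 */
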
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA

/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}

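/*
 * Once the early pointers are NULLed, the early_per_cpu() accessor
 * (which tests _name##_early_ptr before falling back to per_cpu())
 * transparently switches over to the per-cpu copies made above.
 */
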
#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static inline void setup_cpu_pda_map(void) { }

#elif !defined(CONFIG_SMP)
static inline void setup_cpu_pda_map(void) { }

#else /* CONFIG_SMP && CONFIG_X86_64 */

/*
 * Allocate cpu_pda pointer table and array via alloc_bootmem.
 */
static void __init setup_cpu_pda_map(void)
{
	char *pda;
	struct x8664_pda **new_cpu_pda;
	unsigned long size;
	int cpu;

	size = roundup(sizeof(struct x8664_pda), cache_line_size());

	/* allocate cpu_pda array and pointer table */
	{
		unsigned long tsize = nr_cpu_ids * sizeof(void *);
		unsigned long asize = size * (nr_cpu_ids - 1);

		tsize = roundup(tsize, cache_line_size());
		new_cpu_pda = alloc_bootmem(tsize + asize);
		pda = (char *)new_cpu_pda + tsize;
	}

	/* initialize pointer table to static pda's */
	for_each_possible_cpu(cpu) {
		if (cpu == 0) {
			/* leave boot cpu pda in place */
			new_cpu_pda[0] = cpu_pda(0);
			continue;
		}
		new_cpu_pda[cpu] = (struct x8664_pda *)pda;
		new_cpu_pda[cpu]->in_bootmem = 1;
		pda += size;
	}

	/* point to new pointer table */
	_cpu_pda = new_cpu_pda;
}

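/*
 * Resulting bootmem layout, roughly: a cache-line aligned table of
 * nr_cpu_ids pointers followed by nr_cpu_ids - 1 pda slots.  The boot
 * cpu keeps its statically allocated pda, which is why only
 * nr_cpu_ids - 1 slots are carved out:
 *
 *	new_cpu_pda -> [ptr0][ptr1]...[ptrN-1][pda1][pda2]...[pdaN-1]
 *	                 |
 *	                 +-> static boot cpu pda
 */
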
#endif /* CONFIG_SMP && CONFIG_X86_64 */

#ifdef CONFIG_X86_64

/* correctly size the local cpu masks */
static void setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

#else /* CONFIG_X86_32 */

static inline void setup_cpu_local_masks(void)
{
}

#endif /* CONFIG_X86_32 */

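/*
 * On 64-bit these masks are cpumask_var_t, which becomes an off-stack
 * pointer when CONFIG_CPUMASK_OFFSTACK=y and therefore needs an
 * explicit bootmem allocation; the 32-bit build uses plain static
 * cpumask_t objects, so its stub does nothing.
 */
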
/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size, old_size;
	char *ptr;
	int cpu;
	unsigned long align = 1;

	/* Setup cpu_pda map */
	setup_cpu_pda_map();

	/* Copy section for each CPU (we discard the original) */
	old_size = PERCPU_ENOUGH_ROOM;
	align = max_t(unsigned long, PAGE_SIZE, align);
	size = roundup(old_size, align);

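	/*
	 * PERCPU_ENOUGH_ROOM is the linked per-cpu section size plus
	 * PERCPU_MODULE_RESERVE slack for modules' per-cpu data; rounding
	 * up to at least PAGE_SIZE keeps each cpu's copy page aligned.
	 */
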
	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = __alloc_bootmem(size, align,
				    __pa(MAX_DMA_ADDRESS));
#else
		int node = early_cpu_to_node(cpu);

		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = __alloc_bootmem(size, align,
					    __pa(MAX_DMA_ADDRESS));
			pr_info("cpu %d has no node %d or node-local memory\n",
				cpu, node);
			pr_debug("per cpu data for cpu%d at %016lx\n",
				 cpu, __pa(ptr));
		} else {
			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
							__pa(MAX_DMA_ADDRESS));
			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
				 cpu, node, __pa(ptr));
		}
#endif
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
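
		/*
		 * per_cpu(var, cpu) resolves to &var + __per_cpu_offset[cpu],
		 * so the offset recorded above points those accesses into the
		 * area just populated from the reference (load) section.
		 */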
		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
	}

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}

#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;	/* highest possible node + 1 */
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);

	pr_debug("Node to cpumask map at %p for %d nodes\n",
		 map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}

void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

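/*
 * The fast numa_add_cpu()/numa_remove_cpu() below assume that
 * node_to_cpumask_map is already allocated and that the cpu's node is
 * valid; the CONFIG_DEBUG_PER_CPU_MAPS variants further down add the
 * corresponding sanity checks.
 */
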
#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/* empty cpumask */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *cpumask_of_node(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return (const cpumask_t *)&cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return &cpu_mask_none;
	}
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_mask_none;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */