/*
 * ACPI 3.0 based NUMA setup
 * Copyright 2004 Andi Kleen, SuSE Labs.
 *
 * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
 *
 * Called from acpi_numa_init while reading the SRAT and SLIT tables.
 * Assumes all memory regions belonging to a single proximity domain
 * are in one chunk. Holes between them will be included in the node.
 */
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/mmzone.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <asm/proto.h>
#include <asm/numa.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

int acpi_numa __initdata;

static struct acpi_table_slit *acpi_slit;

static nodemask_t nodes_parsed __initdata;
static nodemask_t cpu_nodes_parsed __initdata;
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode nodes_add[MAX_NUMNODES];

static int num_node_memblks __initdata;
static struct bootnode node_memblk_range[NR_NODE_MEMBLKS] __initdata;
static int memblk_nodeid[NR_NODE_MEMBLKS] __initdata;

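/* Translate an SRAT proximity domain to a logical node id. */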
static __init int setup_node(int pxm)
{
        return acpi_map_pxm_to_node(pxm);
}

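/*
 * Check a new memory range against the memblks parsed so far and return
 * the node id of any overlapping (or identical) block, or -1 if none.
 */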
static __init int conflicting_memblks(unsigned long start, unsigned long end)
{
        int i;

        for (i = 0; i < num_node_memblks; i++) {
                struct bootnode *nd = &node_memblk_range[i];

                if (nd->start == nd->end)
                        continue;
                if (nd->end > start && nd->start < end)
                        return memblk_nodeid[i];
                if (nd->end == end && nd->start == start)
                        return memblk_nodeid[i];
        }
        return -1;
}

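/*
 * Clamp node i to [start, end); if nothing of the node remains, it is
 * left as an empty range (start == end).
 */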
static __init void cutoff_node(int i, unsigned long start, unsigned long end)
{
        struct bootnode *nd = &nodes[i];

        if (nd->start < start) {
                nd->start = start;
                if (nd->end < nd->start)
                        nd->start = nd->end;
        }
        if (nd->end > end) {
                nd->end = end;
                if (nd->start > nd->end)
                        nd->start = nd->end;
        }
}

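/* Give up on the SRAT: clear everything parsed so far and mark it unusable. */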
static __init void bad_srat(void)
{
        int i;

        printk(KERN_ERR "SRAT: SRAT not used.\n");
        acpi_numa = -1;
        for (i = 0; i < MAX_NUMNODES; i++) {
                nodes[i].start = nodes[i].end = 0;
                nodes_add[i].start = nodes_add[i].end = 0;
        }
        remove_all_active_ranges();
}

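/* True if SRAT-based NUMA has been disabled (acpi_numa < 0), e.g. after bad_srat(). */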
static __init inline int srat_disabled(void)
{
        return acpi_numa < 0;
}

/* Callback for SLIT parsing */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
        unsigned length;
        unsigned long phys;

        length = slit->header.length;
        phys = memblock_find_in_range(0, max_pfn_mapped<<PAGE_SHIFT, length,
                                      PAGE_SIZE);

        if (phys == MEMBLOCK_ERROR)
                panic(" Can not save slit!\n");

        acpi_slit = __va(phys);
        memcpy(acpi_slit, slit, length);
        memblock_x86_reserve_range(phys, phys + length, "ACPI SLIT");
}

/* Callback for Proximity Domain -> x2APIC mapping */
void __init
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
        int pxm, node;
        int apic_id;

        if (srat_disabled())
                return;
        if (pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity)) {
                bad_srat();
                return;
        }
        if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
                return;
        pxm = pa->proximity_domain;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
                bad_srat();
                return;
        }

        apic_id = pa->apic_id;
        if (apic_id >= MAX_LOCAL_APIC) {
                printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node);
                return;
        }
        set_apicid_to_node(apic_id, node);
        node_set(node, cpu_nodes_parsed);
        acpi_numa = 1;
        printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n",
               pxm, apic_id, node);
}

/* Callback for Proximity Domain -> LAPIC mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
        int pxm, node;
        int apic_id;

        if (srat_disabled())
                return;
        if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
                bad_srat();
                return;
        }
        if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
                return;
        pxm = pa->proximity_domain_lo;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
                bad_srat();
                return;
        }

        if (get_uv_system_type() >= UV_X2APIC)
                apic_id = (pa->apic_id << 8) | pa->local_sapic_eid;
        else
                apic_id = pa->apic_id;

        if (apic_id >= MAX_LOCAL_APIC) {
                printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node);
                return;
        }

        set_apicid_to_node(apic_id, node);
        node_set(node, cpu_nodes_parsed);
        acpi_numa = 1;
        printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n",
               pxm, apic_id, node);
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static inline int save_add_info(void) {return 1;}
#else
static inline int save_add_info(void) {return 0;}
#endif

/*
 * Update nodes_add[]
 * This code supports one contiguous hot add area per node
 */
static void __init
update_nodes_add(int node, unsigned long start, unsigned long end)
{
        unsigned long s_pfn = start >> PAGE_SHIFT;
        unsigned long e_pfn = end >> PAGE_SHIFT;
        int changed = 0;
        struct bootnode *nd = &nodes_add[node];

        /* I had some trouble with strange memory hotadd regions breaking
           the boot. Be very strict here and reject anything unexpected.
           If you want working memory hotadd write correct SRATs.
           The node size check is a basic sanity check to guard against
           mistakes. */
        if ((signed long)(end - start) < NODE_MIN_SIZE) {
                printk(KERN_ERR "SRAT: Hotplug area too small\n");
                return;
        }
        /* This check might be a bit too strict, but I'm keeping it for now. */
        if (absent_pages_in_range(s_pfn, e_pfn) != e_pfn - s_pfn) {
                printk(KERN_ERR
                        "SRAT: Hotplug area %lu -> %lu has existing memory\n",
                        s_pfn, e_pfn);
                return;
        }

        /* Merge with (or start) the one hot-add area tracked for this node. */
        if (nd->start == nd->end) {
                nd->start = start;
                nd->end = end;
                changed = 1;
        } else {
                if (nd->start == end) {
                        nd->start = start;
                        changed = 1;
                }
                if (nd->end == start) {
                        nd->end = end;
                        changed = 1;
                }
                if (!changed)
                        printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
        }

        if (changed) {
                node_set(node, cpu_nodes_parsed);
                printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n",
                                 nd->start, nd->end);
        }
}

/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
void __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
        struct bootnode *nd;
        unsigned long start, end;
        int node, pxm;
        int i;

        if (srat_disabled())
                return;
        if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
                bad_srat();
                return;
        }
        if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
                return;
        if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
                return;
        start = ma->base_address;
        end = start + ma->length;
        pxm = ma->proximity_domain;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains.\n");
                bad_srat();
                return;
        }
        i = conflicting_memblks(start, end);
        if (i == node) {
                printk(KERN_WARNING
                "SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
                        pxm, start, end, nodes[i].start, nodes[i].end);
        } else if (i >= 0) {
                printk(KERN_ERR
                       "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n",
                       pxm, start, end, node_to_pxm(i),
                       nodes[i].start, nodes[i].end);
                bad_srat();
                return;
        }

        printk(KERN_INFO "SRAT: Node %u PXM %u %lx-%lx\n", node, pxm,
               start, end);

        if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)) {
                nd = &nodes[node];
                if (!node_test_and_set(node, nodes_parsed)) {
                        nd->start = start;
                        nd->end = end;
                } else {
                        if (start < nd->start)
                                nd->start = start;
                        if (nd->end < end)
                                nd->end = end;
                }
        } else
                update_nodes_add(node, start, end);

        node_memblk_range[num_node_memblks].start = start;
        node_memblk_range[num_node_memblks].end = end;
        memblk_nodeid[num_node_memblks] = node;
        num_node_memblks++;
}

/* Sanity check to catch more bad SRATs (they are amazingly common).
   Make sure the PXMs cover all memory. */
static int __init nodes_cover_memory(const struct bootnode *nodes)
{
        int i;
        unsigned long pxmram, e820ram;

        pxmram = 0;
        for_each_node_mask(i, nodes_parsed) {
                unsigned long s = nodes[i].start >> PAGE_SHIFT;
                unsigned long e = nodes[i].end >> PAGE_SHIFT;
                pxmram += e - s;
                pxmram -= __absent_pages_in_range(i, s, e);
                if ((long)pxmram < 0)
                        pxmram = 0;
        }

        e820ram = max_pfn - (memblock_x86_hole_size(0, max_pfn<<PAGE_SHIFT)>>PAGE_SHIFT);
        /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
        if ((long)(e820ram - pxmram) >= (1<<(20 - PAGE_SHIFT))) {
                printk(KERN_ERR
        "SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
                        (pxmram << PAGE_SHIFT) >> 20,
                        (e820ram << PAGE_SHIFT) >> 20);
                return 0;
        }
        return 1;
}

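/* Nothing to fix up after SRAT/SLIT parsing on x86-64. */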
void __init acpi_numa_arch_fixup(void) {}

#ifdef CONFIG_NUMA_EMU
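/*
 * Hand the parsed physical node ranges, clipped to [start, end), to the
 * NUMA emulation code.
 */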
void __init acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
                           unsigned long end)
{
        int i;

        for_each_node_mask(i, nodes_parsed) {
                cutoff_node(i, start, end);
                physnodes[i].start = nodes[i].start;
                physnodes[i].end = nodes[i].end;
        }
}
#endif /* CONFIG_NUMA_EMU */

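/* Parse SRAT/SLIT via acpi_numa_init() and report whether usable NUMA
   information was found. */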
int __init x86_acpi_numa_init(void)
{
        int ret;

        ret = acpi_numa_init();
        if (ret < 0)
                return ret;
        return srat_disabled() ? -EINVAL : 0;
}

/* Use the information discovered above to actually set up the nodes. */
int __init acpi_scan_nodes(void)
{
        int i;

        if (acpi_numa <= 0)
                return -1;

        /* First clean up the node list */
        for (i = 0; i < MAX_NUMNODES; i++)
                cutoff_node(i, 0, max_pfn << PAGE_SHIFT);

        /*
         * Join together blocks on the same node, holes between
         * which don't overlap with memory on other nodes.
         */
        for (i = 0; i < num_node_memblks; ++i) {
                int j, k;

                for (j = i + 1; j < num_node_memblks; ++j) {
                        unsigned long start, end;

                        if (memblk_nodeid[i] != memblk_nodeid[j])
                                continue;
                        /* Hole between the two blocks, if any */
                        start = min(node_memblk_range[i].end,
                                    node_memblk_range[j].end);
                        end = max(node_memblk_range[i].start,
                                  node_memblk_range[j].start);
                        /* Does memory of any other node fall into that hole? */
                        for (k = 0; k < num_node_memblks; ++k) {
                                if (memblk_nodeid[i] == memblk_nodeid[k])
                                        continue;
                                if (start < node_memblk_range[k].end &&
                                    end > node_memblk_range[k].start)
                                        break;
                        }
                        if (k < num_node_memblks)
                                continue;
                        /* Safe to merge: take the union of the two blocks */
                        start = min(node_memblk_range[i].start,
                                    node_memblk_range[j].start);
                        end = max(node_memblk_range[i].end,
                                  node_memblk_range[j].end);
                        printk(KERN_INFO "SRAT: Node %d "
                               "[%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n",
                               memblk_nodeid[i],
                               node_memblk_range[i].start,
                               node_memblk_range[i].end,
                               node_memblk_range[j].start,
                               node_memblk_range[j].end,
                               start, end);
                        node_memblk_range[i].start = start;
                        node_memblk_range[i].end = end;
                        /* Drop block j and compact the arrays */
                        k = --num_node_memblks - j;
                        memmove(memblk_nodeid + j, memblk_nodeid + j+1,
                                k * sizeof(*memblk_nodeid));
                        memmove(node_memblk_range + j, node_memblk_range + j+1,
                                k * sizeof(*node_memblk_range));
                        --j;
                }
        }

        memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
                                           memblk_nodeid);
        if (memnode_shift < 0) {
                printk(KERN_ERR
                     "SRAT: No NUMA node hash function found. Contact maintainer\n");
                bad_srat();
                return -1;
        }

        for (i = 0; i < num_node_memblks; i++)
                memblock_x86_register_active_regions(memblk_nodeid[i],
                                node_memblk_range[i].start >> PAGE_SHIFT,
                                node_memblk_range[i].end >> PAGE_SHIFT);

        /* for out of order entries in SRAT */
        sort_node_map();
        if (!nodes_cover_memory(nodes)) {
                bad_srat();
                return -1;
        }

        init_memory_mapping_high();

        /* Account for nodes with cpus and no memory */
        nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed);

        /* Finally register nodes */
        for_each_node_mask(i, node_possible_map)
                setup_node_bootmem(i, nodes[i].start, nodes[i].end);
        /* Try again in case setup_node_bootmem missed one due
           to missing bootmem */
        for_each_node_mask(i, node_possible_map)
                if (!node_online(i))
                        setup_node_bootmem(i, nodes[i].start, nodes[i].end);

        for (i = 0; i < nr_cpu_ids; i++) {
                int node = early_cpu_to_node(i);

                if (node == NUMA_NO_NODE)
                        continue;
                if (!node_online(node))
                        numa_clear_node(i);
        }

        return 0;
}

#ifdef CONFIG_NUMA_EMU
static int fake_node_to_pxm_map[MAX_NUMNODES] __initdata = {
        [0 ... MAX_NUMNODES-1] = PXM_INVAL
};
static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

static int __init find_node_by_addr(unsigned long addr)
{
        int ret = NUMA_NO_NODE;
        int i;

        for_each_node_mask(i, nodes_parsed) {
                /*
                 * Find the real node that this emulated node appears on. For
                 * the sake of simplicity, we only use a real node's starting
                 * address to determine which emulated node it appears on.
                 */
                if (addr >= nodes[i].start && addr < nodes[i].end) {
                        ret = i;
                        break;
                }
        }
        return ret;
}

/*
 * In NUMA emulation, we need to setup proximity domain (_PXM) to node ID
 * mappings that respect the real ACPI topology but reflect our emulated
 * environment. For each emulated node, we find which real node it appears on
 * and create PXM to NID mappings for those fake nodes which mirror that
 * locality. SLIT will now represent the correct distances between emulated
 * nodes as a result of the real topology.
 */
void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
{
        int i, j;

        for (i = 0; i < num_nodes; i++) {
                int nid, pxm;

                nid = find_node_by_addr(fake_nodes[i].start);
                if (nid == NUMA_NO_NODE)
                        continue;
                pxm = node_to_pxm(nid);
                if (pxm == PXM_INVAL)
                        continue;
                fake_node_to_pxm_map[i] = pxm;
                /*
                 * For each apicid_to_node mapping that exists for this real
                 * node, it must now point to the fake node ID.
                 */
                for (j = 0; j < MAX_LOCAL_APIC; j++)
                        if (__apicid_to_node[j] == nid &&
                            fake_apicid_to_node[j] == NUMA_NO_NODE)
                                fake_apicid_to_node[j] = i;
        }

        /*
         * If there are apicid-to-node mappings for physical nodes that do not
         * have a corresponding emulated node, it should default to a guaranteed
         * value.
         */
        for (i = 0; i < MAX_LOCAL_APIC; i++)
                if (__apicid_to_node[i] != NUMA_NO_NODE &&
                    fake_apicid_to_node[i] == NUMA_NO_NODE)
                        fake_apicid_to_node[i] = 0;

        for (i = 0; i < num_nodes; i++)
                __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
        memcpy(__apicid_to_node, fake_apicid_to_node, sizeof(__apicid_to_node));

        nodes_clear(nodes_parsed);
        for (i = 0; i < num_nodes; i++)
                if (fake_nodes[i].start != fake_nodes[i].end)
                        node_set(i, nodes_parsed);
}

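/*
 * With NUMA emulation, fake nodes carved from the same physical proximity
 * domain count as local when no SLIT is available.
 */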
static int null_slit_node_compare(int a, int b)
{
        return node_to_pxm(a) == node_to_pxm(b);
}
#else
static int null_slit_node_compare(int a, int b)
{
        return a == b;
}
#endif /* CONFIG_NUMA_EMU */

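/*
 * Distance between two nodes, taken from the saved SLIT when one exists,
 * otherwise LOCAL_DISTANCE/REMOTE_DISTANCE based on null_slit_node_compare().
 */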
int __node_distance(int a, int b)
{
        int index;

        if (!acpi_slit)
                return null_slit_node_compare(a, b) ? LOCAL_DISTANCE :
                                                      REMOTE_DISTANCE;
        index = acpi_slit->locality_count * node_to_pxm(a);
        return acpi_slit->entry[index + node_to_pxm(b)];
}

EXPORT_SYMBOL(__node_distance);

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || defined(CONFIG_ACPI_HOTPLUG_MEMORY)
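/*
 * For memory hotplug: report which node's hot-add area covers a physical
 * address; defaults to node 0 if none matches.
 */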
int memory_add_physaddr_to_nid(u64 start)
{
        int i, ret = 0;

        for_each_node(i)
                if (nodes_add[i].start <= start && nodes_add[i].end > start)
                        ret = i;

        return ret;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif