/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/acpi.h>

#include <asm/proto.h>
#include <asm/amd_nb.h>
struct numa_meminfo {
	int nr_blks;
	struct numa_memblk blk[NR_NODE_MEMBLKS];
};
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

nodemask_t numa_nodes_parsed __initdata;

struct memnode memnode;

static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size;

static struct numa_meminfo numa_meminfo __initdata;

static int numa_distance_cnt;
static u8 *numa_distance;
#ifdef CONFIG_NUMA_EMU
static bool numa_emu_dist;
#endif
/*
 * Given a shift value, try to populate memnodemap[].
 * Returns:
 *  1 if OK
 *  0 if memnodemap[] is too small (or shift too small)
 * -1 if nodes overlap or RAM is lost (shift too big)
 */
static int __init populate_memnodemap(const struct numa_meminfo *mi, int shift)
{
	unsigned long addr, end;
	int i, res = -1;

	memset(memnodemap, 0xff, sizeof(s16) * memnodemapsize);
	for (i = 0; i < mi->nr_blks; i++) {
		addr = mi->blk[i].start;
		end = mi->blk[i].end;
		if (addr >= end)
			continue;
		if ((end >> shift) >= memnodemapsize)
			return 0;
		do {
			if (memnodemap[addr >> shift] != NUMA_NO_NODE)
				return -1;
			memnodemap[addr >> shift] = mi->blk[i].nid;
			addr += (1UL << shift);
		} while (addr < end);
		res = 1;
	}
	return res;
}
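/*
 * Once memnodemap[] is populated, the physical-address to node lookup is a
 * single shift and array index.  A minimal sketch of the lookup (the real
 * helper is phys_to_nid(); the exact shape below is illustrative only):
 *
 *	static inline int example_phys_to_nid(unsigned long paddr)
 *	{
 *		return memnodemap[paddr >> memnode_shift];
 *	}
 *
 * With memnode_shift == 30, for example, address 0x80000000 maps to slot 2.
 */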
static int __init allocate_cachealigned_memnodemap(void)
{
	unsigned long addr;

	memnodemap = memnode.embedded_map;
	if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
		return 0;

	addr = 0x8000;
	nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
	nodemap_addr = memblock_find_in_range(addr, get_max_mapped(),
					      nodemap_size, L1_CACHE_BYTES);
	if (nodemap_addr == MEMBLOCK_ERROR) {
		printk(KERN_ERR
		       "NUMA: Unable to allocate Memory to Node hash map\n");
		nodemap_addr = nodemap_size = 0;
		return -1;
	}
	memnodemap = phys_to_virt(nodemap_addr);
	memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size,
				   "MEMNODEMAP");

	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
	       nodemap_addr, nodemap_addr + nodemap_size);
	return 0;
}
/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 */
static int __init extract_lsb_from_nodes(const struct numa_meminfo *mi)
{
	int i, nodes_used = 0;
	unsigned long start, end;
	unsigned long bitfield = 0, memtop = 0;

	for (i = 0; i < mi->nr_blks; i++) {
		start = mi->blk[i].start;
		end = mi->blk[i].end;
		if (start >= end)
			continue;
		bitfield |= start;
		nodes_used++;
		if (end > memtop)
			memtop = end;
	}
	if (nodes_used <= 1)
		i = 63;
	else
		i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
	memnodemapsize = (memtop >> i) + 1;
	return i;
}
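/*
 * Worked example (illustrative numbers): two blocks starting at 0x0 and
 * 0x40000000 OR together to a bitfield of 0x40000000, so find_first_bit()
 * yields a shift of 30 (1GB granularity).  With memtop = 0x80000000 this
 * gives memnodemapsize = (0x80000000 >> 30) + 1 = 3 entries.
 */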
static int __init compute_hash_shift(const struct numa_meminfo *mi)
{
	int shift;

	shift = extract_lsb_from_nodes(mi);
	if (allocate_cachealigned_memnodemap())
		return -1;
	printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", shift);

	if (populate_memnodemap(mi, shift) != 1) {
		printk(KERN_INFO "Your memory is not aligned; you need to "
		       "rebuild your kernel with a bigger NODEMAPSIZE, "
		       "shift=%d\n", shift);
		return -1;
	}
	return shift;
}
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
	return phys_to_nid(pfn << PAGE_SHIFT);
}
static void * __init early_node_mem(int nodeid, unsigned long start,
				    unsigned long end, unsigned long size,
				    unsigned long align)
{
	unsigned long mem;

	/*
	 * Allocate as high as possible; NODE_DATA and similar structures
	 * will end up here.
	 */
	if (start < (MAX_DMA_PFN<<PAGE_SHIFT))
		start = MAX_DMA_PFN<<PAGE_SHIFT;
	if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
	    end > (MAX_DMA32_PFN<<PAGE_SHIFT))
		start = MAX_DMA32_PFN<<PAGE_SHIFT;
	mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align);
	if (mem != MEMBLOCK_ERROR)
		return __va(mem);

	/* extend the search scope */
	end = max_pfn_mapped << PAGE_SHIFT;
	start = MAX_DMA_PFN << PAGE_SHIFT;
	mem = memblock_find_in_range(start, end, size, align);
	if (mem != MEMBLOCK_ERROR)
		return __va(mem);

	printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
	       size, nodeid);
	return NULL;
}
static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n",
			   nid, start, end);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("NUMA: too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}
static void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}
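/*
 * For illustration only: a firmware parser (e.g. the ACPI SRAT code) would
 * register one block per detected affinity range, roughly as sketched below.
 * The ranges are hypothetical, not from any real table, and this helper is
 * never called:
 */
static void __init __maybe_unused numa_add_memblk_example(void)
{
	/* node 0: first 2GB, node 1: 2GB-4GB (made-up layout) */
	numa_add_memblk(0, 0, 0x80000000ULL);
	numa_add_memblk(1, 0x80000000ULL, 0x100000000ULL);
	node_set(0, numa_nodes_parsed);
	node_set(1, numa_nodes_parsed);
}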
/* Initialize bootmem allocator for a node */
void __init
setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
	unsigned long start_pfn, last_pfn, nodedata_phys;
	const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	int nid;

	if (!end)
		return;

	/*
	 * Don't confuse the VM with a node that doesn't have the
	 * minimum amount of memory:
	 */
	if (end && (end - start) < NODE_MIN_SIZE)
		return;

	start = roundup(start, ZONE_ALIGN);

	printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", nodeid,
	       start, end);

	start_pfn = start >> PAGE_SHIFT;
	last_pfn = end >> PAGE_SHIFT;

	node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
					   SMP_CACHE_BYTES);
	if (node_data[nodeid] == NULL)
		return;
	nodedata_phys = __pa(node_data[nodeid]);
	memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size,
				   "NODE_DATA");
	printk(KERN_INFO "  NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
	       nodedata_phys + pgdat_size - 1);
	nid = phys_to_nid(nodedata_phys);
	if (nid != nodeid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nodeid, nid);

	memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
	NODE_DATA(nodeid)->node_id = nodeid;
	NODE_DATA(nodeid)->node_start_pfn = start_pfn;
	NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;

	node_set_online(nodeid);
}
static int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = (u64)max_pfn << PAGE_SHIFT;
	int i, j, k;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* make sure all blocks are inside the limits */
		bi->start = max(bi->start, low);
		bi->end = min(bi->end, high);

		/* and there's no empty block */
		if (bi->start == bi->end) {
			numa_remove_memblk_from(i--, mi);
			continue;
		}

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			unsigned long start, end;

			/*
			 * See whether there are overlapping blocks.  Whine
			 * about but allow overlaps of the same nid.  They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
					       bi->nid, bi->start, bi->end,
					       bj->nid, bj->start, bj->end);
					return -EINVAL;
				}
				pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
					   bi->nid, bi->start, bi->end,
					   bj->start, bj->end);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = max(min(bi->start, bj->start), low);
			end = min(max(bi->end, bj->end), high);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n",
			       bi->nid, bi->start, bi->end, bj->start, bj->end,
			       start, end);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}
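/*
 * Merge example (made-up ranges): node 0 blocks [0x0,0x1000) and
 * [0x2000,0x3000) are joined into [0x0,0x3000) only if no other node has
 * memory inside the combined span; if node 1 owned [0x1000,0x2000), the
 * hole would overlap another node and the merge is skipped.
 */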
/*
 * Set, in *@nodemask, the nodes which have memory in @mi.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}
/*
 * Reset the distance table.  The current table is freed; the next
 * numa_set_distance() call will create a new one.
 */
static void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance may hold the 1LU failure marker, so test the count */
	if (numa_distance_cnt)
		memblock_x86_free_range(__pa(numa_distance),
					__pa(numa_distance) + size);
	numa_distance = NULL;	/* enable table creation */
	numa_distance_cnt = 0;
}
/*
 * Set the distance from node @from to node @to to @distance.  If the
 * distance table doesn't exist, one large enough to accommodate all the
 * currently known nodes will be created.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance) {
		nodemask_t nodes_parsed;
		size_t size;
		int i, j, cnt = 0;
		u64 phys;

		/* size the new table and allocate it */
		nodes_parsed = numa_nodes_parsed;
		numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

		for_each_node_mask(i, nodes_parsed)
			cnt = i;
		size = ++cnt * sizeof(numa_distance[0]);

		phys = memblock_find_in_range(0,
					      (u64)max_pfn_mapped << PAGE_SHIFT,
					      size, PAGE_SIZE);
		if (phys == MEMBLOCK_ERROR) {
			pr_warning("NUMA: Warning: can't allocate distance table!\n");
			/* don't retry until explicitly reset */
			numa_distance = (void *)1LU;
			return;
		}
		memblock_x86_reserve_range(phys, phys + size, "NUMA DIST");

		numa_distance = __va(phys);
		numa_distance_cnt = cnt;

		/* fill with the default distances */
		for (i = 0; i < cnt; i++)
			for (j = 0; j < cnt; j++)
				numa_distance[i * cnt + j] = i == j ?
					LOCAL_DISTANCE : REMOTE_DISTANCE;
		printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);
	}

	if (from >= numa_distance_cnt || to >= numa_distance_cnt) {
		printk_once(KERN_DEBUG "NUMA: Debug: distance out of bounds, from=%d to=%d distance=%d\n",
			    from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}
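/*
 * Illustrative usage, e.g. from a SLIT-style parser (distances made up):
 *
 *	numa_set_distance(0, 0, LOCAL_DISTANCE);	// 10
 *	numa_set_distance(0, 1, 21);			// one hop away
 *	numa_set_distance(1, 0, 21);
 *
 * The first call also allocates and default-fills the table as a side
 * effect; later calls just overwrite single entries.
 */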
int __node_distance(int from, int to)
{
#if defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA_EMU)
	if (numa_emu_dist)
		return acpi_emu_node_distance(from, to);
#endif
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);
/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	unsigned long numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		unsigned long s = mi->blk[i].start >> PAGE_SHIFT;
		unsigned long e = mi->blk[i].end >> PAGE_SHIFT;

		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((long)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - (memblock_x86_hole_size(0,
					max_pfn << PAGE_SHIFT) >> PAGE_SHIFT);
	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((long)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}
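/*
 * Example of the slack check (PAGE_SHIFT == 12, so 1 << (20 - 12) == 256
 * pages == 1MB): with 4096MB of e820 RAM, nodes covering only 4094MB leave
 * a 2MB deficit and the NUMA configuration is rejected; a deficit under
 * 1MB would be tolerated.
 */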
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	int i, j, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	memnode_shift = compute_hash_shift(mi);
	if (memnode_shift < 0) {
		printk(KERN_ERR "NUMA: No NUMA node hash function found. Contact maintainer\n");
		return -EINVAL;
	}

	for (i = 0; i < mi->nr_blks; i++)
		memblock_x86_register_active_regions(mi->blk[i].nid,
					mi->blk[i].start >> PAGE_SHIFT,
					mi->blk[i].end >> PAGE_SHIFT);

	/* for out of order entries */
	sort_node_map();
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	init_memory_mapping_high();

	/*
	 * Finally register nodes.  Do it twice in case setup_node_bootmem
	 * missed one due to missing bootmem.
	 */
	for (i = 0; i < 2; i++) {
		for_each_node_mask(nid, node_possible_map) {
			u64 start = (u64)max_pfn << PAGE_SHIFT;
			u64 end = 0;

			if (node_online(nid))
				continue;

			for (j = 0; j < mi->nr_blks; j++) {
				if (nid != mi->blk[j].nid)
					continue;
				start = min(mi->blk[j].start, start);
				end = max(mi->blk[j].end, end);
			}

			if (start < end)
				setup_node_bootmem(nid, start, end);
		}
	}

	return 0;
}
#ifdef CONFIG_NUMA_EMU
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode physnodes[MAX_NUMNODES] __initdata;

static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata;
static char *emu_cmdline __initdata;
void __init numa_emu_cmdline(char *str)
{
	emu_cmdline = str;
}
int __init find_node_by_addr(unsigned long addr)
{
	const struct numa_meminfo *mi = &numa_meminfo;
	int i;

	for (i = 0; i < mi->nr_blks; i++) {
		/*
		 * Find the real node that this emulated node appears on.  For
		 * the sake of simplicity, we only use a real node's starting
		 * address to determine which emulated node it appears on.
		 */
		if (addr >= mi->blk[i].start && addr < mi->blk[i].end)
			return mi->blk[i].nid;
	}
	return NUMA_NO_NODE;
}
static int __init setup_physnodes(unsigned long start, unsigned long end)
{
	const struct numa_meminfo *mi = &numa_meminfo;
	int ret = 0;
	int i;

	memset(physnodes, 0, sizeof(physnodes));

	for (i = 0; i < mi->nr_blks; i++) {
		int nid = mi->blk[i].nid;

		if (physnodes[nid].start == physnodes[nid].end) {
			physnodes[nid].start = mi->blk[i].start;
			physnodes[nid].end = mi->blk[i].end;
		} else {
			physnodes[nid].start = min(physnodes[nid].start,
						   mi->blk[i].start);
			physnodes[nid].end = max(physnodes[nid].end,
						 mi->blk[i].end);
		}
	}

	/*
	 * Basic sanity checking on the physical node map: there may be errors
	 * if the SRAT or AMD code incorrectly reported the topology or the
	 * mem= kernel parameter is used.
	 */
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (physnodes[i].start == physnodes[i].end)
			continue;
		if (physnodes[i].start > end) {
			physnodes[i].end = physnodes[i].start;
			continue;
		}
		if (physnodes[i].end < start) {
			physnodes[i].start = physnodes[i].end;
			continue;
		}
		if (physnodes[i].start < start)
			physnodes[i].start = start;
		if (physnodes[i].end > end)
			physnodes[i].end = end;
		ret++;
	}

	/*
	 * If no physical topology was detected, a single node is faked to
	 * cover the entire address space.
	 */
	if (!ret) {
		physnodes[ret].start = start;
		physnodes[ret].end = end;
		ret = 1;
	}
	return ret;
}
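/*
 * Example (hypothetical "mem=2G" boot): the usable range passed in is
 * [0, 2GB).  A physical node reported as [1GB, 4GB) is clamped to
 * [1GB, 2GB), and one reported entirely above 2GB is emptied and no
 * longer counted.
 */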
static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
{
	int i;

#ifdef CONFIG_ACPI_NUMA
	if (acpi)
		acpi_fake_nodes(nodes, nr_nodes);
#endif
#ifdef CONFIG_AMD_NUMA
	if (amd)
		amd_fake_nodes(nodes, nr_nodes);
#endif
	if (!acpi && !amd)
		for (i = 0; i < nr_cpu_ids; i++)
			numa_set_node(i, 0);
}
/*
 * Sets up @nid to cover the range from @addr to @addr + @size.  If the end
 * boundary is greater than @max_addr, then @max_addr is used instead.
 * The return value is 0 if there is additional memory left for allocation
 * past @addr and -1 otherwise.  @addr is adjusted to be at the end of the
 * node.
 */
static int __init setup_node_range(int nid, int physnid,
				   u64 *addr, u64 size, u64 max_addr)
{
	int ret = 0;

	nodes[nid].start = *addr;
	*addr += size;
	if (*addr >= max_addr) {
		*addr = max_addr;
		ret = -1;
	}
	nodes[nid].end = *addr;
	node_set(nid, node_possible_map);

	if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
		emu_nid_to_phys[nid] = physnid;

	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
	       nodes[nid].start, nodes[nid].end,
	       (nodes[nid].end - nodes[nid].start) >> 20);
	return ret;
}
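/*
 * Illustrative call (made-up numbers): carve a 512MB fake node 3 out of
 * physical node 1 whose allocation cursor sits at 2GB:
 *
 *	u64 addr = 0x80000000ULL;
 *	setup_node_range(3, 1, &addr, 512ULL << 20, 0x100000000ULL);
 *
 * Afterwards addr == 0xA0000000 and the call returns 0, since the 4GB
 * limit was not reached.
 */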
/*
 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from
 * addr to max_addr.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
{
	nodemask_t physnode_mask = NODE_MASK_NONE;
	u64 size;
	int big;
	int ret = 0;
	int i;

	if (nr_nodes > MAX_NUMNODES) {
		pr_info("numa=fake=%d too large, reducing to %d\n",
			nr_nodes, MAX_NUMNODES);
		nr_nodes = MAX_NUMNODES;
	}

	size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
	/*
	 * Calculate the number of big nodes that can be allocated as a result
	 * of consolidating the remainder.
	 */
	big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
		FAKE_NODE_MIN_SIZE;

	size &= FAKE_NODE_MIN_HASH_MASK;
	if (!size) {
		pr_err("Not enough memory for each node.  "
			"NUMA emulation disabled.\n");
		return -1;
	}

	for (i = 0; i < MAX_NUMNODES; i++)
		if (physnodes[i].start != physnodes[i].end)
			node_set(i, physnode_mask);

	/*
	 * Continue to fill physical nodes with fake nodes until there is no
	 * memory left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 end = physnodes[i].start + size;
			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);

			if (ret < big)
				end += FAKE_NODE_MIN_SIZE;

			/*
			 * Continue to add memory to this fake node if its
			 * non-reserved memory is less than the per-node size.
			 */
			while (end - physnodes[i].start -
				memblock_x86_hole_size(physnodes[i].start, end) < size) {
				end += FAKE_NODE_MIN_SIZE;
				if (end > physnodes[i].end) {
					end = physnodes[i].end;
					break;
				}
			}

			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (physnodes[i].end - end -
			    memblock_x86_hole_size(end, physnodes[i].end) < size)
				end = physnodes[i].end;

			/*
			 * Avoid allocating more nodes than requested, which can
			 * happen as a result of rounding down each node's size
			 * to FAKE_NODE_MIN_SIZE.
			 */
			if (nodes_weight(physnode_mask) + ret >= nr_nodes)
				end = physnodes[i].end;

			if (setup_node_range(ret++, i, &physnodes[i].start,
						end - physnodes[i].start,
						physnodes[i].end) < 0)
				node_clear(i, physnode_mask);
		}
	}
	return ret;
}
/*
 * Returns the end address of a node so that there is at least `size' amount of
 * non-reserved memory or `max_addr' is reached.
 */
static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
	u64 end = start + size;

	while (end - start - memblock_x86_hole_size(start, end) < size) {
		end += FAKE_NODE_MIN_SIZE;
		if (end > max_addr) {
			end = max_addr;
			break;
		}
	}
	return end;
}
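/*
 * Worked example (hypothetical hole): start = 1GB, size = 512MB, with a
 * 256MB reserved hole at 1.25GB.  The initial end of 1.5GB covers only
 * 256MB of usable memory, so end is bumped in FAKE_NODE_MIN_SIZE steps
 * until 512MB of usable memory is spanned or max_addr caps the node.
 */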
/*
 * Sets up fake nodes of `size' interleaved over physical nodes ranging from
 * `addr' to `max_addr'.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
{
	nodemask_t physnode_mask = NODE_MASK_NONE;
	u64 min_size;
	int ret = 0;
	int i;

	/*
	 * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
	 * increased accordingly if the requested size is too small.  This
	 * creates a uniform distribution of node sizes across the entire
	 * machine (but not necessarily over physical nodes).
	 */
	min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
						MAX_NUMNODES;
	min_size = max(min_size, FAKE_NODE_MIN_SIZE);
	if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
		min_size = (min_size + FAKE_NODE_MIN_SIZE) &
						FAKE_NODE_MIN_HASH_MASK;
	if (size < min_size) {
		pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
			size >> 20, min_size >> 20);
		size = min_size;
	}
	size &= FAKE_NODE_MIN_HASH_MASK;

	for (i = 0; i < MAX_NUMNODES; i++)
		if (physnodes[i].start != physnodes[i].end)
			node_set(i, physnode_mask);

	/*
	 * Fill physical nodes with fake nodes of size until there is no memory
	 * left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;
			u64 end;

			end = find_end_of_node(physnodes[i].start,
						physnodes[i].end, size);
			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (physnodes[i].end - end -
			    memblock_x86_hole_size(end, physnodes[i].end) < size)
				end = physnodes[i].end;

			/*
			 * Setup the fake node that will be allocated as bootmem
			 * later.  If setup_node_range() returns non-zero, there
			 * is no more memory available on this physical node.
			 */
			if (setup_node_range(ret++, i, &physnodes[i].start,
						end - physnodes[i].start,
						physnodes[i].end) < 0)
				node_clear(i, physnode_mask);
		}
	}
	return ret;
}
/*
 * Sets up the system RAM area from start_pfn to last_pfn according to the
 * numa=fake command-line option.
 */
static int __init numa_emulation(int acpi, int amd)
{
	static struct numa_meminfo ei __initdata;
	const u64 max_addr = max_pfn << PAGE_SHIFT;
	int num_nodes;
	int i;

	for (i = 0; i < MAX_NUMNODES; i++)
		emu_nid_to_phys[i] = NUMA_NO_NODE;

	/*
	 * If the numa=fake command-line parameter contains an 'M' or 'G',
	 * it represents the fixed node size.  Otherwise, if it is just a
	 * single number N, split the system RAM into N fake nodes.
	 */
	if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) {
		u64 size;

		size = memparse(emu_cmdline, &emu_cmdline);
		num_nodes = split_nodes_size_interleave(0, max_addr, size);
	} else {
		unsigned long n;

		n = simple_strtoul(emu_cmdline, NULL, 0);
		num_nodes = split_nodes_interleave(0, max_addr, n);
	}
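	/*
	 * Examples of the accepted forms (illustrative):
	 *	numa=fake=8	-> eight equally sized nodes over all RAM
	 *	numa=fake=512M	-> as many 512MB nodes as will fit
	 */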
	if (num_nodes < 0)
		return num_nodes;

	/* make sure all emulated nodes are mapped to a physical node */
	for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
		if (emu_nid_to_phys[i] == NUMA_NO_NODE)
			emu_nid_to_phys[i] = 0;

	ei.nr_blks = num_nodes;
	for (i = 0; i < ei.nr_blks; i++) {
		ei.blk[i].start = nodes[i].start;
		ei.blk[i].end = nodes[i].end;
		ei.blk[i].nid = i;
	}

	memnode_shift = compute_hash_shift(&ei);
	if (memnode_shift < 0) {
		memnode_shift = 0;
		printk(KERN_ERR "No NUMA hash function found. NUMA emulation "
		       "disabled.\n");
		return -1;
	}

	/*
	 * We need to vacate all active ranges that may have been registered
	 * for the e820 memory map.
	 */
	remove_all_active_ranges();
	for_each_node_mask(i, node_possible_map)
		memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
						     nodes[i].end >> PAGE_SHIFT);
	init_memory_mapping_high();
	for_each_node_mask(i, node_possible_map)
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	fake_physnodes(acpi, amd, num_nodes);
	numa_emu_dist = true;
	return 0;
}
#endif /* CONFIG_NUMA_EMU */
static int dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
	       0LU, max_pfn << PAGE_SHIFT);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT);

	return 0;
}
void __init initmem_init(void)
{
	int (*numa_init[])(void) = { [2] = dummy_numa_init };
	int i, j;

	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		numa_init[0] = x86_acpi_numa_init;
#endif
#ifdef CONFIG_AMD_NUMA
		numa_init[1] = amd_numa_init;
#endif
	}

	for (i = 0; i < ARRAY_SIZE(numa_init); i++) {
		if (!numa_init[i])
			continue;

		for (j = 0; j < MAX_LOCAL_APIC; j++)
			set_apicid_to_node(j, NUMA_NO_NODE);

		nodes_clear(numa_nodes_parsed);
		nodes_clear(node_possible_map);
		nodes_clear(node_online_map);
		memset(&numa_meminfo, 0, sizeof(numa_meminfo));
		remove_all_active_ranges();
		numa_reset_distance();

		if (numa_init[i]() < 0)
			continue;

		if (numa_cleanup_meminfo(&numa_meminfo) < 0)
			continue;
#ifdef CONFIG_NUMA_EMU
		setup_physnodes(0, max_pfn << PAGE_SHIFT);
		if (emu_cmdline && !numa_emulation(i == 0, i == 1))
			return;

		/* not emulating, build identity mapping for numa_add_cpu() */
		for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++)
			emu_nid_to_phys[j] = j;
#endif
		nodes_clear(node_possible_map);
		nodes_clear(node_online_map);

		if (numa_register_memblks(&numa_meminfo) < 0)
			continue;

		for (j = 0; j < nr_cpu_ids; j++) {
			int nid = early_cpu_to_node(j);

			if (nid == NUMA_NO_NODE)
				continue;
			if (!node_online(nid))
				numa_clear_node(j);
		}
		numa_init_array();
		return;
	}
	BUG();
}
unsigned long __init numa_free_all_bootmem(void)
{
	unsigned long pages = 0;
	int i;

	for_each_online_node(i)
		pages += free_all_bootmem_node(NODE_DATA(i));

	pages += free_all_memory_core_early(MAX_NUMNODES);

	return pages;
}
int __cpuinit numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}
/*
 * UGLINESS AHEAD: Currently, CONFIG_NUMA_EMU is 64bit only and makes use
 * of 64bit specific data structures.  The distinction is artificial and
 * should be removed.  numa_{add|remove}_cpu() are implemented in numa.c
 * for both 32 and 64bit when CONFIG_NUMA_EMU is disabled but here when
 * it is enabled.
 *
 * NUMA emulation is planned to be made generic, and the following and
 * other related code should be moved to numa.c.
 */
1051 # ifndef CONFIG_DEBUG_PER_CPU_MAPS
1052 void __cpuinit numa_add_cpu(int cpu)
1056 nid = numa_cpu_node(cpu);
1057 if (nid == NUMA_NO_NODE)
1058 nid = early_cpu_to_node(cpu);
1059 BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
1061 physnid = emu_nid_to_phys[nid];
1064 * Map the cpu to each emulated node that is allocated on the physical
1065 * node of the cpu's apic id.
1067 for_each_online_node(nid)
1068 if (emu_nid_to_phys[nid] == physnid)
1069 cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
1072 void __cpuinit numa_remove_cpu(int cpu)
1076 for_each_online_node(i)
1077 cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
# else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	struct cpumask *mask;
	int nid, physnid, i;

	nid = early_cpu_to_node(cpu);
	if (nid == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}

	physnid = emu_nid_to_phys[nid];

	for_each_online_node(i) {
		if (emu_nid_to_phys[i] != physnid)
			continue;

		mask = debug_cpumask_set_cpu(cpu, enable);
		if (!mask)
			return;

		if (enable)
			cpumask_set_cpu(cpu, mask);
		else
			cpumask_clear_cpu(cpu, mask);
	}
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}
# endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */
#endif	/* CONFIG_NUMA_EMU */