/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
        unsigned int node;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES)
                setup_nr_node_ids();

        /* allocate the map */
        for (node = 0; node < nr_node_ids; node++)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __init fake_numa_create_new_node(unsigned long end_pfn,
                                                unsigned int *nid)
{
        unsigned long long mem;
        char *p = cmdline;
        static unsigned int fake_nid;
        static unsigned long long curr_boundary;
        /*
         * Modify node id, iff we started creating NUMA nodes.
         * We want to continue from where we left off the last time.
         */
        if (fake_nid)
                *nid = fake_nid;
        /*
         * In case there are no more arguments to parse, the
         * node_id should be the same as the last fake node id
         * (we've handled this above).
         */
        if (!p)
                return 0;

        mem = memparse(p, &p);
        if (!mem)
                return 0;

        if (mem < curr_boundary)
                return 0;

        curr_boundary = mem;

        if ((end_pfn << PAGE_SHIFT) > mem) {
                /*
                 * Skip commas and spaces
                 */
                while (*p == ',' || *p == ' ' || *p == '\t')
                        p++;

                cmdline = p;
                fake_nid++;
                *nid = fake_nid;
                dbg("created new fake_node with id %d\n", fake_nid);
                return 1;
        }
        return 0;
}
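
/*
 * Illustration, as implied by the memparse() loop above: booting with
 * "numa=fake=1G,3G" places memory below 1G in fake node 0, memory from
 * 1G up to 3G in fake node 1, and any remaining memory in fake node 2.
 * Each comma-separated value is a cumulative upper boundary, not a
 * per-node size (hence the curr_boundary monotonicity check).
 */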

/*
 * get_node_active_region - Return active region containing pfn
 * Active range returned is empty if none found.
 * @pfn: The page to return the region for
 * @node_ar: Returned set to the active region containing @pfn
 */
static void __init get_node_active_region(unsigned long pfn,
                                          struct node_active_region *node_ar)
{
        unsigned long start_pfn, end_pfn;
        int i, nid;

        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
                if (pfn >= start_pfn && pfn < end_pfn) {
                        node_ar->nid = nid;
                        node_ar->start_pfn = start_pfn;
                        node_ar->end_pfn = end_pfn;
                        break;
                }
        }
}

static void reset_numa_cpu_lookup_table(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                numa_cpu_lookup_table[cpu] = -1;
}

static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
{
        numa_cpu_lookup_table[cpu] = node;
}

static void map_cpu_to_node(int cpu, int node)
{
        update_numa_cpu_lookup_table(cpu, node);

        dbg("adding cpu %d to node %d\n", cpu, node);

        if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
                cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
        int node = numa_cpu_lookup_table[cpu];

        dbg("removing cpu %lu from node %d\n", cpu, node);

        if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
                cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
        } else {
                printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
                       cpu, node);
        }
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
        return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const __be32 *of_get_usable_memory(struct device_node *memory)
{
        const __be32 *prop;
        u32 len;
        prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return NULL;
        return prop;
}

int __node_distance(int a, int b)
{
        int i;
        int distance = LOCAL_DISTANCE;

        if (!form1_affinity)
                return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

        for (i = 0; i < distance_ref_points_depth; i++) {
                if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
                        break;

                /* Double the distance for each NUMA level */
                distance *= 2;
        }

        return distance;
}
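
/*
 * Worked example: with form 1 affinity and distance_ref_points_depth = 4,
 * two nodes whose distance_lookup_table entries first match at index 2
 * differ on the two most significant levels, so the result is
 * LOCAL_DISTANCE * 2 * 2 = 40; nodes already matching at index 0 stay
 * at LOCAL_DISTANCE.
 */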

static void initialize_distance_lookup_table(int nid,
                const __be32 *associativity)
{
        int i;

        if (!form1_affinity)
                return;

        for (i = 0; i < distance_ref_points_depth; i++) {
                const __be32 *entry;

                entry = &associativity[be32_to_cpu(distance_ref_points[i])];
                distance_lookup_table[nid][i] = of_read_number(entry, 1);
        }
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int associativity_to_nid(const __be32 *associativity)
{
        int nid = -1;

        if (min_common_depth == -1)
                goto out;

        if (of_read_number(associativity, 1) >= min_common_depth)
                nid = of_read_number(&associativity[min_common_depth], 1);

        /* POWER4 LPAR uses 0xffff as invalid node */
        if (nid == 0xffff || nid >= MAX_NUMNODES)
                nid = -1;

        if (nid > 0 &&
            of_read_number(associativity, 1) >= distance_ref_points_depth)
                initialize_distance_lookup_table(nid, associativity);

out:
        return nid;
}
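
/*
 * Illustration (hypothetical property values): with min_common_depth = 4,
 * an "ibm,associativity" value of <5 0 2 0 1> carries its cell count (5)
 * in the first cell, so the node id is read from associativity[4],
 * giving nid 1 here.  A value of 0xffff in that position would be
 * rejected as the POWER4 invalid-node marker.
 */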

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
        int nid = -1;
        const __be32 *tmp;

        tmp = of_get_associativity(device);
        if (tmp)
                nid = associativity_to_nid(tmp);
        return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
        struct device_node *tmp;
        int nid = -1;

        of_node_get(device);
        while (device) {
                nid = of_node_to_nid_single(device);
                if (nid != -1)
                        break;

                tmp = device;
                device = of_get_parent(tmp);
                of_node_put(tmp);
        }
        of_node_put(device);

        return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
        int depth;
        struct device_node *root;

        if (firmware_has_feature(FW_FEATURE_OPAL))
                root = of_find_node_by_path("/ibm,opal");
        else
                root = of_find_node_by_path("/rtas");
        if (!root)
                root = of_find_node_by_path("/");

        /*
         * This property is a set of 32-bit integers, each representing
         * an index into the ibm,associativity nodes.
         *
         * With form 0 affinity the first integer is for an SMP configuration
         * (should be all 0's) and the second is for a normal NUMA
         * configuration. We have only one level of NUMA.
         *
         * With form 1 affinity the first integer is the most significant
         * NUMA boundary and the following are progressively less significant
         * boundaries. There can be more than one level of NUMA.
         */
        distance_ref_points = of_get_property(root,
                                        "ibm,associativity-reference-points",
                                        &distance_ref_points_depth);

        if (!distance_ref_points) {
                dbg("NUMA: ibm,associativity-reference-points not found.\n");
                goto err;
        }

        distance_ref_points_depth /= sizeof(int);

        if (firmware_has_feature(FW_FEATURE_OPAL) ||
            firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
                dbg("Using form 1 affinity\n");
                form1_affinity = 1;
        }

        if (form1_affinity) {
                depth = of_read_number(distance_ref_points, 1);
        } else {
                if (distance_ref_points_depth < 2) {
                        printk(KERN_WARNING "NUMA: "
                                "short ibm,associativity-reference-points\n");
                        goto err;
                }

                depth = of_read_number(&distance_ref_points[1], 1);
        }

        /*
         * Warn and cap if the hardware supports more than
         * MAX_DISTANCE_REF_POINTS domains.
         */
        if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
                printk(KERN_WARNING "NUMA: distance array capped at "
                        "%d entries\n", MAX_DISTANCE_REF_POINTS);
                distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
        }

        of_node_put(root);
        return depth;

err:
        of_node_put(root);
        return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
        struct device_node *memory = NULL;

        memory = of_find_node_by_type(memory, "memory");
        if (!memory)
                panic("numa.c: No memory nodes found!");

        *n_addr_cells = of_n_addr_cells(memory);
        *n_size_cells = of_n_size_cells(memory);
        of_node_put(memory);
}

static unsigned long read_n_cells(int n, const __be32 **buf)
{
        unsigned long result = 0;

        while (n--) {
                result = (result << 32) | of_read_number(*buf, 1);
                (*buf)++;
        }
        return result;
}
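
/*
 * E.g. with n = 2, the two 32-bit cells <0x1 0x80000000> combine into
 * the 64-bit value 0x180000000, and *buf is left pointing just past the
 * cells consumed.
 */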

/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp)
{
        const __be32 *cp;

        drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

        cp = *cellp;
        drmem->drc_index = of_read_number(cp, 1);
        drmem->reserved = of_read_number(&cp[1], 1);
        drmem->aa_index = of_read_number(&cp[2], 1);
        drmem->flags = of_read_number(&cp[3], 1);

        *cellp = cp + 4;
}
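
/*
 * Each entry consumed above is therefore n_mem_addr_cells cells of base
 * address followed by one cell each for drc_index, reserved, aa_index
 * and flags, which is why of_get_drconf_memory() below sizes the
 * property as (n_mem_addr_cells + 4) cells per entry.
 */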

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a count N followed by
 * N memblock list entries.  Each memblock list entry contains information
 * as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
{
        const __be32 *prop;
        u32 len, entries;

        prop = of_get_property(memory, "ibm,dynamic-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;

        entries = of_read_number(prop++, 1);

        /* Now that we know the number of entries, revalidate the size
         * of the property read in to ensure we have everything
         */
        if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
                return 0;

        *dm = prop;
        return entries;
}

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
        const __be32 *prop;
        u32 len;

        prop = of_get_property(memory, "ibm,lmb-size", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;

        return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
        u32     n_arrays;
        u32     array_sz;
        const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
                               struct assoc_arrays *aa)
{
        const __be32 *prop;
        u32 len;

        prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
        if (!prop || len < 2 * sizeof(unsigned int))
                return -1;

        aa->n_arrays = of_read_number(prop++, 1);
        aa->array_sz = of_read_number(prop++, 1);

        /* Now that we know the number of arrays and size of each array,
         * revalidate the size of the property read in.
         */
        if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
                return -1;

        aa->arrays = prop;
        return 0;
}
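
/*
 * Hypothetical example: a property of <2 5  0 0 1 0 1  0 0 2 0 2>
 * describes N = 2 arrays of M = 5 cells each; aa->arrays then points at
 * the first cell of array 0, and array k starts at
 * aa->arrays[k * aa->array_sz].
 */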

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
                                   struct assoc_arrays *aa)
{
        int default_nid = 0;
        int nid = default_nid;
        int index;

        if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
            !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
            drmem->aa_index < aa->n_arrays) {
                index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
                nid = of_read_number(&aa->arrays[index], 1);

                if (nid == 0xffff || nid >= MAX_NUMNODES)
                        nid = default_nid;
        }

        return nid;
}
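
/*
 * Index arithmetic example: with array_sz = 5, min_common_depth = 4 and
 * drmem->aa_index = 1, the node id is read from
 * aa->arrays[1 * 5 + 4 - 1] = aa->arrays[8], i.e. cell 3 (0-based) of
 * associativity array 1.
 */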

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
        int nid;
        struct device_node *cpu;

        /*
         * If a valid cpu-to-node mapping is already available, use it
         * directly instead of querying the firmware, since it represents
         * the most recent mapping notified to us by the platform (eg: VPHN).
         */
        if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
                map_cpu_to_node(lcpu, nid);
                return nid;
        }

        cpu = of_get_cpu_node(lcpu, NULL);

        if (!cpu) {
                WARN_ON(1);
                nid = 0;
                goto out;
        }

        nid = of_node_to_nid_single(cpu);

        if (nid < 0 || !node_online(nid))
                nid = first_online_node;
out:
        map_cpu_to_node(lcpu, nid);

        of_node_put(cpu);

        return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
        int base, sibling, i;

        /* Verify that all the threads in the core belong to the same node */
        base = cpu_first_thread_sibling(cpu);

        for (i = 0; i < threads_per_core; i++) {
                sibling = base + i;

                if (sibling == cpu || cpu_is_offline(sibling))
                        continue;

                if (cpu_to_node(sibling) != node) {
                        WARN(1, "CPU thread siblings %d and %d don't belong"
                                " to the same node!\n", cpu, sibling);
                        break;
                }
        }
}

static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
                             void *hcpu)
{
        unsigned long lcpu = (unsigned long)hcpu;
        int ret = NOTIFY_DONE, nid;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                nid = numa_setup_cpu(lcpu);
                verify_cpu_node_mapping((int)lcpu, nid);
                ret = NOTIFY_OK;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                unmap_cpu_from_node(lcpu);
                ret = NOTIFY_OK;
                break;
#endif
        }
        return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
                                                      unsigned long size)
{
        /*
         * We use memblock_end_of_DRAM() in here instead of memory_limit because
         * we've already adjusted it for the limit and it takes care of
         * having memory holes below the limit.  Also, in the case of
         * iommu_is_off, memory_limit is not set but is implicitly enforced.
         */

        if (start + size <= memblock_end_of_DRAM())
                return size;

        if (start >= memblock_end_of_DRAM())
                return 0;

        return memblock_end_of_DRAM() - start;
}
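
/*
 * E.g. with memblock_end_of_DRAM() at 4G, a region starting at 3G with
 * size 2G is trimmed to 1G, and a region starting at or above 4G is
 * discarded entirely (size 0 returned).
 */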

/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
        /*
         * For each lmb in ibm,dynamic-memory, a corresponding
         * entry in the linux,drconf-usable-memory property contains
         * a counter followed by that many (base, size) pairs.
         * Read the counter from linux,drconf-usable-memory.
         */
        return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
        const __be32 *uninitialized_var(dm), *usm;
        unsigned int n, rc, ranges, is_kexec_kdump = 0;
        unsigned long lmb_size, base, size, sz;
        int nid;
        struct assoc_arrays aa = { .arrays = NULL };

        n = of_get_drconf_memory(memory, &dm);
        if (!n)
                return;

        lmb_size = of_get_lmb_size(memory);
        if (!lmb_size)
                return;

        rc = of_get_assoc_arrays(memory, &aa);
        if (rc)
                return;

        /* check if this is a kexec/kdump kernel */
        usm = of_get_usable_memory(memory);
        if (usm != NULL)
                is_kexec_kdump = 1;

        for (; n != 0; --n) {
                struct of_drconf_cell drmem;

                read_drconf_cell(&drmem, &dm);

                /* skip this block if the reserved bit is set in flags (0x80)
                   or if the block is not assigned to this partition (0x8) */
                if ((drmem.flags & DRCONF_MEM_RESERVED)
                    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
                        continue;

                base = drmem.base_addr;
                size = lmb_size;
                ranges = 1;

                if (is_kexec_kdump) {
                        ranges = read_usm_ranges(&usm);
                        if (!ranges) /* there are no (base, size) pairs */
                                continue;
                }
                do {
                        if (is_kexec_kdump) {
                                base = read_n_cells(n_mem_addr_cells, &usm);
                                size = read_n_cells(n_mem_size_cells, &usm);
                        }
                        nid = of_drconf_to_nid_single(&drmem, &aa);
                        fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
                                                  &nid);
                        node_set_online(nid);
                        sz = numa_enforce_memory_limit(base, size);
                        if (sz)
                                memblock_set_node(base, sz,
                                                  &memblock.memory, nid);
                } while (--ranges);
        }
}

static int __init parse_numa_properties(void)
{
        struct device_node *memory;
        int default_nid = 0;
        unsigned long i;

        if (numa_enabled == 0) {
                printk(KERN_WARNING "NUMA disabled by user\n");
                return -1;
        }

        min_common_depth = find_min_common_depth();

        if (min_common_depth < 0)
                return min_common_depth;

        dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

        /*
         * Even though we connect cpus to numa domains later in SMP
         * init, we need to know the node ids now. This is because
         * each node to be onlined must have NODE_DATA etc backing it.
         */
        for_each_present_cpu(i) {
                struct device_node *cpu;
                int nid;

                cpu = of_get_cpu_node(i, NULL);
                BUG_ON(!cpu);
                nid = of_node_to_nid_single(cpu);
                of_node_put(cpu);

                /*
                 * Don't fall back to default_nid yet -- we will plug
                 * cpus into nodes once the memory scan has discovered
                 * the topology.
                 */
                if (nid < 0)
                        continue;
                node_set_online(nid);
        }

        get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

        for_each_node_by_type(memory, "memory") {
                unsigned long start;
                unsigned long size;
                int nid;
                int ranges;
                const __be32 *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory,
                        "linux,usable-memory", &len);
                if (!memcell_buf || len <= 0)
                        memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
                /* these are order-sensitive, and modify the buffer pointer */
                start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                size = read_n_cells(n_mem_size_cells, &memcell_buf);

                /*
                 * Assumption: either all memory nodes or none will
                 * have associativity properties.  If none, then
                 * everything goes to default_nid.
                 */
                nid = of_node_to_nid_single(memory);
                if (nid < 0)
                        nid = default_nid;

                fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
                node_set_online(nid);

                if (!(size = numa_enforce_memory_limit(start, size))) {
                        if (--ranges)
                                goto new_range;
                        else
                                continue;
                }

                memblock_set_node(start, size, &memblock.memory, nid);

                if (--ranges)
                        goto new_range;
        }

        /*
         * Now do the same thing for each MEMBLOCK listed in the
         * ibm,dynamic-memory property in the
         * ibm,dynamic-reconfiguration-memory node.
         */
        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory)
                parse_drconf_memory(memory);

        return 0;
}

static void __init setup_nonnuma(void)
{
        unsigned long top_of_ram = memblock_end_of_DRAM();
        unsigned long total_ram = memblock_phys_mem_size();
        unsigned long start_pfn, end_pfn;
        unsigned int nid = 0;
        struct memblock_region *reg;

        printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);

        for_each_memblock(memory, reg) {
                start_pfn = memblock_region_memory_base_pfn(reg);
                end_pfn = memblock_region_memory_end_pfn(reg);

                fake_numa_create_new_node(end_pfn, &nid);
                memblock_set_node(PFN_PHYS(start_pfn),
                                  PFN_PHYS(end_pfn - start_pfn),
                                  &memblock.memory, nid);
                node_set_online(nid);
        }
}

void __init dump_numa_cpu_topology(void)
{
        unsigned int node;
        unsigned int cpu, count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                printk(KERN_DEBUG "Node %d CPUs:", node);

                count = 0;
                /*
                 * If we used a CPU iterator here we would miss printing
                 * the holes in the cpumap.
                 */
                for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
                        if (cpumask_test_cpu(cpu,
                                        node_to_cpumask_map[node])) {
                                if (count == 0)
                                        printk(" %u", cpu);
                                ++count;
                        } else {
                                if (count > 1)
                                        printk("-%u", cpu - 1);
                                count = 0;
                        }
                }

                if (count > 1)
                        printk("-%u", nr_cpu_ids - 1);
                printk("\n");
        }
}

static void __init dump_numa_memory_topology(void)
{
        unsigned int node;
        unsigned int count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                unsigned long i;

                printk(KERN_DEBUG "Node %d Memory:", node);

                count = 0;

                for (i = 0; i < memblock_end_of_DRAM();
                     i += (1 << SECTION_SIZE_BITS)) {
                        if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
                                if (count == 0)
                                        printk(" 0x%lx", i);
                                ++count;
                        } else {
                                if (count > 0)
                                        printk("-0x%lx", i);
                                count = 0;
                        }
                }

                if (count > 0)
                        printk("-0x%lx", i);
                printk("\n");
        }
}

/*
 * Allocate some memory, satisfying the memblock or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the virtual address of the memory.
 */
static void __init *careful_zallocation(int nid, unsigned long size,
                                       unsigned long align,
                                       unsigned long end_pfn)
{
        void *ret;
        int new_nid;
        unsigned long ret_paddr;

        ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);

        /* retry over all memory */
        if (!ret_paddr)
                ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());

        if (!ret_paddr)
                panic("numa.c: cannot allocate %lu bytes for node %d",
                      size, nid);

        ret = __va(ret_paddr);

        /*
         * We initialize the nodes in numeric order: 0, 1, 2...
         * and hand over control from the MEMBLOCK allocator to the
         * bootmem allocator.  If this function is called for
         * node 5, then we know that all nodes <5 are using the
         * bootmem allocator instead of the MEMBLOCK allocator.
         *
         * So, check the nid from which this allocation came
         * and double check to see if we need to use bootmem
         * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
         * since it would be useless.
         */
        new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
        if (new_nid < nid) {
                ret = __alloc_bootmem_node(NODE_DATA(new_nid),
                                size, align, 0);

                dbg("alloc_bootmem %p %lx\n", ret, size);
        }

        memset(ret, 0, size);
        return ret;
}

static struct notifier_block ppc64_numa_nb = {
        .notifier_call = cpu_numa_callback,
        .priority = 1 /* Must run before sched domains notifier. */
};

static void __init mark_reserved_regions_for_nid(int nid)
{
        struct pglist_data *node = NODE_DATA(nid);
        struct memblock_region *reg;

        for_each_memblock(reserved, reg) {
                unsigned long physbase = reg->base;
                unsigned long size = reg->size;
                unsigned long start_pfn = physbase >> PAGE_SHIFT;
                unsigned long end_pfn = PFN_UP(physbase + size);
                struct node_active_region node_ar;
                unsigned long node_end_pfn = pgdat_end_pfn(node);

                /*
                 * Check to make sure that this memblock.reserved area is
                 * within the bounds of the node that we care about.
                 * Checking the nid of the start and end points is not
                 * sufficient because the reserved area could span the
                 * entire node.
                 */
                if (end_pfn <= node->node_start_pfn ||
                    start_pfn >= node_end_pfn)
                        continue;

                get_node_active_region(start_pfn, &node_ar);
                while (start_pfn < end_pfn &&
                        node_ar.start_pfn < node_ar.end_pfn) {
                        unsigned long reserve_size = size;
                        /*
                         * if reserved region extends past active region
                         * then trim size to active region
                         */
                        if (end_pfn > node_ar.end_pfn)
                                reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
                                        - physbase;
                        /*
                         * Only worry about *this* node, others may not
                         * yet have valid NODE_DATA().
                         */
                        if (node_ar.nid == nid) {
                                dbg("reserve_bootmem %lx %lx nid=%d\n",
                                        physbase, reserve_size, node_ar.nid);
                                reserve_bootmem_node(NODE_DATA(node_ar.nid),
                                                physbase, reserve_size,
                                                BOOTMEM_DEFAULT);
                        }
                        /*
                         * if reserved region is contained in the active region
                         * then done.
                         */
                        if (end_pfn <= node_ar.end_pfn)
                                break;

                        /*
                         * reserved region extends past the active region
                         *   get next active region that contains this
                         *   reserved region
                         */
                        start_pfn = node_ar.end_pfn;
                        physbase = start_pfn << PAGE_SHIFT;
                        size = size - reserve_size;
                        get_node_active_region(start_pfn, &node_ar);
                }
        }
}

void __init do_init_bootmem(void)
{
        int nid;

        min_low_pfn = 0;
        max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn;

        if (parse_numa_properties())
                setup_nonnuma();
        else
                dump_numa_memory_topology();

        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn;
                void *bootmem_vaddr;
                unsigned long bootmap_pages;

                get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

                /*
                 * Allocate the node structure node local if possible
                 *
                 * Be careful moving this around, as it relies on all
                 * previous nodes' bootmem to be initialized and have
                 * all reserved areas marked.
                 */
                NODE_DATA(nid) = careful_zallocation(nid,
                                        sizeof(struct pglist_data),
                                        SMP_CACHE_BYTES, end_pfn);

                dbg("node %d\n", nid);
                dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

                NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
                NODE_DATA(nid)->node_start_pfn = start_pfn;
                NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

                if (NODE_DATA(nid)->node_spanned_pages == 0)
                        continue;

                dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
                dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

                bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
                bootmem_vaddr = careful_zallocation(nid,
                                        bootmap_pages << PAGE_SHIFT,
                                        PAGE_SIZE, end_pfn);

                dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

                init_bootmem_node(NODE_DATA(nid),
                                  __pa(bootmem_vaddr) >> PAGE_SHIFT,
                                  start_pfn, end_pfn);

                free_bootmem_with_active_regions(nid, end_pfn);
                /*
                 * Be very careful about moving this around.  Future
                 * calls to careful_zallocation() depend on this getting
                 * done correctly.
                 */
                mark_reserved_regions_for_nid(nid);
                sparse_memory_present_with_active_regions(nid);
        }

        init_bootmem_done = 1;

        /*
         * Now bootmem is initialised we can create the node to cpumask
         * lookup tables and setup the cpu callback to populate them.
         */
        setup_node_to_cpumask_map();

        reset_numa_cpu_lookup_table();
        register_cpu_notifier(&ppc64_numa_nb);
        cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
                          (void *)(unsigned long)boot_cpuid);
}

void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
        free_area_init_nodes(max_zone_pfns);
}

static int __init early_numa(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "off"))
                numa_enabled = 0;

        if (strstr(p, "debug"))
                numa_debug = 1;

        p = strstr(p, "fake=");
        if (p)
                cmdline = p + strlen("fake=");

        return 0;
}
early_param("numa", early_numa);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
                                     unsigned long scn_addr)
{
        const __be32 *dm;
        unsigned int drconf_cell_cnt, rc;
        unsigned long lmb_size;
        struct assoc_arrays aa;
        int nid = -1;

        drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
        if (!drconf_cell_cnt)
                return -1;

        lmb_size = of_get_lmb_size(memory);
        if (!lmb_size)
                return -1;

        rc = of_get_assoc_arrays(memory, &aa);
        if (rc)
                return -1;

        for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
                struct of_drconf_cell drmem;

                read_drconf_cell(&drmem, &dm);

                /* skip this block if it is reserved or not assigned to
                 * this partition */
                if ((drmem.flags & DRCONF_MEM_RESERVED)
                    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
                        continue;

                if ((scn_addr < drmem.base_addr)
                    || (scn_addr >= (drmem.base_addr + lmb_size)))
                        continue;

                nid = of_drconf_to_nid_single(&drmem, &aa);
                break;
        }

        return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory;
        int nid = -1;

        for_each_node_by_type(memory, "memory") {
                unsigned long start, size;
                int ranges;
                const __be32 *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

                while (ranges--) {
                        start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                        size = read_n_cells(n_mem_size_cells, &memcell_buf);

                        if ((scn_addr < start) || (scn_addr >= (start + size)))
                                continue;

                        nid = of_node_to_nid_single(memory);
                        break;
                }

                if (nid >= 0)
                        break;
        }

        of_node_put(memory);

        return nid;
}

/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory = NULL;
        int nid, found = 0;

        if (!numa_enabled || (min_common_depth < 0))
                return first_online_node;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
                of_node_put(memory);
        } else {
                nid = hot_add_node_scn_to_nid(scn_addr);
        }

        if (nid < 0 || !node_online(nid))
                nid = first_online_node;

        if (NODE_DATA(nid)->node_spanned_pages)
                return nid;

        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages) {
                        found = 1;
                        break;
                }
        }

        BUG_ON(!found);
        return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
        struct device_node *memory = NULL;
        unsigned int drconf_cell_cnt = 0;
        u64 lmb_size = 0;
        const __be32 *dm = NULL;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
                lmb_size = of_get_lmb_size(memory);
                of_node_put(memory);
        }
        return lmb_size * drconf_cell_cnt;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
        return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
struct topology_update_data {
        struct topology_update_data *next;
        unsigned int cpu;
        int old_nid;
        int new_nid;
};

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);

/*
 * Take a snapshot of the current values of the associativity change
 * counters maintained by the hypervisor.
 */
static void setup_cpu_associativity_change_counters(void)
{
        int cpu;

        /* The VPHN feature supports a maximum of 8 reference points */
        BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

        for_each_possible_cpu(cpu) {
                int i;
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

                for (i = 0; i < distance_ref_points_depth; i++)
                        counts[i] = hypervisor_counts[i];
        }
}

/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
        int cpu;
        cpumask_t *changes = &cpu_associativity_changes_mask;

        for_each_possible_cpu(cpu) {
                int i, changed = 0;
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

                for (i = 0; i < distance_ref_points_depth; i++) {
                        if (hypervisor_counts[i] != counts[i]) {
                                counts[i] = hypervisor_counts[i];
                                changed = 1;
                        }
                }
                if (changed) {
                        cpumask_or(changes, changes, cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                }
        }

        return cpumask_weight(changes);
}

/*
 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
 * the complete property we have to add the length in the first cell.
 */
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
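/* That is (6 * 8) / 4 + 1 = 13 u32 cells: 12 values plus the length cell. */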

/*
 * Convert the associativity domain numbers returned from the hypervisor
 * to the sequence they would appear in the ibm,associativity property.
 */
static int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
{
        int i, nr_assoc_doms = 0;
        const __be16 *field = (const __be16 *) packed;

#define VPHN_FIELD_UNUSED       (0xffff)
#define VPHN_FIELD_MSB          (0x8000)
#define VPHN_FIELD_MASK         (~VPHN_FIELD_MSB)

        for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
                if (be16_to_cpup(field) == VPHN_FIELD_UNUSED) {
                        /* All significant fields processed, and remaining
                         * fields contain the reserved value of all 1's.
                         * Just store them.
                         */
                        unpacked[i] = *((__be32 *)field);
                        field += 2;
                } else if (be16_to_cpup(field) & VPHN_FIELD_MSB) {
                        /* Data is in the lower 15 bits of this field */
                        unpacked[i] = cpu_to_be32(
                                be16_to_cpup(field) & VPHN_FIELD_MASK);
                        field++;
                        nr_assoc_doms++;
                } else {
                        /* Data is in the lower 15 bits of this field
                         * concatenated with the next 16 bit field
                         */
                        unpacked[i] = *((__be32 *)field);
                        field += 2;
                        nr_assoc_doms++;
                }
        }

        /* The first cell contains the length of the property */
        unpacked[0] = cpu_to_be32(nr_assoc_doms);

        return nr_assoc_doms;
}
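
/*
 * Hypothetical packed stream: a 16-bit field of 0x8002 has the MSB set,
 * so it unpacks on its own to the 32-bit domain value 0x0002; a field
 * of 0x0001 followed by 0xabcd has the MSB clear, so the two fields are
 * kept together as the single 32-bit value 0x0001abcd; 0xffff marks the
 * remaining fields unused, and they are stored as-is.
 */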
1433
1434 /*
1435  * Retrieve the new associativity information for a virtual processor's
1436  * home node.
1437  */
1438 static long hcall_vphn(unsigned long cpu, __be32 *associativity)
1439 {
1440         long rc;
1441         long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
1442         u64 flags = 1;
1443         int hwcpu = get_hard_smp_processor_id(cpu);
1444
1445         rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
1446         vphn_unpack_associativity(retbuf, associativity);
1447
1448         return rc;
1449 }
1450
1451 static long vphn_get_associativity(unsigned long cpu,
1452                                         __be32 *associativity)
1453 {
1454         long rc;
1455
1456         rc = hcall_vphn(cpu, associativity);
1457
1458         switch (rc) {
1459         case H_FUNCTION:
1460                 printk(KERN_INFO
1461                         "VPHN is not supported. Disabling polling...\n");
1462                 stop_topology_update();
1463                 break;
1464         case H_HARDWARE:
1465                 printk(KERN_ERR
1466                         "hcall_vphn() experienced a hardware fault "
1467                         "preventing VPHN. Disabling polling...\n");
1468                 stop_topology_update();
1469         }
1470
1471         return rc;
1472 }
1473
1474 /*
1475  * Update the CPU maps and sysfs entries for a single CPU when its NUMA
1476  * characteristics change. This function doesn't perform any locking and is
1477  * only safe to call from stop_machine().
1478  */
1479 static int update_cpu_topology(void *data)
1480 {
1481         struct topology_update_data *update;
1482         unsigned long cpu;
1483
1484         if (!data)
1485                 return -EINVAL;
1486
1487         cpu = smp_processor_id();
1488
1489         for (update = data; update; update = update->next) {
1490                 if (cpu != update->cpu)
1491                         continue;
1492
1493                 unmap_cpu_from_node(update->cpu);
1494                 map_cpu_to_node(update->cpu, update->new_nid);
1495                 vdso_getcpu_init();
1496         }
1497
1498         return 0;
1499 }
1500
1501 static int update_lookup_table(void *data)
1502 {
1503         struct topology_update_data *update;
1504
1505         if (!data)
1506                 return -EINVAL;
1507
1508         /*
1509          * Upon topology update, the numa-cpu lookup table needs to be updated
1510          * for all threads in the core, including offline CPUs, to ensure that
1511          * future hotplug operations respect the cpu-to-node associativity
1512          * properly.
1513          */
1514         for (update = data; update; update = update->next) {
1515                 int nid, base, j;
1516
1517                 nid = update->new_nid;
1518                 base = cpu_first_thread_sibling(update->cpu);
1519
1520                 for (j = 0; j < threads_per_core; j++) {
1521                         update_numa_cpu_lookup_table(base + j, nid);
1522                 }
1523         }
1524
1525         return 0;
1526 }
1527
1528 /*
1529  * Update the node maps and sysfs entries for each cpu whose home node
1530  * has changed. Returns 1 when the topology has changed, and 0 otherwise.
1531  */
1532 int arch_update_cpu_topology(void)
1533 {
1534         unsigned int cpu, sibling, changed = 0;
1535         struct topology_update_data *updates, *ud;
1536         __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
1537         cpumask_t updated_cpus;
1538         struct device *dev;
1539         int weight, new_nid, i = 0;
1540
1541         weight = cpumask_weight(&cpu_associativity_changes_mask);
1542         if (!weight)
1543                 return 0;
1544
1545         updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
1546         if (!updates)
1547                 return 0;
1548
1549         cpumask_clear(&updated_cpus);
1550
        for_each_cpu(cpu, &cpu_associativity_changes_mask) {
                /*
                 * If this CPU's siblings aren't all flagged for an update,
                 * the updates list would come up short. Flag the whole
                 * core and defer it to the next pass.
                 */
                if (!cpumask_subset(cpu_sibling_mask(cpu),
                                        &cpu_associativity_changes_mask)) {
                        pr_info("Sibling bits not set for associativity "
                                        "change, cpu%d\n", cpu);
                        cpumask_or(&cpu_associativity_changes_mask,
                                        &cpu_associativity_changes_mask,
                                        cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                        continue;
                }

                /* Use associativity from first thread for all siblings */
                vphn_get_associativity(cpu, associativity);
                new_nid = associativity_to_nid(associativity);
                if (new_nid < 0 || !node_online(new_nid))
                        new_nid = first_online_node;

                if (new_nid == numa_cpu_lookup_table[cpu]) {
                        cpumask_andnot(&cpu_associativity_changes_mask,
                                        &cpu_associativity_changes_mask,
                                        cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                        continue;
                }

                for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
                        ud = &updates[i++];
                        ud->cpu = sibling;
                        ud->new_nid = new_nid;
                        ud->old_nid = numa_cpu_lookup_table[sibling];
                        cpumask_set_cpu(sibling, &updated_cpus);
                        if (i < weight)
                                ud->next = &updates[i];
                }
                cpu = cpu_last_thread_sibling(cpu);
        }

        /*
         * Terminate the chain at the last entry actually filled in;
         * otherwise the walk further down could wander off into the
         * zero-filled tail of the array when some candidates were
         * skipped above.
         */
        if (i)
                updates[i - 1].next = NULL;

        /* No CPU actually changed its node: nothing to do. */
        if (!cpumask_weight(&updated_cpus))
                goto out;

        stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

        /*
         * Update the numa-cpu lookup table with the new mappings, even for
         * offline CPUs. It is best to perform this update from the stop-
         * machine context.
         */
        stop_machine(update_lookup_table, &updates[0],
                                        cpumask_of(raw_smp_processor_id()));

        for (ud = &updates[0]; ud; ud = ud->next) {
                unregister_cpu_under_node(ud->cpu, ud->old_nid);
                register_cpu_under_node(ud->cpu, ud->new_nid);

                dev = get_cpu_device(ud->cpu);
                if (dev)
                        kobject_uevent(&dev->kobj, KOBJ_CHANGE);
                cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
                changed = 1;
        }

out:
        kfree(updates);
        return changed;
}
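
/*
 * The scheduler core calls arch_update_cpu_topology() from
 * partition_sched_domains(), which the rebuild_sched_domains() call in
 * topology_work_fn() below ends up reaching.
 */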

static void topology_work_fn(struct work_struct *work)
{
        rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

static void topology_schedule_update(void)
{
        schedule_work(&topology_work);
}

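/*
 * rebuild_sched_domains() can sleep, so it must not be called from the
 * timer callback below; the work item above defers it to process
 * context instead.
 */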
static void topology_timer_fn(unsigned long ignored)
{
        if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
                topology_schedule_update();
        else if (vphn_enabled) {
                if (update_cpu_associativity_changes_mask() > 0)
                        topology_schedule_update();
                reset_topology_timer();
        }
}
static struct timer_list topology_timer =
        TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void reset_topology_timer(void)
{
        topology_timer.data = 0;
        topology_timer.expires = jiffies + 60 * HZ;
        mod_timer(&topology_timer, topology_timer.expires);
}

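/*
 * Two sources can feed cpu_associativity_changes_mask: PRRN changes
 * arrive as device-tree property updates through the notifier below,
 * while VPHN is polled from the deferrable timer above once a minute.
 */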
#ifdef CONFIG_SMP

static void stage_topology_update(int core_id)
{
        cpumask_or(&cpu_associativity_changes_mask,
                &cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
        reset_topology_timer();
}

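/*
 * PRRN events show up as updates to the "ibm,associativity" property of
 * a cpu node; stage the affected core and let the timer-driven update
 * machinery above pick it up.
 */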
static int dt_update_callback(struct notifier_block *nb,
                                unsigned long action, void *data)
{
        struct of_prop_reconfig *update;
        int rc = NOTIFY_DONE;

        switch (action) {
        case OF_RECONFIG_UPDATE_PROPERTY:
                update = (struct of_prop_reconfig *)data;
                if (!of_prop_cmp(update->dn->type, "cpu") &&
                    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
                        u32 core_id;
                        of_property_read_u32(update->dn, "reg", &core_id);
                        stage_topology_update(core_id);
                        rc = NOTIFY_OK;
                }
                break;
        }

        return rc;
}

static struct notifier_block dt_update_nb = {
        .notifier_call = dt_update_callback,
};

#endif

/*
 * Start watching for associativity changes: prefer PRRN device-tree
 * events when the firmware supports them, otherwise fall back to VPHN
 * polling on shared-processor LPARs.
 */
int start_topology_update(void)
{
        int rc = 0;

        if (firmware_has_feature(FW_FEATURE_PRRN)) {
                if (!prrn_enabled) {
                        prrn_enabled = 1;
                        vphn_enabled = 0;
#ifdef CONFIG_SMP
                        rc = of_reconfig_notifier_register(&dt_update_nb);
#endif
                }
        } else if (firmware_has_feature(FW_FEATURE_VPHN) &&
                   lppaca_shared_proc(get_lppaca())) {
                if (!vphn_enabled) {
                        prrn_enabled = 0;
                        vphn_enabled = 1;
                        setup_cpu_associativity_change_counters();
                        init_timer_deferrable(&topology_timer);
                        reset_topology_timer();
                }
        }

        return rc;
}

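/*
 * Besides the boot-time call from topology_update_init() below and the
 * /proc handler further down, platform code may pair these with partition
 * migration (e.g. stopping updates across an ibm,suspend-me cycle).
 */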
/*
 * Stop watching for associativity changes, whether PRRN event driven or
 * VPHN polled.
 */
int stop_topology_update(void)
{
        int rc = 0;

        if (prrn_enabled) {
                prrn_enabled = 0;
#ifdef CONFIG_SMP
                rc = of_reconfig_notifier_unregister(&dt_update_nb);
#endif
        } else if (vphn_enabled) {
                vphn_enabled = 0;
                rc = del_timer_sync(&topology_timer);
        }

        return rc;
}

int prrn_is_enabled(void)
{
        return prrn_enabled;
}

static int topology_read(struct seq_file *file, void *v)
{
        if (vphn_enabled || prrn_enabled)
                seq_puts(file, "on\n");
        else
                seq_puts(file, "off\n");

        return 0;
}

static int topology_open(struct inode *inode, struct file *file)
{
        return single_open(file, topology_read, NULL);
}

static ssize_t topology_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *off)
{
        char kbuf[4]; /* "on" or "off" plus null. */
        int read_len;

        read_len = count < 3 ? count : 3;
        if (copy_from_user(kbuf, buf, read_len))
                return -EFAULT;

        kbuf[read_len] = '\0';

        if (!strncmp(kbuf, "on", 2))
                start_topology_update();
        else if (!strncmp(kbuf, "off", 3))
                stop_topology_update();
        else
                return -EINVAL;

        return count;
}

static const struct file_operations topology_ops = {
        .read = seq_read,
        .write = topology_write,
        .open = topology_open,
        .release = single_release
};

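/*
 * Register the manual override at /proc/powerpc/topology_updates; it
 * reads back "on"/"off" and accepts the same strings to start or stop
 * updates, e.g.:
 *
 *      # echo off > /proc/powerpc/topology_updates
 */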
static int topology_update_init(void)
{
        start_topology_update();
        proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops);

        return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */