x86: merge setup_per_cpu_maps() into setup_per_cpu_areas()
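The blobdiff below folds the old setup_per_cpu_maps() helper into setup_per_cpu_areas(): the values held in the early static arrays (x86_cpu_to_apicid, x86_bios_cpu_apicid and, on 64-bit NUMA, x86_cpu_to_node_map) are now copied into each CPU's area inside the same loop that allocates it, and the *_early_ptr's are then set to NULL to mark the static arrays as gone. The diff also carries neighbouring changes from the same series (removal of the x86-64 cpu_pda bootstrap, the node_number per-cpu variable, cpu mask allocation). The following is a minimal, userspace-only sketch of that "early array, then per-CPU copy" pattern; it is not kernel code, and every name in it is illustrative.

/*
 * Stand-alone model of the pattern in this diff: data needed before the
 * per-CPU areas exist lives in a static "early" array; once each CPU's
 * area has been set up, the values are copied into it and the early
 * pointer is cleared so later code can tell the static copy is gone.
 * All names are made up for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

/* one instance of this per CPU after setup */
struct percpu_data {
	unsigned cpu_number;
	unsigned cpu_to_apicid;
};

/* early boot mapping, filled in before per-CPU areas exist */
static unsigned early_cpu_to_apicid_map[NR_CPUS] = { 0, 2, 4, 6 };
static unsigned *early_cpu_to_apicid = early_cpu_to_apicid_map;

static struct percpu_data *percpu_area[NR_CPUS];

/* analogous to the merged setup_per_cpu_areas(): allocate, then copy maps */
static void setup_per_cpu_areas(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		percpu_area[cpu] = calloc(1, sizeof(*percpu_area[cpu]));
		percpu_area[cpu]->cpu_number = cpu;
		/* copy early data into the freshly set up per-CPU area */
		percpu_area[cpu]->cpu_to_apicid = early_cpu_to_apicid[cpu];
	}
	/* indicate the early static array will soon be gone */
	early_cpu_to_apicid = NULL;
}

int main(void)
{
	setup_per_cpu_areas();
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %u -> apicid %u\n",
		       percpu_area[cpu]->cpu_number,
		       percpu_area[cpu]->cpu_to_apicid);
	return 0;
}

The point is only the ordering the kernel change enforces: set up the per-CPU area, copy the early data into it, then poison the early pointer so any later user of the static array is caught.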
index 49f3f709ee1f3ec989916c5f778b8d858689664f..d0b1476490a725fa7da40a8348efef9694cefb05 100644
 #include <linux/percpu.h>
 #include <linux/kexec.h>
 #include <linux/crash_dump.h>
-#include <asm/smp.h>
-#include <asm/percpu.h>
+#include <linux/smp.h>
+#include <linux/topology.h>
 #include <asm/sections.h>
 #include <asm/processor.h>
 #include <asm/setup.h>
-#include <asm/topology.h>
 #include <asm/mpspec.h>
 #include <asm/apicdef.h>
 #include <asm/highmem.h>
+#include <asm/proto.h>
+#include <asm/cpumask.h>
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+# define DBG(x...) printk(KERN_DEBUG x)
+#else
+# define DBG(x...)
+#endif
+
+/*
+ * Could be inside CONFIG_HAVE_SETUP_PER_CPU_AREA with other stuff but
+ * voyager wants cpu_number too.
+ */
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
+#endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
 unsigned int num_processors;
 unsigned disabled_cpus __cpuinitdata;
 /* Processor that is doing the boot up */
 unsigned int boot_cpu_physical_apicid = -1U;
-unsigned int max_physical_apicid;
 EXPORT_SYMBOL(boot_cpu_physical_apicid);
+unsigned int max_physical_apicid;
 
 /* Bitmask of physically existing CPUs */
 physid_mask_t phys_cpu_present_map;
 #endif
 
-/* map cpu index to physical APIC ID */
+/*
+ * Map cpu index to physical APIC ID
+ */
 DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
 DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
-#define        X86_64_NUMA     1
+#define        X86_64_NUMA     1       /* (used later) */
+DEFINE_PER_CPU(int, node_number) = 0;
+EXPORT_PER_CPU_SYMBOL(node_number);
 
-/* map cpu index to node index */
+/*
+ * Map cpu index to node index
+ */
 DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
 
-/* which logical CPUs are on which nodes */
+/*
+ * Which logical CPUs are on which nodes
+ */
 cpumask_t *node_to_cpumask_map;
 EXPORT_SYMBOL(node_to_cpumask_map);
 
-/* setup node_to_cpumask_map */
+/*
+ * Setup node_to_cpumask_map
+ */
 static void __init setup_node_to_cpumask_map(void);
 
 #else
 static inline void setup_node_to_cpumask_map(void) { }
 #endif
 
-#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
-/*
- * Copy data used in early init routines from the initial arrays to the
- * per cpu data areas.  These arrays then become expendable and the
- * *_early_ptr's are zeroed indicating that the static arrays are gone.
- */
-static void __init setup_per_cpu_maps(void)
-{
-       int cpu;
-
-       for_each_possible_cpu(cpu) {
-               per_cpu(x86_cpu_to_apicid, cpu) =
-                               early_per_cpu_map(x86_cpu_to_apicid, cpu);
-               per_cpu(x86_bios_cpu_apicid, cpu) =
-                               early_per_cpu_map(x86_bios_cpu_apicid, cpu);
-#ifdef X86_64_NUMA
-               per_cpu(x86_cpu_to_node_map, cpu) =
-                               early_per_cpu_map(x86_cpu_to_node_map, cpu);
-#endif
-       }
+#ifdef CONFIG_X86_64
 
-       /* indicate the early static arrays will soon be gone */
-       early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
-       early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
-#ifdef X86_64_NUMA
-       early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
-#endif
+/* correctly size the local cpu masks */
+static void setup_cpu_local_masks(void)
+{
+       alloc_bootmem_cpumask_var(&cpu_initialized_mask);
+       alloc_bootmem_cpumask_var(&cpu_callin_mask);
+       alloc_bootmem_cpumask_var(&cpu_callout_mask);
+       alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 }
 
-#ifdef CONFIG_X86_32
-/*
- * Great future not-so-futuristic plan: make i386 and x86_64 do it
- * the same way
- */
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(__per_cpu_offset);
-static inline void setup_cpu_pda_map(void) { }
-
-#elif !defined(CONFIG_SMP)
-static inline void setup_cpu_pda_map(void) { }
+#else /* CONFIG_X86_32 */
 
-#else /* CONFIG_SMP && CONFIG_X86_64 */
-
-/*
- * Allocate cpu_pda pointer table and array via alloc_bootmem.
- */
-static void __init setup_cpu_pda_map(void)
+static inline void setup_cpu_local_masks(void)
 {
-       char *pda;
-       struct x8664_pda **new_cpu_pda;
-       unsigned long size;
-       int cpu;
-
-       size = roundup(sizeof(struct x8664_pda), cache_line_size());
-
-       /* allocate cpu_pda array and pointer table */
-       {
-               unsigned long tsize = nr_cpu_ids * sizeof(void *);
-               unsigned long asize = size * (nr_cpu_ids - 1);
+}
 
-               tsize = roundup(tsize, cache_line_size());
-               new_cpu_pda = alloc_bootmem(tsize + asize);
-               pda = (char *)new_cpu_pda + tsize;
-       }
+#endif /* CONFIG_X86_32 */
 
-       /* initialize pointer table to static pda's */
-       for_each_possible_cpu(cpu) {
-               if (cpu == 0) {
-                       /* leave boot cpu pda in place */
-                       new_cpu_pda[0] = cpu_pda(0);
-                       continue;
-               }
-               new_cpu_pda[cpu] = (struct x8664_pda *)pda;
-               new_cpu_pda[cpu]->in_bootmem = 1;
-               pda += size;
-       }
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
 
-       /* point to new pointer table */
-       _cpu_pda = new_cpu_pda;
-}
+#ifdef CONFIG_X86_64
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
+       [0] = (unsigned long)__per_cpu_load,
+};
+#else
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
 #endif
+EXPORT_SYMBOL(__per_cpu_offset);
 
 /*
  * Great future plan:
@@ -145,20 +119,15 @@ void __init setup_per_cpu_areas(void)
        int cpu;
        unsigned long align = 1;
 
-       /* Setup cpu_pda map */
-       setup_cpu_pda_map();
-
        /* Copy section for each CPU (we discard the original) */
        old_size = PERCPU_ENOUGH_ROOM;
        align = max_t(unsigned long, PAGE_SIZE, align);
        size = roundup(old_size, align);
 
-       printk(KERN_INFO
-               "NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
+       pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
 
-       printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
-                         size);
+       pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);
 
        for_each_possible_cpu(cpu) {
 #ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -169,33 +138,61 @@ void __init setup_per_cpu_areas(void)
                if (!node_online(node) || !NODE_DATA(node)) {
                        ptr = __alloc_bootmem(size, align,
                                         __pa(MAX_DMA_ADDRESS));
-                       printk(KERN_INFO
-                              "cpu %d has no node %d or node-local memory\n",
+                       pr_info("cpu %d has no node %d or node-local memory\n",
                                cpu, node);
-                       if (ptr)
-                               printk(KERN_DEBUG
-                                       "per cpu data for cpu%d at %016lx\n",
-                                        cpu, __pa(ptr));
-               }
-               else {
+                       pr_debug("per cpu data for cpu%d at %016lx\n",
+                                cpu, __pa(ptr));
+               } else {
                        ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
                                                        __pa(MAX_DMA_ADDRESS));
-                       if (ptr)
-                               printk(KERN_DEBUG
-                                       "per cpu data for cpu%d on node%d "
-                                       "at %016lx\n",
-                                       cpu, node, __pa(ptr));
+                       pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
+                               cpu, node, __pa(ptr));
                }
 #endif
+
+               memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
                per_cpu_offset(cpu) = ptr - __per_cpu_start;
-               memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+               per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
+               per_cpu(cpu_number, cpu) = cpu;
+               /*
+                * Copy data used in early init routines from the initial arrays to the
+                * per cpu data areas.  These arrays then become expendable and the
+                * *_early_ptr's are zeroed indicating that the static arrays are gone.
+                */
+               per_cpu(x86_cpu_to_apicid, cpu) =
+                               early_per_cpu_map(x86_cpu_to_apicid, cpu);
+               per_cpu(x86_bios_cpu_apicid, cpu) =
+                               early_per_cpu_map(x86_bios_cpu_apicid, cpu);
+#ifdef X86_64_NUMA
+               per_cpu(x86_cpu_to_node_map, cpu) =
+                               early_per_cpu_map(x86_cpu_to_node_map, cpu);
+#endif
+#ifdef CONFIG_X86_64
+               per_cpu(irq_stack_ptr, cpu) =
+                       per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64;
+               /*
+                * Up to this point, CPU0 has been using .data.init
+                * area.  Reload %gs offset for CPU0.
+                */
+               if (cpu == 0)
+                       load_gs_base(cpu);
+#endif
+
+               DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
        }
 
-       /* Setup percpu data maps */
-       setup_per_cpu_maps();
+       /* indicate the early static arrays will soon be gone */
+       early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
+       early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
+#ifdef X86_64_NUMA
+       early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
+#endif
 
        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();
+
+       /* Setup cpu initialized, callin, callout masks */
+       setup_cpu_local_masks();
 }
 
 #endif
@@ -207,6 +204,7 @@ void __init setup_per_cpu_areas(void)
  * Requires node_possible_map to be valid.
  *
  * Note: node_to_cpumask() is not valid until after this is done.
+ * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
  */
 static void __init setup_node_to_cpumask_map(void)
 {
@@ -222,6 +220,7 @@ static void __init setup_node_to_cpumask_map(void)
 
        /* allocate the map */
        map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
+       DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);
 
        pr_debug("Node to cpumask map at %p for %d nodes\n",
                 map, nr_node_ids);
@@ -234,17 +233,23 @@ void __cpuinit numa_set_node(int cpu, int node)
 {
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
 
-       if (cpu_pda(cpu) && node != NUMA_NO_NODE)
-               cpu_pda(cpu)->nodenumber = node;
-
-       if (cpu_to_node_map)
+       /* early setting, no percpu area yet */
+       if (cpu_to_node_map) {
                cpu_to_node_map[cpu] = node;
+               return;
+       }
 
-       else if (per_cpu_offset(cpu))
-               per_cpu(x86_cpu_to_node_map, cpu) = node;
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+       if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
+               printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
+               dump_stack();
+               return;
+       }
+#endif
+       per_cpu(x86_cpu_to_node_map, cpu) = node;
 
-       else
-               pr_debug("Setting node for non-present cpu %d\n", cpu);
+       if (node != NUMA_NO_NODE)
+               per_cpu(node_number, cpu) = node;
 }
 
 void __cpuinit numa_clear_node(int cpu)
@@ -261,7 +266,7 @@ void __cpuinit numa_add_cpu(int cpu)
 
 void __cpuinit numa_remove_cpu(int cpu)
 {
-       cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
+       cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
 }
 
 #else /* CONFIG_DEBUG_PER_CPU_MAPS */
@@ -271,7 +276,7 @@ void __cpuinit numa_remove_cpu(int cpu)
  */
 static void __cpuinit numa_set_cpumask(int cpu, int enable)
 {
-       int node = cpu_to_node(cpu);
+       int node = early_cpu_to_node(cpu);
        cpumask_t *mask;
        char buf[64];
 
@@ -289,8 +294,8 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable)
 
        cpulist_scnprintf(buf, sizeof(buf), mask);
        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
-               enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf);
- }
+               enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
+}
 
 void __cpuinit numa_add_cpu(int cpu)
 {