x86: merge setup_per_cpu_maps() into setup_per_cpu_areas()
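setup_per_cpu_maps() was a second for_each_possible_cpu() pass whose only
job was to copy the early boot-time maps (x86_cpu_to_apicid,
x86_bios_cpu_apicid and, on 64-bit NUMA, x86_cpu_to_node_map) into the
freshly allocated per-cpu areas.  Fold that copy into the main loop of
setup_per_cpu_areas() and zero the early_per_cpu_ptr()s once after the
loop.  With cpu_number, node_number, this_cpu_off and irq_stack_ptr now
ordinary per-cpu variables, the bootmem-allocated pda table and
setup_cpu_pda_map() go away as well; CPU0 simply reloads its %gs base
once its real per-cpu area is populated.

Below is a user-space sketch (not part of the patch) of the offset scheme
that per_cpu() relies on, and of the ordering the merged loop uses: copy
the .data.percpu template into each CPU's area first, then publish
per_cpu_offset(cpu).  All names are illustrative stand-ins, not the
kernel's implementation; the cross-object pointer subtraction is formally
undefined in ISO C but fine as an illustration.

	/* percpu_offset_sketch.c: cc -Wall percpu_offset_sketch.c && ./a.out */
	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define NR_CPUS 4

	/* Stand-in for the .data.percpu template between __per_cpu_start
	 * and __per_cpu_end (loaded at __per_cpu_load). */
	static char percpu_template[64];

	/* Stand-in for __per_cpu_offset[]: what per_cpu_offset(cpu) stores. */
	static ptrdiff_t percpu_offset[NR_CPUS];

	/* A "per-cpu" int at offset 0 of the template; the kernel does this
	 * placement with linker sections, not a pointer. */
	static int *cpu_number_tpl = (int *)percpu_template;

	/* Like per_cpu(): template address plus the CPU's offset. */
	#define per_cpu(ptr, cpu) (*(int *)((char *)(ptr) + percpu_offset[cpu]))

	int main(void)
	{
		for (int cpu = 0; cpu < NR_CPUS; cpu++) {
			char *area = malloc(sizeof(percpu_template));

			/* The merged loop's ordering: fill the area from
			 * the template first, ... */
			memcpy(area, percpu_template, sizeof(percpu_template));
			/* ... then publish the offset, ... */
			percpu_offset[cpu] = area - percpu_template;
			/* ... then initialize per-cpu state through it. */
			per_cpu(cpu_number_tpl, cpu) = cpu;
		}

		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			printf("cpu %d: cpu_number = %d\n",
			       cpu, per_cpu(cpu_number_tpl, cpu));
		return 0;
	}
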
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 73ab01b297c5f9a1fa1de904aca249c659437794..d0b1476490a725fa7da40a8348efef9694cefb05 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -13,6 +13,7 @@
 #include <asm/mpspec.h>
 #include <asm/apicdef.h>
 #include <asm/highmem.h>
+#include <asm/proto.h>
 #include <asm/cpumask.h>
 
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
@@ -21,6 +22,15 @@
 # define DBG(x...)
 #endif
 
+/*
+ * Could be inside CONFIG_HAVE_SETUP_PER_CPU_AREA with other stuff but
+ * voyager wants cpu_number too.
+ */
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
+#endif
+
 #ifdef CONFIG_X86_LOCAL_APIC
 unsigned int num_processors;
 unsigned disabled_cpus __cpuinitdata;
@@ -43,6 +53,8 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
 #define        X86_64_NUMA     1       /* (used later) */
+DEFINE_PER_CPU(int, node_number) = 0;
+EXPORT_PER_CPU_SYMBOL(node_number);
 
 /*
  * Map cpu index to node index
@@ -65,81 +77,6 @@ static void __init setup_node_to_cpumask_map(void);
 static inline void setup_node_to_cpumask_map(void) { }
 #endif
 
-#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
-/*
- * Copy data used in early init routines from the initial arrays to the
- * per cpu data areas.  These arrays then become expendable and the
- * *_early_ptr's are zeroed indicating that the static arrays are gone.
- */
-static void __init setup_per_cpu_maps(void)
-{
-       int cpu;
-
-       for_each_possible_cpu(cpu) {
-               per_cpu(x86_cpu_to_apicid, cpu) =
-                               early_per_cpu_map(x86_cpu_to_apicid, cpu);
-               per_cpu(x86_bios_cpu_apicid, cpu) =
-                               early_per_cpu_map(x86_bios_cpu_apicid, cpu);
-#ifdef X86_64_NUMA
-               per_cpu(x86_cpu_to_node_map, cpu) =
-                               early_per_cpu_map(x86_cpu_to_node_map, cpu);
-#endif
-       }
-
-       /* indicate the early static arrays will soon be gone */
-       early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
-       early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
-#ifdef X86_64_NUMA
-       early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
-#endif
-}
-
-#ifdef CONFIG_X86_32
-/*
- * Great future not-so-futuristic plan: make i386 and x86_64 do it
- * the same way
- */
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(__per_cpu_offset);
-static inline void setup_cpu_pda_map(void) { }
-
-#elif !defined(CONFIG_SMP)
-static inline void setup_cpu_pda_map(void) { }
-
-#else /* CONFIG_SMP && CONFIG_X86_64 */
-
-/*
- * Allocate cpu_pda pointer table and array via alloc_bootmem.
- */
-static void __init setup_cpu_pda_map(void)
-{
-       char *pda;
-       unsigned long size;
-       int cpu;
-
-       size = roundup(sizeof(struct x8664_pda), cache_line_size());
-
-       /* allocate cpu_pda array and pointer table */
-       {
-               unsigned long asize = size * (nr_cpu_ids - 1);
-
-               pda = alloc_bootmem(asize);
-       }
-
-       /* initialize pointer table to static pda's */
-       for_each_possible_cpu(cpu) {
-               if (cpu == 0) {
-                       /* leave boot cpu pda in place */
-                       continue;
-               }
-               cpu_pda(cpu) = (struct x8664_pda *)pda;
-               cpu_pda(cpu)->in_bootmem = 1;
-               pda += size;
-       }
-}
-
-#endif /* CONFIG_SMP && CONFIG_X86_64 */
-
 #ifdef CONFIG_X86_64
 
 /* correctly size the local cpu masks */
@@ -159,6 +96,17 @@ static inline void setup_cpu_local_masks(void)
 
 #endif /* CONFIG_X86_32 */
 
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+
+#ifdef CONFIG_X86_64
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
+       [0] = (unsigned long)__per_cpu_load,
+};
+#else
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+#endif
+EXPORT_SYMBOL(__per_cpu_offset);
+
 /*
  * Great future plan:
  * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
@@ -171,9 +119,6 @@ void __init setup_per_cpu_areas(void)
        int cpu;
        unsigned long align = 1;
 
-       /* Setup cpu_pda map */
-       setup_cpu_pda_map();
-
        /* Copy section for each CPU (we discard the original) */
        old_size = PERCPU_ENOUGH_ROOM;
        align = max_t(unsigned long, PAGE_SIZE, align);
@@ -204,14 +149,44 @@ void __init setup_per_cpu_areas(void)
                                cpu, node, __pa(ptr));
                }
 #endif
-               per_cpu_offset(cpu) = ptr - __per_cpu_start;
+
                memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
+               per_cpu_offset(cpu) = ptr - __per_cpu_start;
+               per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
+               per_cpu(cpu_number, cpu) = cpu;
+               /*
+                * Copy data used in early init routines from the initial arrays to the
+                * per cpu data areas.  These arrays then become expendable and the
+                * *_early_ptr's are zeroed indicating that the static arrays are gone.
+                */
+               per_cpu(x86_cpu_to_apicid, cpu) =
+                               early_per_cpu_map(x86_cpu_to_apicid, cpu);
+               per_cpu(x86_bios_cpu_apicid, cpu) =
+                               early_per_cpu_map(x86_bios_cpu_apicid, cpu);
+#ifdef X86_64_NUMA
+               per_cpu(x86_cpu_to_node_map, cpu) =
+                               early_per_cpu_map(x86_cpu_to_node_map, cpu);
+#endif
+#ifdef CONFIG_X86_64
+               per_cpu(irq_stack_ptr, cpu) =
+                       per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64;
+               /*
+                * Up to this point, CPU0 has been using .data.init
+                * area.  Reload %gs offset for CPU0.
+                */
+               if (cpu == 0)
+                       load_gs_base(cpu);
+#endif
 
                DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
        }
 
-       /* Setup percpu data maps */
-       setup_per_cpu_maps();
+       /* indicate the early static arrays will soon be gone */
+       early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
+       early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
+#ifdef X86_64_NUMA
+       early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
+#endif
 
        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();
@@ -274,7 +249,7 @@ void __cpuinit numa_set_node(int cpu, int node)
        per_cpu(x86_cpu_to_node_map, cpu) = node;
 
        if (node != NUMA_NO_NODE)
-               cpu_pda(cpu)->nodenumber = node;
+               per_cpu(node_number, cpu) = node;
 }
 
 void __cpuinit numa_clear_node(int cpu)
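
For reference, the early_per_cpu() fallback that makes the final
early_per_cpu_ptr(...) = NULL assignments safe works roughly as in this
user-space sketch (illustrative names, not the kernel macro itself):
while the early pointer is non-NULL, lookups index the static boot map;
afterwards they use the per-cpu copies filled in by the merged loop.

	/* early_map_sketch.c: cc -Wall early_map_sketch.c && ./a.out */
	#include <stddef.h>
	#include <stdio.h>

	#define NR_CPUS 4

	/* Boot-time static map, like the x86_cpu_to_apicid early map. */
	static int early_apicid_map[NR_CPUS] = { 10, 11, 12, 13 };

	/* Like early_per_cpu_ptr(): non-NULL while the static map is live. */
	static int *early_apicid_ptr = early_apicid_map;

	/* Stand-in for the per-cpu copies of x86_cpu_to_apicid. */
	static int percpu_apicid[NR_CPUS];

	/* Models early_per_cpu(): prefer the early map while it exists. */
	static int cpu_to_apicid(int cpu)
	{
		return early_apicid_ptr ? early_apicid_ptr[cpu]
					: percpu_apicid[cpu];
	}

	int main(void)
	{
		/* The copy now done inside setup_per_cpu_areas()'s loop. */
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			percpu_apicid[cpu] = early_apicid_map[cpu];

		/* "indicate the early static arrays will soon be gone" */
		early_apicid_ptr = NULL;

		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			printf("cpu %d -> apicid %d\n", cpu, cpu_to_apicid(cpu));
		return 0;
	}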