x86: merge setup_per_cpu_maps() into setup_per_cpu_areas()
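
The change folds the early-map copy that setup_per_cpu_maps() used to do in a
second pass into the existing for_each_possible_cpu() loop of
setup_per_cpu_areas(), and clears the *_early_ptr pointers once that loop has
finished.  As a rough user-space sketch of that pattern (not the kernel code
itself; every identifier below is hypothetical):

    /*
     * Illustrative only: fold a separate copy pass into the main
     * per-cpu setup loop, then drop the early pointer.  None of these
     * names exist in the kernel.
     */
    #include <stdio.h>

    #define NR_CPUS 4

    /* stand-in for an early_per_cpu_map() boot-time array */
    static int early_apicid_map[NR_CPUS] = { 10, 11, 12, 13 };
    static int *early_apicid_ptr = early_apicid_map;

    /* stand-ins for the real per-cpu variables */
    static int percpu_apicid[NR_CPUS];
    static int percpu_cpu_number[NR_CPUS];

    static void setup_per_cpu_areas_sketch(void)
    {
            int cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++) {
                    /* ...per-cpu area allocation would happen here... */
                    percpu_cpu_number[cpu] = cpu;
                    /* copy formerly done by a separate setup_per_cpu_maps() */
                    percpu_apicid[cpu] = early_apicid_ptr[cpu];
            }

            /* the early static array is no longer needed */
            early_apicid_ptr = NULL;
    }

    int main(void)
    {
            int cpu;

            setup_per_cpu_areas_sketch();
            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    printf("cpu %d apicid %d\n", cpu, percpu_apicid[cpu]);
            return 0;
    }

The kernel change itself is shown in full in the diff below.
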
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index b5c35af2011d0325a2f3b3d331362d20b8dda41f..d0b1476490a725fa7da40a8348efef9694cefb05 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
 # define DBG(x...)
 #endif
 
+/*
+ * Could be inside CONFIG_HAVE_SETUP_PER_CPU_AREA with other stuff but
+ * voyager wants cpu_number too.
+ */
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
+#endif
+
 #ifdef CONFIG_X86_LOCAL_APIC
 unsigned int num_processors;
 unsigned disabled_cpus __cpuinitdata;
@@ -44,6 +53,8 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
 #define        X86_64_NUMA     1       /* (used later) */
+DEFINE_PER_CPU(int, node_number) = 0;
+EXPORT_PER_CPU_SYMBOL(node_number);
 
 /*
  * Map cpu index to node index
@@ -66,30 +77,6 @@ static void __init setup_node_to_cpumask_map(void);
 static inline void setup_node_to_cpumask_map(void) { }
 #endif
 
-/*
- * Define load_pda_offset() and per-cpu __pda for x86_64.
- * load_pda_offset() is responsible for loading the offset of pda into
- * %gs.
- *
- * On SMP, pda offset also duals as percpu base address and thus it
- * should be at the start of per-cpu area.  To achieve this, it's
- * preallocated in vmlinux_64.lds.S directly instead of using
- * DEFINE_PER_CPU().
- */
-#ifdef CONFIG_X86_64
-void __cpuinit load_pda_offset(int cpu)
-{
-       /* Memory clobbers used to order pda/percpu accesses */
-       mb();
-       wrmsrl(MSR_GS_BASE, cpu_pda(cpu));
-       mb();
-}
-#ifndef CONFIG_SMP
-DEFINE_PER_CPU(struct x8664_pda, __pda);
-#endif
-EXPORT_PER_CPU_SYMBOL(__pda);
-#endif /* CONFIG_SMP && CONFIG_X86_64 */
-
 #ifdef CONFIG_X86_64
 
 /* correctly size the local cpu masks */
@@ -110,33 +97,6 @@ static inline void setup_cpu_local_masks(void)
 #endif /* CONFIG_X86_32 */
 
 #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
-/*
- * Copy data used in early init routines from the initial arrays to the
- * per cpu data areas.  These arrays then become expendable and the
- * *_early_ptr's are zeroed indicating that the static arrays are gone.
- */
-static void __init setup_per_cpu_maps(void)
-{
-       int cpu;
-
-       for_each_possible_cpu(cpu) {
-               per_cpu(x86_cpu_to_apicid, cpu) =
-                               early_per_cpu_map(x86_cpu_to_apicid, cpu);
-               per_cpu(x86_bios_cpu_apicid, cpu) =
-                               early_per_cpu_map(x86_bios_cpu_apicid, cpu);
-#ifdef X86_64_NUMA
-               per_cpu(x86_cpu_to_node_map, cpu) =
-                               early_per_cpu_map(x86_cpu_to_node_map, cpu);
-#endif
-       }
-
-       /* indicate the early static arrays will soon be gone */
-       early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
-       early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
-#ifdef X86_64_NUMA
-       early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
-#endif
-}
 
 #ifdef CONFIG_X86_64
 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
@@ -192,23 +152,41 @@ void __init setup_per_cpu_areas(void)
 
                memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
                per_cpu_offset(cpu) = ptr - __per_cpu_start;
+               per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
+               per_cpu(cpu_number, cpu) = cpu;
+               /*
+                * Copy data used in early init routines from the initial arrays to the
+                * per cpu data areas.  These arrays then become expendable and the
+                * *_early_ptr's are zeroed indicating that the static arrays are gone.
+                */
+               per_cpu(x86_cpu_to_apicid, cpu) =
+                               early_per_cpu_map(x86_cpu_to_apicid, cpu);
+               per_cpu(x86_bios_cpu_apicid, cpu) =
+                               early_per_cpu_map(x86_bios_cpu_apicid, cpu);
+#ifdef X86_64_NUMA
+               per_cpu(x86_cpu_to_node_map, cpu) =
+                               early_per_cpu_map(x86_cpu_to_node_map, cpu);
+#endif
 #ifdef CONFIG_X86_64
+               per_cpu(irq_stack_ptr, cpu) =
+                       per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64;
                /*
-                * CPU0 modified pda in the init data area, reload pda
-                * offset for CPU0 and clear the area for others.
+                * Up to this point, CPU0 has been using .data.init
+                * area.  Reload %gs offset for CPU0.
                 */
                if (cpu == 0)
-                       load_pda_offset(0);
-               else
-                       memset(cpu_pda(cpu), 0, sizeof(*cpu_pda(cpu)));
+                       load_gs_base(cpu);
 #endif
-               per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
 
                DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
        }
 
-       /* Setup percpu data maps */
-       setup_per_cpu_maps();
+       /* indicate the early static arrays will soon be gone */
+       early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
+       early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
+#ifdef X86_64_NUMA
+       early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
+#endif
 
        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();
@@ -271,7 +249,7 @@ void __cpuinit numa_set_node(int cpu, int node)
        per_cpu(x86_cpu_to_node_map, cpu) = node;
 
        if (node != NUMA_NO_NODE)
-               cpu_pda(cpu)->nodenumber = node;
+               per_cpu(node_number, cpu) = node;
 }
 
 void __cpuinit numa_clear_node(int cpu)