x86: change bios_cpu_apicid to percpu data variable
author     travis@sgi.com <travis@sgi.com>
           Wed, 30 Jan 2008 12:33:12 +0000 (13:33 +0100)
committer  Ingo Molnar <mingo@elte.hu>
           Wed, 30 Jan 2008 12:33:12 +0000 (13:33 +0100)
Change static bios_cpu_apicid array to a per_cpu data variable.
This includes a static array used during initialization,
similar to the way x86_cpu_to_apicid[] is handled.

There is one early use of bios_cpu_apicid in apic_is_clustered_box().
The other reference, in cpu_present_to_apicid(), is called after
smp_set_apicids() has set up the percpu version of bios_cpu_apicid.

[ mingo@elte.hu: build fix ]

Signed-off-by: Mike Travis <travis@sgi.com>
Reviewed-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/kernel/apic_64.c
arch/x86/kernel/mpparse_64.c
arch/x86/kernel/setup_64.c
arch/x86/kernel/smpboot_64.c
include/asm-x86/smp_64.h
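
The conversion below follows the same early-pointer idiom already used for
x86_cpu_to_apicid[]: the MP-table parser fills a static __initdata array, an
"early pointer" advertises that array until the per-cpu areas exist, and
smp_set_apicids() later copies the values into the per-cpu variable and clears
the pointer. As a reading aid, here is a minimal user-space sketch of that
idiom. It is not kernel code; the names bios_apicid_init, settle_apicids and
lookup_apicid are illustrative only, and the [0 ... N-1] range initializer is
a GCC extension, as in the kernel source.

#include <stdio.h>

#define NR_CPUS		4
#define BAD_APICID	0xFFFFu	/* sentinel: no APIC id recorded */

/* Early table, filled while the MP tables are parsed; discarded after boot. */
static unsigned short bios_apicid_init[NR_CPUS] = {
	[0 ... NR_CPUS - 1] = BAD_APICID
};

/* Non-NULL only while the early table is still the authoritative copy. */
static void *bios_apicid_early_ptr = bios_apicid_init;

/* Stand-in for the per-cpu copies that exist once per-cpu areas are set up. */
static unsigned short bios_apicid_percpu[NR_CPUS];

/* Copy the early table into the "per-cpu" storage, then retire the pointer. */
static void settle_apicids(void)
{
	for (int i = 0; i < NR_CPUS; i++)
		bios_apicid_percpu[i] = bios_apicid_init[i];
	bios_apicid_early_ptr = NULL;		/* early static array is gone */
}

/* Readers check the early pointer first, as the patched kernel code does. */
static unsigned short lookup_apicid(int cpu)
{
	if (bios_apicid_early_ptr)
		return ((unsigned short *)bios_apicid_early_ptr)[cpu];
	return bios_apicid_percpu[cpu];
}

int main(void)
{
	bios_apicid_init[0] = 0;	/* pretend the MP table listed the BSP */

	printf("early:   cpu0 -> %#x\n", lookup_apicid(0));
	settle_apicids();
	printf("settled: cpu0 -> %#x\n", lookup_apicid(0));
	return 0;
}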

diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 01d4ca27ecf0496ade8cffde14b2107ae9ae5931..f9919c492699ebd163cc76a93bb11a4517f6f113 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -1180,14 +1180,26 @@ __cpuinit int apic_is_clustered_box(void)
        bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
 
        for (i = 0; i < NR_CPUS; i++) {
-               id = bios_cpu_apicid[i];
+               /* are we being called early in kernel startup? */
+               if (x86_bios_cpu_apicid_early_ptr) {
+                       id = ((u16 *)x86_bios_cpu_apicid_early_ptr)[i];
+               }
+               else if (i < nr_cpu_ids) {
+                       if (cpu_present(i))
+                               id = per_cpu(x86_bios_cpu_apicid, i);
+                       else
+                               continue;
+               }
+               else
+                       break;
+
                if (id != BAD_APICID)
                        __set_bit(APIC_CLUSTERID(id), clustermap);
        }
 
        /* Problem:  Partially populated chassis may not have CPUs in some of
         * the APIC clusters they have been allocated.  Only present CPUs have
-        * bios_cpu_apicid entries, thus causing zeroes in the bitmap.  Since
+        * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.  Since
         * clusters are allocated sequentially, count zeros only if they are
         * bounded by ones.
         */
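
The comment above states the rule only in words: on a partially populated
chassis, only zero entries that fall between set cluster bits indicate
allocated-but-empty clusters. Here is a small stand-alone paraphrase of that
counting rule; it is illustrative only and is not the code this hunk touches.

#include <stdio.h>

/*
 * Illustrative paraphrase of "count zeros only if they are bounded by ones":
 * zero entries before the first set entry or after the last set entry are
 * ignored; holes inside the populated range are counted.
 */
static int zeros_bounded_by_ones(const int *map, int len)
{
	int first = -1, last = -1, zeros = 0;

	for (int i = 0; i < len; i++) {
		if (map[i]) {
			if (first < 0)
				first = i;
			last = i;
		}
	}

	if (first < 0)
		return 0;			/* nothing set at all */

	for (int i = first; i <= last; i++)
		if (!map[i])
			zeros++;		/* hole inside the populated range */

	return zeros;
}

int main(void)
{
	int clustermap[] = { 1, 0, 0, 1, 0 };	/* trailing zero is not bounded */

	printf("%d\n", zeros_bounded_by_ones(clustermap, 5));	/* prints 2 */
	return 0;
}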
diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c
index 528ad9696d9669285a6b67cf1499bc89e59717ee..fd671754dcb5a08a83122bd5b023b9ad4363c545 100644
--- a/arch/x86/kernel/mpparse_64.c
+++ b/arch/x86/kernel/mpparse_64.c
@@ -67,7 +67,11 @@ unsigned disabled_cpus __cpuinitdata;
 /* Bitmask of physically existing CPUs */
 physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
 
-u16 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata
+                               = { [0 ... NR_CPUS-1] = BAD_APICID };
+void *x86_bios_cpu_apicid_early_ptr;
+DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID;
+EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
 
 
 /*
@@ -118,19 +122,22 @@ static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
        physid_set(m->mpc_apicid, phys_cpu_present_map);
        if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
                /*
-                * bios_cpu_apicid is required to have processors listed
+                * x86_bios_cpu_apicid is required to have processors listed
                 * in same order as logical cpu numbers. Hence the first
                 * entry is BSP, and so on.
                 */
                cpu = 0;
        }
-       bios_cpu_apicid[cpu] = m->mpc_apicid;
        /* are we being called early in kernel startup? */
        if (x86_cpu_to_apicid_early_ptr) {
-               u16 *x86_cpu_to_apicid = (u16 *)x86_cpu_to_apicid_early_ptr;
-               x86_cpu_to_apicid[cpu] = m->mpc_apicid;
+               u16 *cpu_to_apicid = (u16 *)x86_cpu_to_apicid_early_ptr;
+               u16 *bios_cpu_apicid = (u16 *)x86_bios_cpu_apicid_early_ptr;
+
+               cpu_to_apicid[cpu] = m->mpc_apicid;
+               bios_cpu_apicid[cpu] = m->mpc_apicid;
        } else {
                per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid;
+               per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid;
        }
 
        cpu_set(cpu, cpu_possible_map);
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 529e45c37b1ce104905f7fd559c1dc6b35e86faa..71a420c7fee7ae45d4a91451953d1fe952d69fe5 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -362,8 +362,11 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_SMP
        /* setup to use the early static init tables during kernel startup */
        x86_cpu_to_apicid_early_ptr = (void *)&x86_cpu_to_apicid_init;
+#ifdef CONFIG_NUMA
        x86_cpu_to_node_map_early_ptr = (void *)&x86_cpu_to_node_map_init;
 #endif
+       x86_bios_cpu_apicid_early_ptr = (void *)&x86_bios_cpu_apicid_init;
+#endif
 
 #ifdef CONFIG_ACPI
        /*
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index a8bc2bcdb74aefef12dfe05b2df9907c8417c76b..93071cdf0849a9f79da270dceb67a340fb26c1ac 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -864,8 +864,12 @@ void __init smp_set_apicids(void)
                if (per_cpu_offset(cpu)) {
                        per_cpu(x86_cpu_to_apicid, cpu) =
                                                x86_cpu_to_apicid_init[cpu];
+#ifdef CONFIG_NUMA
                        per_cpu(x86_cpu_to_node_map, cpu) =
                                                x86_cpu_to_node_map_init[cpu];
+#endif
+                       per_cpu(x86_bios_cpu_apicid, cpu) =
+                                               x86_bios_cpu_apicid_init[cpu];
                }
                else
                        printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n",
@@ -874,7 +878,10 @@ void __init smp_set_apicids(void)
 
        /* indicate the early static arrays are gone */
        x86_cpu_to_apicid_early_ptr = NULL;
+#ifdef CONFIG_NUMA
        x86_cpu_to_node_map_early_ptr = NULL;
+#endif
+       x86_bios_cpu_apicid_early_ptr = NULL;
 }
 
 static void __init smp_cpu_index_default(void)
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
index 6fa332db29cc3eddf043962a8b32270f6d9b6133..e0a75519ad216e714a1d5430d183c2608dd1a3c4 100644
--- a/include/asm-x86/smp_64.h
+++ b/include/asm-x86/smp_64.h
@@ -27,18 +27,20 @@ extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
                                  void *info, int wait);
 
 extern u16 __initdata x86_cpu_to_apicid_init[];
+extern u16 __initdata x86_bios_cpu_apicid_init[];
 extern void *x86_cpu_to_apicid_early_ptr;
-extern u16 bios_cpu_apicid[];
+extern void *x86_bios_cpu_apicid_early_ptr;
 
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 DECLARE_PER_CPU(u16, cpu_llc_id);
 DECLARE_PER_CPU(u16, x86_cpu_to_apicid);
+DECLARE_PER_CPU(u16, x86_bios_cpu_apicid);
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-       if (mps_cpu < NR_CPUS)
-               return (int)bios_cpu_apicid[mps_cpu];
+       if (cpu_present(mps_cpu))
+               return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
        else
                return BAD_APICID;
 }
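
For clarity, a tiny stand-alone model of the new cpu_present_to_apicid()
behaviour: the lookup is now gated on presence rather than only on the NR_CPUS
bound, so a non-present CPU answers BAD_APICID directly instead of relying on
an unused slot of the old static array. The names present[] and
present_to_apicid() are illustrative, not kernel symbols.

#include <stdio.h>

#define NR_CPUS		4
#define BAD_APICID	0xFFFFu

/* Illustrative model only: a presence mask stands in for cpu_present(). */
static const int present[NR_CPUS]           = { 1, 1, 0, 0 };
static const unsigned short apicid[NR_CPUS] = { 0, 1, BAD_APICID, BAD_APICID };

static unsigned short present_to_apicid(int cpu)
{
	if (cpu >= 0 && cpu < NR_CPUS && present[cpu])
		return apicid[cpu];	/* per-cpu copy in the kernel */
	return BAD_APICID;		/* absent slot or out of range */
}

int main(void)
{
	printf("%#x %#x\n", present_to_apicid(1), present_to_apicid(2));
	return 0;
}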