x86, cacheinfo: Base cache sharing info on CPUID 0x8000001d on AMD
Author:     Andreas Herrmann <andreas.herrmann3@amd.com>
AuthorDate: Fri, 19 Oct 2012 09:02:09 +0000 (11:02 +0200)
Commit:     H. Peter Anvin <hpa@linux.intel.com>
CommitDate: Tue, 13 Nov 2012 19:22:31 +0000 (11:22 -0800)
This patch is based on one submitted by Hans Rosenfeld.
See http://marc.info/?l=linux-kernel&m=133908777200931

Note that CPUID Fn8000_001D_EAX slightly differs from Intel's CPUID function 4.

Bits 14-25 contain NumSharingCache: the number of cores sharing this
cache, minus one. Software adds one to get the actual count.

The corresponding bits on Intel are defined as "maximum number of threads
sharing this cache" (with a "plus 1" encoding).

Thus a different method must be used to determine which cores share a
cache level.
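
For reference, a minimal userspace sketch (not part of this patch; it
assumes GCC/clang's <cpuid.h> helper __get_cpuid_count) of how
NumSharingCache is decoded from Fn8000_001D EAX:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;
        unsigned int index = 3;     /* cache index to query, e.g. L3 */

        /* Leaf 0x8000001d, subleaf = cache index (AMD topology ext) */
        if (!__get_cpuid_count(0x8000001d, index, &eax, &ebx, &ecx, &edx))
                return 1;           /* leaf not supported */

        /* Bits 14-25 hold NumSharingCache; the value is "minus 1"
         * encoded, so software adds one to get the core count. */
        unsigned int nshared = ((eax >> 14) & 0xfff) + 1;

        printf("cores sharing this cache: %u\n", nshared);
        return 0;
}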

Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Link: http://lkml.kernel.org/r/20121019090209.GG26718@alberich
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
arch/x86/kernel/cpu/intel_cacheinfo.c

index cd2e1ccce591e34d686b4eb37a9fd13fccd8ab88..fe9edec6698a3c0c15846c0e56c1a502cea05c08 100644 (file)
@@ -750,37 +750,50 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
 static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
 {
        struct _cpuid4_info *this_leaf;
-       int ret, i, sibling;
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
+       int i, sibling;
 
-       ret = 0;
-       if (index == 3) {
-               ret = 1;
-               for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
+       if (cpu_has_topoext) {
+               unsigned int apicid, nshared, first, last;
+
+               if (!per_cpu(ici_cpuid4_info, cpu))
+                       return 0;
+
+               this_leaf = CPUID4_INFO_IDX(cpu, index);
+               nshared = this_leaf->base.eax.split.num_threads_sharing + 1;
+               apicid = cpu_data(cpu).apicid;
+               first = apicid - (apicid % nshared);
+               last = first + nshared - 1;
+
+               for_each_online_cpu(i) {
+                       apicid = cpu_data(i).apicid;
+                       if ((apicid < first) || (apicid > last))
+                               continue;
                        if (!per_cpu(ici_cpuid4_info, i))
                                continue;
                        this_leaf = CPUID4_INFO_IDX(i, index);
-                       for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
-                               if (!cpu_online(sibling))
+
+                       for_each_online_cpu(sibling) {
+                               apicid = cpu_data(sibling).apicid;
+                               if ((apicid < first) || (apicid > last))
                                        continue;
                                set_bit(sibling, this_leaf->shared_cpu_map);
                        }
                }
-       } else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) {
-               ret = 1;
-               for_each_cpu(i, cpu_sibling_mask(cpu)) {
+       } else if (index == 3) {
+               for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
                        if (!per_cpu(ici_cpuid4_info, i))
                                continue;
                        this_leaf = CPUID4_INFO_IDX(i, index);
-                       for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
+                       for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
                                if (!cpu_online(sibling))
                                        continue;
                                set_bit(sibling, this_leaf->shared_cpu_map);
                        }
                }
-       }
+       } else
+               return 0;
 
-       return ret;
+       return 1;
 }
 
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
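
For illustration, the range check introduced above relies on the APIC
IDs of cores sharing a cache forming a contiguous, nshared-aligned
block, so membership reduces to a bounds test. A hypothetical
standalone sketch (names are illustrative, not from the patch):

#include <stdio.h>

static int shares_cache(unsigned int apicid, unsigned int other,
                        unsigned int nshared)
{
        unsigned int first = apicid - (apicid % nshared);
        unsigned int last  = first + nshared - 1;

        return other >= first && other <= last;
}

int main(void)
{
        /* With 8 cores per L3 (nshared = 8), APIC ID 13 shares its
         * cache with IDs 8..15 but not with ID 7. */
        printf("%d\n", shares_cache(13, 15, 8));    /* prints 1 */
        printf("%d\n", shares_cache(13,  7, 8));    /* prints 0 */
        return 0;
}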