 *	Routines to identify caches on Intel CPUs.
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/amd_nb.h>

	unsigned char descriptor;

#define MB(x)	((x) * 1024)

/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */

static const struct _cache_table __cpuinitconst cache_table[] =
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },		/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, MB(1) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, MB(2) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, MB(4) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },		/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, MB(1) },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, MB(2) },		/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, MB(4) },		/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, MB(8) },		/* 8-way set assoc, 64 byte line size */
	{ 0x48, LVL_2, MB(3) },		/* 12-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, MB(4) },		/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, MB(6) },		/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, MB(8) },		/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, MB(6) },		/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, MB(1) },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, MB(1) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, MB(2) },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x80, LVL_2, 512 },		/* 8-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, MB(1) },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, MB(2) },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, MB(1) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, MB(1) },		/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, MB(2) },		/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, MB(1) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, MB(2) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, MB(4) },		/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, MB(2) },		/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, MB(4) },		/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, MB(8) },		/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, MB(2) },		/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, MB(4) },		/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, MB(8) },		/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3, MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3, MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3, MB(24) },	/* 24-way set assoc, 64 byte line size */
	CACHE_TYPE_UNIFIED = 3

union _cpuid4_leaf_eax {
	enum _cache_type	type:5;
	unsigned int		level:3;
	unsigned int		is_self_initializing:1;
	unsigned int		is_fully_associative:1;
	unsigned int		reserved:4;
	unsigned int		num_threads_sharing:12;
	unsigned int		num_cores_on_die:6;

union _cpuid4_leaf_ebx {
	unsigned int		coherency_line_size:12;
	unsigned int		physical_line_partition:10;
	unsigned int		ways_of_associativity:10;

union _cpuid4_leaf_ecx {
	unsigned int		number_of_sets:32;

struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	struct amd_northbridge *nb;

struct _cpuid4_info {
	struct _cpuid4_info_regs base;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);

unsigned short			num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc., which is currently true on AMD CPUs.

   In theory the TLBs could be reported as a fake cache type too (they are in "dummy").
	unsigned line_size:8;
	unsigned lines_per_tag:8;
	unsigned size_in_kb:8;

	unsigned line_size:8;
	unsigned lines_per_tag:4;
	unsigned size_in_kb:16;

	unsigned line_size:8;
	unsigned lines_per_tag:4;
	unsigned size_encoded:14;

static const unsigned short __cpuinitconst assocs[] = {
	[0xf] = 0xffff /* fully associative - no way to show this currently */

static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l1_cache *l1 = &l1d;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
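	/*
	 * CPUID 0x80000005 returns the L1D geometry in ECX and L1I in EDX;
	 * 0x80000006 returns L2 in ECX and L3 in EDX.  The l1/l2/l3 bitfield
	 * unions above pick those registers apart.
	 */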
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;

		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);

		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;

		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,

 * L3 cache descriptors
static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
	struct amd_l3_cache *l3 = &nb->l3_cache;
	unsigned int sc0, sc1, sc2, sc3;

	pci_read_config_dword(nb->misc, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));

	if (boot_cpu_data.x86 == 0x15) {
		l3->subcaches[0] = sc0 += !(val & BIT(1));
		l3->subcaches[1] = sc1 += !(val & BIT(5));

	l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
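	/*
	 * The subcache counts above are scaled by 1024 (<< 10); the largest
	 * subcache therefore bounds the highest L3 index that the disable
	 * slots below will accept.
	 */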
static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
	/* only for L3, and not in virtualized environments */

	node = amd_get_nb_id(smp_processor_id());
	this_leaf->nb = node_to_amd_nb(node);
	if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
		amd_calc_l3_indices(this_leaf->nb);

 * check whether a slot used for disabling an L3 index is occupied.
 * @nb: the northbridge of the node containing the L3 cache
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if the slot is in use, or a negative value if
 * the slot is free.
int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
	unsigned int reg = 0;

	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

	/* check whether this slot is activated already */
	if (reg & (3UL << 30))
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))

	index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
		return sprintf(buf, "%d\n", index);

	return sprintf(buf, "FREE\n");

#define SHOW_CACHE_DISABLE(slot)					\
show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf,	\
	return show_cache_disable(this_leaf, buf, slot);		\
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
				 unsigned slot, unsigned long idx)
	 * disable index in all 4 subcaches
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);

		if (!nb->l3_cache.subcaches[i])

		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

	 * We need to WBINVD on a core on the node containing the L3
	 * cache whose indices we disable, therefore a simple wbinvd()
		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
 * disable an L3 cache index by using a disable-slot
 *
 * @nb:    the northbridge of the node containing the L3 cache
 * @cpu:   a CPU on the node containing the L3 cache
 * @slot:  slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
	/* check if @slot is already used or the index is already disabled */
	ret = amd_get_l3_disable_slot(nb, slot);

	if (index > nb->l3_cache.indices)

	/* check whether the other slot has disabled the same index already */
	if (index == amd_get_l3_disable_slot(nb, !slot))

	amd_l3_disable_index(nb, cpu, slot, index);

static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
				   const char *buf, size_t count,
	unsigned long val = 0;

	if (!capable(CAP_SYS_ADMIN))

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))

	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));

	if (strict_strtoul(buf, 10, &val) < 0)

	err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
		pr_warning("L3 slot %d in use/index already disabled!\n",

#define STORE_CACHE_DISABLE(slot)					\
store_cache_disable_##slot(struct _cpuid4_info *this_leaf,		\
			   const char *buf, size_t count,		\
	return store_cache_disable(this_leaf, buf, count, slot);	\
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);
show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))

	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));

store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
	if (!capable(CAP_SYS_ADMIN))

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))

	if (strict_strtoul(buf, 16, &val) < 0)

	if (amd_set_subcaches(cpu, val))

static struct _cache_attr subcaches =
	__ATTR(subcaches, 0644, show_subcaches, store_subcaches);

#else	/* CONFIG_AMD_NB */
#define amd_init_l3_cache(x, y)
#endif /* CONFIG_AMD_NB */
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_init_l3_cache(this_leaf, index);
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
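	/*
	 * CPUID leaf 4 reports every field as (value - 1), so the product
	 * above is sets * line size * physical line partitions * ways.
	 */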
static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c)
	unsigned int		eax, ebx, ecx, edx, op;
	union _cpuid4_leaf_eax	cache_eax;
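	/*
	 * On AMD (with topology extensions) cache leaves are enumerated via
	 * CPUID 0x8000001d; otherwise the classic leaf 4 is used.
	 */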
	if (c->x86_vendor == X86_VENDOR_AMD)

		/* Do cpuid(op) loop to find out num_cache_leaves */
		cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c)

	if (cpu_has_topoext) {
		num_cache_leaves = find_num_cache_leaves(c);
	} else if (c->extended_cpuid_level >= 0x80000006) {
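		/*
		 * CPUID 0x80000006 EDX[15:12] holds the L3 associativity; a
		 * nonzero value means an L3 is present, giving four cache
		 * leaves (L1d, L1i, L2, L3) instead of three.
		 */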
		if (cpuid_edx(0x80000006) & 0xf000)
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
	unsigned int cpu = c->cpu_index;

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves(c);

		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			switch (this_leaf.eax.split.level) {
				if (this_leaf.eax.split.type ==
					new_l1d = this_leaf.size/1024;
				else if (this_leaf.eax.split.type ==
					new_l1i = this_leaf.size/1024;
				new_l2 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l2_id = c->apicid & ~((1 << index_msb) - 1);
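				/*
				 * Masking off the low index_msb APIC-ID bits
				 * (above) yields an ID shared by all threads
				 * that share this cache; it becomes
				 * cpu_llc_id further down.
				 */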
				new_l3 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l3_id = c->apicid & ~((1 << index_msb) - 1);

	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;

		if (num_cache_leaves != 0 && c->x86 == 15)

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;
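		/*
		 * The low byte of EAX from CPUID(2), read above, tells how
		 * many times the leaf must be queried to collect all of the
		 * descriptor bytes.
		 */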
		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)

						switch (cache_table[k].cache_type) {
							l1i += cache_table[k].size;
							l1d += cache_table[k].size;
							l2 += cache_table[k].size;
							l3 += cache_table[k].size;
							trace += cache_table[k].size;

		per_cpu(cpu_llc_id, cpu) = l2_id;

		per_cpu(cpu_llc_id, cpu) = l3_id;
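	/* Report the largest cache level present: L3 if any, else L2, else the combined L1 sizes. */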
	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))
static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
	struct _cpuid4_info *this_leaf;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
			if (!per_cpu(ici_cpuid4_info, i))

			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
				if (!cpu_online(sibling))

				set_bit(sibling, this_leaf->shared_cpu_map);
	} else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) {

		for_each_cpu(i, cpu_sibling_mask(cpu)) {
			if (!per_cpu(ici_cpuid4_info, i))

			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
				if (!cpu_online(sibling))

				set_bit(sibling, this_leaf->shared_cpu_map);

static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (cache_shared_amd_cpu_map_setup(cpu, index))

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));

	index_msb = get_count_order(num_threads_sharing);
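	/*
	 * CPUs whose APIC IDs agree above index_msb share this cache: mark
	 * each of them in our map and mirror our bit into theirs.
	 */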
	for_each_online_cpu(i) {
		if (cpu_data(i).apicid >> index_msb ==
		    c->apicid >> index_msb) {
				to_cpumask(this_leaf->shared_cpu_map));
			if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
					CPUID4_INFO_IDX(i, index);
				cpumask_set_cpu(cpu, to_cpumask(
					sibling_leaf->shared_cpu_map));

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
	struct _cpuid4_info	*this_leaf, *sibling_leaf;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));

static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)

static void __cpuinit free_cache_attributes(unsigned int cpu)
	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;

static void __cpuinit get_cpu_leaves(void *_retval)
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);

		*retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
		if (unlikely(*retval < 0)) {
			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
		cache_shared_cpu_map_setup(cpu, j);

static int __cpuinit detect_cache_attributes(unsigned int cpu)
	if (num_cache_leaves == 0)

	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
		sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
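	/*
	 * CPUID has to execute on the CPU being probed, so the leaves are
	 * gathered via the cross-call to get_cpu_leaves() above.
	 */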
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;

#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
	unsigned short index;

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(ici_index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \

show_one_plus(level, base.eax.split.level, 0);
show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
	return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
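	/*
	 * sysfs hands us a single page; len is the space left up to the page
	 * boundary, and len-2 below keeps room for a trailing newline and NUL.
	 */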
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
	return show_shared_cpu_map_func(leaf, 0, buf);

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
	return show_shared_cpu_map_func(leaf, 1, buf);

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
	switch (this_leaf->base.eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
		return sprintf(buf, "Unknown\n");

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct attribute *default_attrs[] = {
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
#ifdef CONFIG_AMD_NB
static struct attribute ** __cpuinit amd_l3_attrs(void)
	static struct attribute **attrs;

	n = ARRAY_SIZE(default_attrs);

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))

	attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
		return attrs = default_attrs;

	for (n = 0; default_attrs[n]; n++)
		attrs[n] = default_attrs[n];

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
		attrs[n++] = &cache_disable_0.attr;
		attrs[n++] = &cache_disable_1.attr;

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		attrs[n++] = &subcaches.attr;
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);

		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, this_leaf->cpu) :

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count, this_leaf->cpu) :

static const struct sysfs_ops sysfs_ops = {

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
	if (num_cache_leaves == 0)

	err = detect_cache_attributes(cpu);

	/* Allocate all required memory */
	per_cpu(ici_cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))

	per_cpu(ici_index_kobject, cpu) = kzalloc(
		sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))

	cpuid4_cache_sysfs_exit(cpu);

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct device *dev)
	unsigned int cpu = dev->id;
	struct _index_kobject *this_object;
	struct _cpuid4_info *this_leaf;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))

	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &dev->kobj, "%s", "cache");
		cpuid4_cache_sysfs_exit(cpu);

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;

		this_leaf = CPUID4_INFO_IDX(cpu, i);
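		/*
		 * AMD L3 leaves that have a northbridge attached get the
		 * extended attribute set (cache index disable, subcache
		 * partitioning) instead of the default one.
		 */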
		ktype_cache.default_attrs = default_attrs;
#ifdef CONFIG_AMD_NB
		if (this_leaf->base.nb)
			ktype_cache.default_attrs = amd_l3_attrs();

		retval = kobject_init_and_add(&(this_object->kobj),
					      per_cpu(ici_cache_kobject, cpu),
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(ici_cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);

	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
static void __cpuinit cache_remove_dev(struct device *dev)
	unsigned int cpu = dev->id;

	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(ici_cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
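/*
 * CPU hotplug callback: add the cache sysfs interface when a CPU comes
 * online and tear it down again when the CPU goes offline.
 */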
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
	unsigned int cpu = (unsigned long)hcpu;

	dev = get_cpu_device(cpu);
	case CPU_ONLINE_FROZEN:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(dev);

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,

static int __cpuinit cache_sysfs_init(void)
	if (num_cache_leaves == 0)

	for_each_online_cpu(i) {
		struct device *dev = get_cpu_device(i);

		err = cache_add_dev(dev);
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);

device_initcall(cache_sysfs_init);