#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *amd64_ctl_pci;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

/* count successfully initialized driver instances for setup_pci_device() */
static atomic_t drv_instances = ATOMIC_INIT(0);

/* Per-node driver instances */
static struct mem_ctl_info **mcis;
static struct ecc_settings **ecc_stngs;

/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * or higher value'.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
static struct scrubrate {
	u32 scrubval;		/* bit pattern for scrub rate */
	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x00, 0UL},		/* scrubbing off */
};
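/*
 * Worked example of the 'set' operation (request value is illustrative,
 * not from the BKDG): scrubrates[] is sorted by descending bandwidth,
 * so a request for 2000000000 bytes/sec stops at the first entry whose
 * bandwidth is <= the request, { 0x01, 1600000000UL }, and scrubval
 * 0x01 gets programmed. A request smaller than every listed rate runs
 * off the end of the search and lands on the terminating { 0x00, 0UL }
 * entry, which turns scrubbing off. Entries whose scrubval is below a
 * family's minimum rate are skipped (see __amd64_set_scrub_rate()).
 */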
int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func)
	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);
/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only
 *
 * F10h: each DCT has its own set of regs
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
			       const char *func)
	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);

static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
				const char *func)
	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);

/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);

static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
				const char *func)
	if (addr >= 0x140 && addr <= 0x1a0) {
		f15h_select_dct(pvt, dct);

	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */

/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, then use the last maximum value found.
 */
static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is less than or equal to the requested rate
	 * and program that. If at the last entry, turn off DRAM scrubbing.
	 *
	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
	 * by falling back to the last element in scrubrates[].
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	scrubval = scrubrates[i].scrubval;

	pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);

	return scrubrates[i].bandwidth;
static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (boot_cpu_data.x86 == 0xf)

	/* F15h Erratum #505 */
	if (boot_cpu_data.x86 == 0x15)
		f15h_select_dct(pvt, 0);

	return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);

static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
	struct amd64_pvt *pvt = mci->pvt_info;
	int i, retval = -EINVAL;

	/* F15h Erratum #505 */
	if (boot_cpu_data.x86 == 0x15)
		f15h_select_dct(pvt, 0);

	amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
				   unsigned nid)
	/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
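/*
 * Example of the truncation above (address is illustrative): a sign-
 * extended SysAddr of 0xffffff8012345678 has bits 63-40 all set;
 * ANDing with 0x000000ffffffffff drops them, leaving 0x8012345678,
 * the 40-bit value that is compared against the base/limit pair.
 */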
/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
	struct amd64_pvt *pvt;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
		if (amd64_base_limit_match(pvt, sys_addr, node_id))

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)

	/* sanity test for sys_addr */
	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);

	return edac_mc_find((int)node_id);

	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
		 (unsigned long)sys_addr);
/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
	u64 csbase, csmask, base_bits, mask_bits;

	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow];
		base_bits	= GENMASK(21, 31) | GENMASK(9, 15);
		mask_bits	= GENMASK(21, 29) | GENMASK(9, 15);

		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];

		if (boot_cpu_data.x86 == 0x15)
			base_bits = mask_bits = GENMASK(19, 30) | GENMASK(5, 13);
		else
			base_bits = mask_bits = GENMASK(19, 28) | GENMASK(5, 13);

	*base  = (csbase & base_bits) << addr_shift;

	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);

	*mask |= (csmask & mask_bits) << addr_shift;

#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)
/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
	struct amd64_pvt *pvt;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		if ((input_addr & mask) == (base & mask)) {
			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
				 (unsigned long)input_addr, csrow,

	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		 (unsigned long)input_addr, pvt->mc_node_id);
/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
	struct amd64_pvt *pvt = mci->pvt_info;

	/* only revE and later have the DRAM Hole Address Register */
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
		edac_dbg(1, " revision %d for node %d does not support DHAR\n",
			 pvt->ext_model, pvt->mc_node_id);

	/* valid for Fam10h and above */
	if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");

	if (!dhar_valid(pvt)) {
		edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |  (0xffffffff-x))]  |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */
	*hole_base = dhar_base(pvt);
	*hole_size = (1ULL << 32) - *hole_base;

	if (boot_cpu_data.x86 > 0xf)
		*hole_offset = f10_dhar_offset(pvt);
	else
		*hole_offset = k8_dhar_offset(pvt);

	edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)*hole_base,
		 (unsigned long)*hole_offset, (unsigned long)*hole_size);

EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
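/*
 * Worked example with a hypothetical DHAR base of 0xc0000000:
 * *hole_base is 0xc0000000 and *hole_size is (1ULL << 32) - 0xc0000000
 * = 0x40000000, i.e. a 1GB hole covering [3GB, 4GB). The DRAM hidden
 * behind the hole becomes reachable again at SysAddrs from 0x100000000
 * upward, and *hole_offset is the amount sys_addr_to_dram_addr() below
 * subtracts from such a SysAddr to land back on the underlying DRAM
 * address.
 */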
/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the SysAddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p. 70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,

		if ((sys_addr >= (1ULL << 32)) &&
		    (sys_addr < ((1ULL << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
				 (unsigned long)sys_addr,
				 (unsigned long)dram_addr);

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values. Therefore we discard bits 63-40 of
	 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s. Otherwise the bits we discard are all 0s. See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;

	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)dram_addr);
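/*
 * Example run with the hypothetical 1GB hole from above (dram_base 0,
 * hole_offset 0x40000000): SysAddr 0x100000000 lies in the relocated
 * region, so the DHAR path returns 0x100000000 - 0x40000000 =
 * 0xc0000000, the first DRAM address hidden by the hole. SysAddr
 * 0x80000000 is below 4GB, so it takes the fall-through path and the
 * DramAddr is simply the SysAddr minus dram_base.
 */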
/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
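/*
 * Reading the table above: IntlvEn 0 means no node interleave (0 bits),
 * 1 means 2-way (1 bit), 3 means 4-way (2 bits) and 7 means 8-way
 * (3 bits). The remaining encodings (2, 4, 5, 6) are not valid IntlvEn
 * values and fall through to 0; anything above 7 trips the BUG_ON().
 */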
/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
	struct amd64_pvt *pvt;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +

	edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		 intlv_shift, (unsigned long)dram_addr,
		 (unsigned long)input_addr);

/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
		dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)input_addr);

/*
 * @input_addr is an InputAddr associated with the node represented by mci.
 * Translate @input_addr to a DramAddr and return the result.
 */
static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
	struct amd64_pvt *pvt;
	unsigned node_id, intlv_shift;

	/*
	 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * shows how to translate a DramAddr to an InputAddr. Here we reverse
	 * this procedure. When translating from a DramAddr to an InputAddr, the
	 * bits used for node interleaving are discarded. Here we recover these
	 * bits from the IntlvSel field of the DRAM Limit register (section
	 * 3.4.4.2) for the node that input_addr is associated with.
	 */
	node_id = pvt->mc_node_id;

	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	if (intlv_shift == 0) {
		edac_dbg(1, " InputAddr 0x%lx translates to DramAddr of same value\n",
			 (unsigned long)input_addr);

	bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
	       (input_addr & 0xfff);

	intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
	dram_addr = bits + (intlv_sel << 12);

	edac_dbg(1, "InputAddr 0x%lx translates to DramAddr 0x%lx (%d node interleave bits)\n",
		 (unsigned long)input_addr,
		 (unsigned long)dram_addr, intlv_shift);

/*
 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
 * @dram_addr to a SysAddr.
 */
static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 hole_base, hole_offset, hole_size, base, sys_addr;

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,

		if ((dram_addr >= hole_base) &&
		    (dram_addr < (hole_base + hole_size))) {
			sys_addr = dram_addr + hole_offset;

			edac_dbg(1, "using DHAR to translate DramAddr 0x%lx to SysAddr 0x%lx\n",
				 (unsigned long)dram_addr,
				 (unsigned long)sys_addr);

	base     = get_dram_base(pvt, pvt->mc_node_id);
	sys_addr = dram_addr + base;

	/*
	 * The sys_addr we have computed up to this point is a 40-bit value
	 * because the k8 deals with 40-bit values. However, the value we are
	 * supposed to return is a full 64-bit physical address. The AMD
	 * x86-64 architecture specifies that the most significant implemented
	 * address bit through bit 63 of a physical address must be either all
	 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
	 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
	 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
	 * Programming.
	 */
	sys_addr |= ~((sys_addr & (1ull << 39)) - 1);

	edac_dbg(1, " Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)dram_addr,
		 (unsigned long)sys_addr);
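/*
 * Example of the sign extension above: for sys_addr = 0x8000000000
 * (bit 39 set), (sys_addr & (1ull << 39)) - 1 is 0x7fffffffff, whose
 * complement is 0xffffff8000000000, so the OR yields
 * 0xffffff8000000000 -- bits 63-40 become copies of bit 39. If bit 39
 * is clear, the masked value is 0, 0 - 1 is all ones, its complement
 * is 0, and sys_addr is returned unchanged.
 */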
/*
 * @input_addr is an InputAddr associated with the node given by mci. Translate
 * @input_addr to a SysAddr.
 */
static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
					 u64 input_addr)
	return dram_addr_to_sys_addr(mci,
				     input_addr_to_dram_addr(mci, input_addr));

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    u32 *page, u32 *offset)
	*page	= (u32) (error_address >> PAGE_SHIFT);
	*offset	= ((u32) error_address) & ~PAGE_MASK;
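/*
 * With 4K pages (PAGE_SHIFT == 12), an error address of 0x12345678
 * splits into page 0x12345 and offset 0x678.
 */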
/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * failure.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
			     "address 0x%lx\n", (unsigned long)sys_addr);

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
	unsigned long edac_cap = EDAC_FLAG_NONE;

	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)

	if (pvt->dclr0 & BIT(bit))
		edac_cap = EDAC_FLAG_SECDED;

static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);

static void amd64_dump_dramcfg_low(u32 dclr, int chan)
	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	edac_dbg(1, " DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
		 (dclr & BIT(16)) ? "un" : "",
		 (dclr & BIT(19)) ? "yes" : "no");

	edac_dbg(1, " PAR/ERR parity: %s\n",
		 (dclr & BIT(8)) ? "enabled" : "disabled");

	if (boot_cpu_data.x86 == 0x10)
		edac_dbg(1, " DCT 128bit mode width: %s\n",
			 (dclr & BIT(11)) ? "128b" : "64b");

	edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		 (dclr & BIT(12)) ? "yes" : "no",
		 (dclr & BIT(13)) ? "yes" : "no",
		 (dclr & BIT(14)) ? "yes" : "no",
		 (dclr & BIT(15)) ? "yes" : "no");
/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	edac_dbg(1, " NB two channel DRAM capable: %s\n",
		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	amd64_dump_dramcfg_low(pvt->dclr0, 0);

	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt),
		 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
					    : f10_dhar_offset(pvt));

	edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	amd64_debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (boot_cpu_data.x86 == 0xf)

	amd64_debug_display_dimm_sizes(pvt, 1);

	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		amd64_dump_dramcfg_low(pvt->dclr1, 1);

/*
 * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
	prep_chip_selects(pvt);

	for_each_chip_select(cs, 0, pvt) {
		int reg0   = DCSB0 + (cs * 4);
		int reg1   = DCSB1 + (cs * 4);
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];

		if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
			edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",

		if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))

		if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
			edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",

	for_each_chip_select_mask(cs, 0, pvt) {
		int reg0   = DCSM0 + (cs * 4);
		int reg1   = DCSM1 + (cs * 4);
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];

		if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
			edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",

		if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))

		if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
			edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
	/* F15h supports only DDR3 */
	if (boot_cpu_data.x86 >= 0x15)
		type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
	else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
		if (pvt->dchr0 & DDR3_MODE)
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
		else
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
	} else
		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;

	amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);

/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	return (flag) ? 2 : 1;
/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct mce *m)
	struct cpuinfo_x86 *c = &boot_cpu_data;

	addr = m->addr & GENMASK(start_bit, end_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (c->x86 == 0x15) {
		struct amd64_pvt *pvt;
		u64 cc6_base, tmp_addr;
		u8 mce_nid, intlv_en;

		if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)

		mce_nid	= amd_get_nb_id(m->extcpu);
		pvt	= mcis[mce_nid]->pvt_info;

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
		intlv_en = tmp >> 21 & 0x7;

		/* add [47:27] + 3 trailing bits */
		cc6_base  = (tmp & GENMASK(0, 20)) << 3;

		/* reverse and add DramIntlvEn */
		cc6_base |= intlv_en ^ 0x7;

			return cc6_base | (addr & GENMASK(0, 23));

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);

		tmp_addr  = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1);

		/* OR DramIntlvSel into bits [14:12] */
		tmp_addr |= (tmp & GENMASK(21, 23)) >> 9;

		/* add remaining [11:0] bits from original MC4_ADDR */
		tmp_addr |= addr & GENMASK(0, 11);

		return cc6_base | tmp_addr;
static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int off = range << 3;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (!dram_rw(pvt, range))

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

	/* Factor in CC6 save area by reading dst node's limit reg */
	if (c->x86 == 0x15) {
		struct pci_dev *f1 = NULL;
		u8 nid = dram_dst_node(pvt, range);

		f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1));

		amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

		pvt->ranges[range].lim.lo &= GENMASK(0, 15);

		/* {[39:27],111b} */
		pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

		pvt->ranges[range].lim.hi &= GENMASK(0, 7);

		pvt->ranges[range].lim.hi |= llim >> 13;
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    u16 syndrome)
	struct mem_ctl_info *src_mci;
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, &page, &offset);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     page, offset, syndrome,
				     "failed to map error addr to a node",

	/* Now map the sys_addr to a CSROW */
	csrow = sys_addr_to_csrow(src_mci, sys_addr);
	if (csrow < 0) {
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     page, offset, syndrome,
				     "failed to map error addr to a csrow",

	/* CHIPKILL enabled */
	if (pvt->nbcfg & NBCFG_CHIPKILL) {
		channel = get_channel_from_ecc_syndrome(mci, syndrome);
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of
			 * them as suspect.
			 */
			amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - "
				      "possible error reporting race\n",
			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
					     page, offset, syndrome,
					     "unknown syndrome - possible error reporting race",
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory. This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		channel = ((sys_addr & BIT(3)) != 0);
	}

	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci, 1,
			     page, offset, syndrome,
static int ddr2_cs_size(unsigned i, bool dct_width)

	else if (!(i & 0x1))

		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);

static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode)
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	if (pvt->ext_model >= K8_REV_F) {
		WARN_ON(cs_mode > 11);
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
	}
	else if (pvt->ext_model >= K8_REV_D) {
		WARN_ON(cs_mode > 10);

		/*
		 * the below calculation, besides trying to win an obfuscated C
		 * contest, maps cs_mode values to DIMM chip select sizes. The
		 * mappings are:
		 *
		 *	cs_mode	CS size (mb)
		 *	=======	============
		 *
		 * Basically, it calculates a value with which to shift the
		 * smallest CS size of 32MB.
		 *
		 * ddr[23]_cs_size have a similar purpose.
		 */
		diff = cs_mode/3 + (unsigned)(cs_mode > 5);

		return 32 << (cs_mode - diff);

		WARN_ON(cs_mode > 6);
		return 32 << cs_mode;
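/*
 * Spot-checking the revD-and-later formula above against the BKDG
 * table the comment refers to: cs_mode 0 gives diff 0, 32 << 0 = 32MB;
 * cs_mode 3 gives diff 1, 32 << 2 = 128MB; cs_mode 6 gives diff 3,
 * 32 << 3 = 256MB; cs_mode 10 gives diff 4, 32 << 6 = 2048MB. Pre-revD
 * parts use the straight 32 << cs_mode mapping instead.
 */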
/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f1x_early_channel_count(struct amd64_pvt *pvt)
	int i, j, channels = 0;

	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
	if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))

	/*
	 * Need to check if in unganged mode: In such, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 * bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < 2; i++) {
		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {

	amd64_info("MCT channel count: %d\n", channels);
static int ddr3_cs_size(unsigned i, bool dct_width)

	if (i == 0 || i == 3 || i == 4)

	else if (!(i & 0x1))

		shift = (i + 1) >> 1;

		cs_size = (128 * (1 << !!dct_width)) << shift;

static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode)
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);

/*
 * F15h supports only 64bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode)
	WARN_ON(cs_mode > 12);

	return ddr3_cs_size(cs_mode, false);
static void read_dram_ctl_register(struct amd64_pvt *pvt)
	if (boot_cpu_data.x86 == 0xf)

	if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		edac_dbg(0, " DCTs operate in %s mode\n",
			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			edac_dbg(0, " Address range split per DCT: %s\n",
				 (dct_high_range_enabled(pvt) ? "yes" : "no"));

		edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			 (dct_memory_cleared(pvt) ? "yes" : "no"));

		edac_dbg(0, " channel interleave: %s, "
			 "interleave bits selector: 0x%x\n",
			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			 dct_sel_interleave_addr(pvt));

	amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9
 * Memory Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

	if (dct_ganging_enabled(pvt))

		return dct_sel_high;

	/*
	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
	 */
	if (dct_interleave_enabled(pvt)) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		/* return DCT select function: 0=DCT0, 1=DCT1 */
			return sys_addr >> 6 & 1;

		if (intlv_addr & 0x2) {
			u8 shift = intlv_addr & 0x1 ? 9 : 6;
			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;

			return ((sys_addr >> shift) & 1) ^ temp;
		}

		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
	}

	if (dct_high_range_enabled(pvt))
		return ~dct_sel_high & 1;
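/*
 * Example for the (intlv_addr & 0x2) branch above (address value is
 * hypothetical): with intlv_addr == 2 the select bit is sys_addr[6]
 * XORed with the parity of sys_addr[20:16]. For sys_addr = 0x10040,
 * sys_addr[6] = 1 and bits [20:16] = 0x01 (one bit set, so temp = 1),
 * giving 1 ^ 1 = 0, i.e. DCT0.
 */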
/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
				 u64 sys_addr, bool hi_rng,
				 u32 dct_sel_base_addr)
	u64 dram_base		= get_dram_base(pvt, range);
	u64 hole_off		= f10_dhar_offset(pvt);
	u64 dct_sel_base_off	= (pvt->dct_sel_hi & 0xFFFFFC00) << 16;

		/*
		 * if
		 * base address of high range is below 4Gb
		 * (bits [47:27] at [31:11])
		 * DRAM address space on this DCT is hoisted above 4Gb	&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole offset from sys_addr
		 * else
		 *	remove high range offset from sys_addr
		 */
		if ((!(dct_sel_base_addr >> 16) ||
		     dct_sel_base_addr < dhar_base(pvt)) &&
		    (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dct_sel_base_off;

		/*
		 * if
		 * we have a valid hole		&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole
		 * else
		 *	remove dram base to normalize to DCT address
		 */
		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dram_base;

	return (sys_addr & GENMASK(6, 47)) - (chan_off & GENMASK(23, 47));
/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * spare row
 */
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
	if (online_spare_swap_done(pvt, dct) &&
	    csrow == online_spare_bad_dramcs(pvt, dct)) {

		for_each_chip_select(tmp_cs, dct, pvt) {
			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {

/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL: NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u64 cs_base, cs_mask;
	int cs_found = -EINVAL;

	pvt = mci->pvt_info;

	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);

	for_each_chip_select(csrow, dct, pvt) {
		if (!csrow_enabled(csrow, dct, pvt))

		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);

		edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
			 csrow, cs_base, cs_mask);

		edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
			 (in_addr & cs_mask), (cs_base & cs_mask));

		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
			cs_found = f10_process_possible_spare(pvt, dct, csrow);

			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
/*
 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
 * swapped with a region located at the bottom of memory so that the GPU can use
 * the interleaved region and thus two channels.
 */
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;

	if (boot_cpu_data.x86 == 0x10) {
		/* only revC3 and revE have that feature */
		if (boot_cpu_data.x86_model < 4 ||
		    (boot_cpu_data.x86_model < 0xa &&
		     boot_cpu_data.x86_mask < 3))

	amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);

	if (!(swap_reg & 0x1))

	swap_base	= (swap_reg >> 3) & 0x7f;
	swap_limit	= (swap_reg >> 11) & 0x7f;
	rgn_size	= (swap_reg >> 20) & 0x7f;
	tmp_addr	= sys_addr >> 27;

	if (!(sys_addr >> 34) &&
	    (((tmp_addr >= swap_base) &&
	      (tmp_addr <= swap_limit)) ||
	     (tmp_addr < rgn_size)))
		return sys_addr ^ (u64)swap_base << 27;
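/*
 * The swap_reg fields are in 128MB (1 << 27) granules: swap_base and
 * swap_limit bound the swapped region, rgn_size is the block at the
 * bottom of memory it trades places with. The XOR with
 * (u64)swap_base << 27 maps each granule of one region onto the other;
 * e.g. a hypothetical swap_base of 0x1 exchanges addresses around
 * 0x8000000 with those at the bottom of memory.
 */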
/* For a given @dram_range, check if @sys_addr falls within it. */
static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
				  u64 sys_addr, int *nid, int *chan_sel)
	int cs_found = -EINVAL;
	bool high_range = false;

	u8 node_id = dram_dst_node(pvt, range);
	u8 intlv_en = dram_intlv_en(pvt, range);
	u32 intlv_sel = dram_intlv_sel(pvt, range);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",

	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))

	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);

	dct_sel_base = dct_sel_baseaddr(pvt);

	/*
	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
	 * select between DCT0 and DCT1.
	 */
	if (dct_high_range_enabled(pvt) &&
	    !dct_ganging_enabled(pvt) &&
	    ((sys_addr >> 27) >= (dct_sel_base >> 11)))

	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);

	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
					  high_range, dct_sel_base);

	/* Remove node interleaving, see F1x120 */
		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
			    (chan_addr & 0xfff);

	/* remove channel interleave */
	if (dct_interleave_enabled(pvt) &&
	    !dct_high_range_enabled(pvt) &&
	    !dct_ganging_enabled(pvt)) {

		if (dct_sel_interleave_addr(pvt) != 1) {
			if (dct_sel_interleave_addr(pvt) == 0x3)
				chan_addr = ((chan_addr >> 10) << 9) |
					    (chan_addr & 0x1ff);
			else
				/* A[6] or hash 6 */
				chan_addr = ((chan_addr >> 7) << 6) |
					    (chan_addr & 0x3f);
		} else
			chan_addr = ((chan_addr >> 13) << 12) |
				    (chan_addr & 0xfff);
	}

	edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);

	if (cs_found >= 0) {
		*chan_sel = channel;
static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
				       int *node, int *chan_sel)
	int cs_found = -EINVAL;

	for (range = 0; range < DRAM_RANGES; range++) {

		if (!dram_rw(pvt, range))

		if ((get_dram_base(pvt, range)  <= sys_addr) &&
		    (get_dram_limit(pvt, range) >= sys_addr)) {

			cs_found = f1x_match_to_this_node(pvt, range,

/*
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware
 * (MCA NB Address (MC4_ADDR)).
 */
static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				     u16 syndrome)
	struct amd64_pvt *pvt = mci->pvt_info;

	int nid, csrow, chan = 0;

	error_address_to_page_and_offset(sys_addr, &page, &offset);

	csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
	if (csrow < 0) {
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     page, offset, syndrome,
				     "failed to map error addr to a csrow",

	/*
	 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise @chan should already contain the channel at
	 * this point.
	 */
	if (dct_ganging_enabled(pvt))
		chan = get_channel_from_ecc_syndrome(mci, syndrome);

	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
			     page, offset, syndrome,
/*
 * debug routine to display the memory sizes of all logical DIMMs and their
 * CSROWs
 */
static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
	int dimm, size0, size1, factor = 0;
	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;

	if (boot_cpu_data.x86 == 0xf) {
		if (pvt->dclr0 & WIDTH_128)

		/* K8 families < revF not supported yet */
		if (pvt->ext_model < K8_REV_F)

	dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
	dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
						   : pvt->csels[0].csbases;

	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",

	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

	/* Dump memory sizes for DIMM and its CSROWs */
	for (dimm = 0; dimm < 4; dimm++) {

		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam));

		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam));

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
			   dimm * 2,     size0 << factor,
			   dimm * 2 + 1, size1 << factor);
static struct amd64_family_type amd64_family_types[] = {
		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
		.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
			.early_channel_count	= k8_early_channel_count,
			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
			.dbam_to_cs		= k8_dbam_to_chip_select,
			.read_dct_pci_cfg	= k8_read_dct_pci_cfg,

		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
		.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f10_dbam_to_chip_select,
			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,

		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
		.f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f15_dbam_to_chip_select,
			.read_dct_pci_cfg	= f15_read_dct_pci_cfg,
};

static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
	struct pci_dev *dev = NULL;

	dev = pci_get_device(vendor, device, dev);

		if ((dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))

		dev = pci_get_device(vendor, device, dev);
/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
static u16 x4_vectors[] = {
	0x2f57, 0x1afe, 0x66cc, 0xdd88,
	0x11eb, 0x3396, 0x7f4c, 0xeac8,
	0x0001, 0x0002, 0x0004, 0x0008,
	0x1013, 0x3032, 0x4044, 0x8088,
	0x106b, 0x30d6, 0x70fc, 0xe0a8,
	0x4857, 0xc4fe, 0x13cc, 0x3288,
	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
	0x15c1, 0x2a42, 0x89ac, 0x4758,
	0x2b03, 0x1602, 0x4f0c, 0xca08,
	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
	0x8ba7, 0x465e, 0x244c, 0x1cc8,
	0x2b87, 0x164e, 0x642c, 0xdc18,
	0x40b9, 0x80de, 0x1094, 0x20e8,
	0x27db, 0x1eb6, 0x9dac, 0x7b58,
	0x11c1, 0x2242, 0x84ac, 0x4c58,
	0x1be5, 0x2d7a, 0x5e34, 0xa718,
	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
	0x4c97, 0xc87e, 0x11fc, 0x33a8,
	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
	0x16b3, 0x3d62, 0x4f34, 0x8518,
	0x1e2f, 0x391a, 0x5cac, 0xf858,
	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
	0x4397, 0xc27e, 0x17fc, 0x3ea8,
	0x1617, 0x3d3e, 0x6464, 0xb8b8,
	0x23ff, 0x12aa, 0xab6c, 0x56d8,
	0x2dfb, 0x1ba6, 0x913c, 0x7328,
	0x185d, 0x2ca6, 0x7914, 0x9e28,
	0x171b, 0x3e36, 0x7d7c, 0xebe8,
	0x4199, 0x82ee, 0x19f4, 0x2e58,
	0x4807, 0xc40e, 0x130c, 0x3208,
	0x1905, 0x2e0a, 0x5804, 0xac08,
	0x213f, 0x132a, 0xadfc, 0x5ba8,
	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};

static u16 x8_vectors[] = {
	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
			   unsigned v_dim)
	unsigned int i, err_sym;

	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {

		unsigned v_idx = err_sym * v_dim;
		unsigned v_end = (err_sym + 1) * v_dim;

		/* walk over all 16 bits of the syndrome */
		for (i = 1; i < (1U << 16); i <<= 1) {

			/* if bit is set in that eigenvector... */
			if (v_idx < v_end && vectors[v_idx] & i) {
				u16 ev_comp = vectors[v_idx++];

				/* ... and bit set in the modified syndrome, */

			/* can't get to zero, move to next symbol */

	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
static int map_err_sym_to_channel(int err_sym, int sym_size)
			return err_sym >> 4;

		/* imaginary bits not in a DIMM */
			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
			     err_sym);

			return err_sym >> 3;

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
	struct amd64_pvt *pvt = mci->pvt_info;

	if (pvt->ecc_sym_sz == 8)
		err_sym = decode_syndrome(syndrome, x8_vectors,
					  ARRAY_SIZE(x8_vectors),
					  pvt->ecc_sym_sz);
	else if (pvt->ecc_sym_sz == 4)
		err_sym = decode_syndrome(syndrome, x4_vectors,
					  ARRAY_SIZE(x4_vectors),
					  pvt->ecc_sym_sz);
	else {
		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);

	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
/*
 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
 * ADDRESS and process.
 */
static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
	struct amd64_pvt *pvt = mci->pvt_info;

	sys_addr = get_error_address(m);
	syndrome = extract_syndrome(m->status);

	amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);

	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome);

/* Handle any Un-correctable Errors (UEs) */
static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
	struct mem_ctl_info *log_mci, *src_mci = NULL;

	sys_addr = get_error_address(m);
	error_address_to_page_and_offset(sys_addr, &page, &offset);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!src_mci) {
		amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
			     (unsigned long)sys_addr);
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     "ERROR ADDRESS NOT mapped to a MC",

	csrow = sys_addr_to_csrow(log_mci, sys_addr);
	if (csrow < 0) {
		amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
			     (unsigned long)sys_addr);
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     "ERROR ADDRESS NOT mapped to CS",

	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,

static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
					    struct mce *m)
	u8 ecc_type = (m->status >> 45) & 0x3;
	u8 xec = XEC(m->status, 0x1f);
	u16 ec = EC(m->status);

	/* Bail out early if this was an 'observed' error */
	if (PP(ec) == NBSL_PP_OBS)

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)

	if (ecc_type == 2)
		amd64_handle_ce(mci, m);
	else if (ecc_type == 1)
		amd64_handle_ue(mci, m);

void amd64_decode_bus_error(int node_id, struct mce *m)
	__amd64_decode_bus_error(mcis[node_id], m);
/*
 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
 */
static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
	/* Reserve the ADDRESS MAP Device */
	pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);

		amd64_err("error address map device not found: "
			  "vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f1_id);

	/* Reserve the MISC Device */
	pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);

		pci_dev_put(pvt->F1);

		amd64_err("error F3 device not found: "
			  "vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f3_id);

	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));

static void free_mc_sibling_devs(struct amd64_pvt *pvt)
	pci_dev_put(pvt->F1);
	pci_dev_put(pvt->F3);
/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void read_mc_regs(struct amd64_pvt *pvt)
	struct cpuinfo_x86 *c = &boot_cpu_data;

	/*
	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
	 * those are Read-As-Zero
	 */
	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
	edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);

	/* check first whether TOP_MEM2 is enabled */
	rdmsrl(MSR_K8_SYSCFG, msr_val);
	if (msr_val & (1U << 21)) {
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
		edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
	} else
		edac_dbg(0, " TOP_MEM2 disabled\n");

	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);

	read_dram_ctl_register(pvt);

	for (range = 0; range < DRAM_RANGES; range++) {

		/* read settings for this DRAM range */
		read_dram_base_limit_regs(pvt, range);

		rw = dram_rw(pvt, range);

		edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
			 range,
			 get_dram_base(pvt, range),
			 get_dram_limit(pvt, range));

		edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
			 (rw & 0x1) ? "R" : "-",
			 (rw & 0x2) ? "W" : "-",
			 dram_intlv_sel(pvt, range),
			 dram_dst_node(pvt, range));
	}

	read_dct_base_mask(pvt);

	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
	amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);

	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
	amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);

	if (!dct_ganging_enabled(pvt)) {
		amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
		amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
	}

	pvt->ecc_sym_sz = 4;

	if (c->x86 >= 0x10) {
		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
		amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);

		/* F10h, revD and later can do x8 ECC too */
		if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
			pvt->ecc_sym_sz = 8;
	}

	dump_misc_regs(pvt);
/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr	ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
 *	k8 private pointer to -->
 *			DRAM Bank Address mapping register
 *			DCL register where dual_channel_active is
 *
 * The DBAM register consists of 4 sets of 4 bits each; the definitions are:
 *
 * Bits:	CSROWs
 * 0-3		CSROWs 0 and 1
 * 4-7		CSROWs 2 and 3
 * 8-11		CSROWs 4 and 5
 * 12-15	CSROWs 6 and 7
 *
 * Values range from: 0 to 15
 * The meaning of the values depends on CPU revision and dual-channel state,
 * see the relevant BKDG for more info.
 *
 * The memory controller provides for a total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel or two (2) DIMMs in dual channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on CPU
 * revision.
 *
 * Returns:
 *	The number of PAGE_SIZE pages on the specified CSROW number it
 *	encompasses
 */
static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
	u32 cs_mode, nr_pages;
	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;

	/*
	 * The math on this doesn't look right on the surface because x/2*4 can
	 * be simplified to x*2 but this expression makes use of the fact that
	 * it is integral math where 1/2=0. This intermediate value becomes the
	 * number of bits to shift the DBAM register to extract the proper CSROW
	 * field.
	 */
	cs_mode = (dbam >> ((csrow_nr / 2) * 4)) & 0xF;

	nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);

	edac_dbg(0, " (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
	edac_dbg(0, " nr_pages/channel= %u channel-count = %d\n",
		 nr_pages, pvt->channel_count);
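/*
 * Example of the shift math above: csrow_nr 5 gives (5 / 2) * 4 = 8,
 * so cs_mode is DBAM bits 11:8 -- the nibble for CSROWs 4 and 5 in the
 * register layout described before this function. If dbam_to_cs() then
 * reports, say, a 512MB chip select, nr_pages is 512 << (20 - 12) =
 * 131072 pages of 4KB per channel.
 */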
/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static int init_csrows(struct mem_ctl_info *mci)
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	struct amd64_pvt *pvt = mci->pvt_info;

	int i, j, empty = 1;
	enum mem_type mtype;
	enum edac_type edac_mode;

	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);

	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
		 pvt->mc_node_id, val,
		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));

	for_each_chip_select(i, 0, pvt) {
		csrow = mci->csrows[i];

		if (!csrow_enabled(i, 0, pvt) && !csrow_enabled(i, 1, pvt)) {
			edac_dbg(1, "----CSROW %d EMPTY for MC node %d\n",
				 i, pvt->mc_node_id);

		if (csrow_enabled(i, 0, pvt))
			nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
		if (csrow_enabled(i, 1, pvt))
			nr_pages += amd64_csrow_nr_pages(pvt, 1, i);

		get_cs_base_and_mask(pvt, i, 0, &base, &mask);
		/* 8 bytes of resolution */

		mtype = amd64_determine_memory_type(pvt, i);

		edac_dbg(1, " for MC node %d csrow %d:\n", pvt->mc_node_id, i);
		edac_dbg(1, " nr_pages: %u\n",
			 nr_pages * pvt->channel_count);

		/*
		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
		 */
		if (pvt->nbcfg & NBCFG_ECC_ENABLE)
			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
				    EDAC_S4ECD4ED : EDAC_SECDED;
		else
			edac_mode = EDAC_NONE;

		for (j = 0; j < pvt->channel_count; j++) {
			dimm = csrow->channels[j]->dimm;
			dimm->mtype = mtype;
			dimm->edac_mode = edac_mode;
			dimm->nr_pages = nr_pages;
		}
/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
	for_each_online_cpu(cpu)
		if (amd_get_nb_id(cpu) == nid)
			cpumask_set_cpu(cpu, mask);

/* check MCG_CTL on all the cpus on this node */
static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_warn("%s: Error allocating mask\n", __func__);

	get_cpus_on_this_dct_cpumask(mask, nid);

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		nbe = reg->l & MSR_MCGCTL_NBE;

		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			 (nbe ? "enabled" : "disabled"));

	free_cpumask_var(mask);

static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
	cpumask_var_t cmask;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_warn("%s: error allocating mask\n", __func__);

	get_cpus_on_this_dct_cpumask(cmask, nid);

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {

		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (on) {
			if (reg->l & MSR_MCGCTL_NBE)
				s->flags.nb_mce_enable = 1;

			reg->l |= MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off before
			 */
			if (!s->flags.nb_mce_enable)
				reg->l &= ~MSR_MCGCTL_NBE;
		}

	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);
static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
				       struct pci_dev *F3)
{
	bool ret = true;
	u32 value, mask = 0x3;	/* UECC/CECC enable */

	if (toggle_ecc_err_reporting(s, nid, ON)) {
		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
		return false;
	}

	amd64_read_pci_cfg(F3, NBCTL, &value);

	s->old_nbctl   = value & mask;
	s->nbctl_valid = true;

	value |= mask;
	amd64_write_pci_cfg(F3, NBCTL, value);

	amd64_read_pci_cfg(F3, NBCFG, &value);

	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	if (!(value & NBCFG_ECC_ENABLE)) {
		amd64_warn("DRAM ECC disabled on this node, enabling...\n");

		s->flags.nb_ecc_prev = 0;

		/* Attempt to turn on DRAM ECC Enable */
		value |= NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);

		amd64_read_pci_cfg(F3, NBCFG, &value);

		if (!(value & NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, "
				   "check memory DIMM configuration.\n");
			ret = false;
		} else {
			amd64_info("Hardware accepted DRAM ECC Enable\n");
		}
	} else {
		s->flags.nb_ecc_prev = 1;
	}

	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	return ret;
}
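/*
 * Note: the 0x3 mask used above and below covers the UECC and CECC
 * reporting enable bits in F3xNBCTL; s->old_nbctl preserves only those two
 * bits so restore_ecc_error_reporting() can put back exactly the BIOS
 * state.
 */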
static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
					struct pci_dev *F3)
{
	u32 value, mask = 0x3;	/* UECC/CECC enable */

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, NBCFG, &value);
		value &= ~NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}
/*
 * EDAC requires that the BIOS have ECC enabled before
 * taking over the processing of ECC errors. A command line
 * option makes it possible to force-enable hardware ECC later in
 * enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";
static bool ecc_enabled(struct pci_dev *F3, u8 nid)
{
	u32 value;
	u8 ecc_en = 0;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(F3, NBCFG, &value);

	ecc_en = !!(value & NBCFG_ECC_ENABLE);
	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));

	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
	if (!nb_mce_en)
		amd64_notice("NB MCE bank disabled, set MSR "
			     "0x%08x[4] on node %d to enable.\n",
			     MSR_IA32_MCG_CTL, nid);
	if (!ecc_en || !nb_mce_en) {
		amd64_notice("%s", ecc_msg);
		return false;
	}
	return true;
}
static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
	int rc;

	rc = amd64_create_sysfs_dbg_files(mci);
	if (rc < 0)
		return rc;

	if (boot_cpu_data.x86 >= 0x10) {
		rc = amd64_create_sysfs_inject_files(mci);
		if (rc < 0)
			return rc;
	}

	return 0;
}
static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
	amd64_remove_sysfs_dbg_files(mci);

	if (boot_cpu_data.x86 >= 0x10)
		amd64_remove_sysfs_inject_files(mci);
}
static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
				 struct amd64_family_type *fam)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap		= amd64_determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= fam->ctl_name;
	mci->dev_name		= pci_name(pvt->F2);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
}
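/*
 * With the two hooks above wired up, the EDAC core exposes the scrubber
 * through sysfs; the exact path varies by kernel version, but typically:
 *
 *	echo 1600000000 > /sys/devices/system/edac/mc/mc0/sdram_scrub_rate
 *
 * where the value is a bandwidth in bytes/sec that amd64_set_scrub_rate()
 * maps to the closest supported hardware scrub rate.
 */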
/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
{
	u8 fam = boot_cpu_data.x86;
	struct amd64_family_type *fam_type = NULL;

	switch (fam) {
	case 0xf:
		fam_type	= &amd64_family_types[K8_CPUS];
		pvt->ops	= &amd64_family_types[K8_CPUS].ops;
		break;

	case 0x10:
		fam_type	= &amd64_family_types[F10_CPUS];
		pvt->ops	= &amd64_family_types[F10_CPUS].ops;
		break;

	case 0x15:
		fam_type	= &amd64_family_types[F15_CPUS];
		pvt->ops	= &amd64_family_types[F15_CPUS].ops;
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	pvt->ext_model = boot_cpu_data.x86_model >> 4;

	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
		   (fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);

	return fam_type;
}
static int amd64_init_one_instance(struct pci_dev *F2)
{
	struct amd64_pvt *pvt = NULL;
	struct amd64_family_type *fam_type = NULL;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	int err = 0, ret;
	u8 nid = get_node_id(F2);

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_ret;

	pvt->mc_node_id	= nid;
	pvt->F2 = F2;

	ret = -EINVAL;
	fam_type = amd64_per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	ret = -ENODEV;
	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
	if (err)
		goto err_free;

	read_mc_regs(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	ret = -EINVAL;
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_siblings;

	ret = -ENOMEM;
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = pvt->csels[0].b_cnt;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = pvt->channel_count;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		goto err_siblings;

	mci->pvt_info = pvt;
	mci->pdev = &pvt->F2->dev;

	setup_mci_misc_attrs(mci, fam_type);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	ret = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		edac_dbg(1, "failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}
	if (set_mc_sysfs_attrs(mci)) {
		edac_dbg(1, "failed set_mc_sysfs_attrs()\n");
		goto err_add_sysfs;
	}

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(amd64_decode_bus_error);

	mcis[nid] = mci;

	atomic_inc(&drv_instances);

	return 0;

err_add_sysfs:
	edac_mc_del_mc(mci->pdev);
err_add_mc:
	edac_mc_free(mci);

err_siblings:
	free_mc_sibling_devs(pvt);

err_free:
	kfree(pvt);

err_ret:
	return ret;
}
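/*
 * The layer description in amd64_init_one_instance() models each node as a
 * chip-select x channel grid; e.g. with 8 chip selects and 2 channels,
 * edac_mc_alloc() creates 8 csrows with 2 channel slots each, which
 * init_csrows() then populates.
 */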
static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
					      const struct pci_device_id *mc_type)
{
	u8 nid = get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s;
	int ret = 0;

	ret = pci_enable_device(pdev);
	if (ret < 0) {
		edac_dbg(0, "ret=%d\n", ret);
		return -EIO;
	}

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	if (!ecc_enabled(F3, nid)) {
		ret = -ENODEV;

		if (!ecc_enable_override)
			goto err_enable;

		amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = amd64_init_one_instance(pdev);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);
		restore_ecc_error_reporting(s, nid, F3);
	}

	return ret;

err_enable:
	kfree(s);
	ecc_stngs[nid] = NULL;

err_out:
	return ret;
}
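/*
 * Note: the PCI core calls the probe above once per matching F2 device,
 * i.e. once per node, so the per-node ecc_stngs[] slot is claimed there and
 * released either on its error path or in amd64_remove_one_instance().
 */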
static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u8 nid = get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];

	mci = find_mci_by_dev(&pdev->dev);
	del_mc_sysfs_attrs(mci);
	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	free_mc_sibling_devs(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(amd64_decode_bus_error);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;
	mcis[nid] = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}
/*
 * This table is part of the interface for loading drivers for PCI devices. The
 * PCI core identifies what devices are on a system during boot, and then
 * consults this table to see whether this driver handles a given device.
 */
static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = {
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_15H_NB_F2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
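/*
 * MODULE_DEVICE_TABLE() above exports these IDs as module aliases so that
 * userspace (e.g. udev) can autoload the driver when one of the listed
 * northbridge F2 devices shows up in a PCI scan.
 */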
static struct pci_driver amd64_pci_driver = {
	.name		= EDAC_MOD_STR,
	.probe		= amd64_probe_one_instance,
	.remove		= __devexit_p(amd64_remove_one_instance),
	.id_table	= amd64_pci_table,
};
static void setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (amd64_ctl_pci)
		return;

	mci = mcis[0];
	if (!mci)
		return;

	pvt = mci->pvt_info;
	amd64_ctl_pci =
		edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);

	if (!amd64_ctl_pci) {
		pr_warning("%s(): Unable to create PCI control\n",
			   __func__);

		pr_warning("%s(): PCI error report via EDAC not set up\n",
			   __func__);
	}
}
static int __init amd64_edac_init(void)
{
	int err = -ENODEV;

	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);

	opstate_init();

	if (amd_cache_northbridges() < 0)
		goto err_ret;

	err = -ENOMEM;
	mcis	  = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!(mcis && ecc_stngs))
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	err = pci_register_driver(&amd64_pci_driver);
	if (err)
		goto err_pci;

	err = -ENODEV;
	if (!atomic_read(&drv_instances))
		goto err_no_instances;

	setup_pci_device();
	return 0;

err_no_instances:
	pci_unregister_driver(&amd64_pci_driver);

err_pci:
	msrs_free(msrs);
	msrs = NULL;

err_free:
	kfree(mcis);
	mcis = NULL;

	kfree(ecc_stngs);
	ecc_stngs = NULL;

err_ret:
	return err;
}
static void __exit amd64_edac_exit(void)
{
	if (amd64_ctl_pci)
		edac_pci_release_generic_ctl(amd64_ctl_pci);

	pci_unregister_driver(&amd64_pci_driver);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	kfree(mcis);
	mcis = NULL;

	msrs_free(msrs);
	msrs = NULL;
}

module_init(amd64_edac_init);
module_exit(amd64_edac_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
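/*
 * e.g. "modprobe amd64_edac edac_op_state=1" selects NMI reporting, per the
 * parameter description above.
 */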