1 #include "amd64_edac.h"
2 #include <asm/amd_nb.h>
4 static struct edac_pci_ctl_info *amd64_ctl_pci;
6 static int report_gart_errors;
7 module_param(report_gart_errors, int, 0644);
10 * Set by command line parameter. If BIOS has enabled the ECC, this override is
11 * cleared so that the driver does not attempt to re-enable the hardware itself.
13 static int ecc_enable_override;
14 module_param(ecc_enable_override, int, 0644);
16 static struct msr __percpu *msrs;
19 * count successfully initialized driver instances for setup_pci_device()
21 static atomic_t drv_instances = ATOMIC_INIT(0);
23 /* Per-node driver instances */
24 static struct mem_ctl_info **mcis;
25 static struct ecc_settings **ecc_stngs;
28 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
29 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
30 * or-higher value'.
32 * FIXME: Produce a better mapping/linearisation.
35 u32 scrubval; /* bit pattern for scrub rate */
36 u32 bandwidth; /* bandwidth consumed (bytes/sec) */
38 { 0x01, 1600000000UL},
60 { 0x00, 0UL}, /* scrubbing off */
63 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
64 u32 *val, const char *func)
68 err = pci_read_config_dword(pdev, offset, val);
70 amd64_warn("%s: error reading F%dx%03x.\n",
71 func, PCI_FUNC(pdev->devfn), offset);
76 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
77 u32 val, const char *func)
81 err = pci_write_config_dword(pdev, offset, val);
83 amd64_warn("%s: error writing to F%dx%03x.\n",
84 func, PCI_FUNC(pdev->devfn), offset);
91 * Depending on the family, F2 DCT reads need special handling:
93 * K8: has a single DCT only
95 * F10h: each DCT has its own set of regs
99 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
102 static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
108 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
111 static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
114 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
118 * Select DCT to which PCI cfg accesses are routed
120 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
124 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
127 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
130 static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
135 if (addr >= 0x140 && addr <= 0x1a0) {
140 f15h_select_dct(pvt, dct);
142 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
146 * Memory scrubber control interface. For K8, memory scrubbing is handled by
147 * hardware and can involve L2 cache, dcache as well as the main memory. With
148 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
151 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
152 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
153 * bytes/sec for the setting.
155 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
156 * other archs, we might not have access to the caches directly.
160 * scan the scrub rate mapping table for a close or matching bandwidth value to
161 * issue. If the requested rate is too big, then use the last maximum value found.
163 static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
169 * map the configured rate (new_bw) to a value specific to the AMD64
170 * memory controller and apply to register. Search for the first
171 * bandwidth entry that is greater than or equal to the requested setting
172 * and program that. If at last entry, turn off DRAM scrubbing.
174 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
175 * by falling back to the last element in scrubrates[].
177 for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
179 * skip scrub rates which aren't recommended
180 * (see F10 BKDG, F3x58)
182 if (scrubrates[i].scrubval < min_rate)
185 if (scrubrates[i].bandwidth <= new_bw)
189 scrubval = scrubrates[i].scrubval;
191 pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
194 return scrubrates[i].bandwidth;
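/*
 * Illustrative walk-through with made-up numbers: a request of
 * new_bw = 800000000 skips the 1.6 GB/s entry (1600000000 > new_bw)
 * and stops at the first entry whose bandwidth is <= 800000000; that
 * entry's scrubval is programmed into the low five bits of F3x58
 * (SCRCTRL). A request below every listed rate runs the loop to its
 * end and falls back to the last element { 0x00, 0UL }, i.e. scrubbing
 * is switched off.
 */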
199 static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
201 struct amd64_pvt *pvt = mci->pvt_info;
202 u32 min_scrubrate = 0x5;
204 if (boot_cpu_data.x86 == 0xf)
207 /* F15h Erratum #505 */
208 if (boot_cpu_data.x86 == 0x15)
209 f15h_select_dct(pvt, 0);
211 return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
214 static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
216 struct amd64_pvt *pvt = mci->pvt_info;
218 int i, retval = -EINVAL;
220 /* F15h Erratum #505 */
221 if (boot_cpu_data.x86 == 0x15)
222 f15h_select_dct(pvt, 0);
224 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
226 scrubval = scrubval & 0x001F;
228 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
229 if (scrubrates[i].scrubval == scrubval) {
230 retval = scrubrates[i].bandwidth;
238 * returns true if the SysAddr given by sys_addr matches the
239 * DRAM base/limit associated with node_id
241 static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
246 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
247 * all ones if the most significant implemented address bit is 1.
248 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
249 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
250 * Application Programming.
252 addr = sys_addr & 0x000000ffffffffffull;
254 return ((addr >= get_dram_base(pvt, nid)) &&
255 (addr <= get_dram_limit(pvt, nid)));
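/*
 * Worked example (hypothetical address): a sign-extended SysAddr of
 * 0xffffff8000000000 has bits 63-40 all set because bit 39 is set.
 * Masking with 0x000000ffffffffff yields addr = 0x0000008000000000,
 * which is what gets compared against the node's 40-bit base/limit.
 */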
259 * Attempt to map a SysAddr to a node. On success, return a pointer to the
260 * mem_ctl_info structure for the node that the SysAddr maps to.
262 * On failure, return NULL.
264 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
267 struct amd64_pvt *pvt;
272 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
273 * 3.4.4.2) registers to map the SysAddr to a node ID.
278 * The value of this field should be the same for all DRAM Base
279 * registers. Therefore we arbitrarily choose to read it from the
280 * register for node 0.
282 intlv_en = dram_intlv_en(pvt, 0);
285 for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
286 if (amd64_base_limit_match(pvt, sys_addr, node_id))
292 if (unlikely((intlv_en != 0x01) &&
293 (intlv_en != 0x03) &&
294 (intlv_en != 0x07))) {
295 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
299 bits = (((u32) sys_addr) >> 12) & intlv_en;
301 for (node_id = 0; ; ) {
302 if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
303 break; /* intlv_sel field matches */
305 if (++node_id >= DRAM_RANGES)
309 /* sanity test for sys_addr */
310 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
311 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
312 "range for node %d with node interleaving enabled.\n",
313 __func__, sys_addr, node_id);
318 return edac_mc_find((int)node_id);
321 edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
322 (unsigned long)sys_addr);
328 * compute the CS base address of the @csrow on the DRAM controller @dct.
329 * For details see F2x[5C:40] in the processor's BKDG
331 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
332 u64 *base, u64 *mask)
334 u64 csbase, csmask, base_bits, mask_bits;
337 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
338 csbase = pvt->csels[dct].csbases[csrow];
339 csmask = pvt->csels[dct].csmasks[csrow];
340 base_bits = GENMASK(21, 31) | GENMASK(9, 15);
341 mask_bits = GENMASK(21, 29) | GENMASK(9, 15);
344 csbase = pvt->csels[dct].csbases[csrow];
345 csmask = pvt->csels[dct].csmasks[csrow >> 1];
348 if (boot_cpu_data.x86 == 0x15)
349 base_bits = mask_bits = GENMASK(19, 30) | GENMASK(5, 13);
351 base_bits = mask_bits = GENMASK(19, 28) | GENMASK(5, 13);
354 *base = (csbase & base_bits) << addr_shift;
357 /* poke holes for the csmask */
358 *mask &= ~(mask_bits << addr_shift);
360 *mask |= (csmask & mask_bits) << addr_shift;
363 #define for_each_chip_select(i, dct, pvt) \
364 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
366 #define chip_select_base(i, dct, pvt) \
367 pvt->csels[dct].csbases[i]
369 #define for_each_chip_select_mask(i, dct, pvt) \
370 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
373 * @input_addr is an InputAddr associated with the node given by mci. Return the
374 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
376 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
378 struct amd64_pvt *pvt;
384 for_each_chip_select(csrow, 0, pvt) {
385 if (!csrow_enabled(csrow, 0, pvt))
388 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
392 if ((input_addr & mask) == (base & mask)) {
393 edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
394 (unsigned long)input_addr, csrow,
400 edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
401 (unsigned long)input_addr, pvt->mc_node_id);
407 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
408 * for the node represented by mci. Info is passed back in *hole_base,
409 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
410 * info is invalid. Info may be invalid for either of the following reasons:
412 * - The revision of the node is not E or greater. In this case, the DRAM Hole
413 * Address Register does not exist.
415 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
416 * indicating that its contents are not valid.
418 * The values passed back in *hole_base, *hole_offset, and *hole_size are
419 * complete 32-bit values despite the fact that the bitfields in the DHAR
420 * only represent bits 31-24 of the base and offset values.
422 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
423 u64 *hole_offset, u64 *hole_size)
425 struct amd64_pvt *pvt = mci->pvt_info;
427 /* only revE and later have the DRAM Hole Address Register */
428 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
429 edac_dbg(1, " revision %d for node %d does not support DHAR\n",
430 pvt->ext_model, pvt->mc_node_id);
434 /* valid for Fam10h and above */
435 if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
436 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
440 if (!dhar_valid(pvt)) {
441 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
446 /* This node has Memory Hoisting */
448 /* +------------------+--------------------+--------------------+-----
449 * | memory | DRAM hole | relocated |
450 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
452 * | | | [0x100000000, |
453 * | | | (0x100000000+ |
454 * | | | (0xffffffff-x))] |
455 * +------------------+--------------------+--------------------+-----
457 * Above is a diagram of physical memory showing the DRAM hole and the
458 * relocated addresses from the DRAM hole. As shown, the DRAM hole
459 * starts at address x (the base address) and extends through address
460 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
461 * addresses in the hole so that they start at 0x100000000.
464 *hole_base = dhar_base(pvt);
465 *hole_size = (1ULL << 32) - *hole_base;
467 if (boot_cpu_data.x86 > 0xf)
468 *hole_offset = f10_dhar_offset(pvt);
470 *hole_offset = k8_dhar_offset(pvt);
472 edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
473 pvt->mc_node_id, (unsigned long)*hole_base,
474 (unsigned long)*hole_offset, (unsigned long)*hole_size);
478 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
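/*
 * Worked example with made-up numbers: a DHAR base of 0xc0000000 gives
 * hole_size = (1ULL << 32) - 0xc0000000 = 0x40000000 (1G). The DRAM
 * addresses [0xc0000000, 0xffffffff] then appear as SysAddrs starting
 * at 0x100000000, so hole_offset works out to 0x40000000 and
 * sys_addr - hole_offset recovers the original DRAM address.
 */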
481 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
482 * assumed that sys_addr maps to the node given by mci.
484 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
485 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
486 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
487 * then it is also involved in translating a SysAddr to a DramAddr. Sections
488 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
489 * These parts of the documentation are unclear. I interpret them as follows:
491 * When node n receives a SysAddr, it processes the SysAddr as follows:
493 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
494 * Limit registers for node n. If the SysAddr is not within the range
495 * specified by the base and limit values, then node n ignores the Sysaddr
496 * (since it does not map to node n). Otherwise continue to step 2 below.
498 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
499 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
500 * the range of relocated addresses (starting at 0x100000000) from the DRAM
501 * hole. If not, skip to step 3 below. Else get the value of the
502 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
503 * offset defined by this value from the SysAddr.
505 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
506 * Base register for node n. To obtain the DramAddr, subtract the base
507 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
509 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
511 struct amd64_pvt *pvt = mci->pvt_info;
512 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
515 dram_base = get_dram_base(pvt, pvt->mc_node_id);
517 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
520 if ((sys_addr >= (1ULL << 32)) &&
521 (sys_addr < ((1ULL << 32) + hole_size))) {
522 /* use DHAR to translate SysAddr to DramAddr */
523 dram_addr = sys_addr - hole_offset;
525 edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
526 (unsigned long)sys_addr,
527 (unsigned long)dram_addr);
534 * Translate the SysAddr to a DramAddr as shown near the start of
535 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
536 * only deals with 40-bit values. Therefore we discard bits 63-40 of
537 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
538 * discard are all 1s. Otherwise the bits we discard are all 0s. See
539 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
540 * Programmer's Manual Volume 1 Application Programming.
542 dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
544 edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
545 (unsigned long)sys_addr, (unsigned long)dram_addr);
550 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
551 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
552 * for node interleaving.
554 static int num_node_interleave_bits(unsigned intlv_en)
556 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
559 BUG_ON(intlv_en > 7);
560 n = intlv_shift_table[intlv_en];
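/*
 * E.g. intlv_en == 0x7 (eight-node interleave) yields 3 interleave bits,
 * 0x3 yields 2, 0x1 yields 1 and 0x0 (no interleave) yields 0. The
 * remaining encodings are not valid IntlvEn patterns and map to 0 too.
 */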
564 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
565 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
567 struct amd64_pvt *pvt;
574 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
575 * concerning translating a DramAddr to an InputAddr.
577 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
578 input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
581 edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
582 intlv_shift, (unsigned long)dram_addr,
583 (unsigned long)input_addr);
589 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
590 * assumed that @sys_addr maps to the node given by mci.
592 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
597 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
599 edac_dbg(2, "SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
600 (unsigned long)sys_addr, (unsigned long)input_addr);
607 * @input_addr is an InputAddr associated with the node represented by mci.
608 * Translate @input_addr to a DramAddr and return the result.
610 static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
612 struct amd64_pvt *pvt;
613 unsigned node_id, intlv_shift;
618 * The start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
619 * shows how to translate a DramAddr to an InputAddr. Here we reverse
620 * this procedure. When translating from a DramAddr to an InputAddr, the
621 * bits used for node interleaving are discarded. Here we recover these
622 * bits from the IntlvSel field of the DRAM Limit register (section
623 * 3.4.4.2) for the node that input_addr is associated with.
626 node_id = pvt->mc_node_id;
630 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
631 if (intlv_shift == 0) {
632 edac_dbg(1, " InputAddr 0x%lx translates to DramAddr of same value\n",
633 (unsigned long)input_addr);
638 bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
639 (input_addr & 0xfff);
641 intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
642 dram_addr = bits + (intlv_sel << 12);
644 edac_dbg(1, "InputAddr 0x%lx translates to DramAddr 0x%lx (%d node interleave bits)\n",
645 (unsigned long)input_addr,
646 (unsigned long)dram_addr, intlv_shift);
652 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
653 * @dram_addr to a SysAddr.
655 static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
657 struct amd64_pvt *pvt = mci->pvt_info;
658 u64 hole_base, hole_offset, hole_size, base, sys_addr;
661 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
664 if ((dram_addr >= hole_base) &&
665 (dram_addr < (hole_base + hole_size))) {
666 sys_addr = dram_addr + hole_offset;
668 edac_dbg(1, "using DHAR to translate DramAddr 0x%lx to SysAddr 0x%lx\n",
669 (unsigned long)dram_addr,
670 (unsigned long)sys_addr);
676 base = get_dram_base(pvt, pvt->mc_node_id);
677 sys_addr = dram_addr + base;
680 * The sys_addr we have computed up to this point is a 40-bit value
681 * because the k8 deals with 40-bit values. However, the value we are
682 * supposed to return is a full 64-bit physical address. The AMD
683 * x86-64 architecture specifies that the most significant implemented
684 * address bit through bit 63 of a physical address must be either all
685 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
686 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
687 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
690 sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
692 edac_dbg(1, " Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
693 pvt->mc_node_id, (unsigned long)dram_addr,
694 (unsigned long)sys_addr);
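/*
 * Sign-extension example (illustrative): for a 40-bit sys_addr of
 * 0x8001234567, bit 39 is set, so (sys_addr & (1ull << 39)) - 1 equals
 * 0x7fffffffff and its complement ORs bits 63-39 to ones, producing
 * 0xffffff8001234567. With bit 39 clear, the OR term is 0 and the
 * address is returned unchanged.
 */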
700 * @input_addr is an InputAddr associated with the node given by mci. Translate
701 * @input_addr to a SysAddr.
703 static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
706 return dram_addr_to_sys_addr(mci,
707 input_addr_to_dram_addr(mci, input_addr));
710 /* Map the Error address to a PAGE and PAGE OFFSET. */
711 static inline void error_address_to_page_and_offset(u64 error_address,
712 struct err_info *err)
714 err->page = (u32) (error_address >> PAGE_SHIFT);
715 err->offset = ((u32) error_address) & ~PAGE_MASK;
719 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
720 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
721 * of a node that detected an ECC memory error. mci represents the node that
722 * the error address maps to (possibly different from the node that detected
723 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
726 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
730 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
733 amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
734 "address 0x%lx\n", (unsigned long)sys_addr);
738 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
741 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs are ECC capable.
744 static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
747 unsigned long edac_cap = EDAC_FLAG_NONE;
749 bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
753 if (pvt->dclr0 & BIT(bit))
754 edac_cap = EDAC_FLAG_SECDED;
759 static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
761 static void amd64_dump_dramcfg_low(u32 dclr, int chan)
763 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
765 edac_dbg(1, " DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
766 (dclr & BIT(16)) ? "un" : "",
767 (dclr & BIT(19)) ? "yes" : "no");
769 edac_dbg(1, " PAR/ERR parity: %s\n",
770 (dclr & BIT(8)) ? "enabled" : "disabled");
772 if (boot_cpu_data.x86 == 0x10)
773 edac_dbg(1, " DCT 128bit mode width: %s\n",
774 (dclr & BIT(11)) ? "128b" : "64b");
776 edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
777 (dclr & BIT(12)) ? "yes" : "no",
778 (dclr & BIT(13)) ? "yes" : "no",
779 (dclr & BIT(14)) ? "yes" : "no",
780 (dclr & BIT(15)) ? "yes" : "no");
783 /* Display and decode various NB registers for debug purposes. */
784 static void dump_misc_regs(struct amd64_pvt *pvt)
786 edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
788 edac_dbg(1, " NB two channel DRAM capable: %s\n",
789 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
791 edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
792 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
793 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
795 amd64_dump_dramcfg_low(pvt->dclr0, 0);
797 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
799 edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
800 pvt->dhar, dhar_base(pvt),
801 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
802 : f10_dhar_offset(pvt));
804 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
806 amd64_debug_display_dimm_sizes(pvt, 0);
808 /* everything below this point is Fam10h and above */
809 if (boot_cpu_data.x86 == 0xf)
812 amd64_debug_display_dimm_sizes(pvt, 1);
814 amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
816 /* Only if NOT ganged does dclr1 have valid info */
817 if (!dct_ganging_enabled(pvt))
818 amd64_dump_dramcfg_low(pvt->dclr1, 1);
822 * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
824 static void prep_chip_selects(struct amd64_pvt *pvt)
826 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
827 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
828 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
830 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
831 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
836 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
838 static void read_dct_base_mask(struct amd64_pvt *pvt)
842 prep_chip_selects(pvt);
844 for_each_chip_select(cs, 0, pvt) {
845 int reg0 = DCSB0 + (cs * 4);
846 int reg1 = DCSB1 + (cs * 4);
847 u32 *base0 = &pvt->csels[0].csbases[cs];
848 u32 *base1 = &pvt->csels[1].csbases[cs];
850 if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
851 edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
854 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
857 if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
858 edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
862 for_each_chip_select_mask(cs, 0, pvt) {
863 int reg0 = DCSM0 + (cs * 4);
864 int reg1 = DCSM1 + (cs * 4);
865 u32 *mask0 = &pvt->csels[0].csmasks[cs];
866 u32 *mask1 = &pvt->csels[1].csmasks[cs];
868 if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
869 edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
872 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
875 if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
876 edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
881 static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
885 /* F15h supports only DDR3 */
886 if (boot_cpu_data.x86 >= 0x15)
887 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
888 else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
889 if (pvt->dchr0 & DDR3_MODE)
890 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
892 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
894 type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
897 amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);
902 /* Get the number of DCT channels the memory controller is using. */
903 static int k8_early_channel_count(struct amd64_pvt *pvt)
907 if (pvt->ext_model >= K8_REV_F)
908 /* RevF (NPT) and later */
909 flag = pvt->dclr0 & WIDTH_128;
911 /* RevE and earlier */
912 flag = pvt->dclr0 & REVE_WIDTH_128;
917 return (flag) ? 2 : 1;
920 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
921 static u64 get_error_address(struct mce *m)
923 struct cpuinfo_x86 *c = &boot_cpu_data;
933 addr = m->addr & GENMASK(start_bit, end_bit);
936 * Erratum 637 workaround
938 if (c->x86 == 0x15) {
939 struct amd64_pvt *pvt;
940 u64 cc6_base, tmp_addr;
942 u8 mce_nid, intlv_en;
944 if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
947 mce_nid = amd_get_nb_id(m->extcpu);
948 pvt = mcis[mce_nid]->pvt_info;
950 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
951 intlv_en = tmp >> 21 & 0x7;
953 /* add [47:27] + 3 trailing bits */
954 cc6_base = (tmp & GENMASK(0, 20)) << 3;
956 /* reverse and add DramIntlvEn */
957 cc6_base |= intlv_en ^ 0x7;
963 return cc6_base | (addr & GENMASK(0, 23));
965 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
968 tmp_addr = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1);
970 /* OR DramIntlvSel into bits [14:12] */
971 tmp_addr |= (tmp & GENMASK(21, 23)) >> 9;
973 /* add remaining [11:0] bits from original MC4_ADDR */
974 tmp_addr |= addr & GENMASK(0, 11);
976 return cc6_base | tmp_addr;
982 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
984 struct cpuinfo_x86 *c = &boot_cpu_data;
985 int off = range << 3;
987 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
988 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
993 if (!dram_rw(pvt, range))
996 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
997 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
999 /* Factor in CC6 save area by reading dst node's limit reg */
1000 if (c->x86 == 0x15) {
1001 struct pci_dev *f1 = NULL;
1002 u8 nid = dram_dst_node(pvt, range);
1005 f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1));
1009 amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1011 pvt->ranges[range].lim.lo &= GENMASK(0, 15);
1013 /* {[39:27],111b} */
1014 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1016 pvt->ranges[range].lim.hi &= GENMASK(0, 7);
1019 pvt->ranges[range].lim.hi |= llim >> 13;
1025 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1026 struct err_info *err)
1028 struct amd64_pvt *pvt = mci->pvt_info;
1030 error_address_to_page_and_offset(sys_addr, err);
1033 * Find out which node the error address belongs to. This may be
1034 * different from the node that detected the error.
1036 err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
1037 if (!err->src_mci) {
1038 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1039 (unsigned long)sys_addr);
1040 err->err_code = ERR_NODE;
1044 /* Now map the sys_addr to a CSROW */
1045 err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
1046 if (err->csrow < 0) {
1047 err->err_code = ERR_CSROW;
1051 /* CHIPKILL enabled */
1052 if (pvt->nbcfg & NBCFG_CHIPKILL) {
1053 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1054 if (err->channel < 0) {
1056 * Syndrome didn't map, so we don't know which of the
1057 * 2 DIMMs is in error. So we need to ID 'both' of them as suspect.
1060 amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
1061 "possible error reporting race\n",
1063 err->err_code = ERR_CHANNEL;
1068 * non-chipkill ecc mode
1070 * The k8 documentation is unclear about how to determine the
1071 * channel number when using non-chipkill memory. This method
1072 * was obtained from email communication with someone at AMD.
1073 * (Wish the email was placed in this comment - norsk)
1075 err->channel = ((sys_addr & BIT(3)) != 0);
1079 static int ddr2_cs_size(unsigned i, bool dct_width)
1085 else if (!(i & 0x1))
1088 shift = (i + 1) >> 1;
1090 return 128 << (shift + !!dct_width);
1093 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1096 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1098 if (pvt->ext_model >= K8_REV_F) {
1099 WARN_ON(cs_mode > 11);
1100 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1102 else if (pvt->ext_model >= K8_REV_D) {
1104 WARN_ON(cs_mode > 10);
1107 * the below calculation, besides trying to win an obfuscated C
1108 * contest, maps cs_mode values to DIMM chip select sizes. The
1109 * mappings are:
1111 * cs_mode CS size (mb)
1112 * ======= ============
1113 * 0       32
1114 * 1       64
1115 * 2       128
1116 * 3       128
1117 * 4       256
1118 * 5       512
1119 * 6       256
1120 * 7       512
1121 * 8       1024
1122 * 9       1024
1123 * 10      2048
1125 * Basically, it calculates a value with which to shift the
1126 * smallest CS size of 32MB.
1128 * ddr[23]_cs_size have a similar purpose.
1130 diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1132 return 32 << (cs_mode - diff);
1135 WARN_ON(cs_mode > 6);
1136 return 32 << cs_mode;
1141 * Get the number of DCT channels in use.
1144 * Returns: number of Memory Channels in operation
1146 * Passes back: contents of the DCL0_LOW register
1148 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1150 int i, j, channels = 0;
1152 /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1153 if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
1157 * Need to check if in unganged mode: in that case there are 2 channels,
1158 * but they are not in 128 bit mode and thus the above 'dclr0' status bit will be OFF.
1161 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1162 * their CSEnable bit on. If so, then SINGLE DIMM case.
1164 edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1167 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1168 * is more than just one DIMM present in unganged mode. Need to check
1169 * both controllers since DIMMs can be placed in either one.
1171 for (i = 0; i < 2; i++) {
1172 u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1174 for (j = 0; j < 4; j++) {
1175 if (DBAM_DIMM(j, dbam) > 0) {
1185 amd64_info("MCT channel count: %d\n", channels);
1190 static int ddr3_cs_size(unsigned i, bool dct_width)
1195 if (i == 0 || i == 3 || i == 4)
1201 else if (!(i & 0x1))
1204 shift = (i + 1) >> 1;
1207 cs_size = (128 * (1 << !!dct_width)) << shift;
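/*
 * Illustrative evaluation (hypothetical cs_mode): i = 7 is odd and not
 * one of the specially-handled encodings, so shift = (7 + 1) >> 1 = 4;
 * a 64-bit DCT (dct_width == false) then gives 128MB << 4 = 2048MB,
 * while a 128-bit DCT doubles that to 4096MB.
 */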
1212 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1215 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1217 WARN_ON(cs_mode > 11);
1219 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1220 return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1222 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1226 * F15h supports only 64bit DCT interfaces
1228 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1231 WARN_ON(cs_mode > 12);
1233 return ddr3_cs_size(cs_mode, false);
1236 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1239 if (boot_cpu_data.x86 == 0xf)
1242 if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1243 edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1244 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1246 edac_dbg(0, " DCTs operate in %s mode\n",
1247 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1249 if (!dct_ganging_enabled(pvt))
1250 edac_dbg(0, " Address range split per DCT: %s\n",
1251 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1253 edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1254 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1255 (dct_memory_cleared(pvt) ? "yes" : "no"));
1257 edac_dbg(0, " channel interleave: %s, "
1258 "interleave bits selector: 0x%x\n",
1259 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1260 dct_sel_interleave_addr(pvt));
1263 amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
1267 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1268 * Interleaving Modes.
1270 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1271 bool hi_range_sel, u8 intlv_en)
1273 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1275 if (dct_ganging_enabled(pvt))
1279 return dct_sel_high;
1282 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1284 if (dct_interleave_enabled(pvt)) {
1285 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1287 /* return DCT select function: 0=DCT0, 1=DCT1 */
1289 return sys_addr >> 6 & 1;
1291 if (intlv_addr & 0x2) {
1292 u8 shift = intlv_addr & 0x1 ? 9 : 6;
1293 u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1295 return ((sys_addr >> shift) & 1) ^ temp;
1298 return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1301 if (dct_high_range_enabled(pvt))
1302 return ~dct_sel_high & 1;
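/*
 * Worked example for the interleave case above (hypothetical address):
 * with intlv_addr == 0x2 the channel is A[6] XORed with the parity of
 * sys_addr[20:16]. For sys_addr = 0x10040, bit 6 is 1 and bits [20:16]
 * hold a single set bit (parity 1), so the access routes to DCT 1 ^ 1 = 0.
 */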
1307 /* Convert the sys_addr to the normalized DCT address */
1308 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
1309 u64 sys_addr, bool hi_rng,
1310 u32 dct_sel_base_addr)
1313 u64 dram_base = get_dram_base(pvt, range);
1314 u64 hole_off = f10_dhar_offset(pvt);
1315 u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1320 * base address of high range is below 4Gb
1321 * (bits [47:27] at [31:11])
1322 * DRAM address space on this DCT is hoisted above 4Gb &&
1325 * remove hole offset from sys_addr
1327 * remove high range offset from sys_addr
1329 if ((!(dct_sel_base_addr >> 16) ||
1330 dct_sel_base_addr < dhar_base(pvt)) &&
1332 (sys_addr >= BIT_64(32)))
1333 chan_off = hole_off;
1335 chan_off = dct_sel_base_off;
1339 * we have a valid hole &&
1344 * remove dram base to normalize to DCT address
1346 if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1347 chan_off = hole_off;
1349 chan_off = dram_base;
1352 return (sys_addr & GENMASK(6, 47)) - (chan_off & GENMASK(23, 47));
1356 * checks if the csrow passed in is marked as SPARED, if so returns the new spare row
1359 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1363 if (online_spare_swap_done(pvt, dct) &&
1364 csrow == online_spare_bad_dramcs(pvt, dct)) {
1366 for_each_chip_select(tmp_cs, dct, pvt) {
1367 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1377 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1378 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1381 * -EINVAL: NOT FOUND
1382 * 0..csrow = Chip-Select Row
1384 static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
1386 struct mem_ctl_info *mci;
1387 struct amd64_pvt *pvt;
1388 u64 cs_base, cs_mask;
1389 int cs_found = -EINVAL;
1396 pvt = mci->pvt_info;
1398 edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1400 for_each_chip_select(csrow, dct, pvt) {
1401 if (!csrow_enabled(csrow, dct, pvt))
1404 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1406 edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1407 csrow, cs_base, cs_mask);
1411 edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1412 (in_addr & cs_mask), (cs_base & cs_mask));
1414 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1415 cs_found = f10_process_possible_spare(pvt, dct, csrow);
1417 edac_dbg(1, " MATCH csrow=%d\n", cs_found);
1425 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
1426 * swapped with a region located at the bottom of memory so that the GPU can use
1427 * the interleaved region and thus two channels.
1429 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1431 u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1433 if (boot_cpu_data.x86 == 0x10) {
1434 /* only revC3 and revE have that feature */
1435 if (boot_cpu_data.x86_model < 4 ||
1436 (boot_cpu_data.x86_model < 0xa &&
1437 boot_cpu_data.x86_mask < 3))
1441 amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
1443 if (!(swap_reg & 0x1))
1446 swap_base = (swap_reg >> 3) & 0x7f;
1447 swap_limit = (swap_reg >> 11) & 0x7f;
1448 rgn_size = (swap_reg >> 20) & 0x7f;
1449 tmp_addr = sys_addr >> 27;
1451 if (!(sys_addr >> 34) &&
1452 (((tmp_addr >= swap_base) &&
1453 (tmp_addr <= swap_limit)) ||
1454 (tmp_addr < rgn_size)))
1455 return sys_addr ^ (u64)swap_base << 27;
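/*
 * Example with invented register values: swap_base = 0x2 puts the
 * swapped region at 2 << 27 = 0x10000000. A sys_addr of 0x10001000 has
 * tmp_addr = 2, falls inside [swap_base, swap_limit] and is XORed down
 * to 0x00001000; symmetrically, 0x00001000 (tmp_addr 0 < rgn_size) is
 * XORed up to 0x10001000.
 */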
1460 /* For a given @dram_range, check if @sys_addr falls within it. */
1461 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1462 u64 sys_addr, int *chan_sel)
1464 int cs_found = -EINVAL;
1468 bool high_range = false;
1470 u8 node_id = dram_dst_node(pvt, range);
1471 u8 intlv_en = dram_intlv_en(pvt, range);
1472 u32 intlv_sel = dram_intlv_sel(pvt, range);
1474 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1475 range, sys_addr, get_dram_limit(pvt, range));
1477 if (dhar_valid(pvt) &&
1478 dhar_base(pvt) <= sys_addr &&
1479 sys_addr < BIT_64(32)) {
1480 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1485 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1488 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1490 dct_sel_base = dct_sel_baseaddr(pvt);
1493 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1494 * select between DCT0 and DCT1.
1496 if (dct_high_range_enabled(pvt) &&
1497 !dct_ganging_enabled(pvt) &&
1498 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1501 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1503 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1504 high_range, dct_sel_base);
1506 /* Remove node interleaving, see F1x120 */
1508 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1509 (chan_addr & 0xfff);
1511 /* remove channel interleave */
1512 if (dct_interleave_enabled(pvt) &&
1513 !dct_high_range_enabled(pvt) &&
1514 !dct_ganging_enabled(pvt)) {
1516 if (dct_sel_interleave_addr(pvt) != 1) {
1517 if (dct_sel_interleave_addr(pvt) == 0x3)
1519 chan_addr = ((chan_addr >> 10) << 9) |
1520 (chan_addr & 0x1ff);
1522 /* A[6] or hash 6 */
1523 chan_addr = ((chan_addr >> 7) << 6) |
1527 chan_addr = ((chan_addr >> 13) << 12) |
1528 (chan_addr & 0xfff);
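/*
 * Node-interleave removal example (invented numbers): with a two-node
 * interleave (intlv_en = 0x1, hweight8() = 1), a chan_addr of
 * 0x12345678 becomes ((0x12345678 >> 13) << 12) | 0x678 = 0x91a2678:
 * the single node-select bit at position 12 is squeezed out while the
 * low 12 bits are kept intact.
 */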
1531 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
1533 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1536 *chan_sel = channel;
1541 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1544 int cs_found = -EINVAL;
1547 for (range = 0; range < DRAM_RANGES; range++) {
1549 if (!dram_rw(pvt, range))
1552 if ((get_dram_base(pvt, range) <= sys_addr) &&
1553 (get_dram_limit(pvt, range) >= sys_addr)) {
1555 cs_found = f1x_match_to_this_node(pvt, range,
1556 sys_addr, chan_sel);
1565 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1566 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
1568 * The @sys_addr is usually an error address received from the hardware (MCX_ADDR).
1571 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1572 struct err_info *err)
1574 struct amd64_pvt *pvt = mci->pvt_info;
1576 error_address_to_page_and_offset(sys_addr, err);
1578 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
1579 if (err->csrow < 0) {
1580 err->err_code = ERR_CSROW;
1585 * We need the syndromes for channel detection only when we're
1586 * ganged. Otherwise @chan should already contain the channel at
1589 if (dct_ganging_enabled(pvt))
1590 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1594 * debug routine to display the memory sizes of all logical DIMMs and their CSROWs
1597 static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1599 int dimm, size0, size1, factor = 0;
1600 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
1601 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1603 if (boot_cpu_data.x86 == 0xf) {
1604 if (pvt->dclr0 & WIDTH_128)
1607 /* K8 families < revF not supported yet */
1608 if (pvt->ext_model < K8_REV_F)
1614 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
1615 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
1616 : pvt->csels[0].csbases;
1618 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1621 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1623 /* Dump memory sizes for DIMM and its CSROWs */
1624 for (dimm = 0; dimm < 4; dimm++) {
1627 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
1628 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
1629 DBAM_DIMM(dimm, dbam));
1632 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
1633 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
1634 DBAM_DIMM(dimm, dbam));
1636 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1637 dimm * 2, size0 << factor,
1638 dimm * 2 + 1, size1 << factor);
1642 static struct amd64_family_type amd64_family_types[] = {
1645 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1646 .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1648 .early_channel_count = k8_early_channel_count,
1649 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1650 .dbam_to_cs = k8_dbam_to_chip_select,
1651 .read_dct_pci_cfg = k8_read_dct_pci_cfg,
1656 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1657 .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1659 .early_channel_count = f1x_early_channel_count,
1660 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1661 .dbam_to_cs = f10_dbam_to_chip_select,
1662 .read_dct_pci_cfg = f10_read_dct_pci_cfg,
1667 .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
1668 .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
1670 .early_channel_count = f1x_early_channel_count,
1671 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1672 .dbam_to_cs = f15_dbam_to_chip_select,
1673 .read_dct_pci_cfg = f15_read_dct_pci_cfg,
1678 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1679 unsigned int device,
1680 struct pci_dev *related)
1682 struct pci_dev *dev = NULL;
1684 dev = pci_get_device(vendor, device, dev);
1686 if ((dev->bus->number == related->bus->number) &&
1687 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1689 dev = pci_get_device(vendor, device, dev);
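/*
 * E.g. (hypothetical topology): given the F2 device at 0000:00:18.2,
 * this walks the PCI device list for the requested (vendor, device)
 * pair and returns the sibling at the same bus/slot, i.e. 0000:00:18.1
 * for the F1 address map function or 0000:00:18.3 for F3.
 */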
1696 * These are tables of eigenvectors (one per line) which can be used for the
1697 * construction of the syndrome tables. The modified syndrome search algorithm
1698 * uses those to find the symbol in error and thus the DIMM.
1700 * Algorithm courtesy of Ross LaFetra from AMD.
1702 static u16 x4_vectors[] = {
1703 0x2f57, 0x1afe, 0x66cc, 0xdd88,
1704 0x11eb, 0x3396, 0x7f4c, 0xeac8,
1705 0x0001, 0x0002, 0x0004, 0x0008,
1706 0x1013, 0x3032, 0x4044, 0x8088,
1707 0x106b, 0x30d6, 0x70fc, 0xe0a8,
1708 0x4857, 0xc4fe, 0x13cc, 0x3288,
1709 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
1710 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
1711 0x15c1, 0x2a42, 0x89ac, 0x4758,
1712 0x2b03, 0x1602, 0x4f0c, 0xca08,
1713 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
1714 0x8ba7, 0x465e, 0x244c, 0x1cc8,
1715 0x2b87, 0x164e, 0x642c, 0xdc18,
1716 0x40b9, 0x80de, 0x1094, 0x20e8,
1717 0x27db, 0x1eb6, 0x9dac, 0x7b58,
1718 0x11c1, 0x2242, 0x84ac, 0x4c58,
1719 0x1be5, 0x2d7a, 0x5e34, 0xa718,
1720 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
1721 0x4c97, 0xc87e, 0x11fc, 0x33a8,
1722 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
1723 0x16b3, 0x3d62, 0x4f34, 0x8518,
1724 0x1e2f, 0x391a, 0x5cac, 0xf858,
1725 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
1726 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
1727 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
1728 0x4397, 0xc27e, 0x17fc, 0x3ea8,
1729 0x1617, 0x3d3e, 0x6464, 0xb8b8,
1730 0x23ff, 0x12aa, 0xab6c, 0x56d8,
1731 0x2dfb, 0x1ba6, 0x913c, 0x7328,
1732 0x185d, 0x2ca6, 0x7914, 0x9e28,
1733 0x171b, 0x3e36, 0x7d7c, 0xebe8,
1734 0x4199, 0x82ee, 0x19f4, 0x2e58,
1735 0x4807, 0xc40e, 0x130c, 0x3208,
1736 0x1905, 0x2e0a, 0x5804, 0xac08,
1737 0x213f, 0x132a, 0xadfc, 0x5ba8,
1738 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
1741 static u16 x8_vectors[] = {
1742 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
1743 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
1744 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
1745 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
1746 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
1747 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
1748 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
1749 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
1750 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
1751 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
1752 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
1753 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
1754 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
1755 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
1756 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
1757 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
1758 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
1759 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
1760 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
1763 static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
1766 unsigned int i, err_sym;
1768 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
1770 unsigned v_idx = err_sym * v_dim;
1771 unsigned v_end = (err_sym + 1) * v_dim;
1773 /* walk over all 16 bits of the syndrome */
1774 for (i = 1; i < (1U << 16); i <<= 1) {
1776 /* if bit is set in that eigenvector... */
1777 if (v_idx < v_end && vectors[v_idx] & i) {
1778 u16 ev_comp = vectors[v_idx++];
1780 /* ... and bit set in the modified syndrome, */
1790 /* can't get to zero, move to next symbol */
1795 edac_dbg(0, "syndrome(%x) not found\n", syndrome);
1799 static int map_err_sym_to_channel(int err_sym, int sym_size)
1812 return err_sym >> 4;
1818 /* imaginary bits not in a DIMM */
1820 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
1832 return err_sym >> 3;
1838 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1840 struct amd64_pvt *pvt = mci->pvt_info;
1843 if (pvt->ecc_sym_sz == 8)
1844 err_sym = decode_syndrome(syndrome, x8_vectors,
1845 ARRAY_SIZE(x8_vectors),
1847 else if (pvt->ecc_sym_sz == 4)
1848 err_sym = decode_syndrome(syndrome, x4_vectors,
1849 ARRAY_SIZE(x4_vectors),
1852 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
1856 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
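/*
 * Illustrative mapping (assumed err_sym values): with x4 symbols an
 * err_sym of 0x12 selects channel 0x12 >> 4 = 1; with x8 symbols the
 * shift is 3 instead (see map_err_sym_to_channel() above), matching
 * the different per-channel symbol counts of the two ECC layouts.
 */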
1859 static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
1862 enum hw_event_mc_err_type err_type;
1866 err_type = HW_EVENT_ERR_CORRECTED;
1867 else if (ecc_type == 1)
1868 err_type = HW_EVENT_ERR_UNCORRECTED;
1870 WARN(1, "Something is rotten in the state of Denmark.\n");
1874 switch (err->err_code) {
1879 string = "Failed to map error addr to a node";
1882 string = "Failed to map error addr to a csrow";
1885 string = "unknown syndrome - possible error reporting race";
1888 string = "WTF error";
1892 edac_mc_handle_error(err_type, mci, 1,
1893 err->page, err->offset, err->syndrome,
1894 err->csrow, err->channel, -1,
1898 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
1901 struct amd64_pvt *pvt = mci->pvt_info;
1902 u8 ecc_type = (m->status >> 45) & 0x3;
1903 u8 xec = XEC(m->status, 0x1f);
1904 u16 ec = EC(m->status);
1906 struct err_info err;
1908 /* Bail out early if this was an 'observed' error */
1909 if (PP(ec) == NBSL_PP_OBS)
1912 /* Do only ECC errors */
1913 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
1916 memset(&err, 0, sizeof(err));
1918 sys_addr = get_error_address(m);
1921 err.syndrome = extract_syndrome(m->status);
1923 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
1925 __log_bus_error(mci, &err, ecc_type);
1928 void amd64_decode_bus_error(int node_id, struct mce *m)
1930 __amd64_decode_bus_error(mcis[node_id], m);
1934 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
1935 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
1937 static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
1939 /* Reserve the ADDRESS MAP Device */
1940 pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
1942 amd64_err("error address map device not found: "
1943 "vendor %x device 0x%x (broken BIOS?)\n",
1944 PCI_VENDOR_ID_AMD, f1_id);
1948 /* Reserve the MISC Device */
1949 pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
1951 pci_dev_put(pvt->F1);
1954 amd64_err("error F3 device not found: "
1955 "vendor %x device 0x%x (broken BIOS?)\n",
1956 PCI_VENDOR_ID_AMD, f3_id);
1960 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
1961 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
1962 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
1967 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
1969 pci_dev_put(pvt->F1);
1970 pci_dev_put(pvt->F3);
1974 * Retrieve the hardware registers of the memory controller (this includes the
1975 * 'Address Map' and 'Misc' device regs)
1977 static void read_mc_regs(struct amd64_pvt *pvt)
1979 struct cpuinfo_x86 *c = &boot_cpu_data;
1985 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
1986 * those are Read-As-Zero
1988 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
1989 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
1991 /* check first whether TOP_MEM2 is enabled */
1992 rdmsrl(MSR_K8_SYSCFG, msr_val);
1993 if (msr_val & (1U << 21)) {
1994 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
1995 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
1997 edac_dbg(0, " TOP_MEM2 disabled\n");
1999 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2001 read_dram_ctl_register(pvt);
2003 for (range = 0; range < DRAM_RANGES; range++) {
2006 /* read settings for this DRAM range */
2007 read_dram_base_limit_regs(pvt, range);
2009 rw = dram_rw(pvt, range);
2013 edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2015 get_dram_base(pvt, range),
2016 get_dram_limit(pvt, range));
2018 edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2019 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2020 (rw & 0x1) ? "R" : "-",
2021 (rw & 0x2) ? "W" : "-",
2022 dram_intlv_sel(pvt, range),
2023 dram_dst_node(pvt, range));
2026 read_dct_base_mask(pvt);
2028 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2029 amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
2031 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2033 amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
2034 amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
2036 if (!dct_ganging_enabled(pvt)) {
2037 amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
2038 amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
2041 pvt->ecc_sym_sz = 4;
2043 if (c->x86 >= 0x10) {
2044 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2045 amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
2047 /* F10h, revD and later can do x8 ECC too */
2048 if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
2049 pvt->ecc_sym_sz = 8;
2051 dump_misc_regs(pvt);
2055 * NOTE: CPU Revision Dependent code
2058 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2059 * k8 private pointer to -->
2060 * DRAM Bank Address mapping register
2062 * DCL register where dual_channel_active is
2064 * The DBAM register consists of 4 sets of 4 bits each; the definitions are:
2067 * 0-3 CSROWs 0 and 1
2068 * 4-7 CSROWs 2 and 3
2069 * 8-11 CSROWs 4 and 5
2070 * 12-15 CSROWs 6 and 7
2072 * Values range from: 0 to 15
2073 * The meaning of the values depends on CPU revision and dual-channel state,
2074 * see the relevant BKDG for more info.
2076 * The memory controller provides for total of only 8 CSROWs in its current
2077 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2078 * single channel or two (2) DIMMs in dual channel mode.
2080 * The following code logic collapses the various tables for CSROW based on CPU
2084 * The number of PAGE_SIZE pages on the specified CSROW number it encompasses.
2088 static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2090 u32 cs_mode, nr_pages;
2091 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2094 * The math on this doesn't look right on the surface because x/2*4 can
2095 * be simplified to x*2 but this expression makes use of the fact that
2096 * it is integral math where 1/2=0. This intermediate value becomes the
2097 * number of bits to shift the DBAM register to extract the proper CSROW field.
2100 cs_mode = (dbam >> ((csrow_nr / 2) * 4)) & 0xF;
2102 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
2104 edac_dbg(0, " (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
2105 edac_dbg(0, " nr_pages/channel= %u channel-count = %d\n",
2106 nr_pages, pvt->channel_count);
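/*
 * Worked example (made-up DBAM value): for csrow_nr = 3 the shift is
 * (3 / 2) * 4 = 4, so cs_mode = (dbam >> 4) & 0xF. If ->dbam_to_cs()
 * decodes that field to 512MB, the csrow spans
 * 512 << (20 - PAGE_SHIFT) = 131072 pages with 4K pages.
 */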
2112 * Initialize the array of csrow attribute instances, based on the values
2113 * from pci config hardware registers.
2115 static int init_csrows(struct mem_ctl_info *mci)
2117 struct csrow_info *csrow;
2118 struct dimm_info *dimm;
2119 struct amd64_pvt *pvt = mci->pvt_info;
2122 int i, j, empty = 1;
2123 enum mem_type mtype;
2124 enum edac_type edac_mode;
2127 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2131 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2132 pvt->mc_node_id, val,
2133 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2135 for_each_chip_select(i, 0, pvt) {
2136 csrow = mci->csrows[i];
2138 if (!csrow_enabled(i, 0, pvt) && !csrow_enabled(i, 1, pvt)) {
2139 edac_dbg(1, "----CSROW %d VALID for MC node %d\n",
2140 i, pvt->mc_node_id);
2145 if (csrow_enabled(i, 0, pvt))
2146 nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
2147 if (csrow_enabled(i, 1, pvt))
2148 nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
2150 get_cs_base_and_mask(pvt, i, 0, &base, &mask);
2151 /* 8 bytes of resolution */
2153 mtype = amd64_determine_memory_type(pvt, i);
2155 edac_dbg(1, " for MC node %d csrow %d:\n", pvt->mc_node_id, i);
2156 edac_dbg(1, " nr_pages: %u\n",
2157 nr_pages * pvt->channel_count);
2160 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2162 if (pvt->nbcfg & NBCFG_ECC_ENABLE)
2163 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
2164 EDAC_S4ECD4ED : EDAC_SECDED;
2166 edac_mode = EDAC_NONE;
2168 for (j = 0; j < pvt->channel_count; j++) {
2169 dimm = csrow->channels[j]->dimm;
2170 dimm->mtype = mtype;
2171 dimm->edac_mode = edac_mode;
2172 dimm->nr_pages = nr_pages;
2179 /* get all cores on this DCT */
2180 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
2184 for_each_online_cpu(cpu)
2185 if (amd_get_nb_id(cpu) == nid)
2186 cpumask_set_cpu(cpu, mask);
2189 /* check MCG_CTL on all the cpus on this node */
2190 static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
2196 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2197 amd64_warn("%s: Error allocating mask\n", __func__);
2201 get_cpus_on_this_dct_cpumask(mask, nid);
2203 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2205 for_each_cpu(cpu, mask) {
2206 struct msr *reg = per_cpu_ptr(msrs, cpu);
2207 nbe = reg->l & MSR_MCGCTL_NBE;
2209 edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2211 (nbe ? "enabled" : "disabled"));
2219 free_cpumask_var(mask);
2223 static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
2225 cpumask_var_t cmask;
2228 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2229 amd64_warn("%s: error allocating mask\n", __func__);
2233 get_cpus_on_this_dct_cpumask(cmask, nid);
2235 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2237 for_each_cpu(cpu, cmask) {
2239 struct msr *reg = per_cpu_ptr(msrs, cpu);
2242 if (reg->l & MSR_MCGCTL_NBE)
2243 s->flags.nb_mce_enable = 1;
2245 reg->l |= MSR_MCGCTL_NBE;
2248 * Turn off NB MCE reporting only when it was off before
2250 if (!s->flags.nb_mce_enable)
2251 reg->l &= ~MSR_MCGCTL_NBE;
2254 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2256 free_cpumask_var(cmask);
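/*
 * Net effect, sketched: when enabling, MSR_IA32_MCG_CTL[NBE] is set on
 * every core of the node via wrmsr_on_cpus(); s->flags.nb_mce_enable
 * records whether BIOS already had the bit on, so the restore path
 * clears it only if this driver was the one that set it.
 */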
2261 static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2265 u32 value, mask = 0x3; /* UECC/CECC enable */
2267 if (toggle_ecc_err_reporting(s, nid, ON)) {
2268 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2272 amd64_read_pci_cfg(F3, NBCTL, &value);
2274 s->old_nbctl = value & mask;
2275 s->nbctl_valid = true;
2278 amd64_write_pci_cfg(F3, NBCTL, value);
2280 amd64_read_pci_cfg(F3, NBCFG, &value);
2282 edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2283 nid, value, !!(value & NBCFG_ECC_ENABLE));
2285 if (!(value & NBCFG_ECC_ENABLE)) {
2286 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
2288 s->flags.nb_ecc_prev = 0;
2290 /* Attempt to turn on DRAM ECC Enable */
2291 value |= NBCFG_ECC_ENABLE;
2292 amd64_write_pci_cfg(F3, NBCFG, value);
2294 amd64_read_pci_cfg(F3, NBCFG, &value);
2296 if (!(value & NBCFG_ECC_ENABLE)) {
2297 amd64_warn("Hardware rejected DRAM ECC enable,"
2298 "check memory DIMM configuration.\n");
2301 amd64_info("Hardware accepted DRAM ECC Enable\n");
2304 s->flags.nb_ecc_prev = 1;
2307 edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2308 nid, value, !!(value & NBCFG_ECC_ENABLE));
static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
					struct pci_dev *F3)
{
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, NBCTL, value);

	/* Restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, NBCFG, &value);
		value &= ~NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);
	}

	/* Restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}
/*
 * EDAC requires that the BIOS have ECC enabled before it takes over the
 * processing of ECC errors. A command line option allows forcing hardware
 * ECC on later, in enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";
static bool ecc_enabled(struct pci_dev *F3, u8 nid)
{
	u32 value;
	u8 ecc_en = 0;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(F3, NBCFG, &value);

	ecc_en = !!(value & NBCFG_ECC_ENABLE);
	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));

	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
	if (!nb_mce_en)
		amd64_notice("NB MCE bank disabled, set MSR "
			     "0x%08x[4] on node %d to enable.\n",
			     MSR_IA32_MCG_CTL, nid);

	if (!ecc_en || !nb_mce_en) {
		amd64_notice("%s", ecc_msg);
		return false;
	}
	return true;
}
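/*
 * ecc_enabled() gates the probe: both DRAM ECC (NBCFG[DramEccEn]) and
 * the NB MCE bank (MCG_CTL[NBE] on every core) must be active. A rough,
 * illustrative condensation of how amd64_probe_one_instance() below
 * uses it:
 *
 *	if (!ecc_enabled(F3, nid) && !ecc_enable_override)
 *		fail the probe with -ENODEV;
 */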
static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
	int rc;

	rc = amd64_create_sysfs_dbg_files(mci);
	if (rc < 0)
		return rc;

	if (boot_cpu_data.x86 >= 0x10) {
		rc = amd64_create_sysfs_inject_files(mci);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
	amd64_remove_sysfs_dbg_files(mci);

	if (boot_cpu_data.x86 >= 0x10)
		amd64_remove_sysfs_inject_files(mci);
}
static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
				 struct amd64_family_type *fam)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap		= amd64_determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= fam->ctl_name;
	mci->dev_name		= pci_name(pvt->F2);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
}
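/*
 * The two scrub hooks wired up above back the EDAC core's
 * sdram_scrub_rate sysfs attribute; rates are expressed in bytes/sec.
 * Illustrative call, as the core would issue it on a sysfs write:
 *
 *	mci->set_sdram_scrub_rate(mci, new_bw);	/@ new_bw in bytes/sec @/
 *
 * ('@' stands in for '*' so the example nests in this comment.)
 */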
/*
 * Returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
{
	u8 fam = boot_cpu_data.x86;
	struct amd64_family_type *fam_type = NULL;

	switch (fam) {
	case 0xf:
		fam_type	= &amd64_family_types[K8_CPUS];
		pvt->ops	= &amd64_family_types[K8_CPUS].ops;
		break;

	case 0x10:
		fam_type	= &amd64_family_types[F10_CPUS];
		pvt->ops	= &amd64_family_types[F10_CPUS].ops;
		break;

	case 0x15:
		fam_type	= &amd64_family_types[F15_CPUS];
		pvt->ops	= &amd64_family_types[F15_CPUS].ops;
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	pvt->ext_model = boot_cpu_data.x86_model >> 4;

	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
		   (fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);
	return fam_type;
}
static int amd64_init_one_instance(struct pci_dev *F2)
{
	struct amd64_pvt *pvt = NULL;
	struct amd64_family_type *fam_type = NULL;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	int err = 0, ret;
	u8 nid = get_node_id(F2);

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_ret;

	pvt->mc_node_id	= nid;
	pvt->F2 = F2;

	ret = -EINVAL;
	fam_type = amd64_per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	ret = -ENODEV;
	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
	if (err)
		goto err_free;

	read_mc_regs(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	ret = -EINVAL;
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_siblings;

	ret = -ENOMEM;
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = pvt->csels[0].b_cnt;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = pvt->channel_count;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		goto err_siblings;

	mci->pvt_info = pvt;
	mci->pdev = &pvt->F2->dev;

	setup_mci_misc_attrs(mci, fam_type);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	ret = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		edac_dbg(1, "failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}
	if (set_mc_sysfs_attrs(mci)) {
		edac_dbg(1, "failed set_mc_sysfs_attrs()\n");
		goto err_add_sysfs;
	}

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(amd64_decode_bus_error);

	mcis[nid] = mci;

	atomic_inc(&drv_instances);

	return 0;

err_add_sysfs:
	edac_mc_del_mc(mci->pdev);
err_add_mc:
	edac_mc_free(mci);
err_siblings:
	free_mc_sibling_devs(pvt);
err_free:
	kfree(pvt);
err_ret:
	return ret;
}
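/*
 * The error labels above unwind in reverse order of acquisition:
 * sysfs files -> mci registration -> mci allocation -> F1/F3 sibling
 * devices -> pvt. Keeping them ordered this way is what makes a
 * partially failed probe leak-free.
 */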
static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
					      const struct pci_device_id *mc_type)
{
	u8 nid = get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s;
	int ret = 0;

	ret = pci_enable_device(pdev);
	if (ret < 0) {
		edac_dbg(0, "ret=%d\n", ret);
		return -EIO;
	}

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	if (!ecc_enabled(F3, nid)) {
		ret = -ENODEV;

		if (!ecc_enable_override)
			goto err_enable;

		amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = amd64_init_one_instance(pdev);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);
		restore_ecc_error_reporting(s, nid, F3);
	}

	return ret;

err_enable:
	kfree(s);
	ecc_stngs[nid] = NULL;
err_out:
	return ret;
}
static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u8 nid = get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];

	mci = find_mci_by_dev(&pdev->dev);
	del_mc_sysfs_attrs(mci);
	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	free_mc_sibling_devs(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(amd64_decode_bus_error);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;
	mcis[nid] = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}
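/*
 * Teardown mirrors amd64_init_one_instance(): unregister from the EDAC
 * core and the MCE decoder first, release the F1/F3 siblings, and only
 * then free pvt and the mci, which stay reachable until edac_mc_del_mc()
 * has returned.
 */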
/*
 * This table is part of the interface for loading drivers for PCI devices. The
 * PCI core identifies what devices are present on a system during boot, and
 * then queries this table to see whether this driver handles a given device.
 */
static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = {
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_15H_NB_F2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
static struct pci_driver amd64_pci_driver = {
	.name		= EDAC_MOD_STR,
	.probe		= amd64_probe_one_instance,
	.remove		= __devexit_p(amd64_remove_one_instance),
	.id_table	= amd64_pci_table,
};
static void setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (amd64_ctl_pci)
		return;

	mci = mcis[0];
	if (mci) {
		pvt = mci->pvt_info;
		amd64_ctl_pci =
			edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);

		if (!amd64_ctl_pci) {
			pr_warning("%s(): Unable to create PCI control\n",
				   __func__);

			pr_warning("%s(): PCI error report via EDAC not set\n",
				   __func__);
		}
	}
}
static int __init amd64_edac_init(void)
{
	int err = -ENODEV;

	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);

	opstate_init();

	if (amd_cache_northbridges() < 0)
		goto err_ret;

	err = -ENOMEM;
	mcis	  = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!(mcis && ecc_stngs))
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	err = pci_register_driver(&amd64_pci_driver);
	if (err)
		goto err_pci;

	err = -ENODEV;
	if (!atomic_read(&drv_instances))
		goto err_no_instances;

	setup_pci_device();
	return 0;

err_no_instances:
	pci_unregister_driver(&amd64_pci_driver);
err_pci:
	msrs_free(msrs);
	msrs = NULL;
err_free:
	kfree(mcis);
	mcis = NULL;
	kfree(ecc_stngs);
	ecc_stngs = NULL;
err_ret:
	return err;
}
static void __exit amd64_edac_exit(void)
{
	if (amd64_ctl_pci)
		edac_pci_release_generic_ctl(amd64_ctl_pci);

	pci_unregister_driver(&amd64_pci_driver);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	kfree(mcis);
	mcis = NULL;

	msrs_free(msrs);
	msrs = NULL;
}
module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");