1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/ras.h>
3 #include "amd64_edac.h"
4 #include <asm/amd_nb.h>
5 
6 static struct edac_pci_ctl_info *pci_ctl;
7 
8 /*
9  * Set by command line parameter. If BIOS has enabled the ECC, this override is
10  * cleared to prevent re-enabling the hardware by this driver.
11  */
12 static int ecc_enable_override;
13 module_param(ecc_enable_override, int, 0644);
14 
15 static struct msr __percpu *msrs;
16 
17 static inline u32 get_umc_reg(struct amd64_pvt *pvt, u32 reg)
18 {
19 	if (!pvt->flags.zn_regs_v2)
20 		return reg;
21 
22 	switch (reg) {
23 	case UMCCH_ADDR_MASK_SEC:	return UMCCH_ADDR_MASK_SEC_DDR5;
24 	case UMCCH_DIMM_CFG:		return UMCCH_DIMM_CFG_DDR5;
25 	}
26 
27 	WARN_ONCE(1, "%s: unknown register 0x%x", __func__, reg);
28 	return 0;
29 }
30 
31 /* Per-node stuff */
32 static struct ecc_settings **ecc_stngs;
33 
34 /* Device for the PCI component */
35 static struct device *pci_ctl_dev;
36 
37 /*
38  * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
39  * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
40  * or higher value'.
41  *
42  * FIXME: Produce a better mapping/linearisation.
43  */
44 static const struct scrubrate {
45        u32 scrubval;           /* bit pattern for scrub rate */
46        u32 bandwidth;          /* bandwidth consumed (bytes/sec) */
47 } scrubrates[] = {
48 	{ 0x01, 1600000000UL},
49 	{ 0x02, 800000000UL},
50 	{ 0x03, 400000000UL},
51 	{ 0x04, 200000000UL},
52 	{ 0x05, 100000000UL},
53 	{ 0x06, 50000000UL},
54 	{ 0x07, 25000000UL},
55 	{ 0x08, 12284069UL},
56 	{ 0x09, 6274509UL},
57 	{ 0x0A, 3121951UL},
58 	{ 0x0B, 1560975UL},
59 	{ 0x0C, 781440UL},
60 	{ 0x0D, 390720UL},
61 	{ 0x0E, 195300UL},
62 	{ 0x0F, 97650UL},
63 	{ 0x10, 48854UL},
64 	{ 0x11, 24427UL},
65 	{ 0x12, 12213UL},
66 	{ 0x13, 6101UL},
67 	{ 0x14, 3051UL},
68 	{ 0x15, 1523UL},
69 	{ 0x16, 761UL},
70 	{ 0x00, 0UL},        /* scrubbing off */
71 };
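
/*
 * Illustrative sketch (not used by the driver): picking a scrubval for a
 * requested bandwidth. The table above is sorted by decreasing bandwidth, so
 * scanning from the top and stopping at the first entry that does not exceed
 * the request yields the closest rate at or below it; if nothing matches, the
 * final entry (scrubbing off) is used. The helper name is made up for this
 * example, and the min_rate filtering done in __set_scrub_rate() is omitted.
 */
static inline u32 example_bw_to_scrubval(u32 requested_bw)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++)
		if (scrubrates[i].bandwidth <= requested_bw)
			break;

	/* e.g. requested_bw = 60000000 -> scrubval 0x06 (50 MB/s) */
	return scrubrates[i].scrubval;
}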
72 
73 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
74 			       u32 *val, const char *func)
75 {
76 	int err = 0;
77 
78 	err = pci_read_config_dword(pdev, offset, val);
79 	if (err)
80 		amd64_warn("%s: error reading F%dx%03x.\n",
81 			   func, PCI_FUNC(pdev->devfn), offset);
82 
83 	return pcibios_err_to_errno(err);
84 }
85 
86 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
87 				u32 val, const char *func)
88 {
89 	int err = 0;
90 
91 	err = pci_write_config_dword(pdev, offset, val);
92 	if (err)
93 		amd64_warn("%s: error writing to F%dx%03x.\n",
94 			   func, PCI_FUNC(pdev->devfn), offset);
95 
96 	return pcibios_err_to_errno(err);
97 }
98 
99 /*
100  * Select DCT to which PCI cfg accesses are routed
101  */
102 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
103 {
104 	u32 reg = 0;
105 
106 	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
107 	reg &= (pvt->model == 0x30) ? ~3 : ~1;
108 	reg |= dct;
109 	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
110 }
111 
112 /*
113  *
114  * Depending on the family, F2 DCT reads need special handling:
115  *
116  * K8: has a single DCT only and no address offsets >= 0x100
117  *
118  * F10h: each DCT has its own set of regs
119  *	DCT0 -> F2x040..
120  *	DCT1 -> F2x140..
121  *
122  * F16h: has only 1 DCT
123  *
124  * F15h: we select which DCT we access using F1x10C[DctCfgSel]
125  */
126 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
127 					 int offset, u32 *val)
128 {
129 	switch (pvt->fam) {
130 	case 0xf:
131 		if (dct || offset >= 0x100)
132 			return -EINVAL;
133 		break;
134 
135 	case 0x10:
136 		if (dct) {
137 			/*
138 			 * Note: If ganging is enabled, barring the regs
139 			 * F2x[1,0]98 and F2x[1,0]9C, reads to F2x1xx
140 			 * return 0. (cf. Section 2.8.1 F10h BKDG)
141 			 */
142 			if (dct_ganging_enabled(pvt))
143 				return 0;
144 
145 			offset += 0x100;
146 		}
147 		break;
148 
149 	case 0x15:
150 		/*
151 		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
152 		 * We should select which DCT we access using F1x10C[DctCfgSel]
153 		 */
154 		dct = (dct && pvt->model == 0x30) ? 3 : dct;
155 		f15h_select_dct(pvt, dct);
156 		break;
157 
158 	case 0x16:
159 		if (dct)
160 			return -EINVAL;
161 		break;
162 
163 	default:
164 		break;
165 	}
166 	return amd64_read_pci_cfg(pvt->F2, offset, val);
167 }
168 
169 /*
170  * Memory scrubber control interface. For K8, memory scrubbing is handled by
171  * hardware and can involve L2 cache, dcache as well as the main memory. With
172  * F10, this is extended to L3 cache scrubbing on CPU models sporting that
173  * functionality.
174  *
175  * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
176  * (dram) over to cache lines. This is nasty, so we will use bandwidth in
177  * bytes/sec for the setting.
178  *
179  * Currently, we only do dram scrubbing. If the scrubbing is done in software on
180  * other archs, we might not have access to the caches directly.
181  */
182 
183 /*
184  * Scan the scrub rate mapping table for a close or matching bandwidth value to
185  * issue. If the requested rate is too big, use the last maximum value found.
186  */
187 static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
188 {
189 	u32 scrubval;
190 	int i;
191 
192 	/*
193 	 * Map the configured rate (new_bw) to a value specific to the AMD64
194 	 * memory controller and apply it to the register. Scan for the first
195 	 * bandwidth entry that does not exceed the requested setting and
196 	 * program that one.
197 	 *
198 	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
199 	 * by falling back to the last element in scrubrates[].
200 	 */
201 	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
202 		/*
203 		 * skip scrub rates which aren't recommended
204 		 * (see F10 BKDG, F3x58)
205 		 */
206 		if (scrubrates[i].scrubval < min_rate)
207 			continue;
208 
209 		if (scrubrates[i].bandwidth <= new_bw)
210 			break;
211 	}
212 
213 	scrubval = scrubrates[i].scrubval;
214 
215 	if (pvt->fam == 0x15 && pvt->model == 0x60) {
216 		f15h_select_dct(pvt, 0);
217 		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
218 		f15h_select_dct(pvt, 1);
219 		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
220 	} else {
221 		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
222 	}
223 
224 	if (scrubval)
225 		return scrubrates[i].bandwidth;
226 
227 	return 0;
228 }
229 
230 static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
231 {
232 	struct amd64_pvt *pvt = mci->pvt_info;
233 	u32 min_scrubrate = 0x5;
234 
235 	if (pvt->fam == 0xf)
236 		min_scrubrate = 0x0;
237 
238 	if (pvt->fam == 0x15) {
239 		/* Erratum #505 */
240 		if (pvt->model < 0x10)
241 			f15h_select_dct(pvt, 0);
242 
243 		if (pvt->model == 0x60)
244 			min_scrubrate = 0x6;
245 	}
246 	return __set_scrub_rate(pvt, bw, min_scrubrate);
247 }
248 
249 static int get_scrub_rate(struct mem_ctl_info *mci)
250 {
251 	struct amd64_pvt *pvt = mci->pvt_info;
252 	int i, retval = -EINVAL;
253 	u32 scrubval = 0;
254 
255 	if (pvt->fam == 0x15) {
256 		/* Erratum #505 */
257 		if (pvt->model < 0x10)
258 			f15h_select_dct(pvt, 0);
259 
260 		if (pvt->model == 0x60)
261 			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
262 		else
263 			amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
264 	} else {
265 		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
266 	}
267 
268 	scrubval = scrubval & 0x001F;
269 
270 	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
271 		if (scrubrates[i].scrubval == scrubval) {
272 			retval = scrubrates[i].bandwidth;
273 			break;
274 		}
275 	}
276 	return retval;
277 }
278 
279 /*
280  * returns true if the SysAddr given by sys_addr matches the
281  * DRAM base/limit associated with node_id
282  */
283 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
284 {
285 	u64 addr;
286 
287 	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
288 	 * all ones if the most significant implemented address bit is 1.
289 	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
290 	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
291 	 * Application Programming.
292 	 */
293 	addr = sys_addr & 0x000000ffffffffffull;
294 
295 	return ((addr >= get_dram_base(pvt, nid)) &&
296 		(addr <= get_dram_limit(pvt, nid)));
297 }
298 
299 /*
300  * Attempt to map a SysAddr to a node. On success, return a pointer to the
301  * mem_ctl_info structure for the node that the SysAddr maps to.
302  *
303  * On failure, return NULL.
304  */
305 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
306 						u64 sys_addr)
307 {
308 	struct amd64_pvt *pvt;
309 	u8 node_id;
310 	u32 intlv_en, bits;
311 
312 	/*
313 	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
314 	 * 3.4.4.2) registers to map the SysAddr to a node ID.
315 	 */
316 	pvt = mci->pvt_info;
317 
318 	/*
319 	 * The value of this field should be the same for all DRAM Base
320 	 * registers.  Therefore we arbitrarily choose to read it from the
321 	 * register for node 0.
322 	 */
323 	intlv_en = dram_intlv_en(pvt, 0);
324 
325 	if (intlv_en == 0) {
326 		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
327 			if (base_limit_match(pvt, sys_addr, node_id))
328 				goto found;
329 		}
330 		goto err_no_match;
331 	}
332 
333 	if (unlikely((intlv_en != 0x01) &&
334 		     (intlv_en != 0x03) &&
335 		     (intlv_en != 0x07))) {
336 		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
337 		return NULL;
338 	}
339 
340 	bits = (((u32) sys_addr) >> 12) & intlv_en;
341 
342 	for (node_id = 0; ; ) {
343 		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
344 			break;	/* intlv_sel field matches */
345 
346 		if (++node_id >= DRAM_RANGES)
347 			goto err_no_match;
348 	}
349 
350 	/* sanity test for sys_addr */
351 	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
352 		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
353 			   "range for node %d with node interleaving enabled.\n",
354 			   __func__, sys_addr, node_id);
355 		return NULL;
356 	}
357 
358 found:
359 	return edac_mc_find((int)node_id);
360 
361 err_no_match:
362 	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
363 		 (unsigned long)sys_addr);
364 
365 	return NULL;
366 }
367 
368 /*
369  * compute the CS base address of the @csrow on the DRAM controller @dct.
370  * For details see F2x[5C:40] in the processor's BKDG
371  */
372 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
373 				 u64 *base, u64 *mask)
374 {
375 	u64 csbase, csmask, base_bits, mask_bits;
376 	u8 addr_shift;
377 
378 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
379 		csbase		= pvt->csels[dct].csbases[csrow];
380 		csmask		= pvt->csels[dct].csmasks[csrow];
381 		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
382 		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
383 		addr_shift	= 4;
384 
385 	/*
386 	 * F16h and F15h, models 30h and later need two addr_shift values:
387 	 * 8 for high and 6 for low (cf. F16h BKDG).
388 	 */
389 	} else if (pvt->fam == 0x16 ||
390 		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
391 		csbase          = pvt->csels[dct].csbases[csrow];
392 		csmask          = pvt->csels[dct].csmasks[csrow >> 1];
393 
394 		*base  = (csbase & GENMASK_ULL(15,  5)) << 6;
395 		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;
396 
397 		*mask = ~0ULL;
398 		/* poke holes for the csmask */
399 		*mask &= ~((GENMASK_ULL(15, 5)  << 6) |
400 			   (GENMASK_ULL(30, 19) << 8));
401 
402 		*mask |= (csmask & GENMASK_ULL(15, 5))  << 6;
403 		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
404 
405 		return;
406 	} else {
407 		csbase		= pvt->csels[dct].csbases[csrow];
408 		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
409 		addr_shift	= 8;
410 
411 		if (pvt->fam == 0x15)
412 			base_bits = mask_bits =
413 				GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
414 		else
415 			base_bits = mask_bits =
416 				GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
417 	}
418 
419 	*base  = (csbase & base_bits) << addr_shift;
420 
421 	*mask  = ~0ULL;
422 	/* poke holes for the csmask */
423 	*mask &= ~(mask_bits << addr_shift);
424 	/* OR them in */
425 	*mask |= (csmask & mask_bits) << addr_shift;
426 }
427 
428 #define for_each_chip_select(i, dct, pvt) \
429 	for (i = 0; i < pvt->csels[dct].b_cnt; i++)
430 
431 #define chip_select_base(i, dct, pvt) \
432 	pvt->csels[dct].csbases[i]
433 
434 #define for_each_chip_select_mask(i, dct, pvt) \
435 	for (i = 0; i < pvt->csels[dct].m_cnt; i++)
436 
437 #define for_each_umc(i) \
438 	for (i = 0; i < pvt->max_mcs; i++)
439 
440 /*
441  * @input_addr is an InputAddr associated with the node given by mci. Return the
442  * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
443  */
444 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
445 {
446 	struct amd64_pvt *pvt;
447 	int csrow;
448 	u64 base, mask;
449 
450 	pvt = mci->pvt_info;
451 
452 	for_each_chip_select(csrow, 0, pvt) {
453 		if (!csrow_enabled(csrow, 0, pvt))
454 			continue;
455 
456 		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
457 
458 		mask = ~mask;
459 
460 		if ((input_addr & mask) == (base & mask)) {
461 			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
462 				 (unsigned long)input_addr, csrow,
463 				 pvt->mc_node_id);
464 
465 			return csrow;
466 		}
467 	}
468 	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
469 		 (unsigned long)input_addr, pvt->mc_node_id);
470 
471 	return -1;
472 }
473 
474 /*
475  * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
476  * for the node represented by mci. Info is passed back in *hole_base,
477  * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
478  * info is invalid. Info may be invalid for either of the following reasons:
479  *
480  * - The revision of the node is not E or greater.  In this case, the DRAM Hole
481  *   Address Register does not exist.
482  *
483  * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
484  *   indicating that its contents are not valid.
485  *
486  * The values passed back in *hole_base, *hole_offset, and *hole_size are
487  * complete 32-bit values despite the fact that the bitfields in the DHAR
488  * only represent bits 31-24 of the base and offset values.
489  */
490 static int get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
491 			      u64 *hole_offset, u64 *hole_size)
492 {
493 	struct amd64_pvt *pvt = mci->pvt_info;
494 
495 	/* only revE and later have the DRAM Hole Address Register */
496 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
497 		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
498 			 pvt->ext_model, pvt->mc_node_id);
499 		return 1;
500 	}
501 
502 	/* valid for Fam10h and above */
503 	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
504 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
505 		return 1;
506 	}
507 
508 	if (!dhar_valid(pvt)) {
509 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
510 			 pvt->mc_node_id);
511 		return 1;
512 	}
513 
514 	/* This node has Memory Hoisting */
515 
516 	/* +------------------+--------------------+--------------------+-----
517 	 * | memory           | DRAM hole          | relocated          |
518 	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
519 	 * |                  |                    | DRAM hole          |
520 	 * |                  |                    | [0x100000000,      |
521 	 * |                  |                    |  (0x100000000+     |
522 	 * |                  |                    |   (0xffffffff-x))] |
523 	 * +------------------+--------------------+--------------------+-----
524 	 *
525 	 * Above is a diagram of physical memory showing the DRAM hole and the
526 	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
527 	 * starts at address x (the base address) and extends through address
528 	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
529 	 * addresses in the hole so that they start at 0x100000000.
530 	 */
531 
532 	*hole_base = dhar_base(pvt);
533 	*hole_size = (1ULL << 32) - *hole_base;
534 
535 	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
536 					: k8_dhar_offset(pvt);
537 
538 	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
539 		 pvt->mc_node_id, (unsigned long)*hole_base,
540 		 (unsigned long)*hole_offset, (unsigned long)*hole_size);
541 
542 	return 0;
543 }
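
/*
 * Worked example (hypothetical register values, for illustration only): if
 * the DHAR reports a hole base of 0xc0000000, the hole spans
 * [0xc0000000, 0xffffffff], so hole_size = (1ULL << 32) - 0xc0000000 =
 * 0x40000000 (1 GiB). The DRAM behind the hole is hoisted to
 * [0x100000000, 0x100000000 + hole_size), and hole_offset is what gets
 * subtracted from a SysAddr in that window to recover the DramAddr. The
 * helper below is a sketch of that range check and is not wired into the
 * driver; its name is made up for this example.
 */
static inline bool example_sys_addr_in_hoisted_range(u64 sys_addr, u64 hole_size)
{
	/* True if sys_addr lies in the relocated copy of the DRAM hole. */
	return sys_addr >= (1ULL << 32) &&
	       sys_addr < ((1ULL << 32) + hole_size);
}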
544 
545 #ifdef CONFIG_EDAC_DEBUG
546 #define EDAC_DCT_ATTR_SHOW(reg)						\
547 static ssize_t reg##_show(struct device *dev,				\
548 			 struct device_attribute *mattr, char *data)	\
549 {									\
550 	struct mem_ctl_info *mci = to_mci(dev);				\
551 	struct amd64_pvt *pvt = mci->pvt_info;				\
552 									\
553 	return sprintf(data, "0x%016llx\n", (u64)pvt->reg);		\
554 }
555 
556 EDAC_DCT_ATTR_SHOW(dhar);
557 EDAC_DCT_ATTR_SHOW(dbam0);
558 EDAC_DCT_ATTR_SHOW(top_mem);
559 EDAC_DCT_ATTR_SHOW(top_mem2);
560 
561 static ssize_t dram_hole_show(struct device *dev, struct device_attribute *mattr,
562 			      char *data)
563 {
564 	struct mem_ctl_info *mci = to_mci(dev);
565 
566 	u64 hole_base = 0;
567 	u64 hole_offset = 0;
568 	u64 hole_size = 0;
569 
570 	get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
571 
572 	return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
573 						 hole_size);
574 }
575 
576 /*
577  * update NUM_DBG_ATTRS in case you add new members
578  */
579 static DEVICE_ATTR(dhar, S_IRUGO, dhar_show, NULL);
580 static DEVICE_ATTR(dbam, S_IRUGO, dbam0_show, NULL);
581 static DEVICE_ATTR(topmem, S_IRUGO, top_mem_show, NULL);
582 static DEVICE_ATTR(topmem2, S_IRUGO, top_mem2_show, NULL);
583 static DEVICE_ATTR_RO(dram_hole);
584 
585 static struct attribute *dbg_attrs[] = {
586 	&dev_attr_dhar.attr,
587 	&dev_attr_dbam.attr,
588 	&dev_attr_topmem.attr,
589 	&dev_attr_topmem2.attr,
590 	&dev_attr_dram_hole.attr,
591 	NULL
592 };
593 
594 static const struct attribute_group dbg_group = {
595 	.attrs = dbg_attrs,
596 };
597 
598 static ssize_t inject_section_show(struct device *dev,
599 				   struct device_attribute *mattr, char *buf)
600 {
601 	struct mem_ctl_info *mci = to_mci(dev);
602 	struct amd64_pvt *pvt = mci->pvt_info;
603 	return sprintf(buf, "0x%x\n", pvt->injection.section);
604 }
605 
606 /*
607  * store error injection section value which refers to one of 4 16-byte sections
608  * within a 64-byte cacheline
609  *
610  * range: 0..3
611  */
612 static ssize_t inject_section_store(struct device *dev,
613 				    struct device_attribute *mattr,
614 				    const char *data, size_t count)
615 {
616 	struct mem_ctl_info *mci = to_mci(dev);
617 	struct amd64_pvt *pvt = mci->pvt_info;
618 	unsigned long value;
619 	int ret;
620 
621 	ret = kstrtoul(data, 10, &value);
622 	if (ret < 0)
623 		return ret;
624 
625 	if (value > 3) {
626 		amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
627 		return -EINVAL;
628 	}
629 
630 	pvt->injection.section = (u32) value;
631 	return count;
632 }
633 
634 static ssize_t inject_word_show(struct device *dev,
635 				struct device_attribute *mattr, char *buf)
636 {
637 	struct mem_ctl_info *mci = to_mci(dev);
638 	struct amd64_pvt *pvt = mci->pvt_info;
639 	return sprintf(buf, "0x%x\n", pvt->injection.word);
640 }
641 
642 /*
643  * store error injection word value which refers to one of 9 16-bit words of the
644  * 16-byte (128-bit + ECC bits) section
645  *
646  * range: 0..8
647  */
648 static ssize_t inject_word_store(struct device *dev,
649 				 struct device_attribute *mattr,
650 				 const char *data, size_t count)
651 {
652 	struct mem_ctl_info *mci = to_mci(dev);
653 	struct amd64_pvt *pvt = mci->pvt_info;
654 	unsigned long value;
655 	int ret;
656 
657 	ret = kstrtoul(data, 10, &value);
658 	if (ret < 0)
659 		return ret;
660 
661 	if (value > 8) {
662 		amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
663 		return -EINVAL;
664 	}
665 
666 	pvt->injection.word = (u32) value;
667 	return count;
668 }
669 
670 static ssize_t inject_ecc_vector_show(struct device *dev,
671 				      struct device_attribute *mattr,
672 				      char *buf)
673 {
674 	struct mem_ctl_info *mci = to_mci(dev);
675 	struct amd64_pvt *pvt = mci->pvt_info;
676 	return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
677 }
678 
679 /*
680  * store 16 bit error injection vector which enables injecting errors to the
681  * corresponding bit within the error injection word above. When used during a
682  * DRAM ECC read, it holds the contents of the DRAM ECC bits.
683  */
684 static ssize_t inject_ecc_vector_store(struct device *dev,
685 				       struct device_attribute *mattr,
686 				       const char *data, size_t count)
687 {
688 	struct mem_ctl_info *mci = to_mci(dev);
689 	struct amd64_pvt *pvt = mci->pvt_info;
690 	unsigned long value;
691 	int ret;
692 
693 	ret = kstrtoul(data, 16, &value);
694 	if (ret < 0)
695 		return ret;
696 
697 	if (value & 0xFFFF0000) {
698 		amd64_warn("%s: invalid EccVector: 0x%lx\n", __func__, value);
699 		return -EINVAL;
700 	}
701 
702 	pvt->injection.bit_map = (u32) value;
703 	return count;
704 }
705 
706 /*
707  * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
708  * fields needed by the injection registers and read the NB Array Data Port.
709  */
710 static ssize_t inject_read_store(struct device *dev,
711 				 struct device_attribute *mattr,
712 				 const char *data, size_t count)
713 {
714 	struct mem_ctl_info *mci = to_mci(dev);
715 	struct amd64_pvt *pvt = mci->pvt_info;
716 	unsigned long value;
717 	u32 section, word_bits;
718 	int ret;
719 
720 	ret = kstrtoul(data, 10, &value);
721 	if (ret < 0)
722 		return ret;
723 
724 	/* Form value to choose 16-byte section of cacheline */
725 	section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
726 
727 	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
728 
729 	word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection);
730 
731 	/* Issue 'word' and 'bit' along with the READ request */
732 	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
733 
734 	edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
735 
736 	return count;
737 }
738 
739 /*
740  * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
741  * fields needed by the injection registers.
742  */
743 static ssize_t inject_write_store(struct device *dev,
744 				  struct device_attribute *mattr,
745 				  const char *data, size_t count)
746 {
747 	struct mem_ctl_info *mci = to_mci(dev);
748 	struct amd64_pvt *pvt = mci->pvt_info;
749 	u32 section, word_bits, tmp;
750 	unsigned long value;
751 	int ret;
752 
753 	ret = kstrtoul(data, 10, &value);
754 	if (ret < 0)
755 		return ret;
756 
757 	/* Form value to choose 16-byte section of cacheline */
758 	section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
759 
760 	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
761 
762 	word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection);
763 
764 	pr_notice_once("Don't forget to decrease MCE polling interval in\n"
765 			"/sys/bus/machinecheck/devices/machinecheck<CPUNUM>/check_interval\n"
766 			"so that you can get the error report faster.\n");
767 
768 	on_each_cpu(disable_caches, NULL, 1);
769 
770 	/* Issue 'word' and 'bit' along with the READ request */
771 	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
772 
773  retry:
774 	/* wait until injection happens */
775 	amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp);
776 	if (tmp & F10_NB_ARR_ECC_WR_REQ) {
777 		cpu_relax();
778 		goto retry;
779 	}
780 
781 	on_each_cpu(enable_caches, NULL, 1);
782 
783 	edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
784 
785 	return count;
786 }
787 
788 /*
789  * update NUM_INJ_ATTRS in case you add new members
790  */
791 
792 static DEVICE_ATTR_RW(inject_section);
793 static DEVICE_ATTR_RW(inject_word);
794 static DEVICE_ATTR_RW(inject_ecc_vector);
795 static DEVICE_ATTR_WO(inject_write);
796 static DEVICE_ATTR_WO(inject_read);
797 
798 static struct attribute *inj_attrs[] = {
799 	&dev_attr_inject_section.attr,
800 	&dev_attr_inject_word.attr,
801 	&dev_attr_inject_ecc_vector.attr,
802 	&dev_attr_inject_write.attr,
803 	&dev_attr_inject_read.attr,
804 	NULL
805 };
806 
807 static umode_t inj_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
808 {
809 	struct device *dev = kobj_to_dev(kobj);
810 	struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
811 	struct amd64_pvt *pvt = mci->pvt_info;
812 
813 	/* Families which have that injection hw */
814 	if (pvt->fam >= 0x10 && pvt->fam <= 0x16)
815 		return attr->mode;
816 
817 	return 0;
818 }
819 
820 static const struct attribute_group inj_group = {
821 	.attrs = inj_attrs,
822 	.is_visible = inj_is_visible,
823 };
824 #endif /* CONFIG_EDAC_DEBUG */
825 
826 /*
827  * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
828  * assumed that sys_addr maps to the node given by mci.
829  *
830  * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
831  * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
832  * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
833  * then it is also involved in translating a SysAddr to a DramAddr. Sections
834  * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
835  * These parts of the documentation are unclear. I interpret them as follows:
836  *
837  * When node n receives a SysAddr, it processes the SysAddr as follows:
838  *
839  * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
840  *    Limit registers for node n. If the SysAddr is not within the range
841  *    specified by the base and limit values, then node n ignores the Sysaddr
842  *    (since it does not map to node n). Otherwise continue to step 2 below.
843  *
844  * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
845  *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
846  *    the range of relocated addresses (starting at 0x100000000) from the DRAM
847  *    hole. If not, skip to step 3 below. Else get the value of the
848  *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
849  *    offset defined by this value from the SysAddr.
850  *
851  * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
852  *    Base register for node n. To obtain the DramAddr, subtract the base
853  *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
854  */
855 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
856 {
857 	struct amd64_pvt *pvt = mci->pvt_info;
858 	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
859 	int ret;
860 
861 	dram_base = get_dram_base(pvt, pvt->mc_node_id);
862 
863 	ret = get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
864 	if (!ret) {
865 		if ((sys_addr >= (1ULL << 32)) &&
866 		    (sys_addr < ((1ULL << 32) + hole_size))) {
867 			/* use DHAR to translate SysAddr to DramAddr */
868 			dram_addr = sys_addr - hole_offset;
869 
870 			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
871 				 (unsigned long)sys_addr,
872 				 (unsigned long)dram_addr);
873 
874 			return dram_addr;
875 		}
876 	}
877 
878 	/*
879 	 * Translate the SysAddr to a DramAddr as shown near the start of
880 	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
881 	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
882 	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
883 	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
884 	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
885 	 * Programmer's Manual Volume 1 Application Programming.
886 	 */
887 	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
888 
889 	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
890 		 (unsigned long)sys_addr, (unsigned long)dram_addr);
891 	return dram_addr;
892 }
893 
894 /*
895  * @intlv_en is the value of the IntlvEn field from a DRAM Base register
896  * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
897  * for node interleaving.
898  */
899 static int num_node_interleave_bits(unsigned intlv_en)
900 {
901 	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
902 	int n;
903 
904 	BUG_ON(intlv_en > 7);
905 	n = intlv_shift_table[intlv_en];
906 	return n;
907 }
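
/*
 * Worked example: an IntlvEn value of 0x3 means bits [13:12] of the SysAddr
 * select one of four interleaved nodes, i.e. two interleave bits. The table
 * above encodes exactly that: 0x1 -> 1 bit, 0x3 -> 2 bits, 0x7 -> 3 bits,
 * and any other value -> 0 bits (no node interleaving).
 */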
908 
909 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
910 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
911 {
912 	struct amd64_pvt *pvt;
913 	int intlv_shift;
914 	u64 input_addr;
915 
916 	pvt = mci->pvt_info;
917 
918 	/*
919 	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
920 	 * concerning translating a DramAddr to an InputAddr.
921 	 */
922 	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
923 	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
924 		      (dram_addr & 0xfff);
925 
926 	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
927 		 intlv_shift, (unsigned long)dram_addr,
928 		 (unsigned long)input_addr);
929 
930 	return input_addr;
931 }
932 
933 /*
934  * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
935  * assumed that @sys_addr maps to the node given by mci.
936  */
937 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
938 {
939 	u64 input_addr;
940 
941 	input_addr =
942 	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
943 
944 	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
945 		 (unsigned long)sys_addr, (unsigned long)input_addr);
946 
947 	return input_addr;
948 }
949 
950 /* Map the Error address to a PAGE and PAGE OFFSET. */
951 static inline void error_address_to_page_and_offset(u64 error_address,
952 						    struct err_info *err)
953 {
954 	err->page = (u32) (error_address >> PAGE_SHIFT);
955 	err->offset = ((u32) error_address) & ~PAGE_MASK;
956 }
957 
958 /*
959  * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
960  * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
961  * of a node that detected an ECC memory error.  mci represents the node that
962  * the error address maps to (possibly different from the node that detected
963  * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
964  * error.
965  */
966 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
967 {
968 	int csrow;
969 
970 	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
971 
972 	if (csrow == -1)
973 		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
974 				  "address 0x%lx\n", (unsigned long)sys_addr);
975 	return csrow;
976 }
977 
978 /*
979  * See AMD PPR DF::LclNodeTypeMap
980  *
981  * This register gives information for nodes of the same type within a system.
982  *
983  * Reading this register from a GPU node will tell how many GPU nodes are in the
984  * system and what the lowest AMD Node ID value is for the GPU nodes. Use this
985  * info to fixup the Linux logical "Node ID" value set in the AMD NB code and EDAC.
986  */
987 static struct local_node_map {
988 	u16 node_count;
989 	u16 base_node_id;
990 } gpu_node_map;
991 
992 #define PCI_DEVICE_ID_AMD_MI200_DF_F1		0x14d1
993 #define REG_LOCAL_NODE_TYPE_MAP			0x144
994 
995 /* Local Node Type Map (LNTM) fields */
996 #define LNTM_NODE_COUNT				GENMASK(27, 16)
997 #define LNTM_BASE_NODE_ID			GENMASK(11, 0)
998 
999 static int gpu_get_node_map(struct amd64_pvt *pvt)
1000 {
1001 	struct pci_dev *pdev;
1002 	int ret;
1003 	u32 tmp;
1004 
1005 	/*
1006 	 * Mapping of nodes from hardware-provided AMD Node ID to a
1007 	 * Linux logical one is applicable for MI200 models. Therefore,
1008 	 * return early for other heterogeneous systems.
1009 	 */
1010 	if (pvt->F3->device != PCI_DEVICE_ID_AMD_MI200_DF_F3)
1011 		return 0;
1012 
1013 	/*
1014 	 * Node ID 0 is reserved for CPUs. Therefore, a non-zero Node ID
1015 	 * means the values have already been cached.
1016 	 */
1017 	if (gpu_node_map.base_node_id)
1018 		return 0;
1019 
1020 	pdev = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F1, NULL);
1021 	if (!pdev) {
1022 		ret = -ENODEV;
1023 		goto out;
1024 	}
1025 
1026 	ret = pci_read_config_dword(pdev, REG_LOCAL_NODE_TYPE_MAP, &tmp);
1027 	if (ret) {
1028 		ret = pcibios_err_to_errno(ret);
1029 		goto out;
1030 	}
1031 
1032 	gpu_node_map.node_count = FIELD_GET(LNTM_NODE_COUNT, tmp);
1033 	gpu_node_map.base_node_id = FIELD_GET(LNTM_BASE_NODE_ID, tmp);
1034 
1035 out:
1036 	pci_dev_put(pdev);
1037 	return ret;
1038 }
1039 
1040 static int fixup_node_id(int node_id, struct mce *m)
1041 {
1042 	/* MCA_IPID[InstanceIdHi] gives the AMD Node ID for the bank. */
1043 	u8 nid = (m->ipid >> 44) & 0xF;
1044 
1045 	if (smca_get_bank_type(m->extcpu, m->bank) != SMCA_UMC_V2)
1046 		return node_id;
1047 
1048 	/* Nodes below the GPU base node are CPU nodes and don't need a fixup. */
1049 	if (nid < gpu_node_map.base_node_id)
1050 		return node_id;
1051 
1052 	/* Convert the hardware-provided AMD Node ID to a Linux logical one. */
1053 	return nid - gpu_node_map.base_node_id + 1;
1054 }
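
/*
 * Worked example (hypothetical values): on an MI200-class system where the
 * LNTM register reports base_node_id = 8, a UMC_V2 error whose
 * MCA_IPID[InstanceIdHi] decodes to AMD Node ID 9 is reported against Linux
 * logical node 9 - 8 + 1 = 2, since GPU nodes start at logical node 1,
 * right after the CPU node(s).
 */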
1055 
1056 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
1057 
1058 /*
1059  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
1060  * are ECC capable.
1061  */
1062 static unsigned long dct_determine_edac_cap(struct amd64_pvt *pvt)
1063 {
1064 	unsigned long edac_cap = EDAC_FLAG_NONE;
1065 	u8 bit;
1066 
1067 	bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
1068 		? 19
1069 		: 17;
1070 
1071 	if (pvt->dclr0 & BIT(bit))
1072 		edac_cap = EDAC_FLAG_SECDED;
1073 
1074 	return edac_cap;
1075 }
1076 
1077 static unsigned long umc_determine_edac_cap(struct amd64_pvt *pvt)
1078 {
1079 	u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
1080 	unsigned long edac_cap = EDAC_FLAG_NONE;
1081 
1082 	for_each_umc(i) {
1083 		if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
1084 			continue;
1085 
1086 		umc_en_mask |= BIT(i);
1087 
1088 		/* UMC Configuration bit 12 (DimmEccEn) */
1089 		if (pvt->umc[i].umc_cfg & BIT(12))
1090 			dimm_ecc_en_mask |= BIT(i);
1091 	}
1092 
1093 	if (umc_en_mask == dimm_ecc_en_mask)
1094 		edac_cap = EDAC_FLAG_SECDED;
1095 
1096 	return edac_cap;
1097 }
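
/*
 * Example of the check above (hypothetical state): with UMC0 and UMC1
 * initialized but only UMC0 reporting DimmEccEn, umc_en_mask = 0x3 and
 * dimm_ecc_en_mask = 0x1; the masks differ, so EDAC_FLAG_NONE is kept.
 * ECC is reported only when every enabled UMC has ECC-enabled DIMMs.
 */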
1098 
1099 /*
1100  * debug routine to display the memory sizes of all logical DIMMs and their
1101  * CSROWs
1102  */
1103 static void dct_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1104 {
1105 	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
1106 	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;
1107 	int dimm, size0, size1;
1108 
1109 	if (pvt->fam == 0xf) {
1110 		/* K8 families < revF not supported yet */
1111 		if (pvt->ext_model < K8_REV_F)
1112 			return;
1113 
1114 		WARN_ON(ctrl != 0);
1115 	}
1116 
1117 	if (pvt->fam == 0x10) {
1118 		dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
1119 							   : pvt->dbam0;
1120 		dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
1121 				 pvt->csels[1].csbases :
1122 				 pvt->csels[0].csbases;
1123 	} else if (ctrl) {
1124 		dbam = pvt->dbam0;
1125 		dcsb = pvt->csels[1].csbases;
1126 	}
1127 	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1128 		 ctrl, dbam);
1129 
1130 	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1131 
1132 	/* Dump memory sizes for DIMM and its CSROWs */
1133 	for (dimm = 0; dimm < 4; dimm++) {
1134 		size0 = 0;
1135 		if (dcsb[dimm * 2] & DCSB_CS_ENABLE)
1136 			/*
1137 			 * For F15h M60h, we need a multiplier for LRDIMM cs_size
1138 			 * calculation. We pass dimm value to the dbam_to_cs
1139 			 * mapper so we can find the multiplier from the
1140 			 * corresponding DCSM.
1141 			 */
1142 			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
1143 						     DBAM_DIMM(dimm, dbam),
1144 						     dimm);
1145 
1146 		size1 = 0;
1147 		if (dcsb[dimm * 2 + 1] & DCSB_CS_ENABLE)
1148 			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
1149 						     DBAM_DIMM(dimm, dbam),
1150 						     dimm);
1151 
1152 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1153 			   dimm * 2,     size0,
1154 			   dimm * 2 + 1, size1);
1155 	}
1156 }
1157 
1158 
1159 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
1160 {
1161 	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
1162 
1163 	if (pvt->dram_type == MEM_LRDDR3) {
1164 		u32 dcsm = pvt->csels[chan].csmasks[0];
1165 		/*
1166 		 * It's assumed all LRDIMMs in a DCT are going to be of
1167 		 * the same 'type' until proven otherwise. So, use a cs
1168 		 * value of '0' here to get dcsm value.
1169 		 */
1170 		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
1171 	}
1172 
1173 	edac_dbg(1, "All DIMMs support ECC:%s\n",
1174 		    (dclr & BIT(19)) ? "yes" : "no");
1175 
1176 
1177 	edac_dbg(1, "  PAR/ERR parity: %s\n",
1178 		 (dclr & BIT(8)) ?  "enabled" : "disabled");
1179 
1180 	if (pvt->fam == 0x10)
1181 		edac_dbg(1, "  DCT 128bit mode width: %s\n",
1182 			 (dclr & BIT(11)) ?  "128b" : "64b");
1183 
1184 	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
1185 		 (dclr & BIT(12)) ?  "yes" : "no",
1186 		 (dclr & BIT(13)) ?  "yes" : "no",
1187 		 (dclr & BIT(14)) ?  "yes" : "no",
1188 		 (dclr & BIT(15)) ?  "yes" : "no");
1189 }
1190 
1191 #define CS_EVEN_PRIMARY		BIT(0)
1192 #define CS_ODD_PRIMARY		BIT(1)
1193 #define CS_EVEN_SECONDARY	BIT(2)
1194 #define CS_ODD_SECONDARY	BIT(3)
1195 #define CS_3R_INTERLEAVE	BIT(4)
1196 
1197 #define CS_EVEN			(CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
1198 #define CS_ODD			(CS_ODD_PRIMARY | CS_ODD_SECONDARY)
1199 
1200 static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
1201 {
1202 	u8 base, count = 0;
1203 	int cs_mode = 0;
1204 
1205 	if (csrow_enabled(2 * dimm, ctrl, pvt))
1206 		cs_mode |= CS_EVEN_PRIMARY;
1207 
1208 	if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
1209 		cs_mode |= CS_ODD_PRIMARY;
1210 
1211 	/* Asymmetric dual-rank DIMM support. */
1212 	if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
1213 		cs_mode |= CS_ODD_SECONDARY;
1214 
1215 	/*
1216 	 * 3 Rank interleaving support.
1217 	 * There should be only three bases enabled and their two masks should
1218 	 * be equal.
1219 	 */
1220 	for_each_chip_select(base, ctrl, pvt)
1221 		count += csrow_enabled(base, ctrl, pvt);
1222 
1223 	if (count == 3 &&
1224 	    pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) {
1225 		edac_dbg(1, "3R interleaving in use.\n");
1226 		cs_mode |= CS_3R_INTERLEAVE;
1227 	}
1228 
1229 	return cs_mode;
1230 }
1231 
1232 static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
1233 				  int csrow_nr, int dimm)
1234 {
1235 	u32 msb, weight, num_zero_bits;
1236 	u32 addr_mask_deinterleaved;
1237 	int size = 0;
1238 
1239 	/*
1240 	 * The number of zero bits in the mask is equal to the number of bits
1241 	 * in a full mask minus the number of bits in the current mask.
1242 	 *
1243 	 * The MSB is the number of bits in the full mask because BIT[0] is
1244 	 * always 0.
1245 	 *
1246 	 * In the special 3 Rank interleaving case, a single bit is flipped
1247 	 * without swapping with the most significant bit. This can be handled
1248 	 * by keeping the MSB where it is and ignoring the single zero bit.
1249 	 */
1250 	msb = fls(addr_mask_orig) - 1;
1251 	weight = hweight_long(addr_mask_orig);
1252 	num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
1253 
1254 	/* Take the number of zero bits off from the top of the mask. */
1255 	addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
1256 
1257 	edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
1258 	edac_dbg(1, "  Original AddrMask: 0x%x\n", addr_mask_orig);
1259 	edac_dbg(1, "  Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
1260 
1261 	/* Register [31:1] = Address [39:9]. Size is in kBs here. */
1262 	size = (addr_mask_deinterleaved >> 2) + 1;
1263 
1264 	/* Return size in MBs. */
1265 	return size >> 10;
1266 }
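
/*
 * Worked example (hypothetical mask value): an address mask of 0x03fffffe
 * has register bits [25:1] set, which per the [31:1] -> [39:9] mapping above
 * covers address bits [33:9]:
 *   size = (0x03fffffe >> 2) + 1 = 0x1000000 kB, i.e. 16384 MB (16 GiB).
 */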
1267 
1268 static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
1269 				    unsigned int cs_mode, int csrow_nr)
1270 {
1271 	int cs_mask_nr = csrow_nr;
1272 	u32 addr_mask_orig;
1273 	int dimm, size = 0;
1274 
1275 	/* No Chip Selects are enabled. */
1276 	if (!cs_mode)
1277 		return size;
1278 
1279 	/* Requested size of an even CS but none are enabled. */
1280 	if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
1281 		return size;
1282 
1283 	/* Requested size of an odd CS but none are enabled. */
1284 	if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
1285 		return size;
1286 
1287 	/*
1288 	 * Family 17h introduced systems with one mask per DIMM,
1289 	 * and two Chip Selects per DIMM.
1290 	 *
1291 	 *	CS0 and CS1 -> MASK0 / DIMM0
1292 	 *	CS2 and CS3 -> MASK1 / DIMM1
1293 	 *
1294 	 * Family 19h Model 10h introduced systems with one mask per Chip Select,
1295 	 * and two Chip Selects per DIMM.
1296 	 *
1297 	 *	CS0 -> MASK0 -> DIMM0
1298 	 *	CS1 -> MASK1 -> DIMM0
1299 	 *	CS2 -> MASK2 -> DIMM1
1300 	 *	CS3 -> MASK3 -> DIMM1
1301 	 *
1302 	 * Keep the mask number equal to the Chip Select number for newer systems,
1303 	 * and shift the mask number for older systems.
1304 	 */
1305 	dimm = csrow_nr >> 1;
1306 
1307 	if (!pvt->flags.zn_regs_v2)
1308 		cs_mask_nr >>= 1;
1309 
1310 	/* Asymmetric dual-rank DIMM support. */
1311 	if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
1312 		addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
1313 	else
1314 		addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
1315 
1316 	return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, dimm);
1317 }
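
/*
 * Illustrative sketch (not used by the driver) of the mask selection
 * described above: on pre-zn_regs_v2 systems two chip selects share one
 * mask, so e.g. csrow_nr 3 uses mask 1; on zn_regs_v2 systems the mask
 * number equals the chip select number, so csrow_nr 3 uses mask 3. The
 * helper name is made up for this example.
 */
static inline int example_cs_to_mask_nr(int csrow_nr, bool zn_regs_v2)
{
	return zn_regs_v2 ? csrow_nr : csrow_nr >> 1;
}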
1318 
1319 static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1320 {
1321 	int dimm, size0, size1, cs0, cs1, cs_mode;
1322 
1323 	edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
1324 
1325 	for (dimm = 0; dimm < 2; dimm++) {
1326 		cs0 = dimm * 2;
1327 		cs1 = dimm * 2 + 1;
1328 
1329 		cs_mode = umc_get_cs_mode(dimm, ctrl, pvt);
1330 
1331 		size0 = umc_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs0);
1332 		size1 = umc_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs1);
1333 
1334 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1335 				cs0,	size0,
1336 				cs1,	size1);
1337 	}
1338 }
1339 
1340 static void umc_dump_misc_regs(struct amd64_pvt *pvt)
1341 {
1342 	struct amd64_umc *umc;
1343 	u32 i;
1344 
1345 	for_each_umc(i) {
1346 		umc = &pvt->umc[i];
1347 
1348 		edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
1349 		edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
1350 		edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
1351 		edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
1352 		edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
1353 
1354 		edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
1355 				i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
1356 				    (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
1357 		edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
1358 				i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
1359 		edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
1360 				i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
1361 		edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
1362 				i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
1363 
1364 		umc_debug_display_dimm_sizes(pvt, i);
1365 	}
1366 }
1367 
1368 static void dct_dump_misc_regs(struct amd64_pvt *pvt)
1369 {
1370 	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
1371 
1372 	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
1373 		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
1374 
1375 	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
1376 		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
1377 		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
1378 
1379 	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
1380 
1381 	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
1382 
1383 	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
1384 		 pvt->dhar, dhar_base(pvt),
1385 		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
1386 				   : f10_dhar_offset(pvt));
1387 
1388 	dct_debug_display_dimm_sizes(pvt, 0);
1389 
1390 	/* everything below this point is Fam10h and above */
1391 	if (pvt->fam == 0xf)
1392 		return;
1393 
1394 	dct_debug_display_dimm_sizes(pvt, 1);
1395 
1396 	/* Only if NOT ganged does dclr1 have valid info */
1397 	if (!dct_ganging_enabled(pvt))
1398 		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
1399 
1400 	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
1401 
1402 	amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
1403 }
1404 
1405 /*
1406  * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
1407  */
1408 static void dct_prep_chip_selects(struct amd64_pvt *pvt)
1409 {
1410 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
1411 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
1412 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
1413 	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
1414 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
1415 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
1416 	} else {
1417 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
1418 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
1419 	}
1420 }
1421 
1422 static void umc_prep_chip_selects(struct amd64_pvt *pvt)
1423 {
1424 	int umc;
1425 
1426 	for_each_umc(umc) {
1427 		pvt->csels[umc].b_cnt = 4;
1428 		pvt->csels[umc].m_cnt = pvt->flags.zn_regs_v2 ? 4 : 2;
1429 	}
1430 }
1431 
1432 static void umc_read_base_mask(struct amd64_pvt *pvt)
1433 {
1434 	u32 umc_base_reg, umc_base_reg_sec;
1435 	u32 umc_mask_reg, umc_mask_reg_sec;
1436 	u32 base_reg, base_reg_sec;
1437 	u32 mask_reg, mask_reg_sec;
1438 	u32 *base, *base_sec;
1439 	u32 *mask, *mask_sec;
1440 	int cs, umc;
1441 	u32 tmp;
1442 
1443 	for_each_umc(umc) {
1444 		umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
1445 		umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
1446 
1447 		for_each_chip_select(cs, umc, pvt) {
1448 			base = &pvt->csels[umc].csbases[cs];
1449 			base_sec = &pvt->csels[umc].csbases_sec[cs];
1450 
1451 			base_reg = umc_base_reg + (cs * 4);
1452 			base_reg_sec = umc_base_reg_sec + (cs * 4);
1453 
1454 			if (!amd_smn_read(pvt->mc_node_id, base_reg, &tmp)) {
1455 				*base = tmp;
1456 				edac_dbg(0, "  DCSB%d[%d]=0x%08x reg: 0x%x\n",
1457 					 umc, cs, *base, base_reg);
1458 			}
1459 
1460 			if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, &tmp)) {
1461 				*base_sec = tmp;
1462 				edac_dbg(0, "    DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
1463 					 umc, cs, *base_sec, base_reg_sec);
1464 			}
1465 		}
1466 
1467 		umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
1468 		umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC);
1469 
1470 		for_each_chip_select_mask(cs, umc, pvt) {
1471 			mask = &pvt->csels[umc].csmasks[cs];
1472 			mask_sec = &pvt->csels[umc].csmasks_sec[cs];
1473 
1474 			mask_reg = umc_mask_reg + (cs * 4);
1475 			mask_reg_sec = umc_mask_reg_sec + (cs * 4);
1476 
1477 			if (!amd_smn_read(pvt->mc_node_id, mask_reg, &tmp)) {
1478 				*mask = tmp;
1479 				edac_dbg(0, "  DCSM%d[%d]=0x%08x reg: 0x%x\n",
1480 					 umc, cs, *mask, mask_reg);
1481 			}
1482 
1483 			if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, &tmp)) {
1484 				*mask_sec = tmp;
1485 				edac_dbg(0, "    DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
1486 					 umc, cs, *mask_sec, mask_reg_sec);
1487 			}
1488 		}
1489 	}
1490 }
1491 
1492 /*
1493  * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
1494  */
1495 static void dct_read_base_mask(struct amd64_pvt *pvt)
1496 {
1497 	int cs;
1498 
1499 	for_each_chip_select(cs, 0, pvt) {
1500 		int reg0   = DCSB0 + (cs * 4);
1501 		int reg1   = DCSB1 + (cs * 4);
1502 		u32 *base0 = &pvt->csels[0].csbases[cs];
1503 		u32 *base1 = &pvt->csels[1].csbases[cs];
1504 
1505 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
1506 			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
1507 				 cs, *base0, reg0);
1508 
1509 		if (pvt->fam == 0xf)
1510 			continue;
1511 
1512 		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
1513 			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
1514 				 cs, *base1, (pvt->fam == 0x10) ? reg1
1515 							: reg0);
1516 	}
1517 
1518 	for_each_chip_select_mask(cs, 0, pvt) {
1519 		int reg0   = DCSM0 + (cs * 4);
1520 		int reg1   = DCSM1 + (cs * 4);
1521 		u32 *mask0 = &pvt->csels[0].csmasks[cs];
1522 		u32 *mask1 = &pvt->csels[1].csmasks[cs];
1523 
1524 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
1525 			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
1526 				 cs, *mask0, reg0);
1527 
1528 		if (pvt->fam == 0xf)
1529 			continue;
1530 
1531 		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
1532 			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
1533 				 cs, *mask1, (pvt->fam == 0x10) ? reg1
1534 							: reg0);
1535 	}
1536 }
1537 
1538 static void umc_determine_memory_type(struct amd64_pvt *pvt)
1539 {
1540 	struct amd64_umc *umc;
1541 	u32 i;
1542 
1543 	for_each_umc(i) {
1544 		umc = &pvt->umc[i];
1545 
1546 		if (!(umc->sdp_ctrl & UMC_SDP_INIT)) {
1547 			umc->dram_type = MEM_EMPTY;
1548 			continue;
1549 		}
1550 
1551 		/*
1552 		 * Check if the system supports the "DDR Type" field in UMC Config
1553 		 * and has DDR5 DIMMs in use.
1554 		 */
1555 		if (pvt->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) {
1556 			if (umc->dimm_cfg & BIT(5))
1557 				umc->dram_type = MEM_LRDDR5;
1558 			else if (umc->dimm_cfg & BIT(4))
1559 				umc->dram_type = MEM_RDDR5;
1560 			else
1561 				umc->dram_type = MEM_DDR5;
1562 		} else {
1563 			if (umc->dimm_cfg & BIT(5))
1564 				umc->dram_type = MEM_LRDDR4;
1565 			else if (umc->dimm_cfg & BIT(4))
1566 				umc->dram_type = MEM_RDDR4;
1567 			else
1568 				umc->dram_type = MEM_DDR4;
1569 		}
1570 
1571 		edac_dbg(1, "  UMC%d DIMM type: %s\n", i, edac_mem_types[umc->dram_type]);
1572 	}
1573 }
1574 
1575 static void dct_determine_memory_type(struct amd64_pvt *pvt)
1576 {
1577 	u32 dram_ctrl, dcsm;
1578 
1579 	switch (pvt->fam) {
1580 	case 0xf:
1581 		if (pvt->ext_model >= K8_REV_F)
1582 			goto ddr3;
1583 
1584 		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1585 		return;
1586 
1587 	case 0x10:
1588 		if (pvt->dchr0 & DDR3_MODE)
1589 			goto ddr3;
1590 
1591 		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1592 		return;
1593 
1594 	case 0x15:
1595 		if (pvt->model < 0x60)
1596 			goto ddr3;
1597 
1598 		/*
1599 		 * Model 0x60 needs special handling:
1600 		 *
1601 		 * We use a Chip Select value of '0' to obtain dcsm.
1602 		 * Theoretically, it is possible to populate LRDIMMs of different
1603 		 * 'Rank' value on a DCT. But this is not the common case. So,
1604 		 * it's reasonable to assume all DIMMs are going to be of the same
1605 		 * 'type' until proven otherwise.
1606 		 */
1607 		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
1608 		dcsm = pvt->csels[0].csmasks[0];
1609 
1610 		if (((dram_ctrl >> 8) & 0x7) == 0x2)
1611 			pvt->dram_type = MEM_DDR4;
1612 		else if (pvt->dclr0 & BIT(16))
1613 			pvt->dram_type = MEM_DDR3;
1614 		else if (dcsm & 0x3)
1615 			pvt->dram_type = MEM_LRDDR3;
1616 		else
1617 			pvt->dram_type = MEM_RDDR3;
1618 
1619 		return;
1620 
1621 	case 0x16:
1622 		goto ddr3;
1623 
1624 	default:
1625 		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
1626 		pvt->dram_type = MEM_EMPTY;
1627 	}
1628 
1629 	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
1630 	return;
1631 
1632 ddr3:
1633 	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
1634 }
1635 
1636 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
1637 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
1638 {
1639 	u16 mce_nid = topology_amd_node_id(m->extcpu);
1640 	struct mem_ctl_info *mci;
1641 	u8 start_bit = 1;
1642 	u8 end_bit   = 47;
1643 	u64 addr;
1644 
1645 	mci = edac_mc_find(mce_nid);
1646 	if (!mci)
1647 		return 0;
1648 
1649 	pvt = mci->pvt_info;
1650 
1651 	if (pvt->fam == 0xf) {
1652 		start_bit = 3;
1653 		end_bit   = 39;
1654 	}
1655 
1656 	addr = m->addr & GENMASK_ULL(end_bit, start_bit);
1657 
1658 	/*
1659 	 * Erratum 637 workaround
1660 	 */
1661 	if (pvt->fam == 0x15) {
1662 		u64 cc6_base, tmp_addr;
1663 		u32 tmp;
1664 		u8 intlv_en;
1665 
1666 		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
1667 			return addr;
1668 
1669 
1670 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
1671 		intlv_en = tmp >> 21 & 0x7;
1672 
1673 		/* add [47:27] + 3 trailing bits */
1674 		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;
1675 
1676 		/* reverse and add DramIntlvEn */
1677 		cc6_base |= intlv_en ^ 0x7;
1678 
1679 		/* pin at [47:24] */
1680 		cc6_base <<= 24;
1681 
1682 		if (!intlv_en)
1683 			return cc6_base | (addr & GENMASK_ULL(23, 0));
1684 
1685 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
1686 
1687 							/* faster log2 */
1688 		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
1689 
1690 		/* OR DramIntlvSel into bits [14:12] */
1691 		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
1692 
1693 		/* add remaining [11:0] bits from original MC4_ADDR */
1694 		tmp_addr |= addr & GENMASK_ULL(11, 0);
1695 
1696 		return cc6_base | tmp_addr;
1697 	}
1698 
1699 	return addr;
1700 }
1701 
1702 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1703 						unsigned int device,
1704 						struct pci_dev *related)
1705 {
1706 	struct pci_dev *dev = NULL;
1707 
1708 	while ((dev = pci_get_device(vendor, device, dev))) {
1709 		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
1710 		    (dev->bus->number == related->bus->number) &&
1711 		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1712 			break;
1713 	}
1714 
1715 	return dev;
1716 }
1717 
1718 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1719 {
1720 	struct amd_northbridge *nb;
1721 	struct pci_dev *f1 = NULL;
1722 	unsigned int pci_func;
1723 	int off = range << 3;
1724 	u32 llim;
1725 
1726 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
1727 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1728 
1729 	if (pvt->fam == 0xf)
1730 		return;
1731 
1732 	if (!dram_rw(pvt, range))
1733 		return;
1734 
1735 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
1736 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1737 
1738 	/* F15h: factor in CC6 save area by reading dst node's limit reg */
1739 	if (pvt->fam != 0x15)
1740 		return;
1741 
1742 	nb = node_to_amd_nb(dram_dst_node(pvt, range));
1743 	if (WARN_ON(!nb))
1744 		return;
1745 
1746 	if (pvt->model == 0x60)
1747 		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
1748 	else if (pvt->model == 0x30)
1749 		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
1750 	else
1751 		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
1752 
1753 	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
1754 	if (WARN_ON(!f1))
1755 		return;
1756 
1757 	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1758 
1759 	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
1760 
1761 				    /* {[39:27],111b} */
1762 	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1763 
1764 	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
1765 
1766 				    /* [47:40] */
1767 	pvt->ranges[range].lim.hi |= llim >> 13;
1768 
1769 	pci_dev_put(f1);
1770 }
1771 
1772 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1773 				    struct err_info *err)
1774 {
1775 	struct amd64_pvt *pvt = mci->pvt_info;
1776 
1777 	error_address_to_page_and_offset(sys_addr, err);
1778 
1779 	/*
1780 	 * Find out which node the error address belongs to. This may be
1781 	 * different from the node that detected the error.
1782 	 */
1783 	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
1784 	if (!err->src_mci) {
1785 		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1786 			     (unsigned long)sys_addr);
1787 		err->err_code = ERR_NODE;
1788 		return;
1789 	}
1790 
1791 	/* Now map the sys_addr to a CSROW */
1792 	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
1793 	if (err->csrow < 0) {
1794 		err->err_code = ERR_CSROW;
1795 		return;
1796 	}
1797 
1798 	/* CHIPKILL enabled */
1799 	if (pvt->nbcfg & NBCFG_CHIPKILL) {
1800 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1801 		if (err->channel < 0) {
1802 			/*
1803 			 * Syndrome didn't map, so we don't know which of the
1804 			 * 2 DIMMs is in error. So we need to ID 'both' of them
1805 			 * as suspect.
1806 			 */
1807 			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
1808 				      "possible error reporting race\n",
1809 				      err->syndrome);
1810 			err->err_code = ERR_CHANNEL;
1811 			return;
1812 		}
1813 	} else {
1814 		/*
1815 		 * non-chipkill ecc mode
1816 		 *
1817 		 * The k8 documentation is unclear about how to determine the
1818 		 * channel number when using non-chipkill memory.  This method
1819 		 * was obtained from email communication with someone at AMD.
1820 		 * (Wish the email was placed in this comment - norsk)
1821 		 */
1822 		err->channel = ((sys_addr & BIT(3)) != 0);
1823 	}
1824 }
1825 
1826 static int ddr2_cs_size(unsigned i, bool dct_width)
1827 {
1828 	unsigned shift = 0;
1829 
1830 	if (i <= 2)
1831 		shift = i;
1832 	else if (!(i & 0x1))
1833 		shift = i >> 1;
1834 	else
1835 		shift = (i + 1) >> 1;
1836 
1837 	return 128 << (shift + !!dct_width);
1838 }
1839 
1840 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1841 				  unsigned cs_mode, int cs_mask_nr)
1842 {
1843 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1844 
1845 	if (pvt->ext_model >= K8_REV_F) {
1846 		WARN_ON(cs_mode > 11);
1847 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1848 	}
1849 	else if (pvt->ext_model >= K8_REV_D) {
1850 		unsigned diff;
1851 		WARN_ON(cs_mode > 10);
1852 
1853 		/*
1854 		 * The calculation below, besides trying to win an obfuscated C
1855 		 * contest, maps cs_mode values to DIMM chip select sizes. The
1856 		 * mappings are:
1857 		 *
1858 		 * cs_mode	CS size (MB)
1859 		 * =======	============
1860 		 * 0		32
1861 		 * 1		64
1862 		 * 2		128
1863 		 * 3		128
1864 		 * 4		256
1865 		 * 5		512
1866 		 * 6		256
1867 		 * 7		512
1868 		 * 8		1024
1869 		 * 9		1024
1870 		 * 10		2048
1871 		 *
1872 		 * Basically, it calculates a value with which to shift the
1873 		 * smallest CS size of 32MB.
1874 		 *
1875 		 * ddr[23]_cs_size have a similar purpose.
1876 		 */
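		/*
		 * Worked example (purely illustrative): cs_mode = 7 gives
		 * diff = 7/3 + (7 > 5) = 2 + 1 = 3, so the chip select size
		 * is 32 << (7 - 3) = 512 MB, matching the table above.
		 */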
1877 		diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1878 
1879 		return 32 << (cs_mode - diff);
1880 	}
1881 	else {
1882 		WARN_ON(cs_mode > 6);
1883 		return 32 << cs_mode;
1884 	}
1885 }
1886 
1887 static int ddr3_cs_size(unsigned i, bool dct_width)
1888 {
1889 	unsigned shift = 0;
1890 	int cs_size = 0;
1891 
1892 	if (i == 0 || i == 3 || i == 4)
1893 		cs_size = -1;
1894 	else if (i <= 2)
1895 		shift = i;
1896 	else if (i == 12)
1897 		shift = 7;
1898 	else if (!(i & 0x1))
1899 		shift = i >> 1;
1900 	else
1901 		shift = (i + 1) >> 1;
1902 
1903 	if (cs_size != -1)
1904 		cs_size = (128 * (1 << !!dct_width)) << shift;
1905 
1906 	return cs_size;
1907 }
1908 
1909 static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
1910 {
1911 	unsigned shift = 0;
1912 	int cs_size = 0;
1913 
1914 	if (i < 4 || i == 6)
1915 		cs_size = -1;
1916 	else if (i == 12)
1917 		shift = 7;
1918 	else if (!(i & 0x1))
1919 		shift = i >> 1;
1920 	else
1921 		shift = (i + 1) >> 1;
1922 
1923 	if (cs_size != -1)
1924 		cs_size = rank_multiply * (128 << shift);
1925 
1926 	return cs_size;
1927 }
1928 
1929 static int ddr4_cs_size(unsigned i)
1930 {
1931 	int cs_size = 0;
1932 
1933 	if (i == 0)
1934 		cs_size = -1;
1935 	else if (i == 1)
1936 		cs_size = 1024;
1937 	else
1938 		/* Min cs_size = 1G */
1939 		cs_size = 1024 * (1 << (i >> 1));
1940 
1941 	return cs_size;
1942 }
1943 
1944 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1945 				   unsigned cs_mode, int cs_mask_nr)
1946 {
1947 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1948 
1949 	WARN_ON(cs_mode > 11);
1950 
1951 	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1952 		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1953 	else
1954 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1955 }
1956 
1957 /*
1958  * F15h supports only 64-bit DCT interfaces
1959  */
1960 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1961 				   unsigned cs_mode, int cs_mask_nr)
1962 {
1963 	WARN_ON(cs_mode > 12);
1964 
1965 	return ddr3_cs_size(cs_mode, false);
1966 }
1967 
1968 /* F15h M60h supports DDR4 mapping as well. */
1969 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1970 					unsigned cs_mode, int cs_mask_nr)
1971 {
1972 	int cs_size;
1973 	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
1974 
1975 	WARN_ON(cs_mode > 12);
1976 
1977 	if (pvt->dram_type == MEM_DDR4) {
1978 		if (cs_mode > 9)
1979 			return -1;
1980 
1981 		cs_size = ddr4_cs_size(cs_mode);
1982 	} else if (pvt->dram_type == MEM_LRDDR3) {
1983 		unsigned rank_multiply = dcsm & 0xf;
1984 
1985 		if (rank_multiply == 3)
1986 			rank_multiply = 4;
1987 		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
1988 	} else {
1989 		/* Minimum cs size is 512 MB for F15h M60h */
1990 		if (cs_mode == 0x1)
1991 			return -1;
1992 
1993 		cs_size = ddr3_cs_size(cs_mode, false);
1994 	}
1995 
1996 	return cs_size;
1997 }
1998 
1999 /*
2000  * F16h and F15h model 30h have only limited cs_modes.
2001  */
2002 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2003 				unsigned cs_mode, int cs_mask_nr)
2004 {
2005 	WARN_ON(cs_mode > 12);
2006 
2007 	if (cs_mode == 6 || cs_mode == 8 ||
2008 	    cs_mode == 9 || cs_mode == 12)
2009 		return -1;
2010 	else
2011 		return ddr3_cs_size(cs_mode, false);
2012 }
2013 
2014 static void read_dram_ctl_register(struct amd64_pvt *pvt)
2015 {
2016 
2017 	if (pvt->fam == 0xf)
2018 		return;
2019 
2020 	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
2021 		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
2022 			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
2023 
2024 		edac_dbg(0, "  DCTs operate in %s mode\n",
2025 			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
2026 
2027 		if (!dct_ganging_enabled(pvt))
2028 			edac_dbg(0, "  Address range split per DCT: %s\n",
2029 				 (dct_high_range_enabled(pvt) ? "yes" : "no"));
2030 
2031 		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
2032 			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
2033 			 (dct_memory_cleared(pvt) ? "yes" : "no"));
2034 
2035 		edac_dbg(0, "  channel interleave: %s, "
2036 			 "interleave bits selector: 0x%x\n",
2037 			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
2038 			 dct_sel_interleave_addr(pvt));
2039 	}
2040 
2041 	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
2042 }
2043 
2044 /*
2045  * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
2046  * 2.10.12 Memory Interleaving Modes).
2047  */
2048 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
2049 				     u8 intlv_en, int num_dcts_intlv,
2050 				     u32 dct_sel)
2051 {
2052 	u8 channel = 0;
2053 	u8 select;
2054 
2055 	if (!(intlv_en))
2056 		return (u8)(dct_sel);
2057 
2058 	if (num_dcts_intlv == 2) {
2059 		select = (sys_addr >> 8) & 0x3;
2060 		channel = select ? 0x3 : 0;
2061 	} else if (num_dcts_intlv == 4) {
2062 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
2063 		switch (intlv_addr) {
2064 		case 0x4:
2065 			channel = (sys_addr >> 8) & 0x3;
2066 			break;
2067 		case 0x5:
2068 			channel = (sys_addr >> 9) & 0x3;
2069 			break;
2070 		}
2071 	}
2072 	return channel;
2073 }
2074 
2075 /*
2076  * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
2077  * Interleaving Modes.
2078  */
2079 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
2080 				bool hi_range_sel, u8 intlv_en)
2081 {
2082 	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
2083 
2084 	if (dct_ganging_enabled(pvt))
2085 		return 0;
2086 
2087 	if (hi_range_sel)
2088 		return dct_sel_high;
2089 
2090 	/*
2091 	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
2092 	 */
2093 	if (dct_interleave_enabled(pvt)) {
2094 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
2095 
2096 		/* return DCT select function: 0=DCT0, 1=DCT1 */
2097 		if (!intlv_addr)
2098 			return sys_addr >> 6 & 1;
2099 
2100 		if (intlv_addr & 0x2) {
2101 			u8 shift = intlv_addr & 0x1 ? 9 : 6;
2102 			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
2103 
2104 			return ((sys_addr >> shift) & 1) ^ temp;
2105 		}
2106 
2107 		if (intlv_addr & 0x4) {
2108 			u8 shift = intlv_addr & 0x1 ? 9 : 8;
2109 
2110 			return (sys_addr >> shift) & 1;
2111 		}
2112 
2113 		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
2114 	}
2115 
2116 	if (dct_high_range_enabled(pvt))
2117 		return ~dct_sel_high & 1;
2118 
2119 	return 0;
2120 }
2121 
2122 /* Convert the sys_addr to the normalized DCT address */
2123 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
2124 				 u64 sys_addr, bool hi_rng,
2125 				 u32 dct_sel_base_addr)
2126 {
2127 	u64 chan_off;
2128 	u64 dram_base		= get_dram_base(pvt, range);
2129 	u64 hole_off		= f10_dhar_offset(pvt);
2130 	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
2131 
2132 	if (hi_rng) {
2133 		/*
2134 		 * if
2135 		 * base address of high range is below 4Gb
2136 		 * (bits [47:27] at [31:11])
2137 		 * DRAM address space on this DCT is hoisted above 4Gb	&&
2138 		 * sys_addr > 4Gb
2139 		 *
2140 		 *	remove hole offset from sys_addr
2141 		 * else
2142 		 *	remove high range offset from sys_addr
2143 		 */
2144 		if ((!(dct_sel_base_addr >> 16) ||
2145 		     dct_sel_base_addr < dhar_base(pvt)) &&
2146 		    dhar_valid(pvt) &&
2147 		    (sys_addr >= BIT_64(32)))
2148 			chan_off = hole_off;
2149 		else
2150 			chan_off = dct_sel_base_off;
2151 	} else {
2152 		/*
2153 		 * if
2154 		 * we have a valid hole		&&
2155 		 * sys_addr > 4Gb
2156 		 *
2157 		 *	remove hole
2158 		 * else
2159 		 *	remove dram base to normalize to DCT address
2160 		 */
2161 		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
2162 			chan_off = hole_off;
2163 		else
2164 			chan_off = dram_base;
2165 	}
2166 
2167 	return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
2168 }
2169 
2170 /*
2171  * Check whether the csrow passed in is marked as SPARED; if so, return the
2172  * new spare row.
2173  */
2174 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
2175 {
2176 	int tmp_cs;
2177 
2178 	if (online_spare_swap_done(pvt, dct) &&
2179 	    csrow == online_spare_bad_dramcs(pvt, dct)) {
2180 
2181 		for_each_chip_select(tmp_cs, dct, pvt) {
2182 			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
2183 				csrow = tmp_cs;
2184 				break;
2185 			}
2186 		}
2187 	}
2188 	return csrow;
2189 }
2190 
2191 /*
2192  * Iterate over the DRAM DCT "base" and "mask" registers looking for a
2193  * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
2194  *
2195  * Return:
2196  *	-EINVAL:  NOT FOUND
2197  *	0..csrow = Chip-Select Row
2198  */
2199 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
2200 {
2201 	struct mem_ctl_info *mci;
2202 	struct amd64_pvt *pvt;
2203 	u64 cs_base, cs_mask;
2204 	int cs_found = -EINVAL;
2205 	int csrow;
2206 
2207 	mci = edac_mc_find(nid);
2208 	if (!mci)
2209 		return cs_found;
2210 
2211 	pvt = mci->pvt_info;
2212 
2213 	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
2214 
2215 	for_each_chip_select(csrow, dct, pvt) {
2216 		if (!csrow_enabled(csrow, dct, pvt))
2217 			continue;
2218 
2219 		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
2220 
2221 		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
2222 			 csrow, cs_base, cs_mask);
2223 
2224 		cs_mask = ~cs_mask;
2225 
2226 		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
2227 			 (in_addr & cs_mask), (cs_base & cs_mask));
2228 
2229 		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
2230 			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
2231 				cs_found =  csrow;
2232 				break;
2233 			}
2234 			cs_found = f10_process_possible_spare(pvt, dct, csrow);
2235 
2236 			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
2237 			break;
2238 		}
2239 	}
2240 	return cs_found;
2241 }
2242 
2243 /*
2244  * See F2x10C. Non-interleaved graphics framebuffer memory below the 16G
2245  * boundary is swapped with a region located at the bottom of memory so that
2246  * the GPU can use the interleaved region and thus two channels.
2247  */
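/*
 * For example (illustrative register values): with SwapBase = 0x02,
 * SwapLimit = 0x03 and RgnSize = 0x02 (all in 128MB units), accesses to
 * [256MB, 512MB) and to [0, 256MB) are exchanged by XORing address bits
 * [33:27] with SwapBase, i.e. sys_addr ^ ((u64)0x02 << 27).
 */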
2248 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
2249 {
2250 	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
2251 
2252 	if (pvt->fam == 0x10) {
2253 		/* only revC3 and revE have that feature */
2254 		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
2255 			return sys_addr;
2256 	}
2257 
2258 	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
2259 
2260 	if (!(swap_reg & 0x1))
2261 		return sys_addr;
2262 
2263 	swap_base	= (swap_reg >> 3) & 0x7f;
2264 	swap_limit	= (swap_reg >> 11) & 0x7f;
2265 	rgn_size	= (swap_reg >> 20) & 0x7f;
2266 	tmp_addr	= sys_addr >> 27;
2267 
2268 	if (!(sys_addr >> 34) &&
2269 	    (((tmp_addr >= swap_base) &&
2270 	     (tmp_addr <= swap_limit)) ||
2271 	     (tmp_addr < rgn_size)))
2272 		return sys_addr ^ (u64)swap_base << 27;
2273 
2274 	return sys_addr;
2275 }
2276 
2277 /* For a given @dram_range, check if @sys_addr falls within it. */
2278 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
2279 				  u64 sys_addr, int *chan_sel)
2280 {
2281 	int cs_found = -EINVAL;
2282 	u64 chan_addr;
2283 	u32 dct_sel_base;
2284 	u8 channel;
2285 	bool high_range = false;
2286 
2287 	u8 node_id    = dram_dst_node(pvt, range);
2288 	u8 intlv_en   = dram_intlv_en(pvt, range);
2289 	u32 intlv_sel = dram_intlv_sel(pvt, range);
2290 
2291 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2292 		 range, sys_addr, get_dram_limit(pvt, range));
2293 
2294 	if (dhar_valid(pvt) &&
2295 	    dhar_base(pvt) <= sys_addr &&
2296 	    sys_addr < BIT_64(32)) {
2297 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2298 			    sys_addr);
2299 		return -EINVAL;
2300 	}
2301 
2302 	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
2303 		return -EINVAL;
2304 
2305 	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
2306 
2307 	dct_sel_base = dct_sel_baseaddr(pvt);
2308 
2309 	/*
2310 	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
2311 	 * select between DCT0 and DCT1.
2312 	 */
2313 	if (dct_high_range_enabled(pvt) &&
2314 	   !dct_ganging_enabled(pvt) &&
2315 	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
2316 		high_range = true;
2317 
2318 	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
2319 
2320 	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
2321 					  high_range, dct_sel_base);
2322 
2323 	/* Remove node interleaving, see F1x120 */
2324 	if (intlv_en)
2325 		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
2326 			    (chan_addr & 0xfff);
2327 
2328 	/* remove channel interleave */
2329 	if (dct_interleave_enabled(pvt) &&
2330 	   !dct_high_range_enabled(pvt) &&
2331 	   !dct_ganging_enabled(pvt)) {
2332 
2333 		if (dct_sel_interleave_addr(pvt) != 1) {
2334 			if (dct_sel_interleave_addr(pvt) == 0x3)
2335 				/* hash 9 */
2336 				chan_addr = ((chan_addr >> 10) << 9) |
2337 					     (chan_addr & 0x1ff);
2338 			else
2339 				/* A[6] or hash 6 */
2340 				chan_addr = ((chan_addr >> 7) << 6) |
2341 					     (chan_addr & 0x3f);
2342 		} else
2343 			/* A[12] */
2344 			chan_addr = ((chan_addr >> 13) << 12) |
2345 				     (chan_addr & 0xfff);
2346 	}
2347 
2348 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
2349 
2350 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
2351 
2352 	if (cs_found >= 0)
2353 		*chan_sel = channel;
2354 
2355 	return cs_found;
2356 }
2357 
2358 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
2359 					u64 sys_addr, int *chan_sel)
2360 {
2361 	int cs_found = -EINVAL;
2362 	int num_dcts_intlv = 0;
2363 	u64 chan_addr, chan_offset;
2364 	u64 dct_base, dct_limit;
2365 	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
2366 	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
2367 
2368 	u64 dhar_offset		= f10_dhar_offset(pvt);
2369 	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
2370 	u8 node_id		= dram_dst_node(pvt, range);
2371 	u8 intlv_en		= dram_intlv_en(pvt, range);
2372 
2373 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
2374 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
2375 
2376 	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
2377 	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);
2378 
2379 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2380 		 range, sys_addr, get_dram_limit(pvt, range));
2381 
2382 	if (!(get_dram_base(pvt, range)  <= sys_addr) &&
2383 	    !(get_dram_limit(pvt, range) >= sys_addr))
2384 		return -EINVAL;
2385 
2386 	if (dhar_valid(pvt) &&
2387 	    dhar_base(pvt) <= sys_addr &&
2388 	    sys_addr < BIT_64(32)) {
2389 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2390 			    sys_addr);
2391 		return -EINVAL;
2392 	}
2393 
2394 	/* Verify sys_addr is within DCT Range. */
2395 	dct_base = (u64) dct_sel_baseaddr(pvt);
2396 	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
2397 
2398 	if (!(dct_cont_base_reg & BIT(0)) &&
2399 	    !(dct_base <= (sys_addr >> 27) &&
2400 	      dct_limit >= (sys_addr >> 27)))
2401 		return -EINVAL;
2402 
2403 	/* Verify the number of DCTs that participate in channel interleaving. */
2404 	num_dcts_intlv = (int) hweight8(intlv_en);
2405 
2406 	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
2407 		return -EINVAL;
2408 
2409 	if (pvt->model >= 0x60)
2410 		channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
2411 	else
2412 		channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
2413 						     num_dcts_intlv, dct_sel);
2414 
2415 	/* Verify we stay within the MAX number of channels allowed */
2416 	if (channel > 3)
2417 		return -EINVAL;
2418 
2419 	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
2420 
2421 	/* Get normalized DCT addr */
2422 	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
2423 		chan_offset = dhar_offset;
2424 	else
2425 		chan_offset = dct_base << 27;
2426 
2427 	chan_addr = sys_addr - chan_offset;
2428 
2429 	/* remove channel interleave */
2430 	if (num_dcts_intlv == 2) {
2431 		if (intlv_addr == 0x4)
2432 			chan_addr = ((chan_addr >> 9) << 8) |
2433 						(chan_addr & 0xff);
2434 		else if (intlv_addr == 0x5)
2435 			chan_addr = ((chan_addr >> 10) << 9) |
2436 						(chan_addr & 0x1ff);
2437 		else
2438 			return -EINVAL;
2439 
2440 	} else if (num_dcts_intlv == 4) {
2441 		if (intlv_addr == 0x4)
2442 			chan_addr = ((chan_addr >> 10) << 8) |
2443 							(chan_addr & 0xff);
2444 		else if (intlv_addr == 0x5)
2445 			chan_addr = ((chan_addr >> 11) << 9) |
2446 							(chan_addr & 0x1ff);
2447 		else
2448 			return -EINVAL;
2449 	}
2450 
2451 	if (dct_offset_en) {
2452 		amd64_read_pci_cfg(pvt->F1,
2453 				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
2454 				   &tmp);
2455 		chan_addr +=  (u64) ((tmp >> 11) & 0xfff) << 27;
2456 	}
2457 
2458 	f15h_select_dct(pvt, channel);
2459 
2460 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
2461 
2462 	/*
2463 	 * Find Chip select:
2464 	 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
2465 	 * there is support for 4 DCTs, but only 2 are currently functional.
2466 	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
2467 	 * pvt->csels[1]. So we need to use '1' here to get the correct info.
2468 	 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
2469 	 */
2470 	alias_channel =  (channel == 3) ? 1 : channel;
2471 
2472 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
2473 
2474 	if (cs_found >= 0)
2475 		*chan_sel = alias_channel;
2476 
2477 	return cs_found;
2478 }
2479 
2480 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
2481 					u64 sys_addr,
2482 					int *chan_sel)
2483 {
2484 	int cs_found = -EINVAL;
2485 	unsigned range;
2486 
2487 	for (range = 0; range < DRAM_RANGES; range++) {
2488 		if (!dram_rw(pvt, range))
2489 			continue;
2490 
2491 		if (pvt->fam == 0x15 && pvt->model >= 0x30)
2492 			cs_found = f15_m30h_match_to_this_node(pvt, range,
2493 							       sys_addr,
2494 							       chan_sel);
2495 
2496 		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
2497 			 (get_dram_limit(pvt, range) >= sys_addr)) {
2498 			cs_found = f1x_match_to_this_node(pvt, range,
2499 							  sys_addr, chan_sel);
2500 			if (cs_found >= 0)
2501 				break;
2502 		}
2503 	}
2504 	return cs_found;
2505 }
2506 
2507 /*
2508  * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
2509  * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
2510  *
2511  * The @sys_addr is usually an error address received from the hardware
2512  * (MCX_ADDR).
2513  */
2514 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2515 				     struct err_info *err)
2516 {
2517 	struct amd64_pvt *pvt = mci->pvt_info;
2518 
2519 	error_address_to_page_and_offset(sys_addr, err);
2520 
2521 	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2522 	if (err->csrow < 0) {
2523 		err->err_code = ERR_CSROW;
2524 		return;
2525 	}
2526 
2527 	/*
2528 	 * We need the syndromes for channel detection only when we're
2529 	 * ganged. Otherwise err->channel should already contain the channel
2530 	 * at this point.
2531 	 */
2532 	if (dct_ganging_enabled(pvt))
2533 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
2534 }
2535 
2536 /*
2537  * These are tables of eigenvectors (one per line) which can be used for the
2538  * construction of the syndrome tables. The modified syndrome search algorithm
2539  * uses those to find the symbol in error and thus the DIMM.
2540  *
2541  * Algorithm courtesy of Ross LaFetra from AMD.
2542  */
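/*
 * Worked example (purely illustrative): with x4 symbols, err_sym 2 is
 * described by x4_vectors[8..11] = { 0x0001, 0x0002, 0x0004, 0x0008 }. A
 * modified syndrome of 0x000a cannot be cancelled with the vectors of symbols
 * 0 or 1, but XORing 0x0002 and 0x0008 from symbol 2's vectors reduces it to
 * zero, so decode_syndrome() returns 2 and map_err_sym_to_channel(2, 4)
 * yields channel 2 >> 4 = 0.
 */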
2543 static const u16 x4_vectors[] = {
2544 	0x2f57, 0x1afe, 0x66cc, 0xdd88,
2545 	0x11eb, 0x3396, 0x7f4c, 0xeac8,
2546 	0x0001, 0x0002, 0x0004, 0x0008,
2547 	0x1013, 0x3032, 0x4044, 0x8088,
2548 	0x106b, 0x30d6, 0x70fc, 0xe0a8,
2549 	0x4857, 0xc4fe, 0x13cc, 0x3288,
2550 	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2551 	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2552 	0x15c1, 0x2a42, 0x89ac, 0x4758,
2553 	0x2b03, 0x1602, 0x4f0c, 0xca08,
2554 	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2555 	0x8ba7, 0x465e, 0x244c, 0x1cc8,
2556 	0x2b87, 0x164e, 0x642c, 0xdc18,
2557 	0x40b9, 0x80de, 0x1094, 0x20e8,
2558 	0x27db, 0x1eb6, 0x9dac, 0x7b58,
2559 	0x11c1, 0x2242, 0x84ac, 0x4c58,
2560 	0x1be5, 0x2d7a, 0x5e34, 0xa718,
2561 	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2562 	0x4c97, 0xc87e, 0x11fc, 0x33a8,
2563 	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2564 	0x16b3, 0x3d62, 0x4f34, 0x8518,
2565 	0x1e2f, 0x391a, 0x5cac, 0xf858,
2566 	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2567 	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2568 	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2569 	0x4397, 0xc27e, 0x17fc, 0x3ea8,
2570 	0x1617, 0x3d3e, 0x6464, 0xb8b8,
2571 	0x23ff, 0x12aa, 0xab6c, 0x56d8,
2572 	0x2dfb, 0x1ba6, 0x913c, 0x7328,
2573 	0x185d, 0x2ca6, 0x7914, 0x9e28,
2574 	0x171b, 0x3e36, 0x7d7c, 0xebe8,
2575 	0x4199, 0x82ee, 0x19f4, 0x2e58,
2576 	0x4807, 0xc40e, 0x130c, 0x3208,
2577 	0x1905, 0x2e0a, 0x5804, 0xac08,
2578 	0x213f, 0x132a, 0xadfc, 0x5ba8,
2579 	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
2580 };
2581 
2582 static const u16 x8_vectors[] = {
2583 	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2584 	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2585 	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
2586 	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
2587 	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
2588 	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
2589 	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
2590 	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
2591 	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
2592 	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
2593 	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
2594 	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
2595 	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
2596 	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
2597 	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
2598 	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
2599 	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
2600 	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
2601 	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
2602 };
2603 
2604 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
2605 			   unsigned v_dim)
2606 {
2607 	unsigned int i, err_sym;
2608 
2609 	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
2610 		u16 s = syndrome;
2611 		unsigned v_idx =  err_sym * v_dim;
2612 		unsigned v_end = (err_sym + 1) * v_dim;
2613 
2614 		/* walk over all 16 bits of the syndrome */
2615 		for (i = 1; i < (1U << 16); i <<= 1) {
2616 
2617 			/* if bit is set in that eigenvector... */
2618 			if (v_idx < v_end && vectors[v_idx] & i) {
2619 				u16 ev_comp = vectors[v_idx++];
2620 
2621 				/* ... and bit set in the modified syndrome, */
2622 				if (s & i) {
2623 					/* remove it. */
2624 					s ^= ev_comp;
2625 
2626 					if (!s)
2627 						return err_sym;
2628 				}
2629 
2630 			} else if (s & i)
2631 				/* can't get to zero, move to next symbol */
2632 				break;
2633 		}
2634 	}
2635 
2636 	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
2637 	return -1;
2638 }
2639 
2640 static int map_err_sym_to_channel(int err_sym, int sym_size)
2641 {
2642 	if (sym_size == 4)
2643 		switch (err_sym) {
2644 		case 0x20:
2645 		case 0x21:
2646 			return 0;
2647 		case 0x22:
2648 		case 0x23:
2649 			return 1;
2650 		default:
2651 			return err_sym >> 4;
2652 		}
2653 	/* x8 symbols */
2654 	else
2655 		switch (err_sym) {
2656 		/* imaginary bits not in a DIMM */
2657 		case 0x10:
2658 			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
2659 					  err_sym);
2660 			return -1;
2661 		case 0x11:
2662 			return 0;
2663 		case 0x12:
2664 			return 1;
2665 		default:
2666 			return err_sym >> 3;
2667 		}
2668 	return -1;
2669 }
2670 
2671 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
2672 {
2673 	struct amd64_pvt *pvt = mci->pvt_info;
2674 	int err_sym = -1;
2675 
2676 	if (pvt->ecc_sym_sz == 8)
2677 		err_sym = decode_syndrome(syndrome, x8_vectors,
2678 					  ARRAY_SIZE(x8_vectors),
2679 					  pvt->ecc_sym_sz);
2680 	else if (pvt->ecc_sym_sz == 4)
2681 		err_sym = decode_syndrome(syndrome, x4_vectors,
2682 					  ARRAY_SIZE(x4_vectors),
2683 					  pvt->ecc_sym_sz);
2684 	else {
2685 		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
2686 		return err_sym;
2687 	}
2688 
2689 	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
2690 }
2691 
2692 static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
2693 			    u8 ecc_type)
2694 {
2695 	enum hw_event_mc_err_type err_type;
2696 	const char *string;
2697 
2698 	if (ecc_type == 2)
2699 		err_type = HW_EVENT_ERR_CORRECTED;
2700 	else if (ecc_type == 1)
2701 		err_type = HW_EVENT_ERR_UNCORRECTED;
2702 	else if (ecc_type == 3)
2703 		err_type = HW_EVENT_ERR_DEFERRED;
2704 	else {
2705 		WARN(1, "Something is rotten in the state of Denmark.\n");
2706 		return;
2707 	}
2708 
2709 	switch (err->err_code) {
2710 	case DECODE_OK:
2711 		string = "";
2712 		break;
2713 	case ERR_NODE:
2714 		string = "Failed to map error addr to a node";
2715 		break;
2716 	case ERR_CSROW:
2717 		string = "Failed to map error addr to a csrow";
2718 		break;
2719 	case ERR_CHANNEL:
2720 		string = "Unknown syndrome - possible error reporting race";
2721 		break;
2722 	case ERR_SYND:
2723 		string = "MCA_SYND not valid - unknown syndrome and csrow";
2724 		break;
2725 	case ERR_NORM_ADDR:
2726 		string = "Cannot decode normalized address";
2727 		break;
2728 	default:
2729 		string = "WTF error";
2730 		break;
2731 	}
2732 
2733 	edac_mc_handle_error(err_type, mci, 1,
2734 			     err->page, err->offset, err->syndrome,
2735 			     err->csrow, err->channel, -1,
2736 			     string, "");
2737 }
2738 
2739 static inline void decode_bus_error(int node_id, struct mce *m)
2740 {
2741 	struct mem_ctl_info *mci;
2742 	struct amd64_pvt *pvt;
2743 	u8 ecc_type = (m->status >> 45) & 0x3;
2744 	u8 xec = XEC(m->status, 0x1f);
2745 	u16 ec = EC(m->status);
2746 	u64 sys_addr;
2747 	struct err_info err;
2748 
2749 	mci = edac_mc_find(node_id);
2750 	if (!mci)
2751 		return;
2752 
2753 	pvt = mci->pvt_info;
2754 
2755 	/* Bail out early if this was an 'observed' error */
2756 	if (PP(ec) == NBSL_PP_OBS)
2757 		return;
2758 
2759 	/* Do only ECC errors */
2760 	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2761 		return;
2762 
2763 	memset(&err, 0, sizeof(err));
2764 
2765 	sys_addr = get_error_address(pvt, m);
2766 
2767 	if (ecc_type == 2)
2768 		err.syndrome = extract_syndrome(m->status);
2769 
2770 	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2771 
2772 	__log_ecc_error(mci, &err, ecc_type);
2773 }
2774 
2775 /*
2776  * To find the UMC channel represented by this bank we need to match on its
2777  * instance_id. The instance_id of a bank is held in the lower 32 bits of its
2778  * IPID.
2779  *
2780  * Currently, we can derive the channel number by looking at the 6th nibble in
2781  * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
2782  * number.
2783  *
2784  * For DRAM ECC errors, the Chip Select number is given in bits [2:0] of
2785  * the MCA_SYND[ErrorInformation] field.
2786  */
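/*
 * For example (illustrative IPID value): if the lower 32 bits of MCA_IPID are
 * 0x00350000, the 6th nibble is 3, so (ipid & GENMASK(31, 0)) >> 20 = 3, i.e.
 * channel 3; and MCA_SYND[2:0] = 0x5 would mean Chip Select 5.
 */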
2787 static void umc_get_err_info(struct mce *m, struct err_info *err)
2788 {
2789 	err->channel = (m->ipid & GENMASK(31, 0)) >> 20;
2790 	err->csrow = m->synd & 0x7;
2791 }
2792 
2793 static void decode_umc_error(int node_id, struct mce *m)
2794 {
2795 	u8 ecc_type = (m->status >> 45) & 0x3;
2796 	struct mem_ctl_info *mci;
2797 	unsigned long sys_addr;
2798 	struct amd64_pvt *pvt;
2799 	struct atl_err a_err;
2800 	struct err_info err;
2801 
2802 	node_id = fixup_node_id(node_id, m);
2803 
2804 	mci = edac_mc_find(node_id);
2805 	if (!mci)
2806 		return;
2807 
2808 	pvt = mci->pvt_info;
2809 
2810 	memset(&err, 0, sizeof(err));
2811 
2812 	if (m->status & MCI_STATUS_DEFERRED)
2813 		ecc_type = 3;
2814 
2815 	if (!(m->status & MCI_STATUS_SYNDV)) {
2816 		err.err_code = ERR_SYND;
2817 		goto log_error;
2818 	}
2819 
2820 	if (ecc_type == 2) {
2821 		u8 length = (m->synd >> 18) & 0x3f;
2822 
2823 		if (length)
2824 			err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
2825 		else
2826 			err.err_code = ERR_CHANNEL;
2827 	}
2828 
2829 	pvt->ops->get_err_info(m, &err);
2830 
2831 	a_err.addr = m->addr;
2832 	a_err.ipid = m->ipid;
2833 	a_err.cpu  = m->extcpu;
2834 
2835 	sys_addr = amd_convert_umc_mca_addr_to_sys_addr(&a_err);
2836 	if (IS_ERR_VALUE(sys_addr)) {
2837 		err.err_code = ERR_NORM_ADDR;
2838 		goto log_error;
2839 	}
2840 
2841 	error_address_to_page_and_offset(sys_addr, &err);
2842 
2843 log_error:
2844 	__log_ecc_error(mci, &err, ecc_type);
2845 }
2846 
2847 /*
2848  * Use pvt->F3 which contains the F3 CPU PCI device to get the related
2849  * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
2850  */
2851 static int
2852 reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
2853 {
2854 	/* Reserve the ADDRESS MAP Device */
2855 	pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2856 	if (!pvt->F1) {
2857 		edac_dbg(1, "F1 not found: device 0x%x\n", pci_id1);
2858 		return -ENODEV;
2859 	}
2860 
2861 	/* Reserve the DCT Device */
2862 	pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2863 	if (!pvt->F2) {
2864 		pci_dev_put(pvt->F1);
2865 		pvt->F1 = NULL;
2866 
2867 		edac_dbg(1, "F2 not found: device 0x%x\n", pci_id2);
2868 		return -ENODEV;
2869 	}
2870 
2871 	if (!pci_ctl_dev)
2872 		pci_ctl_dev = &pvt->F2->dev;
2873 
2874 	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2875 	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2876 	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2877 
2878 	return 0;
2879 }
2880 
2881 static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
2882 {
2883 	pvt->ecc_sym_sz = 4;
2884 
2885 	if (pvt->fam >= 0x10) {
2886 		u32 tmp;
2887 
2888 		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2889 		/* F16h has only DCT0, so no need to read dbam1. */
2890 		if (pvt->fam != 0x16)
2891 			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
2892 
2893 		/* F10h, revD and later can do x8 ECC too. */
2894 		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2895 			pvt->ecc_sym_sz = 8;
2896 	}
2897 }
2898 
2899 /*
2900  * Retrieve the hardware registers of the memory controller.
2901  */
2902 static void umc_read_mc_regs(struct amd64_pvt *pvt)
2903 {
2904 	u8 nid = pvt->mc_node_id;
2905 	struct amd64_umc *umc;
2906 	u32 i, tmp, umc_base;
2907 
2908 	/* Read registers from each UMC */
2909 	for_each_umc(i) {
2910 
2911 		umc_base = get_umc_base(i);
2912 		umc = &pvt->umc[i];
2913 
2914 		if (!amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &tmp))
2915 			umc->dimm_cfg = tmp;
2916 
2917 		if (!amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &tmp))
2918 			umc->umc_cfg = tmp;
2919 
2920 		if (!amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &tmp))
2921 			umc->sdp_ctrl = tmp;
2922 
2923 		if (!amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &tmp))
2924 			umc->ecc_ctrl = tmp;
2925 
2926 		if (!amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &tmp))
2927 			umc->umc_cap_hi = tmp;
2928 	}
2929 }
2930 
2931 /*
2932  * Retrieve the hardware registers of the memory controller (this includes the
2933  * 'Address Map' and 'Misc' device regs)
2934  */
2935 static void dct_read_mc_regs(struct amd64_pvt *pvt)
2936 {
2937 	unsigned int range;
2938 	u64 msr_val;
2939 
2940 	/*
2941 	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2942 	 * those are Read-As-Zero.
2943 	 */
2944 	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2945 	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);
2946 
2947 	/* Check first whether TOP_MEM2 is enabled: */
2948 	rdmsrl(MSR_AMD64_SYSCFG, msr_val);
2949 	if (msr_val & BIT(21)) {
2950 		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2951 		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2952 	} else {
2953 		edac_dbg(0, "  TOP_MEM2 disabled\n");
2954 	}
2955 
2956 	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2957 
2958 	read_dram_ctl_register(pvt);
2959 
2960 	for (range = 0; range < DRAM_RANGES; range++) {
2961 		u8 rw;
2962 
2963 		/* read settings for this DRAM range */
2964 		read_dram_base_limit_regs(pvt, range);
2965 
2966 		rw = dram_rw(pvt, range);
2967 		if (!rw)
2968 			continue;
2969 
2970 		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2971 			 range,
2972 			 get_dram_base(pvt, range),
2973 			 get_dram_limit(pvt, range));
2974 
2975 		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2976 			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2977 			 (rw & 0x1) ? "R" : "-",
2978 			 (rw & 0x2) ? "W" : "-",
2979 			 dram_intlv_sel(pvt, range),
2980 			 dram_dst_node(pvt, range));
2981 	}
2982 
2983 	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2984 	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
2985 
2986 	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2987 
2988 	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
2989 	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
2990 
2991 	if (!dct_ganging_enabled(pvt)) {
2992 		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
2993 		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
2994 	}
2995 
2996 	determine_ecc_sym_sz(pvt);
2997 }
2998 
2999 /*
3000  * NOTE: CPU Revision Dependent code
3001  *
3002  * Input:
3003  *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
3004  *	k8 private pointer to -->
3005  *			DRAM Bank Address mapping register
3006  *			node_id
3007  *			DCL register where dual_channel_active is
3008  *
3009  * The DBAM register consists of 4 sets of 4 bits each definitions:
3010  *
3011  * Bits:	CSROWs
3012  * 0-3		CSROWs 0 and 1
3013  * 4-7		CSROWs 2 and 3
3014  * 8-11		CSROWs 4 and 5
3015  * 12-15	CSROWs 6 and 7
3016  *
3017  * Values range from: 0 to 15
3018  * The meaning of the values depends on CPU revision and dual-channel state;
3019  * see the relevant BKDG for more info.
3020  *
3021  * The memory controller provides for a total of only 8 CSROWs in its current
3022  * architecture. Each "pair" of CSROWs normally represents just one DIMM in
3023  * single-channel mode or two (2) DIMMs in dual-channel mode.
3024  *
3025  * The following code logic collapses the various tables for CSROW based on CPU
3026  * revision.
3027  *
3028  * Returns:
3029  *	The number of PAGE_SIZE pages that the specified CSROW number
3030  *	encompasses
3031  *
3032  */
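/*
 * For example (purely illustrative): csrow_nr = 5 is halved to 2, so
 * DBAM_DIMM(2, dbam) picks DBAM bits 8-11 (the CSROW 4/5 pair per the table
 * above) as the cs_mode handed to pvt->ops->dbam_to_cs().
 */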
3033 static u32 dct_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
3034 {
3035 	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
3036 	u32 cs_mode, nr_pages;
3037 
3038 	csrow_nr >>= 1;
3039 	cs_mode = DBAM_DIMM(csrow_nr, dbam);
3040 
3041 	nr_pages   = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
3042 	nr_pages <<= 20 - PAGE_SHIFT;
3043 
3044 	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
3045 		    csrow_nr, dct,  cs_mode);
3046 	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
3047 
3048 	return nr_pages;
3049 }
3050 
3051 static u32 umc_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
3052 {
3053 	int csrow_nr = csrow_nr_orig;
3054 	u32 cs_mode, nr_pages;
3055 
3056 	cs_mode = umc_get_cs_mode(csrow_nr >> 1, dct, pvt);
3057 
3058 	nr_pages   = umc_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr);
3059 	nr_pages <<= 20 - PAGE_SHIFT;
3060 
3061 	edac_dbg(0, "csrow: %d, channel: %d, cs_mode %d\n",
3062 		 csrow_nr_orig, dct,  cs_mode);
3063 	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
3064 
3065 	return nr_pages;
3066 }
3067 
3068 static void umc_init_csrows(struct mem_ctl_info *mci)
3069 {
3070 	struct amd64_pvt *pvt = mci->pvt_info;
3071 	enum edac_type edac_mode = EDAC_NONE;
3072 	enum dev_type dev_type = DEV_UNKNOWN;
3073 	struct dimm_info *dimm;
3074 	u8 umc, cs;
3075 
3076 	if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
3077 		edac_mode = EDAC_S16ECD16ED;
3078 		dev_type = DEV_X16;
3079 	} else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
3080 		edac_mode = EDAC_S8ECD8ED;
3081 		dev_type = DEV_X8;
3082 	} else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
3083 		edac_mode = EDAC_S4ECD4ED;
3084 		dev_type = DEV_X4;
3085 	} else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
3086 		edac_mode = EDAC_SECDED;
3087 	}
3088 
3089 	for_each_umc(umc) {
3090 		for_each_chip_select(cs, umc, pvt) {
3091 			if (!csrow_enabled(cs, umc, pvt))
3092 				continue;
3093 
3094 			dimm = mci->csrows[cs]->channels[umc]->dimm;
3095 
3096 			edac_dbg(1, "MC node: %d, csrow: %d\n",
3097 					pvt->mc_node_id, cs);
3098 
3099 			dimm->nr_pages = umc_get_csrow_nr_pages(pvt, umc, cs);
3100 			dimm->mtype = pvt->umc[umc].dram_type;
3101 			dimm->edac_mode = edac_mode;
3102 			dimm->dtype = dev_type;
3103 			dimm->grain = 64;
3104 		}
3105 	}
3106 }
3107 
3108 /*
3109  * Initialize the array of csrow attribute instances, based on the values
3110  * from pci config hardware registers.
3111  */
3112 static void dct_init_csrows(struct mem_ctl_info *mci)
3113 {
3114 	struct amd64_pvt *pvt = mci->pvt_info;
3115 	enum edac_type edac_mode = EDAC_NONE;
3116 	struct csrow_info *csrow;
3117 	struct dimm_info *dimm;
3118 	int nr_pages = 0;
3119 	int i, j;
3120 	u32 val;
3121 
3122 	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
3123 
3124 	pvt->nbcfg = val;
3125 
3126 	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
3127 		 pvt->mc_node_id, val,
3128 		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
3129 
3130 	/*
3131 	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
3132 	 */
3133 	for_each_chip_select(i, 0, pvt) {
3134 		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
3135 		bool row_dct1 = false;
3136 
3137 		if (pvt->fam != 0xf)
3138 			row_dct1 = !!csrow_enabled(i, 1, pvt);
3139 
3140 		if (!row_dct0 && !row_dct1)
3141 			continue;
3142 
3143 		csrow = mci->csrows[i];
3144 
3145 		edac_dbg(1, "MC node: %d, csrow: %d\n",
3146 			    pvt->mc_node_id, i);
3147 
3148 		if (row_dct0) {
3149 			nr_pages = dct_get_csrow_nr_pages(pvt, 0, i);
3150 			csrow->channels[0]->dimm->nr_pages = nr_pages;
3151 		}
3152 
3153 		/* K8 has only one DCT */
3154 		if (pvt->fam != 0xf && row_dct1) {
3155 			int row_dct1_pages = dct_get_csrow_nr_pages(pvt, 1, i);
3156 
3157 			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
3158 			nr_pages += row_dct1_pages;
3159 		}
3160 
3161 		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
3162 
3163 		/* Determine DIMM ECC mode: */
3164 		if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
3165 			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
3166 					? EDAC_S4ECD4ED
3167 					: EDAC_SECDED;
3168 		}
3169 
3170 		for (j = 0; j < pvt->max_mcs; j++) {
3171 			dimm = csrow->channels[j]->dimm;
3172 			dimm->mtype = pvt->dram_type;
3173 			dimm->edac_mode = edac_mode;
3174 			dimm->grain = 64;
3175 		}
3176 	}
3177 }
3178 
3179 /* get all cores on this DCT */
3180 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
3181 {
3182 	int cpu;
3183 
3184 	for_each_online_cpu(cpu)
3185 		if (topology_amd_node_id(cpu) == nid)
3186 			cpumask_set_cpu(cpu, mask);
3187 }
3188 
3189 /* check MCG_CTL on all the cpus on this node */
3190 static bool nb_mce_bank_enabled_on_node(u16 nid)
3191 {
3192 	cpumask_var_t mask;
3193 	int cpu, nbe;
3194 	bool ret = false;
3195 
3196 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
3197 		amd64_warn("%s: Error allocating mask\n", __func__);
3198 		return false;
3199 	}
3200 
3201 	get_cpus_on_this_dct_cpumask(mask, nid);
3202 
3203 	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
3204 
3205 	for_each_cpu(cpu, mask) {
3206 		struct msr *reg = per_cpu_ptr(msrs, cpu);
3207 		nbe = reg->l & MSR_MCGCTL_NBE;
3208 
3209 		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
3210 			 cpu, reg->q,
3211 			 (nbe ? "enabled" : "disabled"));
3212 
3213 		if (!nbe)
3214 			goto out;
3215 	}
3216 	ret = true;
3217 
3218 out:
3219 	free_cpumask_var(mask);
3220 	return ret;
3221 }
3222 
3223 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
3224 {
3225 	cpumask_var_t cmask;
3226 	int cpu;
3227 
3228 	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
3229 		amd64_warn("%s: error allocating mask\n", __func__);
3230 		return -ENOMEM;
3231 	}
3232 
3233 	get_cpus_on_this_dct_cpumask(cmask, nid);
3234 
3235 	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3236 
3237 	for_each_cpu(cpu, cmask) {
3238 
3239 		struct msr *reg = per_cpu_ptr(msrs, cpu);
3240 
3241 		if (on) {
3242 			if (reg->l & MSR_MCGCTL_NBE)
3243 				s->flags.nb_mce_enable = 1;
3244 
3245 			reg->l |= MSR_MCGCTL_NBE;
3246 		} else {
3247 			/*
3248 			 * Turn off NB MCE reporting only when it was off before
3249 			 */
3250 			if (!s->flags.nb_mce_enable)
3251 				reg->l &= ~MSR_MCGCTL_NBE;
3252 		}
3253 	}
3254 	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3255 
3256 	free_cpumask_var(cmask);
3257 
3258 	return 0;
3259 }
3260 
3261 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3262 				       struct pci_dev *F3)
3263 {
3264 	bool ret = true;
3265 	u32 value, mask = 0x3;		/* UECC/CECC enable */
3266 
3267 	if (toggle_ecc_err_reporting(s, nid, ON)) {
3268 		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
3269 		return false;
3270 	}
3271 
3272 	amd64_read_pci_cfg(F3, NBCTL, &value);
3273 
3274 	s->old_nbctl   = value & mask;
3275 	s->nbctl_valid = true;
3276 
3277 	value |= mask;
3278 	amd64_write_pci_cfg(F3, NBCTL, value);
3279 
3280 	amd64_read_pci_cfg(F3, NBCFG, &value);
3281 
3282 	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3283 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
3284 
3285 	if (!(value & NBCFG_ECC_ENABLE)) {
3286 		amd64_warn("DRAM ECC disabled on this node, enabling...\n");
3287 
3288 		s->flags.nb_ecc_prev = 0;
3289 
3290 		/* Attempt to turn on DRAM ECC Enable */
3291 		value |= NBCFG_ECC_ENABLE;
3292 		amd64_write_pci_cfg(F3, NBCFG, value);
3293 
3294 		amd64_read_pci_cfg(F3, NBCFG, &value);
3295 
3296 		if (!(value & NBCFG_ECC_ENABLE)) {
3297 			amd64_warn("Hardware rejected DRAM ECC enable, "
3298 				   "check memory DIMM configuration.\n");
3299 			ret = false;
3300 		} else {
3301 			amd64_info("Hardware accepted DRAM ECC Enable\n");
3302 		}
3303 	} else {
3304 		s->flags.nb_ecc_prev = 1;
3305 	}
3306 
3307 	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3308 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
3309 
3310 	return ret;
3311 }
3312 
3313 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3314 					struct pci_dev *F3)
3315 {
3316 	u32 value, mask = 0x3;		/* UECC/CECC enable */
3317 
3318 	if (!s->nbctl_valid)
3319 		return;
3320 
3321 	amd64_read_pci_cfg(F3, NBCTL, &value);
3322 	value &= ~mask;
3323 	value |= s->old_nbctl;
3324 
3325 	amd64_write_pci_cfg(F3, NBCTL, value);
3326 
3327 	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
3328 	if (!s->flags.nb_ecc_prev) {
3329 		amd64_read_pci_cfg(F3, NBCFG, &value);
3330 		value &= ~NBCFG_ECC_ENABLE;
3331 		amd64_write_pci_cfg(F3, NBCFG, value);
3332 	}
3333 
3334 	/* restore the NB Enable MCGCTL bit */
3335 	if (toggle_ecc_err_reporting(s, nid, OFF))
3336 		amd64_warn("Error restoring NB MCGCTL settings!\n");
3337 }
3338 
3339 static bool dct_ecc_enabled(struct amd64_pvt *pvt)
3340 {
3341 	u16 nid = pvt->mc_node_id;
3342 	bool nb_mce_en = false;
3343 	u8 ecc_en = 0;
3344 	u32 value;
3345 
3346 	amd64_read_pci_cfg(pvt->F3, NBCFG, &value);
3347 
3348 	ecc_en = !!(value & NBCFG_ECC_ENABLE);
3349 
3350 	nb_mce_en = nb_mce_bank_enabled_on_node(nid);
3351 	if (!nb_mce_en)
3352 		edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
3353 			 MSR_IA32_MCG_CTL, nid);
3354 
3355 	edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
3356 
3357 	if (!ecc_en || !nb_mce_en)
3358 		return false;
3359 	else
3360 		return true;
3361 }
3362 
3363 static bool umc_ecc_enabled(struct amd64_pvt *pvt)
3364 {
3365 	u8 umc_en_mask = 0, ecc_en_mask = 0;
3366 	u16 nid = pvt->mc_node_id;
3367 	struct amd64_umc *umc;
3368 	u8 ecc_en = 0, i;
3369 
3370 	for_each_umc(i) {
3371 		umc = &pvt->umc[i];
3372 
3373 		/* Only check enabled UMCs. */
3374 		if (!(umc->sdp_ctrl & UMC_SDP_INIT))
3375 			continue;
3376 
3377 		umc_en_mask |= BIT(i);
3378 
3379 		if (umc->umc_cap_hi & UMC_ECC_ENABLED)
3380 			ecc_en_mask |= BIT(i);
3381 	}
3382 
3383 	/* Check whether at least one UMC is enabled: */
3384 	if (umc_en_mask)
3385 		ecc_en = umc_en_mask == ecc_en_mask;
3386 	else
3387 		edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
3388 
3389 	edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
3390 
3391 	if (!ecc_en)
3392 		return false;
3393 	else
3394 		return true;
3395 }
3396 
3397 static inline void
3398 umc_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
3399 {
3400 	u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
3401 
3402 	for_each_umc(i) {
3403 		if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3404 			ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3405 			cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3406 
3407 			dev_x4  &= !!(pvt->umc[i].dimm_cfg & BIT(6));
3408 			dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
3409 		}
3410 	}
3411 
3412 	/* Set chipkill only if ECC is enabled: */
3413 	if (ecc_en) {
3414 		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3415 
3416 		if (!cpk_en)
3417 			return;
3418 
3419 		if (dev_x4)
3420 			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3421 		else if (dev_x16)
3422 			mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
3423 		else
3424 			mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
3425 	}
3426 }
3427 
3428 static void dct_setup_mci_misc_attrs(struct mem_ctl_info *mci)
3429 {
3430 	struct amd64_pvt *pvt = mci->pvt_info;
3431 
3432 	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
3433 	mci->edac_ctl_cap	= EDAC_FLAG_NONE;
3434 
3435 	if (pvt->nbcap & NBCAP_SECDED)
3436 		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3437 
3438 	if (pvt->nbcap & NBCAP_CHIPKILL)
3439 		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3440 
3441 	mci->edac_cap		= dct_determine_edac_cap(pvt);
3442 	mci->mod_name		= EDAC_MOD_STR;
3443 	mci->ctl_name		= pvt->ctl_name;
3444 	mci->dev_name		= pci_name(pvt->F3);
3445 	mci->ctl_page_to_phys	= NULL;
3446 
3447 	/* memory scrubber interface */
3448 	mci->set_sdram_scrub_rate = set_scrub_rate;
3449 	mci->get_sdram_scrub_rate = get_scrub_rate;
3450 
3451 	dct_init_csrows(mci);
3452 }
3453 
3454 static void umc_setup_mci_misc_attrs(struct mem_ctl_info *mci)
3455 {
3456 	struct amd64_pvt *pvt = mci->pvt_info;
3457 
3458 	mci->mtype_cap		= MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
3459 	mci->edac_ctl_cap	= EDAC_FLAG_NONE;
3460 
3461 	umc_determine_edac_ctl_cap(mci, pvt);
3462 
3463 	mci->edac_cap		= umc_determine_edac_cap(pvt);
3464 	mci->mod_name		= EDAC_MOD_STR;
3465 	mci->ctl_name		= pvt->ctl_name;
3466 	mci->dev_name		= pci_name(pvt->F3);
3467 	mci->ctl_page_to_phys	= NULL;
3468 
3469 	umc_init_csrows(mci);
3470 }
3471 
3472 static int dct_hw_info_get(struct amd64_pvt *pvt)
3473 {
3474 	int ret = reserve_mc_sibling_devs(pvt, pvt->f1_id, pvt->f2_id);
3475 
3476 	if (ret)
3477 		return ret;
3478 
3479 	dct_prep_chip_selects(pvt);
3480 	dct_read_base_mask(pvt);
3481 	dct_read_mc_regs(pvt);
3482 	dct_determine_memory_type(pvt);
3483 
3484 	return 0;
3485 }
3486 
3487 static int umc_hw_info_get(struct amd64_pvt *pvt)
3488 {
3489 	pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
3490 	if (!pvt->umc)
3491 		return -ENOMEM;
3492 
3493 	umc_prep_chip_selects(pvt);
3494 	umc_read_base_mask(pvt);
3495 	umc_read_mc_regs(pvt);
3496 	umc_determine_memory_type(pvt);
3497 
3498 	return 0;
3499 }
3500 
3501 /*
3502  * The CPUs have one channel per UMC, so UMC number is equivalent to a
3503  * channel number. The GPUs have 8 channels per UMC, so the UMC number no
3504  * longer works as a channel number.
3505  *
3506  * The channel number within a GPU UMC is given in MCA_IPID[15:12].
3507  * However, the IDs are split such that two UMC values map to a single
3508  * UMC, and the channel numbers are split into two groups of four.
3509  *
3510  * Refer to comment on gpu_get_umc_base().
3511  *
3512  * For example,
3513  * UMC0 CH[3:0] = 0x0005[3:0]000
3514  * UMC0 CH[7:4] = 0x0015[3:0]000
3515  * UMC1 CH[3:0] = 0x0025[3:0]000
3516  * UMC1 CH[7:4] = 0x0035[3:0]000
3517  */
3518 static void gpu_get_err_info(struct mce *m, struct err_info *err)
3519 {
3520 	u8 ch = (m->ipid & GENMASK(31, 0)) >> 20;
3521 	u8 phy = ((m->ipid >> 12) & 0xf);
3522 
3523 	err->channel = ch % 2 ? phy + 4 : phy;
3524 	err->csrow = phy;
3525 }
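
/*
 * Worked example (illustrative only; the IPID value is hypothetical but
 * follows the layout documented above): for m->ipid = 0x00151000,
 * ch  = (0x00151000 & GENMASK(31, 0)) >> 20 = 0x1 (odd), and
 * phy = (0x00151000 >> 12) & 0xf = 0x1, so the reported channel is
 * phy + 4 = 5 and the csrow is 1, i.e. the "UMC0 CH[7:4]" pattern with
 * X = 1. An even 'ch' (e.g. m->ipid = 0x00051000) selects the lower
 * group instead and yields channel 1.
 */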
3526 
3527 static int gpu_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
3528 				    unsigned int cs_mode, int csrow_nr)
3529 {
3530 	u32 addr_mask_orig = pvt->csels[umc].csmasks[csrow_nr];
3531 
3532 	return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, csrow_nr >> 1);
3533 }
3534 
3535 static void gpu_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
3536 {
3537 	int size, cs_mode, cs = 0;
3538 
3539 	edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
3540 
3541 	cs_mode = CS_EVEN_PRIMARY | CS_ODD_PRIMARY;
3542 
3543 	for_each_chip_select(cs, ctrl, pvt) {
3544 		size = gpu_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs);
3545 		amd64_info(EDAC_MC ": %d: %5dMB\n", cs, size);
3546 	}
3547 }
3548 
3549 static void gpu_dump_misc_regs(struct amd64_pvt *pvt)
3550 {
3551 	struct amd64_umc *umc;
3552 	u32 i;
3553 
3554 	for_each_umc(i) {
3555 		umc = &pvt->umc[i];
3556 
3557 		edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
3558 		edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
3559 		edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
3560 		edac_dbg(1, "UMC%d All HBMs support ECC: yes\n", i);
3561 
3562 		gpu_debug_display_dimm_sizes(pvt, i);
3563 	}
3564 }
3565 
3566 static u32 gpu_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
3567 {
3568 	u32 nr_pages;
3569 	int cs_mode = CS_EVEN_PRIMARY | CS_ODD_PRIMARY;
3570 
3571 	nr_pages   = gpu_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr);
3572 	nr_pages <<= 20 - PAGE_SHIFT;
3573 
3574 	edac_dbg(0, "csrow: %d, channel: %d\n", csrow_nr, dct);
3575 	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
3576 
3577 	return nr_pages;
3578 }
3579 
3580 static void gpu_init_csrows(struct mem_ctl_info *mci)
3581 {
3582 	struct amd64_pvt *pvt = mci->pvt_info;
3583 	struct dimm_info *dimm;
3584 	u8 umc, cs;
3585 
3586 	for_each_umc(umc) {
3587 		for_each_chip_select(cs, umc, pvt) {
3588 			if (!csrow_enabled(cs, umc, pvt))
3589 				continue;
3590 
3591 			dimm = mci->csrows[umc]->channels[cs]->dimm;
3592 
3593 			edac_dbg(1, "MC node: %d, csrow: %d\n",
3594 				 pvt->mc_node_id, cs);
3595 
3596 			dimm->nr_pages = gpu_get_csrow_nr_pages(pvt, umc, cs);
3597 			dimm->edac_mode = EDAC_SECDED;
3598 			dimm->mtype = pvt->dram_type;
3599 			dimm->dtype = DEV_X16;
3600 			dimm->grain = 64;
3601 		}
3602 	}
3603 }
3604 
3605 static void gpu_setup_mci_misc_attrs(struct mem_ctl_info *mci)
3606 {
3607 	struct amd64_pvt *pvt = mci->pvt_info;
3608 
3609 	mci->mtype_cap		= MEM_FLAG_HBM2;
3610 	mci->edac_ctl_cap	= EDAC_FLAG_SECDED;
3611 
3612 	mci->edac_cap		= EDAC_FLAG_EC;
3613 	mci->mod_name		= EDAC_MOD_STR;
3614 	mci->ctl_name		= pvt->ctl_name;
3615 	mci->dev_name		= pci_name(pvt->F3);
3616 	mci->ctl_page_to_phys	= NULL;
3617 
3618 	gpu_init_csrows(mci);
3619 }
3620 
3621 /* ECC is enabled by default on GPU nodes */
3622 static bool gpu_ecc_enabled(struct amd64_pvt *pvt)
3623 {
3624 	return true;
3625 }
3626 
3627 static inline u32 gpu_get_umc_base(struct amd64_pvt *pvt, u8 umc, u8 channel)
3628 {
3629 	/*
3630 	 * On CPUs, there is one channel per UMC, so UMC numbering equals
3631 	 * channel numbering. On GPUs, there are eight channels per UMC,
3632 	 * so the channel numbering is different from UMC numbering.
3633 	 *
3634 	 * On CPU nodes channels are selected in 6th nibble
3635 	 * UMC chY[3:0]= [(chY*2 + 1) : (chY*2)]50000;
3636 	 *
3637 	 * On GPU nodes channels are selected in 3rd nibble
3638 	 * HBM chX[3:0]= [Y  ]5X[3:0]000;
3639 	 * HBM chX[7:4]= [Y+1]5X[3:0]000
3640 	 *
3641 	 * On MI300 APU nodes, same as GPU nodes but channels are selected
3642 	 * in the base address of 0x90000
3643 	 */
3644 	umc *= 2;
3645 
3646 	if (channel >= 4)
3647 		umc++;
3648 
3649 	return pvt->gpu_umc_base + (umc << 20) + ((channel % 4) << 12);
3650 }
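
/*
 * Worked example (illustrative only, assuming the MI200 base of 0x50000):
 * gpu_get_umc_base(pvt, 1, 5) doubles the UMC index to 2, increments it
 * to 3 because channel 5 falls in the upper group of four, and returns
 * 0x50000 + (3 << 20) + ((5 % 4) << 12) = 0x351000, i.e. the
 * "UMC1 CH[7:4]" pattern 0x0035[3:0]000 with X = 1. On an MI300 APU node
 * (pvt->gpu_umc_base = 0x90000) the same call yields 0x391000.
 */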
3651 
3652 static void gpu_read_mc_regs(struct amd64_pvt *pvt)
3653 {
3654 	u8 nid = pvt->mc_node_id;
3655 	struct amd64_umc *umc;
3656 	u32 i, tmp, umc_base;
3657 
3658 	/* Read registers from each UMC */
3659 	for_each_umc(i) {
3660 		umc_base = gpu_get_umc_base(pvt, i, 0);
3661 		umc = &pvt->umc[i];
3662 
3663 		if (!amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &tmp))
3664 			umc->umc_cfg = tmp;
3665 
3666 		if (!amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &tmp))
3667 			umc->sdp_ctrl = tmp;
3668 
3669 		if (!amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &tmp))
3670 			umc->ecc_ctrl = tmp;
3671 	}
3672 }
3673 
3674 static void gpu_read_base_mask(struct amd64_pvt *pvt)
3675 {
3676 	u32 base_reg, mask_reg;
3677 	u32 *base, *mask;
3678 	int umc, cs;
3679 
3680 	for_each_umc(umc) {
3681 		for_each_chip_select(cs, umc, pvt) {
3682 			base_reg = gpu_get_umc_base(pvt, umc, cs) + UMCCH_BASE_ADDR;
3683 			base = &pvt->csels[umc].csbases[cs];
3684 
3685 			if (!amd_smn_read(pvt->mc_node_id, base_reg, base)) {
3686 				edac_dbg(0, "  DCSB%d[%d]=0x%08x reg: 0x%x\n",
3687 					 umc, cs, *base, base_reg);
3688 			}
3689 
3690 			mask_reg = gpu_get_umc_base(pvt, umc, cs) + UMCCH_ADDR_MASK;
3691 			mask = &pvt->csels[umc].csmasks[cs];
3692 
3693 			if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask)) {
3694 				edac_dbg(0, "  DCSM%d[%d]=0x%08x reg: 0x%x\n",
3695 					 umc, cs, *mask, mask_reg);
3696 			}
3697 		}
3698 	}
3699 }
3700 
3701 static void gpu_prep_chip_selects(struct amd64_pvt *pvt)
3702 {
3703 	int umc;
3704 
3705 	for_each_umc(umc) {
3706 		pvt->csels[umc].b_cnt = 8;
3707 		pvt->csels[umc].m_cnt = 8;
3708 	}
3709 }
3710 
3711 static int gpu_hw_info_get(struct amd64_pvt *pvt)
3712 {
3713 	int ret;
3714 
3715 	ret = gpu_get_node_map(pvt);
3716 	if (ret)
3717 		return ret;
3718 
3719 	pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
3720 	if (!pvt->umc)
3721 		return -ENOMEM;
3722 
3723 	gpu_prep_chip_selects(pvt);
3724 	gpu_read_base_mask(pvt);
3725 	gpu_read_mc_regs(pvt);
3726 
3727 	return 0;
3728 }
3729 
3730 static void hw_info_put(struct amd64_pvt *pvt)
3731 {
3732 	pci_dev_put(pvt->F1);
3733 	pci_dev_put(pvt->F2);
3734 	kfree(pvt->umc);
3735 }
3736 
3737 static struct low_ops umc_ops = {
3738 	.hw_info_get			= umc_hw_info_get,
3739 	.ecc_enabled			= umc_ecc_enabled,
3740 	.setup_mci_misc_attrs		= umc_setup_mci_misc_attrs,
3741 	.dump_misc_regs			= umc_dump_misc_regs,
3742 	.get_err_info			= umc_get_err_info,
3743 };
3744 
3745 static struct low_ops gpu_ops = {
3746 	.hw_info_get			= gpu_hw_info_get,
3747 	.ecc_enabled			= gpu_ecc_enabled,
3748 	.setup_mci_misc_attrs		= gpu_setup_mci_misc_attrs,
3749 	.dump_misc_regs			= gpu_dump_misc_regs,
3750 	.get_err_info			= gpu_get_err_info,
3751 };
3752 
3753 /* Use Family 16h versions for defaults and adjust as needed below. */
3754 static struct low_ops dct_ops = {
3755 	.map_sysaddr_to_csrow		= f1x_map_sysaddr_to_csrow,
3756 	.dbam_to_cs			= f16_dbam_to_chip_select,
3757 	.hw_info_get			= dct_hw_info_get,
3758 	.ecc_enabled			= dct_ecc_enabled,
3759 	.setup_mci_misc_attrs		= dct_setup_mci_misc_attrs,
3760 	.dump_misc_regs			= dct_dump_misc_regs,
3761 };
3762 
3763 static int per_family_init(struct amd64_pvt *pvt)
3764 {
3765 	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
3766 	pvt->stepping	= boot_cpu_data.x86_stepping;
3767 	pvt->model	= boot_cpu_data.x86_model;
3768 	pvt->fam	= boot_cpu_data.x86;
3769 	pvt->max_mcs	= 2;
3770 
3771 	/*
3772 	 * Decide on which ops group to use here and do any family/model
3773 	 * overrides below.
3774 	 */
3775 	if (pvt->fam >= 0x17)
3776 		pvt->ops = &umc_ops;
3777 	else
3778 		pvt->ops = &dct_ops;
3779 
3780 	switch (pvt->fam) {
3781 	case 0xf:
3782 		pvt->ctl_name				= (pvt->ext_model >= K8_REV_F) ?
3783 							  "K8 revF or later" : "K8 revE or earlier";
3784 		pvt->f1_id				= PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP;
3785 		pvt->f2_id				= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL;
3786 		pvt->ops->map_sysaddr_to_csrow		= k8_map_sysaddr_to_csrow;
3787 		pvt->ops->dbam_to_cs			= k8_dbam_to_chip_select;
3788 		break;
3789 
3790 	case 0x10:
3791 		pvt->ctl_name				= "F10h";
3792 		pvt->f1_id				= PCI_DEVICE_ID_AMD_10H_NB_MAP;
3793 		pvt->f2_id				= PCI_DEVICE_ID_AMD_10H_NB_DRAM;
3794 		pvt->ops->dbam_to_cs			= f10_dbam_to_chip_select;
3795 		break;
3796 
3797 	case 0x15:
3798 		switch (pvt->model) {
3799 		case 0x30:
3800 			pvt->ctl_name			= "F15h_M30h";
3801 			pvt->f1_id			= PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
3802 			pvt->f2_id			= PCI_DEVICE_ID_AMD_15H_M30H_NB_F2;
3803 			break;
3804 		case 0x60:
3805 			pvt->ctl_name			= "F15h_M60h";
3806 			pvt->f1_id			= PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
3807 			pvt->f2_id			= PCI_DEVICE_ID_AMD_15H_M60H_NB_F2;
3808 			pvt->ops->dbam_to_cs		= f15_m60h_dbam_to_chip_select;
3809 			break;
3810 		case 0x13:
3811 			/* Richland is a client-only part; not supported. */
3812 			return -ENODEV;
3813 		default:
3814 			pvt->ctl_name			= "F15h";
3815 			pvt->f1_id			= PCI_DEVICE_ID_AMD_15H_NB_F1;
3816 			pvt->f2_id			= PCI_DEVICE_ID_AMD_15H_NB_F2;
3817 			pvt->ops->dbam_to_cs		= f15_dbam_to_chip_select;
3818 			break;
3819 		}
3820 		break;
3821 
3822 	case 0x16:
3823 		switch (pvt->model) {
3824 		case 0x30:
3825 			pvt->ctl_name			= "F16h_M30h";
3826 			pvt->f1_id			= PCI_DEVICE_ID_AMD_16H_M30H_NB_F1;
3827 			pvt->f2_id			= PCI_DEVICE_ID_AMD_16H_M30H_NB_F2;
3828 			break;
3829 		default:
3830 			pvt->ctl_name			= "F16h";
3831 			pvt->f1_id			= PCI_DEVICE_ID_AMD_16H_NB_F1;
3832 			pvt->f2_id			= PCI_DEVICE_ID_AMD_16H_NB_F2;
3833 			break;
3834 		}
3835 		break;
3836 
3837 	case 0x17:
3838 		switch (pvt->model) {
3839 		case 0x10 ... 0x2f:
3840 			pvt->ctl_name			= "F17h_M10h";
3841 			break;
3842 		case 0x30 ... 0x3f:
3843 			pvt->ctl_name			= "F17h_M30h";
3844 			pvt->max_mcs			= 8;
3845 			break;
3846 		case 0x60 ... 0x6f:
3847 			pvt->ctl_name			= "F17h_M60h";
3848 			break;
3849 		case 0x70 ... 0x7f:
3850 			pvt->ctl_name			= "F17h_M70h";
3851 			break;
3852 		default:
3853 			pvt->ctl_name			= "F17h";
3854 			break;
3855 		}
3856 		break;
3857 
3858 	case 0x18:
3859 		pvt->ctl_name				= "F18h";
3860 		break;
3861 
3862 	case 0x19:
3863 		switch (pvt->model) {
3864 		case 0x00 ... 0x0f:
3865 			pvt->ctl_name			= "F19h";
3866 			pvt->max_mcs			= 8;
3867 			break;
3868 		case 0x10 ... 0x1f:
3869 			pvt->ctl_name			= "F19h_M10h";
3870 			pvt->max_mcs			= 12;
3871 			pvt->flags.zn_regs_v2		= 1;
3872 			break;
3873 		case 0x20 ... 0x2f:
3874 			pvt->ctl_name			= "F19h_M20h";
3875 			break;
3876 		case 0x30 ... 0x3f:
3877 			if (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) {
3878 				pvt->ctl_name		= "MI200";
3879 				pvt->max_mcs		= 4;
3880 				pvt->dram_type		= MEM_HBM2;
3881 				pvt->gpu_umc_base	= 0x50000;
3882 				pvt->ops		= &gpu_ops;
3883 			} else {
3884 				pvt->ctl_name		= "F19h_M30h";
3885 				pvt->max_mcs		= 8;
3886 			}
3887 			break;
3888 		case 0x50 ... 0x5f:
3889 			pvt->ctl_name			= "F19h_M50h";
3890 			break;
3891 		case 0x60 ... 0x6f:
3892 			pvt->ctl_name			= "F19h_M60h";
3893 			pvt->flags.zn_regs_v2		= 1;
3894 			break;
3895 		case 0x70 ... 0x7f:
3896 			pvt->ctl_name			= "F19h_M70h";
3897 			pvt->flags.zn_regs_v2		= 1;
3898 			break;
3899 		case 0x90 ... 0x9f:
3900 			pvt->ctl_name			= "F19h_M90h";
3901 			pvt->max_mcs			= 4;
3902 			pvt->dram_type			= MEM_HBM3;
3903 			pvt->gpu_umc_base		= 0x90000;
3904 			pvt->ops			= &gpu_ops;
3905 			break;
3906 		case 0xa0 ... 0xaf:
3907 			pvt->ctl_name			= "F19h_MA0h";
3908 			pvt->max_mcs			= 12;
3909 			pvt->flags.zn_regs_v2		= 1;
3910 			break;
3911 		}
3912 		break;
3913 
3914 	case 0x1A:
3915 		switch (pvt->model) {
3916 		case 0x00 ... 0x1f:
3917 			pvt->ctl_name			= "F1Ah";
3918 			pvt->max_mcs			= 12;
3919 			pvt->flags.zn_regs_v2		= 1;
3920 			break;
3921 		case 0x40 ... 0x4f:
3922 			pvt->ctl_name			= "F1Ah_M40h";
3923 			pvt->flags.zn_regs_v2		= 1;
3924 			break;
3925 		}
3926 		break;
3927 
3928 	default:
3929 		amd64_err("Unsupported family!\n");
3930 		return -ENODEV;
3931 	}
3932 
3933 	return 0;
3934 }
3935 
3936 static const struct attribute_group *amd64_edac_attr_groups[] = {
3937 #ifdef CONFIG_EDAC_DEBUG
3938 	&dbg_group,
3939 	&inj_group,
3940 #endif
3941 	NULL
3942 };
3943 
3944 /*
3945  * For heterogeneous and APU models, the EDAC CHIP_SELECT and CHANNEL
3946  * layers are swapped so that the GPU topology fits the two-layer scheme.
3947  */
3948 static unsigned int get_layer_size(struct amd64_pvt *pvt, u8 layer)
3949 {
3950 	bool is_gpu = (pvt->ops == &gpu_ops);
3951 
3952 	if (!layer)
3953 		return is_gpu ? pvt->max_mcs
3954 			      : pvt->csels[0].b_cnt;
3955 	else
3956 		return is_gpu ? pvt->csels[0].b_cnt
3957 			      : pvt->max_mcs;
3958 }
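
/*
 * Sketch of the resulting layer sizes (hypothetical counts): on a CPU
 * node with pvt->max_mcs = 12 and pvt->csels[0].b_cnt = 4, layer 0
 * (CHIP_SELECT) is sized 4 and layer 1 (CHANNEL) is sized 12. On a GPU
 * node using gpu_ops (max_mcs = 4, b_cnt = 8 from gpu_prep_chip_selects())
 * the selection flips: layer 0 is sized max_mcs = 4 and layer 1 is sized
 * b_cnt = 8.
 */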
3959 
3960 static int init_one_instance(struct amd64_pvt *pvt)
3961 {
3962 	struct mem_ctl_info *mci = NULL;
3963 	struct edac_mc_layer layers[2];
3964 	int ret = -ENOMEM;
3965 
3966 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
3967 	layers[0].size = get_layer_size(pvt, 0);
3968 	layers[0].is_virt_csrow = true;
3969 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
3970 	layers[1].size = get_layer_size(pvt, 1);
3971 	layers[1].is_virt_csrow = false;
3972 
3973 	mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
3974 	if (!mci)
3975 		return ret;
3976 
3977 	mci->pvt_info = pvt;
3978 	mci->pdev = &pvt->F3->dev;
3979 
3980 	pvt->ops->setup_mci_misc_attrs(mci);
3981 
3982 	ret = -ENODEV;
3983 	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
3984 		edac_dbg(1, "failed edac_mc_add_mc()\n");
3985 		edac_mc_free(mci);
3986 		return ret;
3987 	}
3988 
3989 	return 0;
3990 }
3991 
3992 static bool instance_has_memory(struct amd64_pvt *pvt)
3993 {
3994 	bool cs_enabled = false;
3995 	int cs = 0, dct = 0;
3996 
3997 	for (dct = 0; dct < pvt->max_mcs; dct++) {
3998 		for_each_chip_select(cs, dct, pvt)
3999 			cs_enabled |= csrow_enabled(cs, dct, pvt);
4000 	}
4001 
4002 	return cs_enabled;
4003 }
4004 
4005 static int probe_one_instance(unsigned int nid)
4006 {
4007 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
4008 	struct amd64_pvt *pvt = NULL;
4009 	struct ecc_settings *s;
4010 	int ret;
4011 
4012 	ret = -ENOMEM;
4013 	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
4014 	if (!s)
4015 		goto err_out;
4016 
4017 	ecc_stngs[nid] = s;
4018 
4019 	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
4020 	if (!pvt)
4021 		goto err_settings;
4022 
4023 	pvt->mc_node_id	= nid;
4024 	pvt->F3 = F3;
4025 
4026 	ret = per_family_init(pvt);
4027 	if (ret < 0)
4028 		goto err_enable;
4029 
4030 	ret = pvt->ops->hw_info_get(pvt);
4031 	if (ret < 0)
4032 		goto err_enable;
4033 
4034 	ret = 0;
4035 	if (!instance_has_memory(pvt)) {
4036 		amd64_info("Node %d: No DIMMs detected.\n", nid);
4037 		goto err_enable;
4038 	}
4039 
4040 	if (!pvt->ops->ecc_enabled(pvt)) {
4041 		ret = -ENODEV;
4042 
4043 		if (!ecc_enable_override)
4044 			goto err_enable;
4045 
4046 		if (boot_cpu_data.x86 >= 0x17) {
4047 			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
4048 			goto err_enable;
4049 		} else
4050 			amd64_warn("Forcing ECC on!\n");
4051 
4052 		if (!enable_ecc_error_reporting(s, nid, F3))
4053 			goto err_enable;
4054 	}
4055 
4056 	ret = init_one_instance(pvt);
4057 	if (ret < 0) {
4058 		amd64_err("Error probing instance: %d\n", nid);
4059 
4060 		if (boot_cpu_data.x86 < 0x17)
4061 			restore_ecc_error_reporting(s, nid, F3);
4062 
4063 		goto err_enable;
4064 	}
4065 
4066 	amd64_info("%s detected (node %d).\n", pvt->ctl_name, pvt->mc_node_id);
4067 
4068 	/* Display and decode various registers for debug purposes. */
4069 	pvt->ops->dump_misc_regs(pvt);
4070 
4071 	return ret;
4072 
4073 err_enable:
4074 	hw_info_put(pvt);
4075 	kfree(pvt);
4076 
4077 err_settings:
4078 	kfree(s);
4079 	ecc_stngs[nid] = NULL;
4080 
4081 err_out:
4082 	return ret;
4083 }
4084 
4085 static void remove_one_instance(unsigned int nid)
4086 {
4087 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
4088 	struct ecc_settings *s = ecc_stngs[nid];
4089 	struct mem_ctl_info *mci;
4090 	struct amd64_pvt *pvt;
4091 
4092 	/* Remove from EDAC CORE tracking list */
4093 	mci = edac_mc_del_mc(&F3->dev);
4094 	if (!mci)
4095 		return;
4096 
4097 	pvt = mci->pvt_info;
4098 
4099 	restore_ecc_error_reporting(s, nid, F3);
4100 
4101 	kfree(ecc_stngs[nid]);
4102 	ecc_stngs[nid] = NULL;
4103 
4104 	/* Free the EDAC CORE resources */
4105 	mci->pvt_info = NULL;
4106 
4107 	hw_info_put(pvt);
4108 	kfree(pvt);
4109 	edac_mc_free(mci);
4110 }
4111 
4112 static void setup_pci_device(void)
4113 {
4114 	if (pci_ctl)
4115 		return;
4116 
4117 	pci_ctl = edac_pci_create_generic_ctl(pci_ctl_dev, EDAC_MOD_STR);
4118 	if (!pci_ctl) {
4119 		pr_warn("%s(): Unable to create PCI control\n", __func__);
4120 		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
4121 	}
4122 }
4123 
4124 static const struct x86_cpu_id amd64_cpuids[] = {
4125 	X86_MATCH_VENDOR_FAM(AMD,	0x0F, NULL),
4126 	X86_MATCH_VENDOR_FAM(AMD,	0x10, NULL),
4127 	X86_MATCH_VENDOR_FAM(AMD,	0x15, NULL),
4128 	X86_MATCH_VENDOR_FAM(AMD,	0x16, NULL),
4129 	X86_MATCH_VENDOR_FAM(AMD,	0x17, NULL),
4130 	X86_MATCH_VENDOR_FAM(HYGON,	0x18, NULL),
4131 	X86_MATCH_VENDOR_FAM(AMD,	0x19, NULL),
4132 	X86_MATCH_VENDOR_FAM(AMD,	0x1A, NULL),
4133 	{ }
4134 };
4135 MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
4136 
4137 static int __init amd64_edac_init(void)
4138 {
4139 	const char *owner;
4140 	int err = -ENODEV;
4141 	int i;
4142 
4143 	if (ghes_get_devices())
4144 		return -EBUSY;
4145 
4146 	owner = edac_get_owner();
4147 	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
4148 		return -EBUSY;
4149 
4150 	if (!x86_match_cpu(amd64_cpuids))
4151 		return -ENODEV;
4152 
4153 	if (!amd_nb_num())
4154 		return -ENODEV;
4155 
4156 	opstate_init();
4157 
4158 	err = -ENOMEM;
4159 	ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
4160 	if (!ecc_stngs)
4161 		goto err_free;
4162 
4163 	msrs = msrs_alloc();
4164 	if (!msrs)
4165 		goto err_free;
4166 
4167 	for (i = 0; i < amd_nb_num(); i++) {
4168 		err = probe_one_instance(i);
4169 		if (err) {
4170 			/* unwind properly */
4171 			while (--i >= 0)
4172 				remove_one_instance(i);
4173 
4174 			goto err_pci;
4175 		}
4176 	}
4177 
4178 	if (!edac_has_mcs()) {
4179 		err = -ENODEV;
4180 		goto err_pci;
4181 	}
4182 
4183 	/* register stuff with EDAC MCE */
4184 	if (boot_cpu_data.x86 >= 0x17) {
4185 		amd_register_ecc_decoder(decode_umc_error);
4186 	} else {
4187 		amd_register_ecc_decoder(decode_bus_error);
4188 		setup_pci_device();
4189 	}
4190 
4191 #ifdef CONFIG_X86_32
4192 	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
4193 #endif
4194 
4195 	return 0;
4196 
4197 err_pci:
4198 	pci_ctl_dev = NULL;
4199 
4200 	msrs_free(msrs);
4201 	msrs = NULL;
4202 
4203 err_free:
4204 	kfree(ecc_stngs);
4205 	ecc_stngs = NULL;
4206 
4207 	return err;
4208 }
4209 
4210 static void __exit amd64_edac_exit(void)
4211 {
4212 	int i;
4213 
4214 	if (pci_ctl)
4215 		edac_pci_release_generic_ctl(pci_ctl);
4216 
4217 	/* unregister from EDAC MCE */
4218 	if (boot_cpu_data.x86 >= 0x17)
4219 		amd_unregister_ecc_decoder(decode_umc_error);
4220 	else
4221 		amd_unregister_ecc_decoder(decode_bus_error);
4222 
4223 	for (i = 0; i < amd_nb_num(); i++)
4224 		remove_one_instance(i);
4225 
4226 	kfree(ecc_stngs);
4227 	ecc_stngs = NULL;
4228 
4229 	pci_ctl_dev = NULL;
4230 
4231 	msrs_free(msrs);
4232 	msrs = NULL;
4233 }
4234 
4235 module_init(amd64_edac_init);
4236 module_exit(amd64_edac_exit);
4237 
4238 MODULE_LICENSE("GPL");
4239 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, Dave Peterson, Thayne Harbaugh; AMD");
4240 MODULE_DESCRIPTION("MC support for AMD64 memory controllers");
4241 
4242 module_param(edac_op_state, int, 0444);
4243 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
4244