/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "gfxhub_v1_2.h"
#include "mmhub_v9_4.h"
#include "mmhub_v1_7.h"
#include "mmhub_v1_8.h"
#include "umc_v6_1.h"
#include "umc_v6_0.h"
#include "umc_v6_7.h"
#include "umc_v12_0.h"
#include "hdp_v4_0.h"
#include "mca_v3_0.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2

#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2

#define MAX_MEM_RANGES 8

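/*
 * Names of the UTCL2 clients that can raise a protection fault, used to
 * decode the CID field of VM_L2_PROTECTION_FAULT_STATUS in fault reports.
 * The gfxhub table is a flat list; the per-ASIC mmhub tables below are
 * indexed by [client id][rw], where the second index selects the name for
 * a read (0) or a write (1) access.
 */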
static const char * const gfxhub_client_ids[] = {
	"CB",
	"DB",
	"IA",
	"WD",
	"CPF",
	"CPC",
	"CPG",
	"RLC",
	"TCP",
	"SQC (inst)",
	"SQC (data)",
	"SQG",
	"PA",
};

static const char *mmhub_client_ids_raven[][2] = {
	[0][0] = "MP1",
	[1][0] = "MP0",
	[2][0] = "VCN",
	[3][0] = "VCNU",
	[4][0] = "HDP",
	[5][0] = "DCE",
	[13][0] = "UTCL2",
	[19][0] = "TLS",
	[26][0] = "OSS",
	[27][0] = "SDMA0",
	[0][1] = "MP1",
	[1][1] = "MP0",
	[2][1] = "VCN",
	[3][1] = "VCNU",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[6][1] = "DBGU0",
	[7][1] = "DCE",
	[8][1] = "DCEDWB0",
	[9][1] = "DCEDWB1",
	[26][1] = "OSS",
	[27][1] = "SDMA0",
};

static const char *mmhub_client_ids_renoir[][2] = {
	[0][0] = "MP1",
	[1][0] = "MP0",
	[2][0] = "HDP",
	[4][0] = "DCEDMC",
	[5][0] = "DCEVGA",
	[13][0] = "UTCL2",
	[19][0] = "TLS",
	[26][0] = "OSS",
	[27][0] = "SDMA0",
	[28][0] = "VCN",
	[29][0] = "VCNU",
	[30][0] = "JPEG",
	[0][1] = "MP1",
	[1][1] = "MP0",
	[2][1] = "HDP",
	[3][1] = "XDP",
	[6][1] = "DBGU0",
	[7][1] = "DCEDMC",
	[8][1] = "DCEVGA",
	[9][1] = "DCEDWB",
	[26][1] = "OSS",
	[27][1] = "SDMA0",
	[28][1] = "VCN",
	[29][1] = "VCNU",
	[30][1] = "JPEG",
};

static const char *mmhub_client_ids_vega10[][2] = {
	[0][0] = "MP0",
	[1][0] = "UVD",
	[2][0] = "UVDU",
	[3][0] = "HDP",
	[13][0] = "UTCL2",
	[14][0] = "OSS",
	[15][0] = "SDMA1",
	[32+0][0] = "VCE0",
	[32+1][0] = "VCE0U",
	[32+2][0] = "XDMA",
	[32+3][0] = "DCE",
	[32+4][0] = "MP1",
	[32+14][0] = "SDMA0",
	[0][1] = "MP0",
	[1][1] = "UVD",
	[2][1] = "UVDU",
	[3][1] = "DBGU0",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[14][1] = "OSS",
	[15][1] = "SDMA0",
	[32+0][1] = "VCE0",
	[32+1][1] = "VCE0U",
	[32+2][1] = "XDMA",
	[32+3][1] = "DCE",
	[32+4][1] = "DCEDWB",
	[32+5][1] = "MP1",
	[32+6][1] = "DBGU1",
	[32+14][1] = "SDMA1",
};

static const char *mmhub_client_ids_vega12[][2] = {
	[0][0] = "MP0",
	[1][0] = "VCE0",
	[2][0] = "VCE0U",
	[3][0] = "HDP",
	[13][0] = "UTCL2",
	[14][0] = "OSS",
	[15][0] = "SDMA1",
	[32+0][0] = "DCE",
	[32+1][0] = "XDMA",
	[32+2][0] = "UVD",
	[32+3][0] = "UVDU",
	[32+4][0] = "MP1",
	[32+15][0] = "SDMA0",
	[0][1] = "MP0",
	[1][1] = "VCE0",
	[2][1] = "VCE0U",
	[3][1] = "DBGU0",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[14][1] = "OSS",
	[15][1] = "SDMA0",
	[32+0][1] = "DCE",
	[32+1][1] = "DCEDWB",
	[32+2][1] = "XDMA",
	[32+3][1] = "UVD",
	[32+4][1] = "UVDU",
	[32+5][1] = "MP1",
	[32+6][1] = "DBGU1",
	[32+15][1] = "SDMA1",
};

static const char *mmhub_client_ids_vega20[][2] = {
	[0][0] = "XDMA",
	[1][0] = "DCE",
	[2][0] = "VCE0",
	[3][0] = "VCE0U",
	[4][0] = "UVD",
	[5][0] = "UVD1U",
	[13][0] = "OSS",
	[14][0] = "HDP",
	[15][0] = "SDMA0",
	[32+0][0] = "UVD",
	[32+1][0] = "UVDU",
	[32+2][0] = "MP1",
	[32+3][0] = "MP0",
	[32+12][0] = "UTCL2",
	[32+14][0] = "SDMA1",
	[0][1] = "XDMA",
	[1][1] = "DCE",
	[2][1] = "DCEDWB",
	[3][1] = "VCE0",
	[4][1] = "VCE0U",
	[5][1] = "UVD1",
	[6][1] = "UVD1U",
	[7][1] = "DBGU0",
	[8][1] = "XDP",
	[13][1] = "OSS",
	[14][1] = "HDP",
	[15][1] = "SDMA0",
	[32+0][1] = "UVD",
	[32+1][1] = "UVDU",
	[32+2][1] = "DBGU1",
	[32+3][1] = "MP1",
	[32+4][1] = "MP0",
	[32+14][1] = "SDMA1",
};

static const char *mmhub_client_ids_arcturus[][2] = {
	[0][0] = "DBGU1",
	[1][0] = "XDP",
	[2][0] = "MP1",
	[14][0] = "HDP",
	[171][0] = "JPEG",
	[172][0] = "VCN",
	[173][0] = "VCNU",
	[203][0] = "JPEG1",
	[204][0] = "VCN1",
	[205][0] = "VCN1U",
	[256][0] = "SDMA0",
	[257][0] = "SDMA1",
	[258][0] = "SDMA2",
	[259][0] = "SDMA3",
	[260][0] = "SDMA4",
	[261][0] = "SDMA5",
	[262][0] = "SDMA6",
	[263][0] = "SDMA7",
	[384][0] = "OSS",
	[0][1] = "DBGU1",
	[1][1] = "XDP",
	[2][1] = "MP1",
	[14][1] = "HDP",
	[171][1] = "JPEG",
	[172][1] = "VCN",
	[173][1] = "VCNU",
	[203][1] = "JPEG1",
	[204][1] = "VCN1",
	[205][1] = "VCN1U",
	[256][1] = "SDMA0",
	[257][1] = "SDMA1",
	[258][1] = "SDMA2",
	[259][1] = "SDMA3",
	[260][1] = "SDMA4",
	[261][1] = "SDMA5",
	[262][1] = "SDMA6",
	[263][1] = "SDMA7",
	[384][1] = "OSS",
};

static const char *mmhub_client_ids_aldebaran[][2] = {
	[2][0] = "MP1",
	[3][0] = "MP0",
	[32+1][0] = "DBGU_IO0",
	[32+2][0] = "DBGU_IO2",
	[32+4][0] = "MPIO",
	[96+11][0] = "JPEG0",
	[96+12][0] = "VCN0",
	[96+13][0] = "VCNU0",
	[128+11][0] = "JPEG1",
	[128+12][0] = "VCN1",
	[128+13][0] = "VCNU1",
	[160+1][0] = "XDP",
	[160+14][0] = "HDP",
	[256+0][0] = "SDMA0",
	[256+1][0] = "SDMA1",
	[256+2][0] = "SDMA2",
	[256+3][0] = "SDMA3",
	[256+4][0] = "SDMA4",
	[384+0][0] = "OSS",
	[2][1] = "MP1",
	[3][1] = "MP0",
	[32+1][1] = "DBGU_IO0",
	[32+2][1] = "DBGU_IO2",
	[32+4][1] = "MPIO",
	[96+11][1] = "JPEG0",
	[96+12][1] = "VCN0",
	[96+13][1] = "VCNU0",
	[128+11][1] = "JPEG1",
	[128+12][1] = "VCN1",
	[128+13][1] = "VCNU1",
	[160+1][1] = "XDP",
	[160+14][1] = "HDP",
	[256+0][1] = "SDMA0",
	[256+1][1] = "SDMA1",
	[256+2][1] = "SDMA2",
	[256+3][1] = "SDMA3",
	[256+4][1] = "SDMA4",
	[384+0][1] = "OSS",
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = {
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] = {
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

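/*
 * Absolute offsets of what appear to be the per-channel UMC ECC control
 * registers (and their mask counterparts below): a per-UMC-instance base
 * plus a 0x800 per-channel stride. gmc_v9_0_ecc_interrupt_state() toggles
 * the low interrupt-enable bits in each of them directly via
 * RREG32()/WREG32().
 */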
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};

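/*
 * Enable or disable the UMC ECC error interrupt by setting or clearing
 * the low seven bits in every ECC control and mask register above. Only
 * used on Vega10/Vega12; from Vega20 on, the PSP bootloader owns this
 * programming sequence.
 */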
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned int type,
					enum amdgpu_interrupt_state state)
{
	u32 bits, i, tmp, reg;

	/* Devices newer than VEGA10/12 shall have these programming
	 * sequences performed by PSP BL
	 */
	if (adev->asic_type >= CHIP_VEGA20)
		return 0;

	bits = 0x7f;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

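/*
 * Enable or disable the VM protection fault interrupts by toggling the
 * per-fault-type interrupt-enable bits in VM_CONTEXT*_CNTL of every VM
 * context (0..15) on every active vmhub.
 */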
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned int type,
					     enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;

				/* This works because this interrupt is only
				 * enabled at init/resume and disabled in
				 * fini/suspend, so the overall state doesn't
				 * change over the course of suspend/resume.
				 */
				if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
					continue;

				if (j >= AMDGPU_MMHUB0(0))
					tmp = RREG32_SOC15_IP(MMHUB, reg);
				else
					tmp = RREG32_XCC(reg, j);

				tmp &= ~bits;

				if (j >= AMDGPU_MMHUB0(0))
					WREG32_SOC15_IP(MMHUB, reg, tmp);
				else
					WREG32_XCC(reg, tmp, j);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;

				/* This works because this interrupt is only
				 * enabled at init/resume and disabled in
				 * fini/suspend, so the overall state doesn't
				 * change over the course of suspend/resume.
				 */
				if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
					continue;

				if (j >= AMDGPU_MMHUB0(0))
					tmp = RREG32_SOC15_IP(MMHUB, reg);
				else
					tmp = RREG32_XCC(reg, j);

				tmp |= bits;

				if (j >= AMDGPU_MMHUB0(0))
					WREG32_SOC15_IP(MMHUB, reg, tmp);
				else
					WREG32_XCC(reg, tmp, j);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

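/*
 * Handle a VM protection fault: decode the faulting hub and address from
 * the IV ring entry, try to service recoverable retry faults by filling
 * the page tables, and otherwise log a decoded fault report.
 */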
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	bool write_fault = !!(entry->src_data[1] & 0x20);
	uint32_t status = 0, cid = 0, rw = 0, fed = 0;
	struct amdgpu_task_info *task_info;
	struct amdgpu_vmhub *hub;
	const char *mmhub_cid;
	const char *hub_name;
	unsigned int vmhub;
	u64 addr;
	uint32_t cam_index = 0;
	int ret, xcc_id = 0;
	uint32_t node_id;

	node_id = entry->node_id;

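	/* Reassemble the 48-bit faulting page address: bits 43:12 come from
	 * src_data[0], bits 47:44 from the low nibble of src_data[1].
	 */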
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		hub_name = "mmhub0";
		vmhub = AMDGPU_MMHUB0(node_id / 4);
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		hub_name = "mmhub1";
		vmhub = AMDGPU_MMHUB1(0);
	} else {
		hub_name = "gfxhub0";
		if (adev->gfx.funcs->ih_node_to_logical_xcc) {
			xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev,
				node_id);
			if (xcc_id < 0)
				xcc_id = 0;
		}
		vmhub = xcc_id;
	}
	hub = &adev->vmhub[vmhub];

	if (retry_fault) {
		if (adev->irq.retry_cam_enabled) {
			/* Delegate it to a different ring if the hardware hasn't
			 * already done it.
			 */
			if (entry->ih == &adev->irq.ih) {
				amdgpu_irq_delegate(adev, entry, 8);
				return 1;
			}

			cam_index = entry->src_data[2] & 0x3ff;

			ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
						     addr, entry->timestamp, write_fault);
			WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
			if (ret)
				return 1;
		} else {
			/* Process it only if it's the first fault for this address */
			if (entry->ih != &adev->irq.ih_soft &&
			    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
						     entry->timestamp))
				return 1;

			/* Delegate it to a different ring if the hardware hasn't
			 * already done it.
			 */
			if (entry->ih == &adev->irq.ih) {
				amdgpu_irq_delegate(adev, entry, 8);
				return 1;
			}

			/* Try to handle the recoverable page faults by filling page
			 * tables
			 */
			if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
						   addr, entry->timestamp, write_fault))
				return 1;
		}
	}

	if (!printk_ratelimit())
		return 0;

	dev_err(adev->dev,
		"[%s] %s page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n", hub_name,
		retry_fault ? "retry" : "no-retry",
		entry->src_id, entry->ring_id, entry->vmid, entry->pasid);

	task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
	if (task_info) {
		dev_err(adev->dev,
			" for process %s pid %d thread %s pid %d\n",
			task_info->process_name, task_info->tgid,
			task_info->task_name, task_info->pid);
		amdgpu_vm_put_task_info(task_info);
	}

	dev_err(adev->dev, "  in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
		addr, entry->client_id,
		soc15_ih_clientid_name[entry->client_id]);

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
		dev_err(adev->dev, "  cookie node_id %d fault from die %s%d%s\n",
			node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
			node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");

	if (amdgpu_sriov_vf(adev))
		return 0;

	/*
	 * Issue a dummy read to wait for the status register to
	 * be updated to avoid reading an incorrect value due to
	 * the new fast GRBM interface.
	 */
	if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))
		RREG32(hub->vm_l2_pro_fault_status);

	status = RREG32(hub->vm_l2_pro_fault_status);
	cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
	rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
	fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);

	/* for FED errors, KFD will handle them, return directly */
	if (fed && amdgpu_ras_is_poison_mode_supported(adev) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2)))
		return 0;

	if (!amdgpu_sriov_vf(adev))
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

	amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub);

	dev_err(adev->dev,
		"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
		status);
	if (entry->vmid_src == AMDGPU_GFXHUB(0)) {
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
			gfxhub_client_ids[cid],
			cid);
	} else {
		switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
		case IP_VERSION(9, 0, 0):
			mmhub_cid = mmhub_client_ids_vega10[cid][rw];
			break;
		case IP_VERSION(9, 3, 0):
			mmhub_cid = mmhub_client_ids_vega12[cid][rw];
			break;
		case IP_VERSION(9, 4, 0):
			mmhub_cid = mmhub_client_ids_vega20[cid][rw];
			break;
		case IP_VERSION(9, 4, 1):
			mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
			break;
		case IP_VERSION(9, 1, 0):
		case IP_VERSION(9, 2, 0):
			mmhub_cid = mmhub_client_ids_raven[cid][rw];
			break;
		case IP_VERSION(1, 5, 0):
		case IP_VERSION(2, 4, 0):
			mmhub_cid = mmhub_client_ids_renoir[cid][rw];
			break;
		case IP_VERSION(1, 8, 0):
		case IP_VERSION(9, 4, 2):
			mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
			break;
		default:
			mmhub_cid = NULL;
			break;
		}
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			mmhub_cid ? mmhub_cid : "unknown", cid);
	}
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
			      VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
			      VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
			      VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
			      VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev) &&
	    !adev->gmc.xgmi.connected_to_cpu &&
	    !adev->gmc.is_app_apu) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
	}
}

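/*
 * Build the VM_INVALIDATE_ENG*_REQ value for a TLB flush: invalidate the
 * L2 PTEs, all L2 PDE levels and the L1 PTEs for the given VMID with the
 * requested flush type, while leaving the protection fault status
 * address untouched.
 */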
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					    uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/**
 * gmc_v9_0_use_invalidate_semaphore - judge whether to use semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 * Returns true if the TLB invalidation on the given vmhub needs to be
 * guarded by the invalidation semaphore.
 */
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					      uint32_t vmhub)
{
	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
		return false;

	return ((vmhub == AMDGPU_MMHUB0(0) ||
		 vmhub == AMDGPU_MMHUB1(0)) &&
		(!amdgpu_sriov_vf(adev)) &&
		(!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
		   (adev->apu_flags & AMD_APU_IS_PICASSO))));
}

static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
						     uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		       + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				   uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
	u32 j, inv_req, tmp, sem, req, ack, inst;
	const unsigned int eng = 17;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= AMDGPU_MAX_VMHUBS);

	hub = &adev->vmhub[vmhub];
	inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
	sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
	req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
	ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

	if (vmhub >= AMDGPU_MMHUB0(0))
		inst = 0;
	else
		inst = vmhub;

	/* This is necessary for SRIOV as well as for GFXOFF to function
	 * properly under bare metal
	 */
	if (adev->gfx.kiq[inst].ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
						 1 << vmid, inst);
		return;
	}

	/* This path is needed before KIQ/MES/GFXOFF are set up */
	spin_lock(&adev->gmc.invalidate_lock);

	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating off cycle. As a workaround, acquire the semaphore
	 * before the invalidation and release it afterwards to avoid
	 * entering a power-gated state in between.
	 */

	/* TODO: Debugging of the semaphore path for GFXHUB still needs to continue. */
	if (use_semaphore) {
		for (j = 0; j < adev->usec_timeout; j++) {
			/* a read return value of 1 means semaphore acquire */
			if (vmhub >= AMDGPU_MMHUB0(0))
				tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, sem, GET_INST(GC, inst));
			else
				tmp = RREG32_SOC15_IP_NO_KIQ(GC, sem, GET_INST(GC, inst));
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (j >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	if (vmhub >= AMDGPU_MMHUB0(0))
		WREG32_SOC15_IP_NO_KIQ(MMHUB, req, inv_req, GET_INST(GC, inst));
	else
		WREG32_SOC15_IP_NO_KIQ(GC, req, inv_req, GET_INST(GC, inst));

	/*
	 * Issue a dummy read to wait for the ACK register to
	 * be cleared to avoid a false ACK due to the new fast
	 * GRBM interface.
	 */
	if ((vmhub == AMDGPU_GFXHUB(0)) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))
		RREG32_NO_KIQ(req);

	for (j = 0; j < adev->usec_timeout; j++) {
		if (vmhub >= AMDGPU_MMHUB0(0))
			tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, ack, GET_INST(GC, inst));
		else
			tmp = RREG32_SOC15_IP_NO_KIQ(GC, ack, GET_INST(GC, inst));
		if (tmp & (1 << vmid))
			break;
		udelay(1);
	}

	/* TODO: Debugging of the semaphore path for GFXHUB still needs to continue. */
	if (use_semaphore) {
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		if (vmhub >= AMDGPU_MMHUB0(0))
			WREG32_SOC15_IP_NO_KIQ(MMHUB, sem, 0, GET_INST(GC, inst));
		else
			WREG32_SOC15_IP_NO_KIQ(GC, sem, 0, GET_INST(GC, inst));
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 * @inst: is used to select which instance of KIQ to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */
static void gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub, uint32_t inst)
{
	uint16_t queried;
	int i, vmid;

	for (vmid = 1; vmid < 16; vmid++) {
		bool valid;

		valid = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
								 &queried);
		if (!valid || queried != pasid)
			continue;

		if (all_hub) {
			for_each_set_bit(i, adev->vmhubs_mask,
					 AMDGPU_MAX_VMHUBS)
				gmc_v9_0_flush_gpu_tlb(adev, vmid, i,
						       flush_type);
		} else {
			gmc_v9_0_flush_gpu_tlb(adev, vmid,
					       AMDGPU_GFXHUB(0),
					       flush_type);
		}
	}
}

static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned int eng = ring->vm_inv_eng;

	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating off cycle. As a workaround, acquire the semaphore
	 * before the invalidation and release it afterwards to avoid
	 * entering a power-gated state in between.
	 */

	/* TODO: Debugging of the semaphore path for GFXHUB still needs to continue. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: Debugging of the semaphore path for GFXHUB still needs to continue. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
					unsigned int pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* Do nothing because there's no lut register for mmhub1. */
	if (ring->vm_hub == AMDGPU_MMHUB1(0))
		return;

	if (ring->vm_hub == AMDGPU_GFXHUB(0))
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 63:59 reserved
 * 58:57 mtype
 * 56 F
 * 55 L
 * 54 P
 * 53 SW
 * 52 T
 * 50:48 reserved
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_WC);
	case AMDGPU_VM_MTYPE_RW:
		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_RW);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
	}
}

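/*
 * Encode a PDE for the hardware: translate the MC address to a physical
 * one and, when translate-further is enabled, set the block fragment
 * size at the PDB1 level and, at the PDB0 level, either the
 * translate-further bit or (presumably) the PTE-in-PDB0 address encoding
 * for huge pages.
 */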
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE) {
			*flags &= ~AMDGPU_PDE_PTE;
			if (!(*flags & AMDGPU_PTE_VALID))
				*addr |= 1 << PAGE_SHIFT;
		} else {
			*flags |= AMDGPU_PTE_TF;
		}
	}
}

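/*
 * Pick the MTYPE and snoop bit for a mapping based on where the BO lives
 * (local VRAM, peer VRAM, system memory), the coherence flags it was
 * created with, and GC IP version quirks.
 */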
static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
					 struct amdgpu_bo *bo,
					 struct amdgpu_bo_va_mapping *mapping,
					 uint64_t *flags)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
	bool is_vram = bo->tbo.resource &&
		bo->tbo.resource->mem_type == TTM_PL_VRAM;
	bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
				     AMDGPU_GEM_CREATE_EXT_COHERENT);
	bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
	bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
	struct amdgpu_vm *vm = mapping->bo_va->base.vm;
	unsigned int mtype_local, mtype;
	bool snoop = false;
	bool is_local;

	dma_resv_assert_held(bo->tbo.base.resv);

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		if (is_vram) {
			if (bo_adev == adev) {
				if (uncached)
					mtype = MTYPE_UC;
				else if (coherent)
					mtype = MTYPE_CC;
				else
					mtype = MTYPE_RW;
				/* FIXME: is this still needed? Or does
				 * amdgpu_ttm_tt_pde_flags already handle this?
				 */
				if ((amdgpu_ip_version(adev, GC_HWIP, 0) ==
				     IP_VERSION(9, 4, 2) ||
				     amdgpu_ip_version(adev, GC_HWIP, 0) ==
				     IP_VERSION(9, 4, 3)) &&
				    adev->gmc.xgmi.connected_to_cpu)
					snoop = true;
			} else {
				if (uncached || coherent)
					mtype = MTYPE_UC;
				else
					mtype = MTYPE_NC;
				if (mapping->bo_va->is_xgmi)
					snoop = true;
			}
		} else {
			if (uncached || coherent)
				mtype = MTYPE_UC;
			else
				mtype = MTYPE_NC;
			/* FIXME: is this still needed? Or does
			 * amdgpu_ttm_tt_pde_flags already handle this?
			 */
			snoop = true;
		}
		break;
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		/* Only local VRAM BOs or system memory on non-NUMA APUs
		 * can be assumed to be local in their entirety. Choose
		 * MTYPE_NC as safe fallback for all system memory BOs on
		 * NUMA systems. Their MTYPE can be overridden per-page in
		 * gmc_v9_0_override_vm_pte_flags.
		 */
		mtype_local = MTYPE_RW;
		if (amdgpu_mtype_local == 1) {
			DRM_INFO_ONCE("Using MTYPE_NC for local memory\n");
			mtype_local = MTYPE_NC;
		} else if (amdgpu_mtype_local == 2) {
			DRM_INFO_ONCE("Using MTYPE_CC for local memory\n");
			mtype_local = MTYPE_CC;
		} else {
			DRM_INFO_ONCE("Using MTYPE_RW for local memory\n");
		}
		is_local = (!is_vram && (adev->flags & AMD_IS_APU) &&
			    num_possible_nodes() <= 1) ||
			   (is_vram && adev == bo_adev &&
			    KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id);
		snoop = true;
		if (uncached) {
			mtype = MTYPE_UC;
		} else if (ext_coherent) {
			if (adev->rev_id)
				mtype = is_local ? MTYPE_CC : MTYPE_UC;
			else
				mtype = MTYPE_UC;
		} else if (adev->flags & AMD_IS_APU) {
			mtype = is_local ? mtype_local : MTYPE_NC;
		} else {
			/* dGPU */
			if (is_local)
				mtype = mtype_local;
			else if (is_vram)
				mtype = MTYPE_NC;
			else
				mtype = MTYPE_UC;
		}

		break;
	default:
		if (uncached || coherent)
			mtype = MTYPE_UC;
		else
			mtype = MTYPE_NC;

		/* FIXME: is this still needed? Or does
		 * amdgpu_ttm_tt_pde_flags already handle this?
		 */
		if (!is_vram)
			snoop = true;
	}

	if (mtype != MTYPE_NC)
		*flags = AMDGPU_PTE_MTYPE_VG10(*flags, mtype);

	*flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
}

static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	struct amdgpu_bo *bo = mapping->bo_va->base.bo;

	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if ((*flags & AMDGPU_PTE_VALID) && bo)
		gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags);
}

static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
					   struct amdgpu_vm *vm,
					   uint64_t addr, uint64_t *flags)
{
	int local_node, nid;

	/* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system
	 * memory can use more efficient MTYPEs.
	 */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4))
		return;

	/* Only direct-mapped memory allows us to determine the NUMA node from
	 * the DMA address.
	 */
	if (!adev->ram_is_direct_mapped) {
		dev_dbg_ratelimited(adev->dev, "RAM is not direct mapped\n");
		return;
	}

	/* MTYPE_NC is the same default and can be overridden.
	 * MTYPE_UC will be present if the memory is extended-coherent
	 * and can also be overridden.
	 */
	if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
	    AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC) &&
	    (*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
	    AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC)) {
		dev_dbg_ratelimited(adev->dev, "MTYPE is not NC or UC\n");
		return;
	}

	/* FIXME: Only supported on native mode for now. For carve-out, the
	 * NUMA affinity of the GPU/VM needs to come from the PCI info because
	 * memory partitions are not associated with different NUMA nodes.
	 */
	if (adev->gmc.is_app_apu && vm->mem_id >= 0) {
		local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
	} else {
		dev_dbg_ratelimited(adev->dev, "Only native mode APU is supported.\n");
		return;
	}

	/* Only handle real RAM. Mappings of PCIe resources don't have struct
	 * page or NUMA nodes.
	 */
	if (!page_is_ram(addr >> PAGE_SHIFT)) {
		dev_dbg_ratelimited(adev->dev, "Page is not RAM.\n");
		return;
	}
	nid = pfn_to_nid(addr >> PAGE_SHIFT);
	dev_dbg_ratelimited(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n",
			    vm->mem_id, local_node, nid);
	if (nid == local_node) {
		uint64_t old_flags = *flags;

		if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) ==
		    AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC)) {
			unsigned int mtype_local = MTYPE_RW;

			if (amdgpu_mtype_local == 1)
				mtype_local = MTYPE_NC;
			else if (amdgpu_mtype_local == 2)
				mtype_local = MTYPE_CC;

			*flags = AMDGPU_PTE_MTYPE_VG10(*flags, mtype_local);
		} else if (adev->rev_id) {
			/* MTYPE_UC case */
			*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
		}

		dev_dbg_ratelimited(adev->dev, "flags updated from %llx to %llx\n",
				    old_flags, *flags);
	}
}

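/*
 * Determine how much framebuffer memory the VBIOS is still scanning out
 * of (the VGA allocation, or the active primary viewport), so it can be
 * reserved until the driver takes over the display.
 */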
static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned int size;

	/* TODO move to DC so GMC doesn't need to hard-code DCN registers */

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;

		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case IP_VERSION(2, 1, 0):
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}

	return size;
}

static enum amdgpu_memory_partition
gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
{
	enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;

	if (adev->nbio.funcs->get_memory_partition_mode)
		mode = adev->nbio.funcs->get_memory_partition_mode(adev,
								   supp_modes);

	return mode;
}

static enum amdgpu_memory_partition
gmc_v9_0_query_memory_partition(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return AMDGPU_NPS1_PARTITION_MODE;

	return gmc_v9_0_get_memory_partition(adev, NULL);
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.map_mtype = gmc_v9_0_map_mtype,
	.get_vm_pde = gmc_v9_0_get_vm_pde,
	.get_vm_pte = gmc_v9_0_get_vm_pte,
	.override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
	.query_mem_partition_mode = &gmc_v9_0_query_memory_partition,
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(6, 0, 0):
		adev->umc.funcs = &umc_v6_0_funcs;
		break;
	case IP_VERSION(6, 1, 1):
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
		adev->umc.retire_unit = 1;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.ras = &umc_v6_1_ras;
		break;
	case IP_VERSION(6, 1, 2):
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
		adev->umc.retire_unit = 1;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.ras = &umc_v6_1_ras;
		break;
	case IP_VERSION(6, 7, 0):
		adev->umc.max_ras_err_cnt_per_query =
			UMC_V6_7_TOTAL_CHANNEL_NUM * UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL;
		adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
		adev->umc.retire_unit = (UMC_V6_7_NA_MAP_PA_NUM * 2);
		if (!adev->gmc.xgmi.connected_to_cpu)
			adev->umc.ras = &umc_v6_7_ras;
		if (1 & adev->smuio.funcs->get_die_id(adev))
			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
		else
			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
		break;
	case IP_VERSION(12, 0, 0):
		adev->umc.max_ras_err_cnt_per_query =
			UMC_V12_0_TOTAL_CHANNEL_NUM(adev) * UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
		adev->umc.channel_inst_num = UMC_V12_0_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM;
		adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
		adev->umc.active_mask = adev->aid_mask;
		adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
		if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
			adev->umc.ras = &umc_v12_0_ras;
		break;
	default:
		break;
	}
}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(9, 4, 1):
		adev->mmhub.funcs = &mmhub_v9_4_funcs;
		break;
	case IP_VERSION(9, 4, 2):
		adev->mmhub.funcs = &mmhub_v1_7_funcs;
		break;
	case IP_VERSION(1, 8, 0):
		adev->mmhub.funcs = &mmhub_v1_8_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v1_0_funcs;
		break;
	}
}

static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(9, 4, 0):
		adev->mmhub.ras = &mmhub_v1_0_ras;
		break;
	case IP_VERSION(9, 4, 1):
		adev->mmhub.ras = &mmhub_v9_4_ras;
		break;
	case IP_VERSION(9, 4, 2):
		adev->mmhub.ras = &mmhub_v1_7_ras;
		break;
	case IP_VERSION(1, 8, 0):
		adev->mmhub.ras = &mmhub_v1_8_ras;
		break;
	default:
		/* mmhub ras is not available */
		break;
	}
}

static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
		adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
	else
		adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
}

static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
{
	adev->hdp.ras = &hdp_v4_0_ras;
}

static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev)
{
	struct amdgpu_mca *mca = &adev->mca;

	/* is UMC the right IP to check for MCA? Maybe DF? */
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(6, 7, 0):
		if (!adev->gmc.xgmi.connected_to_cpu) {
			mca->mp0.ras = &mca_v3_0_mp0_ras;
			mca->mp1.ras = &mca_v3_0_mp1_ras;
			mca->mpio.ras = &mca_v3_0_mpio_ras;
		}
		break;
	default:
		break;
	}
}

static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev)
{
	if (!adev->gmc.xgmi.connected_to_cpu)
		adev->gmc.xgmi.ras = &xgmi_ras;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * 9.4.0, 9.4.1, 9.4.3 and 9.4.4 don't have XGMI defined
	 * in their IP discovery tables
	 */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
		adev->gmc.xgmi.supported = true;

	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) {
		adev->gmc.xgmi.supported = true;
		adev->gmc.xgmi.connected_to_cpu =
			adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
	}

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
		enum amdgpu_pkg_type pkg_type =
			adev->smuio.funcs->get_pkg_type(adev);
		/* On GFXIP 9.4.3 APUs, there is no physical VRAM domain
		 * present and the APU can be used in two possible modes:
		 * - carveout mode
		 * - native APU mode
		 * "is_app_apu" can be used to identify the APU in the native
		 * mode.
		 */
		adev->gmc.is_app_apu = (pkg_type == AMDGPU_PKG_TYPE_APU &&
					!pci_resource_len(adev->pdev, 0));
	}

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);
	gmc_v9_0_set_mmhub_ras_funcs(adev);
	gmc_v9_0_set_gfxhub_funcs(adev);
	gmc_v9_0_set_hdp_ras_funcs(adev);
	gmc_v9_0_set_mca_ras_funcs(adev);
	gmc_v9_0_set_xgmi_ras_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;

	return 0;
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	/*
	 * Work around a performance drop issue where the VBIOS enables
	 * partial writes while disabling HBM ECC for vega10.
	 */
	if (!amdgpu_sriov_vf(adev) &&
	    (amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(6, 0, 0))) {
		if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
			if (adev->df.funcs &&
			    adev->df.funcs->enable_ecc_force_par_wr_rmw)
				adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
		}
	}

	if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__HDP);
	}

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

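/*
 * Place VRAM and GART within the GPU's physical address space. With a
 * CPU XGMI link the system VM layout is used; otherwise VRAM is placed
 * at the FB location (plus the XGMI offset of this node) with GART and
 * optionally the AGP aperture laid out around it.
 */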
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = adev->mmhub.funcs->get_fb_location(adev);

	amdgpu_gmc_set_agp_default(adev, mc);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	if (adev->gmc.xgmi.connected_to_cpu) {
		amdgpu_gmc_sysvm_location(adev, mc);
	} else {
		amdgpu_gmc_vram_location(adev, mc, base);
		amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
		if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
			amdgpu_gmc_agp_location(adev, mc);
	}
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports the size in MB */
	if (!adev->gmc.is_app_apu) {
		adev->gmc.mc_vram_size =
			adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	} else {
		DRM_DEBUG("Set mc_vram_size = 0 for APP APU\n");
		adev->gmc.mc_vram_size = 0;
	}
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU) &&
	    !adev->gmc.xgmi.connected_to_cpu) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/*
	 * AMD Accelerated Processing Platform (APP) supporting GPU-HOST xgmi
	 * interface can use VRAM through here, as it appears as system
	 * reserved memory in the host address space.
	 *
	 * For APUs, VRAM is just the stolen system memory and can be
	 * accessed directly.
	 *
	 * Otherwise, use the legacy Host Data Path (HDP) through PCIe BAR.
	 */

	/* check whether both host-gpu and gpu-gpu xgmi links exist */
	if ((!amdgpu_sriov_vf(adev) &&
	     (adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
	    (adev->gmc.xgmi.supported &&
	     adev->gmc.xgmi.connected_to_cpu)) {
		adev->gmc.aper_base =
			adev->gfxhub.funcs->get_mc_fb_offset(adev) +
			adev->gmc.xgmi.physical_node_id *
			adev->gmc.xgmi.node_segment_size;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}

#endif
	adev->gmc.visible_vram_size = adev->gmc.aper_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
		case IP_VERSION(9, 0, 1):  /* all engines support GPUVM */
		case IP_VERSION(9, 2, 1):  /* all engines support GPUVM */
		case IP_VERSION(9, 4, 0):
		case IP_VERSION(9, 4, 1):
		case IP_VERSION(9, 4, 2):
		case IP_VERSION(9, 4, 3):
		case IP_VERSION(9, 4, 4):
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case IP_VERSION(9, 1, 0):   /* DCE SG support */
		case IP_VERSION(9, 2, 2):   /* DCE SG support */
		case IP_VERSION(9, 3, 0):
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

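/*
 * Allocate the VMID0 (GART) page table. With a CPU XGMI link the VMID0
 * table uses a deeper layout to cover the system address space, and on
 * APUs without dedicated VRAM the table is placed in system memory
 * instead of VRAM.
 */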
gmc_v9_0_gart_init(struct amdgpu_device * adev)1755 static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
1756 {
1757 int r;
1758
1759 if (adev->gart.bo) {
1760 WARN(1, "VEGA10 PCIE GART already initialized\n");
1761 return 0;
1762 }
1763
1764 if (adev->gmc.xgmi.connected_to_cpu) {
1765 adev->gmc.vmid0_page_table_depth = 1;
1766 adev->gmc.vmid0_page_table_block_size = 12;
1767 } else {
1768 adev->gmc.vmid0_page_table_depth = 0;
1769 adev->gmc.vmid0_page_table_block_size = 0;
1770 }
1771
1772 /* Initialize common gart structure */
1773 r = amdgpu_gart_init(adev);
1774 if (r)
1775 return r;
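/* Each GART page-table entry is 8 bytes, hence num_gpu_pages * 8. */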
1776 adev->gart.table_size = adev->gart.num_gpu_pages * 8;
1777 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC) |
1778 AMDGPU_PTE_EXECUTABLE;
1779
1780 if (!adev->gmc.real_vram_size) {
1781 dev_info(adev->dev, "Put GART in system memory for APU\n");
1782 r = amdgpu_gart_table_ram_alloc(adev);
1783 if (r)
1784 dev_err(adev->dev, "Failed to allocate GART in system memory\n");
1785 } else {
1786 r = amdgpu_gart_table_vram_alloc(adev);
1787 if (r)
1788 return r;
1789
1790 if (adev->gmc.xgmi.connected_to_cpu)
1791 r = amdgpu_gmc_pdb0_alloc(adev);
1792 }
1793
1794 return r;
1795 }
1796
1797 /**
1798 * gmc_v9_0_save_registers - save registers for resume
1799 *
1800 * @adev: amdgpu_device pointer
1801 *
1802 * This saves the register values that must be
1803 * restored upon resume.
1804 */
1805 static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
1806 {
1807 if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
1808 (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1)))
1809 adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
1810 }
1811
1812 static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev)
1813 {
1814 enum amdgpu_memory_partition mode;
1815 u32 supp_modes;
1816 bool valid;
1817
1818 mode = gmc_v9_0_get_memory_partition(adev, &supp_modes);
1819
1820 /* Mode detected by hardware not present in supported modes */
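/* supp_modes encodes mode n as BIT(n - 1); e.g. NPS4 support is
 * reported as BIT(AMDGPU_NPS4_PARTITION_MODE - 1).
 */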
1821 if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
1822 !(BIT(mode - 1) & supp_modes))
1823 return false;
1824
1825 switch (mode) {
1826 case UNKNOWN_MEMORY_PARTITION_MODE:
1827 case AMDGPU_NPS1_PARTITION_MODE:
1828 valid = (adev->gmc.num_mem_partitions == 1);
1829 break;
1830 case AMDGPU_NPS2_PARTITION_MODE:
1831 valid = (adev->gmc.num_mem_partitions == 2);
1832 break;
1833 case AMDGPU_NPS4_PARTITION_MODE:
1834 valid = (adev->gmc.num_mem_partitions == 3 ||
1835 adev->gmc.num_mem_partitions == 4);
1836 break;
1837 default:
1838 valid = false;
1839 }
1840
1841 return valid;
1842 }
1843
1844 static bool gmc_v9_0_is_node_present(int *node_ids, int num_ids, int nid)
1845 {
1846 int i;
1847
1848 /* Check if node with id 'nid' is present in 'node_ids' array */
1849 for (i = 0; i < num_ids; ++i)
1850 if (node_ids[i] == nid)
1851 return true;
1852
1853 return false;
1854 }
1855
1856 static void
1857 gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev,
1858 struct amdgpu_mem_partition_info *mem_ranges)
1859 {
1860 struct amdgpu_numa_info numa_info;
1861 int node_ids[MAX_MEM_RANGES];
1862 int num_ranges = 0, ret;
1863 int num_xcc, xcc_id;
1864 uint32_t xcc_mask;
1865
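/*
 * Visit every XCC instance and record the NUMA node backing it; each
 * distinct node becomes one memory range. If ACPI reports no NUMA
 * affinity, fall back to a single range.
 */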
1866 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1867 xcc_mask = (1U << num_xcc) - 1;
1868
1869 for_each_inst(xcc_id, xcc_mask) {
1870 ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
1871 if (ret)
1872 continue;
1873
1874 if (numa_info.nid == NUMA_NO_NODE) {
1875 mem_ranges[0].size = numa_info.size;
1876 mem_ranges[0].numa.node = numa_info.nid;
1877 num_ranges = 1;
1878 break;
1879 }
1880
1881 if (gmc_v9_0_is_node_present(node_ids, num_ranges,
1882 numa_info.nid))
1883 continue;
1884
1885 node_ids[num_ranges] = numa_info.nid;
1886 mem_ranges[num_ranges].numa.node = numa_info.nid;
1887 mem_ranges[num_ranges].size = numa_info.size;
1888 ++num_ranges;
1889 }
1890
1891 adev->gmc.num_mem_partitions = num_ranges;
1892 }
1893
1894 static void
1895 gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
1896 struct amdgpu_mem_partition_info *mem_ranges)
1897 {
1898 enum amdgpu_memory_partition mode;
1899 u32 start_addr = 0, size;
1900 int i, r, l;
1901
1902 mode = gmc_v9_0_query_memory_partition(adev);
1903
1904 switch (mode) {
1905 case UNKNOWN_MEMORY_PARTITION_MODE:
1906 case AMDGPU_NPS1_PARTITION_MODE:
1907 adev->gmc.num_mem_partitions = 1;
1908 break;
1909 case AMDGPU_NPS2_PARTITION_MODE:
1910 adev->gmc.num_mem_partitions = 2;
1911 break;
1912 case AMDGPU_NPS4_PARTITION_MODE:
1913 if (adev->flags & AMD_IS_APU)
1914 adev->gmc.num_mem_partitions = 3;
1915 else
1916 adev->gmc.num_mem_partitions = 4;
1917 break;
1918 default:
1919 adev->gmc.num_mem_partitions = 1;
1920 break;
1921 }
1922
1923 /* Use NPS range info, if populated */
1924 r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges,
1925 adev->gmc.num_mem_partitions);
1926 if (!r) {
1927 l = 0;
1928 for (i = 1; i < adev->gmc.num_mem_partitions; ++i) {
1929 if (mem_ranges[i].range.lpfn >
1930 mem_ranges[i - 1].range.lpfn)
1931 l = i;
1932 }
1933
1934 } else {
1935 /* Fallback to sw based calculation */
1936 size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
1937 size /= adev->gmc.num_mem_partitions;
1938
1939 for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
1940 mem_ranges[i].range.fpfn = start_addr;
1941 mem_ranges[i].size =
1942 ((u64)size << AMDGPU_GPU_PAGE_SHIFT);
1943 mem_ranges[i].range.lpfn = start_addr + size - 1;
1944 start_addr += size;
1945 }
1946
1947 l = adev->gmc.num_mem_partitions - 1;
1948 }
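/*
 * Worked example for the fallback above (numbers assumed): with 16 GiB
 * of VRAM and 4 partitions, size = ((16 GiB + 16 MiB) >> 12) / 4 GPU
 * pages, and range i spans pages [i * size, (i + 1) * size - 1]; the
 * last range is then clamped below so the total matches real_vram_size
 * exactly.
 */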
1949
1950 /* Adjust the last one */
1951 mem_ranges[l].range.lpfn =
1952 (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
1953 mem_ranges[l].size =
1954 adev->gmc.real_vram_size -
1955 ((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT);
1956 }
1957
1958 static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
1959 {
1960 bool valid;
1961
1962 adev->gmc.mem_partitions = kcalloc(MAX_MEM_RANGES,
1963 sizeof(struct amdgpu_mem_partition_info),
1964 GFP_KERNEL);
1965 if (!adev->gmc.mem_partitions)
1966 return -ENOMEM;
1967
1968 /* TODO: Get the range from PSP/Discovery for dGPU */
1969 if (adev->gmc.is_app_apu)
1970 gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
1971 else
1972 gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
1973
1974 if (amdgpu_sriov_vf(adev))
1975 valid = true;
1976 else
1977 valid = gmc_v9_0_validate_partition_info(adev);
1978 if (!valid) {
1979 /* TODO: handle invalid case */
1980 dev_WARN(adev->dev,
1981 "Mem ranges not matching with hardware config");
1982 }
1983
1984 return 0;
1985 }
1986
1987 static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
1988 {
1989 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
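/* assumed: 128 HBM channels x 64 bits each, i.e. an 8192-bit interface */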
1990 adev->gmc.vram_width = 128 * 64;
1991 }
1992
1993 static int gmc_v9_0_sw_init(void *handle)
1994 {
1995 int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits;
1996 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1997 unsigned long inst_mask = adev->aid_mask;
1998
1999 adev->gfxhub.funcs->init(adev);
2000
2001 adev->mmhub.funcs->init(adev);
2002
2003 spin_lock_init(&adev->gmc.invalidate_lock);
2004
2005 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
2006 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
2007 gmc_v9_4_3_init_vram_info(adev);
2008 } else if (!adev->bios) {
2009 if (adev->flags & AMD_IS_APU) {
2010 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
2011 adev->gmc.vram_width = 64 * 64;
2012 } else {
2013 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
2014 adev->gmc.vram_width = 128 * 64;
2015 }
2016 } else {
2017 r = amdgpu_atomfirmware_get_vram_info(adev,
2018 &vram_width, &vram_type, &vram_vendor);
2019 if (amdgpu_sriov_vf(adev))
2020 /* For Vega10 SR-IOV, vram_width can't be read from ATOM as on RAVEN,
2021 * and the DF-related registers are not readable; hardcoding seems to be
2022 * the only way to set the correct vram_width
2023 */
2024 adev->gmc.vram_width = 2048;
2025 else if (amdgpu_emu_mode != 1)
2026 adev->gmc.vram_width = vram_width;
2027
2028 if (!adev->gmc.vram_width) {
2029 int chansize, numchan;
2030
2031 /* hbm memory channel size */
2032 if (adev->flags & AMD_IS_APU)
2033 chansize = 64;
2034 else
2035 chansize = 128;
2036 if (adev->df.funcs &&
2037 adev->df.funcs->get_hbm_channel_number) {
2038 numchan = adev->df.funcs->get_hbm_channel_number(adev);
2039 adev->gmc.vram_width = numchan * chansize;
2040 }
2041 }
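/*
 * e.g. a two-stack HBM2 board reports 16 channels of 128 bits,
 * giving vram_width = 2048.
 */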
2042
2043 adev->gmc.vram_type = vram_type;
2044 adev->gmc.vram_vendor = vram_vendor;
2045 }
2046 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2047 case IP_VERSION(9, 1, 0):
2048 case IP_VERSION(9, 2, 2):
2049 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
2050 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
2051
2052 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
2053 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
2054 } else {
2055 /* vm_size is 128TB + 512GB for legacy 3-level page support */
2056 amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
2057 adev->gmc.translate_further =
2058 adev->vm_manager.num_level > 1;
2059 }
2060 break;
2061 case IP_VERSION(9, 0, 1):
2062 case IP_VERSION(9, 2, 1):
2063 case IP_VERSION(9, 4, 0):
2064 case IP_VERSION(9, 3, 0):
2065 case IP_VERSION(9, 4, 2):
2066 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
2067 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
2068
2069 /*
2070 * To fulfill 4-level page table support,
2071 * the vm size is 256TB (48 bits), the maximum supported by Vega10,
2072 * with a block size of 512 (9 bits)
2073 */
2074
2075 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
2076 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
2077 adev->gmc.translate_further = adev->vm_manager.num_level > 1;
2078 break;
2079 case IP_VERSION(9, 4, 1):
2080 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
2081 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
2082 set_bit(AMDGPU_MMHUB1(0), adev->vmhubs_mask);
2083
2084 /* Keep the vm size the same as Vega20 */
2085 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
2086 adev->gmc.translate_further = adev->vm_manager.num_level > 1;
2087 break;
2088 case IP_VERSION(9, 4, 3):
2089 case IP_VERSION(9, 4, 4):
2090 bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0),
2091 NUM_XCC(adev->gfx.xcc_mask));
2092
2093 inst_mask <<= AMDGPU_MMHUB0(0);
2094 bitmap_or(adev->vmhubs_mask, adev->vmhubs_mask, &inst_mask, 32);
2095
2096 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
2097 adev->gmc.translate_further = adev->vm_manager.num_level > 1;
2098 break;
2099 default:
2100 break;
2101 }
2102
2103 /* This interrupt is for VMC page faults. */
2104 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
2105 &adev->gmc.vm_fault);
2106 if (r)
2107 return r;
2108
2109 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) {
2110 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
2111 &adev->gmc.vm_fault);
2112 if (r)
2113 return r;
2114 }
2115
2116 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
2117 &adev->gmc.vm_fault);
2118
2119 if (r)
2120 return r;
2121
2122 if (!amdgpu_sriov_vf(adev) &&
2123 !adev->gmc.xgmi.connected_to_cpu &&
2124 !adev->gmc.is_app_apu) {
2125 /* interrupt sent to DF. */
2126 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
2127 &adev->gmc.ecc_irq);
2128 if (r)
2129 return r;
2130 }
2131
2132 /* Set the internal MC address mask
2133 * This is the max address of the GPU's
2134 * internal address space.
2135 */
2136 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
2137
2138 dma_addr_bits = amdgpu_ip_version(adev, GC_HWIP, 0) >=
2139 IP_VERSION(9, 4, 2) ?
2140 48 :
2141 44;
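/* DMA_BIT_MASK(n) builds an n-bit address mask; GC 9.4.2 and newer can
 * address 48-bit DMA, while older GMC9 parts are limited to 44 bits.
 */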
2142 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits));
2143 if (r) {
2144 dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
2145 return r;
2146 }
2147 adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits);
2148
2149 r = gmc_v9_0_mc_init(adev);
2150 if (r)
2151 return r;
2152
2153 amdgpu_gmc_get_vbios_allocations(adev);
2154
2155 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
2156 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
2157 r = gmc_v9_0_init_mem_ranges(adev);
2158 if (r)
2159 return r;
2160 }
2161
2162 /* Memory manager */
2163 r = amdgpu_bo_init(adev);
2164 if (r)
2165 return r;
2166
2167 r = gmc_v9_0_gart_init(adev);
2168 if (r)
2169 return r;
2170
2171 /*
2172 * number of VMs
2173 * VMID 0 is reserved for System
2174 * amdgpu graphics/compute will use VMIDs 1..n-1
2175 * amdkfd will use VMIDs n..15
2176 *
2177 * The first KFD VMID is 8 for GPUs with graphics, 3 for
2178 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
2179 * for video processing.
2180 */
2181 adev->vm_manager.first_kfd_vmid =
2182 (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
2183 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
2184 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
2185 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) ?
2186 3 :
2187 8;
2188
2189 amdgpu_vm_manager_init(adev);
2190
2191 gmc_v9_0_save_registers(adev);
2192
2193 r = amdgpu_gmc_ras_sw_init(adev);
2194 if (r)
2195 return r;
2196
2197 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
2198 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
2199 amdgpu_gmc_sysfs_init(adev);
2200
2201 return 0;
2202 }
2203
2204 static int gmc_v9_0_sw_fini(void *handle)
2205 {
2206 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2207
2208 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
2209 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
2210 amdgpu_gmc_sysfs_fini(adev);
2211
2212 amdgpu_gmc_ras_fini(adev);
2213 amdgpu_gem_force_release(adev);
2214 amdgpu_vm_manager_fini(adev);
2215 if (!adev->gmc.real_vram_size) {
2216 dev_info(adev->dev, "Put GART in system memory for APU free\n");
2217 amdgpu_gart_table_ram_free(adev);
2218 } else {
2219 amdgpu_gart_table_vram_free(adev);
2220 }
2221 amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
2222 amdgpu_bo_fini(adev);
2223
2224 adev->gmc.num_mem_partitions = 0;
2225 kfree(adev->gmc.mem_partitions);
2226
2227 return 0;
2228 }
2229
2230 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
2231 {
2232 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
2233 case IP_VERSION(9, 0, 0):
2234 if (amdgpu_sriov_vf(adev))
2235 break;
2236 fallthrough;
2237 case IP_VERSION(9, 4, 0):
2238 soc15_program_register_sequence(adev,
2239 golden_settings_mmhub_1_0_0,
2240 ARRAY_SIZE(golden_settings_mmhub_1_0_0));
2241 soc15_program_register_sequence(adev,
2242 golden_settings_athub_1_0_0,
2243 ARRAY_SIZE(golden_settings_athub_1_0_0));
2244 break;
2245 case IP_VERSION(9, 1, 0):
2246 case IP_VERSION(9, 2, 0):
2247 /* TODO for renoir */
2248 soc15_program_register_sequence(adev,
2249 golden_settings_athub_1_0_0,
2250 ARRAY_SIZE(golden_settings_athub_1_0_0));
2251 break;
2252 default:
2253 break;
2254 }
2255 }
2256
2257 /**
2258 * gmc_v9_0_restore_registers - restore saved registers
2259 *
2260 * @adev: amdgpu_device pointer
2261 *
2262 * This restores the register values saved at suspend.
2263 */
2264 void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
2265 {
2266 if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
2267 (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) {
2268 WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
2269 WARN_ON(adev->gmc.sdpif_register !=
2270 RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
2271 }
2272 }
2273
2274 /**
2275 * gmc_v9_0_gart_enable - gart enable
2276 *
2277 * @adev: amdgpu_device pointer
2278 */
2279 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
2280 {
2281 int r;
2282
2283 if (adev->gmc.xgmi.connected_to_cpu)
2284 amdgpu_gmc_init_pdb0(adev);
2285
2286 if (adev->gart.bo == NULL) {
2287 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
2288 return -EINVAL;
2289 }
2290
2291 amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
2292
2293 if (!adev->in_s0ix) {
2294 r = adev->gfxhub.funcs->gart_enable(adev);
2295 if (r)
2296 return r;
2297 }
2298
2299 r = adev->mmhub.funcs->gart_enable(adev);
2300 if (r)
2301 return r;
2302
2303 DRM_INFO("PCIE GART of %uM enabled.\n",
2304 (unsigned int)(adev->gmc.gart_size >> 20));
2305 if (adev->gmc.pdb0_bo)
2306 DRM_INFO("PDB0 located at 0x%016llX\n",
2307 (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
2308 DRM_INFO("PTB located at 0x%016llX\n",
2309 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
2310
2311 return 0;
2312 }
2313
2314 static int gmc_v9_0_hw_init(void *handle)
2315 {
2316 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2317 bool value;
2318 int i, r;
2319
2320 adev->gmc.flush_pasid_uses_kiq = true;
2321
2322 /* Vega20+XGMI caches PTEs in TC and TLB. Add a heavy-weight TLB flush
2323 * (type 2), which flushes both. Due to a race condition with
2324 * concurrent memory accesses using the same TLB cache line, we still
2325 * need a second TLB flush after this.
2326 */
2327 adev->gmc.flush_tlb_needs_extra_type_2 =
2328 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) &&
2329 adev->gmc.xgmi.num_physical_nodes;
2330 /*
2331 * TODO: This workaround is badly documented and had a buggy
2332 * implementation. We should probably verify what we do here.
2333 */
2334 adev->gmc.flush_tlb_needs_extra_type_0 =
2335 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
2336 adev->rev_id == 0;
2337
2338 /* The sequence of these two function calls matters.*/
2339 gmc_v9_0_init_golden_registers(adev);
2340
2341 if (adev->mode_info.num_crtc) {
2342 /* Lock out access through the VGA aperture */
2343 WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
2344 /* disable VGA render */
2345 WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
2346 }
2347
2348 if (adev->mmhub.funcs->update_power_gating)
2349 adev->mmhub.funcs->update_power_gating(adev, true);
2350
2351 adev->hdp.funcs->init_registers(adev);
2352
2353 /* After HDP is initialized, flush HDP.*/
2354 adev->hdp.funcs->flush_hdp(adev, NULL);
2355
2356 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
2357 value = false;
2358 else
2359 value = true;
2360
2361 if (!amdgpu_sriov_vf(adev)) {
2362 if (!adev->in_s0ix)
2363 adev->gfxhub.funcs->set_fault_enable_default(adev, value);
2364 adev->mmhub.funcs->set_fault_enable_default(adev, value);
2365 }
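/* Flush the TLB of every enabled VM hub; in S0ix the GFX hub was left
 * untouched above, so skip it here as well.
 */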
2366 for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
2367 if (adev->in_s0ix && (i == AMDGPU_GFXHUB(0)))
2368 continue;
2369 gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
2370 }
2371
2372 if (adev->umc.funcs && adev->umc.funcs->init_registers)
2373 adev->umc.funcs->init_registers(adev);
2374
2375 r = gmc_v9_0_gart_enable(adev);
2376 if (r)
2377 return r;
2378
2379 if (amdgpu_emu_mode == 1)
2380 return amdgpu_gmc_vram_checking(adev);
2381
2382 return 0;
2383 }
2384
2385 /**
2386 * gmc_v9_0_gart_disable - gart disable
2387 *
2388 * @adev: amdgpu_device pointer
2389 *
2390 * This disables all VM page tables.
2391 */
2392 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
2393 {
2394 if (!adev->in_s0ix)
2395 adev->gfxhub.funcs->gart_disable(adev);
2396 adev->mmhub.funcs->gart_disable(adev);
2397 }
2398
2399 static int gmc_v9_0_hw_fini(void *handle)
2400 {
2401 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2402
2403 gmc_v9_0_gart_disable(adev);
2404
2405 if (amdgpu_sriov_vf(adev)) {
2406 /* full access mode, so don't touch any GMC register */
2407 DRM_DEBUG("For an SR-IOV client, nothing to do here.\n");
2408 return 0;
2409 }
2410
2411 /*
2412 * Pair the operations done in gmc_v9_0_hw_init so that GMC keeps a
2413 * correct cached state. Otherwise, the "gate" operation on S3
2414 * resume will fail due to a wrong cached state.
2415 */
2416 if (adev->mmhub.funcs->update_power_gating)
2417 adev->mmhub.funcs->update_power_gating(adev, false);
2418
2419 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
2420
2421 if (adev->gmc.ecc_irq.funcs &&
2422 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
2423 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
2424
2425 return 0;
2426 }
2427
2428 static int gmc_v9_0_suspend(void *handle)
2429 {
2430 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2431
2432 return gmc_v9_0_hw_fini(adev);
2433 }
2434
2435 static int gmc_v9_0_resume(void *handle)
2436 {
2437 int r;
2438 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2439
2440 r = gmc_v9_0_hw_init(adev);
2441 if (r)
2442 return r;
2443
2444 amdgpu_vmid_reset_all(adev);
2445
2446 return 0;
2447 }
2448
2449 static bool gmc_v9_0_is_idle(void *handle)
2450 {
2451 /* MC is always ready in GMC v9.*/
2452 return true;
2453 }
2454
2455 static int gmc_v9_0_wait_for_idle(void *handle)
2456 {
2457 /* There is no need to wait for MC idle in GMC v9.*/
2458 return 0;
2459 }
2460
2461 static int gmc_v9_0_soft_reset(void *handle)
2462 {
2463 /* XXX for emulation.*/
2464 return 0;
2465 }
2466
2467 static int gmc_v9_0_set_clockgating_state(void *handle,
2468 enum amd_clockgating_state state)
2469 {
2470 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2471
2472 adev->mmhub.funcs->set_clockgating(adev, state);
2473
2474 athub_v1_0_set_clockgating(adev, state);
2475
2476 return 0;
2477 }
2478
2479 static void gmc_v9_0_get_clockgating_state(void *handle, u64 *flags)
2480 {
2481 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2482
2483 adev->mmhub.funcs->get_clockgating(adev, flags);
2484
2485 athub_v1_0_get_clockgating(adev, flags);
2486 }
2487
2488 static int gmc_v9_0_set_powergating_state(void *handle,
2489 enum amd_powergating_state state)
2490 {
2491 return 0;
2492 }
2493
2494 const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
2495 .name = "gmc_v9_0",
2496 .early_init = gmc_v9_0_early_init,
2497 .late_init = gmc_v9_0_late_init,
2498 .sw_init = gmc_v9_0_sw_init,
2499 .sw_fini = gmc_v9_0_sw_fini,
2500 .hw_init = gmc_v9_0_hw_init,
2501 .hw_fini = gmc_v9_0_hw_fini,
2502 .suspend = gmc_v9_0_suspend,
2503 .resume = gmc_v9_0_resume,
2504 .is_idle = gmc_v9_0_is_idle,
2505 .wait_for_idle = gmc_v9_0_wait_for_idle,
2506 .soft_reset = gmc_v9_0_soft_reset,
2507 .set_clockgating_state = gmc_v9_0_set_clockgating_state,
2508 .set_powergating_state = gmc_v9_0_set_powergating_state,
2509 .get_clockgating_state = gmc_v9_0_get_clockgating_state,
2510 };
2511
2512 const struct amdgpu_ip_block_version gmc_v9_0_ip_block = {
2513 .type = AMD_IP_BLOCK_TYPE_GMC,
2514 .major = 9,
2515 .minor = 0,
2516 .rev = 0,
2517 .funcs = &gmc_v9_0_ip_funcs,
2518 };
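/*
 * Usage sketch (illustrative): SoC setup code registers this IP block
 * during early init, e.g.:
 *
 *	r = amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
 *	if (r)
 *		return r;
 */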
2519