/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"

#include "soc15_common.h"
#include "amdgpu_reg_state.h"
#include "amdgpu_xcp.h"
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"

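/*
 * Mask of num_inst contiguous instances belonging to partition xcp_id,
 * e.g. XCP_INST_MASK(2, 1) = GENMASK(1, 0) << 2 = 0xc.
 */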
#define XCP_INST_MASK(num_inst, xcp_id) \
	(num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)

#define AMDGPU_XCP_OPS_KFD	(1 << 0)

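/*
 * Populate the LAYOUT1 doorbell index map. Each SDMA engine takes half of
 * the 20-doorbell SDMA range (a stride of 10 per engine); max_assignment is
 * shifted left by one, presumably to express it as a 32-bit doorbell index.
 */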
void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
	int i;

	adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;

	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;

	adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
	adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;

	adev->doorbell_index.sdma_doorbell_range = 20;
	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->doorbell_index.sdma_engine[i] =
			AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
			i * (adev->doorbell_index.sdma_doorbell_range >> 1);

	adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;

	adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}

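/* True when there are more partitions than VCN instances, i.e. at least
 * one VCN instance is shared by two partitions (CPX mode on some configs).
 */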
static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
{
	return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
}

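/* Bind a ring to the partition (xcp_id) whose IP instance mask contains
 * the ring's hardware instance.
 */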
static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
				     uint32_t inst_idx, struct amdgpu_ring *ring)
{
	int xcp_id;
	enum AMDGPU_XCP_IP_BLOCK ip_blk;
	uint32_t inst_mask;

	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
		adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return;

	inst_mask = 1 << inst_idx;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
	case AMDGPU_RING_TYPE_COMPUTE:
	case AMDGPU_RING_TYPE_KIQ:
		ip_blk = AMDGPU_XCP_GFX;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ip_blk = AMDGPU_XCP_SDMA;
		break;
	case AMDGPU_RING_TYPE_VCN_ENC:
	case AMDGPU_RING_TYPE_VCN_JPEG:
		ip_blk = AMDGPU_XCP_VCN;
		break;
	default:
		DRM_ERROR("Unsupported ring type %d!", ring->funcs->type);
		return;
	}

	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
			ring->xcp_id = xcp_id;
			dev_dbg(adev->dev, "ring:%s xcp_id:%u", ring->name,
				ring->xcp_id);
			if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
				adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
			break;
		}
	}
}

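/* Append the ring's drm_gpu_scheduler to the selected partition's list for
 * the ring's type and hardware priority.
 */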
static void aqua_vanjaram_xcp_gpu_sched_update(
		struct amdgpu_device *adev,
		struct amdgpu_ring *ring,
		unsigned int sel_xcp_id)
{
	unsigned int *num_gpu_sched;

	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
			.sched[(*num_gpu_sched)++] = &ring->sched;
	DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
		  sel_xcp_id, ring->funcs->type,
		  ring->hw_prio, *num_gpu_sched);
}

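/* Rebuild every partition's scheduler lists from the set of ready rings;
 * a shared VCN ring is also added to the neighbouring partition's list.
 */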
static int aqua_vanjaram_xcp_sched_list_update(
		struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i;

	for (i = 0; i < MAX_XCP; i++) {
		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
	}

	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		ring = adev->rings[i];
		if (!ring || !ring->sched.ready || ring->no_scheduler)
			continue;

		aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);

		/* VCN may be shared by two partitions under CPX MODE in certain
		 * configs.
		 */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
		    aqua_vanjaram_xcp_vcn_shared(adev))
			aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
	}

	return 0;
}

static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
		else
			aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
	}

	return aqua_vanjaram_xcp_sched_list_update(adev);
}

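/* Select schedulers for a new job: if the file has no partition bound yet,
 * bind it to the least-referenced one, then return that partition's
 * scheduler list for the requested HW IP and priority.
 */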
static int aqua_vanjaram_select_scheds(
		struct amdgpu_device *adev,
		u32 hw_ip,
		u32 hw_prio,
		struct amdgpu_fpriv *fpriv,
		unsigned int *num_scheds,
		struct drm_gpu_scheduler ***scheds)
{
	u32 sel_xcp_id;
	int i;

	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
		u32 least_ref_cnt = ~0;

		fpriv->xcp_id = 0;
		for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
			u32 total_ref_cnt;

			total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
			if (total_ref_cnt < least_ref_cnt) {
				fpriv->xcp_id = i;
				least_ref_cnt = total_ref_cnt;
			}
		}
	}
	sel_xcp_id = fpriv->xcp_id;

	if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
		*num_scheds = adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
		*scheds = adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].sched;
		atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
		DRM_DEBUG("Selected partition #%d", sel_xcp_id);
	} else {
		DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
		return -ENOENT;
	}

	return 0;
}

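/* Translate a logical instance of an IP block to its device (physical)
 * instance; only GC, SDMA and VCN need the ip_map lookup.
 */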
static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
						enum amd_hw_ip_block_type block,
						int8_t inst)
{
	int8_t dev_inst;

	switch (block) {
	case GC_HWIP:
	case SDMA0_HWIP:
	/* Both JPEG and VCN, as JPEG is only an alias of VCN */
	case VCN_HWIP:
		dev_inst = adev->ip_map.dev_inst[block][inst];
		break;
	default:
		/* For the rest of the IPs, no lookup is required.
		 * Assume 'logical instance == physical instance' for all configs.
		 */
		dev_inst = inst;
		break;
	}

	return dev_inst;
}

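/* Translate a mask of logical instances to the corresponding device
 * instance mask, one set bit at a time.
 */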
static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
						  enum amd_hw_ip_block_type block,
						  uint32_t mask)
{
	uint32_t dev_mask = 0;
	int8_t log_inst, dev_inst;

	while (mask) {
		log_inst = ffs(mask) - 1;
		dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
		dev_mask |= (1 << dev_inst);
		mask &= ~(1 << log_inst);
	}

	return dev_mask;
}

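/* Fill the logical-to-device instance table of an IP block from its
 * instance mask; unused slots are set to -1.
 */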
static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
					  enum amd_hw_ip_block_type ip_block,
					  uint32_t inst_mask)
{
	int l = 0, i;

	while (inst_mask) {
		i = ffs(inst_mask) - 1;
		adev->ip_map.dev_inst[ip_block][l++] = i;
		inst_mask &= ~(1 << i);
	}
	for (; l < HWIP_MAX_INSTANCE; l++)
		adev->ip_map.dev_inst[ip_block][l] = -1;
}

void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
{
	u32 ip_map[][2] = {
		{ GC_HWIP, adev->gfx.xcc_mask },
		{ SDMA0_HWIP, adev->sdma.sdma_mask },
		{ VCN_HWIP, adev->vcn.inst_mask },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
		aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);

	adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
	adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
}

/* Fixed pattern for smn addressing on different AIDs:
 * bit[34]: indicates cross-AID access
 * bit[33:32]: indicates the target AID id
 * AID id range is 0 ~ 3, as the maximum AID number is 4.
 */
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
{
	u64 ext_offset;

	/* local routing, bits [34:32] will be zero */
	if (ext_id == 0)
		return 0;

	/* Initiated from the host, accesses to all non-zero AIDs are cross traffic */
	ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);

	return ext_offset;
}

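/* Derive the partition mode from the ratio of XCCs to XCCs per XCP, e.g.
 * 8 XCCs grouped 4 per XCP gives 2 partitions, i.e. DPX mode.
 */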
static enum amdgpu_gfx_partition
__aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xcc_per_xcp = 0, mode = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
	if (adev->gfx.funcs->get_xccs_per_xcp)
		num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
	if ((num_xcc_per_xcp) && (num_xcc % num_xcc_per_xcp == 0))
		mode = num_xcc / num_xcc_per_xcp;

	if (num_xcc_per_xcp == 1)
		return AMDGPU_CPX_PARTITION_MODE;

	switch (mode) {
	case 1:
		return AMDGPU_SPX_PARTITION_MODE;
	case 2:
		return AMDGPU_DPX_PARTITION_MODE;
	case 3:
		return AMDGPU_TPX_PARTITION_MODE;
	case 4:
		return AMDGPU_QPX_PARTITION_MODE;
	default:
		return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	}
}

static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	enum amdgpu_gfx_partition derv_mode,
		mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	struct amdgpu_device *adev = xcp_mgr->adev;

	derv_mode = __aqua_vanjaram_calc_xcp_mode(xcp_mgr);

	if (amdgpu_sriov_vf(adev))
		return derv_mode;

	if (adev->nbio.funcs->get_compute_partition_mode) {
		mode = adev->nbio.funcs->get_compute_partition_mode(adev);
		if (mode != derv_mode)
			dev_warn(
				adev->dev,
				"Mismatch in compute partition mode - reported : %d derived : %d",
				mode, derv_mode);
	}

	return mode;
}

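/* Number of XCCs grouped into one partition for the given mode: all of
 * them for SPX, half for DPX, down to one each for CPX.
 */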
static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	int num_xcc, num_xcc_per_xcp = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcc_per_xcp = 1;
		break;
	}

	return num_xcc_per_xcp;
}

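/* Fill in the instance mask and IP funcs of one IP block for a given
 * partition. SDMA and VCN instances are divided evenly across partitions,
 * and one VCN instance may be shared by several partitions.
 */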
static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
					   enum AMDGPU_XCP_IP_BLOCK ip_id,
					   struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_sdma, num_vcn, num_shared_vcn, num_xcp;
	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;

	num_sdma = adev->sdma.num_instances;
	num_vcn = adev->vcn.num_vcn_inst;
	num_shared_vcn = 1;

	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
	num_xcp = NUM_XCC(adev->gfx.xcc_mask) / num_xcc_xcp;

	switch (xcp_mgr->mode) {
	case AMDGPU_SPX_PARTITION_MODE:
	case AMDGPU_DPX_PARTITION_MODE:
	case AMDGPU_TPX_PARTITION_MODE:
	case AMDGPU_QPX_PARTITION_MODE:
	case AMDGPU_CPX_PARTITION_MODE:
		num_sdma_xcp = DIV_ROUND_UP(num_sdma, num_xcp);
		num_vcn_xcp = DIV_ROUND_UP(num_vcn, num_xcp);
		break;
	default:
		return -EINVAL;
	}

	if (num_vcn && num_xcp > num_vcn)
		num_shared_vcn = num_xcp / num_vcn;

	switch (ip_id) {
	case AMDGPU_XCP_GFXHUB:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
		break;
	case AMDGPU_XCP_GFX:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
		break;
	case AMDGPU_XCP_SDMA:
		ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
		ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
		break;
	case AMDGPU_XCP_VCN:
		ip->inst_mask =
			XCP_INST_MASK(num_vcn_xcp, xcp_id / num_shared_vcn);
		/* TODO : Assign IP funcs */
		break;
	default:
		return -EINVAL;
	}

	ip->ip_id = ip_id;

	return 0;
}

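/* Pick a compute partition mode that matches the current memory partition
 * count: one memory partition allows SPX, as many memory partitions as
 * XCCs implies CPX, and so on.
 */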
static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	if (adev->gmc.num_mem_partitions == 1)
		return AMDGPU_SPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc)
		return AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc / 2)
		return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
						    AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
		return AMDGPU_DPX_PARTITION_MODE;

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

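/* Check that the requested compute partition mode is consistent with both
 * the XCC count and the current memory partition count.
 */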
static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					  enum amdgpu_gfx_partition mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xccs_per_xcp;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
	case AMDGPU_DPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0;
	case AMDGPU_TPX_PARTITION_MODE:
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 3) &&
		       ((num_xcc % 3) == 0);
	case AMDGPU_QPX_PARTITION_MODE:
		num_xccs_per_xcp = num_xcc / 4;
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 4) &&
		       (num_xccs_per_xcp >= 2);
	case AMDGPU_CPX_PARTITION_MODE:
		return ((num_xcc > 1) &&
			(adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 4) &&
			(num_xcc % adev->gmc.num_mem_partitions) == 0);
	default:
		return false;
	}
}

static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	/* TODO:
	 * Stop user queues and threads, and make sure GPU is empty of work.
	 */

	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);

	return 0;
}

static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int ret = 0;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
		amdgpu_amdkfd_device_init(xcp_mgr->adev);
		/* If KFD init failed, return failure */
		if (!xcp_mgr->adev->kfd.init_complete)
			ret = -EIO;
	}

	return ret;
}

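/* Switch the compute partition mode: validate (or auto-select) the mode,
 * quiesce and lock KFD, program the new XCC grouping in hardware, re-init
 * the XCP structures for the new partition count and bring KFD back up.
 */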
static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					       int mode, int *num_xcps)
{
	int num_xcc_per_xcp, num_xcc, ret;
	struct amdgpu_device *adev;
	u32 flags = 0;

	adev = xcp_mgr->adev;
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
			dev_err(adev->dev,
				"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
				adev->gmc.num_mem_partitions);
			return -EINVAL;
		}
	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
		dev_err(adev->dev,
			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
		return -EINVAL;
	}

	if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
		flags |= AMDGPU_XCP_OPS_KFD;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
		if (ret)
			goto out;
	}

	ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
	if (ret)
		goto unlock;

	num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
	if (adev->gfx.funcs->switch_partition_mode)
		adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
						       num_xcc_per_xcp);

	/* Init info about new xcps */
	*num_xcps = num_xcc / num_xcc_per_xcp;
	amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

	ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
unlock:
	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_unlock_kfd(adev);
out:
	return ret;
}

static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
					  int xcc_id, uint8_t *mem_id)
{
	/* memory/spatial modes validation check is already done */
	*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
	*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

	return 0;
}

static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
					struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
	struct amdgpu_numa_info numa_info;
	struct amdgpu_device *adev;
	uint32_t xcc_mask;
	int r, i, xcc_id;

	adev = xcp_mgr->adev;
	/* TODO: BIOS is not returning the right info now
	 * Check on this later
	 */
	/*
	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	*/
	if (adev->gmc.num_mem_partitions == 1) {
		/* Only one range */
		*mem_id = 0;
		return 0;
	}

	r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
	if (r || !xcc_mask)
		return -EINVAL;

	xcc_id = ffs(xcc_mask) - 1;
	if (!adev->gmc.is_app_apu)
		return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);

	r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);

	if (r)
		return r;

	r = -EINVAL;
	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
		if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
			*mem_id = i;
			r = 0;
			break;
		}
	}

	return r;
}

static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
					    enum AMDGPU_XCP_IP_BLOCK ip_id,
					    struct amdgpu_xcp_ip *ip)
{
	if (!ip)
		return -EINVAL;

	return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}

struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
	.switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
	.query_partition_mode = &aqua_vanjaram_query_partition_mode,
	.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
	.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
	.select_scheds = &aqua_vanjaram_select_scheds,
	.update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list
};

static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
{
	int ret;

	if (amdgpu_sriov_vf(adev))
		aqua_vanjaram_xcp_funcs.switch_partition_mode = NULL;

	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
				  &aqua_vanjaram_xcp_funcs);
	if (ret)
		return ret;

	/* TODO: Default memory node affinity init */

	return ret;
}

int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
	u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask;
	int ret, i;

	/* generally 1 AID supports 4 instances */
	adev->sdma.num_inst_per_aid = 4;
	adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);

	adev->aid_mask = i = 1;
	inst_mask >>= adev->sdma.num_inst_per_aid;

	for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
	     inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
		avail_inst = inst_mask & mask;
		if (avail_inst == mask || avail_inst == 0x3 ||
		    avail_inst == 0xc)
			adev->aid_mask |= (1 << i);
	}

	/* Harvest config is not used for aqua vanjaram. VCN and JPEG instances
	 * are addressed based on logical instance ids.
	 */
	adev->vcn.harvest_config = 0;
	adev->vcn.num_inst_per_aid = 1;
	adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
	adev->jpeg.harvest_config = 0;
	adev->jpeg.num_inst_per_aid = 1;
	adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);

	ret = aqua_vanjaram_xcp_mgr_init(adev);
	if (ret)
		return ret;

	aqua_vanjaram_ip_map_init(adev);

	return 0;
}

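/* The helpers below snapshot SMN registers for the register state dump,
 * presumably consumed through the amdgpu reg_state interface.
 */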
static void aqua_read_smn(struct amdgpu_device *adev,
			  struct amdgpu_smn_reg_data *regdata,
			  uint64_t smn_addr)
{
	regdata->addr = smn_addr;
	regdata->value = RREG32_PCIE(smn_addr);
}

struct aqua_reg_list {
	uint64_t start_addr;
	uint32_t num_regs;
	uint32_t incrx;
};

#define DW_ADDR_INCR	4

static void aqua_read_smn_ext(struct amdgpu_device *adev,
			      struct amdgpu_smn_reg_data *regdata,
			      uint64_t smn_addr, int i)
{
	regdata->addr =
		smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
	regdata->value = RREG32_PCIE_EXT(regdata->addr);
}

#define smnreg_0x1A340218	0x1A340218
#define smnreg_0x1A3402E4	0x1A3402E4
#define smnreg_0x1A340294	0x1A340294
#define smreg_0x1A380088	0x1A380088

#define NUM_PCIE_SMN_REGS	14

static struct aqua_reg_list pcie_reg_addrs[] = {
	{ smnreg_0x1A340218, 1, 0 },
	{ smnreg_0x1A3402E4, 1, 0 },
	{ smnreg_0x1A340294, 6, DW_ADDR_INCR },
	{ smreg_0x1A380088, 6, DW_ADDR_INCR },
};

static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_pcie_v1_0 *pcie_regs;
	struct amdgpu_smn_reg_data *reg_data;
	struct pci_dev *us_pdev, *ds_pdev;
	int aer_cap, r, n;

	if (!buf || !max_size)
		return -EINVAL;

	pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;

	szbuf = sizeof(*pcie_reg_state) +
		amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
	/* Only one instance of pcie regs */
	if (max_size < szbuf)
		return -EOVERFLOW;

	pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
						     sizeof(*pcie_reg_state));
	pcie_regs->inst_header.instance = 0;
	pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
	pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;

	reg_data = pcie_regs->smn_reg_values;

	for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
		start_addr = pcie_reg_addrs[r].start_addr;
		incrx = pcie_reg_addrs[r].incrx;
		num_regs = pcie_reg_addrs[r].num_regs;
		for (n = 0; n < num_regs; n++) {
			aqua_read_smn(adev, reg_data, start_addr + n * incrx);
			++reg_data;
		}
	}

	ds_pdev = pci_upstream_bridge(adev->pdev);
	us_pdev = pci_upstream_bridge(ds_pdev);

	pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
				  &pcie_regs->device_status);
	pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
				  &pcie_regs->link_status);

	aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
	if (aer_cap) {
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
				      &pcie_regs->pcie_corr_err_status);
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
				      &pcie_regs->pcie_uncorr_err_status);
	}

	pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
			      &pcie_regs->sub_bus_number_latency);

	pcie_reg_state->common_header.structure_size = szbuf;
	pcie_reg_state->common_header.format_revision = 1;
	pcie_reg_state->common_header.content_revision = 0;
	pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
	pcie_reg_state->common_header.num_instances = 1;

	return pcie_reg_state->common_header.structure_size;
}

#define smnreg_0x11A00050	0x11A00050
#define smnreg_0x11A00180	0x11A00180
#define smnreg_0x11A00070	0x11A00070
#define smnreg_0x11A00200	0x11A00200
#define smnreg_0x11A0020C	0x11A0020C
#define smnreg_0x11A00210	0x11A00210
#define smnreg_0x11A00108	0x11A00108

#define XGMI_LINK_REG(smnreg, l)	((smnreg) | (l << 20))

#define NUM_XGMI_SMN_REGS	25

static struct aqua_reg_list xgmi_reg_addrs[] = {
	{ smnreg_0x11A00050, 1, 0 },
	{ smnreg_0x11A00180, 16, DW_ADDR_INCR },
	{ smnreg_0x11A00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11A00200, 1, 0 },
	{ smnreg_0x11A0020C, 1, 0 },
	{ smnreg_0x11A00210, 1, 0 },
	{ smnreg_0x11A00108, 1, 0 },
};

static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_xgmi_instances = 8;
	int inst = 0, i, j, r, n;
	const int xgmi_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;

	szbuf = sizeof(*xgmi_reg_state) +
		amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
				    NUM_XGMI_SMN_REGS);
	/* Buffer must hold regs for all xgmi instances */
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &xgmi_reg_state->xgmi_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < xgmi_inst; ++j) {
			xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
			xgmi_regs->inst_header.instance = inst++;

			xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
			xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;

			reg_data = xgmi_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
				start_addr = xgmi_reg_addrs[r].start_addr;
				incrx = xgmi_reg_addrs[r].incrx;
				num_regs = xgmi_reg_addrs[r].num_regs;

				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						XGMI_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	xgmi_reg_state->common_header.structure_size = szbuf;
	xgmi_reg_state->common_header.format_revision = 1;
	xgmi_reg_state->common_header.content_revision = 0;
	xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
	xgmi_reg_state->common_header.num_instances = max_xgmi_instances;

	return xgmi_reg_state->common_header.structure_size;
}

#define smnreg_0x11C00070	0x11C00070
#define smnreg_0x11C00210	0x11C00210

static struct aqua_reg_list wafl_reg_addrs[] = {
	{ smnreg_0x11C00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11C00210, 1, 0 },
};

#define WAFL_LINK_REG(smnreg, l)	((smnreg) | (l << 20))

#define NUM_WAFL_SMN_REGS	5

static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_wafl_v1_0 *wafl_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_wafl_instances = 8;
	int inst = 0, i, j, r, n;
	const int wafl_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;

	szbuf = sizeof(*wafl_reg_state) +
		amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
				    NUM_WAFL_SMN_REGS);

	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &wafl_reg_state->wafl_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < wafl_inst; ++j) {
			wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
			wafl_regs->inst_header.instance = inst++;

			wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
			wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;

			reg_data = wafl_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
				start_addr = wafl_reg_addrs[r].start_addr;
				incrx = wafl_reg_addrs[r].incrx;
				num_regs = wafl_reg_addrs[r].num_regs;
				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						WAFL_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	wafl_reg_state->common_header.structure_size = szbuf;
	wafl_reg_state->common_header.format_revision = 1;
	wafl_reg_state->common_header.content_revision = 0;
	wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
	wafl_reg_state->common_header.num_instances = max_wafl_instances;

	return wafl_reg_state->common_header.structure_size;
}

#define smnreg_0x1B311060	0x1B311060
#define smnreg_0x1B411060	0x1B411060
#define smnreg_0x1B511060	0x1B511060
#define smnreg_0x1B611060	0x1B611060

#define smnreg_0x1C307120	0x1C307120
#define smnreg_0x1C317120	0x1C317120

#define smnreg_0x1C320830	0x1C320830
#define smnreg_0x1C380830	0x1C380830
#define smnreg_0x1C3D0830	0x1C3D0830
#define smnreg_0x1C420830	0x1C420830

#define smnreg_0x1C320100	0x1C320100
#define smnreg_0x1C380100	0x1C380100
#define smnreg_0x1C3D0100	0x1C3D0100
#define smnreg_0x1C420100	0x1C420100

#define smnreg_0x1B310500	0x1B310500
#define smnreg_0x1C300400	0x1C300400

#define USR_CAKE_INCR	0x11000
#define USR_LINK_INCR	0x100000
#define USR_CP_INCR	0x10000

#define NUM_USR_SMN_REGS	20

struct aqua_reg_list usr_reg_addrs[] = {
	{ smnreg_0x1B311060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B411060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B511060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B611060, 4, DW_ADDR_INCR },
	{ smnreg_0x1C307120, 2, DW_ADDR_INCR },
	{ smnreg_0x1C317120, 2, DW_ADDR_INCR },
};

#define NUM_USR1_SMN_REGS	46
struct aqua_reg_list usr1_reg_addrs[] = {
	{ smnreg_0x1C320830, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420830, 4, USR_CAKE_INCR },
	{ smnreg_0x1C320100, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420100, 4, USR_CAKE_INCR },
	{ smnreg_0x1B310500, 4, USR_LINK_INCR },
	{ smnreg_0x1C300400, 2, USR_CP_INCR },
};

static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
					    void *buf, size_t max_size,
					    int reg_state)
{
	uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
	struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
	struct amdgpu_regs_usr_v1_0 *usr_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_usr_instances = 4;
	struct aqua_reg_list *reg_addrs;
	int inst = 0, i, n, r, arr_size;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_USR:
		arr_size = ARRAY_SIZE(usr_reg_addrs);
		reg_addrs = usr_reg_addrs;
		num_smn = NUM_USR_SMN_REGS;
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		arr_size = ARRAY_SIZE(usr1_reg_addrs);
		reg_addrs = usr1_reg_addrs;
		num_smn = NUM_USR1_SMN_REGS;
		break;
	default:
		return -EINVAL;
	}

	usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;

	szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
							     sizeof(*usr_regs),
							     num_smn);
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &usr_reg_state->usr_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
		usr_regs->inst_header.instance = inst++;
		usr_regs->inst_header.state = AMDGPU_INST_S_OK;
		usr_regs->inst_header.num_smn_regs = num_smn;
		reg_data = usr_regs->smn_reg_values;

		for (r = 0; r < arr_size; r++) {
			start_addr = reg_addrs[r].start_addr;
			incrx = reg_addrs[r].incrx;
			num_regs = reg_addrs[r].num_regs;
			for (n = 0; n < num_regs; n++) {
				aqua_read_smn_ext(adev, reg_data,
						  start_addr + n * incrx, i);
				reg_data++;
			}
		}
		p = reg_data;
	}

	usr_reg_state->common_header.structure_size = szbuf;
	usr_reg_state->common_header.format_revision = 1;
	usr_reg_state->common_header.content_revision = 0;
	usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR;
	usr_reg_state->common_header.num_instances = max_usr_instances;

	return usr_reg_state->common_header.structure_size;
}

ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
				    enum amdgpu_reg_state reg_state, void *buf,
				    size_t max_size)
{
	ssize_t size;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_PCIE:
		size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_XGMI:
		size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_WAFL:
		size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_USR:
		size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
						    AMDGPU_REG_STATE_TYPE_USR);
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		size = aqua_vanjaram_read_usr_state(
			adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
		break;
	default:
		return -EINVAL;
	}

	return size;
}