/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

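/*
 * Each process gets a doorbell slice large enough for its maximum queue
 * count: 1024 queues * 8 bytes = 8 KiB, rounded up to a whole number of
 * pages (two pages on systems with 4 KiB pages).
 */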
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

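/*
 * Reserve one kernel doorbell from the MES bitmap. SDMA queues start the
 * bitmap search at the first SDMA doorbell slot; everything else searches
 * from slot 0. The returned index is in dwords, hence the "* 2": each
 * 64-bit doorbell occupies two dwords.
 */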
static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					 int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					   uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

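/*
 * MES kernel doorbells live in a single page: num_mes_dbs works out to
 * PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE (512 slots with 4 KiB pages), and
 * the first AMDGPU_MES_PRIORITY_NUM_LEVELS slots are reserved up front
 * for the per-priority aggregated doorbells.
 */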
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_mes_log_enable)
		return 0;

	r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
				    &adev->mes.event_log_cpu_addr);
	if (r) {
		dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
		return r;
	}

	memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size);

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

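/*
 * amdgpu_mes_init() sets up everything the scheduler front end needs
 * before the MES IP block itself is brought up: ID allocators for
 * processes (by PASID), gangs and queues, the HQD masks telling MES
 * which hardware queue slots it may use, per-pipe writeback slots for
 * scheduler context and query-status fences, the kernel doorbell bitmap
 * and, optionally, the event log buffer.
 */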
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	mutex_init(&adev->mes.mutex_hidden);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++)
		spin_lock_init(&adev->mes.ring_lock[i]);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= adev->gfx.mec.num_pipe_per_mec)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
		    IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]);
		if (r) {
			dev_err(adev->dev,
				"(%d) sch_ctx_offs wb alloc failed\n",
				r);
			goto error;
		}
		adev->mes.sch_ctx_gpu_addr[i] =
			adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4);
		adev->mes.sch_ctx_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]];

		r = amdgpu_device_wb_get(adev,
				 &adev->mes.query_status_fence_offs[i]);
		if (r) {
			dev_err(adev->dev,
			      "(%d) query_status_fence_offs wb alloc failed\n",
			      r);
			goto error;
		}
		adev->mes.query_status_fence_gpu_addr[i] = adev->wb.gpu_addr +
			(adev->mes.query_status_fence_offs[i] * 4);
		adev->mes.query_status_fence_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	r = amdgpu_mes_event_log_init(adev);
	if (r)
		goto error_doorbell;

	return 0;

error_doorbell:
	amdgpu_mes_doorbell_free(adev);
error:
	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
				      adev->mes.query_status_fence_offs[i]);
	}
	if (adev->mes.read_val_ptr)
		amdgpu_device_wb_free(adev, adev->mes.read_val_offs);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	int i;

	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
			      &adev->mes.event_log_gpu_addr,
			      &adev->mes.event_log_cpu_addr);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
				      adev->mes.query_status_fence_offs[i]);
	}
	if (adev->mes.read_val_ptr)
		amdgpu_device_wb_free(adev, adev->mes.read_val_offs);

	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

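/*
 * MES objects form a simple hierarchy: a process (identified by PASID)
 * owns gangs, and each gang owns hardware queues. A typical in-kernel
 * user, such as the self test at the bottom of this file, builds it up
 * roughly like this (error handling omitted for brevity):
 *
 *	amdgpu_mes_create_process(adev, pasid, vm);
 *	amdgpu_mes_add_gang(adev, pasid, &gang_props, &gang_id);
 *	amdgpu_mes_add_hw_queue(adev, gang_id, &queue_props, &queue_id);
 */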
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for pasid=%d\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}

void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct mes_suspend_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_suspend_gang_input));
	input.suspend_all_gangs = 1;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to suspend all gangs");

	return r;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct mes_resume_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_resume_gang_input));
	input.resume_all_gangs = 1;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->resume_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to resume all gangs");

	return r;
}

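/*
 * Every MES queue is backed by a memory queue descriptor (MQD) in GTT.
 * amdgpu_mes_queue_alloc_mqd() creates and zeroes the MQD BO and leaves
 * it reserved; amdgpu_mes_queue_init_mqd() fills it in via the per-type
 * MQD manager and drops the reservation.
 */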
static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}

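/*
 * Note that the page table base handed to MES below is an MC address:
 * the VRAM base offset plus the page directory's GPU address relative
 * to the start of VRAM. The process VA range advertised to MES spans
 * the whole address space supported by the VM manager.
 */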
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev,
					  qprops->queue_type,
					  &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_reset_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* look up the mes queue in the idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to reset hardware queue, queue id = %d\n",
			  queue_id);

	amdgpu_mes_unlock(&adev->mes);

	return 0;
}

int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
				   int me_id, int pipe_id, int queue_id, int vmid)
{
	struct mes_reset_queue_input queue_input;
	int r;

	queue_input.queue_type = queue_type;
	queue_input.use_mmio = true;
	queue_input.me_id = me_id;
	queue_input.pipe_id = pipe_id;
	queue_input.queue_id = queue_id;
	queue_input.vmid = vmid;
	r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n",
			  queue_id);
	return r;
}

int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	struct mes_map_legacy_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	queue_input.wptr_addr = ring->wptr_gpu_addr;

	r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to map legacy queue\n");

	return r;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  unsigned int vmid,
				  bool use_mmio)
{
	struct mes_reset_legacy_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.me_id = ring->me;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	queue_input.wptr_addr = ring->wptr_gpu_addr;
	queue_input.vmid = vmid;
	queue_input.use_mmio = use_mmio;

	r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to reset legacy queue\n");

	return r;
}

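/*
 * Register access routed through the MES misc op interface, used when
 * the driver cannot touch a register directly (for example under
 * SR-IOV). A read works by asking MES to copy the register value into
 * the pre-allocated read_val writeback slot, e.g.:
 *
 *	val = amdgpu_mes_rreg(adev, reg_offset);
 *	amdgpu_mes_wreg(adev, reg_offset, val | some_bit);
 */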
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				uint64_t process_context_addr,
				uint32_t spi_gdbg_per_vmid_cntl,
				const uint32_t *tcp_watch_cntl,
				uint32_t flags,
				bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu_mes_flush_shader_debugger instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
			sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
			AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to flush_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

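/*
 * Helper macro for amdgpu_mes_ctx_get_offs(): for the given engine
 * array in struct amdgpu_mes_ctx_meta_data, translate an offset ID to
 * the byte offset of that ring's slot, ring buffer, IB or padding area
 * within the context metadata BO.
 */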
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r) {
		amdgpu_mes_unlock(&adev->mes);
		goto clean_up_memory;
	}

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	amdgpu_ring_fini(ring);
clean_up_memory:
	kfree(ring);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	del_timer_sync(&ring->fence_drv.fallback_timer);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						   enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}


int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
			    sizeof(struct amdgpu_mes_ctx_meta_data),
			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
			    &ctx_data->meta_data_obj,
			    &ctx_data->meta_data_mc_addr,
			    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}


int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
					  int pasid, int *gang_id,
					  int queue_type, int num_queue,
					  struct amdgpu_ring **added_rings,
					  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		} else
			DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

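/*
 * Self test: build a throwaway process with one gang per queue type
 * (GFX, compute, SDMA), run ring and IB tests on every queue that was
 * created, then tear the whole hierarchy down again.
 */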
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1} };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware cannot map SDMA queues yet. */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
			    IP_VERSION(10, 3, 0) &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) <
			    IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

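/*
 * MES firmware file naming, as encoded below: unified MES loads
 * {prefix}_uni_mes.bin; GC 11.x loads {prefix}_mes_2.bin for the
 * scheduler pipe and {prefix}_mes1.bin for the other (KIQ) pipe,
 * falling back to {prefix}_mes.bin for older firmware releases;
 * everything else loads {prefix}_mes.bin and {prefix}_mes1.bin.
 */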
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[50];
	bool need_retry = false;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (adev->enable_uni_mes) {
		snprintf(fw_name, sizeof(fw_name),
			 "amdgpu/%s_uni_mes.bin", ucode_prefix);
	} else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], "%s", fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 "amdgpu/%s_mes.bin", ucode_prefix);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}

bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
{
	uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
	bool is_supported = false;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
	    mes_rev >= 0x63)
		is_supported = true;

	return is_supported;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);

	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
		     mem, adev->mes.event_log_size, false);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);

#endif

void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (adev->enable_mes && amdgpu_mes_log_enable)
		debugfs_create_file("amdgpu_mes_event_log", 0444, root,
				    adev, &amdgpu_debugfs_mes_event_log_fops);
#endif
}