// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_vm.h"

/**
 * struct amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
 *
 * @pfn: the current page frame number of the walk
 * @parent: the parent entry of @entry, NULL at the root
 * @entry: the page directory or page table currently visited
 * @level: the VMPT level of @entry
 */
struct amdgpu_vm_pt_cursor {
	uint64_t pfn;
	struct amdgpu_vm_bo_base *parent;
	struct amdgpu_vm_bo_base *entry;
	unsigned int level;
};

/**
 * amdgpu_vm_pt_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of bits the pfn needs to be right shifted for a level.
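 *
 * With the common 9-bit block size this is 0 for the PTB and 9, 18 and
 * 27 for PDB0, PDB1 and PDB2 respectively; the exact PDB values follow
 * adev->vm_manager.block_size.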
 */
static unsigned int amdgpu_vm_pt_level_shift(struct amdgpu_device *adev,
					     unsigned int level)
{
	switch (level) {
	case AMDGPU_VM_PDB2:
	case AMDGPU_VM_PDB1:
	case AMDGPU_VM_PDB0:
		return 9 * (AMDGPU_VM_PDB0 - level) +
			adev->vm_manager.block_size;
	case AMDGPU_VM_PTB:
		return 0;
	default:
		return ~0;
	}
}

/**
 * amdgpu_vm_pt_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of entries in a page directory or page table.
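 *
 * Note that only the root PD is sized to cover max_pfn; all
 * intermediate page directories have a fixed 512 entries.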
 */
static unsigned int amdgpu_vm_pt_num_entries(struct amdgpu_device *adev,
					     unsigned int level)
{
	unsigned int shift;

	shift = amdgpu_vm_pt_level_shift(adev, adev->vm_manager.root_level);
	if (level == adev->vm_manager.root_level)
		/* For the root directory */
		return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
			>> shift;
	else if (level != AMDGPU_VM_PTB)
		/* Everything in between */
		return 512;

	/* For the page tables on the leaves */
	return AMDGPU_VM_PTE_COUNT(adev);
}

/**
 * amdgpu_vm_pt_entries_mask - the mask to get the entry number of a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The mask to extract the entry number of a PD/PT from an address.
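 *
 * The root level is effectively unmasked (0xffffffff) since all
 * remaining upper address bits select an entry in the root PD.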
 */
static uint32_t amdgpu_vm_pt_entries_mask(struct amdgpu_device *adev,
					  unsigned int level)
{
	if (level <= adev->vm_manager.root_level)
		return 0xffffffff;
	else if (level != AMDGPU_VM_PTB)
		return 0x1ff;
	else
		return AMDGPU_VM_PTE_COUNT(adev) - 1;
}

/**
 * amdgpu_vm_pt_size - returns the size of the page table in bytes
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The size of the BO for a page directory or page table in bytes.
 */
static unsigned int amdgpu_vm_pt_size(struct amdgpu_device *adev,
				      unsigned int level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_pt_num_entries(adev, level) * 8);
}

/**
 * amdgpu_vm_pt_parent - get the parent page directory
 *
 * @pt: child page table
 *
 * Helper to get the parent entry for the child page table. NULL if we are at
 * the root page directory.
 */
static struct amdgpu_vm_bo_base *
amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt)
{
	struct amdgpu_bo *parent = pt->bo->parent;

	if (!parent)
		return NULL;

	return parent->vm_bo;
}

/**
 * amdgpu_vm_pt_start - start PD/PT walk
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm structure
 * @start: start address of the walk
 * @cursor: state to initialize
 *
 * Initialize an amdgpu_vm_pt_cursor to start a walk.
 */
static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm, uint64_t start,
			       struct amdgpu_vm_pt_cursor *cursor)
{
	cursor->pfn = start;
	cursor->parent = NULL;
	cursor->entry = &vm->root;
	cursor->level = adev->vm_manager.root_level;
}

/**
 * amdgpu_vm_pt_descendant - go to child node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the child node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
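 * Descending fails when the cursor is already at the PTB level or when
 * the current entry has no BO allocated yet.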
 */
static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
				    struct amdgpu_vm_pt_cursor *cursor)
{
	unsigned int mask, shift, idx;

	if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry ||
	    !cursor->entry->bo)
		return false;

	mask = amdgpu_vm_pt_entries_mask(adev, cursor->level);
	shift = amdgpu_vm_pt_level_shift(adev, cursor->level);

	++cursor->level;
	idx = (cursor->pfn >> shift) & mask;
	cursor->parent = cursor->entry;
	cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx];
	return true;
}

/**
 * amdgpu_vm_pt_sibling - go to sibling node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the sibling node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
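 * On success the cursor's pfn is advanced and aligned to the start of
 * the sibling's address range.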
 */
static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
				 struct amdgpu_vm_pt_cursor *cursor)
{
	unsigned int shift, num_entries;
	struct amdgpu_bo_vm *parent;

	/* Root doesn't have a sibling */
	if (!cursor->parent)
		return false;

	/* Go to our parents and see if we got a sibling */
	shift = amdgpu_vm_pt_level_shift(adev, cursor->level - 1);
	num_entries = amdgpu_vm_pt_num_entries(adev, cursor->level - 1);
	parent = to_amdgpu_bo_vm(cursor->parent->bo);

	if (cursor->entry == &parent->entries[num_entries - 1])
		return false;

	cursor->pfn += 1ULL << shift;
	cursor->pfn &= ~((1ULL << shift) - 1);
	++cursor->entry;
	return true;
}

/**
 * amdgpu_vm_pt_ancestor - go to parent node
 *
 * @cursor: current state
 *
 * Walk to the parent node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->parent)
		return false;

	--cursor->level;
	cursor->entry = cursor->parent;
	cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
	return true;
}

/**
 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk the PD/PT tree to the next node.
 */
static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
			      struct amdgpu_vm_pt_cursor *cursor)
{
	/* First try a newborn child */
	if (amdgpu_vm_pt_descendant(adev, cursor))
		return;

	/* If that didn't work try to find a sibling */
	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
		/* No sibling, go to our parents and grandparents */
		if (!amdgpu_vm_pt_ancestor(cursor)) {
			cursor->pfn = ~0ll;
			return;
		}
	}
}

/**
 * amdgpu_vm_pt_first_dfs - start a depth-first search
 *
 * @adev: amdgpu_device structure
 * @vm: amdgpu_vm structure
 * @start: optional cursor to start with
 * @cursor: state to initialize
 *
 * Starts a depth-first traversal of the PD/PT tree.
 */
static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_vm_pt_cursor *start,
				   struct amdgpu_vm_pt_cursor *cursor)
{
	if (start)
		*cursor = *start;
	else
		amdgpu_vm_pt_start(adev, vm, 0, cursor);

	while (amdgpu_vm_pt_descendant(adev, cursor))
		;
}

/**
 * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
 *
 * @start: starting point for the search
 * @entry: current entry
 *
 * Returns:
 * True when the search should continue, false otherwise.
 */
static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
				      struct amdgpu_vm_bo_base *entry)
{
	return entry && (!start || entry != start->entry);
}

/**
 * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
 *
 * @adev: amdgpu_device structure
 * @cursor: current state
 *
 * Move the cursor to the next node in a depth-first search.
 */
static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->entry)
		return;

	if (!cursor->parent)
		cursor->entry = NULL;
	else if (amdgpu_vm_pt_sibling(adev, cursor))
		while (amdgpu_vm_pt_descendant(adev, cursor))
			;
	else
		amdgpu_vm_pt_ancestor(cursor);
}

/*
 * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
 */
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)		\
	for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)),		\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
	     amdgpu_vm_pt_continue_dfs((start), (entry));			\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
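
/*
 * Example (see amdgpu_vm_pt_free_root() below); the traversal is
 * post-order, so children are always visited before their parents:
 *
 *	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
 *		if (entry)
 *			amdgpu_vm_pt_free(entry);
 *
 * The iteration is "safe" in that the cursor advances before @entry is
 * handed to the loop body, so the current entry may be freed while
 * iterating.
 */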

/**
 * amdgpu_vm_pt_clear - initially clear the PDs/PTs
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to clear BO from
 * @vmbo: BO to clear
 * @immediate: use an immediate update
 *
 * Root PD needs to be reserved when calling this.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct amdgpu_bo_vm *vmbo, bool immediate)
{
	unsigned int level = adev->vm_manager.root_level;
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_vm_update_params params;
	struct amdgpu_bo *ancestor = &vmbo->bo;
	struct amdgpu_bo *bo = &vmbo->bo;
	uint64_t value = 0, flags = 0;
	unsigned int entries;
	uint64_t addr;
	int r, idx;

	/* Figure out our place in the hierarchy */
	if (ancestor->parent) {
		++level;
		while (ancestor->parent->parent) {
			++level;
			ancestor = ancestor->parent;
		}
	}

	entries = amdgpu_bo_size(bo) / 8;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		return r;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return -ENODEV;

	r = vm->update_funcs->map_table(vmbo);
	if (r)
		goto exit;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.immediate = immediate;

	r = vm->update_funcs->prepare(&params, NULL);
	if (r)
		goto exit;

	addr = 0;

	if (adev->asic_type >= CHIP_VEGA10) {
		if (level != AMDGPU_VM_PTB) {
			/* Handle leaf PDEs as PTEs */
			flags |= AMDGPU_PDE_PTE_FLAG(adev);
			amdgpu_gmc_get_vm_pde(adev, level,
					      &value, &flags);
		} else {
			/* Workaround for fault priority problem on GMC9 */
			flags = AMDGPU_PTE_EXECUTABLE;
		}
	}

	r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
				     value, flags);
	if (r)
		goto exit;

	r = vm->update_funcs->commit(&params, NULL);
exit:
	drm_dev_exit(idx);
	return r;
}


/**
 * amdgpu_vm_pt_create - create bo for PD/PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requesting vm
 * @level: the page table level
 * @immediate: use an immediate update
 * @vmbo: pointer to the buffer object pointer
 * @xcp_id: GPU partition id
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			int level, bool immediate, struct amdgpu_bo_vm **vmbo,
			int32_t xcp_id)
{
	struct amdgpu_bo_param bp;
	unsigned int num_entries;

	memset(&bp, 0, sizeof(bp));

	bp.size = amdgpu_vm_pt_size(adev, level);
	bp.byte_align = AMDGPU_GPU_PAGE_SIZE;

	if (!adev->gmc.is_app_apu)
		bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	else
		bp.domain = AMDGPU_GEM_DOMAIN_GTT;

	bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	if (level < AMDGPU_VM_PTB)
		num_entries = amdgpu_vm_pt_num_entries(adev, level);
	else
		num_entries = 0;

	bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries);

	if (vm->use_cpu_for_update)
		bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	bp.type = ttm_bo_type_kernel;
	bp.no_wait_gpu = immediate;
	bp.xcp_id_plus1 = xcp_id + 1;

	if (vm->root.bo)
		bp.resv = vm->root.bo->tbo.base.resv;

	return amdgpu_bo_create_vm(adev, &bp, vmbo);
}


/**
 * amdgpu_vm_pt_alloc - Allocate a specific page table
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @cursor: Which page table to allocate
 * @immediate: use an immediate update
 *
 * Make sure a specific page table or directory is allocated.
 *
 * Returns:
 * 0 if the page table was allocated or already present, negative errno
 * if an error occurred.
 */
static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm,
			      struct amdgpu_vm_pt_cursor *cursor,
			      bool immediate)
{
	struct amdgpu_vm_bo_base *entry = cursor->entry;
	struct amdgpu_bo *pt_bo;
	struct amdgpu_bo_vm *pt;
	int r;

	if (entry->bo)
		return 0;

	amdgpu_vm_eviction_unlock(vm);
	r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt,
				vm->root.bo->xcp_id);
	amdgpu_vm_eviction_lock(vm);
	if (r)
		return r;

	/* Keep a reference to the parent directory to avoid freeing
	 * the page tables in the wrong order.
	 */
	pt_bo = &pt->bo;
	pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
	amdgpu_vm_bo_base_init(entry, vm, pt_bo);
	r = amdgpu_vm_pt_clear(adev, vm, pt, immediate);
	if (r)
		goto error_free_pt;

	return 0;

error_free_pt:
	amdgpu_bo_unref(&pt_bo);
	return r;
}

/**
 * amdgpu_vm_pt_free - free one PD/PT
 *
 * @entry: PDE to free
 */
static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
{
	if (!entry->bo)
		return;

	entry->bo->vm_bo = NULL;
	ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);

	spin_lock(&entry->vm->status_lock);
	list_del(&entry->vm_status);
	spin_unlock(&entry->vm->status_lock);
	amdgpu_bo_unref(&entry->bo);
}

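/**
 * amdgpu_vm_pt_free_work - deferred work item for freeing PDs/PTs
 *
 * @work: the pt_free_work member of a struct amdgpu_vm
 *
 * Takes the entries queued on vm->pt_freed and frees them with the
 * root PD reserved.
 */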
void amdgpu_vm_pt_free_work(struct work_struct *work)
{
	struct amdgpu_vm_bo_base *entry, *next;
	struct amdgpu_vm *vm;
	LIST_HEAD(pt_freed);

	vm = container_of(work, struct amdgpu_vm, pt_free_work);

	spin_lock(&vm->status_lock);
	list_splice_init(&vm->pt_freed, &pt_freed);
	spin_unlock(&vm->status_lock);

	/* flush_work in amdgpu_vm_fini ensures vm->root.bo is valid. */
	amdgpu_bo_reserve(vm->root.bo, true);

	list_for_each_entry_safe(entry, next, &pt_freed, vm_status)
		amdgpu_vm_pt_free(entry);

	amdgpu_bo_unreserve(vm->root.bo);
}

/**
 * amdgpu_vm_pt_free_list - free PD/PT levels
 *
 * @adev: amdgpu device structure
 * @params: see amdgpu_vm_update_params definition
 *
 * Free the page directory objects saved in the flush list.
 */
void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
			    struct amdgpu_vm_update_params *params)
{
	struct amdgpu_vm_bo_base *entry, *next;
	struct amdgpu_vm *vm = params->vm;
	bool unlocked = params->unlocked;

	if (list_empty(&params->tlb_flush_waitlist))
		return;

	/* For unlocked updates the root PD is not reserved here, so
	 * defer freeing to the worker which takes the reservation.
	 */
	if (unlocked) {
		spin_lock(&vm->status_lock);
		list_splice_init(&params->tlb_flush_waitlist, &vm->pt_freed);
		spin_unlock(&vm->status_lock);
		schedule_work(&vm->pt_free_work);
		return;
	}

	list_for_each_entry_safe(entry, next, &params->tlb_flush_waitlist, vm_status)
		amdgpu_vm_pt_free(entry);
}


/**
 * amdgpu_vm_pt_add_list - add PD/PT level to the flush list
 *
 * @params: parameters for the update
 * @cursor: first PT entry to start the DFS from, must not be NULL
 *
 * Moves the entries onto a list that will be freed after the TLB flush.
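 * Freeing is deferred until after the TLB flush so the hardware can
 * never walk page tables that have already been released.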
 */
static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
				  struct amdgpu_vm_pt_cursor *cursor)
{
	struct amdgpu_vm_pt_cursor seek;
	struct amdgpu_vm_bo_base *entry;

	spin_lock(&params->vm->status_lock);
	for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) {
		if (entry && entry->bo)
			list_move(&entry->vm_status, &params->tlb_flush_waitlist);
	}

	/* enter start node now */
	list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist);
	spin_unlock(&params->vm->status_lock);
}

/**
 * amdgpu_vm_pt_free_root - free root PD
 * @adev: amdgpu device structure
 * @vm: amdgpu vm structure
 *
 * Free the root page directory and everything below it.
 */
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_bo_base *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
		if (entry)
			amdgpu_vm_pt_free(entry);
	}
}

/**
 * amdgpu_vm_pde_update - update a single level in the hierarchy
 *
 * @params: parameters for the update
 * @entry: entry to update
 *
 * Makes sure the requested entry in parent is up to date.
 *
 * Returns:
 * 0 on success, negative errno otherwise.
 */
int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
			 struct amdgpu_vm_bo_base *entry)
{
	struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
	struct amdgpu_bo *bo, *pbo;
	struct amdgpu_vm *vm = params->vm;
	uint64_t pde, pt, flags;
	unsigned int level;

	if (WARN_ON(!parent))
		return -EINVAL;

	bo = parent->bo;
	for (level = 0, pbo = bo->parent; pbo; ++level)
		pbo = pbo->parent;

	level += params->adev->vm_manager.root_level;
	amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags);
	/* Each PDE is 8 bytes, so this is the byte offset in the parent */
	pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8;
	return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
					1, 0, flags);
}

/**
 * amdgpu_vm_pte_update_noretry_flags - Update PTE no-retry flags
 *
 * @adev: amdgpu_device pointer
 * @flags: pointer to PTE flags
 *
 * Update PTE no-retry flags when TF is enabled.
 */
static void amdgpu_vm_pte_update_noretry_flags(struct amdgpu_device *adev,
						uint64_t *flags)
{
	/*
	 * Update no-retry flags with the corresponding TF
	 * no-retry combination.
	 */
	if ((*flags & AMDGPU_VM_NORETRY_FLAGS) == AMDGPU_VM_NORETRY_FLAGS) {
		*flags &= ~AMDGPU_VM_NORETRY_FLAGS;
		*flags |= adev->gmc.noretry_flags;
	}
}

/*
 * amdgpu_vm_pte_update_flags - figure out flags for PTE updates
 *
 * Make sure to set the right flags for the PTEs at the desired level.
 */
static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
				       struct amdgpu_bo_vm *pt,
				       unsigned int level,
				       uint64_t pe, uint64_t addr,
				       unsigned int count, uint32_t incr,
				       uint64_t flags)
{
	struct amdgpu_device *adev = params->adev;

	if (level != AMDGPU_VM_PTB) {
		flags |= AMDGPU_PDE_PTE_FLAG(params->adev);
		amdgpu_gmc_get_vm_pde(adev, level, &addr, &flags);

	} else if (adev->asic_type >= CHIP_VEGA10 &&
		   !(flags & AMDGPU_PTE_VALID) &&
		   !(flags & AMDGPU_PTE_PRT_FLAG(params->adev))) {

		/* Workaround for fault priority problem on GMC9 */
		flags |= AMDGPU_PTE_EXECUTABLE;
	}

	/*
	 * Update no-retry flags to use the no-retry flag combination
	 * with TF enabled. The AMDGPU_VM_NORETRY_FLAGS flag combination
	 * does not work when TF is enabled. So, replace them with
	 * AMDGPU_VM_NORETRY_FLAGS_TF flag combination which works for
	 * all cases.
	 */
	if (level == AMDGPU_VM_PTB)
		amdgpu_vm_pte_update_noretry_flags(adev, &flags);

	/* APUs mapping system memory may need different MTYPEs on different
	 * NUMA nodes. Only do this for contiguous ranges that can be assumed
	 * to be on the same NUMA node.
	 */
	if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) &&
	    adev->gmc.gmc_funcs->override_vm_pte_flags &&
	    num_possible_nodes() > 1 && !params->pages_addr && params->allow_override)
		amdgpu_gmc_override_vm_pte_flags(adev, params->vm, addr, &flags);

	params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
					 flags);
}

/**
 * amdgpu_vm_pte_fragment - get fragment for PTEs
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @flags: hw mapping flags
 * @frag: resulting fragment size
 * @frag_end: end of this fragment
 *
 * Returns the first possible fragment for the start and end address.
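 *
 * For example, a range starting at pfn 0x200 and spanning 0x400 pages
 * allows at most a 2^9 page fragment (frag = 9), limited here by the
 * alignment of the start address and capped at max_frag.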
 */
static void amdgpu_vm_pte_fragment(struct amdgpu_vm_update_params *params,
				   uint64_t start, uint64_t end, uint64_t flags,
				   unsigned int *frag, uint64_t *frag_end)
{
	/*
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 *
	 * Starting with Vega10 the fragment size only controls the L1. The L2
	 * is now directly fed with small/huge/giant pages from the walker.
	 */
	unsigned int max_frag;

	if (params->adev->asic_type < CHIP_VEGA10)
		max_frag = params->adev->vm_manager.fragment_size;
	else
		max_frag = 31;

	/* system pages are not contiguous */
	if (params->pages_addr) {
		*frag = 0;
		*frag_end = end;
		return;
	}

	/* This intentionally wraps around if no bit is set */
	*frag = min_t(unsigned int, ffs(start) - 1, fls64(end - start) - 1);
	if (*frag >= max_frag) {
		*frag = max_frag;
		*frag_end = end & ~((1ULL << max_frag) - 1);
	} else {
		*frag_end = start + (1 << *frag);
	}
}

/**
 * amdgpu_vm_ptes_update - make sure that page tables are valid
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, the next dst inside the function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 *
 * Returns:
 * 0 for success, negative errno for failure.
 */
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
			  uint64_t start, uint64_t end,
			  uint64_t dst, uint64_t flags)
{
	struct amdgpu_device *adev = params->adev;
	struct amdgpu_vm_pt_cursor cursor;
	uint64_t frag_start = start, frag_end;
	unsigned int frag;
	int r;

	/* figure out the initial fragment */
	amdgpu_vm_pte_fragment(params, frag_start, end, flags, &frag,
			       &frag_end);

	/* walk over the address space and update the PTs */
	amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
	while (cursor.pfn < end) {
		unsigned int shift, parent_shift, mask;
		uint64_t incr, entry_end, pe_start;
		struct amdgpu_bo *pt;

		if (!params->unlocked) {
			/* make sure that the page tables covering the
			 * address range are actually allocated
			 */
			r = amdgpu_vm_pt_alloc(params->adev, params->vm,
					       &cursor, params->immediate);
			if (r)
				return r;
		}

		shift = amdgpu_vm_pt_level_shift(adev, cursor.level);
		parent_shift = amdgpu_vm_pt_level_shift(adev, cursor.level - 1);
		if (params->unlocked) {
			/* Unlocked updates are only allowed on the leaves */
			if (amdgpu_vm_pt_descendant(adev, &cursor))
				continue;
		} else if (adev->asic_type < CHIP_VEGA10 &&
			   (flags & AMDGPU_PTE_VALID)) {
			/* No huge page support before GMC v9 */
			if (cursor.level != AMDGPU_VM_PTB) {
				if (!amdgpu_vm_pt_descendant(adev, &cursor))
					return -ENOENT;
				continue;
			}
		} else if (frag < shift) {
			/* We can't use this level when the fragment size is
			 * smaller than the address shift. Go to the next
			 * child entry and try again.
			 */
			if (amdgpu_vm_pt_descendant(adev, &cursor))
				continue;
		} else if (frag >= parent_shift) {
			/* If the fragment size is even larger than the parent
			 * shift we should go up one level and check it again.
			 */
			if (!amdgpu_vm_pt_ancestor(&cursor))
				return -EINVAL;
			continue;
		}

		pt = cursor.entry->bo;
		if (!pt) {
			/* We need all PDs and PTs for mapping something, */
			if (flags & AMDGPU_PTE_VALID)
				return -ENOENT;

			/* but unmapping something can happen at a higher
			 * level.
			 */
			if (!amdgpu_vm_pt_ancestor(&cursor))
				return -EINVAL;

			pt = cursor.entry->bo;
			shift = parent_shift;
			frag_end = max(frag_end, ALIGN(frag_start + 1,
				   1ULL << shift));
		}

		/* Looks good so far, calculate parameters for the update */
		incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
		mask = amdgpu_vm_pt_entries_mask(adev, cursor.level);
		pe_start = ((cursor.pfn >> shift) & mask) * 8;
		entry_end = ((uint64_t)mask + 1) << shift;
		entry_end += cursor.pfn & ~(entry_end - 1);
		entry_end = min(entry_end, end);

		do {
			struct amdgpu_vm *vm = params->vm;
			uint64_t upd_end = min(entry_end, frag_end);
			unsigned int nptes = (upd_end - frag_start) >> shift;
			uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);

			/* This can happen when we set higher level PDs to
			 * silent to stop fault floods.
			 */
			nptes = max(nptes, 1u);

			trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
						    min(nptes, 32u), dst, incr,
						    upd_flags,
						    vm->task_info ? vm->task_info->tgid : 0,
						    vm->immediate.fence_context);
			amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
						   cursor.level, pe_start, dst,
						   nptes, incr, upd_flags);

			pe_start += nptes * 8;
			dst += nptes * incr;

			frag_start = upd_end;
			if (frag_start >= frag_end) {
				/* figure out the next fragment */
				amdgpu_vm_pte_fragment(params, frag_start, end,
						       flags, &frag, &frag_end);
				if (frag < shift)
					break;
			}
		} while (frag_start < entry_end);

		if (amdgpu_vm_pt_descendant(adev, &cursor)) {
			/* Free all child entries.
			 * Update the tables with the flags and addresses and free up subsequent
			 * tables in the case of huge pages or freed up areas.
			 * This is the maximum you can free, because all other page tables are not
			 * completely covered by the range and so potentially still in use.
			 */
			while (cursor.pfn < frag_start) {
				/* Make sure previous mapping is freed */
				if (cursor.entry->bo) {
					params->needs_flush = true;
					amdgpu_vm_pt_add_list(params, &cursor);
				}
				amdgpu_vm_pt_next(adev, &cursor);
			}

		} else if (frag >= shift) {
			/* or just move on to the next on the same level. */
			amdgpu_vm_pt_next(adev, &cursor);
		}
	}

	return 0;
}

/**
 * amdgpu_vm_pt_map_tables - make the page table BOs CPU accessible
 * @adev: amdgpu device structure
 * @vm: amdgpu vm structure
 *
 * Make the root page directory and everything below it CPU accessible.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_bo_base *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
		struct amdgpu_bo_vm *bo;
		int r;

		if (entry->bo) {
			bo = to_amdgpu_bo_vm(entry->bo);
			r = vm->update_funcs->map_table(bo);
			if (r)
				return r;
		}
	}

	return 0;
}