// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include <linux/dma-fence-array.h>

#include "xe_pt.h"

#include "regs/xe_gtt_defs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_drm_client.h"
#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pt_types.h"
#include "xe_pt_walk.h"
#include "xe_res_cursor.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
#include "xe_trace.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"

struct xe_pt_dir {
	struct xe_pt pt;
	/** @children: Array of page-table child nodes */
	struct xe_ptw *children[XE_PDES];
};

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define xe_pt_set_addr(__xe_pt, __addr) ((__xe_pt)->addr = (__addr))
#define xe_pt_addr(__xe_pt) ((__xe_pt)->addr)
#else
#define xe_pt_set_addr(__xe_pt, __addr)
#define xe_pt_addr(__xe_pt) 0ull
#endif

static const u64 xe_normal_pt_shifts[] = {12, 21, 30, 39, 48};
static const u64 xe_compact_pt_shifts[] = {16, 21, 30, 39, 48};

#define XE_PT_HIGHEST_LEVEL (ARRAY_SIZE(xe_normal_pt_shifts) - 1)

static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
{
	return container_of(pt, struct xe_pt_dir, pt);
}

static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
{
	return container_of(pt_dir->children[index], struct xe_pt, base);
}

static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
			     unsigned int level)
{
	struct xe_device *xe = tile_to_xe(tile);
	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
	u8 id = tile->id;

	if (!xe_vm_has_scratch(vm))
		return 0;

	if (level > MAX_HUGEPTE_LEVEL)
		return vm->pt_ops->pde_encode_bo(vm->scratch_pt[id][level - 1]->bo,
						 0, pat_index);

	return vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0) |
		XE_PTE_NULL;
}

static void xe_pt_free(struct xe_pt *pt)
{
	if (pt->level)
		kfree(as_xe_pt_dir(pt));
	else
		kfree(pt);
}

/**
 * xe_pt_create() - Create a page-table.
 * @vm: The vm to create for.
 * @tile: The tile to create for.
 * @level: The page-table level.
 *
 * Allocate and initialize a single struct xe_pt metadata structure. Also
 * create the corresponding page-table bo, but don't initialize it. If the
 * level is greater than zero, then it's assumed to be a directory page-
 * table and the directory structure is also allocated and initialized to
 * NULL pointers.
 *
 * Return: A valid struct xe_pt pointer on success, an error pointer on
 * failure.
 */
struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
			   unsigned int level)
{
	struct xe_pt *pt;
	struct xe_bo *bo;
	int err;

	if (level) {
		struct xe_pt_dir *dir = kzalloc(sizeof(*dir), GFP_KERNEL);

		pt = (dir) ? &dir->pt : NULL;
	} else {
		pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	}
	if (!pt)
		return ERR_PTR(-ENOMEM);

	pt->level = level;
	bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
				  XE_BO_FLAG_PINNED |
				  XE_BO_FLAG_NO_RESV_EVICT |
				  XE_BO_FLAG_PAGETABLE);
	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		goto err_kfree;
	}
	pt->bo = bo;
	pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;

	if (vm->xef)
		xe_drm_client_add_bo(vm->xef->client, pt->bo);
	xe_tile_assert(tile, level <= XE_VM_MAX_LEVEL);

	return pt;

err_kfree:
	xe_pt_free(pt);
	return ERR_PTR(err);
}
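
/*
 * Illustrative sketch (not part of the driver): creating a level-0
 * page-table and filling it with scratch/zero entries. Error handling is
 * elided and the usual vm locking is assumed to be in place.
 *
 *	struct xe_pt *pt = xe_pt_create(vm, tile, 0);
 *
 *	if (IS_ERR(pt))
 *		return PTR_ERR(pt);
 *	xe_pt_populate_empty(tile, vm, pt);
 */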

/**
 * xe_pt_populate_empty() - Populate a page-table bo with scratch- or zero
 * entries.
 * @tile: The tile whose scratch page-table to use.
 * @vm: The vm we populate for.
 * @pt: The page-table whose bo to initialize.
 *
 * Populate the page-table bo of @pt with entries pointing into the tile's
 * scratch page-table tree if any. Otherwise populate with zeros.
 */
void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
			  struct xe_pt *pt)
{
	struct iosys_map *map = &pt->bo->vmap;
	u64 empty;
	int i;

	if (!xe_vm_has_scratch(vm)) {
		/*
		 * FIXME: Some of this memory may already be allocated
		 * zeroed. Find out which memory that is and avoid this
		 * memset...
		 */
		xe_map_memset(vm->xe, map, 0, 0, SZ_4K);
	} else {
		empty = __xe_pt_empty_pte(tile, vm, pt->level);
		for (i = 0; i < XE_PDES; i++)
			xe_pt_write(vm->xe, map, i, empty);
	}
}

/**
 * xe_pt_shift() - Return the ilog2 value of the size of the address range of
 * a page-table at a certain level.
 * @level: The level.
 *
 * Return: The ilog2 value of the size of the address range of a page-table
 * at level @level.
 */
unsigned int xe_pt_shift(unsigned int level)
{
	return XE_PTE_SHIFT + XE_PDE_SHIFT * level;
}
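
/*
 * Worked values for the normal 4K layout (see xe_normal_pt_shifts above):
 * callers below use 1ull << xe_pt_shift(level) as the per-entry granule of
 * a level @level page-table, i.e. 4K at level 0, 2M at level 1 and 1G at
 * level 2.
 */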

/**
 * xe_pt_destroy() - Destroy a page-table tree.
 * @pt: The root of the page-table tree to destroy.
 * @flags: vm flags. Currently unused.
 * @deferred: List head of lockless list for deferred putting. NULL for
 *            immediate putting.
 *
 * Puts the page-table bo, recursively calls xe_pt_destroy on all children
 * and finally frees @pt. TODO: Can we remove the @flags argument?
 */
void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
{
	int i;

	if (!pt)
		return;

	XE_WARN_ON(!list_empty(&pt->bo->ttm.base.gpuva.list));
	xe_bo_unpin(pt->bo);
	xe_bo_put_deferred(pt->bo, deferred);

	if (pt->level > 0 && pt->num_live) {
		struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);

		for (i = 0; i < XE_PDES; i++) {
			if (xe_pt_entry(pt_dir, i))
				xe_pt_destroy(xe_pt_entry(pt_dir, i), flags,
					      deferred);
		}
	}
	xe_pt_free(pt);
}

/**
 * DOC: Pagetable building
 *
 * Below we use the term "page-table" for both page-directories, containing
 * pointers to lower level page-directories or page-tables, and level 0
 * page-tables that contain only page-table-entries pointing to memory pages.
 *
 * When inserting an address range in an already existing page-table tree
 * there will typically be a set of page-tables that are shared with other
 * address ranges, and a set that are private to this address range.
 * The set of shared page-tables can be at most two per level,
 * and those can't be updated immediately because the entries of those
 * page-tables may still be in use by the gpu for other mappings. Therefore
 * when inserting entries into those, we instead stage those insertions by
 * adding insertion data into struct xe_vm_pgtable_update structures. This
 * data, (subtrees for the cpu and page-table-entries for the gpu) is then
 * added in a separate commit step. CPU-data is committed while still under the
 * vm lock, the object lock and for userptr, the notifier lock in read mode.
 * The GPU async data is committed either by the GPU or CPU after fulfilling
 * relevant dependencies.
 * For non-shared page-tables (and, in fact, for shared ones that aren't
 * existing at the time of staging), we add the data in-place without the
 * special update structures. This private part of the page-table tree will
 * remain disconnected from the vm page-table tree until data is committed to
 * the shared page tables of the vm tree in the commit phase.
 */
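
/*
 * Illustrative sketch of the staging/commit split described above (names
 * refer to functions defined in this file; error handling and locking
 * elided):
 *
 *	xe_pt_stage_bind(tile, vma, entries, &num_entries);
 *		// private subtrees built in place, shared-pagetable
 *		// updates staged in @entries
 *	xe_pt_commit_prepare_bind(vma, entries, num_entries, rebind);
 *		// CPU: connect the new subtrees into the vm tree
 *	// GPU: the staged PTEs themselves are written via the migrate
 *	// engine once the relevant dependencies have signaled.
 */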

struct xe_pt_update {
	/** @update: The update structure we're building for this parent. */
	struct xe_vm_pgtable_update *update;
	/** @parent: The parent. Used to detect a parent change. */
	struct xe_pt *parent;
	/** @preexisting: Whether the parent was pre-existing or allocated */
	bool preexisting;
};

struct xe_pt_stage_bind_walk {
	/** @base: The base class. */
	struct xe_pt_walk base;

	/* Input parameters for the walk */
	/** @vm: The vm we're building for. */
	struct xe_vm *vm;
	/** @tile: The tile we're building for. */
	struct xe_tile *tile;
	/** @default_pte: Flag-only PTE template. No address is associated. */
	u64 default_pte;
	/** @dma_offset: DMA offset to add to the PTE. */
	u64 dma_offset;
	/**
	 * @needs_64K: This address range enforces 64K alignment and
	 * granularity.
	 */
	bool needs_64K;
	/**
	 * @vma: VMA being mapped
	 */
	struct xe_vma *vma;

	/* Also input, but is updated during the walk */
	/** @curs: The DMA address cursor. */
	struct xe_res_cursor *curs;
	/** @va_curs_start: The virtual address corresponding to @curs->start */
	u64 va_curs_start;

	/* Output */
	struct xe_walk_update {
		/** @wupd.entries: Caller provided storage. */
		struct xe_vm_pgtable_update *entries;
		/** @wupd.num_used_entries: Number of update @entries used. */
		unsigned int num_used_entries;
		/** @wupd.updates: Tracks the update entry at a given level */
		struct xe_pt_update updates[XE_VM_MAX_LEVEL + 1];
	} wupd;

	/* Walk state */
	/**
	 * @l0_end_addr: The end address of the current l0 leaf. Used for
	 * 64K granularity detection.
	 */
	u64 l0_end_addr;
	/** @addr_64K: The start address of the current 64K chunk. */
	u64 addr_64K;
	/** @found_64K: Whether @addr_64K actually points to a 64K chunk. */
	bool found_64K;
};

static int
xe_pt_new_shared(struct xe_walk_update *wupd, struct xe_pt *parent,
		 pgoff_t offset, bool alloc_entries)
{
	struct xe_pt_update *upd = &wupd->updates[parent->level];
	struct xe_vm_pgtable_update *entry;

	/*
	 * For *each level*, we can only have one active
	 * struct xe_pt_update at any one time. Once we move on to a
	 * new parent and page-directory, the old one is complete, and
	 * updates are either already stored in the build tree or in
	 * @wupd->entries.
	 */
	if (likely(upd->parent == parent))
		return 0;

	upd->parent = parent;
	upd->preexisting = true;

	if (wupd->num_used_entries == XE_VM_MAX_LEVEL * 2 + 1)
		return -EINVAL;

	entry = wupd->entries + wupd->num_used_entries++;
	upd->update = entry;
	entry->ofs = offset;
	entry->pt_bo = parent->bo;
	entry->pt = parent;
	entry->flags = 0;
	entry->qwords = 0;
	entry->pt_bo->update_index = -1;

	if (alloc_entries) {
		entry->pt_entries = kmalloc_array(XE_PDES,
						  sizeof(*entry->pt_entries),
						  GFP_KERNEL);
		if (!entry->pt_entries)
			return -ENOMEM;
	}

	return 0;
}

/*
 * NOTE: This is a very frequently called function so we allow ourselves
 * to annotate (using branch prediction hints) the fastpath of updating a
 * non-pre-existing pagetable with leaf ptes.
 */
static int
xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
		   pgoff_t offset, struct xe_pt *xe_child, u64 pte)
{
	struct xe_pt_update *upd = &xe_walk->wupd.updates[parent->level];
	struct xe_pt_update *child_upd = xe_child ?
		&xe_walk->wupd.updates[xe_child->level] : NULL;
	int ret;

	ret = xe_pt_new_shared(&xe_walk->wupd, parent, offset, true);
	if (unlikely(ret))
		return ret;

	/*
	 * Register this new pagetable so that it won't be recognized as
	 * a shared pagetable by a subsequent insertion.
	 */
	if (unlikely(child_upd)) {
		child_upd->update = NULL;
		child_upd->parent = xe_child;
		child_upd->preexisting = false;
	}

	if (likely(!upd->preexisting)) {
		/* Continue building a non-connected subtree. */
		struct iosys_map *map = &parent->bo->vmap;

		if (unlikely(xe_child))
			parent->base.children[offset] = &xe_child->base;

		xe_pt_write(xe_walk->vm->xe, map, offset, pte);
		parent->num_live++;
	} else {
		/* Shared pt. Stage update. */
		unsigned int idx;
		struct xe_vm_pgtable_update *entry = upd->update;

		idx = offset - entry->ofs;
		entry->pt_entries[idx].pt = xe_child;
		entry->pt_entries[idx].pte = pte;
		entry->qwords++;
	}

	return 0;
}

static bool xe_pt_hugepte_possible(u64 addr, u64 next, unsigned int level,
				   struct xe_pt_stage_bind_walk *xe_walk)
{
	u64 size, dma;

	if (level > MAX_HUGEPTE_LEVEL)
		return false;

	/* Does the virtual range requested cover a huge pte? */
	if (!xe_pt_covers(addr, next, level, &xe_walk->base))
		return false;

	/* Does the DMA segment cover the whole pte? */
	if (next - xe_walk->va_curs_start > xe_walk->curs->size)
		return false;

	/* Null VMAs do not have DMA addresses. */
	if (xe_vma_is_null(xe_walk->vma))
		return true;

	/* Is the DMA address huge PTE size aligned? */
	size = next - addr;
	dma = addr - xe_walk->va_curs_start + xe_res_dma(xe_walk->curs);

	return IS_ALIGNED(dma, size);
}
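
/*
 * Worked example (illustrative): for a level-1 huge PTE covering [2M, 4M),
 * the walk range must cover the entire 2M entry, the current DMA segment
 * must span at least next - va_curs_start bytes, and the DMA address
 * computed above must itself be 2M-aligned.
 */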

/*
 * Scan the requested mapping to check whether it can be done entirely
 * with 64K PTEs.
 */
static bool
xe_pt_scan_64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
{
	struct xe_res_cursor curs = *xe_walk->curs;

	if (!IS_ALIGNED(addr, SZ_64K))
		return false;

	if (next > xe_walk->l0_end_addr)
		return false;

	/* Null VMAs do not have DMA addresses. */
	if (xe_vma_is_null(xe_walk->vma))
		return true;

	xe_res_next(&curs, addr - xe_walk->va_curs_start);
	for (; addr < next; addr += SZ_64K) {
		if (!IS_ALIGNED(xe_res_dma(&curs), SZ_64K) || curs.size < SZ_64K)
			return false;

		xe_res_next(&curs, SZ_64K);
	}

	return addr == next;
}

/*
 * For non-compact "normal" 4K level-0 pagetables, we want to try to group
 * addresses together in 64K-contiguous regions to add a 64K TLB hint for the
 * device to the PTE.
 * This function determines whether the address is part of such a
 * segment. For VRAM in normal pagetables, this is strictly necessary on
 * some devices.
 */
static bool
xe_pt_is_pte_ps64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
{
	/* Address is within an already found 64k region */
	if (xe_walk->found_64K && addr - xe_walk->addr_64K < SZ_64K)
		return true;

	xe_walk->found_64K = xe_pt_scan_64K(addr, addr + SZ_64K, xe_walk);
	xe_walk->addr_64K = addr;

	return xe_walk->found_64K;
}
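
/*
 * Example (illustrative): once found_64K is set with addr_64K == 0x10000,
 * any addr in [0x10000, 0x20000) reuses the cached result; the first addr
 * outside that window triggers a new xe_pt_scan_64K() probe.
 */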

static int
xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
		       unsigned int level, u64 addr, u64 next,
		       struct xe_ptw **child,
		       enum page_walk_action *action,
		       struct xe_pt_walk *walk)
{
	struct xe_pt_stage_bind_walk *xe_walk =
		container_of(walk, typeof(*xe_walk), base);
	u16 pat_index = xe_walk->vma->pat_index;
	struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), base);
	struct xe_vm *vm = xe_walk->vm;
	struct xe_pt *xe_child;
	bool covers;
	int ret = 0;
	u64 pte;

	/* Is this a leaf entry? */
	if (level == 0 || xe_pt_hugepte_possible(addr, next, level, xe_walk)) {
		struct xe_res_cursor *curs = xe_walk->curs;
		bool is_null = xe_vma_is_null(xe_walk->vma);

		XE_WARN_ON(xe_walk->va_curs_start != addr);

		pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
						 xe_res_dma(curs) + xe_walk->dma_offset,
						 xe_walk->vma, pat_index, level);
		pte |= xe_walk->default_pte;

		/*
		 * Set the XE_PTE_PS64 hint if possible, otherwise if
		 * this device *requires* 64K PTE size for VRAM, fail.
		 */
		if (level == 0 && !xe_parent->is_compact) {
			if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
				xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K;
				pte |= XE_PTE_PS64;
			} else if (XE_WARN_ON(xe_walk->needs_64K)) {
				return -EINVAL;
			}
		}

		ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, NULL, pte);
		if (unlikely(ret))
			return ret;

		if (!is_null)
			xe_res_next(curs, next - addr);
		xe_walk->va_curs_start = next;
		xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level);
		*action = ACTION_CONTINUE;

		return ret;
	}

	/*
	 * Descending to lower level. Determine if we need to allocate a
	 * new page table or -directory, which we do if there is no
	 * previous one or there is one we can completely replace.
	 */
	if (level == 1) {
		walk->shifts = xe_normal_pt_shifts;
		xe_walk->l0_end_addr = next;
	}

	covers = xe_pt_covers(addr, next, level, &xe_walk->base);
	if (covers || !*child) {
		u64 flags = 0;

		xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1);
		if (IS_ERR(xe_child))
			return PTR_ERR(xe_child);

		xe_pt_set_addr(xe_child,
			       round_down(addr, 1ull << walk->shifts[level]));

		if (!covers)
			xe_pt_populate_empty(xe_walk->tile, xe_walk->vm, xe_child);

		*child = &xe_child->base;

		/*
		 * Prefer the compact pagetable layout for L0 if possible. Only
		 * possible if the VMA covers the entire 2MB region, since
		 * compact 64K and 4K pages cannot be mixed within a 2MB
		 * region.
		 * TODO: Suballocate the pt bo to avoid wasting a lot of
		 * memory.
		 */
		if (GRAPHICS_VERx100(tile_to_xe(xe_walk->tile)) >= 1250 && level == 1 &&
		    covers && xe_pt_scan_64K(addr, next, xe_walk)) {
			walk->shifts = xe_compact_pt_shifts;
			xe_walk->vma->gpuva.flags |= XE_VMA_PTE_COMPACT;
			flags |= XE_PDE_64K;
			xe_child->is_compact = true;
		}

		pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0, pat_index) | flags;
		ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, xe_child,
					 pte);
	}

	*action = ACTION_SUBTREE;
	return ret;
}

static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
	.pt_entry = xe_pt_stage_bind_entry,
};

/**
 * xe_pt_stage_bind() - Build a disconnected page-table tree for a given address
 * range.
 * @tile: The tile we're building for.
 * @vma: The vma indicating the address range.
 * @entries: Storage for the update entries used for connecting the tree to
 * the main tree at commit time.
 * @num_entries: On output contains the number of @entries used.
 *
 * This function builds a disconnected page-table tree for a given address
 * range. The tree is connected to the main vm tree for the gpu using
 * xe_migrate_update_pgtables() and for the cpu using xe_pt_commit_bind().
 * The function builds xe_vm_pgtable_update structures for already existing
 * shared page-tables, while non-existing shared and non-shared page-tables
 * are built and populated directly.
 *
 * Return: 0 on success, negative error code on error.
 */
static int
xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
		 struct xe_vm_pgtable_update *entries, u32 *num_entries)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_bo *bo = xe_vma_bo(vma);
	bool is_devmem = !xe_vma_is_userptr(vma) && bo &&
		(xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo));
	struct xe_res_cursor curs;
	struct xe_pt_stage_bind_walk xe_walk = {
		.base = {
			.ops = &xe_pt_stage_bind_ops,
			.shifts = xe_normal_pt_shifts,
			.max_level = XE_PT_HIGHEST_LEVEL,
		},
		.vm = xe_vma_vm(vma),
		.tile = tile,
		.curs = &curs,
		.va_curs_start = xe_vma_start(vma),
		.vma = vma,
		.wupd.entries = entries,
		.needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAG_64K) && is_devmem,
	};
	struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
	int ret;

	/*
	 * Default atomic expectations for different allocation scenarios are as follows:
	 *
	 * 1. Traditional API: When the VM is not in LR mode:
	 *    - Device atomics are expected to function with all allocations.
	 *
	 * 2. Compute/SVM API: When the VM is in LR mode:
	 *    - Device atomics are the default behavior when the bo is placed in a single region.
	 *    - In all other cases device atomics will be disabled with AE=0 until an application
	 *      requests differently using an ioctl like madvise.
	 */
	if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
		if (xe_vm_in_lr_mode(xe_vma_vm(vma))) {
			if (bo && xe_bo_has_single_placement(bo))
				xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
			/*
			 * If a SMEM+LMEM allocation is backed by SMEM, a device
			 * atomic will cause a gpu page fault and the allocation
			 * will then be migrated to LMEM, so bind such
			 * allocations with device atomics enabled.
			 */
			else if (is_devmem && !xe_bo_has_single_placement(bo))
				xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
		} else {
			xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
		}

		/*
		 * Unset AE if the platform (PVC) doesn't support it on an
		 * allocation.
		 */
		if (!xe->info.has_device_atomics_on_smem && !is_devmem)
			xe_walk.default_pte &= ~XE_USM_PPGTT_PTE_AE;
	}

	if (is_devmem) {
		xe_walk.default_pte |= XE_PPGTT_PTE_DM;
		xe_walk.dma_offset = vram_region_gpu_offset(bo->ttm.resource);
	}

	if (!xe_vma_has_no_bo(vma) && xe_bo_is_stolen(bo))
		xe_walk.dma_offset = xe_ttm_stolen_gpu_offset(xe_bo_device(bo));

	xe_bo_assert_held(bo);

	if (!xe_vma_is_null(vma)) {
		if (xe_vma_is_userptr(vma))
			xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
					xe_vma_size(vma), &curs);
		else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
			xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
				     xe_vma_size(vma), &curs);
		else
			xe_res_first_sg(xe_bo_sg(bo), xe_vma_bo_offset(vma),
					xe_vma_size(vma), &curs);
	} else {
		curs.size = xe_vma_size(vma);
	}

	ret = xe_pt_walk_range(&pt->base, pt->level, xe_vma_start(vma),
			       xe_vma_end(vma), &xe_walk.base);

	*num_entries = xe_walk.wupd.num_used_entries;
	return ret;
}

/**
 * xe_pt_nonshared_offsets() - Determine the non-shared entry offsets of a
 * shared pagetable.
 * @addr: The start address within the non-shared pagetable.
 * @end: The end address within the non-shared pagetable.
 * @level: The level of the non-shared pagetable.
 * @walk: Walk info. The function adjusts the walk action.
 * @action: next action to perform (see enum page_walk_action)
 * @offset: Ignored on input, first non-shared entry on output.
 * @end_offset: Ignored on input, last non-shared entry + 1 on output.
 *
 * A non-shared page-table has some entries that belong to the address range
 * and others that don't. This function determines the entries that belong
 * fully to the address range. Depending on level, some entries may
 * partially belong to the address range (that can't happen at level 0).
 * The function detects that and adjusts those offsets to not include those
 * partial entries. If it does detect partial entries, we know that there must
 * be shared page tables also at lower levels, so it adjusts the walk action
 * accordingly.
 *
 * Return: true if there were non-shared entries, false otherwise.
 */
static bool xe_pt_nonshared_offsets(u64 addr, u64 end, unsigned int level,
				    struct xe_pt_walk *walk,
				    enum page_walk_action *action,
				    pgoff_t *offset, pgoff_t *end_offset)
{
	u64 size = 1ull << walk->shifts[level];

	*offset = xe_pt_offset(addr, level, walk);
	*end_offset = xe_pt_num_entries(addr, end, level, walk) + *offset;

	if (!level)
		return true;

	/*
	 * If addr or next are not size aligned, there are shared pts at lower
	 * level, so in that case traverse down the subtree
	 */
	*action = ACTION_CONTINUE;
	if (!IS_ALIGNED(addr, size)) {
		*action = ACTION_SUBTREE;
		(*offset)++;
	}

	if (!IS_ALIGNED(end, size)) {
		*action = ACTION_SUBTREE;
		(*end_offset)--;
	}

	return *end_offset > *offset;
}
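
/*
 * Worked example (illustrative, offsets taken modulo the page-table span):
 * at level 1, where each entry covers 2M, the range [3M, 8M) initially
 * maps to entry offsets [1, 4). Since 3M is not 2M-aligned, entry 1
 * (2M..4M) is only partially covered, so *offset is bumped to 2 and the
 * walk descends into that subtree; the fully non-shared entries are
 * [2, 4), i.e. 4M..8M.
 */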

struct xe_pt_zap_ptes_walk {
	/** @base: The walk base-class */
	struct xe_pt_walk base;

	/* Input parameters for the walk */
	/** @tile: The tile we're building for */
	struct xe_tile *tile;

	/* Output */
	/** @needs_invalidate: Whether we need to invalidate TLB */
	bool needs_invalidate;
};

static int xe_pt_zap_ptes_entry(struct xe_ptw *parent, pgoff_t offset,
				unsigned int level, u64 addr, u64 next,
				struct xe_ptw **child,
				enum page_walk_action *action,
				struct xe_pt_walk *walk)
{
	struct xe_pt_zap_ptes_walk *xe_walk =
		container_of(walk, typeof(*xe_walk), base);
	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
	pgoff_t end_offset;

	XE_WARN_ON(!*child);
	XE_WARN_ON(!level);

	/*
	 * Note that we're called from an entry callback, and we're dealing
	 * with the child of that entry rather than the parent, so need to
	 * adjust level down.
	 */
	if (xe_pt_nonshared_offsets(addr, next, --level, walk, action, &offset,
				    &end_offset)) {
		xe_map_memset(tile_to_xe(xe_walk->tile), &xe_child->bo->vmap,
			      offset * sizeof(u64), 0,
			      (end_offset - offset) * sizeof(u64));
		xe_walk->needs_invalidate = true;
	}

	return 0;
}

static const struct xe_pt_walk_ops xe_pt_zap_ptes_ops = {
	.pt_entry = xe_pt_zap_ptes_entry,
};

/**
 * xe_pt_zap_ptes() - Zap (zero) gpu ptes of an address range
 * @tile: The tile we're zapping for.
 * @vma: GPU VMA detailing address range.
 *
 * Eviction and Userptr invalidation need to be able to zap the
 * gpu ptes of a given address range in pagefaulting mode.
 * In order to be able to do that, this function needs access to the shared
 * page-table entries so it can either clear the leaf PTEs or
 * clear the pointers to lower-level page-tables. The caller is required
 * to hold the necessary locks to ensure neither the page-table connectivity
 * nor the page-table entries of the range are updated from under us.
 *
 * Return: Whether ptes were actually updated and a TLB invalidation is
 * required.
 */
bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
{
	struct xe_pt_zap_ptes_walk xe_walk = {
		.base = {
			.ops = &xe_pt_zap_ptes_ops,
			.shifts = xe_normal_pt_shifts,
			.max_level = XE_PT_HIGHEST_LEVEL,
		},
		.tile = tile,
	};
	struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
	u8 pt_mask = (vma->tile_present & ~vma->tile_invalidated);

	if (!(pt_mask & BIT(tile->id)))
		return false;

	(void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma),
				xe_vma_end(vma), &xe_walk.base);

	return xe_walk.needs_invalidate;
}

static void
xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *tile,
		       struct iosys_map *map, void *data,
		       u32 qword_ofs, u32 num_qwords,
		       const struct xe_vm_pgtable_update *update)
{
	struct xe_pt_entry *ptes = update->pt_entries;
	u64 *ptr = data;
	u32 i;

	for (i = 0; i < num_qwords; i++) {
		if (map)
			xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
				  sizeof(u64), u64, ptes[i].pte);
		else
			ptr[i] = ptes[i].pte;
	}
}

static void xe_pt_cancel_bind(struct xe_vma *vma,
			      struct xe_vm_pgtable_update *entries,
			      u32 num_entries)
{
	u32 i, j;

	for (i = 0; i < num_entries; i++) {
		struct xe_pt *pt = entries[i].pt;

		if (!pt)
			continue;

		if (pt->level) {
			for (j = 0; j < entries[i].qwords; j++)
				xe_pt_destroy(entries[i].pt_entries[j].pt,
					      xe_vma_vm(vma)->flags, NULL);
		}

		kfree(entries[i].pt_entries);
		entries[i].pt_entries = NULL;
		entries[i].qwords = 0;
	}
}

static void xe_pt_commit_locks_assert(struct xe_vma *vma)
{
	struct xe_vm *vm = xe_vma_vm(vma);

	lockdep_assert_held(&vm->lock);

	if (!xe_vma_is_userptr(vma) && !xe_vma_is_null(vma))
		dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv);

	xe_vm_assert_held(vm);
}

static void xe_pt_commit(struct xe_vma *vma,
			 struct xe_vm_pgtable_update *entries,
			 u32 num_entries, struct llist_head *deferred)
{
	u32 i, j;

	xe_pt_commit_locks_assert(vma);

	for (i = 0; i < num_entries; i++) {
		struct xe_pt *pt = entries[i].pt;

		if (!pt->level)
			continue;

		for (j = 0; j < entries[i].qwords; j++) {
			struct xe_pt *oldpte = entries[i].pt_entries[j].pt;

			xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred);
		}
	}
}

static void xe_pt_abort_bind(struct xe_vma *vma,
			     struct xe_vm_pgtable_update *entries,
			     u32 num_entries, bool rebind)
{
	int i, j;

	xe_pt_commit_locks_assert(vma);

	for (i = num_entries - 1; i >= 0; --i) {
		struct xe_pt *pt = entries[i].pt;
		struct xe_pt_dir *pt_dir;

		if (!rebind)
			pt->num_live -= entries[i].qwords;

		if (!pt->level)
			continue;

		pt_dir = as_xe_pt_dir(pt);
		for (j = 0; j < entries[i].qwords; j++) {
			u32 j_ = j + entries[i].ofs;
			struct xe_pt *newpte = xe_pt_entry(pt_dir, j_);
			struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
			pt_dir->children[j_] = oldpte ? &oldpte->base : NULL;
			xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL);
		}
	}
}

static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
				      struct xe_vm_pgtable_update *entries,
				      u32 num_entries, bool rebind)
{
	u32 i, j;

	xe_pt_commit_locks_assert(vma);

	for (i = 0; i < num_entries; i++) {
		struct xe_pt *pt = entries[i].pt;
		struct xe_pt_dir *pt_dir;

		if (!rebind)
			pt->num_live += entries[i].qwords;

		if (!pt->level)
			continue;

		pt_dir = as_xe_pt_dir(pt);
		for (j = 0; j < entries[i].qwords; j++) {
			u32 j_ = j + entries[i].ofs;
			struct xe_pt *newpte = entries[i].pt_entries[j].pt;
			struct xe_pt *oldpte = NULL;

			if (xe_pt_entry(pt_dir, j_))
				oldpte = xe_pt_entry(pt_dir, j_);

			pt_dir->children[j_] = &newpte->base;
			entries[i].pt_entries[j].pt = oldpte;
		}
	}
}

static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries,
			    u32 num_entries)
{
	u32 i;

	for (i = 0; i < num_entries; i++)
		kfree(entries[i].pt_entries);
}

static int
xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
		   struct xe_vm_pgtable_update *entries, u32 *num_entries)
{
	int err;

	*num_entries = 0;
	err = xe_pt_stage_bind(tile, vma, entries, num_entries);
	if (!err)
		xe_tile_assert(tile, *num_entries);

	return err;
}

static void xe_vm_dbg_print_entries(struct xe_device *xe,
				    const struct xe_vm_pgtable_update *entries,
				    unsigned int num_entries, bool bind)
#if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM))
{
	unsigned int i;

	vm_dbg(&xe->drm, "%s: %u entries to update\n", bind ? "bind" : "unbind",
	       num_entries);
	for (i = 0; i < num_entries; i++) {
		const struct xe_vm_pgtable_update *entry = &entries[i];
		struct xe_pt *xe_pt = entry->pt;
		u64 page_size = 1ull << xe_pt_shift(xe_pt->level);
		u64 end;
		u64 start;

		xe_assert(xe, !entry->pt->is_compact);
		start = entry->ofs * page_size;
		end = start + page_size * entry->qwords;
		vm_dbg(&xe->drm,
		       "\t%u: Update level %u at (%u + %u) [%llx...%llx) f:%x\n",
		       i, xe_pt->level, entry->ofs, entry->qwords,
		       xe_pt_addr(xe_pt) + start, xe_pt_addr(xe_pt) + end, 0);
	}
}
#else
{}
#endif

static bool no_in_syncs(struct xe_sync_entry *syncs, u32 num_syncs)
{
	int i;

	for (i = 0; i < num_syncs; i++) {
		struct dma_fence *fence = syncs[i].fence;

		if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				       &fence->flags))
			return false;
	}

	return true;
}

static int job_test_add_deps(struct xe_sched_job *job,
			     struct dma_resv *resv,
			     enum dma_resv_usage usage)
{
	if (!job) {
		if (!dma_resv_test_signaled(resv, usage))
			return -ETIME;

		return 0;
	}

	return xe_sched_job_add_deps(job, resv, usage);
}

static int vma_add_deps(struct xe_vma *vma, struct xe_sched_job *job)
{
	struct xe_bo *bo = xe_vma_bo(vma);

	xe_bo_assert_held(bo);

	if (bo && !bo->vm)
		return job_test_add_deps(job, bo->ttm.base.resv,
					 DMA_RESV_USAGE_KERNEL);

	return 0;
}

static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
		       struct xe_sched_job *job)
{
	int err = 0;

	switch (op->base.op) {
	case DRM_GPUVA_OP_MAP:
		if (!op->map.immediate && xe_vm_in_fault_mode(vm))
			break;

		err = vma_add_deps(op->map.vma, job);
		break;
	case DRM_GPUVA_OP_REMAP:
		if (op->remap.prev)
			err = vma_add_deps(op->remap.prev, job);
		if (!err && op->remap.next)
			err = vma_add_deps(op->remap.next, job);
		break;
	case DRM_GPUVA_OP_UNMAP:
		break;
	case DRM_GPUVA_OP_PREFETCH:
		err = vma_add_deps(gpuva_to_vma(op->base.prefetch.va), job);
		break;
	default:
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
	}

	return err;
}

static int xe_pt_vm_dependencies(struct xe_sched_job *job,
				 struct xe_vm *vm,
				 struct xe_vma_ops *vops,
				 struct xe_vm_pgtable_update_ops *pt_update_ops,
				 struct xe_range_fence_tree *rftree)
{
	struct xe_range_fence *rtfence;
	struct dma_fence *fence;
	struct xe_vma_op *op;
	int err = 0, i;

	xe_vm_assert_held(vm);

	if (!job && !no_in_syncs(vops->syncs, vops->num_syncs))
		return -ETIME;

	if (!job && !xe_exec_queue_is_idle(pt_update_ops->q))
		return -ETIME;

	if (pt_update_ops->wait_vm_bookkeep || pt_update_ops->wait_vm_kernel) {
		err = job_test_add_deps(job, xe_vm_resv(vm),
					pt_update_ops->wait_vm_bookkeep ?
					DMA_RESV_USAGE_BOOKKEEP :
					DMA_RESV_USAGE_KERNEL);
		if (err)
			return err;
	}

	rtfence = xe_range_fence_tree_first(rftree, pt_update_ops->start,
					    pt_update_ops->last);
	while (rtfence) {
		fence = rtfence->fence;

		if (!dma_fence_is_signaled(fence)) {
			/*
			 * Is this a CPU update? GPU is busy updating, so return
			 * an error
			 */
			if (!job)
				return -ETIME;

			dma_fence_get(fence);
			err = drm_sched_job_add_dependency(&job->drm, fence);
			if (err)
				return err;
		}

		rtfence = xe_range_fence_tree_next(rtfence,
						   pt_update_ops->start,
						   pt_update_ops->last);
	}

	list_for_each_entry(op, &vops->list, link) {
		err = op_add_deps(vm, op, job);
		if (err)
			return err;
	}

	if (!(pt_update_ops->q->flags & EXEC_QUEUE_FLAG_KERNEL)) {
		if (job)
			err = xe_sched_job_last_fence_add_dep(job, vm);
		else
			err = xe_exec_queue_last_fence_test_dep(pt_update_ops->q, vm);
	}

	for (i = 0; job && !err && i < vops->num_syncs; i++)
		err = xe_sync_entry_add_deps(&vops->syncs[i], job);

	return err;
}

static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update)
{
	struct xe_vma_ops *vops = pt_update->vops;
	struct xe_vm *vm = vops->vm;
	struct xe_range_fence_tree *rftree = &vm->rftree[pt_update->tile_id];
	struct xe_vm_pgtable_update_ops *pt_update_ops =
		&vops->pt_update_ops[pt_update->tile_id];

	return xe_pt_vm_dependencies(pt_update->job, vm, pt_update->vops,
				     pt_update_ops, rftree);
}

#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT

static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
{
	u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
	static u32 count;

	if (count++ % divisor == divisor - 1) {
		uvma->userptr.divisor = divisor << 1;
		return true;
	}

	return false;
}

#else

static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
{
	return false;
}

#endif

static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
			     struct xe_vm_pgtable_update_ops *pt_update)
{
	struct xe_userptr_vma *uvma;
	unsigned long notifier_seq;

	lockdep_assert_held_read(&vm->userptr.notifier_lock);

	if (!xe_vma_is_userptr(vma))
		return 0;

	uvma = to_userptr_vma(vma);
	notifier_seq = uvma->userptr.notifier_seq;

	if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm))
		return 0;

	if (!mmu_interval_read_retry(&uvma->userptr.notifier,
				     notifier_seq) &&
	    !xe_pt_userptr_inject_eagain(uvma))
		return 0;

	if (xe_vm_in_fault_mode(vm)) {
		return -EAGAIN;
	} else {
		spin_lock(&vm->userptr.invalidated_lock);
		list_move_tail(&uvma->userptr.invalidate_link,
			       &vm->userptr.invalidated);
		spin_unlock(&vm->userptr.invalidated_lock);

		if (xe_vm_in_preempt_fence_mode(vm)) {
			struct dma_resv_iter cursor;
			struct dma_fence *fence;
			long err;

			dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
					    DMA_RESV_USAGE_BOOKKEEP);
			dma_resv_for_each_fence_unlocked(&cursor, fence)
				dma_fence_enable_sw_signaling(fence);
			dma_resv_iter_end(&cursor);

			err = dma_resv_wait_timeout(xe_vm_resv(vm),
						    DMA_RESV_USAGE_BOOKKEEP,
						    false, MAX_SCHEDULE_TIMEOUT);
			XE_WARN_ON(err <= 0);
		}
	}

	return 0;
}

static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
			    struct xe_vm_pgtable_update_ops *pt_update)
{
	int err = 0;

	lockdep_assert_held_read(&vm->userptr.notifier_lock);

	switch (op->base.op) {
	case DRM_GPUVA_OP_MAP:
		if (!op->map.immediate && xe_vm_in_fault_mode(vm))
			break;

		err = vma_check_userptr(vm, op->map.vma, pt_update);
		break;
	case DRM_GPUVA_OP_REMAP:
		if (op->remap.prev)
			err = vma_check_userptr(vm, op->remap.prev, pt_update);
		if (!err && op->remap.next)
			err = vma_check_userptr(vm, op->remap.next, pt_update);
		break;
	case DRM_GPUVA_OP_UNMAP:
		break;
	case DRM_GPUVA_OP_PREFETCH:
		err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va),
					pt_update);
		break;
	default:
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
	}

	return err;
}

static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
{
	struct xe_vm *vm = pt_update->vops->vm;
	struct xe_vma_ops *vops = pt_update->vops;
	struct xe_vm_pgtable_update_ops *pt_update_ops =
		&vops->pt_update_ops[pt_update->tile_id];
	struct xe_vma_op *op;
	int err;

	err = xe_pt_pre_commit(pt_update);
	if (err)
		return err;

	down_read(&vm->userptr.notifier_lock);

	list_for_each_entry(op, &vops->list, link) {
		err = op_check_userptr(vm, op, pt_update_ops);
		if (err) {
			up_read(&vm->userptr.notifier_lock);
			break;
		}
	}

	return err;
}

struct invalidation_fence {
	struct xe_gt_tlb_invalidation_fence base;
	struct xe_gt *gt;
	struct dma_fence *fence;
	struct dma_fence_cb cb;
	struct work_struct work;
	u64 start;
	u64 end;
	u32 asid;
};

static void invalidation_fence_cb(struct dma_fence *fence,
				  struct dma_fence_cb *cb)
{
	struct invalidation_fence *ifence =
		container_of(cb, struct invalidation_fence, cb);
	struct xe_device *xe = gt_to_xe(ifence->gt);

	trace_xe_gt_tlb_invalidation_fence_cb(xe, &ifence->base);
	if (!ifence->fence->error) {
		queue_work(system_wq, &ifence->work);
	} else {
		ifence->base.base.error = ifence->fence->error;
		dma_fence_signal(&ifence->base.base);
		dma_fence_put(&ifence->base.base);
	}
	dma_fence_put(ifence->fence);
}

static void invalidation_fence_work_func(struct work_struct *w)
{
	struct invalidation_fence *ifence =
		container_of(w, struct invalidation_fence, work);
	struct xe_device *xe = gt_to_xe(ifence->gt);

	trace_xe_gt_tlb_invalidation_fence_work_func(xe, &ifence->base);
	xe_gt_tlb_invalidation_range(ifence->gt, &ifence->base, ifence->start,
				     ifence->end, ifence->asid);
}

static void invalidation_fence_init(struct xe_gt *gt,
				    struct invalidation_fence *ifence,
				    struct dma_fence *fence,
				    u64 start, u64 end, u32 asid)
{
	int ret;

	trace_xe_gt_tlb_invalidation_fence_create(gt_to_xe(gt), &ifence->base);

	xe_gt_tlb_invalidation_fence_init(gt, &ifence->base, false);

	ifence->fence = fence;
	ifence->gt = gt;
	ifence->start = start;
	ifence->end = end;
	ifence->asid = asid;

	INIT_WORK(&ifence->work, invalidation_fence_work_func);
	ret = dma_fence_add_callback(fence, &ifence->cb, invalidation_fence_cb);
	if (ret == -ENOENT) {
		dma_fence_put(ifence->fence);	/* Usually dropped in CB */
		invalidation_fence_work_func(&ifence->work);
	} else if (ret) {
		dma_fence_put(&ifence->base.base);	/* Caller ref */
		dma_fence_put(&ifence->base.base);	/* Creation ref */
	}

	xe_gt_assert(gt, !ret || ret == -ENOENT);
}

struct xe_pt_stage_unbind_walk {
	/** @base: The pagewalk base-class. */
	struct xe_pt_walk base;

	/* Input parameters for the walk */
	/** @tile: The tile we're unbinding from. */
	struct xe_tile *tile;

	/**
	 * @modified_start: Walk range start, modified to include any
	 * shared pagetables that we're the only user of and can thus
	 * treat as private.
	 */
	u64 modified_start;
	/** @modified_end: Walk range end, modified like @modified_start. */
	u64 modified_end;

	/* Output */
	/** @wupd: Structure to track the page-table updates we're building */
	struct xe_walk_update wupd;
};

/*
 * Check whether this range is the only one populating this pagetable,
 * and in that case, update the walk range checks so that higher levels don't
 * view us as a shared pagetable.
 */
static bool xe_pt_check_kill(u64 addr, u64 next, unsigned int level,
			     const struct xe_pt *child,
			     enum page_walk_action *action,
			     struct xe_pt_walk *walk)
{
	struct xe_pt_stage_unbind_walk *xe_walk =
		container_of(walk, typeof(*xe_walk), base);
	unsigned int shift = walk->shifts[level];
	u64 size = 1ull << shift;

	if (IS_ALIGNED(addr, size) && IS_ALIGNED(next, size) &&
	    ((next - addr) >> shift) == child->num_live) {
		u64 size = 1ull << walk->shifts[level + 1];

		*action = ACTION_CONTINUE;

		if (xe_walk->modified_start >= addr)
			xe_walk->modified_start = round_down(addr, size);
		if (xe_walk->modified_end <= next)
			xe_walk->modified_end = round_up(next, size);

		return true;
	}

	return false;
}

static int xe_pt_stage_unbind_entry(struct xe_ptw *parent, pgoff_t offset,
				    unsigned int level, u64 addr, u64 next,
				    struct xe_ptw **child,
				    enum page_walk_action *action,
				    struct xe_pt_walk *walk)
{
	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);

	XE_WARN_ON(!*child);
	XE_WARN_ON(!level);

	xe_pt_check_kill(addr, next, level - 1, xe_child, action, walk);

	return 0;
}

static int
xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset,
				unsigned int level, u64 addr, u64 next,
				struct xe_ptw **child,
				enum page_walk_action *action,
				struct xe_pt_walk *walk)
{
	struct xe_pt_stage_unbind_walk *xe_walk =
		container_of(walk, typeof(*xe_walk), base);
	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
	pgoff_t end_offset;
	u64 size = 1ull << walk->shifts[--level];
	int err;

	if (!IS_ALIGNED(addr, size))
		addr = xe_walk->modified_start;
	if (!IS_ALIGNED(next, size))
		next = xe_walk->modified_end;

	/* Parent == *child is the root pt. Don't kill it. */
	if (parent != *child &&
	    xe_pt_check_kill(addr, next, level, xe_child, action, walk))
		return 0;

	if (!xe_pt_nonshared_offsets(addr, next, level, walk, action, &offset,
				     &end_offset))
		return 0;

	err = xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, true);
	if (err)
		return err;

	xe_walk->wupd.updates[level].update->qwords = end_offset - offset;

	return 0;
}

static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
	.pt_entry = xe_pt_stage_unbind_entry,
	.pt_post_descend = xe_pt_stage_unbind_post_descend,
};

/**
 * xe_pt_stage_unbind() - Build page-table update structures for an unbind
 * operation
 * @tile: The tile we're unbinding for.
 * @vma: The vma we're unbinding.
 * @entries: Caller-provided storage for the update structures.
 *
 * Builds page-table update structures for an unbind operation. The function
 * will attempt to remove all page-tables that we're the only user
 * of, and for that to work, the unbind operation must be committed in the
 * same critical section that blocks racing binds to the same page-table tree.
 *
 * Return: The number of entries used.
 */
static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
				       struct xe_vm_pgtable_update *entries)
{
	struct xe_pt_stage_unbind_walk xe_walk = {
		.base = {
			.ops = &xe_pt_stage_unbind_ops,
			.shifts = xe_normal_pt_shifts,
			.max_level = XE_PT_HIGHEST_LEVEL,
		},
		.tile = tile,
		.modified_start = xe_vma_start(vma),
		.modified_end = xe_vma_end(vma),
		.wupd.entries = entries,
	};
	struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];

	(void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma),
				xe_vma_end(vma), &xe_walk.base);

	return xe_walk.wupd.num_used_entries;
}
1530  
1531  static void
xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update * pt_update,struct xe_tile * tile,struct iosys_map * map,void * ptr,u32 qword_ofs,u32 num_qwords,const struct xe_vm_pgtable_update * update)1532  xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
1533  				  struct xe_tile *tile, struct iosys_map *map,
1534  				  void *ptr, u32 qword_ofs, u32 num_qwords,
1535  				  const struct xe_vm_pgtable_update *update)
1536  {
1537  	struct xe_vm *vm = pt_update->vops->vm;
1538  	u64 empty = __xe_pt_empty_pte(tile, vm, update->pt->level);
1539  	int i;
1540  
1541  	if (map && map->is_iomem)
1542  		for (i = 0; i < num_qwords; ++i)
1543  			xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
1544  				  sizeof(u64), u64, empty);
1545  	else if (map)
1546  		memset64(map->vaddr + qword_ofs * sizeof(u64), empty,
1547  			 num_qwords);
1548  	else
1549  		memset64(ptr, empty, num_qwords);
1550  }
1551  
xe_pt_abort_unbind(struct xe_vma * vma,struct xe_vm_pgtable_update * entries,u32 num_entries)1552  static void xe_pt_abort_unbind(struct xe_vma *vma,
1553  			       struct xe_vm_pgtable_update *entries,
1554  			       u32 num_entries)
1555  {
1556  	int i, j;
1557  
1558  	xe_pt_commit_locks_assert(vma);
1559  
1560  	for (i = num_entries - 1; i >= 0; --i) {
1561  		struct xe_vm_pgtable_update *entry = &entries[i];
1562  		struct xe_pt *pt = entry->pt;
1563  		struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
1564  
1565  		pt->num_live += entry->qwords;
1566  
1567  		if (!pt->level)
1568  			continue;
1569  
1570  		for (j = entry->ofs; j < entry->ofs + entry->qwords; j++)
1571  			pt_dir->children[j] =
1572  				entries[i].pt_entries[j - entry->ofs].pt ?
1573  				&entries[i].pt_entries[j - entry->ofs].pt->base : NULL;
1574  	}
1575  }
1576  
1577  static void
xe_pt_commit_prepare_unbind(struct xe_vma * vma,struct xe_vm_pgtable_update * entries,u32 num_entries)1578  xe_pt_commit_prepare_unbind(struct xe_vma *vma,
1579  			    struct xe_vm_pgtable_update *entries,
1580  			    u32 num_entries)
1581  {
1582  	int i, j;
1583  
1584  	xe_pt_commit_locks_assert(vma);
1585  
1586  	for (i = 0; i < num_entries; ++i) {
1587  		struct xe_vm_pgtable_update *entry = &entries[i];
1588  		struct xe_pt *pt = entry->pt;
1589  		struct xe_pt_dir *pt_dir;
1590  
1591  		pt->num_live -= entry->qwords;
1592  		if (!pt->level)
1593  			continue;
1594  
1595  		pt_dir = as_xe_pt_dir(pt);
1596  		for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) {
1597  			entry->pt_entries[j - entry->ofs].pt =
1598  				xe_pt_entry(pt_dir, j);
1599  			pt_dir->children[j] = NULL;
1600  		}
1601  	}
1602  }
1603  
1604  static void
1605  xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops,
1606  				 struct xe_vma *vma)
1607  {
1608  	u32 current_op = pt_update_ops->current_op;
1609  	struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
1610  	int i, level = 0;
1611  	u64 start, last;
1612  
1613  	for (i = 0; i < pt_op->num_entries; i++) {
1614  		const struct xe_vm_pgtable_update *entry = &pt_op->entries[i];
1615  
1616  		if (entry->pt->level > level)
1617  			level = entry->pt->level;
1618  	}
1619  
1620  	/* Greedy (non-optimal) calculation but simple */
1621  	start = ALIGN_DOWN(xe_vma_start(vma), 0x1ull << xe_pt_shift(level));
1622  	last = ALIGN(xe_vma_end(vma), 0x1ull << xe_pt_shift(level)) - 1;
1623  
1624  	if (start < pt_update_ops->start)
1625  		pt_update_ops->start = start;
1626  	if (last > pt_update_ops->last)
1627  		pt_update_ops->last = last;
1628  }
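
/*
 * Worked example for the greedy calculation above, assuming
 * xe_pt_shift(1) == 21 (2 MiB granularity at level 1): for a VMA spanning
 * [0x230000, 0x4a0000) whose highest touched page-table level is 1,
 * start = ALIGN_DOWN(0x230000, 0x200000) = 0x200000 and
 * last = ALIGN(0x4a0000, 0x200000) - 1 = 0x5fffff, so the range fence
 * interval may cover more than the VMA itself but never less.
 */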
1629  
1630  static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma)
1631  {
1632  	int shift = xe_device_get_root_tile(xe)->media_gt ? 1 : 0;
1633  
1634  	if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
1635  		return dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv,
1636  					       xe->info.tile_count << shift);
1637  
1638  	return 0;
1639  }
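
/*
 * Example: with info.tile_count == 2 and a media GT present on the root
 * tile, shift == 1 and 2 << 1 == 4 fence slots are reserved, presumably
 * one invalidation fence per GT (primary and media) on each tile.
 */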
1640  
1641  static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
1642  			   struct xe_vm_pgtable_update_ops *pt_update_ops,
1643  			   struct xe_vma *vma)
1644  {
1645  	u32 current_op = pt_update_ops->current_op;
1646  	struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
1647  	int err;
1648  
1649  	xe_bo_assert_held(xe_vma_bo(vma));
1650  
1651  	vm_dbg(&xe_vma_vm(vma)->xe->drm,
1652  	       "Preparing bind, with range [%llx...%llx)\n",
1653  	       xe_vma_start(vma), xe_vma_end(vma) - 1);
1654  
1655  	pt_op->vma = NULL;
1656  	pt_op->bind = true;
1657  	pt_op->rebind = BIT(tile->id) & vma->tile_present;
1658  
1659  	err = vma_reserve_fences(tile_to_xe(tile), vma);
1660  	if (err)
1661  		return err;
1662  
1663  	err = xe_pt_prepare_bind(tile, vma, pt_op->entries,
1664  				 &pt_op->num_entries);
1665  	if (!err) {
1666  		xe_tile_assert(tile, pt_op->num_entries <=
1667  			       ARRAY_SIZE(pt_op->entries));
1668  		xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
1669  					pt_op->num_entries, true);
1670  
1671  		xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
1672  		++pt_update_ops->current_op;
1673  		pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
1674  
1675  		/*
1676  		 * If rebind, we have to invalidate the TLB on !LR VMs to purge
1677  		 * cached PTEs that point to freed memory. On LR VMs this is done
1678  		 * automatically when the context is re-enabled by the rebind worker,
1679  		 * and in fault mode it was already done when the PTEs were zapped.
1680  		 *
1681  		 * If !rebind, on scratch-enabled VMs there is a chance the scratch
1682  		 * PTE is already cached in the TLB, so it needs to be invalidated.
1683  		 * On !LR VMs this is done in the ring ops preceding a batch, but on
1684  		 * non-faulting LR, in particular on user-space batch buffer chaining,
1685  		 * it needs to be done here.
1686  		 */
1687  		if ((!pt_op->rebind && xe_vm_has_scratch(vm) &&
1688  		     xe_vm_in_preempt_fence_mode(vm)))
1689  			pt_update_ops->needs_invalidation = true;
1690  		else if (pt_op->rebind && !xe_vm_in_lr_mode(vm))
1691  			/* We also bump if batch_invalidate_tlb is true */
1692  			vm->tlb_flush_seqno++;
1693  
1694  		vma->tile_staged |= BIT(tile->id);
1695  		pt_op->vma = vma;
1696  		xe_pt_commit_prepare_bind(vma, pt_op->entries,
1697  					  pt_op->num_entries, pt_op->rebind);
1698  	} else {
1699  		xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries);
1700  	}
1701  
1702  	return err;
1703  }
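
/*
 * Summary of the TLB decision in bind_op_prepare() above:
 *
 *	rebind && !LR mode:            bump vm->tlb_flush_seqno so the ring
 *	                               ops preceding the next batch flush
 *	!rebind && scratch && preempt  set needs_invalidation so this update
 *	fence mode:                    emits an explicit TLB invalidation
 *	anything else:                 no extra TLB work here
 */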
1704  
1705  static int unbind_op_prepare(struct xe_tile *tile,
1706  			     struct xe_vm_pgtable_update_ops *pt_update_ops,
1707  			     struct xe_vma *vma)
1708  {
1709  	u32 current_op = pt_update_ops->current_op;
1710  	struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
1711  	int err;
1712  
1713  	if (!((vma->tile_present | vma->tile_staged) & BIT(tile->id)))
1714  		return 0;
1715  
1716  	xe_bo_assert_held(xe_vma_bo(vma));
1717  
1718  	vm_dbg(&xe_vma_vm(vma)->xe->drm,
1719  	       "Preparing unbind, with range [%llx...%llx)\n",
1720  	       xe_vma_start(vma), xe_vma_end(vma) - 1);
1721  
1722  	/*
1723  	 * Wait for any pending invalidation to complete. An invalidation running
1724  	 * while we prepare this unbind could corrupt internal page-table state.
1725  	 */
1726  	if (xe_vma_is_userptr(vma) && xe_vm_in_fault_mode(xe_vma_vm(vma)))
1727  		mmu_interval_read_begin(&to_userptr_vma(vma)->userptr.notifier);
1728  
1729  	pt_op->vma = vma;
1730  	pt_op->bind = false;
1731  	pt_op->rebind = false;
1732  
1733  	err = vma_reserve_fences(tile_to_xe(tile), vma);
1734  	if (err)
1735  		return err;
1736  
1737  	pt_op->num_entries = xe_pt_stage_unbind(tile, vma, pt_op->entries);
1738  
1739  	xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
1740  				pt_op->num_entries, false);
1741  	xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
1742  	++pt_update_ops->current_op;
1743  	pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
1744  	pt_update_ops->needs_invalidation = true;
1745  
1746  	xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries);
1747  
1748  	return 0;
1749  }
1750  
1751  static int op_prepare(struct xe_vm *vm,
1752  		      struct xe_tile *tile,
1753  		      struct xe_vm_pgtable_update_ops *pt_update_ops,
1754  		      struct xe_vma_op *op)
1755  {
1756  	int err = 0;
1757  
1758  	xe_vm_assert_held(vm);
1759  
1760  	switch (op->base.op) {
1761  	case DRM_GPUVA_OP_MAP:
1762  		if (!op->map.immediate && xe_vm_in_fault_mode(vm))
1763  			break;
1764  
1765  		err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma);
1766  		pt_update_ops->wait_vm_kernel = true;
1767  		break;
1768  	case DRM_GPUVA_OP_REMAP:
1769  		err = unbind_op_prepare(tile, pt_update_ops,
1770  					gpuva_to_vma(op->base.remap.unmap->va));
1771  
1772  		if (!err && op->remap.prev) {
1773  			err = bind_op_prepare(vm, tile, pt_update_ops,
1774  					      op->remap.prev);
1775  			pt_update_ops->wait_vm_bookkeep = true;
1776  		}
1777  		if (!err && op->remap.next) {
1778  			err = bind_op_prepare(vm, tile, pt_update_ops,
1779  					      op->remap.next);
1780  			pt_update_ops->wait_vm_bookkeep = true;
1781  		}
1782  		break;
1783  	case DRM_GPUVA_OP_UNMAP:
1784  		err = unbind_op_prepare(tile, pt_update_ops,
1785  					gpuva_to_vma(op->base.unmap.va));
1786  		break;
1787  	case DRM_GPUVA_OP_PREFETCH:
1788  		err = bind_op_prepare(vm, tile, pt_update_ops,
1789  				      gpuva_to_vma(op->base.prefetch.va));
1790  		pt_update_ops->wait_vm_kernel = true;
1791  		break;
1792  	default:
1793  		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
1794  	}
1795  
1796  	return err;
1797  }
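
/*
 * Note on DRM_GPUVA_OP_REMAP above: a remap is expressed as one unbind
 * covering the entire old VMA plus up to two binds re-establishing the
 * surviving head (prev) and tail (next) portions of the mapping.
 */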
1798  
1799  static void
1800  xe_pt_update_ops_init(struct xe_vm_pgtable_update_ops *pt_update_ops)
1801  {
1802  	init_llist_head(&pt_update_ops->deferred);
1803  	pt_update_ops->start = ~0x0ull;
1804  	pt_update_ops->last = 0x0ull;
1805  }
1806  
1807  /**
1808   * xe_pt_update_ops_prepare() - Prepare PT update operations
1809   * @tile: Tile of PT update operations
1810   * @vops: VMA operations
1811   *
1812   * Prepare PT update operations, which includes updating internal PT state,
1813   * allocating memory for page tables, populating the page tables being pruned
1814   * in, and creating PT update operations for leaf insertion / removal.
1815   *
1816   * Return: 0 on success, negative error code on error.
1817   */
1818  int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
1819  {
1820  	struct xe_vm_pgtable_update_ops *pt_update_ops =
1821  		&vops->pt_update_ops[tile->id];
1822  	struct xe_vma_op *op;
1823  	int shift = tile->media_gt ? 1 : 0;
1824  	int err;
1825  
1826  	lockdep_assert_held(&vops->vm->lock);
1827  	xe_vm_assert_held(vops->vm);
1828  
1829  	xe_pt_update_ops_init(pt_update_ops);
1830  
1831  	err = dma_resv_reserve_fences(xe_vm_resv(vops->vm),
1832  				      tile_to_xe(tile)->info.tile_count << shift);
1833  	if (err)
1834  		return err;
1835  
1836  	list_for_each_entry(op, &vops->list, link) {
1837  		err = op_prepare(vops->vm, tile, pt_update_ops, op);
1838  
1839  		if (err)
1840  			return err;
1841  	}
1842  
1843  	xe_tile_assert(tile, pt_update_ops->current_op <=
1844  		       pt_update_ops->num_ops);
1845  
1846  #ifdef TEST_VM_OPS_ERROR
1847  	if (vops->inject_error &&
1848  	    vops->vm->xe->vm_inject_error_position == FORCE_OP_ERROR_PREPARE)
1849  		return -ENOSPC;
1850  #endif
1851  
1852  	return 0;
1853  }
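
/*
 * Illustrative call sequence for a PT update on one tile (a sketch, not
 * taken verbatim from a caller; real error handling is more involved):
 *
 *	err = xe_pt_update_ops_prepare(tile, vops);
 *	if (err)
 *		return err;
 *	fence = xe_pt_update_ops_run(tile, vops);
 *	if (IS_ERR(fence)) {
 *		xe_pt_update_ops_abort(tile, vops);
 *		return PTR_ERR(fence);
 *	}
 *	... install or wait on fence ...
 *	xe_pt_update_ops_fini(tile, vops);
 */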
1854  
1855  static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
1856  			   struct xe_vm_pgtable_update_ops *pt_update_ops,
1857  			   struct xe_vma *vma, struct dma_fence *fence,
1858  			   struct dma_fence *fence2)
1859  {
1860  	if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
1861  		dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
1862  				   pt_update_ops->wait_vm_bookkeep ?
1863  				   DMA_RESV_USAGE_KERNEL :
1864  				   DMA_RESV_USAGE_BOOKKEEP);
1865  		if (fence2)
1866  			dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
1867  					   pt_update_ops->wait_vm_bookkeep ?
1868  					   DMA_RESV_USAGE_KERNEL :
1869  					   DMA_RESV_USAGE_BOOKKEEP);
1870  	}
1871  	vma->tile_present |= BIT(tile->id);
1872  	vma->tile_staged &= ~BIT(tile->id);
1873  	if (xe_vma_is_userptr(vma)) {
1874  		lockdep_assert_held_read(&vm->userptr.notifier_lock);
1875  		to_userptr_vma(vma)->userptr.initial_bind = true;
1876  	}
1877  
1878  	/*
1879  	 * Kick rebind worker if this bind triggers preempt fences and not in
1880  	 * the rebind worker
1881  	 */
1882  	if (pt_update_ops->wait_vm_bookkeep &&
1883  	    xe_vm_in_preempt_fence_mode(vm) &&
1884  	    !current->mm)
1885  		xe_vm_queue_rebind_worker(vm);
1886  }
1887  
1888  static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
1889  			     struct xe_vm_pgtable_update_ops *pt_update_ops,
1890  			     struct xe_vma *vma, struct dma_fence *fence,
1891  			     struct dma_fence *fence2)
1892  {
1893  	if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
1894  		dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
1895  				   pt_update_ops->wait_vm_bookkeep ?
1896  				   DMA_RESV_USAGE_KERNEL :
1897  				   DMA_RESV_USAGE_BOOKKEEP);
1898  		if (fence2)
1899  			dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
1900  					   pt_update_ops->wait_vm_bookkeep ?
1901  					   DMA_RESV_USAGE_KERNEL :
1902  					   DMA_RESV_USAGE_BOOKKEEP);
1903  	}
1904  	vma->tile_present &= ~BIT(tile->id);
1905  	if (!vma->tile_present) {
1906  		list_del_init(&vma->combined_links.rebind);
1907  		if (xe_vma_is_userptr(vma)) {
1908  			lockdep_assert_held_read(&vm->userptr.notifier_lock);
1909  
1910  			spin_lock(&vm->userptr.invalidated_lock);
1911  			list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
1912  			spin_unlock(&vm->userptr.invalidated_lock);
1913  		}
1914  	}
1915  }
1916  
1917  static void op_commit(struct xe_vm *vm,
1918  		      struct xe_tile *tile,
1919  		      struct xe_vm_pgtable_update_ops *pt_update_ops,
1920  		      struct xe_vma_op *op, struct dma_fence *fence,
1921  		      struct dma_fence *fence2)
1922  {
1923  	xe_vm_assert_held(vm);
1924  
1925  	switch (op->base.op) {
1926  	case DRM_GPUVA_OP_MAP:
1927  		if (!op->map.immediate && xe_vm_in_fault_mode(vm))
1928  			break;
1929  
1930  		bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
1931  			       fence2);
1932  		break;
1933  	case DRM_GPUVA_OP_REMAP:
1934  		unbind_op_commit(vm, tile, pt_update_ops,
1935  				 gpuva_to_vma(op->base.remap.unmap->va), fence,
1936  				 fence2);
1937  
1938  		if (op->remap.prev)
1939  			bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
1940  				       fence, fence2);
1941  		if (op->remap.next)
1942  			bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
1943  				       fence, fence2);
1944  		break;
1945  	case DRM_GPUVA_OP_UNMAP:
1946  		unbind_op_commit(vm, tile, pt_update_ops,
1947  				 gpuva_to_vma(op->base.unmap.va), fence, fence2);
1948  		break;
1949  	case DRM_GPUVA_OP_PREFETCH:
1950  		bind_op_commit(vm, tile, pt_update_ops,
1951  			       gpuva_to_vma(op->base.prefetch.va), fence, fence2);
1952  		break;
1953  	default:
1954  		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
1955  	}
1956  }
1957  
1958  static const struct xe_migrate_pt_update_ops migrate_ops = {
1959  	.populate = xe_vm_populate_pgtable,
1960  	.clear = xe_migrate_clear_pgtable_callback,
1961  	.pre_commit = xe_pt_pre_commit,
1962  };
1963  
1964  static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
1965  	.populate = xe_vm_populate_pgtable,
1966  	.clear = xe_migrate_clear_pgtable_callback,
1967  	.pre_commit = xe_pt_userptr_pre_commit,
1968  };
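
/*
 * The two vtables differ only in their pre-commit hook: the userptr
 * variant additionally takes vm->userptr.notifier_lock in read mode (and
 * re-checks the userptr VMAs), which is why xe_pt_update_ops_run() does
 * up_read() at the end when needs_userptr_lock is set.
 */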
1969  
1970  /**
1971   * xe_pt_update_ops_run() - Run PT update operations
1972   * @tile: Tile of PT update operations
1973   * @vops: VMA operations
1974   *
1975   * Run PT update operations, which includes committing internal PT state
1976   * changes, creating a job for the PT update operations for leaf insertion /
1977   * removal, and installing the job fence in various places.
1978   *
1979   * Return: fence on success, negative ERR_PTR on error.
1980   */
1981  struct dma_fence *
1982  xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
1983  {
1984  	struct xe_vm *vm = vops->vm;
1985  	struct xe_vm_pgtable_update_ops *pt_update_ops =
1986  		&vops->pt_update_ops[tile->id];
1987  	struct dma_fence *fence;
1988  	struct invalidation_fence *ifence = NULL, *mfence = NULL;
1989  	struct dma_fence **fences = NULL;
1990  	struct dma_fence_array *cf = NULL;
1991  	struct xe_range_fence *rfence;
1992  	struct xe_vma_op *op;
1993  	int err = 0, i;
1994  	struct xe_migrate_pt_update update = {
1995  		.ops = pt_update_ops->needs_userptr_lock ?
1996  			&userptr_migrate_ops :
1997  			&migrate_ops,
1998  		.vops = vops,
1999  		.tile_id = tile->id,
2000  	};
2001  
2002  	lockdep_assert_held(&vm->lock);
2003  	xe_vm_assert_held(vm);
2004  
2005  	if (!pt_update_ops->current_op) {
2006  		xe_tile_assert(tile, xe_vm_in_fault_mode(vm));
2007  
2008  		return dma_fence_get_stub();
2009  	}
2010  
2011  #ifdef TEST_VM_OPS_ERROR
2012  	if (vops->inject_error &&
2013  	    vm->xe->vm_inject_error_position == FORCE_OP_ERROR_RUN)
2014  		return ERR_PTR(-ENOSPC);
2015  #endif
2016  
2017  	if (pt_update_ops->needs_invalidation) {
2018  		ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
2019  		if (!ifence) {
2020  			err = -ENOMEM;
2021  			goto kill_vm_tile1;
2022  		}
2023  		if (tile->media_gt) {
2024  			mfence = kzalloc(sizeof(*mfence), GFP_KERNEL);
2025  			if (!mfence) {
2026  				err = -ENOMEM;
2027  				goto free_ifence;
2028  			}
2029  			fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
2030  			if (!fences) {
2031  				err = -ENOMEM;
2032  				goto free_ifence;
2033  			}
2034  			cf = dma_fence_array_alloc(2);
2035  			if (!cf) {
2036  				err = -ENOMEM;
2037  				goto free_ifence;
2038  			}
2039  		}
2040  	}
2041  
2042  	rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
2043  	if (!rfence) {
2044  		err = -ENOMEM;
2045  		goto free_ifence;
2046  	}
2047  
2048  	fence = xe_migrate_update_pgtables(tile->migrate, &update);
2049  	if (IS_ERR(fence)) {
2050  		err = PTR_ERR(fence);
2051  		goto free_rfence;
2052  	}
2053  
2054  	/* Point of no return - VM killed if failure after this */
2055  	for (i = 0; i < pt_update_ops->current_op; ++i) {
2056  		struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
2057  
2058  		xe_pt_commit(pt_op->vma, pt_op->entries,
2059  			     pt_op->num_entries, &pt_update_ops->deferred);
2060  		pt_op->vma = NULL;	/* skip in xe_pt_update_ops_abort */
2061  	}
2062  
2063  	if (xe_range_fence_insert(&vm->rftree[tile->id], rfence,
2064  				  &xe_range_fence_kfree_ops,
2065  				  pt_update_ops->start,
2066  				  pt_update_ops->last, fence))
2067  		dma_fence_wait(fence, false);
2068  
2069  	/* tlb invalidation must be done before signaling rebind */
2070  	if (ifence) {
2071  		if (mfence)
2072  			dma_fence_get(fence);
2073  		invalidation_fence_init(tile->primary_gt, ifence, fence,
2074  					pt_update_ops->start,
2075  					pt_update_ops->last, vm->usm.asid);
2076  		if (mfence) {
2077  			invalidation_fence_init(tile->media_gt, mfence, fence,
2078  						pt_update_ops->start,
2079  						pt_update_ops->last, vm->usm.asid);
2080  			fences[0] = &ifence->base.base;
2081  			fences[1] = &mfence->base.base;
2082  			dma_fence_array_init(cf, 2, fences,
2083  					     vm->composite_fence_ctx,
2084  					     vm->composite_fence_seqno++,
2085  					     false);
2086  			fence = &cf->base;
2087  		} else {
2088  			fence = &ifence->base.base;
2089  		}
2090  	}
2091  
2092  	if (!mfence) {
2093  		dma_resv_add_fence(xe_vm_resv(vm), fence,
2094  				   pt_update_ops->wait_vm_bookkeep ?
2095  				   DMA_RESV_USAGE_KERNEL :
2096  				   DMA_RESV_USAGE_BOOKKEEP);
2097  
2098  		list_for_each_entry(op, &vops->list, link)
2099  			op_commit(vops->vm, tile, pt_update_ops, op, fence, NULL);
2100  	} else {
2101  		dma_resv_add_fence(xe_vm_resv(vm), &ifence->base.base,
2102  				   pt_update_ops->wait_vm_bookkeep ?
2103  				   DMA_RESV_USAGE_KERNEL :
2104  				   DMA_RESV_USAGE_BOOKKEEP);
2105  
2106  		dma_resv_add_fence(xe_vm_resv(vm), &mfence->base.base,
2107  				   pt_update_ops->wait_vm_bookkeep ?
2108  				   DMA_RESV_USAGE_KERNEL :
2109  				   DMA_RESV_USAGE_BOOKKEEP);
2110  
2111  		list_for_each_entry(op, &vops->list, link)
2112  			op_commit(vops->vm, tile, pt_update_ops, op,
2113  				  &ifence->base.base, &mfence->base.base);
2114  	}
2115  
2116  	if (pt_update_ops->needs_userptr_lock)
2117  		up_read(&vm->userptr.notifier_lock);
2118  
2119  	return fence;
2120  
2121  free_rfence:
2122  	kfree(rfence);
2123  free_ifence:
2124  	kfree(cf);
2125  	kfree(fences);
2126  	kfree(mfence);
2127  	kfree(ifence);
2128  kill_vm_tile1:
2129  	if (err != -EAGAIN && tile->id)
2130  		xe_vm_kill(vops->vm, false);
2131  
2132  	return ERR_PTR(err);
2133  }
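
/*
 * Fence topology produced above when both GTs need TLB invalidation
 * (sketch):
 *
 *	job fence --+--> ifence (primary GT invalidation) --+--> fence array
 *	            +--> mfence (media GT invalidation)   --+    (returned)
 *
 * With only a primary GT the returned fence is &ifence->base.base, and
 * when no invalidation is needed it is the bare migration job fence.
 */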
2134  
2135  /**
2136   * xe_pt_update_ops_fini() - Finish PT update operations
2137   * @tile: Tile of PT update operations
2138   * @vops: VMA operations
2139   *
2140   * Finish PT update operations by committing the deferred destruction of
2141   * page-table memory.
2141   */
2142  void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops)
2143  {
2144  	struct xe_vm_pgtable_update_ops *pt_update_ops =
2145  		&vops->pt_update_ops[tile->id];
2146  	int i;
2147  
2148  	lockdep_assert_held(&vops->vm->lock);
2149  	xe_vm_assert_held(vops->vm);
2150  
2151  	for (i = 0; i < pt_update_ops->current_op; ++i) {
2152  		struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
2153  
2154  		xe_pt_free_bind(pt_op->entries, pt_op->num_entries);
2155  	}
2156  	xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
2157  }
2158  
2159  /**
2160   * xe_pt_update_ops_abort() - Abort PT update operations
2161   * @tile: Tile of PT update operations
2162   * @vops: VMA operations
2163   *
2164   * Abort PT update operations by unwinding internal PT state.
2165   */
2166  void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops)
2167  {
2168  	struct xe_vm_pgtable_update_ops *pt_update_ops =
2169  		&vops->pt_update_ops[tile->id];
2170  	int i;
2171  
2172  	lockdep_assert_held(&vops->vm->lock);
2173  	xe_vm_assert_held(vops->vm);
2174  
2175  	for (i = pt_update_ops->num_ops - 1; i >= 0; --i) {
2176  		struct xe_vm_pgtable_update_op *pt_op =
2177  			&pt_update_ops->ops[i];
2178  
2179  		if (!pt_op->vma || i >= pt_update_ops->current_op)
2180  			continue;
2181  
2182  		if (pt_op->bind)
2183  			xe_pt_abort_bind(pt_op->vma, pt_op->entries,
2184  					 pt_op->num_entries,
2185  					 pt_op->rebind);
2186  		else
2187  			xe_pt_abort_unbind(pt_op->vma, pt_op->entries,
2188  					   pt_op->num_entries);
2189  	}
2190  
2191  	xe_pt_update_ops_fini(tile, vops);
2192  }
2193