1  // SPDX-License-Identifier: GPL-2.0-only OR MIT
2  /*
3   * Copyright (c) 2022 Red Hat.
4   *
5   * Permission is hereby granted, free of charge, to any person obtaining a
6   * copy of this software and associated documentation files (the "Software"),
7   * to deal in the Software without restriction, including without limitation
8   * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9   * and/or sell copies of the Software, and to permit persons to whom the
10   * Software is furnished to do so, subject to the following conditions:
11   *
12   * The above copyright notice and this permission notice shall be included in
13   * all copies or substantial portions of the Software.
14   *
15   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17   * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18   * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19   * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20   * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21   * OTHER DEALINGS IN THE SOFTWARE.
22   *
23   * Authors:
24   *     Danilo Krummrich <dakr@redhat.com>
25   *
26   */
27  
28  #include <drm/drm_gpuvm.h>
29  
30  #include <linux/interval_tree_generic.h>
31  #include <linux/mm.h>
32  
33  /**
34   * DOC: Overview
35   *
36   * The DRM GPU VA Manager, represented by struct drm_gpuvm, keeps track of a
37   * GPU's virtual address (VA) space and manages the corresponding virtual
38   * mappings represented by &drm_gpuva objects. It also keeps track of the
39   * mapping's backing &drm_gem_object buffers.
40   *
41   * &drm_gem_object buffers maintain a list of &drm_gpuva objects representing
42   * all existent GPU VA mappings using this &drm_gem_object as backing buffer.
43   *
44   * GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also
45   * keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'.
46   *
47   * The GPU VA manager internally uses an rb-tree to manage the
48   * &drm_gpuva mappings within a GPU's virtual address space.
49   *
50   * The &drm_gpuvm structure contains a special &drm_gpuva representing the
51   * portion of VA space reserved by the kernel. This node is initialized together
52   * with the GPU VA manager instance and removed when the GPU VA manager is
53   * destroyed.
54   *
55   * In a typical application, drivers would embed struct drm_gpuvm and
56   * struct drm_gpuva within their own driver specific structures; hence, the GPU
57   * VA manager neither performs memory allocations of its own nor allocates
58   * &drm_gpuva entries.
59   *
60   * The data structures needed to store &drm_gpuvas within the &drm_gpuvm are
61   * contained within struct drm_gpuva already. Hence, for inserting &drm_gpuva
62   * entries from within dma-fence signalling critical sections it is enough to
63   * pre-allocate the &drm_gpuva structures.
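 *
 * For illustration, a driver might embed the structures along the following
 * lines (a minimal sketch; struct driver_gpu_vm and struct driver_mapping are
 * hypothetical driver structures)::
 *
 *	struct driver_gpu_vm {
 *		struct drm_gpuvm base;
 *		// driver specific state, e.g. the page table root
 *	};
 *
 *	struct driver_mapping {
 *		struct drm_gpuva base;
 *		// driver specific per-mapping state
 *	};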
64   *
65   * &drm_gem_objects which are private to a single VM can share a common
66   * &dma_resv in order to improve locking efficiency (e.g. with &drm_exec).
67   * For this purpose drivers must pass a &drm_gem_object to drm_gpuvm_init(), in
68   * the following called 'resv object', which serves as the container of the
69   * GPUVM's shared &dma_resv. This resv object can be a driver specific
70   * &drm_gem_object, such as the &drm_gem_object containing the root page table,
71   * but it can also be a 'dummy' object, which can be allocated with
72   * drm_gpuvm_resv_object_alloc().
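 *
 * For instance, a driver could set up such a dummy resv object as follows (a
 * minimal sketch; va_space_size and driver_gpuvm_ops are placeholders)::
 *
 *	struct drm_gem_object *r_obj = drm_gpuvm_resv_object_alloc(drm);
 *
 *	if (!r_obj)
 *		return -ENOMEM;
 *
 *	drm_gpuvm_init(gpuvm, "example", 0, drm, r_obj, 0, va_space_size,
 *		       0, 0, &driver_gpuvm_ops);
 *	// drm_gpuvm_init() takes its own reference on the resv object.
 *	drm_gem_object_put(r_obj);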
73   *
74   * In order to connect a struct drm_gpuva to its backing &drm_gem_object, each
75   * &drm_gem_object maintains a list of &drm_gpuvm_bo structures, and each
76   * &drm_gpuvm_bo contains a list of &drm_gpuva structures.
77   *
78   * A &drm_gpuvm_bo is an abstraction that represents a combination of a
79   * &drm_gpuvm and a &drm_gem_object. Every such combination should be unique.
80   * This is ensured by the API through drm_gpuvm_bo_obtain() and
81   * drm_gpuvm_bo_obtain_prealloc() which first look into the corresponding
82   * &drm_gem_object list of &drm_gpuvm_bos for an existing instance of this
83   * particular combination. If not existent a new instance is created and linked
84   * to the &drm_gem_object.
85   *
86   * &drm_gpuvm_bo structures, since unique for a given &drm_gpuvm, are also used
87   * as entries for the &drm_gpuvm's lists of external and evicted objects. Those
88   * lists are maintained in order to accelerate locking of dma-resv locks and
89   * validation of evicted objects bound in a &drm_gpuvm. For instance, all
90   * &drm_gem_object's &dma_resv of a given &drm_gpuvm can be locked by calling
91   * drm_gpuvm_exec_lock(). Once locked drivers can call drm_gpuvm_validate() in
92   * order to validate all evicted &drm_gem_objects. It is also possible to lock
93   * additional &drm_gem_objects by providing the corresponding parameters to
94   * drm_gpuvm_exec_lock() as well as open code the &drm_exec loop while making
95   * use of helper functions such as drm_gpuvm_prepare_range() or
96   * drm_gpuvm_prepare_objects().
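 *
 * A rough sketch of the resulting lock and validate sequence in a driver's job
 * submission path might look as follows (a simplified example; job_fence and
 * the chosen &dma_resv_usage values are driver specific)::
 *
 *	struct drm_gpuvm_exec vm_exec = {
 *		.vm = gpuvm,
 *		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
 *		.num_fences = 1,
 *	};
 *	int ret;
 *
 *	ret = drm_gpuvm_exec_lock(&vm_exec);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_gpuvm_validate(gpuvm, &vm_exec.exec);
 *	if (ret) {
 *		drm_gpuvm_exec_unlock(&vm_exec);
 *		return ret;
 *	}
 *
 *	// ... submit the job, then attach its fence ...
 *	drm_gpuvm_exec_resv_add_fence(&vm_exec, job_fence,
 *				      DMA_RESV_USAGE_BOOKKEEP,
 *				      DMA_RESV_USAGE_BOOKKEEP);
 *	drm_gpuvm_exec_unlock(&vm_exec);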
97   *
98   * Every bound &drm_gem_object is treated as an external object when its &dma_resv
99   * structure is different from the &drm_gpuvm's common &dma_resv structure.
100   */
101  
102  /**
103   * DOC: Split and Merge
104   *
105   * Besides its capability to manage and represent a GPU VA space, the
106   * GPU VA manager also provides functions to let the &drm_gpuvm calculate a
107   * sequence of operations to satisfy a given map or unmap request.
108   *
109   * Therefore the DRM GPU VA manager provides an algorithm implementing splitting
110   * and merging of existent GPU VA mappings with the ones that are requested to
111   * be mapped or unmapped. This feature is required by the Vulkan API to
112   * implement Vulkan 'Sparse Memory Bindings' - drivers UAPIs often refer to this
113   * as VM BIND.
114   *
115   * Drivers can call drm_gpuvm_sm_map() to receive a sequence of callbacks
116   * containing map, unmap and remap operations for a given newly requested
117   * mapping. The sequence of callbacks represents the set of operations to
118   * execute in order to integrate the new mapping cleanly into the current state
119   * of the GPU VA space.
120   *
121   * Depending on how the new GPU VA mapping intersects with the existent mappings
122   * of the GPU VA space the &drm_gpuvm_ops callbacks contain an arbitrary amount
123   * of unmap operations, a maximum of two remap operations and a single map
124   * operation. The caller might receive no callback at all if no operation is
125   * required, e.g. if the requested mapping already exists in the exact same way.
126   *
127   * The single map operation represents the original map operation requested by
128   * the caller.
129   *
130   * &drm_gpuva_op_unmap contains a 'keep' field, which indicates whether the
131   * &drm_gpuva to unmap is physically contiguous with the original mapping
132   * request. Optionally, if 'keep' is set, drivers may keep the actual page table
133   * entries for this &drm_gpuva, adding only the missing page table entries and
134   * updating the &drm_gpuvm's view of things accordingly.
135   *
136   * Drivers may do the same optimization, namely delta page table updates, also
137   * for remap operations. This is possible since &drm_gpuva_op_remap consists of
138   * one unmap operation and one or two map operations, such that drivers can
139   * derive the page table update delta accordingly.
140   *
141   * Note that there can't be more than two existent mappings to split up, one at
142   * the beginning and one at the end of the new mapping, hence there is a
143   * maximum of two remap operations.
144   *
145   * Analogous to drm_gpuvm_sm_map() drm_gpuvm_sm_unmap() uses &drm_gpuvm_ops to
146   * call back into the driver in order to unmap a range of GPU VA space. The
147   * logic behind this function is much simpler though: For all existent mappings
148   * enclosed by the given range, unmap operations are created. For mappings which
149   * are only partially located within the given range, remap operations are
150   * created such that those mappings are split up and re-mapped partially.
151   *
152   * As an alternative to drm_gpuvm_sm_map() and drm_gpuvm_sm_unmap(),
153   * drm_gpuvm_sm_map_ops_create() and drm_gpuvm_sm_unmap_ops_create() can be used
154   * to directly obtain an instance of struct drm_gpuva_ops containing a list of
155   * &drm_gpuva_op, which can be iterated with drm_gpuva_for_each_op(). This list
156   * contains the &drm_gpuva_ops analogous to the callbacks one would receive when
157   * calling drm_gpuvm_sm_map() or drm_gpuvm_sm_unmap(). While this way requires
158   * more memory (to allocate the &drm_gpuva_ops), it provides drivers a way to
159   * iterate the &drm_gpuva_op multiple times, e.g. once in a context where memory
160   * allocations are possible (e.g. to allocate GPU page tables) and once in the
161   * dma-fence signalling critical path.
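 *
 * For instance, a sketch of unmapping a range through the ops interface,
 * assuming driver_unmap_op() is a hypothetical driver helper performing the
 * actual page table updates::
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *
 *	ops = drm_gpuvm_sm_unmap_ops_create(gpuvm, addr, range);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops)
 *		driver_unmap_op(op);
 *
 *	drm_gpuva_ops_free(gpuvm, ops);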
162   *
163   * To update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert() and
164   * drm_gpuva_remove() may be used. These functions can safely be used from
165   * &drm_gpuvm_ops callbacks originating from drm_gpuvm_sm_map() or
166   * drm_gpuvm_sm_unmap(). However, it might be more convenient to use the
167   * provided helper functions drm_gpuva_map(), drm_gpuva_remap() and
168   * drm_gpuva_unmap() instead.
169   *
170   * The following diagram depicts the basic relationships of existent GPU VA
171   * mappings, a newly requested mapping and the resulting mappings as implemented
172   * by drm_gpuvm_sm_map() - it doesn't cover any arbitrary combinations of these.
173   *
174   * 1) Requested mapping is identical. Replace it, but indicate the backing PTEs
175   *    could be kept.
176   *
177   *    ::
178   *
179   *	     0     a     1
180   *	old: |-----------| (bo_offset=n)
181   *
182   *	     0     a     1
183   *	req: |-----------| (bo_offset=n)
184   *
185   *	     0     a     1
186   *	new: |-----------| (bo_offset=n)
187   *
188   *
189   * 2) Requested mapping is identical, except for the BO offset, hence replace
190   *    the mapping.
191   *
192   *    ::
193   *
194   *	     0     a     1
195   *	old: |-----------| (bo_offset=n)
196   *
197   *	     0     a     1
198   *	req: |-----------| (bo_offset=m)
199   *
200   *	     0     a     1
201   *	new: |-----------| (bo_offset=m)
202   *
203   *
204   * 3) Requested mapping is identical, except for the backing BO, hence replace
205   *    the mapping.
206   *
207   *    ::
208   *
209   *	     0     a     1
210   *	old: |-----------| (bo_offset=n)
211   *
212   *	     0     b     1
213   *	req: |-----------| (bo_offset=n)
214   *
215   *	     0     b     1
216   *	new: |-----------| (bo_offset=n)
217   *
218   *
219   * 4) Existent mapping is a left aligned subset of the requested one, hence
220   *    replace the existent one.
221   *
222   *    ::
223   *
224   *	     0  a  1
225   *	old: |-----|       (bo_offset=n)
226   *
227   *	     0     a     2
228   *	req: |-----------| (bo_offset=n)
229   *
230   *	     0     a     2
231   *	new: |-----------| (bo_offset=n)
232   *
233   *    .. note::
234   *       We expect to see the same result for a request with a different BO
235   *       and/or non-contiguous BO offset.
236   *
237   *
238   * 5) Requested mapping's range is a left aligned subset of the existent one,
239   *    but backed by a different BO. Hence, map the requested mapping and split
240   *    the existent one adjusting its BO offset.
241   *
242   *    ::
243   *
244   *	     0     a     2
245   *	old: |-----------| (bo_offset=n)
246   *
247   *	     0  b  1
248   *	req: |-----|       (bo_offset=n)
249   *
250   *	     0  b  1  a' 2
251   *	new: |-----|-----| (b.bo_offset=n, a.bo_offset=n+1)
252   *
253   *    .. note::
254   *       We expect to see the same result for a request with a different BO
255   *       and/or non-contiguous BO offset.
256   *
257   *
258   * 6) Existent mapping is a superset of the requested mapping. Split it up, but
259   *    indicate that the backing PTEs could be kept.
260   *
261   *    ::
262   *
263   *	     0     a     2
264   *	old: |-----------| (bo_offset=n)
265   *
266   *	     0  a  1
267   *	req: |-----|       (bo_offset=n)
268   *
269   *	     0  a  1  a' 2
270   *	new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
271   *
272   *
273   * 7) Requested mapping's range is a right aligned subset of the existent one,
274   *    but backed by a different BO. Hence, map the requested mapping and split
275   *    the existent one, without adjusting the BO offset.
276   *
277   *    ::
278   *
279   *	     0     a     2
280   *	old: |-----------| (bo_offset=n)
281   *
282   *	           1  b  2
283   *	req:       |-----| (bo_offset=m)
284   *
285   *	     0  a  1  b  2
286   *	new: |-----|-----| (a.bo_offset=n,b.bo_offset=m)
287   *
288   *
289   * 8) Existent mapping is a superset of the requested mapping. Split it up, but
290   *    indicate that the backing PTEs could be kept.
291   *
292   *    ::
293   *
294   *	     0     a     2
295   *	old: |-----------| (bo_offset=n)
296   *
297   *	           1  a  2
298   *	req:       |-----| (bo_offset=n+1)
299   *
300   *	     0  a' 1  a  2
301   *	new: |-----|-----| (a'.bo_offset=n, a.bo_offset=n+1)
302   *
303   *
304   * 9) Existent mapping is overlapped at the end by the requested mapping backed
305   *    by a different BO. Hence, map the requested mapping and split up the
306   *    existent one, without adjusting the BO offset.
307   *
308   *    ::
309   *
310   *	     0     a     2
311   *	old: |-----------|       (bo_offset=n)
312   *
313   *	           1     b     3
314   *	req:       |-----------| (bo_offset=m)
315   *
316   *	     0  a  1     b     3
317   *	new: |-----|-----------| (a.bo_offset=n,b.bo_offset=m)
318   *
319   *
320   * 10) Existent mapping is overlapped by the requested mapping, both having the
321   *     same backing BO with a contiguous offset. Indicate the backing PTEs of
322   *     the old mapping could be kept.
323   *
324   *     ::
325   *
326   *	      0     a     2
327   *	 old: |-----------|       (bo_offset=n)
328   *
329   *	            1     a     3
330   *	 req:       |-----------| (bo_offset=n+1)
331   *
332   *	      0  a' 1     a     3
333   *	 new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
334   *
335   *
336   * 11) Requested mapping's range is a centered subset of the existent one
337   *     having a different backing BO. Hence, map the requested mapping and split
338   *     up the existent one in two mappings, adjusting the BO offset of the right
339   *     one accordingly.
340   *
341   *     ::
342   *
343   *	      0        a        3
344   *	 old: |-----------------| (bo_offset=n)
345   *
346   *	            1  b  2
347   *	 req:       |-----|       (bo_offset=m)
348   *
349   *	      0  a  1  b  2  a' 3
350   *	 new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2)
351   *
352   *
353   * 12) Requested mapping is a contiguous subset of the existent one. Split it
354   *     up, but indicate that the backing PTEs could be kept.
355   *
356   *     ::
357   *
358   *	      0        a        3
359   *	 old: |-----------------| (bo_offset=n)
360   *
361   *	            1  a  2
362   *	 req:       |-----|       (bo_offset=n+1)
363   *
364   *	      0  a' 1  a  2 a'' 3
365   *	 new: |-----|-----|-----| (a'.bo_offset=n, a.bo_offset=n+1, a''.bo_offset=n+2)
366   *
367   *
368   * 13) Existent mapping is a right aligned subset of the requested one, hence
369   *     replace the existent one.
370   *
371   *     ::
372   *
373   *	            1  a  2
374   *	 old:       |-----| (bo_offset=n+1)
375   *
376   *	      0     a     2
377   *	 req: |-----------| (bo_offset=n)
378   *
379   *	      0     a     2
380   *	 new: |-----------| (bo_offset=n)
381   *
382   *     .. note::
383   *        We expect to see the same result for a request with a different BO
384   *        and/or non-contiguous BO offset.
385   *
386   *
387   * 14) Existent mapping is a centered subset of the requested one, hence
388   *     replace the existent one.
389   *
390   *     ::
391   *
392   *	            1  a  2
393   *	 old:       |-----| (bo_offset=n+1)
394   *
395   *	      0        a       3
396   *	 req: |----------------| (bo_offset=n)
397   *
398   *	      0        a       3
399   *	 new: |----------------| (bo_offset=n)
400   *
401   *     .. note::
402   *        We expect to see the same result for a request with a different BO
403   *        and/or non-contiguous BO offset.
404   *
405   *
406   * 15) Existent mapping is overlapped at the beginning by the requested mapping
407   *     backed by a different BO. Hence, map the requested mapping and split up
408   *     the existent one, adjusting its BO offset accordingly.
409   *
410   *     ::
411   *
412   *	            1     a     3
413   *	 old:       |-----------| (bo_offset=n)
414   *
415   *	      0     b     2
416   *	 req: |-----------|       (bo_offset=m)
417   *
418   *	      0     b     2  a' 3
419   *	 new: |-----------|-----| (b.bo_offset=m,a.bo_offset=n+2)
420   */
421  
422  /**
423   * DOC: Locking
424   *
425   * In terms of managing &drm_gpuva entries, DRM GPUVM does not take care of
426   * locking itself; it is the driver's responsibility to take care of locking.
427   * Drivers might want to protect the following operations: inserting, removing
428   * and iterating &drm_gpuva objects as well as generating all kinds of
429   * operations, such as split / merge or prefetch.
430   *
431   * DRM GPUVM also does not take care of the locking of the backing
432   * &drm_gem_object buffers' GPU VA lists and &drm_gpuvm_bo abstractions by
433   * itself; drivers are responsible for enforcing mutual exclusion using either the
434   * GEM's dma_resv lock or, alternatively, a driver specific external lock. For the
435   * latter see also drm_gem_gpuva_set_lock().
436   *
437   * However, DRM GPUVM contains lockdep checks to ensure callers of its API hold
438   * the corresponding lock whenever the &drm_gem_object's GPU VA list is accessed
439   * by functions such as drm_gpuva_link() or drm_gpuva_unlink(), but also
440   * drm_gpuvm_bo_obtain() and drm_gpuvm_bo_put().
441   *
442   * The latter is required since on creation and destruction of a &drm_gpuvm_bo,
443   * the &drm_gpuvm_bo is attached to / removed from the &drm_gem_object's gpuva list.
444   * Subsequent calls to drm_gpuvm_bo_obtain() for the same &drm_gpuvm and
445   * &drm_gem_object must be able to observe previous creations and destructions
446   * of &drm_gpuvm_bos in order to keep instances unique.
447   *
448   * The &drm_gpuvm's lists for keeping track of external and evicted objects are
449   * protected against concurrent insertion / removal and iteration internally.
450   *
451   * However, drivers still need to protect concurrent calls to functions
452   * iterating those lists, namely drm_gpuvm_prepare_objects() and
453   * drm_gpuvm_validate().
454   *
455   * Alternatively, drivers can set the &DRM_GPUVM_RESV_PROTECTED flag to indicate
456   * that the corresponding &dma_resv locks are held in order to protect the
457   * lists. If &DRM_GPUVM_RESV_PROTECTED is set, internal locking is disabled and
458   * the corresponding lockdep checks are enabled. This is an optimization for
459   * drivers which are capable of taking the corresponding &dma_resv locks and
460   * hence do not require internal locking.
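 *
 * A minimal sketch of opting in to this mode (the remaining drm_gpuvm_init()
 * arguments are placeholders)::
 *
 *	drm_gpuvm_init(gpuvm, "example", DRM_GPUVM_RESV_PROTECTED, drm, r_obj,
 *		       0, va_space_size, 0, 0, &driver_gpuvm_ops);
 *
 *	// With this flag set, all relevant dma-resv locks, including the
 *	// GPUVM's common one, must already be held (e.g. by locking through
 *	// drm_gpuvm_exec_lock()) when calling drm_gpuvm_prepare_objects(),
 *	// drm_gpuvm_validate() or when updating the extobj and evict lists.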
461   */
462  
463  /**
464   * DOC: Examples
465   *
466   * This section gives two examples of how to let the DRM GPUVA Manager generate
467   * &drm_gpuva_op in order to satisfy a given map or unmap request and how to
468   * make use of them.
469   *
470   * The code below is strictly limited to illustrating the generic usage pattern.
471   * To maintain simplicity, it doesn't make use of any abstractions for common
472   * code, different (asynchronous) stages with fence signalling critical paths,
473   * any other helpers or error handling in terms of freeing memory and dropping
474   * previously taken locks.
475   *
476   * 1) Obtain a list of &drm_gpuva_op to create a new mapping::
477   *
478   *	// Allocates a new &drm_gpuva.
479   *	struct drm_gpuva * driver_gpuva_alloc(void);
480   *
481   *	// Typically drivers would embed the &drm_gpuvm and &drm_gpuva
482   *	// structure in individual driver structures and lock the dma-resv with
483   *	// drm_exec or similar helpers.
484   *	int driver_mapping_create(struct drm_gpuvm *gpuvm,
485   *				  u64 addr, u64 range,
486   *				  struct drm_gem_object *obj, u64 offset)
487   *	{
488   *		struct drm_gpuva_ops *ops;
489   *		struct drm_gpuva_op *op;
490   *		struct drm_gpuvm_bo *vm_bo;
491   *
492   *		driver_lock_va_space();
493   *		ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
494   *						  obj, offset);
495   *		if (IS_ERR(ops))
496   *			return PTR_ERR(ops);
497   *
498   *		vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
499   *		if (IS_ERR(vm_bo))
500   *			return PTR_ERR(vm_bo);
501   *
502   *		drm_gpuva_for_each_op(op, ops) {
503   *			struct drm_gpuva *va;
504   *
505   *			switch (op->op) {
506   *			case DRM_GPUVA_OP_MAP:
507   *				va = driver_gpuva_alloc();
508   *				if (!va)
509   *					; // unwind previous VA space updates,
510   *					  // free memory and unlock
511   *
512   *				driver_vm_map();
513   *				drm_gpuva_map(gpuvm, va, &op->map);
514   *				drm_gpuva_link(va, vm_bo);
515   *
516   *				break;
517   *			case DRM_GPUVA_OP_REMAP: {
518   *				struct drm_gpuva *prev = NULL, *next = NULL;
519   *
520   *				va = op->remap.unmap->va;
521   *
522   *				if (op->remap.prev) {
523   *					prev = driver_gpuva_alloc();
524   *					if (!prev)
525   *						; // unwind previous VA space
526   *						  // updates, free memory and
527   *						  // unlock
528   *				}
529   *
530   *				if (op->remap.next) {
531   *					next = driver_gpuva_alloc();
532   *					if (!next)
533   *						; // unwind previous VA space
534   *						  // updates, free memory and
535   *						  // unlock
536   *				}
537   *
538   *				driver_vm_remap();
539   *				drm_gpuva_remap(prev, next, &op->remap);
540   *
541   *				if (prev)
542   *					drm_gpuva_link(prev, va->vm_bo);
543   *				if (next)
544   *					drm_gpuva_link(next, va->vm_bo);
545   *				drm_gpuva_unlink(va);
546   *
547   *				break;
548   *			}
549   *			case DRM_GPUVA_OP_UNMAP:
550   *				va = op->unmap.va;
551   *
552   *				driver_vm_unmap();
553   *				drm_gpuva_unlink(va);
554   *				drm_gpuva_unmap(&op->unmap);
555   *
556   *				break;
557   *			default:
558   *				break;
559   *			}
560   *		}
561   *		drm_gpuvm_bo_put(vm_bo);
562   *		driver_unlock_va_space();
563   *
564   *		return 0;
565   *	}
566   *
567   * 2) Receive a callback for each &drm_gpuva_op to create a new mapping::
568   *
569   *	struct driver_context {
570   *		struct drm_gpuvm *gpuvm;
571   *		struct drm_gpuvm_bo *vm_bo;
572   *		struct drm_gpuva *new_va;
573   *		struct drm_gpuva *prev_va;
574   *		struct drm_gpuva *next_va;
575   *	};
576   *
577   *	// ops to pass to drm_gpuvm_init()
578   *	static const struct drm_gpuvm_ops driver_gpuvm_ops = {
579   *		.sm_step_map = driver_gpuva_map,
580   *		.sm_step_remap = driver_gpuva_remap,
581   *		.sm_step_unmap = driver_gpuva_unmap,
582   *	};
583   *
584   *	// Typically drivers would embed the &drm_gpuvm and &drm_gpuva
585   *	// structure in individual driver structures and lock the dma-resv with
586   *	// drm_exec or similar helpers.
587   *	int driver_mapping_create(struct drm_gpuvm *gpuvm,
588   *				  u64 addr, u64 range,
589   *				  struct drm_gem_object *obj, u64 offset)
590   *	{
591   *		struct driver_context ctx;
593   *		struct drm_gpuva_ops *ops;
594   *		struct drm_gpuva_op *op;
595   *		int ret = 0;
596   *
597   *		ctx.gpuvm = gpuvm;
598   *
599   *		ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL);
600   *		ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL);
601   *		ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL);
602   *		ctx.vm_bo = drm_gpuvm_bo_create(gpuvm, obj);
603   *		if (!ctx.new_va || !ctx.prev_va || !ctx.next_va || !ctx.vm_bo) {
604   *			ret = -ENOMEM;
605   *			goto out;
606   *		}
607   *
608   *		// Typically protected with a driver specific GEM gpuva lock
609   *		// used in the fence signaling path for drm_gpuva_link() and
610   *		// drm_gpuva_unlink(), hence pre-allocate.
611   *		ctx.vm_bo = drm_gpuvm_bo_obtain_prealloc(ctx.vm_bo);
612   *
613   *		driver_lock_va_space();
614   *		ret = drm_gpuvm_sm_map(gpuvm, &ctx, addr, range, obj, offset);
615   *		driver_unlock_va_space();
616   *
617   *	out:
618   *		drm_gpuvm_bo_put(ctx.vm_bo);
619   *		kfree(ctx.new_va);
620   *		kfree(ctx.prev_va);
621   *		kfree(ctx.next_va);
622   *		return ret;
623   *	}
624   *
625   *	int driver_gpuva_map(struct drm_gpuva_op *op, void *__ctx)
626   *	{
627   *		struct driver_context *ctx = __ctx;
628   *
629   *		drm_gpuva_map(ctx->gpuvm, ctx->new_va, &op->map);
630   *
631   *		drm_gpuva_link(ctx->new_va, ctx->vm_bo);
632   *
633   *		// prevent the new GPUVA from being freed in
634   *		// driver_mapping_create()
635   *		ctx->new_va = NULL;
636   *
637   *		return 0;
638   *	}
639   *
640   *	int driver_gpuva_remap(struct drm_gpuva_op *op, void *__ctx)
641   *	{
642   *		struct driver_context *ctx = __ctx;
643   *		struct drm_gpuva *va = op->remap.unmap->va;
644   *
645   *		drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
646   *
647   *		if (op->remap.prev) {
648   *			drm_gpuva_link(ctx->prev_va, va->vm_bo);
649   *			ctx->prev_va = NULL;
650   *		}
651   *
652   *		if (op->remap.next) {
653   *			drm_gpuva_link(ctx->next_va, va->vm_bo);
654   *			ctx->next_va = NULL;
655   *		}
656   *
657   *		drm_gpuva_unlink(va);
658   *		kfree(va);
659   *
660   *		return 0;
661   *	}
662   *
663   *	int driver_gpuva_unmap(struct drm_gpuva_op *op, void *__ctx)
664   *	{
665   *		drm_gpuva_unlink(op->unmap.va);
666   *		drm_gpuva_unmap(&op->unmap);
667   *		kfree(op->unmap.va);
668   *
669   *		return 0;
670   *	}
671   */
672  
673  /**
674   * get_next_vm_bo_from_list() - get the next vm_bo element
675   * @__gpuvm: the &drm_gpuvm
676   * @__list_name: the name of the list we're iterating on
677   * @__local_list: a pointer to the local list used to store already iterated items
678   * @__prev_vm_bo: the previous element we got from get_next_vm_bo_from_list()
679   *
680   * This helper is here to provide lockless list iteration. Lockless as in, the
681   * iterator releases the lock immediately after picking the first element from
682   * the list, so list insertion and deletion can happen concurrently.
683   *
684   * Elements popped from the original list are kept in a local list, so removal
685   * and is_empty checks can still happen while we're iterating the list.
686   */
687  #define get_next_vm_bo_from_list(__gpuvm, __list_name, __local_list, __prev_vm_bo)	\
688  	({										\
689  		struct drm_gpuvm_bo *__vm_bo = NULL;					\
690  											\
691  		drm_gpuvm_bo_put(__prev_vm_bo);						\
692  											\
693  		spin_lock(&(__gpuvm)->__list_name.lock);				\
694  		if (!(__gpuvm)->__list_name.local_list)					\
695  			(__gpuvm)->__list_name.local_list = __local_list;		\
696  		else									\
697  			drm_WARN_ON((__gpuvm)->drm,					\
698  				    (__gpuvm)->__list_name.local_list != __local_list);	\
699  											\
700  		while (!list_empty(&(__gpuvm)->__list_name.list)) {			\
701  			__vm_bo = list_first_entry(&(__gpuvm)->__list_name.list,	\
702  						   struct drm_gpuvm_bo,			\
703  						   list.entry.__list_name);		\
704  			if (kref_get_unless_zero(&__vm_bo->kref)) {			\
705  				list_move_tail(&(__vm_bo)->list.entry.__list_name,	\
706  					       __local_list);				\
707  				break;							\
708  			} else {							\
709  				list_del_init(&(__vm_bo)->list.entry.__list_name);	\
710  				__vm_bo = NULL;						\
711  			}								\
712  		}									\
713  		spin_unlock(&(__gpuvm)->__list_name.lock);				\
714  											\
715  		__vm_bo;								\
716  	})
717  
718  /**
719   * for_each_vm_bo_in_list() - internal vm_bo list iterator
720   * @__gpuvm: the &drm_gpuvm
721   * @__list_name: the name of the list we're iterating on
722   * @__local_list: a pointer to the local list used to store already iterated items
723   * @__vm_bo: the struct drm_gpuvm_bo to assign in each iteration step
724   *
725   * This helper is here to provide lockless list iteration. Lockless as in, the
726   * iterator releases the lock immediately after picking the first element from the
727   * list, hence list insertion and deletion can happen concurrently.
728   *
729   * It is not allowed to re-assign the vm_bo pointer from inside this loop.
730   *
731   * Typical use:
732   *
733   *	struct drm_gpuvm_bo *vm_bo;
734   *	LIST_HEAD(my_local_list);
735   *
736   *	ret = 0;
737   *	for_each_vm_bo_in_list(gpuvm, <list_name>, &my_local_list, vm_bo) {
738   *		ret = do_something_with_vm_bo(..., vm_bo);
739   *		if (ret)
740   *			break;
741   *	}
742   *	// Drop ref in case we break out of the loop.
743   *	drm_gpuvm_bo_put(vm_bo);
744   *	restore_vm_bo_list(gpuvm, <list_name>, &my_local_list);
745   *
746   *
747   * Only used for internal list iterations, not meant to be exposed to the outside
748   * world.
749   */
750  #define for_each_vm_bo_in_list(__gpuvm, __list_name, __local_list, __vm_bo)	\
751  	for (__vm_bo = get_next_vm_bo_from_list(__gpuvm, __list_name,		\
752  						__local_list, NULL);		\
753  	     __vm_bo;								\
754  	     __vm_bo = get_next_vm_bo_from_list(__gpuvm, __list_name,		\
755  						__local_list, __vm_bo))
756  
757  static void
758  __restore_vm_bo_list(struct drm_gpuvm *gpuvm, spinlock_t *lock,
759  		     struct list_head *list, struct list_head **local_list)
760  {
761  	/* Merge back the two lists, moving local list elements to the
762  	 * head to preserve previous ordering, in case it matters.
763  	 */
764  	spin_lock(lock);
765  	if (*local_list) {
766  		list_splice(*local_list, list);
767  		*local_list = NULL;
768  	}
769  	spin_unlock(lock);
770  }
771  
772  /**
773   * restore_vm_bo_list() - move vm_bo elements back to their original list
774   * @__gpuvm: the &drm_gpuvm
775   * @__list_name: the name of the list we're iterating on
776   *
777   * When we're done iterating a vm_bo list, we should call restore_vm_bo_list()
778   * to restore the original state and let new iterations take place.
779   */
780  #define restore_vm_bo_list(__gpuvm, __list_name)			\
781  	__restore_vm_bo_list((__gpuvm), &(__gpuvm)->__list_name.lock,	\
782  			     &(__gpuvm)->__list_name.list,		\
783  			     &(__gpuvm)->__list_name.local_list)
784  
785  static void
786  cond_spin_lock(spinlock_t *lock, bool cond)
787  {
788  	if (cond)
789  		spin_lock(lock);
790  }
791  
792  static void
793  cond_spin_unlock(spinlock_t *lock, bool cond)
794  {
795  	if (cond)
796  		spin_unlock(lock);
797  }
798  
799  static void
800  __drm_gpuvm_bo_list_add(struct drm_gpuvm *gpuvm, spinlock_t *lock,
801  			struct list_head *entry, struct list_head *list)
802  {
803  	cond_spin_lock(lock, !!lock);
804  	if (list_empty(entry))
805  		list_add_tail(entry, list);
806  	cond_spin_unlock(lock, !!lock);
807  }
808  
809  /**
810   * drm_gpuvm_bo_list_add() - insert a vm_bo into the given list
811   * @__vm_bo: the &drm_gpuvm_bo
812   * @__list_name: the name of the list to insert into
813   * @__lock: whether to lock with the internal spinlock
814   *
815   * Inserts the given @__vm_bo into the list specified by @__list_name.
816   */
817  #define drm_gpuvm_bo_list_add(__vm_bo, __list_name, __lock)			\
818  	__drm_gpuvm_bo_list_add((__vm_bo)->vm,					\
819  				__lock ? &(__vm_bo)->vm->__list_name.lock :	\
820  					 NULL,					\
821  				&(__vm_bo)->list.entry.__list_name,		\
822  				&(__vm_bo)->vm->__list_name.list)
823  
824  static void
825  __drm_gpuvm_bo_list_del(struct drm_gpuvm *gpuvm, spinlock_t *lock,
826  			struct list_head *entry, bool init)
827  {
828  	cond_spin_lock(lock, !!lock);
829  	if (init) {
830  		if (!list_empty(entry))
831  			list_del_init(entry);
832  	} else {
833  		list_del(entry);
834  	}
835  	cond_spin_unlock(lock, !!lock);
836  }
837  
838  /**
839   * drm_gpuvm_bo_list_del_init() - remove a vm_bo from the given list
840   * @__vm_bo: the &drm_gpuvm_bo
841   * @__list_name: the name of the list to insert into
842   * @__lock: whether to lock with the internal spinlock
843   *
844   * Removes the given @__vm_bo from the list specified by @__list_name.
845   */
846  #define drm_gpuvm_bo_list_del_init(__vm_bo, __list_name, __lock)		\
847  	__drm_gpuvm_bo_list_del((__vm_bo)->vm,					\
848  				__lock ? &(__vm_bo)->vm->__list_name.lock :	\
849  					 NULL,					\
850  				&(__vm_bo)->list.entry.__list_name,		\
851  				true)
852  
853  /**
854   * drm_gpuvm_bo_list_del() - remove a vm_bo from the given list
855   * @__vm_bo: the &drm_gpuvm_bo
856   * @__list_name: the name of the list to insert into
857   * @__lock: whether to lock with the internal spinlock
858   *
859   * Removes the given @__vm_bo from the list specified by @__list_name.
860   */
861  #define drm_gpuvm_bo_list_del(__vm_bo, __list_name, __lock)			\
862  	__drm_gpuvm_bo_list_del((__vm_bo)->vm,					\
863  				__lock ? &(__vm_bo)->vm->__list_name.lock :	\
864  					 NULL,					\
865  				&(__vm_bo)->list.entry.__list_name,		\
866  				false)
867  
868  #define to_drm_gpuva(__node)	container_of((__node), struct drm_gpuva, rb.node)
869  
870  #define GPUVA_START(node) ((node)->va.addr)
871  #define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)
872  
873  /* We do not actually use drm_gpuva_it_next(), tell the compiler to not complain
874   * about this.
875   */
876  INTERVAL_TREE_DEFINE(struct drm_gpuva, rb.node, u64, rb.__subtree_last,
877  		     GPUVA_START, GPUVA_LAST, static __maybe_unused,
878  		     drm_gpuva_it)
879  
880  static int __drm_gpuva_insert(struct drm_gpuvm *gpuvm,
881  			      struct drm_gpuva *va);
882  static void __drm_gpuva_remove(struct drm_gpuva *va);
883  
884  static bool
885  drm_gpuvm_check_overflow(u64 addr, u64 range)
886  {
887  	u64 end;
888  
889  	return check_add_overflow(addr, range, &end);
890  }
891  
892  static bool
893  drm_gpuvm_warn_check_overflow(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
894  {
895  	return drm_WARN(gpuvm->drm, drm_gpuvm_check_overflow(addr, range),
896  			"GPUVA address limited to %zu bytes.\n", sizeof(addr));
897  }
898  
899  static bool
900  drm_gpuvm_in_mm_range(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
901  {
902  	u64 end = addr + range;
903  	u64 mm_start = gpuvm->mm_start;
904  	u64 mm_end = mm_start + gpuvm->mm_range;
905  
906  	return addr >= mm_start && end <= mm_end;
907  }
908  
909  static bool
910  drm_gpuvm_in_kernel_node(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
911  {
912  	u64 end = addr + range;
913  	u64 kstart = gpuvm->kernel_alloc_node.va.addr;
914  	u64 krange = gpuvm->kernel_alloc_node.va.range;
915  	u64 kend = kstart + krange;
916  
917  	return krange && addr < kend && kstart < end;
918  }
919  
920  /**
921   * drm_gpuvm_range_valid() - checks whether the given range is valid for the
922   * given &drm_gpuvm
923   * @gpuvm: the GPUVM to check the range for
924   * @addr: the base address
925   * @range: the range starting from the base address
926   *
927   * Checks whether the range is within the GPUVM's managed boundaries.
928   *
929   * Returns: true for a valid range, false otherwise
930   */
931  bool
932  drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
933  		      u64 addr, u64 range)
934  {
935  	return !drm_gpuvm_check_overflow(addr, range) &&
936  	       drm_gpuvm_in_mm_range(gpuvm, addr, range) &&
937  	       !drm_gpuvm_in_kernel_node(gpuvm, addr, range);
938  }
939  EXPORT_SYMBOL_GPL(drm_gpuvm_range_valid);
940  
941  static void
942  drm_gpuvm_gem_object_free(struct drm_gem_object *obj)
943  {
944  	drm_gem_object_release(obj);
945  	kfree(obj);
946  }
947  
948  static const struct drm_gem_object_funcs drm_gpuvm_object_funcs = {
949  	.free = drm_gpuvm_gem_object_free,
950  };
951  
952  /**
953   * drm_gpuvm_resv_object_alloc() - allocate a dummy &drm_gem_object
954   * @drm: the drivers &drm_device
955   *
956   * Allocates a dummy &drm_gem_object which can be passed to drm_gpuvm_init() in
957   * order to serve as root GEM object providing the &dma_resv shared across
958   * &drm_gem_objects local to a single GPUVM.
959   *
960   * Returns: the &drm_gem_object on success, NULL on failure
961   */
962  struct drm_gem_object *
963  drm_gpuvm_resv_object_alloc(struct drm_device *drm)
964  {
965  	struct drm_gem_object *obj;
966  
967  	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
968  	if (!obj)
969  		return NULL;
970  
971  	obj->funcs = &drm_gpuvm_object_funcs;
972  	drm_gem_private_object_init(drm, obj, 0);
973  
974  	return obj;
975  }
976  EXPORT_SYMBOL_GPL(drm_gpuvm_resv_object_alloc);
977  
978  /**
979   * drm_gpuvm_init() - initialize a &drm_gpuvm
980   * @gpuvm: pointer to the &drm_gpuvm to initialize
981   * @name: the name of the GPU VA space
982   * @flags: the &drm_gpuvm_flags for this GPUVM
983   * @drm: the &drm_device this VM resides in
984   * @r_obj: the resv &drm_gem_object providing the GPUVM's common &dma_resv
985   * @start_offset: the start offset of the GPU VA space
986   * @range: the size of the GPU VA space
987   * @reserve_offset: the start of the kernel reserved GPU VA area
988   * @reserve_range: the size of the kernel reserved GPU VA area
989   * @ops: &drm_gpuvm_ops called on &drm_gpuvm_sm_map / &drm_gpuvm_sm_unmap
990   *
991   * The &drm_gpuvm must be initialized with this function before use.
992   *
993   * Note that @gpuvm must be cleared to 0 before calling this function. The given
994   * &name is expected to be managed by the surrounding driver structures.
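 *
 * A minimal sketch of a typical initialization, assuming the &drm_gpuvm is
 * embedded in a hypothetical struct driver_gpu_vm and that va_space_size,
 * kernel_reserved_size, r_obj and driver_gpuvm_ops are provided by the
 * driver::
 *
 *	struct driver_gpu_vm *vm = kzalloc(sizeof(*vm), GFP_KERNEL);
 *
 *	if (!vm)
 *		return -ENOMEM;
 *
 *	drm_gpuvm_init(&vm->base, "driver-vm", 0, drm, r_obj,
 *		       0, va_space_size,
 *		       0, kernel_reserved_size,
 *		       &driver_gpuvm_ops);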
995   */
996  void
997  drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
998  	       enum drm_gpuvm_flags flags,
999  	       struct drm_device *drm,
1000  	       struct drm_gem_object *r_obj,
1001  	       u64 start_offset, u64 range,
1002  	       u64 reserve_offset, u64 reserve_range,
1003  	       const struct drm_gpuvm_ops *ops)
1004  {
1005  	gpuvm->rb.tree = RB_ROOT_CACHED;
1006  	INIT_LIST_HEAD(&gpuvm->rb.list);
1007  
1008  	INIT_LIST_HEAD(&gpuvm->extobj.list);
1009  	spin_lock_init(&gpuvm->extobj.lock);
1010  
1011  	INIT_LIST_HEAD(&gpuvm->evict.list);
1012  	spin_lock_init(&gpuvm->evict.lock);
1013  
1014  	kref_init(&gpuvm->kref);
1015  
1016  	gpuvm->name = name ? name : "unknown";
1017  	gpuvm->flags = flags;
1018  	gpuvm->ops = ops;
1019  	gpuvm->drm = drm;
1020  	gpuvm->r_obj = r_obj;
1021  
1022  	drm_gem_object_get(r_obj);
1023  
1024  	drm_gpuvm_warn_check_overflow(gpuvm, start_offset, range);
1025  	gpuvm->mm_start = start_offset;
1026  	gpuvm->mm_range = range;
1027  
1028  	memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
1029  	if (reserve_range) {
1030  		gpuvm->kernel_alloc_node.va.addr = reserve_offset;
1031  		gpuvm->kernel_alloc_node.va.range = reserve_range;
1032  
1033  		if (likely(!drm_gpuvm_warn_check_overflow(gpuvm, reserve_offset,
1034  							  reserve_range)))
1035  			__drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node);
1036  	}
1037  }
1038  EXPORT_SYMBOL_GPL(drm_gpuvm_init);
1039  
1040  static void
1041  drm_gpuvm_fini(struct drm_gpuvm *gpuvm)
1042  {
1043  	gpuvm->name = NULL;
1044  
1045  	if (gpuvm->kernel_alloc_node.va.range)
1046  		__drm_gpuva_remove(&gpuvm->kernel_alloc_node);
1047  
1048  	drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
1049  		 "GPUVA tree is not empty, potentially leaking memory.\n");
1050  
1051  	drm_WARN(gpuvm->drm, !list_empty(&gpuvm->extobj.list),
1052  		 "Extobj list should be empty.\n");
1053  	drm_WARN(gpuvm->drm, !list_empty(&gpuvm->evict.list),
1054  		 "Evict list should be empty.\n");
1055  
1056  	drm_gem_object_put(gpuvm->r_obj);
1057  }
1058  
1059  static void
1060  drm_gpuvm_free(struct kref *kref)
1061  {
1062  	struct drm_gpuvm *gpuvm = container_of(kref, struct drm_gpuvm, kref);
1063  
1064  	drm_gpuvm_fini(gpuvm);
1065  
1066  	if (drm_WARN_ON(gpuvm->drm, !gpuvm->ops->vm_free))
1067  		return;
1068  
1069  	gpuvm->ops->vm_free(gpuvm);
1070  }
1071  
1072  /**
1073   * drm_gpuvm_put() - drop a struct drm_gpuvm reference
1074   * @gpuvm: the &drm_gpuvm to release the reference of
1075   *
1076   * This releases a reference to @gpuvm.
1077   *
1078   * This function may be called from atomic context.
1079   */
1080  void
1081  drm_gpuvm_put(struct drm_gpuvm *gpuvm)
1082  {
1083  	if (gpuvm)
1084  		kref_put(&gpuvm->kref, drm_gpuvm_free);
1085  }
1086  EXPORT_SYMBOL_GPL(drm_gpuvm_put);
1087  
1088  static int
1089  exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
1090  		 unsigned int num_fences)
1091  {
1092  	return num_fences ? drm_exec_prepare_obj(exec, obj, num_fences) :
1093  			    drm_exec_lock_obj(exec, obj);
1094  }
1095  
1096  /**
1097   * drm_gpuvm_prepare_vm() - prepare the GPUVM's common dma-resv
1098   * @gpuvm: the &drm_gpuvm
1099   * @exec: the &drm_exec context
1100   * @num_fences: the amount of &dma_fences to reserve
1101   *
1102   * Calls drm_exec_prepare_obj() for the GPUVM's dummy &drm_gem_object; if
1103   * @num_fences is zero drm_exec_lock_obj() is called instead.
1104   *
1105   * Using this function directly, it is the driver's responsibility to call
1106   * drm_exec_init() and drm_exec_fini() accordingly.
1107   *
1108   * Returns: 0 on success, negative error code on failure.
1109   */
1110  int
1111  drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
1112  		     struct drm_exec *exec,
1113  		     unsigned int num_fences)
1114  {
1115  	return exec_prepare_obj(exec, gpuvm->r_obj, num_fences);
1116  }
1117  EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_vm);
1118  
1119  static int
1120  __drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
1121  			    struct drm_exec *exec,
1122  			    unsigned int num_fences)
1123  {
1124  	struct drm_gpuvm_bo *vm_bo;
1125  	LIST_HEAD(extobjs);
1126  	int ret = 0;
1127  
1128  	for_each_vm_bo_in_list(gpuvm, extobj, &extobjs, vm_bo) {
1129  		ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
1130  		if (ret)
1131  			break;
1132  	}
1133  	/* Drop ref in case we break out of the loop. */
1134  	drm_gpuvm_bo_put(vm_bo);
1135  	restore_vm_bo_list(gpuvm, extobj);
1136  
1137  	return ret;
1138  }
1139  
1140  static int
1141  drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
1142  				 struct drm_exec *exec,
1143  				 unsigned int num_fences)
1144  {
1145  	struct drm_gpuvm_bo *vm_bo;
1146  	int ret = 0;
1147  
1148  	drm_gpuvm_resv_assert_held(gpuvm);
1149  	list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) {
1150  		ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
1151  		if (ret)
1152  			break;
1153  
1154  		if (vm_bo->evicted)
1155  			drm_gpuvm_bo_list_add(vm_bo, evict, false);
1156  	}
1157  
1158  	return ret;
1159  }
1160  
1161  /**
1162   * drm_gpuvm_prepare_objects() - prepare all associated BOs
1163   * @gpuvm: the &drm_gpuvm
1164   * @exec: the &drm_exec locking context
1165   * @num_fences: the amount of &dma_fences to reserve
1166   *
1167   * Calls drm_exec_prepare_obj() for all &drm_gem_objects the given
1168   * &drm_gpuvm contains mappings of; if @num_fences is zero drm_exec_lock_obj()
1169   * is called instead.
1170   *
1171   * Using this function directly, it is the driver's responsibility to call
1172   * drm_exec_init() and drm_exec_fini() accordingly.
1173   *
1174   * Note: This function is safe against concurrent insertion and removal of
1175   * external objects; however, it is not safe against concurrent usage itself.
1176   *
1177   * Drivers need to make sure to protect this case with either an outer VM lock
1178   * or by calling drm_gpuvm_prepare_vm() before this function within the
1179   * drm_exec_until_all_locked() loop, such that the GPUVM's dma-resv lock ensures
1180   * mutual exclusion.
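 *
 * A sketch of the open coded variant mentioned above, locking the GPUVM's
 * common dma-resv first so it serializes concurrent callers (unwinding via a
 * driver side 'err' label is omitted)::
 *
 *	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *
 *	drm_exec_until_all_locked(exec) {
 *		ret = drm_gpuvm_prepare_vm(gpuvm, exec, num_fences);
 *		drm_exec_retry_on_contention(exec);
 *		if (ret)
 *			goto err;
 *
 *		ret = drm_gpuvm_prepare_objects(gpuvm, exec, num_fences);
 *		drm_exec_retry_on_contention(exec);
 *		if (ret)
 *			goto err;
 *	}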
1181   *
1182   * Returns: 0 on success, negative error code on failure.
1183   */
1184  int
1185  drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
1186  			  struct drm_exec *exec,
1187  			  unsigned int num_fences)
1188  {
1189  	if (drm_gpuvm_resv_protected(gpuvm))
1190  		return drm_gpuvm_prepare_objects_locked(gpuvm, exec,
1191  							num_fences);
1192  	else
1193  		return __drm_gpuvm_prepare_objects(gpuvm, exec, num_fences);
1194  }
1195  EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_objects);
1196  
1197  /**
1198   * drm_gpuvm_prepare_range() - prepare all BOs mapped within a given range
1199   * @gpuvm: the &drm_gpuvm
1200   * @exec: the &drm_exec locking context
1201   * @addr: the start address within the VA space
1202   * @range: the range to iterate within the VA space
1203   * @num_fences: the amount of &dma_fences to reserve
1204   *
1205   * Calls drm_exec_prepare_obj() for all &drm_gem_objects mapped between @addr
1206   * and @addr + @range; if @num_fences is zero drm_exec_lock_obj() is called
1207   * instead.
1208   *
1209   * Returns: 0 on success, negative error code on failure.
1210   */
1211  int
1212  drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
1213  			u64 addr, u64 range, unsigned int num_fences)
1214  {
1215  	struct drm_gpuva *va;
1216  	u64 end = addr + range;
1217  	int ret;
1218  
1219  	drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) {
1220  		struct drm_gem_object *obj = va->gem.obj;
1221  
1222  		ret = exec_prepare_obj(exec, obj, num_fences);
1223  		if (ret)
1224  			return ret;
1225  	}
1226  
1227  	return 0;
1228  }
1229  EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_range);
1230  
1231  /**
1232   * drm_gpuvm_exec_lock() - lock all dma-resv of all associated BOs
1233   * @vm_exec: the &drm_gpuvm_exec wrapper
1234   *
1235   * Acquires all dma-resv locks of all &drm_gem_objects the given
1236   * &drm_gpuvm contains mappings of.
1237   *
1238   * Additionally, when calling this function with struct drm_gpuvm_exec::extra
1239   * being set, the driver receives the given @fn callback to lock additional
1240   * dma-resv in the context of the &drm_gpuvm_exec instance. Typically, drivers
1241   * would call drm_exec_prepare_obj() from within this callback.
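 *
 * A sketch of using the extra callback to lock one additional driver specific
 * &drm_gem_object (driver_obj is hypothetical)::
 *
 *	static int driver_lock_extra(struct drm_gpuvm_exec *vm_exec)
 *	{
 *		struct drm_gem_object *obj = vm_exec->extra.priv;
 *
 *		return drm_exec_prepare_obj(&vm_exec->exec, obj,
 *					    vm_exec->num_fences);
 *	}
 *
 *	vm_exec->extra.fn = driver_lock_extra;
 *	vm_exec->extra.priv = driver_obj;
 *	ret = drm_gpuvm_exec_lock(vm_exec);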
1242   *
1243   * Returns: 0 on success, negative error code on failure.
1244   */
1245  int
1246  drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec)
1247  {
1248  	struct drm_gpuvm *gpuvm = vm_exec->vm;
1249  	struct drm_exec *exec = &vm_exec->exec;
1250  	unsigned int num_fences = vm_exec->num_fences;
1251  	int ret;
1252  
1253  	drm_exec_init(exec, vm_exec->flags, 0);
1254  
1255  	drm_exec_until_all_locked(exec) {
1256  		ret = drm_gpuvm_prepare_vm(gpuvm, exec, num_fences);
1257  		drm_exec_retry_on_contention(exec);
1258  		if (ret)
1259  			goto err;
1260  
1261  		ret = drm_gpuvm_prepare_objects(gpuvm, exec, num_fences);
1262  		drm_exec_retry_on_contention(exec);
1263  		if (ret)
1264  			goto err;
1265  
1266  		if (vm_exec->extra.fn) {
1267  			ret = vm_exec->extra.fn(vm_exec);
1268  			drm_exec_retry_on_contention(exec);
1269  			if (ret)
1270  				goto err;
1271  		}
1272  	}
1273  
1274  	return 0;
1275  
1276  err:
1277  	drm_exec_fini(exec);
1278  	return ret;
1279  }
1280  EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock);
1281  
1282  static int
1283  fn_lock_array(struct drm_gpuvm_exec *vm_exec)
1284  {
1285  	struct {
1286  		struct drm_gem_object **objs;
1287  		unsigned int num_objs;
1288  	} *args = vm_exec->extra.priv;
1289  
1290  	return drm_exec_prepare_array(&vm_exec->exec, args->objs,
1291  				      args->num_objs, vm_exec->num_fences);
1292  }
1293  
1294  /**
1295   * drm_gpuvm_exec_lock_array() - lock all dma-resv of all associated BOs
1296   * @vm_exec: the &drm_gpuvm_exec wrapper
1297   * @objs: additional &drm_gem_objects to lock
1298   * @num_objs: the number of additional &drm_gem_objects to lock
1299   *
1300   * Acquires all dma-resv locks of all &drm_gem_objects the given &drm_gpuvm
1301   * contains mappings of, plus the ones given through @objs.
1302   *
1303   * Returns: 0 on success, negative error code on failure.
1304   */
1305  int
1306  drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec,
1307  			  struct drm_gem_object **objs,
1308  			  unsigned int num_objs)
1309  {
1310  	struct {
1311  		struct drm_gem_object **objs;
1312  		unsigned int num_objs;
1313  	} args;
1314  
1315  	args.objs = objs;
1316  	args.num_objs = num_objs;
1317  
1318  	vm_exec->extra.fn = fn_lock_array;
1319  	vm_exec->extra.priv = &args;
1320  
1321  	return drm_gpuvm_exec_lock(vm_exec);
1322  }
1323  EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock_array);
1324  
1325  /**
1326   * drm_gpuvm_exec_lock_range() - prepare all BOs mapped within a given range
1327   * @vm_exec: the &drm_gpuvm_exec wrapper
1328   * @addr: the start address within the VA space
1329   * @range: the range to iterate within the VA space
1330   *
1331   * Acquires all dma-resv locks of all &drm_gem_objects mapped between @addr and
1332   * @addr + @range.
1333   *
1334   * Returns: 0 on success, negative error code on failure.
1335   */
1336  int
1337  drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
1338  			  u64 addr, u64 range)
1339  {
1340  	struct drm_gpuvm *gpuvm = vm_exec->vm;
1341  	struct drm_exec *exec = &vm_exec->exec;
1342  	int ret;
1343  
1344  	drm_exec_init(exec, vm_exec->flags, 0);
1345  
1346  	drm_exec_until_all_locked(exec) {
1347  		ret = drm_gpuvm_prepare_range(gpuvm, exec, addr, range,
1348  					      vm_exec->num_fences);
1349  		drm_exec_retry_on_contention(exec);
1350  		if (ret)
1351  			goto err;
1352  	}
1353  
1354  	return ret;
1355  
1356  err:
1357  	drm_exec_fini(exec);
1358  	return ret;
1359  }
1360  EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock_range);
1361  
1362  static int
1363  __drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
1364  {
1365  	const struct drm_gpuvm_ops *ops = gpuvm->ops;
1366  	struct drm_gpuvm_bo *vm_bo;
1367  	LIST_HEAD(evict);
1368  	int ret = 0;
1369  
1370  	for_each_vm_bo_in_list(gpuvm, evict, &evict, vm_bo) {
1371  		ret = ops->vm_bo_validate(vm_bo, exec);
1372  		if (ret)
1373  			break;
1374  	}
1375  	/* Drop ref in case we break out of the loop. */
1376  	drm_gpuvm_bo_put(vm_bo);
1377  	restore_vm_bo_list(gpuvm, evict);
1378  
1379  	return ret;
1380  }
1381  
1382  static int
1383  drm_gpuvm_validate_locked(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
1384  {
1385  	const struct drm_gpuvm_ops *ops = gpuvm->ops;
1386  	struct drm_gpuvm_bo *vm_bo, *next;
1387  	int ret = 0;
1388  
1389  	drm_gpuvm_resv_assert_held(gpuvm);
1390  
1391  	list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list,
1392  				 list.entry.evict) {
1393  		ret = ops->vm_bo_validate(vm_bo, exec);
1394  		if (ret)
1395  			break;
1396  
1397  		dma_resv_assert_held(vm_bo->obj->resv);
1398  		if (!vm_bo->evicted)
1399  			drm_gpuvm_bo_list_del_init(vm_bo, evict, false);
1400  	}
1401  
1402  	return ret;
1403  }
1404  
1405  /**
1406   * drm_gpuvm_validate() - validate all BOs marked as evicted
1407   * @gpuvm: the &drm_gpuvm to validate evicted BOs
1408   * @exec: the &drm_exec instance used for locking the GPUVM
1409   *
1410   * Calls the &drm_gpuvm_ops::vm_bo_validate callback for all evicted buffer
1411   * objects being mapped in the given &drm_gpuvm.
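 *
 * A minimal sketch of a corresponding &drm_gpuvm_ops::vm_bo_validate
 * implementation, assuming driver_bo_validate() is a hypothetical driver
 * helper moving the buffer back to a GPU accessible placement::
 *
 *	static int driver_vm_bo_validate(struct drm_gpuvm_bo *vm_bo,
 *					 struct drm_exec *exec)
 *	{
 *		// The details of re-validating and re-binding the backing
 *		// buffer are entirely driver specific.
 *		return driver_bo_validate(vm_bo->obj, exec);
 *	}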
1412   *
1413   * Returns: 0 on success, negative error code on failure.
1414   */
1415  int
1416  drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
1417  {
1418  	const struct drm_gpuvm_ops *ops = gpuvm->ops;
1419  
1420  	if (unlikely(!ops || !ops->vm_bo_validate))
1421  		return -EOPNOTSUPP;
1422  
1423  	if (drm_gpuvm_resv_protected(gpuvm))
1424  		return drm_gpuvm_validate_locked(gpuvm, exec);
1425  	else
1426  		return __drm_gpuvm_validate(gpuvm, exec);
1427  }
1428  EXPORT_SYMBOL_GPL(drm_gpuvm_validate);
1429  
1430  /**
1431   * drm_gpuvm_resv_add_fence() - add fence to private and all extobj
1432   * dma-resv
1433   * @gpuvm: the &drm_gpuvm to add a fence to
1434   * @exec: the &drm_exec locking context
1435   * @fence: fence to add
1436   * @private_usage: private dma-resv usage
1437   * @extobj_usage: extobj dma-resv usage
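 *
 * A minimal usage sketch after job submission (job_fence is hypothetical and
 * the chosen &dma_resv_usage values are driver policy)::
 *
 *	drm_gpuvm_resv_add_fence(gpuvm, exec, job_fence,
 *				 DMA_RESV_USAGE_BOOKKEEP,
 *				 DMA_RESV_USAGE_BOOKKEEP);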
1438   */
1439  void
1440  drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
1441  			 struct drm_exec *exec,
1442  			 struct dma_fence *fence,
1443  			 enum dma_resv_usage private_usage,
1444  			 enum dma_resv_usage extobj_usage)
1445  {
1446  	struct drm_gem_object *obj;
1447  	unsigned long index;
1448  
1449  	drm_exec_for_each_locked_object(exec, index, obj) {
1450  		dma_resv_assert_held(obj->resv);
1451  		dma_resv_add_fence(obj->resv, fence,
1452  				   drm_gpuvm_is_extobj(gpuvm, obj) ?
1453  				   extobj_usage : private_usage);
1454  	}
1455  }
1456  EXPORT_SYMBOL_GPL(drm_gpuvm_resv_add_fence);
1457  
1458  /**
1459   * drm_gpuvm_bo_create() - create a new instance of struct drm_gpuvm_bo
1460   * @gpuvm: The &drm_gpuvm the @obj is mapped in.
1461   * @obj: The &drm_gem_object being mapped in the @gpuvm.
1462   *
1463   * If provided by the driver, this function uses the &drm_gpuvm_ops
1464   * vm_bo_alloc() callback to allocate.
1465   *
1466   * Returns: a pointer to the &drm_gpuvm_bo on success, NULL on failure
1467   */
1468  struct drm_gpuvm_bo *
1469  drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
1470  		    struct drm_gem_object *obj)
1471  {
1472  	const struct drm_gpuvm_ops *ops = gpuvm->ops;
1473  	struct drm_gpuvm_bo *vm_bo;
1474  
1475  	if (ops && ops->vm_bo_alloc)
1476  		vm_bo = ops->vm_bo_alloc();
1477  	else
1478  		vm_bo = kzalloc(sizeof(*vm_bo), GFP_KERNEL);
1479  
1480  	if (unlikely(!vm_bo))
1481  		return NULL;
1482  
1483  	vm_bo->vm = drm_gpuvm_get(gpuvm);
1484  	vm_bo->obj = obj;
1485  	drm_gem_object_get(obj);
1486  
1487  	kref_init(&vm_bo->kref);
1488  	INIT_LIST_HEAD(&vm_bo->list.gpuva);
1489  	INIT_LIST_HEAD(&vm_bo->list.entry.gem);
1490  
1491  	INIT_LIST_HEAD(&vm_bo->list.entry.extobj);
1492  	INIT_LIST_HEAD(&vm_bo->list.entry.evict);
1493  
1494  	return vm_bo;
1495  }
1496  EXPORT_SYMBOL_GPL(drm_gpuvm_bo_create);
1497  
1498  static void
1499  drm_gpuvm_bo_destroy(struct kref *kref)
1500  {
1501  	struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
1502  						  kref);
1503  	struct drm_gpuvm *gpuvm = vm_bo->vm;
1504  	const struct drm_gpuvm_ops *ops = gpuvm->ops;
1505  	struct drm_gem_object *obj = vm_bo->obj;
1506  	bool lock = !drm_gpuvm_resv_protected(gpuvm);
1507  
1508  	if (!lock)
1509  		drm_gpuvm_resv_assert_held(gpuvm);
1510  
1511  	drm_gpuvm_bo_list_del(vm_bo, extobj, lock);
1512  	drm_gpuvm_bo_list_del(vm_bo, evict, lock);
1513  
1514  	drm_gem_gpuva_assert_lock_held(obj);
1515  	list_del(&vm_bo->list.entry.gem);
1516  
1517  	if (ops && ops->vm_bo_free)
1518  		ops->vm_bo_free(vm_bo);
1519  	else
1520  		kfree(vm_bo);
1521  
1522  	drm_gpuvm_put(gpuvm);
1523  	drm_gem_object_put(obj);
1524  }
1525  
1526  /**
1527   * drm_gpuvm_bo_put() - drop a struct drm_gpuvm_bo reference
1528   * @vm_bo: the &drm_gpuvm_bo to release the reference of
1529   *
1530   * This releases a reference to @vm_bo.
1531   *
1532   * If the reference count drops to zero, the &gpuvm_bo is destroyed, which
1533   * includes removing it from the GEM's gpuva list. Hence, if a call to this
1534   * function can potentially let the reference count drop to zero, the caller
1535   * must hold the dma-resv or driver specific GEM gpuva lock.
1536   *
1537   * This function may only be called from non-atomic context.
1538   *
1539   * Returns: true if vm_bo was destroyed, false otherwise.
1540   */
1541  bool
1542  drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo)
1543  {
1544  	might_sleep();
1545  
1546  	if (vm_bo)
1547  		return !!kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy);
1548  
1549  	return false;
1550  }
1551  EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put);
1552  
1553  static struct drm_gpuvm_bo *
1554  __drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
1555  		    struct drm_gem_object *obj)
1556  {
1557  	struct drm_gpuvm_bo *vm_bo;
1558  
1559  	drm_gem_gpuva_assert_lock_held(obj);
1560  	drm_gem_for_each_gpuvm_bo(vm_bo, obj)
1561  		if (vm_bo->vm == gpuvm)
1562  			return vm_bo;
1563  
1564  	return NULL;
1565  }
1566  
1567  /**
1568   * drm_gpuvm_bo_find() - find the &drm_gpuvm_bo for the given
1569   * &drm_gpuvm and &drm_gem_object
1570   * @gpuvm: The &drm_gpuvm the @obj is mapped in.
1571   * @obj: The &drm_gem_object being mapped in the @gpuvm.
1572   *
1573   * Find the &drm_gpuvm_bo representing the combination of the given
1574   * &drm_gpuvm and &drm_gem_object. If found, increases the reference
1575   * count of the &drm_gpuvm_bo accordingly.
1576   *
1577   * Returns: a pointer to the &drm_gpuvm_bo on success, NULL if no such
1578   * &drm_gpuvm_bo exists
1578   */
1579  struct drm_gpuvm_bo *
1580  drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
1581  		  struct drm_gem_object *obj)
1582  {
1583  	struct drm_gpuvm_bo *vm_bo = __drm_gpuvm_bo_find(gpuvm, obj);
1584  
1585  	return vm_bo ? drm_gpuvm_bo_get(vm_bo) : NULL;
1586  }
1587  EXPORT_SYMBOL_GPL(drm_gpuvm_bo_find);
1588  
1589  /**
1590   * drm_gpuvm_bo_obtain() - obtains an instance of the &drm_gpuvm_bo for the
1591   * given &drm_gpuvm and &drm_gem_object
1592   * @gpuvm: The &drm_gpuvm the @obj is mapped in.
1593   * @obj: The &drm_gem_object being mapped in the @gpuvm.
1594   *
1595   * Find the &drm_gpuvm_bo representing the combination of the given
1596   * &drm_gpuvm and &drm_gem_object. If found, increases the reference
1597   * count of the &drm_gpuvm_bo accordingly. If not found, allocates a new
1598   * &drm_gpuvm_bo.
1599   *
1600   * A new &drm_gpuvm_bo is added to the GEM's gpuva list.
1601   *
1602   * Returns: a pointer to the &drm_gpuvm_bo on success, an ERR_PTR on failure
1603   */
1604  struct drm_gpuvm_bo *
1605  drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
1606  		    struct drm_gem_object *obj)
1607  {
1608  	struct drm_gpuvm_bo *vm_bo;
1609  
1610  	vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
1611  	if (vm_bo)
1612  		return vm_bo;
1613  
1614  	vm_bo = drm_gpuvm_bo_create(gpuvm, obj);
1615  	if (!vm_bo)
1616  		return ERR_PTR(-ENOMEM);
1617  
1618  	drm_gem_gpuva_assert_lock_held(obj);
1619  	list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list);
1620  
1621  	return vm_bo;
1622  }
1623  EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain);
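
/*
 * Editor's note: a hedged sketch of how a driver's map path commonly ties
 * drm_gpuva_insert(), drm_gpuvm_bo_obtain() and drm_gpuva_link() together;
 * "va" and "obj" are placeholders and the GEM's gpuva lock is assumed to be
 * held. The reference returned by drm_gpuvm_bo_obtain() is dropped again
 * after drm_gpuva_link(), which takes its own reference.
 *
 *	struct drm_gpuvm_bo *vm_bo;
 *	int ret;
 *
 *	ret = drm_gpuva_insert(gpuvm, va);
 *	if (ret)
 *		return ret;
 *
 *	vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
 *	if (IS_ERR(vm_bo)) {
 *		drm_gpuva_remove(va);
 *		return PTR_ERR(vm_bo);
 *	}
 *
 *	drm_gpuva_link(va, vm_bo);
 *	drm_gpuvm_bo_put(vm_bo);
 */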
1624  
1625  /**
1626   * drm_gpuvm_bo_obtain_prealloc() - obtains an instance of the &drm_gpuvm_bo
1627   * for the given &drm_gpuvm and &drm_gem_object
1628   * @__vm_bo: A pre-allocated struct drm_gpuvm_bo.
1629   *
1630   * Find the &drm_gpuvm_bo representing the combination of the given
1631   * &drm_gpuvm and &drm_gem_object. If found, increases the reference
1632   * count of the found &drm_gpuvm_bo accordingly, while the @__vm_bo reference
1633   * count is decreased. If not found @__vm_bo is returned without further
1634   * increase of the reference count.
1635   *
1636   * A new &drm_gpuvm_bo is added to the GEM's gpuva list.
1637   *
1638   * Returns: a pointer to the found &drm_gpuvm_bo or @__vm_bo if no existing
1639   * &drm_gpuvm_bo was found
1640   */
1641  struct drm_gpuvm_bo *
1642  drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo)
1643  {
1644  	struct drm_gpuvm *gpuvm = __vm_bo->vm;
1645  	struct drm_gem_object *obj = __vm_bo->obj;
1646  	struct drm_gpuvm_bo *vm_bo;
1647  
1648  	vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
1649  	if (vm_bo) {
1650  		drm_gpuvm_bo_put(__vm_bo);
1651  		return vm_bo;
1652  	}
1653  
1654  	drm_gem_gpuva_assert_lock_held(obj);
1655  	list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list);
1656  
1657  	return __vm_bo;
1658  }
1659  EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain_prealloc);
1660  
1661  /**
1662   * drm_gpuvm_bo_extobj_add() - adds the &drm_gpuvm_bo to its &drm_gpuvm's
1663   * extobj list
1664   * @vm_bo: The &drm_gpuvm_bo to add to its &drm_gpuvm's the extobj list.
1665   *
1666   * Adds the given @vm_bo to its &drm_gpuvm's extobj list if it is not on the
1667   * list already and the corresponding &drm_gem_object actually is an external
1668   * object.
1669   */
1670  void
1671  drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo)
1672  {
1673  	struct drm_gpuvm *gpuvm = vm_bo->vm;
1674  	bool lock = !drm_gpuvm_resv_protected(gpuvm);
1675  
1676  	if (!lock)
1677  		drm_gpuvm_resv_assert_held(gpuvm);
1678  
1679  	if (drm_gpuvm_is_extobj(gpuvm, vm_bo->obj))
1680  		drm_gpuvm_bo_list_add(vm_bo, extobj, lock);
1681  }
1682  EXPORT_SYMBOL_GPL(drm_gpuvm_bo_extobj_add);
1683  
1684  /**
1685   * drm_gpuvm_bo_evict() - add / remove a &drm_gpuvm_bo to / from the &drm_gpuvm's
1686   * evicted list
1687   * @vm_bo: the &drm_gpuvm_bo to add or remove
1688   * @evict: indicates whether the object is evicted
1689   *
1690   * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvm's evicted list.
1691   */
1692  void
1693  drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict)
1694  {
1695  	struct drm_gpuvm *gpuvm = vm_bo->vm;
1696  	struct drm_gem_object *obj = vm_bo->obj;
1697  	bool lock = !drm_gpuvm_resv_protected(gpuvm);
1698  
1699  	dma_resv_assert_held(obj->resv);
1700  	vm_bo->evicted = evict;
1701  
1702  	/* Can't add external objects to the evicted list directly if not using
1703  	 * internal spinlocks, since in this case the evicted list is protected
1704  	 * with the VM's common dma-resv lock.
1705  	 */
1706  	if (drm_gpuvm_is_extobj(gpuvm, obj) && !lock)
1707  		return;
1708  
1709  	if (evict)
1710  		drm_gpuvm_bo_list_add(vm_bo, evict, lock);
1711  	else
1712  		drm_gpuvm_bo_list_del_init(vm_bo, evict, lock);
1713  }
1714  EXPORT_SYMBOL_GPL(drm_gpuvm_bo_evict);
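
/*
 * Editor's note: an illustrative sketch, assuming a driver-side eviction
 * path (e.g. a buffer move notification) that already holds @obj's dma-resv
 * lock. Marking all vm_bos of the object as evicted makes a subsequent
 * drm_gpuvm_validate() pick the object up again.
 *
 *	struct drm_gpuvm_bo *vm_bo;
 *
 *	dma_resv_assert_held(obj->resv);
 *	drm_gem_for_each_gpuvm_bo(vm_bo, obj)
 *		drm_gpuvm_bo_evict(vm_bo, true);
 */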
1715  
1716  static int
1717  __drm_gpuva_insert(struct drm_gpuvm *gpuvm,
1718  		   struct drm_gpuva *va)
1719  {
1720  	struct rb_node *node;
1721  	struct list_head *head;
1722  
1723  	if (drm_gpuva_it_iter_first(&gpuvm->rb.tree,
1724  				    GPUVA_START(va),
1725  				    GPUVA_LAST(va)))
1726  		return -EEXIST;
1727  
1728  	va->vm = gpuvm;
1729  
1730  	drm_gpuva_it_insert(va, &gpuvm->rb.tree);
1731  
1732  	node = rb_prev(&va->rb.node);
1733  	if (node)
1734  		head = &(to_drm_gpuva(node))->rb.entry;
1735  	else
1736  		head = &gpuvm->rb.list;
1737  
1738  	list_add(&va->rb.entry, head);
1739  
1740  	return 0;
1741  }
1742  
1743  /**
1744   * drm_gpuva_insert() - insert a &drm_gpuva
1745   * @gpuvm: the &drm_gpuvm to insert the &drm_gpuva in
1746   * @va: the &drm_gpuva to insert
1747   *
1748   * Insert a &drm_gpuva with a given address and range into a
1749   * &drm_gpuvm.
1750   *
1751   * It is safe to use this function while iterating the GPU VA space with the
1752   * safe iterator variants, such as drm_gpuvm_for_each_va_safe() and
1753   * drm_gpuvm_for_each_va_range_safe().
1754   *
1755   * Returns: 0 on success, negative error code on failure.
1756   */
1757  int
1758  drm_gpuva_insert(struct drm_gpuvm *gpuvm,
1759  		 struct drm_gpuva *va)
1760  {
1761  	u64 addr = va->va.addr;
1762  	u64 range = va->va.range;
1763  	int ret;
1764  
1765  	if (unlikely(!drm_gpuvm_range_valid(gpuvm, addr, range)))
1766  		return -EINVAL;
1767  
1768  	ret = __drm_gpuva_insert(gpuvm, va);
1769  	if (likely(!ret))
1770  		/* Take a reference of the GPUVM for the successfully inserted
1771  		 * drm_gpuva. We can't take the reference in
1772  		 * __drm_gpuva_insert() itself, since we don't want to increase
1773  		 * the reference count for the GPUVM's kernel_alloc_node.
1774  		 */
1775  		drm_gpuvm_get(gpuvm);
1776  
1777  	return ret;
1778  }
1779  EXPORT_SYMBOL_GPL(drm_gpuva_insert);
1780  
1781  static void
1782  __drm_gpuva_remove(struct drm_gpuva *va)
1783  {
1784  	drm_gpuva_it_remove(va, &va->vm->rb.tree);
1785  	list_del_init(&va->rb.entry);
1786  }
1787  
1788  /**
1789   * drm_gpuva_remove() - remove a &drm_gpuva
1790   * @va: the &drm_gpuva to remove
1791   *
1792   * This removes the given &va from the underlying tree.
1793   *
1794   * It is safe to use this function while iterating the GPU VA space with the
1795   * safe iterator variants, such as drm_gpuvm_for_each_va_safe() and
1796   * drm_gpuvm_for_each_va_range_safe().
1797   */
1798  void
1799  drm_gpuva_remove(struct drm_gpuva *va)
1800  {
1801  	struct drm_gpuvm *gpuvm = va->vm;
1802  
1803  	if (unlikely(va == &gpuvm->kernel_alloc_node)) {
1804  		drm_WARN(gpuvm->drm, 1,
1805  			 "Can't destroy kernel reserved node.\n");
1806  		return;
1807  	}
1808  
1809  	__drm_gpuva_remove(va);
1810  	drm_gpuvm_put(va->vm);
1811  }
1812  EXPORT_SYMBOL_GPL(drm_gpuva_remove);
1813  
1814  /**
1815   * drm_gpuva_link() - link a &drm_gpuva
1816   * @va: the &drm_gpuva to link
1817   * @vm_bo: the &drm_gpuvm_bo to add the &drm_gpuva to
1818   *
1819   * This adds the given &va to the GPU VA list of the &drm_gpuvm_bo and the
1820   * &drm_gpuvm_bo to the &drm_gem_object it is associated with.
1821   *
1822   * For every &drm_gpuva entry added to the &drm_gpuvm_bo an additional
1823   * reference of the latter is taken.
1824   *
1825   * This function expects the caller to protect the GEM's GPUVA list against
1826   * concurrent access using either the GEM's dma_resv lock or a driver specific
1827   * lock set through drm_gem_gpuva_set_lock().
1828   */
1829  void
1830  drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
1831  {
1832  	struct drm_gem_object *obj = va->gem.obj;
1833  	struct drm_gpuvm *gpuvm = va->vm;
1834  
1835  	if (unlikely(!obj))
1836  		return;
1837  
1838  	drm_WARN_ON(gpuvm->drm, obj != vm_bo->obj);
1839  
1840  	va->vm_bo = drm_gpuvm_bo_get(vm_bo);
1841  
1842  	drm_gem_gpuva_assert_lock_held(obj);
1843  	list_add_tail(&va->gem.entry, &vm_bo->list.gpuva);
1844  }
1845  EXPORT_SYMBOL_GPL(drm_gpuva_link);
1846  
1847  /**
1848   * drm_gpuva_unlink() - unlink a &drm_gpuva
1849   * @va: the &drm_gpuva to unlink
1850   *
1851   * This removes the given &va from the GPU VA list of the &drm_gem_object it is
1852   * associated with.
1853   *
1854   * This removes the given &va from the GPU VA list of the &drm_gpuvm_bo and
1855   * the &drm_gpuvm_bo from the &drm_gem_object it is associated with in case
1856   * this call unlinks the last &drm_gpuva from the &drm_gpuvm_bo.
1857   *
1858   * For every &drm_gpuva entry removed from the &drm_gpuvm_bo a reference of
1859   * the latter is dropped.
1860   *
1861   * This function expects the caller to protect the GEM's GPUVA list against
1862   * concurrent access using either the GEM's dma_resv lock or a driver specific
1863   * lock set through drm_gem_gpuva_set_lock().
1864   */
1865  void
1866  drm_gpuva_unlink(struct drm_gpuva *va)
1867  {
1868  	struct drm_gem_object *obj = va->gem.obj;
1869  	struct drm_gpuvm_bo *vm_bo = va->vm_bo;
1870  
1871  	if (unlikely(!obj))
1872  		return;
1873  
1874  	drm_gem_gpuva_assert_lock_held(obj);
1875  	list_del_init(&va->gem.entry);
1876  
1877  	va->vm_bo = NULL;
1878  	drm_gpuvm_bo_put(vm_bo);
1879  }
1880  EXPORT_SYMBOL_GPL(drm_gpuva_unlink);
1881  
1882  /**
1883   * drm_gpuva_find_first() - find the first &drm_gpuva in the given range
1884   * @gpuvm: the &drm_gpuvm to search in
1885   * @addr: the &drm_gpuvas address
1886   * @range: the &drm_gpuvas range
1887   *
1888   * Returns: the first &drm_gpuva within the given range
1889   */
1890  struct drm_gpuva *
1891  drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
1892  		     u64 addr, u64 range)
1893  {
1894  	u64 last = addr + range - 1;
1895  
1896  	return drm_gpuva_it_iter_first(&gpuvm->rb.tree, addr, last);
1897  }
1898  EXPORT_SYMBOL_GPL(drm_gpuva_find_first);
1899  
1900  /**
1901   * drm_gpuva_find() - find a &drm_gpuva
1902   * @gpuvm: the &drm_gpuvm to search in
1903   * @addr: the &drm_gpuvas address
1904   * @range: the &drm_gpuvas range
1905   *
1906   * Returns: the &drm_gpuva at a given &addr and with a given &range
1907   */
1908  struct drm_gpuva *
1909  drm_gpuva_find(struct drm_gpuvm *gpuvm,
1910  	       u64 addr, u64 range)
1911  {
1912  	struct drm_gpuva *va;
1913  
1914  	va = drm_gpuva_find_first(gpuvm, addr, range);
1915  	if (!va)
1916  		goto out;
1917  
1918  	if (va->va.addr != addr ||
1919  	    va->va.range != range)
1920  		goto out;
1921  
1922  	return va;
1923  
1924  out:
1925  	return NULL;
1926  }
1927  EXPORT_SYMBOL_GPL(drm_gpuva_find);
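
/*
 * Editor's note: a small sketch of an exact-match teardown built on the
 * lookup helper above; "addr" and "range" stand in for a userspace unmap
 * request and the GEM's gpuva lock is assumed to be held around
 * drm_gpuva_unlink() (see below).
 *
 *	struct drm_gpuva *va;
 *
 *	va = drm_gpuva_find(gpuvm, addr, range);
 *	if (!va)
 *		return -ENOENT;
 *
 *	drm_gpuva_remove(va);
 *	drm_gpuva_unlink(va);
 */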
1928  
1929  /**
1930   * drm_gpuva_find_prev() - find the &drm_gpuva before the given address
1931   * @gpuvm: the &drm_gpuvm to search in
1932   * @start: the given GPU VA's start address
1933   *
1934   * Find the adjacent &drm_gpuva before the GPU VA with given &start address.
1935   *
1936   * Note that if there is any free space between the GPU VA mappings no mapping
1937   * is returned.
1938   *
1939   * Returns: a pointer to the found &drm_gpuva or NULL if none was found
1940   */
1941  struct drm_gpuva *
1942  drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start)
1943  {
1944  	if (!drm_gpuvm_range_valid(gpuvm, start - 1, 1))
1945  		return NULL;
1946  
1947  	return drm_gpuva_it_iter_first(&gpuvm->rb.tree, start - 1, start);
1948  }
1949  EXPORT_SYMBOL_GPL(drm_gpuva_find_prev);
1950  
1951  /**
1952   * drm_gpuva_find_next() - find the &drm_gpuva after the given address
1953   * @gpuvm: the &drm_gpuvm to search in
1954   * @end: the given GPU VA's end address
1955   *
1956   * Find the adjacent &drm_gpuva after the GPU VA with given &end address.
1957   *
1958   * Note that if there is any free space between the GPU VA mappings no mapping
1959   * is returned.
1960   *
1961   * Returns: a pointer to the found &drm_gpuva or NULL if none was found
1962   */
1963  struct drm_gpuva *
1964  drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end)
1965  {
1966  	if (!drm_gpuvm_range_valid(gpuvm, end, 1))
1967  		return NULL;
1968  
1969  	return drm_gpuva_it_iter_first(&gpuvm->rb.tree, end, end + 1);
1970  }
1971  EXPORT_SYMBOL_GPL(drm_gpuva_find_next);
1972  
1973  /**
1974   * drm_gpuvm_interval_empty() - indicate whether a given interval of the VA space
1975   * is empty
1976   * @gpuvm: the &drm_gpuvm to check the range for
1977   * @addr: the start address of the range
1978   * @range: the range of the interval
1979   *
1980   * Returns: true if the interval is empty, false otherwise
1981   */
1982  bool
1983  drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
1984  {
1985  	return !drm_gpuva_find_first(gpuvm, addr, range);
1986  }
1987  EXPORT_SYMBOL_GPL(drm_gpuvm_interval_empty);
1988  
1989  /**
1990   * drm_gpuva_map() - helper to insert a &drm_gpuva according to a
1991   * &drm_gpuva_op_map
1992   * @gpuvm: the &drm_gpuvm
1993   * @va: the &drm_gpuva to insert
1994   * @op: the &drm_gpuva_op_map to initialize @va with
1995   *
1996   * Initializes the @va from the @op and inserts it into the given @gpuvm.
1997   */
1998  void
1999  drm_gpuva_map(struct drm_gpuvm *gpuvm,
2000  	      struct drm_gpuva *va,
2001  	      struct drm_gpuva_op_map *op)
2002  {
2003  	drm_gpuva_init_from_op(va, op);
2004  	drm_gpuva_insert(gpuvm, va);
2005  }
2006  EXPORT_SYMBOL_GPL(drm_gpuva_map);
2007  
2008  /**
2009   * drm_gpuva_remap() - helper to remap a &drm_gpuva according to a
2010   * &drm_gpuva_op_remap
2011   * @prev: the &drm_gpuva to remap when keeping the start of a mapping
2012   * @next: the &drm_gpuva to remap when keeping the end of a mapping
2013   * @op: the &drm_gpuva_op_remap to initialize @prev and @next with
2014   *
2015   * Removes the currently mapped &drm_gpuva and remaps it using @prev and/or
2016   * @next.
2017   */
2018  void
2019  drm_gpuva_remap(struct drm_gpuva *prev,
2020  		struct drm_gpuva *next,
2021  		struct drm_gpuva_op_remap *op)
2022  {
2023  	struct drm_gpuva *va = op->unmap->va;
2024  	struct drm_gpuvm *gpuvm = va->vm;
2025  
2026  	drm_gpuva_remove(va);
2027  
2028  	if (op->prev) {
2029  		drm_gpuva_init_from_op(prev, op->prev);
2030  		drm_gpuva_insert(gpuvm, prev);
2031  	}
2032  
2033  	if (op->next) {
2034  		drm_gpuva_init_from_op(next, op->next);
2035  		drm_gpuva_insert(gpuvm, next);
2036  	}
2037  }
2038  EXPORT_SYMBOL_GPL(drm_gpuva_remap);
2039  
2040  /**
2041   * drm_gpuva_unmap() - helper to remove a &drm_gpuva according to a
2042   * &drm_gpuva_op_unmap
2043   * @op: the &drm_gpuva_op_unmap specifying the &drm_gpuva to remove
2044   *
2045   * Removes the &drm_gpuva associated with the &drm_gpuva_op_unmap.
2046   */
2047  void
2048  drm_gpuva_unmap(struct drm_gpuva_op_unmap *op)
2049  {
2050  	drm_gpuva_remove(op->va);
2051  }
2052  EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
2053  
2054  static int
2055  op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
2056  	  u64 addr, u64 range,
2057  	  struct drm_gem_object *obj, u64 offset)
2058  {
2059  	struct drm_gpuva_op op = {};
2060  
2061  	op.op = DRM_GPUVA_OP_MAP;
2062  	op.map.va.addr = addr;
2063  	op.map.va.range = range;
2064  	op.map.gem.obj = obj;
2065  	op.map.gem.offset = offset;
2066  
2067  	return fn->sm_step_map(&op, priv);
2068  }
2069  
2070  static int
2071  op_remap_cb(const struct drm_gpuvm_ops *fn, void *priv,
2072  	    struct drm_gpuva_op_map *prev,
2073  	    struct drm_gpuva_op_map *next,
2074  	    struct drm_gpuva_op_unmap *unmap)
2075  {
2076  	struct drm_gpuva_op op = {};
2077  	struct drm_gpuva_op_remap *r;
2078  
2079  	op.op = DRM_GPUVA_OP_REMAP;
2080  	r = &op.remap;
2081  	r->prev = prev;
2082  	r->next = next;
2083  	r->unmap = unmap;
2084  
2085  	return fn->sm_step_remap(&op, priv);
2086  }
2087  
2088  static int
2089  op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
2090  	    struct drm_gpuva *va, bool merge)
2091  {
2092  	struct drm_gpuva_op op = {};
2093  
2094  	op.op = DRM_GPUVA_OP_UNMAP;
2095  	op.unmap.va = va;
2096  	op.unmap.keep = merge;
2097  
2098  	return fn->sm_step_unmap(&op, priv);
2099  }
2100  
2101  static int
2102  __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
2103  		   const struct drm_gpuvm_ops *ops, void *priv,
2104  		   u64 req_addr, u64 req_range,
2105  		   struct drm_gem_object *req_obj, u64 req_offset)
2106  {
2107  	struct drm_gpuva *va, *next;
2108  	u64 req_end = req_addr + req_range;
2109  	int ret;
2110  
2111  	if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
2112  		return -EINVAL;
2113  
2114  	drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
2115  		struct drm_gem_object *obj = va->gem.obj;
2116  		u64 offset = va->gem.offset;
2117  		u64 addr = va->va.addr;
2118  		u64 range = va->va.range;
2119  		u64 end = addr + range;
2120  		bool merge = !!va->gem.obj;
2121  
2122  		if (addr == req_addr) {
2123  			merge &= obj == req_obj &&
2124  				 offset == req_offset;
2125  
2126  			if (end == req_end) {
2127  				ret = op_unmap_cb(ops, priv, va, merge);
2128  				if (ret)
2129  					return ret;
2130  				break;
2131  			}
2132  
2133  			if (end < req_end) {
2134  				ret = op_unmap_cb(ops, priv, va, merge);
2135  				if (ret)
2136  					return ret;
2137  				continue;
2138  			}
2139  
2140  			if (end > req_end) {
2141  				struct drm_gpuva_op_map n = {
2142  					.va.addr = req_end,
2143  					.va.range = range - req_range,
2144  					.gem.obj = obj,
2145  					.gem.offset = offset + req_range,
2146  				};
2147  				struct drm_gpuva_op_unmap u = {
2148  					.va = va,
2149  					.keep = merge,
2150  				};
2151  
2152  				ret = op_remap_cb(ops, priv, NULL, &n, &u);
2153  				if (ret)
2154  					return ret;
2155  				break;
2156  			}
2157  		} else if (addr < req_addr) {
2158  			u64 ls_range = req_addr - addr;
2159  			struct drm_gpuva_op_map p = {
2160  				.va.addr = addr,
2161  				.va.range = ls_range,
2162  				.gem.obj = obj,
2163  				.gem.offset = offset,
2164  			};
2165  			struct drm_gpuva_op_unmap u = { .va = va };
2166  
2167  			merge &= obj == req_obj &&
2168  				 offset + ls_range == req_offset;
2169  			u.keep = merge;
2170  
2171  			if (end == req_end) {
2172  				ret = op_remap_cb(ops, priv, &p, NULL, &u);
2173  				if (ret)
2174  					return ret;
2175  				break;
2176  			}
2177  
2178  			if (end < req_end) {
2179  				ret = op_remap_cb(ops, priv, &p, NULL, &u);
2180  				if (ret)
2181  					return ret;
2182  				continue;
2183  			}
2184  
2185  			if (end > req_end) {
2186  				struct drm_gpuva_op_map n = {
2187  					.va.addr = req_end,
2188  					.va.range = end - req_end,
2189  					.gem.obj = obj,
2190  					.gem.offset = offset + ls_range +
2191  						      req_range,
2192  				};
2193  
2194  				ret = op_remap_cb(ops, priv, &p, &n, &u);
2195  				if (ret)
2196  					return ret;
2197  				break;
2198  			}
2199  		} else if (addr > req_addr) {
2200  			merge &= obj == req_obj &&
2201  				 offset == req_offset +
2202  					   (addr - req_addr);
2203  
2204  			if (end == req_end) {
2205  				ret = op_unmap_cb(ops, priv, va, merge);
2206  				if (ret)
2207  					return ret;
2208  				break;
2209  			}
2210  
2211  			if (end < req_end) {
2212  				ret = op_unmap_cb(ops, priv, va, merge);
2213  				if (ret)
2214  					return ret;
2215  				continue;
2216  			}
2217  
2218  			if (end > req_end) {
2219  				struct drm_gpuva_op_map n = {
2220  					.va.addr = req_end,
2221  					.va.range = end - req_end,
2222  					.gem.obj = obj,
2223  					.gem.offset = offset + req_end - addr,
2224  				};
2225  				struct drm_gpuva_op_unmap u = {
2226  					.va = va,
2227  					.keep = merge,
2228  				};
2229  
2230  				ret = op_remap_cb(ops, priv, NULL, &n, &u);
2231  				if (ret)
2232  					return ret;
2233  				break;
2234  			}
2235  		}
2236  	}
2237  
2238  	return op_map_cb(ops, priv,
2239  			 req_addr, req_range,
2240  			 req_obj, req_offset);
2241  }
2242  
2243  static int
2244  __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
2245  		     const struct drm_gpuvm_ops *ops, void *priv,
2246  		     u64 req_addr, u64 req_range)
2247  {
2248  	struct drm_gpuva *va, *next;
2249  	u64 req_end = req_addr + req_range;
2250  	int ret;
2251  
2252  	if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
2253  		return -EINVAL;
2254  
2255  	drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
2256  		struct drm_gpuva_op_map prev = {}, next = {};
2257  		bool prev_split = false, next_split = false;
2258  		struct drm_gem_object *obj = va->gem.obj;
2259  		u64 offset = va->gem.offset;
2260  		u64 addr = va->va.addr;
2261  		u64 range = va->va.range;
2262  		u64 end = addr + range;
2263  
2264  		if (addr < req_addr) {
2265  			prev.va.addr = addr;
2266  			prev.va.range = req_addr - addr;
2267  			prev.gem.obj = obj;
2268  			prev.gem.offset = offset;
2269  
2270  			prev_split = true;
2271  		}
2272  
2273  		if (end > req_end) {
2274  			next.va.addr = req_end;
2275  			next.va.range = end - req_end;
2276  			next.gem.obj = obj;
2277  			next.gem.offset = offset + (req_end - addr);
2278  
2279  			next_split = true;
2280  		}
2281  
2282  		if (prev_split || next_split) {
2283  			struct drm_gpuva_op_unmap unmap = { .va = va };
2284  
2285  			ret = op_remap_cb(ops, priv,
2286  					  prev_split ? &prev : NULL,
2287  					  next_split ? &next : NULL,
2288  					  &unmap);
2289  			if (ret)
2290  				return ret;
2291  		} else {
2292  			ret = op_unmap_cb(ops, priv, va, false);
2293  			if (ret)
2294  				return ret;
2295  		}
2296  	}
2297  
2298  	return 0;
2299  }
2300  
2301  /**
2302   * drm_gpuvm_sm_map() - creates the &drm_gpuva_op split/merge steps
2303   * @gpuvm: the &drm_gpuvm representing the GPU VA space
2304   * @req_addr: the start address of the new mapping
2305   * @req_range: the range of the new mapping
2306   * @req_obj: the &drm_gem_object to map
2307   * @req_offset: the offset within the &drm_gem_object
2308   * @priv: pointer to a driver private data structure
2309   *
2310   * This function iterates the given range of the GPU VA space. It utilizes the
2311   * &drm_gpuvm_ops to call back into the driver providing the split and merge
2312   * steps.
2313   *
2314   * Drivers may use these callbacks to update the GPU VA space right away within
2315   * the callback. In case the driver decides to copy and store the operations for
2316   * later processing, neither this function nor &drm_gpuvm_sm_unmap is allowed to
2317   * be called before the &drm_gpuvm's view of the GPU VA space was
2318   * updated with the previous set of operations. To update the
2319   * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2320   * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
2321   * used.
2322   *
2323   * A sequence of callbacks can contain map, unmap and remap operations, but
2324   * the sequence of callbacks might also be empty if no operation is required,
2325   * e.g. if the requested mapping already exists in the exact same way.
2326   *
2327   * There can be an arbitrary number of unmap operations, a maximum of two remap
2328   * operations and a single map operation. The latter one represents the original
2329   * map operation requested by the caller.
2330   *
2331   * Returns: 0 on success or a negative error code
2332   */
2333  int
2334  drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
2335  		 u64 req_addr, u64 req_range,
2336  		 struct drm_gem_object *req_obj, u64 req_offset)
2337  {
2338  	const struct drm_gpuvm_ops *ops = gpuvm->ops;
2339  
2340  	if (unlikely(!(ops && ops->sm_step_map &&
2341  		       ops->sm_step_remap &&
2342  		       ops->sm_step_unmap)))
2343  		return -EINVAL;
2344  
2345  	return __drm_gpuvm_sm_map(gpuvm, ops, priv,
2346  				  req_addr, req_range,
2347  				  req_obj, req_offset);
2348  }
2349  EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
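
/*
 * Editor's note: a hedged sketch of the callback side for the immediate mode
 * described above, loosely modeled on how existing users wire things up. The
 * driver supplies the three sm_step callbacks and updates its VA view right
 * away via drm_gpuva_map(), drm_gpuva_remap() and drm_gpuva_unmap(); the page
 * table updates hinted at in the comments are entirely driver specific.
 * "struct driver_ctx" with its pre-allocated GPUVAs and vm_bo is a placeholder.
 *
 *	static int driver_sm_step_map(struct drm_gpuva_op *op, void *priv)
 *	{
 *		struct driver_ctx *ctx = priv;
 *
 *		// program the page tables for op->map
 *		drm_gpuva_map(ctx->vm, ctx->new_va, &op->map);
 *		drm_gpuva_link(ctx->new_va, ctx->vm_bo);
 *		return 0;
 *	}
 *
 *	static int driver_sm_step_remap(struct drm_gpuva_op *op, void *priv)
 *	{
 *		struct driver_ctx *ctx = priv;
 *		struct drm_gpuva *va = op->remap.unmap->va;
 *
 *		// adjust the page tables for the remapped region
 *		drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
 *		if (op->remap.prev)
 *			drm_gpuva_link(ctx->prev_va, va->vm_bo);
 *		if (op->remap.next)
 *			drm_gpuva_link(ctx->next_va, va->vm_bo);
 *		drm_gpuva_unlink(va);
 *		return 0;
 *	}
 *
 *	static int driver_sm_step_unmap(struct drm_gpuva_op *op, void *priv)
 *	{
 *		struct drm_gpuva *va = op->unmap.va;
 *
 *		// zap the page tables backing va
 *		drm_gpuva_unmap(&op->unmap);
 *		drm_gpuva_unlink(va);
 *		return 0;
 *	}
 *
 *	static const struct drm_gpuvm_ops driver_gpuvm_ops = {
 *		.sm_step_map = driver_sm_step_map,
 *		.sm_step_remap = driver_sm_step_remap,
 *		.sm_step_unmap = driver_sm_step_unmap,
 *	};
 *
 *	ret = drm_gpuvm_sm_map(gpuvm, ctx, addr, range, obj, offset);
 */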
2350  
2351  /**
2352   * drm_gpuvm_sm_unmap() - creates the &drm_gpuva_ops to split on unmap
2353   * @gpuvm: the &drm_gpuvm representing the GPU VA space
2354   * @priv: pointer to a driver private data structure
2355   * @req_addr: the start address of the range to unmap
2356   * @req_range: the range of the mappings to unmap
2357   *
2358   * This function iterates the given range of the GPU VA space. It utilizes the
2359   * &drm_gpuvm_ops to call back into the driver providing the operations to
2360   * unmap and, if required, split existent mappings.
2361   *
2362   * Drivers may use these callbacks to update the GPU VA space right away within
2363   * the callback. In case the driver decides to copy and store the operations for
2364   * later processing, neither this function nor &drm_gpuvm_sm_map is allowed to be
2365   * called before the &drm_gpuvm's view of the GPU VA space was updated
2366   * with the previous set of operations. To update the &drm_gpuvm's view
2367   * of the GPU VA space drm_gpuva_insert(), drm_gpuva_destroy_locked() and/or
2368   * drm_gpuva_destroy_unlocked() should be used.
2369   *
2370   * A sequence of callbacks can contain unmap and remap operations, depending on
2371   * whether there are actual overlapping mappings to split.
2372   *
2373   * There can be an arbitrary number of unmap operations and a maximum of two
2374   * remap operations.
2375   *
2376   * Returns: 0 on success or a negative error code
2377   */
2378  int
2379  drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
2380  		   u64 req_addr, u64 req_range)
2381  {
2382  	const struct drm_gpuvm_ops *ops = gpuvm->ops;
2383  
2384  	if (unlikely(!(ops && ops->sm_step_remap &&
2385  		       ops->sm_step_unmap)))
2386  		return -EINVAL;
2387  
2388  	return __drm_gpuvm_sm_unmap(gpuvm, ops, priv,
2389  				    req_addr, req_range);
2390  }
2391  EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap);
2392  
2393  static struct drm_gpuva_op *
2394  gpuva_op_alloc(struct drm_gpuvm *gpuvm)
2395  {
2396  	const struct drm_gpuvm_ops *fn = gpuvm->ops;
2397  	struct drm_gpuva_op *op;
2398  
2399  	if (fn && fn->op_alloc)
2400  		op = fn->op_alloc();
2401  	else
2402  		op = kzalloc(sizeof(*op), GFP_KERNEL);
2403  
2404  	if (unlikely(!op))
2405  		return NULL;
2406  
2407  	return op;
2408  }
2409  
2410  static void
2411  gpuva_op_free(struct drm_gpuvm *gpuvm,
2412  	      struct drm_gpuva_op *op)
2413  {
2414  	const struct drm_gpuvm_ops *fn = gpuvm->ops;
2415  
2416  	if (fn && fn->op_free)
2417  		fn->op_free(op);
2418  	else
2419  		kfree(op);
2420  }
2421  
2422  static int
2423  drm_gpuva_sm_step(struct drm_gpuva_op *__op,
2424  		  void *priv)
2425  {
2426  	struct {
2427  		struct drm_gpuvm *vm;
2428  		struct drm_gpuva_ops *ops;
2429  	} *args = priv;
2430  	struct drm_gpuvm *gpuvm = args->vm;
2431  	struct drm_gpuva_ops *ops = args->ops;
2432  	struct drm_gpuva_op *op;
2433  
2434  	op = gpuva_op_alloc(gpuvm);
2435  	if (unlikely(!op))
2436  		goto err;
2437  
2438  	memcpy(op, __op, sizeof(*op));
2439  
2440  	if (op->op == DRM_GPUVA_OP_REMAP) {
2441  		struct drm_gpuva_op_remap *__r = &__op->remap;
2442  		struct drm_gpuva_op_remap *r = &op->remap;
2443  
2444  		r->unmap = kmemdup(__r->unmap, sizeof(*r->unmap),
2445  				   GFP_KERNEL);
2446  		if (unlikely(!r->unmap))
2447  			goto err_free_op;
2448  
2449  		if (__r->prev) {
2450  			r->prev = kmemdup(__r->prev, sizeof(*r->prev),
2451  					  GFP_KERNEL);
2452  			if (unlikely(!r->prev))
2453  				goto err_free_unmap;
2454  		}
2455  
2456  		if (__r->next) {
2457  			r->next = kmemdup(__r->next, sizeof(*r->next),
2458  					  GFP_KERNEL);
2459  			if (unlikely(!r->next))
2460  				goto err_free_prev;
2461  		}
2462  	}
2463  
2464  	list_add_tail(&op->entry, &ops->list);
2465  
2466  	return 0;
2467  
2468  err_free_prev:
2469  	kfree(op->remap.prev);
2470  err_free_unmap:
2471  	kfree(op->remap.unmap);
2472  err_free_op:
2473  	gpuva_op_free(gpuvm, op);
2474  err:
2475  	return -ENOMEM;
2476  }
2477  
2478  static const struct drm_gpuvm_ops gpuvm_list_ops = {
2479  	.sm_step_map = drm_gpuva_sm_step,
2480  	.sm_step_remap = drm_gpuva_sm_step,
2481  	.sm_step_unmap = drm_gpuva_sm_step,
2482  };
2483  
2484  /**
2485   * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
2486   * @gpuvm: the &drm_gpuvm representing the GPU VA space
2487   * @req_addr: the start address of the new mapping
2488   * @req_range: the range of the new mapping
2489   * @req_obj: the &drm_gem_object to map
2490   * @req_offset: the offset within the &drm_gem_object
2491   *
2492   * This function creates a list of operations to perform splitting and merging
2493   * of existent mapping(s) with the newly requested one.
2494   *
2495   * The list can be iterated with &drm_gpuva_for_each_op and must be processed
2496   * in the given order. It can contain map, unmap and remap operations, but it
2497   * can also be empty if no operation is required, e.g. if the requested mapping
2498   * already exists in the exact same way.
2499   *
2500   * There can be an arbitrary number of unmap operations, a maximum of two remap
2501   * operations and a single map operation. The latter one represents the original
2502   * map operation requested by the caller.
2503   *
2504   * Note that before calling this function again with another mapping request it
2505   * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
2506   * previously obtained operations must be either processed or abandoned. To
2507   * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2508   * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
2509   * used.
2510   *
2511   * After the caller finished processing the returned &drm_gpuva_ops, they must
2512   * be freed with &drm_gpuva_ops_free.
2513   *
2514   * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
2515   */
2516  struct drm_gpuva_ops *
2517  drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
2518  			    u64 req_addr, u64 req_range,
2519  			    struct drm_gem_object *req_obj, u64 req_offset)
2520  {
2521  	struct drm_gpuva_ops *ops;
2522  	struct {
2523  		struct drm_gpuvm *vm;
2524  		struct drm_gpuva_ops *ops;
2525  	} args;
2526  	int ret;
2527  
2528  	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
2529  	if (unlikely(!ops))
2530  		return ERR_PTR(-ENOMEM);
2531  
2532  	INIT_LIST_HEAD(&ops->list);
2533  
2534  	args.vm = gpuvm;
2535  	args.ops = ops;
2536  
2537  	ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
2538  				 req_addr, req_range,
2539  				 req_obj, req_offset);
2540  	if (ret)
2541  		goto err_free_ops;
2542  
2543  	return ops;
2544  
2545  err_free_ops:
2546  	drm_gpuva_ops_free(gpuvm, ops);
2547  	return ERR_PTR(ret);
2548  }
2549  EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_ops_create);
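
/*
 * Editor's note: a minimal sketch of the ops-list flow, with error handling
 * and the actual page table programming left out; "addr", "range", "obj" and
 * "offset" mirror the parameters documented above.
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *
 *	ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, obj, offset);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		switch (op->op) {
 *		case DRM_GPUVA_OP_MAP:
 *			// allocate a struct drm_gpuva, then drm_gpuva_map()
 *			break;
 *		case DRM_GPUVA_OP_REMAP:
 *			// drm_gpuva_remap() with pre-allocated prev/next GPUVAs
 *			break;
 *		case DRM_GPUVA_OP_UNMAP:
 *			// drm_gpuva_unmap(), then drm_gpuva_unlink()
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 *
 *	drm_gpuva_ops_free(gpuvm, ops);
 */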
2550  
2551  /**
2552   * drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
2553   * unmap
2554   * @gpuvm: the &drm_gpuvm representing the GPU VA space
2555   * @req_addr: the start address of the range to unmap
2556   * @req_range: the range of the mappings to unmap
2557   *
2558   * This function creates a list of operations to perform unmapping and, if
2559   * required, splitting of the mappings overlapping the unmap range.
2560   *
2561   * The list can be iterated with &drm_gpuva_for_each_op and must be processed
2562   * in the given order. It can contain unmap and remap operations, depending on
2563   * whether there are actual overlapping mappings to split.
2564   *
2565   * There can be an arbitrary number of unmap operations and a maximum of two
2566   * remap operations.
2567   *
2568   * Note that before calling this function again with another range to unmap it
2569   * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
2570   * previously obtained operations must be processed or abandoned. To update the
2571   * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2572   * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
2573   * used.
2574   *
2575   * After the caller finished processing the returned &drm_gpuva_ops, they must
2576   * be freed with &drm_gpuva_ops_free.
2577   *
2578   * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
2579   */
2580  struct drm_gpuva_ops *
2581  drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
2582  			      u64 req_addr, u64 req_range)
2583  {
2584  	struct drm_gpuva_ops *ops;
2585  	struct {
2586  		struct drm_gpuvm *vm;
2587  		struct drm_gpuva_ops *ops;
2588  	} args;
2589  	int ret;
2590  
2591  	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
2592  	if (unlikely(!ops))
2593  		return ERR_PTR(-ENOMEM);
2594  
2595  	INIT_LIST_HEAD(&ops->list);
2596  
2597  	args.vm = gpuvm;
2598  	args.ops = ops;
2599  
2600  	ret = __drm_gpuvm_sm_unmap(gpuvm, &gpuvm_list_ops, &args,
2601  				   req_addr, req_range);
2602  	if (ret)
2603  		goto err_free_ops;
2604  
2605  	return ops;
2606  
2607  err_free_ops:
2608  	drm_gpuva_ops_free(gpuvm, ops);
2609  	return ERR_PTR(ret);
2610  }
2611  EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_ops_create);
2612  
2613  /**
2614   * drm_gpuvm_prefetch_ops_create() - creates the &drm_gpuva_ops to prefetch
2615   * @gpuvm: the &drm_gpuvm representing the GPU VA space
2616   * @addr: the start address of the range to prefetch
2617   * @range: the range of the mappings to prefetch
2618   *
2619   * This function creates a list of operations to perform prefetching.
2620   *
2621   * The list can be iterated with &drm_gpuva_for_each_op and must be processed
2622   * in the given order. It can contain prefetch operations.
2623   *
2624   * There can be an arbitrary number of prefetch operations.
2625   *
2626   * After the caller finished processing the returned &drm_gpuva_ops, they must
2627   * be freed with &drm_gpuva_ops_free.
2628   *
2629   * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
2630   */
2631  struct drm_gpuva_ops *
2632  drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
2633  			      u64 addr, u64 range)
2634  {
2635  	struct drm_gpuva_ops *ops;
2636  	struct drm_gpuva_op *op;
2637  	struct drm_gpuva *va;
2638  	u64 end = addr + range;
2639  	int ret;
2640  
2641  	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
2642  	if (!ops)
2643  		return ERR_PTR(-ENOMEM);
2644  
2645  	INIT_LIST_HEAD(&ops->list);
2646  
2647  	drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) {
2648  		op = gpuva_op_alloc(gpuvm);
2649  		if (!op) {
2650  			ret = -ENOMEM;
2651  			goto err_free_ops;
2652  		}
2653  
2654  		op->op = DRM_GPUVA_OP_PREFETCH;
2655  		op->prefetch.va = va;
2656  		list_add_tail(&op->entry, &ops->list);
2657  	}
2658  
2659  	return ops;
2660  
2661  err_free_ops:
2662  	drm_gpuva_ops_free(gpuvm, ops);
2663  	return ERR_PTR(ret);
2664  }
2665  EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create);
2666  
2667  /**
2668   * drm_gpuvm_bo_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
2669   * @vm_bo: the &drm_gpuvm_bo abstraction
2670   *
2671   * This function creates a list of operations to perform unmapping for every
2672   * GPUVA attached to a GEM.
2673   *
2674   * The list can be iterated with &drm_gpuva_for_each_op and consists of an
2675   * arbitrary number of unmap operations.
2676   *
2677   * After the caller finished processing the returned &drm_gpuva_ops, they must
2678   * be freed with &drm_gpuva_ops_free.
2679   *
2680   * It is the caller's responsibility to protect the GEM's GPUVA list against
2681   * concurrent access using the GEM's dma_resv lock.
2682   *
2683   * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
2684   */
2685  struct drm_gpuva_ops *
2686  drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo)
2687  {
2688  	struct drm_gpuva_ops *ops;
2689  	struct drm_gpuva_op *op;
2690  	struct drm_gpuva *va;
2691  	int ret;
2692  
2693  	drm_gem_gpuva_assert_lock_held(vm_bo->obj);
2694  
2695  	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
2696  	if (!ops)
2697  		return ERR_PTR(-ENOMEM);
2698  
2699  	INIT_LIST_HEAD(&ops->list);
2700  
2701  	drm_gpuvm_bo_for_each_va(va, vm_bo) {
2702  		op = gpuva_op_alloc(vm_bo->vm);
2703  		if (!op) {
2704  			ret = -ENOMEM;
2705  			goto err_free_ops;
2706  		}
2707  
2708  		op->op = DRM_GPUVA_OP_UNMAP;
2709  		op->unmap.va = va;
2710  		list_add_tail(&op->entry, &ops->list);
2711  	}
2712  
2713  	return ops;
2714  
2715  err_free_ops:
2716  	drm_gpuva_ops_free(vm_bo->vm, ops);
2717  	return ERR_PTR(ret);
2718  }
2719  EXPORT_SYMBOL_GPL(drm_gpuvm_bo_unmap_ops_create);
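
/*
 * Editor's note: an illustrative sketch of tearing down every mapping of a
 * GEM within one VM, e.g. from a driver's gem_close path; how the &drm_gpuvm_bo
 * is looked up and which locks are held is driver specific and omitted here.
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *
 *	ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		struct drm_gpuva *va = op->unmap.va;
 *
 *		// zap the page tables backing va
 *		drm_gpuva_unmap(&op->unmap);
 *		drm_gpuva_unlink(va);
 *	}
 *
 *	drm_gpuva_ops_free(vm_bo->vm, ops);
 */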
2720  
2721  /**
2722   * drm_gpuva_ops_free() - free the given &drm_gpuva_ops
2723   * @gpuvm: the &drm_gpuvm the ops were created for
2724   * @ops: the &drm_gpuva_ops to free
2725   *
2726   * Frees the given &drm_gpuva_ops structure including all the ops associated
2727   * with it.
2728   */
2729  void
2730  drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
2731  		   struct drm_gpuva_ops *ops)
2732  {
2733  	struct drm_gpuva_op *op, *next;
2734  
2735  	drm_gpuva_for_each_op_safe(op, next, ops) {
2736  		list_del(&op->entry);
2737  
2738  		if (op->op == DRM_GPUVA_OP_REMAP) {
2739  			kfree(op->remap.prev);
2740  			kfree(op->remap.next);
2741  			kfree(op->remap.unmap);
2742  		}
2743  
2744  		gpuva_op_free(gpuvm, op);
2745  	}
2746  
2747  	kfree(ops);
2748  }
2749  EXPORT_SYMBOL_GPL(drm_gpuva_ops_free);
2750  
2751  MODULE_DESCRIPTION("DRM GPUVM");
2752  MODULE_LICENSE("GPL");
2753