/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#ifndef _TTM_BO_API_H_
#define _TTM_BO_API_H_

#include <drm/drm_gem.h>

#include <linux/kref.h>
#include <linux/list.h>

#include "ttm_device.h"

/* Default number of pre-faulted pages in the TTM fault handler */
#define TTM_BO_VM_NUM_PREFAULT 16

struct iosys_map;

struct ttm_global;
struct ttm_device;
struct ttm_placement;
struct ttm_place;
struct ttm_resource;
struct ttm_resource_manager;
struct ttm_tt;

/**
 * enum ttm_bo_type
 *
 * @ttm_bo_type_device:	These are 'normal' buffers that can
 * be mmapped by user space. Each of these bos occupies a slot in the
 * device address space that can be used for normal vm operations.
 *
 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
 * but they cannot be accessed from user-space. For kernel-only use.
 *
 * @ttm_bo_type_sg: Buffer made from a dma-buf sg table shared with another
 * driver.
 */
enum ttm_bo_type {
	ttm_bo_type_device,
	ttm_bo_type_kernel,
	ttm_bo_type_sg
};

/**
 * struct ttm_buffer_object
 *
 * @base: drm_gem_object superclass data.
 * @bdev: Pointer to the buffer object device structure.
 * @type: The bo type.
 * @page_alignment: Page alignment.
 * @destroy: Destruction function. If NULL, kfree is used.
 * @kref: Reference count of this buffer object. When this refcount reaches
 * zero, the object is destroyed or put on the delayed delete list.
 * @resource: structure describing current placement.
 * @ttm: TTM structure holding system pages.
 * @deleted: True if the object is only a zombie and already deleted.
 * @bulk_move: The bulk move object.
 * @priority: Priority for LRU, BOs with lower priority are evicted first.
 * @pin_count: Pin count.
 *
 * Base class for TTM buffer objects, dealing with data placement and CPU
 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
 * the driver can usually use the placement offset directly as the GPU
 * virtual address. For drivers implementing multiple GPU memory manager
 * contexts, the driver should manage the address space in these contexts
 * separately and use these objects to get the correct placement and caching
 * for these GPU maps. This makes it possible to use these objects for even
 * quite elaborate memory management schemes.
 * The destroy member together with the API visibility of this object makes
 * it possible to derive driver-specific types.
 */
struct ttm_buffer_object {
	struct drm_gem_object base;

	/*
	 * Members constant at init.
	 */
	struct ttm_device *bdev;
	enum ttm_bo_type type;
	uint32_t page_alignment;
	void (*destroy) (struct ttm_buffer_object *);

	/*
	 * Members not needing protection.
	 */
	struct kref kref;

	/*
	 * Members protected by the bo::resv::reserved lock.
	 */
	struct ttm_resource *resource;
	struct ttm_tt *ttm;
	bool deleted;
	struct ttm_lru_bulk_move *bulk_move;
	unsigned priority;
	unsigned pin_count;

	/**
	 * @delayed_delete: Work item used when we can't delete the BO
	 * immediately
	 */
	struct work_struct delayed_delete;

	/**
	 * @sg: external source of pages and DMA addresses, protected by the
	 * reservation lock.
	 */
	struct sg_table *sg;
};
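
/*
 * Example (illustrative sketch): a driver-specific BO type derived by
 * embedding struct ttm_buffer_object and released through the @destroy hook.
 * "struct my_bo" and my_bo_destroy() are hypothetical driver code; only
 * struct ttm_buffer_object itself comes from this header.
 *
 *	struct my_bo {
 *		struct ttm_buffer_object tbo;
 *		// driver-private state...
 *	};
 *
 *	static void my_bo_destroy(struct ttm_buffer_object *tbo)
 *	{
 *		struct my_bo *bo = container_of(tbo, struct my_bo, tbo);
 *
 *		kfree(bo);
 *	}
 */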

#define TTM_BO_MAP_IOMEM_MASK 0x80

/**
 * struct ttm_bo_kmap_obj
 *
 * @virtual: The current kernel virtual address.
 * @page: The page when kmap'ing a single page.
 * @bo_kmap_type: Type of bo_kmap.
 * @bo: The TTM BO.
 *
 * Object describing a kernel mapping. Since a TTM bo may be located
 * in various memory types with various caching policies, the
 * mapping can either be an ioremap, a vmap, a kmap or part of a
 * premapped region.
 */
struct ttm_bo_kmap_obj {
	void *virtual;
	struct page *page;
	enum {
		ttm_bo_map_iomap        = 1 | TTM_BO_MAP_IOMEM_MASK,
		ttm_bo_map_vmap         = 2,
		ttm_bo_map_kmap         = 3,
		ttm_bo_map_premapped    = 4 | TTM_BO_MAP_IOMEM_MASK,
	} bo_kmap_type;
	struct ttm_buffer_object *bo;
};

/**
 * struct ttm_operation_ctx
 *
 * @interruptible: Sleep interruptible if sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @gfp_retry_mayfail: Set __GFP_RETRY_MAYFAIL when allocating pages.
 * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
 * BOs share the same reservation object.
 * @force_alloc: Don't check the memory account during suspend or CPU page
 * faults. Should only be used by TTM internally.
 * @resv: Reservation object to allow reserved evictions with.
 * @bytes_moved: Statistics on how many bytes have been moved.
 *
 * Context for TTM operations like changing buffer placement or general memory
 * allocation.
 */
struct ttm_operation_ctx {
	bool interruptible;
	bool no_wait_gpu;
	bool gfp_retry_mayfail;
	bool allow_res_evict;
	bool force_alloc;
	struct dma_resv *resv;
	uint64_t bytes_moved;
};
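
/*
 * Example (illustrative sketch): an on-stack context for an interruptible
 * validation of an already reserved BO. "bo", "placement" and the calling
 * driver code are hypothetical; struct ttm_operation_ctx and
 * ttm_bo_validate() (declared below) are the real API.
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = true,
 *		.no_wait_gpu = false,
 *	};
 *	int ret;
 *
 *	ret = ttm_bo_validate(bo, &placement, &ctx);
 *	if (ret)
 *		return ret;
 */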

struct ttm_lru_walk;

/** struct ttm_lru_walk_ops - Operations for a LRU walk. */
struct ttm_lru_walk_ops {
	/**
	 * process_bo - Process this bo.
	 * @walk: struct ttm_lru_walk describing the walk.
	 * @bo: A locked and referenced buffer object.
	 *
	 * Return: Negative error code on error, user-defined positive value
	 * (typically, but not always, the size of the processed bo) on success.
	 * 0 also indicates success; -EBUSY means this bo was skipped.
	 * On success, the returned values are summed by the walk and the
	 * walk exits when its target is met.
	 */
	s64 (*process_bo)(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo);
};

/**
 * struct ttm_lru_walk - Structure describing a LRU walk.
 */
struct ttm_lru_walk {
	/** @ops: Pointer to the ops structure. */
	const struct ttm_lru_walk_ops *ops;
	/** @ctx: Pointer to the struct ttm_operation_ctx. */
	struct ttm_operation_ctx *ctx;
	/** @ticket: The struct ww_acquire_ctx if any. */
	struct ww_acquire_ctx *ticket;
	/** @trylock_only: Only use trylock for locking. */
	bool trylock_only;
};

s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
			   struct ttm_resource_manager *man, s64 target);
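
/*
 * Example (illustrative sketch): a minimal walk that processes BOs until
 * @target bytes have been handled. The callback body and the driver helper
 * my_driver_swapout() are hypothetical; struct ttm_lru_walk_ops,
 * struct ttm_lru_walk and ttm_lru_walk_for_evict() are the real API.
 *
 *	static s64 my_process_bo(struct ttm_lru_walk *walk,
 *				 struct ttm_buffer_object *bo)
 *	{
 *		// Return the processed size on success, 0 to count nothing,
 *		// or -EBUSY to skip this bo and continue the walk.
 *		return my_driver_swapout(bo, walk->ctx);
 *	}
 *
 *	static const struct ttm_lru_walk_ops my_walk_ops = {
 *		.process_bo = my_process_bo,
 *	};
 *
 *	struct ttm_lru_walk walk = {
 *		.ops = &my_walk_ops,
 *		.ctx = &ctx,
 *		.trylock_only = true,
 *	};
 *	s64 progress = ttm_lru_walk_for_evict(&walk, bdev, man, target);
 */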

/**
 * ttm_bo_get - reference a struct ttm_buffer_object
 *
 * @bo: The buffer object.
 */
static inline void ttm_bo_get(struct ttm_buffer_object *bo)
{
	kref_get(&bo->kref);
}

/**
 * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
 * its refcount has already reached zero.
 * @bo: The buffer object.
 *
 * Used to reference a TTM buffer object in lookups where the object is removed
 * from the lookup structure during the destructor and for RCU lookups.
 *
 * Returns: @bo if the referencing was successful, NULL otherwise.
 */
static inline __must_check struct ttm_buffer_object *
ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
{
	if (!kref_get_unless_zero(&bo->kref))
		return NULL;
	return bo;
}

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation (i.e. prevents other processes from
 * locking it for validation), while taking a number of measures to prevent
 * deadlocks.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again.
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @ticket is non-NULL.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
				 bool interruptible, bool no_wait,
				 struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	if (no_wait) {
		bool success;

		if (WARN_ON(ticket))
			return -EBUSY;

		success = dma_resv_trylock(bo->base.resv);
		return success ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
	else
		ret = dma_resv_lock(bo->base.resv, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;
	return ret;
}

/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
					  bool interruptible,
					  struct ww_acquire_ctx *ticket)
{
	if (interruptible) {
		int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
							   ticket);
		if (ret == -EINTR)
			ret = -ERESTARTSYS;
		return ret;
	}
	dma_resv_lock_slow(bo->base.resv, ticket);
	return 0;
}
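
/*
 * Example (illustrative sketch): the usual wound/wait backoff pattern when
 * reserving several BOs against the same ticket. The list handling and
 * error paths are simplified, and unreserve_previously_reserved_bos() is a
 * hypothetical driver helper; the ww_acquire_ctx calls and
 * ttm_bo_reserve()/ttm_bo_reserve_slowpath()/ttm_bo_unreserve() are real.
 *
 *	struct ww_acquire_ctx ticket;
 *	int ret;
 *
 *	ww_acquire_init(&ticket, &reservation_ww_class);
 *	ret = ttm_bo_reserve(bo, true, false, &ticket);
 *	if (ret == -EDEADLK) {
 *		// Back off: drop every reservation already held, then sleep
 *		// on the contended BO without any deadlock risk.
 *		unreserve_previously_reserved_bos();
 *		ret = ttm_bo_reserve_slowpath(bo, true, &ticket);
 *	}
 *	...
 *	ttm_bo_unreserve(bo);
 *	ww_acquire_fini(&ticket);
 */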

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);

static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	ttm_bo_move_to_lru_tail(bo);
	spin_unlock(&bo->bdev->lru_lock);
}

static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
				     struct ttm_resource *new_mem)
{
	WARN_ON(bo->resource);
	bo->resource = new_mem;
}

/**
 * ttm_bo_move_null - assign memory for a buffer object.
 * @bo: The bo to assign the memory to
 * @new_mem: The memory to be assigned.
 *
 * Free the current resource of the buffer object @bo and assign @new_mem as
 * its new placement, without performing an actual move.
 */
static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
				    struct ttm_resource *new_mem)
{
	ttm_resource_free(bo, &bo->resource);
	ttm_bo_assign_mem(bo, new_mem);
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	ttm_bo_move_to_lru_tail_unlocked(bo);
	dma_resv_unlock(bo->base.resv);
}

/**
 * ttm_kmap_obj_virtual
 *
 * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
 * @is_iomem: Pointer to a bool that on return is true if the
 * virtual map is io memory, false if normal memory.
 *
 * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
 * If *is_iomem is true on return, the virtual address points to an io memory
 * area, which should strictly be accessed by the iowriteXX() and similar
 * functions.
 */
static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
					 bool *is_iomem)
{
	*is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
	return map->virtual;
}
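
/*
 * Example (illustrative sketch): mapping the first page of a reserved BO
 * into the kernel and clearing it, choosing memset_io() or memset()
 * depending on the is_iomem result. The surrounding driver code is
 * hypothetical; ttm_bo_kmap(), ttm_kmap_obj_virtual() and ttm_bo_kunmap()
 * are declared in this header.
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virt;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, 1, &map);
 *	if (ret)
 *		return ret;
 *	virt = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	if (is_iomem)
 *		memset_io((void __iomem *)virt, 0, PAGE_SIZE);
 *	else
 *		memset(virt, 0, PAGE_SIZE);
 *	ttm_bo_kunmap(&map);
 */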

int ttm_bo_wait_ctx(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx);
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx);
void ttm_bo_put(struct ttm_buffer_object *bo);
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
			  struct ttm_lru_bulk_move *bulk);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place);
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, struct ttm_operation_ctx *ctx,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy)(struct ttm_buffer_object *));
int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, bool interruptible,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy)(struct ttm_buffer_object *));
int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
		unsigned long num_pages, struct ttm_bo_kmap_obj *map);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		   struct ttm_resource_manager *man, gfp_t gfp_flags,
		   s64 target);
void ttm_bo_pin(struct ttm_buffer_object *bo);
void ttm_bo_unpin(struct ttm_buffer_object *bo);
int ttm_bo_evict_first(struct ttm_device *bdev,
		       struct ttm_resource_manager *man,
		       struct ttm_operation_ctx *ctx);
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
			     struct vm_fault *vmf);
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
				    pgprot_t prot,
				    pgoff_t num_prefault);
vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
void ttm_bo_vm_open(struct vm_area_struct *vma);
void ttm_bo_vm_close(struct vm_area_struct *vma);
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
		     void *buf, int len, int write);
vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);

int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_resource **mem,
		     struct ttm_operation_ctx *ctx);

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
/*
 * ttm_bo_util.c
 */
int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem);
void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem);
void ttm_move_memcpy(bool clear, u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter);
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *new_mem);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence, bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem);
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
			      struct ttm_resource *new_mem);
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp);
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);

#endif