/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include "drm_internal.h"

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview and lifetime rules
 *
 * Similar to GEM global names, PRIME file descriptors are also used to share
 * buffer objects across processes. They offer additional security: as file
 * descriptors must be explicitly sent over UNIX domain sockets to be shared
 * between applications, they can't be guessed like the globally unique GEM
 * names.
 *
 * Drivers that support the PRIME API implement the
 * &drm_gem_object_funcs.export and &drm_driver.gem_prime_import hooks.
 * &dma_buf_ops implementations for drivers are all individually exported for
 * drivers which need to override or reimplement some of them.
 *
 * Reference Counting for GEM Drivers
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * On export the &dma_buf holds a reference to the exported buffer object,
 * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
 * IOCTL, when it first calls &drm_gem_object_funcs.export
 * and stores the exporting GEM object in the &dma_buf.priv field. This
 * reference needs to be released when the final reference to the &dma_buf
 * itself is dropped and its &dma_buf_ops.release function is called.  For
 * GEM-based drivers, the &dma_buf should be exported using
 * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
 *
 * Thus the chain of references always flows in one direction, avoiding loops:
 * importing GEM object -> dma-buf -> exported GEM bo. A further complication
 * is the pair of lookup caches for import and export. These are required to
 * guarantee that any given object will always have only one unique userspace
 * handle. This is required to allow userspace to detect duplicated imports,
 * since some GEM drivers do fail command submissions if a given buffer object
 * is listed more than once. These import and export caches in
 * &drm_prime_file_private only retain a weak reference, which is cleaned up
 * when the corresponding object is released.
 *
 * Self-importing: If userspace is using PRIME as a replacement for flink then
 * it will get an fd->handle request for a GEM object that it created. Drivers
 * should detect this situation and return the underlying object from the
 * dma-buf private. For GEM based drivers this is handled in
 * drm_gem_prime_import() already.
 */
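
/*
 * A minimal sketch (not compiled here) of how a GEM driver might wire up the
 * hooks described above. The "foo_*" names are hypothetical; only the helpers
 * from this file are real. Both assignments shown are in fact the defaults
 * used when the hooks are left NULL, so a driver only needs them explicitly
 * if it wants to wrap the helpers:
 *
 *	static const struct drm_gem_object_funcs foo_gem_funcs = {
 *		.free = foo_gem_free,
 *		.get_sg_table = foo_gem_get_sg_table,
 *		.export = drm_gem_prime_export,
 *	};
 *
 *	static const struct drm_driver foo_driver = {
 *		.gem_prime_import = drm_gem_prime_import,
 *		.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *	};
 */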

struct drm_prime_member {
	struct dma_buf *dma_buf;
	uint32_t handle;

	struct rb_node dmabuf_rb;
	struct rb_node handle_rb;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
	struct rb_node **p, *rb;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

	rb = NULL;
	p = &prime_fpriv->handles.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (handle > pos->handle)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->handle_rb, rb, p);
	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle)
			return member->dma_buf;
		else if (member->handle < handle)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return -ENOENT;
}

void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
				 uint32_t handle)
{
	struct rb_node *rb;

	mutex_lock(&prime_fpriv->lock);

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle) {
			rb_erase(&member->handle_rb, &prime_fpriv->handles);
			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

			dma_buf_put(member->dma_buf);
			kfree(member);
			break;
		} else if (member->handle < handle) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	mutex_unlock(&prime_fpriv->lock);
}

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	mutex_init(&prime_fpriv->lock);
	prime_fpriv->dmabufs = RB_ROOT;
	prime_fpriv->handles = RB_ROOT;
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}

/**
 * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct drm_gem_object *obj = exp_info->priv;
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	drm_dev_get(dev);
	drm_gem_object_get(obj);
	dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);

/**
 * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their &dma_buf_ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* drop the reference on the export fd holds */
	drm_gem_object_put(obj);

	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
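
/*
 * A sketch, under stated assumptions, of how a driver that needs its own
 * &dma_buf_ops (for example to hook attach) can still reuse the two helpers
 * above. The "foo_*" names are hypothetical:
 *
 *	static const struct dma_buf_ops foo_dmabuf_ops = {
 *		.attach = foo_dmabuf_attach,
 *		.detach = drm_gem_map_detach,
 *		.map_dma_buf = drm_gem_map_dma_buf,
 *		.unmap_dma_buf = drm_gem_unmap_dma_buf,
 *		.release = drm_gem_dmabuf_release,
 *		.mmap = drm_gem_dmabuf_mmap,
 *		.vmap = drm_gem_dmabuf_vmap,
 *		.vunmap = drm_gem_dmabuf_vunmap,
 *	};
 *
 *	static struct dma_buf *foo_gem_export(struct drm_gem_object *obj, int flags)
 *	{
 *		struct dma_buf_export_info exp_info = {
 *			.exp_name = KBUILD_MODNAME,
 *			.owner = obj->dev->driver->fops->owner,
 *			.ops = &foo_dmabuf_ops,
 *			.size = obj->size,
 *			.flags = flags,
 *			.priv = obj,
 *			.resv = obj->resv,
 *		};
 *
 *		return drm_gem_dmabuf_export(obj->dev, &exp_info);
 *	}
 */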

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: drm_device to import into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * &drm_driver.gem_prime_import driver callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	if (dev->driver->gem_prime_import)
		obj = dev->driver->gem_prime_import(dev, dma_buf);
	else
		obj = drm_gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_put(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (dev->driver->prime_fd_to_handle) {
		return dev->driver->prime_fd_to_handle(dev, file_priv, args->fd,
						       &args->handle);
	}

	return drm_gem_prime_fd_to_handle(dev, file_priv, args->fd, &args->handle);
}

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	if (obj->funcs && obj->funcs->export)
		dmabuf = obj->funcs->export(obj, flags);
	else
		dmabuf = drm_gem_prime_export(obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_dmabuf - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * &drm_gem_object_funcs.export callback.
 *
 * Unlike drm_gem_prime_handle_to_fd(), it returns the struct dma_buf it
 * has created, without attaching it to any file descriptors.  The difference
 * between those two is similar to that between anon_inode_getfile() and
 * anon_inode_getfd(); insertion into the descriptor table is something you
 * cannot revert if any cleanup is needed, so the descriptor-returning
 * variants should only be used when you are past the last failure exit
 * and the only thing left is passing the new file descriptor to userland.
 * When all you need is the object itself or when you need to do something
 * else that might fail, use this function instead.
 */
struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		dmabuf = ERR_PTR(-ENOENT);
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't fail to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret) {
		dma_buf_put(dmabuf);
		dmabuf = ERR_PTR(ret);
	}
out:
	drm_gem_object_put(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);
	return dmabuf;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_dmabuf);

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * &drm_gem_object_funcs.export callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct dma_buf *dmabuf;
	int fd = get_unused_fd_flags(flags);

	if (fd < 0)
		return fd;

	dmabuf = drm_gem_prime_handle_to_dmabuf(dev, file_priv, handle, flags);
	if (IS_ERR(dmabuf)) {
		put_unused_fd(fd);
		return PTR_ERR(dmabuf);
	}

	fd_install(fd, dmabuf->file);
	*prime_fd = fd;
	return 0;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	/* check flags are valid */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	if (dev->driver->prime_handle_to_fd) {
		return dev->driver->prime_handle_to_fd(dev, file_priv,
						       args->handle, args->flags,
						       &args->fd);
	}
	return drm_gem_prime_handle_to_fd(dev, file_priv, args->handle,
					  args->flags, &args->fd);
}

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement &drm_gem_object_funcs.export and
 * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
 * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
 * implement dma-buf support in terms of some lower-level helpers, which are
 * again exported for drivers to use individually:
 *
 * Exporting buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Optional pinning of buffers is handled at dma-buf attach and detach time in
 * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
 * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which rely on
 * &drm_gem_object_funcs.get_sg_table. If &drm_gem_object_funcs.get_sg_table is
 * unimplemented, exports into another device are rejected.
 *
 * For kernel-internal access there's drm_gem_dmabuf_vmap() and
 * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
 * drm_gem_dmabuf_mmap().
 *
 * Note that these export helpers can only be used if the underlying backing
 * storage is fully coherent and either permanently pinned, or it is safe to pin
 * it indefinitely.
 *
 * FIXME: The underlying helper functions are named rather inconsistently.
 *
 * Importing buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Importing dma-bufs using drm_gem_prime_import() relies on
 * &drm_driver.gem_prime_import_sg_table.
 *
 * Note that similarly to the export helpers this permanently pins the
 * underlying backing storage, which is ok for scanout but not the best option
 * for sharing lots of buffers for rendering.
 */
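
/*
 * A rough sketch (hypothetical "foo_*" names) of the per-object callbacks a
 * driver typically provides so that the export helpers below work; pin/unpin
 * are only needed when the backing storage is not permanently pinned:
 *
 *	static const struct drm_gem_object_funcs foo_gem_funcs = {
 *		.pin = foo_gem_pin,
 *		.unpin = foo_gem_unpin,
 *		.get_sg_table = foo_gem_get_sg_table,
 *		.vmap = foo_gem_vmap,
 *		.vunmap = foo_gem_vunmap,
 *		.mmap = foo_gem_mmap,
 *	};
 */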

/**
 * drm_gem_map_attach - dma_buf attach implementation for GEM
 * @dma_buf: buffer to attach device to
 * @attach: buffer attachment data
 *
 * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
 * used as the &dma_buf_ops.attach callback. Must be used together with
 * drm_gem_map_detach().
 *
 * Returns 0 on success, negative error code on failure.
 */
int drm_gem_map_attach(struct dma_buf *dma_buf,
		       struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	/*
	 * drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers
	 * that implement their own ->map_dma_buf() do not.
	 */
	if (dma_buf->ops->map_dma_buf == drm_gem_map_dma_buf &&
	    !obj->funcs->get_sg_table)
		return -ENOSYS;

	return drm_gem_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);

/**
 * drm_gem_map_detach - dma_buf detach implementation for GEM
 * @dma_buf: buffer to detach from
 * @attach: attachment to be detached
 *
 * Calls &drm_gem_object_funcs.unpin for device specific handling.  Cleans up
 * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
 * &dma_buf_ops.detach callback.
 */
void drm_gem_map_detach(struct dma_buf *dma_buf,
			struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);

/**
 * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
 * @attach: attachment whose scatterlist is to be returned
 * @dir: direction of DMA transfer
 *
 * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
 * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
 * with drm_gem_unmap_dma_buf().
 *
 * Returns: the sg_table containing the scatterlist to be returned; returns
 * ERR_PTR on error. May return -EINTR if it is interrupted by a signal.
 */
struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
				     enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;
	int ret;

	if (WARN_ON(dir == DMA_NONE))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!obj->funcs->get_sg_table))
		return ERR_PTR(-ENOSYS);

	sgt = obj->funcs->get_sg_table(obj);
	if (IS_ERR(sgt))
		return sgt;

	ret = dma_map_sgtable(attach->dev, sgt, dir,
			      DMA_ATTR_SKIP_CPU_SYNC);
	if (ret) {
		sg_free_table(sgt);
		kfree(sgt);
		sgt = ERR_PTR(ret);
	}

	return sgt;
}
EXPORT_SYMBOL(drm_gem_map_dma_buf);

/**
 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
 * @attach: attachment to unmap buffer from
 * @sgt: scatterlist info of the buffer to unmap
 * @dir: direction of DMA transfer
 *
 * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
 */
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
			   struct sg_table *sgt,
			   enum dma_data_direction dir)
{
	if (!sgt)
		return;

	dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}
EXPORT_SYMBOL(drm_gem_unmap_dma_buf);

/**
 * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @map: the virtual address of the buffer
 *
 * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
 * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
 * The kernel virtual address is returned in map.
 *
 * Returns 0 on success or a negative errno code otherwise.
 */
int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return drm_gem_vmap(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);

/**
 * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
 * @dma_buf: buffer to be unmapped
 * @map: the virtual address of the buffer
 *
 * Releases a kernel virtual mapping. This can be used as the
 * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for
 * device specific handling.
 */
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_vunmap(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);

/**
 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
 * @obj: GEM object
 * @vma: Virtual address range
 *
 * This function sets up a userspace mapping for PRIME exported buffers using
 * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
 * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
 * called to set up the mapping.
 */
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_file *priv;
	struct file *fil;
	int ret;

	/* Add the fake offset */
	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);

	if (obj->funcs && obj->funcs->mmap) {
		vma->vm_ops = obj->funcs->vm_ops;

		drm_gem_object_get(obj);
		ret = obj->funcs->mmap(obj, vma);
		if (ret) {
			drm_gem_object_put(obj);
			return ret;
		}
		vma->vm_private_data = obj;
		return 0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
	if (!priv || !fil) {
		ret = -ENOMEM;
		goto out;
	}

	/* Used by drm_gem_mmap() to lookup the GEM object */
	priv->minor = obj->dev->primary;
	fil->private_data = priv;

	ret = drm_vma_node_allow(&obj->vma_node, priv);
	if (ret)
		goto out;

	ret = obj->dev->driver->fops->mmap(fil, vma);

	drm_vma_node_revoke(&obj->vma_node, priv);
out:
	kfree(priv);
	kfree(fil);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_mmap);

/**
 * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @vma: virtual address range
 *
 * Provides memory mapping for the buffer. This can be used as the
 * &dma_buf_ops.mmap callback. It just forwards to drm_gem_prime_mmap().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return drm_gem_prime_mmap(obj, vma);
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.cache_sgt_mapping = true,
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @dev: DRM device
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages. The driver is
 * responsible for mapping the pages into the importer's address space for use
 * with dma_buf itself.
 *
 * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
 */
struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
				       struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg;
	size_t max_segment = 0;
	int err;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	if (dev)
		max_segment = dma_max_mapping_size(dev->dev);
	if (max_segment == 0)
		max_segment = UINT_MAX;
	err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
						(unsigned long)nr_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (err) {
		kfree(sg);
		sg = ERR_PTR(err);
	}
	return sg;
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
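
/*
 * A sketch of a &drm_gem_object_funcs.get_sg_table implementation built on
 * the helper above, assuming a hypothetical driver object that keeps its
 * backing pages in "bo->pages":
 *
 *	static struct sg_table *foo_gem_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = to_foo_bo(obj);
 *
 *		return drm_prime_pages_to_sg(obj->dev, bo->pages,
 *					     obj->size >> PAGE_SHIFT);
 *	}
 */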

/**
 * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
 * @sgt: sg_table describing the buffer to check
 *
 * This helper calculates the contiguous size in the DMA address space
 * of the buffer described by the provided sg_table.
 *
 * This is useful for implementing
 * &drm_driver.gem_prime_import_sg_table.
 */
unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
{
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	struct scatterlist *sg;
	unsigned long size = 0;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		unsigned int len = sg_dma_len(sg);

		if (!len)
			break;
		if (sg_dma_address(sg) != expected)
			break;
		expected += len;
		size += len;
	}
	return size;
}
EXPORT_SYMBOL(drm_prime_get_contiguous_size);
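
/*
 * A typical use, sketched here with hypothetical "foo_*" names, is rejecting
 * non-contiguous buffers in a &drm_driver.gem_prime_import_sg_table
 * implementation for hardware that needs contiguous memory:
 *
 *	struct drm_gem_object *
 *	foo_gem_prime_import_sg_table(struct drm_device *dev,
 *				      struct dma_buf_attachment *attach,
 *				      struct sg_table *sgt)
 *	{
 *		if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
 *			return ERR_PTR(-EINVAL);
 *		...
 *	}
 */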

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the &drm_gem_object_funcs.export callback for
 * GEM drivers using the PRIME helpers. It is used as the default in
 * drm_gem_prime_handle_to_fd().
 */
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
				     int flags)
{
	struct drm_device *dev = obj->dev;
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
		.resv = obj->resv,
	};

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to dma_buf attach
 *
 * This is the core of drm_gem_prime_import(). It's designed to be called by
 * drivers that want to use a different device structure than &drm_device.dev
 * for attaching via dma_buf. This function calls
 * &drm_driver.gem_prime_import_sg_table internally.
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
					    struct dma_buf *dma_buf,
					    struct device *attach_dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, attach_dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;
	obj->resv = dma_buf->resv;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);
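
/*
 * A sketch of a &drm_driver.gem_prime_import implementation for a driver
 * whose DMA is performed by a device other than &drm_device.dev; the "foo_*"
 * names are hypothetical:
 *
 *	static struct drm_gem_object *
 *	foo_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
 *	{
 *		struct foo_device *foo = to_foo_device(dev);
 *
 *		return drm_gem_prime_import_dev(dev, dma_buf, foo->dma_dev);
 *	}
 */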

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import functions for GEM drivers
 * using the PRIME helpers. Drivers can use this as their
 * &drm_driver.gem_prime_import implementation. It is used as the default
 * implementation in drm_gem_prime_fd_to_handle().
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_prime_sg_to_page_array - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the pages in
 * @max_entries: size of the passed-in array
 *
 * Exports an sg table into an array of pages.
 *
 * This function is deprecated and its use is strongly discouraged.
 * The page array is only useful for page faults and those can corrupt fields
 * in the struct page if they are not handled by the exporting driver.
 */
int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt,
					    struct page **pages,
					    int max_entries)
{
	struct sg_page_iter page_iter;
	struct page **p = pages;

	for_each_sgtable_page(sgt, &page_iter, 0) {
		if (WARN_ON(p - pages >= max_entries))
			return -1;
		*p++ = sg_page_iter_page(&page_iter);
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_array);

/**
 * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array
 * @sgt: scatter-gather table to convert
 * @addrs: array to store the dma bus address of each page
 * @max_entries: size of the passed-in array
 *
 * Exports an sg table into an array of addresses.
 *
 * Drivers should use this in their &drm_driver.gem_prime_import_sg_table
 * implementation.
 */
int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
				   int max_entries)
{
	struct sg_dma_page_iter dma_iter;
	dma_addr_t *a = addrs;

	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
		if (WARN_ON(a - addrs >= max_entries))
			return -1;
		*a++ = sg_page_iter_dma_address(&dma_iter);
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array);
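
/*
 * A sketch of typical use from a (hypothetical) gem_prime_import_sg_table
 * implementation that needs the per-page DMA addresses; error handling is
 * abbreviated:
 *
 *	unsigned int npages = attach->dmabuf->size >> PAGE_SHIFT;
 *	dma_addr_t *addrs;
 *	int ret;
 *
 *	addrs = kvmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
 *	if (!addrs)
 *		return ERR_PTR(-ENOMEM);
 *
 *	ret = drm_prime_sg_to_dma_addr_array(sgt, addrs, npages);
 *	if (ret) {
 *		kvfree(addrs);
 *		return ERR_PTR(-EINVAL);
 *	}
 */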

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment_unlocked(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
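
/*
 * A sketch of the matching &drm_gem_object_funcs.free hook for a driver that
 * imports with the helpers above; the "foo_*" names and the cached "bo->sgt"
 * are hypothetical:
 *
 *	static void foo_gem_free(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = to_foo_bo(obj);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, bo->sgt);
 *
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */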