1  /* SPDX-License-Identifier: GPL-2.0-only */
2  /*
3   * Header file for dma buffer sharing framework.
4   *
5   * Copyright(C) 2011 Linaro Limited. All rights reserved.
6   * Author: Sumit Semwal <sumit.semwal@ti.com>
7   *
8   * Many thanks to linaro-mm-sig list, and especially
9   * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
10   * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
11   * refining of this idea.
12   */
13  #ifndef __DMA_BUF_H__
14  #define __DMA_BUF_H__
15  
16  #include <linux/iosys-map.h>
17  #include <linux/file.h>
18  #include <linux/err.h>
19  #include <linux/scatterlist.h>
20  #include <linux/list.h>
21  #include <linux/dma-mapping.h>
22  #include <linux/fs.h>
23  #include <linux/dma-fence.h>
24  #include <linux/wait.h>
25  
26  struct device;
27  struct dma_buf;
28  struct dma_buf_attachment;
29  
30  /**
31   * struct dma_buf_ops - operations possible on struct dma_buf
32   * @vmap: [optional] creates a virtual mapping for the buffer into kernel
33   *	  address space. Same restrictions as for vmap and friends apply.
34   * @vunmap: [optional] unmaps a vmap from the buffer
35   */
36  struct dma_buf_ops {
37  	/**
38  	  * @cache_sgt_mapping:
39  	  *
40  	  * If true the framework will cache the first mapping made for each
41  	  * attachment. This avoids creating mappings for attachments multiple
42  	  * times.
43  	  */
44  	bool cache_sgt_mapping;
45  
46  	/**
47  	 * @attach:
48  	 *
49  	 * This is called from dma_buf_attach() to make sure that a given
50  	 * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
51  	 * which support buffer objects in special locations like VRAM or
52  	 * device-specific carveout areas should check whether the buffer could
53  	 * be moved to system memory (or directly accessed by the provided
54  	 * device), and otherwise need to fail the attach operation.
55  	 *
56  	 * The exporter should also in general check whether the current
57  	 * allocation fulfills the DMA constraints of the new device. If this
58  	 * is not the case, and the allocation cannot be moved, it should also
59  	 * fail the attach operation.
60  	 *
61  	 * Any exporter-private housekeeping data can be stored in the
62  	 * &dma_buf_attachment.priv pointer.
63  	 *
64  	 * This callback is optional.
65  	 *
66  	 * Returns:
67  	 *
68  	 * 0 on success, negative error code on failure. It might return -EBUSY
69  	 * to signal that backing storage is already allocated and incompatible
70  	 * with the requirements of the requesting device.
71  	 */
72  	int (*attach)(struct dma_buf *, struct dma_buf_attachment *);
73  
74  	/**
75  	 * @detach:
76  	 *
77  	 * This is called by dma_buf_detach() to release a &dma_buf_attachment.
78  	 * Provided so that exporters can clean up any housekeeping for an
79  	 * &dma_buf_attachment.
80  	 *
81  	 * This callback is optional.
82  	 */
83  	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
84  
85  	/**
86  	 * @pin:
87  	 *
88  	 * This is called by dma_buf_pin() and lets the exporter know that the
89  	 * DMA-buf can't be moved any more. Ideally, the exporter should
90  	 * pin the buffer so that it is generally accessible by all
91  	 * devices.
92  	 *
93  	 * This is called with the &dmabuf.resv object locked and is mutually
94  	 * exclusive with @cache_sgt_mapping.
95  	 *
96  	 * This is called automatically for non-dynamic importers from
97  	 * dma_buf_attach().
98  	 *
99  	 * Note that, similar to non-dynamic exporters in their @map_dma_buf
100  	 * callback, the driver must guarantee that the memory is available for
101  	 * use and cleared of any old data by the time this function returns.
102  	 * Drivers which pipeline their buffer moves internally must wait for
103  	 * all moves and clears to complete.
104  	 *
105  	 * Returns:
106  	 *
107  	 * 0 on success, negative error code on failure.
108  	 */
109  	int (*pin)(struct dma_buf_attachment *attach);
110  
111  	/**
112  	 * @unpin:
113  	 *
114  	 * This is called by dma_buf_unpin() and lets the exporter know that the
115  	 * DMA-buf can be moved again.
116  	 *
117  	 * This is called with the dmabuf->resv object locked and is mutually
118  	 * exclusive with @cache_sgt_mapping.
119  	 *
120  	 * This callback is optional.
121  	 */
122  	void (*unpin)(struct dma_buf_attachment *attach);
123  
124  	/**
125  	 * @map_dma_buf:
126  	 *
127  	 * This is called by dma_buf_map_attachment() and is used to map a
128  	 * shared &dma_buf into device address space, and it is mandatory. It
129  	 * can only be called if @attach has been called successfully.
130  	 *
131  	 * This call may sleep, e.g. when the backing storage first needs to be
132  	 * allocated, or moved to a location suitable for all currently attached
133  	 * devices.
134  	 *
135  	 * Note that any specific buffer attributes required for this function
136  	 * should get added to device_dma_parameters accessible via
137  	 * &device.dma_parms from the &dma_buf_attachment. The @attach callback
138  	 * should also check these constraints.
139  	 *
140  	 * If this is being called for the first time, the exporter can now
141  	 * choose to scan through the list of attachments for this buffer,
142  	 * collate the requirements of the attached devices, and choose an
143  	 * appropriate backing storage for the buffer.
144  	 *
145  	 * Based on enum dma_data_direction, it might be possible to have
146  	 * multiple users accessing at the same time (for reading, maybe), or
147  	 * any other kind of sharing that the exporter might wish to make
148  	 * available to buffer-users.
149  	 *
150  	 * This is always called with the dmabuf->resv object locked.
152  	 *
153  	 * Note that for non-dynamic exporters the driver must guarantee that
154  	 * the memory is available for use and cleared of any old data by
155  	 * the time this function returns.  Drivers which pipeline their buffer
156  	 * moves internally must wait for all moves and clears to complete.
157  	 * Dynamic exporters do not need to follow this rule: For non-dynamic
158  	 * importers the buffer is already pinned through @pin, which has the
159  	 * same requirements. Dynamic importers, however, are required to obey the
160  	 * dma_resv fences.
161  	 *
162  	 * Returns:
163  	 *
164  	 * A &sg_table scatter list of the backing storage of the DMA buffer,
165  	 * already mapped into the device address space of the &device attached
166  	 * with the provided &dma_buf_attachment. The addresses and lengths in
167  	 * the scatter list are PAGE_SIZE aligned.
168  	 *
169  	 * On failure, returns a negative error value wrapped into a pointer.
170  	 * May also return -EINTR when a signal was received while being
171  	 * blocked.
172  	 *
173  	 * Note that exporters should not try to cache the scatter list, or
174  	 * return the same one for multiple calls. Caching is done either by the
175  	 * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
176  	 * of the scatter list is transferred to the caller, and returned by
177  	 * @unmap_dma_buf.
178  	 */
179  	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
180  					 enum dma_data_direction);
181  	/**
182  	 * @unmap_dma_buf:
183  	 *
184  	 * This is called by dma_buf_unmap_attachment() and should unmap and
185  	 * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
186  	 * For static dma_buf handling this might also unpin the backing
187  	 * storage if this is the last mapping of the DMA buffer.
188  	 */
189  	void (*unmap_dma_buf)(struct dma_buf_attachment *,
190  			      struct sg_table *,
191  			      enum dma_data_direction);
192  
193  	/* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
194  	 * if the call would block.
195  	 */
196  
197  	/**
198  	 * @release:
199  	 *
200  	 * Called after the last dma_buf_put() to release the &dma_buf. This
201  	 * callback is mandatory.
202  	 */
203  	void (*release)(struct dma_buf *);
204  
205  	/**
206  	 * @begin_cpu_access:
207  	 *
208  	 * This is called from dma_buf_begin_cpu_access() and allows the
209  	 * exporter to ensure that the memory is actually coherent for cpu
210  	 * access. The exporter also needs to ensure that cpu access is coherent
211  	 * for the access direction. The direction can be used by the exporter
212  	 * to optimize the cache flushing, i.e. access with a different
213  	 * direction (read instead of write) might return stale or even bogus
214  	 * data (e.g. when the exporter needs to copy the data to temporary
215  	 * storage).
216  	 *
217  	 * Note that this is both called through the DMA_BUF_IOCTL_SYNC IOCTL
218  	 * command for userspace mappings established through @mmap, and also
219  	 * for kernel mappings established with @vmap.
220  	 *
221  	 * This callback is optional.
222  	 *
223  	 * Returns:
224  	 *
225  	 * 0 on success or a negative error code on failure. This can for
226  	 * example fail when the backing storage can't be allocated. Can also
227  	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
228  	 * needs to be restarted.
229  	 */
230  	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
231  
232  	/**
233  	 * @end_cpu_access:
234  	 *
235  	 * This is called from dma_buf_end_cpu_access() when the importer is
236  	 * done accessing the buffer with the CPU. The exporter can use this to
237  	 * flush caches and undo anything else done in @begin_cpu_access.
238  	 *
239  	 * This callback is optional.
240  	 *
241  	 * Returns:
242  	 *
243  	 * 0 on success or a negative error code on failure. Can return
244  	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
245  	 * to be restarted.
246  	 */
247  	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
248  
249  	/**
250  	 * @mmap:
251  	 *
252  	 * This callback is used by the dma_buf_mmap() function.
253  	 *
254  	 * Note that the mapping needs to be non-coherent; userspace is expected
255  	 * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
256  	 *
257  	 * Because dma-buf buffers have invariant size over their lifetime, the
258  	 * dma-buf core checks whether a vma is too large and rejects such
259  	 * mappings. The exporter hence does not need to duplicate this check.
261  	 *
262  	 * If an exporter needs to manually flush caches and hence needs to fake
263  	 * coherency for mmap support, it needs to be able to zap all the ptes
264  	 * pointing at the backing storage. Now linux mm needs a struct
265  	 * address_space associated with the struct file stored in vma->vm_file
266  	 * to do that with the function unmap_mapping_range. But the dma_buf
267  	 * framework only backs every dma_buf fd with the anon_file struct file,
268  	 * i.e. all dma_bufs share the same file.
269  	 *
270  	 * Hence exporters need to setup their own file (and address_space)
271  	 * association by setting vma->vm_file and adjusting vma->vm_pgoff in
272  	 * the dma_buf mmap callback. In the specific case of a gem driver the
273  	 * exporter could use the shmem file already provided by gem (and set
274  	 * vm_pgoff = 0). Exporters can then zap ptes by unmapping the
275  	 * corresponding range of the struct address_space associated with their
276  	 * own file.
277  	 *
278  	 * This callback is optional.
279  	 *
280  	 * Returns:
281  	 *
282  	 * 0 on success or a negative error code on failure.
283  	 */
284  	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
285  
286  	int (*vmap)(struct dma_buf *dmabuf, struct iosys_map *map);
287  	void (*vunmap)(struct dma_buf *dmabuf, struct iosys_map *map);
288  };
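
/*
 * Example: a minimal sketch (not part of this API) of how an exporter whose
 * buffer is backed by an array of pages might implement the @map_dma_buf and
 * @unmap_dma_buf callbacks above. struct my_buffer, my_map(), my_unmap(),
 * my_release() and my_buf_ops are hypothetical names used only for
 * illustration.
 *
 *	struct my_buffer {
 *		struct page **pages;
 *		unsigned int page_count;
 *	};
 *
 *	static struct sg_table *my_map(struct dma_buf_attachment *attach,
 *				       enum dma_data_direction dir)
 *	{
 *		struct my_buffer *buf = attach->dmabuf->priv;
 *		struct sg_table *sgt;
 *		int ret;
 *
 *		sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 *		if (!sgt)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = sg_alloc_table_from_pages(sgt, buf->pages, buf->page_count,
 *						0,
 *						(unsigned long)buf->page_count << PAGE_SHIFT,
 *						GFP_KERNEL);
 *		if (ret)
 *			goto free_sgt;
 *
 *		// map the scatterlist into the importing device's address space
 *		ret = dma_map_sgtable(attach->dev, sgt, dir, 0);
 *		if (ret)
 *			goto free_table;
 *
 *		return sgt;
 *
 *	free_table:
 *		sg_free_table(sgt);
 *	free_sgt:
 *		kfree(sgt);
 *		return ERR_PTR(ret);
 *	}
 *
 *	static void my_unmap(struct dma_buf_attachment *attach,
 *			     struct sg_table *sgt,
 *			     enum dma_data_direction dir)
 *	{
 *		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
 *		sg_free_table(sgt);
 *		kfree(sgt);
 *	}
 *
 *	static const struct dma_buf_ops my_buf_ops = {
 *		.map_dma_buf = my_map,
 *		.unmap_dma_buf = my_unmap,
 *		.release = my_release,
 *	};
 */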
289  
290  /**
291   * struct dma_buf - shared buffer object
292   *
293   * This represents a shared buffer, created by calling dma_buf_export(). The
294   * userspace representation is a normal file descriptor, which can be created by
295   * calling dma_buf_fd().
296   *
297   * Shared dma buffers are reference counted using dma_buf_put() and
298   * get_dma_buf().
299   *
300   * Device DMA access is handled by the separate &struct dma_buf_attachment.
301   */
302  struct dma_buf {
303  	/**
304  	 * @size:
305  	 *
306  	 * Size of the buffer; invariant over the lifetime of the buffer.
307  	 */
308  	size_t size;
309  
310  	/**
311  	 * @file:
312  	 *
313  	 * File pointer used for sharing buffers across processes, and for
314  	 * refcounting. See dma_buf_get() and dma_buf_put().
315  	 */
316  	struct file *file;
317  
318  	/**
319  	 * @attachments:
320  	 *
321  	 * List of dma_buf_attachment that denotes all devices attached,
322  	 * protected by &dma_resv lock @resv.
323  	 */
324  	struct list_head attachments;
325  
326  	/** @ops: dma_buf_ops associated with this buffer object. */
327  	const struct dma_buf_ops *ops;
328  
329  	/**
330  	 * @vmapping_counter:
331  	 *
332  	 * Used internally to refcnt the vmaps returned by dma_buf_vmap().
333  	 * Protected by @lock.
334  	 */
335  	unsigned vmapping_counter;
336  
337  	/**
338  	 * @vmap_ptr:
339  	 * The current vmap ptr if @vmapping_counter > 0. Protected by @lock.
340  	 */
341  	struct iosys_map vmap_ptr;
342  
343  	/**
344  	 * @exp_name:
345  	 *
346  	 * Name of the exporter; useful for debugging. Must not be NULL.
347  	 */
348  	const char *exp_name;
349  
350  	/**
351  	 * @name:
352  	 *
353  	 * Userspace-provided name. Default value is NULL. If not NULL,
354  	 * the length cannot exceed DMA_BUF_NAME_LEN, including the
355  	 * terminating NUL character. Useful for accounting and debugging.
356  	 * Read/Write accesses are protected by @name_lock.
357  	 *
358  	 * See the IOCTLs DMA_BUF_SET_NAME or DMA_BUF_SET_NAME_A/B
359  	 */
360  	const char *name;
361  
362  	/** @name_lock: Spinlock protecting read access to @name. */
363  	spinlock_t name_lock;
364  
365  	/**
366  	 * @owner:
367  	 *
368  	 * Pointer to exporter module; used for refcounting when exporter is a
369  	 * kernel module.
370  	 */
371  	struct module *owner;
372  
373  #if IS_ENABLED(CONFIG_DEBUG_FS)
374  	/** @list_node: node for dma_buf accounting and debugging. */
375  	struct list_head list_node;
376  #endif
377  
378  	/** @priv: exporter specific private data for this buffer object. */
379  	void *priv;
380  
381  	/**
382  	 * @resv:
383  	 *
384  	 * Reservation object linked to this dma-buf.
385  	 *
386  	 * IMPLICIT SYNCHRONIZATION RULES:
387  	 *
388  	 * Drivers which support implicit synchronization of buffer access as
389  	 * e.g. exposed in `Implicit Fence Poll Support`_ must follow the
390  	 * below rules.
391  	 *
392  	 * - Drivers must add a read fence through dma_resv_add_fence() with the
393  	 *   DMA_RESV_USAGE_READ flag for anything the userspace API considers a
394  	 *   read access. This highly depends upon the API and window system.
395  	 *
396  	 * - Similarly drivers must add a write fence through
397  	 *   dma_resv_add_fence() with the DMA_RESV_USAGE_WRITE flag for
398  	 *   anything the userspace API considers write access.
399  	 *
400  	 * - Drivers may just always add a write fence, since that only
401  	 *   causes unnecessary synchronization, but no correctness issues.
402  	 *
403  	 * - Some drivers only expose a synchronous userspace API with no
404  	 *   pipelining across drivers. These do not set any fences for their
405  	 *   access. An example here is v4l.
406  	 *
407  	 * - Drivers should use dma_resv_usage_rw() when retrieving fences as
408  	 *   dependency for implicit synchronization.
409  	 *
410  	 * DYNAMIC IMPORTER RULES:
411  	 *
412  	 * Dynamic importers, see dma_buf_attachment_is_dynamic(), have
413  	 * additional constraints on how they set up fences:
414  	 *
415  	 * - Dynamic importers must obey the write fences and wait for them to
416  	 *   signal before allowing access to the buffer's underlying storage
417  	 *   through the device.
418  	 *
419  	 * - Dynamic importers should set fences for any access that they can't
420  	 *   disable immediately from their &dma_buf_attach_ops.move_notify
421  	 *   callback.
422  	 *
423  	 * IMPORTANT:
424  	 *
425  	 * All drivers and memory management related functions must obey the
426  	 * struct dma_resv rules, specifically the rules for updating and
427  	 * obeying fences. See enum dma_resv_usage for further descriptions.
428  	 */
429  	struct dma_resv *resv;
430  
431  	/** @poll: for userspace poll support */
432  	wait_queue_head_t poll;
433  
434  	/** @cb_in: for userspace poll support */
435  	/** @cb_out: for userspace poll support */
436  	struct dma_buf_poll_cb_t {
437  		struct dma_fence_cb cb;
438  		wait_queue_head_t *poll;
439  
440  		__poll_t active;
441  	} cb_in, cb_out;
442  #ifdef CONFIG_DMABUF_SYSFS_STATS
443  	/**
444  	 * @sysfs_entry:
445  	 *
446  	 * For exposing information about this buffer in sysfs. See also
447  	 * `DMA-BUF statistics`_ for the uapi this enables.
448  	 */
449  	struct dma_buf_sysfs_entry {
450  		struct kobject kobj;
451  		struct dma_buf *dmabuf;
452  	} *sysfs_entry;
453  #endif
454  };
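
/*
 * Example: a minimal sketch of the implicit synchronization bookkeeping
 * described for &dma_buf.resv above. Here "fence" is assumed to be a
 * &dma_fence that signals when the device's write access completes; the
 * surrounding driver code is hypothetical.
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	ret = dma_resv_reserve_fences(dmabuf->resv, 1);
 *	if (!ret)
 *		dma_resv_add_fence(dmabuf->resv, fence, DMA_RESV_USAGE_WRITE);
 *	dma_resv_unlock(dmabuf->resv);
 */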
455  
456  /**
457   * struct dma_buf_attach_ops - importer operations for an attachment
458   *
459   * Attachment operations implemented by the importer.
460   */
461  struct dma_buf_attach_ops {
462  	/**
463  	 * @allow_peer2peer:
464  	 *
465  	 * If this is set to true the importer must be able to handle peer
466  	 * resources without struct pages.
467  	 */
468  	bool allow_peer2peer;
469  
470  	/**
471  	 * @move_notify: [optional] notification that the DMA-buf is moving
472  	 *
473  	 * If this callback is provided the framework can avoid pinning the
474  	 * backing store while mappings exist.
475  	 *
476  	 * This callback is called with the lock of the reservation object
477  	 * associated with the dma_buf held and the mapping function must be
478  	 * called with this lock held as well. This makes sure that no mapping
479  	 * is created concurrently with an ongoing move operation.
480  	 *
481  	 * Mappings stay valid and are not directly affected by this callback.
482  	 * But the DMA-buf can now be in a different physical location, so all
483  	 * mappings should be destroyed and re-created as soon as possible.
484  	 *
485  	 * New mappings can be created after this callback returns, and will
486  	 * point to the new location of the DMA-buf.
487  	 */
488  	void (*move_notify)(struct dma_buf_attachment *attach);
489  };
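
/*
 * Example: a minimal sketch of a dynamic importer's attach ops and how they
 * would be passed to dma_buf_dynamic_attach(). struct my_importer and
 * my_invalidate_mappings() are hypothetical; a real driver would tear down
 * its cached mapping here (or schedule that work, respecting the dma_resv
 * fences) and re-create the mapping on next use.
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		struct my_importer *imp = attach->importer_priv;
 *
 *		// called with attach->dmabuf->resv held
 *		my_invalidate_mappings(imp);
 *	}
 *
 *	static const struct dma_buf_attach_ops my_attach_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_attach_ops, imp);
 */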
490  
491  /**
492   * struct dma_buf_attachment - holds device-buffer attachment data
493   * @dmabuf: buffer for this attachment.
494   * @dev: device attached to the buffer.
495   * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
496   * @sgt: cached mapping.
497   * @dir: direction of cached mapping.
498   * @peer2peer: true if the importer can handle peer resources without pages.
499   * @priv: exporter specific attachment data.
500   * @importer_ops: importer operations for this attachment; if provided,
501   * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
502   * @importer_priv: importer specific attachment data.
503   *
504   * This structure holds the attachment information between the dma_buf buffer
505   * and its user device(s). The list contains one attachment struct per device
506   * attached to the buffer.
507   *
508   * An attachment is created by calling dma_buf_attach(), and released again by
509   * calling dma_buf_detach(). The DMA mapping itself needed to initiate a
510   * transfer is created by dma_buf_map_attachment() and freed again by calling
511   * dma_buf_unmap_attachment().
512   */
513  struct dma_buf_attachment {
514  	struct dma_buf *dmabuf;
515  	struct device *dev;
516  	struct list_head node;
517  	struct sg_table *sgt;
518  	enum dma_data_direction dir;
519  	bool peer2peer;
520  	const struct dma_buf_attach_ops *importer_ops;
521  	void *importer_priv;
522  	void *priv;
523  };
524  
525  /**
526   * struct dma_buf_export_info - holds information needed to export a dma_buf
527   * @exp_name:	name of the exporter - useful for debugging.
528   * @owner:	pointer to exporter module - used for refcounting kernel module
529   * @ops:	Attach allocator-defined dma buf ops to the new buffer
530   * @size:	Size of the buffer - invariant over the lifetime of the buffer
531   * @flags:	mode flags for the file
532   * @resv:	reservation-object, NULL to allocate default one
533   * @priv:	Attach private data of allocator to this buffer
534   *
535   * This structure holds the information required to export the buffer. Used
536   * with dma_buf_export() only.
537   */
538  struct dma_buf_export_info {
539  	const char *exp_name;
540  	struct module *owner;
541  	const struct dma_buf_ops *ops;
542  	size_t size;
543  	int flags;
544  	struct dma_resv *resv;
545  	void *priv;
546  };
547  
548  /**
549   * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
550   * @name: export-info name
551   *
552   * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info,
553   * zeroes it out and pre-populates exp_name in it.
554   */
555  #define DEFINE_DMA_BUF_EXPORT_INFO(name)	\
556  	struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
557  					 .owner = THIS_MODULE }
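
/*
 * Example: a minimal sketch of exporting a buffer with the helper above.
 * my_buf_ops and my_buf are hypothetical exporter objects; error handling of
 * dma_buf_fd() is omitted for brevity.
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_buf_ops;
 *	exp_info.size = my_buf->size;
 *	exp_info.flags = O_RDWR | O_CLOEXEC;
 *	exp_info.priv = my_buf;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 */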
558  
559  /**
560   * get_dma_buf - convenience wrapper for get_file.
561   * @dmabuf:	[in]	pointer to dma_buf
562   *
563   * Increments the reference count on the dma-buf, needed by drivers that
564   * create additional references to the dmabuf on the kernel side. For
565   * example, an exporter that needs to keep a dmabuf pointer around so that
566   * subsequent exports don't create a new dmabuf.
567   */
568  static inline void get_dma_buf(struct dma_buf *dmabuf)
569  {
570  	get_file(dmabuf->file);
571  }
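
/*
 * Example: a hypothetical exporter keeping a long-term kernel reference to a
 * dma_buf it handed out, as described above. my_dev is a made-up driver
 * structure; the reference is dropped again with dma_buf_put().
 *
 *	get_dma_buf(dmabuf);
 *	my_dev->exported_buf = dmabuf;
 *	...
 *	dma_buf_put(my_dev->exported_buf);
 */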
572  
573  /**
574   * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
575   * @dmabuf: the DMA-buf to check
576   *
577   * Returns true if a DMA-buf exporter wants to be called with the dma_resv
578   * lock held for the map/unmap callbacks, false if it doesn't want to be
579   * called with the lock held.
580   */
581  static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
582  {
583  	return !!dmabuf->ops->pin;
584  }
585  
586  /**
587   * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
588   * mappings
589   * @attach: the DMA-buf attachment to check
590   *
591   * Returns true if a DMA-buf importer wants to call the map/unmap functions with
592   * the dma_resv lock held.
593   */
594  static inline bool
595  dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
596  {
597  	return !!attach->importer_ops;
598  }
599  
600  struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
601  					  struct device *dev);
602  struct dma_buf_attachment *
603  dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
604  		       const struct dma_buf_attach_ops *importer_ops,
605  		       void *importer_priv);
606  void dma_buf_detach(struct dma_buf *dmabuf,
607  		    struct dma_buf_attachment *attach);
608  int dma_buf_pin(struct dma_buf_attachment *attach);
609  void dma_buf_unpin(struct dma_buf_attachment *attach);
610  
611  struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
612  
613  int dma_buf_fd(struct dma_buf *dmabuf, int flags);
614  struct dma_buf *dma_buf_get(int fd);
615  void dma_buf_put(struct dma_buf *dmabuf);
616  
617  struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
618  					enum dma_data_direction);
619  void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
620  				enum dma_data_direction);
621  void dma_buf_move_notify(struct dma_buf *dma_buf);
622  int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
623  			     enum dma_data_direction dir);
624  int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
625  			   enum dma_data_direction dir);
626  struct sg_table *
627  dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
628  				enum dma_data_direction direction);
629  void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
630  				       struct sg_table *sg_table,
631  				       enum dma_data_direction direction);
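
/*
 * Example: a minimal sketch of the import side using the declarations above,
 * as a non-dynamic importer would typically do it. fd and my_dev are
 * hypothetical, and the device programming step is elided.
 *
 *	struct dma_buf *dmabuf;
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	dmabuf = dma_buf_get(fd);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	attach = dma_buf_attach(dmabuf, my_dev);
 *	if (IS_ERR(attach)) {
 *		dma_buf_put(dmabuf);
 *		return PTR_ERR(attach);
 *	}
 *
 *	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt)) {
 *		dma_buf_detach(dmabuf, attach);
 *		dma_buf_put(dmabuf);
 *		return PTR_ERR(sgt);
 *	}
 *
 *	// ... program the device using the DMA addresses in sgt ...
 *
 *	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */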
632  
633  int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
634  		 unsigned long);
635  int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
636  void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
637  int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
638  void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
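
/*
 * Example: a minimal sketch of bracketed kernel CPU access using the vmap and
 * CPU-access helpers declared above. The buffer is assumed to have been
 * imported already; error unwinding is abbreviated.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = dma_buf_vmap_unlocked(dmabuf, &map);
 *	if (ret)
 *		return ret;
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (!ret) {
 *		// read the buffer contents through map.vaddr / map.vaddr_iomem
 *		ret = dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	}
 *
 *	dma_buf_vunmap_unlocked(dmabuf, &map);
 *	return ret;
 */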
639  #endif /* __DMA_BUF_H__ */