Lines matching "scatter-gather" in drivers/gpu/drm/drm_gem_shmem_helper.c
1 // SPDX-License-Identifier: GPL-2.0
6 #include <linux/dma-buf.h>
60 if (dev->driver->gem_create_object) { in __drm_gem_shmem_create()
61 obj = dev->driver->gem_create_object(dev, size); in __drm_gem_shmem_create()
68 return ERR_PTR(-ENOMEM); in __drm_gem_shmem_create()
69 obj = &shmem->base; in __drm_gem_shmem_create()
72 if (!obj->funcs) in __drm_gem_shmem_create()
73 obj->funcs = &drm_gem_shmem_funcs; in __drm_gem_shmem_create()
77 shmem->map_wc = false; /* dma-buf mappings always use writecombine */ in __drm_gem_shmem_create()
90 INIT_LIST_HEAD(&shmem->madv_list); in __drm_gem_shmem_create()
100 mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); in __drm_gem_shmem_create()
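
Drivers that embed struct drm_gem_shmem_object in a larger object hook the allocation through the gem_create_object callback checked above. A minimal sketch with hypothetical foo_ names; leaving funcs NULL lets the helper install drm_gem_shmem_funcs:

struct foo_gem_object {
	struct drm_gem_shmem_object base;
	/* driver-private state would live here */
};

static struct drm_gem_object *foo_gem_create_object(struct drm_device *dev,
						    size_t size)
{
	struct foo_gem_object *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (!bo)
		return ERR_PTR(-ENOMEM);

	/* funcs deliberately left NULL: __drm_gem_shmem_create() fills
	 * in drm_gem_shmem_funcs, as the create path above shows. */
	return &bo->base.base;
}
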
114 * drm_gem_shmem_create - Allocate an object with the given size
121 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative error code on failure.
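
A minimal call-site sketch (dev and size are whatever the driver has at hand):

	struct drm_gem_shmem_object *shmem;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);
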
131 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
139 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_free()
141 if (obj->import_attach) { in drm_gem_shmem_free()
142 drm_prime_gem_destroy(obj, shmem->sgt); in drm_gem_shmem_free()
144 dma_resv_lock(shmem->base.resv, NULL); in drm_gem_shmem_free()
146 drm_WARN_ON(obj->dev, shmem->vmap_use_count); in drm_gem_shmem_free()
148 if (shmem->sgt) { in drm_gem_shmem_free()
149 dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); in drm_gem_shmem_free()
151 sg_free_table(shmem->sgt); in drm_gem_shmem_free()
152 kfree(shmem->sgt); in drm_gem_shmem_free()
154 if (shmem->pages) in drm_gem_shmem_free()
157 drm_WARN_ON(obj->dev, shmem->pages_use_count); in drm_gem_shmem_free()
159 dma_resv_unlock(shmem->base.resv); in drm_gem_shmem_free()
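
Drivers do not call drm_gem_shmem_free() directly: it runs as the obj->funcs->free callback once the last GEM reference goes away. A one-line sketch of the usual release site:

	drm_gem_object_put(&shmem->base);	/* last ref ends up in drm_gem_shmem_free() */
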
169 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_get_pages()
172 dma_resv_assert_held(shmem->base.resv); in drm_gem_shmem_get_pages()
174 if (shmem->pages_use_count++ > 0) in drm_gem_shmem_get_pages()
179 drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n", PTR_ERR(pages)); in drm_gem_shmem_get_pages()
181 shmem->pages_use_count = 0; in drm_gem_shmem_get_pages()
191 if (shmem->map_wc) in drm_gem_shmem_get_pages()
192 set_pages_array_wc(pages, obj->size >> PAGE_SHIFT); in drm_gem_shmem_get_pages()
195 shmem->pages = pages; in drm_gem_shmem_get_pages()
201 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
208 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_put_pages()
210 dma_resv_assert_held(shmem->base.resv); in drm_gem_shmem_put_pages()
212 if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count)) in drm_gem_shmem_put_pages()
215 if (--shmem->pages_use_count > 0) in drm_gem_shmem_put_pages()
219 if (shmem->map_wc) in drm_gem_shmem_put_pages()
220 set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT); in drm_gem_shmem_put_pages()
223 drm_gem_put_pages(obj, shmem->pages, in drm_gem_shmem_put_pages()
224 shmem->pages_mark_dirty_on_put, in drm_gem_shmem_put_pages()
225 shmem->pages_mark_accessed_on_put); in drm_gem_shmem_put_pages()
226 shmem->pages = NULL; in drm_gem_shmem_put_pages()
234 dma_resv_assert_held(shmem->base.resv); in drm_gem_shmem_pin_locked()
236 drm_WARN_ON(shmem->base.dev, shmem->base.import_attach); in drm_gem_shmem_pin_locked()
246 dma_resv_assert_held(shmem->base.resv); in drm_gem_shmem_unpin_locked()
253 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
264 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_pin()
267 drm_WARN_ON(obj->dev, obj->import_attach); in drm_gem_shmem_pin()
269 ret = dma_resv_lock_interruptible(shmem->base.resv, NULL); in drm_gem_shmem_pin()
273 dma_resv_unlock(shmem->base.resv); in drm_gem_shmem_pin()
280 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
288 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_unpin()
290 drm_WARN_ON(obj->dev, obj->import_attach); in drm_gem_shmem_unpin()
292 dma_resv_lock(shmem->base.resv, NULL); in drm_gem_shmem_unpin()
294 dma_resv_unlock(shmem->base.resv); in drm_gem_shmem_unpin()
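
The pin/unpin pair takes the reservation lock internally, so a driver can keep the backing pages resident around device access without managing the lock itself. A minimal sketch (not valid for imported buffers, per the WARNs above):

	int ret;

	ret = drm_gem_shmem_pin(shmem);		/* locks resv, pins the pages */
	if (ret)
		return ret;

	/* ... pages are guaranteed resident here, e.g. for device DMA ... */

	drm_gem_shmem_unpin(shmem);		/* locks resv, drops the pin */
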
299 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
306 * It hides the differences between dma-buf imported and natively allocated objects.
316 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_vmap()
319 if (obj->import_attach) { in drm_gem_shmem_vmap()
320 ret = dma_buf_vmap(obj->import_attach->dmabuf, map); in drm_gem_shmem_vmap()
322 if (drm_WARN_ON(obj->dev, map->is_iomem)) { in drm_gem_shmem_vmap()
323 dma_buf_vunmap(obj->import_attach->dmabuf, map); in drm_gem_shmem_vmap()
324 return -EIO; in drm_gem_shmem_vmap()
330 dma_resv_assert_held(shmem->base.resv); in drm_gem_shmem_vmap()
332 if (shmem->vmap_use_count++ > 0) { in drm_gem_shmem_vmap()
333 iosys_map_set_vaddr(map, shmem->vaddr); in drm_gem_shmem_vmap()
341 if (shmem->map_wc) in drm_gem_shmem_vmap()
343 shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, prot); in drm_gem_shmem_vmap()
345 if (!shmem->vaddr) in drm_gem_shmem_vmap()
346 ret = -ENOMEM; in drm_gem_shmem_vmap()
348 iosys_map_set_vaddr(map, shmem->vaddr); in drm_gem_shmem_vmap()
352 drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret); in drm_gem_shmem_vmap()
359 if (!obj->import_attach) in drm_gem_shmem_vmap()
362 shmem->vmap_use_count = 0; in drm_gem_shmem_vmap()
369 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
377 * This function hides the differences between dma-buf imported and natively allocated objects.
383 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_vunmap()
385 if (obj->import_attach) { in drm_gem_shmem_vunmap()
386 dma_buf_vunmap(obj->import_attach->dmabuf, map); in drm_gem_shmem_vunmap()
388 dma_resv_assert_held(shmem->base.resv); in drm_gem_shmem_vunmap()
390 if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count)) in drm_gem_shmem_vunmap()
393 if (--shmem->vmap_use_count > 0) in drm_gem_shmem_vunmap()
396 vunmap(shmem->vaddr); in drm_gem_shmem_vunmap()
400 shmem->vaddr = NULL; in drm_gem_shmem_vunmap()
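
Given the asserts above, the caller provides the reservation lock around vmap/vunmap on natively allocated objects (imported objects share the same resv, so locking it is consistent for both paths). A minimal sketch; data and len are illustrative:

	struct iosys_map map;
	int ret;

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_vmap(shmem, &map);
	dma_resv_unlock(shmem->base.resv);
	if (ret)
		return ret;

	iosys_map_memcpy_to(&map, 0, data, len);	/* CPU access */

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_vunmap(shmem, &map);
	dma_resv_unlock(shmem->base.resv);
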
420 ret = drm_gem_handle_create(file_priv, &shmem->base, handle); in drm_gem_shmem_create_with_handle()
421 /* drop reference from allocate - handle holds it now. */ in drm_gem_shmem_create_with_handle()
422 drm_gem_object_put(&shmem->base); in drm_gem_shmem_create_with_handle()
428 * Update madvise status, returns true if not purged, else false or -errno.
432 dma_resv_assert_held(shmem->base.resv); in drm_gem_shmem_madvise()
434 if (shmem->madv >= 0) in drm_gem_shmem_madvise()
435 shmem->madv = madv; in drm_gem_shmem_madvise()
437 madv = shmem->madv; in drm_gem_shmem_madvise()
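
A sketch of how a driver madvise ioctl might use this, where args_madv is a hypothetical userspace value (0 = will-need, >0 = don't-need); per the assert above, the reservation lock is the caller's responsibility:

	int ret;

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_madvise(shmem, args_madv);	/* >0 marks the BO purgeable */
	dma_resv_unlock(shmem->base.resv);

	/* ret stays true as long as the object has not been purged */
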
445 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_purge()
446 struct drm_device *dev = obj->dev; in drm_gem_shmem_purge()
448 dma_resv_assert_held(shmem->base.resv); in drm_gem_shmem_purge()
450 drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem)); in drm_gem_shmem_purge()
452 dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); in drm_gem_shmem_purge()
453 sg_free_table(shmem->sgt); in drm_gem_shmem_purge()
454 kfree(shmem->sgt); in drm_gem_shmem_purge()
455 shmem->sgt = NULL; in drm_gem_shmem_purge()
459 shmem->madv = -1; in drm_gem_shmem_purge()
461 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); in drm_gem_shmem_purge()
469 shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1); in drm_gem_shmem_purge()
471 invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1); in drm_gem_shmem_purge()
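
A shrinker scan callback would typically trylock the reservation and purge opportunistically. A minimal sketch, assuming drm_gem_shmem_is_purgeable() from <drm/drm_gem_shmem_helper.h>; the foo_ name is illustrative:

static bool foo_try_purge(struct drm_gem_shmem_object *shmem)
{
	bool purged = false;

	if (!dma_resv_trylock(shmem->base.resv))
		return false;	/* contended, try another object */

	if (drm_gem_shmem_is_purgeable(shmem)) {
		drm_gem_shmem_purge(shmem);
		purged = true;
	}

	dma_resv_unlock(shmem->base.resv);
	return purged;
}
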
476 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
495 u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); in drm_gem_shmem_dumb_create()
497 if (!args->pitch || !args->size) { in drm_gem_shmem_dumb_create()
498 args->pitch = min_pitch; in drm_gem_shmem_dumb_create()
499 args->size = PAGE_ALIGN(args->pitch * args->height); in drm_gem_shmem_dumb_create()
502 if (args->pitch < min_pitch) in drm_gem_shmem_dumb_create()
503 args->pitch = min_pitch; in drm_gem_shmem_dumb_create()
504 if (args->size < args->pitch * args->height) in drm_gem_shmem_dumb_create()
505 args->size = PAGE_ALIGN(args->pitch * args->height); in drm_gem_shmem_dumb_create()
508 return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle); in drm_gem_shmem_dumb_create()
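
Drivers rarely call this directly; it is usually wired up through struct drm_driver, either by hand or via the DRM_GEM_SHMEM_DRIVER_OPS convenience macro from <drm/drm_gem_shmem_helper.h>, which also points .gem_prime_import_sg_table at the import helper shown near the end of this file. A sketch with an illustrative foo_ driver:

static const struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM,
	DRM_GEM_SHMEM_DRIVER_OPS,	/* .dumb_create, PRIME import, ... */
	.name = "foo",
	/* ... fops, ioctls, version info ... */
};
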
514 struct vm_area_struct *vma = vmf->vma; in drm_gem_shmem_fault()
515 struct drm_gem_object *obj = vma->vm_private_data; in drm_gem_shmem_fault()
517 loff_t num_pages = obj->size >> PAGE_SHIFT; in drm_gem_shmem_fault()
522 /* We don't use vmf->pgoff since that has the fake offset */ in drm_gem_shmem_fault()
523 page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; in drm_gem_shmem_fault()
525 dma_resv_lock(shmem->base.resv, NULL); in drm_gem_shmem_fault()
528 drm_WARN_ON_ONCE(obj->dev, !shmem->pages) || in drm_gem_shmem_fault()
529 shmem->madv < 0) { in drm_gem_shmem_fault()
532 page = shmem->pages[page_offset]; in drm_gem_shmem_fault()
534 ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page)); in drm_gem_shmem_fault()
537 dma_resv_unlock(shmem->base.resv); in drm_gem_shmem_fault()
544 struct drm_gem_object *obj = vma->vm_private_data; in drm_gem_shmem_vm_open()
547 drm_WARN_ON(obj->dev, obj->import_attach); in drm_gem_shmem_vm_open()
549 dma_resv_lock(shmem->base.resv, NULL); in drm_gem_shmem_vm_open()
556 if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count)) in drm_gem_shmem_vm_open()
557 shmem->pages_use_count++; in drm_gem_shmem_vm_open()
559 dma_resv_unlock(shmem->base.resv); in drm_gem_shmem_vm_open()
566 struct drm_gem_object *obj = vma->vm_private_data; in drm_gem_shmem_vm_close()
569 dma_resv_lock(shmem->base.resv, NULL); in drm_gem_shmem_vm_close()
571 dma_resv_unlock(shmem->base.resv); in drm_gem_shmem_vm_close()
584 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
596 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_mmap()
599 if (obj->import_attach) { in drm_gem_shmem_mmap()
601 * vm_ops pointing to our implementation if the dma-buf backend doesn't set those. in drm_gem_shmem_mmap()
604 vma->vm_private_data = NULL; in drm_gem_shmem_mmap()
605 vma->vm_ops = NULL; in drm_gem_shmem_mmap()
607 ret = dma_buf_mmap(obj->dma_buf, vma, 0); in drm_gem_shmem_mmap()
616 if (is_cow_mapping(vma->vm_flags)) in drm_gem_shmem_mmap()
617 return -EINVAL; in drm_gem_shmem_mmap()
619 dma_resv_lock(shmem->base.resv, NULL); in drm_gem_shmem_mmap()
621 dma_resv_unlock(shmem->base.resv); in drm_gem_shmem_mmap()
627 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in drm_gem_shmem_mmap()
628 if (shmem->map_wc) in drm_gem_shmem_mmap()
629 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); in drm_gem_shmem_mmap()
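
A driver that supplies its own drm_gem_object_funcs (e.g. from a gem_create_object callback) can reuse the exported per-object wrappers instead of drm_gem_shmem_funcs. A sketch, assuming the drm_gem_shmem_object_* wrappers and drm_gem_shmem_vm_ops from <drm/drm_gem_shmem_helper.h>:

static const struct drm_gem_object_funcs foo_gem_funcs = {
	.free		= drm_gem_shmem_object_free,
	.print_info	= drm_gem_shmem_object_print_info,
	.pin		= drm_gem_shmem_object_pin,
	.unpin		= drm_gem_shmem_object_unpin,
	.get_sg_table	= drm_gem_shmem_object_get_sg_table,
	.vmap		= drm_gem_shmem_object_vmap,
	.vunmap		= drm_gem_shmem_object_vunmap,
	.mmap		= drm_gem_shmem_object_mmap,
	.vm_ops		= &drm_gem_shmem_vm_ops,
};
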
636 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
644 if (shmem->base.import_attach) in drm_gem_shmem_print_info()
647 drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count); in drm_gem_shmem_print_info()
648 drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count); in drm_gem_shmem_print_info()
649 drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr); in drm_gem_shmem_print_info()
654 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned pages for a shmem GEM object
658 * This function exports a scatter/gather table suitable for PRIME usage by
661 * Drivers that need to acquire a scatter/gather table for objects need to call
665 * A pointer to the scatter/gather table of pinned pages or error pointer on failure.
669 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_get_sg_table()
671 drm_WARN_ON(obj->dev, obj->import_attach); in drm_gem_shmem_get_sg_table()
673 return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT); in drm_gem_shmem_get_sg_table()
679 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_get_pages_sgt_locked()
683 if (shmem->sgt) in drm_gem_shmem_get_pages_sgt_locked()
684 return shmem->sgt; in drm_gem_shmem_get_pages_sgt_locked()
686 drm_WARN_ON(obj->dev, obj->import_attach); in drm_gem_shmem_get_pages_sgt_locked()
698 ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0); in drm_gem_shmem_get_pages_sgt_locked()
702 shmem->sgt = sgt; in drm_gem_shmem_get_pages_sgt_locked()
715 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
716 * scatter/gather table for a shmem GEM object.
719 * This function returns a scatter/gather table suitable for driver usage. If
720 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
724 * and difference between dma-buf imported and natively allocated objects.
728 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded negative error code on failure.
735 ret = dma_resv_lock_interruptible(shmem->base.resv, NULL); in drm_gem_shmem_get_pages_sgt()
739 dma_resv_unlock(shmem->base.resv); in drm_gem_shmem_get_pages_sgt()
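
Typical driver usage: fetch the cached, DMA-mapped table and walk its entries, e.g. to program an IOMMU or device page tables. A minimal sketch:

	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int i;

	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* map [addr, addr + len) into the (hypothetical) device MMU */
	}
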
746 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
747 * another driver's scatter/gather table of pinned pages
749 * @attach: DMA-BUF attachment
750 * @sgt: Scatter/gather table of pinned pages
752 * This function imports a scatter/gather table exported via DMA-BUF by
757 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative error code on failure.
765 size_t size = PAGE_ALIGN(attach->dmabuf->size); in drm_gem_shmem_prime_import_sg_table()
772 shmem->sgt = sgt; in drm_gem_shmem_prime_import_sg_table()
776 return &shmem->base; in drm_gem_shmem_prime_import_sg_table()
780 MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");