Lines Matching full:mapping
6 #include <linux/dma-mapping.h>
65 /* unroll mapping in case something went wrong */ in etnaviv_context_map()
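The comment at line 65 belongs to etnaviv_context_map(), which installs a physically contiguous range page by page and, on failure, tears down whatever it had already mapped. A hedged, simplified sketch of that map-then-unroll pattern; the page size, the per-version ops->map callback and the etnaviv_context_unmap() signature are assumptions, not shown in the listing:

static int etnaviv_context_map(struct etnaviv_iommu_context *context,
			       u32 iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t orig_size = size;
	size_t pgsize = SZ_4K;	/* assumed page granularity */
	int ret = 0;

	while (size) {
		/* per-MMU-version page table update (callback name assumed) */
		ret = context->global->ops->map(context, iova, paddr,
						pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_context_unmap(context, orig_iova, orig_size - size);

	return ret;
}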
127 struct etnaviv_vram_mapping *mapping) in etnaviv_iommu_remove_mapping() argument
129 struct etnaviv_gem_object *etnaviv_obj = mapping->object; in etnaviv_iommu_remove_mapping()
133 etnaviv_iommu_unmap(context, mapping->vram_node.start, in etnaviv_iommu_remove_mapping()
135 drm_mm_remove_node(&mapping->vram_node); in etnaviv_iommu_remove_mapping()
138 void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping) in etnaviv_iommu_reap_mapping() argument
140 struct etnaviv_iommu_context *context = mapping->context; in etnaviv_iommu_reap_mapping()
143 WARN_ON(mapping->use); in etnaviv_iommu_reap_mapping()
145 etnaviv_iommu_remove_mapping(context, mapping); in etnaviv_iommu_reap_mapping()
146 etnaviv_iommu_context_put(mapping->context); in etnaviv_iommu_reap_mapping()
147 mapping->context = NULL; in etnaviv_iommu_reap_mapping()
148 list_del_init(&mapping->mmu_node); in etnaviv_iommu_reap_mapping()
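Lines 127-148 cover the teardown side: etnaviv_iommu_remove_mapping() clears the page table entries and returns the address range to the context's drm_mm, while etnaviv_iommu_reap_mapping() additionally drops the mapping's context reference and unlinks it from the per-context list. A hedged reconstruction of how the quoted fragments fit together; the locking annotation and the trailing arguments of etnaviv_iommu_unmap() (the object's sg_table and size) are assumptions filled in around the listing:

static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
					 struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	lockdep_assert_held(&context->lock);	/* assumed locking rule */

	/* drop the page table entries, then give the IOVA range back */
	etnaviv_iommu_unmap(context, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}

void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_iommu_context *context = mapping->context;

	/* reaping is only valid for mappings with no active users */
	WARN_ON(mapping->use);

	etnaviv_iommu_remove_mapping(context, mapping);
	etnaviv_iommu_context_put(mapping->context);
	mapping->context = NULL;
	list_del_init(&mapping->mmu_node);
}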
183 * so we must keep its mapping. in etnaviv_iommu_find_iova()
215 * this mapping. in etnaviv_iommu_find_iova()
249 * When we can't insert the node, due to an existing mapping blocking in etnaviv_iommu_insert_exact()
256 * here to make space for the new mapping. in etnaviv_iommu_insert_exact()
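The comments at lines 183, 215 and 249-256 describe the eviction rules for the two IOVA allocation paths: etnaviv_iommu_find_iova()'s scan must keep command buffer mappings alive, while etnaviv_iommu_insert_exact() may only make space for a softpinned address by reaping mappings that are no longer in use. A hypothetical, simplified sketch of the latter idea; the function body, the scan_node list field and the retry are assumptions, not quoted in the listing:

static int insert_exact_sketch(struct etnaviv_iommu_context *context,
			       struct drm_mm_node *node, size_t size, u64 va)
{
	struct etnaviv_vram_mapping *m, *n;
	struct drm_mm_node *scan_node;
	LIST_HEAD(scan_list);
	int ret;

	/* try the exact placement first */
	ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0,
					  va, va + size, DRM_MM_INSERT_LOWEST);
	if (ret != -ENOSPC)
		return ret;

	/*
	 * An existing mapping blocks the requested range. Only mappings
	 * that are no longer in use may be reaped to make space for the
	 * new mapping; anything still referenced fails the request.
	 */
	drm_mm_for_each_node_in_range(scan_node, &context->mm, va, va + size) {
		m = container_of(scan_node, struct etnaviv_vram_mapping,
				 vram_node);
		if (m->use)
			return -ENOSPC;
		list_add(&m->scan_node, &scan_list);
	}

	list_for_each_entry_safe(m, n, &scan_list, scan_node) {
		etnaviv_iommu_reap_mapping(m);
		list_del_init(&m->scan_node);
	}

	/* retry now that the blocking mappings are gone */
	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0,
					   va, va + size, DRM_MM_INSERT_LOWEST);
}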
280 struct etnaviv_vram_mapping *mapping, u64 va) in etnaviv_iommu_map_gem() argument
297 mapping->iova = iova; in etnaviv_iommu_map_gem()
298 mapping->context = etnaviv_iommu_context_get(context); in etnaviv_iommu_map_gem()
299 list_add_tail(&mapping->mmu_node, &context->mappings); in etnaviv_iommu_map_gem()
305 node = &mapping->vram_node; in etnaviv_iommu_map_gem()
316 mapping->iova = node->start; in etnaviv_iommu_map_gem()
325 mapping->context = etnaviv_iommu_context_get(context); in etnaviv_iommu_map_gem()
326 list_add_tail(&mapping->mmu_node, &context->mappings); in etnaviv_iommu_map_gem()
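Lines 280-326 belong to etnaviv_iommu_map_gem(), whose two success paths both end with the reference and list bookkeeping quoted above: an MMUv1 shortcut that exposes a contiguous buffer directly through the linear window, and the general path that reserves a vram_node and maps the object's sg_table into it. A hedged reconstruction of that shape; the eligibility check, the helper signatures and the protection flags are assumptions beyond what the listing shows:

int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping, u64 va)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	mutex_lock(&context->lock);

	/*
	 * MMUv1 shortcut: a contiguous buffer is visible through the linear
	 * window, so no page tables are touched and the iova is just the
	 * DMA address relative to memory_base (eligibility test abbreviated).
	 */
	if (context->global->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		mapping->iova = sg_dma_address(sgt->sgl) - memory_base;
		mapping->context = etnaviv_iommu_context_get(context);
		list_add_tail(&mapping->mmu_node, &context->mappings);
		ret = 0;
		goto unlock;
	}

	/* general path: carve out an IOVA range, softpinned or not */
	node = &mapping->vram_node;
	if (va)
		ret = etnaviv_iommu_insert_exact(context, node,
						 etnaviv_obj->base.size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node,
					      etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(context, node->start, sgt,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	mapping->context = etnaviv_iommu_context_get(context);
	list_add_tail(&mapping->mmu_node, &context->mappings);
unlock:
	mutex_unlock(&context->lock);

	return ret;
}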
334 struct etnaviv_vram_mapping *mapping) in etnaviv_iommu_unmap_gem() argument
336 WARN_ON(mapping->use); in etnaviv_iommu_unmap_gem()
340 /* Bail if the mapping has been reaped by another thread */ in etnaviv_iommu_unmap_gem()
341 if (!mapping->context) { in etnaviv_iommu_unmap_gem()
347 if (mapping->vram_node.mm == &context->mm) in etnaviv_iommu_unmap_gem()
348 etnaviv_iommu_remove_mapping(context, mapping); in etnaviv_iommu_unmap_gem()
350 list_del(&mapping->mmu_node); in etnaviv_iommu_unmap_gem()
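etnaviv_iommu_unmap_gem() (lines 334-350) is the inverse: it tolerates the mapping having already been reaped, only touches the page tables when the mapping owns a node in this context's drm_mm, and finally unlinks the mapping. A hedged reconstruction; the locking and the closing context reference drop are assumptions mirroring the map path:

void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
			     struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&context->lock);

	/* Bail if the mapping has been reaped by another thread */
	if (!mapping->context) {
		mutex_unlock(&context->lock);
		return;
	}

	/*
	 * Only MMU-backed mappings own a node in this context's address
	 * space; the MMUv1 linear-window shortcut never allocated one.
	 */
	if (mapping->vram_node.mm == &context->mm)
		etnaviv_iommu_remove_mapping(context, mapping);

	list_del(&mapping->mmu_node);
	mutex_unlock(&context->lock);

	/* drop the reference taken in etnaviv_iommu_map_gem() (assumed) */
	etnaviv_iommu_context_put(context);
}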
412 struct etnaviv_vram_mapping *mapping, in etnaviv_iommu_get_suballoc_va() argument
418 if (mapping->use > 0) { in etnaviv_iommu_get_suballoc_va()
419 mapping->use++; in etnaviv_iommu_get_suballoc_va()
427 * window. Instead we manufacture a mapping to make it look uniform in etnaviv_iommu_get_suballoc_va()
431 mapping->iova = paddr - memory_base; in etnaviv_iommu_get_suballoc_va()
433 struct drm_mm_node *node = &mapping->vram_node; in etnaviv_iommu_get_suballoc_va()
442 mapping->iova = node->start; in etnaviv_iommu_get_suballoc_va()
454 list_add_tail(&mapping->mmu_node, &context->mappings); in etnaviv_iommu_get_suballoc_va()
455 mapping->use = 1; in etnaviv_iommu_get_suballoc_va()
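etnaviv_iommu_get_suballoc_va() (lines 412-455) maps the kernel's cmdbuf suballocation region into a context: repeat callers just bump the use count, MMUv1 reports the offset into the linear window as the iova, and later MMU generations reserve a vram_node and enter the region into the page tables. A hedged reconstruction; the drm_mm placement arguments, the protection flag and the error paths are assumptions:

int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size)
{
	mutex_lock(&context->lock);

	/* already mapped into this context: just take another reference */
	if (mapping->use > 0) {
		mapping->use++;
		mutex_unlock(&context->lock);
		return 0;
	}

	if (context->global->version == ETNAVIV_IOMMU_V1) {
		/*
		 * MMUv1 only reaches cmdbufs through the linear window.
		 * Instead we manufacture a mapping to make it look uniform
		 * to the upper layers: iova == offset into the window.
		 */
		mapping->iova = paddr - memory_base;
	} else {
		struct drm_mm_node *node = &mapping->vram_node;
		int ret;

		/* reserve an IOVA range (placement constraints assumed) */
		ret = drm_mm_insert_node_in_range(&context->mm, node, size,
						  0, 0, mapping->iova,
						  mapping->iova + size,
						  DRM_MM_INSERT_LOWEST);
		if (ret < 0) {
			mutex_unlock(&context->lock);
			return ret;
		}

		mapping->iova = node->start;
		ret = etnaviv_context_map(context, node->start, paddr, size,
					  ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(node);
			mutex_unlock(&context->lock);
			return ret;
		}
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	mapping->use = 1;

	mutex_unlock(&context->lock);

	return 0;
}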
463 struct etnaviv_vram_mapping *mapping) in etnaviv_iommu_put_suballoc_va() argument
465 struct drm_mm_node *node = &mapping->vram_node; in etnaviv_iommu_put_suballoc_va()
468 mapping->use--; in etnaviv_iommu_put_suballoc_va()
470 if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) { in etnaviv_iommu_put_suballoc_va()
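The matching release, etnaviv_iommu_put_suballoc_va() (lines 463-470), drops the use count and only tears the region down once the last user is gone, and only on MMU versions that actually created page table entries. A hedged sketch of the remainder the listing cuts off; the unmap call and node removal are assumptions:

void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
				   struct etnaviv_vram_mapping *mapping)
{
	struct drm_mm_node *node = &mapping->vram_node;

	mutex_lock(&context->lock);
	mapping->use--;

	/* still referenced, or never entered into page tables (MMUv1) */
	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
		mutex_unlock(&context->lock);
		return;
	}

	etnaviv_context_unmap(context, node->start, node->size);
	drm_mm_remove_node(node);
	mutex_unlock(&context->lock);
}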