/linux-6.12.1/drivers/block/xen-blkback/ |
D | blkback.c |
    650 struct page **unmap_pages) in xen_blkbk_unmap_prepare() argument
    661 unmap_pages[invcount] = pages[i]->page; in xen_blkbk_unmap_prepare()
    711 req->unmap, req->unmap_pages); in xen_blkbk_unmap_and_respond()
    717 work->pages = req->unmap_pages; in xen_blkbk_unmap_and_respond()
    736 struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; in xen_blkbk_unmap() local
    744 unmap, unmap_pages); in xen_blkbk_unmap()
    746 ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount); in xen_blkbk_unmap()
    748 gnttab_page_cache_put(&ring->free_pages, unmap_pages, in xen_blkbk_unmap()
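The blkback hits above form one batched teardown path: xen_blkbk_unmap_prepare() fills the unmap_pages array, gnttab_unmap_refs() revokes all the grant mappings in a single call, and gnttab_page_cache_put() recycles the pages. A minimal sketch of that flow, assuming the real gnttab_unmap_refs()/gnttab_page_cache_put() APIs from include/xen/grant_table.h and the blkback-internal xen_blkif_ring type from common.h; example_unmap_batch is a hypothetical name, not the driver's:

#include <xen/grant_table.h>

static void example_unmap_batch(struct xen_blkif_ring *ring,
				struct gnttab_unmap_grant_ref *unmap,
				struct page **unmap_pages,
				unsigned int invcount)
{
	int ret;

	/* One batched call tears down all 'invcount' grant mappings. */
	ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
	BUG_ON(ret);

	/* The now-unmapped pages go back to the ring's free-page cache. */
	gnttab_page_cache_put(&ring->free_pages, unmap_pages, invcount);
}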
|
D | common.h | 358 struct page *unmap_pages[MAX_INDIRECT_SEGMENTS]; member
|
/linux-6.12.1/include/linux/ |
D | io-pgtable.h | 195 size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova, member
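The hit above truncates the member after its first parameter line. For reference, the full declaration in include/linux/io-pgtable.h as of v6.12 reads as follows (surrounding members elided):

struct io_pgtable_ops {
	/* ... */
	size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *gather);
	/* ... */
};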
|
D | iommu.h | 650 size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova, member
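Likewise, the iommu.h member at line 650 belongs to struct iommu_domain_ops; its full v6.12 signature is:

struct iommu_domain_ops {
	/* ... */
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);
	/* ... */
};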
|
/linux-6.12.1/drivers/iommu/ |
D | io-pgtable-arm-v7s.c |
    832 .unmap_pages = arm_v7s_unmap_pages, in arm_v7s_alloc_pgtable()
    996 if (ops->unmap_pages(ops, iova_start + size, size, 1, NULL) != size) in arm_v7s_do_selftests()
    1015 if (ops->unmap_pages(ops, iova, size, 1, NULL) != size) in arm_v7s_do_selftests()
|
D | ipmmu-vmsa.c |
    684 return domain->iop->unmap_pages(domain->iop, iova, pgsize, pgcount, gather); in ipmmu_unmap()
    890 .unmap_pages = ipmmu_unmap,
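ipmmu_unmap() above shows the shape most drivers in this list share: the driver's .unmap_pages callback in struct iommu_domain_ops is a thin wrapper that forwards to the io-pgtable ops. A generic sketch of that delegation pattern; example_domain and to_example_domain() are hypothetical names, not ipmmu types:

#include <linux/iommu.h>
#include <linux/io-pgtable.h>

struct example_domain {
	struct iommu_domain domain;
	struct io_pgtable_ops *iop;	/* from alloc_io_pgtable_ops() */
};

static struct example_domain *to_example_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct example_domain, domain);
}

static size_t example_unmap_pages(struct iommu_domain *domain,
				  unsigned long iova, size_t pgsize,
				  size_t pgcount,
				  struct iommu_iotlb_gather *gather)
{
	struct example_domain *dom = to_example_domain(domain);

	/* No page table allocated yet: nothing was mapped, nothing unmaps. */
	if (!dom->iop)
		return 0;

	/* Forward straight to the page-table implementation. */
	return dom->iop->unmap_pages(dom->iop, iova, pgsize, pgcount, gather);
}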
|
D | msm_iommu.c |
    519 ret = priv->iop->unmap_pages(priv->iop, iova, pgsize, pgcount, gather); in msm_iommu_unmap()
    700 .unmap_pages = msm_iommu_unmap,
|
D | io-pgtable-arm.c |
    918 .unmap_pages = arm_lpae_unmap_pages, in arm_lpae_alloc_pgtable()
    1352 if (ops->unmap_pages(ops, SZ_1G + size, size, 1, NULL) != size) in arm_lpae_run_tests()
    1368 if (ops->unmap_pages(ops, iova, size, 1, NULL) != size) in arm_lpae_run_tests()
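The v7s and LPAE selftest hits above both use the same idiom: unmap a single page and require the returned byte count to equal the requested size, since a short return means the table walk or the leaf accounting is broken. A condensed sketch of that check; example_check_unmap is a hypothetical helper, not part of either selftest:

#include <linux/errno.h>
#include <linux/io-pgtable.h>

static int example_check_unmap(struct io_pgtable_ops *ops,
			       unsigned long iova, size_t size)
{
	/* pgcount == 1: unmap exactly one 'size'-byte page at 'iova'. */
	if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
		return -EINVAL;

	return 0;
}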
|
D | io-pgtable-dart.c | 384 .unmap_pages = dart_unmap_pages, in dart_alloc_pgtable()
|
D | sprd-iommu.c | 418 .unmap_pages = sprd_iommu_unmap,
|
D | apple-dart.c |
    552 return ops->unmap_pages(ops, iova, pgsize, pgcount, gather); in apple_dart_unmap_pages()
    994 .unmap_pages = apple_dart_unmap_pages,
|
D | s390-iommu.c | 790 .unmap_pages = s390_iommu_unmap_pages,
|
D | mtk_iommu_v1.c | 586 .unmap_pages = mtk_iommu_v1_unmap,
|
D | mtk_iommu.c |
    819 return dom->iop->unmap_pages(dom->iop, iova, pgsize, pgcount, gather); in mtk_iommu_unmap()
    1026 .unmap_pages = mtk_iommu_unmap,
|
D | sun50i-iommu.c | 857 .unmap_pages = sun50i_iommu_unmap,
|
D | iommu.c |
    2547 if (WARN_ON(!ops->unmap_pages || domain->pgsize_bitmap == 0UL)) in __iommu_unmap()
    2574 unmapped_page = ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather); in __iommu_unmap()
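These two hits bracket the core dispatch in __iommu_unmap(): after the WARN_ON guard at line 2547, the function repeatedly carves the range into the largest chunk the domain's pgsize_bitmap allows and hands it to ops->unmap_pages until the range is drained or the driver stops making progress. A sketch of that loop under those assumptions; it relies on iommu_pgsize(), a helper that is internal (static) to drivers/iommu/iommu.c, and example_unmap_range is a hypothetical wrapper name:

static size_t example_unmap_range(struct iommu_domain *domain,
				  const struct iommu_domain_ops *ops,
				  unsigned long iova, size_t size,
				  struct iommu_iotlb_gather *iotlb_gather)
{
	size_t unmapped = 0;

	while (unmapped < size) {
		size_t pgsize, count, unmapped_page;

		/* Pick the largest chunk domain->pgsize_bitmap permits
		 * at this iova for the remaining length. */
		pgsize = iommu_pgsize(domain, iova, iova, size - unmapped,
				      &count);

		unmapped_page = ops->unmap_pages(domain, iova, pgsize, count,
						 iotlb_gather);
		if (!unmapped_page)
			break;	/* Driver made no progress; stop here. */

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	return unmapped;
}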
|
D | tegra-smmu.c | 1005 .unmap_pages = tegra_smmu_unmap,
|
D | virtio-iommu.c | 1075 .unmap_pages = viommu_unmap_pages,
|
/linux-6.12.1/drivers/iommu/amd/ |
D | io_pgtable_v2.c | 354 pgtable->pgtbl.ops.unmap_pages = iommu_v2_unmap_pages; in v2_alloc_pgtable()
|
D | io_pgtable.c | 556 pgtable->pgtbl.ops.unmap_pages = iommu_v1_unmap_pages; in v1_alloc_pgtable()
|
D | iommu.c |
    2577 r = (ops->unmap_pages) ? ops->unmap_pages(ops, iova, pgsize, pgcount, NULL) : 0; in amd_iommu_unmap_pages()
    2861 .unmap_pages = amd_iommu_unmap_pages,
|
/linux-6.12.1/drivers/iommu/arm/arm-smmu/ |
D | qcom_iommu.c |
    466 ret = ops->unmap_pages(ops, iova, pgsize, pgcount, gather); in qcom_iommu_unmap()
    605 .unmap_pages = qcom_iommu_unmap,
|
D | arm-smmu.c |
    1285 ret = ops->unmap_pages(ops, iova, pgsize, pgcount, iotlb_gather); in arm_smmu_unmap_pages()
    1655 .unmap_pages = arm_smmu_unmap_pages,
|
/linux-6.12.1/drivers/gpu/drm/msm/ |
D | msm_iommu.c | 102 unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL); in msm_iommu_pagetable_unmap()
|
/linux-6.12.1/drivers/gpu/drm/panfrost/ |
D | panfrost_mmu.c | 372 unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL); in panfrost_mmu_unmap()
|