/linux-6.12.1/drivers/gpu/drm/msm/ |
D | msm_gem_shrinker.c |
    188  unsigned idx, unmapped = 0;  in msm_gem_shrinker_vmap() local
    191  for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {  in msm_gem_shrinker_vmap()
    192  unmapped += drm_gem_lru_scan(lrus[idx],  in msm_gem_shrinker_vmap()
    193  vmap_shrink_limit - unmapped,  in msm_gem_shrinker_vmap()
    198  *(unsigned long *)ptr += unmapped;  in msm_gem_shrinker_vmap()
    200  if (unmapped > 0)  in msm_gem_shrinker_vmap()
    201  trace_msm_gem_purge_vmaps(unmapped);  in msm_gem_shrinker_vmap()
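The fragments above outline a common shrinker shape: iterate a NULL-terminated table of LRU lists, stop once the scan budget is spent, and fold the result into a counter the core passed in as an opaque pointer. A minimal standalone sketch of that accumulation pattern (the lru_scan() stub and the void * types are illustrative stand-ins, not the msm/DRM API):

#include <stddef.h>

/* Illustrative stand-in for drm_gem_lru_scan(): try to reclaim up to
 * `budget` objects from one LRU and return how many were actually freed. */
static unsigned int lru_scan(void *lru, unsigned int budget)
{
	(void)lru;
	return budget ? 1 : 0;
}

/* Walk a NULL-terminated table of LRUs, stop once the budget is spent,
 * and report total progress through the caller-supplied counter. */
static unsigned int scan_lrus(void **lrus, unsigned int limit, void *ptr)
{
	unsigned int idx, unmapped = 0;

	for (idx = 0; lrus[idx] && unmapped < limit; idx++)
		unmapped += lru_scan(lrus[idx], limit - unmapped);

	*(unsigned long *)ptr += unmapped;
	return unmapped;
}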
|
D | msm_gpu_trace.h |
    143  TP_PROTO(u32 unmapped),
    144  TP_ARGS(unmapped),
    146  __field(u32, unmapped)
    149  __entry->unmapped = unmapped;
    151  TP_printk("Purging %u vmaps", __entry->unmapped)
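Those five fragments are the skeleton of a standard TRACE_EVENT definition: prototype, argument list, ring-buffer field layout, assignment, and format string. A plausible reconstruction of how they fit together (the lines elided between the listed ones are assumed):

TRACE_EVENT(msm_gem_purge_vmaps,
	TP_PROTO(u32 unmapped),
	TP_ARGS(unmapped),
	TP_STRUCT__entry(
		__field(u32, unmapped)
	),
	TP_fast_assign(
		__entry->unmapped = unmapped;
	),
	TP_printk("Purging %u vmaps", __entry->unmapped)
);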
|
D | msm_iommu.c |
    98   size_t unmapped, pgsize, count;  in msm_iommu_pagetable_unmap() local
    102  unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);  in msm_iommu_pagetable_unmap()
    103  if (!unmapped)  in msm_iommu_pagetable_unmap()
    106  iova += unmapped;  in msm_iommu_pagetable_unmap()
    107  size -= unmapped;  in msm_iommu_pagetable_unmap()
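The msm pagetable unmap shows the loop shape that recurs in the AMD io_pgtable, IPU, and etnaviv entries below: ask the page-table ops to unmap a run of pages, treat a zero return as failure, and otherwise advance the IOVA cursor by however many bytes actually came out. A standalone sketch of that shape (the unmap_pages() stub and the fixed 4 KiB page size are assumptions for illustration):

#include <stddef.h>

#define PAGE_SZ 4096	/* assumed page size for the sketch */

/* Illustrative stand-in for ops->unmap_pages(): returns the number of
 * bytes actually unmapped at `iova`, or 0 on failure. */
static size_t unmap_pages(unsigned long iova, size_t pgsize, size_t count)
{
	(void)iova;
	return count ? pgsize : 0;	/* pretend one page per call */
}

static size_t pagetable_unmap(unsigned long iova, size_t size)
{
	size_t total = 0;

	while (size) {
		size_t unmapped = unmap_pages(iova, PAGE_SZ, size / PAGE_SZ);

		if (!unmapped)
			break;		/* stop on the first failure */

		/* Advance by actual progress, not by what was requested. */
		iova += unmapped;
		size -= unmapped;
		total += unmapped;
	}
	return total;
}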
|
/linux-6.12.1/include/trace/events/ |
D | huge_memory.h |
    59   int referenced, int none_or_zero, int status, int unmapped),
    61   TP_ARGS(mm, page, writable, referenced, none_or_zero, status, unmapped),
    70   __field(int, unmapped)
    80   __entry->unmapped = unmapped;
    90   __entry->unmapped)
|
/linux-6.12.1/drivers/iommu/amd/ |
D | io_pgtable_v2.c |
    289  unsigned long unmapped = 0;  in iommu_v2_unmap_pages() local
    296  while (unmapped < size) {  in iommu_v2_unmap_pages()
    299  return unmapped;  in iommu_v2_unmap_pages()
    304  unmapped += unmap_size;  in iommu_v2_unmap_pages()
    307  return unmapped;  in iommu_v2_unmap_pages()
|
D | io_pgtable.c |
    411  unsigned long long unmapped;  in iommu_v1_unmap_pages() local
    418  unmapped = 0;  in iommu_v1_unmap_pages()
    420  while (unmapped < size) {  in iommu_v1_unmap_pages()
    429  return unmapped;  in iommu_v1_unmap_pages()
    433  unmapped += unmap_size;  in iommu_v1_unmap_pages()
    436  return unmapped;  in iommu_v1_unmap_pages()
|
/linux-6.12.1/mm/ |
D | migrate_device.c |
    65   unsigned long addr = start, unmapped = 0;  in migrate_vma_collect_pmd() local
    261  unmapped++;  in migrate_vma_collect_pmd()
    273  if (unmapped)  in migrate_vma_collect_pmd()
    368  unsigned long unmapped = 0;  in migrate_device_unmap() local
    378  unmapped++;  in migrate_device_unmap()
    416  unmapped++;  in migrate_device_unmap()
    435  return unmapped;  in migrate_device_unmap()
|
D | khugepaged.c |
    1093  int referenced, int unmapped,  in collapse_huge_page() argument
    1133  if (unmapped) {  in collapse_huge_page()
    1268  int node = NUMA_NO_NODE, unmapped = 0;  in hpage_collapse_scan_pmd() local
    1289  ++unmapped;  in hpage_collapse_scan_pmd()
    1291  unmapped <= khugepaged_max_ptes_swap) {  in hpage_collapse_scan_pmd()
    1414  (unmapped && referenced < HPAGE_PMD_NR / 2))) {  in hpage_collapse_scan_pmd()
    1423  unmapped, cc);  in hpage_collapse_scan_pmd()
    1429  none_or_zero, result, unmapped);  in hpage_collapse_scan_pmd()
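The khugepaged fragments show the scan tolerating a bounded number of swapped-out PTEs (unmapped <= khugepaged_max_ptes_swap) and, per the check at kernel line 1414 above, declining to collapse when pages were swapped out but fewer than half the PTEs were referenced. A hedged sketch of that decision shape (the constants are illustrative stand-ins for khugepaged_max_ptes_swap and HPAGE_PMD_NR, and the real scan tracks more state than this):

#include <stdbool.h>

#define MAX_PTES_SWAP	64	/* stand-in for khugepaged_max_ptes_swap */
#define PTES_PER_PMD	512	/* stand-in for HPAGE_PMD_NR */

/* Collapse only if swap-backed holes are rare and, when any exist,
 * the range is still hot enough to justify the swap-in cost. */
static bool worth_collapsing(int unmapped, int referenced)
{
	if (unmapped > MAX_PTES_SWAP)
		return false;
	if (unmapped && referenced < PTES_PER_PMD / 2)
		return false;
	return true;
}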
|
/linux-6.12.1/drivers/media/pci/intel/ipu6/ |
D | ipu6-mmu.c |
    345  size_t unmapped = 0;  in l2_unmap() local
    372  unmapped++;  in l2_unmap()
    376  return unmapped << ISP_PAGE_SHIFT;  in l2_unmap()
    653  size_t unmapped_page, unmapped = 0;  in ipu6_mmu_unmap() local
    674  while (unmapped < size) {  in ipu6_mmu_unmap()
    676  iova, size - unmapped);  in ipu6_mmu_unmap()
    686  unmapped += unmapped_page;  in ipu6_mmu_unmap()
    689  return unmapped;  in ipu6_mmu_unmap()
|
/linux-6.12.1/drivers/vfio/ |
D | vfio_iommu_type1.c |
    970  size_t unmapped = 0;  in unmap_unpin_fast() local
    974  unmapped = iommu_unmap_fast(domain->domain, *iova, len,  in unmap_unpin_fast()
    977  if (!unmapped) {  in unmap_unpin_fast()
    982  entry->len = unmapped;  in unmap_unpin_fast()
    985  *iova += unmapped;  in unmap_unpin_fast()
    994  if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) {  in unmap_unpin_fast()
    1000  return unmapped;  in unmap_unpin_fast()
    1008  size_t unmapped = iommu_unmap(domain->domain, *iova, len);  in unmap_unpin_slow() local
    1010  if (unmapped) {  in unmap_unpin_slow()
    1013  unmapped >> PAGE_SHIFT,  in unmap_unpin_slow()
    [all …]
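The vfio type1 fragments pair a fast path (iommu_unmap_fast(), which defers the IOTLB flush via a gather structure) with a synchronous iommu_unmap() fallback, and flush only when a batch of deferred unmaps reaches VFIO_IOMMU_TLB_SYNC_MAX or the fast path fails. A standalone sketch of that batching idea (the tlb_sync() stub and the threshold value are assumptions):

#include <stddef.h>

#define TLB_SYNC_MAX 512	/* stand-in for VFIO_IOMMU_TLB_SYNC_MAX */

static void tlb_sync(void)
{
	/* Illustrative stand-in for the deferred IOTLB flush. */
}

/* Record one fast unmap; flush once per batch rather than per call. */
static void account_fast_unmap(size_t unmapped, size_t *deferred_cnt)
{
	if (unmapped)
		(*deferred_cnt)++;

	/* Sync when the batch is full, or when the fast path failed and
	 * the caller is about to fall back to the slow, synchronous path. */
	if (*deferred_cnt >= TLB_SYNC_MAX || !unmapped) {
		tlb_sync();
		*deferred_cnt = 0;
	}
}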
|
/linux-6.12.1/drivers/staging/media/ipu3/ |
D | ipu3-mmu.c |
    383  size_t unmapped_page, unmapped = 0;  in imgu_mmu_unmap() local
    402  while (unmapped < size) {  in imgu_mmu_unmap()
    411  unmapped += unmapped_page;  in imgu_mmu_unmap()
    416  return unmapped;  in imgu_mmu_unmap()
|
/linux-6.12.1/drivers/iommu/iommufd/ |
D | ioas.c |
    300  unsigned long unmapped = 0;  in iommufd_ioas_unmap() local
    308  rc = iopt_unmap_all(&ioas->iopt, &unmapped);  in iommufd_ioas_unmap()
    317  &unmapped);  in iommufd_ioas_unmap()
    322  cmd->length = unmapped;  in iommufd_ioas_unmap()
|
D | vfio_compat.c |
    209  unsigned long unmapped = 0;  in iommufd_vfio_unmap_dma() local
    228  rc = iopt_unmap_all(&ioas->iopt, &unmapped);  in iommufd_vfio_unmap_dma()
    245  &unmapped);  in iommufd_vfio_unmap_dma()
    247  unmap.size = unmapped;  in iommufd_vfio_unmap_dma()
|
D | io_pagetable.c |
    649  unsigned long last, unsigned long *unmapped)  in iopt_unmap_iova_range() argument
    722  if (unmapped)  in iopt_unmap_iova_range()
    723  *unmapped = unmapped_bytes;  in iopt_unmap_iova_range()
    738  unsigned long length, unsigned long *unmapped)  in iopt_unmap_iova() argument
    748  return iopt_unmap_iova_range(iopt, iova, iova_last, unmapped);  in iopt_unmap_iova()
    751  int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped)  in iopt_unmap_all() argument
    755  rc = iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped);  in iopt_unmap_all()
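The iommufd entries (here and in ioas.c and vfio_compat.c above) share one convention: the range helper does the unmapping and reports total bytes through an optional out-pointer, and iopt_unmap_all() is simply the full-range call. A minimal sketch of that convention (the walk body is elided; function names here are illustrative, not the iommufd API):

#include <limits.h>

static int unmap_range(unsigned long start, unsigned long last,
		       unsigned long *unmapped)
{
	unsigned long unmapped_bytes = 0;

	/* ... walk and unmap every area inside [start, last],
	 * accumulating into unmapped_bytes ... */

	if (unmapped)		/* the out-parameter is optional */
		*unmapped = unmapped_bytes;
	return 0;
}

static int unmap_all(unsigned long *unmapped)
{
	/* Unmap-all is just the full-range case. */
	return unmap_range(0, ULONG_MAX, unmapped);
}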
|
D | iommufd_private.h |
    76  unsigned long length, unsigned long *unmapped);
    77  int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped);
|
/linux-6.12.1/drivers/iommu/ |
D | virtio-iommu.c |
    347  size_t unmapped = 0;  in viommu_del_mappings() local
    367  unmapped += mapping->iova.last - mapping->iova.start + 1;  in viommu_del_mappings()
    374  return unmapped;  in viommu_del_mappings()
    863  size_t unmapped;  in viommu_unmap_pages() local
    868  unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);  in viommu_unmap_pages()
    869  if (unmapped < size)  in viommu_unmap_pages()
    874  return unmapped;  in viommu_unmap_pages()
    880  .virt_end = cpu_to_le64(iova + unmapped - 1),  in viommu_unmap_pages()
    884  return ret ? 0 : unmapped;  in viommu_unmap_pages()
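virtio-iommu tracks its mappings with inclusive [start, last] endpoints, so each deleted mapping contributes last - start + 1 bytes; the inclusive form also stays correct for ranges that end at the very top of the address space. A small sketch of that accounting (the struct mapping layout is an assumption, standing in for the driver's interval-tree nodes):

#include <stddef.h>

struct mapping {
	unsigned long start, last;	/* inclusive endpoints */
};

static size_t del_mappings(const struct mapping *m, size_t n)
{
	size_t unmapped = 0;

	/* Inclusive ranges: length is last - start + 1, which does not
	 * overflow even when last == ULONG_MAX. */
	for (size_t i = 0; i < n; i++)
		unmapped += m[i].last - m[i].start + 1;

	return unmapped;
}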
|
/linux-6.12.1/drivers/media/platform/qcom/venus/ |
D | firmware.c |
    178  size_t unmapped;  in venus_shutdown_no_tz() local
    199  unmapped = iommu_unmap(iommu, VENUS_FW_START_ADDR, mapped);  in venus_shutdown_no_tz()
    201  if (unmapped != mapped)  in venus_shutdown_no_tz()
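The venus shutdown path unmaps the whole firmware region in one iommu_unmap() call and compares the returned byte count against what was originally mapped; a mismatch means the unmap was only partial. A trivial standalone sketch of that sanity check (the warning text is illustrative):

#include <stdio.h>

static void check_unmap(size_t mapped, size_t unmapped)
{
	/* A full-region unmap should undo exactly what was mapped. */
	if (unmapped != mapped)
		fprintf(stderr, "firmware unmap incomplete: %zu of %zu bytes\n",
			unmapped, mapped);
}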
|
/linux-6.12.1/Documentation/features/vm/TLB/ |
D | arch-support.txt | 4 # description: arch supports deferral of TLB flush until multiple pages are unmapped
|
/linux-6.12.1/Documentation/arch/x86/x86_64/ |
D | 5level-paging.rst |
    49  to look for unmapped area by specified address. If it's already
    50  occupied, we look for unmapped area in *full* address space, rather than
|
/linux-6.12.1/drivers/gpu/drm/etnaviv/ |
D | etnaviv_mmu.c |
    19  size_t unmapped_page, unmapped = 0;  in etnaviv_context_unmap() local
    28  while (unmapped < size) {  in etnaviv_context_unmap()
    35  unmapped += unmapped_page;  in etnaviv_context_unmap()
|
/linux-6.12.1/Documentation/networking/device_drivers/ethernet/marvell/ |
D | octeontx2.rst |
    172  - Error due to operation of unmapped PF.
    186  - Error due to unmapped slot.
    236  - Receive packet on an unmapped PF.
    248  - Error due to unmapped slot.
    290  Rx on unmapped PF_FUNC
|
/linux-6.12.1/Documentation/userspace-api/ |
D | tee.rst | 16 any longer it should be unmapped with munmap() to allow the reuse of
|
/linux-6.12.1/arch/arm64/kvm/hyp/ |
D | pgtable.c |
    475  u64 *unmapped = ctx->arg;  in hyp_unmap_walker() local
    497  *unmapped += granule;  in hyp_unmap_walker()
    512  u64 unmapped = 0;  in kvm_pgtable_hyp_unmap() local
    515  .arg = &unmapped,  in kvm_pgtable_hyp_unmap()
    523  return unmapped;  in kvm_pgtable_hyp_unmap()
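The hyp unmap walker is generic, so per-walk state travels through the walker context's void *arg: the caller points it at a local counter, the per-entry callback bumps it by one granule, and the total comes back when the walk finishes. A standalone sketch of that pattern (struct walk_ctx and the fixed leaf count are assumptions; the real walker visits a page table, not a flat loop):

#include <stddef.h>

struct walk_ctx {
	void *arg;	/* per-walk state, opaque to the walker core */
};

static int unmap_walker(struct walk_ctx *ctx, size_t granule)
{
	unsigned long long *unmapped = ctx->arg;

	/* ... unmap one granule-sized leaf entry here ... */
	*unmapped += granule;
	return 0;
}

static unsigned long long walk_unmap(size_t granule, int nleaves)
{
	unsigned long long unmapped = 0;
	struct walk_ctx ctx = { .arg = &unmapped };

	for (int i = 0; i < nleaves; i++)
		unmap_walker(&ctx, granule);

	return unmapped;
}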
|
/linux-6.12.1/Documentation/ABI/testing/ |
D | sysfs-class-rnbd-server | 32 When the device is unmapped by that client, the directory will be removed.
|
/linux-6.12.1/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_umsch_mm.h | 112 uint32_t unmapped; member
|