Lines Matching full:entries
34 u64 entries; /* number of entries in hpas/hpages[] */ member
57 unsigned long entries, unsigned long dev_hpa, in mm_iommu_do_alloc() argument
66 ret = account_locked_vm(mm, entries, true); in mm_iommu_do_alloc()
70 locked_entries = entries; in mm_iommu_do_alloc()
80 mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT)); in mm_iommu_do_alloc()
88 * we use @ua and @entries natural alignment to allow IOMMU pages in mm_iommu_do_alloc()
91 mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT)); in mm_iommu_do_alloc()
92 mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0]))); in mm_iommu_do_alloc()
102 chunk = min(chunk, entries); in mm_iommu_do_alloc()
103 for (entry = 0; entry < entries; entry += chunk) { in mm_iommu_do_alloc()
104 unsigned long n = min(entries - entry, chunk); in mm_iommu_do_alloc()
118 if (pinned != entries) { in mm_iommu_do_alloc()
128 mem->entries = entries; in mm_iommu_do_alloc()
135 if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) && in mm_iommu_do_alloc()
137 (mem2->entries << PAGE_SHIFT)))) { in mm_iommu_do_alloc()
151 for (i = 0; i < entries; ++i) { in mm_iommu_do_alloc()
186 long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries, in mm_iommu_new() argument
189 return mm_iommu_do_alloc(mm, ua, entries, MM_IOMMU_TABLE_INVALID_HPA, in mm_iommu_new()
195 unsigned long entries, unsigned long dev_hpa, in mm_iommu_newdev() argument
198 return mm_iommu_do_alloc(mm, ua, entries, dev_hpa, pmem); in mm_iommu_newdev()
210 for (i = 0; i < mem->entries; ++i) { in mm_iommu_unpin()
274 unlock_entries = mem->entries; in mm_iommu_put()
297 (mem->entries << PAGE_SHIFT))) { in mm_iommu_lookup()
309 unsigned long ua, unsigned long entries) in mm_iommu_get() argument
317 if ((mem->ua == ua) && (mem->entries == entries)) { in mm_iommu_get()
336 if (entry >= mem->entries) in mm_iommu_ua_to_hpa()
365 end = mem->dev_hpa + (mem->entries << PAGE_SHIFT); in mm_iommu_is_devmem()
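
The hits at lines 80 and 91 above compute `mem->pageshift` from the natural alignment of the region: `__ffs()` returns the index of the lowest set bit, so `__ffs(base | size)` is the log2 of the largest power-of-two alignment shared by the region's start address and its length, i.e. the largest IOMMU page size that could cover the region. Below is a minimal, userspace-style sketch of that calculation (not kernel code); the 64K `PAGE_SHIFT` and the sample values are assumptions chosen purely for illustration.

    /* Sketch of the pageshift calculation from mm_iommu_do_alloc(). */
    #include <stdio.h>

    #define PAGE_SHIFT	16UL	/* assume 64K base pages, as on powerpc64 */

    /* stand-in for the kernel's __ffs(): index of the lowest set bit */
    static unsigned long my_ffs(unsigned long x)
    {
    	return __builtin_ctzl(x);
    }

    int main(void)
    {
    	unsigned long ua = 0x10000000UL;	/* 256MB-aligned user address */
    	unsigned long entries = 4096;		/* 4096 * 64K = 256MB region */
    	unsigned long pageshift = my_ffs(ua | (entries << PAGE_SHIFT));

    	/* Prints 28: pages of up to 1 << 28 bytes fit this alignment. */
    	printf("max IOMMU pageshift: %lu\n", pageshift);
    	return 0;
    }

A less aligned `ua` or a size that is not a power of two lowers the result, which is why the comment at line 88 relies on the "natural alignment" of `@ua` and `@entries`.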
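
For context, here is a hedged caller-side sketch of the preregistration API these hits belong to (arch/powerpc/mm/book3s64/iommu_api.c). The prototypes are assumed to match the declarations in arch/powerpc/include/asm/mmu_context.h; the out-parameter of mm_iommu_new() and the pageshift argument of mm_iommu_ua_to_hpa() are not fully visible in the hits above, and the wrapper function itself is hypothetical.

    #include <linux/mm.h>
    #include <linux/mm_types.h>
    #include <asm/mmu_context.h>

    /* Illustrative only: pin a user range, translate one address, release it. */
    static long example_preregister_and_translate(struct mm_struct *mm,
    		unsigned long ua, unsigned long entries)
    {
    	struct mm_iommu_table_group_mem_t *mem;
    	unsigned long hpa = 0;
    	long ret;

    	/* Pin @entries pages at @ua and account them as locked memory. */
    	ret = mm_iommu_new(mm, ua, entries, &mem);
    	if (ret)
    		return ret;

    	/* Translate the first userspace address to a host physical address. */
    	ret = mm_iommu_ua_to_hpa(mem, ua, PAGE_SHIFT, &hpa);
    	if (ret)
    		pr_debug("no hpa for ua=%lx\n", ua);

    	/* Drop the reference; the final put unpins and unaccounts the pages. */
    	return mm_iommu_put(mm, mem);
    }

mm_iommu_get() (lines 309-317) would be used instead of mm_iommu_new() when the caller only wants a reference to an already registered region with the same `ua` and `entries`.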