Lines Matching +full:x +full:- +full:size

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018-2020 Broadcom.
 */

#include <linux/dma-mapping.h>

/*
 * Valkyrie has a hardware limitation of 16M transfer size.
 */
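/*
 * Presumably defined right below the limit above (sketch; SZ_16M is the
 * 16 MiB constant from <linux/sizes.h>), since the mapping loop later in
 * this file caps each SG entry at BCM_VK_MAX_SGL_CHUNK.
 */
#define BCM_VK_MAX_SGL_CHUNK SZ_16M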

In bcm_vk_dma_alloc():

	...
	u32 size;
	...
	/* Get 64-bit user address */
	data = get_unaligned(&vkdata->address);

	/* Offset into the first page, and number of pages spanned */
	offset = offset_in_page(data);
	first = (data & PAGE_MASK) >> PAGE_SHIFT;
	last  = ((data + vkdata->size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	dma->nr_pages = last - first + 1;
	/* Allocate the page-pointer array */
	dma->pages = kmalloc_array(dma->nr_pages,
				   sizeof(struct page *),
				   GFP_KERNEL);
	if (!dma->pages)
		return -ENOMEM;
	dev_dbg(dev, "Alloc DMA Pages [0x%llx+0x%x => %d pages]\n",
		data, vkdata->size, dma->nr_pages);

	dma->direction = direction;

	/* Pin the user pages in memory */
	err = get_user_pages_fast(data & PAGE_MASK,
				  dma->nr_pages,
				  direction == DMA_FROM_DEVICE,
				  dma->pages);
	if (err != dma->nr_pages) {
		dma->nr_pages = (err >= 0) ? err : 0;
		dev_err(dev, "get_user_pages_fast, err=%d [%d]\n",
			err, dma->nr_pages);
		return err < 0 ? err : -EINVAL;
	}
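	/*
	 * Note on the error handling above (an observation about this code,
	 * not additional behaviour): pinning fewer pages than requested is
	 * treated as failure; nr_pages is clamped to what was actually
	 * pinned, and the caller sees either the negative errno from
	 * get_user_pages_fast() or -EINVAL for a short pin.
	 */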
	/* Max size of sg list is 1 per mapped page + fields at start */
	dma->sglen = (dma->nr_pages * sizeof(*sgdata)) +
		     (sizeof(u32) * SGLIST_VKDATA_START);
	/* Allocate the SG list in coherent DMA memory */
	dma->sglist = dma_alloc_coherent(dev,
					 dma->sglen,
					 &dma->handle,
					 GFP_KERNEL);
	if (!dma->sglist)
		return -ENOMEM;
	dma->sglist[SGLIST_NUM_SG] = 0;
	dma->sglist[SGLIST_TOTALSIZE] = vkdata->size;
	remaining_size = vkdata->size;
	sgdata = (struct _vk_data *)&dma->sglist[SGLIST_VKDATA_START];
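	/*
	 * Resulting sglist layout (u32 words), as implied by the indexing
	 * above and reused by bcm_vk_dma_free():
	 *
	 *   [SGLIST_NUM_SG]        number of SG entries written below
	 *   [SGLIST_TOTALSIZE]     total transfer size in bytes
	 *   [SGLIST_VKDATA_START]  packed struct _vk_data entries, each a
	 *                          {u32 size, u64 address} pair stored with
	 *                          put_unaligned()
	 */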
	/* Map the first (possibly partial) page */
	size = min_t(size_t, PAGE_SIZE - offset, remaining_size);
	remaining_size -= size;
	sg_addr = dma_map_page(dev,
			       dma->pages[0],
			       offset,
			       size,
			       dma->direction);
	transfer_size = size;
	if (unlikely(dma_mapping_error(dev, sg_addr))) {
		__free_page(dma->pages[0]);
		return -EIO;
	}
	/* Map the remaining whole pages */
	for (i = 1; i < dma->nr_pages; i++) {
		size = min_t(size_t, PAGE_SIZE, remaining_size);
		remaining_size -= size;
		addr = dma_map_page(dev,
				    dma->pages[i],
				    0,
				    size,
				    dma->direction);
		if (unlikely(dma_mapping_error(dev, addr))) {
			__free_page(dma->pages[i]);
			return -EIO;
		}
		/*
		 * Compress the SG list entry when pages are contiguous
		 * and the transfer size is less than or equal to
		 * BCM_VK_MAX_SGL_CHUNK.
		 */
		if ((addr == (sg_addr + transfer_size)) &&
		    ((transfer_size + size) <= BCM_VK_MAX_SGL_CHUNK)) {
			/* contiguous: extend the current entry */
			transfer_size += size;
		} else {
			/* not contiguous: close the entry, start a new one */
			sgdata->size = transfer_size;
			put_unaligned(sg_addr, (u64 *)&sgdata->address);
			dma->sglist[SGLIST_NUM_SG]++;
			sgdata++;
			sg_addr = addr;
			transfer_size = size;
		}
	}
	/* Write the last SG entry */
	sgdata->size = transfer_size;
	put_unaligned(sg_addr, (u64 *)&sgdata->address);
	dma->sglist[SGLIST_NUM_SG]++;
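	/*
	 * Worked example (hypothetical numbers, assuming 4 KiB pages): a
	 * 12 KiB user buffer whose three pages map to consecutive DMA
	 * addresses collapses into one SG entry of size 12288; if the third
	 * page is not adjacent, the first entry closes at 8192 and a second
	 * 4096-byte entry is started.  A single entry never grows beyond
	 * BCM_VK_MAX_SGL_CHUNK, so even a fully contiguous buffer is split
	 * into 16 MiB chunks.
	 */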
	/* Update pointers and size field to point to sglist */
	put_unaligned((u64)dma->handle, &vkdata->address);
	vkdata->size = (dma->sglist[SGLIST_NUM_SG] * sizeof(*sgdata)) +
		       (sizeof(u32) * SGLIST_VKDATA_START);
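	/*
	 * Inference from the two writes above: from this point on, vkdata no
	 * longer describes the user buffer; its address is the coherent DMA
	 * handle of the SG list and its size is the SG list length in bytes.
	 */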
	/* Debug dump of the finished SG list */
	dev_dbg(dev,
		"sgl 0x%llx handle 0x%llx, sglen: 0x%x sgsize: 0x%x\n",
		(u64)dma->sglist,
		dma->handle,
		dma->sglen,
		vkdata->size);
	for (i = 0; i < vkdata->size / sizeof(u32); i++)
		dev_dbg(dev, "i:0x%x 0x%x\n", i, dma->sglist[i]);

In bcm_vk_sg_alloc():

	int rc = -EINVAL;

	/* Convert each user buffer descriptor into a DMA SG list */
	for (i = 0; i < num; i++) {
		if (vkdata[i].size && vkdata[i].address) {
			/* both size and address non-zero: DMA alloc */
			rc = bcm_vk_dma_alloc(dev, &dma[i], dir, &vkdata[i]);
		} else if (vkdata[i].size ||
			   vkdata[i].address) {
			/* exactly one of size and address is zero: error */
			dev_err(dev,
				"Invalid vkdata %x 0x%x 0x%llx\n",
				i, vkdata[i].size, vkdata[i].address);
			rc = -EINVAL;
		} else {
			/* size and address both zero: skip, report success */
			rc = 0;
		}
		...
	}

	/* failure path: unwind descriptors already converted */
	while (i > 0) {
		i--;
		if (dma[i].sglist)
			bcm_vk_dma_free(dev, &dma[i]);
	}
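	/*
	 * Note (inferred from the unwind loop): dma[i].sglist is the
	 * "was converted" marker, so all-zero descriptors that were skipped
	 * are not passed to bcm_vk_dma_free() on the failure path.
	 */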

In bcm_vk_dma_free():

	...
	u32 size;
	...
	dev_dbg(dev, "free sglist=%p sglen=0x%x\n", dma->sglist, dma->sglen);

	/* Unmap all pages in the sglist */
	num_sg = dma->sglist[SGLIST_NUM_SG];
	vkdata = (struct _vk_data *)&dma->sglist[SGLIST_VKDATA_START];
	for (i = 0; i < num_sg; i++) {
		size = vkdata[i].size;
		addr = get_unaligned(&vkdata[i].address);
		dma_unmap_page(dev, addr, size, dma->direction);
	}

	/* Free the coherent SG list buffer */
	dma_free_coherent(dev, dma->sglen, dma->sglist, dma->handle);

	/* Release the pinned user pages, then the page-pointer array */
	for (i = 0; i < dma->nr_pages; i++)
		put_page(dma->pages[i]);

	kfree(dma->pages);
	dma->sglist = NULL;
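	/*
	 * Reading aid: the teardown mirrors bcm_vk_dma_alloc() step by step:
	 *   dma_unmap_page()    releases the mappings set up by dma_map_page()
	 *   dma_free_coherent() undoes dma_alloc_coherent() of the SG list
	 *   put_page()          drops the reference taken when the user
	 *                       pages were pinned
	 *   kfree()             frees the kmalloc_array() page-pointer array
	 */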