Lines matching full:binding

47 void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)  in __net_devmem_dmabuf_binding_free()  argument
51 gen_pool_for_each_chunk(binding->chunk_pool, in __net_devmem_dmabuf_binding_free()
54 size = gen_pool_size(binding->chunk_pool); in __net_devmem_dmabuf_binding_free()
55 avail = gen_pool_avail(binding->chunk_pool); in __net_devmem_dmabuf_binding_free()
59 gen_pool_destroy(binding->chunk_pool); in __net_devmem_dmabuf_binding_free()
61 dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt, in __net_devmem_dmabuf_binding_free()
63 dma_buf_detach(binding->dmabuf, binding->attachment); in __net_devmem_dmabuf_binding_free()
64 dma_buf_put(binding->dmabuf); in __net_devmem_dmabuf_binding_free()
65 xa_destroy(&binding->bound_rxqs); in __net_devmem_dmabuf_binding_free()
66 kfree(binding); in __net_devmem_dmabuf_binding_free()
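
The matches above cover the teardown path: the chunk pool is checked for outstanding allocations and destroyed, the dma-buf attachment is unmapped and detached, the buffer reference is dropped, and the binding itself is freed. These lines appear to come from the kernel's devmem binding code (net/core/devmem.c in recent trees). A minimal sketch of that ordering, assuming a reduced struct that carries only the fields visible in the listing and DMA_FROM_DEVICE as the mapping direction (the demo_ names are illustrative, not the kernel's):

#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct demo_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct gen_pool *chunk_pool;
	struct xarray bound_rxqs;
};

static void demo_binding_free(struct demo_binding *binding)
{
	/* Every page must be back in the pool before gen_pool_destroy(). */
	WARN_ON(gen_pool_size(binding->chunk_pool) !=
		gen_pool_avail(binding->chunk_pool));
	gen_pool_destroy(binding->chunk_pool);

	/* Undo the dma-buf setup in reverse order, then drop the buffer. */
	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
	dma_buf_detach(binding->dmabuf, binding->attachment);
	dma_buf_put(binding->dmabuf);
	xa_destroy(&binding->bound_rxqs);
	kfree(binding);
}
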
70 net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding) in net_devmem_alloc_dmabuf() argument
78 dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE, in net_devmem_alloc_dmabuf()
96 struct net_devmem_dmabuf_binding *binding = net_iov_binding(niov); in net_devmem_free_dmabuf() local
99 if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr, in net_devmem_free_dmabuf()
103 gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE); in net_devmem_free_dmabuf()
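
The net_devmem_alloc_dmabuf() and net_devmem_free_dmabuf() matches show a page-sized allocator layered on the pool: gen_pool_alloc_owner() returns both a DMA address and the owner pointer stored with the chunk it came from, and the free side checks gen_pool_has_addr() before handing the address back. A hedged sketch of that owner-tracking pattern (demo_ helpers are illustrative):

#include <linux/genalloc.h>
#include <linux/mm.h>

/* Allocate one page worth of DMA address space and recover the owner
 * pointer registered for the chunk the address belongs to. */
static unsigned long demo_alloc_page(struct gen_pool *pool, void **owner)
{
	return gen_pool_alloc_owner(pool, PAGE_SIZE, owner);
}

/* Return a page to the pool, refusing addresses the pool never owned. */
static void demo_free_page(struct gen_pool *pool, unsigned long dma_addr)
{
	if (WARN_ON(!gen_pool_has_addr(pool, dma_addr, PAGE_SIZE)))
		return;
	gen_pool_free(pool, dma_addr, PAGE_SIZE);
}
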
106 void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding) in net_devmem_unbind_dmabuf() argument
112 if (binding->list.next) in net_devmem_unbind_dmabuf()
113 list_del(&binding->list); in net_devmem_unbind_dmabuf()
115 xa_for_each(&binding->bound_rxqs, xa_idx, rxq) { in net_devmem_unbind_dmabuf()
116 WARN_ON(rxq->mp_params.mp_priv != binding); in net_devmem_unbind_dmabuf()
122 WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx)); in net_devmem_unbind_dmabuf()
125 xa_erase(&net_devmem_dmabuf_bindings, binding->id); in net_devmem_unbind_dmabuf()
127 net_devmem_dmabuf_binding_put(binding); in net_devmem_unbind_dmabuf()
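
The unbind matches run teardown in stages: the binding is unlinked from its list, every RX queue recorded in bound_rxqs is cleared and restarted, the binding id is erased from the global xarray, and the reference taken at bind time is dropped. The walk-and-erase half of that, sketched with a stub standing in for clearing mp_params and calling netdev_rx_queue_restart():

#include <linux/xarray.h>

/* Hypothetical stand-in for resetting a queue; the real code clears
 * rxq->mp_params.mp_priv and restarts the RX queue. */
static void demo_reset_queue(void *rxq)
{
}

static void demo_teardown_queues(struct xarray *bound_rxqs)
{
	unsigned long idx;
	void *rxq;

	/* Visit every recorded queue, undo its state, then drop the entry;
	 * xa_for_each() is safe against concurrent erasure of entries. */
	xa_for_each(bound_rxqs, idx, rxq) {
		demo_reset_queue(rxq);
		xa_erase(bound_rxqs, idx);
	}
}
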
131 struct net_devmem_dmabuf_binding *binding, in net_devmem_bind_dmabuf_to_queue() argument
156 err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b, in net_devmem_bind_dmabuf_to_queue()
161 rxq->mp_params.mp_priv = binding; in net_devmem_bind_dmabuf_to_queue()
171 xa_erase(&binding->bound_rxqs, xa_idx); in net_devmem_bind_dmabuf_to_queue()
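
Binding to a queue appears to be the install half of the same bookkeeping: xa_alloc() records the queue in bound_rxqs, the queue's mp_params.mp_priv is pointed back at the binding, and xa_erase() unwinds the entry if a later step fails. That install-and-unwind pattern in isolation, with a hypothetical follow-up step:

#include <linux/xarray.h>

static int demo_later_step(void *entry)
{
	return 0;	/* hypothetical follow-up that may fail */
}

static int demo_install(struct xarray *xa, void *entry)
{
	u32 idx;
	int err;

	err = xa_alloc(xa, &idx, entry, xa_limit_32b, GFP_KERNEL);
	if (err)
		return err;

	err = demo_later_step(entry);
	if (err)
		xa_erase(xa, idx);	/* unwind on failure */
	return err;
}
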
180 struct net_devmem_dmabuf_binding *binding; in net_devmem_bind_dmabuf() local
192 binding = kzalloc_node(sizeof(*binding), GFP_KERNEL, in net_devmem_bind_dmabuf()
194 if (!binding) { in net_devmem_bind_dmabuf()
199 binding->dev = dev; in net_devmem_bind_dmabuf()
201 err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id, in net_devmem_bind_dmabuf()
202 binding, xa_limit_32b, &id_alloc_next, in net_devmem_bind_dmabuf()
207 xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC); in net_devmem_bind_dmabuf()
209 refcount_set(&binding->ref, 1); in net_devmem_bind_dmabuf()
211 binding->dmabuf = dmabuf; in net_devmem_bind_dmabuf()
213 binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent); in net_devmem_bind_dmabuf()
214 if (IS_ERR(binding->attachment)) { in net_devmem_bind_dmabuf()
215 err = PTR_ERR(binding->attachment); in net_devmem_bind_dmabuf()
220 binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment, in net_devmem_bind_dmabuf()
222 if (IS_ERR(binding->sgt)) { in net_devmem_bind_dmabuf()
223 err = PTR_ERR(binding->sgt); in net_devmem_bind_dmabuf()
229 * binding can be much more flexible than that. We may be able to in net_devmem_bind_dmabuf()
232 binding->chunk_pool = in net_devmem_bind_dmabuf()
234 if (!binding->chunk_pool) { in net_devmem_bind_dmabuf()
240 for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) { in net_devmem_bind_dmabuf()
256 owner->binding = binding; in net_devmem_bind_dmabuf()
258 err = gen_pool_add_owner(binding->chunk_pool, dma_addr, in net_devmem_bind_dmabuf()
285 return binding; in net_devmem_bind_dmabuf()
288 gen_pool_for_each_chunk(binding->chunk_pool, in net_devmem_bind_dmabuf()
290 gen_pool_destroy(binding->chunk_pool); in net_devmem_bind_dmabuf()
292 dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt, in net_devmem_bind_dmabuf()
295 dma_buf_detach(dmabuf, binding->attachment); in net_devmem_bind_dmabuf()
297 xa_erase(&net_devmem_dmabuf_bindings, binding->id); in net_devmem_bind_dmabuf()
299 kfree(binding); in net_devmem_bind_dmabuf()
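
net_devmem_bind_dmabuf() mirrors the free path in reverse: the binding is allocated near the device's NUMA node, an id is reserved with xa_alloc_cyclic(), the initial reference is set, the dma-buf is attached and mapped, and a gen_pool is populated with one chunk per DMA segment of the resulting scatterlist, with each chunk's owner pointing back at per-chunk state. A hedged sketch of the attach-map-populate core, with error handling compressed and a single opaque owner pointer instead of the kernel's per-chunk owner structs:

#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/scatterlist.h>

static struct gen_pool *demo_pool_from_dmabuf(struct device *dev,
					      struct dma_buf *dmabuf,
					      void *owner)
{
	struct dma_buf_attachment *attach;
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct gen_pool *pool;
	int i;

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach))
		return NULL;

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_FROM_DEVICE);
	if (IS_ERR(sgt))
		goto err_detach;

	/* PAGE_SHIFT-order pool so allocations come back page sized. */
	pool = gen_pool_create(PAGE_SHIFT, dev_to_node(dev));
	if (!pool)
		goto err_unmap;

	/* One pool chunk per DMA segment; the owner ties chunks back to
	 * the binding for later lookups. */
	for_each_sgtable_dma_sg(sgt, sg, i) {
		if (gen_pool_add_owner(pool, sg_dma_address(sg),
				       sg_dma_address(sg), sg_dma_len(sg),
				       dev_to_node(dev), owner))
			goto err_pool;
	}
	return pool;

err_pool:
	gen_pool_destroy(pool);
err_unmap:
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_FROM_DEVICE);
err_detach:
	dma_buf_detach(dmabuf, attach);
	return NULL;
}
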
307 struct net_devmem_dmabuf_binding *binding; in dev_dmabuf_uninstall() local
313 binding = dev->_rx[i].mp_params.mp_priv; in dev_dmabuf_uninstall()
314 if (!binding) in dev_dmabuf_uninstall()
317 xa_for_each(&binding->bound_rxqs, xa_idx, rxq) in dev_dmabuf_uninstall()
319 xa_erase(&binding->bound_rxqs, xa_idx); in dev_dmabuf_uninstall()
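
dev_dmabuf_uninstall() appears to scan the device's RX queues for ones whose memory-provider private pointer is a binding, then drop the matching entry from that binding's bound_rxqs. The find-and-erase-by-value half of that, as a small xarray sketch:

#include <linux/xarray.h>

/* Erase the entry whose payload matches @target, leaving any other
 * entries in place. */
static void demo_erase_matching(struct xarray *xa, void *target)
{
	unsigned long idx;
	void *entry;

	xa_for_each(xa, idx, entry) {
		if (entry == target) {
			xa_erase(xa, idx);
			break;
		}
	}
}
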
329 struct net_devmem_dmabuf_binding *binding = pool->mp_priv; in mp_dmabuf_devmem_init() local
331 if (!binding) in mp_dmabuf_devmem_init()
343 net_devmem_dmabuf_binding_get(binding); in mp_dmabuf_devmem_init()
349 struct net_devmem_dmabuf_binding *binding = pool->mp_priv; in mp_dmabuf_devmem_alloc_netmems() local
353 niov = net_devmem_alloc_dmabuf(binding); in mp_dmabuf_devmem_alloc_netmems()
368 struct net_devmem_dmabuf_binding *binding = pool->mp_priv; in mp_dmabuf_devmem_destroy() local
370 net_devmem_dmabuf_binding_put(binding); in mp_dmabuf_devmem_destroy()
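
The memory-provider matches pair the binding's lifetime with the page pool: mp_dmabuf_devmem_init() takes a reference on the binding, mp_dmabuf_devmem_alloc_netmems() allocates from it, and mp_dmabuf_devmem_destroy() drops the reference, which together with the put in unbind eventually funnels into the free routine at the top of this listing. The get/put pairing, sketched with refcount_t and illustrative names:

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_refcounted {
	refcount_t ref;
	/* ... payload ... */
};

static void demo_get(struct demo_refcounted *obj)
{
	refcount_inc(&obj->ref);
}

static void demo_put(struct demo_refcounted *obj)
{
	/* The last reference frees the object, much as the binding put
	 * appears to hand off to __net_devmem_dmabuf_binding_free(). */
	if (refcount_dec_and_test(&obj->ref))
		kfree(obj);
}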