Lines Matching full:binding
55 void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding) in __net_devmem_dmabuf_binding_free() argument
59 gen_pool_for_each_chunk(binding->chunk_pool, in __net_devmem_dmabuf_binding_free()
62 size = gen_pool_size(binding->chunk_pool); in __net_devmem_dmabuf_binding_free()
63 avail = gen_pool_avail(binding->chunk_pool); in __net_devmem_dmabuf_binding_free()
67 gen_pool_destroy(binding->chunk_pool); in __net_devmem_dmabuf_binding_free()
69 dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt, in __net_devmem_dmabuf_binding_free()
71 dma_buf_detach(binding->dmabuf, binding->attachment); in __net_devmem_dmabuf_binding_free()
72 dma_buf_put(binding->dmabuf); in __net_devmem_dmabuf_binding_free()
73 xa_destroy(&binding->bound_rxqs); in __net_devmem_dmabuf_binding_free()
74 kfree(binding); in __net_devmem_dmabuf_binding_free()
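
The __net_devmem_dmabuf_binding_free() matches above trace the final teardown: the genalloc pool built over the mapping is destroyed (after a walk with gen_pool_for_each_chunk() and reads of gen_pool_size()/gen_pool_avail(), which look like a leak check), the dma-buf attachment is unmapped and detached, the dma-buf reference is dropped, the bound_rxqs xarray is destroyed, and the binding is freed. A minimal sketch of that reverse-order unwind, using an illustrative my_dmabuf_import struct rather than the kernel's net_devmem_dmabuf_binding, and omitting the chunk walk and xa_destroy() step:

/* Hedged sketch: reverse-order teardown of a dma-buf import.
 * Struct and helper names are illustrative, not the kernel's. */
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/genalloc.h>
#include <linux/slab.h>

struct my_dmabuf_import {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct gen_pool *chunk_pool;
};

void my_dmabuf_import_free(struct my_dmabuf_import *imp)
{
	/* no allocations may be outstanding once we get here */
	gen_pool_destroy(imp->chunk_pool);

	/* undo dma_buf_map_attachment_unlocked(); the DMA direction
	 * is an assumption, it is not visible in the matches above */
	dma_buf_unmap_attachment_unlocked(imp->attachment, imp->sgt,
					  DMA_FROM_DEVICE);
	/* undo dma_buf_attach() */
	dma_buf_detach(imp->dmabuf, imp->attachment);
	/* drop the dma-buf reference held since bind time */
	dma_buf_put(imp->dmabuf);
	kfree(imp);
}
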
78 net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding) in net_devmem_alloc_dmabuf() argument
86 dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE, in net_devmem_alloc_dmabuf()
104 struct net_devmem_dmabuf_binding *binding = net_devmem_iov_binding(niov); in net_devmem_free_dmabuf() local
107 if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr, in net_devmem_free_dmabuf()
111 gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE); in net_devmem_free_dmabuf()
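
net_devmem_alloc_dmabuf() and net_devmem_free_dmabuf() above carve PAGE_SIZE slices out of the per-binding genalloc pool and hand them back, with gen_pool_has_addr() guarding against frees of addresses the pool never issued. A minimal sketch of that pairing, assuming a pool already populated (see the bind-path sketch further below); dmabuf_chunk_owner is an illustrative stand-in for whatever owner structure the real code registers with gen_pool_add_owner():

/* Hedged sketch of the genalloc alloc/free pairing seen above. */
#include <linux/bug.h>
#include <linux/genalloc.h>
#include <linux/mm.h>

struct dmabuf_chunk_owner;	/* illustrative, opaque here */

unsigned long chunk_alloc_one_page(struct gen_pool *pool,
				   struct dmabuf_chunk_owner **ownerp)
{
	/* returns 0 on failure and reports which chunk the slice came from */
	return gen_pool_alloc_owner(pool, PAGE_SIZE, (void **)ownerp);
}

void chunk_free_one_page(struct gen_pool *pool, unsigned long dma_addr)
{
	/* refuse to free an address this pool never handed out */
	if (WARN_ON(!gen_pool_has_addr(pool, dma_addr, PAGE_SIZE)))
		return;

	gen_pool_free(pool, dma_addr, PAGE_SIZE);
}
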
114 void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding) in net_devmem_unbind_dmabuf() argument
120 if (binding->list.next) in net_devmem_unbind_dmabuf()
121 list_del(&binding->list); in net_devmem_unbind_dmabuf()
123 xa_for_each(&binding->bound_rxqs, xa_idx, rxq) { in net_devmem_unbind_dmabuf()
125 .mp_priv = binding, in net_devmem_unbind_dmabuf()
131 __net_mp_close_rxq(binding->dev, rxq_idx, &mp_params); in net_devmem_unbind_dmabuf()
134 xa_erase(&net_devmem_dmabuf_bindings, binding->id); in net_devmem_unbind_dmabuf()
136 net_devmem_dmabuf_binding_put(binding); in net_devmem_unbind_dmabuf()
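
The net_devmem_unbind_dmabuf() matches show the unwind of a live binding: drop it from a list if it was ever queued (the list.next check), walk the bound_rxqs xarray closing each queue via __net_mp_close_rxq(), erase the binding from the global net_devmem_dmabuf_bindings table, and put the reference that table held. A minimal sketch of that shape, with illustrative names and the per-queue close reduced to a declared helper:

/* Hedged sketch of the unbind flow; all names are illustrative. */
#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/xarray.h>

struct my_binding {
	struct xarray bound_rxqs;	/* index -> bound rx queue */
	refcount_t ref;
	u32 id;
};

void my_binding_free(struct my_binding *b);	/* final teardown, illustrative */
void my_rxq_restore(void *rxq);			/* give the queue back, illustrative */

void my_binding_unbind(struct xarray *all_bindings, struct my_binding *b)
{
	unsigned long idx;
	void *rxq;

	/* restore every rx queue still attached to this binding */
	xa_for_each(&b->bound_rxqs, idx, rxq)
		my_rxq_restore(rxq);

	/* make the binding unreachable by id, then drop that table's ref */
	xa_erase(all_bindings, b->id);

	if (refcount_dec_and_test(&b->ref))
		my_binding_free(b);
}
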
140 struct net_devmem_dmabuf_binding *binding, in net_devmem_bind_dmabuf_to_queue() argument
144 .mp_priv = binding, in net_devmem_bind_dmabuf_to_queue()
156 err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b, in net_devmem_bind_dmabuf_to_queue()
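
net_devmem_bind_dmabuf_to_queue() records each newly bound rx queue in the binding's bound_rxqs xarray with xa_alloc() and xa_limit_32b, letting the xarray pick the index. A minimal sketch of that tracking step; the gfp flag is an assumption, since it is not visible in the match above:

/* Hedged sketch: store a bound rx queue at an xarray-chosen index. */
#include <linux/gfp.h>
#include <linux/xarray.h>

int my_track_rxq(struct xarray *bound_rxqs, void *rxq)
{
	u32 idx;

	/* assumes bound_rxqs was set up with xa_init_flags(..., XA_FLAGS_ALLOC) */
	return xa_alloc(bound_rxqs, &idx, rxq, xa_limit_32b, GFP_KERNEL);
}
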
172 struct net_devmem_dmabuf_binding *binding; in net_devmem_bind_dmabuf() local
184 binding = kzalloc_node(sizeof(*binding), GFP_KERNEL, in net_devmem_bind_dmabuf()
186 if (!binding) { in net_devmem_bind_dmabuf()
191 binding->dev = dev; in net_devmem_bind_dmabuf()
193 err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id, in net_devmem_bind_dmabuf()
194 binding, xa_limit_32b, &id_alloc_next, in net_devmem_bind_dmabuf()
199 xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC); in net_devmem_bind_dmabuf()
201 refcount_set(&binding->ref, 1); in net_devmem_bind_dmabuf()
203 mutex_init(&binding->lock); in net_devmem_bind_dmabuf()
205 binding->dmabuf = dmabuf; in net_devmem_bind_dmabuf()
207 binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent); in net_devmem_bind_dmabuf()
208 if (IS_ERR(binding->attachment)) { in net_devmem_bind_dmabuf()
209 err = PTR_ERR(binding->attachment); in net_devmem_bind_dmabuf()
214 binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment, in net_devmem_bind_dmabuf()
216 if (IS_ERR(binding->sgt)) { in net_devmem_bind_dmabuf()
217 err = PTR_ERR(binding->sgt); in net_devmem_bind_dmabuf()
223 * binding can be much more flexible than that. We may be able to in net_devmem_bind_dmabuf()
226 binding->chunk_pool = in net_devmem_bind_dmabuf()
228 if (!binding->chunk_pool) { in net_devmem_bind_dmabuf()
234 for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) { in net_devmem_bind_dmabuf()
250 owner->binding = binding; in net_devmem_bind_dmabuf()
252 err = gen_pool_add_owner(binding->chunk_pool, dma_addr, in net_devmem_bind_dmabuf()
279 return binding; in net_devmem_bind_dmabuf()
282 gen_pool_for_each_chunk(binding->chunk_pool, in net_devmem_bind_dmabuf()
284 gen_pool_destroy(binding->chunk_pool); in net_devmem_bind_dmabuf()
286 dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt, in net_devmem_bind_dmabuf()
289 dma_buf_detach(dmabuf, binding->attachment); in net_devmem_bind_dmabuf()
291 xa_erase(&net_devmem_dmabuf_bindings, binding->id); in net_devmem_bind_dmabuf()
293 kfree(binding); in net_devmem_bind_dmabuf()
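
The net_devmem_bind_dmabuf() matches imply the setup order and its error unwind: allocate the binding, attach to the dma-buf, map the attachment to get a DMA-mapped scatterlist, build a genalloc pool over the mapped ranges, and on failure undo each step in reverse. A condensed sketch of that order with illustrative names; the real code also allocates the binding NUMA-aware with kzalloc_node(), assigns an id via xa_alloc_cyclic() into the global table, and initializes the rxq xarray, refcount and mutex, all of which this sketch leaves out. It also uses gen_pool_add_owner() with a per-chunk owner, where the sketch uses plain gen_pool_add(); the DMA direction is likewise an assumption:

/* Hedged sketch: importer-side setup with goto-based unwind. */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct my_dmabuf_import {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct gen_pool *chunk_pool;
};

struct my_dmabuf_import *my_dmabuf_import_create(struct device *dev,
						 struct dma_buf *dmabuf)
{
	struct my_dmabuf_import *imp;
	struct scatterlist *sg;
	unsigned int i;
	int err;

	imp = kzalloc(sizeof(*imp), GFP_KERNEL);
	if (!imp)
		return ERR_PTR(-ENOMEM);
	imp->dmabuf = dmabuf;

	imp->attachment = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(imp->attachment)) {
		err = PTR_ERR(imp->attachment);
		goto err_free;
	}

	imp->sgt = dma_buf_map_attachment_unlocked(imp->attachment,
						   DMA_FROM_DEVICE);
	if (IS_ERR(imp->sgt)) {
		err = PTR_ERR(imp->sgt);
		goto err_detach;
	}

	/* hand out page-sized slices of the mapped device address space */
	imp->chunk_pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
	if (!imp->chunk_pool) {
		err = -ENOMEM;
		goto err_unmap;
	}

	/* seed the pool with every DMA-mapped range of the dma-buf */
	for_each_sgtable_dma_sg(imp->sgt, sg, i) {
		err = gen_pool_add(imp->chunk_pool, sg_dma_address(sg),
				   sg_dma_len(sg), NUMA_NO_NODE);
		if (err)
			goto err_pool;
	}

	return imp;

err_pool:
	gen_pool_destroy(imp->chunk_pool);
err_unmap:
	dma_buf_unmap_attachment_unlocked(imp->attachment, imp->sgt,
					  DMA_FROM_DEVICE);
err_detach:
	dma_buf_detach(dmabuf, imp->attachment);
err_free:
	kfree(imp);
	return ERR_PTR(err);
}
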
303 struct net_devmem_dmabuf_binding *binding = pool->mp_priv; in mp_dmabuf_devmem_init() local
305 if (!binding) in mp_dmabuf_devmem_init()
317 net_devmem_dmabuf_binding_get(binding); in mp_dmabuf_devmem_init()
323 struct net_devmem_dmabuf_binding *binding = pool->mp_priv; in mp_dmabuf_devmem_alloc_netmems() local
327 niov = net_devmem_alloc_dmabuf(binding); in mp_dmabuf_devmem_alloc_netmems()
342 struct net_devmem_dmabuf_binding *binding = pool->mp_priv; in mp_dmabuf_devmem_destroy() local
344 net_devmem_dmabuf_binding_put(binding); in mp_dmabuf_devmem_destroy()
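
The mp_dmabuf_devmem_init()/mp_dmabuf_devmem_destroy() matches pair a net_devmem_dmabuf_binding_get() at page_pool init with a put at destroy, suggesting that each pool installing the provider pins the binding for its own lifetime. A minimal sketch of that get/put pairing over a refcount_t, with illustrative names:

/* Hedged sketch of the per-pool reference pairing implied above. */
#include <linux/refcount.h>

struct my_mp_binding {
	refcount_t ref;
};

void my_mp_binding_release(struct my_mp_binding *b);	/* final free, illustrative */

static inline void my_mp_binding_get(struct my_mp_binding *b)
{
	refcount_inc(&b->ref);	/* pool starts using the binding */
}

static inline void my_mp_binding_put(struct my_mp_binding *b)
{
	if (refcount_dec_and_test(&b->ref))	/* pool is done with it */
		my_mp_binding_release(b);
}
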
368 const struct net_devmem_dmabuf_binding *binding = mp_priv; in mp_dmabuf_devmem_nl_fill() local
371 return nla_put_u32(rsp, type, binding->id); in mp_dmabuf_devmem_nl_fill()
377 struct net_devmem_dmabuf_binding *binding = mp_priv; in mp_dmabuf_devmem_uninstall() local
381 xa_for_each(&binding->bound_rxqs, xa_idx, bound_rxq) { in mp_dmabuf_devmem_uninstall()
383 xa_erase(&binding->bound_rxqs, xa_idx); in mp_dmabuf_devmem_uninstall()
384 if (xa_empty(&binding->bound_rxqs)) { in mp_dmabuf_devmem_uninstall()
385 mutex_lock(&binding->lock); in mp_dmabuf_devmem_uninstall()
386 binding->dev = NULL; in mp_dmabuf_devmem_uninstall()
387 mutex_unlock(&binding->lock); in mp_dmabuf_devmem_uninstall()
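
The mp_dmabuf_devmem_uninstall() matches suggest the departing-queue path: drop the queue's entry from bound_rxqs, and once no bound queues remain, clear the binding's device pointer under its mutex so later teardown does not touch a device that is gone. A minimal sketch of that step, with illustrative names and the binding assumed already initialized:

/* Hedged sketch: last-queue handling when a provider is uninstalled. */
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/xarray.h>

struct my_binding {
	struct xarray bound_rxqs;
	struct mutex lock;
	struct net_device *dev;
};

void my_binding_queue_gone(struct my_binding *b, unsigned long xa_idx)
{
	xa_erase(&b->bound_rxqs, xa_idx);

	/* last bound queue gone: detach the binding from the device */
	if (xa_empty(&b->bound_rxqs)) {
		mutex_lock(&b->lock);
		b->dev = NULL;
		mutex_unlock(&b->lock);
	}
}
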