Lines matching refs: binding. Each entry gives the source line number, the matching line of code, and the enclosing function; declaration sites are additionally tagged local or argument.
54 struct net_devmem_dmabuf_binding *binding = in net_devmem_dmabuf_binding_release() local
57 INIT_WORK(&binding->unbind_w, __net_devmem_dmabuf_binding_free); in net_devmem_dmabuf_binding_release()
58 schedule_work(&binding->unbind_w); in net_devmem_dmabuf_binding_release()
63 struct net_devmem_dmabuf_binding *binding = container_of(wq, typeof(*binding), unbind_w); in __net_devmem_dmabuf_binding_free() local
67 gen_pool_for_each_chunk(binding->chunk_pool, in __net_devmem_dmabuf_binding_free()
70 size = gen_pool_size(binding->chunk_pool); in __net_devmem_dmabuf_binding_free()
71 avail = gen_pool_avail(binding->chunk_pool); in __net_devmem_dmabuf_binding_free()
75 gen_pool_destroy(binding->chunk_pool); in __net_devmem_dmabuf_binding_free()
77 dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt, in __net_devmem_dmabuf_binding_free()
78 binding->direction); in __net_devmem_dmabuf_binding_free()
79 dma_buf_detach(binding->dmabuf, binding->attachment); in __net_devmem_dmabuf_binding_free()
80 dma_buf_put(binding->dmabuf); in __net_devmem_dmabuf_binding_free()
81 xa_destroy(&binding->bound_rxqs); in __net_devmem_dmabuf_binding_free()
82 percpu_ref_exit(&binding->ref); in __net_devmem_dmabuf_binding_free()
83 kvfree(binding->tx_vec); in __net_devmem_dmabuf_binding_free()
84 kfree(binding); in __net_devmem_dmabuf_binding_free()
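
The two groups above are the teardown path: a percpu_ref release callback can fire from a context that must not sleep, so it only queues unbind_w, and the work handler is what actually tears down the gen_pool, the dma-buf attachment and the binding itself. Below is a minimal sketch of that pattern, not the verbatim devmem.c code; the struct is reduced to the fields the listing shows, and the per-chunk owner and tx_vec cleanup are only noted in comments.

    #include <linux/container_of.h>
    #include <linux/dma-buf.h>
    #include <linux/dma-direction.h>
    #include <linux/genalloc.h>
    #include <linux/percpu-refcount.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>
    #include <linux/xarray.h>

    /* reduced stand-in for struct net_devmem_dmabuf_binding */
    struct binding_like {
        struct percpu_ref ref;          /* last put triggers the release callback */
        struct work_struct unbind_w;    /* deferred teardown */
        struct gen_pool *chunk_pool;    /* PAGE_SIZE slices of the mapped dma-buf */
        struct dma_buf *dmabuf;
        struct dma_buf_attachment *attachment;
        struct sg_table *sgt;
        enum dma_data_direction direction;
        struct xarray bound_rxqs;       /* rx queues currently using this binding */
        u32 id;                         /* slot in the global bindings xarray */
    };

    static void binding_free_work(struct work_struct *wq)
    {
        struct binding_like *b = container_of(wq, typeof(*b), unbind_w);

        /* process context, so sleeping teardown is fine here; devmem.c also
         * walks the pool with gen_pool_for_each_chunk() to free per-chunk
         * owner structures and kvfree()s the tx_vec lookup array */
        gen_pool_destroy(b->chunk_pool);
        dma_buf_unmap_attachment_unlocked(b->attachment, b->sgt, b->direction);
        dma_buf_detach(b->dmabuf, b->attachment);
        dma_buf_put(b->dmabuf);
        xa_destroy(&b->bound_rxqs);
        percpu_ref_exit(&b->ref);
        kfree(b);
    }

    static void binding_release(struct percpu_ref *ref)
    {
        struct binding_like *b = container_of(ref, typeof(*b), ref);

        /* the refcount release callback must not sleep, so defer the real work */
        INIT_WORK(&b->unbind_w, binding_free_work);
        schedule_work(&b->unbind_w);
    }
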
88 net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding) in net_devmem_alloc_dmabuf() argument
96 dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE, in net_devmem_alloc_dmabuf()
114 struct net_devmem_dmabuf_binding *binding = net_devmem_iov_binding(niov); in net_devmem_free_dmabuf() local
117 if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr, in net_devmem_free_dmabuf()
121 gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE); in net_devmem_free_dmabuf()
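
net_devmem_alloc_dmabuf() and net_devmem_free_dmabuf() hand out and reclaim single PAGE_SIZE slices of that pool; gen_pool_alloc_owner() also returns the owner cookie registered for the chunk the address came from, which is how the code gets back to per-chunk metadata. A small sketch of the same pattern on the binding_like pool from the sketch above (the helper names here are made up):

    /* carve one PAGE_SIZE slice out of the mapped dma-buf; *owner receives the
     * cookie that was passed to gen_pool_add_owner() for the containing chunk */
    static unsigned long slice_alloc(struct binding_like *b, void **owner)
    {
        return gen_pool_alloc_owner(b->chunk_pool, PAGE_SIZE, owner);
    }

    static void slice_free(struct binding_like *b, unsigned long dma_addr)
    {
        /* freeing an address the pool never handed out indicates a bug */
        if (WARN_ON(!gen_pool_has_addr(b->chunk_pool, dma_addr, PAGE_SIZE)))
            return;
        gen_pool_free(b->chunk_pool, dma_addr, PAGE_SIZE);
    }
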
124 void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding) in net_devmem_unbind_dmabuf() argument
130 xa_erase(&net_devmem_dmabuf_bindings, binding->id); in net_devmem_unbind_dmabuf()
137 if (binding->list.next) in net_devmem_unbind_dmabuf()
138 list_del(&binding->list); in net_devmem_unbind_dmabuf()
140 xa_for_each(&binding->bound_rxqs, xa_idx, rxq) { in net_devmem_unbind_dmabuf()
142 .mp_priv = binding, in net_devmem_unbind_dmabuf()
148 __net_mp_close_rxq(binding->dev, rxq_idx, &mp_params); in net_devmem_unbind_dmabuf()
151 percpu_ref_kill(&binding->ref); in net_devmem_unbind_dmabuf()
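
net_devmem_unbind_dmabuf() is the explicit unbind path: it unpublishes the id, drops the binding from its owner's list, restarts every rx queue still bound to it via __net_mp_close_rxq(), and only then kills the percpu_ref, so the deferred free above runs once the last outstanding reference is gone. Roughly, as a sketch with list handling omitted and the queue restart reduced to a comment:

    static void unbind_like(struct xarray *all_bindings, struct binding_like *b)
    {
        unsigned long idx;
        void *rxq;

        /* once erased, lookup-by-id can no longer hand out new references */
        xa_erase(all_bindings, b->id);

        /* devmem.c restarts each bound queue via __net_mp_close_rxq() here,
         * which drops the queue's use of the memory provider */
        xa_for_each(&b->bound_rxqs, idx, rxq)
            xa_erase(&b->bound_rxqs, idx);

        /* drop the initial reference; binding_release() fires at refcount zero */
        percpu_ref_kill(&b->ref);
    }
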
155 struct net_devmem_dmabuf_binding *binding, in net_devmem_bind_dmabuf_to_queue() argument
159 .mp_priv = binding, in net_devmem_bind_dmabuf_to_queue()
171 err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b, in net_devmem_bind_dmabuf_to_queue()
190 struct net_devmem_dmabuf_binding *binding; in net_devmem_bind_dmabuf() local
207 binding = kzalloc_node(sizeof(*binding), GFP_KERNEL, in net_devmem_bind_dmabuf()
209 if (!binding) { in net_devmem_bind_dmabuf()
214 binding->dev = dev; in net_devmem_bind_dmabuf()
215 xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC); in net_devmem_bind_dmabuf()
217 err = percpu_ref_init(&binding->ref, in net_devmem_bind_dmabuf()
223 mutex_init(&binding->lock); in net_devmem_bind_dmabuf()
225 binding->dmabuf = dmabuf; in net_devmem_bind_dmabuf()
226 binding->direction = direction; in net_devmem_bind_dmabuf()
228 binding->attachment = dma_buf_attach(binding->dmabuf, dma_dev); in net_devmem_bind_dmabuf()
229 if (IS_ERR(binding->attachment)) { in net_devmem_bind_dmabuf()
230 err = PTR_ERR(binding->attachment); in net_devmem_bind_dmabuf()
235 binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment, in net_devmem_bind_dmabuf()
237 if (IS_ERR(binding->sgt)) { in net_devmem_bind_dmabuf()
238 err = PTR_ERR(binding->sgt); in net_devmem_bind_dmabuf()
244 binding->tx_vec = kvmalloc_objs(struct net_iov *, in net_devmem_bind_dmabuf()
246 if (!binding->tx_vec) { in net_devmem_bind_dmabuf()
256 binding->chunk_pool = gen_pool_create(PAGE_SHIFT, in net_devmem_bind_dmabuf()
258 if (!binding->chunk_pool) { in net_devmem_bind_dmabuf()
264 for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) { in net_devmem_bind_dmabuf()
280 owner->binding = binding; in net_devmem_bind_dmabuf()
282 err = gen_pool_add_owner(binding->chunk_pool, dma_addr, in net_devmem_bind_dmabuf()
305 binding->tx_vec[owner->area.base_virtual / PAGE_SIZE + i] = niov; in net_devmem_bind_dmabuf()
311 err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id, in net_devmem_bind_dmabuf()
312 binding, xa_limit_32b, &id_alloc_next, in net_devmem_bind_dmabuf()
317 list_add(&binding->list, &priv->bindings); in net_devmem_bind_dmabuf()
319 return binding; in net_devmem_bind_dmabuf()
322 gen_pool_for_each_chunk(binding->chunk_pool, in net_devmem_bind_dmabuf()
324 gen_pool_destroy(binding->chunk_pool); in net_devmem_bind_dmabuf()
326 kvfree(binding->tx_vec); in net_devmem_bind_dmabuf()
328 dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt, in net_devmem_bind_dmabuf()
331 dma_buf_detach(dmabuf, binding->attachment); in net_devmem_bind_dmabuf()
333 percpu_ref_exit(&binding->ref); in net_devmem_bind_dmabuf()
335 kfree(binding); in net_devmem_bind_dmabuf()
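
net_devmem_bind_dmabuf() is the constructor, and the references after the "return binding" line (319 onward) are its goto-style unwind, releasing resources in reverse order of acquisition. A compressed sketch of that acquire/publish/unwind shape, reusing binding_like and binding_release from the first sketch; chunk seeding and the tx_vec array are only noted in comments, and the names are illustrative, not the kernel's:

    static struct binding_like *bind_like(struct dma_buf *dmabuf,
                                          struct device *dma_dev,
                                          enum dma_data_direction direction,
                                          struct xarray *all_bindings)
    {
        static u32 id_next;
        struct binding_like *b;
        int err;

        b = kzalloc(sizeof(*b), GFP_KERNEL);
        if (!b)
            return ERR_PTR(-ENOMEM);

        xa_init_flags(&b->bound_rxqs, XA_FLAGS_ALLOC);
        err = percpu_ref_init(&b->ref, binding_release, 0, GFP_KERNEL);
        if (err)
            goto err_free;

        b->dmabuf = dmabuf;
        b->direction = direction;

        b->attachment = dma_buf_attach(b->dmabuf, dma_dev);
        if (IS_ERR(b->attachment)) {
            err = PTR_ERR(b->attachment);
            goto err_ref_exit;
        }

        b->sgt = dma_buf_map_attachment_unlocked(b->attachment, direction);
        if (IS_ERR(b->sgt)) {
            err = PTR_ERR(b->sgt);
            goto err_detach;
        }

        b->chunk_pool = gen_pool_create(PAGE_SHIFT, dev_to_node(dma_dev));
        if (!b->chunk_pool) {
            err = -ENOMEM;
            goto err_unmap;
        }
        /* devmem.c now walks the mapping with for_each_sgtable_dma_sg(),
         * feeds each DMA segment to gen_pool_add_owner(), and fills a
         * virtual-offset to net_iov lookup array (tx_vec) */

        /* publish: from here on the binding is visible to lookup-by-id */
        err = xa_alloc_cyclic(all_bindings, &b->id, b, xa_limit_32b,
                              &id_next, GFP_KERNEL);
        if (err < 0)
            goto err_pool;

        return b;

    err_pool:
        gen_pool_destroy(b->chunk_pool);
    err_unmap:
        dma_buf_unmap_attachment_unlocked(b->attachment, b->sgt, direction);
    err_detach:
        dma_buf_detach(dmabuf, b->attachment);
    err_ref_exit:
        percpu_ref_exit(&b->ref);
    err_free:
        kfree(b);
        return ERR_PTR(err);
    }
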
343 struct net_devmem_dmabuf_binding *binding; in net_devmem_lookup_dmabuf() local
346 binding = xa_load(&net_devmem_dmabuf_bindings, id); in net_devmem_lookup_dmabuf()
347 if (binding) { in net_devmem_lookup_dmabuf()
348 if (!net_devmem_dmabuf_binding_get(binding)) in net_devmem_lookup_dmabuf()
349 binding = NULL; in net_devmem_lookup_dmabuf()
353 return binding; in net_devmem_lookup_dmabuf()
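
net_devmem_lookup_dmabuf() turns a user-supplied dmabuf id back into a binding and only returns it if it can still take a reference, so a binding that is concurrently being unbound simply looks absent. A sketch of that xa_load-plus-tryget shape; treating the get helper as percpu_ref_tryget() and guarding the lookup with the xarray's own lock are assumptions here, since the listing shows neither:

    static struct binding_like *lookup_binding(struct xarray *all_bindings, u32 id)
    {
        struct binding_like *b;

        xa_lock(all_bindings);  /* assumption: serialize against unbind's xa_erase() */
        b = xa_load(all_bindings, id);
        if (b && !percpu_ref_tryget(&b->ref))
            b = NULL;           /* refcount already hit zero: binding is being freed */
        xa_unlock(all_bindings);

        return b;
    }
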
369 struct net_devmem_dmabuf_binding *binding; in net_devmem_get_binding() local
374 binding = net_devmem_lookup_dmabuf(dmabuf_id); in net_devmem_get_binding()
375 if (!binding || !binding->tx_vec) { in net_devmem_get_binding()
400 unlikely(dst_dev != READ_ONCE(binding->dev))) { in net_devmem_get_binding()
406 return binding; in net_devmem_get_binding()
411 if (binding) in net_devmem_get_binding()
412 net_devmem_dmabuf_binding_put(binding); in net_devmem_get_binding()
418 net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, in net_devmem_get_niov_at() argument
421 if (virt_addr >= binding->dmabuf->size) in net_devmem_get_niov_at()
427 return binding->tx_vec[virt_addr / PAGE_SIZE]; in net_devmem_get_niov_at()
434 struct net_devmem_dmabuf_binding *binding = pool->mp_priv; in mp_dmabuf_devmem_init() local
436 if (!binding) in mp_dmabuf_devmem_init()
448 net_devmem_dmabuf_binding_get(binding); in mp_dmabuf_devmem_init()
454 struct net_devmem_dmabuf_binding *binding = pool->mp_priv; in mp_dmabuf_devmem_alloc_netmems() local
458 niov = net_devmem_alloc_dmabuf(binding); in mp_dmabuf_devmem_alloc_netmems()
473 struct net_devmem_dmabuf_binding *binding = pool->mp_priv; in mp_dmabuf_devmem_destroy() local
475 net_devmem_dmabuf_binding_put(binding); in mp_dmabuf_devmem_destroy()
499 const struct net_devmem_dmabuf_binding *binding = mp_priv; in mp_dmabuf_devmem_nl_fill() local
502 return nla_put_u32(rsp, type, binding->id); in mp_dmabuf_devmem_nl_fill()
508 struct net_devmem_dmabuf_binding *binding = mp_priv; in mp_dmabuf_devmem_uninstall() local
512 xa_for_each(&binding->bound_rxqs, xa_idx, bound_rxq) { in mp_dmabuf_devmem_uninstall()
514 xa_erase(&binding->bound_rxqs, xa_idx); in mp_dmabuf_devmem_uninstall()
515 if (xa_empty(&binding->bound_rxqs)) { in mp_dmabuf_devmem_uninstall()
516 mutex_lock(&binding->lock); in mp_dmabuf_devmem_uninstall()
517 ASSERT_EXCLUSIVE_WRITER(binding->dev); in mp_dmabuf_devmem_uninstall()
518 WRITE_ONCE(binding->dev, NULL); in mp_dmabuf_devmem_uninstall()
519 mutex_unlock(&binding->lock); in mp_dmabuf_devmem_uninstall()