Lines matching refs: q (net/xdp/xsk_queue.h)
120 static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr) in __xskq_cons_read_addr_unchecked() argument
122 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; in __xskq_cons_read_addr_unchecked()
123 u32 idx = cached_cons & q->ring_mask; in __xskq_cons_read_addr_unchecked()
128 static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr) in xskq_cons_read_addr_unchecked() argument
130 if (q->cached_cons != q->cached_prod) { in xskq_cons_read_addr_unchecked()
131 __xskq_cons_read_addr_unchecked(q, q->cached_cons, addr); in xskq_cons_read_addr_unchecked()
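The two address-read helpers above (source lines 120-131) copy a 64-bit UMEM address out of a fill/completion ring slot selected by cached_cons & ring_mask, and only when the locally cached consumer and producer counters differ. A minimal userspace sketch of that indexing follows; mock_queue and main() are invented for illustration and are not the kernel's struct xsk_queue:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for struct xsk_queue: a power-of-two ring of u64
     * addresses plus free-running cached producer/consumer counters. */
    struct mock_queue {
            uint32_t cached_prod;
            uint32_t cached_cons;
            uint32_t ring_mask;        /* nentries - 1 */
            uint64_t ring[8];
    };

    /* Mirrors __xskq_cons_read_addr_unchecked(): the counter is masked, never
     * reset, so wraparound is handled by the mask alone. */
    static void read_addr_unchecked(struct mock_queue *q, uint32_t cached_cons,
                                    uint64_t *addr)
    {
            uint32_t idx = cached_cons & q->ring_mask;

            *addr = q->ring[idx];
    }

    /* Mirrors xskq_cons_read_addr_unchecked(): only read if the local cache
     * says at least one entry is outstanding. */
    static bool cons_read_addr_unchecked(struct mock_queue *q, uint64_t *addr)
    {
            if (q->cached_cons != q->cached_prod) {
                    read_addr_unchecked(q, q->cached_cons, addr);
                    return true;
            }
            return false;
    }

    int main(void)
    {
            struct mock_queue q = { .cached_prod = 1, .ring_mask = 7 };
            uint64_t addr;

            q.ring[0] = 0x1000;
            if (cons_read_addr_unchecked(&q, &addr))
                    printf("addr 0x%llx\n", (unsigned long long)addr);
            return 0;
    }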
217 static inline bool xskq_has_descs(struct xsk_queue *q) in xskq_has_descs() argument
219 return q->cached_cons != q->cached_prod; in xskq_has_descs()
222 static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q, in xskq_cons_is_valid_desc() argument
227 q->invalid_descs++; in xskq_cons_is_valid_desc()
233 static inline bool xskq_cons_read_desc(struct xsk_queue *q, in xskq_cons_read_desc() argument
237 if (q->cached_cons != q->cached_prod) { in xskq_cons_read_desc()
238 struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; in xskq_cons_read_desc()
239 u32 idx = q->cached_cons & q->ring_mask; in xskq_cons_read_desc()
242 return xskq_cons_is_valid_desc(q, desc, pool); in xskq_cons_read_desc()
245 q->queue_empty_descs++; in xskq_cons_read_desc()
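xskq_cons_read_desc() (source lines 233-245) pairs the same empty check with descriptor validation: a rejected descriptor bumps q->invalid_descs (line 227) and an empty cache bumps q->queue_empty_descs. A rough userspace illustration of that accounting; the validity test here is a placeholder for the pool-based checks the kernel performs, and mock_desc/mock_queue are invented types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mock_desc { uint64_t addr; uint32_t len; };

    struct mock_queue {
            uint32_t cached_prod, cached_cons, ring_mask;
            uint64_t invalid_descs, queue_empty_descs;
            struct mock_desc ring[8];
    };

    /* Placeholder for xskq_cons_is_valid_desc(): the real check validates the
     * descriptor against the buffer pool; here a zero length is "invalid". */
    static bool is_valid_desc(struct mock_queue *q, const struct mock_desc *d)
    {
            if (d->len == 0) {
                    q->invalid_descs++;
                    return false;
            }
            return true;
    }

    /* Reading does not advance cached_cons; that happens later, in the
     * release helpers. */
    static bool cons_read_desc(struct mock_queue *q, struct mock_desc *desc)
    {
            if (q->cached_cons != q->cached_prod) {
                    uint32_t idx = q->cached_cons & q->ring_mask;

                    *desc = q->ring[idx];
                    return is_valid_desc(q, desc);
            }
            q->queue_empty_descs++;
            return false;
    }

    int main(void)
    {
            struct mock_queue q = { .cached_prod = 1, .ring_mask = 7 };
            struct mock_desc d;

            q.ring[0] = (struct mock_desc){ .addr = 0x2000, .len = 64 };
            printf("valid: %d, invalid_descs: %llu, queue_empty_descs: %llu\n",
                   cons_read_desc(&q, &d),
                   (unsigned long long)q.invalid_descs,
                   (unsigned long long)q.queue_empty_descs);
            return 0;
    }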
249 static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt) in xskq_cons_release_n() argument
251 q->cached_cons += cnt; in xskq_cons_release_n()
254 static inline void parse_desc(struct xsk_queue *q, struct xsk_buff_pool *pool, in parse_desc() argument
257 parsed->valid = xskq_cons_is_valid_desc(q, desc, pool); in parse_desc()
262 u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool, in xskq_cons_read_desc_batch() argument
265 u32 cached_cons = q->cached_cons, nb_entries = 0; in xskq_cons_read_desc_batch()
272 while (cached_cons != q->cached_prod && nb_entries < max) { in xskq_cons_read_desc_batch()
273 struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; in xskq_cons_read_desc_batch()
274 u32 idx = cached_cons & q->ring_mask; in xskq_cons_read_desc_batch()
279 parse_desc(q, pool, &descs[nb_entries], &parsed); in xskq_cons_read_desc_batch()
298 xskq_cons_release_n(q, cached_cons - q->cached_cons); in xskq_cons_read_desc_batch()
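The batch reader (source lines 262-298) walks a local copy of cached_cons forward, parses each descriptor via parse_desc(), and finally folds the distance walked back into the queue with xskq_cons_release_n(). The lines elided between 279 and 298 do additional per-descriptor handling that is not reproduced here; the sketch below keeps only the walk-then-release shape, with invented mock types and a placeholder parse step:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mock_desc { uint64_t addr; uint32_t len; };

    struct mock_queue {
            uint32_t cached_prod, cached_cons, ring_mask;
            struct mock_desc ring[8];
    };

    /* Placeholder for parse_desc(): accept any descriptor with a nonzero length. */
    static bool parse_ok(const struct mock_desc *d)
    {
            return d->len != 0;
    }

    static void cons_release_n(struct mock_queue *q, uint32_t cnt)
    {
            q->cached_cons += cnt;
    }

    static uint32_t cons_read_desc_batch(struct mock_queue *q,
                                         struct mock_desc *descs, uint32_t max)
    {
            uint32_t cached_cons = q->cached_cons, nb_entries = 0;

            while (cached_cons != q->cached_prod && nb_entries < max) {
                    uint32_t idx = cached_cons & q->ring_mask;

                    descs[nb_entries] = q->ring[idx];
                    cached_cons++;
                    if (!parse_ok(&descs[nb_entries]))
                            break;          /* simplified: stop the batch here */
                    nb_entries++;
            }

            /* Everything walked past, valid or not, is now consumed locally. */
            cons_release_n(q, cached_cons - q->cached_cons);
            return nb_entries;
    }

    int main(void)
    {
            struct mock_queue q = { .cached_prod = 3, .ring_mask = 7 };
            struct mock_desc out[4];

            q.ring[0] = (struct mock_desc){ 0x1000, 64 };
            q.ring[1] = (struct mock_desc){ 0x2000, 128 };
            q.ring[2] = (struct mock_desc){ 0x3000, 0 };
            printf("batched %u descriptors\n", cons_read_desc_batch(&q, out, 4));
            return 0;
    }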
304 static inline void __xskq_cons_release(struct xsk_queue *q) in __xskq_cons_release() argument
306 smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */ in __xskq_cons_release()
309 static inline void __xskq_cons_peek(struct xsk_queue *q) in __xskq_cons_peek() argument
312 q->cached_prod = smp_load_acquire(&q->ring->producer); /* C, matches B */ in __xskq_cons_peek()
315 static inline void xskq_cons_get_entries(struct xsk_queue *q) in xskq_cons_get_entries() argument
317 __xskq_cons_release(q); in xskq_cons_get_entries()
318 __xskq_cons_peek(q); in xskq_cons_get_entries()
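The release/peek pair at source lines 304-318 is where the consumer side synchronizes with the producer: __xskq_cons_release() publishes the consumer index with smp_store_release() ("D, matches A") and __xskq_cons_peek() refreshes the cached producer index with smp_load_acquire() ("C, matches B"); the letters refer to a pairing comment earlier in the same header that is not part of this listing. A userspace analogue of the same pattern, using C11 acquire/release atomics in place of the kernel primitives (mock types, not the kernel API):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Analogue of the shared ring header: indices visible to both sides. */
    struct mock_ring {
            _Atomic uint32_t producer;
            _Atomic uint32_t consumer;
    };

    struct mock_queue {
            uint32_t cached_prod;
            uint32_t cached_cons;
            struct mock_ring *ring;
    };

    /* Analogue of __xskq_cons_release(): the release store orders our earlier
     * reads of ring entries before the new consumer index becomes visible, so
     * the producer cannot recycle slots we were still reading. */
    static void cons_release_index(struct mock_queue *q)
    {
            atomic_store_explicit(&q->ring->consumer, q->cached_cons,
                                  memory_order_release);
    }

    /* Analogue of __xskq_cons_peek(): the acquire load orders the producer
     * index read before our later reads of the entries it published. */
    static void cons_peek(struct mock_queue *q)
    {
            q->cached_prod = atomic_load_explicit(&q->ring->producer,
                                                  memory_order_acquire);
    }

    /* Analogue of xskq_cons_get_entries(): hand back what we are done with,
     * then pull in anything new from the producer. */
    static void cons_get_entries(struct mock_queue *q)
    {
            cons_release_index(q);
            cons_peek(q);
    }

    int main(void)
    {
            struct mock_ring ring = { 0 };
            struct mock_queue q = { .ring = &ring };

            atomic_store_explicit(&ring.producer, 4, memory_order_release);
            cons_get_entries(&q);
            printf("cached_prod now %u\n", q.cached_prod);
            return 0;
    }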
321 static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max) in xskq_cons_nb_entries() argument
323 u32 entries = q->cached_prod - q->cached_cons; in xskq_cons_nb_entries()
328 __xskq_cons_peek(q); in xskq_cons_nb_entries()
329 entries = q->cached_prod - q->cached_cons; in xskq_cons_nb_entries()
334 static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr) in xskq_cons_peek_addr_unchecked() argument
336 if (q->cached_prod == q->cached_cons) in xskq_cons_peek_addr_unchecked()
337 xskq_cons_get_entries(q); in xskq_cons_peek_addr_unchecked()
338 return xskq_cons_read_addr_unchecked(q, addr); in xskq_cons_peek_addr_unchecked()
341 static inline bool xskq_cons_peek_desc(struct xsk_queue *q, in xskq_cons_peek_desc() argument
345 if (q->cached_prod == q->cached_cons) in xskq_cons_peek_desc()
346 xskq_cons_get_entries(q); in xskq_cons_peek_desc()
347 return xskq_cons_read_desc(q, desc, pool); in xskq_cons_peek_desc()
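Both peek helpers (source lines 334-347) follow the same lazy-refresh pattern: only when the local cache looks empty do they pay for the release/acquire round trip in xskq_cons_get_entries(), and then they retry the read. A compact sketch of that composition with an invented mock_queue; the refresh step is stubbed out as a plain copy:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mock_queue {
            uint32_t cached_prod, cached_cons, ring_mask;
            uint32_t shared_prod;       /* stands in for the shared ring->producer */
            uint64_t ring[8];
    };

    /* Stub for xskq_cons_get_entries(): in the kernel this release-stores the
     * consumer index and acquire-loads the producer index; here it just copies. */
    static void cons_get_entries(struct mock_queue *q)
    {
            q->cached_prod = q->shared_prod;
    }

    static bool cons_read_addr(struct mock_queue *q, uint64_t *addr)
    {
            if (q->cached_cons != q->cached_prod) {
                    *addr = q->ring[q->cached_cons & q->ring_mask];
                    return true;
            }
            return false;
    }

    /* Mirrors xskq_cons_peek_addr_unchecked(): refresh only when the cache
     * looks empty, then attempt the read. */
    static bool cons_peek_addr(struct mock_queue *q, uint64_t *addr)
    {
            if (q->cached_prod == q->cached_cons)
                    cons_get_entries(q);
            return cons_read_addr(q, addr);
    }

    int main(void)
    {
            struct mock_queue q = { .ring_mask = 7, .shared_prod = 1 };
            uint64_t addr;

            q.ring[0] = 0x4000;
            printf("peeked: %d\n", cons_peek_addr(&q, &addr));
            return 0;
    }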
354 static inline void xskq_cons_release(struct xsk_queue *q) in xskq_cons_release() argument
356 q->cached_cons++; in xskq_cons_release()
359 static inline void xskq_cons_cancel_n(struct xsk_queue *q, u32 cnt) in xskq_cons_cancel_n() argument
361 q->cached_cons -= cnt; in xskq_cons_cancel_n()
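xskq_cons_release() and xskq_cons_cancel_n() (source lines 354-361) only touch the local cache: release marks one more entry as consumed (to be published at the next __xskq_cons_release()), while cancel rewinds the cache by cnt entries so they will be peeked again later, for instance when a subsequent step cannot take them. A tiny sketch of that bookkeeping with the same invented mock_queue:

    #include <stdint.h>
    #include <stdio.h>

    struct mock_queue {
            uint32_t cached_prod, cached_cons;
    };

    /* Mirrors xskq_cons_release(): one entry is now fully consumed locally. */
    static void cons_release(struct mock_queue *q)
    {
            q->cached_cons++;
    }

    /* Mirrors xskq_cons_cancel_n(): give cnt entries back to the local cache
     * so a later peek returns them again. */
    static void cons_cancel_n(struct mock_queue *q, uint32_t cnt)
    {
            q->cached_cons -= cnt;
    }

    int main(void)
    {
            struct mock_queue q = { .cached_prod = 4, .cached_cons = 0 };

            cons_release(&q);
            cons_release(&q);
            cons_cancel_n(&q, 1);       /* undo one of the two consumes */
            printf("cached_cons = %u\n", q.cached_cons);
            return 0;
    }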
364 static inline u32 xskq_cons_present_entries(struct xsk_queue *q) in xskq_cons_present_entries() argument
367 return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer); in xskq_cons_present_entries()
372 static inline u32 xskq_get_prod(struct xsk_queue *q) in xskq_get_prod() argument
374 return READ_ONCE(q->ring->producer); in xskq_get_prod()
377 static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max) in xskq_prod_nb_free() argument
379 u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons); in xskq_prod_nb_free()
385 q->cached_cons = READ_ONCE(q->ring->consumer); in xskq_prod_nb_free()
386 free_entries = q->nentries - (q->cached_prod - q->cached_cons); in xskq_prod_nb_free()
391 static inline bool xskq_prod_is_full(struct xsk_queue *q) in xskq_prod_is_full() argument
393 return xskq_prod_nb_free(q, 1) ? false : true; in xskq_prod_is_full()
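On the producer side, xskq_prod_nb_free() (source lines 377-393) computes free space as nentries minus the in-flight delta (cached_prod - cached_cons); because both counters are free-running u32 values, the unsigned subtraction stays correct across wraparound. Only when the cached view does not show enough room does it re-read the shared consumer index (READ_ONCE in the kernel) and recompute. A small sketch of that arithmetic with invented types; the shared-index read and the return-value handling are simplified relative to the kernel helper:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mock_queue {
            uint32_t nentries;          /* power of two */
            uint32_t cached_prod, cached_cons;
            uint32_t shared_cons;       /* stands in for READ_ONCE(q->ring->consumer) */
    };

    static uint32_t prod_nb_free(struct mock_queue *q, uint32_t max)
    {
            uint32_t free_entries = q->nentries - (q->cached_prod - q->cached_cons);

            if (free_entries >= max)
                    return free_entries;

            /* Refresh the consumer view only when the cache looks too full. */
            q->cached_cons = q->shared_cons;
            free_entries = q->nentries - (q->cached_prod - q->cached_cons);
            return free_entries;
    }

    static bool prod_is_full(struct mock_queue *q)
    {
            return prod_nb_free(q, 1) ? false : true;
    }

    int main(void)
    {
            /* Counters near the u32 wrap point still give the right answer. */
            struct mock_queue q = {
                    .nentries = 8,
                    .cached_prod = 3,           /* wrapped past 0xffffffff */
                    .cached_cons = 0xfffffffd,  /* 6 entries in flight */
                    .shared_cons = 0xfffffffd,
            };

            printf("free %u, full %d\n", prod_nb_free(&q, 8), prod_is_full(&q));
            return 0;
    }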
396 static inline void xskq_prod_cancel_n(struct xsk_queue *q, u32 cnt) in xskq_prod_cancel_n() argument
398 q->cached_prod -= cnt; in xskq_prod_cancel_n()
401 static inline int xskq_prod_reserve(struct xsk_queue *q) in xskq_prod_reserve() argument
403 if (xskq_prod_is_full(q)) in xskq_prod_reserve()
407 q->cached_prod++; in xskq_prod_reserve()
411 static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr) in xskq_prod_reserve_addr() argument
413 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; in xskq_prod_reserve_addr()
415 if (xskq_prod_is_full(q)) in xskq_prod_reserve_addr()
419 ring->desc[q->cached_prod++ & q->ring_mask] = addr; in xskq_prod_reserve_addr()
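xskq_prod_reserve() and xskq_prod_reserve_addr() (source lines 401-419) only advance the local cached_prod, with the addr variant also writing the address into the slot; nothing is visible to the consumer until a later submit publishes the producer index. A sketch of that local reservation step, again with an invented mock_queue:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mock_queue {
            uint32_t nentries, ring_mask;
            uint32_t cached_prod, cached_cons;
            uint64_t ring[8];
    };

    static bool prod_is_full(struct mock_queue *q)
    {
            return q->nentries - (q->cached_prod - q->cached_cons) == 0;
    }

    /* Mirrors xskq_prod_reserve(): grab a slot without writing anything yet. */
    static int prod_reserve(struct mock_queue *q)
    {
            if (prod_is_full(q))
                    return -1;              /* the kernel helper returns -ENOSPC */

            q->cached_prod++;
            return 0;
    }

    /* Mirrors xskq_prod_reserve_addr(): grab a slot and fill it with a UMEM
     * address. The slot stays private until the producer index is submitted. */
    static int prod_reserve_addr(struct mock_queue *q, uint64_t addr)
    {
            if (prod_is_full(q))
                    return -1;

            q->ring[q->cached_prod++ & q->ring_mask] = addr;
            return 0;
    }

    int main(void)
    {
            struct mock_queue q = { .nentries = 8, .ring_mask = 7 };

            prod_reserve_addr(&q, 0x3000);
            prod_reserve(&q);
            printf("cached_prod = %u, slot0 = 0x%llx\n",
                   q.cached_prod, (unsigned long long)q.ring[0]);
            return 0;
    }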
423 static inline void xskq_prod_write_addr(struct xsk_queue *q, u32 idx, u64 addr) in xskq_prod_write_addr() argument
425 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; in xskq_prod_write_addr()
427 ring->desc[idx & q->ring_mask] = addr; in xskq_prod_write_addr()
430 static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs, in xskq_prod_write_addr_batch() argument
433 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; in xskq_prod_write_addr_batch()
437 cached_prod = q->cached_prod; in xskq_prod_write_addr_batch()
439 ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr; in xskq_prod_write_addr_batch()
440 q->cached_prod = cached_prod; in xskq_prod_write_addr_batch()
443 static inline int xskq_prod_reserve_desc(struct xsk_queue *q, in xskq_prod_reserve_desc() argument
446 struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; in xskq_prod_reserve_desc()
449 if (xskq_prod_is_full(q)) in xskq_prod_reserve_desc()
453 idx = q->cached_prod++ & q->ring_mask; in xskq_prod_reserve_desc()
461 static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx) in __xskq_prod_submit() argument
463 smp_store_release(&q->ring->producer, idx); /* B, matches C */ in __xskq_prod_submit()
466 static inline void xskq_prod_submit(struct xsk_queue *q) in xskq_prod_submit() argument
468 __xskq_prod_submit(q, q->cached_prod); in xskq_prod_submit()
471 static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries) in xskq_prod_submit_n() argument
473 __xskq_prod_submit(q, q->ring->producer + nb_entries); in xskq_prod_submit_n()
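Publication happens in __xskq_prod_submit() (source lines 461-473): a single smp_store_release() of the producer index ("B, matches C", pairing with the consumer's acquire load above) makes every descriptor written so far visible in one step. xskq_prod_submit() publishes up to cached_prod, while xskq_prod_submit_n() advances the already published index by nb_entries. A userspace analogue with C11 atomics, completing the producer-side sketch (invented types, not the kernel API):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mock_ring {
            _Atomic uint32_t producer;
            _Atomic uint32_t consumer;
    };

    struct mock_queue {
            uint32_t cached_prod;
            struct mock_ring *ring;
    };

    /* Analogue of __xskq_prod_submit(): the release store orders all earlier
     * writes to ring slots before the new producer index, so a consumer that
     * acquire-loads the index sees fully written descriptors. */
    static void prod_submit_idx(struct mock_queue *q, uint32_t idx)
    {
            atomic_store_explicit(&q->ring->producer, idx, memory_order_release);
    }

    /* Analogue of xskq_prod_submit(): publish everything reserved so far. */
    static void prod_submit(struct mock_queue *q)
    {
            prod_submit_idx(q, q->cached_prod);
    }

    /* Analogue of xskq_prod_submit_n(): bump the already published index. */
    static void prod_submit_n(struct mock_queue *q, uint32_t nb_entries)
    {
            uint32_t cur = atomic_load_explicit(&q->ring->producer,
                                                memory_order_relaxed);

            prod_submit_idx(q, cur + nb_entries);
    }

    int main(void)
    {
            struct mock_ring ring = { 0 };
            struct mock_queue q = { .cached_prod = 3, .ring = &ring };

            prod_submit_n(&q, 3);   /* publish the first three reservations */
            q.cached_prod = 5;      /* pretend two more slots were reserved */
            prod_submit(&q);        /* publish up to cached_prod */
            printf("producer index = %u\n",
                   atomic_load_explicit(&ring.producer, memory_order_relaxed));
            return 0;
    }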
476 static inline bool xskq_prod_is_empty(struct xsk_queue *q) in xskq_prod_is_empty() argument
479 return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer); in xskq_prod_is_empty()
484 static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q) in xskq_nb_invalid_descs() argument
486 return q ? q->invalid_descs : 0; in xskq_nb_invalid_descs()
489 static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q) in xskq_nb_queue_empty_descs() argument
491 return q ? q->queue_empty_descs : 0; in xskq_nb_queue_empty_descs()