Lines Matching full:fifo
25 * gve_tx_fifo_* manages the Registered Segment as a FIFO - clients must
29 static int gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_fifo *fifo) in gve_tx_fifo_init() argument
31 fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP, in gve_tx_fifo_init()
33 if (unlikely(!fifo->base)) { in gve_tx_fifo_init()
34 netif_err(priv, drv, priv->dev, "Failed to vmap fifo, qpl_id = %d\n", in gve_tx_fifo_init()
35 fifo->qpl->id); in gve_tx_fifo_init()
39 fifo->size = fifo->qpl->num_entries * PAGE_SIZE; in gve_tx_fifo_init()
40 atomic_set(&fifo->available, fifo->size); in gve_tx_fifo_init()
41 fifo->head = 0; in gve_tx_fifo_init()
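For orientation, the fields these helpers touch (base, size, available, head, qpl) imply roughly the following FIFO state. The authoritative definition lives in the driver's gve.h; treat this as a sketch inferred only from the lines matched above:

struct gve_tx_fifo {
	void *base;            /* vmap()ed view of the QPL pages, set in gve_tx_fifo_init() */
	u32 size;              /* total bytes: qpl->num_entries * PAGE_SIZE */
	atomic_t available;    /* bytes not currently handed out to in-flight packets */
	u32 head;              /* offset at which the next allocation starts */
	struct gve_queue_page_list *qpl; /* member type assumed; the snippets only show ->pages, ->num_entries, ->id */
};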
45 static void gve_tx_fifo_release(struct gve_priv *priv, struct gve_tx_fifo *fifo) in gve_tx_fifo_release() argument
47 WARN(atomic_read(&fifo->available) != fifo->size, in gve_tx_fifo_release()
48 "Releasing non-empty fifo"); in gve_tx_fifo_release()
50 vunmap(fifo->base); in gve_tx_fifo_release()
53 static int gve_tx_fifo_pad_alloc_one_frag(struct gve_tx_fifo *fifo, in gve_tx_fifo_pad_alloc_one_frag() argument
56 return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head; in gve_tx_fifo_pad_alloc_one_frag()
59 static bool gve_tx_fifo_can_alloc(struct gve_tx_fifo *fifo, size_t bytes) in gve_tx_fifo_can_alloc() argument
61 return (atomic_read(&fifo->available) <= bytes) ? false : true; in gve_tx_fifo_can_alloc()
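A short worked example (all numbers invented for illustration, not taken from the driver) shows how these two small helpers split the work:

/* Suppose fifo->size = 8192, fifo->head = 8000, and a 300-byte header that
 * must not be split across the end of the buffer:
 *
 *   gve_tx_fifo_pad_alloc_one_frag(fifo, 300)
 *       head + bytes = 8300 >= size, so it returns size - head = 192,
 *       i.e. 192 bytes of padding are needed before the header can be
 *       placed contiguously at the start of the FIFO.
 *
 * gve_tx_fifo_can_alloc() only consults the atomic byte budget, and its
 * "<=" means a request for exactly the remaining space is refused.
 */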
64 /* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
65 * @fifo: FIFO to allocate from
71 * Allocations from a given FIFO must be externally synchronized but concurrent
74 static int gve_tx_alloc_fifo(struct gve_tx_fifo *fifo, size_t bytes, in gve_tx_alloc_fifo() argument
86 * because the FIFO head always starts aligned, and the FIFO's boundaries in gve_tx_alloc_fifo()
90 WARN(!gve_tx_fifo_can_alloc(fifo, bytes), in gve_tx_alloc_fifo()
91 "Reached %s when there's not enough space in the fifo", __func__); in gve_tx_alloc_fifo()
95 iov[0].iov_offset = fifo->head; in gve_tx_alloc_fifo()
97 fifo->head += bytes; in gve_tx_alloc_fifo()
99 if (fifo->head > fifo->size) { in gve_tx_alloc_fifo()
101 * FIFO, also use the head fragment. in gve_tx_alloc_fifo()
104 overflow = fifo->head - fifo->size; in gve_tx_alloc_fifo()
106 iov[1].iov_offset = 0; /* Start of fifo */ in gve_tx_alloc_fifo()
109 fifo->head = overflow; in gve_tx_alloc_fifo()
113 aligned_head = L1_CACHE_ALIGN(fifo->head); in gve_tx_alloc_fifo()
114 padding = aligned_head - fifo->head; in gve_tx_alloc_fifo()
116 atomic_sub(bytes + padding, &fifo->available); in gve_tx_alloc_fifo()
117 fifo->head = aligned_head; in gve_tx_alloc_fifo()
119 if (fifo->head == fifo->size) in gve_tx_alloc_fifo()
120 fifo->head = 0; in gve_tx_alloc_fifo()
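Because the listing only shows the lines that mention the FIFO, the allocation flow may be easier to follow as a compact user-space model. The sketch below mirrors the wrap-and-pad logic of gve_tx_alloc_fifo() under invented names (model_fifo, MODEL_CACHELINE); it omits the zero-length and can-alloc checks and is not the driver's code:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define MODEL_CACHELINE 64
#define MODEL_ALIGN(x) (((x) + MODEL_CACHELINE - 1) & ~((size_t)MODEL_CACHELINE - 1))

struct model_iovec {
	size_t offset;
	size_t len;
};

struct model_fifo {
	size_t size;
	size_t head;
	atomic_size_t available;
};

/* Same shape as the gve_tx_alloc_fifo() flow above: reserve bytes at head,
 * split into a second fragment when the request crosses the end of the
 * buffer, then round head up to a cacheline and charge the padding too.
 * Returns the number of fragments used (1 or 2).
 */
static int model_alloc(struct model_fifo *f, size_t bytes, struct model_iovec iov[2])
{
	size_t aligned_head, padding, overflow;
	int nfrags = 1;

	iov[0].offset = f->head;
	iov[0].len = bytes;
	f->head += bytes;

	if (f->head > f->size) {
		/* The request crossed the end: its tail wraps to offset 0. */
		overflow = f->head - f->size;
		nfrags = 2;
		iov[0].len -= overflow;
		iov[1].offset = 0;
		iov[1].len = overflow;
		f->head = overflow;
	}

	/* Keep the next allocation cacheline aligned; padding counts as used. */
	aligned_head = MODEL_ALIGN(f->head);
	padding = aligned_head - f->head;
	atomic_fetch_sub(&f->available, bytes + padding);
	f->head = (aligned_head == f->size) ? 0 : aligned_head;

	return nfrags;
}

int main(void)
{
	struct model_fifo f = { .size = 8192, .head = 8000 };
	struct model_iovec iov[2] = { {0, 0}, {0, 0} };
	int n;

	atomic_init(&f.available, 8192);
	n = model_alloc(&f, 300, iov);   /* crosses the 8192-byte boundary */
	printf("%d frags: [%zu +%zu] [%zu +%zu], head=%zu, available=%zu\n",
	       n, iov[0].offset, iov[0].len, iov[1].offset, iov[1].len,
	       f.head, atomic_load(&f.available));
	return 0;
}

Run against the numbers used earlier, this reports two fragments, [8000 +192] and [0 +108], with head re-aligned to 128 and 320 bytes (300 plus 20 of padding) charged against available.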
125 /* gve_tx_free_fifo - Return space to Tx FIFO
126 * @fifo: FIFO to return fragments to
129 static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes) in gve_tx_free_fifo() argument
131 atomic_add(bytes, &fifo->available); in gve_tx_free_fifo()
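Note the asymmetry: gve_tx_alloc_fifo() charges bytes plus alignment padding against fifo->available (line 116 above), while gve_tx_free_fifo() credits back exactly what the caller passes. For the accounting to balance, and for the "Releasing non-empty fifo" WARN in gve_tx_fifo_release() to stay quiet, completion processing has to return each fragment's padded size, not just its payload bytes. In the model sketched above that is simply:

static void model_free(struct model_fifo *f, size_t bytes_with_padding)
{
	/* Mirror of gve_tx_free_fifo(): space only becomes reusable here. */
	atomic_fetch_add(&f->available, bytes_with_padding);
}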
211 /* map Tx FIFO */ in gve_tx_alloc_ring()
305 * the beginning of the payload at the end of the FIFO, and 1 if the
306 * payload wraps to the beginning of the FIFO.
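Reading this fragment together with the descriptor accounting it belongs to (the comment's earlier lines are not part of the match, so the exact total is an assumption): the worst case per skb would be one descriptor per page fragment, one for the linear portion, plus the two extras named here, one when the header must sit alone at the end of the FIFO and one when the payload wraps back to the start, i.e. on the order of MAX_SKB_FRAGS + 3 descriptors.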
310 /* Check if sufficient resources (descriptor ring space, FIFO space) are
434 * of the fifo and then put the header at the beginning of the fifo. in gve_tx_add_skb()
534 /* FIFO free */ in gve_clean_tx_done()