Lines matching "rx" (query: full:rx) in the gve driver's DQO RX buffer management code

16 struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx) in gve_alloc_buf_state() argument
21 buffer_id = rx->dqo.free_buf_states; in gve_alloc_buf_state()
25 buf_state = &rx->dqo.buf_states[buffer_id]; in gve_alloc_buf_state()
28 rx->dqo.free_buf_states = buf_state->next; in gve_alloc_buf_state()
40 bool gve_buf_state_is_allocated(struct gve_rx_ring *rx, in gve_buf_state_is_allocated() argument
43 s16 buffer_id = buf_state - rx->dqo.buf_states; in gve_buf_state_is_allocated()
48 void gve_free_buf_state(struct gve_rx_ring *rx, in gve_free_buf_state() argument
51 s16 buffer_id = buf_state - rx->dqo.buf_states; in gve_free_buf_state()
53 buf_state->next = rx->dqo.free_buf_states; in gve_free_buf_state()
54 rx->dqo.free_buf_states = buffer_id; in gve_free_buf_state()
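
The matches above for gve_alloc_buf_state(), gve_buf_state_is_allocated() and gve_free_buf_state() show the ring's buffer states kept in an array (rx->dqo.buf_states) and chained through s16 indices rather than pointers, with the free-list head in rx->dqo.free_buf_states and indices recovered by pointer subtraction. A minimal, self-contained sketch of that index-linked free list (simplified types, -1 as the empty sentinel; not the driver code itself):

    #include <stdio.h>

    #define NUM_BUF_STATES 8

    struct buf_state {
        short next;      /* index of the next entry in the list, -1 = none */
        int allocated;   /* demo flag so the example can print state */
    };

    struct rx_ring {
        struct buf_state buf_states[NUM_BUF_STATES];
        short free_buf_states;   /* head index of the free list, -1 = empty */
    };

    /* Pop the head of the free list, as gve_alloc_buf_state() does. */
    static struct buf_state *alloc_buf_state(struct rx_ring *rx)
    {
        short id = rx->free_buf_states;
        struct buf_state *bs;

        if (id == -1)
            return NULL;
        bs = &rx->buf_states[id];
        rx->free_buf_states = bs->next;
        bs->allocated = 1;
        return bs;
    }

    /* Push an entry back on the free list, as gve_free_buf_state() does;
     * like gve_buf_state_is_allocated(), the index comes from pointer
     * subtraction against the array base. */
    static void free_buf_state(struct rx_ring *rx, struct buf_state *bs)
    {
        short id = bs - rx->buf_states;

        bs->allocated = 0;
        bs->next = rx->free_buf_states;
        rx->free_buf_states = id;
    }

    int main(void)
    {
        struct rx_ring rx;
        int i;

        /* Chain every entry onto the free list; the last one ends with -1. */
        for (i = 0; i < NUM_BUF_STATES; i++) {
            rx.buf_states[i].next = (i == NUM_BUF_STATES - 1) ? -1 : i + 1;
            rx.buf_states[i].allocated = 0;
        }
        rx.free_buf_states = 0;

        struct buf_state *a = alloc_buf_state(&rx);
        struct buf_state *b = alloc_buf_state(&rx);

        printf("allocated ids: %ld %ld\n",
               (long)(a - rx.buf_states), (long)(b - rx.buf_states));
        free_buf_state(&rx, a);
        printf("free list head after free: %d\n", rx.free_buf_states);
        return 0;
    }

Keeping links as 16-bit array indices rather than pointers makes -1 a natural empty sentinel, and the same index is what gve_alloc_buffer() later writes into the descriptor as buf_id (lines 307 and 329 below).
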
57 struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx, in gve_dequeue_buf_state() argument
67 buf_state = &rx->dqo.buf_states[buffer_id]; in gve_dequeue_buf_state()
80 void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list, in gve_enqueue_buf_state() argument
83 s16 buffer_id = buf_state - rx->dqo.buf_states; in gve_enqueue_buf_state()
93 rx->dqo.buf_states[tail].next = buffer_id; in gve_enqueue_buf_state()
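
gve_dequeue_buf_state() and gve_enqueue_buf_state() operate on a struct gve_index_list; the matches above (buf_states[buffer_id] on dequeue, buf_states[tail].next = buffer_id on enqueue, plus the head == -1 test further down) suggest a FIFO of head/tail indices threaded through the same next fields. A simplified sketch under that assumption (the head/tail member names and the empty-list convention are inferred, not taken from the listing):

    #include <stdio.h>

    struct buf_state { short next; };

    struct index_list { short head, tail; };  /* assumed layout; -1/-1 when empty */

    struct rx_ring {
        struct buf_state buf_states[8];
        struct index_list recycled;
    };

    /* Take the entry at the head of the list (the gve_dequeue_buf_state() shape). */
    static struct buf_state *dequeue(struct rx_ring *rx, struct index_list *list)
    {
        short id = list->head;

        if (id == -1)
            return NULL;
        list->head = rx->buf_states[id].next;
        if (list->head == -1)
            list->tail = -1;
        return &rx->buf_states[id];
    }

    /* Append an entry at the tail (the gve_enqueue_buf_state() shape); note
     * the buf_states[tail].next = buffer_id link from line 93 above. */
    static void enqueue(struct rx_ring *rx, struct index_list *list,
                        struct buf_state *bs)
    {
        short id = bs - rx->buf_states;

        bs->next = -1;
        if (list->head == -1)
            list->head = id;
        else
            rx->buf_states[list->tail].next = id;
        list->tail = id;
    }

    int main(void)
    {
        struct rx_ring rx = { .recycled = { -1, -1 } };

        enqueue(&rx, &rx.recycled, &rx.buf_states[3]);
        enqueue(&rx, &rx.recycled, &rx.buf_states[5]);
        printf("dequeued %ld\n", (long)(dequeue(&rx, &rx.recycled) - rx.buf_states));
        printf("dequeued %ld\n", (long)(dequeue(&rx, &rx.recycled) - rx.buf_states));
        return 0;
    }
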
98 struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx) in gve_get_recycled_buf_state() argument
104 buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states); in gve_get_recycled_buf_state()
108 if (unlikely(rx->dqo.used_buf_states.head == -1)) in gve_get_recycled_buf_state()
117 buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states); in gve_get_recycled_buf_state()
119 rx->dqo.used_buf_states_cnt--; in gve_get_recycled_buf_state()
123 gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); in gve_get_recycled_buf_state()
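
gve_get_recycled_buf_state() prefers the recycled list and falls back to used_buf_states only when the recycled list is empty; the matches above show a used entry either being claimed (used_buf_states_cnt is decremented) or put back on the used list. A policy-level sketch of that fallback; the page_still_referenced flag stands in for whatever reference-count check the driver applies before trusting a used buffer again, and the retry bound of 5 is illustrative:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct buf_state {
        int id;
        bool page_still_referenced;   /* stand-in for the driver's refcount check */
    };

    /* Tiny LIFO stand-ins for rx->dqo.recycled_buf_states and
     * rx->dqo.used_buf_states; the real driver uses the index lists above. */
    struct stack { struct buf_state *e[8]; int n; };

    static struct buf_state *pop(struct stack *s)
    {
        return s->n ? s->e[--s->n] : NULL;
    }

    static void push(struct stack *s, struct buf_state *b)
    {
        s->e[s->n++] = b;
    }

    /* Prefer recycled entries; otherwise scan a bounded number of used
     * entries, claiming one only if nothing still references its page, and
     * re-queue the ones still in use (mirrors lines 104-123 above). */
    static struct buf_state *get_recycled_buf_state(struct stack *recycled,
                                                    struct stack *used,
                                                    int *used_cnt)
    {
        struct buf_state *b = pop(recycled);
        int i;

        if (b)
            return b;

        for (i = 0; i < 5 && used->n; i++) {   /* the bound of 5 is illustrative */
            b = pop(used);
            if (!b->page_still_referenced) {
                (*used_cnt)--;
                return b;
            }
            push(used, b);   /* still held elsewhere, retry on a later pass */
        }
        return NULL;
    }

    int main(void)
    {
        struct stack recycled = { .n = 0 }, used = { .n = 0 };
        struct buf_state busy = { .id = 1, .page_still_referenced = true };
        struct buf_state idle = { .id = 2, .page_still_referenced = false };
        int used_cnt = 2;

        push(&used, &busy);
        push(&used, &idle);

        struct buf_state *got = get_recycled_buf_state(&recycled, &used, &used_cnt);
        printf("claimed buf %d, used_buf_states_cnt now %d\n",
               got ? got->id : -1, used_cnt);
        return 0;
    }
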
129 int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx, in gve_alloc_qpl_page_dqo() argument
132 struct gve_priv *priv = rx->gve; in gve_alloc_qpl_page_dqo()
135 idx = rx->dqo.next_qpl_page_idx; in gve_alloc_qpl_page_dqo()
141 buf_state->page_info.page = rx->dqo.qpl->pages[idx]; in gve_alloc_qpl_page_dqo()
142 buf_state->addr = rx->dqo.qpl->page_buses[idx]; in gve_alloc_qpl_page_dqo()
143 rx->dqo.next_qpl_page_idx++; in gve_alloc_qpl_page_dqo()
147 buf_state->page_info.buf_size = rx->packet_buffer_truesize; in gve_alloc_qpl_page_dqo()
148 buf_state->page_info.pad = rx->rx_headroom; in gve_alloc_qpl_page_dqo()
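
In QPL (queue page list) mode, gve_alloc_qpl_page_dqo() does not allocate memory at all: it hands out the next pre-registered page from rx->dqo.qpl, records the matching DMA bus address, and seeds the buffer truesize and headroom from the ring. A simplified, self-contained model of that sequential handout (fake pages and bus addresses; the exhaustion check is assumed, since the corresponding line is not among the matches):

    #include <stdio.h>

    #define QPL_PAGES 4
    #define PAGE_SZ 4096

    struct qpl {
        void *pages[QPL_PAGES];               /* pre-registered pages */
        unsigned long page_buses[QPL_PAGES];  /* their DMA bus addresses */
    };

    struct buf_state {
        void *page;
        unsigned long addr;
        unsigned int buf_size;
        unsigned int pad;
    };

    struct rx_ring {
        struct qpl *qpl;
        int next_qpl_page_idx;
        unsigned int packet_buffer_truesize;
        unsigned int rx_headroom;
    };

    static int alloc_qpl_page(struct rx_ring *rx, struct buf_state *bs)
    {
        int idx = rx->next_qpl_page_idx;

        if (idx >= QPL_PAGES)   /* assumed: refuse once the QPL is exhausted */
            return -1;

        bs->page = rx->qpl->pages[idx];
        bs->addr = rx->qpl->page_buses[idx];
        rx->next_qpl_page_idx++;

        bs->buf_size = rx->packet_buffer_truesize;
        bs->pad = rx->rx_headroom;
        return 0;
    }

    int main(void)
    {
        static char backing[QPL_PAGES][PAGE_SZ];
        struct qpl qpl;
        struct rx_ring rx = { .qpl = &qpl, .next_qpl_page_idx = 0,
                              .packet_buffer_truesize = 2048, .rx_headroom = 0 };
        struct buf_state bs;
        int i;

        for (i = 0; i < QPL_PAGES; i++) {
            qpl.pages[i] = backing[i];
            qpl.page_buses[i] = 0x1000UL * i;   /* fake bus addresses */
        }
        while (!alloc_qpl_page(&rx, &bs))
            printf("handed out page, bus=0x%lx buf_size=%u pad=%u\n",
                   bs.addr, bs.buf_size, bs.pad);
        return 0;
    }
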
168 void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx, in gve_try_recycle_buf() argument
171 const u16 data_buffer_size = rx->packet_buffer_truesize; in gve_try_recycle_buf()
202 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); in gve_try_recycle_buf()
206 gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); in gve_try_recycle_buf()
207 rx->dqo.used_buf_states_cnt++; in gve_try_recycle_buf()
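
gve_try_recycle_buf() decides, per completed buffer, whether its page can go straight back on the recycled list or must sit on the used list (bumping used_buf_states_cnt) until outstanding references drain; data_buffer_size comes from rx->packet_buffer_truesize. A control-flow sketch of that split, with can_recycle_now() as a placeholder for the driver's page-offset and refcount bookkeeping (the placeholder is an assumption, not a driver helper):

    #include <stdbool.h>
    #include <stdio.h>

    struct buf_state {
        int id;
        bool recyclable;   /* stand-in for the real page-offset/refcount state */
    };

    struct rx_ring { int used_buf_states_cnt; };

    /* Placeholder for the driver's check; the real code tracks page offsets
     * and reference counts before trusting the page again. */
    static bool can_recycle_now(const struct buf_state *bs)
    {
        return bs->recyclable;
    }

    static void enqueue_recycled(struct buf_state *bs)
    {
        printf("buf %d -> recycled list\n", bs->id);
    }

    static void enqueue_used(struct buf_state *bs)
    {
        printf("buf %d -> used list\n", bs->id);
    }

    /* Shape of gve_try_recycle_buf(): recycle when safe, otherwise park the
     * buffer on the used list and bump the counter (line 207 above). */
    static void try_recycle_buf(struct rx_ring *rx, struct buf_state *bs)
    {
        if (can_recycle_now(bs)) {
            enqueue_recycled(bs);
            return;
        }
        enqueue_used(bs);
        rx->used_buf_states_cnt++;
    }

    int main(void)
    {
        struct rx_ring rx = { 0 };
        struct buf_state ok = { .id = 1, .recyclable = true };
        struct buf_state busy = { .id = 2, .recyclable = false };

        try_recycle_buf(&rx, &ok);
        try_recycle_buf(&rx, &busy);
        printf("used_buf_states_cnt=%d\n", rx.used_buf_states_cnt);
        return 0;
    }
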
210 void gve_free_to_page_pool(struct gve_rx_ring *rx, in gve_free_to_page_pool() argument
223 static int gve_alloc_from_page_pool(struct gve_rx_ring *rx, in gve_alloc_from_page_pool() argument
228 buf_state->page_info.buf_size = rx->packet_buffer_truesize; in gve_alloc_from_page_pool()
229 netmem = page_pool_alloc_netmem(rx->dqo.page_pool, in gve_alloc_from_page_pool()
240 buf_state->page_info.pad = rx->dqo.page_pool->p.offset; in gve_alloc_from_page_pool()
246 struct gve_rx_ring *rx, in gve_rx_create_page_pool() argument
249 u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num); in gve_rx_create_page_pool()
266 void gve_free_buffer(struct gve_rx_ring *rx, in gve_free_buffer() argument
269 if (rx->dqo.page_pool) { in gve_free_buffer()
270 gve_free_to_page_pool(rx, buf_state, true); in gve_free_buffer()
271 gve_free_buf_state(rx, buf_state); in gve_free_buffer()
273 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, in gve_free_buffer()
278 void gve_reuse_buffer(struct gve_rx_ring *rx, in gve_reuse_buffer() argument
281 if (rx->dqo.page_pool) { in gve_reuse_buffer()
283 gve_free_buf_state(rx, buf_state); in gve_reuse_buffer()
286 gve_try_recycle_buf(rx->gve, rx, buf_state); in gve_reuse_buffer()
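
gve_free_to_page_pool(), gve_alloc_from_page_pool() and gve_rx_create_page_pool() cover page_pool mode, where buffers are netmems drawn with page_pool_alloc_netmem(), the pad comes from the pool's configured offset, and the pool is created per ring and tied to the ring's notify context. gve_free_buffer() and gve_reuse_buffer() then branch on rx->dqo.page_pool: with a pool the netmem goes back to the pool and the buf state returns to the free list, while the QPL path re-queues the buf state instead (recycled list on free, gve_try_recycle_buf() on reuse). A policy-level sketch with stub release helpers (the stub names are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    struct buf_state { int id; };

    struct rx_ring {
        bool has_page_pool;   /* models the rx->dqo.page_pool test above */
    };

    /* Stubs standing in for the real release/requeue helpers. */
    static void return_to_page_pool(struct buf_state *bs, bool allow_direct)
    {
        printf("buf %d -> page_pool (direct=%d)\n", bs->id, allow_direct);
    }

    static void put_on_free_list(struct buf_state *bs)
    {
        printf("buf %d -> free buf_state list\n", bs->id);
    }

    static void put_on_recycled_list(struct buf_state *bs)
    {
        printf("buf %d -> recycled list\n", bs->id);
    }

    static void try_recycle(struct buf_state *bs)
    {
        printf("buf %d -> gve_try_recycle_buf (QPL page reuse)\n", bs->id);
    }

    /* Shape of gve_free_buffer(): the buffer is being dropped entirely. */
    static void free_buffer(struct rx_ring *rx, struct buf_state *bs)
    {
        if (rx->has_page_pool) {
            return_to_page_pool(bs, true);
            put_on_free_list(bs);
        } else {
            put_on_recycled_list(bs);
        }
    }

    /* Shape of gve_reuse_buffer(): the data lives on (e.g. in an SKB), so
     * only the bookkeeping is released or recycled. */
    static void reuse_buffer(struct rx_ring *rx, struct buf_state *bs)
    {
        if (rx->has_page_pool)
            put_on_free_list(bs);
        else
            try_recycle(bs);
    }

    int main(void)
    {
        struct rx_ring pp = { .has_page_pool = true };
        struct rx_ring qpl = { .has_page_pool = false };
        struct buf_state a = { 1 }, b = { 2 };

        free_buffer(&pp, &a);
        reuse_buffer(&qpl, &b);
        return 0;
    }
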
290 int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc) in gve_alloc_buffer() argument
294 if (rx->xsk_pool) { in gve_alloc_buffer()
295 buf_state = gve_alloc_buf_state(rx); in gve_alloc_buffer()
299 buf_state->xsk_buff = xsk_buff_alloc(rx->xsk_pool); in gve_alloc_buffer()
301 xsk_set_rx_need_wakeup(rx->xsk_pool); in gve_alloc_buffer()
302 gve_free_buf_state(rx, buf_state); in gve_alloc_buffer()
306 xsk_clear_rx_need_wakeup(rx->xsk_pool); in gve_alloc_buffer()
307 desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states); in gve_alloc_buffer()
311 } else if (rx->dqo.page_pool) { in gve_alloc_buffer()
312 buf_state = gve_alloc_buf_state(rx); in gve_alloc_buffer()
316 if (gve_alloc_from_page_pool(rx, buf_state)) in gve_alloc_buffer()
319 buf_state = gve_get_recycled_buf_state(rx); in gve_alloc_buffer()
321 buf_state = gve_alloc_buf_state(rx); in gve_alloc_buffer()
325 if (unlikely(gve_alloc_qpl_page_dqo(rx, buf_state))) in gve_alloc_buffer()
329 desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states); in gve_alloc_buffer()
337 gve_free_buf_state(rx, buf_state); in gve_alloc_buffer()
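
Finally, gve_alloc_buffer() sits on top and picks the source for each RX descriptor: an XSK buffer from rx->xsk_pool (also toggling the need-wakeup flag depending on whether xsk_buff_alloc() succeeded, lines 301 and 306), a page_pool netmem, or, in QPL mode, a recycled buf state before falling back to a fresh state plus a QPL page; on success the descriptor's buf_id is the buf state's index into rx->dqo.buf_states. A control-flow sketch of that selection with stub allocators (the stubs and the collapsed error handling are simplifications of what the matches above show):

    #include <stdbool.h>
    #include <stdio.h>

    enum ring_mode { MODE_XSK, MODE_PAGE_POOL, MODE_QPL };

    struct buf_state { int id; };
    struct rx_desc { unsigned short buf_id; };

    /* Stub fill helpers; each returns false on failure. */
    static bool xsk_fill(struct buf_state *bs)       { return bs != NULL; }
    static bool page_pool_fill(struct buf_state *bs) { return bs != NULL; }
    static bool qpl_fill(struct buf_state *bs)       { return bs != NULL; }

    static struct buf_state *get_recycled_state(void) { return NULL; }
    static struct buf_state *get_free_state(void)
    {
        static struct buf_state bs = { .id = 7 };
        return &bs;
    }

    /* Shape of gve_alloc_buffer(): pick a buf state, fill it from the source
     * that matches the ring mode, then publish its index as the descriptor's
     * buf_id (the driver uses buf_state - rx->dqo.buf_states as the index). */
    static int alloc_buffer(enum ring_mode mode, struct rx_desc *desc)
    {
        struct buf_state *bs;

        if (mode == MODE_XSK || mode == MODE_PAGE_POOL) {
            bs = get_free_state();
            if (!bs)
                return -1;
            if (!(mode == MODE_XSK ? xsk_fill(bs) : page_pool_fill(bs)))
                return -1;   /* the driver also releases the buf state here */
        } else {
            bs = get_recycled_state();     /* cheap path: already has a page */
            if (!bs) {
                bs = get_free_state();     /* fresh state needs a QPL page */
                if (!bs || !qpl_fill(bs))
                    return -1;
            }
        }

        desc->buf_id = (unsigned short)bs->id;
        return 0;
    }

    int main(void)
    {
        struct rx_desc desc;

        if (!alloc_buffer(MODE_QPL, &desc))
            printf("descriptor buf_id=%u\n", desc.buf_id);
        return 0;
    }
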