Lines Matching full:rx
15 struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx) in gve_alloc_buf_state() argument
20 buffer_id = rx->dqo.free_buf_states; in gve_alloc_buf_state()
24 buf_state = &rx->dqo.buf_states[buffer_id]; in gve_alloc_buf_state()
27 rx->dqo.free_buf_states = buf_state->next; in gve_alloc_buf_state()
35 bool gve_buf_state_is_allocated(struct gve_rx_ring *rx, in gve_buf_state_is_allocated() argument
38 s16 buffer_id = buf_state - rx->dqo.buf_states; in gve_buf_state_is_allocated()
43 void gve_free_buf_state(struct gve_rx_ring *rx, in gve_free_buf_state() argument
46 s16 buffer_id = buf_state - rx->dqo.buf_states; in gve_free_buf_state()
48 buf_state->next = rx->dqo.free_buf_states; in gve_free_buf_state()
49 rx->dqo.free_buf_states = buffer_id; in gve_free_buf_state()
52 struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx, in gve_dequeue_buf_state() argument
62 buf_state = &rx->dqo.buf_states[buffer_id]; in gve_dequeue_buf_state()
75 void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list, in gve_enqueue_buf_state() argument
78 s16 buffer_id = buf_state - rx->dqo.buf_states; in gve_enqueue_buf_state()
88 rx->dqo.buf_states[tail].next = buffer_id; in gve_enqueue_buf_state()
93 struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx) in gve_get_recycled_buf_state() argument
99 buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states); in gve_get_recycled_buf_state()
103 if (unlikely(rx->dqo.used_buf_states.head == -1)) in gve_get_recycled_buf_state()
112 buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states); in gve_get_recycled_buf_state()
114 rx->dqo.used_buf_states_cnt--; in gve_get_recycled_buf_state()
118 gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); in gve_get_recycled_buf_state()
124 int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx, in gve_alloc_qpl_page_dqo() argument
127 struct gve_priv *priv = rx->gve; in gve_alloc_qpl_page_dqo()
130 idx = rx->dqo.next_qpl_page_idx; in gve_alloc_qpl_page_dqo()
136 buf_state->page_info.page = rx->dqo.qpl->pages[idx]; in gve_alloc_qpl_page_dqo()
137 buf_state->addr = rx->dqo.qpl->page_buses[idx]; in gve_alloc_qpl_page_dqo()
138 rx->dqo.next_qpl_page_idx++; in gve_alloc_qpl_page_dqo()
142 buf_state->page_info.buf_size = rx->packet_buffer_truesize; in gve_alloc_qpl_page_dqo()
143 buf_state->page_info.pad = rx->rx_headroom; in gve_alloc_qpl_page_dqo()
163 void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx, in gve_try_recycle_buf() argument
166 const u16 data_buffer_size = rx->packet_buffer_truesize; in gve_try_recycle_buf()
197 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); in gve_try_recycle_buf()
201 gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); in gve_try_recycle_buf()
202 rx->dqo.used_buf_states_cnt++; in gve_try_recycle_buf()
205 void gve_free_to_page_pool(struct gve_rx_ring *rx, in gve_free_to_page_pool() argument
218 static int gve_alloc_from_page_pool(struct gve_rx_ring *rx, in gve_alloc_from_page_pool() argument
223 buf_state->page_info.buf_size = rx->packet_buffer_truesize; in gve_alloc_from_page_pool()
224 netmem = page_pool_alloc_netmem(rx->dqo.page_pool, in gve_alloc_from_page_pool()
235 buf_state->page_info.pad = rx->dqo.page_pool->p.offset; in gve_alloc_from_page_pool()
241 struct gve_rx_ring *rx, in gve_rx_create_page_pool() argument
244 u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num); in gve_rx_create_page_pool()
260 void gve_free_buffer(struct gve_rx_ring *rx, in gve_free_buffer() argument
263 if (rx->dqo.page_pool) { in gve_free_buffer()
264 gve_free_to_page_pool(rx, buf_state, true); in gve_free_buffer()
265 gve_free_buf_state(rx, buf_state); in gve_free_buffer()
267 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, in gve_free_buffer()
272 void gve_reuse_buffer(struct gve_rx_ring *rx, in gve_reuse_buffer() argument
275 if (rx->dqo.page_pool) { in gve_reuse_buffer()
277 gve_free_buf_state(rx, buf_state); in gve_reuse_buffer()
280 gve_try_recycle_buf(rx->gve, rx, buf_state); in gve_reuse_buffer()
284 int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc) in gve_alloc_buffer() argument
288 if (rx->dqo.page_pool) { in gve_alloc_buffer()
289 buf_state = gve_alloc_buf_state(rx); in gve_alloc_buffer()
293 if (gve_alloc_from_page_pool(rx, buf_state)) in gve_alloc_buffer()
296 buf_state = gve_get_recycled_buf_state(rx); in gve_alloc_buffer()
298 buf_state = gve_alloc_buf_state(rx); in gve_alloc_buffer()
302 if (unlikely(gve_alloc_qpl_page_dqo(rx, buf_state))) in gve_alloc_buffer()
306 desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states); in gve_alloc_buffer()
314 gve_free_buf_state(rx, buf_state); in gve_alloc_buffer()
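
The matches above all revolve around one pattern: buffer states live in a flat buf_states array, and lists of them (free_buf_states, recycled_buf_states, used_buf_states) are chained by s16 array indices stored in each state's next field, with -1 meaning "empty". Below is a minimal standalone sketch of that index-based free list, not the driver code itself: the struct and function names are simplified stand-ins for gve_alloc_buf_state()/gve_free_buf_state(), and the page_info/DMA-address fields the real states carry are omitted.

/* Simplified model of the gve DQO buffer-state free list (assumed names). */
#include <stdint.h>
#include <stdio.h>

#define NUM_BUF_STATES 8

struct buf_state {
	int16_t next;	/* index of the next state in a list, -1 = end */
	/* page/address bookkeeping omitted in this sketch */
};

struct rx_ring {
	struct buf_state buf_states[NUM_BUF_STATES];
	int16_t free_buf_states;	/* head index of the free list, -1 when empty */
};

/* Pop a state off the free list, mirroring the gve_alloc_buf_state() hits. */
static struct buf_state *alloc_buf_state(struct rx_ring *rx)
{
	int16_t buffer_id = rx->free_buf_states;
	struct buf_state *buf_state;

	if (buffer_id == -1)
		return NULL;

	buf_state = &rx->buf_states[buffer_id];
	rx->free_buf_states = buf_state->next;
	buf_state->next = -1;	/* detached; not on any list now */
	return buf_state;
}

/* Push a state back, mirroring gve_free_buf_state(): the id is recovered
 * by pointer arithmetic against the backing array, just as in the
 * "buf_state - rx->dqo.buf_states" lines above. */
static void free_buf_state(struct rx_ring *rx, struct buf_state *buf_state)
{
	int16_t buffer_id = buf_state - rx->buf_states;

	buf_state->next = rx->free_buf_states;
	rx->free_buf_states = buffer_id;
}

int main(void)
{
	struct rx_ring rx;
	struct buf_state *bs;
	int i;

	/* Chain every state onto the free list at init time. */
	for (i = 0; i < NUM_BUF_STATES - 1; i++)
		rx.buf_states[i].next = i + 1;
	rx.buf_states[NUM_BUF_STATES - 1].next = -1;
	rx.free_buf_states = 0;

	bs = alloc_buf_state(&rx);
	printf("allocated id %ld\n", (long)(bs - rx.buf_states));
	free_buf_state(&rx, bs);
	printf("free list head is %d again\n", rx.free_buf_states);
	return 0;
}

The same index-linking scheme is what gve_enqueue_buf_state()/gve_dequeue_buf_state() apply to the recycled and used lists in the hits above; only the head/tail bookkeeping differs.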