Lines Matching refs:rx_buf

44 struct efx_rx_buffer *rx_buf,
48 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
56 rx_buf->flags |= EFX_RX_PKT_DISCARD;
68 struct efx_rx_buffer *rx_buf,
84 EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);
92 if (rx_buf->len > hdr_len) {
93 rx_buf->page_offset += hdr_len;
94 rx_buf->len -= hdr_len;
98 rx_buf->page, rx_buf->page_offset,
99 rx_buf->len, efx->rx_buffer_truesize);
100 rx_buf->page = NULL;
105 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
108 __free_pages(rx_buf->page, efx->rx_buffer_order);
109 rx_buf->page = NULL;
126 struct efx_rx_buffer *rx_buf;
128 rx_buf = efx_rx_buffer(rx_queue, index);
129 rx_buf->flags |= flags;
134 efx_rx_packet__check_len(rx_queue, rx_buf, len);
142 WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
143 rx_buf->flags |= EFX_RX_PKT_DISCARD;
150 (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
151 (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
156 if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
158 efx_discard_rx_packet(channel, rx_buf, n_frags);
163 rx_buf->len = len;
168 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
173 prefetch(efx_rx_buf_va(rx_buf));
175 rx_buf->page_offset += efx->rx_prefix_size;
176 rx_buf->len -= efx->rx_prefix_size;
185 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
188 efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
190 rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
191 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
195 rx_buf = efx_rx_buffer(rx_queue, index);
196 efx_recycle_rx_pages(channel, rx_buf, n_frags);
207 struct efx_rx_buffer *rx_buf,
211 u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);
213 skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
218 efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
225 if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
227 skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
250 struct efx_rx_buffer *rx_buf, u8 **ehp)
269 efx_free_rx_buffers(rx_queue, rx_buf,
279 dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
280 rx_buf->len, DMA_FROM_DEVICE);
290 rx_buf->len, false);
301 rx_buf->page_offset += offset;
302 rx_buf->len -= offset;
313 efx_free_rx_buffers(rx_queue, rx_buf, 1);
327 efx_free_rx_buffers(rx_queue, rx_buf, 1);
340 efx_free_rx_buffers(rx_queue, rx_buf, 1);
349 efx_free_rx_buffers(rx_queue, rx_buf, 1);
362 struct efx_rx_buffer *rx_buf =
364 u8 *eh = efx_rx_buf_va(rx_buf);
369 if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN) {
370 rx_buf->len = le16_to_cpup((__le16 *)
377 if (unlikely(!rx_buf->len)) {
378 efx_free_rx_buffers(rx_queue, rx_buf,
386 * loopback layer, and free the rx_buf here
389 efx_loopback_rx_packet(efx, eh, rx_buf->len);
390 efx_free_rx_buffers(rx_queue, rx_buf,
396 rx_queue->rx_bytes += rx_buf->len;
398 if (!efx_do_xdp(efx, channel, rx_buf, &eh))
402 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
404 if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
405 efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, 0);
407 efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
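
All of the matches above operate on the same page_offset/len/flags bookkeeping of struct efx_rx_buffer. As a rough illustration only, the userspace sketch below mimics two of those steps: the RX-prefix strip seen at lines 175-176 and the header split from efx_rx_mk_skb at lines 92-94. Every identifier prefixed mock_ and both constant values are assumptions made for this sketch, not the driver's definitions.

/*
 * Hypothetical userspace sketch of the rx_buf bookkeeping visible in the
 * listing above.  The struct and constants are illustrative stand-ins,
 * not the sfc driver's definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define MOCK_RX_PREFIX_SIZE 16   /* stand-in for efx->rx_prefix_size */
#define MOCK_SKB_HEADERS    128  /* stand-in for EFX_SKB_HEADERS */

struct mock_rx_buffer {
	uint16_t page_offset;  /* offset of the packet within the DMA page */
	uint16_t len;          /* bytes of packet data in this buffer */
};

/* Mirrors lines 175-176: advance past the NIC-written RX prefix. */
static void mock_strip_prefix(struct mock_rx_buffer *rx_buf)
{
	rx_buf->page_offset += MOCK_RX_PREFIX_SIZE;
	rx_buf->len -= MOCK_RX_PREFIX_SIZE;
}

/*
 * Mirrors the header split at lines 92-94: hdr_len bytes go into the
 * skb's linear area, and whatever remains in the page is attached as a
 * fragment, so page_offset/len are advanced past the copied header.
 */
static uint16_t mock_split_header(struct mock_rx_buffer *rx_buf)
{
	uint16_t hdr_len = rx_buf->len < MOCK_SKB_HEADERS ?
			   rx_buf->len : MOCK_SKB_HEADERS;

	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;
	} else {
		rx_buf->len = 0;  /* whole packet fits in the linear area */
	}
	return hdr_len;
}

int main(void)
{
	/* A 1500-byte frame preceded by the RX prefix at offset 0. */
	struct mock_rx_buffer rx_buf = { .page_offset = 0,
					 .len = MOCK_RX_PREFIX_SIZE + 1500 };

	mock_strip_prefix(&rx_buf);
	printf("after prefix strip: offset=%u len=%u\n",
	       (unsigned int)rx_buf.page_offset, (unsigned int)rx_buf.len);

	hdr_len = 0;
	unsigned int hdr = (unsigned int)mock_split_header(&rx_buf);
	printf("header copied=%u, fragment: offset=%u len=%u\n",
	       hdr, (unsigned int)rx_buf.page_offset, (unsigned int)rx_buf.len);
	return 0;
}

The sketch only reproduces the offset/length arithmetic; DMA syncing (lines 279-280), recycling (line 196), and flag handling (EFX_RX_PKT_DISCARD, EFX_RX_PKT_CSUMMED, and friends) are left to the driver code the listing points at.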