Lines Matching refs:rx_buf

390 if (!rx_ring->rx_buf)
405 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
407 if (!rx_buf->page)
413 dma_sync_single_range_for_cpu(dev, rx_buf->dma,
414 rx_buf->page_offset,
419 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
421 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
423 rx_buf->page = NULL;
424 rx_buf->page_offset = 0;
431 memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
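Lines 390-431 evidently come from the ring-teardown helper (ice_clean_rx_ring() in the upstream ice driver): every posted buffer is synced back to the CPU, unmapped, and its outstanding page references dropped in one shot. A minimal sketch of how the matched lines plausibly fit together; the loop structure, the sync length, and the DMA_FROM_DEVICE/ICE_RX_DMA_ATTR arguments beyond line 419 are assumptions, only the matched lines themselves are confirmed:

	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (!rx_buf->page)
			continue;

		/* pull the buffer back from the device before freeing */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* unmap the whole page, then drop every reference the
		 * driver still holds via the pagecnt_bias accounting */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

	memset(rx_ring->rx_buf, 0,
	       array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));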
463 kfree(rx_ring->rx_buf);
464 rx_ring->rx_buf = NULL;
491 WARN_ON(rx_ring->rx_buf);
492 rx_ring->rx_buf =
493 kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
494 if (!rx_ring->rx_buf)
518 kfree(rx_ring->rx_buf);
519 rx_ring->rx_buf = NULL;
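Lines 463-519 track the rx_buf array's own lifecycle: kfree()+NULL in the free path, a WARN_ON() against double allocation plus kcalloc() in setup, and the same kfree()+NULL on the setup error path. A sketch of the allocation step, assuming a -ENOMEM return on failure:

	/* one zeroed metadata entry per descriptor */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;	/* assumed failure handling */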
664 * @bi: rx_buf struct to modify
764 bi = &rx_ring->rx_buf[ntu];
787 bi = rx_ring->rx_buf;
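Lines 764 and 787 are the ring-refill wrap in the buffer allocation loop: the cursor starts at next_to_use and snaps back to slot 0 when it runs off the end of the array. A sketch; everything beyond the two matched assignments (the loop body, ntu, cleaned_count) is assumed context:

	struct ice_rx_buf *bi = &rx_ring->rx_buf[ntu];

	do {
		/* ... map a fresh page into bi and write its DMA address
		 * into the matching descriptor (assumed body) ... */

		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			bi = rx_ring->rx_buf;	/* wrap to slot 0 */
			ntu = 0;
		}
	} while (--cleaned_count);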
805 * @rx_buf: Rx buffer to adjust
814 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
818 rx_buf->page_offset ^= size;
821 rx_buf->page_offset += size;
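Lines 814-821 are the page-split trick. When a page is shared by exactly two buffers, XOR-ing the offset with the buffer truesize toggles between the two halves (0 ^ 2048 = 2048, 2048 ^ 2048 = 0), so the just-used half rests while the other is posted; on larger pages the offset simply advances. Reconstructed with the conditional made explicit; the PAGE_SIZE guard is an assumption modeled on how Intel Ethernet drivers usually gate this:

	static void
	ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
	{
	#if (PAGE_SIZE < 8192)
		/* flip to the other half of the page */
		rx_buf->page_offset ^= size;
	#else
		/* larger pages: walk forward linearly */
		rx_buf->page_offset += size;
	#endif
	}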
827 * @rx_buf: buffer containing the page
835 ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
837 unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
838 struct page *page = rx_buf->page;
845 if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
850 if (rx_buf->page_offset > ICE_LAST_OFFSET)
860 rx_buf->pagecnt_bias = USHRT_MAX;
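Lines 835-860 are the reuse test. pagecnt_bias is a stock of page references the driver pre-charged to itself; the page's real refcount minus that bias is the number of references held elsewhere, so a difference above 1 means the network stack still owns part of the page and it must not be recycled. Line 860 restocks the bias to USHRT_MAX so the atomic page refcount is touched only once per ~64K reuses. A sketch; the dev_page_is_reusable() check and the page_ref_add() recharge are assumptions consistent with the matched lines:

	static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
	{
		unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
		struct page *page = rx_buf->page;

		/* pfmemalloc pages must go back to the allocator quickly */
		if (!dev_page_is_reusable(page))
			return false;

		/* someone besides the driver still references the page */
		if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
			return false;

		/* large pages: give up once the offset walks past the end */
		if (rx_buf->page_offset > ICE_LAST_OFFSET)
			return false;

		/* recharge the bias before it can hit zero */
		if (unlikely(pagecnt_bias == 1)) {
			page_ref_add(page, USHRT_MAX - 1);
			rx_buf->pagecnt_bias = USHRT_MAX;
		}

		return true;
	}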
870 * @rx_buf: buffer containing page to add
873 * This function will add the data contained in rx_buf->page to the xdp buf.
878 struct ice_rx_buf *rx_buf, const unsigned int size)
894 __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
895 rx_buf->page_offset, size);
902 if (page_is_pfmemalloc(rx_buf->page))
921 new_buf = &rx_ring->rx_buf[nta];
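Lines 878-902 grow a multi-buffer XDP frame: each further Rx buffer becomes a fragment in the skb_shared_info that lives in the tailroom of the xdp_buff, and a pfmemalloc page taints the whole frame so a later skb inherits the flag; line 921 (in ice_reuse_rx_page()) then recycles the donor buffer's slot. A sketch of the frag-attach step; the sinfo lookup, the overflow guard, and the xdp_frags_size update are assumptions around the matched lines:

	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);

	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS))
		return -ENOMEM;		/* assumed overflow handling */

	/* attach this buffer's chunk of the page as the next frag */
	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
				   rx_buf->page_offset, size);
	sinfo->xdp_frags_size += size;

	if (page_is_pfmemalloc(rx_buf->page))
		xdp_buff_set_frag_pfmemalloc(xdp);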
950 struct ice_rx_buf *rx_buf;
952 rx_buf = &rx_ring->rx_buf[ntc];
953 prefetchw(rx_buf->page);
956 return rx_buf;
958 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
959 rx_buf->page_offset, size,
963 rx_buf->pagecnt_bias--;
965 return rx_buf;
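Lines 950-965 are the fetch path (ice_get_rx_buf(), per line 1318): prefetch the page for the writes that follow, sync only the received bytes for CPU access, and spend one pre-charged reference for the fragment about to be handed out. The early return at 956 sits before the sync, which suggests a guard for descriptors that carry no payload. A sketch; the !size guard is an assumption:

	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[ntc];
	prefetchw(rx_buf->page);

	if (!size)	/* assumed: nothing DMA'd, nothing to sync */
		return rx_buf;

	/* make just the received bytes visible to the CPU */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* one fragment handed to the stack: spend one bias reference */
	rx_buf->pagecnt_bias--;

	return rx_buf;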
981 struct ice_rx_buf *rx_buf;
985 rx_buf = &rx_ring->rx_buf[idx];
986 rx_buf->pgcnt = page_count(rx_buf->page);
1060 struct ice_rx_buf *rx_buf;
1078 rx_buf = &rx_ring->rx_buf[rx_ring->first_desc];
1100 skb_add_rx_frag(skb, 0, rx_buf->page,
1101 rx_buf->page_offset + headlen, size,
1109 rx_buf->pagecnt_bias++;
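Lines 1078-1109 belong to copy-break skb construction: a small packet head is memcpy'd into the skb's linear area, and whatever payload remains is attached zero-copy as a page fragment (line 1100); if the copy swallowed the whole frame, line 1109 hands the bias reference back since nothing keeps the page. A sketch; the headlen computation, the memcpy, and the truesize argument are assumptions:

	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; /* assumed split */
	unsigned int headlen = size;

	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* pull the protocol headers into the linear area */
	memcpy(__skb_put(skb, headlen), xdp->data,
	       ALIGN(headlen, sizeof(long)));

	size -= headlen;
	if (size) {
		/* remainder stays in the page, attached as a fragment */
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
	} else {
		/* whole frame copied out: return the bias reference */
		rx_buf->pagecnt_bias++;
	}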
1130 * @rx_buf: Rx buffer to pull data from
1132 * This function will clean up the contents of the rx_buf. It will either
1136 ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
1138 if (!rx_buf)
1141 if (ice_can_reuse_rx_page(rx_buf)) {
1143 ice_reuse_rx_page(rx_ring, rx_buf);
1146 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
1149 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
1153 rx_buf->page = NULL;
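Lines 1136-1153 make the per-buffer recycle-or-free decision once the frame is done: a reusable half-page goes back on the ring via ice_reuse_rx_page(), otherwise the page is unmapped and the remaining bias references are drained, mirroring the teardown at 419-421. Reconstructed; the dma_unmap_page_attrs() arguments beyond line 1146 are assumptions:

	static void
	ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
	{
		if (!rx_buf)
			return;

		if (ice_can_reuse_rx_page(rx_buf)) {
			/* flip/advance the offset and repost the page */
			ice_reuse_rx_page(rx_ring, rx_buf);
		} else {
			/* the stack owns the page now: unmap and let go */
			dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
					     ice_rx_pg_size(rx_ring),
					     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
			__page_frag_cache_drain(rx_buf->page,
						rx_buf->pagecnt_bias);
		}

		rx_buf->page = NULL;
	}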
1182 buf = &rx_ring->rx_buf[idx];
1204 buf = &rx_ring->rx_buf[idx];
1288 struct ice_rx_buf *rx_buf;
1318 rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
1323 hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
1327 } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
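Lines 1318-1327 sit in the main clean-IRQ loop: the first buffer of a frame seeds the xdp_buff, with hard_start backed up by the configured headroom so an XDP program can prepend headers, and every further buffer is appended through ice_add_xdp_frag() (line 1327). A sketch; the rx_ring->rx_offset field name, the xdp_prepare_buff() call, and the failure handling are assumptions around the matched lines:

	rx_buf = ice_get_rx_buf(rx_ring, size, ntc);

	if (!xdp->data) {
		unsigned int offset = rx_ring->rx_offset; /* assumed field */
		void *hard_start;

		/* point XDP at the headroom in front of the payload */
		hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
			     offset;
		xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
	} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
		/* frag table full: drop the frame (assumed handling) */
		break;
	}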