Lines Matching +full:queue +full:- +full:rx

1 /* SPDX-License-Identifier: GPL-2.0-only */
14 /* Mailbox Queue */
22 /* Number of descriptors in a queue should be a multiple of 32. RX queue
61 * given RX completion queue has descriptors. This includes _ALL_ buffer
63 * you have a total of 1024 buffers so your RX queue _must_ have at least that
64 * many descriptors. This macro divides a given number of RX descriptors by
65 * the number of buffer queues to calculate how many descriptors each buffer queue
66 * can have without overrunning the RX queue.
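The macro this comment describes is not among the matched lines; a minimal sketch of the division it performs, with an illustrative value, could look like the following (the macro name is assumed from the driver's naming convention, not taken from the matches):

/* Assumed name, illustrative only: e.g. 1024 RX descriptors split across
 * 2 buffer queues gives each buffer queue at most 512 descriptors, so the
 * RX completion queue can never be overrun.
 */
#define IDPF_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ)	((RXD) / (NUM_BUFQ))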
78 #define IDPF_RX_BUFQ_WORKING_SET(rxq) ((rxq)->desc_count - 1)
82 if (unlikely(++(ntc) == (rxq)->desc_count)) { \
84 change_bit(__IDPF_Q_GEN_CHK, (rxq)->flags); \
90 if (unlikely(++(idx) == (q)->desc_count)) \
119 (&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i]))
121 (&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i]))
122 #define IDPF_SPLITQ_RX_BI_DESC(rxq, i) ((((rxq)->ring))[i])
125 (&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i]))
127 (&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i]))
129 (&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i]))
132 (&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i]))
134 (&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i]))
137 ((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
138 (txq)->next_to_clean - (txq)->next_to_use - 1)
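The unused-descriptor macro above is easier to follow with concrete numbers; the helper below is a hypothetical stand-alone mirror of the same expression on plain integers:

/* Hypothetical mirror of IDPF_DESC_UNUSED(), for illustration only. */
static inline unsigned int ring_unused(unsigned int ntc, unsigned int ntu,
				       unsigned int desc_count)
{
	return ((ntc > ntu) ? 0 : desc_count) + ntc - ntu - 1;
}

/* ring_unused(10, 50, 64) == 23	(ring has wrapped)
 * ring_unused(50, 10, 64) == 39	(no wrap)
 * One slot is always held back so next_to_use never catches next_to_clean.
 */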
140 #define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->buf_stack.top)
142 (txq)->desc_count >> 2)
144 #define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1)
146 * completions that are expected to arrive on the TX completion queue.
149 (((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
151 (txq)->num_completions_pending - (txq)->complq->num_completions)
154 #define IDPF_SPLITQ_TX_INVAL_COMPL_TAG -1
157 ((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
158 0 : (txq)->compl_tag_cur_gen)
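A purely illustrative mirror of the generation bump shows the wrap behaviour; the helper name and the example gen_max are assumptions, and updating the stored generation is left to the caller:

/* Hypothetical mirror of the wrap logic above. */
static inline u16 next_compl_tag_gen(u16 cur_gen, u16 gen_max)
{
	return (++cur_gen >= gen_max) ? 0 : cur_gen;
}

/* With gen_max == 1024: 1022 -> 1023, 1023 -> 0 */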
168 struct idpf_flex_tx_desc q; /* queue based scheduling */
188 * compl_tag_gen_s in struct idpf_queue. We'll use a value of -1
215 * struct idpf_buf_lifo - LIFO for managing OOO completions
227 * struct idpf_tx_offload_params - Offload parameters for a given packet
229 * @hdr_offsets: Offset parameter for single queue model
230 * @cd_tunneling: Type of tunneling enabled for single queue model
307 /* The size limit for a transmit buffer in a descriptor is (16K - 1).
312 #define IDPF_TX_MAX_DESC_DATA (SZ_16K - 1)
319 (&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i]))
332 DIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \
335 #define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count)
349 /* Packet type non-ip values */
371 (((ptype)->outer_ip == IDPF_RX_PTYPE_OUTER_IP) && \
372 ((ptype)->outer_ip_ver == (ipv)))
447 * 0->1 or 1->0 on each ring wrap. SW maintains its own
456 * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
490 * @rx_itr: RX ITR register
520 * @num_rxq: Number of RX queues
521 * @rx: Array of RX queues to service
522 * @rx_dim: Data for RX net_dim algorithm
523 * @rx_itr_value: RX interrupt throttling rate
525 * @rx_itr_idx: RX ITR index
529 * @name: Queue vector name
546 struct idpf_queue **rx;
585 struct idpf_rx_queue_stats rx;
609 * @idx: For buffer queue, it is used as group id, either 0 or 1. On clean,
610 * buffer queue uses this index to determine which group of refill queues
612 * For TX queue, it is used as index to map between TX queue group and
614 * For RX queue, it is used to index to total RX queue across groups and
616 * @tail: Tail offset. Used in both the single and split queue models. In the
617 * splitq model it is relevant only for the TX and RX queues.
619 * @rx_buf: Struct with RX buffer related members
625 * @q_type: Queue type (TX, RX, TX completion, RX buffer)
626 * @q_id: Queue id
630 * @next_to_clean: Next descriptor to clean. In split queue model, only
631 * relevant to TX completion queue and RX queue.
632 * @next_to_alloc: RX buffer to allocate at. Used only for RX. In splitq model
633 * only relevant to RX queue.
638 * the TX completion queue, it can be for any TXQ associated
639 * with that completion queue. This means we can clean up to
640 * N TXQs during a single call to clean the completion queue.
642 * that single call to clean the completion queue. By doing so,
646 * @rx_hsplit_en: RX headsplit enable
649 * @rx_max_pkt_size: RX max packet size
650 * @rx_buf_stride: RX buffer stride
651 * @rx_buffer_low_watermark: RX buffer low watermark
652 * @rxdids: Supported RX descriptor ids
657 * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
659 * @num_completions: Only relevant for TX completion queue. It tracks the
673 * --------------------------------
674 * | GEN=0-1023 |IDX = 0-63|
675 * --------------------------------
681 * --------------------------------
682 * |GEN | IDX = 0-8159 |
683 * --------------------------------
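The GEN/IDX split sketched above can be illustrated with hypothetical pack/unpack helpers. In the first layout, 6 index bits cover a 64-entry ring and the remaining 10 bits of a 16-bit tag carry the generation; none of the names below are taken from the source:

static inline u16 make_compl_tag(u16 gen, u16 idx, u8 gen_shift)
{
	return (gen << gen_shift) | idx;	/* GEN in the high bits */
}

static inline u16 compl_tag_to_idx(u16 tag, u8 gen_shift)
{
	return tag & ((1U << gen_shift) - 1);	/* IDX in the low bits */
}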
776 * @rxq: RX queue
777 * @refillq0: Pointer to refill queue 0
778 * @refillq1: Pointer to refill queue 1
792 * @bufq: Buffer queue
814 * @singleq: Struct with single queue related members
815 * @singleq.num_rxq: Number of RX queues associated
816 * @singleq.rxqs: Array of RX queue pointers
817 * @splitq: Struct with split queue related members
818 * @splitq.num_rxq_sets: Number of RX queue sets
819 * @splitq.rxq_sets: Array of RX queue sets
820 * @splitq.bufq_sets: Buffer queue set pointer
846 * @txqs: Array of TX queue pointers
847 * @complq: Associated completion queue pointer, split queue only
849 * completion queue, accumulated for all TX queues
850 * associated with that completion queue.
868 * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
881 * idpf_tx_singleq_build_ctob - populate command tag offset and size
906 * idpf_tx_splitq_build_desc - determine which type of data descriptor to build
916 if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2)
923 * idpf_alloc_page - Allocate a new RX buffer from the page pool
928 * Returns &dma_addr_t to be passed to HW for Rx on success, %DMA_MAPPING_ERROR otherwise.
935 buf->page = page_pool_dev_alloc_frag(pool, &buf->page_offset,
938 buf->page = page_pool_dev_alloc_pages(pool);
940 if (!buf->page)
943 buf->truesize = buf_size;
945 return page_pool_get_dma_addr(buf->page) + buf->page_offset +
946 pool->p.offset;
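A hedged usage sketch, assuming the signature implied by the fragments above (page pool, buffer metadata, buffer size); the caller name is hypothetical:

static int example_post_rx_buf(struct page_pool *pool,
			       struct idpf_rx_buf *buf,
			       unsigned int buf_size)
{
	dma_addr_t dma = idpf_alloc_page(pool, buf, buf_size);

	if (dma == DMA_MAPPING_ERROR)
		return -ENOMEM;

	/* write 'dma' into the buffer queue descriptor here */
	return 0;
}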
950 * idpf_rx_put_page - Return RX buffer page to pool
951 * @rx_buf: RX buffer metadata struct
955 page_pool_put_page(rx_buf->page->pp, rx_buf->page,
956 rx_buf->truesize, true);
957 rx_buf->page = NULL;
961 * idpf_rx_sync_for_cpu - Synchronize DMA buffer
962 * @rx_buf: RX buffer metadata struct
967 struct page *page = rx_buf->page;
968 struct page_pool *pp = page->pp;
970 dma_sync_single_range_for_cpu(pp->p.dev,
972 rx_buf->page_offset + pp->p.offset, len,