Lines Matching +full:sw +full:- +full:managed
1 /* SPDX-License-Identifier: GPL-2.0-only */
75 * descriptors before SW gets an interrupt and overwrites SW head, the gen bit
77 * be gone forever and SW has no reasonable way to tell that this has happened.
78 * From SW perspective, when we finally get an interrupt, it looks like we're
83 #define IDPF_RX_BUFQ_WORKING_SET(rxq) ((rxq)->desc_count - 1)
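The working-set cap above exists so the refill path never posts a full ring's worth of buffers; a minimal sketch of that idea, assuming the usual next_to_use/next_to_clean ring indices (the helper name below is illustrative, not the driver's real refill routine):

/* Illustrative only: limit posted buffers to IDPF_RX_BUFQ_WORKING_SET() so HW
 * can never lap the SW head and flip the gen bit twice without SW noticing.
 */
static u16 example_rx_bufs_postable(u16 desc_count, u16 next_to_use,
				    u16 next_to_clean)
{
	u16 in_flight = (next_to_use - next_to_clean + desc_count) % desc_count;
	u16 working_set = desc_count - 1;	/* IDPF_RX_BUFQ_WORKING_SET() */

	return working_set - in_flight;
}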
87 if (unlikely(++(ntc) == (rxq)->desc_count)) { \
95 if (unlikely(++(idx) == (q)->desc_count)) \
117 ((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
118 (txq)->next_to_clean - (txq)->next_to_use - 1)
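Worked numbers for the expression above, with a 512-entry ring and arbitrarily chosen indices:

/* next_to_clean = 200, next_to_use = 100 (clean is ahead, no wrap term):
 *	0 + 200 - 100 - 1 = 99 unused descriptors
 * next_to_clean = 5, next_to_use = 10 (use is ahead, add a full ring):
 *	512 + 5 - 10 - 1 = 506 unused descriptors
 * The trailing "- 1" keeps one slot in reserve, the usual convention for
 * telling a full ring apart from an empty one.
 */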
120 #define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top)
122 (txq)->desc_count >> 2)
124 #define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1)
129 (((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
131 (txq)->num_completions_pending - (txq)->complq->num_completions)
136 ((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
137 0 : (txq)->compl_tag_cur_gen)
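The generation bump above wraps explicitly at compl_tag_gen_max instead of relying on integer overflow; a small illustration, assuming the caller stores the result back into compl_tag_cur_gen and gen_max happens to be 1024:

/* cur_gen: 0 -> 1 -> 2 -> ... -> 1023 -> (1024 >= gen_max, so 0) -> 1 -> ...
 * i.e. GEN cycles through exactly gen_max distinct values, matching the
 * GEN=0-1023 field in the completion tag layout documented further down.
 */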
154 * struct idpf_buf_lifo - LIFO for managing OOO completions
166 * struct idpf_tx_offload_params - Offload parameters for a given packet
227 /* The size limit for a transmit buffer in a descriptor is (16K - 1).
232 #define IDPF_TX_MAX_DESC_DATA (SZ_16K - 1)
243 DIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \
246 #define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count)
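IDPF_GET_PTYPE_SIZE() exists because each ptype record carries a variable-length protocol ID list, so records in the control queue buffer are walked by size rather than indexed; a self-contained sketch of that walk, using a stand-in struct implied by the struct_size() arguments (not the driver's real virtchnl2 record layout):

struct example_ptype {
	__le16 ptype_id;
	u8 proto_id_count;
	u8 proto_id[];
};

#define EXAMPLE_GET_PTYPE_SIZE(p) \
	struct_size((p), proto_id, (p)->proto_id_count)

static void example_walk_ptypes(const u8 *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct example_ptype) <= len) {
		const struct example_ptype *p = (const void *)(buf + off);
		size_t rec = EXAMPLE_GET_PTYPE_SIZE(p);

		if (off + rec > len)
			break;

		/* ... consume p->ptype_id and its proto_id[] list ... */

		off += rec;
	}
}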
279 * 0->1 or 1->0 on each ring wrap. SW maintains its own
285 * @__IDPF_Q_RFL_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW
286 * bit and Q_RFL_GEN is the SW bit.
306 #define idpf_queue_set(f, q) __set_bit(__IDPF_Q_##f, (q)->flags)
307 #define idpf_queue_clear(f, q) __clear_bit(__IDPF_Q_##f, (q)->flags)
308 #define idpf_queue_change(f, q) __change_bit(__IDPF_Q_##f, (q)->flags)
309 #define idpf_queue_has(f, q) test_bit(__IDPF_Q_##f, (q)->flags)
312 __test_and_clear_bit(__IDPF_Q_##f, (q)->flags)
314 __assign_bit(__IDPF_Q_##f, (q)->flags, v)
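These token-pasting wrappers are what the hot paths use for the generation check described in the flag comments above; a hedged sketch of the usual pattern (the descriptor accessor is hypothetical and the field names follow the usual next_to_clean convention, only idpf_queue_has() and the GEN_CHK flag come from this header):

/* Hypothetical accessor: would read the gen bit of the descriptor at @idx. */
static bool example_desc_gen_bit(const struct idpf_rx_queue *rxq, u32 idx);

static int example_rx_clean(struct idpf_rx_queue *rxq, int budget)
{
	int done = 0;

	while (done < budget) {
		bool hw_gen = example_desc_gen_bit(rxq, rxq->next_to_clean);

		/* A descriptor belongs to SW only while its gen bit matches
		 * the queue's GEN_CHK flag; the ring-wrap bump macro earlier
		 * in this header flips GEN_CHK, so descriptors left over from
		 * the previous lap never match.
		 */
		if (hw_gen != idpf_queue_has(GEN_CHK, rxq))
			break;

		/* ... process the descriptor, advance next_to_clean ... */
		done++;
	}

	return done;
}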
338 * @dyn_ctl_sw_itridx_ena_m: Mask for SW ITR index
339 * @dyn_ctl_swint_trig_m: Mask for dyn_ctl SW triggered interrupt enable
457 /* Index used for 'SW ITR' update in DYN_CTL register */
465 * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode
476 * struct idpf_rx_queue - software structure representing a receive queue
567 * struct idpf_tx_queue - software structure representing a transmit queue
589 * --------------------------------
590 * | GEN=0-1023 |IDX = 0-63|
591 * --------------------------------
597 * --------------------------------
598 * |GEN | IDX = 0-8159 |
599 * --------------------------------
615 * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
616 * @stash: Tx buffer stash for Flow-based scheduling mode
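For the completion tag layouts pictured above, the tag is simply GEN packed above IDX in a 16-bit value; a hedged sketch of composing and splitting a tag for the generic (GEN=0-1023, IDX=0-63) layout, with illustrative mask and shift constants (the driver derives the real ones from the ring geometry):

#define EXAMPLE_COMPL_TAG_IDX_M		GENMASK(5, 0)	/* IDX = 0-63 */
#define EXAMPLE_COMPL_TAG_GEN_S		6		/* GEN = 0-1023 */

static u16 example_compl_tag(u16 gen, u16 idx)
{
	return (gen << EXAMPLE_COMPL_TAG_GEN_S) |
	       (idx & EXAMPLE_COMPL_TAG_IDX_M);
}

static void example_compl_tag_split(u16 tag, u16 *gen, u16 *idx)
{
	*idx = tag & EXAMPLE_COMPL_TAG_IDX_M;
	*gen = tag >> EXAMPLE_COMPL_TAG_GEN_S;
}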
686 * struct idpf_buf_queue - software structure representing a buffer queue
745 * struct idpf_compl_queue - software structure representing a completion queue
849 * managed by at most two bufqs (depending on performance configuration).
922 cpu = cpumask_first(&q_vector->napi.config->affinity_mask); /* in idpf_q_vector_to_mem() */
928 * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
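Rough numbers for what that count works out to, assuming roughly 12 KB of usable data per descriptor (the (16K - 1) cap from earlier in this header aligned down to a 4K read request size, per the rest of that comment in the source):

/* 32768-byte fragment: DIV_ROUND_UP(32768, 12288) = 3 descriptors
 * 65536-byte fragment: DIV_ROUND_UP(65536, 12288) = 6 descriptors
 */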
941 * idpf_tx_singleq_build_ctob - populate command tag offset and size
966 * idpf_tx_splitq_build_desc - determine which type of data descriptor to build
976 if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2)
983 * idpf_vport_intr_set_wb_on_itr - enable descriptor writeback on disabled interrupts
990 if (q_vector->wb_on_itr)
993 q_vector->wb_on_itr = true;
994 reg = &q_vector->intr_reg;
996 writel(reg->dyn_ctl_wb_on_itr_m | reg->dyn_ctl_intena_msk_m |
997 (IDPF_NO_ITR_UPDATE_IDX << reg->dyn_ctl_itridx_s),
998 reg->dyn_ctl);
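A hedged sketch of where a helper like this is typically called from, to show the intent; the poll function below is illustrative, not the driver's actual NAPI handler:

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct idpf_q_vector *q_vector =
		container_of(napi, struct idpf_q_vector, napi);
	int work_done = 0;

	/* ... clean Tx completions and Rx descriptors, tally work_done ... */

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		/* done for now: re-enable the vector's interrupt here */
		return work_done;
	}

	/* The interrupt stays masked (we will be polled again), so ask HW to
	 * keep writing back completed descriptors when the ITR expires.
	 */
	idpf_vport_intr_set_wb_on_itr(q_vector);

	return budget;
}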
1043 return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx, /* in idpf_tx_maybe_stop_common() */
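The call is truncated here to the matched line; given the function name and the negated netif_subqueue_maybe_stop() return value, the helper presumably reports true when the subqueue actually got stopped, so a caller sketch would look roughly like this (the descs_needed parameter and the wrapper itself are illustrative):

static int example_tx_check_room(struct idpf_tx_queue *tx_q,
				 unsigned int descs_needed)
{
	/* Stop the subqueue and back off when too few descriptors remain;
	 * the completion path is expected to wake the subqueue once enough
	 * descriptors have been reclaimed.
	 */
	if (unlikely(idpf_tx_maybe_stop_common(tx_q, descs_needed)))
		return -EBUSY;

	return 0;
}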