Lines matching q_vector (definitions and uses):
904 i40e_force_wb(vsi, tx_ring->q_vector);
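
Context for the call at 904: this is the Tx stall-recovery path. A sketch of the per-queue scan, assuming it matches the upstream i40e_detect_recover_hung() in i40e_txrx.c; every name outside the matched line comes from that assumption:

	unsigned int i;

	/* Assumed context: per-queue scan in i40e_detect_recover_hung(). */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		struct i40e_ring *tx_ring = vsi->tx_rings[i];
		int packets;

		if (!tx_ring || !tx_ring->desc)
			continue;

		/* A packet counter that has not moved since the last scan
		 * while work was pending marks the queue as stalled, so a
		 * software interrupt is forced on its vector to kick
		 * completion processing before the netdev watchdog trips.
		 */
		packets = tx_ring->stats.packets & INT_MAX;
		if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
			i40e_force_wb(vsi, tx_ring->q_vector);
			continue;
		}

		/* Order the counter read before the pending-work check. */
		smp_rmb();
		tx_ring->tx_stats.prev_pkt_ctr =
			i40e_get_tx_pending(tx_ring, true) ? packets : -1;
	}
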
1057 * @q_vector: the vector on which to enable writeback
1061 struct i40e_q_vector *q_vector)
1063 u16 flags = q_vector->tx.ring[0].flags;
1069 if (q_vector->arm_wb_state)
1077 I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
1085 q_vector->arm_wb_state = true;
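
Read together, 1057-1085 outline i40e_enable_wb_on_itr(): a no-op unless the ring runs in writeback-on-ITR mode, it skips vectors that are already armed, then programs the vector's dynamic control register so descriptors are written back when the ITR expires without raising an interrupt. A sketch filling in the unmatched lines, assuming the MSI-X branch of the upstream function; the register masks come from that assumed context:

static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	/* Only rings configured for writeback-on-ITR participate. */
	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	/* Already armed: the pending writeback will fire on its own. */
	if (q_vector->arm_wb_state)
		return;

	/* Assumed MSI-X path: request writeback on ITR expiry and select
	 * the no-ITR index so no interrupt is generated with it.
	 */
	val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
	      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK;

	wr32(&vsi->back->hw,
	     I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
	     val);

	q_vector->arm_wb_state = true;
}
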
1091 * @q_vector: the vector on which to force writeback
1094 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
1104 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
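
Lines 1091-1104 belong to i40e_force_wb(), the heavier sibling of the helper above: rather than waiting for an ITR expiry it triggers a software interrupt on the vector immediately, which is what the stall-recovery call at 904 relies on. A sketch of the MSI-X branch, assuming the upstream mask names:

void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	/* Assumed MSI-X path: enable the interrupt, select the no-ITR
	 * index, and trigger a software interrupt so the vector fires
	 * without waiting for hardware events.
	 */
	u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
		  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
		  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
		  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;

	wr32(&vsi->back->hw,
	     I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
}
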
1116 static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
1119 return &q_vector->rx == rc;
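
Line 1119 is the whole body of i40e_container_is_rx(): each q_vector embeds one Rx and one Tx ring container, so pointer identity against &q_vector->rx is enough to tell which direction a caller passed in. The helper reconstructed from the two matched lines:

static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
					struct i40e_ring_container *rc)
{
	/* The container is Rx iff it is the q_vector's embedded rx
	 * member; no flag is needed, the address decides.
	 */
	return &q_vector->rx == rc;
}
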
1122 static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
1126 switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
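
Line 1126 shows i40e_itr_divisor() switching on the negotiated link speed. The divisor scales how i40e_update_itr() converts average wire size into an ITR step: faster links get a larger divisor and therefore a smaller per-byte increase. A sketch assuming the upstream speed-to-divisor mapping:

static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
{
	unsigned int divisor;

	switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
		break;
	case I40E_LINK_SPEED_25GB:
	case I40E_LINK_SPEED_20GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
		break;
	default:
	case I40E_LINK_SPEED_10GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
		break;
	case I40E_LINK_SPEED_1GB:
	case I40E_LINK_SPEED_100MB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
		break;
	}

	return divisor;
}
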
1149 * @q_vector: structure containing interrupt and ring information
1160 static void i40e_update_itr(struct i40e_q_vector *q_vector,
1175 itr = i40e_container_is_rx(q_vector, rc) ?
1193 if (q_vector->itr_countdown) {
1201 if (i40e_container_is_rx(q_vector, rc)) {
1208 (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
1219 (q_vector->rx.target_itr & I40E_ITR_MASK) ==
1247 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1334 itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
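
Lines 1149-1334 all fall inside i40e_update_itr(), the adaptive moderation core: it starts from a direction-dependent default (1175), discards samples while a recent ITR write is still settling (1193), special-cases small latency-sensitive Rx bursts (1201-1208), and for bulk traffic derives the delay from the average frame size scaled by link speed (1334). A heavily condensed sketch, assuming the upstream structure; the packet-rate buckets between the matched lines are elided into comments:

static void i40e_update_itr(struct i40e_q_vector *q_vector,
			    struct i40e_ring_container *rc)
{
	unsigned int avg_wire_size, packets, bytes, itr;

	/* Direction sets the starting bias: Rx defaults toward minimum
	 * delay (latency), Tx toward maximum delay (throughput).
	 */
	itr = i40e_container_is_rx(q_vector, rc) ?
	      I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
	      I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;

	/* A recent ITR write can fire an early interrupt, so samples
	 * taken inside the countdown window are thrown away.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	packets = rc->total_packets;
	bytes = rc->total_bytes;

	/* Guard for this condensed flow; upstream the rate buckets handle
	 * the no-sample case on their own.
	 */
	if (!packets)
		goto clear_counts;

	if (i40e_container_is_rx(q_vector, rc)) {
		/* A trickle of small Rx frames while Tx already sits in
		 * latency mode means the workload is latency-driven, so
		 * size the delay from the frame size instead.
		 */
		if (packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
			itr = I40E_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size;
		}
	}

	/* Elided: the packet-rate buckets. Low counts nudge the target up
	 * by I40E_ITR_ADAPTIVE_MIN_INC, the mid range keeps the smaller of
	 * the two directions' settings, i.e. line 1247's
	 *   itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
	 * and high counts fall through to the size-based estimate below.
	 */

adjust_by_size:
	/* Bulk traffic: derive the delay from the average frame size,
	 * scaled down by the link-speed divisor (line 1334).
	 */
	avg_wire_size = bytes / packets;
	itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
	       I40E_ITR_ADAPTIVE_MIN_INC;

clear_counts:
	rc->target_itr = itr;
	rc->total_bytes = 0;
	rc->total_packets = 0;
}
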
2112 skb = napi_alloc_skb(&rx_ring->q_vector->napi, I40E_RX_HDR_SIZE);
2339 rx_ring->q_vector->rx.total_packets += total_rx_packets;
2340 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
2573 napi_gro_receive(&rx_ring->q_vector->napi, skb);
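
The Rx-path matches (2112, 2339-2340, 2573) show where the clean loop touches the vector: skbs are allocated against the vector's NAPI context, completed frames are handed to GRO on that same NAPI instance, and the totals at 2339-2340 are exactly what i40e_update_itr() later reads as rc->total_packets / rc->total_bytes before zeroing them. A sketch of the accounting tail that owns lines 2339-2340, assuming it matches the upstream i40e_update_rx_stats():

static void i40e_update_rx_stats(struct i40e_ring *rx_ring,
				 unsigned int total_rx_bytes,
				 unsigned int total_rx_packets)
{
	/* Per-ring counters, fenced for 64-bit stats readers. */
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);

	/* Per-vector totals feeding the adaptive ITR, reset after use. */
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
}
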
2664 * @q_vector: q_vector for which itr is being updated and interrupt enabled
2668 struct i40e_q_vector *q_vector)
2682 i40e_update_itr(q_vector, &q_vector->tx);
2683 i40e_update_itr(q_vector, &q_vector->rx);
2693 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
2696 interval = q_vector->rx.target_itr;
2697 q_vector->rx.current_itr = q_vector->rx.target_itr;
2698 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2699 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
2700 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
2701 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
2706 interval = q_vector->tx.target_itr;
2707 q_vector->tx.current_itr = q_vector->tx.target_itr;
2708 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2709 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
2712 interval = q_vector->rx.target_itr;
2713 q_vector->rx.current_itr = q_vector->rx.target_itr;
2714 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2717 if (q_vector->itr_countdown)
2718 q_vector->itr_countdown--;
2728 if (q_vector->in_busy_poll) {
2730 q_vector->in_busy_poll = false;
2734 wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->reg_idx), itr_val);
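
Lines 2664-2734 cover i40e_update_enable_itr(), which refreshes both containers through i40e_update_itr() and then programs at most one pending ITR change per interrupt, in priority order: an Rx decrease first, then a Tx decrease or the larger relative increase, then an Rx increase, else nothing. A sketch assuming the upstream ladder; i40e_buildreg_itr() and the busy-poll software-interrupt trigger are assumptions from that context:

static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
					  struct i40e_q_vector *q_vector)
{
	struct i40e_hw *hw = &vsi->back->hw;
	u16 interval = 0;
	u8 itr_idx;
	u32 itr_val;

	/* Refresh the adaptive targets; no-ops when ITR is static. */
	i40e_update_itr(q_vector, &q_vector->tx);
	i40e_update_itr(q_vector, &q_vector->rx);

	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
		/* Rx decrease: highest priority. */
		itr_idx = I40E_RX_ITR;
		interval = q_vector->rx.target_itr;
		q_vector->rx.current_itr = q_vector->rx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
		/* Tx decrease, or Tx needs the larger increase. */
		itr_idx = I40E_TX_ITR;
		interval = q_vector->tx.target_itr;
		q_vector->tx.current_itr = q_vector->tx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
		/* Rx increase: third priority. */
		itr_idx = I40E_RX_ITR;
		interval = q_vector->rx.target_itr;
		q_vector->rx.current_itr = q_vector->rx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else {
		/* Nothing to program; just age the settle countdown. */
		itr_idx = I40E_ITR_NONE;
		if (q_vector->itr_countdown)
			q_vector->itr_countdown--;
	}

	itr_val = i40e_buildreg_itr(itr_idx, interval);

	/* Assumed busy-poll exit handling (2728-2730): force a software
	 * interrupt so work that raced with the masked interrupt while
	 * the stack was busy polling is not lost.
	 */
	if (q_vector->in_busy_poll) {
		itr_val |= I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK;
		q_vector->in_busy_poll = false;
	}

	wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->reg_idx), itr_val);
}
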
2742 * This function will clean all queues associated with a q_vector.
2748 struct i40e_q_vector *q_vector =
2750 struct i40e_vsi *vsi = q_vector->vsi;
2769 i40e_for_each_ring(ring, q_vector->tx) {
2786 /* normally we have 1 Rx ring per q_vector */
2787 if (unlikely(q_vector->num_ringpairs > 1))
2792 budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1);
2794 /* Max of 1 Rx ring in this q_vector so give it the budget */
2797 i40e_for_each_ring(ring, q_vector->rx) {
2809 trace_i40e_napi_poll(napi, q_vector, budget, budget_per_ring, rx_cleaned,
2823 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2828 i40e_force_wb(vsi, q_vector);
2835 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2836 i40e_enable_wb_on_itr(vsi, q_vector);
2841 if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
2842 q_vector->arm_wb_state = false;
2848 i40e_update_enable_itr(vsi, q_vector);
2850 q_vector->in_busy_poll = true;
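
The final block (2742-2850) is i40e_napi_poll(), which ties everything together: clean every Tx ring on the vector, split the NAPI budget across the Rx rings (normally one), and on leftover work either keep polling, force an interrupt to migrate off a CPU that no longer matches the vector's affinity (2823-2828), or re-arm writeback-on-ITR (2835-2836); a completed poll re-enables the interrupt via i40e_update_enable_itr() unless busy polling kept NAPI scheduled (2848-2850). A condensed sketch assuming the upstream flow; the XDP/XSK branches, netpoll zero-budget handling, and tracing are omitted:

static int i40e_napi_poll(struct napi_struct *napi, int budget)
{
	struct i40e_q_vector *q_vector =
			       container_of(napi, struct i40e_q_vector, napi);
	struct i40e_vsi *vsi = q_vector->vsi;
	bool clean_complete = true, arm_wb = false;
	int budget_per_ring, work_done = 0;
	struct i40e_ring *ring;

	/* Tx first: descriptor cleanup is cheap, so be aggressive. */
	i40e_for_each_ring(ring, q_vector->tx) {
		if (!i40e_clean_tx_irq(vsi, ring, budget))
			clean_complete = false;
		arm_wb |= ring->arm_wb;
		ring->arm_wb = false;
	}

	/* Normally 1 Rx ring per q_vector; otherwise split the budget
	 * fairly, but never below 1, which would exit polling early.
	 */
	if (unlikely(q_vector->num_ringpairs > 1))
		budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1);
	else
		budget_per_ring = budget;

	i40e_for_each_ring(ring, q_vector->rx) {
		int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);

		work_done += cleaned;
		/* Consuming the whole slice means more may remain. */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	if (!clean_complete) {
		/* If IRQ affinity moved, stop polling and force an
		 * interrupt so the vector can land on the right CPU.
		 */
		if (!cpumask_test_cpu(smp_processor_id(),
				      &q_vector->affinity_mask)) {
			napi_complete_done(napi, work_done);
			i40e_force_wb(vsi, q_vector);
			return budget - 1;
		}
		if (arm_wb) {
			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
			i40e_enable_wb_on_itr(vsi, q_vector);
		}
		return budget;
	}

	if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
		q_vector->arm_wb_state = false;

	/* Done: re-enable the interrupt, unless busy polling kept NAPI
	 * owned by the stack, in which case just remember that state.
	 */
	if (likely(napi_complete_done(napi, work_done)))
		i40e_update_enable_itr(vsi, q_vector);
	else
		q_vector->in_busy_poll = true;

	return min(work_done, budget - 1);
}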