Lines matching refs: tx_q
192 * @tx_q: queue for which the buffers are allocated
196 static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
205 buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count;
206 tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL);
207 if (!tx_q->tx_buf)
210 if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
213 buf_stack = &tx_q->stash->buf_stack;
218 buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs),
223 buf_stack->size = tx_q->desc_count;
224 buf_stack->top = tx_q->desc_count;
226 for (i = 0; i < tx_q->desc_count; i++) {
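    The fragments above (file lines 192-226) show idpf_tx_buf_alloc_all() sizing everything off desc_count: one software tx_buf per descriptor, plus, when flow scheduling is enabled, a stash stack pre-sized to desc_count. A minimal userspace sketch of that pattern; struct and field names are illustrative, not the real idpf layout.

        #include <stdlib.h>

        struct model_tx_buf { void *skb; unsigned short nr_frags; };

        struct model_buf_stack {
                struct model_tx_buf **bufs;
                unsigned int size;
                unsigned int top;
        };

        struct model_txq {
                unsigned int desc_count;
                int flow_sch_en;                /* models idpf_queue_has(FLOW_SCH_EN, tx_q) */
                struct model_tx_buf *tx_buf;
                struct model_buf_stack buf_stack;
        };

        int model_tx_buf_alloc_all(struct model_txq *q)
        {
                unsigned int i;

                /* one software buffer per descriptor slot */
                q->tx_buf = calloc(q->desc_count, sizeof(*q->tx_buf));
                if (!q->tx_buf)
                        return -1;

                if (!q->flow_sch_en)
                        return 0;

                /* flow scheduling: a stash stack with a slot for every
                 * possible in-flight buffer, size == top == desc_count */
                q->buf_stack.bufs = calloc(q->desc_count, sizeof(*q->buf_stack.bufs));
                if (!q->buf_stack.bufs)
                        return -1;
                q->buf_stack.size = q->desc_count;
                q->buf_stack.top = q->desc_count;

                for (i = 0; i < q->desc_count; i++) {
                        q->buf_stack.bufs[i] = calloc(1, sizeof(**q->buf_stack.bufs));
                        if (!q->buf_stack.bufs[i])
                                return -1;
                }

                return 0;
        }
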
239 * @tx_q: the tx ring to set up
244 struct idpf_tx_queue *tx_q)
246 struct device *dev = tx_q->dev;
249 err = idpf_tx_buf_alloc_all(tx_q);
253 tx_q->size = tx_q->desc_count * sizeof(*tx_q->base_tx);
256 tx_q->size = ALIGN(tx_q->size, 4096);
257 tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma,
259 if (!tx_q->desc_ring) {
261 tx_q->size);
266 tx_q->next_to_use = 0;
267 tx_q->next_to_clean = 0;
268 idpf_queue_set(GEN_CHK, tx_q);
273 idpf_tx_desc_rel(tx_q);
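    Lines 239-273 show idpf_tx_desc_alloc() computing the ring size from desc_count, rounding it up to a 4 KiB multiple with ALIGN(), allocating it DMA-coherently, and resetting next_to_use/next_to_clean plus the GEN_CHK flag. A compilable userspace model of the same arithmetic; aligned_alloc() stands in for dmam_alloc_coherent(), and all names are illustrative.

        #include <stdint.h>
        #include <stdlib.h>
        #include <string.h>

        #define MODEL_ALIGN(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

        struct model_tx_desc { uint64_t qw[2]; };       /* placeholder descriptor */

        struct model_txq_ring {
                unsigned int desc_count;
                size_t size;
                void *desc_ring;
                unsigned int next_to_use;
                unsigned int next_to_clean;
                int gen_chk;
        };

        int model_tx_desc_alloc(struct model_txq_ring *q)
        {
                q->size = (size_t)q->desc_count * sizeof(struct model_tx_desc);
                q->size = MODEL_ALIGN(q->size, 4096);   /* ALIGN(tx_q->size, 4096) */

                /* stand-in for dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL) */
                q->desc_ring = aligned_alloc(4096, q->size);
                if (!q->desc_ring)
                        return -1;
                memset(q->desc_ring, 0, q->size);

                q->next_to_use = 0;
                q->next_to_clean = 0;
                q->gen_chk = 1;         /* models idpf_queue_set(GEN_CHK, tx_q) */

                return 0;
        }
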
1642 * @tx_q: tx queue to handle software marker
1644 static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
1646 struct idpf_netdev_priv *priv = netdev_priv(tx_q->netdev);
1650 idpf_queue_clear(SW_MARKER, tx_q);
1795 * @tx_q: Tx queue to clean
1815 static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
1822 u32 ntc = tx_q->next_to_clean;
1824 .dev = tx_q->dev,
1831 tx_desc = &tx_q->flex_tx[ntc];
1832 next_pending_desc = &tx_q->flex_tx[end];
1833 tx_buf = &tx_q->tx_buf[ntc];
1851 if (IDPF_TX_BUF_RSV_UNUSED(tx_q) < tx_buf->nr_frags) {
1856 idpf_stash_flow_sch_buffers(tx_q, tx_buf);
1859 idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
1861 idpf_stash_flow_sch_buffers(tx_q, tx_buf);
1868 idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
1877 idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
1881 tx_q->next_to_clean = ntc;
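    The clean loop above (lines 1815-1881) walks the ring from next_to_clean toward the reported end, advancing the descriptor and buffer cursors together through idpf_tx_splitq_clean_bump_ntc(). A small self-contained model of that wrap-aware walk, with made-up types and ring size:

        #include <stdio.h>

        #define RING_SIZE 8U

        struct model_desc { int id; };
        struct model_buf  { int in_use; };

        static void model_bump_ntc(unsigned int *ntc,
                                   struct model_desc **desc, struct model_buf **buf,
                                   struct model_desc *ring, struct model_buf *bufs)
        {
                /* advance index and both cursors; wrap all three together */
                if (++(*ntc) == RING_SIZE) {
                        *ntc = 0;
                        *desc = ring;
                        *buf = bufs;
                } else {
                        (*desc)++;
                        (*buf)++;
                }
        }

        int main(void)
        {
                struct model_desc ring[RING_SIZE];
                struct model_buf bufs[RING_SIZE];
                unsigned int ntc = 6, end = 2;
                struct model_desc *desc = &ring[ntc];
                struct model_buf *buf = &bufs[ntc];

                /* clean slots [ntc, end); this walk crosses the ring wrap */
                while (desc != &ring[end]) {
                        printf("cleaning slot %u\n", ntc);
                        model_bump_ntc(&ntc, &desc, &buf, ring, bufs);
                }
                return 0;
        }
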
2039 struct idpf_tx_queue *tx_q;
2059 tx_q = complq->txq_grp->txqs[rel_tx_qid];
2068 idpf_tx_splitq_clean(tx_q, hw_head, budget,
2072 idpf_tx_handle_rs_completion(tx_q, tx_desc,
2076 idpf_tx_handle_sw_marker(tx_q);
2079 netdev_err(tx_q->netdev,
2084 u64_stats_update_begin(&tx_q->stats_sync);
2085 u64_stats_add(&tx_q->q_stats.packets, cleaned_stats.packets);
2086 u64_stats_add(&tx_q->q_stats.bytes, cleaned_stats.bytes);
2087 tx_q->cleaned_pkts += cleaned_stats.packets;
2088 tx_q->cleaned_bytes += cleaned_stats.bytes;
2090 u64_stats_update_end(&tx_q->stats_sync);
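    Lines 2039-2090 dispatch each completion to the TX queue it names and then fold the cleaned packet/byte counts into that queue's u64 stats as well as the cleaned_pkts/cleaned_bytes counters consumed later by the wake pass. A rough userspace sketch of that dispatch; the completion-type enum and field names are invented for the model, and the real handlers are only referenced in comments.

        #include <stdint.h>

        enum model_ctype { MODEL_CTYPE_RE, MODEL_CTYPE_RS, MODEL_CTYPE_SW_MARKER };

        struct model_txq_stats { uint64_t packets, bytes; };

        struct model_txq {
                struct model_txq_stats q_stats;
                uint64_t cleaned_pkts, cleaned_bytes;
        };

        void model_handle_completion(struct model_txq *txq, enum model_ctype ctype,
                                     uint64_t pkts, uint64_t bytes)
        {
                switch (ctype) {
                case MODEL_CTYPE_RE:
                        /* driver: idpf_tx_splitq_clean(tx_q, hw_head, ...) */
                        break;
                case MODEL_CTYPE_RS:
                        /* driver: idpf_tx_handle_rs_completion(tx_q, ...) */
                        break;
                case MODEL_CTYPE_SW_MARKER:
                        /* driver: idpf_tx_handle_sw_marker(tx_q) */
                        break;
                default:
                        /* driver: netdev_err() on an unknown completion type */
                        return;
                }

                /* fold what the handlers cleaned into per-queue counters */
                txq->q_stats.packets += pkts;
                txq->q_stats.bytes += bytes;
                txq->cleaned_pkts += pkts;      /* consumed by the wake pass below */
                txq->cleaned_bytes += bytes;
        }
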
2116 struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i];
2121 if (!tx_q->cleaned_bytes)
2124 *cleaned += tx_q->cleaned_pkts;
2127 nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2129 dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
2131 !netif_carrier_ok(tx_q->netdev);
2133 __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
2134 IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
2140 tx_q->cleaned_bytes = 0;
2141 tx_q->cleaned_pkts = 0;
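    Lines 2116-2141 then run a second pass over the group's TX queues: queues with nothing cleaned are skipped, the rest report their cleaned work and are re-woken through __netif_txq_completed_wake() unless dont_wake vetoes it, and the per-queue counters are reset. A hedged model of that decision; the threshold value and field names are invented for the sketch.

        #include <stdbool.h>

        #define MODEL_WAKE_THRESH 64U

        struct model_txq_state {
                unsigned int cleaned_pkts;
                unsigned int cleaned_bytes;
                unsigned int desc_unused;
                bool stopped;
        };

        void model_completed_wake(struct model_txq_state *q,
                                  bool complq_ok, bool rsv_low, bool carrier_ok)
        {
                bool dont_wake = !complq_ok || rsv_low || !carrier_ok;

                if (!q->cleaned_bytes)
                        return;                 /* nothing cleaned on this queue */

                /* restart a stopped queue only when nothing argues against it
                 * and enough descriptors are free again */
                if (q->stopped && !dont_wake && q->desc_unused >= MODEL_WAKE_THRESH)
                        q->stopped = false;

                /* the counters only carry state between completion polls */
                q->cleaned_pkts = 0;
                q->cleaned_bytes = 0;
        }
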
2190 static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size)
2192 if (IDPF_DESC_UNUSED(tx_q) < size ||
2193 IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
2194 IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
2195 IDPF_TX_BUF_RSV_LOW(tx_q))
2202 * @tx_q: the queue to be checked
2207 static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
2210 if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
2211 idpf_txq_has_room(tx_q, descs_needed),
2215 u64_stats_update_begin(&tx_q->stats_sync);
2216 u64_stats_inc(&tx_q->q_stats.q_busy);
2217 u64_stats_update_end(&tx_q->stats_sync);
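    Lines 2190-2217 gate transmission on three conditions: enough unused descriptors for the frame, the shared completion queue below its overflow threshold, and the stash buffer reserve not running low; failing any of them stops the subqueue and bumps q_busy. A compact model of that gate; field names and the return convention are illustrative.

        #include <stdbool.h>
        #include <stdint.h>

        struct model_txq_room {
                unsigned int desc_unused;               /* IDPF_DESC_UNUSED() */
                unsigned int complq_pending;            /* IDPF_TX_COMPLQ_PENDING() */
                unsigned int complq_overflow_thresh;    /* IDPF_TX_COMPLQ_OVERFLOW_THRESH() */
                bool buf_rsv_low;                       /* IDPF_TX_BUF_RSV_LOW() */
                bool stopped;
                uint64_t q_busy;
        };

        static bool model_txq_has_room(const struct model_txq_room *q,
                                       unsigned int descs_needed)
        {
                if (q->desc_unused < descs_needed ||
                    q->complq_pending > q->complq_overflow_thresh ||
                    q->buf_rsv_low)
                        return false;

                return true;
        }

        int model_maybe_stop(struct model_txq_room *q, unsigned int descs_needed)
        {
                if (model_txq_has_room(q, descs_needed))
                        return 0;

                q->stopped = true;      /* netif_subqueue_maybe_stop() in the driver */
                q->q_busy++;            /* u64_stats_inc(&tx_q->q_stats.q_busy) */
                return -1;
        }
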
2224 * @tx_q: queue to bump
2232 void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
2237 nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2238 tx_q->next_to_use = val;
2249 writel(val, tx_q->tail);
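    idpf_tx_buf_hw_update() (lines 2232-2249) records the new next_to_use and writes the tail register. The sketch below shows the common tail-bump idiom this implements, where the doorbell write is skipped while the stack signals that more frames are coming, which is what the netdev_xmit_more() argument at line 2551 suggests; mmio_write() is only a stand-in for writel(val, tx_q->tail).

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        struct model_txq_tail {
                unsigned int next_to_use;
        };

        static void mmio_write(uint32_t val)
        {
                /* stand-in for writel(val, tx_q->tail) */
                printf("doorbell <- %u\n", val);
        }

        void model_buf_hw_update(struct model_txq_tail *q, uint32_t val, bool xmit_more)
        {
                q->next_to_use = val;

                /* batch the MMIO write per burst rather than per packet */
                if (!xmit_more)
                        mmio_write(val);
        }
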
2369 * @tx_q: queue to send buffer on
2377 static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
2384 u16 i = tx_q->next_to_use;
2398 tx_desc = &tx_q->flex_tx[i];
2400 dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
2406 (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
2411 if (dma_mapping_error(tx_q->dev, dma))
2412 return idpf_tx_dma_map_error(tx_q, skb, first, i);
2470 if (unlikely(++i == tx_q->desc_count)) {
2471 tx_buf = tx_q->tx_buf;
2472 tx_desc = &tx_q->flex_tx[0];
2474 tx_q->compl_tag_cur_gen =
2475 IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
2517 if (unlikely(++i == tx_q->desc_count)) {
2518 tx_buf = tx_q->tx_buf;
2519 tx_desc = &tx_q->flex_tx[0];
2521 tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
2530 dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
2543 i = idpf_tx_splitq_bump_ntu(tx_q, i);
2545 tx_q->txq_grp->num_completions_pending++;
2548 nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2551 idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
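    The mapping loop (lines 2377-2551) stamps each packet with a completion tag built from the current generation ORed with the starting ring index (line 2406), and advances the generation whenever the ring index wraps (lines 2470-2475 and 2517-2521) so stale tags can be told apart. A self-contained model of just that bookkeeping; the ring size, shift and mask values are invented, not the idpf layout.

        #include <stdint.h>

        #define MODEL_RING_SIZE     256U
        #define MODEL_TAG_GEN_SHIFT 12U
        #define MODEL_TAG_GEN_MASK  0xfU

        struct model_map_state {
                unsigned int i;         /* current ring index */
                unsigned int gen;       /* completion tag generation */
        };

        unsigned int model_next_idx(struct model_map_state *s)
        {
                if (++s->i == MODEL_RING_SIZE) {
                        s->i = 0;
                        /* models compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q) */
                        s->gen = (s->gen + 1) & MODEL_TAG_GEN_MASK;
                }
                return s->i;
        }

        uint16_t model_make_compl_tag(const struct model_map_state *s)
        {
                /* (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i */
                return (uint16_t)((s->gen << MODEL_TAG_GEN_SHIFT) | s->i);
        }
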
2760 * @tx_q: queue to send buffer on
2763 netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
2765 u64_stats_update_begin(&tx_q->stats_sync);
2766 u64_stats_inc(&tx_q->q_stats.skb_drops);
2767 u64_stats_update_end(&tx_q->stats_sync);
2769 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2779 * @tx_q: queue to send buffer on
2785 static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
2794 if (!idpf_ptp_get_txq_tstamp_capability(tx_q))
2802 err = idpf_ptp_request_ts(tx_q, skb, &idx);
2804 u64_stats_update_begin(&tx_q->stats_sync);
2805 u64_stats_inc(&tx_q->q_stats.tstamp_skipped);
2806 u64_stats_update_end(&tx_q->stats_sync);
2832 static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
2846 * @tx_q: queue to send buffer on
2851 struct idpf_tx_queue *tx_q)
2859 count = idpf_tx_desc_count_required(tx_q, skb);
2861 return idpf_tx_drop_skb(tx_q, skb);
2865 return idpf_tx_drop_skb(tx_q, skb);
2869 if (idpf_tx_maybe_stop_splitq(tx_q, count)) {
2870 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2877 ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);
2890 u64_stats_update_begin(&tx_q->stats_sync);
2891 u64_stats_inc(&tx_q->q_stats.lso_pkts);
2892 u64_stats_update_end(&tx_q->stats_sync);
2895 idx = idpf_tx_tstamp(tx_q, skb, &tx_params.offload);
2897 ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);
2902 first = &tx_q->tx_buf[tx_q->next_to_use];
2914 if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
2922 if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
2924 tx_q->txq_grp->num_completions_pending++;
2938 idpf_tx_splitq_map(tx_q, &tx_params, first);
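    Within idpf_tx_splitq_frame() (lines 2851-2938), when flow scheduling is enabled a report-event completion is requested whenever next_to_use lands on a multiple of IDPF_TX_SPLITQ_RE_MIN_GAP, and each request is counted in the group's num_completions_pending. A small model of that periodic request; the gap value here is made up for the sketch.

        #include <stdbool.h>

        #define MODEL_RE_MIN_GAP 64U

        struct model_txq_grp { unsigned int num_completions_pending; };

        bool model_maybe_request_re(struct model_txq_grp *grp,
                                    unsigned int next_to_use, bool flow_sch_en)
        {
                if (!flow_sch_en)
                        return false;

                if (next_to_use % MODEL_RE_MIN_GAP)
                        return false;

                /* ask hardware for a report-event completion so the ring
                 * can be cleaned even without per-packet completions */
                grp->num_completions_pending++;
                return true;
        }
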
2953 struct idpf_tx_queue *tx_q;
2961 tx_q = vport->txqs[skb_get_queue_mapping(skb)];
2966 if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) {
2967 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2973 return idpf_tx_splitq_frame(skb, tx_q);
2975 return idpf_tx_singleq_frame(skb, tx_q);
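    Finally, the transmit entry point (lines 2953-2975) picks the TX queue from the skb's queue mapping, pads runts up to tx_min_pkt_len with skb_put_padto(), and dispatches to the splitq or singleq path. A plain-C stand-in for that flow, with an invented frame structure in place of an skb:

        #include <stdbool.h>
        #include <stddef.h>
        #include <string.h>

        struct model_frame {
                unsigned char data[128];
                size_t len;
        };

        int model_tx_start(struct model_frame *f, size_t min_pkt_len, bool splitq_model)
        {
                /* pad short frames up to the queue's minimum packet length */
                if (f->len < min_pkt_len) {
                        if (min_pkt_len > sizeof(f->data))
                                return -1;      /* would be dropped */
                        memset(f->data + f->len, 0, min_pkt_len - f->len);
                        f->len = min_pkt_len;
                }

                return splitq_model ? 0 /* idpf_tx_splitq_frame() */
                                    : 1 /* idpf_tx_singleq_frame() */;
        }
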