Lines matching "queue", "pkt", "tx"

1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
4 * Copyright (C) 2015-2021 Google, Inc.
20 iowrite32be(val, &priv->db_bar2[be32_to_cpu(q_resources->db_index)]); in gve_tx_put_doorbell()
26 struct gve_tx_ring *tx = &priv->tx[tx_qid]; in gve_xdp_tx_flush() local
28 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_xdp_tx_flush()
32 * We copy skb payloads into the registered segment before writing Tx
33 * descriptors and ringing the Tx doorbell.
35 * gve_tx_fifo_* manages the Registered Segment as a FIFO - clients must
41 fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP, in gve_tx_fifo_init()
43 if (unlikely(!fifo->base)) { in gve_tx_fifo_init()
44 netif_err(priv, drv, priv->dev, "Failed to vmap fifo, qpl_id = %d\n", in gve_tx_fifo_init()
45 fifo->qpl->id); in gve_tx_fifo_init()
46 return -ENOMEM; in gve_tx_fifo_init()
49 fifo->size = fifo->qpl->num_entries * PAGE_SIZE; in gve_tx_fifo_init()
50 atomic_set(&fifo->available, fifo->size); in gve_tx_fifo_init()
51 fifo->head = 0; in gve_tx_fifo_init()
57 WARN(atomic_read(&fifo->available) != fifo->size, in gve_tx_fifo_release()
58 "Releasing non-empty fifo"); in gve_tx_fifo_release()
60 vunmap(fifo->base); in gve_tx_fifo_release()
66 return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head; in gve_tx_fifo_pad_alloc_one_frag()
71 return (atomic_read(&fifo->available) <= bytes) ? false : true; in gve_tx_fifo_can_alloc()
74 /* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
77 * @iov: Scatter-gather elements to fill with allocation fragment base/len
105 iov[0].iov_offset = fifo->head; in gve_tx_alloc_fifo()
107 fifo->head += bytes; in gve_tx_alloc_fifo()
109 if (fifo->head > fifo->size) { in gve_tx_alloc_fifo()
114 overflow = fifo->head - fifo->size; in gve_tx_alloc_fifo()
115 iov[0].iov_len -= overflow; in gve_tx_alloc_fifo()
119 fifo->head = overflow; in gve_tx_alloc_fifo()
122 /* Re-align to a cacheline boundary */ in gve_tx_alloc_fifo()
123 aligned_head = L1_CACHE_ALIGN(fifo->head); in gve_tx_alloc_fifo()
124 padding = aligned_head - fifo->head; in gve_tx_alloc_fifo()
125 iov[nfrags - 1].iov_padding = padding; in gve_tx_alloc_fifo()
126 atomic_sub(bytes + padding, &fifo->available); in gve_tx_alloc_fifo()
127 fifo->head = aligned_head; in gve_tx_alloc_fifo()
129 if (fifo->head == fifo->size) in gve_tx_alloc_fifo()
130 fifo->head = 0; in gve_tx_alloc_fifo()
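
The allocator above treats the registered segment as a byte FIFO: a request that runs past the end of the segment is split into a tail fragment plus a second fragment at offset 0, and the head is then padded up to the next cacheline boundary. A minimal stand-alone model of that arithmetic (hypothetical names and sizes, not the kernel's struct gve_tx_fifo, and without the atomic "available" bookkeeping):

#include <stdio.h>
#include <stddef.h>

#define SEG_SIZE  (4 * 4096)	/* assumed 4-page registered segment */
#define CACHELINE 64

struct frag { size_t off, len, pad; };

/* Returns the number of fragments used (1, or 2 when the request wraps). */
static int fifo_alloc(size_t *head, size_t bytes, struct frag iov[2])
{
	size_t aligned;
	int nfrags = 1;

	iov[0].off = *head;
	iov[0].len = bytes;
	*head += bytes;
	if (*head > SEG_SIZE) {			/* request runs past the end */
		size_t overflow = *head - SEG_SIZE;

		nfrags = 2;
		iov[0].len -= overflow;		/* tail piece up to the end */
		iov[1].off = 0;			/* remainder wraps to offset 0 */
		iov[1].len = overflow;
		*head = overflow;
	}
	/* Pad the head up to the next cacheline boundary. */
	aligned = (*head + CACHELINE - 1) & ~(size_t)(CACHELINE - 1);
	iov[nfrags - 1].pad = aligned - *head;
	*head = (aligned == SEG_SIZE) ? 0 : aligned;
	return nfrags;
}

int main(void)
{
	struct frag iov[2] = { { 0 } };
	size_t head = SEG_SIZE - 100;	/* only 100 bytes left before the end */
	int n = fifo_alloc(&head, 300, iov);

	printf("%d frag(s): (%zu,%zu) (%zu,%zu), pad %zu, new head %zu\n",
	       n, iov[0].off, iov[0].len, iov[1].off, iov[1].len,
	       iov[n - 1].pad, head);
	return 0;
}

With these numbers the 300-byte request becomes a 100-byte tail fragment plus a 200-byte fragment at offset 0, and the head is padded from 200 up to 256.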
135 /* gve_tx_free_fifo - Return space to Tx FIFO
141 atomic_add(bytes, &fifo->available); in gve_tx_free_fifo()
149 for (i = 0; i < ARRAY_SIZE(info->iov); i++) { in gve_tx_clear_buffer_state()
150 space_freed += info->iov[i].iov_len + info->iov[i].iov_padding; in gve_tx_clear_buffer_state()
151 info->iov[i].iov_len = 0; in gve_tx_clear_buffer_state()
152 info->iov[i].iov_padding = 0; in gve_tx_clear_buffer_state()
157 static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_clean_xdp_done() argument
168 idx = tx->done & tx->mask; in gve_clean_xdp_done()
169 info = &tx->info[idx]; in gve_clean_xdp_done()
170 tx->done++; in gve_clean_xdp_done()
172 if (unlikely(!info->xdp.size)) in gve_clean_xdp_done()
175 bytes += info->xdp.size; in gve_clean_xdp_done()
177 xsk_complete += info->xdp.is_xsk; in gve_clean_xdp_done()
179 info->xdp.size = 0; in gve_clean_xdp_done()
180 if (info->xdp_frame) { in gve_clean_xdp_done()
181 xdp_return_frame(info->xdp_frame); in gve_clean_xdp_done()
182 info->xdp_frame = NULL; in gve_clean_xdp_done()
187 gve_tx_free_fifo(&tx->tx_fifo, space_freed); in gve_clean_xdp_done()
188 if (xsk_complete > 0 && tx->xsk_pool) in gve_clean_xdp_done()
189 xsk_tx_completed(tx->xsk_pool, xsk_complete); in gve_clean_xdp_done()
190 u64_stats_update_begin(&tx->statss); in gve_clean_xdp_done()
191 tx->bytes_done += bytes; in gve_clean_xdp_done()
192 tx->pkt_done += pkts; in gve_clean_xdp_done()
193 u64_stats_update_end(&tx->statss); in gve_clean_xdp_done()
197 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
203 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_stop_ring_gqi() local
209 if (tx->q_num < priv->tx_cfg.num_queues) in gve_tx_stop_ring_gqi()
210 gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false); in gve_tx_stop_ring_gqi()
212 gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt); in gve_tx_stop_ring_gqi()
213 netdev_tx_reset_queue(tx->netdev_txq); in gve_tx_stop_ring_gqi()
217 static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_tx_free_ring_gqi() argument
220 struct device *hdev = &priv->pdev->dev; in gve_tx_free_ring_gqi()
221 int idx = tx->q_num; in gve_tx_free_ring_gqi()
226 slots = tx->mask + 1; in gve_tx_free_ring_gqi()
227 dma_free_coherent(hdev, sizeof(*tx->q_resources), in gve_tx_free_ring_gqi()
228 tx->q_resources, tx->q_resources_bus); in gve_tx_free_ring_gqi()
229 tx->q_resources = NULL; in gve_tx_free_ring_gqi()
231 if (tx->tx_fifo.qpl) { in gve_tx_free_ring_gqi()
232 if (tx->tx_fifo.base) in gve_tx_free_ring_gqi()
233 gve_tx_fifo_release(priv, &tx->tx_fifo); in gve_tx_free_ring_gqi()
235 qpl_id = gve_tx_qpl_id(priv, tx->q_num); in gve_tx_free_ring_gqi()
236 gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id); in gve_tx_free_ring_gqi()
237 tx->tx_fifo.qpl = NULL; in gve_tx_free_ring_gqi()
240 bytes = sizeof(*tx->desc) * slots; in gve_tx_free_ring_gqi()
241 dma_free_coherent(hdev, bytes, tx->desc, tx->bus); in gve_tx_free_ring_gqi()
242 tx->desc = NULL; in gve_tx_free_ring_gqi()
244 vfree(tx->info); in gve_tx_free_ring_gqi()
245 tx->info = NULL; in gve_tx_free_ring_gqi()
247 netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx); in gve_tx_free_ring_gqi()
253 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_start_ring_gqi() local
257 tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx); in gve_tx_start_ring_gqi()
263 struct gve_tx_ring *tx, in gve_tx_alloc_ring_gqi() argument
266 struct device *hdev = &priv->pdev->dev; in gve_tx_alloc_ring_gqi()
272 memset(tx, 0, sizeof(*tx)); in gve_tx_alloc_ring_gqi()
273 spin_lock_init(&tx->clean_lock); in gve_tx_alloc_ring_gqi()
274 spin_lock_init(&tx->xdp_lock); in gve_tx_alloc_ring_gqi()
275 tx->q_num = idx; in gve_tx_alloc_ring_gqi()
277 tx->mask = cfg->ring_size - 1; in gve_tx_alloc_ring_gqi()
280 tx->info = vcalloc(cfg->ring_size, sizeof(*tx->info)); in gve_tx_alloc_ring_gqi()
281 if (!tx->info) in gve_tx_alloc_ring_gqi()
282 return -ENOMEM; in gve_tx_alloc_ring_gqi()
284 /* alloc tx queue */ in gve_tx_alloc_ring_gqi()
285 bytes = sizeof(*tx->desc) * cfg->ring_size; in gve_tx_alloc_ring_gqi()
286 tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL); in gve_tx_alloc_ring_gqi()
287 if (!tx->desc) in gve_tx_alloc_ring_gqi()
290 tx->raw_addressing = cfg->raw_addressing; in gve_tx_alloc_ring_gqi()
291 tx->dev = hdev; in gve_tx_alloc_ring_gqi()
292 if (!tx->raw_addressing) { in gve_tx_alloc_ring_gqi()
293 qpl_id = gve_tx_qpl_id(priv, tx->q_num); in gve_tx_alloc_ring_gqi()
294 qpl_page_cnt = priv->tx_pages_per_qpl; in gve_tx_alloc_ring_gqi()
296 tx->tx_fifo.qpl = gve_alloc_queue_page_list(priv, qpl_id, in gve_tx_alloc_ring_gqi()
298 if (!tx->tx_fifo.qpl) in gve_tx_alloc_ring_gqi()
301 /* map Tx FIFO */ in gve_tx_alloc_ring_gqi()
302 if (gve_tx_fifo_init(priv, &tx->tx_fifo)) in gve_tx_alloc_ring_gqi()
306 tx->q_resources = in gve_tx_alloc_ring_gqi()
308 sizeof(*tx->q_resources), in gve_tx_alloc_ring_gqi()
309 &tx->q_resources_bus, in gve_tx_alloc_ring_gqi()
311 if (!tx->q_resources) in gve_tx_alloc_ring_gqi()
317 if (!tx->raw_addressing) in gve_tx_alloc_ring_gqi()
318 gve_tx_fifo_release(priv, &tx->tx_fifo); in gve_tx_alloc_ring_gqi()
320 if (!tx->raw_addressing) { in gve_tx_alloc_ring_gqi()
321 gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id); in gve_tx_alloc_ring_gqi()
322 tx->tx_fifo.qpl = NULL; in gve_tx_alloc_ring_gqi()
325 dma_free_coherent(hdev, bytes, tx->desc, tx->bus); in gve_tx_alloc_ring_gqi()
326 tx->desc = NULL; in gve_tx_alloc_ring_gqi()
328 vfree(tx->info); in gve_tx_alloc_ring_gqi()
329 tx->info = NULL; in gve_tx_alloc_ring_gqi()
330 return -ENOMEM; in gve_tx_alloc_ring_gqi()
336 struct gve_tx_ring *tx = cfg->tx; in gve_tx_alloc_rings_gqi() local
341 total_queues = cfg->qcfg->num_queues + cfg->num_xdp_rings; in gve_tx_alloc_rings_gqi()
342 if (total_queues > cfg->qcfg->max_queues) { in gve_tx_alloc_rings_gqi()
343 netif_err(priv, drv, priv->dev, in gve_tx_alloc_rings_gqi()
344 "Cannot alloc more than the max num of Tx rings\n"); in gve_tx_alloc_rings_gqi()
345 return -EINVAL; in gve_tx_alloc_rings_gqi()
348 tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring), in gve_tx_alloc_rings_gqi()
350 if (!tx) in gve_tx_alloc_rings_gqi()
351 return -ENOMEM; in gve_tx_alloc_rings_gqi()
354 err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i); in gve_tx_alloc_rings_gqi()
356 netif_err(priv, drv, priv->dev, in gve_tx_alloc_rings_gqi()
357 "Failed to alloc tx ring=%d: err=%d\n", in gve_tx_alloc_rings_gqi()
363 cfg->tx = tx; in gve_tx_alloc_rings_gqi()
368 gve_tx_free_ring_gqi(priv, &tx[j], cfg); in gve_tx_alloc_rings_gqi()
369 kvfree(tx); in gve_tx_alloc_rings_gqi()
376 struct gve_tx_ring *tx = cfg->tx; in gve_tx_free_rings_gqi() local
379 if (!tx) in gve_tx_free_rings_gqi()
382 for (i = 0; i < cfg->qcfg->num_queues + cfg->qcfg->num_xdp_queues; i++) in gve_tx_free_rings_gqi()
383 gve_tx_free_ring_gqi(priv, &tx[i], cfg); in gve_tx_free_rings_gqi()
385 kvfree(tx); in gve_tx_free_rings_gqi()
386 cfg->tx = NULL; in gve_tx_free_rings_gqi()
389 /* gve_tx_avail - Calculates the number of slots available in the ring
390 * @tx: tx ring to check
394 * The capacity of the queue is mask + 1. We don't need to reserve an entry.
396 static inline u32 gve_tx_avail(struct gve_tx_ring *tx) in gve_tx_avail() argument
398 return tx->mask + 1 - (tx->req - tx->done); in gve_tx_avail()
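
tx->req and tx->done are free-running 32-bit counters, so the expression above stays correct across wraparound: the unsigned difference req - done is the number of descriptors currently outstanding. A small stand-alone illustration (hypothetical names, assuming a 1024-entry ring):

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 1024u			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

static uint32_t tx_avail(uint32_t req, uint32_t done)
{
	return RING_MASK + 1 - (req - done);
}

int main(void)
{
	uint32_t done = 0xfffffff0u;	/* consumer just below the u32 wrap */
	uint32_t req = done + 40;	/* producer has wrapped; 40 in flight */

	printf("available slots = %u, next slot index = %u\n",
	       tx_avail(req, done), req & RING_MASK);
	return 0;
}

This prints 984 available slots, and req & mask still yields a valid slot index (24) even though req has wrapped past zero.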
401 static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx, in gve_skb_fifo_bytes_required() argument
409 min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len); in gve_skb_fifo_bytes_required()
411 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, in gve_skb_fifo_bytes_required()
414 align_hdr_pad = L1_CACHE_ALIGN(hlen) - hlen; in gve_skb_fifo_bytes_required()
415 bytes = align_hdr_pad + pad_bytes + skb->len; in gve_skb_fifo_bytes_required()
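
As a worked example of this accounting (assumed numbers, 64-byte cachelines): for a 1000-byte non-GSO skb, suppose hlen works out to 182 bytes and fifo->head sits 40 bytes short of fifo->size. Then gve_tx_fifo_pad_alloc_one_frag() returns pad_bytes = 40 so the header can start at offset 0 of the FIFO, align_hdr_pad = L1_CACHE_ALIGN(182) - 182 = 192 - 182 = 10, and bytes = 10 + 40 + 1000 = 1050; the estimate accounts for the skipped tail bytes and the header alignment padding on top of skb->len.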
430 if (info->skb) { in gve_tx_unmap_buf()
446 static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required) in gve_can_tx() argument
450 if (!tx->raw_addressing) in gve_can_tx()
451 can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required); in gve_can_tx()
453 return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc); in gve_can_tx()
458 /* Stops the queue if the skb cannot be transmitted. */
459 static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_maybe_stop_tx() argument
467 if (!tx->raw_addressing) in gve_maybe_stop_tx()
468 bytes_required = gve_skb_fifo_bytes_required(tx, skb); in gve_maybe_stop_tx()
470 if (likely(gve_can_tx(tx, bytes_required))) in gve_maybe_stop_tx()
473 ret = -EBUSY; in gve_maybe_stop_tx()
474 spin_lock(&tx->clean_lock); in gve_maybe_stop_tx()
475 nic_done = gve_tx_load_event_counter(priv, tx); in gve_maybe_stop_tx()
476 to_do = nic_done - tx->done; in gve_maybe_stop_tx()
478 /* Only try to clean if there is hope for TX */ in gve_maybe_stop_tx()
479 if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) { in gve_maybe_stop_tx()
482 gve_clean_tx_done(priv, tx, to_do, false); in gve_maybe_stop_tx()
484 if (likely(gve_can_tx(tx, bytes_required))) in gve_maybe_stop_tx()
488 /* No space, so stop the queue */ in gve_maybe_stop_tx()
489 tx->stop_queue++; in gve_maybe_stop_tx()
490 netif_tx_stop_queue(tx->netdev_txq); in gve_maybe_stop_tx()
492 spin_unlock(&tx->clean_lock); in gve_maybe_stop_tx()
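
The flow above amounts to a three-way decision: transmit if there is already room, otherwise opportunistically clean completed descriptors (under tx->clean_lock) and retry, and only stop the queue when even a full clean could not make enough room. A compressed stand-alone sketch of just that decision (hypothetical names; it ignores the FIFO byte check that gve_can_tx() also performs in QPL mode):

#include <stdio.h>

enum tx_action { TX_PROCEED, TX_CLEAN_THEN_RETRY, TX_STOP_QUEUE };

static enum tx_action maybe_stop(unsigned int avail_slots,
				 unsigned int completed_by_nic,	/* nic_done - done */
				 unsigned int descs_needed)
{
	if (avail_slots >= descs_needed)
		return TX_PROCEED;		/* fast path: room already */
	if (avail_slots + completed_by_nic >= descs_needed)
		return TX_CLEAN_THEN_RETRY;	/* reclaiming finished slots will help */
	return TX_STOP_QUEUE;			/* no hope: stop queue, ring doorbell */
}

int main(void)
{
	printf("%d %d %d\n",
	       maybe_stop(8, 0, 4),	/* 0: proceed */
	       maybe_stop(2, 6, 4),	/* 1: clean first */
	       maybe_stop(2, 1, 4));	/* 2: stop the queue */
	return 0;
}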
502 /* l4_hdr_offset and csum_offset are in units of 16-bit words */ in gve_tx_fill_pkt_desc()
504 pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM; in gve_tx_fill_pkt_desc()
505 pkt_desc->pkt.l4_csum_offset = csum_offset >> 1; in gve_tx_fill_pkt_desc()
506 pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1; in gve_tx_fill_pkt_desc()
508 pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM; in gve_tx_fill_pkt_desc()
509 pkt_desc->pkt.l4_csum_offset = csum_offset >> 1; in gve_tx_fill_pkt_desc()
510 pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1; in gve_tx_fill_pkt_desc()
512 pkt_desc->pkt.type_flags = GVE_TXD_STD; in gve_tx_fill_pkt_desc()
513 pkt_desc->pkt.l4_csum_offset = 0; in gve_tx_fill_pkt_desc()
514 pkt_desc->pkt.l4_hdr_offset = 0; in gve_tx_fill_pkt_desc()
516 pkt_desc->pkt.desc_cnt = desc_cnt; in gve_tx_fill_pkt_desc()
517 pkt_desc->pkt.len = cpu_to_be16(pkt_len); in gve_tx_fill_pkt_desc()
518 pkt_desc->pkt.seg_len = cpu_to_be16(hlen); in gve_tx_fill_pkt_desc()
519 pkt_desc->pkt.seg_addr = cpu_to_be64(addr); in gve_tx_fill_pkt_desc()
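
Note the unit convention called out in the comment: the descriptor's offset fields are in 16-bit words, hence the ">> 1" when they are written. A tiny illustration with assumed offsets for an Ethernet + IPv4 + TCP frame:

#include <stdio.h>

int main(void)
{
	unsigned int l4_hdr_offset = 14 + 20;	/* bytes to the TCP header (assumed 14B Ethernet + 20B IPv4) */
	unsigned int csum_offset = 16;		/* checksum field offset within the TCP header */

	printf("l4_hdr_offset field = %u, l4_csum_offset field = %u\n",
	       l4_hdr_offset >> 1, csum_offset >> 1);	/* 17 and 8, in 16-bit words */
	return 0;
}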
525 BUILD_BUG_ON(sizeof(mtd_desc->mtd) != sizeof(mtd_desc->pkt)); in gve_tx_fill_mtd_desc()
527 mtd_desc->mtd.type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH; in gve_tx_fill_mtd_desc()
528 mtd_desc->mtd.path_state = GVE_MTD_PATH_STATE_DEFAULT | in gve_tx_fill_mtd_desc()
530 mtd_desc->mtd.path_hash = cpu_to_be32(skb->hash); in gve_tx_fill_mtd_desc()
531 mtd_desc->mtd.reserved0 = 0; in gve_tx_fill_mtd_desc()
532 mtd_desc->mtd.reserved1 = 0; in gve_tx_fill_mtd_desc()
540 seg_desc->seg.type_flags = GVE_TXD_SEG; in gve_tx_fill_seg_desc()
543 seg_desc->seg.type_flags |= GVE_TXSF_IPV6; in gve_tx_fill_seg_desc()
544 seg_desc->seg.l3_offset = l3_offset >> 1; in gve_tx_fill_seg_desc()
545 seg_desc->seg.mss = cpu_to_be16(gso_size); in gve_tx_fill_seg_desc()
547 seg_desc->seg.seg_len = cpu_to_be16(len); in gve_tx_fill_seg_desc()
548 seg_desc->seg.seg_addr = cpu_to_be64(addr); in gve_tx_fill_seg_desc()
554 u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE; in gve_dma_sync_for_device()
562 static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb) in gve_tx_add_skb_copy() argument
567 int mtd_desc_nr = !!skb->l4_hash; in gve_tx_add_skb_copy()
569 u32 idx = tx->req & tx->mask; in gve_tx_add_skb_copy()
575 info = &tx->info[idx]; in gve_tx_add_skb_copy()
576 pkt_desc = &tx->desc[idx]; in gve_tx_add_skb_copy()
583 min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len); in gve_tx_add_skb_copy()
585 info->skb = skb; in gve_tx_add_skb_copy()
589 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen); in gve_tx_add_skb_copy()
590 hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes, in gve_tx_add_skb_copy()
591 &info->iov[0]); in gve_tx_add_skb_copy()
593 payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen, in gve_tx_add_skb_copy()
594 &info->iov[payload_iov]); in gve_tx_add_skb_copy()
596 gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed, in gve_tx_add_skb_copy()
599 info->iov[hdr_nfrags - 1].iov_offset, skb->len); in gve_tx_add_skb_copy()
602 tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset, in gve_tx_add_skb_copy()
604 gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, in gve_tx_add_skb_copy()
605 info->iov[hdr_nfrags - 1].iov_offset, in gve_tx_add_skb_copy()
606 info->iov[hdr_nfrags - 1].iov_len); in gve_tx_add_skb_copy()
610 next_idx = (tx->req + 1) & tx->mask; in gve_tx_add_skb_copy()
611 gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb); in gve_tx_add_skb_copy()
615 next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask; in gve_tx_add_skb_copy()
616 seg_desc = &tx->desc[next_idx]; in gve_tx_add_skb_copy()
619 skb_shinfo(skb)->gso_size, in gve_tx_add_skb_copy()
621 info->iov[i].iov_len, in gve_tx_add_skb_copy()
622 info->iov[i].iov_offset); in gve_tx_add_skb_copy()
625 tx->tx_fifo.base + info->iov[i].iov_offset, in gve_tx_add_skb_copy()
626 info->iov[i].iov_len); in gve_tx_add_skb_copy()
627 gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, in gve_tx_add_skb_copy()
628 info->iov[i].iov_offset, in gve_tx_add_skb_copy()
629 info->iov[i].iov_len); in gve_tx_add_skb_copy()
630 copy_offset += info->iov[i].iov_len; in gve_tx_add_skb_copy()
636 static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_tx_add_skb_no_copy() argument
643 int mtd_desc_nr = !!skb->l4_hash; in gve_tx_add_skb_no_copy()
645 u32 idx = tx->req & tx->mask; in gve_tx_add_skb_no_copy()
650 info = &tx->info[idx]; in gve_tx_add_skb_no_copy()
651 pkt_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
656 * of the skb (which will contain the checksum because skb->csum_start and in gve_tx_add_skb_no_copy()
657 * skb->csum_offset are given relative to skb->head) in the first segment. in gve_tx_add_skb_no_copy()
662 info->skb = skb; in gve_tx_add_skb_no_copy()
664 addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy()
665 if (unlikely(dma_mapping_error(tx->dev, addr))) { in gve_tx_add_skb_no_copy()
666 tx->dma_mapping_error++; in gve_tx_add_skb_no_copy()
672 num_descriptors = 1 + shinfo->nr_frags; in gve_tx_add_skb_no_copy()
678 gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed, in gve_tx_add_skb_no_copy()
680 num_descriptors, hlen, addr, skb->len); in gve_tx_add_skb_no_copy()
683 idx = (idx + 1) & tx->mask; in gve_tx_add_skb_no_copy()
684 mtd_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
692 len -= hlen; in gve_tx_add_skb_no_copy()
694 idx = (idx + 1) & tx->mask; in gve_tx_add_skb_no_copy()
695 seg_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
697 skb_shinfo(skb)->gso_size, in gve_tx_add_skb_no_copy()
701 for (i = 0; i < shinfo->nr_frags; i++) { in gve_tx_add_skb_no_copy()
702 const skb_frag_t *frag = &shinfo->frags[i]; in gve_tx_add_skb_no_copy()
704 idx = (idx + 1) & tx->mask; in gve_tx_add_skb_no_copy()
705 seg_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
707 addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy()
708 if (unlikely(dma_mapping_error(tx->dev, addr))) { in gve_tx_add_skb_no_copy()
709 tx->dma_mapping_error++; in gve_tx_add_skb_no_copy()
712 tx->info[idx].skb = NULL; in gve_tx_add_skb_no_copy()
713 dma_unmap_len_set(&tx->info[idx], len, len); in gve_tx_add_skb_no_copy()
714 dma_unmap_addr_set(&tx->info[idx], dma, addr); in gve_tx_add_skb_no_copy()
717 skb_shinfo(skb)->gso_size, in gve_tx_add_skb_no_copy()
724 i += num_descriptors - shinfo->nr_frags; in gve_tx_add_skb_no_copy()
725 while (i--) { in gve_tx_add_skb_no_copy()
729 idx--; in gve_tx_add_skb_no_copy()
730 gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]); in gve_tx_add_skb_no_copy()
733 tx->dropped_pkt++; in gve_tx_add_skb_no_copy()
740 struct gve_tx_ring *tx; in gve_tx() local
743 WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues, in gve_tx()
744 "skb queue index out of range"); in gve_tx()
745 tx = &priv->tx[skb_get_queue_mapping(skb)]; in gve_tx()
746 if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) { in gve_tx()
747 /* We need to ring the txq doorbell -- we have stopped the Tx in gve_tx()
748 * queue for want of resources, but prior calls to gve_tx() in gve_tx()
752 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_tx()
755 if (tx->raw_addressing) in gve_tx()
756 nsegs = gve_tx_add_skb_no_copy(priv, tx, skb); in gve_tx()
758 nsegs = gve_tx_add_skb_copy(priv, tx, skb); in gve_tx()
762 netdev_tx_sent_queue(tx->netdev_txq, skb->len); in gve_tx()
764 tx->req += nsegs; in gve_tx()
769 if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) in gve_tx()
775 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_tx()
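
Two details in gve_tx() interact: the doorbell write is skipped while the queue is running and the stack signals that more packets are coming (netdev_xmit_more()), which is why the stop path near the top must ring the doorbell itself, since earlier batched skbs may never have been announced to the device. A stand-alone model of that bookkeeping (hypothetical names):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static uint32_t req, last_doorbell;	/* descriptors posted / last value written */

static void post_pkt(unsigned int ndescs, bool xmit_more)
{
	req += ndescs;
	if (!xmit_more) {		/* end of a batch: tell the NIC */
		last_doorbell = req;
		printf("doorbell <- %u\n", req);
	}
}

static void stop_queue_flush(void)
{
	if (last_doorbell != req) {	/* batched descriptors never got a kick */
		last_doorbell = req;
		printf("doorbell <- %u (flush on stop)\n", req);
	}
}

int main(void)
{
	post_pkt(2, true);		/* batched: no doorbell yet */
	post_pkt(3, true);
	stop_queue_flush();		/* out of space: must announce all 5 */
	return 0;
}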
779 static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_tx_fill_xdp() argument
784 u32 reqi = tx->req; in gve_tx_fill_xdp()
786 pad = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, len); in gve_tx_fill_xdp()
789 info = &tx->info[reqi & tx->mask]; in gve_tx_fill_xdp()
790 info->xdp_frame = frame_p; in gve_tx_fill_xdp()
791 info->xdp.size = len; in gve_tx_fill_xdp()
792 info->xdp.is_xsk = is_xsk; in gve_tx_fill_xdp()
794 nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, pad + len, in gve_tx_fill_xdp()
795 &info->iov[0]); in gve_tx_fill_xdp()
797 ndescs = nfrags - iovi; in gve_tx_fill_xdp()
802 gve_tx_fill_pkt_desc(&tx->desc[reqi & tx->mask], 0, in gve_tx_fill_xdp()
804 info->iov[iovi].iov_len, in gve_tx_fill_xdp()
805 info->iov[iovi].iov_offset, len); in gve_tx_fill_xdp()
807 gve_tx_fill_seg_desc(&tx->desc[reqi & tx->mask], in gve_tx_fill_xdp()
809 info->iov[iovi].iov_len, in gve_tx_fill_xdp()
810 info->iov[iovi].iov_offset); in gve_tx_fill_xdp()
812 memcpy(tx->tx_fifo.base + info->iov[iovi].iov_offset, in gve_tx_fill_xdp()
813 data + offset, info->iov[iovi].iov_len); in gve_tx_fill_xdp()
814 gve_dma_sync_for_device(&priv->pdev->dev, in gve_tx_fill_xdp()
815 tx->tx_fifo.qpl->page_buses, in gve_tx_fill_xdp()
816 info->iov[iovi].iov_offset, in gve_tx_fill_xdp()
817 info->iov[iovi].iov_len); in gve_tx_fill_xdp()
818 offset += info->iov[iovi].iov_len; in gve_tx_fill_xdp()
830 struct gve_tx_ring *tx; in gve_xdp_xmit() local
833 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK) || !priv->xdp_prog) in gve_xdp_xmit()
834 return -EINVAL; in gve_xdp_xmit()
837 return -ENETDOWN; in gve_xdp_xmit()
840 smp_processor_id() % priv->tx_cfg.num_xdp_queues); in gve_xdp_xmit()
842 tx = &priv->tx[qid]; in gve_xdp_xmit()
844 spin_lock(&tx->xdp_lock); in gve_xdp_xmit()
846 err = gve_xdp_xmit_one(priv, tx, frames[i]->data, in gve_xdp_xmit()
847 frames[i]->len, frames[i]); in gve_xdp_xmit()
853 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_xdp_xmit()
855 spin_unlock(&tx->xdp_lock); in gve_xdp_xmit()
857 u64_stats_update_begin(&tx->statss); in gve_xdp_xmit()
858 tx->xdp_xmit += n; in gve_xdp_xmit()
859 tx->xdp_xmit_errors += n - i; in gve_xdp_xmit()
860 u64_stats_update_end(&tx->statss); in gve_xdp_xmit()
865 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_xdp_xmit_one() argument
870 if (!gve_can_tx(tx, len + GVE_GQ_TX_MIN_PKT_DESC_BYTES - 1)) in gve_xdp_xmit_one()
871 return -EBUSY; in gve_xdp_xmit_one()
873 nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p, false); in gve_xdp_xmit_one()
874 tx->req += nsegs; in gve_xdp_xmit_one()
881 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_clean_tx_done() argument
892 idx = tx->done & tx->mask; in gve_clean_tx_done()
893 netif_info(priv, tx_done, priv->dev, in gve_clean_tx_done()
895 tx->q_num, __func__, idx, tx->req, tx->done); in gve_clean_tx_done()
896 info = &tx->info[idx]; in gve_clean_tx_done()
897 skb = info->skb; in gve_clean_tx_done()
900 if (tx->raw_addressing) in gve_clean_tx_done()
901 gve_tx_unmap_buf(tx->dev, info); in gve_clean_tx_done()
902 tx->done++; in gve_clean_tx_done()
905 info->skb = NULL; in gve_clean_tx_done()
906 bytes += skb->len; in gve_clean_tx_done()
909 if (tx->raw_addressing) in gve_clean_tx_done()
915 if (!tx->raw_addressing) in gve_clean_tx_done()
916 gve_tx_free_fifo(&tx->tx_fifo, space_freed); in gve_clean_tx_done()
917 u64_stats_update_begin(&tx->statss); in gve_clean_tx_done()
918 tx->bytes_done += bytes; in gve_clean_tx_done()
919 tx->pkt_done += pkts; in gve_clean_tx_done()
920 u64_stats_update_end(&tx->statss); in gve_clean_tx_done()
921 netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes); in gve_clean_tx_done()
923 /* start the queue if we've stopped it */ in gve_clean_tx_done()
928 if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) && in gve_clean_tx_done()
929 likely(gve_can_tx(tx, GVE_TX_START_THRESH))) { in gve_clean_tx_done()
930 tx->wake_queue++; in gve_clean_tx_done()
931 netif_tx_wake_queue(tx->netdev_txq); in gve_clean_tx_done()
938 struct gve_tx_ring *tx) in gve_tx_load_event_counter() argument
940 u32 counter_index = be32_to_cpu(tx->q_resources->counter_index); in gve_tx_load_event_counter()
941 __be32 counter = READ_ONCE(priv->counter_array[counter_index]); in gve_tx_load_event_counter()
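
Completions are detected by reading this device-updated event counter rather than per-descriptor done bits: the amount to clean is the unsigned difference between the counter and tx->done, clamped to the NAPI budget as in gve_tx_poll(). A minimal model (hypothetical names):

#include <stdio.h>
#include <stdint.h>

static uint32_t descs_to_clean(uint32_t nic_done, uint32_t done, uint32_t budget)
{
	uint32_t to_do = nic_done - done;	/* unsigned math handles counter wrap */

	return to_do < budget ? to_do : budget;
}

int main(void)
{
	uint32_t done = 0xffffffffu;	/* driver's counter about to wrap */
	uint32_t nic_done = 7;		/* device has completed 8 more */

	printf("clean %u descriptors this poll\n",
	       descs_to_clean(nic_done, done, 256));
	return 0;
}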
946 static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_xsk_tx() argument
953 spin_lock(&tx->xdp_lock); in gve_xsk_tx()
955 if (!gve_can_tx(tx, GVE_TX_START_THRESH) || in gve_xsk_tx()
956 !xsk_tx_peek_desc(tx->xsk_pool, &desc)) in gve_xsk_tx()
959 data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr); in gve_xsk_tx()
960 nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true); in gve_xsk_tx()
961 tx->req += nsegs; in gve_xsk_tx()
966 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_xsk_tx()
967 xsk_tx_release(tx->xsk_pool); in gve_xsk_tx()
969 spin_unlock(&tx->xdp_lock); in gve_xsk_tx()
975 struct gve_rx_ring *rx = rx_block->rx; in gve_xsk_tx_poll()
976 struct gve_priv *priv = rx->gve; in gve_xsk_tx_poll()
977 struct gve_tx_ring *tx; in gve_xsk_tx_poll() local
980 tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)]; in gve_xsk_tx_poll()
981 if (tx->xsk_pool) { in gve_xsk_tx_poll()
982 sent = gve_xsk_tx(priv, tx, budget); in gve_xsk_tx_poll()
984 u64_stats_update_begin(&tx->statss); in gve_xsk_tx_poll()
985 tx->xdp_xsk_sent += sent; in gve_xsk_tx_poll()
986 u64_stats_update_end(&tx->statss); in gve_xsk_tx_poll()
987 if (xsk_uses_need_wakeup(tx->xsk_pool)) in gve_xsk_tx_poll()
988 xsk_set_tx_need_wakeup(tx->xsk_pool); in gve_xsk_tx_poll()
996 struct gve_priv *priv = block->priv; in gve_xdp_poll()
997 struct gve_tx_ring *tx = block->tx; in gve_xdp_poll() local
1002 nic_done = gve_tx_load_event_counter(priv, tx); in gve_xdp_poll()
1003 to_do = min_t(u32, (nic_done - tx->done), budget); in gve_xdp_poll()
1004 gve_clean_xdp_done(priv, tx, to_do); in gve_xdp_poll()
1007 return nic_done != tx->done; in gve_xdp_poll()
1012 struct gve_priv *priv = block->priv; in gve_tx_poll()
1013 struct gve_tx_ring *tx = block->tx; in gve_tx_poll() local
1021  /* In the TX path, we may try to clean completed pkts in order to xmit, in gve_tx_poll()
1025 spin_lock(&tx->clean_lock); in gve_tx_poll()
1027 nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_poll()
1028 to_do = min_t(u32, (nic_done - tx->done), budget); in gve_tx_poll()
1029 gve_clean_tx_done(priv, tx, to_do, true); in gve_tx_poll()
1030 spin_unlock(&tx->clean_lock); in gve_tx_poll()
1032 return nic_done != tx->done; in gve_tx_poll()
1035 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx) in gve_tx_clean_pending() argument
1037 u32 nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_clean_pending()
1039 return nic_done != tx->done; in gve_tx_clean_pending()