// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"
#include "gve_dqo.h"
#include <net/ip.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/skbuff.h>

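/* Free-list management: each TX ring keeps two views of its free lists.
 * The TX path owns the list heads in tx->dqo_tx and consumes entries without
 * atomics, while the completion path returns freed entries through atomic
 * list heads and counters in tx->dqo_compl. When the TX path runs dry it
 * "steals" the completion path's list with an atomic exchange, which keeps
 * both hot paths lock-free while they run concurrently.
 */
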
/* Returns true if tx_bufs are available. */
static bool gve_has_free_tx_qpl_bufs(struct gve_tx_ring *tx, int count)
{
	int num_avail;

	if (!tx->dqo.qpl)
		return true;

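	/* Both counters are free-running; allocated minus freed gives the
	 * number of buffers currently in flight.
	 */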
	num_avail = tx->dqo.num_tx_qpl_bufs -
		(tx->dqo_tx.alloc_tx_qpl_buf_cnt -
		 tx->dqo_tx.free_tx_qpl_buf_cnt);

	if (count <= num_avail)
		return true;

	/* Update cached value from dqo_compl. */
	tx->dqo_tx.free_tx_qpl_buf_cnt =
		atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_cnt);

	num_avail = tx->dqo.num_tx_qpl_bufs -
		(tx->dqo_tx.alloc_tx_qpl_buf_cnt -
		 tx->dqo_tx.free_tx_qpl_buf_cnt);

	return count <= num_avail;
}

static s16
gve_alloc_tx_qpl_buf(struct gve_tx_ring *tx)
{
	s16 index;

	index = tx->dqo_tx.free_tx_qpl_buf_head;

	/* No TX buffers available, try to steal the list from the
	 * completion handler.
	 */
	if (unlikely(index == -1)) {
		tx->dqo_tx.free_tx_qpl_buf_head =
			atomic_xchg(&tx->dqo_compl.free_tx_qpl_buf_head, -1);
		index = tx->dqo_tx.free_tx_qpl_buf_head;

		if (unlikely(index == -1))
			return index;
	}

	/* Remove TX buf from free list */
	tx->dqo_tx.free_tx_qpl_buf_head = tx->dqo.tx_qpl_buf_next[index];

	return index;
}

static void
gve_free_tx_qpl_bufs(struct gve_tx_ring *tx,
		     struct gve_tx_pending_packet_dqo *pkt)
{
	s16 index;
	int i;

	if (!pkt->num_bufs)
		return;

	index = pkt->tx_qpl_buf_ids[0];
	/* Create a linked list of buffers to be added to the free list */
	for (i = 1; i < pkt->num_bufs; i++) {
		tx->dqo.tx_qpl_buf_next[index] = pkt->tx_qpl_buf_ids[i];
		index = pkt->tx_qpl_buf_ids[i];
	}

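	/* Push the whole chain onto the completion handler's free list with a
	 * lock-free cmpxchg loop; retry if another context moved the head
	 * underneath us.
	 */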
	while (true) {
		s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_head);

		tx->dqo.tx_qpl_buf_next[index] = old_head;
		if (atomic_cmpxchg(&tx->dqo_compl.free_tx_qpl_buf_head,
				   old_head,
				   pkt->tx_qpl_buf_ids[0]) == old_head) {
			break;
		}
	}

	atomic_add(pkt->num_bufs, &tx->dqo_compl.free_tx_qpl_buf_cnt);
	pkt->num_bufs = 0;
}

/* Returns true if a gve_tx_pending_packet_dqo object is available. */
static bool gve_has_pending_packet(struct gve_tx_ring *tx)
{
	/* Check TX path's list. */
	if (tx->dqo_tx.free_pending_packets != -1)
		return true;

	/* Check completion handler's list. */
	if (atomic_read_acquire(&tx->dqo_compl.free_pending_packets) != -1)
		return true;

	return false;
}

static struct gve_tx_pending_packet_dqo *
gve_alloc_pending_packet(struct gve_tx_ring *tx)
{
	struct gve_tx_pending_packet_dqo *pending_packet;
	s16 index;

	index = tx->dqo_tx.free_pending_packets;

	/* No pending_packets available, try to steal the list from the
	 * completion handler.
	 */
	if (unlikely(index == -1)) {
		tx->dqo_tx.free_pending_packets =
			atomic_xchg(&tx->dqo_compl.free_pending_packets, -1);
		index = tx->dqo_tx.free_pending_packets;

		if (unlikely(index == -1))
			return NULL;
	}

	pending_packet = &tx->dqo.pending_packets[index];

	/* Remove pending_packet from free list */
	tx->dqo_tx.free_pending_packets = pending_packet->next;
	pending_packet->state = GVE_PACKET_STATE_PENDING_DATA_COMPL;

	return pending_packet;
}

static void
gve_free_pending_packet(struct gve_tx_ring *tx,
			struct gve_tx_pending_packet_dqo *pending_packet)
{
	s16 index = pending_packet - tx->dqo.pending_packets;

	pending_packet->state = GVE_PACKET_STATE_UNALLOCATED;
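	/* Lock-free push onto the completion handler's free list, mirroring
	 * gve_free_tx_qpl_bufs().
	 */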
	while (true) {
		s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_pending_packets);

		pending_packet->next = old_head;
		if (atomic_cmpxchg(&tx->dqo_compl.free_pending_packets,
				   old_head, index) == old_head) {
			break;
		}
	}
}

/* gve_tx_clean_pending_packets - Cleans up all pending tx requests and buffers.
 */
static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
{
	int i;

	for (i = 0; i < tx->dqo.num_pending_packets; i++) {
		struct gve_tx_pending_packet_dqo *cur_state =
			&tx->dqo.pending_packets[i];
		int j;

		for (j = 0; j < cur_state->num_bufs; j++) {
			if (j == 0) {
				dma_unmap_single(tx->dev,
						 dma_unmap_addr(cur_state, dma[j]),
						 dma_unmap_len(cur_state, len[j]),
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(tx->dev,
					       dma_unmap_addr(cur_state, dma[j]),
					       dma_unmap_len(cur_state, len[j]),
					       DMA_TO_DEVICE);
			}
		}
		if (cur_state->skb) {
			dev_consume_skb_any(cur_state->skb);
			cur_state->skb = NULL;
		}
	}
}

static void gve_tx_free_ring_dqo(struct gve_priv *priv, int idx)
{
	struct gve_tx_ring *tx = &priv->tx[idx];
	struct device *hdev = &priv->pdev->dev;
	size_t bytes;

	gve_tx_remove_from_block(priv, idx);

	if (tx->q_resources) {
		dma_free_coherent(hdev, sizeof(*tx->q_resources),
				  tx->q_resources, tx->q_resources_bus);
		tx->q_resources = NULL;
	}

	if (tx->dqo.compl_ring) {
		bytes = sizeof(tx->dqo.compl_ring[0]) *
			(tx->dqo.complq_mask + 1);
		dma_free_coherent(hdev, bytes, tx->dqo.compl_ring,
				  tx->complq_bus_dqo);
		tx->dqo.compl_ring = NULL;
	}

	if (tx->dqo.tx_ring) {
		bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1);
		dma_free_coherent(hdev, bytes, tx->dqo.tx_ring, tx->bus);
		tx->dqo.tx_ring = NULL;
	}

	kvfree(tx->dqo.pending_packets);
	tx->dqo.pending_packets = NULL;

	kvfree(tx->dqo.tx_qpl_buf_next);
	tx->dqo.tx_qpl_buf_next = NULL;

	if (tx->dqo.qpl) {
		gve_unassign_qpl(priv, tx->dqo.qpl->id);
		tx->dqo.qpl = NULL;
	}

	netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
}

static int gve_tx_qpl_buf_init(struct gve_tx_ring *tx)
{
	int num_tx_qpl_bufs = GVE_TX_BUFS_PER_PAGE_DQO *
		tx->dqo.qpl->num_entries;
	int i;

	tx->dqo.tx_qpl_buf_next = kvcalloc(num_tx_qpl_bufs,
					   sizeof(tx->dqo.tx_qpl_buf_next[0]),
					   GFP_KERNEL);
	if (!tx->dqo.tx_qpl_buf_next)
		return -ENOMEM;

	tx->dqo.num_tx_qpl_bufs = num_tx_qpl_bufs;

	/* Generate free TX buf list */
	for (i = 0; i < num_tx_qpl_bufs - 1; i++)
		tx->dqo.tx_qpl_buf_next[i] = i + 1;
	tx->dqo.tx_qpl_buf_next[num_tx_qpl_bufs - 1] = -1;

	atomic_set_release(&tx->dqo_compl.free_tx_qpl_buf_head, -1);
	return 0;
}

static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
{
	struct gve_tx_ring *tx = &priv->tx[idx];
	struct device *hdev = &priv->pdev->dev;
	int num_pending_packets;
	size_t bytes;
	int i;

	memset(tx, 0, sizeof(*tx));
	tx->q_num = idx;
	tx->dev = &priv->pdev->dev;
	tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
	atomic_set_release(&tx->dqo_compl.hw_tx_head, 0);

	/* Queue sizes must be a power of 2 */
	tx->mask = priv->tx_desc_cnt - 1;
	tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ?
		priv->options_dqo_rda.tx_comp_ring_entries - 1 :
		tx->mask;

	/* The max number of pending packets determines the maximum number of
	 * descriptors which may be written to the completion queue.
	 *
	 * We must set the number small enough to make sure we never overrun the
	 * completion queue.
	 */
	num_pending_packets = tx->dqo.complq_mask + 1;

	/* Reserve space for descriptor completions, which will be reported at
	 * most every GVE_TX_MIN_RE_INTERVAL packets.
	 */
	num_pending_packets -=
		(tx->dqo.complq_mask + 1) / GVE_TX_MIN_RE_INTERVAL;

	/* Each packet may have at most 2 buffer completions if it receives both
	 * a miss and reinjection completion.
	 */
	num_pending_packets /= 2;

	tx->dqo.num_pending_packets = min_t(int, num_pending_packets, S16_MAX);
	tx->dqo.pending_packets = kvcalloc(tx->dqo.num_pending_packets,
					   sizeof(tx->dqo.pending_packets[0]),
					   GFP_KERNEL);
	if (!tx->dqo.pending_packets)
		goto err;

	/* Set up linked list of pending packets */
	for (i = 0; i < tx->dqo.num_pending_packets - 1; i++)
		tx->dqo.pending_packets[i].next = i + 1;

	tx->dqo.pending_packets[tx->dqo.num_pending_packets - 1].next = -1;
	atomic_set_release(&tx->dqo_compl.free_pending_packets, -1);
	tx->dqo_compl.miss_completions.head = -1;
	tx->dqo_compl.miss_completions.tail = -1;
	tx->dqo_compl.timed_out_completions.head = -1;
	tx->dqo_compl.timed_out_completions.tail = -1;

	bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1);
	tx->dqo.tx_ring = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
	if (!tx->dqo.tx_ring)
		goto err;

	bytes = sizeof(tx->dqo.compl_ring[0]) * (tx->dqo.complq_mask + 1);
	tx->dqo.compl_ring = dma_alloc_coherent(hdev, bytes,
						&tx->complq_bus_dqo,
						GFP_KERNEL);
	if (!tx->dqo.compl_ring)
		goto err;

	tx->q_resources = dma_alloc_coherent(hdev, sizeof(*tx->q_resources),
					     &tx->q_resources_bus, GFP_KERNEL);
	if (!tx->q_resources)
		goto err;

	if (gve_is_qpl(priv)) {
		tx->dqo.qpl = gve_assign_tx_qpl(priv, idx);
		if (!tx->dqo.qpl)
			goto err;

		if (gve_tx_qpl_buf_init(tx))
			goto err;
	}

	gve_tx_add_to_block(priv, idx);

	return 0;

err:
	gve_tx_free_ring_dqo(priv, idx);
	return -ENOMEM;
}

int gve_tx_alloc_rings_dqo(struct gve_priv *priv)
{
	int err = 0;
	int i;

	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
		err = gve_tx_alloc_ring_dqo(priv, i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to alloc tx ring=%d: err=%d\n",
				  i, err);
			goto err;
		}
	}

	return 0;

err:
	for (i--; i >= 0; i--)
		gve_tx_free_ring_dqo(priv, i);

	return err;
}

void gve_tx_free_rings_dqo(struct gve_priv *priv)
{
	int i;

	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
		struct gve_tx_ring *tx = &priv->tx[i];

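		/* Reap whatever completions HW has already posted, then
		 * force-free anything still outstanding before the ring
		 * memory is released.
		 */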
		gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL);
		netdev_tx_reset_queue(tx->netdev_txq);
		gve_tx_clean_pending_packets(tx);

		gve_tx_free_ring_dqo(priv, i);
	}
}

/* Returns the number of slots available in the ring */
static u32 num_avail_tx_slots(const struct gve_tx_ring *tx)
{
	u32 num_used = (tx->dqo_tx.tail - tx->dqo_tx.head) & tx->mask;

	return tx->mask - num_used;
}

static bool gve_has_avail_slots_tx_dqo(struct gve_tx_ring *tx,
				       int desc_count, int buf_count)
{
	return gve_has_pending_packet(tx) &&
	       num_avail_tx_slots(tx) >= desc_count &&
	       gve_has_free_tx_qpl_bufs(tx, buf_count);
}

/* Stops the queue if there are not enough descriptors, pending-packet slots,
 * or TX QPL buffers available for this packet.
 * Return: 0 if stop is not required.
 */
static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx,
				 int desc_count, int buf_count)
{
	if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
		return 0;

	/* Update cached TX head pointer */
	tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);

	if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
		return 0;

	/* No space, so stop the queue */
	tx->stop_queue++;
	netif_tx_stop_queue(tx->netdev_txq);

	/* Sync with restarting queue in `gve_tx_poll_dqo()` */
	mb();

	/* After stopping the queue, check if we can transmit again in order to
	 * avoid a TOCTOU bug.
	 */
	tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);

	if (likely(!gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
		return -EBUSY;

	netif_tx_start_queue(tx->netdev_txq);
	tx->wake_queue++;
	return 0;
}

static void gve_extract_tx_metadata_dqo(const struct sk_buff *skb,
					struct gve_tx_metadata_dqo *metadata)
{
	memset(metadata, 0, sizeof(*metadata));
	metadata->version = GVE_TX_METADATA_VERSION_DQO;

	if (skb->l4_hash) {
		u16 path_hash = skb->hash ^ (skb->hash >> 16);

		path_hash &= (1 << 15) - 1;
		if (unlikely(path_hash == 0))
			path_hash = ~path_hash;

		metadata->path_hash = path_hash;
	}
}

static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
				     struct sk_buff *skb, u32 len, u64 addr,
				     s16 compl_tag, bool eop, bool is_gso)
{
	const bool checksum_offload_en = skb->ip_summed == CHECKSUM_PARTIAL;

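	/* A mapped buffer can be larger than the per-descriptor limit, so emit
	 * as many descriptors as needed; only the last chunk of the last
	 * buffer in the packet carries end_of_packet.
	 */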
	while (len > 0) {
		struct gve_tx_pkt_desc_dqo *desc =
			&tx->dqo.tx_ring[*desc_idx].pkt;
		u32 cur_len = min_t(u32, len, GVE_TX_MAX_BUF_SIZE_DQO);
		bool cur_eop = eop && cur_len == len;

		*desc = (struct gve_tx_pkt_desc_dqo){
			.buf_addr = cpu_to_le64(addr),
			.dtype = GVE_TX_PKT_DESC_DTYPE_DQO,
			.end_of_packet = cur_eop,
			.checksum_offload_enable = checksum_offload_en,
			.compl_tag = cpu_to_le16(compl_tag),
			.buf_size = cur_len,
		};

		addr += cur_len;
		len -= cur_len;
		*desc_idx = (*desc_idx + 1) & tx->mask;
	}
}

/* Validates and prepares `skb` for TSO.
 *
 * Returns header length, or < 0 if invalid.
 */
static int gve_prep_tso(struct sk_buff *skb)
{
	struct tcphdr *tcp;
	int header_len;
	u32 paylen;
	int err;

	/* Note: HW requires MSS (gso_size) to be <= 9728 and the total length
	 * of the TSO to be <= 262143.
	 *
	 * However, we don't validate these because:
	 * - Hypervisor enforces a limit of 9K MTU
	 * - Kernel will not produce a TSO larger than 64k
	 */

	if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
		return -1;

	/* Needed because we will modify the header. */
	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	tcp = tcp_hdr(skb);

	/* Remove payload length from checksum. */
	paylen = skb->len - skb_transport_offset(skb);

	switch (skb_shinfo(skb)->gso_type) {
	case SKB_GSO_TCPV4:
	case SKB_GSO_TCPV6:
		csum_replace_by_diff(&tcp->check,
				     (__force __wsum)htonl(paylen));

		/* Compute length of segmentation header. */
		header_len = skb_tcp_all_headers(skb);
		break;
	default:
		return -EINVAL;
	}

	if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
		return -EINVAL;

	return header_len;
}

static void gve_tx_fill_tso_ctx_desc(struct gve_tx_tso_context_desc_dqo *desc,
				     const struct sk_buff *skb,
				     const struct gve_tx_metadata_dqo *metadata,
				     int header_len)
{
	*desc = (struct gve_tx_tso_context_desc_dqo){
		.header_len = header_len,
		.cmd_dtype = {
			.dtype = GVE_TX_TSO_CTX_DESC_DTYPE_DQO,
			.tso = 1,
		},
		.flex0 = metadata->bytes[0],
		.flex5 = metadata->bytes[5],
		.flex6 = metadata->bytes[6],
		.flex7 = metadata->bytes[7],
		.flex8 = metadata->bytes[8],
		.flex9 = metadata->bytes[9],
		.flex10 = metadata->bytes[10],
		.flex11 = metadata->bytes[11],
	};
	desc->tso_total_len = skb->len - header_len;
	desc->mss = skb_shinfo(skb)->gso_size;
}

static void
gve_tx_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc,
			     const struct gve_tx_metadata_dqo *metadata)
{
	*desc = (struct gve_tx_general_context_desc_dqo){
		.flex0 = metadata->bytes[0],
		.flex1 = metadata->bytes[1],
		.flex2 = metadata->bytes[2],
		.flex3 = metadata->bytes[3],
		.flex4 = metadata->bytes[4],
		.flex5 = metadata->bytes[5],
		.flex6 = metadata->bytes[6],
		.flex7 = metadata->bytes[7],
		.flex8 = metadata->bytes[8],
		.flex9 = metadata->bytes[9],
		.flex10 = metadata->bytes[10],
		.flex11 = metadata->bytes[11],
		.cmd_dtype = {.dtype = GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO},
	};
}

static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
				      struct sk_buff *skb,
				      struct gve_tx_pending_packet_dqo *pkt,
				      s16 completion_tag,
				      u32 *desc_idx,
				      bool is_gso)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	/* Note: HW requires that the size of a non-TSO packet be within the
	 * range of [17, 9728].
	 *
	 * We don't double check because
	 * - We limited `netdev->min_mtu` to ETH_MIN_MTU.
	 * - Hypervisor won't allow MTU larger than 9216.
	 */

	pkt->num_bufs = 0;
	/* Map the linear portion of skb */
	{
		u32 len = skb_headlen(skb);
		dma_addr_t addr;

		addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx->dev, addr)))
			goto err;

		dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
		dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
		++pkt->num_bufs;

		gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
					 completion_tag,
					 /*eop=*/shinfo->nr_frags == 0, is_gso);
	}

	for (i = 0; i < shinfo->nr_frags; i++) {
		const skb_frag_t *frag = &shinfo->frags[i];
		bool is_eop = i == (shinfo->nr_frags - 1);
		u32 len = skb_frag_size(frag);
		dma_addr_t addr;

		addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx->dev, addr)))
			goto err;

		dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
		dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
		++pkt->num_bufs;

		gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
					 completion_tag, is_eop, is_gso);
	}

	return 0;
err:
	for (i = 0; i < pkt->num_bufs; i++) {
		if (i == 0) {
			dma_unmap_single(tx->dev,
					 dma_unmap_addr(pkt, dma[i]),
					 dma_unmap_len(pkt, len[i]),
					 DMA_TO_DEVICE);
		} else {
			dma_unmap_page(tx->dev,
				       dma_unmap_addr(pkt, dma[i]),
				       dma_unmap_len(pkt, len[i]),
				       DMA_TO_DEVICE);
		}
	}
	pkt->num_bufs = 0;
	return -1;
}

/* Tx buffer i corresponds to
 * qpl_page_id = i / GVE_TX_BUFS_PER_PAGE_DQO
 * qpl_page_offset = (i % GVE_TX_BUFS_PER_PAGE_DQO) * GVE_TX_BUF_SIZE_DQO
 */
static void gve_tx_buf_get_addr(struct gve_tx_ring *tx,
				s16 index,
				void **va, dma_addr_t *dma_addr)
{
	int page_id = index >> (PAGE_SHIFT - GVE_TX_BUF_SHIFT_DQO);
	int offset = (index & (GVE_TX_BUFS_PER_PAGE_DQO - 1)) << GVE_TX_BUF_SHIFT_DQO;

	*va = page_address(tx->dqo.qpl->pages[page_id]) + offset;
	*dma_addr = tx->dqo.qpl->page_buses[page_id] + offset;
}

static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx,
				   struct sk_buff *skb,
				   struct gve_tx_pending_packet_dqo *pkt,
				   s16 completion_tag,
				   u32 *desc_idx,
				   bool is_gso)
{
	u32 copy_offset = 0;
	dma_addr_t dma_addr;
	u32 copy_len;
	s16 index;
	void *va;

	/* Break the packet into buffer size chunks */
	pkt->num_bufs = 0;
	while (copy_offset < skb->len) {
		index = gve_alloc_tx_qpl_buf(tx);
		if (unlikely(index == -1))
			goto err;

		gve_tx_buf_get_addr(tx, index, &va, &dma_addr);
		copy_len = min_t(u32, GVE_TX_BUF_SIZE_DQO,
				 skb->len - copy_offset);
		skb_copy_bits(skb, copy_offset, va, copy_len);

		copy_offset += copy_len;
		dma_sync_single_for_device(tx->dev, dma_addr,
					   copy_len, DMA_TO_DEVICE);
		gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb,
					 copy_len,
					 dma_addr,
					 completion_tag,
					 copy_offset == skb->len,
					 is_gso);

		pkt->tx_qpl_buf_ids[pkt->num_bufs] = index;
		++tx->dqo_tx.alloc_tx_qpl_buf_cnt;
		++pkt->num_bufs;
	}

	return 0;
err:
	/* Should not be here if gve_has_free_tx_qpl_bufs() check is correct */
	gve_free_tx_qpl_bufs(tx, pkt);
	return -ENOMEM;
}

/* Returns 0 on success, or < 0 on error.
 *
 * Before this function is called, the caller must ensure
 * gve_has_pending_packet(tx) returns true.
 */
static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
			      struct sk_buff *skb)
{
	const bool is_gso = skb_is_gso(skb);
	u32 desc_idx = tx->dqo_tx.tail;
	struct gve_tx_pending_packet_dqo *pkt;
	struct gve_tx_metadata_dqo metadata;
	s16 completion_tag;

	pkt = gve_alloc_pending_packet(tx);
	pkt->skb = skb;
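	/* The completion tag is simply the pending packet's index; HW echoes
	 * it back in completion descriptors so the packet can be looked up.
	 */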
	completion_tag = pkt - tx->dqo.pending_packets;

	gve_extract_tx_metadata_dqo(skb, &metadata);
	if (is_gso) {
		int header_len = gve_prep_tso(skb);

		if (unlikely(header_len < 0))
			goto err;

		gve_tx_fill_tso_ctx_desc(&tx->dqo.tx_ring[desc_idx].tso_ctx,
					 skb, &metadata, header_len);
		desc_idx = (desc_idx + 1) & tx->mask;
	}

	gve_tx_fill_general_ctx_desc(&tx->dqo.tx_ring[desc_idx].general_ctx,
				     &metadata);
	desc_idx = (desc_idx + 1) & tx->mask;

	if (tx->dqo.qpl) {
		if (gve_tx_add_skb_copy_dqo(tx, skb, pkt,
					    completion_tag,
					    &desc_idx, is_gso))
			goto err;
	} else {
		if (gve_tx_add_skb_no_copy_dqo(tx, skb, pkt,
					       completion_tag,
					       &desc_idx, is_gso))
			goto err;
	}

	tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs;

	/* Commit the changes to our state */
	tx->dqo_tx.tail = desc_idx;

	/* Request a descriptor completion on the last descriptor of the
	 * packet if we are allowed to by the HW enforced interval.
	 */
	{
		u32 last_desc_idx = (desc_idx - 1) & tx->mask;
		u32 last_report_event_interval =
			(last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask;

		if (unlikely(last_report_event_interval >=
			     GVE_TX_MIN_RE_INTERVAL)) {
			tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true;
			tx->dqo_tx.last_re_idx = last_desc_idx;
		}
	}

	return 0;

err:
	pkt->skb = NULL;
	gve_free_pending_packet(tx, pkt);

	return -1;
}

static int gve_num_descs_per_buf(size_t size)
{
	return DIV_ROUND_UP(size, GVE_TX_MAX_BUF_SIZE_DQO);
}

static int gve_num_buffer_descs_needed(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	int num_descs;
	int i;

	num_descs = gve_num_descs_per_buf(skb_headlen(skb));

	for (i = 0; i < shinfo->nr_frags; i++) {
		unsigned int frag_size = skb_frag_size(&shinfo->frags[i]);

		num_descs += gve_num_descs_per_buf(frag_size);
	}

	return num_descs;
}

/* Returns true if HW is capable of sending TSO represented by `skb`.
 *
 * Each segment must not span more than GVE_TX_MAX_DATA_DESCS buffers.
 * - The header is counted as one buffer for every single segment.
 * - A buffer which is split between two segments is counted for both.
 * - If a buffer contains both header and payload, it is counted as two buffers.
 */
static bool gve_can_send_tso(const struct sk_buff *skb)
{
	const int max_bufs_per_seg = GVE_TX_MAX_DATA_DESCS - 1;
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	const int header_len = skb_tcp_all_headers(skb);
	const int gso_size = shinfo->gso_size;
	int cur_seg_num_bufs;
	int cur_seg_size;
	int i;

	cur_seg_size = skb_headlen(skb) - header_len;
	cur_seg_num_bufs = cur_seg_size > 0;

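	/* Walk the frags, tracking how many buffers feed the current segment.
	 * Crossing a gso_size boundary starts a new segment; a frag that
	 * straddles the boundary is counted against both segments.
	 */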
	for (i = 0; i < shinfo->nr_frags; i++) {
		if (cur_seg_size >= gso_size) {
			cur_seg_size %= gso_size;
			cur_seg_num_bufs = cur_seg_size > 0;
		}

		if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg))
			return false;

		cur_seg_size += skb_frag_size(&shinfo->frags[i]);
	}

	return true;
}

netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
					 struct net_device *dev,
					 netdev_features_t features)
{
	if (skb_is_gso(skb) && !gve_can_send_tso(skb))
		return features & ~NETIF_F_GSO_MASK;

	return features;
}

/* Attempt to transmit specified SKB.
 *
 * Returns 0 if the SKB was transmitted or dropped.
 * Returns -1 if there is not currently enough space to transmit the SKB.
 */
static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
			  struct sk_buff *skb)
{
	int num_buffer_descs;
	int total_num_descs;

	if (skb_is_gso(skb) && unlikely(ipv6_hopopt_jumbo_remove(skb)))
		goto drop;

	if (tx->dqo.qpl) {
		/* We do not need to verify the number of buffers used per
		 * packet or per segment in case of TSO as with 2K size buffers
		 * none of the TX packet rules would be violated.
		 *
		 * gve_can_send_tso() checks that each TCP segment of gso_size is
		 * not distributed over more than 9 SKB frags.
		 */
		num_buffer_descs = DIV_ROUND_UP(skb->len, GVE_TX_BUF_SIZE_DQO);
	} else {
		num_buffer_descs = gve_num_buffer_descs_needed(skb);
		if (!skb_is_gso(skb)) {
			if (unlikely(num_buffer_descs > GVE_TX_MAX_DATA_DESCS)) {
				if (unlikely(skb_linearize(skb) < 0))
					goto drop;

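				/* After linearization, a non-TSO packet is at
				 * most 9728 bytes (see the HW limit noted in
				 * gve_tx_add_skb_no_copy_dqo()), so it fits in
				 * a single data descriptor.
				 */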
				num_buffer_descs = 1;
			}
		}
	}

	/* Metadata + (optional TSO) + data descriptors. */
	total_num_descs = 1 + skb_is_gso(skb) + num_buffer_descs;
	if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs +
					   GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP,
					   num_buffer_descs))) {
		return -1;
	}

	if (unlikely(gve_tx_add_skb_dqo(tx, skb) < 0))
		goto drop;

	netdev_tx_sent_queue(tx->netdev_txq, skb->len);
	skb_tx_timestamp(skb);
	return 0;

drop:
	tx->dropped_pkt++;
	dev_kfree_skb_any(skb);
	return 0;
}

/* Transmit a given skb and ring the doorbell. */
netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	struct gve_tx_ring *tx;

	tx = &priv->tx[skb_get_queue_mapping(skb)];
	if (unlikely(gve_try_tx_skb(priv, tx, skb) < 0)) {
		/* We need to ring the txq doorbell -- we have stopped the Tx
		 * queue for want of resources, but prior calls to gve_tx()
		 * may have added descriptors without ringing the doorbell.
		 */
		gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
		return NETDEV_TX_BUSY;
	}

	if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
		return NETDEV_TX_OK;

	gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
	return NETDEV_TX_OK;
}

static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,
			struct gve_tx_pending_packet_dqo *pending_packet)
{
	s16 old_tail, index;

	index = pending_packet - tx->dqo.pending_packets;
	old_tail = list->tail;
	list->tail = index;
	if (old_tail == -1)
		list->head = index;
	else
		tx->dqo.pending_packets[old_tail].next = index;

	pending_packet->next = -1;
	pending_packet->prev = old_tail;
}

static void remove_from_list(struct gve_tx_ring *tx,
			     struct gve_index_list *list,
			     struct gve_tx_pending_packet_dqo *pkt)
{
	s16 prev_index, next_index;

	prev_index = pkt->prev;
	next_index = pkt->next;

	if (prev_index == -1) {
		/* Node is head */
		list->head = next_index;
	} else {
		tx->dqo.pending_packets[prev_index].next = next_index;
	}
	if (next_index == -1) {
		/* Node is tail */
		list->tail = prev_index;
	} else {
		tx->dqo.pending_packets[next_index].prev = prev_index;
	}
}

static void gve_unmap_packet(struct device *dev,
			     struct gve_tx_pending_packet_dqo *pkt)
{
	int i;

	/* SKB linear portion is guaranteed to be mapped */
	dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
			 dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
	for (i = 1; i < pkt->num_bufs; i++) {
		dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
			       dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
	}
	pkt->num_bufs = 0;
}

/* Completion types and expected behavior:
 * No Miss compl + Packet compl = Packet completed normally.
 * Miss compl + Re-inject compl = Packet completed normally.
 * No Miss compl + Re-inject compl = Skipped i.e. packet not completed.
 * Miss compl + Packet compl = Skipped i.e. packet not completed.
 */
static void gve_handle_packet_completion(struct gve_priv *priv,
					 struct gve_tx_ring *tx, bool is_napi,
					 u16 compl_tag, u64 *bytes, u64 *pkts,
					 bool is_reinjection)
{
	struct gve_tx_pending_packet_dqo *pending_packet;

	if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) {
		net_err_ratelimited("%s: Invalid TX completion tag: %d\n",
				    priv->dev->name, (int)compl_tag);
		return;
	}

	pending_packet = &tx->dqo.pending_packets[compl_tag];

	if (unlikely(is_reinjection)) {
		if (unlikely(pending_packet->state ==
			     GVE_PACKET_STATE_TIMED_OUT_COMPL)) {
			net_err_ratelimited("%s: Re-injection completion: %d received after timeout.\n",
					    priv->dev->name, (int)compl_tag);
			/* Packet was already completed as a result of timeout,
			 * so just remove from list and free pending packet.
			 */
			remove_from_list(tx,
					 &tx->dqo_compl.timed_out_completions,
					 pending_packet);
			gve_free_pending_packet(tx, pending_packet);
			return;
		}
		if (unlikely(pending_packet->state !=
			     GVE_PACKET_STATE_PENDING_REINJECT_COMPL)) {
			/* No outstanding miss completion but packet allocated
			 * implies packet receives a re-injection completion
			 * without a prior miss completion. Return without
			 * completing the packet.
			 */
			net_err_ratelimited("%s: Re-injection completion received without corresponding miss completion: %d\n",
					    priv->dev->name, (int)compl_tag);
			return;
		}
		remove_from_list(tx, &tx->dqo_compl.miss_completions,
				 pending_packet);
	} else {
		/* Packet is allocated but not a pending data completion. */
		if (unlikely(pending_packet->state !=
			     GVE_PACKET_STATE_PENDING_DATA_COMPL)) {
			net_err_ratelimited("%s: No pending data completion: %d\n",
					    priv->dev->name, (int)compl_tag);
			return;
		}
	}
	tx->dqo_tx.completed_packet_desc_cnt += pending_packet->num_bufs;
	if (tx->dqo.qpl)
		gve_free_tx_qpl_bufs(tx, pending_packet);
	else
		gve_unmap_packet(tx->dev, pending_packet);

	*bytes += pending_packet->skb->len;
	(*pkts)++;
	napi_consume_skb(pending_packet->skb, is_napi);
	pending_packet->skb = NULL;
	gve_free_pending_packet(tx, pending_packet);
}

static void gve_handle_miss_completion(struct gve_priv *priv,
				       struct gve_tx_ring *tx, u16 compl_tag,
				       u64 *bytes, u64 *pkts)
{
	struct gve_tx_pending_packet_dqo *pending_packet;

	if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) {
		net_err_ratelimited("%s: Invalid TX completion tag: %d\n",
				    priv->dev->name, (int)compl_tag);
		return;
	}

	pending_packet = &tx->dqo.pending_packets[compl_tag];
	if (unlikely(pending_packet->state !=
		     GVE_PACKET_STATE_PENDING_DATA_COMPL)) {
		net_err_ratelimited("%s: Unexpected packet state: %d for completion tag : %d\n",
				    priv->dev->name, (int)pending_packet->state,
				    (int)compl_tag);
		return;
	}

	pending_packet->state = GVE_PACKET_STATE_PENDING_REINJECT_COMPL;
	/* jiffies can wrap around, but time comparisons handle the overflow. */
	pending_packet->timeout_jiffies =
		jiffies +
		msecs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT *
				 MSEC_PER_SEC);
	add_to_list(tx, &tx->dqo_compl.miss_completions, pending_packet);

	*bytes += pending_packet->skb->len;
	(*pkts)++;
}

static void remove_miss_completions(struct gve_priv *priv,
				    struct gve_tx_ring *tx)
{
	struct gve_tx_pending_packet_dqo *pending_packet;
	s16 next_index;

	next_index = tx->dqo_compl.miss_completions.head;
	while (next_index != -1) {
		pending_packet = &tx->dqo.pending_packets[next_index];
		next_index = pending_packet->next;
		/* Break early because packets should time out in order. */
		if (time_is_after_jiffies(pending_packet->timeout_jiffies))
			break;

		remove_from_list(tx, &tx->dqo_compl.miss_completions,
				 pending_packet);
		/* Unmap/free the TX buffers and free the skb, but do not free
		 * the pending packet itself: the completion tag stays reserved
		 * so the driver can take appropriate action if a corresponding
		 * valid completion is received later.
		 */
		if (tx->dqo.qpl)
			gve_free_tx_qpl_bufs(tx, pending_packet);
		else
			gve_unmap_packet(tx->dev, pending_packet);

		/* This indicates the packet was dropped. */
		dev_kfree_skb_any(pending_packet->skb);
		pending_packet->skb = NULL;
		tx->dropped_pkt++;
		net_err_ratelimited("%s: No reinjection completion was received for: %d.\n",
				    priv->dev->name,
				    (int)(pending_packet - tx->dqo.pending_packets));

		pending_packet->state = GVE_PACKET_STATE_TIMED_OUT_COMPL;
		pending_packet->timeout_jiffies =
			jiffies +
			msecs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT *
					 MSEC_PER_SEC);
		/* Maintain pending packet in another list so the packet can be
		 * unallocated at a later time.
		 */
		add_to_list(tx, &tx->dqo_compl.timed_out_completions,
			    pending_packet);
	}
}

static void remove_timed_out_completions(struct gve_priv *priv,
					 struct gve_tx_ring *tx)
{
	struct gve_tx_pending_packet_dqo *pending_packet;
	s16 next_index;

	next_index = tx->dqo_compl.timed_out_completions.head;
	while (next_index != -1) {
		pending_packet = &tx->dqo.pending_packets[next_index];
		next_index = pending_packet->next;
		/* Break early because packets should time out in order. */
		if (time_is_after_jiffies(pending_packet->timeout_jiffies))
			break;

		remove_from_list(tx, &tx->dqo_compl.timed_out_completions,
				 pending_packet);
		gve_free_pending_packet(tx, pending_packet);
	}
}

int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
			  struct napi_struct *napi)
{
	u64 reinject_compl_bytes = 0;
	u64 reinject_compl_pkts = 0;
	int num_descs_cleaned = 0;
	u64 miss_compl_bytes = 0;
	u64 miss_compl_pkts = 0;
	u64 pkt_compl_bytes = 0;
	u64 pkt_compl_pkts = 0;

	/* Limit in order to avoid blocking for too long */
	while (!napi || pkt_compl_pkts < napi->weight) {
		struct gve_tx_compl_desc *compl_desc =
			&tx->dqo.compl_ring[tx->dqo_compl.head];
		u16 type;

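		/* A completion is new only if its generation bit differs from
		 * our cached cur_gen_bit; the driver flips cur_gen_bit each
		 * time the completion ring wraps (see below).
		 */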
		if (compl_desc->generation == tx->dqo_compl.cur_gen_bit)
			break;

		/* Prefetch the next descriptor. */
		prefetch(&tx->dqo.compl_ring[(tx->dqo_compl.head + 1) &
				tx->dqo.complq_mask]);

		/* Do not read data until we own the descriptor */
		dma_rmb();
		type = compl_desc->type;

		if (type == GVE_COMPL_TYPE_DQO_DESC) {
			/* This is the last descriptor fetched by HW plus one */
			u16 tx_head = le16_to_cpu(compl_desc->tx_head);

			atomic_set_release(&tx->dqo_compl.hw_tx_head, tx_head);
		} else if (type == GVE_COMPL_TYPE_DQO_PKT) {
			u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);

			if (compl_tag & GVE_ALT_MISS_COMPL_BIT) {
				compl_tag &= ~GVE_ALT_MISS_COMPL_BIT;
				gve_handle_miss_completion(priv, tx, compl_tag,
							   &miss_compl_bytes,
							   &miss_compl_pkts);
			} else {
				gve_handle_packet_completion(priv, tx, !!napi,
							     compl_tag,
							     &pkt_compl_bytes,
							     &pkt_compl_pkts,
							     false);
			}
		} else if (type == GVE_COMPL_TYPE_DQO_MISS) {
			u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);

			gve_handle_miss_completion(priv, tx, compl_tag,
						   &miss_compl_bytes,
						   &miss_compl_pkts);
		} else if (type == GVE_COMPL_TYPE_DQO_REINJECTION) {
			u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);

			gve_handle_packet_completion(priv, tx, !!napi,
						     compl_tag,
						     &reinject_compl_bytes,
						     &reinject_compl_pkts,
						     true);
		}

		tx->dqo_compl.head =
			(tx->dqo_compl.head + 1) & tx->dqo.complq_mask;
		/* Flip the generation bit when we wrap around */
		tx->dqo_compl.cur_gen_bit ^= tx->dqo_compl.head == 0;
		num_descs_cleaned++;
	}

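	/* Note: BQL is credited with normal and miss completions, while the
	 * per-ring byte/packet stats below count each packet once, on its
	 * packet or reinjection completion.
	 */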
	netdev_tx_completed_queue(tx->netdev_txq,
				  pkt_compl_pkts + miss_compl_pkts,
				  pkt_compl_bytes + miss_compl_bytes);

	remove_miss_completions(priv, tx);
	remove_timed_out_completions(priv, tx);

	u64_stats_update_begin(&tx->statss);
	tx->bytes_done += pkt_compl_bytes + reinject_compl_bytes;
	tx->pkt_done += pkt_compl_pkts + reinject_compl_pkts;
	u64_stats_update_end(&tx->statss);
	return num_descs_cleaned;
}

bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean)
{
	struct gve_tx_compl_desc *compl_desc;
	struct gve_tx_ring *tx = block->tx;
	struct gve_priv *priv = block->priv;

	if (do_clean) {
		int num_descs_cleaned = gve_clean_tx_done_dqo(priv, tx,
							      &block->napi);

		/* Sync with queue being stopped in `gve_maybe_stop_tx_dqo()` */
		mb();

		if (netif_tx_queue_stopped(tx->netdev_txq) &&
		    num_descs_cleaned > 0) {
			tx->wake_queue++;
			netif_tx_wake_queue(tx->netdev_txq);
		}
	}

	/* Return true if we still have work. */
	compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head];
	return compl_desc->generation != tx->dqo_compl.cur_gen_bit;
}