/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#ifdef DEV_NETMAP

#include "ena.h"
#include "ena_netmap.h"

#define ENA_NETMAP_MORE_FRAMES 1
#define ENA_NETMAP_NO_MORE_FRAMES 0
#define ENA_MAX_FRAMES 16384

struct ena_netmap_ctx {
        struct netmap_kring *kring;
        struct ena_adapter *adapter;
        struct netmap_adapter *na;
        struct netmap_slot *slots;
        struct ena_ring *ring;
        struct ena_com_io_cq *io_cq;
        struct ena_com_io_sq *io_sq;
        u_int nm_i;
        uint16_t nt;
        uint16_t lim;
};

/* Netmap callbacks */
static int ena_netmap_reg(struct netmap_adapter *, int);
static int ena_netmap_txsync(struct netmap_kring *, int);
static int ena_netmap_rxsync(struct netmap_kring *, int);

/* Helper functions */
static int ena_netmap_tx_frames(struct ena_netmap_ctx *);
static int ena_netmap_tx_frame(struct ena_netmap_ctx *);
static inline uint16_t ena_netmap_count_slots(struct ena_netmap_ctx *);
static inline uint16_t ena_netmap_packet_len(struct netmap_slot *, u_int,
    uint16_t);
static int ena_netmap_copy_data(struct netmap_adapter *, struct netmap_slot *,
    u_int, uint16_t, uint16_t, void *);
static int ena_netmap_map_single_slot(struct netmap_adapter *,
    struct netmap_slot *, bus_dma_tag_t, bus_dmamap_t, void **, uint64_t *);
static int ena_netmap_tx_map_slots(struct ena_netmap_ctx *,
    struct ena_tx_buffer *, void **, uint16_t *, uint16_t *);
static void ena_netmap_unmap_last_socket_chain(struct ena_netmap_ctx *,
    struct ena_tx_buffer *);
static void ena_netmap_tx_cleanup(struct ena_netmap_ctx *);
static uint16_t ena_netmap_tx_clean_one(struct ena_netmap_ctx *, uint16_t);
static int ena_netmap_rx_frames(struct ena_netmap_ctx *);
static int ena_netmap_rx_frame(struct ena_netmap_ctx *);
static int ena_netmap_rx_load_desc(struct ena_netmap_ctx *, uint16_t, int *);
static void ena_netmap_rx_cleanup(struct ena_netmap_ctx *);
static void ena_netmap_fill_ctx(struct netmap_kring *, struct ena_netmap_ctx *,
    uint16_t);

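/*
 * Describe the adapter to netmap and register the netmap callbacks.
 * Ring sizes and ring counts are taken from the current adapter
 * configuration.
 */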
int
ena_netmap_attach(struct ena_adapter *adapter)
{
        struct netmap_adapter na;

        ena_log_nm(adapter->pdev, INFO, "netmap attach\n");

        bzero(&na, sizeof(na));
        na.na_flags = NAF_MOREFRAG;
        na.ifp = adapter->ifp;
        na.num_tx_desc = adapter->requested_tx_ring_size;
        na.num_rx_desc = adapter->requested_rx_ring_size;
        na.num_tx_rings = adapter->num_io_queues;
        na.num_rx_rings = adapter->num_io_queues;
        na.rx_buf_maxsize = adapter->buf_ring_size;
        na.nm_txsync = ena_netmap_txsync;
        na.nm_rxsync = ena_netmap_rxsync;
        na.nm_register = ena_netmap_reg;

        return (netmap_attach(&na));
}

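/*
 * Attach a netmap buffer to the given rx_info: DMA-map the buffer under
 * nr_hwcur and steal its buf_idx from the userspace ring. The index is
 * given back by ena_netmap_free_rx_slot() or by the Rx path.
 */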
int
ena_netmap_alloc_rx_slot(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{
        struct netmap_adapter *na = NA(adapter->ifp);
        struct netmap_kring *kring;
        struct netmap_ring *ring;
        struct netmap_slot *slot;
        void *addr;
        uint64_t paddr;
        int nm_i, qid, head, lim, rc;

        /* If the previously allocated buffer has not been used, keep it. */
        if (unlikely(rx_info->netmap_buf_idx != 0))
                return (0);

        qid = rx_ring->qid;
        kring = na->rx_rings[qid];
        nm_i = kring->nr_hwcur;
        head = kring->rhead;

        ena_log_nm(adapter->pdev, DBG,
            "nr_hwcur: %d, nr_hwtail: %d, rhead: %d, rcur: %d, rtail: %d\n",
            kring->nr_hwcur, kring->nr_hwtail, kring->rhead, kring->rcur,
            kring->rtail);

        if ((nm_i == head) && rx_ring->initialized) {
                ena_log_nm(adapter->pdev, ERR,
                    "No free slots in netmap ring\n");
                return (ENOMEM);
        }

        ring = kring->ring;
        if (ring == NULL) {
                ena_log_nm(adapter->pdev, ERR, "Rx ring %d is NULL\n", qid);
                return (EFAULT);
        }
        slot = &ring->slot[nm_i];

        addr = PNMB(na, slot, &paddr);
        if (addr == NETMAP_BUF_BASE(na)) {
                ena_log_nm(adapter->pdev, ERR, "Bad buff in slot\n");
                return (EFAULT);
        }

        rc = netmap_load_map(na, adapter->rx_buf_tag, rx_info->map, addr);
        if (rc != 0) {
                ena_log_nm(adapter->pdev, WARN, "DMA mapping error\n");
                return (rc);
        }
        bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);

        rx_info->ena_buf.paddr = paddr;
        rx_info->ena_buf.len = ring->nr_buf_size;
        rx_info->mbuf = NULL;
        rx_info->netmap_buf_idx = slot->buf_idx;

        slot->buf_idx = 0;

        lim = kring->nkr_num_slots - 1;
        kring->nr_hwcur = nm_next(nm_i, lim);

        return (0);
}

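/*
 * Return a buffer taken by ena_netmap_alloc_rx_slot() to the netmap ring:
 * unload its DMA map and restore its buf_idx into the slot just before
 * nr_hwcur.
 */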
void
ena_netmap_free_rx_slot(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{
        struct netmap_adapter *na;
        struct netmap_kring *kring;
        struct netmap_slot *slot;
        int nm_i, qid, lim;

        na = NA(adapter->ifp);
        if (na == NULL) {
                ena_log_nm(adapter->pdev, ERR, "netmap adapter is NULL\n");
                return;
        }

        if (na->rx_rings == NULL) {
                ena_log_nm(adapter->pdev, ERR, "netmap rings are NULL\n");
                return;
        }

        qid = rx_ring->qid;
        kring = na->rx_rings[qid];
        if (kring == NULL) {
                ena_log_nm(adapter->pdev, ERR,
                    "netmap kernel ring %d is NULL\n", qid);
                return;
        }

        lim = kring->nkr_num_slots - 1;
        nm_i = nm_prev(kring->nr_hwcur, lim);

        if (kring->nr_mode != NKR_NETMAP_ON)
                return;

        bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
            BUS_DMASYNC_POSTREAD);
        netmap_unload_map(na, adapter->rx_buf_tag, rx_info->map);

        KASSERT(kring->ring != NULL, ("Netmap Rx ring is NULL\n"));

        slot = &kring->ring->slot[nm_i];

        ENA_WARN(slot->buf_idx != 0, adapter->ena_dev, "Overwrite slot buf\n");
        slot->buf_idx = rx_info->netmap_buf_idx;
        slot->flags = NS_BUF_CHANGED;

        rx_info->netmap_buf_idx = 0;
        kring->nr_hwcur = nm_i;
}

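/* Check whether the given queue currently operates in native netmap mode. */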
static bool
ena_ring_in_netmap(struct ena_adapter *adapter, int qid, enum txrx x)
{
        struct netmap_adapter *na;
        struct netmap_kring *kring;

        if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
                na = NA(adapter->ifp);
                if (na->na_flags & NAF_NATIVE) {
                        kring = (x == NR_RX) ? na->rx_rings[qid] :
                            na->tx_rings[qid];
                        if (kring->nr_mode == NKR_NETMAP_ON)
                                return true;
                }
        }
        return false;
}

bool
ena_tx_ring_in_netmap(struct ena_adapter *adapter, int qid)
{
        return ena_ring_in_netmap(adapter, qid, NR_TX);
}

bool
ena_rx_ring_in_netmap(struct ena_adapter *adapter, int qid)
{
        return ena_ring_in_netmap(adapter, qid, NR_RX);
}

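/* Reset the netmap view of a ring when the driver ring is reinitialized. */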
static void
ena_netmap_reset_ring(struct ena_adapter *adapter, int qid, enum txrx x)
{
        if (!ena_ring_in_netmap(adapter, qid, x))
                return;

        netmap_reset(NA(adapter->ifp), x, qid, 0);
        ena_log_nm(adapter->pdev, INFO, "%s ring %d is in netmap mode\n",
            (x == NR_TX) ? "Tx" : "Rx", qid);
}

void
ena_netmap_reset_rx_ring(struct ena_adapter *adapter, int qid)
{
        ena_netmap_reset_ring(adapter, qid, NR_RX);
}

void
ena_netmap_reset_tx_ring(struct ena_adapter *adapter, int qid)
{
        ena_netmap_reset_ring(adapter, qid, NR_TX);
}

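/*
 * nm_register callback: bring the interface down, switch the requested
 * krings into or out of netmap mode, and bring it up again. If ena_up()
 * fails, the device is reset and restored.
 */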
static int
ena_netmap_reg(struct netmap_adapter *na, int onoff)
{
        if_t ifp = na->ifp;
        struct ena_adapter *adapter = if_getsoftc(ifp);
        device_t pdev = adapter->pdev;
        struct netmap_kring *kring;
        enum txrx t;
        int rc, i;

        ENA_LOCK_LOCK();
        ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
        ena_down(adapter);

        if (onoff) {
                ena_log_nm(pdev, INFO, "netmap on\n");
                for_rx_tx(t) {
                        for (i = 0; i <= nma_get_nrings(na, t); i++) {
                                kring = NMR(na, t)[i];
                                if (nm_kring_pending_on(kring)) {
                                        kring->nr_mode = NKR_NETMAP_ON;
                                }
                        }
                }
                nm_set_native_flags(na);
        } else {
                ena_log_nm(pdev, INFO, "netmap off\n");
                nm_clear_native_flags(na);
                for_rx_tx(t) {
                        for (i = 0; i <= nma_get_nrings(na, t); i++) {
                                kring = NMR(na, t)[i];
                                if (nm_kring_pending_off(kring)) {
                                        kring->nr_mode = NKR_NETMAP_OFF;
                                }
                        }
                }
        }

        rc = ena_up(adapter);
        if (rc != 0) {
                ena_log_nm(pdev, WARN, "ena_up failed with rc=%d\n", rc);
                adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
                nm_clear_native_flags(na);
                ena_destroy_device(adapter, false);
                ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
                rc = ena_restore_device(adapter);
        }
        ENA_LOCK_UNLOCK();

        return (rc);
}

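/* nm_txsync callback: transmit pending frames and reclaim completions. */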
static int
ena_netmap_txsync(struct netmap_kring *kring, int flags)
{
        struct ena_netmap_ctx ctx;
        int rc = 0;

        ena_netmap_fill_ctx(kring, &ctx, ENA_IO_TXQ_IDX(kring->ring_id));
        ctx.ring = &ctx.adapter->tx_ring[kring->ring_id];

        ENA_RING_MTX_LOCK(ctx.ring);
        if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, ctx.adapter)))
                goto txsync_end;

        if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, ctx.adapter)))
                goto txsync_end;

        rc = ena_netmap_tx_frames(&ctx);
        ena_netmap_tx_cleanup(&ctx);

txsync_end:
        ENA_RING_MTX_UNLOCK(ctx.ring);
        return (rc);
}

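/*
 * Send all frames between nr_hwcur and rhead to the device and ring the
 * doorbell once for the whole batch.
 */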
static int
ena_netmap_tx_frames(struct ena_netmap_ctx *ctx)
{
        struct ena_ring *tx_ring = ctx->ring;
        int rc = 0;

        ctx->nm_i = ctx->kring->nr_hwcur;
        ctx->nt = ctx->ring->next_to_use;

        __builtin_prefetch(&ctx->slots[ctx->nm_i]);

        while (ctx->nm_i != ctx->kring->rhead) {
                if ((rc = ena_netmap_tx_frame(ctx)) != 0) {
                        /*
                         * An error is returned even when the Tx ring is
                         * simply full. It must not be passed up to netmap,
                         * as the application knows the current ring state
                         * from the netmap ring pointers. Returning an error
                         * there could cause the application to exit, even
                         * though a full Tx ring is a common condition.
                         */
                        if (rc == ENA_COM_NO_MEM)
                                rc = 0;
                        break;
                }
                tx_ring->acum_pkts++;
        }

        /* If any packet was sent... */
        if (likely(ctx->nm_i != ctx->kring->nr_hwcur)) {
                /* ...send the doorbell to the device. */
                ena_ring_tx_doorbell(tx_ring);

                ctx->ring->next_to_use = ctx->nt;
                ctx->kring->nr_hwcur = ctx->nm_i;
        }

        return (rc);
}

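/*
 * Build and submit a single Tx frame, possibly spanning multiple slots
 * chained with NS_MOREFRAG, to the submission queue.
 */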
static int
ena_netmap_tx_frame(struct ena_netmap_ctx *ctx)
{
        struct ena_com_tx_ctx ena_tx_ctx;
        struct ena_adapter *adapter;
        struct ena_ring *tx_ring;
        struct ena_tx_buffer *tx_info;
        uint16_t req_id;
        uint16_t header_len;
        uint16_t packet_len;
        int nb_hw_desc;
        int rc;
        void *push_hdr;

        adapter = ctx->adapter;
        if (ena_netmap_count_slots(ctx) > adapter->max_tx_sgl_size) {
                ena_log_nm(adapter->pdev, WARN, "Too many slots per packet\n");
                return (EINVAL);
        }

        tx_ring = ctx->ring;

        req_id = tx_ring->free_tx_ids[ctx->nt];
        tx_info = &tx_ring->tx_buffer_info[req_id];
        tx_info->num_of_bufs = 0;
        tx_info->nm_info.sockets_used = 0;

        rc = ena_netmap_tx_map_slots(ctx, tx_info, &push_hdr, &header_len,
            &packet_len);
        if (unlikely(rc != 0)) {
                ena_log_nm(adapter->pdev, ERR, "Failed to map Tx slot\n");
                return (rc);
        }

        bzero(&ena_tx_ctx, sizeof(struct ena_com_tx_ctx));
        ena_tx_ctx.ena_bufs = tx_info->bufs;
        ena_tx_ctx.push_header = push_hdr;
        ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
        ena_tx_ctx.req_id = req_id;
        ena_tx_ctx.header_len = header_len;
        ena_tx_ctx.meta_valid = adapter->disable_meta_caching;

        /* There are no offloads, as netmap does not support them */

        if (tx_ring->acum_pkts == ENA_DB_THRESHOLD ||
            ena_com_is_doorbell_needed(ctx->io_sq, &ena_tx_ctx))
                ena_ring_tx_doorbell(tx_ring);

        rc = ena_com_prepare_tx(ctx->io_sq, &ena_tx_ctx, &nb_hw_desc);
        if (unlikely(rc != 0)) {
                if (likely(rc == ENA_COM_NO_MEM)) {
                        ena_log_nm(adapter->pdev, DBG,
                            "Tx ring[%d] is out of space\n", tx_ring->que->id);
                } else {
                        ena_log_nm(adapter->pdev, ERR,
                            "Failed to prepare Tx bufs\n");
                        ena_trigger_reset(adapter,
                            ENA_REGS_RESET_DRIVER_INVALID_STATE);
                }
                counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);

                ena_netmap_unmap_last_socket_chain(ctx, tx_info);
                return (rc);
        }

        counter_enter();
        counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
        counter_u64_add_protected(tx_ring->tx_stats.bytes, packet_len);
        counter_u64_add_protected(adapter->hw_stats.tx_packets, 1);
        counter_u64_add_protected(adapter->hw_stats.tx_bytes, packet_len);
        counter_exit();

        tx_info->tx_descs = nb_hw_desc;

        ctx->nt = ENA_TX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);

        for (unsigned int i = 0; i < tx_info->num_of_bufs; i++)
                bus_dmamap_sync(adapter->tx_buf_tag,
                    tx_info->nm_info.map_seg[i], BUS_DMASYNC_PREWRITE);

        return (0);
}

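/* Count the slots occupied by the frame beginning at the current slot. */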
static inline uint16_t
ena_netmap_count_slots(struct ena_netmap_ctx *ctx)
{
        uint16_t slots = 1;
        uint16_t nm = ctx->nm_i;

        while ((ctx->slots[nm].flags & NS_MOREFRAG) != 0) {
                slots++;
                nm = nm_next(nm, ctx->lim);
        }

        return slots;
}

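/* Sum the lengths of all slots belonging to one NS_MOREFRAG chain. */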
static inline uint16_t
ena_netmap_packet_len(struct netmap_slot *slots, u_int slot_index,
    uint16_t limit)
{
        struct netmap_slot *nm_slot;
        uint16_t packet_size = 0;

        do {
                nm_slot = &slots[slot_index];
                packet_size += nm_slot->len;
                slot_index = nm_next(slot_index, limit);
        } while ((nm_slot->flags & NS_MOREFRAG) != 0);

        return packet_size;
}

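/*
 * Linearize up to bytes_to_copy bytes from a chain of netmap slots into
 * the destination buffer.
 */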
static int
ena_netmap_copy_data(struct netmap_adapter *na, struct netmap_slot *slots,
    u_int slot_index, uint16_t limit, uint16_t bytes_to_copy, void *destination)
{
        struct netmap_slot *nm_slot;
        void *slot_vaddr;
        uint16_t data_amount;

        do {
                nm_slot = &slots[slot_index];
                slot_vaddr = NMB(na, nm_slot);
                if (unlikely(slot_vaddr == NULL))
                        return (EINVAL);

                data_amount = min_t(uint16_t, bytes_to_copy, nm_slot->len);
                memcpy(destination, slot_vaddr, data_amount);
                bytes_to_copy -= data_amount;
                /* Advance the destination, the chain may span many slots. */
                destination = (char *)destination + data_amount;

                slot_index = nm_next(slot_index, limit);
        } while ((nm_slot->flags & NS_MOREFRAG) != 0 && bytes_to_copy > 0);

        return (0);
}

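/* Resolve a slot's virtual and physical address and load its DMA map. */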
static int
ena_netmap_map_single_slot(struct netmap_adapter *na, struct netmap_slot *slot,
    bus_dma_tag_t dmatag, bus_dmamap_t dmamap, void **vaddr, uint64_t *paddr)
{
        device_t pdev;
        int rc;

        pdev = ((struct ena_adapter *)if_getsoftc(na->ifp))->pdev;

        *vaddr = PNMB(na, slot, paddr);
        if (unlikely(*vaddr == NULL)) {
                ena_log_nm(pdev, ERR, "Slot address is NULL\n");
                return (EINVAL);
        }

        rc = netmap_load_map(na, dmatag, dmamap, *vaddr);
        if (unlikely(rc != 0)) {
                ena_log_nm(pdev, ERR, "Failed to map slot %d for DMA\n",
                    slot->buf_idx);
                return (EINVAL);
        }

        return (0);
}

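/*
 * Walk the slots of one frame and fill tx_info with DMA-mapped buffers.
 * In LLQ mode, the packet header (up to tx_max_header_size) is pushed to
 * the device memory, linearized through an intermediate buffer when it
 * spans more than one slot.
 */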
static int
ena_netmap_tx_map_slots(struct ena_netmap_ctx *ctx,
    struct ena_tx_buffer *tx_info, void **push_hdr, uint16_t *header_len,
    uint16_t *packet_len)
{
        struct netmap_slot *slot;
        struct ena_com_buf *ena_buf;
        struct ena_adapter *adapter;
        struct ena_ring *tx_ring;
        struct ena_netmap_tx_info *nm_info;
        bus_dmamap_t *nm_maps;
        void *vaddr;
        uint64_t paddr;
        uint32_t *nm_buf_idx;
        uint32_t slot_head_len;
        uint32_t frag_len;
        uint32_t remaining_len;
        uint16_t push_len;
        uint16_t delta;
        int rc;

        adapter = ctx->adapter;
        tx_ring = ctx->ring;
        ena_buf = tx_info->bufs;
        nm_info = &tx_info->nm_info;
        nm_maps = nm_info->map_seg;
        nm_buf_idx = nm_info->socket_buf_idx;
        slot = &ctx->slots[ctx->nm_i];

        slot_head_len = slot->len;
        *packet_len = ena_netmap_packet_len(ctx->slots, ctx->nm_i, ctx->lim);
        remaining_len = *packet_len;
        delta = 0;

        __builtin_prefetch(&ctx->slots[nm_next(ctx->nm_i, ctx->lim)]);
        if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
                /*
                 * When the device is in LLQ mode, the driver will copy the
                 * header into the device memory space. The ena_com layer
                 * assumes that the header is in a linear memory space.
                 * This assumption might be wrong, since part of the header
                 * can be in the fragmented buffers. First, check if the
                 * header fits in the first slot. If not, copy it to a
                 * separate buffer that will hold the linearized data.
                 */
                push_len = min_t(uint32_t, *packet_len,
                    tx_ring->tx_max_header_size);
                *header_len = push_len;
                /* If header is in linear space, just point to socket's data. */
                if (likely(push_len <= slot_head_len)) {
                        *push_hdr = NMB(ctx->na, slot);
                        if (unlikely(*push_hdr == NULL)) {
                                ena_log_nm(adapter->pdev, ERR,
                                    "Slot vaddress is NULL\n");
                                return (EINVAL);
                        }
                /*
                 * Otherwise, copy the whole header from multiple slots
                 * to the intermediate buffer.
                 */
                } else {
                        rc = ena_netmap_copy_data(ctx->na, ctx->slots,
                            ctx->nm_i, ctx->lim, push_len,
                            tx_ring->push_buf_intermediate_buf);
                        if (unlikely(rc)) {
                                ena_log_nm(adapter->pdev, ERR,
                                    "Failed to copy data from slots to push_buf\n");
                                return (EINVAL);
                        }

                        *push_hdr = tx_ring->push_buf_intermediate_buf;
                        counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1);

                        delta = push_len - slot_head_len;
                }

                ena_log_nm(adapter->pdev, DBG,
                    "slot: %d header_buf->vaddr: %p push_len: %d\n",
                    slot->buf_idx, *push_hdr, push_len);

                /*
                 * If the header was in linear memory space, map the rest
                 * of the data in the first slot for DMA.
                 */
                if (slot_head_len > push_len) {
                        rc = ena_netmap_map_single_slot(ctx->na, slot,
                            adapter->tx_buf_tag, *nm_maps, &vaddr, &paddr);
                        if (unlikely(rc != 0)) {
                                ena_log_nm(adapter->pdev, ERR,
                                    "DMA mapping error\n");
                                return (rc);
                        }
                        nm_maps++;

                        ena_buf->paddr = paddr + push_len;
                        ena_buf->len = slot->len - push_len;
                        ena_buf++;

                        tx_info->num_of_bufs++;
                }

                remaining_len -= slot->len;

                /* Save buf idx before advancing */
                *nm_buf_idx = slot->buf_idx;
                nm_buf_idx++;
                slot->buf_idx = 0;

                /* Advance to the next socket */
                ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
                slot = &ctx->slots[ctx->nm_i];
                nm_info->sockets_used++;

                /*
                 * If the header is in non-linear space (delta > 0), skip
                 * the slots that contain only header data and map the last
                 * one, which holds both header and packet data. The first
                 * segment is already counted in.
                 */
                while (delta > 0) {
                        __builtin_prefetch(&ctx->slots[nm_next(ctx->nm_i, ctx->lim)]);
                        frag_len = slot->len;

                        /*
                         * If the whole segment contains only header data,
                         * just move to the next one and reduce delta.
                         */
                        if (unlikely(delta >= frag_len)) {
                                delta -= frag_len;
                        } else {
                                /*
                                 * Map the data and then assign it with
                                 * the offsets.
                                 */
                                rc = ena_netmap_map_single_slot(ctx->na, slot,
                                    adapter->tx_buf_tag, *nm_maps, &vaddr,
                                    &paddr);
                                if (unlikely(rc != 0)) {
                                        ena_log_nm(adapter->pdev, ERR,
                                            "DMA mapping error\n");
                                        goto error_map;
                                }
                                nm_maps++;

                                ena_buf->paddr = paddr + delta;
                                ena_buf->len = slot->len - delta;
                                ena_buf++;

                                tx_info->num_of_bufs++;
                                delta = 0;
                        }

                        remaining_len -= slot->len;

                        /* Save buf idx before advancing */
                        *nm_buf_idx = slot->buf_idx;
                        nm_buf_idx++;
                        slot->buf_idx = 0;

                        /* Advance to the next socket */
                        ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
                        slot = &ctx->slots[ctx->nm_i];
                        nm_info->sockets_used++;
                }
        } else {
                *push_hdr = NULL;
                /*
                 * header_len is just a hint for the device. Because netmap
                 * does not give us any information about the packet header
                 * length and it is not guaranteed that all packet headers
                 * will be in the first slot, setting header_len to 0 makes
                 * the device ignore this value and resolve the header on
                 * its own.
                 */
                *header_len = 0;
        }

        /* Map all remaining data (regular routine for non-LLQ mode) */
        while (remaining_len > 0) {
                __builtin_prefetch(&ctx->slots[nm_next(ctx->nm_i, ctx->lim)]);

                rc = ena_netmap_map_single_slot(ctx->na, slot,
                    adapter->tx_buf_tag, *nm_maps, &vaddr, &paddr);
                if (unlikely(rc != 0)) {
                        ena_log_nm(adapter->pdev, ERR, "DMA mapping error\n");
                        goto error_map;
                }
                nm_maps++;

                ena_buf->paddr = paddr;
                ena_buf->len = slot->len;
                ena_buf++;

                tx_info->num_of_bufs++;

                remaining_len -= slot->len;

                /* Save buf idx before advancing */
                *nm_buf_idx = slot->buf_idx;
                nm_buf_idx++;
                slot->buf_idx = 0;

                /* Advance to the next socket */
                ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
                slot = &ctx->slots[ctx->nm_i];
                nm_info->sockets_used++;
        }

        return (0);

error_map:
        ena_netmap_unmap_last_socket_chain(ctx, tx_info);

        return (rc);
}

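/* Undo the work of ena_netmap_tx_map_slots() after a mapping failure. */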
static void
ena_netmap_unmap_last_socket_chain(struct ena_netmap_ctx *ctx,
    struct ena_tx_buffer *tx_info)
{
        struct ena_netmap_tx_info *nm_info;
        int n;

        nm_info = &tx_info->nm_info;

        /*
         * The number of sockets used can differ from the number of DMA
         * buffers (e.g. in LLQ mode), so they must be handled separately.
         * First, unmap the DMA maps.
         */
        n = tx_info->num_of_bufs;
        while (n--) {
                netmap_unload_map(ctx->na, ctx->adapter->tx_buf_tag,
                    nm_info->map_seg[n]);
        }
        tx_info->num_of_bufs = 0;

        /* Next, return the socket buffers back to userspace */
        n = nm_info->sockets_used;
        while (n--) {
                ctx->nm_i = nm_prev(ctx->nm_i, ctx->lim);
                ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n];
                ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED;
                nm_info->socket_buf_idx[n] = 0;
        }
        nm_info->sockets_used = 0;
}

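/*
 * Reclaim completed Tx requests: return the buffer indices to the
 * userspace ring, advance nr_hwtail and acknowledge the completions.
 */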
static void
ena_netmap_tx_cleanup(struct ena_netmap_ctx *ctx)
{
        struct ena_ring *tx_ring = ctx->ring;
        int rc;
        uint16_t req_id;
        uint16_t total_tx_descs = 0;

        ctx->nm_i = ctx->kring->nr_hwtail;
        ctx->nt = tx_ring->next_to_clean;

        /* Reclaim buffers for completed transmissions */
        do {
                rc = ena_com_tx_comp_req_id_get(ctx->io_cq, &req_id);
                if (unlikely(rc == ENA_COM_TRY_AGAIN))
                        break;

                rc = validate_tx_req_id(tx_ring, req_id, rc);
                if (unlikely(rc != 0))
                        break;

                total_tx_descs += ena_netmap_tx_clean_one(ctx, req_id);
        } while (1);

        ctx->kring->nr_hwtail = ctx->nm_i;

        if (total_tx_descs > 0) {
                /* acknowledge completion of sent packets */
                tx_ring->next_to_clean = ctx->nt;
                ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
        }
}

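/* Clean a single completed Tx request and return its descriptor count. */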
static uint16_t
ena_netmap_tx_clean_one(struct ena_netmap_ctx *ctx, uint16_t req_id)
{
        struct ena_tx_buffer *tx_info;
        struct ena_netmap_tx_info *nm_info;
        int n;

        tx_info = &ctx->ring->tx_buffer_info[req_id];
        nm_info = &tx_info->nm_info;

        /*
         * The number of sockets used can differ from the number of DMA
         * buffers (e.g. in LLQ mode), so they must be handled separately.
         * First, unmap the DMA maps.
         */
        for (n = 0; n < tx_info->num_of_bufs; n++) {
                netmap_unload_map(ctx->na, ctx->adapter->tx_buf_tag,
                    nm_info->map_seg[n]);
        }
        tx_info->num_of_bufs = 0;

        /* Next, return the socket buffers back to userspace */
        for (n = 0; n < nm_info->sockets_used; n++) {
                ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
                ENA_WARN(ctx->slots[ctx->nm_i].buf_idx != 0,
                    ctx->adapter->ena_dev, "Tx idx is not 0.\n");
                ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n];
                ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED;
                nm_info->socket_buf_idx[n] = 0;
        }
        nm_info->sockets_used = 0;

        ctx->ring->free_tx_ids[ctx->nt] = req_id;
        ctx->nt = ENA_TX_RING_IDX_NEXT(ctx->nt, ctx->lim);

        return tx_info->tx_descs;
}

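/* nm_rxsync callback: harvest received frames and refill free Rx slots. */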
static int
ena_netmap_rxsync(struct netmap_kring *kring, int flags)
{
        struct ena_netmap_ctx ctx;
        int rc;

        ena_netmap_fill_ctx(kring, &ctx, ENA_IO_RXQ_IDX(kring->ring_id));
        ctx.ring = &ctx.adapter->rx_ring[kring->ring_id];

        if (ctx.kring->rhead > ctx.lim) {
                /* Probably not needed to release slots from RX ring. */
                return (netmap_ring_reinit(ctx.kring));
        }

        if (unlikely((if_getdrvflags(ctx.na->ifp) & IFF_DRV_RUNNING) == 0))
                return (0);

        if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, ctx.adapter)))
                return (0);

        if ((rc = ena_netmap_rx_frames(&ctx)) != 0)
                return (rc);

        ena_netmap_rx_cleanup(&ctx);

        return (0);
}

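/*
 * Receive frames into the netmap ring until the device runs out of
 * completed packets, bounded by ENA_MAX_FRAMES as a safety limit.
 */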
static inline int
ena_netmap_rx_frames(struct ena_netmap_ctx *ctx)
{
        int rc = 0;
        int frames_counter = 0;

        ctx->nt = ctx->ring->next_to_clean;
        ctx->nm_i = ctx->kring->nr_hwtail;

        while ((rc = ena_netmap_rx_frame(ctx)) == ENA_NETMAP_MORE_FRAMES) {
                frames_counter++;
                /* Having more frames to process is not an error. */
                rc = 0;
                if (frames_counter > ENA_MAX_FRAMES) {
                        ena_log_nm(ctx->adapter->pdev, ERR,
                            "Driver is stuck in the Rx loop\n");
                        break;
                }
        }

        ctx->kring->nr_hwtail = ctx->nm_i;
        ctx->kring->nr_kflags &= ~NKR_PENDINTR;
        ctx->ring->next_to_clean = ctx->nt;

        return (rc);
}

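/*
 * Fetch one packet from the completion queue and attach its buffers to
 * consecutive netmap slots. On failure, the partially filled slots are
 * cleared and the adapter is reset.
 */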
static inline int
ena_netmap_rx_frame(struct ena_netmap_ctx *ctx)
{
        struct ena_com_rx_ctx ena_rx_ctx;
        enum ena_regs_reset_reason_types reset_reason;
        int rc, len = 0;
        uint16_t buf, nm;

        ena_rx_ctx.ena_bufs = ctx->ring->ena_bufs;
        ena_rx_ctx.max_bufs = ctx->adapter->max_rx_sgl_size;
        bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag,
            ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_POSTREAD);

        rc = ena_com_rx_pkt(ctx->io_cq, ctx->io_sq, &ena_rx_ctx);
        if (unlikely(rc != 0)) {
                ena_log_nm(ctx->adapter->pdev, ERR,
                    "Failed to read pkt from the device with error: %d\n", rc);
                if (rc == ENA_COM_NO_SPACE) {
                        counter_u64_add(ctx->ring->rx_stats.bad_desc_num, 1);
                        reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
                } else if (rc == ENA_COM_FAULT) {
                        reset_reason = ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED;
                } else {
                        counter_u64_add(ctx->ring->rx_stats.bad_req_id, 1);
                        reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
                }
                ena_trigger_reset(ctx->adapter, reset_reason);
                return (rc);
        }
        if (unlikely(ena_rx_ctx.descs == 0))
                return (ENA_NETMAP_NO_MORE_FRAMES);

        ena_log_nm(ctx->adapter->pdev, DBG,
            "Rx: q %d got packet from ena. descs #:"
            " %d l3 proto %d l4 proto %d hash: %x\n",
            ctx->ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
            ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

        for (buf = 0; buf < ena_rx_ctx.descs; buf++)
                if ((rc = ena_netmap_rx_load_desc(ctx, buf, &len)) != 0)
                        break;
        /*
         * ena_netmap_rx_load_desc() doesn't know the number of descriptors.
         * It just sets the NS_MOREFRAG flag on every slot, so the flag of
         * the last slot is cleared here.
         */
        ctx->slots[nm_prev(ctx->nm_i, ctx->lim)].flags &= ~NS_MOREFRAG;

        if (rc != 0) {
                goto rx_clear_desc;
        }

        bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag,
            ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_PREREAD);

        counter_enter();
        counter_u64_add_protected(ctx->ring->rx_stats.bytes, len);
        counter_u64_add_protected(ctx->adapter->hw_stats.rx_bytes, len);
        counter_u64_add_protected(ctx->ring->rx_stats.cnt, 1);
        counter_u64_add_protected(ctx->adapter->hw_stats.rx_packets, 1);
        counter_exit();

        return (ENA_NETMAP_MORE_FRAMES);

rx_clear_desc:
        nm = ctx->nm_i;

        /* Remove failed packet from ring */
        while (buf--) {
                ctx->slots[nm].flags = 0;
                ctx->slots[nm].len = 0;
                nm = nm_prev(nm, ctx->lim);
        }

        return (rc);
}

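/*
 * Hand a single Rx buffer over to the netmap ring: restore its buf_idx
 * into the current slot and recycle the request id.
 */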
static inline int
ena_netmap_rx_load_desc(struct ena_netmap_ctx *ctx, uint16_t buf, int *len)
{
        struct ena_rx_buffer *rx_info;
        uint16_t req_id;

        req_id = ctx->ring->ena_bufs[buf].req_id;
        rx_info = &ctx->ring->rx_buffer_info[req_id];
        bus_dmamap_sync(ctx->adapter->rx_buf_tag, rx_info->map,
            BUS_DMASYNC_POSTREAD);
        netmap_unload_map(ctx->na, ctx->adapter->rx_buf_tag, rx_info->map);

        ENA_WARN(ctx->slots[ctx->nm_i].buf_idx != 0, ctx->adapter->ena_dev,
            "Rx idx is not 0.\n");

        ctx->slots[ctx->nm_i].buf_idx = rx_info->netmap_buf_idx;
        rx_info->netmap_buf_idx = 0;
        /*
         * Set NS_MOREFRAG on all slots; ena_netmap_rx_frame() then clears
         * it from the last one.
         */
        ctx->slots[ctx->nm_i].flags |= NS_MOREFRAG | NS_BUF_CHANGED;
        ctx->slots[ctx->nm_i].len = ctx->ring->ena_bufs[buf].len;
        *len += ctx->slots[ctx->nm_i].len;
        ctx->ring->free_rx_ids[ctx->nt] = req_id;
        ena_log_nm(ctx->adapter->pdev, DBG,
            "rx_info %p, buf_idx %d, paddr %jx, nm: %d\n", rx_info,
            ctx->slots[ctx->nm_i].buf_idx, (uintmax_t)rx_info->ena_buf.paddr,
            ctx->nm_i);

        ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
        ctx->nt = ENA_RX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);

        return (0);
}

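/* Refill the Rx ring with as many buffers as userspace has released. */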
static inline void
ena_netmap_rx_cleanup(struct ena_netmap_ctx *ctx)
{
        int refill_required;

        refill_required = ctx->kring->rhead - ctx->kring->nr_hwcur;
        if (ctx->kring->nr_hwcur != ctx->kring->nr_hwtail)
                refill_required -= 1;

        if (refill_required == 0)
                return;
        else if (refill_required < 0)
                refill_required += ctx->kring->nkr_num_slots;

        ena_refill_rx_bufs(ctx->ring, refill_required);
}

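/* Initialize the per-call context shared by the txsync/rxsync helpers. */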
static inline void
ena_netmap_fill_ctx(struct netmap_kring *kring, struct ena_netmap_ctx *ctx,
    uint16_t ena_qid)
{
        ctx->kring = kring;
        ctx->na = kring->na;
        ctx->adapter = if_getsoftc(ctx->na->ifp);
        ctx->lim = kring->nkr_num_slots - 1;
        ctx->io_cq = &ctx->adapter->ena_dev->io_cq_queues[ena_qid];
        ctx->io_sq = &ctx->adapter->ena_dev->io_sq_queues[ena_qid];
        ctx->slots = kring->ring->slot;
}

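/*
 * Unload a Tx DMA map on behalf of the rest of the driver (exported, as
 * the non-netmap Tx cleanup path may own buffers that were mapped here).
 */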
void
ena_netmap_unload(struct ena_adapter *adapter, bus_dmamap_t map)
{
        struct netmap_adapter *na = NA(adapter->ifp);

        netmap_unload_map(na, adapter->tx_buf_tag, map);
}

#endif /* DEV_NETMAP */