Lines Matching +full:disable +full:- +full:eop

4  * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
23 * Maintained by: pv-drivers@vmware.com
53 * Enable/Disable the given intr
70 * Enable/Disable all intrs used by the device
77 for (i = 0; i < adapter->intr.num_intrs; i++) in vmxnet3_enable_all_intrs()
80 !adapter->queuesExtEnabled) { in vmxnet3_enable_all_intrs()
81 adapter->shared->devRead.intrConf.intrCtrl &= in vmxnet3_enable_all_intrs()
84 adapter->shared->devReadExt.intrConfExt.intrCtrl &= in vmxnet3_enable_all_intrs()
96 !adapter->queuesExtEnabled) { in vmxnet3_disable_all_intrs()
97 adapter->shared->devRead.intrConf.intrCtrl |= in vmxnet3_disable_all_intrs()
100 adapter->shared->devReadExt.intrConfExt.intrCtrl |= in vmxnet3_disable_all_intrs()
103 for (i = 0; i < adapter->intr.num_intrs; i++) in vmxnet3_disable_all_intrs()
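
The enable/disable loops above walk every allocated vector; the per-vector helpers they call are not among the matched lines. As a rough sketch for orientation (register macro and 8-byte stride assumed from the driver headers), each vector is unmasked or masked by writing its interrupt mask register in BAR0:

    /* Sketch only: one IMR register per vector, 0 = unmasked, 1 = masked. */
    static void vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
    {
            VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
    }

    static void vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
    {
            VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
    }
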
118 return tq->stopped; in vmxnet3_tq_stopped()
125 tq->stopped = false; in vmxnet3_tq_start()
126 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue); in vmxnet3_tq_start()
133 tq->stopped = false; in vmxnet3_tq_wake()
134 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue)); in vmxnet3_tq_wake()
141 tq->stopped = true; in vmxnet3_tq_stop()
142 tq->num_stop++; in vmxnet3_tq_stop()
143 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue)); in vmxnet3_tq_stop()
171 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_check_link()
174 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_check_link()
176 adapter->link_speed = ret >> 16; in vmxnet3_check_link()
178 netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n", in vmxnet3_check_link()
179 adapter->link_speed); in vmxnet3_check_link()
180 netif_carrier_on(adapter->netdev); in vmxnet3_check_link()
183 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_check_link()
184 vmxnet3_tq_start(&adapter->tx_queue[i], in vmxnet3_check_link()
188 netdev_info(adapter->netdev, "NIC Link is Down\n"); in vmxnet3_check_link()
189 netif_carrier_off(adapter->netdev); in vmxnet3_check_link()
192 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_check_link()
193 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter); in vmxnet3_check_link()
203 u32 events = le32_to_cpu(adapter->shared->ecr); in vmxnet3_process_events()
215 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_process_events()
218 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_process_events()
220 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_process_events()
221 if (adapter->tqd_start[i].status.stopped) in vmxnet3_process_events()
222 dev_err(&adapter->netdev->dev, in vmxnet3_process_events()
224 adapter->netdev->name, i, le32_to_cpu( in vmxnet3_process_events()
225 adapter->tqd_start[i].status.error)); in vmxnet3_process_events()
226 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_process_events()
227 if (adapter->rqd_start[i].status.stopped) in vmxnet3_process_events()
228 dev_err(&adapter->netdev->dev, in vmxnet3_process_events()
230 adapter->netdev->name, i, in vmxnet3_process_events()
231 adapter->rqd_start[i].status.error); in vmxnet3_process_events()
233 schedule_work(&adapter->work); in vmxnet3_process_events()
255 dstDesc->addr = le64_to_cpu(srcDesc->addr); in vmxnet3_RxDescToCPU()
257 dstDesc->ext1 = le32_to_cpu(srcDesc->ext1); in vmxnet3_RxDescToCPU()
268 for (i = 2; i > 0; i--) { in vmxnet3_TxDescToLe()
269 src--; in vmxnet3_TxDescToLe()
270 dst--; in vmxnet3_TxDescToLe()
294 u32 mask = ((1 << size) - 1) << pos; in get_bitfield32()
328 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
329 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
330 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
331 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
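
The mask computation shown for get_bitfield32() exists because, on big-endian hosts, the little-endian descriptor fields cannot be read through the plain C bitfield accessors used by the macros above; fields are extracted by shifting and masking instead. A minimal sketch of that helper, assuming the descriptor words are little-endian on the wire:

    /* Sketch: extract a 'size'-bit field at bit 'pos' from a little-endian
     * 32-bit descriptor word, independent of host endianness. */
    static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
    {
            u32 temp = le32_to_cpu(*bitfield);
            u32 mask = ((1 << size) - 1) << pos;

            temp &= mask;
            temp >>= pos;
            return temp;
    }
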
342 u32 map_type = tbi->map_type; in vmxnet3_unmap_tx_buf()
345 dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len, in vmxnet3_unmap_tx_buf()
348 dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len, in vmxnet3_unmap_tx_buf()
353 tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */ in vmxnet3_unmap_tx_buf()
367 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp); in vmxnet3_unmap_pkt()
368 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1); in vmxnet3_unmap_pkt()
370 tbi = &tq->buf_info[eop_idx]; in vmxnet3_unmap_pkt()
371 BUG_ON(!tbi->skb); in vmxnet3_unmap_pkt()
372 map_type = tbi->map_type; in vmxnet3_unmap_pkt()
373 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size); in vmxnet3_unmap_pkt()
375 while (tq->tx_ring.next2comp != eop_idx) { in vmxnet3_unmap_pkt()
376 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp, in vmxnet3_unmap_pkt()
381 * that the tx routine incorrectly re-queues a pkt due to in vmxnet3_unmap_pkt()
384 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); in vmxnet3_unmap_pkt()
389 xdp_return_frame_bulk(tbi->xdpf, bq); in vmxnet3_unmap_pkt()
391 dev_kfree_skb_any(tbi->skb); in vmxnet3_unmap_pkt()
394 tbi->skb = NULL; in vmxnet3_unmap_pkt()
411 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; in vmxnet3_tq_tx_complete()
412 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) { in vmxnet3_tq_tx_complete()
413 /* Prevent any &gdesc->tcd field from being (speculatively) in vmxnet3_tq_tx_complete()
414 * read before (&gdesc->tcd)->gen is read. in vmxnet3_tq_tx_complete()
419 &gdesc->tcd), tq, adapter->pdev, in vmxnet3_tq_tx_complete()
422 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring); in vmxnet3_tq_tx_complete()
423 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; in vmxnet3_tq_tx_complete()
429 spin_lock(&tq->tx_lock); in vmxnet3_tq_tx_complete()
431 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) > in vmxnet3_tq_tx_complete()
433 netif_carrier_ok(adapter->netdev))) { in vmxnet3_tq_tx_complete()
436 spin_unlock(&tq->tx_lock); in vmxnet3_tq_tx_complete()
453 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) { in vmxnet3_tq_cleanup()
456 tbi = tq->buf_info + tq->tx_ring.next2comp; in vmxnet3_tq_cleanup()
457 map_type = tbi->map_type; in vmxnet3_tq_cleanup()
459 vmxnet3_unmap_tx_buf(tbi, adapter->pdev); in vmxnet3_tq_cleanup()
460 if (tbi->skb) { in vmxnet3_tq_cleanup()
462 xdp_return_frame_bulk(tbi->xdpf, &bq); in vmxnet3_tq_cleanup()
464 dev_kfree_skb_any(tbi->skb); in vmxnet3_tq_cleanup()
465 tbi->skb = NULL; in vmxnet3_tq_cleanup()
467 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); in vmxnet3_tq_cleanup()
474 for (i = 0; i < tq->tx_ring.size; i++) in vmxnet3_tq_cleanup()
475 BUG_ON(tq->buf_info[i].map_type != VMXNET3_MAP_NONE); in vmxnet3_tq_cleanup()
477 tq->tx_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_cleanup()
478 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; in vmxnet3_tq_cleanup()
480 tq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_cleanup()
481 tq->comp_ring.next2proc = 0; in vmxnet3_tq_cleanup()
489 if (tq->tx_ring.base) { in vmxnet3_tq_destroy()
490 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size * in vmxnet3_tq_destroy()
492 tq->tx_ring.base, tq->tx_ring.basePA); in vmxnet3_tq_destroy()
493 tq->tx_ring.base = NULL; in vmxnet3_tq_destroy()
495 if (tq->data_ring.base) { in vmxnet3_tq_destroy()
496 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_tq_destroy()
497 tq->data_ring.size * tq->txdata_desc_size, in vmxnet3_tq_destroy()
498 tq->data_ring.base, tq->data_ring.basePA); in vmxnet3_tq_destroy()
499 tq->data_ring.base = NULL; in vmxnet3_tq_destroy()
501 if (tq->comp_ring.base) { in vmxnet3_tq_destroy()
502 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size * in vmxnet3_tq_destroy()
504 tq->comp_ring.base, tq->comp_ring.basePA); in vmxnet3_tq_destroy()
505 tq->comp_ring.base = NULL; in vmxnet3_tq_destroy()
507 kfree(tq->buf_info); in vmxnet3_tq_destroy()
508 tq->buf_info = NULL; in vmxnet3_tq_destroy()
518 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_tq_destroy_all()
519 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter); in vmxnet3_tq_destroy_all()
530 memset(tq->tx_ring.base, 0, tq->tx_ring.size * in vmxnet3_tq_init()
532 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; in vmxnet3_tq_init()
533 tq->tx_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_init()
535 memset(tq->data_ring.base, 0, in vmxnet3_tq_init()
536 tq->data_ring.size * tq->txdata_desc_size); in vmxnet3_tq_init()
539 memset(tq->comp_ring.base, 0, tq->comp_ring.size * in vmxnet3_tq_init()
541 tq->comp_ring.next2proc = 0; in vmxnet3_tq_init()
542 tq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_init()
545 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size); in vmxnet3_tq_init()
546 for (i = 0; i < tq->tx_ring.size; i++) in vmxnet3_tq_init()
547 tq->buf_info[i].map_type = VMXNET3_MAP_NONE; in vmxnet3_tq_init()
557 BUG_ON(tq->tx_ring.base || tq->data_ring.base || in vmxnet3_tq_create()
558 tq->comp_ring.base || tq->buf_info); in vmxnet3_tq_create()
560 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
561 tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc), in vmxnet3_tq_create()
562 &tq->tx_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
563 if (!tq->tx_ring.base) { in vmxnet3_tq_create()
564 netdev_err(adapter->netdev, "failed to allocate tx ring\n"); in vmxnet3_tq_create()
568 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
569 tq->data_ring.size * tq->txdata_desc_size, in vmxnet3_tq_create()
570 &tq->data_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
571 if (!tq->data_ring.base) { in vmxnet3_tq_create()
572 netdev_err(adapter->netdev, "failed to allocate tx data ring\n"); in vmxnet3_tq_create()
576 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
577 tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc), in vmxnet3_tq_create()
578 &tq->comp_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
579 if (!tq->comp_ring.base) { in vmxnet3_tq_create()
580 netdev_err(adapter->netdev, "failed to allocate tx comp ring\n"); in vmxnet3_tq_create()
584 tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]), in vmxnet3_tq_create()
586 dev_to_node(&adapter->pdev->dev)); in vmxnet3_tq_create()
587 if (!tq->buf_info) in vmxnet3_tq_create()
594 return -ENOMEM; in vmxnet3_tq_create()
602 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_tq_cleanup_all()
603 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter); in vmxnet3_tq_cleanup_all()
607 * starting from ring->next2fill, allocate rx buffers for the given ring
617 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx]; in vmxnet3_rq_alloc_rx_buf()
618 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx]; in vmxnet3_rq_alloc_rx_buf()
625 rbi = rbi_base + ring->next2fill; in vmxnet3_rq_alloc_rx_buf()
626 gd = ring->base + ring->next2fill; in vmxnet3_rq_alloc_rx_buf()
627 rbi->comp_state = VMXNET3_RXD_COMP_PENDING; in vmxnet3_rq_alloc_rx_buf()
629 if (rbi->buf_type == VMXNET3_RX_BUF_XDP) { in vmxnet3_rq_alloc_rx_buf()
630 void *data = vmxnet3_pp_get_buff(rq->page_pool, in vmxnet3_rq_alloc_rx_buf()
631 &rbi->dma_addr, in vmxnet3_rq_alloc_rx_buf()
634 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
637 rbi->page = virt_to_page(data); in vmxnet3_rq_alloc_rx_buf()
639 } else if (rbi->buf_type == VMXNET3_RX_BUF_SKB) { in vmxnet3_rq_alloc_rx_buf()
640 if (rbi->skb == NULL) { in vmxnet3_rq_alloc_rx_buf()
641 rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev, in vmxnet3_rq_alloc_rx_buf()
642 rbi->len, in vmxnet3_rq_alloc_rx_buf()
644 if (unlikely(rbi->skb == NULL)) { in vmxnet3_rq_alloc_rx_buf()
645 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
649 rbi->dma_addr = dma_map_single( in vmxnet3_rq_alloc_rx_buf()
650 &adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
651 rbi->skb->data, rbi->len, in vmxnet3_rq_alloc_rx_buf()
653 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
654 rbi->dma_addr)) { in vmxnet3_rq_alloc_rx_buf()
655 dev_kfree_skb_any(rbi->skb); in vmxnet3_rq_alloc_rx_buf()
656 rbi->skb = NULL; in vmxnet3_rq_alloc_rx_buf()
657 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
665 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE || in vmxnet3_rq_alloc_rx_buf()
666 rbi->len != PAGE_SIZE); in vmxnet3_rq_alloc_rx_buf()
668 if (rbi->page == NULL) { in vmxnet3_rq_alloc_rx_buf()
669 rbi->page = alloc_page(GFP_ATOMIC); in vmxnet3_rq_alloc_rx_buf()
670 if (unlikely(rbi->page == NULL)) { in vmxnet3_rq_alloc_rx_buf()
671 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
674 rbi->dma_addr = dma_map_page( in vmxnet3_rq_alloc_rx_buf()
675 &adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
676 rbi->page, 0, PAGE_SIZE, in vmxnet3_rq_alloc_rx_buf()
678 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
679 rbi->dma_addr)) { in vmxnet3_rq_alloc_rx_buf()
680 put_page(rbi->page); in vmxnet3_rq_alloc_rx_buf()
681 rbi->page = NULL; in vmxnet3_rq_alloc_rx_buf()
682 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
691 gd->rxd.addr = cpu_to_le64(rbi->dma_addr); in vmxnet3_rq_alloc_rx_buf()
692 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT) in vmxnet3_rq_alloc_rx_buf()
693 | val | rbi->len); in vmxnet3_rq_alloc_rx_buf()
698 rbi->comp_state = VMXNET3_RXD_COMP_DONE; in vmxnet3_rq_alloc_rx_buf()
702 gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT); in vmxnet3_rq_alloc_rx_buf()
707 netdev_dbg(adapter->netdev, in vmxnet3_rq_alloc_rx_buf()
709 num_allocated, ring->next2fill, ring->next2comp); in vmxnet3_rq_alloc_rx_buf()
712 BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp); in vmxnet3_rq_alloc_rx_buf()
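
Note how the fill loop writes the buffer address and length first and only then ORs in `ring->gen << VMXNET3_RXD_GEN_SHIFT`: flipping the generation bit is what hands the descriptor to the device, so it must happen last. A condensed sketch of that ordering (the write barrier is assumed; it is not among the matched lines):

    /* Sketch: publish an rx descriptor only after it is fully written. */
    gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
    gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
                               | val | rbi->len);   /* still the "old" gen */
    dma_wmb();                  /* order address/len before the gen flip */
    gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
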
722 skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags; in vmxnet3_append_frag()
724 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); in vmxnet3_append_frag()
726 skb_frag_fill_page_desc(frag, rbi->page, 0, rcd->len); in vmxnet3_append_frag()
727 skb->data_len += rcd->len; in vmxnet3_append_frag()
728 skb->truesize += PAGE_SIZE; in vmxnet3_append_frag()
729 skb_shinfo(skb)->nr_frags++; in vmxnet3_append_frag()
744 BUG_ON(ctx->copy_size > skb_headlen(skb)); in vmxnet3_map_pkt()
747 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
749 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
750 gdesc = ctx->sop_txd; /* both loops below can be skipped */ in vmxnet3_map_pkt()
753 if (ctx->copy_size) { in vmxnet3_map_pkt()
754 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA + in vmxnet3_map_pkt()
755 tq->tx_ring.next2fill * in vmxnet3_map_pkt()
756 tq->txdata_desc_size); in vmxnet3_map_pkt()
757 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size); in vmxnet3_map_pkt()
758 ctx->sop_txd->dword[3] = 0; in vmxnet3_map_pkt()
760 tbi = tq->buf_info + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
761 tbi->map_type = VMXNET3_MAP_NONE; in vmxnet3_map_pkt()
763 netdev_dbg(adapter->netdev, in vmxnet3_map_pkt()
765 tq->tx_ring.next2fill, in vmxnet3_map_pkt()
766 le64_to_cpu(ctx->sop_txd->txd.addr), in vmxnet3_map_pkt()
767 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]); in vmxnet3_map_pkt()
768 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); in vmxnet3_map_pkt()
770 /* use the right gen for non-SOP desc */ in vmxnet3_map_pkt()
771 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
775 len = skb_headlen(skb) - ctx->copy_size; in vmxnet3_map_pkt()
776 buf_offset = ctx->copy_size; in vmxnet3_map_pkt()
788 tbi = tq->buf_info + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
789 tbi->map_type = VMXNET3_MAP_SINGLE; in vmxnet3_map_pkt()
790 tbi->dma_addr = dma_map_single(&adapter->pdev->dev, in vmxnet3_map_pkt()
791 skb->data + buf_offset, buf_size, in vmxnet3_map_pkt()
793 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) in vmxnet3_map_pkt()
794 return -EFAULT; in vmxnet3_map_pkt()
796 tbi->len = buf_size; in vmxnet3_map_pkt()
798 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
799 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); in vmxnet3_map_pkt()
801 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); in vmxnet3_map_pkt()
802 gdesc->dword[2] = cpu_to_le32(dw2); in vmxnet3_map_pkt()
803 gdesc->dword[3] = 0; in vmxnet3_map_pkt()
805 netdev_dbg(adapter->netdev, in vmxnet3_map_pkt()
807 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), in vmxnet3_map_pkt()
808 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); in vmxnet3_map_pkt()
809 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); in vmxnet3_map_pkt()
810 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
812 len -= buf_size; in vmxnet3_map_pkt()
816 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in vmxnet3_map_pkt()
817 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in vmxnet3_map_pkt()
823 tbi = tq->buf_info + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
831 tbi->map_type = VMXNET3_MAP_PAGE; in vmxnet3_map_pkt()
832 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, in vmxnet3_map_pkt()
835 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) in vmxnet3_map_pkt()
836 return -EFAULT; in vmxnet3_map_pkt()
838 tbi->len = buf_size; in vmxnet3_map_pkt()
840 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
841 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); in vmxnet3_map_pkt()
843 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); in vmxnet3_map_pkt()
844 gdesc->dword[2] = cpu_to_le32(dw2); in vmxnet3_map_pkt()
845 gdesc->dword[3] = 0; in vmxnet3_map_pkt()
847 netdev_dbg(adapter->netdev, in vmxnet3_map_pkt()
849 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), in vmxnet3_map_pkt()
850 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); in vmxnet3_map_pkt()
851 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); in vmxnet3_map_pkt()
852 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
854 len -= buf_size; in vmxnet3_map_pkt()
859 ctx->eop_txd = gdesc; in vmxnet3_map_pkt()
862 tbi->skb = skb; in vmxnet3_map_pkt()
863 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base; in vmxnet3_map_pkt()
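
vmxnet3_map_pkt() applies the same generation trick on the transmit side: the SOP descriptor is first written with the inverted generation (`tq->tx_ring.gen ^ 0x1` above), so the device ignores the chain while the linear head and the page fragments are being mapped, and only the caller flips it back once the EOP descriptor is in place. Roughly (a sketch; the final flip is performed in vmxnet3_tq_xmit(), further down in this listing):

    /* Sketch: SOP descriptor carries the wrong gen while the chain is built. */
    dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
    ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
    /* ... map linear head and frags into the following descriptors ... */

    /* caller, after the EOP descriptor has been set up: */
    dma_wmb();                      /* make the whole chain visible first */
    gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
                                  VMXNET3_TXD_GEN);
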
875 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_tq_init_all()
876 vmxnet3_tq_init(&adapter->tx_queue[i], adapter); in vmxnet3_tq_init_all()
887 * -1: error happens during parsing
893 * 2. ctx->copy_size is # of bytes copied
904 if (ctx->mss) { /* TSO */ in vmxnet3_parse_hdr()
905 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) { in vmxnet3_parse_hdr()
906 ctx->l4_offset = skb_inner_transport_offset(skb); in vmxnet3_parse_hdr()
907 ctx->l4_hdr_size = inner_tcp_hdrlen(skb); in vmxnet3_parse_hdr()
908 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size; in vmxnet3_parse_hdr()
910 ctx->l4_offset = skb_transport_offset(skb); in vmxnet3_parse_hdr()
911 ctx->l4_hdr_size = tcp_hdrlen(skb); in vmxnet3_parse_hdr()
912 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size; in vmxnet3_parse_hdr()
915 if (skb->ip_summed == CHECKSUM_PARTIAL) { in vmxnet3_parse_hdr()
918 * well as non-encap case in vmxnet3_parse_hdr()
920 ctx->l4_offset = skb_checksum_start_offset(skb); in vmxnet3_parse_hdr()
923 skb->encapsulation) { in vmxnet3_parse_hdr()
926 if (iph->version == 4) { in vmxnet3_parse_hdr()
927 protocol = iph->protocol; in vmxnet3_parse_hdr()
932 protocol = ipv6h->nexthdr; in vmxnet3_parse_hdr()
935 if (ctx->ipv4) { in vmxnet3_parse_hdr()
938 protocol = iph->protocol; in vmxnet3_parse_hdr()
939 } else if (ctx->ipv6) { in vmxnet3_parse_hdr()
943 protocol = ipv6h->nexthdr; in vmxnet3_parse_hdr()
949 ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) : in vmxnet3_parse_hdr()
953 ctx->l4_hdr_size = sizeof(struct udphdr); in vmxnet3_parse_hdr()
956 ctx->l4_hdr_size = 0; in vmxnet3_parse_hdr()
960 ctx->copy_size = min(ctx->l4_offset + in vmxnet3_parse_hdr()
961 ctx->l4_hdr_size, skb->len); in vmxnet3_parse_hdr()
963 ctx->l4_offset = 0; in vmxnet3_parse_hdr()
964 ctx->l4_hdr_size = 0; in vmxnet3_parse_hdr()
966 ctx->copy_size = min_t(unsigned int, in vmxnet3_parse_hdr()
967 tq->txdata_desc_size, in vmxnet3_parse_hdr()
971 if (skb->len <= VMXNET3_HDR_COPY_SIZE) in vmxnet3_parse_hdr()
972 ctx->copy_size = skb->len; in vmxnet3_parse_hdr()
975 if (unlikely(!pskb_may_pull(skb, ctx->copy_size))) in vmxnet3_parse_hdr()
979 if (unlikely(ctx->copy_size > tq->txdata_desc_size)) { in vmxnet3_parse_hdr()
980 tq->stats.oversized_hdr++; in vmxnet3_parse_hdr()
981 ctx->copy_size = 0; in vmxnet3_parse_hdr()
987 return -1; in vmxnet3_parse_hdr()
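
The comments above this function give its contract: a negative return means header parsing failed, otherwise ctx->copy_size tells the caller how many header bytes should be copied into the tx data ring. A hedged sketch of how the transmit path consumes that result (the drop label is illustrative; the stats counter matches the one seen further down in vmxnet3_tq_xmit()):

    /* Sketch of the caller's handling of vmxnet3_parse_hdr()'s result. */
    ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
    if (ret < 0) {
            tq->stats.drop_hdr_inspect_err++;
            goto drop_pkt;                          /* illustrative label */
    }
    /* on success, ctx.copy_size header bytes go into the data ring */
    vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
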
1007 tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base + in vmxnet3_copy_hdr()
1008 tq->tx_ring.next2fill * in vmxnet3_copy_hdr()
1009 tq->txdata_desc_size); in vmxnet3_copy_hdr()
1011 memcpy(tdd->data, skb->data, ctx->copy_size); in vmxnet3_copy_hdr()
1012 netdev_dbg(adapter->netdev, in vmxnet3_copy_hdr()
1014 ctx->copy_size, tq->tx_ring.next2fill); in vmxnet3_copy_hdr()
1025 if (iph->version == 4) { in vmxnet3_prepare_inner_tso()
1026 iph->check = 0; in vmxnet3_prepare_inner_tso()
1027 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, in vmxnet3_prepare_inner_tso()
1032 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0, in vmxnet3_prepare_inner_tso()
1043 if (ctx->ipv4) { in vmxnet3_prepare_tso()
1046 iph->check = 0; in vmxnet3_prepare_tso()
1047 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, in vmxnet3_prepare_tso()
1049 } else if (ctx->ipv6) { in vmxnet3_prepare_tso()
1059 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in txd_estimate()
1060 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in txd_estimate()
1074 * Side-effects:
1077 * 3. shared->txNumDeferred may be updated
1101 ctx.mss = skb_shinfo(skb)->gso_size; in vmxnet3_tq_xmit()
1106 tq->stats.drop_tso++; in vmxnet3_tq_xmit()
1109 tq->stats.copy_skb_header++; in vmxnet3_tq_xmit()
1116 tq->stats.drop_too_many_frags++; in vmxnet3_tq_xmit()
1119 tq->stats.linearized++; in vmxnet3_tq_xmit()
1124 tq->stats.drop_too_many_frags++; in vmxnet3_tq_xmit()
1128 if (skb->encapsulation) { in vmxnet3_tq_xmit()
1136 /* non-tso pkts must not use more than in vmxnet3_tq_xmit()
1140 tq->stats.drop_too_many_frags++; in vmxnet3_tq_xmit()
1143 tq->stats.linearized++; in vmxnet3_tq_xmit()
1157 tq->stats.drop_oversized_hdr++; in vmxnet3_tq_xmit()
1161 if (skb->ip_summed == CHECKSUM_PARTIAL) { in vmxnet3_tq_xmit()
1163 skb->csum_offset > in vmxnet3_tq_xmit()
1165 tq->stats.drop_oversized_hdr++; in vmxnet3_tq_xmit()
1171 tq->stats.drop_hdr_inspect_err++; in vmxnet3_tq_xmit()
1175 spin_lock_irqsave(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1177 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { in vmxnet3_tq_xmit()
1178 tq->stats.tx_ring_full++; in vmxnet3_tq_xmit()
1179 netdev_dbg(adapter->netdev, in vmxnet3_tq_xmit()
1181 " next2fill %u\n", adapter->netdev->name, in vmxnet3_tq_xmit()
1182 tq->tx_ring.next2comp, tq->tx_ring.next2fill); in vmxnet3_tq_xmit()
1185 spin_unlock_irqrestore(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1193 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter)) in vmxnet3_tq_xmit()
1196 /* setup the EOP desc */ in vmxnet3_tq_xmit()
1197 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP); in vmxnet3_tq_xmit()
1202 gdesc->dword[2] = ctx.sop_txd->dword[2]; in vmxnet3_tq_xmit()
1203 gdesc->dword[3] = ctx.sop_txd->dword[3]; in vmxnet3_tq_xmit()
1207 tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred); in vmxnet3_tq_xmit()
1209 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) { in vmxnet3_tq_xmit()
1210 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size; in vmxnet3_tq_xmit()
1212 gdesc->txd.om = VMXNET3_OM_TSO; in vmxnet3_tq_xmit()
1213 gdesc->txd.ext1 = 1; in vmxnet3_tq_xmit()
1215 gdesc->txd.om = VMXNET3_OM_ENCAP; in vmxnet3_tq_xmit()
1217 gdesc->txd.msscof = ctx.mss; in vmxnet3_tq_xmit()
1219 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) in vmxnet3_tq_xmit()
1220 gdesc->txd.oco = 1; in vmxnet3_tq_xmit()
1222 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size; in vmxnet3_tq_xmit()
1223 gdesc->txd.om = VMXNET3_OM_TSO; in vmxnet3_tq_xmit()
1224 gdesc->txd.msscof = ctx.mss; in vmxnet3_tq_xmit()
1226 num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss; in vmxnet3_tq_xmit()
1228 if (skb->ip_summed == CHECKSUM_PARTIAL) { in vmxnet3_tq_xmit()
1230 skb->encapsulation) { in vmxnet3_tq_xmit()
1231 gdesc->txd.hlen = ctx.l4_offset + in vmxnet3_tq_xmit()
1234 gdesc->txd.om = VMXNET3_OM_CSUM; in vmxnet3_tq_xmit()
1235 gdesc->txd.msscof = ctx.l4_offset + in vmxnet3_tq_xmit()
1236 skb->csum_offset; in vmxnet3_tq_xmit()
1237 gdesc->txd.ext1 = 1; in vmxnet3_tq_xmit()
1239 gdesc->txd.om = VMXNET3_OM_ENCAP; in vmxnet3_tq_xmit()
1240 gdesc->txd.msscof = 0; /* Reserved */ in vmxnet3_tq_xmit()
1243 gdesc->txd.hlen = ctx.l4_offset; in vmxnet3_tq_xmit()
1244 gdesc->txd.om = VMXNET3_OM_CSUM; in vmxnet3_tq_xmit()
1245 gdesc->txd.msscof = ctx.l4_offset + in vmxnet3_tq_xmit()
1246 skb->csum_offset; in vmxnet3_tq_xmit()
1249 gdesc->txd.om = 0; in vmxnet3_tq_xmit()
1250 gdesc->txd.msscof = 0; in vmxnet3_tq_xmit()
1254 le32_add_cpu(&tq->shared->txNumDeferred, num_pkts); in vmxnet3_tq_xmit()
1258 gdesc->txd.ti = 1; in vmxnet3_tq_xmit()
1259 gdesc->txd.tci = skb_vlan_tag_get(skb); in vmxnet3_tq_xmit()
1262 /* Ensure that the write to (&gdesc->txd)->gen will be observed after in vmxnet3_tq_xmit()
1263 * all other writes to &gdesc->txd. in vmxnet3_tq_xmit()
1268 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^ in vmxnet3_tq_xmit()
1278 netdev_dbg(adapter->netdev, in vmxnet3_tq_xmit()
1280 (u32)(ctx.sop_txd - in vmxnet3_tq_xmit()
1281 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr), in vmxnet3_tq_xmit()
1282 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3])); in vmxnet3_tq_xmit()
1284 spin_unlock_irqrestore(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1286 if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) { in vmxnet3_tq_xmit()
1287 tq->shared->txNumDeferred = 0; in vmxnet3_tq_xmit()
1289 adapter->tx_prod_offset + tq->qid * 8, in vmxnet3_tq_xmit()
1290 tq->tx_ring.next2fill); in vmxnet3_tq_xmit()
1296 spin_unlock_irqrestore(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1298 tq->stats.drop_total++; in vmxnet3_tq_xmit()
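
The txNumDeferred/txThreshold pair seen above implements doorbell batching: each transmit only bumps a deferred-packet count in shared memory, and the TXPROD register is written (waking the device) once that count reaches the threshold the device advertised. Condensed sketch, with the BAR0 write macro assumed from the driver headers:

    /* Sketch: ring the tx doorbell only every txThreshold packets. */
    le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
    if (le32_to_cpu(tq->shared->txNumDeferred) >=
        le32_to_cpu(tq->shared->txThreshold)) {
            tq->shared->txNumDeferred = 0;
            VMXNET3_WRITE_BAR0_REG(adapter,
                                   adapter->tx_prod_offset + tq->qid * 8,
                                   tq->tx_ring.next2fill);
    }
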
1313 .dev = &adapter->pdev->dev, in vmxnet3_create_pp()
1325 err = xdp_rxq_info_reg(&rq->xdp_rxq, adapter->netdev, rq->qid, in vmxnet3_create_pp()
1326 rq->napi.napi_id); in vmxnet3_create_pp()
1330 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, pp); in vmxnet3_create_pp()
1334 rq->page_pool = pp; in vmxnet3_create_pp()
1339 xdp_rxq_info_unreg(&rq->xdp_rxq); in vmxnet3_create_pp()
1356 *dma_addr = page_pool_get_dma_addr(page) + pp->p.offset; in vmxnet3_pp_get_buff()
1366 BUG_ON(skb->queue_mapping > adapter->num_tx_queues); in vmxnet3_xmit_frame()
1368 &adapter->tx_queue[skb->queue_mapping], in vmxnet3_xmit_frame()
1378 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) { in vmxnet3_rx_csum()
1379 if (gdesc->rcd.v4 && in vmxnet3_rx_csum()
1380 (le32_to_cpu(gdesc->dword[3]) & in vmxnet3_rx_csum()
1382 skb->ip_summed = CHECKSUM_UNNECESSARY; in vmxnet3_rx_csum()
1383 if ((le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rx_csum()
1385 skb->csum_level = 1; in vmxnet3_rx_csum()
1387 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) && in vmxnet3_rx_csum()
1388 !(le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rx_csum()
1390 WARN_ON_ONCE(gdesc->rcd.frg && in vmxnet3_rx_csum()
1391 !(le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rx_csum()
1393 } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) & in vmxnet3_rx_csum()
1395 skb->ip_summed = CHECKSUM_UNNECESSARY; in vmxnet3_rx_csum()
1396 if ((le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rx_csum()
1398 skb->csum_level = 1; in vmxnet3_rx_csum()
1400 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) && in vmxnet3_rx_csum()
1401 !(le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rx_csum()
1403 WARN_ON_ONCE(gdesc->rcd.frg && in vmxnet3_rx_csum()
1404 !(le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rx_csum()
1407 if (gdesc->rcd.csum) { in vmxnet3_rx_csum()
1408 skb->csum = htons(gdesc->rcd.csum); in vmxnet3_rx_csum()
1409 skb->ip_summed = CHECKSUM_PARTIAL; in vmxnet3_rx_csum()
1424 rq->stats.drop_err++; in vmxnet3_rx_error()
1425 if (!rcd->fcs) in vmxnet3_rx_error()
1426 rq->stats.drop_fcs++; in vmxnet3_rx_error()
1428 rq->stats.drop_total++; in vmxnet3_rx_error()
1437 * ctx->skb may be NULL if this is the first and the only one in vmxnet3_rx_error()
1440 if (ctx->skb) in vmxnet3_rx_error()
1441 dev_kfree_skb_irq(ctx->skb); in vmxnet3_rx_error()
1443 ctx->skb = NULL; in vmxnet3_rx_error()
1460 BUG_ON(gdesc->rcd.tcp == 0); in vmxnet3_get_hdr_len()
1466 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || in vmxnet3_get_hdr_len()
1467 skb->protocol == cpu_to_be16(ETH_P_8021AD)) in vmxnet3_get_hdr_len()
1473 if (gdesc->rcd.v4) { in vmxnet3_get_hdr_len()
1474 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) && in vmxnet3_get_hdr_len()
1475 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP)); in vmxnet3_get_hdr_len()
1477 BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP); in vmxnet3_get_hdr_len()
1478 hlen = hdr.ipv4->ihl << 2; in vmxnet3_get_hdr_len()
1479 hdr.ptr += hdr.ipv4->ihl << 2; in vmxnet3_get_hdr_len()
1480 } else if (gdesc->rcd.v6) { in vmxnet3_get_hdr_len()
1481 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) && in vmxnet3_get_hdr_len()
1482 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6)); in vmxnet3_get_hdr_len()
1487 if (hdr.ipv6->nexthdr != IPPROTO_TCP) in vmxnet3_get_hdr_len()
1492 /* Non-IP pkt, dont estimate header length */ in vmxnet3_get_hdr_len()
1499 return (hlen + (hdr.tcp->doff << 2)); in vmxnet3_get_hdr_len()
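
The `<< 2` shifts above convert the IPv4 ihl and TCP doff fields, both counted in 32-bit words, into byte lengths. For an options-free TCP/IPv4 packet, ihl = 5 gives a 20-byte IP header and doff = 5 a 20-byte TCP header. Purely illustrative arithmetic:

    /* Illustration of the header-length arithmetic used above. */
    hlen  = sizeof(struct ethhdr);          /* 14 bytes of L2               */
    hlen += hdr.ipv4->ihl << 2;             /* ihl in 4-byte words: 5 -> 20 */
    hlen += hdr.tcp->doff << 2;             /* doff likewise:       5 -> 20 */
    /* -> 54 bytes of L2+L3+L4 header for a plain TCP/IPv4 packet */
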
1507 adapter->rx_prod_offset, adapter->rx_prod2_offset in vmxnet3_rq_rx_complete()
1513 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; in vmxnet3_rq_rx_complete()
1522 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, in vmxnet3_rq_rx_complete()
1524 while (rcd->gen == rq->comp_ring.gen) { in vmxnet3_rq_rx_complete()
1534 /* we may stop even before we see the EOP desc of in vmxnet3_rq_rx_complete()
1541 * rcd->gen is read. in vmxnet3_rq_rx_complete()
1545 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 && in vmxnet3_rq_rx_complete()
1546 rcd->rqID != rq->dataRingQid); in vmxnet3_rq_rx_complete()
1547 idx = rcd->rxdIdx; in vmxnet3_rq_rx_complete()
1548 ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID); in vmxnet3_rq_rx_complete()
1549 ring = rq->rx_ring + ring_idx; in vmxnet3_rq_rx_complete()
1550 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd, in vmxnet3_rq_rx_complete()
1552 rbi = rq->buf_info[ring_idx] + idx; in vmxnet3_rq_rx_complete()
1554 BUG_ON(rxd->addr != rbi->dma_addr || in vmxnet3_rq_rx_complete()
1555 rxd->len != rbi->len); in vmxnet3_rq_rx_complete()
1557 if (unlikely(rcd->eop && rcd->err)) { in vmxnet3_rq_rx_complete()
1562 if (rcd->sop && rcd->eop && vmxnet3_xdp_enabled(adapter)) { in vmxnet3_rq_rx_complete()
1566 if (VMXNET3_RX_DATA_RING(adapter, rcd->rqID)) { in vmxnet3_rq_rx_complete()
1567 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1571 if (rbi->buf_type != VMXNET3_RX_BUF_XDP) in vmxnet3_rq_rx_complete()
1577 ctx->skb = skb_xdp_pass; in vmxnet3_rq_rx_complete()
1580 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1587 if (rcd->sop) { /* first buf of the pkt */ in vmxnet3_rq_rx_complete()
1591 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD || in vmxnet3_rq_rx_complete()
1592 (rcd->rqID != rq->qid && in vmxnet3_rq_rx_complete()
1593 rcd->rqID != rq->dataRingQid)); in vmxnet3_rq_rx_complete()
1595 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB && in vmxnet3_rq_rx_complete()
1596 rbi->buf_type != VMXNET3_RX_BUF_XDP); in vmxnet3_rq_rx_complete()
1597 BUG_ON(ctx->skb != NULL || rbi->skb == NULL); in vmxnet3_rq_rx_complete()
1599 if (unlikely(rcd->len == 0)) { in vmxnet3_rq_rx_complete()
1601 BUG_ON(!(rcd->sop && rcd->eop)); in vmxnet3_rq_rx_complete()
1602 netdev_dbg(adapter->netdev, in vmxnet3_rq_rx_complete()
1609 ctx->skb = rbi->skb; in vmxnet3_rq_rx_complete()
1612 VMXNET3_RX_DATA_RING(adapter, rcd->rqID); in vmxnet3_rq_rx_complete()
1613 len = rxDataRingUsed ? rcd->len : rbi->len; in vmxnet3_rq_rx_complete()
1620 sz = rcd->rxdIdx * rq->data_ring.desc_size; in vmxnet3_rq_rx_complete()
1622 &rq->data_ring.base[sz], in vmxnet3_rq_rx_complete()
1623 rcd->len, in vmxnet3_rq_rx_complete()
1626 ctx->skb = skb_xdp_pass; in vmxnet3_rq_rx_complete()
1633 new_skb = netdev_alloc_skb_ip_align(adapter->netdev, in vmxnet3_rq_rx_complete()
1639 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_rx_complete()
1640 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1641 rq->stats.drop_total++; in vmxnet3_rq_rx_complete()
1646 if (rxDataRingUsed && adapter->rxdataring_enabled) { in vmxnet3_rq_rx_complete()
1649 BUG_ON(rcd->len > rq->data_ring.desc_size); in vmxnet3_rq_rx_complete()
1651 ctx->skb = new_skb; in vmxnet3_rq_rx_complete()
1652 sz = rcd->rxdIdx * rq->data_ring.desc_size; in vmxnet3_rq_rx_complete()
1653 memcpy(new_skb->data, in vmxnet3_rq_rx_complete()
1654 &rq->data_ring.base[sz], rcd->len); in vmxnet3_rq_rx_complete()
1656 ctx->skb = rbi->skb; in vmxnet3_rq_rx_complete()
1659 dma_map_single(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1660 new_skb->data, rbi->len, in vmxnet3_rq_rx_complete()
1662 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1669 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_rx_complete()
1670 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1671 rq->stats.drop_total++; in vmxnet3_rq_rx_complete()
1676 dma_unmap_single(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1677 rbi->dma_addr, in vmxnet3_rq_rx_complete()
1678 rbi->len, in vmxnet3_rq_rx_complete()
1682 rbi->skb = new_skb; in vmxnet3_rq_rx_complete()
1683 rbi->dma_addr = new_dma_addr; in vmxnet3_rq_rx_complete()
1684 rxd->addr = cpu_to_le64(rbi->dma_addr); in vmxnet3_rq_rx_complete()
1685 rxd->len = rbi->len; in vmxnet3_rq_rx_complete()
1688 skb_record_rx_queue(ctx->skb, rq->qid); in vmxnet3_rq_rx_complete()
1689 skb_put(ctx->skb, rcd->len); in vmxnet3_rq_rx_complete()
1692 rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) { in vmxnet3_rq_rx_complete()
1699 segCnt = rcdlro->segCnt; in vmxnet3_rq_rx_complete()
1701 mss = rcdlro->mss; in vmxnet3_rq_rx_complete()
1704 encap_lro = (le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rq_rx_complete()
1710 BUG_ON(ctx->skb == NULL && !skip_page_frags); in vmxnet3_rq_rx_complete()
1713 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE); in vmxnet3_rq_rx_complete()
1714 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY); in vmxnet3_rq_rx_complete()
1717 * following non-sop fragments. They will be reused. in vmxnet3_rq_rx_complete()
1722 if (rcd->len) { in vmxnet3_rq_rx_complete()
1727 * processing all the following non-sop frags. in vmxnet3_rq_rx_complete()
1730 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_rx_complete()
1731 dev_kfree_skb(ctx->skb); in vmxnet3_rq_rx_complete()
1732 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1736 new_dma_addr = dma_map_page(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1740 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1743 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_rx_complete()
1744 dev_kfree_skb(ctx->skb); in vmxnet3_rq_rx_complete()
1745 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1750 dma_unmap_page(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1751 rbi->dma_addr, rbi->len, in vmxnet3_rq_rx_complete()
1754 vmxnet3_append_frag(ctx->skb, rcd, rbi); in vmxnet3_rq_rx_complete()
1757 rbi->page = new_page; in vmxnet3_rq_rx_complete()
1758 rbi->dma_addr = new_dma_addr; in vmxnet3_rq_rx_complete()
1759 rxd->addr = cpu_to_le64(rbi->dma_addr); in vmxnet3_rq_rx_complete()
1760 rxd->len = rbi->len; in vmxnet3_rq_rx_complete()
1766 skb = ctx->skb; in vmxnet3_rq_rx_complete()
1767 if (rcd->eop) { in vmxnet3_rq_rx_complete()
1768 u32 mtu = adapter->netdev->mtu; in vmxnet3_rq_rx_complete()
1769 skb->len += skb->data_len; in vmxnet3_rq_rx_complete()
1772 if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE && in vmxnet3_rq_rx_complete()
1773 (adapter->netdev->features & NETIF_F_RXHASH)) { in vmxnet3_rq_rx_complete()
1776 switch (rcd->rssType) { in vmxnet3_rq_rx_complete()
1792 le32_to_cpu(rcd->rssHash), in vmxnet3_rq_rx_complete()
1798 skb->protocol = eth_type_trans(skb, adapter->netdev); in vmxnet3_rq_rx_complete()
1799 if ((!rcd->tcp && !encap_lro) || in vmxnet3_rq_rx_complete()
1800 !(adapter->netdev->features & NETIF_F_LRO)) in vmxnet3_rq_rx_complete()
1804 skb_shinfo(skb)->gso_type = rcd->v4 ? in vmxnet3_rq_rx_complete()
1806 skb_shinfo(skb)->gso_size = mss; in vmxnet3_rq_rx_complete()
1807 skb_shinfo(skb)->gso_segs = segCnt; in vmxnet3_rq_rx_complete()
1808 } else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) { in vmxnet3_rq_rx_complete()
1816 skb_shinfo(skb)->gso_type = in vmxnet3_rq_rx_complete()
1817 rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; in vmxnet3_rq_rx_complete()
1819 skb_shinfo(skb)->gso_segs = segCnt; in vmxnet3_rq_rx_complete()
1820 skb_shinfo(skb)->gso_size = in vmxnet3_rq_rx_complete()
1821 DIV_ROUND_UP(skb->len - in vmxnet3_rq_rx_complete()
1824 skb_shinfo(skb)->gso_size = mtu - hlen; in vmxnet3_rq_rx_complete()
1828 if (unlikely(rcd->ts)) in vmxnet3_rq_rx_complete()
1829 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci); in vmxnet3_rq_rx_complete()
1832 if ((adapter->netdev->features & NETIF_F_LRO) && in vmxnet3_rq_rx_complete()
1833 !rq->shared->updateRxProd) in vmxnet3_rq_rx_complete()
1836 napi_gro_receive(&rq->napi, skb); in vmxnet3_rq_rx_complete()
1838 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1845 ring = rq->rx_ring + ring_idx; in vmxnet3_rq_rx_complete()
1846 rbi->comp_state = VMXNET3_RXD_COMP_DONE; in vmxnet3_rq_rx_complete()
1849 fill_offset = (idx > ring->next2fill ? 0 : ring->size) + in vmxnet3_rq_rx_complete()
1850 idx - ring->next2fill - 1; in vmxnet3_rq_rx_complete()
1851 if (!ring->isOutOfOrder || fill_offset >= comp_offset) in vmxnet3_rq_rx_complete()
1852 ring->next2comp = idx; in vmxnet3_rq_rx_complete()
1855 /* Ensure that the writes to rxd->gen bits will be observed in vmxnet3_rq_rx_complete()
1861 rbi = rq->buf_info[ring_idx] + ring->next2fill; in vmxnet3_rq_rx_complete()
1862 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_OOORX_COMP))) in vmxnet3_rq_rx_complete()
1865 /* ring0 Type1 buffers can get skipped; re-fill them */ in vmxnet3_rq_rx_complete()
1866 if (rbi->buf_type != VMXNET3_RX_BUF_SKB) in vmxnet3_rq_rx_complete()
1869 if (rbi->comp_state == VMXNET3_RXD_COMP_DONE) { in vmxnet3_rq_rx_complete()
1871 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd, in vmxnet3_rq_rx_complete()
1873 WARN_ON(!rxd->addr); in vmxnet3_rq_rx_complete()
1876 rxd->gen = ring->gen; in vmxnet3_rq_rx_complete()
1878 rbi->comp_state = VMXNET3_RXD_COMP_PENDING; in vmxnet3_rq_rx_complete()
1879 num_to_alloc--; in vmxnet3_rq_rx_complete()
1882 ring->isOutOfOrder = 1; in vmxnet3_rq_rx_complete()
1888 ring->isOutOfOrder = 0; in vmxnet3_rq_rx_complete()
1892 if (unlikely(rq->shared->updateRxProd) && (ring->next2fill & 0xf) == 0) { in vmxnet3_rq_rx_complete()
1894 rxprod_reg[ring_idx] + rq->qid * 8, in vmxnet3_rq_rx_complete()
1895 ring->next2fill); in vmxnet3_rq_rx_complete()
1898 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); in vmxnet3_rq_rx_complete()
1900 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp); in vmxnet3_rq_rx_complete()
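
The `(ring->next2fill & 0xf) == 0` test above is the receive-side counterpart of the tx doorbell batching: refilled descriptors are handed back to the device in batches of 16 by writing the ring's RXPROD register only on every sixteenth refill. Roughly (the BAR0 write macro is assumed):

    /* Sketch: update the rx producer register once per 16 refilled buffers. */
    if (unlikely(rq->shared->updateRxProd) && (ring->next2fill & 0xf) == 0) {
            VMXNET3_WRITE_BAR0_REG(adapter,
                                   rxprod_reg[ring_idx] + rq->qid * 8,
                                   ring->next2fill);
    }
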
1917 if (!rq->rx_ring[0].base) in vmxnet3_rq_cleanup()
1921 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) { in vmxnet3_rq_cleanup()
1927 rbi = &rq->buf_info[ring_idx][i]; in vmxnet3_rq_cleanup()
1929 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc); in vmxnet3_rq_cleanup()
1931 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && in vmxnet3_rq_cleanup()
1932 rbi->page && rbi->buf_type == VMXNET3_RX_BUF_XDP) { in vmxnet3_rq_cleanup()
1933 page_pool_recycle_direct(rq->page_pool, in vmxnet3_rq_cleanup()
1934 rbi->page); in vmxnet3_rq_cleanup()
1935 rbi->page = NULL; in vmxnet3_rq_cleanup()
1936 } else if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && in vmxnet3_rq_cleanup()
1937 rbi->skb) { in vmxnet3_rq_cleanup()
1938 dma_unmap_single(&adapter->pdev->dev, rxd->addr, in vmxnet3_rq_cleanup()
1939 rxd->len, DMA_FROM_DEVICE); in vmxnet3_rq_cleanup()
1940 dev_kfree_skb(rbi->skb); in vmxnet3_rq_cleanup()
1941 rbi->skb = NULL; in vmxnet3_rq_cleanup()
1942 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY && in vmxnet3_rq_cleanup()
1943 rbi->page) { in vmxnet3_rq_cleanup()
1944 dma_unmap_page(&adapter->pdev->dev, rxd->addr, in vmxnet3_rq_cleanup()
1945 rxd->len, DMA_FROM_DEVICE); in vmxnet3_rq_cleanup()
1946 put_page(rbi->page); in vmxnet3_rq_cleanup()
1947 rbi->page = NULL; in vmxnet3_rq_cleanup()
1951 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN; in vmxnet3_rq_cleanup()
1952 rq->rx_ring[ring_idx].next2fill = in vmxnet3_rq_cleanup()
1953 rq->rx_ring[ring_idx].next2comp = 0; in vmxnet3_rq_cleanup()
1956 rq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_rq_cleanup()
1957 rq->comp_ring.next2proc = 0; in vmxnet3_rq_cleanup()
1966 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_rq_cleanup_all()
1967 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter); in vmxnet3_rq_cleanup_all()
1968 rcu_assign_pointer(adapter->xdp_bpf_prog, NULL); in vmxnet3_rq_cleanup_all()
1980 if (rq->buf_info[i]) { in vmxnet3_rq_destroy()
1981 for (j = 0; j < rq->rx_ring[i].size; j++) in vmxnet3_rq_destroy()
1982 BUG_ON(rq->buf_info[i][j].page != NULL); in vmxnet3_rq_destroy()
1988 if (rq->rx_ring[i].base) { in vmxnet3_rq_destroy()
1989 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_rq_destroy()
1990 rq->rx_ring[i].size in vmxnet3_rq_destroy()
1992 rq->rx_ring[i].base, in vmxnet3_rq_destroy()
1993 rq->rx_ring[i].basePA); in vmxnet3_rq_destroy()
1994 rq->rx_ring[i].base = NULL; in vmxnet3_rq_destroy()
1998 if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) in vmxnet3_rq_destroy()
1999 xdp_rxq_info_unreg(&rq->xdp_rxq); in vmxnet3_rq_destroy()
2000 page_pool_destroy(rq->page_pool); in vmxnet3_rq_destroy()
2001 rq->page_pool = NULL; in vmxnet3_rq_destroy()
2003 if (rq->data_ring.base) { in vmxnet3_rq_destroy()
2004 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_rq_destroy()
2005 rq->rx_ring[0].size * rq->data_ring.desc_size, in vmxnet3_rq_destroy()
2006 rq->data_ring.base, rq->data_ring.basePA); in vmxnet3_rq_destroy()
2007 rq->data_ring.base = NULL; in vmxnet3_rq_destroy()
2010 if (rq->comp_ring.base) { in vmxnet3_rq_destroy()
2011 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size in vmxnet3_rq_destroy()
2013 rq->comp_ring.base, rq->comp_ring.basePA); in vmxnet3_rq_destroy()
2014 rq->comp_ring.base = NULL; in vmxnet3_rq_destroy()
2017 kfree(rq->buf_info[0]); in vmxnet3_rq_destroy()
2018 rq->buf_info[0] = NULL; in vmxnet3_rq_destroy()
2019 rq->buf_info[1] = NULL; in vmxnet3_rq_destroy()
2027 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_rq_destroy_all_rxdataring()
2028 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_rq_destroy_all_rxdataring()
2030 if (rq->data_ring.base) { in vmxnet3_rq_destroy_all_rxdataring()
2031 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_rq_destroy_all_rxdataring()
2032 (rq->rx_ring[0].size * in vmxnet3_rq_destroy_all_rxdataring()
2033 rq->data_ring.desc_size), in vmxnet3_rq_destroy_all_rxdataring()
2034 rq->data_ring.base, in vmxnet3_rq_destroy_all_rxdataring()
2035 rq->data_ring.basePA); in vmxnet3_rq_destroy_all_rxdataring()
2036 rq->data_ring.base = NULL; in vmxnet3_rq_destroy_all_rxdataring()
2037 rq->data_ring.desc_size = 0; in vmxnet3_rq_destroy_all_rxdataring()
2049 for (i = 0; i < rq->rx_ring[0].size; i++) { in vmxnet3_rq_init()
2052 if (i % adapter->rx_buf_per_pkt == 0) { in vmxnet3_rq_init()
2053 rq->buf_info[0][i].buf_type = vmxnet3_xdp_enabled(adapter) ? in vmxnet3_rq_init()
2056 rq->buf_info[0][i].len = adapter->skb_buf_size; in vmxnet3_rq_init()
2058 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE; in vmxnet3_rq_init()
2059 rq->buf_info[0][i].len = PAGE_SIZE; in vmxnet3_rq_init()
2062 for (i = 0; i < rq->rx_ring[1].size; i++) { in vmxnet3_rq_init()
2063 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE; in vmxnet3_rq_init()
2064 rq->buf_info[1][i].len = PAGE_SIZE; in vmxnet3_rq_init()
2069 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0; in vmxnet3_rq_init()
2071 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size * in vmxnet3_rq_init()
2073 rq->rx_ring[i].gen = VMXNET3_INIT_GEN; in vmxnet3_rq_init()
2074 rq->rx_ring[i].isOutOfOrder = 0; in vmxnet3_rq_init()
2078 rq->rx_ring[0].size + rq->rx_ring[1].size); in vmxnet3_rq_init()
2082 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1, in vmxnet3_rq_init()
2084 xdp_rxq_info_unreg(&rq->xdp_rxq); in vmxnet3_rq_init()
2085 page_pool_destroy(rq->page_pool); in vmxnet3_rq_init()
2086 rq->page_pool = NULL; in vmxnet3_rq_init()
2089 return -ENOMEM; in vmxnet3_rq_init()
2091 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter); in vmxnet3_rq_init()
2094 rq->comp_ring.next2proc = 0; in vmxnet3_rq_init()
2095 memset(rq->comp_ring.base, 0, rq->comp_ring.size * in vmxnet3_rq_init()
2097 rq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_rq_init()
2100 rq->rx_ctx.skb = NULL; in vmxnet3_rq_init()
2112 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_rq_init_all()
2113 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter); in vmxnet3_rq_init_all()
2115 dev_err(&adapter->netdev->dev, "%s: failed to " in vmxnet3_rq_init_all()
2117 adapter->netdev->name, i); in vmxnet3_rq_init_all()
2135 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc); in vmxnet3_rq_create()
2136 rq->rx_ring[i].base = dma_alloc_coherent( in vmxnet3_rq_create()
2137 &adapter->pdev->dev, sz, in vmxnet3_rq_create()
2138 &rq->rx_ring[i].basePA, in vmxnet3_rq_create()
2140 if (!rq->rx_ring[i].base) { in vmxnet3_rq_create()
2141 netdev_err(adapter->netdev, in vmxnet3_rq_create()
2147 if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) { in vmxnet3_rq_create()
2148 sz = rq->rx_ring[0].size * rq->data_ring.desc_size; in vmxnet3_rq_create()
2149 rq->data_ring.base = in vmxnet3_rq_create()
2150 dma_alloc_coherent(&adapter->pdev->dev, sz, in vmxnet3_rq_create()
2151 &rq->data_ring.basePA, in vmxnet3_rq_create()
2153 if (!rq->data_ring.base) { in vmxnet3_rq_create()
2154 netdev_err(adapter->netdev, in vmxnet3_rq_create()
2156 adapter->rxdataring_enabled = false; in vmxnet3_rq_create()
2159 rq->data_ring.base = NULL; in vmxnet3_rq_create()
2160 rq->data_ring.desc_size = 0; in vmxnet3_rq_create()
2163 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc); in vmxnet3_rq_create()
2164 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz, in vmxnet3_rq_create()
2165 &rq->comp_ring.basePA, in vmxnet3_rq_create()
2167 if (!rq->comp_ring.base) { in vmxnet3_rq_create()
2168 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n"); in vmxnet3_rq_create()
2172 bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size, in vmxnet3_rq_create()
2173 sizeof(rq->buf_info[0][0]), GFP_KERNEL, in vmxnet3_rq_create()
2174 dev_to_node(&adapter->pdev->dev)); in vmxnet3_rq_create()
2178 rq->buf_info[0] = bi; in vmxnet3_rq_create()
2179 rq->buf_info[1] = bi + rq->rx_ring[0].size; in vmxnet3_rq_create()
2185 return -ENOMEM; in vmxnet3_rq_create()
2194 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter); in vmxnet3_rq_create_all()
2196 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_rq_create_all()
2197 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter); in vmxnet3_rq_create_all()
2199 dev_err(&adapter->netdev->dev, in vmxnet3_rq_create_all()
2201 adapter->netdev->name, i); in vmxnet3_rq_create_all()
2206 if (!adapter->rxdataring_enabled) in vmxnet3_rq_create_all()
2222 if (unlikely(adapter->shared->ecr)) in vmxnet3_do_poll()
2224 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_do_poll()
2225 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter); in vmxnet3_do_poll()
2227 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_do_poll()
2228 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i], in vmxnet3_do_poll()
2241 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget); in vmxnet3_poll()
2245 vmxnet3_enable_all_intrs(rx_queue->adapter); in vmxnet3_poll()
2251 * NAPI polling function for MSI-X mode with multiple Rx queues
2260 struct vmxnet3_adapter *adapter = rq->adapter; in vmxnet3_poll_rx_only()
2266 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) { in vmxnet3_poll_rx_only()
2268 &adapter->tx_queue[rq - adapter->rx_queue]; in vmxnet3_poll_rx_only()
2276 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx); in vmxnet3_poll_rx_only()
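
Together with the MSI-X rx handler just below, this is the usual NAPI hand-off: the hard interrupt masks only its own vector and schedules the poll routine, and the poll routine re-arms that vector once it drains the completion ring within budget. The napi_complete_done() step is not among the matched lines; a compact sketch of the tail of the per-queue poll routine:

    /* Sketch: re-arm the vector only when the ring was drained under budget. */
    rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
    if (rxd_done < budget) {
            napi_complete_done(napi, rxd_done);
            vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
    }
    return rxd_done;
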
2293 struct vmxnet3_adapter *adapter = tq->adapter; in vmxnet3_msix_tx()
2295 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_msix_tx()
2296 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx); in vmxnet3_msix_tx()
2299 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) { in vmxnet3_msix_tx()
2301 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_msix_tx()
2302 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i]; in vmxnet3_msix_tx()
2308 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx); in vmxnet3_msix_tx()
2323 struct vmxnet3_adapter *adapter = rq->adapter; in vmxnet3_msix_rx()
2325 /* disable intr if needed */ in vmxnet3_msix_rx()
2326 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_msix_rx()
2327 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx); in vmxnet3_msix_rx()
2328 napi_schedule(&rq->napi); in vmxnet3_msix_rx()
2334 *----------------------------------------------------------------------------
2336 * vmxnet3_msix_event --
2343 *----------------------------------------------------------------------------
2352 /* disable intr if needed */ in vmxnet3_msix_event()
2353 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_msix_event()
2354 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx); in vmxnet3_msix_event()
2356 if (adapter->shared->ecr) in vmxnet3_msix_event()
2359 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx); in vmxnet3_msix_event()
2374 if (adapter->intr.type == VMXNET3_IT_INTX) { in vmxnet3_intr()
2382 /* disable intr if needed */ in vmxnet3_intr()
2383 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_intr()
2386 napi_schedule(&adapter->rx_queue[0].napi); in vmxnet3_intr()
2399 switch (adapter->intr.type) { in vmxnet3_netpoll()
2403 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_netpoll()
2404 vmxnet3_msix_rx(0, &adapter->rx_queue[i]); in vmxnet3_netpoll()
2410 vmxnet3_intr(0, adapter->netdev); in vmxnet3_netpoll()
2420 struct vmxnet3_intr *intr = &adapter->intr; in vmxnet3_request_irqs()
2425 if (adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_request_irqs()
2426 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_request_irqs()
2427 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) { in vmxnet3_request_irqs()
2428 sprintf(adapter->tx_queue[i].name, "%s-tx-%d", in vmxnet3_request_irqs()
2429 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2431 intr->msix_entries[vector].vector, in vmxnet3_request_irqs()
2433 adapter->tx_queue[i].name, in vmxnet3_request_irqs()
2434 &adapter->tx_queue[i]); in vmxnet3_request_irqs()
2436 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d", in vmxnet3_request_irqs()
2437 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2440 dev_err(&adapter->netdev->dev, in vmxnet3_request_irqs()
2443 adapter->tx_queue[i].name, err); in vmxnet3_request_irqs()
2449 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) { in vmxnet3_request_irqs()
2450 for (; i < adapter->num_tx_queues; i++) in vmxnet3_request_irqs()
2451 adapter->tx_queue[i].comp_ring.intr_idx in vmxnet3_request_irqs()
2456 adapter->tx_queue[i].comp_ring.intr_idx in vmxnet3_request_irqs()
2460 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) in vmxnet3_request_irqs()
2463 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_request_irqs()
2464 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) in vmxnet3_request_irqs()
2465 sprintf(adapter->rx_queue[i].name, "%s-rx-%d", in vmxnet3_request_irqs()
2466 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2468 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d", in vmxnet3_request_irqs()
2469 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2470 err = request_irq(intr->msix_entries[vector].vector, in vmxnet3_request_irqs()
2472 adapter->rx_queue[i].name, in vmxnet3_request_irqs()
2473 &(adapter->rx_queue[i])); in vmxnet3_request_irqs()
2475 netdev_err(adapter->netdev, in vmxnet3_request_irqs()
2478 adapter->rx_queue[i].name, err); in vmxnet3_request_irqs()
2482 adapter->rx_queue[i].comp_ring.intr_idx = vector++; in vmxnet3_request_irqs()
2485 sprintf(intr->event_msi_vector_name, "%s-event-%d", in vmxnet3_request_irqs()
2486 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2487 err = request_irq(intr->msix_entries[vector].vector, in vmxnet3_request_irqs()
2489 intr->event_msi_vector_name, adapter->netdev); in vmxnet3_request_irqs()
2490 intr->event_intr_idx = vector; in vmxnet3_request_irqs()
2492 } else if (intr->type == VMXNET3_IT_MSI) { in vmxnet3_request_irqs()
2493 adapter->num_rx_queues = 1; in vmxnet3_request_irqs()
2494 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, in vmxnet3_request_irqs()
2495 adapter->netdev->name, adapter->netdev); in vmxnet3_request_irqs()
2498 adapter->num_rx_queues = 1; in vmxnet3_request_irqs()
2499 err = request_irq(adapter->pdev->irq, vmxnet3_intr, in vmxnet3_request_irqs()
2500 IRQF_SHARED, adapter->netdev->name, in vmxnet3_request_irqs()
2501 adapter->netdev); in vmxnet3_request_irqs()
2505 intr->num_intrs = vector + 1; in vmxnet3_request_irqs()
2507 netdev_err(adapter->netdev, in vmxnet3_request_irqs()
2509 intr->type, err); in vmxnet3_request_irqs()
2512 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_request_irqs()
2513 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_request_irqs()
2514 rq->qid = i; in vmxnet3_request_irqs()
2515 rq->qid2 = i + adapter->num_rx_queues; in vmxnet3_request_irqs()
2516 rq->dataRingQid = i + 2 * adapter->num_rx_queues; in vmxnet3_request_irqs()
2520 for (i = 0; i < intr->num_intrs; i++) in vmxnet3_request_irqs()
2521 intr->mod_levels[i] = UPT1_IML_ADAPTIVE; in vmxnet3_request_irqs()
2522 if (adapter->intr.type != VMXNET3_IT_MSIX) { in vmxnet3_request_irqs()
2523 adapter->intr.event_intr_idx = 0; in vmxnet3_request_irqs()
2524 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_request_irqs()
2525 adapter->tx_queue[i].comp_ring.intr_idx = 0; in vmxnet3_request_irqs()
2526 adapter->rx_queue[0].comp_ring.intr_idx = 0; in vmxnet3_request_irqs()
2529 netdev_info(adapter->netdev, in vmxnet3_request_irqs()
2531 intr->type, intr->mask_mode, intr->num_intrs); in vmxnet3_request_irqs()
2541 struct vmxnet3_intr *intr = &adapter->intr; in vmxnet3_free_irqs()
2542 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0); in vmxnet3_free_irqs()
2544 switch (intr->type) { in vmxnet3_free_irqs()
2550 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) { in vmxnet3_free_irqs()
2551 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_free_irqs()
2552 free_irq(intr->msix_entries[vector++].vector, in vmxnet3_free_irqs()
2553 &(adapter->tx_queue[i])); in vmxnet3_free_irqs()
2554 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) in vmxnet3_free_irqs()
2559 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_free_irqs()
2560 free_irq(intr->msix_entries[vector++].vector, in vmxnet3_free_irqs()
2561 &(adapter->rx_queue[i])); in vmxnet3_free_irqs()
2564 free_irq(intr->msix_entries[vector].vector, in vmxnet3_free_irqs()
2565 adapter->netdev); in vmxnet3_free_irqs()
2566 BUG_ON(vector >= intr->num_intrs); in vmxnet3_free_irqs()
2571 free_irq(adapter->pdev->irq, adapter->netdev); in vmxnet3_free_irqs()
2574 free_irq(adapter->pdev->irq, adapter->netdev); in vmxnet3_free_irqs()
2585 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_restore_vlan()
2591 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) in vmxnet3_restore_vlan()
2601 if (!(netdev->flags & IFF_PROMISC)) { in vmxnet3_vlan_rx_add_vid()
2602 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_vlan_rx_add_vid()
2606 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_add_vid()
2609 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_add_vid()
2612 set_bit(vid, adapter->active_vlans); in vmxnet3_vlan_rx_add_vid()
2623 if (!(netdev->flags & IFF_PROMISC)) { in vmxnet3_vlan_rx_kill_vid()
2624 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_vlan_rx_kill_vid()
2628 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_kill_vid()
2631 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_kill_vid()
2634 clear_bit(vid, adapter->active_vlans); in vmxnet3_vlan_rx_kill_vid()
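
The vfTable referenced in the add/kill handlers above is a 4096-bit bitmap (one bit per VLAN ID) packed into u32 words, so adding or removing a VID before issuing the update-VLAN-filters command is plain bit arithmetic. A sketch of that bit math; the helper names here are illustrative, the driver itself wraps this in macros:

    /* Sketch: vfTable is a bitmap of 4096 VLAN IDs, 32 IDs per u32 word. */
    static inline void vftable_set(u32 *vfTable, u16 vid)
    {
            vfTable[vid >> 5] |= 1U << (vid & 31);      /* admit this VID  */
    }

    static inline void vftable_clear(u32 *vfTable, u16 vid)
    {
            vfTable[vid >> 5] &= ~(1U << (vid & 31));   /* filter this VID */
    }
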
2655 memcpy(buf + i++ * ETH_ALEN, ha->addr, in vmxnet3_copy_mc()
2669 &adapter->shared->devRead.rxFilterConf; in vmxnet3_set_mc()
2675 if (netdev->flags & IFF_PROMISC) { in vmxnet3_set_mc()
2676 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_set_mc()
2684 if (netdev->flags & IFF_BROADCAST) in vmxnet3_set_mc()
2687 if (netdev->flags & IFF_ALLMULTI) in vmxnet3_set_mc()
2695 rxConf->mfTableLen = cpu_to_le16(sz); in vmxnet3_set_mc()
2697 &adapter->pdev->dev, in vmxnet3_set_mc()
2701 if (!dma_mapping_error(&adapter->pdev->dev, in vmxnet3_set_mc()
2705 rxConf->mfTablePA = cpu_to_le64( in vmxnet3_set_mc()
2717 rxConf->mfTableLen = 0; in vmxnet3_set_mc()
2718 rxConf->mfTablePA = 0; in vmxnet3_set_mc()
2721 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_set_mc()
2722 if (new_mode != rxConf->rxMode) { in vmxnet3_set_mc()
2723 rxConf->rxMode = cpu_to_le32(new_mode); in vmxnet3_set_mc()
2732 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_set_mc()
2735 dma_unmap_single(&adapter->pdev->dev, new_table_pa, in vmxnet3_set_mc()
2736 rxConf->mfTableLen, DMA_TO_DEVICE); in vmxnet3_set_mc()
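
The multicast setup above builds a flat filter table of 6-byte MAC addresses laid end to end, with the table length being the address count times ETH_ALEN, and then DMA-maps it for the device. A stand-alone sketch of building such a table (names are illustrative):

/*
 * Sketch: concatenate MAC addresses into one buffer, as the multicast
 * filter table above does; the DMA mapping step is omitted here.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

static uint8_t *build_mc_table(const uint8_t (*addrs)[ETH_ALEN],
			       size_t count, size_t *len)
{
	uint8_t *buf = malloc(count * ETH_ALEN);
	size_t i;

	if (!buf)
		return NULL;
	for (i = 0; i < count; i++)
		memcpy(buf + i * ETH_ALEN, addrs[i], ETH_ALEN);
	*len = count * ETH_ALEN;
	return buf;
}

int main(void)
{
	const uint8_t mc[2][ETH_ALEN] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
	};
	size_t len;
	uint8_t *tbl = build_mc_table(mc, 2, &len);

	printf("table length: %zu bytes\n", len);
	free(tbl);
	return 0;
}
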
2745 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_rq_destroy_all()
2746 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter); in vmxnet3_rq_destroy_all()
2757 struct Vmxnet3_DriverShared *shared = adapter->shared; in vmxnet3_setup_driver_shared()
2758 struct Vmxnet3_DSDevRead *devRead = &shared->devRead; in vmxnet3_setup_driver_shared()
2759 struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt; in vmxnet3_setup_driver_shared()
2767 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC); in vmxnet3_setup_driver_shared()
2768 devRead->misc.driverInfo.version = cpu_to_le32( in vmxnet3_setup_driver_shared()
2770 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ? in vmxnet3_setup_driver_shared()
2772 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX; in vmxnet3_setup_driver_shared()
2773 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32( in vmxnet3_setup_driver_shared()
2774 *((u32 *)&devRead->misc.driverInfo.gos)); in vmxnet3_setup_driver_shared()
2775 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1); in vmxnet3_setup_driver_shared()
2776 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1); in vmxnet3_setup_driver_shared()
2778 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa); in vmxnet3_setup_driver_shared()
2779 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter)); in vmxnet3_setup_driver_shared()
2782 if (adapter->netdev->features & NETIF_F_RXCSUM) in vmxnet3_setup_driver_shared()
2783 devRead->misc.uptFeatures |= UPT1_F_RXCSUM; in vmxnet3_setup_driver_shared()
2785 if (adapter->netdev->features & NETIF_F_LRO) { in vmxnet3_setup_driver_shared()
2786 devRead->misc.uptFeatures |= UPT1_F_LRO; in vmxnet3_setup_driver_shared()
2787 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); in vmxnet3_setup_driver_shared()
2789 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) in vmxnet3_setup_driver_shared()
2790 devRead->misc.uptFeatures |= UPT1_F_RXVLAN; in vmxnet3_setup_driver_shared()
2792 if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL | in vmxnet3_setup_driver_shared()
2794 devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD; in vmxnet3_setup_driver_shared()
2796 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); in vmxnet3_setup_driver_shared()
2797 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); in vmxnet3_setup_driver_shared()
2798 devRead->misc.queueDescLen = cpu_to_le32( in vmxnet3_setup_driver_shared()
2799 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) + in vmxnet3_setup_driver_shared()
2800 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc)); in vmxnet3_setup_driver_shared()
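
Every field written into the shared area above goes through a cpu_to_le32()/cpu_to_le64()-style conversion because the device consumes the structure in little-endian layout regardless of host byte order. A minimal user-space analogue, assuming a glibc-style <endian.h> (htole32/le32toh):

/*
 * Sketch: store fields little-endian for a consumer that expects LE,
 * read them back with the inverse conversion.  The struct here is an
 * illustrative stand-in, not the driver's shared layout.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct shared_misc {
	uint32_t mtu;		/* stored little-endian */
	uint64_t queue_desc_pa;	/* stored little-endian */
};

int main(void)
{
	struct shared_misc m;
	uint32_t host_mtu = 1500;
	uint64_t host_pa  = 0x12345678abcdULL;

	m.mtu = htole32(host_mtu);		/* like cpu_to_le32() */
	m.queue_desc_pa = htole64(host_pa);	/* like cpu_to_le64() */

	printf("mtu readback: %u\n", le32toh(m.mtu));
	printf("pa  readback: 0x%llx\n",
	       (unsigned long long)le64toh(m.queue_desc_pa));
	return 0;
}
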
2803 devRead->misc.numTxQueues = adapter->num_tx_queues; in vmxnet3_setup_driver_shared()
2804 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_setup_driver_shared()
2805 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; in vmxnet3_setup_driver_shared()
2806 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL); in vmxnet3_setup_driver_shared()
2807 tqc = &adapter->tqd_start[i].conf; in vmxnet3_setup_driver_shared()
2808 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA); in vmxnet3_setup_driver_shared()
2809 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA); in vmxnet3_setup_driver_shared()
2810 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA); in vmxnet3_setup_driver_shared()
2811 tqc->ddPA = cpu_to_le64(~0ULL); in vmxnet3_setup_driver_shared()
2812 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size); in vmxnet3_setup_driver_shared()
2813 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size); in vmxnet3_setup_driver_shared()
2814 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size); in vmxnet3_setup_driver_shared()
2815 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size); in vmxnet3_setup_driver_shared()
2816 tqc->ddLen = cpu_to_le32(0); in vmxnet3_setup_driver_shared()
2817 tqc->intrIdx = tq->comp_ring.intr_idx; in vmxnet3_setup_driver_shared()
2821 devRead->misc.numRxQueues = adapter->num_rx_queues; in vmxnet3_setup_driver_shared()
2822 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_setup_driver_shared()
2823 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_setup_driver_shared()
2824 rqc = &adapter->rqd_start[i].conf; in vmxnet3_setup_driver_shared()
2825 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA); in vmxnet3_setup_driver_shared()
2826 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA); in vmxnet3_setup_driver_shared()
2827 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA); in vmxnet3_setup_driver_shared()
2828 rqc->ddPA = cpu_to_le64(~0ULL); in vmxnet3_setup_driver_shared()
2829 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size); in vmxnet3_setup_driver_shared()
2830 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size); in vmxnet3_setup_driver_shared()
2831 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size); in vmxnet3_setup_driver_shared()
2832 rqc->ddLen = cpu_to_le32(0); in vmxnet3_setup_driver_shared()
2833 rqc->intrIdx = rq->comp_ring.intr_idx; in vmxnet3_setup_driver_shared()
2835 rqc->rxDataRingBasePA = in vmxnet3_setup_driver_shared()
2836 cpu_to_le64(rq->data_ring.basePA); in vmxnet3_setup_driver_shared()
2837 rqc->rxDataRingDescSize = in vmxnet3_setup_driver_shared()
2838 cpu_to_le16(rq->data_ring.desc_size); in vmxnet3_setup_driver_shared()
2843 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf)); in vmxnet3_setup_driver_shared()
2845 if (adapter->rss) { in vmxnet3_setup_driver_shared()
2846 struct UPT1_RSSConf *rssConf = adapter->rss_conf; in vmxnet3_setup_driver_shared()
2848 devRead->misc.uptFeatures |= UPT1_F_RSS; in vmxnet3_setup_driver_shared()
2849 devRead->misc.numRxQueues = adapter->num_rx_queues; in vmxnet3_setup_driver_shared()
2850 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 | in vmxnet3_setup_driver_shared()
2854 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ; in vmxnet3_setup_driver_shared()
2855 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE; in vmxnet3_setup_driver_shared()
2856 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE; in vmxnet3_setup_driver_shared()
2857 netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey)); in vmxnet3_setup_driver_shared()
2859 for (i = 0; i < rssConf->indTableSize; i++) in vmxnet3_setup_driver_shared()
2860 rssConf->indTable[i] = ethtool_rxfh_indir_default( in vmxnet3_setup_driver_shared()
2861 i, adapter->num_rx_queues); in vmxnet3_setup_driver_shared()
2863 devRead->rssConfDesc.confVer = 1; in vmxnet3_setup_driver_shared()
2864 devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf)); in vmxnet3_setup_driver_shared()
2865 devRead->rssConfDesc.confPA = in vmxnet3_setup_driver_shared()
2866 cpu_to_le64(adapter->rss_conf_pa); in vmxnet3_setup_driver_shared()
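
The RSS block above fills the indirection table with a round-robin default (the in-kernel ethtool_rxfh_indir_default() helper is simply index modulo the RX queue count) and a randomly generated Toeplitz key. A stand-alone approximation; the sizes below are local stand-ins and the key is only pseudo-random for demonstration:

/*
 * Sketch: round-robin RSS indirection table plus a random hash key,
 * mirroring the default fill shown above.  Constants are assumptions.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define IND_TABLE_SIZE	128
#define RSS_KEY_SIZE	40

int main(void)
{
	uint8_t ind_table[IND_TABLE_SIZE];
	uint8_t key[RSS_KEY_SIZE];
	unsigned int num_rx_queues = 4;
	unsigned int i;

	for (i = 0; i < IND_TABLE_SIZE; i++)
		ind_table[i] = i % num_rx_queues;	/* round-robin default */
	for (i = 0; i < RSS_KEY_SIZE; i++)
		key[i] = rand() & 0xff;			/* stand-in for a random key fill */

	printf("first 8 indirection entries:");
	for (i = 0; i < 8; i++)
		printf(" %u", ind_table[i]);
	printf("\nkey[0] = 0x%02x\n", key[0]);
	return 0;
}
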
2873 !adapter->queuesExtEnabled) { in vmxnet3_setup_driver_shared()
2874 devRead->intrConf.autoMask = adapter->intr.mask_mode == in vmxnet3_setup_driver_shared()
2876 devRead->intrConf.numIntrs = adapter->intr.num_intrs; in vmxnet3_setup_driver_shared()
2877 for (i = 0; i < adapter->intr.num_intrs; i++) in vmxnet3_setup_driver_shared()
2878 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i]; in vmxnet3_setup_driver_shared()
2880 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx; in vmxnet3_setup_driver_shared()
2881 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL); in vmxnet3_setup_driver_shared()
2883 devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode == in vmxnet3_setup_driver_shared()
2885 devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs; in vmxnet3_setup_driver_shared()
2886 for (i = 0; i < adapter->intr.num_intrs; i++) in vmxnet3_setup_driver_shared()
2887 devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i]; in vmxnet3_setup_driver_shared()
2889 devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx; in vmxnet3_setup_driver_shared()
2890 devReadExt->intrConfExt.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL); in vmxnet3_setup_driver_shared()
2894 devRead->rxFilterConf.rxMode = 0; in vmxnet3_setup_driver_shared()
2896 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr); in vmxnet3_setup_driver_shared()
2904 struct Vmxnet3_DriverShared *shared = adapter->shared; in vmxnet3_init_bufsize()
2905 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo; in vmxnet3_init_bufsize()
2911 cmdInfo->ringBufSize = adapter->ringBufSize; in vmxnet3_init_bufsize()
2912 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_init_bufsize()
2915 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_init_bufsize()
2921 struct Vmxnet3_DriverShared *shared = adapter->shared; in vmxnet3_init_coalesce()
2922 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo; in vmxnet3_init_coalesce()
2928 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_init_coalesce()
2929 cmdInfo->varConf.confVer = 1; in vmxnet3_init_coalesce()
2930 cmdInfo->varConf.confLen = in vmxnet3_init_coalesce()
2931 cpu_to_le32(sizeof(*adapter->coal_conf)); in vmxnet3_init_coalesce()
2932 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa); in vmxnet3_init_coalesce()
2934 if (adapter->default_coal_mode) { in vmxnet3_init_coalesce()
2942 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_init_coalesce()
2948 struct Vmxnet3_DriverShared *shared = adapter->shared; in vmxnet3_init_rssfields()
2949 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo; in vmxnet3_init_rssfields()
2955 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_init_rssfields()
2957 if (adapter->default_rss_fields) { in vmxnet3_init_rssfields()
2960 adapter->rss_fields = in vmxnet3_init_rssfields()
2964 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 || in vmxnet3_init_rssfields()
2965 adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) && in vmxnet3_init_rssfields()
2966 vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_init_rssfields()
2968 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS; in vmxnet3_init_rssfields()
2970 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS); in vmxnet3_init_rssfields()
2973 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) && in vmxnet3_init_rssfields()
2974 vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_init_rssfields()
2976 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4; in vmxnet3_init_rssfields()
2978 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4); in vmxnet3_init_rssfields()
2981 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) && in vmxnet3_init_rssfields()
2982 vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_init_rssfields()
2984 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6; in vmxnet3_init_rssfields()
2986 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6); in vmxnet3_init_rssfields()
2989 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]); in vmxnet3_init_rssfields()
2991 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_init_rssfields()
2993 cmdInfo->setRssFields = adapter->rss_fields; in vmxnet3_init_rssfields()
3001 adapter->rss_fields = in vmxnet3_init_rssfields()
3005 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_init_rssfields()
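
The negotiation above enables a capability bit only when the corresponding passthrough capability is advertised, clears it otherwise, and then treats whatever the device echoes back as the effective capability set. A stand-alone sketch of the set/clear-and-confirm bit manipulation (names are illustrative):

/*
 * Sketch: request a capability bit only if it is supported, otherwise
 * clear it, as in the dev_caps handling above.  The supported mask and
 * bit positions here are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

static int cap_supported(uint32_t supported, unsigned int cap_bit)
{
	return (supported >> cap_bit) & 1;
}

static uint32_t negotiate_cap(uint32_t dev_caps, uint32_t supported,
			      unsigned int cap_bit, int want)
{
	if (want && cap_supported(supported, cap_bit))
		dev_caps |= 1u << cap_bit;
	else
		dev_caps &= ~(1u << cap_bit);
	return dev_caps;
}

int main(void)
{
	uint32_t supported = (1u << 3) | (1u << 5);	/* pretend hardware caps */
	uint32_t dev_caps = 0;

	dev_caps = negotiate_cap(dev_caps, supported, 3, 1);	/* granted */
	dev_caps = negotiate_cap(dev_caps, supported, 4, 1);	/* refused */
	printf("dev_caps = 0x%x\n", dev_caps);
	return 0;
}
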
3015 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," in vmxnet3_activate_dev()
3016 " ring sizes %u %u %u\n", adapter->netdev->name, in vmxnet3_activate_dev()
3017 adapter->skb_buf_size, adapter->rx_buf_per_pkt, in vmxnet3_activate_dev()
3018 adapter->tx_queue[0].tx_ring.size, in vmxnet3_activate_dev()
3019 adapter->rx_queue[0].rx_ring[0].size, in vmxnet3_activate_dev()
3020 adapter->rx_queue[0].rx_ring[1].size); in vmxnet3_activate_dev()
3025 netdev_err(adapter->netdev, in vmxnet3_activate_dev()
3032 netdev_err(adapter->netdev, in vmxnet3_activate_dev()
3040 adapter->shared_pa)); in vmxnet3_activate_dev()
3042 adapter->shared_pa)); in vmxnet3_activate_dev()
3043 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_activate_dev()
3047 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_activate_dev()
3050 netdev_err(adapter->netdev, in vmxnet3_activate_dev()
3052 err = -EINVAL; in vmxnet3_activate_dev()
3060 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_activate_dev()
3062 adapter->rx_prod_offset + i * VMXNET3_REG_ALIGN, in vmxnet3_activate_dev()
3063 adapter->rx_queue[i].rx_ring[0].next2fill); in vmxnet3_activate_dev()
3064 VMXNET3_WRITE_BAR0_REG(adapter, (adapter->rx_prod2_offset + in vmxnet3_activate_dev()
3066 adapter->rx_queue[i].rx_ring[1].next2fill); in vmxnet3_activate_dev()
3070 vmxnet3_set_mc(adapter->netdev); in vmxnet3_activate_dev()
3077 netif_tx_wake_all_queues(adapter->netdev); in vmxnet3_activate_dev()
3078 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_activate_dev()
3079 napi_enable(&adapter->rx_queue[i].napi); in vmxnet3_activate_dev()
3081 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); in vmxnet3_activate_dev()
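
The activation sequence above hands the 64-bit shared-memory address to the device as two 32-bit register writes, low half then high half. A minimal stand-alone sketch of that split; the register names in the printout are illustrative assumptions:

/*
 * Sketch: split a 64-bit DMA address into two 32-bit halves, as the
 * shared_pa programming above does through two BAR1 writes.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t shared_pa = 0x0000123456789000ULL;
	uint32_t lo = (uint32_t)(shared_pa & 0xffffffffu);
	uint32_t hi = (uint32_t)(shared_pa >> 32);

	/* in the driver these would be two register writes */
	printf("low  half -> 0x%08x\n", lo);
	printf("high half -> 0x%08x\n", hi);
	return 0;
}
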
3100 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_reset_dev()
3102 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_reset_dev()
3111 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) in vmxnet3_quiesce_dev()
3115 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_quiesce_dev()
3118 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_quiesce_dev()
3121 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_quiesce_dev()
3122 napi_disable(&adapter->rx_queue[i].napi); in vmxnet3_quiesce_dev()
3123 netif_tx_disable(adapter->netdev); in vmxnet3_quiesce_dev()
3124 adapter->link_speed = 0; in vmxnet3_quiesce_dev()
3125 netif_carrier_off(adapter->netdev); in vmxnet3_quiesce_dev()
3153 dev_addr_set(netdev, addr->sa_data); in vmxnet3_set_mac_addr()
3154 vmxnet3_write_mac_addr(adapter, addr->sa_data); in vmxnet3_set_mac_addr()
3167 struct pci_dev *pdev = adapter->pdev; in vmxnet3_alloc_pci_resources()
3171 dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err); in vmxnet3_alloc_pci_resources()
3175 err = pci_request_selected_regions(pdev, (1 << 2) - 1, in vmxnet3_alloc_pci_resources()
3178 dev_err(&pdev->dev, in vmxnet3_alloc_pci_resources()
3187 adapter->hw_addr0 = ioremap(mmio_start, mmio_len); in vmxnet3_alloc_pci_resources()
3188 if (!adapter->hw_addr0) { in vmxnet3_alloc_pci_resources()
3189 dev_err(&pdev->dev, "Failed to map bar0\n"); in vmxnet3_alloc_pci_resources()
3190 err = -EIO; in vmxnet3_alloc_pci_resources()
3196 adapter->hw_addr1 = ioremap(mmio_start, mmio_len); in vmxnet3_alloc_pci_resources()
3197 if (!adapter->hw_addr1) { in vmxnet3_alloc_pci_resources()
3198 dev_err(&pdev->dev, "Failed to map bar1\n"); in vmxnet3_alloc_pci_resources()
3199 err = -EIO; in vmxnet3_alloc_pci_resources()
3205 iounmap(adapter->hw_addr0); in vmxnet3_alloc_pci_resources()
3207 pci_release_selected_regions(pdev, (1 << 2) - 1); in vmxnet3_alloc_pci_resources()
3217 BUG_ON(!adapter->pdev); in vmxnet3_free_pci_resources()
3219 iounmap(adapter->hw_addr0); in vmxnet3_free_pci_resources()
3220 iounmap(adapter->hw_addr1); in vmxnet3_free_pci_resources()
3221 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1); in vmxnet3_free_pci_resources()
3222 pci_disable_device(adapter->pdev); in vmxnet3_free_pci_resources()
3232 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE - in vmxnet3_adjust_rx_ring_size()
3234 adapter->skb_buf_size = adapter->netdev->mtu + in vmxnet3_adjust_rx_ring_size()
3236 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE) in vmxnet3_adjust_rx_ring_size()
3237 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE; in vmxnet3_adjust_rx_ring_size()
3239 adapter->rx_buf_per_pkt = 1; in vmxnet3_adjust_rx_ring_size()
3241 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE; in vmxnet3_adjust_rx_ring_size()
3242 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE + in vmxnet3_adjust_rx_ring_size()
3244 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE; in vmxnet3_adjust_rx_ring_size()
3247 adapter->skb_buf_size = min((int)adapter->netdev->mtu + VMXNET3_MAX_ETH_HDR_SIZE, in vmxnet3_adjust_rx_ring_size()
3249 adapter->rx_buf_per_pkt = 1; in vmxnet3_adjust_rx_ring_size()
3250 adapter->ringBufSize.ring1BufSizeType0 = cpu_to_le16(adapter->skb_buf_size); in vmxnet3_adjust_rx_ring_size()
3251 adapter->ringBufSize.ring1BufSizeType1 = 0; in vmxnet3_adjust_rx_ring_size()
3252 adapter->ringBufSize.ring2BufSizeType1 = cpu_to_le16(PAGE_SIZE); in vmxnet3_adjust_rx_ring_size()
3259 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; in vmxnet3_adjust_rx_ring_size()
3260 ring0_size = adapter->rx_queue[0].rx_ring[0].size; in vmxnet3_adjust_rx_ring_size()
3261 ring0_size = (ring0_size + sz - 1) / sz * sz; in vmxnet3_adjust_rx_ring_size()
3264 ring1_size = adapter->rx_queue[0].rx_ring[1].size; in vmxnet3_adjust_rx_ring_size()
3265 ring1_size = (ring1_size + sz - 1) / sz * sz; in vmxnet3_adjust_rx_ring_size()
3275 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_adjust_rx_ring_size()
3276 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_adjust_rx_ring_size()
3278 rq->rx_ring[0].size = ring0_size; in vmxnet3_adjust_rx_ring_size()
3279 rq->rx_ring[1].size = ring1_size; in vmxnet3_adjust_rx_ring_size()
3280 rq->comp_ring.size = comp_size; in vmxnet3_adjust_rx_ring_size()
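
The ring sizing above first derives how many receive buffers a single packet may need from the MTU (one skb buffer plus page-sized buffers for the remainder), then rounds each ring size up to a multiple of that count times the ring alignment. A stand-alone sketch of the arithmetic; the constants are local stand-ins, not the driver's:

/*
 * Sketch: MTU -> buffer split and ring-size rounding, mirroring the
 * computation shown above.  MAX_SKB_BUF, PAGE_SZ and RING_ALIGN are
 * example values.
 */
#include <stdio.h>

#define MAX_SKB_BUF	4096	/* stand-in single-buffer limit */
#define PAGE_SZ		4096
#define RING_ALIGN	32	/* stand-in ring size alignment */

static unsigned int round_up_to(unsigned int v, unsigned int mult)
{
	return (v + mult - 1) / mult * mult;
}

int main(void)
{
	unsigned int mtu = 9000, hdr = 22;	/* jumbo-frame example */
	unsigned int skb_buf_size, bufs_per_pkt, ring0;

	if (mtu + hdr <= MAX_SKB_BUF) {
		skb_buf_size = mtu + hdr;
		bufs_per_pkt = 1;
	} else {
		unsigned int rest = mtu - MAX_SKB_BUF + hdr;

		skb_buf_size = MAX_SKB_BUF;
		bufs_per_pkt = 1 + (rest + PAGE_SZ - 1) / PAGE_SZ;
	}

	ring0 = round_up_to(1024, bufs_per_pkt * RING_ALIGN);
	printf("skb_buf_size=%u bufs/pkt=%u ring0=%u\n",
	       skb_buf_size, bufs_per_pkt, ring0);
	return 0;
}
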
3292 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_create_queues()
3293 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; in vmxnet3_create_queues()
3294 tq->tx_ring.size = tx_ring_size; in vmxnet3_create_queues()
3295 tq->data_ring.size = tx_ring_size; in vmxnet3_create_queues()
3296 tq->comp_ring.size = tx_ring_size; in vmxnet3_create_queues()
3297 tq->txdata_desc_size = txdata_desc_size; in vmxnet3_create_queues()
3298 tq->shared = &adapter->tqd_start[i].ctrl; in vmxnet3_create_queues()
3299 tq->stopped = true; in vmxnet3_create_queues()
3300 tq->adapter = adapter; in vmxnet3_create_queues()
3301 tq->qid = i; in vmxnet3_create_queues()
3311 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size; in vmxnet3_create_queues()
3312 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size; in vmxnet3_create_queues()
3315 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter); in vmxnet3_create_queues()
3316 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_create_queues()
3317 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_create_queues()
3320 rq->shared = &adapter->rqd_start[i].ctrl; in vmxnet3_create_queues()
3321 rq->adapter = adapter; in vmxnet3_create_queues()
3322 rq->data_ring.desc_size = rxdata_desc_size; in vmxnet3_create_queues()
3326 netdev_err(adapter->netdev, in vmxnet3_create_queues()
3331 netdev_info(adapter->netdev, in vmxnet3_create_queues()
3334 adapter->num_rx_queues = i; in vmxnet3_create_queues()
3341 if (!adapter->rxdataring_enabled) in vmxnet3_create_queues()
3358 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_open()
3359 spin_lock_init(&adapter->tx_queue[i].tx_lock); in vmxnet3_open()
3365 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_open()
3370 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_open()
3375 adapter->txdata_desc_size = in vmxnet3_open()
3378 adapter->txdata_desc_size = txdata_desc_size; in vmxnet3_open()
3381 adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc); in vmxnet3_open()
3385 adapter->tx_ring_size, in vmxnet3_open()
3386 adapter->rx_ring_size, in vmxnet3_open()
3387 adapter->rx_ring2_size, in vmxnet3_open()
3388 adapter->txdata_desc_size, in vmxnet3_open()
3389 adapter->rxdata_desc_size); in vmxnet3_open()
3416 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_close()
3424 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_close()
3440 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)); in vmxnet3_force_close()
3443 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_force_close()
3444 napi_enable(&adapter->rx_queue[i].napi); in vmxnet3_force_close()
3449 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); in vmxnet3_force_close()
3450 dev_close(adapter->netdev); in vmxnet3_force_close()
3460 netdev->mtu = new_mtu; in vmxnet3_change_mtu()
3466 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_change_mtu()
3473 /* we need to re-create the rx queues based on the new mtu */ in vmxnet3_change_mtu()
3479 "failed to re-create rx queues, " in vmxnet3_change_mtu()
3487 "failed to re-activate, error %d. " in vmxnet3_change_mtu()
3494 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_change_mtu()
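
Several paths above (close, MTU change, reset work) serialize on a RESETTING bit with a test-and-set loop: sleep briefly and retry until the bit is acquired, then clear it when done. A user-space analogue using a C11 atomic flag and POSIX nanosleep(); the sleep interval is arbitrary:

/*
 * Sketch: acquire an exclusive "resetting" flag by spinning on
 * test-and-set with a short sleep, as the state-bit handling above does.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_flag resetting = ATOMIC_FLAG_INIT;

static void acquire_reset_bit(void)
{
	struct timespec ts = { 0, 1000000 };	/* 1 ms */

	while (atomic_flag_test_and_set(&resetting))
		nanosleep(&ts, NULL);		/* someone else is resetting */
}

static void release_reset_bit(void)
{
	atomic_flag_clear(&resetting);
}

int main(void)
{
	acquire_reset_bit();
	printf("reconfigure device here\n");
	release_reset_bit();
	return 0;
}
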
3505 struct net_device *netdev = adapter->netdev; in vmxnet3_declare_features()
3507 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | in vmxnet3_declare_features()
3513 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | in vmxnet3_declare_features()
3516 netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM | in vmxnet3_declare_features()
3526 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3528 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD; in vmxnet3_declare_features()
3530 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3532 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD; in vmxnet3_declare_features()
3534 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3536 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO; in vmxnet3_declare_features()
3538 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3540 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO; in vmxnet3_declare_features()
3542 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3544 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD; in vmxnet3_declare_features()
3546 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3548 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD; in vmxnet3_declare_features()
3551 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]); in vmxnet3_declare_features()
3552 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_declare_features()
3554 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_declare_features()
3555 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_declare_features()
3557 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) && in vmxnet3_declare_features()
3558 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) && in vmxnet3_declare_features()
3559 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) && in vmxnet3_declare_features()
3560 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) { in vmxnet3_declare_features()
3561 netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL; in vmxnet3_declare_features()
3562 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; in vmxnet3_declare_features()
3564 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) && in vmxnet3_declare_features()
3565 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) { in vmxnet3_declare_features()
3566 netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM; in vmxnet3_declare_features()
3567 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM; in vmxnet3_declare_features()
3571 netdev->vlan_features = netdev->hw_features & in vmxnet3_declare_features()
3574 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; in vmxnet3_declare_features()
3605 int ret = pci_enable_msix_range(adapter->pdev, in vmxnet3_acquire_msix_vectors()
3606 adapter->intr.msix_entries, nvec, nvec); in vmxnet3_acquire_msix_vectors()
3608 if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) { in vmxnet3_acquire_msix_vectors()
3609 dev_err(&adapter->netdev->dev, in vmxnet3_acquire_msix_vectors()
3610 "Failed to enable %d MSI-X, trying %d\n", in vmxnet3_acquire_msix_vectors()
3613 ret = pci_enable_msix_range(adapter->pdev, in vmxnet3_acquire_msix_vectors()
3614 adapter->intr.msix_entries, in vmxnet3_acquire_msix_vectors()
3620 dev_err(&adapter->netdev->dev, in vmxnet3_acquire_msix_vectors()
3621 "Failed to enable MSI-X, error: %d\n", ret); in vmxnet3_acquire_msix_vectors()
3637 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_alloc_intr_resources()
3641 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_alloc_intr_resources()
3642 adapter->intr.type = cfg & 0x3; in vmxnet3_alloc_intr_resources()
3643 adapter->intr.mask_mode = (cfg >> 2) & 0x3; in vmxnet3_alloc_intr_resources()
3645 if (adapter->intr.type == VMXNET3_IT_AUTO) { in vmxnet3_alloc_intr_resources()
3646 adapter->intr.type = VMXNET3_IT_MSIX; in vmxnet3_alloc_intr_resources()
3650 if (adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_alloc_intr_resources()
3653 nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ? in vmxnet3_alloc_intr_resources()
3654 1 : adapter->num_tx_queues; in vmxnet3_alloc_intr_resources()
3655 nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ? in vmxnet3_alloc_intr_resources()
3656 0 : adapter->num_rx_queues; in vmxnet3_alloc_intr_resources()
3662 adapter->intr.msix_entries[i].entry = i; in vmxnet3_alloc_intr_resources()
3673 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE in vmxnet3_alloc_intr_resources()
3674 || adapter->num_rx_queues != 1) { in vmxnet3_alloc_intr_resources()
3675 adapter->share_intr = VMXNET3_INTR_TXSHARE; in vmxnet3_alloc_intr_resources()
3676 netdev_err(adapter->netdev, in vmxnet3_alloc_intr_resources()
3678 adapter->num_rx_queues = 1; in vmxnet3_alloc_intr_resources()
3682 adapter->intr.num_intrs = nvec_allocated; in vmxnet3_alloc_intr_resources()
3687 dev_info(&adapter->pdev->dev, in vmxnet3_alloc_intr_resources()
3688 "Failed to enable MSI-X, error %d. " in vmxnet3_alloc_intr_resources()
3691 adapter->intr.type = VMXNET3_IT_MSI; in vmxnet3_alloc_intr_resources()
3694 if (adapter->intr.type == VMXNET3_IT_MSI) { in vmxnet3_alloc_intr_resources()
3695 if (!pci_enable_msi(adapter->pdev)) { in vmxnet3_alloc_intr_resources()
3696 adapter->num_rx_queues = 1; in vmxnet3_alloc_intr_resources()
3697 adapter->intr.num_intrs = 1; in vmxnet3_alloc_intr_resources()
3703 adapter->num_rx_queues = 1; in vmxnet3_alloc_intr_resources()
3704 dev_info(&adapter->netdev->dev, in vmxnet3_alloc_intr_resources()
3706 adapter->intr.type = VMXNET3_IT_INTX; in vmxnet3_alloc_intr_resources()
3708 /* INT-X related setting */ in vmxnet3_alloc_intr_resources()
3709 adapter->intr.num_intrs = 1; in vmxnet3_alloc_intr_resources()
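
The interrupt setup above budgets MSI-X vectors from the sharing mode (one per TX queue or a single shared TX vector, RX queues either riding on the TX vectors or getting their own, plus an event vector) and falls back to MSI and then INTx with a single RX queue when vectors cannot be obtained. A stand-alone sketch of the vector arithmetic; the event-vector increment and minimum below are assumptions for the example:

/*
 * Sketch: count the MSI-X vectors wanted for a given sharing mode.
 * MIN_MSIX_VECT and the "+1 event vector" term are illustrative.
 */
#include <stdio.h>

enum share { DONTSHARE, TXSHARE, BUDDYSHARE };

#define MIN_MSIX_VECT 2		/* stand-in minimum */

static int msix_vectors_wanted(enum share mode, int ntx, int nrx)
{
	int nvec;

	nvec  = (mode == TXSHARE) ? 1 : ntx;	/* TX completion vectors */
	nvec += (mode == BUDDYSHARE) ? 0 : nrx;	/* RX completion vectors */
	nvec += 1;				/* event interrupt       */
	return nvec > MIN_MSIX_VECT ? nvec : MIN_MSIX_VECT;
}

int main(void)
{
	printf("4 TX / 4 RX, dontshare : %d vectors\n",
	       msix_vectors_wanted(DONTSHARE, 4, 4));
	printf("4 TX / 4 RX, buddyshare: %d vectors\n",
	       msix_vectors_wanted(BUDDYSHARE, 4, 4));
	printf("4 TX / 1 RX, txshare   : %d vectors\n",
	       msix_vectors_wanted(TXSHARE, 4, 1));
	return 0;
}
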
3716 if (adapter->intr.type == VMXNET3_IT_MSIX) in vmxnet3_free_intr_resources()
3717 pci_disable_msix(adapter->pdev); in vmxnet3_free_intr_resources()
3718 else if (adapter->intr.type == VMXNET3_IT_MSI) in vmxnet3_free_intr_resources()
3719 pci_disable_msi(adapter->pdev); in vmxnet3_free_intr_resources()
3721 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX); in vmxnet3_free_intr_resources()
3729 adapter->tx_timeout_count++; in vmxnet3_tx_timeout()
3731 netdev_err(adapter->netdev, "tx hang\n"); in vmxnet3_tx_timeout()
3732 schedule_work(&adapter->work); in vmxnet3_tx_timeout()
3744 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_reset_work()
3749 if (netif_running(adapter->netdev)) { in vmxnet3_reset_work()
3750 netdev_notice(adapter->netdev, "resetting\n"); in vmxnet3_reset_work()
3755 netdev_info(adapter->netdev, "already closed\n"); in vmxnet3_reset_work()
3759 netif_wake_queue(adapter->netdev); in vmxnet3_reset_work()
3760 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_reset_work()
3819 return -ENOMEM; in vmxnet3_probe_device()
3823 adapter->netdev = netdev; in vmxnet3_probe_device()
3824 adapter->pdev = pdev; in vmxnet3_probe_device()
3826 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE; in vmxnet3_probe_device()
3827 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; in vmxnet3_probe_device()
3828 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE; in vmxnet3_probe_device()
3830 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in vmxnet3_probe_device()
3832 dev_err(&pdev->dev, "dma_set_mask failed\n"); in vmxnet3_probe_device()
3836 spin_lock_init(&adapter->cmd_lock); in vmxnet3_probe_device()
3837 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, in vmxnet3_probe_device()
3840 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) { in vmxnet3_probe_device()
3841 dev_err(&pdev->dev, "Failed to map dma\n"); in vmxnet3_probe_device()
3842 err = -EFAULT; in vmxnet3_probe_device()
3845 adapter->shared = dma_alloc_coherent( in vmxnet3_probe_device()
3846 &adapter->pdev->dev, in vmxnet3_probe_device()
3848 &adapter->shared_pa, GFP_KERNEL); in vmxnet3_probe_device()
3849 if (!adapter->shared) { in vmxnet3_probe_device()
3850 dev_err(&pdev->dev, "Failed to allocate memory\n"); in vmxnet3_probe_device()
3851 err = -ENOMEM; in vmxnet3_probe_device()
3864 adapter->version = VMXNET3_REV_7 + 1; in vmxnet3_probe_device()
3869 adapter->version = VMXNET3_REV_6 + 1; in vmxnet3_probe_device()
3874 adapter->version = VMXNET3_REV_5 + 1; in vmxnet3_probe_device()
3879 adapter->version = VMXNET3_REV_4 + 1; in vmxnet3_probe_device()
3884 adapter->version = VMXNET3_REV_3 + 1; in vmxnet3_probe_device()
3889 adapter->version = VMXNET3_REV_2 + 1; in vmxnet3_probe_device()
3894 adapter->version = VMXNET3_REV_1 + 1; in vmxnet3_probe_device()
3896 dev_err(&pdev->dev, in vmxnet3_probe_device()
3898 err = -EBUSY; in vmxnet3_probe_device()
3901 dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version); in vmxnet3_probe_device()
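
The probe code above walks down from the newest revision the driver knows until it finds one the device also reports, then records that as the working version. A stand-alone sketch of picking the highest common revision; modeling the device's support as a bitmask with bit n meaning "revision n+1 available" is an assumption for the example:

/*
 * Sketch: negotiate the highest revision supported by both sides.
 * The bitmask encoding is an assumption of this example.
 */
#include <stdint.h>
#include <stdio.h>

static int pick_version(uint32_t host_mask, int driver_max_rev)
{
	int rev;

	for (rev = driver_max_rev; rev >= 1; rev--)
		if (host_mask & (1u << (rev - 1)))
			return rev;	/* negotiated revision */
	return 0;			/* no common revision */
}

int main(void)
{
	uint32_t host_mask = 0x3f;	/* host offers revisions 1..6 */

	printf("negotiated: %d\n", pick_version(host_mask, 7));
	return 0;
}
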
3907 dev_err(&pdev->dev, in vmxnet3_probe_device()
3909 err = -EBUSY; in vmxnet3_probe_device()
3914 adapter->devcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DCR); in vmxnet3_probe_device()
3915 adapter->ptcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_PTCR); in vmxnet3_probe_device()
3916 if (adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) { in vmxnet3_probe_device()
3917 adapter->dev_caps[0] = adapter->devcap_supported[0] & in vmxnet3_probe_device()
3920 if (!(adapter->ptcap_supported[0] & (1UL << VMXNET3_DCR_ERROR)) && in vmxnet3_probe_device()
3921 adapter->ptcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP) && in vmxnet3_probe_device()
3922 adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) { in vmxnet3_probe_device()
3923 adapter->dev_caps[0] |= adapter->devcap_supported[0] & in vmxnet3_probe_device()
3926 if (adapter->dev_caps[0]) in vmxnet3_probe_device()
3927 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]); in vmxnet3_probe_device()
3929 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_probe_device()
3931 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_probe_device()
3932 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_probe_device()
3936 adapter->dev_caps[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) { in vmxnet3_probe_device()
3937 adapter->tx_prod_offset = VMXNET3_REG_LB_TXPROD; in vmxnet3_probe_device()
3938 adapter->rx_prod_offset = VMXNET3_REG_LB_RXPROD; in vmxnet3_probe_device()
3939 adapter->rx_prod2_offset = VMXNET3_REG_LB_RXPROD2; in vmxnet3_probe_device()
3941 adapter->tx_prod_offset = VMXNET3_REG_TXPROD; in vmxnet3_probe_device()
3942 adapter->rx_prod_offset = VMXNET3_REG_RXPROD; in vmxnet3_probe_device()
3943 adapter->rx_prod2_offset = VMXNET3_REG_RXPROD2; in vmxnet3_probe_device()
3947 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_probe_device()
3951 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_probe_device()
3953 adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff)); in vmxnet3_probe_device()
3954 adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff)); in vmxnet3_probe_device()
3956 adapter->num_rx_queues = min(num_rx_queues, in vmxnet3_probe_device()
3958 adapter->num_tx_queues = min(num_tx_queues, in vmxnet3_probe_device()
3961 if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES || in vmxnet3_probe_device()
3962 adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) { in vmxnet3_probe_device()
3963 adapter->queuesExtEnabled = true; in vmxnet3_probe_device()
3965 adapter->queuesExtEnabled = false; in vmxnet3_probe_device()
3968 adapter->queuesExtEnabled = false; in vmxnet3_probe_device()
3971 adapter->num_rx_queues = min(num_rx_queues, in vmxnet3_probe_device()
3973 adapter->num_tx_queues = min(num_tx_queues, in vmxnet3_probe_device()
3976 dev_info(&pdev->dev, in vmxnet3_probe_device()
3978 adapter->num_tx_queues, adapter->num_rx_queues); in vmxnet3_probe_device()
3980 adapter->rx_buf_per_pkt = 1; in vmxnet3_probe_device()
3982 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; in vmxnet3_probe_device()
3983 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; in vmxnet3_probe_device()
3984 adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size, in vmxnet3_probe_device()
3985 &adapter->queue_desc_pa, in vmxnet3_probe_device()
3988 if (!adapter->tqd_start) { in vmxnet3_probe_device()
3989 dev_err(&pdev->dev, "Failed to allocate memory\n"); in vmxnet3_probe_device()
3990 err = -ENOMEM; in vmxnet3_probe_device()
3993 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start + in vmxnet3_probe_device()
3994 adapter->num_tx_queues); in vmxnet3_probe_device()
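
The allocation above carves one contiguous block into the TX queue descriptors followed immediately by the RX queue descriptors, with rqd_start pointing just past the last TX descriptor. A stand-alone sketch of that layout; plain malloc stands in for the coherent DMA allocation and the descriptor types are hypothetical:

/*
 * Sketch: single allocation holding ntx TX descriptors followed by
 * nrx RX descriptors, as in the queue_desc allocation above.
 */
#include <stdio.h>
#include <stdlib.h>

struct txq_desc { char pad[256]; };
struct rxq_desc { char pad[512]; };

int main(void)
{
	int ntx = 4, nrx = 4;
	size_t size = ntx * sizeof(struct txq_desc) +
		      nrx * sizeof(struct rxq_desc);
	struct txq_desc *tqd = malloc(size);
	struct rxq_desc *rqd;

	if (!tqd)
		return 1;
	/* RX descriptors start right after the last TX descriptor */
	rqd = (struct rxq_desc *)(tqd + ntx);

	printf("total %zu bytes, rqd offset %zu\n",
	       size, (size_t)((char *)rqd - (char *)tqd));
	free(tqd);
	return 0;
}
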
3996 adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
3998 &adapter->pm_conf_pa, in vmxnet3_probe_device()
4000 if (adapter->pm_conf == NULL) { in vmxnet3_probe_device()
4001 err = -ENOMEM; in vmxnet3_probe_device()
4007 adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
4009 &adapter->rss_conf_pa, in vmxnet3_probe_device()
4011 if (adapter->rss_conf == NULL) { in vmxnet3_probe_device()
4012 err = -ENOMEM; in vmxnet3_probe_device()
4018 adapter->coal_conf = in vmxnet3_probe_device()
4019 dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
4022 &adapter->coal_conf_pa, in vmxnet3_probe_device()
4024 if (!adapter->coal_conf) { in vmxnet3_probe_device()
4025 err = -ENOMEM; in vmxnet3_probe_device()
4028 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED; in vmxnet3_probe_device()
4029 adapter->default_coal_mode = true; in vmxnet3_probe_device()
4033 adapter->default_rss_fields = true; in vmxnet3_probe_device()
4034 adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT; in vmxnet3_probe_device()
4037 SET_NETDEV_DEV(netdev, &pdev->dev); in vmxnet3_probe_device()
4039 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in vmxnet3_probe_device()
4042 adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ? in vmxnet3_probe_device()
4045 if (adapter->num_tx_queues == adapter->num_rx_queues) in vmxnet3_probe_device()
4046 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE; in vmxnet3_probe_device()
4048 adapter->share_intr = VMXNET3_INTR_DONTSHARE; in vmxnet3_probe_device()
4053 if (adapter->num_rx_queues > 1 && in vmxnet3_probe_device()
4054 adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_probe_device()
4055 adapter->rss = true; in vmxnet3_probe_device()
4056 netdev->hw_features |= NETIF_F_RXHASH; in vmxnet3_probe_device()
4057 netdev->features |= NETIF_F_RXHASH; in vmxnet3_probe_device()
4058 dev_dbg(&pdev->dev, "RSS is enabled.\n"); in vmxnet3_probe_device()
4060 adapter->rss = false; in vmxnet3_probe_device()
4067 netdev->netdev_ops = &vmxnet3_netdev_ops; in vmxnet3_probe_device()
4069 netdev->watchdog_timeo = 5 * HZ; in vmxnet3_probe_device()
4071 /* MTU range: 60 - 9190 */ in vmxnet3_probe_device()
4072 netdev->min_mtu = VMXNET3_MIN_MTU; in vmxnet3_probe_device()
4074 netdev->max_mtu = VMXNET3_V6_MAX_MTU; in vmxnet3_probe_device()
4076 netdev->max_mtu = VMXNET3_MAX_MTU; in vmxnet3_probe_device()
4078 INIT_WORK(&adapter->work, vmxnet3_reset_work); in vmxnet3_probe_device()
4079 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); in vmxnet3_probe_device()
4081 if (adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_probe_device()
4083 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_probe_device()
4084 netif_napi_add(adapter->netdev, in vmxnet3_probe_device()
4085 &adapter->rx_queue[i].napi, in vmxnet3_probe_device()
4089 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi, in vmxnet3_probe_device()
4093 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); in vmxnet3_probe_device()
4094 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); in vmxnet3_probe_device()
4100 dev_err(&pdev->dev, "Failed to register adapter\n"); in vmxnet3_probe_device()
4109 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
4111 adapter->coal_conf, adapter->coal_conf_pa); in vmxnet3_probe_device()
4116 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf), in vmxnet3_probe_device()
4117 adapter->rss_conf, adapter->rss_conf_pa); in vmxnet3_probe_device()
4120 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf), in vmxnet3_probe_device()
4121 adapter->pm_conf, adapter->pm_conf_pa); in vmxnet3_probe_device()
4123 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start, in vmxnet3_probe_device()
4124 adapter->queue_desc_pa); in vmxnet3_probe_device()
4128 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
4130 adapter->shared, adapter->shared_pa); in vmxnet3_probe_device()
4132 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, in vmxnet3_probe_device()
4160 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_remove_device()
4164 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_remove_device()
4175 cancel_work_sync(&adapter->work); in vmxnet3_remove_device()
4182 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_remove_device()
4184 adapter->coal_conf, adapter->coal_conf_pa); in vmxnet3_remove_device()
4187 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf), in vmxnet3_remove_device()
4188 adapter->rss_conf, adapter->rss_conf_pa); in vmxnet3_remove_device()
4190 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf), in vmxnet3_remove_device()
4191 adapter->pm_conf, adapter->pm_conf_pa); in vmxnet3_remove_device()
4193 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; in vmxnet3_remove_device()
4195 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start, in vmxnet3_remove_device()
4196 adapter->queue_desc_pa); in vmxnet3_remove_device()
4197 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_remove_device()
4199 adapter->shared, adapter->shared_pa); in vmxnet3_remove_device()
4200 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, in vmxnet3_remove_device()
4214 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_shutdown_device()
4218 &adapter->state)) { in vmxnet3_shutdown_device()
4219 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_shutdown_device()
4222 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_shutdown_device()
4225 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_shutdown_device()
4228 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_shutdown_device()
4252 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_suspend()
4253 napi_disable(&adapter->rx_queue[i].napi); in vmxnet3_suspend()
4261 /* Create wake-up filters. */ in vmxnet3_suspend()
4262 pmConf = adapter->pm_conf; in vmxnet3_suspend()
4265 if (adapter->wol & WAKE_UCAST) { in vmxnet3_suspend()
4266 pmConf->filters[i].patternSize = ETH_ALEN; in vmxnet3_suspend()
4267 pmConf->filters[i].maskSize = 1; in vmxnet3_suspend()
4268 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN); in vmxnet3_suspend()
4269 pmConf->filters[i].mask[0] = 0x3F; /* low ETH_ALEN bits: match the 6 MAC-address bytes */ in vmxnet3_suspend()
4271 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; in vmxnet3_suspend()
4275 if (adapter->wol & WAKE_ARP) { in vmxnet3_suspend()
4284 ifa = rcu_dereference(in_dev->ifa_list); in vmxnet3_suspend()
4290 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/ in vmxnet3_suspend()
4294 pmConf->filters[i].maskSize = in vmxnet3_suspend()
4295 (pmConf->filters[i].patternSize - 1) / 8 + 1; in vmxnet3_suspend()
4298 ehdr = (struct ethhdr *)pmConf->filters[i].pattern; in vmxnet3_suspend()
4299 ehdr->h_proto = htons(ETH_P_ARP); in vmxnet3_suspend()
4302 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN]; in vmxnet3_suspend()
4303 ahdr->ar_op = htons(ARPOP_REQUEST); in vmxnet3_suspend()
4308 *(__be32 *)arpreq = ifa->ifa_address; in vmxnet3_suspend()
4313 pmConf->filters[i].mask[0] = 0x00; in vmxnet3_suspend()
4314 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */ in vmxnet3_suspend()
4315 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */ in vmxnet3_suspend()
4316 pmConf->filters[i].mask[3] = 0x00; in vmxnet3_suspend()
4317 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */ in vmxnet3_suspend()
4318 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */ in vmxnet3_suspend()
4320 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; in vmxnet3_suspend()
4325 if (adapter->wol & WAKE_MAGIC) in vmxnet3_suspend()
4326 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC; in vmxnet3_suspend()
4328 pmConf->numFilters = i; in vmxnet3_suspend()
4330 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); in vmxnet3_suspend()
4331 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( in vmxnet3_suspend()
4333 adapter->shared->devRead.pmConfDesc.confPA = in vmxnet3_suspend()
4334 cpu_to_le64(adapter->pm_conf_pa); in vmxnet3_suspend()
4336 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_suspend()
4339 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_suspend()
4343 adapter->wol); in vmxnet3_suspend()
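
The wake-up filters above use a byte-granular bitmap: bit j of mask[k] marks pattern byte k*8 + j as one the hardware must match, and maskSize is (patternSize - 1) / 8 + 1 as shown in the listing. A stand-alone helper that recomputes such a mask from a list of significant byte offsets; the offsets here are examples only:

/*
 * Sketch: build a wake-on-LAN style byte mask from significant pattern
 * byte offsets.  Marking bytes 12-13 and 20-21 reproduces the 0x30/0x30
 * mask bytes seen above for the EtherType and ARP opcode fields.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void build_mask(uint8_t *mask, size_t mask_len,
		       const unsigned int *offsets, size_t n)
{
	size_t i;

	memset(mask, 0, mask_len);
	for (i = 0; i < n; i++)
		mask[offsets[i] / 8] |= 1u << (offsets[i] % 8);
}

int main(void)
{
	const unsigned int offs[] = { 12, 13, 20, 21 };
	unsigned int pattern_size = 42;
	unsigned int mask_size = (pattern_size - 1) / 8 + 1;
	uint8_t mask[8];
	unsigned int i;

	build_mask(mask, mask_size, offs, 4);
	for (i = 0; i < mask_size; i++)
		printf("mask[%u] = 0x%02x\n", i, mask[i]);
	return 0;
}
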
4380 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_resume()
4383 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_resume()
4391 "failed to re-activate on resume, error: %d", err); in vmxnet3_resume()
4423 pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC, in vmxnet3_init_module()