Lines Matching +full:tx +full:-hw +full:-gso +full:-packets
1 // SPDX-License-Identifier: GPL-2.0-or-later
33 static bool csum = true, gso = true, napi_tx = true; variable
35 module_param(gso, bool, 0444);
50 * at once, the weight is chosen so that the EWMA will be insensitive to short-
82 u64 packets; member
91 u64_stats_t packets; member
103 u64_stats_t packets; member
113 #define VIRTNET_SQ_STAT(name, m) {name, offsetof(struct virtnet_sq_stats, m), -1}
114 #define VIRTNET_RQ_STAT(name, m) {name, offsetof(struct virtnet_rq_stats, m), -1}
147 VIRTNET_SQ_STAT_QSTAT("packets", packets),
154 VIRTNET_RQ_STAT_QSTAT("packets", packets),
159 {#name, offsetof(struct virtio_net_stats_cvq, name), -1}
162 {#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), -1}
165 {#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), -1}
174 VIRTNET_STATS_DESC_RX(basic, packets),
182 VIRTNET_STATS_DESC_TX(basic, packets),
194 VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
195 VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
242 VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets, hw_gro_packets),
243 VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes, hw_gro_bytes),
244 VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets_coalesced, hw_gro_wire_packets),
245 VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes_coalesced, hw_gro_wire_bytes),
249 VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_packets, hw_gso_packets),
250 VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_bytes, hw_gso_bytes),
251 VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments, hw_gso_wire_packets),
252 VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments_bytes, hw_gso_wire_bytes),
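
The VIRTNET_*_STAT and VIRTNET_STATS_DESC_* entries above are name-plus-offsetof() descriptor tables: generic code can walk them and find each counter by adding the recorded offset to the base of the owning stats structure, which is the same pointer arithmetic the virtnet_receive() lines further down apply to rq->stats. A minimal userspace sketch of that pattern, with hypothetical struct, table, and field names (demo_*):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-queue counters standing in for struct virtnet_rq_stats. */
struct demo_rq_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t drops;
};

/* Descriptor: a printable name plus the counter's byte offset in the struct. */
struct demo_stat_desc {
	const char *name;
	size_t offset;
};

#define DEMO_RQ_STAT(n, m) { n, offsetof(struct demo_rq_stats, m) }

static const struct demo_stat_desc demo_rq_stats_desc[] = {
	DEMO_RQ_STAT("packets", packets),
	DEMO_RQ_STAT("bytes", bytes),
	DEMO_RQ_STAT("drops", drops),
};

int main(void)
{
	struct demo_rq_stats s = { .packets = 10, .bytes = 15000, .drops = 1 };
	size_t i;

	/* Walk the table: struct base + recorded offset locates each counter. */
	for (i = 0; i < sizeof(demo_rq_stats_desc) / sizeof(demo_rq_stats_desc[0]); i++) {
		const uint64_t *item = (const uint64_t *)
			((const uint8_t *)&s + demo_rq_stats_desc[i].offset);

		printf("%s: %llu\n", demo_rq_stats_desc[i].name,
		       (unsigned long long)*item);
	}
	return 0;
}
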
285 /* TX: fragments + linear part + virtio header */
391 /* I like... big packets and I cannot lie! */
394 /* number of sg entries allocated for big packets */
397 /* Host will merge rx buffers for big packets (shake it! shake it!) */
513 u16 indir_table_size = vi->has_rss ? vi->rss_indir_table_size : 1; in virtnet_rss_hdr_size()
515 return struct_size(vi->rss_hdr, indirection_table, indir_table_size); in virtnet_rss_hdr_size()
520 return struct_size(&vi->rss_trailer, hash_key_data, vi->rss_key_size); in virtnet_rss_trailer_size()
545 return virtqueue_add_outbuf(sq->vq, sq->sg, num, in virtnet_add_outbuf()
569 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { in __free_old_xmit()
575 stats->napi_packets++; in __free_old_xmit()
576 stats->napi_bytes += skb->len; in __free_old_xmit()
583 stats->packets++; in __free_old_xmit()
584 stats->bytes += skb->len; in __free_old_xmit()
591 stats->packets++; in __free_old_xmit()
592 stats->bytes += xdp_get_frame_len(frame); in __free_old_xmit()
597 stats->bytes += virtnet_ptr_to_xsk_buff_len(ptr); in __free_old_xmit()
598 stats->xsk++; in __free_old_xmit()
602 netdev_tx_completed_queue(txq, stats->napi_packets, stats->napi_bytes); in __free_old_xmit()
612 if (stats->xsk) in virtnet_free_old_xmit()
613 virtnet_xsk_completed(sq, stats->xsk); in virtnet_free_old_xmit()
616 /* Converting between virtqueue no. and kernel tx/rx queue no.
621 return (vq->index - 1) / 2; in vq2txq()
631 return vq->index / 2; in vq2rxq()
641 if (qid == vi->max_queue_pairs * 2) in vq_type()
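
The helpers above encode the virtio-net virtqueue layout: receive queue N sits at virtqueue index 2*N, transmit queue N at 2*N + 1, and vq_type() treats index max_queue_pairs * 2 as the control virtqueue. A standalone sketch of the forward and inverse mappings; txq2vq()/rxq2vq() are assumed inverses written to mirror the visible formulas, not quoted from the source:

#include <assert.h>

/* rxN sits at virtqueue index 2*N and txN at 2*N + 1 (see vq2rxq()/vq2txq()
 * above); txq2vq()/rxq2vq() below are the assumed inverse mappings.
 */
static int vq2txq(int vq_index) { return (vq_index - 1) / 2; }
static int vq2rxq(int vq_index) { return vq_index / 2; }
static int txq2vq(int txq) { return txq * 2 + 1; }
static int rxq2vq(int rxq) { return rxq * 2; }

int main(void)
{
	int q;

	for (q = 0; q < 8; q++) {
		assert(vq2txq(txq2vq(q)) == q);
		assert(vq2rxq(rxq2vq(q)) == q);
	}
	/* With max_queue_pairs pairs, index max_queue_pairs * 2 is left over
	 * for the control virtqueue, which is what vq_type() checks above. */
	return 0;
}
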
653 return (struct virtio_net_common_hdr *)skb->cb; in skb_vnet_common_hdr()
657 * private is used to chain pages for big packets, put the whole
664 /* Find end of list, sew whole thing into vi->rq.pages. */ in give_pages()
665 for (end = page; end->private; end = (struct page *)end->private); in give_pages()
666 end->private = (unsigned long)rq->pages; in give_pages()
667 rq->pages = page; in give_pages()
672 struct page *p = rq->pages; in get_a_page()
675 rq->pages = (struct page *)p->private; in get_a_page()
677 p->private = 0; in get_a_page()
686 if (vi->mergeable_rx_bufs) in virtnet_rq_free_buf()
688 else if (vi->big_packets) in virtnet_rq_free_buf()
696 spin_lock_bh(&vi->refill_lock); in enable_delayed_refill()
697 vi->refill_enabled = true; in enable_delayed_refill()
698 spin_unlock_bh(&vi->refill_lock); in enable_delayed_refill()
703 spin_lock_bh(&vi->refill_lock); in disable_delayed_refill()
704 vi->refill_enabled = false; in disable_delayed_refill()
705 spin_unlock_bh(&vi->refill_lock); in disable_delayed_refill()
711 vi->rx_mode_work_enabled = true; in enable_rx_mode_work()
718 vi->rx_mode_work_enabled = false; in disable_rx_mode_work()
751 struct virtnet_info *vi = vq->vdev->priv; in skb_xmit_done()
752 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; in skb_xmit_done()
757 if (napi->weight) in skb_xmit_done()
761 netif_wake_subqueue(vi->dev, vq2txq(vq)); in skb_xmit_done()
778 return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1); in mergeable_ctx_to_truesize()
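
mergeable_ctx_to_truesize() above unpacks a per-buffer context value in which the low MRG_CTX_HEADER_SHIFT bits hold the buffer truesize and the remaining high bits hold the headroom. A self-contained sketch of that packing scheme; the shift value of 22 and the pack/unpack helper names are assumptions here, only the mask-and-shift pattern is taken from the line above:

#include <assert.h>
#include <stdint.h>

#define MRG_CTX_HEADER_SHIFT 22	/* assumed value; only the pattern matters */

static void *pack_ctx(unsigned int truesize, unsigned int headroom)
{
	return (void *)(uintptr_t)(((uintptr_t)headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int ctx_to_truesize(void *ctx)
{
	return (uintptr_t)ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}

static unsigned int ctx_to_headroom(void *ctx)
{
	return (uintptr_t)ctx >> MRG_CTX_HEADER_SHIFT;
}

int main(void)
{
	void *ctx = pack_ctx(1536, 256);

	assert(ctx_to_truesize(ctx) == 1536);
	assert(ctx_to_headroom(ctx) == 256);
	return 0;
}
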
814 hdr_len = vi->hdr_len; in page_to_skb()
815 if (vi->mergeable_rx_bufs) in page_to_skb()
820 buf = p - headroom; in page_to_skb()
821 len -= hdr_len; in page_to_skb()
824 tailroom = truesize - headroom - hdr_padded_len - len; in page_to_skb()
829 skb = virtnet_build_skb(buf, truesize, p - buf, len); in page_to_skb()
833 page = (struct page *)page->private; in page_to_skb()
840 skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN); in page_to_skb()
844 /* Copy the whole frame if it fits skb->head, otherwise in page_to_skb()
853 len -= copy; in page_to_skb()
856 if (vi->mergeable_rx_bufs) { in page_to_skb()
871 net_dbg_ratelimited("%s: too much data\n", skb->dev->name); in page_to_skb()
877 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len); in page_to_skb()
878 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, in page_to_skb()
880 len -= frag_size; in page_to_skb()
881 page = (struct page *)page->private; in page_to_skb()
899 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_unmap()
905 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_unmap()
911 --dma->ref; in virtnet_rq_unmap()
913 if (dma->need_sync && len) { in virtnet_rq_unmap()
914 offset = buf - (head + sizeof(*dma)); in virtnet_rq_unmap()
916 virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr, in virtnet_rq_unmap()
921 if (dma->ref) in virtnet_rq_unmap()
924 virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len, in virtnet_rq_unmap()
931 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_get_buf()
934 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_get_buf()
936 buf = virtqueue_get_buf_ctx(rq->vq, len, ctx); in virtnet_rq_get_buf()
945 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_init_one_sg()
951 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_init_one_sg()
953 head = page_address(rq->alloc_frag.page); in virtnet_rq_init_one_sg()
955 offset = buf - head; in virtnet_rq_init_one_sg()
959 addr = dma->addr - sizeof(*dma) + offset; in virtnet_rq_init_one_sg()
961 sg_init_table(rq->sg, 1); in virtnet_rq_init_one_sg()
962 sg_fill_dma(rq->sg, addr, len); in virtnet_rq_init_one_sg()
967 struct page_frag *alloc_frag = &rq->alloc_frag; in virtnet_rq_alloc()
968 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_alloc()
973 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_alloc()
975 head = page_address(alloc_frag->page); in virtnet_rq_alloc()
980 if (!alloc_frag->offset) { in virtnet_rq_alloc()
981 if (rq->last_dma) { in virtnet_rq_alloc()
986 virtnet_rq_unmap(rq, rq->last_dma, 0); in virtnet_rq_alloc()
987 rq->last_dma = NULL; in virtnet_rq_alloc()
990 dma->len = alloc_frag->size - sizeof(*dma); in virtnet_rq_alloc()
992 addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1, in virtnet_rq_alloc()
993 dma->len, DMA_FROM_DEVICE, 0); in virtnet_rq_alloc()
994 if (virtqueue_dma_mapping_error(rq->vq, addr)) in virtnet_rq_alloc()
997 dma->addr = addr; in virtnet_rq_alloc()
998 dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr); in virtnet_rq_alloc()
1004 get_page(alloc_frag->page); in virtnet_rq_alloc()
1005 dma->ref = 1; in virtnet_rq_alloc()
1006 alloc_frag->offset = sizeof(*dma); in virtnet_rq_alloc()
1008 rq->last_dma = dma; in virtnet_rq_alloc()
1011 ++dma->ref; in virtnet_rq_alloc()
1013 buf = head + alloc_frag->offset; in virtnet_rq_alloc()
1015 get_page(alloc_frag->page); in virtnet_rq_alloc()
1016 alloc_frag->offset += size; in virtnet_rq_alloc()
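
virtnet_rq_alloc() above carves receive buffers out of a page_frag and shares one DMA mapping per fragment: a struct virtnet_rq_dma header occupies the start of the fragment (alloc_frag->offset begins at sizeof(*dma)), each buffer handed to the ring bumps dma->ref, and virtnet_rq_unmap() only tears the mapping down once that count drops to zero. A userspace toy model of that refcounted bookkeeping; the demo_* names and values are illustrative, not the driver's code:

#include <assert.h>
#include <stddef.h>

struct demo_rq_dma {
	unsigned long addr;	/* DMA address of the mapped fragment */
	unsigned int len;	/* mapped bytes: frag size minus this header */
	int ref;		/* buffers from this fragment still in the ring */
};

static void demo_take_buf(struct demo_rq_dma *dma)
{
	dma->ref++;			/* virtnet_rq_alloc(): ++dma->ref */
}

static int demo_put_buf(struct demo_rq_dma *dma)
{
	/* virtnet_rq_unmap(): true once the fragment may be unmapped. */
	return --dma->ref == 0;
}

int main(void)
{
	struct demo_rq_dma dma = {
		.addr = 0x1000,
		.len = 4096 - sizeof(struct demo_rq_dma),
		.ref = 0,
	};

	demo_take_buf(&dma);		/* first buffer carved from the frag */
	demo_take_buf(&dma);		/* second buffer shares the mapping */
	assert(!demo_put_buf(&dma));	/* one buffer still outstanding */
	assert(demo_put_buf(&dma));	/* last reference: unmap now */
	return 0;
}
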
1023 struct virtnet_info *vi = vq->vdev->priv; in virtnet_rq_unmap_free_buf()
1027 rq = &vi->rq[i]; in virtnet_rq_unmap_free_buf()
1029 if (rq->xsk_pool) { in virtnet_rq_unmap_free_buf()
1034 if (!vi->big_packets || vi->mergeable_rx_bufs) in virtnet_rq_unmap_free_buf()
1047 /* Avoid overhead when no packets have been processed in free_old_xmit()
1050 if (!stats.packets && !stats.napi_packets) in free_old_xmit()
1053 u64_stats_update_begin(&sq->stats.syncp); in free_old_xmit()
1054 u64_stats_add(&sq->stats.bytes, stats.bytes + stats.napi_bytes); in free_old_xmit()
1055 u64_stats_add(&sq->stats.packets, stats.packets + stats.napi_packets); in free_old_xmit()
1056 u64_stats_update_end(&sq->stats.syncp); in free_old_xmit()
1061 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) in is_xdp_raw_buffer_queue()
1063 else if (q < vi->curr_queue_pairs) in is_xdp_raw_buffer_queue()
1075 qnum = sq - vi->sq; in tx_may_stop()
1077 /* If running out of space, stop queue to avoid getting packets that we in tx_may_stop()
1082 * maintaining the TX queue stop/start state properly, and causes in tx_may_stop()
1083 * the stack to do a non-trivial amount of useless work. in tx_may_stop()
1084 * Since most packets only take 1 or 2 ring slots, stopping the queue in tx_may_stop()
1087 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { in tx_may_stop()
1091 u64_stats_update_begin(&sq->stats.syncp); in tx_may_stop()
1092 u64_stats_inc(&sq->stats.stop); in tx_may_stop()
1093 u64_stats_update_end(&sq->stats.syncp); in tx_may_stop()
1105 bool use_napi = sq->napi.weight; in check_sq_full_and_disable()
1108 qnum = sq - vi->sq; in check_sq_full_and_disable()
1114 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) in check_sq_full_and_disable()
1115 virtqueue_napi_schedule(&sq->napi, sq->vq); in check_sq_full_and_disable()
1116 } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { in check_sq_full_and_disable()
1119 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { in check_sq_full_and_disable()
1121 u64_stats_update_begin(&sq->stats.syncp); in check_sq_full_and_disable()
1122 u64_stats_inc(&sq->stats.wake); in check_sq_full_and_disable()
1123 u64_stats_update_end(&sq->stats.syncp); in check_sq_full_and_disable()
1124 virtqueue_disable_cb(sq->vq); in check_sq_full_and_disable()
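
tx_may_stop() and check_sq_full_and_disable() above implement the usual stop/wake hysteresis around a worst-case skb: the queue stops once fewer than 2 + MAX_SKB_FRAGS descriptors remain (all fragments plus the linear part plus the virtio header, per the comment near the top of this listing) and is woken again only after completions free at least that many slots. A compressed standalone model of that threshold logic; MAX_SKB_FRAGS is taken as 17 here purely for illustration, and the demo_* functions stand in for the driver's control flow rather than reproduce it:

#include <stdbool.h>
#include <stdio.h>

#define MAX_SKB_FRAGS 17
#define WORST_CASE_SLOTS (2 + MAX_SKB_FRAGS)

struct demo_txq {
	int num_free;	/* free descriptors, like sq->vq->num_free */
	bool stopped;	/* mirrors the netif_tx_stop/wake_queue() state */
};

static void demo_after_xmit(struct demo_txq *q, int slots_used)
{
	q->num_free -= slots_used;
	if (q->num_free < WORST_CASE_SLOTS)
		q->stopped = true;		/* tx_may_stop() path */
}

static void demo_after_completions(struct demo_txq *q, int slots_freed)
{
	q->num_free += slots_freed;
	if (q->stopped && q->num_free >= WORST_CASE_SLOTS)
		q->stopped = false;		/* wake path, stats.wake++ */
}

int main(void)
{
	struct demo_txq q = { .num_free = 22, .stopped = false };

	demo_after_xmit(&q, 2);		/* 20 free: keep running */
	demo_after_xmit(&q, 2);		/* 18 free: below 19, stop */
	printf("stopped=%d\n", q.stopped);
	demo_after_completions(&q, 1);	/* 19 free: worst case fits, wake */
	printf("stopped=%d\n", q.stopped);
	return 0;
}
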
1138 bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool) + vi->hdr_len; in buf_to_xdp()
1142 vi->dev->name, len, bufsize); in buf_to_xdp()
1143 DEV_STATS_INC(vi->dev, rx_length_errors); in buf_to_xdp()
1157 unsigned int metasize = xdp->data - xdp->data_meta; in xsk_construct_skb()
1161 size = xdp->data_end - xdp->data_hard_start; in xsk_construct_skb()
1162 skb = napi_alloc_skb(&rq->napi, size); in xsk_construct_skb()
1168 skb_reserve(skb, xdp->data_meta - xdp->data_hard_start); in xsk_construct_skb()
1170 size = xdp->data_end - xdp->data_meta; in xsk_construct_skb()
1171 memcpy(__skb_put(skb, size), xdp->data_meta, size); in xsk_construct_skb()
1193 prog = rcu_dereference(rq->xdp_prog); in virtnet_receive_xsk_small()
1209 u64_stats_inc(&stats->drops); in virtnet_receive_xsk_small()
1222 while (num_buf-- > 1) { in xsk_drop_follow_bufs()
1223 xdp = virtqueue_get_buf(rq->vq, &len); in xsk_drop_follow_bufs()
1226 dev->name, num_buf); in xsk_drop_follow_bufs()
1230 u64_stats_add(&stats->bytes, len); in xsk_drop_follow_bufs()
1250 while (--num_buf) { in xsk_append_merge_buffer()
1251 buf = virtqueue_get_buf(rq->vq, &len); in xsk_append_merge_buffer()
1254 vi->dev->name, num_buf, in xsk_append_merge_buffer()
1255 virtio16_to_cpu(vi->vdev, in xsk_append_merge_buffer()
1256 hdr->num_buffers)); in xsk_append_merge_buffer()
1257 DEV_STATS_INC(vi->dev, rx_length_errors); in xsk_append_merge_buffer()
1258 return -EINVAL; in xsk_append_merge_buffer()
1261 u64_stats_add(&stats->bytes, len); in xsk_append_merge_buffer()
1273 memcpy(buf, xdp->data - vi->hdr_len, len); in xsk_append_merge_buffer()
1292 xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats); in xsk_append_merge_buffer()
1293 return -EINVAL; in xsk_append_merge_buffer()
1306 hdr = xdp->data - vi->hdr_len; in virtnet_receive_xsk_merge()
1307 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in virtnet_receive_xsk_merge()
1311 prog = rcu_dereference(rq->xdp_prog); in virtnet_receive_xsk_merge()
1343 u64_stats_inc(&stats->drops); in virtnet_receive_xsk_merge()
1352 struct net_device *dev = vi->dev; in virtnet_receive_xsk_buf()
1357 len -= vi->hdr_len; in virtnet_receive_xsk_buf()
1359 u64_stats_add(&stats->bytes, len); in virtnet_receive_xsk_buf()
1366 pr_debug("%s: short packet %i\n", dev->name, len); in virtnet_receive_xsk_buf()
1372 flags = ((struct virtio_net_common_hdr *)(xdp->data - vi->hdr_len))->hdr.flags; in virtnet_receive_xsk_buf()
1374 if (!vi->mergeable_rx_bufs) in virtnet_receive_xsk_buf()
1392 xsk_buffs = rq->xsk_buffs; in virtnet_add_recvbuf_xsk()
1394 num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free); in virtnet_add_recvbuf_xsk()
1396 return -ENOMEM; in virtnet_add_recvbuf_xsk()
1398 len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len; in virtnet_add_recvbuf_xsk()
1402 * We assume XDP_PACKET_HEADROOM is larger than hdr->len. in virtnet_add_recvbuf_xsk()
1405 addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len; in virtnet_add_recvbuf_xsk()
1407 sg_init_table(rq->sg, 1); in virtnet_add_recvbuf_xsk()
1408 sg_fill_dma(rq->sg, addr, len); in virtnet_add_recvbuf_xsk()
1410 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, in virtnet_add_recvbuf_xsk()
1441 vi = sq->vq->vdev->priv; in virtnet_xsk_xmit_one()
1443 addr = xsk_buff_raw_get_dma(pool, desc->addr); in virtnet_xsk_xmit_one()
1444 xsk_buff_raw_dma_sync_for_device(pool, addr, desc->len); in virtnet_xsk_xmit_one()
1446 sg_init_table(sq->sg, 2); in virtnet_xsk_xmit_one()
1447 sg_fill_dma(sq->sg, sq->xsk_hdr_dma_addr, vi->hdr_len); in virtnet_xsk_xmit_one()
1448 sg_fill_dma(sq->sg + 1, addr, desc->len); in virtnet_xsk_xmit_one()
1450 return virtqueue_add_outbuf_premapped(sq->vq, sq->sg, 2, in virtnet_xsk_xmit_one()
1451 virtnet_xsk_to_ptr(desc->len), in virtnet_xsk_xmit_one()
1460 struct xdp_desc *descs = pool->tx_descs; in virtnet_xsk_xmit_batch()
1465 budget = min_t(u32, budget, sq->vq->num_free); in virtnet_xsk_xmit_batch()
1474 xsk_tx_completed(sq->xsk_pool, nb_pkts - i); in virtnet_xsk_xmit_batch()
1481 if (kick && virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) in virtnet_xsk_xmit_batch()
1490 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_xsk_xmit()
1492 struct net_device *dev = vi->dev; in virtnet_xsk_xmit()
1499 __free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), true, &stats); in virtnet_xsk_xmit()
1502 xsk_tx_completed(sq->xsk_pool, stats.xsk); in virtnet_xsk_xmit()
1506 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) in virtnet_xsk_xmit()
1507 check_sq_full_and_disable(vi, vi->dev, sq); in virtnet_xsk_xmit()
1512 txq = netdev_get_tx_queue(vi->dev, sq - vi->sq); in virtnet_xsk_xmit()
1516 u64_stats_update_begin(&sq->stats.syncp); in virtnet_xsk_xmit()
1517 u64_stats_add(&sq->stats.packets, stats.packets); in virtnet_xsk_xmit()
1518 u64_stats_add(&sq->stats.bytes, stats.bytes); in virtnet_xsk_xmit()
1519 u64_stats_add(&sq->stats.kicks, kicks); in virtnet_xsk_xmit()
1520 u64_stats_add(&sq->stats.xdp_tx, sent); in virtnet_xsk_xmit()
1521 u64_stats_update_end(&sq->stats.syncp); in virtnet_xsk_xmit()
1531 if (napi_if_scheduled_mark_missed(&sq->napi)) in xsk_wakeup()
1535 virtqueue_napi_schedule(&sq->napi, sq->vq); in xsk_wakeup()
1545 return -ENETDOWN; in virtnet_xsk_wakeup()
1547 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_wakeup()
1548 return -EINVAL; in virtnet_xsk_wakeup()
1550 sq = &vi->sq[qid]; in virtnet_xsk_wakeup()
1558 xsk_tx_completed(sq->xsk_pool, num); in virtnet_xsk_completed()
1561 * wake up the tx napi to consume the xsk tx queue, because the tx in virtnet_xsk_completed()
1576 if (unlikely(xdpf->headroom < vi->hdr_len)) in __virtnet_xdp_xmit_one()
1577 return -EOVERFLOW; in __virtnet_xdp_xmit_one()
1581 nr_frags = shinfo->nr_frags; in __virtnet_xdp_xmit_one()
1587 * xdp_return_frame(), which will refer to xdpf->data and in __virtnet_xdp_xmit_one()
1588 * xdpf->headroom. Therefore, we need to update the value of in __virtnet_xdp_xmit_one()
1591 xdpf->headroom -= vi->hdr_len; in __virtnet_xdp_xmit_one()
1592 xdpf->data -= vi->hdr_len; in __virtnet_xdp_xmit_one()
1594 hdr = xdpf->data; in __virtnet_xdp_xmit_one()
1595 memset(hdr, 0, vi->hdr_len); in __virtnet_xdp_xmit_one()
1596 xdpf->len += vi->hdr_len; in __virtnet_xdp_xmit_one()
1598 sg_init_table(sq->sg, nr_frags + 1); in __virtnet_xdp_xmit_one()
1599 sg_set_buf(sq->sg, xdpf->data, xdpf->len); in __virtnet_xdp_xmit_one()
1601 skb_frag_t *frag = &shinfo->frags[i]; in __virtnet_xdp_xmit_one()
1603 sg_set_page(&sq->sg[i + 1], skb_frag_page(frag), in __virtnet_xdp_xmit_one()
1609 return -ENOSPC; /* Caller handle free/refcnt */ in __virtnet_xdp_xmit_one()
1614 /* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
1628 if (v->curr_queue_pairs > nr_cpu_ids) { \
1629 qp = v->curr_queue_pairs - v->xdp_queue_pairs; \
1631 txq = netdev_get_tx_queue(v->dev, qp); \
1634 qp = cpu % v->curr_queue_pairs; \
1635 txq = netdev_get_tx_queue(v->dev, qp); \
1638 v->sq + qp; \
1645 txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
1646 if (v->curr_queue_pairs > nr_cpu_ids) \
1657 struct receive_queue *rq = vi->rq; in virtnet_xdp_xmit()
1668 xdp_prog = rcu_access_pointer(rq->xdp_prog); in virtnet_xdp_xmit()
1670 return -ENXIO; in virtnet_xdp_xmit()
1675 ret = -EINVAL; in virtnet_xdp_xmit()
1680 virtnet_free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), in virtnet_xdp_xmit()
1692 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) in virtnet_xdp_xmit()
1696 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) in virtnet_xdp_xmit()
1700 u64_stats_update_begin(&sq->stats.syncp); in virtnet_xdp_xmit()
1701 u64_stats_add(&sq->stats.bytes, stats.bytes); in virtnet_xdp_xmit()
1702 u64_stats_add(&sq->stats.packets, stats.packets); in virtnet_xdp_xmit()
1703 u64_stats_add(&sq->stats.xdp_tx, n); in virtnet_xdp_xmit()
1704 u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit); in virtnet_xdp_xmit()
1705 u64_stats_add(&sq->stats.kicks, kicks); in virtnet_xdp_xmit()
1706 u64_stats_update_end(&sq->stats.syncp); in virtnet_xdp_xmit()
1720 for (i = 0; i < shinfo->nr_frags; i++) { in put_xdp_frags()
1721 xdp_page = skb_frag_page(&shinfo->frags[i]); in put_xdp_frags()
1737 u64_stats_inc(&stats->xdp_packets); in virtnet_xdp_handler()
1744 u64_stats_inc(&stats->xdp_tx); in virtnet_xdp_handler()
1762 u64_stats_inc(&stats->xdp_redirects); in virtnet_xdp_handler()
1783 return vi->xdp_enabled ? XDP_PACKET_HEADROOM : 0; in virtnet_get_headroom()
1793 * with large buffers with sufficient headroom - so it should affect
1794 * at most queue size packets.
1796 * XDP should preclude the underlying device from sending packets
1820 while (--*num_buf) { in xdp_linearize_page()
1830 off = buf - page_address(p); in xdp_linearize_page()
1847 *len = page_off - XDP_PACKET_HEADROOM; in xdp_linearize_page()
1865 headroom = vi->hdr_len + header_offset; in receive_small_build_skb()
1874 memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len); in receive_small_build_skb()
1890 unsigned int headroom = vi->hdr_len + header_offset; in receive_small_xdp()
1900 if (unlikely(hdr->hdr.gso_type)) in receive_small_xdp()
1903 /* Partially checksummed packets must be dropped. */ in receive_small_xdp()
1904 if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) in receive_small_xdp()
1911 int offset = buf - page_address(page) + header_offset; in receive_small_xdp()
1912 unsigned int tlen = len + vi->hdr_len; in receive_small_xdp()
1917 headroom = vi->hdr_len + header_offset; in receive_small_xdp()
1931 xdp_init_buff(&xdp, buflen, &rq->xdp_rxq); in receive_small_xdp()
1932 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len, in receive_small_xdp()
1940 len = xdp.data_end - xdp.data; in receive_small_xdp()
1941 metasize = xdp.data - xdp.data_meta; in receive_small_xdp()
1952 skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len); in receive_small_xdp()
1962 u64_stats_inc(&stats->xdp_drops); in receive_small_xdp()
1964 u64_stats_inc(&stats->drops); in receive_small_xdp()
1982 /* We passed the address of virtnet header to virtio-core, in receive_small()
1985 buf -= VIRTNET_RX_PAD + xdp_headroom; in receive_small()
1987 len -= vi->hdr_len; in receive_small()
1988 u64_stats_add(&stats->bytes, len); in receive_small()
1992 dev->name, len, GOOD_PACKET_LEN); in receive_small()
1997 if (unlikely(vi->xdp_enabled)) { in receive_small()
2001 xdp_prog = rcu_dereference(rq->xdp_prog); in receive_small()
2017 u64_stats_inc(&stats->drops); in receive_small()
2033 u64_stats_add(&stats->bytes, len - vi->hdr_len); in receive_big()
2040 u64_stats_inc(&stats->drops); in receive_big()
2053 while (num_buf-- > 1) { in mergeable_buf_free()
2057 dev->name, num_buf); in mergeable_buf_free()
2061 u64_stats_add(&stats->bytes, len); in mergeable_buf_free()
2069 * virtio-net there are 2 points that do not match its requirements:
2072 * like eth_type_trans() (which virtio-net does in receive_buf()).
2085 if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) { in build_skb_from_xdp_buff()
2091 nr_frags = sinfo->nr_frags; in build_skb_from_xdp_buff()
2093 skb = build_skb(xdp->data_hard_start, xdp->frame_sz); in build_skb_from_xdp_buff()
2097 headroom = xdp->data - xdp->data_hard_start; in build_skb_from_xdp_buff()
2098 data_len = xdp->data_end - xdp->data; in build_skb_from_xdp_buff()
2102 metasize = xdp->data - xdp->data_meta; in build_skb_from_xdp_buff()
2109 sinfo->xdp_frags_size, in build_skb_from_xdp_buff()
2138 xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq); in virtnet_build_xdp_buff_mrg()
2139 xdp_prepare_buff(xdp, buf - XDP_PACKET_HEADROOM, in virtnet_build_xdp_buff_mrg()
2140 XDP_PACKET_HEADROOM + vi->hdr_len, len - vi->hdr_len, true); in virtnet_build_xdp_buff_mrg()
2146 /* If we want to build multi-buffer xdp, we need in virtnet_build_xdp_buff_mrg()
2154 shinfo->nr_frags = 0; in virtnet_build_xdp_buff_mrg()
2155 shinfo->xdp_frags_size = 0; in virtnet_build_xdp_buff_mrg()
2159 return -EINVAL; in virtnet_build_xdp_buff_mrg()
2161 while (--*num_buf > 0) { in virtnet_build_xdp_buff_mrg()
2165 dev->name, *num_buf, in virtnet_build_xdp_buff_mrg()
2166 virtio16_to_cpu(vi->vdev, hdr->num_buffers)); in virtnet_build_xdp_buff_mrg()
2171 u64_stats_add(&stats->bytes, len); in virtnet_build_xdp_buff_mrg()
2173 offset = buf - page_address(page); in virtnet_build_xdp_buff_mrg()
2182 if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) { in virtnet_build_xdp_buff_mrg()
2185 dev->name, len, (unsigned long)(truesize - room)); in virtnet_build_xdp_buff_mrg()
2190 frag = &shinfo->frags[shinfo->nr_frags++]; in virtnet_build_xdp_buff_mrg()
2195 shinfo->xdp_frags_size += len; in virtnet_build_xdp_buff_mrg()
2203 return -EINVAL; in virtnet_build_xdp_buff_mrg()
2223 * in-flight packets from before XDP was enabled reach in mergeable_xdp_get_buf()
2226 if (unlikely(hdr->hdr.gso_type)) in mergeable_xdp_get_buf()
2229 /* Partially checksummed packets must be dropped. */ in mergeable_xdp_get_buf()
2230 if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) in mergeable_xdp_get_buf()
2241 (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) { in mergeable_xdp_get_buf()
2247 * This should only happen for the first several packets. in mergeable_xdp_get_buf()
2253 if (!xdp_prog->aux->xdp_has_frags) { in mergeable_xdp_get_buf()
2295 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable_xdp()
2297 int offset = buf - page_address(page); in receive_mergeable_xdp()
2339 u64_stats_inc(&stats->xdp_drops); in receive_mergeable_xdp()
2340 u64_stats_inc(&stats->drops); in receive_mergeable_xdp()
2352 num_skb_frags = skb_shinfo(curr_skb)->nr_frags; in virtnet_skb_append_frag()
2360 skb_shinfo(curr_skb)->frag_list = nskb; in virtnet_skb_append_frag()
2362 curr_skb->next = nskb; in virtnet_skb_append_frag()
2364 head_skb->truesize += nskb->truesize; in virtnet_skb_append_frag()
2369 head_skb->data_len += len; in virtnet_skb_append_frag()
2370 head_skb->len += len; in virtnet_skb_append_frag()
2371 head_skb->truesize += truesize; in virtnet_skb_append_frag()
2374 offset = buf - page_address(page); in virtnet_skb_append_frag()
2377 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, in virtnet_skb_append_frag()
2397 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable()
2399 int offset = buf - page_address(page); in receive_mergeable()
2407 u64_stats_add(&stats->bytes, len - vi->hdr_len); in receive_mergeable()
2409 if (unlikely(len > truesize - room)) { in receive_mergeable()
2411 dev->name, len, (unsigned long)(truesize - room)); in receive_mergeable()
2416 if (unlikely(vi->xdp_enabled)) { in receive_mergeable()
2420 xdp_prog = rcu_dereference(rq->xdp_prog); in receive_mergeable()
2435 while (--num_buf) { in receive_mergeable()
2439 dev->name, num_buf, in receive_mergeable()
2440 virtio16_to_cpu(vi->vdev, in receive_mergeable()
2441 hdr->num_buffers)); in receive_mergeable()
2446 u64_stats_add(&stats->bytes, len); in receive_mergeable()
2453 if (unlikely(len > truesize - room)) { in receive_mergeable()
2455 dev->name, len, (unsigned long)(truesize - room)); in receive_mergeable()
2466 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); in receive_mergeable()
2474 u64_stats_inc(&stats->drops); in receive_mergeable()
2487 switch (__le16_to_cpu(hdr_hash->hash_report)) { in virtio_skb_set_hash()
2505 skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type); in virtio_skb_set_hash()
2512 struct net_device *dev = vi->dev; in virtnet_receive_done()
2515 if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) in virtnet_receive_done()
2516 virtio_skb_set_hash(&hdr->hash_v1_hdr, skb); in virtnet_receive_done()
2519 skb->ip_summed = CHECKSUM_UNNECESSARY; in virtnet_receive_done()
2521 if (virtio_net_hdr_to_skb(skb, &hdr->hdr, in virtnet_receive_done()
2522 virtio_is_little_endian(vi->vdev))) { in virtnet_receive_done()
2523 net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n", in virtnet_receive_done()
2524 dev->name, hdr->hdr.gso_type, in virtnet_receive_done()
2525 hdr->hdr.gso_size); in virtnet_receive_done()
2529 skb_record_rx_queue(skb, vq2rxq(rq->vq)); in virtnet_receive_done()
2530 skb->protocol = eth_type_trans(skb, dev); in virtnet_receive_done()
2532 ntohs(skb->protocol), skb->len, skb->pkt_type); in virtnet_receive_done()
2534 napi_gro_receive(&rq->napi, skb); in virtnet_receive_done()
2547 struct net_device *dev = vi->dev; in receive_buf()
2551 if (unlikely(len < vi->hdr_len + ETH_HLEN)) { in receive_buf()
2552 pr_debug("%s: short packet %i\n", dev->name, len); in receive_buf()
2559 * These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID in receive_buf()
2561 * 2. XDP doesn't work with partially checksummed packets (refer to in receive_buf()
2562 * virtnet_xdp_set()), so packets marked as in receive_buf()
2565 flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags; in receive_buf()
2567 if (vi->mergeable_rx_bufs) in receive_buf()
2570 else if (vi->big_packets) in receive_buf()
2583 * not need to use mergeable_len_to_ctx here - it is enough
2592 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; in add_recvbuf_small()
2598 if (unlikely(!skb_page_frag_refill(len, &rq->alloc_frag, gfp))) in add_recvbuf_small()
2599 return -ENOMEM; in add_recvbuf_small()
2603 return -ENOMEM; in add_recvbuf_small()
2607 virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN); in add_recvbuf_small()
2609 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp); in add_recvbuf_small()
2625 sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2); in add_recvbuf_big()
2627 /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */ in add_recvbuf_big()
2628 for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) { in add_recvbuf_big()
2633 return -ENOMEM; in add_recvbuf_big()
2635 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); in add_recvbuf_big()
2638 first->private = (unsigned long)list; in add_recvbuf_big()
2645 return -ENOMEM; in add_recvbuf_big()
2649 /* rq->sg[0], rq->sg[1] share the same page */ in add_recvbuf_big()
2650 /* a separate rq->sg[0] for header - required in case !any_header_sg */ in add_recvbuf_big()
2651 sg_set_buf(&rq->sg[0], p, vi->hdr_len); in add_recvbuf_big()
2653 /* rq->sg[1] for data packet, from offset */ in add_recvbuf_big()
2655 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); in add_recvbuf_big()
2658 first->private = (unsigned long)list; in add_recvbuf_big()
2659 err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2, in add_recvbuf_big()
2671 struct virtnet_info *vi = rq->vq->vdev->priv; in get_mergeable_buf_len()
2672 const size_t hdr_len = vi->hdr_len; in get_mergeable_buf_len()
2676 return PAGE_SIZE - room; in get_mergeable_buf_len()
2679 rq->min_buf_len, PAGE_SIZE - hdr_len); in get_mergeable_buf_len()
2687 struct page_frag *alloc_frag = &rq->alloc_frag; in add_recvbuf_mergeable()
2698 * disabled GSO for XDP, it won't be a big issue. in add_recvbuf_mergeable()
2700 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); in add_recvbuf_mergeable()
2703 return -ENOMEM; in add_recvbuf_mergeable()
2705 if (!alloc_frag->offset && len + room + sizeof(struct virtnet_rq_dma) > alloc_frag->size) in add_recvbuf_mergeable()
2706 len -= sizeof(struct virtnet_rq_dma); in add_recvbuf_mergeable()
2710 return -ENOMEM; in add_recvbuf_mergeable()
2713 hole = alloc_frag->size - alloc_frag->offset; in add_recvbuf_mergeable()
2723 alloc_frag->offset += hole; in add_recvbuf_mergeable()
2729 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp); in add_recvbuf_mergeable()
2742 * before we're receiving packets, or from refill_work which is
2750 if (rq->xsk_pool) { in try_fill_recv()
2751 err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk_pool, gfp); in try_fill_recv()
2756 if (vi->mergeable_rx_bufs) in try_fill_recv()
2758 else if (vi->big_packets) in try_fill_recv()
2765 } while (rq->vq->num_free); in try_fill_recv()
2768 if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { in try_fill_recv()
2771 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); in try_fill_recv()
2772 u64_stats_inc(&rq->stats.kicks); in try_fill_recv()
2773 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); in try_fill_recv()
2776 return err != -ENOMEM; in try_fill_recv()
2781 struct virtnet_info *vi = rvq->vdev->priv; in skb_recv_done()
2782 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; in skb_recv_done()
2784 rq->calls++; in skb_recv_done()
2785 virtqueue_napi_schedule(&rq->napi, rvq); in skb_recv_done()
2794 * won't get another interrupt, so process any outstanding packets now. in virtnet_napi_do_enable()
2804 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_napi_enable()
2805 int qidx = vq2rxq(rq->vq); in virtnet_napi_enable()
2807 virtnet_napi_do_enable(rq->vq, &rq->napi); in virtnet_napi_enable()
2808 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, &rq->napi); in virtnet_napi_enable()
2813 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_napi_tx_enable()
2814 struct napi_struct *napi = &sq->napi; in virtnet_napi_tx_enable()
2815 int qidx = vq2txq(sq->vq); in virtnet_napi_tx_enable()
2817 if (!napi->weight) in virtnet_napi_tx_enable()
2820 /* Tx napi touches cachelines on the cpu handling tx interrupts. Only in virtnet_napi_tx_enable()
2823 if (!vi->affinity_hint_set) { in virtnet_napi_tx_enable()
2824 napi->weight = 0; in virtnet_napi_tx_enable()
2828 virtnet_napi_do_enable(sq->vq, napi); in virtnet_napi_tx_enable()
2829 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, napi); in virtnet_napi_tx_enable()
2834 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_napi_tx_disable()
2835 struct napi_struct *napi = &sq->napi; in virtnet_napi_tx_disable()
2836 int qidx = vq2txq(sq->vq); in virtnet_napi_tx_disable()
2838 if (napi->weight) { in virtnet_napi_tx_disable()
2839 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, NULL); in virtnet_napi_tx_disable()
2846 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_napi_disable()
2847 struct napi_struct *napi = &rq->napi; in virtnet_napi_disable()
2848 int qidx = vq2rxq(rq->vq); in virtnet_napi_disable()
2850 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, NULL); in virtnet_napi_disable()
2861 for (i = 0; i < vi->curr_queue_pairs; i++) { in refill_work()
2862 struct receive_queue *rq = &vi->rq[i]; in refill_work()
2870 * - cancel refill_work with cancel_delayed_work (note: in refill_work()
2871 * non-sync) in refill_work()
2872 * - cancel refill_work with cancel_delayed_work_sync in in refill_work()
2874 * - wrap all of the work in a lock (perhaps the netdev in refill_work()
2876 * - check netif_running() and return early to avoid a race in refill_work()
2878 napi_disable(&rq->napi); in refill_work()
2880 virtnet_napi_do_enable(rq->vq, &rq->napi); in refill_work()
2886 schedule_delayed_work(&vi->refill, HZ/2); in refill_work()
2897 int packets = 0; in virtnet_receive_xsk_bufs() local
2900 while (packets < budget) { in virtnet_receive_xsk_bufs()
2901 buf = virtqueue_get_buf(rq->vq, &len); in virtnet_receive_xsk_bufs()
2906 packets++; in virtnet_receive_xsk_bufs()
2909 return packets; in virtnet_receive_xsk_bufs()
2919 int packets = 0; in virtnet_receive_packets() local
2922 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_receive_packets()
2924 while (packets < budget && in virtnet_receive_packets()
2927 packets++; in virtnet_receive_packets()
2930 while (packets < budget && in virtnet_receive_packets()
2931 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { in virtnet_receive_packets()
2933 packets++; in virtnet_receive_packets()
2937 return packets; in virtnet_receive_packets()
2943 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_receive()
2945 int i, packets; in virtnet_receive() local
2947 if (rq->xsk_pool) in virtnet_receive()
2948 packets = virtnet_receive_xsk_bufs(vi, rq, budget, xdp_xmit, &stats); in virtnet_receive()
2950 packets = virtnet_receive_packets(vi, rq, budget, xdp_xmit, &stats); in virtnet_receive()
2952 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { in virtnet_receive()
2954 spin_lock(&vi->refill_lock); in virtnet_receive()
2955 if (vi->refill_enabled) in virtnet_receive()
2956 schedule_delayed_work(&vi->refill, 0); in virtnet_receive()
2957 spin_unlock(&vi->refill_lock); in virtnet_receive()
2961 u64_stats_set(&stats.packets, packets); in virtnet_receive()
2962 u64_stats_update_begin(&rq->stats.syncp); in virtnet_receive()
2967 item = (u64_stats_t *)((u8 *)&rq->stats + offset); in virtnet_receive()
2972 u64_stats_add(&rq->stats.packets, u64_stats_read(&stats.packets)); in virtnet_receive()
2973 u64_stats_add(&rq->stats.bytes, u64_stats_read(&stats.bytes)); in virtnet_receive()
2975 u64_stats_update_end(&rq->stats.syncp); in virtnet_receive()
2977 return packets; in virtnet_receive()
2982 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll_cleantx()
2983 unsigned int index = vq2rxq(rq->vq); in virtnet_poll_cleantx()
2984 struct send_queue *sq = &vi->sq[index]; in virtnet_poll_cleantx()
2985 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_cleantx()
2987 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) in virtnet_poll_cleantx()
2991 if (sq->reset) { in virtnet_poll_cleantx()
2997 virtqueue_disable_cb(sq->vq); in virtnet_poll_cleantx()
2999 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); in virtnet_poll_cleantx()
3001 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) { in virtnet_poll_cleantx()
3003 u64_stats_update_begin(&sq->stats.syncp); in virtnet_poll_cleantx()
3004 u64_stats_inc(&sq->stats.wake); in virtnet_poll_cleantx()
3005 u64_stats_update_end(&sq->stats.syncp); in virtnet_poll_cleantx()
3018 if (!rq->packets_in_napi) in virtnet_rx_dim_update()
3024 dim_update_sample(rq->calls, in virtnet_rx_dim_update()
3025 u64_stats_read(&rq->stats.packets), in virtnet_rx_dim_update()
3026 u64_stats_read(&rq->stats.bytes), in virtnet_rx_dim_update()
3029 net_dim(&rq->dim, &cur_sample); in virtnet_rx_dim_update()
3030 rq->packets_in_napi = 0; in virtnet_rx_dim_update()
3037 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll()
3046 rq->packets_in_napi += received; in virtnet_poll()
3051 /* Out of packets? */ in virtnet_poll()
3053 napi_complete = virtqueue_napi_complete(napi, rq->vq, received); in virtnet_poll()
3058 if (napi_complete && rq->dim_enabled) in virtnet_poll()
3064 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { in virtnet_poll()
3065 u64_stats_update_begin(&sq->stats.syncp); in virtnet_poll()
3066 u64_stats_inc(&sq->stats.kicks); in virtnet_poll()
3067 u64_stats_update_end(&sq->stats.syncp); in virtnet_poll()
3077 virtnet_napi_tx_disable(&vi->sq[qp_index]); in virtnet_disable_queue_pair()
3078 virtnet_napi_disable(&vi->rq[qp_index]); in virtnet_disable_queue_pair()
3079 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); in virtnet_disable_queue_pair()
3084 struct net_device *dev = vi->dev; in virtnet_enable_queue_pair()
3087 err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index, in virtnet_enable_queue_pair()
3088 vi->rq[qp_index].napi.napi_id); in virtnet_enable_queue_pair()
3092 err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq, in virtnet_enable_queue_pair()
3097 virtnet_napi_enable(&vi->rq[qp_index]); in virtnet_enable_queue_pair()
3098 virtnet_napi_tx_enable(&vi->sq[qp_index]); in virtnet_enable_queue_pair()
3103 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); in virtnet_enable_queue_pair()
3109 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_cancel_dim()
3119 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) in virtnet_update_settings()
3122 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); in virtnet_update_settings()
3125 vi->speed = speed; in virtnet_update_settings()
3127 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); in virtnet_update_settings()
3130 vi->duplex = duplex; in virtnet_update_settings()
3140 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_open()
3141 if (i < vi->curr_queue_pairs) in virtnet_open()
3143 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) in virtnet_open()
3144 schedule_delayed_work(&vi->refill, 0); in virtnet_open()
3151 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { in virtnet_open()
3152 if (vi->status & VIRTIO_NET_S_LINK_UP) in virtnet_open()
3153 netif_carrier_on(vi->dev); in virtnet_open()
3154 virtio_config_driver_enable(vi->vdev); in virtnet_open()
3156 vi->status = VIRTIO_NET_S_LINK_UP; in virtnet_open()
3164 cancel_delayed_work_sync(&vi->refill); in virtnet_open()
3166 for (i--; i >= 0; i--) { in virtnet_open()
3168 virtnet_cancel_dim(vi, &vi->rq[i].dim); in virtnet_open()
3177 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_poll_tx()
3178 unsigned int index = vq2txq(sq->vq); in virtnet_poll_tx()
3189 txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_tx()
3191 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
3193 if (sq->xsk_pool) in virtnet_poll_tx()
3194 xsk_done = virtnet_xsk_xmit(sq, sq->xsk_pool, budget); in virtnet_poll_tx()
3198 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) { in virtnet_poll_tx()
3200 u64_stats_update_begin(&sq->stats.syncp); in virtnet_poll_tx()
3201 u64_stats_inc(&sq->stats.wake); in virtnet_poll_tx()
3202 u64_stats_update_end(&sq->stats.syncp); in virtnet_poll_tx()
3212 opaque = virtqueue_enable_cb_prepare(sq->vq); in virtnet_poll_tx()
3217 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
3222 if (unlikely(virtqueue_poll(sq->vq, opaque))) { in virtnet_poll_tx()
3225 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
3238 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; in xmit_skb()
3239 struct virtnet_info *vi = sq->vq->vdev->priv; in xmit_skb()
3241 unsigned hdr_len = vi->hdr_len; in xmit_skb()
3244 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); in xmit_skb()
3246 can_push = vi->any_header_sg && in xmit_skb()
3247 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && in xmit_skb()
3252 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); in xmit_skb()
3254 hdr = &skb_vnet_common_hdr(skb)->mrg_hdr; in xmit_skb()
3256 if (virtio_net_hdr_from_skb(skb, &hdr->hdr, in xmit_skb()
3257 virtio_is_little_endian(vi->vdev), false, in xmit_skb()
3259 return -EPROTO; in xmit_skb()
3261 if (vi->mergeable_rx_bufs) in xmit_skb()
3262 hdr->num_buffers = 0; in xmit_skb()
3264 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); in xmit_skb()
3267 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); in xmit_skb()
3270 /* Pull header back to avoid skew in tx bytes calculations. */ in xmit_skb()
3273 sg_set_buf(sq->sg, hdr, hdr_len); in xmit_skb()
3274 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); in xmit_skb()
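
The xmit_skb() lines above show the two possible placements of the virtio-net header: with any_header_sg negotiated and suitably aligned headroom, the header is pushed directly in front of the packet so header plus data form one contiguous scatterlist run; otherwise the header lives in the skb control block (skb_vnet_common_hdr()) and takes its own leading sg entry, which is why the fallback path hands sq->sg + 1 to skb_to_sgvec(). A toy standalone sketch of just that decision; DEMO_HDR_LEN, struct demo_pkt and the alignment stand-in are assumptions, not the driver's definitions:

#include <stdio.h>
#include <string.h>

#define DEMO_HDR_LEN 12	/* stands in for vi->hdr_len; the real size is feature-dependent */

/* Toy packet with headroom, standing in for an sk_buff. */
struct demo_pkt {
	_Alignas(sizeof(unsigned long)) unsigned char buf[256];
	int headroom;	/* free bytes in front of the payload */
	int len;	/* payload length */
};

int main(void)
{
	struct demo_pkt p = { .headroom = 64, .len = 100 };
	unsigned char *data = p.buf + p.headroom;
	int can_push = p.headroom >= DEMO_HDR_LEN &&
		       ((unsigned long)data & (__alignof__(unsigned long) - 1)) == 0;

	if (can_push) {
		/* Header written directly in front of the payload: header and
		 * data are contiguous, so one sg table starting at the header
		 * covers everything (sq->sg at offset 0 in the driver). */
		memset(data - DEMO_HDR_LEN, 0, DEMO_HDR_LEN);
		printf("pushed header: contiguous %d bytes\n", DEMO_HDR_LEN + p.len);
	} else {
		/* Header kept in a side buffer: it takes its own leading sg
		 * entry and the payload entries start one slot later, which
		 * is the sq->sg + 1 case in xmit_skb(). */
		unsigned char hdr[DEMO_HDR_LEN];

		memset(hdr, 0, sizeof(hdr));
		printf("separate header: %zu + %d bytes in 1 + N sg entries\n",
		       sizeof(hdr), p.len);
	}
	return 0;
}
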
3288 struct send_queue *sq = &vi->sq[qnum]; in start_xmit()
3292 bool use_napi = sq->napi.weight; in start_xmit()
3298 virtqueue_disable_cb(sq->vq); in start_xmit()
3310 dev_warn(&dev->dev, in start_xmit()
3329 kick = use_napi ? __netdev_tx_sent_queue(txq, skb->len, xmit_more) : in start_xmit()
3332 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { in start_xmit()
3333 u64_stats_update_begin(&sq->stats.syncp); in start_xmit()
3334 u64_stats_inc(&sq->stats.kicks); in start_xmit()
3335 u64_stats_update_end(&sq->stats.syncp); in start_xmit()
3339 if (use_napi && kick && unlikely(!virtqueue_enable_cb_delayed(sq->vq))) in start_xmit()
3340 virtqueue_napi_schedule(&sq->napi, sq->vq); in start_xmit()
3348 bool running = netif_running(vi->dev); in __virtnet_rx_pause()
3352 virtnet_cancel_dim(vi, &rq->dim); in __virtnet_rx_pause()
3365 cancel_delayed_work_sync(&vi->refill); in virtnet_rx_pause_all()
3366 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_rx_pause_all()
3367 __virtnet_rx_pause(vi, &vi->rq[i]); in virtnet_rx_pause_all()
3377 cancel_delayed_work_sync(&vi->refill); in virtnet_rx_pause()
3385 bool running = netif_running(vi->dev); in __virtnet_rx_resume()
3394 schedule_delayed_work(&vi->refill, 0); in __virtnet_rx_resume()
3402 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_rx_resume_all()
3403 if (i < vi->curr_queue_pairs) in virtnet_rx_resume_all()
3404 __virtnet_rx_resume(vi, &vi->rq[i], true); in virtnet_rx_resume_all()
3406 __virtnet_rx_resume(vi, &vi->rq[i], false); in virtnet_rx_resume_all()
3421 qindex = rq - vi->rq; in virtnet_rx_resize()
3425 err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf, NULL); in virtnet_rx_resize()
3427 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); in virtnet_rx_resize()
3435 bool running = netif_running(vi->dev); in virtnet_tx_pause()
3439 qindex = sq - vi->sq; in virtnet_tx_pause()
3444 txq = netdev_get_tx_queue(vi->dev, qindex); in virtnet_tx_pause()
3452 sq->reset = true; in virtnet_tx_pause()
3454 /* Prevent the upper layer from trying to send packets. */ in virtnet_tx_pause()
3455 netif_stop_subqueue(vi->dev, qindex); in virtnet_tx_pause()
3462 bool running = netif_running(vi->dev); in virtnet_tx_resume()
3466 qindex = sq - vi->sq; in virtnet_tx_resume()
3468 txq = netdev_get_tx_queue(vi->dev, qindex); in virtnet_tx_resume()
3471 sq->reset = false; in virtnet_tx_resume()
3484 qindex = sq - vi->sq; in virtnet_tx_resize()
3488 err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf, in virtnet_tx_resize()
3491 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); in virtnet_tx_resize()
3513 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); in virtnet_send_command_reply()
3515 mutex_lock(&vi->cvq_lock); in virtnet_send_command_reply()
3516 vi->ctrl->status = ~0; in virtnet_send_command_reply()
3517 vi->ctrl->hdr.class = class; in virtnet_send_command_reply()
3518 vi->ctrl->hdr.cmd = cmd; in virtnet_send_command_reply()
3520 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); in virtnet_send_command_reply()
3527 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); in virtnet_send_command_reply()
3534 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC); in virtnet_send_command_reply()
3536 dev_warn(&vi->vdev->dev, in virtnet_send_command_reply()
3538 mutex_unlock(&vi->cvq_lock); in virtnet_send_command_reply()
3542 if (unlikely(!virtqueue_kick(vi->cvq))) in virtnet_send_command_reply()
3548 while (!virtqueue_get_buf(vi->cvq, &tmp) && in virtnet_send_command_reply()
3549 !virtqueue_is_broken(vi->cvq)) { in virtnet_send_command_reply()
3555 ok = vi->ctrl->status == VIRTIO_NET_OK; in virtnet_send_command_reply()
3556 mutex_unlock(&vi->cvq_lock); in virtnet_send_command_reply()
3569 struct virtio_device *vdev = vi->vdev; in virtnet_set_mac_address()
3574 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_set_mac_address()
3575 return -EOPNOTSUPP; in virtnet_set_mac_address()
3579 return -ENOMEM; in virtnet_set_mac_address()
3586 sg_init_one(&sg, addr->sa_data, dev->addr_len); in virtnet_set_mac_address()
3589 dev_warn(&vdev->dev, in virtnet_set_mac_address()
3591 ret = -EINVAL; in virtnet_set_mac_address()
3599 for (i = 0; i < dev->addr_len; i++) in virtnet_set_mac_address()
3602 i, addr->sa_data[i]); in virtnet_set_mac_address()
3620 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_stats()
3622 struct receive_queue *rq = &vi->rq[i]; in virtnet_stats()
3623 struct send_queue *sq = &vi->sq[i]; in virtnet_stats()
3626 start = u64_stats_fetch_begin(&sq->stats.syncp); in virtnet_stats()
3627 tpackets = u64_stats_read(&sq->stats.packets); in virtnet_stats()
3628 tbytes = u64_stats_read(&sq->stats.bytes); in virtnet_stats()
3629 terrors = u64_stats_read(&sq->stats.tx_timeouts); in virtnet_stats()
3630 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); in virtnet_stats()
3633 start = u64_stats_fetch_begin(&rq->stats.syncp); in virtnet_stats()
3634 rpackets = u64_stats_read(&rq->stats.packets); in virtnet_stats()
3635 rbytes = u64_stats_read(&rq->stats.bytes); in virtnet_stats()
3636 rdrops = u64_stats_read(&rq->stats.drops); in virtnet_stats()
3637 } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); in virtnet_stats()
3639 tot->rx_packets += rpackets; in virtnet_stats()
3640 tot->tx_packets += tpackets; in virtnet_stats()
3641 tot->rx_bytes += rbytes; in virtnet_stats()
3642 tot->tx_bytes += tbytes; in virtnet_stats()
3643 tot->rx_dropped += rdrops; in virtnet_stats()
3644 tot->tx_errors += terrors; in virtnet_stats()
3647 tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped); in virtnet_stats()
3648 tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors); in virtnet_stats()
3649 tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors); in virtnet_stats()
3650 tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors); in virtnet_stats()
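
The u64_stats_fetch_begin()/u64_stats_fetch_retry() loops above take a lockless snapshot of each queue's counters: on 32-bit SMP builds the 64-bit values cannot be read atomically, so the read is retried whenever the writer side (the u64_stats_update_begin()/u64_stats_update_end() pairs in the hot paths earlier in this listing) advanced the sequence count mid-read, while on 64-bit builds the helpers reduce to plain loads.
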
3657 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); in virtnet_ack_link_announce()
3667 for (; i < vi->rss_indir_table_size; ++i) { in virtnet_rss_update_by_qpairs()
3669 vi->rss_hdr->indirection_table[i] = cpu_to_le16(indir_val); in virtnet_rss_update_by_qpairs()
3671 vi->rss_trailer.max_tx_vq = cpu_to_le16(queue_pairs); in virtnet_rss_update_by_qpairs()
3679 struct net_device *dev = vi->dev; in virtnet_set_queues()
3682 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) in virtnet_set_queues()
3692 if (vi->has_rss && !netif_is_rxfh_configured(dev)) { in virtnet_set_queues()
3693 old_rss_hdr = vi->rss_hdr; in virtnet_set_queues()
3694 old_rss_trailer = vi->rss_trailer; in virtnet_set_queues()
3695 vi->rss_hdr = devm_kzalloc(&dev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL); in virtnet_set_queues()
3696 if (!vi->rss_hdr) { in virtnet_set_queues()
3697 vi->rss_hdr = old_rss_hdr; in virtnet_set_queues()
3698 return -ENOMEM; in virtnet_set_queues()
3701 *vi->rss_hdr = *old_rss_hdr; in virtnet_set_queues()
3706 devm_kfree(&dev->dev, vi->rss_hdr); in virtnet_set_queues()
3707 vi->rss_hdr = old_rss_hdr; in virtnet_set_queues()
3708 vi->rss_trailer = old_rss_trailer; in virtnet_set_queues()
3710 dev_warn(&dev->dev, "Failed to set num of queue pairs to %d, because committing RSS failed\n", in virtnet_set_queues()
3712 return -EINVAL; in virtnet_set_queues()
3714 devm_kfree(&dev->dev, old_rss_hdr); in virtnet_set_queues()
3720 return -ENOMEM; in virtnet_set_queues()
3722 mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); in virtnet_set_queues()
3727 dev_warn(&dev->dev, "Failed to set num of queue pairs to %d\n", in virtnet_set_queues()
3729 return -EINVAL; in virtnet_set_queues()
3732 vi->curr_queue_pairs = queue_pairs; in virtnet_set_queues()
3734 spin_lock_bh(&vi->refill_lock); in virtnet_set_queues()
3735 if (dev->flags & IFF_UP && vi->refill_enabled) in virtnet_set_queues()
3736 schedule_delayed_work(&vi->refill, 0); in virtnet_set_queues()
3737 spin_unlock_bh(&vi->refill_lock); in virtnet_set_queues()
3749 /* Make sure refill_work doesn't re-enable napi! */ in virtnet_close()
3750 cancel_delayed_work_sync(&vi->refill); in virtnet_close()
3754 virtio_config_driver_disable(vi->vdev); in virtnet_close()
3758 cancel_work_sync(&vi->config_work); in virtnet_close()
3760 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_close()
3762 virtnet_cancel_dim(vi, &vi->rq[i].dim); in virtnet_close()
3775 struct net_device *dev = vi->dev; in virtnet_rx_mode_work()
3785 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) in virtnet_rx_mode_work()
3790 dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n"); in virtnet_rx_mode_work()
3796 *promisc_allmulti = !!(dev->flags & IFF_PROMISC); in virtnet_rx_mode_work()
3801 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", in virtnet_rx_mode_work()
3804 *promisc_allmulti = !!(dev->flags & IFF_ALLMULTI); in virtnet_rx_mode_work()
3809 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", in virtnet_rx_mode_work()
3816 /* MAC filter - use one buffer for both lists */ in virtnet_rx_mode_work()
3818 (2 * sizeof(mac_data->entries)), GFP_ATOMIC); in virtnet_rx_mode_work()
3829 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); in virtnet_rx_mode_work()
3832 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); in virtnet_rx_mode_work()
3835 sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); in virtnet_rx_mode_work()
3838 mac_data = (void *)&mac_data->macs[uc_count][0]; in virtnet_rx_mode_work()
3840 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); in virtnet_rx_mode_work()
3843 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); in virtnet_rx_mode_work()
3848 sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); in virtnet_rx_mode_work()
3852 dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); in virtnet_rx_mode_work()
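
The virtnet_rx_mode_work() lines above build the MAC filter payload as two back-to-back count-plus-addresses tables in a single allocation: a 32-bit entry count followed by that many 6-byte MACs for the unicast list, then the same layout again for the multicast list, which is why the second table starts at &mac_data->macs[uc_count][0]. A small standalone sketch of that layout; struct demo_ctrl_mac is a local illustrative mirror of the wire format, not the kernel's struct virtio_net_ctrl_mac:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define ETH_ALEN 6

/* Illustrative mirror of the wire layout: an entry count, then that many
 * 6-byte MAC addresses immediately after it. */
struct demo_ctrl_mac {
	uint32_t entries;
	uint8_t macs[][ETH_ALEN];
};

int main(void)
{
	int uc_count = 2, mc_count = 3;
	size_t sz = sizeof(struct demo_ctrl_mac) + (uc_count + mc_count) * ETH_ALEN +
		    sizeof(uint32_t);	/* second table's own 'entries' field */
	struct demo_ctrl_mac *uc = calloc(1, sz);
	struct demo_ctrl_mac *mc;

	if (!uc)
		return 1;
	uc->entries = uc_count;
	/* ... fill uc->macs[0 .. uc_count-1] from the unicast list ... */

	/* The multicast table begins right after the last unicast MAC. */
	mc = (struct demo_ctrl_mac *)&uc->macs[uc_count][0];
	mc->entries = mc_count;
	/* ... fill mc->macs[0 .. mc_count-1] from the multicast list ... */

	printf("uc table %zu bytes, mc table %zu bytes\n",
	       sizeof(uc->entries) + uc_count * ETH_ALEN,
	       sizeof(mc->entries) + mc_count * ETH_ALEN);
	free(uc);
	return 0;
}
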
3863 if (vi->rx_mode_work_enabled) in virtnet_set_rx_mode()
3864 schedule_work(&vi->rx_mode_work); in virtnet_set_rx_mode()
3876 return -ENOMEM; in virtnet_vlan_rx_add_vid()
3878 *_vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_add_vid()
3883 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); in virtnet_vlan_rx_add_vid()
3896 return -ENOMEM; in virtnet_vlan_rx_kill_vid()
3898 *_vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_kill_vid()
3903 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); in virtnet_vlan_rx_kill_vid()
3911 if (vi->affinity_hint_set) { in virtnet_clean_affinity()
3912 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_clean_affinity()
3913 virtqueue_set_affinity(vi->rq[i].vq, NULL); in virtnet_clean_affinity()
3914 virtqueue_set_affinity(vi->sq[i].vq, NULL); in virtnet_clean_affinity()
3917 vi->affinity_hint_set = false; in virtnet_clean_affinity()
3936 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); in virtnet_set_affinity()
3937 stragglers = num_cpu >= vi->curr_queue_pairs ? in virtnet_set_affinity()
3938 num_cpu % vi->curr_queue_pairs : in virtnet_set_affinity()
3941 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_set_affinity()
3945 if (!group_size--) { in virtnet_set_affinity()
3952 virtqueue_set_affinity(vi->rq[i].vq, mask); in virtnet_set_affinity()
3953 virtqueue_set_affinity(vi->sq[i].vq, mask); in virtnet_set_affinity()
3954 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS); in virtnet_set_affinity()
3958 vi->affinity_hint_set = true; in virtnet_set_affinity()
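
As a rough worked example of the stride/stragglers spread in virtnet_set_affinity() above (assuming 6 online CPUs, 4 current queue pairs, and the usual grouping of group_size = stride plus one extra CPU for the first "stragglers" queues, which is not fully visible in this listing): stride = max(6 / 4, 1) = 1 and stragglers = 6 % 4 = 2, so queue pairs 0 and 1 are each given 2 CPUs, queue pairs 2 and 3 get 1 CPU each, and each resulting mask is applied to both virtqueues of the pair as well as to the XPS map via __netif_set_xps_queue().
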
3993 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
3997 &vi->node_dead); in virtnet_cpu_notif_add()
4000 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
4006 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_remove()
4008 &vi->node_dead); in virtnet_cpu_notif_remove()
4019 return -ENOMEM; in virtnet_send_ctrl_coal_vq_cmd()
4021 coal_vq->vqn = cpu_to_le16(vqn); in virtnet_send_ctrl_coal_vq_cmd()
4022 coal_vq->coal.max_usecs = cpu_to_le32(max_usecs); in virtnet_send_ctrl_coal_vq_cmd()
4023 coal_vq->coal.max_packets = cpu_to_le32(max_packets); in virtnet_send_ctrl_coal_vq_cmd()
4029 return -EINVAL; in virtnet_send_ctrl_coal_vq_cmd()
4040 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_rx_ctrl_coal_vq_cmd()
4041 return -EOPNOTSUPP; in virtnet_send_rx_ctrl_coal_vq_cmd()
4048 vi->rq[queue].intr_coal.max_usecs = max_usecs; in virtnet_send_rx_ctrl_coal_vq_cmd()
4049 vi->rq[queue].intr_coal.max_packets = max_packets; in virtnet_send_rx_ctrl_coal_vq_cmd()
4060 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_tx_ctrl_coal_vq_cmd()
4061 return -EOPNOTSUPP; in virtnet_send_tx_ctrl_coal_vq_cmd()
4068 vi->sq[queue].intr_coal.max_usecs = max_usecs; in virtnet_send_tx_ctrl_coal_vq_cmd()
4069 vi->sq[queue].intr_coal.max_packets = max_packets; in virtnet_send_tx_ctrl_coal_vq_cmd()
4081 ring->rx_max_pending = vi->rq[0].vq->num_max; in virtnet_get_ringparam()
4082 ring->tx_max_pending = vi->sq[0].vq->num_max; in virtnet_get_ringparam()
4083 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_get_ringparam()
4084 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_get_ringparam()
4098 if (ring->rx_mini_pending || ring->rx_jumbo_pending) in virtnet_set_ringparam()
4099 return -EINVAL; in virtnet_set_ringparam()
4101 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_set_ringparam()
4102 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_set_ringparam()
4104 if (ring->rx_pending == rx_pending && in virtnet_set_ringparam()
4105 ring->tx_pending == tx_pending) in virtnet_set_ringparam()
4108 if (ring->rx_pending > vi->rq[0].vq->num_max) in virtnet_set_ringparam()
4109 return -EINVAL; in virtnet_set_ringparam()
4111 if (ring->tx_pending > vi->sq[0].vq->num_max) in virtnet_set_ringparam()
4112 return -EINVAL; in virtnet_set_ringparam()
4114 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_set_ringparam()
4115 rq = vi->rq + i; in virtnet_set_ringparam()
4116 sq = vi->sq + i; in virtnet_set_ringparam()
4118 if (ring->tx_pending != tx_pending) { in virtnet_set_ringparam()
4119 err = virtnet_tx_resize(vi, sq, ring->tx_pending); in virtnet_set_ringparam()
4123 /* Upon disabling and re-enabling a transmit virtqueue, the device must in virtnet_set_ringparam()
4126 * did not set any TX coalescing parameters, to 0. in virtnet_set_ringparam()
4129 vi->intr_coal_tx.max_usecs, in virtnet_set_ringparam()
4130 vi->intr_coal_tx.max_packets); in virtnet_set_ringparam()
4132 /* Don't break the tx resize action if the vq coalescing is not in virtnet_set_ringparam()
4135 if (err && err != -EOPNOTSUPP) in virtnet_set_ringparam()
4139 if (ring->rx_pending != rx_pending) { in virtnet_set_ringparam()
4140 err = virtnet_rx_resize(vi, rq, ring->rx_pending); in virtnet_set_ringparam()
4145 mutex_lock(&vi->rq[i].dim_lock); in virtnet_set_ringparam()
4147 vi->intr_coal_rx.max_usecs, in virtnet_set_ringparam()
4148 vi->intr_coal_rx.max_packets); in virtnet_set_ringparam()
4149 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_set_ringparam()
4150 if (err && err != -EOPNOTSUPP) in virtnet_set_ringparam()
4160 struct net_device *dev = vi->dev; in virtnet_commit_rss_command()
4165 sg_set_buf(&sgs[0], vi->rss_hdr, virtnet_rss_hdr_size(vi)); in virtnet_commit_rss_command()
4166 sg_set_buf(&sgs[1], &vi->rss_trailer, virtnet_rss_trailer_size(vi)); in virtnet_commit_rss_command()
4169 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG in virtnet_commit_rss_command()
4176 dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n"); in virtnet_commit_rss_command()
4183 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_supported); in virtnet_init_default_rss()
4184 vi->rss_hash_types_saved = vi->rss_hash_types_supported; in virtnet_init_default_rss()
4185 vi->rss_hdr->indirection_table_mask = vi->rss_indir_table_size in virtnet_init_default_rss()
4186 ? cpu_to_le16(vi->rss_indir_table_size - 1) : 0; in virtnet_init_default_rss()
4187 vi->rss_hdr->unclassified_queue = 0; in virtnet_init_default_rss()
4189 virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs); in virtnet_init_default_rss()
4191 vi->rss_trailer.hash_key_length = vi->rss_key_size; in virtnet_init_default_rss()
4193 netdev_rss_key_fill(vi->rss_hash_key_data, vi->rss_key_size); in virtnet_init_default_rss()
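
As a side note on the indirection table initialised above: with indirection_table_mask set to table_size - 1, steering a packet reduces to indexing the table with the low bits of the flow hash. A minimal standalone sketch follows; all values are illustrative, and the round-robin default fill is my reading of virtnet_rss_update_by_qpairs().

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	enum { INDIR_SIZE = 128 };          /* must be a power of two */
	uint16_t indir[INDIR_SIZE];
	uint32_t mask = INDIR_SIZE - 1;     /* indirection_table_mask */
	uint32_t hash = 0x9e3779b9;         /* stand-in for a Toeplitz hash value */
	unsigned int queue_pairs = 4;

	/* Spread table entries round-robin over the active queue pairs. */
	for (unsigned int i = 0; i < INDIR_SIZE; i++)
		indir[i] = (uint16_t)(i % queue_pairs);

	printf("hash 0x%08x -> slot %u -> rx queue %u\n",
	       hash, hash & mask, (unsigned int)indir[hash & mask]);
	return 0;
}
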
4198 info->data = 0; in virtnet_get_hashflow()
4199 switch (info->flow_type) { in virtnet_get_hashflow()
4201 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) { in virtnet_get_hashflow()
4202 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
4204 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { in virtnet_get_hashflow()
4205 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4209 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) { in virtnet_get_hashflow()
4210 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
4212 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { in virtnet_get_hashflow()
4213 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4217 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) { in virtnet_get_hashflow()
4218 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
4220 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { in virtnet_get_hashflow()
4221 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4225 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) { in virtnet_get_hashflow()
4226 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
4228 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { in virtnet_get_hashflow()
4229 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4233 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) in virtnet_get_hashflow()
4234 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4238 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) in virtnet_get_hashflow()
4239 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4243 info->data = 0; in virtnet_get_hashflow()
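
For reference, the info->data values reported above are ordinary ethtool RXH_* bit combinations; the driver only ever reports "IP addresses only" or "IP addresses plus L4 ports". A trivial illustration:

#include <stdio.h>
#include <linux/ethtool.h>

int main(void)
{
	unsigned long l3_only = RXH_IP_SRC | RXH_IP_DST;
	unsigned long l3_l4 = l3_only | RXH_L4_B_0_1 | RXH_L4_B_2_3;

	printf("IP src/dst only:       0x%lx\n", l3_only);
	printf("IP src/dst + L4 ports: 0x%lx\n", l3_l4);
	return 0;
}
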
4250 u32 new_hashtypes = vi->rss_hash_types_saved; in virtnet_set_hashflow()
4251 bool is_disable = info->data & RXH_DISCARD; in virtnet_set_hashflow()
4252 bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3); in virtnet_set_hashflow()
4255 if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable)) in virtnet_set_hashflow()
4258 switch (info->flow_type) { in virtnet_set_hashflow()
4299 if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported)) in virtnet_set_hashflow()
4302 if (new_hashtypes != vi->rss_hash_types_saved) { in virtnet_set_hashflow()
4303 vi->rss_hash_types_saved = new_hashtypes; in virtnet_set_hashflow()
4304 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved); in virtnet_set_hashflow()
4305 if (vi->dev->features & NETIF_F_RXHASH) in virtnet_set_hashflow()
4316 struct virtio_device *vdev = vi->vdev; in virtnet_get_drvinfo()
4318 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); in virtnet_get_drvinfo()
4319 strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); in virtnet_get_drvinfo()
4320 strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); in virtnet_get_drvinfo()
4324 /* TODO: Eliminate OOO packets during switching */
4329 u16 queue_pairs = channels->combined_count; in virtnet_set_channels()
4332 /* We don't support separate rx/tx channels. in virtnet_set_channels()
4335 if (channels->rx_count || channels->tx_count || channels->other_count) in virtnet_set_channels()
4336 return -EINVAL; in virtnet_set_channels()
4338 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) in virtnet_set_channels()
4339 return -EINVAL; in virtnet_set_channels()
4345 if (vi->rq[0].xdp_prog) in virtnet_set_channels()
4346 return -EINVAL; in virtnet_set_channels()
4377 /* qid == -1: for rx/tx queue total field */
4388 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) { in virtnet_get_stats_string()
4392 virtnet_stats_sprintf(&p, NULL, noq_fmt, num, -1, desc); in virtnet_get_stats_string()
4408 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_get_stats_string()
4415 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_get_stats_string()
4422 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_get_stats_string()
4431 fmt = "tx%u_%s"; in virtnet_get_stats_string()
4439 fmt = "tx%u_hw_%s"; in virtnet_get_stats_string()
4442 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_get_stats_string()
4449 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_get_stats_string()
4456 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_get_stats_string()
4468 /* The stats are written to qstats or ethtool -S */
4490 ctx->data = data; in virtnet_stats_ctx_init()
4491 ctx->to_qstat = to_qstat; in virtnet_stats_ctx_init()
4494 ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc_qstat); in virtnet_stats_ctx_init()
4495 ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc_qstat); in virtnet_stats_ctx_init()
4499 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_stats_ctx_init()
4500 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC; in virtnet_stats_ctx_init()
4501 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat); in virtnet_stats_ctx_init()
4502 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic); in virtnet_stats_ctx_init()
4505 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_stats_ctx_init()
4506 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM; in virtnet_stats_ctx_init()
4507 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat); in virtnet_stats_ctx_init()
4508 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum); in virtnet_stats_ctx_init()
4511 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) { in virtnet_stats_ctx_init()
4512 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_GSO; in virtnet_stats_ctx_init()
4513 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat); in virtnet_stats_ctx_init()
4514 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_gso); in virtnet_stats_ctx_init()
4517 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_stats_ctx_init()
4518 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED; in virtnet_stats_ctx_init()
4519 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat); in virtnet_stats_ctx_init()
4520 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed); in virtnet_stats_ctx_init()
4525 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_stats_ctx_init()
4526 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC; in virtnet_stats_ctx_init()
4527 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat); in virtnet_stats_ctx_init()
4528 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic); in virtnet_stats_ctx_init()
4531 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { in virtnet_stats_ctx_init()
4532 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_CSUM; in virtnet_stats_ctx_init()
4533 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat); in virtnet_stats_ctx_init()
4534 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_csum); in virtnet_stats_ctx_init()
4537 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_stats_ctx_init()
4538 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO; in virtnet_stats_ctx_init()
4539 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat); in virtnet_stats_ctx_init()
4540 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso); in virtnet_stats_ctx_init()
4543 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_stats_ctx_init()
4544 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED; in virtnet_stats_ctx_init()
4545 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat); in virtnet_stats_ctx_init()
4546 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed); in virtnet_stats_ctx_init()
4552 ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc); in virtnet_stats_ctx_init()
4553 ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc); in virtnet_stats_ctx_init()
4555 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) { in virtnet_stats_ctx_init()
4558 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_CVQ; in virtnet_stats_ctx_init()
4559 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_cvq_desc); in virtnet_stats_ctx_init()
4560 ctx->size[queue_type] += sizeof(struct virtio_net_stats_cvq); in virtnet_stats_ctx_init()
4565 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_stats_ctx_init()
4566 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC; in virtnet_stats_ctx_init()
4567 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc); in virtnet_stats_ctx_init()
4568 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic); in virtnet_stats_ctx_init()
4571 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_stats_ctx_init()
4572 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM; in virtnet_stats_ctx_init()
4573 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc); in virtnet_stats_ctx_init()
4574 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum); in virtnet_stats_ctx_init()
4577 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_stats_ctx_init()
4578 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED; in virtnet_stats_ctx_init()
4579 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc); in virtnet_stats_ctx_init()
4580 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed); in virtnet_stats_ctx_init()
4585 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_stats_ctx_init()
4586 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC; in virtnet_stats_ctx_init()
4587 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc); in virtnet_stats_ctx_init()
4588 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic); in virtnet_stats_ctx_init()
4591 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_stats_ctx_init()
4592 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO; in virtnet_stats_ctx_init()
4593 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc); in virtnet_stats_ctx_init()
4594 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso); in virtnet_stats_ctx_init()
4597 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_stats_ctx_init()
4598 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED; in virtnet_stats_ctx_init()
4599 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc); in virtnet_stats_ctx_init()
4600 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed); in virtnet_stats_ctx_init()
4604 /* stats_sum_queue - Calculate the sum of the same fields in sq or rq.
4631 num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ]; in virtnet_fill_total_fields()
4632 num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX]; in virtnet_fill_total_fields()
4633 num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX]; in virtnet_fill_total_fields()
4635 first_rx_q = ctx->data + num_rx + num_tx + num_cq; in virtnet_fill_total_fields()
4636 first_tx_q = first_rx_q + vi->curr_queue_pairs * num_rx; in virtnet_fill_total_fields()
4638 data = ctx->data; in virtnet_fill_total_fields()
4640 stats_sum_queue(data, num_rx, first_rx_q, vi->curr_queue_pairs); in virtnet_fill_total_fields()
4642 data = ctx->data + num_rx; in virtnet_fill_total_fields()
4644 stats_sum_queue(data, num_tx, first_tx_q, vi->curr_queue_pairs); in virtnet_fill_total_fields()
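
To make the pointer arithmetic in virtnet_fill_total_fields() above concrete: the flat ethtool -S buffer holds the RX totals first, then the TX totals, then any control-VQ fields, then one per-queue RX block per receive queue, then one per-queue TX block per send queue. A small sketch with made-up descriptor counts:

#include <stdio.h>

int main(void)
{
	/* Example sizes only; real counts depend on the device's stats capabilities. */
	unsigned int num_rx = 13, num_tx = 9, num_cq = 0;
	unsigned int queue_pairs = 4;

	unsigned int rx_totals = 0;
	unsigned int tx_totals = rx_totals + num_rx;
	unsigned int cq_fields = tx_totals + num_tx;
	unsigned int first_rx_q = cq_fields + num_cq;	/* data + num_rx + num_tx + num_cq */
	unsigned int first_tx_q = first_rx_q + queue_pairs * num_rx;
	unsigned int total = first_tx_q + queue_pairs * num_tx;

	printf("rx totals @%u, tx totals @%u, cvq fields @%u\n",
	       rx_totals, tx_totals, cq_fields);
	printf("per-queue rx blocks @%u, per-queue tx blocks @%u, %u u64 entries total\n",
	       first_rx_q, first_tx_q, total);
	return 0;
}
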
4659 bitmap = ctx->bitmap[queue_type]; in virtnet_fill_stats_qstat()
4671 offset = desc[i].qstat_offset / sizeof(*ctx->data); in virtnet_fill_stats_qstat()
4673 ctx->data[offset] = u64_stats_read(v_stat); in virtnet_fill_stats_qstat()
4738 offset = desc[i].qstat_offset / sizeof(*ctx->data); in virtnet_fill_stats_qstat()
4740 ctx->data[offset] = le64_to_cpu(*v); in virtnet_fill_stats_qstat()
4744 /* virtnet_fill_stats - copy the stats to qstats or ethtool -S
4765 if (ctx->to_qstat) in virtnet_fill_stats()
4768 num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ]; in virtnet_fill_stats()
4769 num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX]; in virtnet_fill_stats()
4770 num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX]; in virtnet_fill_stats()
4773 bitmap = ctx->bitmap[queue_type]; in virtnet_fill_stats()
4779 offset += num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2); in virtnet_fill_stats()
4869 ctx->data[offset + i] = le64_to_cpu(*v); in virtnet_fill_stats()
4877 ctx->data[offset + i] = u64_stats_read(v_stat); in virtnet_fill_stats()
4902 for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) { in __virtnet_get_hw_stats()
4904 qid = le16_to_cpu(hdr->vq_index); in __virtnet_get_hw_stats()
4905 virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type); in __virtnet_get_hw_stats()
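
The loop in __virtnet_get_hw_stats() above walks a reply buffer of variable-size stat records, each prefixed by a header carrying the record's type, originating vq index and byte size. Below is a generic standalone sketch of the same walk; the header layout here is illustrative only (the real layout lives in the virtio-net UAPI header, struct virtio_net_stats_reply_hdr if I recall the name correctly).

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Illustrative record header -- not the real UAPI layout. */
struct rec_hdr {
	uint8_t  type;
	uint8_t  reserved;
	uint16_t vq_index;
	uint16_t size;		/* total record size in bytes, header included */
};

int main(void)
{
	uint8_t reply[64];
	size_t res_size;

	/* Build two fake records back to back. */
	struct rec_hdr h1 = { .type = 1, .vq_index = 0, .size = sizeof(h1) + 8 };
	struct rec_hdr h2 = { .type = 2, .vq_index = 3, .size = sizeof(h2) + 16 };

	memset(reply, 0, sizeof(reply));
	memcpy(reply, &h1, sizeof(h1));
	memcpy(reply + h1.size, &h2, sizeof(h2));
	res_size = h1.size + h2.size;

	/* Walk the records the same way the driver's loop does. */
	for (size_t off = 0; off < res_size; ) {
		struct rec_hdr hdr;

		memcpy(&hdr, reply + off, sizeof(hdr));
		printf("record: type %u, vq %u, %u bytes\n",
		       hdr.type, hdr.vq_index, hdr.size);
		off += hdr.size;
	}
	return 0;
}
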
4917 u64 bitmap = ctx->bitmap[qtype]; in virtnet_make_stat_req()
4922 req->stats[*idx].vq_index = cpu_to_le16(qid); in virtnet_make_stat_req()
4923 req->stats[*idx].types_bitmap[0] = cpu_to_le64(bitmap); in virtnet_make_stat_req()
4927 /* qid: -1: get stats of all vq.
4939 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) in virtnet_get_hw_stats()
4942 if (qid == -1) { in virtnet_get_hw_stats()
4943 last_vq = vi->curr_queue_pairs * 2 - 1; in virtnet_get_hw_stats()
4956 if (ctx->bitmap[qtype]) { in virtnet_get_hw_stats()
4958 res_size += ctx->size[qtype]; in virtnet_get_hw_stats()
4962 if (enable_cvq && ctx->bitmap[VIRTNET_Q_TYPE_CQ]) { in virtnet_get_hw_stats()
4963 res_size += ctx->size[VIRTNET_Q_TYPE_CQ]; in virtnet_get_hw_stats()
4969 return -ENOMEM; in virtnet_get_hw_stats()
4974 return -ENOMEM; in virtnet_get_hw_stats()
4982 virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j); in virtnet_get_hw_stats()
5001 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, -1, &p); in virtnet_get_strings()
5002 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, -1, &p); in virtnet_get_strings()
5006 for (i = 0; i < vi->curr_queue_pairs; ++i) in virtnet_get_strings()
5009 for (i = 0; i < vi->curr_queue_pairs; ++i) in virtnet_get_strings()
5028 vi->curr_queue_pairs * pair_count; in virtnet_get_sset_count()
5030 return -EOPNOTSUPP; in virtnet_get_sset_count()
5043 if (virtnet_get_hw_stats(vi, &ctx, -1)) in virtnet_get_ethtool_stats()
5044 dev_warn(&vi->dev->dev, "Failed to get hw stats.\n"); in virtnet_get_ethtool_stats()
5046 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_get_ethtool_stats()
5047 struct receive_queue *rq = &vi->rq[i]; in virtnet_get_ethtool_stats()
5048 struct send_queue *sq = &vi->sq[i]; in virtnet_get_ethtool_stats()
5050 stats_base = (const u8 *)&rq->stats; in virtnet_get_ethtool_stats()
5052 start = u64_stats_fetch_begin(&rq->stats.syncp); in virtnet_get_ethtool_stats()
5054 } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); in virtnet_get_ethtool_stats()
5056 stats_base = (const u8 *)&sq->stats; in virtnet_get_ethtool_stats()
5058 start = u64_stats_fetch_begin(&sq->stats.syncp); in virtnet_get_ethtool_stats()
5060 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); in virtnet_get_ethtool_stats()
5071 channels->combined_count = vi->curr_queue_pairs; in virtnet_get_channels()
5072 channels->max_combined = vi->max_queue_pairs; in virtnet_get_channels()
5073 channels->max_other = 0; in virtnet_get_channels()
5074 channels->rx_count = 0; in virtnet_get_channels()
5075 channels->tx_count = 0; in virtnet_get_channels()
5076 channels->other_count = 0; in virtnet_get_channels()
5085 &vi->speed, &vi->duplex); in virtnet_set_link_ksettings()
5093 cmd->base.speed = vi->speed; in virtnet_get_link_ksettings()
5094 cmd->base.duplex = vi->duplex; in virtnet_get_link_ksettings()
5095 cmd->base.port = PORT_OTHER; in virtnet_get_link_ksettings()
5109 return -ENOMEM; in virtnet_send_tx_notf_coal_cmds()
5111 coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); in virtnet_send_tx_notf_coal_cmds()
5112 coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); in virtnet_send_tx_notf_coal_cmds()
5118 return -EINVAL; in virtnet_send_tx_notf_coal_cmds()
5120 vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs; in virtnet_send_tx_notf_coal_cmds()
5121 vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames; in virtnet_send_tx_notf_coal_cmds()
5122 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_tx_notf_coal_cmds()
5123 vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs; in virtnet_send_tx_notf_coal_cmds()
5124 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames; in virtnet_send_tx_notf_coal_cmds()
5134 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; in virtnet_send_rx_notf_coal_cmds()
5138 if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_rx_notf_coal_cmds()
5139 return -EOPNOTSUPP; in virtnet_send_rx_notf_coal_cmds()
5141 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs || in virtnet_send_rx_notf_coal_cmds()
5142 ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets)) in virtnet_send_rx_notf_coal_cmds()
5143 return -EINVAL; in virtnet_send_rx_notf_coal_cmds()
5145 if (rx_ctrl_dim_on && !vi->rx_dim_enabled) { in virtnet_send_rx_notf_coal_cmds()
5146 vi->rx_dim_enabled = true; in virtnet_send_rx_notf_coal_cmds()
5147 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
5148 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5149 vi->rq[i].dim_enabled = true; in virtnet_send_rx_notf_coal_cmds()
5150 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5157 return -ENOMEM; in virtnet_send_rx_notf_coal_cmds()
5159 if (!rx_ctrl_dim_on && vi->rx_dim_enabled) { in virtnet_send_rx_notf_coal_cmds()
5160 vi->rx_dim_enabled = false; in virtnet_send_rx_notf_coal_cmds()
5161 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
5162 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5163 vi->rq[i].dim_enabled = false; in virtnet_send_rx_notf_coal_cmds()
5164 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5168 /* Since the per-queue coalescing params can be set, in virtnet_send_rx_notf_coal_cmds()
5172 coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); in virtnet_send_rx_notf_coal_cmds()
5173 coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); in virtnet_send_rx_notf_coal_cmds()
5179 return -EINVAL; in virtnet_send_rx_notf_coal_cmds()
5181 vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; in virtnet_send_rx_notf_coal_cmds()
5182 vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; in virtnet_send_rx_notf_coal_cmds()
5183 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
5184 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5185 vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs; in virtnet_send_rx_notf_coal_cmds()
5186 vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames; in virtnet_send_rx_notf_coal_cmds()
5187 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5213 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; in virtnet_send_rx_notf_coal_vq_cmds()
5218 mutex_lock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5219 cur_rx_dim = vi->rq[queue].dim_enabled; in virtnet_send_rx_notf_coal_vq_cmds()
5220 max_usecs = vi->rq[queue].intr_coal.max_usecs; in virtnet_send_rx_notf_coal_vq_cmds()
5221 max_packets = vi->rq[queue].intr_coal.max_packets; in virtnet_send_rx_notf_coal_vq_cmds()
5223 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs || in virtnet_send_rx_notf_coal_vq_cmds()
5224 ec->rx_max_coalesced_frames != max_packets)) { in virtnet_send_rx_notf_coal_vq_cmds()
5225 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5226 return -EINVAL; in virtnet_send_rx_notf_coal_vq_cmds()
5230 vi->rq[queue].dim_enabled = true; in virtnet_send_rx_notf_coal_vq_cmds()
5231 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5236 vi->rq[queue].dim_enabled = false; in virtnet_send_rx_notf_coal_vq_cmds()
5242 ec->rx_coalesce_usecs, in virtnet_send_rx_notf_coal_vq_cmds()
5243 ec->rx_max_coalesced_frames); in virtnet_send_rx_notf_coal_vq_cmds()
5244 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5259 ec->tx_coalesce_usecs, in virtnet_send_notf_coal_vq_cmds()
5260 ec->tx_max_coalesced_frames); in virtnet_send_notf_coal_vq_cmds()
5272 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rx_dim_work()
5273 struct net_device *dev = vi->dev; in virtnet_rx_dim_work()
5277 qnum = rq - vi->rq; in virtnet_rx_dim_work()
5279 mutex_lock(&rq->dim_lock); in virtnet_rx_dim_work()
5280 if (!rq->dim_enabled) in virtnet_rx_dim_work()
5284 if (update_moder.usec != rq->intr_coal.max_usecs || in virtnet_rx_dim_work()
5285 update_moder.pkts != rq->intr_coal.max_packets) { in virtnet_rx_dim_work()
5291 dev->name, qnum); in virtnet_rx_dim_work()
5294 dim->state = DIM_START_MEASURE; in virtnet_rx_dim_work()
5295 mutex_unlock(&rq->dim_lock); in virtnet_rx_dim_work()
5303 if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs) in virtnet_coal_params_supported()
5304 return -EOPNOTSUPP; in virtnet_coal_params_supported()
5306 if (ec->tx_max_coalesced_frames > 1 || in virtnet_coal_params_supported()
5307 ec->rx_max_coalesced_frames != 1) in virtnet_coal_params_supported()
5308 return -EINVAL; in virtnet_coal_params_supported()
5318 return -EBUSY; in virtnet_should_update_vq_weight()
5335 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; in virtnet_set_coalesce()
5336 for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) { in virtnet_set_coalesce()
5337 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, in virtnet_set_coalesce()
5338 vi->sq[queue_number].napi.weight, in virtnet_set_coalesce()
5344 /* All queues that belong to [queue_number, vi->max_queue_pairs] will be in virtnet_set_coalesce()
5351 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) in virtnet_set_coalesce()
5360 /* xsk xmit depends on the tx napi. So if xsk is active, in virtnet_set_coalesce()
5361 * prevent modifications to tx napi. in virtnet_set_coalesce()
5363 for (i = queue_number; i < vi->max_queue_pairs; i++) { in virtnet_set_coalesce()
5364 if (vi->sq[i].xsk_pool) in virtnet_set_coalesce()
5365 return -EBUSY; in virtnet_set_coalesce()
5368 for (; queue_number < vi->max_queue_pairs; queue_number++) in virtnet_set_coalesce()
5369 vi->sq[queue_number].napi.weight = napi_weight; in virtnet_set_coalesce()
5382 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { in virtnet_get_coalesce()
5383 ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs; in virtnet_get_coalesce()
5384 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs; in virtnet_get_coalesce()
5385 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets; in virtnet_get_coalesce()
5386 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets; in virtnet_get_coalesce()
5387 ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled; in virtnet_get_coalesce()
5389 ec->rx_max_coalesced_frames = 1; in virtnet_get_coalesce()
5391 if (vi->sq[0].napi.weight) in virtnet_get_coalesce()
5392 ec->tx_max_coalesced_frames = 1; in virtnet_get_coalesce()
5406 if (queue >= vi->max_queue_pairs) in virtnet_set_per_queue_coalesce()
5407 return -EINVAL; in virtnet_set_per_queue_coalesce()
5410 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; in virtnet_set_per_queue_coalesce()
5411 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, in virtnet_set_per_queue_coalesce()
5412 vi->sq[queue].napi.weight, in virtnet_set_per_queue_coalesce()
5417 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_set_per_queue_coalesce()
5426 vi->sq[queue].napi.weight = napi_weight; in virtnet_set_per_queue_coalesce()
5437 if (queue >= vi->max_queue_pairs) in virtnet_get_per_queue_coalesce()
5438 return -EINVAL; in virtnet_get_per_queue_coalesce()
5440 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { in virtnet_get_per_queue_coalesce()
5441 mutex_lock(&vi->rq[queue].dim_lock); in virtnet_get_per_queue_coalesce()
5442 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs; in virtnet_get_per_queue_coalesce()
5443 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs; in virtnet_get_per_queue_coalesce()
5444 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets; in virtnet_get_per_queue_coalesce()
5445 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets; in virtnet_get_per_queue_coalesce()
5446 ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled; in virtnet_get_per_queue_coalesce()
5447 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_get_per_queue_coalesce()
5449 ec->rx_max_coalesced_frames = 1; in virtnet_get_per_queue_coalesce()
5451 if (vi->sq[queue].napi.weight) in virtnet_get_per_queue_coalesce()
5452 ec->tx_max_coalesced_frames = 1; in virtnet_get_per_queue_coalesce()
5462 vi->speed = SPEED_UNKNOWN; in virtnet_init_settings()
5463 vi->duplex = DUPLEX_UNKNOWN; in virtnet_init_settings()
5468 return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size; in virtnet_get_rxfh_key_size()
5473 return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size; in virtnet_get_rxfh_indir_size()
5482 if (rxfh->indir) { in virtnet_get_rxfh()
5483 for (i = 0; i < vi->rss_indir_table_size; ++i) in virtnet_get_rxfh()
5484 rxfh->indir[i] = le16_to_cpu(vi->rss_hdr->indirection_table[i]); in virtnet_get_rxfh()
5487 if (rxfh->key) in virtnet_get_rxfh()
5488 memcpy(rxfh->key, vi->rss_hash_key_data, vi->rss_key_size); in virtnet_get_rxfh()
5490 rxfh->hfunc = ETH_RSS_HASH_TOP; in virtnet_get_rxfh()
5503 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && in virtnet_set_rxfh()
5504 rxfh->hfunc != ETH_RSS_HASH_TOP) in virtnet_set_rxfh()
5505 return -EOPNOTSUPP; in virtnet_set_rxfh()
5507 if (rxfh->indir) { in virtnet_set_rxfh()
5508 if (!vi->has_rss) in virtnet_set_rxfh()
5509 return -EOPNOTSUPP; in virtnet_set_rxfh()
5511 for (i = 0; i < vi->rss_indir_table_size; ++i) in virtnet_set_rxfh()
5512 vi->rss_hdr->indirection_table[i] = cpu_to_le16(rxfh->indir[i]); in virtnet_set_rxfh()
5516 if (rxfh->key) { in virtnet_set_rxfh()
5521 if (!vi->has_rss && !vi->has_rss_hash_report) in virtnet_set_rxfh()
5522 return -EOPNOTSUPP; in virtnet_set_rxfh()
5524 memcpy(vi->rss_hash_key_data, rxfh->key, vi->rss_key_size); in virtnet_set_rxfh()
5539 switch (info->cmd) { in virtnet_get_rxnfc()
5541 info->data = vi->curr_queue_pairs; in virtnet_get_rxnfc()
5547 rc = -EOPNOTSUPP; in virtnet_get_rxnfc()
5558 switch (info->cmd) { in virtnet_set_rxnfc()
5561 rc = -EINVAL; in virtnet_set_rxnfc()
5565 rc = -EOPNOTSUPP; in virtnet_set_rxnfc()
5602 struct receive_queue *rq = &vi->rq[i]; in virtnet_get_queue_stats_rx()
5608 virtnet_fill_stats(vi, i * 2, &ctx, (void *)&rq->stats, true, 0); in virtnet_get_queue_stats_rx()
5615 struct send_queue *sq = &vi->sq[i]; in virtnet_get_queue_stats_tx()
5621 virtnet_fill_stats(vi, i * 2 + 1, &ctx, (void *)&sq->stats, true, 0); in virtnet_get_queue_stats_tx()
5626 struct netdev_queue_stats_tx *tx) in virtnet_get_base_stats() argument
5630 /* The queue stats of virtio-net will not be reset. So here we in virtnet_get_base_stats()
5633 rx->bytes = 0; in virtnet_get_base_stats()
5634 rx->packets = 0; in virtnet_get_base_stats()
5636 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_get_base_stats()
5637 rx->hw_drops = 0; in virtnet_get_base_stats()
5638 rx->hw_drop_overruns = 0; in virtnet_get_base_stats()
5641 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_get_base_stats()
5642 rx->csum_unnecessary = 0; in virtnet_get_base_stats()
5643 rx->csum_none = 0; in virtnet_get_base_stats()
5644 rx->csum_bad = 0; in virtnet_get_base_stats()
5647 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) { in virtnet_get_base_stats()
5648 rx->hw_gro_packets = 0; in virtnet_get_base_stats()
5649 rx->hw_gro_bytes = 0; in virtnet_get_base_stats()
5650 rx->hw_gro_wire_packets = 0; in virtnet_get_base_stats()
5651 rx->hw_gro_wire_bytes = 0; in virtnet_get_base_stats()
5654 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) in virtnet_get_base_stats()
5655 rx->hw_drop_ratelimits = 0; in virtnet_get_base_stats()
5657 tx->bytes = 0; in virtnet_get_base_stats()
5658 tx->packets = 0; in virtnet_get_base_stats()
5659 tx->stop = 0; in virtnet_get_base_stats()
5660 tx->wake = 0; in virtnet_get_base_stats()
5662 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_get_base_stats()
5663 tx->hw_drops = 0; in virtnet_get_base_stats()
5664 tx->hw_drop_errors = 0; in virtnet_get_base_stats()
5667 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { in virtnet_get_base_stats()
5668 tx->csum_none = 0; in virtnet_get_base_stats()
5669 tx->needs_csum = 0; in virtnet_get_base_stats()
5672 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_get_base_stats()
5673 tx->hw_gso_packets = 0; in virtnet_get_base_stats()
5674 tx->hw_gso_bytes = 0; in virtnet_get_base_stats()
5675 tx->hw_gso_wire_packets = 0; in virtnet_get_base_stats()
5676 tx->hw_gso_wire_bytes = 0; in virtnet_get_base_stats()
5679 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) in virtnet_get_base_stats()
5680 tx->hw_drop_ratelimits = 0; in virtnet_get_base_stats()
5683 dev->real_num_rx_queues, vi->max_queue_pairs, rx, in virtnet_get_base_stats()
5684 dev->real_num_tx_queues, vi->max_queue_pairs, tx); in virtnet_get_base_stats()
5695 struct virtnet_info *vi = vdev->priv; in virtnet_freeze_down()
5698 flush_work(&vi->config_work); in virtnet_freeze_down()
5700 flush_work(&vi->rx_mode_work); in virtnet_freeze_down()
5702 netif_tx_lock_bh(vi->dev); in virtnet_freeze_down()
5703 netif_device_detach(vi->dev); in virtnet_freeze_down()
5704 netif_tx_unlock_bh(vi->dev); in virtnet_freeze_down()
5705 if (netif_running(vi->dev)) { in virtnet_freeze_down()
5707 virtnet_close(vi->dev); in virtnet_freeze_down()
5716 struct virtnet_info *vi = vdev->priv; in virtnet_restore_up()
5728 if (netif_running(vi->dev)) { in virtnet_restore_up()
5730 err = virtnet_open(vi->dev); in virtnet_restore_up()
5736 netif_tx_lock_bh(vi->dev); in virtnet_restore_up()
5737 netif_device_attach(vi->dev); in virtnet_restore_up()
5738 netif_tx_unlock_bh(vi->dev); in virtnet_restore_up()
5749 return -ENOMEM; in virtnet_set_guest_offloads()
5751 *_offloads = cpu_to_virtio64(vi->vdev, offloads); in virtnet_set_guest_offloads()
5757 dev_warn(&vi->dev->dev, "Fail to set guest offload.\n"); in virtnet_set_guest_offloads()
5758 return -EINVAL; in virtnet_set_guest_offloads()
5768 if (!vi->guest_offloads) in virtnet_clear_guest_offloads()
5776 u64 offloads = vi->guest_offloads; in virtnet_restore_guest_offloads()
5778 if (!vi->guest_offloads) in virtnet_restore_guest_offloads()
5789 qindex = rq - vi->rq; in virtnet_rq_bind_xsk_pool()
5792 err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id); in virtnet_rq_bind_xsk_pool()
5796 err = xdp_rxq_info_reg_mem_model(&rq->xsk_rxq_info, in virtnet_rq_bind_xsk_pool()
5801 xsk_pool_set_rxq_info(pool, &rq->xsk_rxq_info); in virtnet_rq_bind_xsk_pool()
5806 err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf, NULL); in virtnet_rq_bind_xsk_pool()
5808 netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err); in virtnet_rq_bind_xsk_pool()
5813 rq->xsk_pool = pool; in virtnet_rq_bind_xsk_pool()
5821 xdp_rxq_info_unreg(&rq->xsk_rxq_info); in virtnet_rq_bind_xsk_pool()
5831 qindex = sq - vi->sq; in virtnet_sq_bind_xsk_pool()
5835 err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf, in virtnet_sq_bind_xsk_pool()
5838 netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err); in virtnet_sq_bind_xsk_pool()
5842 sq->xsk_pool = pool; in virtnet_sq_bind_xsk_pool()
5860 if (vi->hdr_len > xsk_pool_get_headroom(pool)) in virtnet_xsk_pool_enable()
5861 return -EINVAL; in virtnet_xsk_pool_enable()
5866 if (vi->big_packets && !vi->mergeable_rx_bufs) in virtnet_xsk_pool_enable()
5867 return -ENOENT; in virtnet_xsk_pool_enable()
5869 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_pool_enable()
5870 return -EINVAL; in virtnet_xsk_pool_enable()
5872 sq = &vi->sq[qid]; in virtnet_xsk_pool_enable()
5873 rq = &vi->rq[qid]; in virtnet_xsk_pool_enable()
5875 /* xsk assumes that tx and rx must have the same dma device. The af-xdp in virtnet_xsk_pool_enable()
5877 * sent by the tx. So the dma dev of sq and rq must be the same one. in virtnet_xsk_pool_enable()
5879 * But vq->dma_dev allows each vq to have its own dma dev. So I in virtnet_xsk_pool_enable()
5882 if (virtqueue_dma_dev(rq->vq) != virtqueue_dma_dev(sq->vq)) in virtnet_xsk_pool_enable()
5883 return -EINVAL; in virtnet_xsk_pool_enable()
5885 dma_dev = virtqueue_dma_dev(rq->vq); in virtnet_xsk_pool_enable()
5887 return -EINVAL; in virtnet_xsk_pool_enable()
5889 size = virtqueue_get_vring_size(rq->vq); in virtnet_xsk_pool_enable()
5891 rq->xsk_buffs = kvcalloc(size, sizeof(*rq->xsk_buffs), GFP_KERNEL); in virtnet_xsk_pool_enable()
5892 if (!rq->xsk_buffs) in virtnet_xsk_pool_enable()
5893 return -ENOMEM; in virtnet_xsk_pool_enable()
5895 hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len, in virtnet_xsk_pool_enable()
5897 if (virtqueue_dma_mapping_error(sq->vq, hdr_dma)) { in virtnet_xsk_pool_enable()
5898 err = -ENOMEM; in virtnet_xsk_pool_enable()
5914 /* Now, we do not support tx offloads (such as tx csum), so the tx in virtnet_xsk_pool_enable()
5915 * virtnet hdr is always zero, and all the tx packets can share a single hdr. in virtnet_xsk_pool_enable()
5917 sq->xsk_hdr_dma_addr = hdr_dma; in virtnet_xsk_pool_enable()
5926 virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len, in virtnet_xsk_pool_enable()
5929 kvfree(rq->xsk_buffs); in virtnet_xsk_pool_enable()
5941 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_pool_disable()
5942 return -EINVAL; in virtnet_xsk_pool_disable()
5944 sq = &vi->sq[qid]; in virtnet_xsk_pool_disable()
5945 rq = &vi->rq[qid]; in virtnet_xsk_pool_disable()
5947 pool = rq->xsk_pool; in virtnet_xsk_pool_disable()
5954 virtqueue_dma_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr, in virtnet_xsk_pool_disable()
5955 vi->hdr_len, DMA_TO_DEVICE, 0); in virtnet_xsk_pool_disable()
5956 kvfree(rq->xsk_buffs); in virtnet_xsk_pool_disable()
5963 if (xdp->xsk.pool) in virtnet_xsk_pool_setup()
5964 return virtnet_xsk_pool_enable(dev, xdp->xsk.pool, in virtnet_xsk_pool_setup()
5965 xdp->xsk.queue_id); in virtnet_xsk_pool_setup()
5967 return virtnet_xsk_pool_disable(dev, xdp->xsk.queue_id); in virtnet_xsk_pool_setup()
5975 unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN; in virtnet_xdp_set()
5981 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) in virtnet_xdp_set()
5982 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || in virtnet_xdp_set()
5983 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || in virtnet_xdp_set()
5984 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || in virtnet_xdp_set()
5985 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || in virtnet_xdp_set()
5986 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) || in virtnet_xdp_set()
5987 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) || in virtnet_xdp_set()
5988 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) { in virtnet_xdp_set()
5990 return -EOPNOTSUPP; in virtnet_xdp_set()
5993 if (vi->mergeable_rx_bufs && !vi->any_header_sg) { in virtnet_xdp_set()
5995 return -EINVAL; in virtnet_xdp_set()
5998 if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) { in virtnet_xdp_set()
6000 netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz); in virtnet_xdp_set()
6001 return -EINVAL; in virtnet_xdp_set()
6004 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; in virtnet_xdp_set()
6009 if (curr_qp + xdp_qp > vi->max_queue_pairs) { in virtnet_xdp_set()
6010 …quest %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n", in virtnet_xdp_set()
6011 curr_qp + xdp_qp, vi->max_queue_pairs); in virtnet_xdp_set()
6015 old_prog = rtnl_dereference(vi->rq[0].xdp_prog); in virtnet_xdp_set()
6020 bpf_prog_add(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
6024 /* Make sure NAPI is not using any XDP TX queues for RX. */ in virtnet_xdp_set()
6026 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_xdp_set()
6027 virtnet_napi_tx_disable(&vi->sq[i]); in virtnet_xdp_set()
6031 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
6032 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
6043 vi->xdp_queue_pairs = xdp_qp; in virtnet_xdp_set()
6046 vi->xdp_enabled = true; in virtnet_xdp_set()
6047 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
6048 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
6056 vi->xdp_enabled = false; in virtnet_xdp_set()
6060 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
6064 virtnet_napi_tx_enable(&vi->sq[i]); in virtnet_xdp_set()
6072 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_xdp_set()
6073 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); in virtnet_xdp_set()
6078 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_xdp_set()
6079 virtnet_napi_tx_enable(&vi->sq[i]); in virtnet_xdp_set()
6082 bpf_prog_sub(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
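
A quick arithmetic sketch of the queue accounting behind the "slower locked tx mode" warning in virtnet_xdp_set() above. Only the curr_qp + xdp_qp check is visible in the excerpt; the assumption that XDP wants one dedicated TX queue per possible CPU is mine and purely illustrative.

#include <stdio.h>

int main(void)
{
	/* Illustrative numbers, not read from a real device. */
	unsigned int max_queue_pairs = 8;
	unsigned int curr_queue_pairs = 8, xdp_queue_pairs = 0;
	unsigned int ncpus = 16;

	unsigned int curr_qp = curr_queue_pairs - xdp_queue_pairs;
	unsigned int xdp_qp = ncpus;	/* assumed: one XDP_TX queue per CPU */

	if (curr_qp + xdp_qp > max_queue_pairs)
		printf("request %u queues but max is %u -> XDP_TX/XDP_REDIRECT share locked tx queues\n",
		       curr_qp + xdp_qp, max_queue_pairs);
	else
		printf("enough queues: each CPU can own an XDP_TX queue\n");
	return 0;
}
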
6088 switch (xdp->command) { in virtnet_xdp()
6090 return virtnet_xdp_set(dev, xdp->prog, xdp->extack); in virtnet_xdp()
6094 return -EINVAL; in virtnet_xdp()
6104 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_get_phys_port_name()
6105 return -EOPNOTSUPP; in virtnet_get_phys_port_name()
6109 return -EOPNOTSUPP; in virtnet_get_phys_port_name()
6121 if ((dev->features ^ features) & NETIF_F_GRO_HW) { in virtnet_set_features()
6122 if (vi->xdp_enabled) in virtnet_set_features()
6123 return -EBUSY; in virtnet_set_features()
6126 offloads = vi->guest_offloads_capable; in virtnet_set_features()
6128 offloads = vi->guest_offloads_capable & in virtnet_set_features()
6134 vi->guest_offloads = offloads; in virtnet_set_features()
6137 if ((dev->features ^ features) & NETIF_F_RXHASH) { in virtnet_set_features()
6139 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved); in virtnet_set_features()
6141 vi->rss_hdr->hash_types = cpu_to_le32(VIRTIO_NET_HASH_REPORT_NONE); in virtnet_set_features()
6144 return -EINVAL; in virtnet_set_features()
6153 struct send_queue *sq = &priv->sq[txqueue]; in virtnet_tx_timeout()
6156 u64_stats_update_begin(&sq->stats.syncp); in virtnet_tx_timeout()
6157 u64_stats_inc(&sq->stats.tx_timeouts); in virtnet_tx_timeout()
6158 u64_stats_update_end(&sq->stats.syncp); in virtnet_tx_timeout()
6160 netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n", in virtnet_tx_timeout()
6161 txqueue, sq->name, sq->vq->index, sq->vq->name, in virtnet_tx_timeout()
6162 jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start))); in virtnet_tx_timeout()
6172 ret = net_dim_init_irq_moder(vi->dev, profile_flags, coal_flags, in virtnet_init_irq_moder()
6179 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_init_irq_moder()
6180 net_dim_setting(vi->dev, &vi->rq[i].dim, false); in virtnet_init_irq_moder()
6187 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_free_irq_moder()
6191 net_dim_free_irq_moder(vi->dev); in virtnet_free_irq_moder()
6220 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, in virtnet_config_changed_work()
6225 netdev_notify_peers(vi->dev); in virtnet_config_changed_work()
6232 if (vi->status == v) in virtnet_config_changed_work()
6235 vi->status = v; in virtnet_config_changed_work()
6237 if (vi->status & VIRTIO_NET_S_LINK_UP) { in virtnet_config_changed_work()
6239 netif_carrier_on(vi->dev); in virtnet_config_changed_work()
6240 netif_tx_wake_all_queues(vi->dev); in virtnet_config_changed_work()
6242 netif_carrier_off(vi->dev); in virtnet_config_changed_work()
6243 netif_tx_stop_all_queues(vi->dev); in virtnet_config_changed_work()
6249 struct virtnet_info *vi = vdev->priv; in virtnet_config_changed()
6251 schedule_work(&vi->config_work); in virtnet_config_changed()
6258 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_free_queues()
6259 __netif_napi_del(&vi->rq[i].napi); in virtnet_free_queues()
6260 __netif_napi_del(&vi->sq[i].napi); in virtnet_free_queues()
6264 * we need to respect an RCU grace period before freeing vi->rq in virtnet_free_queues()
6268 kfree(vi->rq); in virtnet_free_queues()
6269 kfree(vi->sq); in virtnet_free_queues()
6270 kfree(vi->ctrl); in virtnet_free_queues()
6278 for (i = 0; i < vi->max_queue_pairs; i++) { in _free_receive_bufs()
6279 while (vi->rq[i].pages) in _free_receive_bufs()
6280 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); in _free_receive_bufs()
6282 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); in _free_receive_bufs()
6283 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); in _free_receive_bufs()
6299 for (i = 0; i < vi->max_queue_pairs; i++) in free_receive_page_frags()
6300 if (vi->rq[i].alloc_frag.page) { in free_receive_page_frags()
6301 if (vi->rq[i].last_dma) in free_receive_page_frags()
6302 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0); in free_receive_page_frags()
6303 put_page(vi->rq[i].alloc_frag.page); in free_receive_page_frags()
6309 struct virtnet_info *vi = vq->vdev->priv; in virtnet_sq_free_unused_buf()
6313 sq = &vi->sq[i]; in virtnet_sq_free_unused_buf()
6326 xsk_tx_completed(sq->xsk_pool, 1); in virtnet_sq_free_unused_buf()
6333 struct virtnet_info *vi = vq->vdev->priv; in virtnet_sq_free_unused_buf_done()
6336 netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i)); in virtnet_sq_free_unused_buf_done()
6344 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
6345 struct virtqueue *vq = vi->sq[i].vq; in free_unused_bufs()
6351 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
6352 struct virtqueue *vq = vi->rq[i].vq; in free_unused_bufs()
6362 struct virtio_device *vdev = vi->vdev; in virtnet_del_vqs()
6366 vdev->config->del_vqs(vdev); in virtnet_del_vqs()
6377 const unsigned int hdr_len = vi->hdr_len; in mergeable_min_buf_len()
6379 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; in mergeable_min_buf_len()
6383 return max(max(min_buf_len, hdr_len) - hdr_len, in mergeable_min_buf_len()
6391 int ret = -ENOMEM; in virtnet_find_vqs()
6396 /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by in virtnet_find_vqs()
6397 * possibly N-1 more RX/TX queue pairs used in multiqueue mode, followed by in virtnet_find_vqs()
6400 total_vqs = vi->max_queue_pairs * 2 + in virtnet_find_vqs()
6401 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); in virtnet_find_vqs()
6410 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_find_vqs()
6419 if (vi->has_cvq) { in virtnet_find_vqs()
6420 vqs_info[total_vqs - 1].name = "control"; in virtnet_find_vqs()
6424 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
6427 sprintf(vi->rq[i].name, "input.%u", i); in virtnet_find_vqs()
6428 sprintf(vi->sq[i].name, "output.%u", i); in virtnet_find_vqs()
6429 vqs_info[rxq2vq(i)].name = vi->rq[i].name; in virtnet_find_vqs()
6430 vqs_info[txq2vq(i)].name = vi->sq[i].name; in virtnet_find_vqs()
6435 ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, vqs_info, NULL); in virtnet_find_vqs()
6439 if (vi->has_cvq) { in virtnet_find_vqs()
6440 vi->cvq = vqs[total_vqs - 1]; in virtnet_find_vqs()
6441 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) in virtnet_find_vqs()
6442 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in virtnet_find_vqs()
6445 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
6446 vi->rq[i].vq = vqs[rxq2vq(i)]; in virtnet_find_vqs()
6447 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); in virtnet_find_vqs()
6448 vi->sq[i].vq = vqs[txq2vq(i)]; in virtnet_find_vqs()
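
The interleaved virtqueue layout that virtnet_find_vqs() assembles above (the rx vq of pair q at index 2q, its tx vq at 2q+1, and the optional control vq last) can be summarised with a tiny standalone sketch. The helper names mirror the driver's rxq2vq()/txq2vq() convention and are reproduced here only for illustration.

#include <stdio.h>

static unsigned int rxq2vq(unsigned int q) { return q * 2; }	/* receive queue -> even vq index */
static unsigned int txq2vq(unsigned int q) { return q * 2 + 1; }	/* transmit queue -> odd vq index */

int main(void)
{
	unsigned int max_queue_pairs = 4;	/* example value */
	unsigned int has_cvq = 1;		/* VIRTIO_NET_F_CTRL_VQ negotiated */
	unsigned int total_vqs = max_queue_pairs * 2 + has_cvq;

	for (unsigned int q = 0; q < max_queue_pairs; q++)
		printf("pair %u: input.%u -> vq %u, output.%u -> vq %u\n",
		       q, q, rxq2vq(q), q, txq2vq(q));
	if (has_cvq)
		printf("control -> vq %u (last of %u)\n", total_vqs - 1, total_vqs);
	return 0;
}
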
6468 if (vi->has_cvq) { in virtnet_alloc_queues()
6469 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); in virtnet_alloc_queues()
6470 if (!vi->ctrl) in virtnet_alloc_queues()
6473 vi->ctrl = NULL; in virtnet_alloc_queues()
6475 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); in virtnet_alloc_queues()
6476 if (!vi->sq) in virtnet_alloc_queues()
6478 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL); in virtnet_alloc_queues()
6479 if (!vi->rq) in virtnet_alloc_queues()
6482 INIT_DELAYED_WORK(&vi->refill, refill_work); in virtnet_alloc_queues()
6483 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_alloc_queues()
6484 vi->rq[i].pages = NULL; in virtnet_alloc_queues()
6485 netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll, in virtnet_alloc_queues()
6487 vi->rq[i].napi.weight = napi_weight; in virtnet_alloc_queues()
6488 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi, in virtnet_alloc_queues()
6492 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); in virtnet_alloc_queues()
6493 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); in virtnet_alloc_queues()
6494 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); in virtnet_alloc_queues()
6496 u64_stats_init(&vi->rq[i].stats.syncp); in virtnet_alloc_queues()
6497 u64_stats_init(&vi->sq[i].stats.syncp); in virtnet_alloc_queues()
6498 mutex_init(&vi->rq[i].dim_lock); in virtnet_alloc_queues()
6504 kfree(vi->sq); in virtnet_alloc_queues()
6506 kfree(vi->ctrl); in virtnet_alloc_queues()
6508 return -ENOMEM; in virtnet_alloc_queues()
6540 struct virtnet_info *vi = netdev_priv(queue->dev); in mergeable_rx_buffer_size_show()
6546 BUG_ON(queue_index >= vi->max_queue_pairs); in mergeable_rx_buffer_size_show()
6547 avg = &vi->rq[queue_index].mrg_avg_pkt_len; in mergeable_rx_buffer_size_show()
6549 get_mergeable_buf_len(&vi->rq[queue_index], avg, in mergeable_rx_buffer_size_show()
6574 dev_err(&vdev->dev, "device advertises feature %s but not %s", in virtnet_fail_on_feature()
6614 if (!vdev->config->get) { in virtnet_validate()
6615 dev_err(&vdev->dev, "%s failure: config access disabled\n", in virtnet_validate()
6617 return -EINVAL; in virtnet_validate()
6621 return -EINVAL; in virtnet_validate()
6633 …dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, dis… in virtnet_validate()
6642 return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || in virtnet_check_guest_gso()
6643 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || in virtnet_check_guest_gso()
6644 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || in virtnet_check_guest_gso()
6645 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || in virtnet_check_guest_gso()
6646 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) && in virtnet_check_guest_gso()
6647 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6)); in virtnet_check_guest_gso()
6654 /* If device can receive ANY guest GSO packets, regardless of mtu, in virtnet_set_big_packets()
6655 * allocate packets of maximum size, otherwise limit it to only in virtnet_set_big_packets()
6659 vi->big_packets = true; in virtnet_set_big_packets()
6660 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE); in virtnet_set_big_packets()
6687 if (!(xdp->rxq->dev->features & NETIF_F_RXHASH)) in virtnet_xdp_rx_hash()
6688 return -ENODATA; in virtnet_xdp_rx_hash()
6690 vi = netdev_priv(xdp->rxq->dev); in virtnet_xdp_rx_hash()
6691 hdr_hash = (struct virtio_net_hdr_v1_hash *)(xdp->data - vi->hdr_len); in virtnet_xdp_rx_hash()
6692 hash_report = __le16_to_cpu(hdr_hash->hash_report); in virtnet_xdp_rx_hash()
6698 *hash = __le32_to_cpu(hdr_hash->hash_value); in virtnet_xdp_rx_hash()
6708 int i, err = -ENOMEM; in virtnet_probe()
6729 return -ENOMEM; in virtnet_probe()
6732 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE | in virtnet_probe()
6734 dev->netdev_ops = &virtnet_netdev; in virtnet_probe()
6735 dev->stat_ops = &virtnet_stat_ops; in virtnet_probe()
6736 dev->features = NETIF_F_HIGHDMA; in virtnet_probe()
6738 dev->ethtool_ops = &virtnet_ethtool_ops; in virtnet_probe()
6739 SET_NETDEV_DEV(dev, &vdev->dev); in virtnet_probe()
6744 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; in virtnet_probe()
6746 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; in virtnet_probe()
6749 dev->hw_features |= NETIF_F_TSO in virtnet_probe()
6754 dev->hw_features |= NETIF_F_TSO; in virtnet_probe()
6756 dev->hw_features |= NETIF_F_TSO6; in virtnet_probe()
6758 dev->hw_features |= NETIF_F_TSO_ECN; in virtnet_probe()
6760 dev->hw_features |= NETIF_F_GSO_UDP_L4; in virtnet_probe()
6762 dev->features |= NETIF_F_GSO_ROBUST; in virtnet_probe()
6764 if (gso) in virtnet_probe()
6765 dev->features |= dev->hw_features & NETIF_F_ALL_TSO; in virtnet_probe()
6766 /* (!csum && gso) case will be fixed by register_netdev() */ in virtnet_probe()
6770 * need to calculate checksums for partially checksummed packets, in virtnet_probe()
6773 * receives fully checksummed packets. The device may assist in in virtnet_probe()
6774 * validating these packets' checksums, so the driver won't have to. in virtnet_probe()
6776 dev->features |= NETIF_F_RXCSUM; in virtnet_probe()
6780 dev->features |= NETIF_F_GRO_HW; in virtnet_probe()
6782 dev->hw_features |= NETIF_F_GRO_HW; in virtnet_probe()
6784 dev->vlan_features = dev->features; in virtnet_probe()
6785 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in virtnet_probe()
6788 /* MTU range: 68 - 65535 */ in virtnet_probe()
6789 dev->min_mtu = MIN_MTU; in virtnet_probe()
6790 dev->max_mtu = MAX_MTU; in virtnet_probe()
6802 dev_info(&vdev->dev, "Assigned random MAC address %pM\n", in virtnet_probe()
6803 dev->dev_addr); in virtnet_probe()
6806 /* Set up our device-specific information */ in virtnet_probe()
6808 vi->dev = dev; in virtnet_probe()
6809 vi->vdev = vdev; in virtnet_probe()
6810 vdev->priv = vi; in virtnet_probe()
6812 INIT_WORK(&vi->config_work, virtnet_config_changed_work); in virtnet_probe()
6813 INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work); in virtnet_probe()
6814 spin_lock_init(&vi->refill_lock); in virtnet_probe()
6817 vi->mergeable_rx_bufs = true; in virtnet_probe()
6818 dev->xdp_features |= NETDEV_XDP_ACT_RX_SG; in virtnet_probe()
6822 vi->has_rss_hash_report = true; in virtnet_probe()
6825 vi->has_rss = true; in virtnet_probe()
6827 vi->rss_indir_table_size = in virtnet_probe()
6831 vi->rss_hdr = devm_kzalloc(&vdev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL); in virtnet_probe()
6832 if (!vi->rss_hdr) { in virtnet_probe()
6833 err = -ENOMEM; in virtnet_probe()
6837 if (vi->has_rss || vi->has_rss_hash_report) { in virtnet_probe()
6838 vi->rss_key_size = in virtnet_probe()
6840 if (vi->rss_key_size > VIRTIO_NET_RSS_MAX_KEY_SIZE) { in virtnet_probe()
6841 dev_err(&vdev->dev, "rss_max_key_size=%u exceeds the limit %u.\n", in virtnet_probe()
6842 vi->rss_key_size, VIRTIO_NET_RSS_MAX_KEY_SIZE); in virtnet_probe()
6843 err = -EINVAL; in virtnet_probe()
6847 vi->rss_hash_types_supported = in virtnet_probe()
6849 vi->rss_hash_types_supported &= in virtnet_probe()
6854 dev->hw_features |= NETIF_F_RXHASH; in virtnet_probe()
6855 dev->xdp_metadata_ops = &virtnet_xdp_metadata_ops; in virtnet_probe()
6858 if (vi->has_rss_hash_report) in virtnet_probe()
6859 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash); in virtnet_probe()
6862 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); in virtnet_probe()
6864 vi->hdr_len = sizeof(struct virtio_net_hdr); in virtnet_probe()
6868 vi->any_header_sg = true; in virtnet_probe()
6871 vi->has_cvq = true; in virtnet_probe()
6873 mutex_init(&vi->cvq_lock); in virtnet_probe()
6879 if (mtu < dev->min_mtu) { in virtnet_probe()
6883 dev_err(&vdev->dev, in virtnet_probe()
6885 mtu, dev->min_mtu); in virtnet_probe()
6886 err = -EINVAL; in virtnet_probe()
6890 dev->mtu = mtu; in virtnet_probe()
6891 dev->max_mtu = mtu; in virtnet_probe()
6896 if (vi->any_header_sg) in virtnet_probe()
6897 dev->needed_headroom = vi->hdr_len; in virtnet_probe()
6901 vi->curr_queue_pairs = max_queue_pairs; in virtnet_probe()
6903 vi->curr_queue_pairs = num_online_cpus(); in virtnet_probe()
6904 vi->max_queue_pairs = max_queue_pairs; in virtnet_probe()
6906 /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ in virtnet_probe()
6911 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { in virtnet_probe()
6912 vi->intr_coal_rx.max_usecs = 0; in virtnet_probe()
6913 vi->intr_coal_tx.max_usecs = 0; in virtnet_probe()
6914 vi->intr_coal_rx.max_packets = 0; in virtnet_probe()
6919 if (vi->sq[0].napi.weight) in virtnet_probe()
6920 vi->intr_coal_tx.max_packets = 1; in virtnet_probe()
6922 vi->intr_coal_tx.max_packets = 0; in virtnet_probe()
6925 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { in virtnet_probe()
6927 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_probe()
6928 if (vi->sq[i].napi.weight) in virtnet_probe()
6929 vi->sq[i].intr_coal.max_packets = 1; in virtnet_probe()
6937 if (vi->mergeable_rx_bufs) in virtnet_probe()
6938 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group; in virtnet_probe()
6940 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
6941 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
6946 vi->failover = net_failover_create(vi->dev); in virtnet_probe()
6947 if (IS_ERR(vi->failover)) { in virtnet_probe()
6948 err = PTR_ERR(vi->failover); in virtnet_probe()
6953 if (vi->has_rss || vi->has_rss_hash_report) in virtnet_probe()
6969 virtio_config_driver_disable(vi->vdev); in virtnet_probe()
6973 if (vi->has_rss || vi->has_rss_hash_report) { in virtnet_probe()
6975 dev_warn(&vdev->dev, "RSS disabled because committing failed.\n"); in virtnet_probe()
6976 dev->hw_features &= ~NETIF_F_RXHASH; in virtnet_probe()
6977 vi->has_rss_hash_report = false; in virtnet_probe()
6978 vi->has_rss = false; in virtnet_probe()
6982 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_probe()
6989 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { in virtnet_probe()
6992 sg_init_one(&sg, dev->dev_addr, dev->addr_len); in virtnet_probe()
6997 err = -EINVAL; in virtnet_probe()
7002 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) { in virtnet_probe()
7010 err = -ENOMEM; in virtnet_probe()
7021 err = -EINVAL; in virtnet_probe()
7025 v = stats_cap->supported_stats_types[0]; in virtnet_probe()
7026 vi->device_stats_cap = le64_to_cpu(v); in virtnet_probe()
7032 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { in virtnet_probe()
7033 virtnet_config_changed_work(&vi->config_work); in virtnet_probe()
7035 vi->status = VIRTIO_NET_S_LINK_UP; in virtnet_probe()
7041 if (virtio_has_feature(vi->vdev, guest_offloads[i])) in virtnet_probe()
7042 set_bit(guest_offloads[i], &vi->guest_offloads); in virtnet_probe()
7043 vi->guest_offloads_capable = vi->guest_offloads; in virtnet_probe()
7053 pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", in virtnet_probe()
7054 dev->name, max_queue_pairs); in virtnet_probe()
7061 net_failover_destroy(vi->failover); in virtnet_probe()
7064 cancel_delayed_work_sync(&vi->refill); in virtnet_probe()
7076 virtio_reset_device(vi->vdev); in remove_vq_common()
7085 for (i = 0; i < vi->max_queue_pairs; i++) in remove_vq_common()
7086 netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i)); in remove_vq_common()
7097 struct virtnet_info *vi = vdev->priv; in virtnet_remove()
7102 flush_work(&vi->config_work); in virtnet_remove()
7104 flush_work(&vi->rx_mode_work); in virtnet_remove()
7108 unregister_netdev(vi->dev); in virtnet_remove()
7110 net_failover_destroy(vi->failover); in virtnet_remove()
7114 free_netdev(vi->dev); in virtnet_remove()
7119 struct virtnet_info *vi = vdev->priv; in virtnet_freeze()
7130 struct virtnet_info *vi = vdev->priv; in virtnet_restore()
7136 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_restore()