Lines Matching full:vi

496 static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
511 static size_t virtnet_rss_hdr_size(const struct virtnet_info *vi) in virtnet_rss_hdr_size() argument
513 u16 indir_table_size = vi->has_rss ? vi->rss_indir_table_size : 1; in virtnet_rss_hdr_size()
515 return struct_size(vi->rss_hdr, indirection_table, indir_table_size); in virtnet_rss_hdr_size()
518 static size_t virtnet_rss_trailer_size(const struct virtnet_info *vi) in virtnet_rss_trailer_size() argument
520 return struct_size(&vi->rss_trailer, hash_key_data, vi->rss_key_size); in virtnet_rss_trailer_size()
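
Both sizing helpers lean on struct_size() from <linux/overflow.h>, which computes the size of a struct with a trailing flexible array and saturates to SIZE_MAX on overflow. A minimal sketch of the pattern (rss_hdr_sketch and its fields are invented for illustration, not the driver's actual layout):

	struct rss_hdr_sketch {
		__le32 hash_types;
		__le16 indirection_table[];	/* flexible array member */
	};

	struct rss_hdr_sketch *hdr;
	size_t n = 16;	/* e.g. the negotiated number of table entries */

	/* sizeof(*hdr) + n * sizeof(hdr->indirection_table[0]), except an
	 * overflowing result saturates instead of wrapping, so the later
	 * allocation fails cleanly rather than coming up short. */
	size_t bytes = struct_size(hdr, indirection_table, n);
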
639 static int vq_type(struct virtnet_info *vi, int qid) in vq_type() argument
641 if (qid == vi->max_queue_pairs * 2) in vq_type()
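
vq_type() encodes the driver's virtqueue index convention: rx vqs occupy even indices, tx vqs odd ones, and the control vq sits last at vi->max_queue_pairs * 2. A sketch of the companion index helpers, mirroring vq2rxq()/vq2txq() as used by the skb_recv_done()/skb_xmit_done() hits below:

	/* Sketch, assuming the standard rx0,tx0,rx1,tx1,...,ctrl layout. */
	static int vq2rxq(struct virtqueue *vq) { return vq->index / 2; }
	static int vq2txq(struct virtqueue *vq) { return (vq->index - 1) / 2; }
	static int rxq2vq(int rxq) { return rxq * 2; }
	static int txq2vq(int txq) { return txq * 2 + 1; }
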
664 /* Find end of list, sew whole thing into vi->rq.pages. */ in give_pages()
683 static void virtnet_rq_free_buf(struct virtnet_info *vi, in virtnet_rq_free_buf() argument
686 if (vi->mergeable_rx_bufs) in virtnet_rq_free_buf()
688 else if (vi->big_packets) in virtnet_rq_free_buf()
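
The match catches only the two conditions; a sketch of the full dispatch, assuming the mainline body: mergeable and small buffers are plain page references, while big-packet buffers are chained back onto the queue's page list.

	if (vi->mergeable_rx_bufs)
		put_page(virt_to_head_page(buf));
	else if (vi->big_packets)
		give_pages(rq, buf);
	else
		put_page(virt_to_head_page(buf));
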
694 static void enable_delayed_refill(struct virtnet_info *vi) in enable_delayed_refill() argument
696 spin_lock_bh(&vi->refill_lock); in enable_delayed_refill()
697 vi->refill_enabled = true; in enable_delayed_refill()
698 spin_unlock_bh(&vi->refill_lock); in enable_delayed_refill()
701 static void disable_delayed_refill(struct virtnet_info *vi) in disable_delayed_refill() argument
703 spin_lock_bh(&vi->refill_lock); in disable_delayed_refill()
704 vi->refill_enabled = false; in disable_delayed_refill()
705 spin_unlock_bh(&vi->refill_lock); in disable_delayed_refill()
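
The refill_enabled flag written under refill_lock here is read under the same lock from the rx NAPI path; a condensed sketch of the consumer side, matching the virtnet_receive() hits at lines 2954-2957 below:

	spin_lock(&vi->refill_lock);
	if (vi->refill_enabled)
		schedule_delayed_work(&vi->refill, 0);
	spin_unlock(&vi->refill_lock);
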
708 static void enable_rx_mode_work(struct virtnet_info *vi) in enable_rx_mode_work() argument
711 vi->rx_mode_work_enabled = true; in enable_rx_mode_work()
715 static void disable_rx_mode_work(struct virtnet_info *vi) in disable_rx_mode_work() argument
718 vi->rx_mode_work_enabled = false; in disable_rx_mode_work()
751 struct virtnet_info *vi = vq->vdev->priv; in skb_xmit_done() local
752 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; in skb_xmit_done()
761 netif_wake_subqueue(vi->dev, vq2txq(vq)); in skb_xmit_done()
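
These two hits frame the tx-completion callback; a sketch of the surrounding control flow, assuming the mainline shape of skb_xmit_done(): suppress further interrupts, then either hand the work to the tx NAPI or wake the stopped subqueue.

	virtqueue_disable_cb(vq);	/* suppress further interrupts */

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* we were probably waiting for more output buffers */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
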
798 static struct sk_buff *page_to_skb(struct virtnet_info *vi, in page_to_skb() argument
814 hdr_len = vi->hdr_len; in page_to_skb()
815 if (vi->mergeable_rx_bufs) in page_to_skb()
856 if (vi->mergeable_rx_bufs) { in page_to_skb()
899 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_unmap() local
905 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_unmap()
931 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_get_buf() local
934 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_get_buf()
945 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_init_one_sg() local
951 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_init_one_sg()
968 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_alloc() local
973 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_alloc()
1023 struct virtnet_info *vi = vq->vdev->priv; in virtnet_rq_unmap_free_buf() local
1027 rq = &vi->rq[i]; in virtnet_rq_unmap_free_buf()
1034 if (!vi->big_packets || vi->mergeable_rx_bufs) in virtnet_rq_unmap_free_buf()
1037 virtnet_rq_free_buf(vi, rq, buf); in virtnet_rq_unmap_free_buf()
1059 static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) in is_xdp_raw_buffer_queue() argument
1061 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) in is_xdp_raw_buffer_queue()
1063 else if (q < vi->curr_queue_pairs) in is_xdp_raw_buffer_queue()
1069 static bool tx_may_stop(struct virtnet_info *vi, in tx_may_stop() argument
1075 qnum = sq - vi->sq; in tx_may_stop()
1101 static void check_sq_full_and_disable(struct virtnet_info *vi, in check_sq_full_and_disable() argument
1108 qnum = sq - vi->sq; in check_sq_full_and_disable()
1110 if (tx_may_stop(vi, dev, sq)) { in check_sq_full_and_disable()
1130 static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi, in buf_to_xdp() argument
1138 bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool) + vi->hdr_len; in buf_to_xdp()
1142 vi->dev->name, len, bufsize); in buf_to_xdp()
1143 DEV_STATS_INC(vi->dev, rx_length_errors); in buf_to_xdp()
1183 static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi, in virtnet_receive_xsk_small() argument
1235 static int xsk_append_merge_buffer(struct virtnet_info *vi, in xsk_append_merge_buffer() argument
1254 vi->dev->name, num_buf, in xsk_append_merge_buffer()
1255 virtio16_to_cpu(vi->vdev, in xsk_append_merge_buffer()
1257 DEV_STATS_INC(vi->dev, rx_length_errors); in xsk_append_merge_buffer()
1263 xdp = buf_to_xdp(vi, rq, buf, len); in xsk_append_merge_buffer()
1273 memcpy(buf, xdp->data - vi->hdr_len, len); in xsk_append_merge_buffer()
1292 xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats); in xsk_append_merge_buffer()
1296 static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct virtnet_info *vi, in virtnet_receive_xsk_merge() argument
1306 hdr = xdp->data - vi->hdr_len; in virtnet_receive_xsk_merge()
1307 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in virtnet_receive_xsk_merge()
1323 if (xsk_append_merge_buffer(vi, rq, skb, num_buf, hdr, stats)) { in virtnet_receive_xsk_merge()
1347 static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq, in virtnet_receive_xsk_buf() argument
1352 struct net_device *dev = vi->dev; in virtnet_receive_xsk_buf()
1357 len -= vi->hdr_len; in virtnet_receive_xsk_buf()
1361 xdp = buf_to_xdp(vi, rq, buf, len); in virtnet_receive_xsk_buf()
1372 flags = ((struct virtio_net_common_hdr *)(xdp->data - vi->hdr_len))->hdr.flags; in virtnet_receive_xsk_buf()
1374 if (!vi->mergeable_rx_bufs) in virtnet_receive_xsk_buf()
1375 skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats); in virtnet_receive_xsk_buf()
1377 skb = virtnet_receive_xsk_merge(dev, vi, rq, xdp, xdp_xmit, stats); in virtnet_receive_xsk_buf()
1380 virtnet_receive_done(vi, rq, skb, flags); in virtnet_receive_xsk_buf()
1383 static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq, in virtnet_add_recvbuf_xsk() argument
1398 len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len; in virtnet_add_recvbuf_xsk()
1405 addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len; in virtnet_add_recvbuf_xsk()
1438 struct virtnet_info *vi; in virtnet_xsk_xmit_one() local
1441 vi = sq->vq->vdev->priv; in virtnet_xsk_xmit_one()
1447 sg_fill_dma(sq->sg, sq->xsk_hdr_dma_addr, vi->hdr_len); in virtnet_xsk_xmit_one()
1490 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_xsk_xmit() local
1492 struct net_device *dev = vi->dev; in virtnet_xsk_xmit()
1499 __free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), true, &stats); in virtnet_xsk_xmit()
1506 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) in virtnet_xsk_xmit()
1507 check_sq_full_and_disable(vi, vi->dev, sq); in virtnet_xsk_xmit()
1512 txq = netdev_get_tx_queue(vi->dev, sq - vi->sq); in virtnet_xsk_xmit()
1541 struct virtnet_info *vi = netdev_priv(dev); in virtnet_xsk_wakeup() local
1547 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_wakeup()
1550 sq = &vi->sq[qid]; in virtnet_xsk_wakeup()
1567 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, in __virtnet_xdp_xmit_one() argument
1576 if (unlikely(xdpf->headroom < vi->hdr_len)) in __virtnet_xdp_xmit_one()
1591 xdpf->headroom -= vi->hdr_len; in __virtnet_xdp_xmit_one()
1592 xdpf->data -= vi->hdr_len; in __virtnet_xdp_xmit_one()
1595 memset(hdr, 0, vi->hdr_len); in __virtnet_xdp_xmit_one()
1596 xdpf->len += vi->hdr_len; in __virtnet_xdp_xmit_one()
1614 /* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
1622 #define virtnet_xdp_get_sq(vi) ({ \ argument
1625 typeof(vi) v = (vi); \
1641 #define virtnet_xdp_put_sq(vi, q) { \ argument
1643 typeof(vi) v = (vi); \
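
Both macro bodies are cut off by the match; the comment at line 1614 states the rule they implement. A condensed sketch of the selection logic, assuming mainline behaviour rather than the verbatim macro: with more queue pairs than CPUs, each CPU owns a dedicated XDP sq and the tx "lock" is a no-op; otherwise sqs are shared and the txq lock must be taken.

	int cpu = smp_processor_id();
	struct netdev_queue *txq;
	unsigned int qp;

	if (v->curr_queue_pairs > nr_cpu_ids) {
		qp = v->curr_queue_pairs - v->xdp_queue_pairs + cpu;
		txq = netdev_get_tx_queue(v->dev, qp);
		__netif_tx_acquire(txq);	/* no-op in the per-cpu case */
	} else {
		qp = cpu % v->curr_queue_pairs;
		txq = netdev_get_tx_queue(v->dev, qp);
		__netif_tx_lock(txq, cpu);
	}
	/* the get macro evaluates to v->sq + qp; the put macro releases txq */
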
1655 struct virtnet_info *vi = netdev_priv(dev); in virtnet_xdp_xmit() local
1657 struct receive_queue *rq = vi->rq; in virtnet_xdp_xmit()
1672 sq = virtnet_xdp_get_sq(vi); in virtnet_xdp_xmit()
1680 virtnet_free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), in virtnet_xdp_xmit()
1686 if (__virtnet_xdp_xmit_one(vi, sq, xdpf)) in virtnet_xdp_xmit()
1692 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) in virtnet_xdp_xmit()
1693 check_sq_full_and_disable(vi, dev, sq); in virtnet_xdp_xmit()
1708 virtnet_xdp_put_sq(vi, sq); in virtnet_xdp_xmit()
1781 static unsigned int virtnet_get_headroom(struct virtnet_info *vi) in virtnet_get_headroom() argument
1783 return vi->xdp_enabled ? XDP_PACKET_HEADROOM : 0; in virtnet_get_headroom()
1854 static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi, in receive_small_build_skb() argument
1865 headroom = vi->hdr_len + header_offset; in receive_small_build_skb()
1874 memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len); in receive_small_build_skb()
1880 struct virtnet_info *vi, in receive_small_xdp() argument
1890 unsigned int headroom = vi->hdr_len + header_offset; in receive_small_xdp()
1910 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { in receive_small_xdp()
1912 unsigned int tlen = len + vi->hdr_len; in receive_small_xdp()
1915 xdp_headroom = virtnet_get_headroom(vi); in receive_small_xdp()
1917 headroom = vi->hdr_len + header_offset; in receive_small_xdp()
1932 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len, in receive_small_xdp()
1971 struct virtnet_info *vi, in receive_small() argument
1987 len -= vi->hdr_len; in receive_small()
1997 if (unlikely(vi->xdp_enabled)) { in receive_small()
2003 skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf, in receive_small()
2012 skb = receive_small_build_skb(vi, xdp_headroom, buf, len); in receive_small()
2023 struct virtnet_info *vi, in receive_big() argument
2031 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0); in receive_big()
2033 u64_stats_add(&stats->bytes, len - vi->hdr_len); in receive_big()
2075 struct virtnet_info *vi, in build_skb_from_xdp_buff() argument
2118 struct virtnet_info *vi, in virtnet_build_xdp_buff_mrg() argument
2140 XDP_PACKET_HEADROOM + vi->hdr_len, len - vi->hdr_len, true); in virtnet_build_xdp_buff_mrg()
2166 virtio16_to_cpu(vi->vdev, hdr->num_buffers)); in virtnet_build_xdp_buff_mrg()
2206 static void *mergeable_xdp_get_buf(struct virtnet_info *vi, in mergeable_xdp_get_buf() argument
2240 if (likely(headroom >= virtnet_get_headroom(vi) && in mergeable_xdp_get_buf()
2285 struct virtnet_info *vi, in receive_mergeable_xdp() argument
2295 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable_xdp()
2306 data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page, in receive_mergeable_xdp()
2311 err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz, in receive_mergeable_xdp()
2320 head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz); in receive_mergeable_xdp()
2388 struct virtnet_info *vi, in receive_mergeable() argument
2397 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable()
2407 u64_stats_add(&stats->bytes, len - vi->hdr_len); in receive_mergeable()
2416 if (unlikely(vi->xdp_enabled)) { in receive_mergeable()
2422 head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx, in receive_mergeable()
2430 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom); in receive_mergeable()
2440 virtio16_to_cpu(vi->vdev, in receive_mergeable()
2508 static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq, in virtnet_receive_done() argument
2512 struct net_device *dev = vi->dev; in virtnet_receive_done()
2515 if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) in virtnet_receive_done()
2522 virtio_is_little_endian(vi->vdev))) { in virtnet_receive_done()
2542 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, in receive_buf() argument
2547 struct net_device *dev = vi->dev; in receive_buf()
2551 if (unlikely(len < vi->hdr_len + ETH_HLEN)) { in receive_buf()
2554 virtnet_rq_free_buf(vi, rq, buf); in receive_buf()
2567 if (vi->mergeable_rx_bufs) in receive_buf()
2568 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit, in receive_buf()
2570 else if (vi->big_packets) in receive_buf()
2571 skb = receive_big(dev, vi, rq, buf, len, stats); in receive_buf()
2573 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats); in receive_buf()
2578 virtnet_receive_done(vi, rq, skb, flags); in receive_buf()
2586 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, in add_recvbuf_small() argument
2590 unsigned int xdp_headroom = virtnet_get_headroom(vi); in add_recvbuf_small()
2592 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; in add_recvbuf_small()
2607 virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN); in add_recvbuf_small()
2618 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, in add_recvbuf_big() argument
2625 sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2); in add_recvbuf_big()
2627 /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */ in add_recvbuf_big()
2628 for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) { in add_recvbuf_big()
2651 sg_set_buf(&rq->sg[0], p, vi->hdr_len); in add_recvbuf_big()
2659 err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2, in add_recvbuf_big()
2671 struct virtnet_info *vi = rq->vq->vdev->priv; in get_mergeable_buf_len() local
2672 const size_t hdr_len = vi->hdr_len; in get_mergeable_buf_len()
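
Only the opening lines of get_mergeable_buf_len() match; the helper sizes mergeable rx buffers from an EWMA of recent packet lengths. A sketch of the remainder, assuming the mainline formula: clamp the running average between the queue minimum and a page less the header, then cache-align.

	unsigned int len;

	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
				rq->min_buf_len, PAGE_SIZE - hdr_len);

	return ALIGN(len, L1_CACHE_BYTES);
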
2684 static int add_recvbuf_mergeable(struct virtnet_info *vi, in add_recvbuf_mergeable() argument
2688 unsigned int headroom = virtnet_get_headroom(vi); in add_recvbuf_mergeable()
2745 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, in try_fill_recv() argument
2751 err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk_pool, gfp); in try_fill_recv()
2756 if (vi->mergeable_rx_bufs) in try_fill_recv()
2757 err = add_recvbuf_mergeable(vi, rq, gfp); in try_fill_recv()
2758 else if (vi->big_packets) in try_fill_recv()
2759 err = add_recvbuf_big(vi, rq, gfp); in try_fill_recv()
2761 err = add_recvbuf_small(vi, rq, gfp); in try_fill_recv()
2781 struct virtnet_info *vi = rvq->vdev->priv; in skb_recv_done() local
2782 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; in skb_recv_done()
2804 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_napi_enable() local
2808 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, &rq->napi); in virtnet_napi_enable()
2813 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_napi_tx_enable() local
2823 if (!vi->affinity_hint_set) { in virtnet_napi_tx_enable()
2829 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, napi); in virtnet_napi_tx_enable()
2834 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_napi_tx_disable() local
2839 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, NULL); in virtnet_napi_tx_disable()
2846 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_napi_disable() local
2850 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, NULL); in virtnet_napi_disable()
2856 struct virtnet_info *vi = in refill_work() local
2861 for (i = 0; i < vi->curr_queue_pairs; i++) { in refill_work()
2862 struct receive_queue *rq = &vi->rq[i]; in refill_work()
2879 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); in refill_work()
2886 schedule_delayed_work(&vi->refill, HZ/2); in refill_work()
2890 static int virtnet_receive_xsk_bufs(struct virtnet_info *vi, in virtnet_receive_xsk_bufs() argument
2905 virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, stats); in virtnet_receive_xsk_bufs()
2912 static int virtnet_receive_packets(struct virtnet_info *vi, in virtnet_receive_packets() argument
2922 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_receive_packets()
2926 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, stats); in virtnet_receive_packets()
2932 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, stats); in virtnet_receive_packets()
2943 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_receive() local
2948 packets = virtnet_receive_xsk_bufs(vi, rq, budget, xdp_xmit, &stats); in virtnet_receive()
2950 packets = virtnet_receive_packets(vi, rq, budget, xdp_xmit, &stats); in virtnet_receive()
2953 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) { in virtnet_receive()
2954 spin_lock(&vi->refill_lock); in virtnet_receive()
2955 if (vi->refill_enabled) in virtnet_receive()
2956 schedule_delayed_work(&vi->refill, 0); in virtnet_receive()
2957 spin_unlock(&vi->refill_lock); in virtnet_receive()
2982 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll_cleantx() local
2984 struct send_queue *sq = &vi->sq[index]; in virtnet_poll_cleantx()
2985 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_cleantx()
2987 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) in virtnet_poll_cleantx()
3014 static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq) in virtnet_rx_dim_update() argument
3037 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll() local
3059 virtnet_rx_dim_update(vi, rq); in virtnet_poll()
3063 sq = virtnet_xdp_get_sq(vi); in virtnet_poll()
3069 virtnet_xdp_put_sq(vi, sq); in virtnet_poll()
3075 static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index) in virtnet_disable_queue_pair() argument
3077 virtnet_napi_tx_disable(&vi->sq[qp_index]); in virtnet_disable_queue_pair()
3078 virtnet_napi_disable(&vi->rq[qp_index]); in virtnet_disable_queue_pair()
3079 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); in virtnet_disable_queue_pair()
3082 static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index) in virtnet_enable_queue_pair() argument
3084 struct net_device *dev = vi->dev; in virtnet_enable_queue_pair()
3087 err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index, in virtnet_enable_queue_pair()
3088 vi->rq[qp_index].napi.napi_id); in virtnet_enable_queue_pair()
3092 err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq, in virtnet_enable_queue_pair()
3097 virtnet_napi_enable(&vi->rq[qp_index]); in virtnet_enable_queue_pair()
3098 virtnet_napi_tx_enable(&vi->sq[qp_index]); in virtnet_enable_queue_pair()
3103 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); in virtnet_enable_queue_pair()
3107 static void virtnet_cancel_dim(struct virtnet_info *vi, struct dim *dim) in virtnet_cancel_dim() argument
3109 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_cancel_dim()
3114 static void virtnet_update_settings(struct virtnet_info *vi) in virtnet_update_settings() argument
3119 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) in virtnet_update_settings()
3122 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); in virtnet_update_settings()
3125 vi->speed = speed; in virtnet_update_settings()
3127 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); in virtnet_update_settings()
3130 vi->duplex = duplex; in virtnet_update_settings()
3135 struct virtnet_info *vi = netdev_priv(dev); in virtnet_open() local
3138 enable_delayed_refill(vi); in virtnet_open()
3140 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_open()
3141 if (i < vi->curr_queue_pairs) in virtnet_open()
3143 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) in virtnet_open()
3144 schedule_delayed_work(&vi->refill, 0); in virtnet_open()
3146 err = virtnet_enable_queue_pair(vi, i); in virtnet_open()
3151 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { in virtnet_open()
3152 if (vi->status & VIRTIO_NET_S_LINK_UP) in virtnet_open()
3153 netif_carrier_on(vi->dev); in virtnet_open()
3154 virtio_config_driver_enable(vi->vdev); in virtnet_open()
3156 vi->status = VIRTIO_NET_S_LINK_UP; in virtnet_open()
3163 disable_delayed_refill(vi); in virtnet_open()
3164 cancel_delayed_work_sync(&vi->refill); in virtnet_open()
3167 virtnet_disable_queue_pair(vi, i); in virtnet_open()
3168 virtnet_cancel_dim(vi, &vi->rq[i].dim); in virtnet_open()
3177 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_poll_tx() local
3183 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { in virtnet_poll_tx()
3189 txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_tx()
3239 struct virtnet_info *vi = sq->vq->vdev->priv; in xmit_skb() local
3241 unsigned hdr_len = vi->hdr_len; in xmit_skb()
3244 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); in xmit_skb()
3246 can_push = vi->any_header_sg && in xmit_skb()
3257 virtio_is_little_endian(vi->vdev), false, in xmit_skb()
3261 if (vi->mergeable_rx_bufs) in xmit_skb()
3286 struct virtnet_info *vi = netdev_priv(dev); in start_xmit() local
3288 struct send_queue *sq = &vi->sq[qnum]; in start_xmit()
3325 tx_may_stop(vi, dev, sq); in start_xmit()
3327 check_sq_full_and_disable(vi, dev, sq); in start_xmit()
3345 static void __virtnet_rx_pause(struct virtnet_info *vi, in __virtnet_rx_pause() argument
3348 bool running = netif_running(vi->dev); in __virtnet_rx_pause()
3352 virtnet_cancel_dim(vi, &rq->dim); in __virtnet_rx_pause()
3356 static void virtnet_rx_pause_all(struct virtnet_info *vi) in virtnet_rx_pause_all() argument
3364 disable_delayed_refill(vi); in virtnet_rx_pause_all()
3365 cancel_delayed_work_sync(&vi->refill); in virtnet_rx_pause_all()
3366 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_rx_pause_all()
3367 __virtnet_rx_pause(vi, &vi->rq[i]); in virtnet_rx_pause_all()
3370 static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq) in virtnet_rx_pause() argument
3376 disable_delayed_refill(vi); in virtnet_rx_pause()
3377 cancel_delayed_work_sync(&vi->refill); in virtnet_rx_pause()
3378 __virtnet_rx_pause(vi, rq); in virtnet_rx_pause()
3381 static void __virtnet_rx_resume(struct virtnet_info *vi, in __virtnet_rx_resume() argument
3385 bool running = netif_running(vi->dev); in __virtnet_rx_resume()
3388 if (refill && !try_fill_recv(vi, rq, GFP_KERNEL)) in __virtnet_rx_resume()
3394 schedule_delayed_work(&vi->refill, 0); in __virtnet_rx_resume()
3397 static void virtnet_rx_resume_all(struct virtnet_info *vi) in virtnet_rx_resume_all() argument
3401 enable_delayed_refill(vi); in virtnet_rx_resume_all()
3402 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_rx_resume_all()
3403 if (i < vi->curr_queue_pairs) in virtnet_rx_resume_all()
3404 __virtnet_rx_resume(vi, &vi->rq[i], true); in virtnet_rx_resume_all()
3406 __virtnet_rx_resume(vi, &vi->rq[i], false); in virtnet_rx_resume_all()
3410 static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq) in virtnet_rx_resume() argument
3412 enable_delayed_refill(vi); in virtnet_rx_resume()
3413 __virtnet_rx_resume(vi, rq, true); in virtnet_rx_resume()
3416 static int virtnet_rx_resize(struct virtnet_info *vi, in virtnet_rx_resize() argument
3421 qindex = rq - vi->rq; in virtnet_rx_resize()
3423 virtnet_rx_pause(vi, rq); in virtnet_rx_resize()
3427 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); in virtnet_rx_resize()
3429 virtnet_rx_resume(vi, rq); in virtnet_rx_resize()
3433 static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq) in virtnet_tx_pause() argument
3435 bool running = netif_running(vi->dev); in virtnet_tx_pause()
3439 qindex = sq - vi->sq; in virtnet_tx_pause()
3444 txq = netdev_get_tx_queue(vi->dev, qindex); in virtnet_tx_pause()
3455 netif_stop_subqueue(vi->dev, qindex); in virtnet_tx_pause()
3460 static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq) in virtnet_tx_resume() argument
3462 bool running = netif_running(vi->dev); in virtnet_tx_resume()
3466 qindex = sq - vi->sq; in virtnet_tx_resume()
3468 txq = netdev_get_tx_queue(vi->dev, qindex); in virtnet_tx_resume()
3479 static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq, in virtnet_tx_resize() argument
3484 qindex = sq - vi->sq; in virtnet_tx_resize()
3486 virtnet_tx_pause(vi, sq); in virtnet_tx_resize()
3491 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); in virtnet_tx_resize()
3493 virtnet_tx_resume(vi, sq); in virtnet_tx_resize()
3503 static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd, in virtnet_send_command_reply() argument
3513 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); in virtnet_send_command_reply()
3515 mutex_lock(&vi->cvq_lock); in virtnet_send_command_reply()
3516 vi->ctrl->status = ~0; in virtnet_send_command_reply()
3517 vi->ctrl->hdr.class = class; in virtnet_send_command_reply()
3518 vi->ctrl->hdr.cmd = cmd; in virtnet_send_command_reply()
3520 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); in virtnet_send_command_reply()
3527 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); in virtnet_send_command_reply()
3534 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC); in virtnet_send_command_reply()
3536 dev_warn(&vi->vdev->dev, in virtnet_send_command_reply()
3538 mutex_unlock(&vi->cvq_lock); in virtnet_send_command_reply()
3542 if (unlikely(!virtqueue_kick(vi->cvq))) in virtnet_send_command_reply()
3548 while (!virtqueue_get_buf(vi->cvq, &tmp) && in virtnet_send_command_reply()
3549 !virtqueue_is_broken(vi->cvq)) { in virtnet_send_command_reply()
3555 ok = vi->ctrl->status == VIRTIO_NET_OK; in virtnet_send_command_reply()
3556 mutex_unlock(&vi->cvq_lock); in virtnet_send_command_reply()
3560 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, in virtnet_send_command() argument
3563 return virtnet_send_command_reply(vi, class, cmd, out, NULL); in virtnet_send_command()
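
Every control-queue caller follows the same pattern: marshal the payload into DMA-able memory, wrap it in a scatterlist, and pass a class/command pair. A condensed usage sketch modelled on the VLAN hits below (the heap buffer mirrors the _vid hit at line 3878; error handling is abbreviated):

	__virtio16 *_vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
	struct scatterlist sg;

	if (!_vid)
		return -ENOMEM;

	*_vid = cpu_to_virtio16(vi->vdev, vid);
	sg_init_one(&sg, _vid, sizeof(*_vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
	kfree(_vid);
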
3568 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_mac_address() local
3569 struct virtio_device *vdev = vi->vdev; in virtnet_set_mac_address()
3574 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_set_mac_address()
3587 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, in virtnet_set_mac_address()
3616 struct virtnet_info *vi = netdev_priv(dev); in virtnet_stats() local
3620 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_stats()
3622 struct receive_queue *rq = &vi->rq[i]; in virtnet_stats()
3623 struct send_queue *sq = &vi->sq[i]; in virtnet_stats()
3653 static void virtnet_ack_link_announce(struct virtnet_info *vi) in virtnet_ack_link_announce() argument
3655 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, in virtnet_ack_link_announce()
3657 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); in virtnet_ack_link_announce()
3660 static bool virtnet_commit_rss_command(struct virtnet_info *vi);
3662 static void virtnet_rss_update_by_qpairs(struct virtnet_info *vi, u16 queue_pairs) in virtnet_rss_update_by_qpairs() argument
3667 for (; i < vi->rss_indir_table_size; ++i) { in virtnet_rss_update_by_qpairs()
3669 vi->rss_hdr->indirection_table[i] = cpu_to_le16(indir_val); in virtnet_rss_update_by_qpairs()
3671 vi->rss_trailer.max_tx_vq = cpu_to_le16(queue_pairs); in virtnet_rss_update_by_qpairs()
3674 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) in virtnet_set_queues() argument
3679 struct net_device *dev = vi->dev; in virtnet_set_queues()
3682 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) in virtnet_set_queues()
3692 if (vi->has_rss && !netif_is_rxfh_configured(dev)) { in virtnet_set_queues()
3693 old_rss_hdr = vi->rss_hdr; in virtnet_set_queues()
3694 old_rss_trailer = vi->rss_trailer; in virtnet_set_queues()
3695 vi->rss_hdr = devm_kzalloc(&dev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL); in virtnet_set_queues()
3696 if (!vi->rss_hdr) { in virtnet_set_queues()
3697 vi->rss_hdr = old_rss_hdr; in virtnet_set_queues()
3701 *vi->rss_hdr = *old_rss_hdr; in virtnet_set_queues()
3702 virtnet_rss_update_by_qpairs(vi, queue_pairs); in virtnet_set_queues()
3704 if (!virtnet_commit_rss_command(vi)) { in virtnet_set_queues()
3706 devm_kfree(&dev->dev, vi->rss_hdr); in virtnet_set_queues()
3707 vi->rss_hdr = old_rss_hdr; in virtnet_set_queues()
3708 vi->rss_trailer = old_rss_trailer; in virtnet_set_queues()
3722 mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); in virtnet_set_queues()
3725 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, in virtnet_set_queues()
3732 vi->curr_queue_pairs = queue_pairs; in virtnet_set_queues()
3734 spin_lock_bh(&vi->refill_lock); in virtnet_set_queues()
3735 if (dev->flags & IFF_UP && vi->refill_enabled) in virtnet_set_queues()
3736 schedule_delayed_work(&vi->refill, 0); in virtnet_set_queues()
3737 spin_unlock_bh(&vi->refill_lock); in virtnet_set_queues()
3744 struct virtnet_info *vi = netdev_priv(dev); in virtnet_close() local
3748 disable_delayed_refill(vi); in virtnet_close()
3750 cancel_delayed_work_sync(&vi->refill); in virtnet_close()
3754 virtio_config_driver_disable(vi->vdev); in virtnet_close()
3758 cancel_work_sync(&vi->config_work); in virtnet_close()
3760 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_close()
3761 virtnet_disable_queue_pair(vi, i); in virtnet_close()
3762 virtnet_cancel_dim(vi, &vi->rq[i].dim); in virtnet_close()
3772 struct virtnet_info *vi = in virtnet_rx_mode_work() local
3775 struct net_device *dev = vi->dev; in virtnet_rx_mode_work()
3785 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) in virtnet_rx_mode_work()
3799 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, in virtnet_rx_mode_work()
3807 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, in virtnet_rx_mode_work()
3829 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); in virtnet_rx_mode_work()
3840 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); in virtnet_rx_mode_work()
3850 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, in virtnet_rx_mode_work()
3861 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_rx_mode() local
3863 if (vi->rx_mode_work_enabled) in virtnet_set_rx_mode()
3864 schedule_work(&vi->rx_mode_work); in virtnet_set_rx_mode()
3870 struct virtnet_info *vi = netdev_priv(dev); in virtnet_vlan_rx_add_vid() local
3878 *_vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_add_vid()
3881 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, in virtnet_vlan_rx_add_vid()
3890 struct virtnet_info *vi = netdev_priv(dev); in virtnet_vlan_rx_kill_vid() local
3898 *_vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_kill_vid()
3901 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, in virtnet_vlan_rx_kill_vid()
3907 static void virtnet_clean_affinity(struct virtnet_info *vi) in virtnet_clean_affinity() argument
3911 if (vi->affinity_hint_set) { in virtnet_clean_affinity()
3912 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_clean_affinity()
3913 virtqueue_set_affinity(vi->rq[i].vq, NULL); in virtnet_clean_affinity()
3914 virtqueue_set_affinity(vi->sq[i].vq, NULL); in virtnet_clean_affinity()
3917 vi->affinity_hint_set = false; in virtnet_clean_affinity()
3921 static void virtnet_set_affinity(struct virtnet_info *vi) in virtnet_set_affinity() argument
3931 virtnet_clean_affinity(vi); in virtnet_set_affinity()
3936 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); in virtnet_set_affinity()
3937 stragglers = num_cpu >= vi->curr_queue_pairs ? in virtnet_set_affinity()
3938 num_cpu % vi->curr_queue_pairs : in virtnet_set_affinity()
3941 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_set_affinity()
3952 virtqueue_set_affinity(vi->rq[i].vq, mask); in virtnet_set_affinity()
3953 virtqueue_set_affinity(vi->sq[i].vq, mask); in virtnet_set_affinity()
3954 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS); in virtnet_set_affinity()
3958 vi->affinity_hint_set = true; in virtnet_set_affinity()
3964 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, in virtnet_cpu_online() local
3966 virtnet_set_affinity(vi); in virtnet_cpu_online()
3972 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, in virtnet_cpu_dead() local
3974 virtnet_set_affinity(vi); in virtnet_cpu_dead()
3980 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, in virtnet_cpu_down_prep() local
3983 virtnet_clean_affinity(vi); in virtnet_cpu_down_prep()
3989 static int virtnet_cpu_notif_add(struct virtnet_info *vi) in virtnet_cpu_notif_add() argument
3993 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
3997 &vi->node_dead); in virtnet_cpu_notif_add()
4000 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
4004 static void virtnet_cpu_notif_remove(struct virtnet_info *vi) in virtnet_cpu_notif_remove() argument
4006 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_remove()
4008 &vi->node_dead); in virtnet_cpu_notif_remove()
4011 static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi, in virtnet_send_ctrl_coal_vq_cmd() argument
4026 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, in virtnet_send_ctrl_coal_vq_cmd()
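
The marshalling between these two hits is elided; a sketch, assuming the struct virtio_net_ctrl_coal_vq layout from <uapi/linux/virtio_net.h> (the class send is the VIRTIO_NET_CTRL_NOTF_COAL hit at line 4026, with command VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET):

	struct virtio_net_ctrl_coal_vq *coal_vq;
	struct scatterlist sgs;

	coal_vq = kzalloc(sizeof(*coal_vq), GFP_KERNEL);
	if (!coal_vq)
		return -ENOMEM;

	coal_vq->vqn = cpu_to_le16(vqn);
	coal_vq->coal.max_usecs = cpu_to_le32(max_usecs);
	coal_vq->coal.max_packets = cpu_to_le32(max_packets);
	sg_init_one(&sgs, coal_vq, sizeof(*coal_vq));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
				  VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET, &sgs)) {
		kfree(coal_vq);
		return -EINVAL;
	}

	kfree(coal_vq);
	return 0;
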
4034 static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi, in virtnet_send_rx_ctrl_coal_vq_cmd() argument
4040 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_rx_ctrl_coal_vq_cmd()
4043 err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue), in virtnet_send_rx_ctrl_coal_vq_cmd()
4048 vi->rq[queue].intr_coal.max_usecs = max_usecs; in virtnet_send_rx_ctrl_coal_vq_cmd()
4049 vi->rq[queue].intr_coal.max_packets = max_packets; in virtnet_send_rx_ctrl_coal_vq_cmd()
4054 static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi, in virtnet_send_tx_ctrl_coal_vq_cmd() argument
4060 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_tx_ctrl_coal_vq_cmd()
4063 err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue), in virtnet_send_tx_ctrl_coal_vq_cmd()
4068 vi->sq[queue].intr_coal.max_usecs = max_usecs; in virtnet_send_tx_ctrl_coal_vq_cmd()
4069 vi->sq[queue].intr_coal.max_packets = max_packets; in virtnet_send_tx_ctrl_coal_vq_cmd()
4079 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_ringparam() local
4081 ring->rx_max_pending = vi->rq[0].vq->num_max; in virtnet_get_ringparam()
4082 ring->tx_max_pending = vi->sq[0].vq->num_max; in virtnet_get_ringparam()
4083 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_get_ringparam()
4084 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_get_ringparam()
4092 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_ringparam() local
4101 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_set_ringparam()
4102 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_set_ringparam()
4108 if (ring->rx_pending > vi->rq[0].vq->num_max) in virtnet_set_ringparam()
4111 if (ring->tx_pending > vi->sq[0].vq->num_max) in virtnet_set_ringparam()
4114 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_set_ringparam()
4115 rq = vi->rq + i; in virtnet_set_ringparam()
4116 sq = vi->sq + i; in virtnet_set_ringparam()
4119 err = virtnet_tx_resize(vi, sq, ring->tx_pending); in virtnet_set_ringparam()
4128 err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i, in virtnet_set_ringparam()
4129 vi->intr_coal_tx.max_usecs, in virtnet_set_ringparam()
4130 vi->intr_coal_tx.max_packets); in virtnet_set_ringparam()
4140 err = virtnet_rx_resize(vi, rq, ring->rx_pending); in virtnet_set_ringparam()
4145 mutex_lock(&vi->rq[i].dim_lock); in virtnet_set_ringparam()
4146 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i, in virtnet_set_ringparam()
4147 vi->intr_coal_rx.max_usecs, in virtnet_set_ringparam()
4148 vi->intr_coal_rx.max_packets); in virtnet_set_ringparam()
4149 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_set_ringparam()
4158 static bool virtnet_commit_rss_command(struct virtnet_info *vi) in virtnet_commit_rss_command() argument
4160 struct net_device *dev = vi->dev; in virtnet_commit_rss_command()
4165 sg_set_buf(&sgs[0], vi->rss_hdr, virtnet_rss_hdr_size(vi)); in virtnet_commit_rss_command()
4166 sg_set_buf(&sgs[1], &vi->rss_trailer, virtnet_rss_trailer_size(vi)); in virtnet_commit_rss_command()
4168 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, in virtnet_commit_rss_command()
4169 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG in virtnet_commit_rss_command()
4181 static void virtnet_init_default_rss(struct virtnet_info *vi) in virtnet_init_default_rss() argument
4183 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_supported); in virtnet_init_default_rss()
4184 vi->rss_hash_types_saved = vi->rss_hash_types_supported; in virtnet_init_default_rss()
4185 vi->rss_hdr->indirection_table_mask = vi->rss_indir_table_size in virtnet_init_default_rss()
4186 ? cpu_to_le16(vi->rss_indir_table_size - 1) : 0; in virtnet_init_default_rss()
4187 vi->rss_hdr->unclassified_queue = 0; in virtnet_init_default_rss()
4189 virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs); in virtnet_init_default_rss()
4191 vi->rss_trailer.hash_key_length = vi->rss_key_size; in virtnet_init_default_rss()
4193 netdev_rss_key_fill(vi->rss_hash_key_data, vi->rss_key_size); in virtnet_init_default_rss()
4196 static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info) in virtnet_get_hashflow() argument
4201 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) { in virtnet_get_hashflow()
4204 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { in virtnet_get_hashflow()
4209 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) { in virtnet_get_hashflow()
4212 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { in virtnet_get_hashflow()
4217 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) { in virtnet_get_hashflow()
4220 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { in virtnet_get_hashflow()
4225 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) { in virtnet_get_hashflow()
4228 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { in virtnet_get_hashflow()
4233 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) in virtnet_get_hashflow()
4238 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) in virtnet_get_hashflow()
4248 static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info) in virtnet_set_hashflow() argument
4250 u32 new_hashtypes = vi->rss_hash_types_saved; in virtnet_set_hashflow()
4299 if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported)) in virtnet_set_hashflow()
4302 if (new_hashtypes != vi->rss_hash_types_saved) { in virtnet_set_hashflow()
4303 vi->rss_hash_types_saved = new_hashtypes; in virtnet_set_hashflow()
4304 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved); in virtnet_set_hashflow()
4305 if (vi->dev->features & NETIF_F_RXHASH) in virtnet_set_hashflow()
4306 return virtnet_commit_rss_command(vi); in virtnet_set_hashflow()
4315 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_drvinfo() local
4316 struct virtio_device *vdev = vi->vdev; in virtnet_get_drvinfo()
4328 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_channels() local
4338 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) in virtnet_set_channels()
4345 if (vi->rq[0].xdp_prog) in virtnet_set_channels()
4349 err = virtnet_set_queues(vi, queue_pairs); in virtnet_set_channels()
4354 virtnet_set_affinity(vi); in virtnet_set_channels()
4378 static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data) in virtnet_get_stats_string() argument
4388 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) { in virtnet_get_stats_string()
4408 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_get_stats_string()
4415 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_get_stats_string()
4422 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_get_stats_string()
4442 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_get_stats_string()
4449 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_get_stats_string()
4456 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_get_stats_string()
4484 static void virtnet_stats_ctx_init(struct virtnet_info *vi, in virtnet_stats_ctx_init() argument
4499 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_stats_ctx_init()
4505 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_stats_ctx_init()
4511 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) { in virtnet_stats_ctx_init()
4517 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_stats_ctx_init()
4525 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_stats_ctx_init()
4531 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { in virtnet_stats_ctx_init()
4537 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_stats_ctx_init()
4543 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_stats_ctx_init()
4555 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) { in virtnet_stats_ctx_init()
4565 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_stats_ctx_init()
4571 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_stats_ctx_init()
4577 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_stats_ctx_init()
4585 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_stats_ctx_init()
4591 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_stats_ctx_init()
4597 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_stats_ctx_init()
4625 static void virtnet_fill_total_fields(struct virtnet_info *vi, in virtnet_fill_total_fields() argument
4636 first_tx_q = first_rx_q + vi->curr_queue_pairs * num_rx; in virtnet_fill_total_fields()
4640 stats_sum_queue(data, num_rx, first_rx_q, vi->curr_queue_pairs); in virtnet_fill_total_fields()
4644 stats_sum_queue(data, num_tx, first_tx_q, vi->curr_queue_pairs); in virtnet_fill_total_fields()
4647 static void virtnet_fill_stats_qstat(struct virtnet_info *vi, u32 qid, in virtnet_fill_stats_qstat() argument
4658 queue_type = vq_type(vi, qid); in virtnet_fill_stats_qstat()
4747 * @vi: virtio net info
4754 static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid, in virtnet_fill_stats() argument
4766 return virtnet_fill_stats_qstat(vi, qid, ctx, base, drv_stats, reply_type); in virtnet_fill_stats()
4772 queue_type = vq_type(vi, qid); in virtnet_fill_stats()
4779 offset += num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2); in virtnet_fill_stats()
4881 static int __virtnet_get_hw_stats(struct virtnet_info *vi, in __virtnet_get_hw_stats() argument
4895 ok = virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS, in __virtnet_get_hw_stats()
4905 virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type); in __virtnet_get_hw_stats()
4911 static void virtnet_make_stat_req(struct virtnet_info *vi, in virtnet_make_stat_req() argument
4916 int qtype = vq_type(vi, qid); in virtnet_make_stat_req()
4930 static int virtnet_get_hw_stats(struct virtnet_info *vi, in virtnet_get_hw_stats() argument
4939 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) in virtnet_get_hw_stats()
4943 last_vq = vi->curr_queue_pairs * 2 - 1; in virtnet_get_hw_stats()
4955 qtype = vq_type(vi, i); in virtnet_get_hw_stats()
4979 virtnet_make_stat_req(vi, ctx, req, i, &j); in virtnet_get_hw_stats()
4982 virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j); in virtnet_get_hw_stats()
4984 ok = __virtnet_get_hw_stats(vi, ctx, req, sizeof(*req) * j, reply, res_size); in virtnet_get_hw_stats()
4994 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_strings() local
5001 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, -1, &p); in virtnet_get_strings()
5002 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, -1, &p); in virtnet_get_strings()
5004 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_CQ, 0, &p); in virtnet_get_strings()
5006 for (i = 0; i < vi->curr_queue_pairs; ++i) in virtnet_get_strings()
5007 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, i, &p); in virtnet_get_strings()
5009 for (i = 0; i < vi->curr_queue_pairs; ++i) in virtnet_get_strings()
5010 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, i, &p); in virtnet_get_strings()
5017 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_sset_count() local
5023 virtnet_stats_ctx_init(vi, &ctx, NULL, false); in virtnet_get_sset_count()
5028 vi->curr_queue_pairs * pair_count; in virtnet_get_sset_count()
5037 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_ethtool_stats() local
5042 virtnet_stats_ctx_init(vi, &ctx, data, false); in virtnet_get_ethtool_stats()
5043 if (virtnet_get_hw_stats(vi, &ctx, -1)) in virtnet_get_ethtool_stats()
5044 dev_warn(&vi->dev->dev, "Failed to get hw stats.\n"); in virtnet_get_ethtool_stats()
5046 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_get_ethtool_stats()
5047 struct receive_queue *rq = &vi->rq[i]; in virtnet_get_ethtool_stats()
5048 struct send_queue *sq = &vi->sq[i]; in virtnet_get_ethtool_stats()
5053 virtnet_fill_stats(vi, i * 2, &ctx, stats_base, true, 0); in virtnet_get_ethtool_stats()
5059 virtnet_fill_stats(vi, i * 2 + 1, &ctx, stats_base, true, 0); in virtnet_get_ethtool_stats()
5063 virtnet_fill_total_fields(vi, &ctx); in virtnet_get_ethtool_stats()
5069 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_channels() local
5071 channels->combined_count = vi->curr_queue_pairs; in virtnet_get_channels()
5072 channels->max_combined = vi->max_queue_pairs; in virtnet_get_channels()
5082 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_link_ksettings() local
5085 &vi->speed, &vi->duplex); in virtnet_set_link_ksettings()
5091 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_link_ksettings() local
5093 cmd->base.speed = vi->speed; in virtnet_get_link_ksettings()
5094 cmd->base.duplex = vi->duplex; in virtnet_get_link_ksettings()
5100 static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi, in virtnet_send_tx_notf_coal_cmds() argument
5115 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, in virtnet_send_tx_notf_coal_cmds()
5120 vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs; in virtnet_send_tx_notf_coal_cmds()
5121 vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames; in virtnet_send_tx_notf_coal_cmds()
5122 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_tx_notf_coal_cmds()
5123 vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs; in virtnet_send_tx_notf_coal_cmds()
5124 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames; in virtnet_send_tx_notf_coal_cmds()
5130 static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, in virtnet_send_rx_notf_coal_cmds() argument
5138 if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_rx_notf_coal_cmds()
5141 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs || in virtnet_send_rx_notf_coal_cmds()
5142 ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets)) in virtnet_send_rx_notf_coal_cmds()
5145 if (rx_ctrl_dim_on && !vi->rx_dim_enabled) { in virtnet_send_rx_notf_coal_cmds()
5146 vi->rx_dim_enabled = true; in virtnet_send_rx_notf_coal_cmds()
5147 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
5148 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5149 vi->rq[i].dim_enabled = true; in virtnet_send_rx_notf_coal_cmds()
5150 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5159 if (!rx_ctrl_dim_on && vi->rx_dim_enabled) { in virtnet_send_rx_notf_coal_cmds()
5160 vi->rx_dim_enabled = false; in virtnet_send_rx_notf_coal_cmds()
5161 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
5162 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5163 vi->rq[i].dim_enabled = false; in virtnet_send_rx_notf_coal_cmds()
5164 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5176 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, in virtnet_send_rx_notf_coal_cmds()
5181 vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; in virtnet_send_rx_notf_coal_cmds()
5182 vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; in virtnet_send_rx_notf_coal_cmds()
5183 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
5184 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5185 vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs; in virtnet_send_rx_notf_coal_cmds()
5186 vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames; in virtnet_send_rx_notf_coal_cmds()
5187 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5193 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, in virtnet_send_notf_coal_cmds() argument
5198 err = virtnet_send_tx_notf_coal_cmds(vi, ec); in virtnet_send_notf_coal_cmds()
5202 err = virtnet_send_rx_notf_coal_cmds(vi, ec); in virtnet_send_notf_coal_cmds()
5209 static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi, in virtnet_send_rx_notf_coal_vq_cmds() argument
5218 mutex_lock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5219 cur_rx_dim = vi->rq[queue].dim_enabled; in virtnet_send_rx_notf_coal_vq_cmds()
5220 max_usecs = vi->rq[queue].intr_coal.max_usecs; in virtnet_send_rx_notf_coal_vq_cmds()
5221 max_packets = vi->rq[queue].intr_coal.max_packets; in virtnet_send_rx_notf_coal_vq_cmds()
5225 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5230 vi->rq[queue].dim_enabled = true; in virtnet_send_rx_notf_coal_vq_cmds()
5231 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5236 vi->rq[queue].dim_enabled = false; in virtnet_send_rx_notf_coal_vq_cmds()
5241 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue, in virtnet_send_rx_notf_coal_vq_cmds()
5244 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5248 static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi, in virtnet_send_notf_coal_vq_cmds() argument
5254 err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue); in virtnet_send_notf_coal_vq_cmds()
5258 err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue, in virtnet_send_notf_coal_vq_cmds()
5272 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rx_dim_work() local
5273 struct net_device *dev = vi->dev; in virtnet_rx_dim_work()
5277 qnum = rq - vi->rq; in virtnet_rx_dim_work()
5286 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum, in virtnet_rx_dim_work()
5330 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_coalesce() local
5336 for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) { in virtnet_set_coalesce()
5338 vi->sq[queue_number].napi.weight, in virtnet_set_coalesce()
5344 /* All queues that belong to [queue_number, vi->max_queue_pairs] will be in virtnet_set_coalesce()
5351 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) in virtnet_set_coalesce()
5352 ret = virtnet_send_notf_coal_cmds(vi, ec); in virtnet_set_coalesce()
5363 for (i = queue_number; i < vi->max_queue_pairs; i++) { in virtnet_set_coalesce()
5364 if (vi->sq[i].xsk_pool) in virtnet_set_coalesce()
5368 for (; queue_number < vi->max_queue_pairs; queue_number++) in virtnet_set_coalesce()
5369 vi->sq[queue_number].napi.weight = napi_weight; in virtnet_set_coalesce()
5380 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_coalesce() local
5382 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { in virtnet_get_coalesce()
5383 ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs; in virtnet_get_coalesce()
5384 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs; in virtnet_get_coalesce()
5385 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets; in virtnet_get_coalesce()
5386 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets; in virtnet_get_coalesce()
5387 ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled; in virtnet_get_coalesce()
5391 if (vi->sq[0].napi.weight) in virtnet_get_coalesce()
5402 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_per_queue_coalesce() local
5406 if (queue >= vi->max_queue_pairs) in virtnet_set_per_queue_coalesce()
5412 vi->sq[queue].napi.weight, in virtnet_set_per_queue_coalesce()
5417 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_set_per_queue_coalesce()
5418 ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue); in virtnet_set_per_queue_coalesce()
5426 vi->sq[queue].napi.weight = napi_weight; in virtnet_set_per_queue_coalesce()
5435 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_per_queue_coalesce() local
5437 if (queue >= vi->max_queue_pairs) in virtnet_get_per_queue_coalesce()
5440 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { in virtnet_get_per_queue_coalesce()
5441 mutex_lock(&vi->rq[queue].dim_lock); in virtnet_get_per_queue_coalesce()
5442 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs; in virtnet_get_per_queue_coalesce()
5443 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs; in virtnet_get_per_queue_coalesce()
5444 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets; in virtnet_get_per_queue_coalesce()
5445 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets; in virtnet_get_per_queue_coalesce()
5446 ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled; in virtnet_get_per_queue_coalesce()
5447 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_get_per_queue_coalesce()
5451 if (vi->sq[queue].napi.weight) in virtnet_get_per_queue_coalesce()
5460 struct virtnet_info *vi = netdev_priv(dev); in virtnet_init_settings() local
5462 vi->speed = SPEED_UNKNOWN; in virtnet_init_settings()
5463 vi->duplex = DUPLEX_UNKNOWN; in virtnet_init_settings()
5479 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_rxfh() local
5483 for (i = 0; i < vi->rss_indir_table_size; ++i) in virtnet_get_rxfh()
5484 rxfh->indir[i] = le16_to_cpu(vi->rss_hdr->indirection_table[i]); in virtnet_get_rxfh()
5488 memcpy(rxfh->key, vi->rss_hash_key_data, vi->rss_key_size); in virtnet_get_rxfh()
5499 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_rxfh() local
5508 if (!vi->has_rss) in virtnet_set_rxfh()
5511 for (i = 0; i < vi->rss_indir_table_size; ++i) in virtnet_set_rxfh()
5512 vi->rss_hdr->indirection_table[i] = cpu_to_le16(rxfh->indir[i]); in virtnet_set_rxfh()
5521 if (!vi->has_rss && !vi->has_rss_hash_report) in virtnet_set_rxfh()
5524 memcpy(vi->rss_hash_key_data, rxfh->key, vi->rss_key_size); in virtnet_set_rxfh()
5529 virtnet_commit_rss_command(vi); in virtnet_set_rxfh()
5536 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_rxnfc() local
5541 info->data = vi->curr_queue_pairs; in virtnet_get_rxnfc()
5544 virtnet_get_hashflow(vi, info); in virtnet_get_rxnfc()
5555 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_rxnfc() local
5560 if (!virtnet_set_hashflow(vi, info)) in virtnet_set_rxnfc()
5601 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_queue_stats_rx() local
5602 struct receive_queue *rq = &vi->rq[i]; in virtnet_get_queue_stats_rx()
5605 virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true); in virtnet_get_queue_stats_rx()
5607 virtnet_get_hw_stats(vi, &ctx, i * 2); in virtnet_get_queue_stats_rx()
5608 virtnet_fill_stats(vi, i * 2, &ctx, (void *)&rq->stats, true, 0); in virtnet_get_queue_stats_rx()
5614 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_queue_stats_tx() local
5615 struct send_queue *sq = &vi->sq[i]; in virtnet_get_queue_stats_tx()
5618 virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true); in virtnet_get_queue_stats_tx()
5620 virtnet_get_hw_stats(vi, &ctx, i * 2 + 1); in virtnet_get_queue_stats_tx()
5621 virtnet_fill_stats(vi, i * 2 + 1, &ctx, (void *)&sq->stats, true, 0); in virtnet_get_queue_stats_tx()
5628 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_base_stats() local
5636 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_get_base_stats()
5641 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_get_base_stats()
5647 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) { in virtnet_get_base_stats()
5654 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) in virtnet_get_base_stats()
5662 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_get_base_stats()
5667 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { in virtnet_get_base_stats()
5672 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_get_base_stats()
5679 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) in virtnet_get_base_stats()
5683 dev->real_num_rx_queues, vi->max_queue_pairs, rx, in virtnet_get_base_stats()
5684 dev->real_num_tx_queues, vi->max_queue_pairs, tx); in virtnet_get_base_stats()
5695 struct virtnet_info *vi = vdev->priv; in virtnet_freeze_down() local
5698 flush_work(&vi->config_work); in virtnet_freeze_down()
5699 disable_rx_mode_work(vi); in virtnet_freeze_down()
5700 flush_work(&vi->rx_mode_work); in virtnet_freeze_down()
5702 netif_tx_lock_bh(vi->dev); in virtnet_freeze_down()
5703 netif_device_detach(vi->dev); in virtnet_freeze_down()
5704 netif_tx_unlock_bh(vi->dev); in virtnet_freeze_down()
5705 if (netif_running(vi->dev)) { in virtnet_freeze_down()
5707 virtnet_close(vi->dev); in virtnet_freeze_down()
5712 static int init_vqs(struct virtnet_info *vi);
5716 struct virtnet_info *vi = vdev->priv; in virtnet_restore_up() local
5719 err = init_vqs(vi); in virtnet_restore_up()
5725 enable_delayed_refill(vi); in virtnet_restore_up()
5726 enable_rx_mode_work(vi); in virtnet_restore_up()
5728 if (netif_running(vi->dev)) { in virtnet_restore_up()
5730 err = virtnet_open(vi->dev); in virtnet_restore_up()
5736 netif_tx_lock_bh(vi->dev); in virtnet_restore_up()
5737 netif_device_attach(vi->dev); in virtnet_restore_up()
5738 netif_tx_unlock_bh(vi->dev); in virtnet_restore_up()
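
virtnet_restore_up undoes both virtnet_freeze_down and the vq teardown that virtnet_freeze performs through remove_vq_common (listed near the end of this section): because freezing destroyed the virtqueues, restore has to rebuild them with init_vqs before virtnet_open may touch them. The ordering constraint, modeled with two flags (a sketch; all names are illustrative):

    #include <assert.h>
    #include <stdbool.h>

    static bool vqs_alive, dev_open;

    static void virtnet_freeze_model(void)
    {
            dev_open = false;           /* virtnet_close() in freeze_down       */
            vqs_alive = false;          /* remove_vq_common() in virtnet_freeze */
    }

    static void virtnet_restore_model(bool was_running)
    {
            vqs_alive = true;           /* init_vqs() runs first ...            */
            if (was_running) {
                    assert(vqs_alive);  /* ... because open touches the vqs     */
                    dev_open = true;    /* virtnet_open()                       */
            }
    }

    int main(void)
    {
            vqs_alive = dev_open = true;
            virtnet_freeze_model();
            virtnet_restore_model(true);
            assert(vqs_alive && dev_open);
            return 0;
    }
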
5742 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) in virtnet_set_guest_offloads() argument
5751 *_offloads = cpu_to_virtio64(vi->vdev, offloads); in virtnet_set_guest_offloads()
5755 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, in virtnet_set_guest_offloads()
5757 dev_warn(&vi->dev->dev, "Fail to set guest offload.\n"); in virtnet_set_guest_offloads()
5764 static int virtnet_clear_guest_offloads(struct virtnet_info *vi) in virtnet_clear_guest_offloads() argument
5768 if (!vi->guest_offloads) in virtnet_clear_guest_offloads()
5771 return virtnet_set_guest_offloads(vi, offloads); in virtnet_clear_guest_offloads()
5774 static int virtnet_restore_guest_offloads(struct virtnet_info *vi) in virtnet_restore_guest_offloads() argument
5776 u64 offloads = vi->guest_offloads; in virtnet_restore_guest_offloads()
5778 if (!vi->guest_offloads) in virtnet_restore_guest_offloads()
5781 return virtnet_set_guest_offloads(vi, offloads); in virtnet_restore_guest_offloads()
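
These three helpers manage a single u64 bitmap: set transfers it to the device as a guest-endian ctrl command, clear pushes an emptied bitmap while the negotiated set stays cached in vi->guest_offloads, and restore writes the cached copy back; both short-circuit when no offloads were ever negotiated. A minimal model of the round-trip (the feature-bit numbers follow the virtio 1.x spec and are assumptions of this sketch, not the kernel's headers):

    #include <assert.h>
    #include <stdint.h>

    /* virtio feature-bit numbers, per the virtio 1.x spec (assumed here) */
    enum {
            VIRTIO_NET_F_GUEST_TSO4 = 7,
            VIRTIO_NET_F_GUEST_TSO6 = 8,
    };

    static uint64_t guest_offloads;   /* cached copy, as vi->guest_offloads */
    static uint64_t device_offloads;  /* what the ctrl command last wrote   */

    static void set_guest_offloads(uint64_t offloads)
    {
            /* stands in for the VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET command */
            device_offloads = offloads;
    }

    int main(void)
    {
            guest_offloads = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
                             (1ULL << VIRTIO_NET_F_GUEST_TSO6);

            set_guest_offloads(0);              /* clear, e.g. when XDP attaches */
            set_guest_offloads(guest_offloads); /* restore from the cached copy  */
            assert(device_offloads == guest_offloads);
            return 0;
    }
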
5784 static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq, in virtnet_rq_bind_xsk_pool() argument
5789 qindex = rq - vi->rq; in virtnet_rq_bind_xsk_pool()
5792 err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id); in virtnet_rq_bind_xsk_pool()
5804 virtnet_rx_pause(vi, rq); in virtnet_rq_bind_xsk_pool()
5808 netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err); in virtnet_rq_bind_xsk_pool()
5815 virtnet_rx_resume(vi, rq); in virtnet_rq_bind_xsk_pool()
5825 static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi, in virtnet_sq_bind_xsk_pool() argument
5831 qindex = sq - vi->sq; in virtnet_sq_bind_xsk_pool()
5833 virtnet_tx_pause(vi, sq); in virtnet_sq_bind_xsk_pool()
5838 netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err); in virtnet_sq_bind_xsk_pool()
5844 virtnet_tx_resume(vi, sq); in virtnet_sq_bind_xsk_pool()
5853 struct virtnet_info *vi = netdev_priv(dev); in virtnet_xsk_pool_enable() local
5860 if (vi->hdr_len > xsk_pool_get_headroom(pool)) in virtnet_xsk_pool_enable()
5866 if (vi->big_packets && !vi->mergeable_rx_bufs) in virtnet_xsk_pool_enable()
5869 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_pool_enable()
5872 sq = &vi->sq[qid]; in virtnet_xsk_pool_enable()
5873 rq = &vi->rq[qid]; in virtnet_xsk_pool_enable()
5895 hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len, in virtnet_xsk_pool_enable()
5906 err = virtnet_rq_bind_xsk_pool(vi, rq, pool); in virtnet_xsk_pool_enable()
5910 err = virtnet_sq_bind_xsk_pool(vi, sq, pool); in virtnet_xsk_pool_enable()
5922 virtnet_rq_bind_xsk_pool(vi, rq, NULL); in virtnet_xsk_pool_enable()
5926 virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len, in virtnet_xsk_pool_enable()
5935 struct virtnet_info *vi = netdev_priv(dev); in virtnet_xsk_pool_disable() local
5941 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_pool_disable()
5944 sq = &vi->sq[qid]; in virtnet_xsk_pool_disable()
5945 rq = &vi->rq[qid]; in virtnet_xsk_pool_disable()
5949 err = virtnet_rq_bind_xsk_pool(vi, rq, NULL); in virtnet_xsk_pool_disable()
5950 err |= virtnet_sq_bind_xsk_pool(vi, sq, NULL); in virtnet_xsk_pool_disable()
5955 vi->hdr_len, DMA_TO_DEVICE, 0); in virtnet_xsk_pool_disable()
5976 struct virtnet_info *vi = netdev_priv(dev); in virtnet_xdp_set() local
5981 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) in virtnet_xdp_set()
5982 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || in virtnet_xdp_set()
5983 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || in virtnet_xdp_set()
5984 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || in virtnet_xdp_set()
5985 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || in virtnet_xdp_set()
5986 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) || in virtnet_xdp_set()
5987 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) || in virtnet_xdp_set()
5988 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) { in virtnet_xdp_set()
5993 if (vi->mergeable_rx_bufs && !vi->any_header_sg) { in virtnet_xdp_set()
6004 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; in virtnet_xdp_set()
6009 if (curr_qp + xdp_qp > vi->max_queue_pairs) { in virtnet_xdp_set()
6011 curr_qp + xdp_qp, vi->max_queue_pairs); in virtnet_xdp_set()
6015 old_prog = rtnl_dereference(vi->rq[0].xdp_prog); in virtnet_xdp_set()
6020 bpf_prog_add(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
6022 virtnet_rx_pause_all(vi); in virtnet_xdp_set()
6026 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_xdp_set()
6027 virtnet_napi_tx_disable(&vi->sq[i]); in virtnet_xdp_set()
6031 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
6032 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
6034 virtnet_restore_guest_offloads(vi); in virtnet_xdp_set()
6039 err = virtnet_set_queues(vi, curr_qp + xdp_qp); in virtnet_xdp_set()
6043 vi->xdp_queue_pairs = xdp_qp; in virtnet_xdp_set()
6046 vi->xdp_enabled = true; in virtnet_xdp_set()
6047 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
6048 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
6050 virtnet_clear_guest_offloads(vi); in virtnet_xdp_set()
6056 vi->xdp_enabled = false; in virtnet_xdp_set()
6059 virtnet_rx_resume_all(vi); in virtnet_xdp_set()
6060 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
6064 virtnet_napi_tx_enable(&vi->sq[i]); in virtnet_xdp_set()
6071 virtnet_clear_guest_offloads(vi); in virtnet_xdp_set()
6072 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_xdp_set()
6073 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); in virtnet_xdp_set()
6076 virtnet_rx_resume_all(vi); in virtnet_xdp_set()
6078 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_xdp_set()
6079 virtnet_napi_tx_enable(&vi->sq[i]); in virtnet_xdp_set()
6082 bpf_prog_sub(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
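
The capacity check in virtnet_xdp_set: curr_qp is what the stack currently uses minus the pairs already dedicated to XDP, and the attach is refused unless curr_qp plus the newly requested xdp_qp (one pair per CPU in the driver, when a program is being installed) fits within max_queue_pairs. The same arithmetic as a standalone predicate (names mirror the locals above):

    #include <stdbool.h>
    #include <stdio.h>

    /* True if a program needing xdp_qp dedicated pairs fits on the device. */
    static bool xdp_qp_fits(int curr_queue_pairs, int cur_xdp_queue_pairs,
                            int xdp_qp, int max_queue_pairs)
    {
            int curr_qp = curr_queue_pairs - cur_xdp_queue_pairs;

            return curr_qp + xdp_qp <= max_queue_pairs;
    }

    int main(void)
    {
            /* 4 pairs in use, none XDP yet, program wants 4 dedicated TX
             * pairs: fits only if the device offers at least 8 pairs. */
            printf("max=8: %d\n", xdp_qp_fits(4, 0, 4, 8));  /* prints 1 */
            printf("max=6: %d\n", xdp_qp_fits(4, 0, 4, 6));  /* prints 0 */
            return 0;
    }
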
6101 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_phys_port_name() local
6104 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_get_phys_port_name()
6117 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_features() local
6122 if (vi->xdp_enabled) in virtnet_set_features()
6126 offloads = vi->guest_offloads_capable; in virtnet_set_features()
6128 offloads = vi->guest_offloads_capable & in virtnet_set_features()
6131 err = virtnet_set_guest_offloads(vi, offloads); in virtnet_set_features()
6134 vi->guest_offloads = offloads; in virtnet_set_features()
6139 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved); in virtnet_set_features()
6141 vi->rss_hdr->hash_types = cpu_to_le32(VIRTIO_NET_HASH_REPORT_NONE); in virtnet_set_features()
6143 if (!virtnet_commit_rss_command(vi)) in virtnet_set_features()
6165 static int virtnet_init_irq_moder(struct virtnet_info *vi) in virtnet_init_irq_moder() argument
6172 ret = net_dim_init_irq_moder(vi->dev, profile_flags, coal_flags, in virtnet_init_irq_moder()
6179 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_init_irq_moder()
6180 net_dim_setting(vi->dev, &vi->rq[i].dim, false); in virtnet_init_irq_moder()
6185 static void virtnet_free_irq_moder(struct virtnet_info *vi) in virtnet_free_irq_moder() argument
6187 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_free_irq_moder()
6191 net_dim_free_irq_moder(vi->dev); in virtnet_free_irq_moder()
6216 struct virtnet_info *vi = in virtnet_config_changed_work() local
6220 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, in virtnet_config_changed_work()
6225 netdev_notify_peers(vi->dev); in virtnet_config_changed_work()
6226 virtnet_ack_link_announce(vi); in virtnet_config_changed_work()
6232 if (vi->status == v) in virtnet_config_changed_work()
6235 vi->status = v; in virtnet_config_changed_work()
6237 if (vi->status & VIRTIO_NET_S_LINK_UP) { in virtnet_config_changed_work()
6238 virtnet_update_settings(vi); in virtnet_config_changed_work()
6239 netif_carrier_on(vi->dev); in virtnet_config_changed_work()
6240 netif_tx_wake_all_queues(vi->dev); in virtnet_config_changed_work()
6242 netif_carrier_off(vi->dev); in virtnet_config_changed_work()
6243 netif_tx_stop_all_queues(vi->dev); in virtnet_config_changed_work()
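
virtnet_config_changed_work is edge-triggered: it reads the device status, masks it down to the link bit it handles (visible in the full source), and bails when vi->status already matches; only a real VIRTIO_NET_S_LINK_UP transition flips carrier state and wakes or stops the tx queues. The core logic as a sketch (VIRTIO_NET_S_LINK_UP is bit 0 per the virtio spec):

    #include <stdio.h>

    #define VIRTIO_NET_S_LINK_UP 1  /* bit 0, per the virtio spec */

    static unsigned int status;

    static void config_changed(unsigned int v)
    {
            v &= VIRTIO_NET_S_LINK_UP;  /* keep only the bit we handle */
            if (status == v)
                    return;             /* no edge: nothing to do */
            status = v;
            printf("link %s\n", v ? "up" : "down");
    }

    int main(void)
    {
            config_changed(1);  /* link up   */
            config_changed(1);  /* no change */
            config_changed(0);  /* link down */
            return 0;
    }
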
6249 struct virtnet_info *vi = vdev->priv; in virtnet_config_changed() local
6251 schedule_work(&vi->config_work); in virtnet_config_changed()
6254 static void virtnet_free_queues(struct virtnet_info *vi) in virtnet_free_queues() argument
6258 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_free_queues()
6259 __netif_napi_del(&vi->rq[i].napi); in virtnet_free_queues()
6260 __netif_napi_del(&vi->sq[i].napi); in virtnet_free_queues()
6264 * we need to respect an RCU grace period before freeing vi->rq in virtnet_free_queues()
6268 kfree(vi->rq); in virtnet_free_queues()
6269 kfree(vi->sq); in virtnet_free_queues()
6270 kfree(vi->ctrl); in virtnet_free_queues()
6273 static void _free_receive_bufs(struct virtnet_info *vi) in _free_receive_bufs() argument
6278 for (i = 0; i < vi->max_queue_pairs; i++) { in _free_receive_bufs()
6279 while (vi->rq[i].pages) in _free_receive_bufs()
6280 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); in _free_receive_bufs()
6282 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); in _free_receive_bufs()
6283 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); in _free_receive_bufs()
6289 static void free_receive_bufs(struct virtnet_info *vi) in free_receive_bufs() argument
6292 _free_receive_bufs(vi); in free_receive_bufs()
6296 static void free_receive_page_frags(struct virtnet_info *vi) in free_receive_page_frags() argument
6299 for (i = 0; i < vi->max_queue_pairs; i++) in free_receive_page_frags()
6300 if (vi->rq[i].alloc_frag.page) { in free_receive_page_frags()
6301 if (vi->rq[i].last_dma) in free_receive_page_frags()
6302 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0); in free_receive_page_frags()
6303 put_page(vi->rq[i].alloc_frag.page); in free_receive_page_frags()
6309 struct virtnet_info *vi = vq->vdev->priv; in virtnet_sq_free_unused_buf() local
6313 sq = &vi->sq[i]; in virtnet_sq_free_unused_buf()
6333 struct virtnet_info *vi = vq->vdev->priv; in virtnet_sq_free_unused_buf_done() local
6336 netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i)); in virtnet_sq_free_unused_buf_done()
6339 static void free_unused_bufs(struct virtnet_info *vi) in free_unused_bufs() argument
6344 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
6345 struct virtqueue *vq = vi->sq[i].vq; in free_unused_bufs()
6351 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
6352 struct virtqueue *vq = vi->rq[i].vq; in free_unused_bufs()
6360 static void virtnet_del_vqs(struct virtnet_info *vi) in virtnet_del_vqs() argument
6362 struct virtio_device *vdev = vi->vdev; in virtnet_del_vqs()
6364 virtnet_clean_affinity(vi); in virtnet_del_vqs()
6368 virtnet_free_queues(vi); in virtnet_del_vqs()
6375 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) in mergeable_min_buf_len() argument
6377 const unsigned int hdr_len = vi->hdr_len; in mergeable_min_buf_len()
6379 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; in mergeable_min_buf_len()
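
mergeable_min_buf_len sizes one mergeable rx buffer so the worst-case frame (IP_MAX_MTU in big-packets mode, otherwise the netdev's max_mtu, plus the virtio header) can be spread across a full rx ring. A simplified model of the division (the real function also adds Ethernet/VLAN header room and clamps the result to a minimum "good" length; constants here are illustrative):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Spread the worst-case packet across the whole rx ring. */
    static unsigned int min_buf_len(unsigned int hdr_len,
                                    unsigned int max_packet_len,
                                    unsigned int ring_size)
    {
            return DIV_ROUND_UP(hdr_len + max_packet_len, ring_size);
    }

    int main(void)
    {
            /* 65535-byte worst case, 12-byte header, 256-entry ring -> 257 */
            printf("%u\n", min_buf_len(12, 65535, 256));
            return 0;
    }
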
6387 static int virtnet_find_vqs(struct virtnet_info *vi) in virtnet_find_vqs() argument
6400 total_vqs = vi->max_queue_pairs * 2 + in virtnet_find_vqs()
6401 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); in virtnet_find_vqs()
6410 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_find_vqs()
6419 if (vi->has_cvq) { in virtnet_find_vqs()
6424 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
6427 sprintf(vi->rq[i].name, "input.%u", i); in virtnet_find_vqs()
6428 sprintf(vi->sq[i].name, "output.%u", i); in virtnet_find_vqs()
6429 vqs_info[rxq2vq(i)].name = vi->rq[i].name; in virtnet_find_vqs()
6430 vqs_info[txq2vq(i)].name = vi->sq[i].name; in virtnet_find_vqs()
6435 ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, vqs_info, NULL); in virtnet_find_vqs()
6439 if (vi->has_cvq) { in virtnet_find_vqs()
6440 vi->cvq = vqs[total_vqs - 1]; in virtnet_find_vqs()
6441 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) in virtnet_find_vqs()
6442 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in virtnet_find_vqs()
6445 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
6446 vi->rq[i].vq = vqs[rxq2vq(i)]; in virtnet_find_vqs()
6447 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); in virtnet_find_vqs()
6448 vi->sq[i].vq = vqs[txq2vq(i)]; in virtnet_find_vqs()
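
The vq array that virtnet_find_vqs negotiates: two slots per queue pair, named input.%u/output.%u and indexed through rxq2vq()/txq2vq(), plus one trailing slot for the control vq when VIRTIO_NET_F_CTRL_VQ is present, which is why the code takes vqs[total_vqs - 1]. The layout printed by a small sketch:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            int max_queue_pairs = 3;
            bool has_cvq = true;
            int total_vqs = max_queue_pairs * 2 + has_cvq;

            for (int i = 0; i < max_queue_pairs; i++)
                    printf("vq %d: input.%d   vq %d: output.%d\n",
                           i * 2, i, i * 2 + 1, i);
            if (has_cvq)
                    printf("vq %d: control\n", total_vqs - 1);
            return 0;
    }
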
6464 static int virtnet_alloc_queues(struct virtnet_info *vi) in virtnet_alloc_queues() argument
6468 if (vi->has_cvq) { in virtnet_alloc_queues()
6469 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); in virtnet_alloc_queues()
6470 if (!vi->ctrl) in virtnet_alloc_queues()
6473 vi->ctrl = NULL; in virtnet_alloc_queues()
6475 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); in virtnet_alloc_queues()
6476 if (!vi->sq) in virtnet_alloc_queues()
6478 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL); in virtnet_alloc_queues()
6479 if (!vi->rq) in virtnet_alloc_queues()
6482 INIT_DELAYED_WORK(&vi->refill, refill_work); in virtnet_alloc_queues()
6483 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_alloc_queues()
6484 vi->rq[i].pages = NULL; in virtnet_alloc_queues()
6485 netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll, in virtnet_alloc_queues()
6487 vi->rq[i].napi.weight = napi_weight; in virtnet_alloc_queues()
6488 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi, in virtnet_alloc_queues()
6492 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); in virtnet_alloc_queues()
6493 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); in virtnet_alloc_queues()
6494 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); in virtnet_alloc_queues()
6496 u64_stats_init(&vi->rq[i].stats.syncp); in virtnet_alloc_queues()
6497 u64_stats_init(&vi->sq[i].stats.syncp); in virtnet_alloc_queues()
6498 mutex_init(&vi->rq[i].dim_lock); in virtnet_alloc_queues()
6504 kfree(vi->sq); in virtnet_alloc_queues()
6506 kfree(vi->ctrl); in virtnet_alloc_queues()
6511 static int init_vqs(struct virtnet_info *vi) in init_vqs() argument
6516 ret = virtnet_alloc_queues(vi); in init_vqs()
6520 ret = virtnet_find_vqs(vi); in init_vqs()
6525 virtnet_set_affinity(vi); in init_vqs()
6531 virtnet_free_queues(vi); in init_vqs()
6540 struct virtnet_info *vi = netdev_priv(queue->dev); in mergeable_rx_buffer_size_show() local
6542 unsigned int headroom = virtnet_get_headroom(vi); in mergeable_rx_buffer_size_show()
6546 BUG_ON(queue_index >= vi->max_queue_pairs); in mergeable_rx_buffer_size_show()
6547 avg = &vi->rq[queue_index].mrg_avg_pkt_len; in mergeable_rx_buffer_size_show()
6549 get_mergeable_buf_len(&vi->rq[queue_index], avg, in mergeable_rx_buffer_size_show()
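
The sysfs value computed here is driven by mrg_avg_pkt_len, an exponentially weighted moving average of recent packet lengths, so the advertised mergeable buffer size adapts to the traffic mix. One EWMA step in a simplified model (the kernel's ewma_pkt_len uses fixed-point internals with its own precision and weight parameters):

    #include <stdio.h>

    /* EWMA step with weight w: avg += (sample - avg) / w */
    static int ewma_add(int avg, int sample, int weight)
    {
            return avg + (sample - avg) / weight;
    }

    int main(void)
    {
            int avg = 1500;
            int samples[] = { 64, 64, 9000, 64 };

            for (int i = 0; i < 4; i++) {
                    avg = ewma_add(avg, samples[i], 8);
                    printf("after %4d-byte packet: avg ~ %d\n",
                           samples[i], avg);
            }
            return 0;
    }
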
6640 static bool virtnet_check_guest_gso(const struct virtnet_info *vi) in virtnet_check_guest_gso() argument
6642 return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || in virtnet_check_guest_gso()
6643 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || in virtnet_check_guest_gso()
6644 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || in virtnet_check_guest_gso()
6645 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || in virtnet_check_guest_gso()
6646 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) && in virtnet_check_guest_gso()
6647 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6)); in virtnet_check_guest_gso()
6650 static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu) in virtnet_set_big_packets() argument
6652 bool guest_gso = virtnet_check_guest_gso(vi); in virtnet_set_big_packets()
6659 vi->big_packets = true; in virtnet_set_big_packets()
6660 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE); in virtnet_set_big_packets()
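
The fragment count picked by virtnet_set_big_packets: with guest GSO the driver must be able to receive a full ~64 KiB frame, hence MAX_SKB_FRAGS; without it, just enough page-sized fragments to cover the MTU. Worked out numerically (the PAGE_SIZE and MAX_SKB_FRAGS values below are common defaults, assumed for the example):

    #include <stdio.h>

    #define PAGE_SIZE     4096
    #define MAX_SKB_FRAGS 17
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static int big_packets_num_skbfrags(int guest_gso, int mtu)
    {
            return guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
    }

    int main(void)
    {
            printf("gso, any mtu : %d frags\n", big_packets_num_skbfrags(1, 1500));
            printf("no gso, 9000 : %d frags\n", big_packets_num_skbfrags(0, 9000));
            return 0;
    }
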
6684 struct virtnet_info *vi; in virtnet_xdp_rx_hash() local
6690 vi = netdev_priv(xdp->rxq->dev); in virtnet_xdp_rx_hash()
6691 hdr_hash = (struct virtio_net_hdr_v1_hash *)(xdp->data - vi->hdr_len); in virtnet_xdp_rx_hash()
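
virtnet_xdp_rx_hash depends on the rx buffer layout: the device writes a struct virtio_net_hdr_v1_hash immediately before the packet, so stepping xdp->data back by vi->hdr_len lands on the hash fields. A layout model (the struct below mirrors the virtio spec's sizes but is re-declared here as an assumption, not the kernel's definition):

    #include <stdint.h>
    #include <stdio.h>

    /* Trimmed virtio_net_hdr_v1_hash: 12 bytes of base header fields,
     * then the hash (sizes per the virtio spec; assumed for this sketch). */
    struct hdr_v1_hash {
            uint8_t  base[12];
            uint32_t hash_value;
            uint16_t hash_report;
            uint16_t padding;
    };

    struct frame {
            struct hdr_v1_hash hdr;  /* device-written header ...         */
            uint8_t data[64];        /* ... immediately before the packet */
    };

    int main(void)
    {
            struct frame f = { .hdr.hash_value = 0xabcd1234 };
            uint8_t *data = f.data;  /* what the XDP program sees */

            /* Step back hdr_len (== sizeof the header) to reach the hash. */
            struct hdr_v1_hash *h =
                    (struct hdr_v1_hash *)(data - sizeof(struct hdr_v1_hash));
            printf("hash = 0x%x\n", (unsigned)h->hash_value);
            return 0;
    }
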
6710 struct virtnet_info *vi; in virtnet_probe() local
6807 vi = netdev_priv(dev); in virtnet_probe()
6808 vi->dev = dev; in virtnet_probe()
6809 vi->vdev = vdev; in virtnet_probe()
6810 vdev->priv = vi; in virtnet_probe()
6812 INIT_WORK(&vi->config_work, virtnet_config_changed_work); in virtnet_probe()
6813 INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work); in virtnet_probe()
6814 spin_lock_init(&vi->refill_lock); in virtnet_probe()
6817 vi->mergeable_rx_bufs = true; in virtnet_probe()
6822 vi->has_rss_hash_report = true; in virtnet_probe()
6825 vi->has_rss = true; in virtnet_probe()
6827 vi->rss_indir_table_size = in virtnet_probe()
6831 vi->rss_hdr = devm_kzalloc(&vdev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL); in virtnet_probe()
6832 if (!vi->rss_hdr) { in virtnet_probe()
6837 if (vi->has_rss || vi->has_rss_hash_report) { in virtnet_probe()
6838 vi->rss_key_size = in virtnet_probe()
6840 if (vi->rss_key_size > VIRTIO_NET_RSS_MAX_KEY_SIZE) { in virtnet_probe()
6842 vi->rss_key_size, VIRTIO_NET_RSS_MAX_KEY_SIZE); in virtnet_probe()
6847 vi->rss_hash_types_supported = in virtnet_probe()
6849 vi->rss_hash_types_supported &= in virtnet_probe()
6858 if (vi->has_rss_hash_report) in virtnet_probe()
6859 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash); in virtnet_probe()
6862 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); in virtnet_probe()
6864 vi->hdr_len = sizeof(struct virtio_net_hdr); in virtnet_probe()
6868 vi->any_header_sg = true; in virtnet_probe()
6871 vi->has_cvq = true; in virtnet_probe()
6873 mutex_init(&vi->cvq_lock); in virtnet_probe()
6894 virtnet_set_big_packets(vi, mtu); in virtnet_probe()
6896 if (vi->any_header_sg) in virtnet_probe()
6897 dev->needed_headroom = vi->hdr_len; in virtnet_probe()
6901 vi->curr_queue_pairs = max_queue_pairs; in virtnet_probe()
6903 vi->curr_queue_pairs = num_online_cpus(); in virtnet_probe()
6904 vi->max_queue_pairs = max_queue_pairs; in virtnet_probe()
6907 err = init_vqs(vi); in virtnet_probe()
6911 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { in virtnet_probe()
6912 vi->intr_coal_rx.max_usecs = 0; in virtnet_probe()
6913 vi->intr_coal_tx.max_usecs = 0; in virtnet_probe()
6914 vi->intr_coal_rx.max_packets = 0; in virtnet_probe()
6919 if (vi->sq[0].napi.weight) in virtnet_probe()
6920 vi->intr_coal_tx.max_packets = 1; in virtnet_probe()
6922 vi->intr_coal_tx.max_packets = 0; in virtnet_probe()
6925 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { in virtnet_probe()
6927 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_probe()
6928 if (vi->sq[i].napi.weight) in virtnet_probe()
6929 vi->sq[i].intr_coal.max_packets = 1; in virtnet_probe()
6931 err = virtnet_init_irq_moder(vi); in virtnet_probe()
6937 if (vi->mergeable_rx_bufs) in virtnet_probe()
6940 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
6941 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
6946 vi->failover = net_failover_create(vi->dev); in virtnet_probe()
6947 if (IS_ERR(vi->failover)) { in virtnet_probe()
6948 err = PTR_ERR(vi->failover); in virtnet_probe()
6953 if (vi->has_rss || vi->has_rss_hash_report) in virtnet_probe()
6954 virtnet_init_default_rss(vi); in virtnet_probe()
6956 enable_rx_mode_work(vi); in virtnet_probe()
6969 virtio_config_driver_disable(vi->vdev); in virtnet_probe()
6973 if (vi->has_rss || vi->has_rss_hash_report) { in virtnet_probe()
6974 if (!virtnet_commit_rss_command(vi)) { in virtnet_probe()
6977 vi->has_rss_hash_report = false; in virtnet_probe()
6978 vi->has_rss = false; in virtnet_probe()
6982 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_probe()
6989 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { in virtnet_probe()
6993 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, in virtnet_probe()
7002 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) { in virtnet_probe()
7016 if (!virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS, in virtnet_probe()
7026 vi->device_stats_cap = le64_to_cpu(v); in virtnet_probe()
7032 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { in virtnet_probe()
7033 virtnet_config_changed_work(&vi->config_work); in virtnet_probe()
7035 vi->status = VIRTIO_NET_S_LINK_UP; in virtnet_probe()
7036 virtnet_update_settings(vi); in virtnet_probe()
7041 if (virtio_has_feature(vi->vdev, guest_offloads[i])) in virtnet_probe()
7042 set_bit(guest_offloads[i], &vi->guest_offloads); in virtnet_probe()
7043 vi->guest_offloads_capable = vi->guest_offloads; in virtnet_probe()
7047 err = virtnet_cpu_notif_add(vi); in virtnet_probe()
7061 net_failover_destroy(vi->failover); in virtnet_probe()
7064 cancel_delayed_work_sync(&vi->refill); in virtnet_probe()
7065 free_receive_page_frags(vi); in virtnet_probe()
7066 virtnet_del_vqs(vi); in virtnet_probe()
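
The failure labels closing virtnet_probe (destroy the failover, cancel the refill work, free the page frags, delete the vqs) form the usual goto ladder: each label undoes exactly the setup steps that had already succeeded, in reverse order. The pattern in miniature (step names are illustrative, not the driver's):

    #include <stdio.h>

    static int step(const char *name, int fail)
    {
            printf("setup  %s\n", name);
            return fail ? -1 : 0;
    }

    static int probe(void)
    {
            if (step("init_vqs", 0))
                    goto out;
            if (step("create_failover", 0))
                    goto free_vqs;
            if (step("late_setup", 1))      /* simulate a late failure */
                    goto free_failover;
            return 0;

    free_failover:
            printf("undo   create_failover\n");
    free_vqs:
            printf("undo   init_vqs\n");
    out:
            return -1;
    }

    int main(void)
    {
            return probe() ? 1 : 0;
    }
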
7072 static void remove_vq_common(struct virtnet_info *vi) in remove_vq_common() argument
7076 virtio_reset_device(vi->vdev); in remove_vq_common()
7079 free_unused_bufs(vi); in remove_vq_common()
7085 for (i = 0; i < vi->max_queue_pairs; i++) in remove_vq_common()
7086 netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i)); in remove_vq_common()
7088 free_receive_bufs(vi); in remove_vq_common()
7090 free_receive_page_frags(vi); in remove_vq_common()
7092 virtnet_del_vqs(vi); in remove_vq_common()
7097 struct virtnet_info *vi = vdev->priv; in virtnet_remove() local
7099 virtnet_cpu_notif_remove(vi); in virtnet_remove()
7102 flush_work(&vi->config_work); in virtnet_remove()
7103 disable_rx_mode_work(vi); in virtnet_remove()
7104 flush_work(&vi->rx_mode_work); in virtnet_remove()
7106 virtnet_free_irq_moder(vi); in virtnet_remove()
7108 unregister_netdev(vi->dev); in virtnet_remove()
7110 net_failover_destroy(vi->failover); in virtnet_remove()
7112 remove_vq_common(vi); in virtnet_remove()
7114 free_netdev(vi->dev); in virtnet_remove()
7119 struct virtnet_info *vi = vdev->priv; in virtnet_freeze() local
7121 virtnet_cpu_notif_remove(vi); in virtnet_freeze()
7123 remove_vq_common(vi); in virtnet_freeze()
7130 struct virtnet_info *vi = vdev->priv; in virtnet_restore() local
7136 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_restore()
7138 err = virtnet_cpu_notif_add(vi); in virtnet_restore()
7141 remove_vq_common(vi); in virtnet_restore()