Lines matching "vi" (full-text search), apparently drivers/net/virtio_net.c from the Linux kernel.
Each entry shows the source line number, the matching line, the enclosing function, and, where vi is declared on that line, whether it is bound as an argument or a local.

406 	/* Find end of list, sew whole thing into vi->rq.pages. */  in give_pages()
425 static void virtnet_rq_free_buf(struct virtnet_info *vi, in virtnet_rq_free_buf() argument
428 if (vi->mergeable_rx_bufs) in virtnet_rq_free_buf()
430 else if (vi->big_packets) in virtnet_rq_free_buf()
436 static void enable_delayed_refill(struct virtnet_info *vi) in enable_delayed_refill() argument
438 spin_lock_bh(&vi->refill_lock); in enable_delayed_refill()
439 vi->refill_enabled = true; in enable_delayed_refill()
440 spin_unlock_bh(&vi->refill_lock); in enable_delayed_refill()
443 static void disable_delayed_refill(struct virtnet_info *vi) in disable_delayed_refill() argument
445 spin_lock_bh(&vi->refill_lock); in disable_delayed_refill()
446 vi->refill_enabled = false; in disable_delayed_refill()
447 spin_unlock_bh(&vi->refill_lock); in disable_delayed_refill()
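
The two helpers above bracket vi->refill_enabled with refill_lock; the consumer side of the flag appears in the virtnet_receive() matches further down. A connected sketch of the gating pattern, using only fields visible in these matches:

        /* NAPI poll side: only re-arm the delayed refill work while the
         * flag is set, so virtnet_close()/virtnet_freeze_down() can call
         * disable_delayed_refill() and then cancel_delayed_work_sync()
         * without the work being re-scheduled behind their back.
         */
        spin_lock(&vi->refill_lock);
        if (vi->refill_enabled)
                schedule_delayed_work(&vi->refill, 0);
        spin_unlock(&vi->refill_lock);
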
479 struct virtnet_info *vi = vq->vdev->priv; in skb_xmit_done() local
480 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; in skb_xmit_done()
489 netif_wake_subqueue(vi->dev, vq2txq(vq)); in skb_xmit_done()
526 static struct sk_buff *page_to_skb(struct virtnet_info *vi, in page_to_skb() argument
542 hdr_len = vi->hdr_len; in page_to_skb()
543 if (vi->mergeable_rx_bufs) in page_to_skb()
585 if (vi->mergeable_rx_bufs) { in page_to_skb()
749 static void virtnet_rq_set_premapped(struct virtnet_info *vi) in virtnet_rq_set_premapped() argument
754 if (!vi->mergeable_rx_bufs && vi->big_packets) in virtnet_rq_set_premapped()
757 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_rq_set_premapped()
758 if (virtqueue_set_dma_premapped(vi->rq[i].vq)) in virtnet_rq_set_premapped()
761 vi->rq[i].do_dma = true; in virtnet_rq_set_premapped()
767 struct virtnet_info *vi = vq->vdev->priv; in virtnet_rq_unmap_free_buf() local
771 rq = &vi->rq[i]; in virtnet_rq_unmap_free_buf()
776 virtnet_rq_free_buf(vi, rq, buf); in virtnet_rq_unmap_free_buf()
815 static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) in is_xdp_raw_buffer_queue() argument
817 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) in is_xdp_raw_buffer_queue()
819 else if (q < vi->curr_queue_pairs) in is_xdp_raw_buffer_queue()
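
is_xdp_raw_buffer_queue() partitions the active queue range: indices below curr_queue_pairs - xdp_queue_pairs are ordinary skb queues, and the remainder up to curr_queue_pairs carry raw XDP buffers. A standalone restatement with invented counts (the out-of-range tail case is assumed, since the match filter drops it):

        #include <stdbool.h>
        #include <stdio.h>

        static bool is_xdp_raw_buffer_queue(int curr_qp, int xdp_qp, int q)
        {
                if (q < curr_qp - xdp_qp)
                        return false;   /* ordinary skb queue */
                else if (q < curr_qp)
                        return true;    /* raw XDP tx queue */
                else
                        return false;   /* outside the active range */
        }

        int main(void)
        {
                /* e.g. 8 active queue pairs, the last 2 reserved for XDP */
                for (int q = 0; q < 8; q++)
                        printf("queue %d: %s\n", q,
                               is_xdp_raw_buffer_queue(8, 2, q) ? "xdp" : "skb");
                return 0;       /* queues 0..5 -> skb, 6..7 -> xdp */
        }
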
825 static void check_sq_full_and_disable(struct virtnet_info *vi, in check_sq_full_and_disable() argument
832 qnum = sq - vi->sq; in check_sq_full_and_disable()
860 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, in __virtnet_xdp_xmit_one() argument
869 if (unlikely(xdpf->headroom < vi->hdr_len)) in __virtnet_xdp_xmit_one()
884 xdpf->headroom -= vi->hdr_len; in __virtnet_xdp_xmit_one()
885 xdpf->data -= vi->hdr_len; in __virtnet_xdp_xmit_one()
888 memset(hdr, 0, vi->hdr_len); in __virtnet_xdp_xmit_one()
889 xdpf->len += vi->hdr_len; in __virtnet_xdp_xmit_one()
908 /* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
916 #define virtnet_xdp_get_sq(vi) ({ \ argument
919 typeof(vi) v = (vi); \
935 #define virtnet_xdp_put_sq(vi, q) { \ argument
937 typeof(vi) v = (vi); \
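
Only the lines mentioning vi survive the filter, so these macro bodies are mostly elided. Paraphrased as a function (a sketch from the surrounding comment, not the verbatim macro): when curr_queue_pairs exceeds nr_cpu_ids every CPU owns a dedicated XDP send queue and the txq lock can be skipped; otherwise CPUs share send queues modulo curr_queue_pairs and the txq lock is taken, which virtnet_xdp_put_sq() later releases.

        /* Kernel-context sketch; upstream uses a macro pair so sparse can
         * track the conditional txq lock/unlock across get and put.
         */
        static struct send_queue *xdp_get_sq_sketch(struct virtnet_info *v)
        {
                unsigned int cpu = smp_processor_id();
                struct netdev_queue *txq;
                unsigned int qp;

                if (v->curr_queue_pairs > nr_cpu_ids) {
                        /* dedicated per-CPU XDP queue, no contention */
                        qp = v->curr_queue_pairs - v->xdp_queue_pairs + cpu;
                        txq = netdev_get_tx_queue(v->dev, qp);
                        __netif_tx_acquire(txq);
                } else {
                        /* shared queues: serialize on the txq lock */
                        qp = cpu % v->curr_queue_pairs;
                        txq = netdev_get_tx_queue(v->dev, qp);
                        __netif_tx_lock(txq, cpu);
                }
                return &v->sq[qp];
        }
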
949 struct virtnet_info *vi = netdev_priv(dev); in virtnet_xdp_xmit() local
950 struct receive_queue *rq = vi->rq; in virtnet_xdp_xmit()
969 sq = virtnet_xdp_get_sq(vi); in virtnet_xdp_xmit()
995 if (__virtnet_xdp_xmit_one(vi, sq, xdpf)) in virtnet_xdp_xmit()
1001 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) in virtnet_xdp_xmit()
1002 check_sq_full_and_disable(vi, dev, sq); in virtnet_xdp_xmit()
1017 virtnet_xdp_put_sq(vi, sq); in virtnet_xdp_xmit()
1090 static unsigned int virtnet_get_headroom(struct virtnet_info *vi) in virtnet_get_headroom() argument
1092 return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0; in virtnet_get_headroom()
1163 static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi, in receive_small_build_skb() argument
1174 headroom = vi->hdr_len + header_offset; in receive_small_build_skb()
1183 memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len); in receive_small_build_skb()
1189 struct virtnet_info *vi, in receive_small_xdp() argument
1199 unsigned int headroom = vi->hdr_len + header_offset; in receive_small_xdp()
1215 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { in receive_small_xdp()
1217 unsigned int tlen = len + vi->hdr_len; in receive_small_xdp()
1220 xdp_headroom = virtnet_get_headroom(vi); in receive_small_xdp()
1222 headroom = vi->hdr_len + header_offset; in receive_small_xdp()
1237 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len, in receive_small_xdp()
1276 struct virtnet_info *vi, in receive_small() argument
1287 len -= vi->hdr_len; in receive_small()
1297 if (unlikely(vi->xdp_enabled)) { in receive_small()
1303 skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf, in receive_small()
1312 skb = receive_small_build_skb(vi, xdp_headroom, buf, len); in receive_small()
1323 struct virtnet_info *vi, in receive_big() argument
1331 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0); in receive_big()
1333 u64_stats_add(&stats->bytes, len - vi->hdr_len); in receive_big()
1375 struct virtnet_info *vi, in build_skb_from_xdp_buff() argument
1418 struct virtnet_info *vi, in virtnet_build_xdp_buff_mrg() argument
1440 VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true); in virtnet_build_xdp_buff_mrg()
1466 virtio16_to_cpu(vi->vdev, hdr->num_buffers)); in virtnet_build_xdp_buff_mrg()
1506 static void *mergeable_xdp_get_buf(struct virtnet_info *vi, in mergeable_xdp_get_buf() argument
1536 if (likely(headroom >= virtnet_get_headroom(vi) && in mergeable_xdp_get_buf()
1581 struct virtnet_info *vi, in receive_mergeable_xdp() argument
1591 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable_xdp()
1602 data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page, in receive_mergeable_xdp()
1607 err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz, in receive_mergeable_xdp()
1616 head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz); in receive_mergeable_xdp()
1641 struct virtnet_info *vi, in receive_mergeable() argument
1650 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable()
1660 u64_stats_add(&stats->bytes, len - vi->hdr_len); in receive_mergeable()
1669 if (unlikely(vi->xdp_enabled)) { in receive_mergeable()
1675 head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx, in receive_mergeable()
1683 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom); in receive_mergeable()
1695 virtio16_to_cpu(vi->vdev, in receive_mergeable()
1787 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, in receive_buf() argument
1792 struct net_device *dev = vi->dev; in receive_buf()
1796 if (unlikely(len < vi->hdr_len + ETH_HLEN)) { in receive_buf()
1799 virtnet_rq_free_buf(vi, rq, buf); in receive_buf()
1803 if (vi->mergeable_rx_bufs) in receive_buf()
1804 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit, in receive_buf()
1806 else if (vi->big_packets) in receive_buf()
1807 skb = receive_big(dev, vi, rq, buf, len, stats); in receive_buf()
1809 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats); in receive_buf()
1815 if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) in receive_buf()
1822 virtio_is_little_endian(vi->vdev))) { in receive_buf()
1847 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, in add_recvbuf_small() argument
1851 unsigned int xdp_headroom = virtnet_get_headroom(vi); in add_recvbuf_small()
1853 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; in add_recvbuf_small()
1864 vi->hdr_len + GOOD_PACKET_LEN); in add_recvbuf_small()
1876 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, in add_recvbuf_big() argument
1883 sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2); in add_recvbuf_big()
1885 /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */ in add_recvbuf_big()
1886 for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) { in add_recvbuf_big()
1909 sg_set_buf(&rq->sg[0], p, vi->hdr_len); in add_recvbuf_big()
1917 err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2, in add_recvbuf_big()
1929 struct virtnet_info *vi = rq->vq->vdev->priv; in get_mergeable_buf_len() local
1930 const size_t hdr_len = vi->hdr_len; in get_mergeable_buf_len()
1942 static int add_recvbuf_mergeable(struct virtnet_info *vi, in add_recvbuf_mergeable() argument
1946 unsigned int headroom = virtnet_get_headroom(vi); in add_recvbuf_mergeable()
1998 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, in try_fill_recv() argument
2005 if (vi->mergeable_rx_bufs) in try_fill_recv()
2006 err = add_recvbuf_mergeable(vi, rq, gfp); in try_fill_recv()
2007 else if (vi->big_packets) in try_fill_recv()
2008 err = add_recvbuf_big(vi, rq, gfp); in try_fill_recv()
2010 err = add_recvbuf_small(vi, rq, gfp); in try_fill_recv()
2029 struct virtnet_info *vi = rvq->vdev->priv; in skb_recv_done() local
2030 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; in skb_recv_done()
2049 static void virtnet_napi_tx_enable(struct virtnet_info *vi, in virtnet_napi_tx_enable() argument
2059 if (!vi->affinity_hint_set) { in virtnet_napi_tx_enable()
2075 struct virtnet_info *vi = in refill_work() local
2080 for (i = 0; i < vi->curr_queue_pairs; i++) { in refill_work()
2081 struct receive_queue *rq = &vi->rq[i]; in refill_work()
2084 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); in refill_work()
2091 schedule_delayed_work(&vi->refill, HZ/2); in refill_work()
2098 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_receive() local
2105 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_receive()
2110 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats); in virtnet_receive()
2116 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats); in virtnet_receive()
2122 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) { in virtnet_receive()
2123 spin_lock(&vi->refill_lock); in virtnet_receive()
2124 if (vi->refill_enabled) in virtnet_receive()
2125 schedule_delayed_work(&vi->refill, 0); in virtnet_receive()
2126 spin_unlock(&vi->refill_lock); in virtnet_receive()
2147 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll_cleantx() local
2149 struct send_queue *sq = &vi->sq[index]; in virtnet_poll_cleantx()
2150 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_cleantx()
2152 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) in virtnet_poll_cleantx()
2173 static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq) in virtnet_rx_dim_update() argument
2195 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll() local
2213 virtnet_rx_dim_update(vi, rq); in virtnet_poll()
2217 sq = virtnet_xdp_get_sq(vi); in virtnet_poll()
2223 virtnet_xdp_put_sq(vi, sq); in virtnet_poll()
2229 static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index) in virtnet_disable_queue_pair() argument
2231 virtnet_napi_tx_disable(&vi->sq[qp_index].napi); in virtnet_disable_queue_pair()
2232 napi_disable(&vi->rq[qp_index].napi); in virtnet_disable_queue_pair()
2233 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); in virtnet_disable_queue_pair()
2236 static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index) in virtnet_enable_queue_pair() argument
2238 struct net_device *dev = vi->dev; in virtnet_enable_queue_pair()
2241 err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index, in virtnet_enable_queue_pair()
2242 vi->rq[qp_index].napi.napi_id); in virtnet_enable_queue_pair()
2246 err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq, in virtnet_enable_queue_pair()
2251 virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi); in virtnet_enable_queue_pair()
2252 virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi); in virtnet_enable_queue_pair()
2257 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); in virtnet_enable_queue_pair()
2263 struct virtnet_info *vi = netdev_priv(dev); in virtnet_open() local
2266 enable_delayed_refill(vi); in virtnet_open()
2268 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_open()
2269 if (i < vi->curr_queue_pairs) in virtnet_open()
2271 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) in virtnet_open()
2272 schedule_delayed_work(&vi->refill, 0); in virtnet_open()
2274 err = virtnet_enable_queue_pair(vi, i); in virtnet_open()
2282 disable_delayed_refill(vi); in virtnet_open()
2283 cancel_delayed_work_sync(&vi->refill); in virtnet_open()
2286 virtnet_disable_queue_pair(vi, i); in virtnet_open()
2287 cancel_work_sync(&vi->rq[i].dim.work); in virtnet_open()
2296 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_poll_tx() local
2302 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { in virtnet_poll_tx()
2308 txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_tx()
2343 struct virtnet_info *vi = sq->vq->vdev->priv; in xmit_skb() local
2345 unsigned hdr_len = vi->hdr_len; in xmit_skb()
2348 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); in xmit_skb()
2350 can_push = vi->any_header_sg && in xmit_skb()
2361 virtio_is_little_endian(vi->vdev), false, in xmit_skb()
2365 if (vi->mergeable_rx_bufs) in xmit_skb()
2388 struct virtnet_info *vi = netdev_priv(dev); in start_xmit() local
2390 struct send_queue *sq = &vi->sq[qnum]; in start_xmit()
2430 check_sq_full_and_disable(vi, dev, sq); in start_xmit()
2443 static int virtnet_rx_resize(struct virtnet_info *vi, in virtnet_rx_resize() argument
2446 bool running = netif_running(vi->dev); in virtnet_rx_resize()
2449 qindex = rq - vi->rq; in virtnet_rx_resize()
2458 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); in virtnet_rx_resize()
2460 if (!try_fill_recv(vi, rq, GFP_KERNEL)) in virtnet_rx_resize()
2461 schedule_delayed_work(&vi->refill, 0); in virtnet_rx_resize()
2468 static int virtnet_tx_resize(struct virtnet_info *vi, in virtnet_tx_resize() argument
2471 bool running = netif_running(vi->dev); in virtnet_tx_resize()
2475 qindex = sq - vi->sq; in virtnet_tx_resize()
2480 txq = netdev_get_tx_queue(vi->dev, qindex); in virtnet_tx_resize()
2491 netif_stop_subqueue(vi->dev, qindex); in virtnet_tx_resize()
2497 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); in virtnet_tx_resize()
2505 virtnet_napi_tx_enable(vi, sq->vq, &sq->napi); in virtnet_tx_resize()
2514 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, in virtnet_send_command() argument
2522 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); in virtnet_send_command()
2524 vi->ctrl->status = ~0; in virtnet_send_command()
2525 vi->ctrl->hdr.class = class; in virtnet_send_command()
2526 vi->ctrl->hdr.cmd = cmd; in virtnet_send_command()
2528 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); in virtnet_send_command()
2535 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); in virtnet_send_command()
2539 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); in virtnet_send_command()
2541 dev_warn(&vi->vdev->dev, in virtnet_send_command()
2546 if (unlikely(!virtqueue_kick(vi->cvq))) in virtnet_send_command()
2547 return vi->ctrl->status == VIRTIO_NET_OK; in virtnet_send_command()
2552 while (!virtqueue_get_buf(vi->cvq, &tmp) && in virtnet_send_command()
2553 !virtqueue_is_broken(vi->cvq)) in virtnet_send_command()
2556 return vi->ctrl->status == VIRTIO_NET_OK; in virtnet_send_command()
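
virtnet_send_command() builds a three-part scatterlist (a header carrying class/cmd, the caller's optional payload, and one status byte), posts it on the control vq, kicks, and busy-polls until the device returns the buffer. A sketch of the caller side, mirroring the _virtnet_set_queues() matches below:

        struct scatterlist sg;

        /* Stage the payload in the long-lived vi->ctrl area (it must stay
         * DMA-safe until the device consumes it), then hand a single sg
         * entry to virtnet_send_command(), which prepends the header and
         * appends the status entry itself.
         */
        vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
        sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
                                  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg))
                return -EINVAL; /* device rejected the command */
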
2561 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_mac_address() local
2562 struct virtio_device *vdev = vi->vdev; in virtnet_set_mac_address()
2567 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_set_mac_address()
2580 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, in virtnet_set_mac_address()
2609 struct virtnet_info *vi = netdev_priv(dev); in virtnet_stats() local
2613 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_stats()
2615 struct receive_queue *rq = &vi->rq[i]; in virtnet_stats()
2616 struct send_queue *sq = &vi->sq[i]; in virtnet_stats()
2646 static void virtnet_ack_link_announce(struct virtnet_info *vi) in virtnet_ack_link_announce() argument
2649 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, in virtnet_ack_link_announce()
2651 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); in virtnet_ack_link_announce()
2655 static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) in _virtnet_set_queues() argument
2658 struct net_device *dev = vi->dev; in _virtnet_set_queues()
2660 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) in _virtnet_set_queues()
2663 vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); in _virtnet_set_queues()
2664 sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); in _virtnet_set_queues()
2666 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, in _virtnet_set_queues()
2672 vi->curr_queue_pairs = queue_pairs; in _virtnet_set_queues()
2675 schedule_delayed_work(&vi->refill, 0); in _virtnet_set_queues()
2681 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) in virtnet_set_queues() argument
2686 err = _virtnet_set_queues(vi, queue_pairs); in virtnet_set_queues()
2693 struct virtnet_info *vi = netdev_priv(dev); in virtnet_close() local
2697 disable_delayed_refill(vi); in virtnet_close()
2699 cancel_delayed_work_sync(&vi->refill); in virtnet_close()
2701 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_close()
2702 virtnet_disable_queue_pair(vi, i); in virtnet_close()
2703 cancel_work_sync(&vi->rq[i].dim.work); in virtnet_close()
2711 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_rx_mode() local
2721 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) in virtnet_set_rx_mode()
2724 vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); in virtnet_set_rx_mode()
2725 vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); in virtnet_set_rx_mode()
2727 sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); in virtnet_set_rx_mode()
2729 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, in virtnet_set_rx_mode()
2732 vi->ctrl->promisc ? "en" : "dis"); in virtnet_set_rx_mode()
2734 sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); in virtnet_set_rx_mode()
2736 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, in virtnet_set_rx_mode()
2739 vi->ctrl->allmulti ? "en" : "dis"); in virtnet_set_rx_mode()
2753 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); in virtnet_set_rx_mode()
2764 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); in virtnet_set_rx_mode()
2772 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, in virtnet_set_rx_mode()
2782 struct virtnet_info *vi = netdev_priv(dev); in virtnet_vlan_rx_add_vid() local
2785 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_add_vid()
2786 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); in virtnet_vlan_rx_add_vid()
2788 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, in virtnet_vlan_rx_add_vid()
2797 struct virtnet_info *vi = netdev_priv(dev); in virtnet_vlan_rx_kill_vid() local
2800 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_kill_vid()
2801 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); in virtnet_vlan_rx_kill_vid()
2803 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, in virtnet_vlan_rx_kill_vid()
2809 static void virtnet_clean_affinity(struct virtnet_info *vi) in virtnet_clean_affinity() argument
2813 if (vi->affinity_hint_set) { in virtnet_clean_affinity()
2814 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_clean_affinity()
2815 virtqueue_set_affinity(vi->rq[i].vq, NULL); in virtnet_clean_affinity()
2816 virtqueue_set_affinity(vi->sq[i].vq, NULL); in virtnet_clean_affinity()
2819 vi->affinity_hint_set = false; in virtnet_clean_affinity()
2823 static void virtnet_set_affinity(struct virtnet_info *vi) in virtnet_set_affinity() argument
2833 virtnet_clean_affinity(vi); in virtnet_set_affinity()
2838 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); in virtnet_set_affinity()
2839 stragglers = num_cpu >= vi->curr_queue_pairs ? in virtnet_set_affinity()
2840 num_cpu % vi->curr_queue_pairs : in virtnet_set_affinity()
2844 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_set_affinity()
2852 virtqueue_set_affinity(vi->rq[i].vq, mask); in virtnet_set_affinity()
2853 virtqueue_set_affinity(vi->sq[i].vq, mask); in virtnet_set_affinity()
2854 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS); in virtnet_set_affinity()
2858 vi->affinity_hint_set = true; in virtnet_set_affinity()
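
The stride/stragglers arithmetic above spreads CPUs as evenly as possible across queue pairs, giving the first num_cpu % curr_queue_pairs queues one extra CPU. A standalone check with invented counts:

        #include <stdio.h>

        int main(void)
        {
                /* e.g. 6 online CPUs serving 4 queue pairs */
                int num_cpu = 6, queue_pairs = 4;
                int stride = num_cpu / queue_pairs;
                int stragglers = num_cpu >= queue_pairs ?
                                 num_cpu % queue_pairs : 0;

                if (stride < 1)
                        stride = 1;
                for (int i = 0; i < queue_pairs; i++)
                        printf("queue %d -> %d cpu(s)\n", i,
                               stride + (i < stragglers ? 1 : 0));
                return 0;       /* queues 0,1 get 2 CPUs; 2,3 get 1 */
        }
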
2864 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, in virtnet_cpu_online() local
2866 virtnet_set_affinity(vi); in virtnet_cpu_online()
2872 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, in virtnet_cpu_dead() local
2874 virtnet_set_affinity(vi); in virtnet_cpu_dead()
2880 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, in virtnet_cpu_down_prep() local
2883 virtnet_clean_affinity(vi); in virtnet_cpu_down_prep()
2889 static int virtnet_cpu_notif_add(struct virtnet_info *vi) in virtnet_cpu_notif_add() argument
2893 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
2897 &vi->node_dead); in virtnet_cpu_notif_add()
2900 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
2904 static void virtnet_cpu_notif_remove(struct virtnet_info *vi) in virtnet_cpu_notif_remove() argument
2906 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_remove()
2908 &vi->node_dead); in virtnet_cpu_notif_remove()
2911 static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi, in virtnet_send_ctrl_coal_vq_cmd() argument
2916 vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn); in virtnet_send_ctrl_coal_vq_cmd()
2917 vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs); in virtnet_send_ctrl_coal_vq_cmd()
2918 vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets); in virtnet_send_ctrl_coal_vq_cmd()
2919 sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq)); in virtnet_send_ctrl_coal_vq_cmd()
2921 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, in virtnet_send_ctrl_coal_vq_cmd()
2929 static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi, in virtnet_send_rx_ctrl_coal_vq_cmd() argument
2935 err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue), in virtnet_send_rx_ctrl_coal_vq_cmd()
2940 vi->rq[queue].intr_coal.max_usecs = max_usecs; in virtnet_send_rx_ctrl_coal_vq_cmd()
2941 vi->rq[queue].intr_coal.max_packets = max_packets; in virtnet_send_rx_ctrl_coal_vq_cmd()
2946 static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi, in virtnet_send_tx_ctrl_coal_vq_cmd() argument
2952 err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue), in virtnet_send_tx_ctrl_coal_vq_cmd()
2957 vi->sq[queue].intr_coal.max_usecs = max_usecs; in virtnet_send_tx_ctrl_coal_vq_cmd()
2958 vi->sq[queue].intr_coal.max_packets = max_packets; in virtnet_send_tx_ctrl_coal_vq_cmd()
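
The rxq2vq()/txq2vq() helpers used in these two wrappers map a queue-pair index onto the device's interleaved virtqueue numbering (rx0, tx0, rx1, tx1, ..., with the control vq last when present). A standalone restatement of that mapping as defined in the driver:

        #include <stdio.h>

        static int rxq2vq(int rxq) { return rxq * 2; }
        static int txq2vq(int txq) { return txq * 2 + 1; }

        int main(void)
        {
                for (int q = 0; q < 3; q++)
                        printf("pair %d: rx vq %d, tx vq %d\n",
                               q, rxq2vq(q), txq2vq(q));
                return 0;       /* pair 0 -> vq 0/1, pair 1 -> 2/3, ... */
        }
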
2968 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_ringparam() local
2970 ring->rx_max_pending = vi->rq[0].vq->num_max; in virtnet_get_ringparam()
2971 ring->tx_max_pending = vi->sq[0].vq->num_max; in virtnet_get_ringparam()
2972 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_get_ringparam()
2973 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_get_ringparam()
2981 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_ringparam() local
2990 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_set_ringparam()
2991 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_set_ringparam()
2997 if (ring->rx_pending > vi->rq[0].vq->num_max) in virtnet_set_ringparam()
3000 if (ring->tx_pending > vi->sq[0].vq->num_max) in virtnet_set_ringparam()
3003 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_set_ringparam()
3004 rq = vi->rq + i; in virtnet_set_ringparam()
3005 sq = vi->sq + i; in virtnet_set_ringparam()
3008 err = virtnet_tx_resize(vi, sq, ring->tx_pending); in virtnet_set_ringparam()
3017 err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i, in virtnet_set_ringparam()
3018 vi->intr_coal_tx.max_usecs, in virtnet_set_ringparam()
3019 vi->intr_coal_tx.max_packets); in virtnet_set_ringparam()
3025 err = virtnet_rx_resize(vi, rq, ring->rx_pending); in virtnet_set_ringparam()
3030 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i, in virtnet_set_ringparam()
3031 vi->intr_coal_rx.max_usecs, in virtnet_set_ringparam()
3032 vi->intr_coal_rx.max_packets); in virtnet_set_ringparam()
3041 static bool virtnet_commit_rss_command(struct virtnet_info *vi) in virtnet_commit_rss_command() argument
3043 struct net_device *dev = vi->dev; in virtnet_commit_rss_command()
3051 sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size); in virtnet_commit_rss_command()
3053 sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1); in virtnet_commit_rss_command()
3054 sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size); in virtnet_commit_rss_command()
3058 sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size); in virtnet_commit_rss_command()
3060 sg_buf_size = vi->rss_key_size; in virtnet_commit_rss_command()
3061 sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size); in virtnet_commit_rss_command()
3063 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, in virtnet_commit_rss_command()
3064 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG in virtnet_commit_rss_command()
3072 static void virtnet_init_default_rss(struct virtnet_info *vi) in virtnet_init_default_rss() argument
3077 vi->ctrl->rss.hash_types = vi->rss_hash_types_supported; in virtnet_init_default_rss()
3078 vi->rss_hash_types_saved = vi->rss_hash_types_supported; in virtnet_init_default_rss()
3079 vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size in virtnet_init_default_rss()
3080 ? vi->rss_indir_table_size - 1 : 0; in virtnet_init_default_rss()
3081 vi->ctrl->rss.unclassified_queue = 0; in virtnet_init_default_rss()
3083 for (; i < vi->rss_indir_table_size; ++i) { in virtnet_init_default_rss()
3084 indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs); in virtnet_init_default_rss()
3085 vi->ctrl->rss.indirection_table[i] = indir_val; in virtnet_init_default_rss()
3088 vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0; in virtnet_init_default_rss()
3089 vi->ctrl->rss.hash_key_length = vi->rss_key_size; in virtnet_init_default_rss()
3091 netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size); in virtnet_init_default_rss()
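
ethtool_rxfh_indir_default(i, n) reduces to i % n, so the default indirection table simply round-robins hash buckets over the active queue pairs. A standalone check with an invented table size:

        #include <stdio.h>

        int main(void)
        {
                unsigned int table_size = 8, queue_pairs = 3;

                for (unsigned int i = 0; i < table_size; i++)
                        printf("indir[%u] = %u\n", i, i % queue_pairs);
                return 0;       /* 0 1 2 0 1 2 0 1 */
        }
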
3094 static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info) in virtnet_get_hashflow() argument
3099 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) { in virtnet_get_hashflow()
3102 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { in virtnet_get_hashflow()
3107 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) { in virtnet_get_hashflow()
3110 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { in virtnet_get_hashflow()
3115 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) { in virtnet_get_hashflow()
3118 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { in virtnet_get_hashflow()
3123 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) { in virtnet_get_hashflow()
3126 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { in virtnet_get_hashflow()
3131 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) in virtnet_get_hashflow()
3136 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) in virtnet_get_hashflow()
3146 static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info) in virtnet_set_hashflow() argument
3148 u32 new_hashtypes = vi->rss_hash_types_saved; in virtnet_set_hashflow()
3197 if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported)) in virtnet_set_hashflow()
3200 if (new_hashtypes != vi->rss_hash_types_saved) { in virtnet_set_hashflow()
3201 vi->rss_hash_types_saved = new_hashtypes; in virtnet_set_hashflow()
3202 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved; in virtnet_set_hashflow()
3203 if (vi->dev->features & NETIF_F_RXHASH) in virtnet_set_hashflow()
3204 return virtnet_commit_rss_command(vi); in virtnet_set_hashflow()
3213 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_drvinfo() local
3214 struct virtio_device *vdev = vi->vdev; in virtnet_get_drvinfo()
3226 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_channels() local
3236 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) in virtnet_set_channels()
3243 if (vi->rq[0].xdp_prog) in virtnet_set_channels()
3247 err = _virtnet_set_queues(vi, queue_pairs); in virtnet_set_channels()
3252 virtnet_set_affinity(vi); in virtnet_set_channels()
3263 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_strings() local
3269 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_get_strings()
3275 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_get_strings()
3286 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_sset_count() local
3290 return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN + in virtnet_get_sset_count()
3300 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_ethtool_stats() local
3306 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_get_ethtool_stats()
3307 struct receive_queue *rq = &vi->rq[i]; in virtnet_get_ethtool_stats()
3321 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_get_ethtool_stats()
3322 struct send_queue *sq = &vi->sq[i]; in virtnet_get_ethtool_stats()
3340 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_channels() local
3342 channels->combined_count = vi->curr_queue_pairs; in virtnet_get_channels()
3343 channels->max_combined = vi->max_queue_pairs; in virtnet_get_channels()
3353 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_link_ksettings() local
3356 &vi->speed, &vi->duplex); in virtnet_set_link_ksettings()
3362 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_link_ksettings() local
3364 cmd->base.speed = vi->speed; in virtnet_get_link_ksettings()
3365 cmd->base.duplex = vi->duplex; in virtnet_get_link_ksettings()
3371 static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi, in virtnet_send_tx_notf_coal_cmds() argument
3377 vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); in virtnet_send_tx_notf_coal_cmds()
3378 vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); in virtnet_send_tx_notf_coal_cmds()
3379 sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx)); in virtnet_send_tx_notf_coal_cmds()
3381 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, in virtnet_send_tx_notf_coal_cmds()
3386 vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs; in virtnet_send_tx_notf_coal_cmds()
3387 vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames; in virtnet_send_tx_notf_coal_cmds()
3388 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_tx_notf_coal_cmds()
3389 vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs; in virtnet_send_tx_notf_coal_cmds()
3390 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames; in virtnet_send_tx_notf_coal_cmds()
3396 static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, in virtnet_send_rx_notf_coal_cmds() argument
3403 if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_rx_notf_coal_cmds()
3406 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs || in virtnet_send_rx_notf_coal_cmds()
3407 ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets)) in virtnet_send_rx_notf_coal_cmds()
3410 if (rx_ctrl_dim_on && !vi->rx_dim_enabled) { in virtnet_send_rx_notf_coal_cmds()
3411 vi->rx_dim_enabled = true; in virtnet_send_rx_notf_coal_cmds()
3412 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_send_rx_notf_coal_cmds()
3413 vi->rq[i].dim_enabled = true; in virtnet_send_rx_notf_coal_cmds()
3417 if (!rx_ctrl_dim_on && vi->rx_dim_enabled) { in virtnet_send_rx_notf_coal_cmds()
3418 vi->rx_dim_enabled = false; in virtnet_send_rx_notf_coal_cmds()
3419 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_send_rx_notf_coal_cmds()
3420 vi->rq[i].dim_enabled = false; in virtnet_send_rx_notf_coal_cmds()
3427 vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); in virtnet_send_rx_notf_coal_cmds()
3428 vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); in virtnet_send_rx_notf_coal_cmds()
3429 sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx)); in virtnet_send_rx_notf_coal_cmds()
3431 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, in virtnet_send_rx_notf_coal_cmds()
3436 vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; in virtnet_send_rx_notf_coal_cmds()
3437 vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; in virtnet_send_rx_notf_coal_cmds()
3438 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
3439 vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs; in virtnet_send_rx_notf_coal_cmds()
3440 vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames; in virtnet_send_rx_notf_coal_cmds()
3446 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, in virtnet_send_notf_coal_cmds() argument
3451 err = virtnet_send_tx_notf_coal_cmds(vi, ec); in virtnet_send_notf_coal_cmds()
3455 err = virtnet_send_rx_notf_coal_cmds(vi, ec); in virtnet_send_notf_coal_cmds()
3462 static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi, in virtnet_send_rx_notf_coal_vq_cmds() argument
3467 bool cur_rx_dim = vi->rq[queue].dim_enabled; in virtnet_send_rx_notf_coal_vq_cmds()
3471 max_usecs = vi->rq[queue].intr_coal.max_usecs; in virtnet_send_rx_notf_coal_vq_cmds()
3472 max_packets = vi->rq[queue].intr_coal.max_packets; in virtnet_send_rx_notf_coal_vq_cmds()
3479 vi->rq[queue].dim_enabled = true; in virtnet_send_rx_notf_coal_vq_cmds()
3484 vi->rq[queue].dim_enabled = false; in virtnet_send_rx_notf_coal_vq_cmds()
3489 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue, in virtnet_send_rx_notf_coal_vq_cmds()
3498 static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi, in virtnet_send_notf_coal_vq_cmds() argument
3504 err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue); in virtnet_send_notf_coal_vq_cmds()
3508 err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue, in virtnet_send_notf_coal_vq_cmds()
3522 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rx_dim_work() local
3523 struct net_device *dev = vi->dev; in virtnet_rx_dim_work()
3536 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_rx_dim_work()
3537 rq = &vi->rq[i]; in virtnet_rx_dim_work()
3539 qnum = rq - vi->rq; in virtnet_rx_dim_work()
3547 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum, in virtnet_rx_dim_work()
3592 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_coalesce() local
3598 for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) { in virtnet_set_coalesce()
3600 vi->sq[queue_number].napi.weight, in virtnet_set_coalesce()
3606 /* All queues that belong to [queue_number, vi->max_queue_pairs] will be in virtnet_set_coalesce()
3613 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) in virtnet_set_coalesce()
3614 ret = virtnet_send_notf_coal_cmds(vi, ec); in virtnet_set_coalesce()
3622 for (; queue_number < vi->max_queue_pairs; queue_number++) in virtnet_set_coalesce()
3623 vi->sq[queue_number].napi.weight = napi_weight; in virtnet_set_coalesce()
3634 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_coalesce() local
3636 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { in virtnet_get_coalesce()
3637 ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs; in virtnet_get_coalesce()
3638 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs; in virtnet_get_coalesce()
3639 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets; in virtnet_get_coalesce()
3640 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets; in virtnet_get_coalesce()
3641 ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled; in virtnet_get_coalesce()
3645 if (vi->sq[0].napi.weight) in virtnet_get_coalesce()
3656 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_per_queue_coalesce() local
3660 if (queue >= vi->max_queue_pairs) in virtnet_set_per_queue_coalesce()
3666 vi->sq[queue].napi.weight, in virtnet_set_per_queue_coalesce()
3671 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_set_per_queue_coalesce()
3672 ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue); in virtnet_set_per_queue_coalesce()
3680 vi->sq[queue].napi.weight = napi_weight; in virtnet_set_per_queue_coalesce()
3689 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_per_queue_coalesce() local
3691 if (queue >= vi->max_queue_pairs) in virtnet_get_per_queue_coalesce()
3694 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { in virtnet_get_per_queue_coalesce()
3695 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs; in virtnet_get_per_queue_coalesce()
3696 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs; in virtnet_get_per_queue_coalesce()
3697 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets; in virtnet_get_per_queue_coalesce()
3698 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets; in virtnet_get_per_queue_coalesce()
3699 ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled; in virtnet_get_per_queue_coalesce()
3703 if (vi->sq[queue].napi.weight) in virtnet_get_per_queue_coalesce()
3712 struct virtnet_info *vi = netdev_priv(dev); in virtnet_init_settings() local
3714 vi->speed = SPEED_UNKNOWN; in virtnet_init_settings()
3715 vi->duplex = DUPLEX_UNKNOWN; in virtnet_init_settings()
3718 static void virtnet_update_settings(struct virtnet_info *vi) in virtnet_update_settings() argument
3723 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) in virtnet_update_settings()
3726 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); in virtnet_update_settings()
3729 vi->speed = speed; in virtnet_update_settings()
3731 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); in virtnet_update_settings()
3734 vi->duplex = duplex; in virtnet_update_settings()
3750 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_rxfh() local
3754 for (i = 0; i < vi->rss_indir_table_size; ++i) in virtnet_get_rxfh()
3755 rxfh->indir[i] = vi->ctrl->rss.indirection_table[i]; in virtnet_get_rxfh()
3759 memcpy(rxfh->key, vi->ctrl->rss.key, vi->rss_key_size); in virtnet_get_rxfh()
3770 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_rxfh() local
3778 for (i = 0; i < vi->rss_indir_table_size; ++i) in virtnet_set_rxfh()
3779 vi->ctrl->rss.indirection_table[i] = rxfh->indir[i]; in virtnet_set_rxfh()
3782 memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size); in virtnet_set_rxfh()
3784 virtnet_commit_rss_command(vi); in virtnet_set_rxfh()
3791 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_rxnfc() local
3796 info->data = vi->curr_queue_pairs; in virtnet_get_rxnfc()
3799 virtnet_get_hashflow(vi, info); in virtnet_get_rxnfc()
3810 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_rxnfc() local
3815 if (!virtnet_set_hashflow(vi, info)) in virtnet_set_rxnfc()
3855 struct virtnet_info *vi = vdev->priv; in virtnet_freeze_down() local
3858 flush_work(&vi->config_work); in virtnet_freeze_down()
3860 netif_tx_lock_bh(vi->dev); in virtnet_freeze_down()
3861 netif_device_detach(vi->dev); in virtnet_freeze_down()
3862 netif_tx_unlock_bh(vi->dev); in virtnet_freeze_down()
3863 if (netif_running(vi->dev)) in virtnet_freeze_down()
3864 virtnet_close(vi->dev); in virtnet_freeze_down()
3867 static int init_vqs(struct virtnet_info *vi);
3871 struct virtnet_info *vi = vdev->priv; in virtnet_restore_up() local
3874 err = init_vqs(vi); in virtnet_restore_up()
3880 enable_delayed_refill(vi); in virtnet_restore_up()
3882 if (netif_running(vi->dev)) { in virtnet_restore_up()
3883 err = virtnet_open(vi->dev); in virtnet_restore_up()
3888 netif_tx_lock_bh(vi->dev); in virtnet_restore_up()
3889 netif_device_attach(vi->dev); in virtnet_restore_up()
3890 netif_tx_unlock_bh(vi->dev); in virtnet_restore_up()
3894 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) in virtnet_set_guest_offloads() argument
3897 vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads); in virtnet_set_guest_offloads()
3899 sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); in virtnet_set_guest_offloads()
3901 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, in virtnet_set_guest_offloads()
3903 dev_warn(&vi->dev->dev, "Fail to set guest offload.\n"); in virtnet_set_guest_offloads()
3910 static int virtnet_clear_guest_offloads(struct virtnet_info *vi) in virtnet_clear_guest_offloads() argument
3914 if (!vi->guest_offloads) in virtnet_clear_guest_offloads()
3917 return virtnet_set_guest_offloads(vi, offloads); in virtnet_clear_guest_offloads()
3920 static int virtnet_restore_guest_offloads(struct virtnet_info *vi) in virtnet_restore_guest_offloads() argument
3922 u64 offloads = vi->guest_offloads; in virtnet_restore_guest_offloads()
3924 if (!vi->guest_offloads) in virtnet_restore_guest_offloads()
3927 return virtnet_set_guest_offloads(vi, offloads); in virtnet_restore_guest_offloads()
3936 struct virtnet_info *vi = netdev_priv(dev); in virtnet_xdp_set() local
3941 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) in virtnet_xdp_set()
3942 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || in virtnet_xdp_set()
3943 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || in virtnet_xdp_set()
3944 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || in virtnet_xdp_set()
3945 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || in virtnet_xdp_set()
3946 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) || in virtnet_xdp_set()
3947 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) || in virtnet_xdp_set()
3948 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) { in virtnet_xdp_set()
3953 if (vi->mergeable_rx_bufs && !vi->any_header_sg) { in virtnet_xdp_set()
3964 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; in virtnet_xdp_set()
3969 if (curr_qp + xdp_qp > vi->max_queue_pairs) { in virtnet_xdp_set()
3971 curr_qp + xdp_qp, vi->max_queue_pairs); in virtnet_xdp_set()
3975 old_prog = rtnl_dereference(vi->rq[0].xdp_prog); in virtnet_xdp_set()
3980 bpf_prog_add(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
3984 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
3985 napi_disable(&vi->rq[i].napi); in virtnet_xdp_set()
3986 virtnet_napi_tx_disable(&vi->sq[i].napi); in virtnet_xdp_set()
3991 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
3992 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
3994 virtnet_restore_guest_offloads(vi); in virtnet_xdp_set()
3999 err = _virtnet_set_queues(vi, curr_qp + xdp_qp); in virtnet_xdp_set()
4003 vi->xdp_queue_pairs = xdp_qp; in virtnet_xdp_set()
4006 vi->xdp_enabled = true; in virtnet_xdp_set()
4007 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
4008 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
4010 virtnet_clear_guest_offloads(vi); in virtnet_xdp_set()
4016 vi->xdp_enabled = false; in virtnet_xdp_set()
4019 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
4023 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); in virtnet_xdp_set()
4024 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_xdp_set()
4025 &vi->sq[i].napi); in virtnet_xdp_set()
4033 virtnet_clear_guest_offloads(vi); in virtnet_xdp_set()
4034 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_xdp_set()
4035 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); in virtnet_xdp_set()
4039 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
4040 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); in virtnet_xdp_set()
4041 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_xdp_set()
4042 &vi->sq[i].napi); in virtnet_xdp_set()
4046 bpf_prog_sub(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
4063 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_phys_port_name() local
4066 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_get_phys_port_name()
4079 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_features() local
4084 if (vi->xdp_enabled) in virtnet_set_features()
4088 offloads = vi->guest_offloads_capable; in virtnet_set_features()
4090 offloads = vi->guest_offloads_capable & in virtnet_set_features()
4093 err = virtnet_set_guest_offloads(vi, offloads); in virtnet_set_features()
4096 vi->guest_offloads = offloads; in virtnet_set_features()
4101 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved; in virtnet_set_features()
4103 vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE; in virtnet_set_features()
4105 if (!virtnet_commit_rss_command(vi)) in virtnet_set_features()
4147 struct virtnet_info *vi = in virtnet_config_changed_work() local
4151 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, in virtnet_config_changed_work()
4156 netdev_notify_peers(vi->dev); in virtnet_config_changed_work()
4157 virtnet_ack_link_announce(vi); in virtnet_config_changed_work()
4163 if (vi->status == v) in virtnet_config_changed_work()
4166 vi->status = v; in virtnet_config_changed_work()
4168 if (vi->status & VIRTIO_NET_S_LINK_UP) { in virtnet_config_changed_work()
4169 virtnet_update_settings(vi); in virtnet_config_changed_work()
4170 netif_carrier_on(vi->dev); in virtnet_config_changed_work()
4171 netif_tx_wake_all_queues(vi->dev); in virtnet_config_changed_work()
4173 netif_carrier_off(vi->dev); in virtnet_config_changed_work()
4174 netif_tx_stop_all_queues(vi->dev); in virtnet_config_changed_work()
4180 struct virtnet_info *vi = vdev->priv; in virtnet_config_changed() local
4182 schedule_work(&vi->config_work); in virtnet_config_changed()
4185 static void virtnet_free_queues(struct virtnet_info *vi) in virtnet_free_queues() argument
4189 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_free_queues()
4190 __netif_napi_del(&vi->rq[i].napi); in virtnet_free_queues()
4191 __netif_napi_del(&vi->sq[i].napi); in virtnet_free_queues()
4195 * we need to respect an RCU grace period before freeing vi->rq in virtnet_free_queues()
4199 kfree(vi->rq); in virtnet_free_queues()
4200 kfree(vi->sq); in virtnet_free_queues()
4201 kfree(vi->ctrl); in virtnet_free_queues()
4204 static void _free_receive_bufs(struct virtnet_info *vi) in _free_receive_bufs() argument
4209 for (i = 0; i < vi->max_queue_pairs; i++) { in _free_receive_bufs()
4210 while (vi->rq[i].pages) in _free_receive_bufs()
4211 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); in _free_receive_bufs()
4213 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); in _free_receive_bufs()
4214 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); in _free_receive_bufs()
4220 static void free_receive_bufs(struct virtnet_info *vi) in free_receive_bufs() argument
4223 _free_receive_bufs(vi); in free_receive_bufs()
4227 static void free_receive_page_frags(struct virtnet_info *vi) in free_receive_page_frags() argument
4230 for (i = 0; i < vi->max_queue_pairs; i++) in free_receive_page_frags()
4231 if (vi->rq[i].alloc_frag.page) { in free_receive_page_frags()
4232 if (vi->rq[i].do_dma && vi->rq[i].last_dma) in free_receive_page_frags()
4233 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0); in free_receive_page_frags()
4234 put_page(vi->rq[i].alloc_frag.page); in free_receive_page_frags()
4246 static void free_unused_bufs(struct virtnet_info *vi) in free_unused_bufs() argument
4251 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
4252 struct virtqueue *vq = vi->sq[i].vq; in free_unused_bufs()
4258 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
4259 struct virtqueue *vq = vi->rq[i].vq; in free_unused_bufs()
4267 static void virtnet_del_vqs(struct virtnet_info *vi) in virtnet_del_vqs() argument
4269 struct virtio_device *vdev = vi->vdev; in virtnet_del_vqs()
4271 virtnet_clean_affinity(vi); in virtnet_del_vqs()
4275 virtnet_free_queues(vi); in virtnet_del_vqs()
4282 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) in mergeable_min_buf_len() argument
4284 const unsigned int hdr_len = vi->hdr_len; in mergeable_min_buf_len()
4286 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; in mergeable_min_buf_len()
4294 static int virtnet_find_vqs(struct virtnet_info *vi) in virtnet_find_vqs() argument
4308 total_vqs = vi->max_queue_pairs * 2 + in virtnet_find_vqs()
4309 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); in virtnet_find_vqs()
4321 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_find_vqs()
4330 if (vi->has_cvq) { in virtnet_find_vqs()
4336 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
4339 sprintf(vi->rq[i].name, "input.%u", i); in virtnet_find_vqs()
4340 sprintf(vi->sq[i].name, "output.%u", i); in virtnet_find_vqs()
4341 names[rxq2vq(i)] = vi->rq[i].name; in virtnet_find_vqs()
4342 names[txq2vq(i)] = vi->sq[i].name; in virtnet_find_vqs()
4347 ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks, in virtnet_find_vqs()
4352 if (vi->has_cvq) { in virtnet_find_vqs()
4353 vi->cvq = vqs[total_vqs - 1]; in virtnet_find_vqs()
4354 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) in virtnet_find_vqs()
4355 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in virtnet_find_vqs()
4358 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
4359 vi->rq[i].vq = vqs[rxq2vq(i)]; in virtnet_find_vqs()
4360 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); in virtnet_find_vqs()
4361 vi->sq[i].vq = vqs[txq2vq(i)]; in virtnet_find_vqs()
4379 static int virtnet_alloc_queues(struct virtnet_info *vi) in virtnet_alloc_queues() argument
4383 if (vi->has_cvq) { in virtnet_alloc_queues()
4384 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); in virtnet_alloc_queues()
4385 if (!vi->ctrl) in virtnet_alloc_queues()
4388 vi->ctrl = NULL; in virtnet_alloc_queues()
4390 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); in virtnet_alloc_queues()
4391 if (!vi->sq) in virtnet_alloc_queues()
4393 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL); in virtnet_alloc_queues()
4394 if (!vi->rq) in virtnet_alloc_queues()
4397 INIT_DELAYED_WORK(&vi->refill, refill_work); in virtnet_alloc_queues()
4398 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_alloc_queues()
4399 vi->rq[i].pages = NULL; in virtnet_alloc_queues()
4400 netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll, in virtnet_alloc_queues()
4402 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi, in virtnet_alloc_queues()
4406 INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work); in virtnet_alloc_queues()
4407 vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in virtnet_alloc_queues()
4409 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); in virtnet_alloc_queues()
4410 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); in virtnet_alloc_queues()
4411 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); in virtnet_alloc_queues()
4413 u64_stats_init(&vi->rq[i].stats.syncp); in virtnet_alloc_queues()
4414 u64_stats_init(&vi->sq[i].stats.syncp); in virtnet_alloc_queues()
4420 kfree(vi->sq); in virtnet_alloc_queues()
4422 kfree(vi->ctrl); in virtnet_alloc_queues()
4427 static int init_vqs(struct virtnet_info *vi) in init_vqs() argument
4432 ret = virtnet_alloc_queues(vi); in init_vqs()
4436 ret = virtnet_find_vqs(vi); in init_vqs()
4440 virtnet_rq_set_premapped(vi); in init_vqs()
4443 virtnet_set_affinity(vi); in init_vqs()
4449 virtnet_free_queues(vi); in init_vqs()
4458 struct virtnet_info *vi = netdev_priv(queue->dev); in mergeable_rx_buffer_size_show() local
4460 unsigned int headroom = virtnet_get_headroom(vi); in mergeable_rx_buffer_size_show()
4464 BUG_ON(queue_index >= vi->max_queue_pairs); in mergeable_rx_buffer_size_show()
4465 avg = &vi->rq[queue_index].mrg_avg_pkt_len; in mergeable_rx_buffer_size_show()
4467 get_mergeable_buf_len(&vi->rq[queue_index], avg, in mergeable_rx_buffer_size_show()
4558 static bool virtnet_check_guest_gso(const struct virtnet_info *vi) in virtnet_check_guest_gso() argument
4560 return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || in virtnet_check_guest_gso()
4561 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || in virtnet_check_guest_gso()
4562 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || in virtnet_check_guest_gso()
4563 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || in virtnet_check_guest_gso()
4564 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) && in virtnet_check_guest_gso()
4565 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6)); in virtnet_check_guest_gso()
4568 static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu) in virtnet_set_big_packets() argument
4570 bool guest_gso = virtnet_check_guest_gso(vi); in virtnet_set_big_packets()
4577 vi->big_packets = true; in virtnet_set_big_packets()
4578 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE); in virtnet_set_big_packets()
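
Without guest GSO, big_packets_num_skbfrags only needs enough page-sized frags to cover the MTU; with guest GSO it is pinned at MAX_SKB_FRAGS. A quick arithmetic check with invented values:

        #include <stdio.h>

        #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

        int main(void)
        {
                /* e.g. mtu 9000 with 4 KiB pages needs 3 frags */
                printf("%d\n", DIV_ROUND_UP(9000, 4096));       /* 3 */
                return 0;
        }
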
4586 struct virtnet_info *vi; in virtnet_probe() local
4673 vi = netdev_priv(dev); in virtnet_probe()
4674 vi->dev = dev; in virtnet_probe()
4675 vi->vdev = vdev; in virtnet_probe()
4676 vdev->priv = vi; in virtnet_probe()
4678 INIT_WORK(&vi->config_work, virtnet_config_changed_work); in virtnet_probe()
4679 spin_lock_init(&vi->refill_lock); in virtnet_probe()
4682 vi->mergeable_rx_bufs = true; in virtnet_probe()
4687 vi->has_rss_hash_report = true; in virtnet_probe()
4690 vi->has_rss = true; in virtnet_probe()
4692 if (vi->has_rss || vi->has_rss_hash_report) { in virtnet_probe()
4693 vi->rss_indir_table_size = in virtnet_probe()
4696 vi->rss_key_size = in virtnet_probe()
4699 vi->rss_hash_types_supported = in virtnet_probe()
4701 vi->rss_hash_types_supported &= in virtnet_probe()
4709 if (vi->has_rss_hash_report) in virtnet_probe()
4710 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash); in virtnet_probe()
4713 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); in virtnet_probe()
4715 vi->hdr_len = sizeof(struct virtio_net_hdr); in virtnet_probe()
4719 vi->any_header_sg = true; in virtnet_probe()
4722 vi->has_cvq = true; in virtnet_probe()
4743 virtnet_set_big_packets(vi, mtu); in virtnet_probe()
4745 if (vi->any_header_sg) in virtnet_probe()
4746 dev->needed_headroom = vi->hdr_len; in virtnet_probe()
4750 vi->curr_queue_pairs = max_queue_pairs; in virtnet_probe()
4752 vi->curr_queue_pairs = num_online_cpus(); in virtnet_probe()
4753 vi->max_queue_pairs = max_queue_pairs; in virtnet_probe()
4756 err = init_vqs(vi); in virtnet_probe()
4760 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { in virtnet_probe()
4761 vi->intr_coal_rx.max_usecs = 0; in virtnet_probe()
4762 vi->intr_coal_tx.max_usecs = 0; in virtnet_probe()
4763 vi->intr_coal_rx.max_packets = 0; in virtnet_probe()
4768 if (vi->sq[0].napi.weight) in virtnet_probe()
4769 vi->intr_coal_tx.max_packets = 1; in virtnet_probe()
4771 vi->intr_coal_tx.max_packets = 0; in virtnet_probe()
4774 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { in virtnet_probe()
4776 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_probe()
4777 if (vi->sq[i].napi.weight) in virtnet_probe()
4778 vi->sq[i].intr_coal.max_packets = 1; in virtnet_probe()
4782 if (vi->mergeable_rx_bufs) in virtnet_probe()
4785 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
4786 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
4791 vi->failover = net_failover_create(vi->dev); in virtnet_probe()
4792 if (IS_ERR(vi->failover)) { in virtnet_probe()
4793 err = PTR_ERR(vi->failover); in virtnet_probe()
4798 if (vi->has_rss || vi->has_rss_hash_report) in virtnet_probe()
4799 virtnet_init_default_rss(vi); in virtnet_probe()
4813 _virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_probe()
4820 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { in virtnet_probe()
4824 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, in virtnet_probe()
4835 err = virtnet_cpu_notif_add(vi); in virtnet_probe()
4844 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { in virtnet_probe()
4845 schedule_work(&vi->config_work); in virtnet_probe()
4847 vi->status = VIRTIO_NET_S_LINK_UP; in virtnet_probe()
4848 virtnet_update_settings(vi); in virtnet_probe()
4853 if (virtio_has_feature(vi->vdev, guest_offloads[i])) in virtnet_probe()
4854 set_bit(guest_offloads[i], &vi->guest_offloads); in virtnet_probe()
4855 vi->guest_offloads_capable = vi->guest_offloads; in virtnet_probe()
4865 net_failover_destroy(vi->failover); in virtnet_probe()
4868 cancel_delayed_work_sync(&vi->refill); in virtnet_probe()
4869 free_receive_page_frags(vi); in virtnet_probe()
4870 virtnet_del_vqs(vi); in virtnet_probe()
4876 static void remove_vq_common(struct virtnet_info *vi) in remove_vq_common() argument
4878 virtio_reset_device(vi->vdev); in remove_vq_common()
4881 free_unused_bufs(vi); in remove_vq_common()
4883 free_receive_bufs(vi); in remove_vq_common()
4885 free_receive_page_frags(vi); in remove_vq_common()
4887 virtnet_del_vqs(vi); in remove_vq_common()
4892 struct virtnet_info *vi = vdev->priv; in virtnet_remove() local
4894 virtnet_cpu_notif_remove(vi); in virtnet_remove()
4897 flush_work(&vi->config_work); in virtnet_remove()
4899 unregister_netdev(vi->dev); in virtnet_remove()
4901 net_failover_destroy(vi->failover); in virtnet_remove()
4903 remove_vq_common(vi); in virtnet_remove()
4905 free_netdev(vi->dev); in virtnet_remove()
4910 struct virtnet_info *vi = vdev->priv; in virtnet_freeze() local
4912 virtnet_cpu_notif_remove(vi); in virtnet_freeze()
4914 remove_vq_common(vi); in virtnet_freeze()
4921 struct virtnet_info *vi = vdev->priv; in virtnet_restore() local
4927 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_restore()
4929 err = virtnet_cpu_notif_add(vi); in virtnet_restore()
4932 remove_vq_common(vi); in virtnet_restore()