Lines matching "xdp-rx-metadata" in drivers/net/veth.c

1 // SPDX-License-Identifier: GPL-2.0-only
21 #include <net/xdp.h>
43 /* xdp */
122 struct xdp_buff xdp; member
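The xdp_buff member above sits at the head of a small wrapper that also carries the originating sk_buff, which is what the RX metadata callbacks near the end of this listing recover from their ctx pointer. A minimal sketch of that container, assuming the usual two-field layout (only the first member is matched above):

struct veth_xdp_buff {
	struct xdp_buff xdp;	/* must stay first so the ctx cast works */
	struct sk_buff *skb;	/* non-NULL only when the buff was built from an skb */
};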
129 cmd->base.speed = SPEED_10000; in veth_get_link_ksettings()
130 cmd->base.duplex = DUPLEX_FULL; in veth_get_link_ksettings()
131 cmd->base.port = PORT_TP; in veth_get_link_ksettings()
132 cmd->base.autoneg = AUTONEG_DISABLE; in veth_get_link_ksettings()
138 strscpy(info->driver, DRV_NAME, sizeof(info->driver)); in veth_get_drvinfo()
139 strscpy(info->version, DRV_VERSION, sizeof(info->version)); in veth_get_drvinfo()
151 for (i = 0; i < dev->real_num_rx_queues; i++) in veth_get_strings()
156 for (i = 0; i < dev->real_num_tx_queues; i++) in veth_get_strings()
171 VETH_RQ_STATS_LEN * dev->real_num_rx_queues + in veth_get_sset_count()
172 VETH_TQ_STATS_LEN * dev->real_num_tx_queues + in veth_get_sset_count()
175 return -EOPNOTSUPP; in veth_get_sset_count()
186 for (i = 0; i < dev->real_num_rx_queues; i++) { in veth_get_page_pool_stats()
187 if (!priv->rq[i].page_pool) in veth_get_page_pool_stats()
189 page_pool_get_stats(priv->rq[i].page_pool, &pp_stats); in veth_get_page_pool_stats()
199 struct net_device *peer = rtnl_dereference(priv->peer); in veth_get_ethtool_stats()
202 data[0] = peer ? peer->ifindex : 0; in veth_get_ethtool_stats()
204 for (i = 0; i < dev->real_num_rx_queues; i++) { in veth_get_ethtool_stats()
205 const struct veth_rq_stats *rq_stats = &priv->rq[i].stats; in veth_get_ethtool_stats()
206 const void *stats_base = (void *)&rq_stats->vs; in veth_get_ethtool_stats()
211 start = u64_stats_fetch_begin(&rq_stats->syncp); in veth_get_ethtool_stats()
216 } while (u64_stats_fetch_retry(&rq_stats->syncp, start)); in veth_get_ethtool_stats()
225 for (i = 0; i < peer->real_num_rx_queues; i++) { in veth_get_ethtool_stats()
226 const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats; in veth_get_ethtool_stats()
227 const void *base = (void *)&rq_stats->vs; in veth_get_ethtool_stats()
231 tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN; in veth_get_ethtool_stats()
233 start = u64_stats_fetch_begin(&rq_stats->syncp); in veth_get_ethtool_stats()
238 } while (u64_stats_fetch_retry(&rq_stats->syncp, start)); in veth_get_ethtool_stats()
240 pp_idx = idx + dev->real_num_tx_queues * VETH_TQ_STATS_LEN; in veth_get_ethtool_stats()
249 channels->tx_count = dev->real_num_tx_queues; in veth_get_channels()
250 channels->rx_count = dev->real_num_rx_queues; in veth_get_channels()
251 channels->max_tx = dev->num_tx_queues; in veth_get_channels()
252 channels->max_rx = dev->num_rx_queues; in veth_get_channels()
282 static void *veth_xdp_to_ptr(struct xdp_frame *xdp) in veth_xdp_to_ptr() argument
284 return (void *)((unsigned long)xdp | VETH_XDP_FLAG); in veth_xdp_to_ptr()
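Only the tagging direction shows up in the match; the ring consumer needs the inverse helpers too. A sketch of the likely companions, assuming VETH_XDP_FLAG is a single low bit that valid kernel pointers never carry:

static bool veth_is_xdp_frame(void *ptr)
{
	/* tagged entries are xdp_frames; untagged entries are skbs */
	return (unsigned long)ptr & VETH_XDP_FLAG;
}

static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
{
	/* clear the tag bit before using the pointer as a frame */
	return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}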
299 if (!READ_ONCE(rq->rx_notify_masked) && in __veth_xdp_flush()
300 napi_schedule_prep(&rq->xdp_napi)) { in __veth_xdp_flush()
301 WRITE_ONCE(rq->rx_notify_masked, true); in __veth_xdp_flush()
302 __napi_schedule(&rq->xdp_napi); in __veth_xdp_flush()
308 if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) { in veth_xdp_rx()
317 struct veth_rq *rq, bool xdp) in veth_forward_skb() argument
319 return __dev_forward_skb(dev, skb) ?: xdp ? in veth_forward_skb()
327 * When XDP is enabled, all traffic is considered eligible, as the xmit
331 * - the sock_wfree destructor is used by UDP, ICMP and XDP sockets -
338 return !(dev->features & NETIF_F_ALL_TSO) || in veth_skb_is_eligible_for_gro()
339 (skb->destructor == sock_wfree && in veth_skb_is_eligible_for_gro()
340 rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD)); in veth_skb_is_eligible_for_gro()
349 int length = skb->len; in veth_xmit()
354 rcv = rcu_dereference(priv->peer); in veth_xmit()
362 if (rxq < rcv->real_num_rx_queues) { in veth_xmit()
363 rq = &rcv_priv->rq[rxq]; in veth_xmit()
365 /* The napi pointer is available when an XDP program is in veth_xmit()
369 use_napi = rcu_access_pointer(rq->napi) && in veth_xmit()
381 atomic64_inc(&priv->dropped); in veth_xmit()
395 result->peer_tq_xdp_xmit_err = 0; in veth_stats_rx()
396 result->xdp_packets = 0; in veth_stats_rx()
397 result->xdp_tx_err = 0; in veth_stats_rx()
398 result->xdp_bytes = 0; in veth_stats_rx()
399 result->rx_drops = 0; in veth_stats_rx()
400 for (i = 0; i < dev->num_rx_queues; i++) { in veth_stats_rx()
402 struct veth_rq_stats *stats = &priv->rq[i].stats; in veth_stats_rx()
406 start = u64_stats_fetch_begin(&stats->syncp); in veth_stats_rx()
407 peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err; in veth_stats_rx()
408 xdp_tx_err = stats->vs.xdp_tx_err; in veth_stats_rx()
409 packets = stats->vs.xdp_packets; in veth_stats_rx()
410 bytes = stats->vs.xdp_bytes; in veth_stats_rx()
411 drops = stats->vs.rx_drops; in veth_stats_rx()
412 } while (u64_stats_fetch_retry(&stats->syncp, start)); in veth_stats_rx()
413 result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err; in veth_stats_rx()
414 result->xdp_tx_err += xdp_tx_err; in veth_stats_rx()
415 result->xdp_packets += packets; in veth_stats_rx()
416 result->xdp_bytes += bytes; in veth_stats_rx()
417 result->rx_drops += drops; in veth_stats_rx()
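The vs/syncp pair sampled in the loop above lives in a per-queue container; a sketch of the assumed layout together with the matching writer side, which is what makes the fetch_begin/fetch_retry snapshot loop safe against torn 64-bit reads on 32-bit hosts:

struct veth_rq_stats {
	struct veth_stats	vs;	/* xdp_packets, xdp_bytes, rx_drops, ... */
	struct u64_stats_sync	syncp;	/* write seqcount guarding vs */
};

/* writer side, single producer per queue (NAPI context) */
u64_stats_update_begin(&rq->stats.syncp);
rq->stats.vs.xdp_packets += done;
rq->stats.vs.xdp_bytes += bytes;
u64_stats_update_end(&rq->stats.syncp);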
426 struct veth_stats rx; in veth_get_stats64() local
428 tot->tx_dropped = atomic64_read(&priv->dropped); in veth_get_stats64()
429 dev_fetch_sw_netstats(tot, dev->tstats); in veth_get_stats64()
431 veth_stats_rx(&rx, dev); in veth_get_stats64()
432 tot->tx_dropped += rx.xdp_tx_err; in veth_get_stats64()
433 tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err; in veth_get_stats64()
434 tot->rx_bytes += rx.xdp_bytes; in veth_get_stats64()
435 tot->rx_packets += rx.xdp_packets; in veth_get_stats64()
438 peer = rcu_dereference(priv->peer); in veth_get_stats64()
442 dev_fetch_sw_netstats(&tot_peer, peer->tstats); in veth_get_stats64()
443 tot->rx_bytes += tot_peer.tx_bytes; in veth_get_stats64()
444 tot->rx_packets += tot_peer.tx_packets; in veth_get_stats64()
446 veth_stats_rx(&rx, peer); in veth_get_stats64()
447 tot->tx_dropped += rx.peer_tq_xdp_xmit_err; in veth_get_stats64()
448 tot->rx_dropped += rx.xdp_tx_err; in veth_get_stats64()
449 tot->tx_bytes += rx.xdp_bytes; in veth_get_stats64()
450 tot->tx_packets += rx.xdp_packets; in veth_get_stats64()
462 return smp_processor_id() % dev->real_num_rx_queues; in veth_select_rxq()
470 return rcu_dereference(priv->peer); in veth_peer_dev()
478 int i, ret = -ENXIO, nxmit = 0; in veth_xdp_xmit()
484 return -EINVAL; in veth_xdp_xmit()
487 rcv = rcu_dereference(priv->peer); in veth_xdp_xmit()
492 rq = &rcv_priv->rq[veth_select_rxq(rcv)]; in veth_xdp_xmit()
496 if (!rcu_access_pointer(rq->napi)) in veth_xdp_xmit()
499 max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN; in veth_xdp_xmit()
501 spin_lock(&rq->xdp_ring.producer_lock); in veth_xdp_xmit()
507 __ptr_ring_produce(&rq->xdp_ring, ptr))) in veth_xdp_xmit()
511 spin_unlock(&rq->xdp_ring.producer_lock); in veth_xdp_xmit()
518 u64_stats_update_begin(&rq->stats.syncp); in veth_xdp_xmit()
519 rq->stats.vs.peer_tq_xdp_xmit += nxmit; in veth_xdp_xmit()
520 rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit; in veth_xdp_xmit()
521 u64_stats_update_end(&rq->stats.syncp); in veth_xdp_xmit()
539 atomic64_add(n, &priv->dropped); in veth_ndo_xdp_xmit()
549 sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false); in veth_xdp_flush_bq()
555 for (i = sent; unlikely(i < bq->count); i++) in veth_xdp_flush_bq()
556 xdp_return_frame(bq->q[i]); in veth_xdp_flush_bq()
558 drops = bq->count - sent; in veth_xdp_flush_bq()
559 trace_xdp_bulk_tx(rq->dev, sent, drops, err); in veth_xdp_flush_bq()
561 u64_stats_update_begin(&rq->stats.syncp); in veth_xdp_flush_bq()
562 rq->stats.vs.xdp_tx += sent; in veth_xdp_flush_bq()
563 rq->stats.vs.xdp_tx_err += drops; in veth_xdp_flush_bq()
564 u64_stats_update_end(&rq->stats.syncp); in veth_xdp_flush_bq()
566 bq->count = 0; in veth_xdp_flush_bq()
571 struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev); in veth_xdp_flush()
577 rcv = rcu_dereference(priv->peer); in veth_xdp_flush()
582 rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)]; in veth_xdp_flush()
584 if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog))) in veth_xdp_flush()
592 static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp, in veth_xdp_tx() argument
595 struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp); in veth_xdp_tx()
598 return -EOVERFLOW; in veth_xdp_tx()
600 if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE)) in veth_xdp_tx()
603 bq->q[bq->count++] = frame; in veth_xdp_tx()
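The bq passed around the XDP_TX path is a small per-poll staging array: frames accumulate until the array is full (or the poll ends) and are then pushed to the peer's ring in one veth_xdp_flush_bq() call. A sketch of the assumed structure:

#define VETH_XDP_TX_BULK_SIZE	16

struct veth_xdp_tx_bq {
	struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
	unsigned int count;	/* staged frames, flushed when the array fills */
};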
617 xdp_prog = rcu_dereference(rq->xdp_prog); in veth_xdp_rcv_one()
620 struct xdp_buff *xdp = &vxbuf.xdp; in veth_xdp_rcv_one() local
623 xdp_convert_frame_to_buff(frame, xdp); in veth_xdp_rcv_one()
624 xdp->rxq = &rq->xdp_rxq; in veth_xdp_rcv_one()
627 act = bpf_prog_run_xdp(xdp_prog, xdp); in veth_xdp_rcv_one()
631 if (xdp_update_frame_from_buff(xdp, frame)) in veth_xdp_rcv_one()
636 xdp->rxq->mem = frame->mem; in veth_xdp_rcv_one()
637 if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) { in veth_xdp_rcv_one()
638 trace_xdp_exception(rq->dev, xdp_prog, act); in veth_xdp_rcv_one()
640 stats->rx_drops++; in veth_xdp_rcv_one()
643 stats->xdp_tx++; in veth_xdp_rcv_one()
648 xdp->rxq->mem = frame->mem; in veth_xdp_rcv_one()
649 if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) { in veth_xdp_rcv_one()
651 stats->rx_drops++; in veth_xdp_rcv_one()
654 stats->xdp_redirect++; in veth_xdp_rcv_one()
658 bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act); in veth_xdp_rcv_one()
661 trace_xdp_exception(rq->dev, xdp_prog, act); in veth_xdp_rcv_one()
664 stats->xdp_drops++; in veth_xdp_rcv_one()
690 stats->rx_drops += n_xdpf; in veth_xdp_rcv_bulk_skb()
699 rq->dev); in veth_xdp_rcv_bulk_skb()
702 stats->rx_drops++; in veth_xdp_rcv_bulk_skb()
705 napi_gro_receive(&rq->xdp_napi, skb); in veth_xdp_rcv_bulk_skb()
709 static void veth_xdp_get(struct xdp_buff *xdp) in veth_xdp_get() argument
711 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); in veth_xdp_get()
714 get_page(virt_to_page(xdp->data)); in veth_xdp_get()
715 if (likely(!xdp_buff_has_frags(xdp))) in veth_xdp_get()
718 for (i = 0; i < sinfo->nr_frags; i++) in veth_xdp_get()
719 __skb_frag_ref(&sinfo->frags[i]); in veth_xdp_get()
723 struct xdp_buff *xdp, in veth_convert_skb_to_xdp_buff() argument
730 skb_shinfo(skb)->nr_frags || in veth_convert_skb_to_xdp_buff()
740 * into order-0 pages without linearizing it. in veth_convert_skb_to_xdp_buff()
744 max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - in veth_convert_skb_to_xdp_buff()
746 if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size) in veth_convert_skb_to_xdp_buff()
749 size = min_t(u32, skb->len, max_head_size); in veth_convert_skb_to_xdp_buff()
753 va = page_pool_dev_alloc_va(rq->page_pool, &truesize); in veth_convert_skb_to_xdp_buff()
759 page_pool_free_va(rq->page_pool, va, true); in veth_convert_skb_to_xdp_buff()
767 if (skb_copy_bits(skb, 0, nskb->data, size)) { in veth_convert_skb_to_xdp_buff()
773 head_off = skb_headroom(nskb) - skb_headroom(skb); in veth_convert_skb_to_xdp_buff()
778 len = skb->len - off; in veth_convert_skb_to_xdp_buff()
780 for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) { in veth_convert_skb_to_xdp_buff()
784 page = page_pool_dev_alloc(rq->page_pool, &page_offset, in veth_convert_skb_to_xdp_buff()
800 len -= size; in veth_convert_skb_to_xdp_buff()
809 frame_sz = skb_end_pointer(skb) - skb->head; in veth_convert_skb_to_xdp_buff()
811 xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq); in veth_convert_skb_to_xdp_buff()
812 xdp_prepare_buff(xdp, skb->head, skb_headroom(skb), in veth_convert_skb_to_xdp_buff()
816 skb_shinfo(skb)->xdp_frags_size = skb->data_len; in veth_convert_skb_to_xdp_buff()
817 xdp_buff_set_frags_flag(xdp); in veth_convert_skb_to_xdp_buff()
819 xdp_buff_clear_frags_flag(xdp); in veth_convert_skb_to_xdp_buff()
828 return -ENOMEM; in veth_convert_skb_to_xdp_buff()
839 struct xdp_buff *xdp = &vxbuf.xdp; in veth_xdp_rcv_skb() local
846 xdp_prog = rcu_dereference(rq->xdp_prog); in veth_xdp_rcv_skb()
852 __skb_push(skb, skb->data - skb_mac_header(skb)); in veth_xdp_rcv_skb()
853 if (veth_convert_skb_to_xdp_buff(rq, xdp, &skb)) in veth_xdp_rcv_skb()
857 orig_data = xdp->data; in veth_xdp_rcv_skb()
858 orig_data_end = xdp->data_end; in veth_xdp_rcv_skb()
860 act = bpf_prog_run_xdp(xdp_prog, xdp); in veth_xdp_rcv_skb()
866 veth_xdp_get(xdp); in veth_xdp_rcv_skb()
868 xdp->rxq->mem = rq->xdp_mem; in veth_xdp_rcv_skb()
869 if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) { in veth_xdp_rcv_skb()
870 trace_xdp_exception(rq->dev, xdp_prog, act); in veth_xdp_rcv_skb()
871 stats->rx_drops++; in veth_xdp_rcv_skb()
874 stats->xdp_tx++; in veth_xdp_rcv_skb()
878 veth_xdp_get(xdp); in veth_xdp_rcv_skb()
880 xdp->rxq->mem = rq->xdp_mem; in veth_xdp_rcv_skb()
881 if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) { in veth_xdp_rcv_skb()
882 stats->rx_drops++; in veth_xdp_rcv_skb()
885 stats->xdp_redirect++; in veth_xdp_rcv_skb()
889 bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act); in veth_xdp_rcv_skb()
892 trace_xdp_exception(rq->dev, xdp_prog, act); in veth_xdp_rcv_skb()
895 stats->xdp_drops++; in veth_xdp_rcv_skb()
901 off = orig_data - xdp->data; in veth_xdp_rcv_skb()
905 __skb_pull(skb, -off); in veth_xdp_rcv_skb()
910 off = xdp->data_end - orig_data_end; in veth_xdp_rcv_skb()
914 /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers in veth_xdp_rcv_skb()
917 if (xdp_buff_has_frags(xdp)) in veth_xdp_rcv_skb()
918 skb->data_len = skb_shinfo(skb)->xdp_frags_size; in veth_xdp_rcv_skb()
920 skb->data_len = 0; in veth_xdp_rcv_skb()
922 skb->protocol = eth_type_trans(skb, rq->dev); in veth_xdp_rcv_skb()
924 metalen = xdp->data - xdp->data_meta; in veth_xdp_rcv_skb()
930 stats->rx_drops++; in veth_xdp_rcv_skb()
937 xdp_return_buff(xdp); in veth_xdp_rcv_skb()
950 void *ptr = __ptr_ring_consume(&rq->xdp_ring); in veth_xdp_rcv()
959 stats->xdp_bytes += xdp_get_frame_len(frame); in veth_xdp_rcv()
974 stats->xdp_bytes += skb->len; in veth_xdp_rcv()
980 napi_gro_receive(&rq->xdp_napi, skb); in veth_xdp_rcv()
989 u64_stats_update_begin(&rq->stats.syncp); in veth_xdp_rcv()
990 rq->stats.vs.xdp_redirect += stats->xdp_redirect; in veth_xdp_rcv()
991 rq->stats.vs.xdp_bytes += stats->xdp_bytes; in veth_xdp_rcv()
992 rq->stats.vs.xdp_drops += stats->xdp_drops; in veth_xdp_rcv()
993 rq->stats.vs.rx_drops += stats->rx_drops; in veth_xdp_rcv()
994 rq->stats.vs.xdp_packets += done; in veth_xdp_rcv()
995 u64_stats_update_end(&rq->stats.syncp); in veth_xdp_rcv()
1018 smp_store_mb(rq->rx_notify_masked, false); in veth_poll()
1019 if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) { in veth_poll()
1020 if (napi_schedule_prep(&rq->xdp_napi)) { in veth_poll()
1021 WRITE_ONCE(rq->rx_notify_masked, true); in veth_poll()
1022 __napi_schedule(&rq->xdp_napi); in veth_poll()
1040 .dev = &rq->dev->dev, in veth_create_page_pool()
1043 rq->page_pool = page_pool_create(&pp_params); in veth_create_page_pool()
1044 if (IS_ERR(rq->page_pool)) { in veth_create_page_pool()
1045 int err = PTR_ERR(rq->page_pool); in veth_create_page_pool()
1047 rq->page_pool = NULL; in veth_create_page_pool()
1060 err = veth_create_page_pool(&priv->rq[i]); in __veth_napi_enable_range()
1066 struct veth_rq *rq = &priv->rq[i]; in __veth_napi_enable_range()
1068 err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL); in __veth_napi_enable_range()
1074 struct veth_rq *rq = &priv->rq[i]; in __veth_napi_enable_range()
1076 napi_enable(&rq->xdp_napi); in __veth_napi_enable_range()
1077 rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi); in __veth_napi_enable_range()
1083 for (i--; i >= start; i--) in __veth_napi_enable_range()
1084 ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free); in __veth_napi_enable_range()
1087 for (i--; i >= start; i--) { in __veth_napi_enable_range()
1088 page_pool_destroy(priv->rq[i].page_pool); in __veth_napi_enable_range()
1089 priv->rq[i].page_pool = NULL; in __veth_napi_enable_range()
1097 return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues); in __veth_napi_enable()
1106 struct veth_rq *rq = &priv->rq[i]; in veth_napi_del_range()
1108 rcu_assign_pointer(priv->rq[i].napi, NULL); in veth_napi_del_range()
1109 napi_disable(&rq->xdp_napi); in veth_napi_del_range()
1110 __netif_napi_del(&rq->xdp_napi); in veth_napi_del_range()
1115 struct veth_rq *rq = &priv->rq[i]; in veth_napi_del_range()
1117 rq->rx_notify_masked = false; in veth_napi_del_range()
1118 ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free); in veth_napi_del_range()
1122 page_pool_destroy(priv->rq[i].page_pool); in veth_napi_del_range()
1123 priv->rq[i].page_pool = NULL; in veth_napi_del_range()
1129 veth_napi_del_range(dev, 0, dev->real_num_rx_queues); in veth_napi_del()
1134 return !!(dev->wanted_features & NETIF_F_GRO); in veth_gro_requested()
1144 struct veth_rq *rq = &priv->rq[i]; in veth_enable_xdp_range()
1147 netif_napi_add(dev, &rq->xdp_napi, veth_poll); in veth_enable_xdp_range()
1148 err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id); in veth_enable_xdp_range()
1152 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, in veth_enable_xdp_range()
1159 rq->xdp_mem = rq->xdp_rxq.mem; in veth_enable_xdp_range()
1164 xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq); in veth_enable_xdp_range()
1166 for (i--; i >= start; i--) { in veth_enable_xdp_range()
1167 struct veth_rq *rq = &priv->rq[i]; in veth_enable_xdp_range()
1169 xdp_rxq_info_unreg(&rq->xdp_rxq); in veth_enable_xdp_range()
1171 netif_napi_del(&rq->xdp_napi); in veth_enable_xdp_range()
1184 struct veth_rq *rq = &priv->rq[i]; in veth_disable_xdp_range()
1186 rq->xdp_rxq.mem = rq->xdp_mem; in veth_disable_xdp_range()
1187 xdp_rxq_info_unreg(&rq->xdp_rxq); in veth_disable_xdp_range()
1190 netif_napi_del(&rq->xdp_napi); in veth_disable_xdp_range()
1196 bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP); in veth_enable_xdp()
1200 if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) { in veth_enable_xdp()
1201 err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on); in veth_enable_xdp()
1208 veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true); in veth_enable_xdp()
1214 for (i = 0; i < dev->real_num_rx_queues; i++) { in veth_enable_xdp()
1215 rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog); in veth_enable_xdp()
1216 rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi); in veth_enable_xdp()
1227 for (i = 0; i < dev->real_num_rx_queues; i++) in veth_disable_xdp()
1228 rcu_assign_pointer(priv->rq[i].xdp_prog, NULL); in veth_disable_xdp()
1233 veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false); in veth_disable_xdp()
1242 struct veth_rq *rq = &priv->rq[i]; in veth_napi_enable_range()
1244 netif_napi_add(dev, &rq->xdp_napi, veth_poll); in veth_napi_enable_range()
1250 struct veth_rq *rq = &priv->rq[i]; in veth_napi_enable_range()
1252 netif_napi_del(&rq->xdp_napi); in veth_napi_enable_range()
1261 return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues); in veth_napi_enable()
1271 if (priv->_xdp_prog) { in veth_disable_range_safe()
1287 if (priv->_xdp_prog) { in veth_enable_range_safe()
1312 peer = rtnl_dereference(priv->peer); in veth_set_xdp_features()
1313 if (peer && peer->real_num_tx_queues <= dev->real_num_rx_queues) { in veth_set_xdp_features()
1319 if (priv_peer->_xdp_prog || veth_gro_requested(peer)) in veth_set_xdp_features()
1338 if (!ch->rx_count || !ch->tx_count) in veth_set_channels()
1339 return -EINVAL; in veth_set_channels()
1341 /* avoid breaking XDP, if that is enabled */ in veth_set_channels()
1342 peer = rtnl_dereference(priv->peer); in veth_set_channels()
1344 if (priv->_xdp_prog && peer && ch->rx_count < peer->real_num_tx_queues) in veth_set_channels()
1345 return -EINVAL; in veth_set_channels()
1347 if (peer && peer_priv && peer_priv->_xdp_prog && ch->tx_count > peer->real_num_rx_queues) in veth_set_channels()
1348 return -EINVAL; in veth_set_channels()
1350 old_rx_count = dev->real_num_rx_queues; in veth_set_channels()
1351 new_rx_count = ch->rx_count; in veth_set_channels()
1364 err = netif_set_real_num_rx_queues(dev, ch->rx_count); in veth_set_channels()
1368 err = netif_set_real_num_tx_queues(dev, ch->tx_count); in veth_set_channels()
1372 /* this error condition could happen only if rx and tx change in veth_set_channels()
1373 * in opposite directions (e.g. tx nr rises, rx nr decreases) in veth_set_channels()
1378 pr_warn("Can't restore rx queues config %d -> %d %d", in veth_set_channels()
1395 /* update XDP supported features */ in veth_set_channels()
1404 old_rx_count = ch->rx_count; in veth_set_channels()
1411 struct net_device *peer = rtnl_dereference(priv->peer); in veth_open()
1415 return -ENOTCONN; in veth_open()
1417 if (priv->_xdp_prog) { in veth_open()
1427 if (peer->flags & IFF_UP) { in veth_open()
1440 struct net_device *peer = rtnl_dereference(priv->peer); in veth_close()
1446 if (priv->_xdp_prog) in veth_close()
1464 priv->rq = kvcalloc(dev->num_rx_queues, sizeof(*priv->rq), in veth_alloc_queues()
1466 if (!priv->rq) in veth_alloc_queues()
1467 return -ENOMEM; in veth_alloc_queues()
1469 for (i = 0; i < dev->num_rx_queues; i++) { in veth_alloc_queues()
1470 priv->rq[i].dev = dev; in veth_alloc_queues()
1471 u64_stats_init(&priv->rq[i].stats.syncp); in veth_alloc_queues()
1481 kvfree(priv->rq); in veth_free_queues()
1516 peer = rcu_dereference(priv->peer); in veth_get_iflink()
1517 iflink = peer ? peer->ifindex : 0; in veth_get_iflink()
1529 peer = rtnl_dereference(priv->peer); in veth_fix_features()
1533 if (peer_priv->_xdp_prog) in veth_fix_features()
1536 if (priv->_xdp_prog) in veth_fix_features()
1545 netdev_features_t changed = features ^ dev->features; in veth_set_features()
1550 if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog) in veth_set_features()
1553 peer = rtnl_dereference(priv->peer); in veth_set_features()
1578 peer = rcu_dereference(priv->peer); in veth_set_rx_headroom()
1583 priv->requested_headroom = new_hr; in veth_set_rx_headroom()
1584 new_hr = max(priv->requested_headroom, peer_priv->requested_headroom); in veth_set_rx_headroom()
1585 dev->needed_headroom = new_hr; in veth_set_rx_headroom()
1586 peer->needed_headroom = new_hr; in veth_set_rx_headroom()
1601 old_prog = priv->_xdp_prog; in veth_xdp_set()
1602 priv->_xdp_prog = prog; in veth_xdp_set()
1603 peer = rtnl_dereference(priv->peer); in veth_xdp_set()
1607 NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached"); in veth_xdp_set()
1608 err = -ENOTCONN; in veth_xdp_set()
1612 max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) - in veth_xdp_set()
1613 peer->hard_header_len; in veth_xdp_set()
1615 * XDP fragments. in veth_xdp_set()
1617 if (prog->aux->xdp_has_frags) in veth_xdp_set()
1620 if (peer->mtu > max_mtu) { in veth_xdp_set()
1621 NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP"); in veth_xdp_set()
1622 err = -ERANGE; in veth_xdp_set()
1626 if (dev->real_num_rx_queues < peer->real_num_tx_queues) { in veth_xdp_set()
1627 NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues"); in veth_xdp_set()
1628 err = -ENOSPC; in veth_xdp_set()
1632 if (dev->flags & IFF_UP) { in veth_xdp_set()
1635 NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed"); in veth_xdp_set()
1642 /* user-space did not require GRO, but adding in veth_xdp_set()
1643 * XDP is supposed to get GRO working in veth_xdp_set()
1645 dev->features |= NETIF_F_GRO; in veth_xdp_set()
1649 peer->hw_features &= ~NETIF_F_GSO_SOFTWARE; in veth_xdp_set()
1650 peer->max_mtu = max_mtu; in veth_xdp_set()
1661 if (dev->flags & IFF_UP) in veth_xdp_set()
1664 /* if user-space did not require GRO, since adding XDP in veth_xdp_set()
1668 dev->features &= ~NETIF_F_GRO; in veth_xdp_set()
1673 peer->hw_features |= NETIF_F_GSO_SOFTWARE; in veth_xdp_set()
1674 peer->max_mtu = ETH_MAX_MTU; in veth_xdp_set()
1685 priv->_xdp_prog = old_prog; in veth_xdp_set()
1690 static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp) in veth_xdp() argument
1692 switch (xdp->command) { in veth_xdp()
1694 return veth_xdp_set(dev, xdp->prog, xdp->extack); in veth_xdp()
1696 return -EINVAL; in veth_xdp()
1704 if (!_ctx->skb) in veth_xdp_rx_timestamp()
1705 return -ENODATA; in veth_xdp_rx_timestamp()
1707 *timestamp = skb_hwtstamps(_ctx->skb)->hwtstamp; in veth_xdp_rx_timestamp()
1715 struct sk_buff *skb = _ctx->skb; in veth_xdp_rx_hash()
1718 return -ENODATA; in veth_xdp_rx_hash()
1721 *rss_type = skb->l4_hash ? XDP_RSS_TYPE_L4_ANY : XDP_RSS_TYPE_NONE; in veth_xdp_rx_hash()
1730 const struct sk_buff *skb = _ctx->skb; in veth_xdp_rx_vlan_tag()
1734 return -ENODATA; in veth_xdp_rx_vlan_tag()
1740 *vlan_proto = skb->vlan_proto; in veth_xdp_rx_vlan_tag()
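The three callbacks above implement veth's xdp_metadata_ops; an XDP program attached to the veth peer reads them through the corresponding kfuncs. A minimal consumer sketch (kfunc declarations follow the documented XDP RX metadata interface and should be treated as assumptions here; they return -ENODATA in exactly the cases the callbacks above do):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;
extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
				    enum xdp_rss_hash_type *rss_type) __ksym;

SEC("xdp")
int read_rx_metadata(struct xdp_md *ctx)
{
	enum xdp_rss_hash_type rss_type;
	__u64 ts;
	__u32 hash;

	/* -ENODATA means there is no wrapped skb, or it carries no
	 * timestamp/hash, mirroring veth_xdp_rx_timestamp()/_rx_hash() */
	if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
		bpf_printk("hw tstamp: %llu", ts);
	if (!bpf_xdp_metadata_rx_hash(ctx, &hash, &rss_type))
		bpf_printk("rss hash: %x type: %d", hash, rss_type);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";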
1781 dev->priv_flags &= ~IFF_TX_SKB_SHARING; in veth_setup()
1782 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; in veth_setup()
1783 dev->priv_flags |= IFF_NO_QUEUE; in veth_setup()
1784 dev->priv_flags |= IFF_PHONY_HEADROOM; in veth_setup()
1786 dev->netdev_ops = &veth_netdev_ops; in veth_setup()
1787 dev->xdp_metadata_ops = &veth_xdp_metadata_ops; in veth_setup()
1788 dev->ethtool_ops = &veth_ethtool_ops; in veth_setup()
1789 dev->features |= NETIF_F_LLTX; in veth_setup()
1790 dev->features |= VETH_FEATURES; in veth_setup()
1791 dev->vlan_features = dev->features & in veth_setup()
1796 dev->needs_free_netdev = true; in veth_setup()
1797 dev->priv_destructor = veth_dev_free; in veth_setup()
1798 dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; in veth_setup()
1799 dev->max_mtu = ETH_MAX_MTU; in veth_setup()
1801 dev->hw_features = VETH_FEATURES; in veth_setup()
1802 dev->hw_enc_features = VETH_FEATURES; in veth_setup()
1803 dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE; in veth_setup()
1816 return -EINVAL; in veth_validate()
1818 return -EADDRNOTAVAIL; in veth_validate()
1822 return -EINVAL; in veth_validate()
1831 dev->features &= ~NETIF_F_GRO; in veth_disable_gro()
1832 dev->wanted_features &= ~NETIF_F_GRO; in veth_disable_gro()
1840 if (!tb[IFLA_NUM_TX_QUEUES] && dev->num_tx_queues > 1) { in veth_init_queues()
1845 if (!tb[IFLA_NUM_RX_QUEUES] && dev->num_rx_queues > 1) { in veth_init_queues()
1910 if (ifmp && (dev->ifindex != 0)) in veth_newlink()
1911 peer->ifindex = ifmp->ifi_index; in veth_newlink()
1935 * should be re-allocated in veth_newlink()
1942 nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); in veth_newlink()
1944 snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); in veth_newlink()
1957 rcu_assign_pointer(priv->peer, peer); in veth_newlink()
1963 rcu_assign_pointer(priv->peer, dev); in veth_newlink()
1969 /* update XDP supported features */ in veth_newlink()
1994 peer = rtnl_dereference(priv->peer); in veth_dellink()
2000 RCU_INIT_POINTER(priv->peer, NULL); in veth_dellink()
2005 RCU_INIT_POINTER(priv->peer, NULL); in veth_dellink()
2017 struct net_device *peer = rtnl_dereference(priv->peer); in veth_get_link_net()