Lines matching: xdp, rx, metadata (full-word match)

1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
4 * Copyright (C) 2015-2021 Google, Inc.
10 #include <linux/dma-mapping.h>
15 #include <net/xdp.h>
31 /* 1 for management, 1 for rx, 1 for tx */
34 /* Number of gve tx/rx stats in stats report. */
41 /* Number of NIC tx/rx stats in stats report. */
47 #define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
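A minimal sketch of how a page mask like this is typically applied, splitting a slot address into its page-aligned base and in-page offset (the example_ names are illustrative, not the driver's actual slot layout):

static inline dma_addr_t example_slot_page_base(dma_addr_t addr)
{
	/* keep only the page-aligned bits of the address */
	return addr & GVE_DATA_SLOT_ADDR_PAGE_MASK;
}

static inline unsigned long example_slot_page_offset(dma_addr_t addr)
{
	/* the low PAGE_SHIFT bits are the offset within the page */
	return addr & ~GVE_DATA_SLOT_ADDR_PAGE_MASK;
}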
68 /* 2K buffers for DQO-QPL */
74 * allocs and uses a non-qpl page on the receive path of DQO QPL to free
87 /* The page info for a single slot in the RX data queue */
93 u16 pad; /* adjustment for rx padding */
118 /* RX buffer queue for posting buffers to HW.
119 * Each RX (completion) queue has a corresponding buffer queue.
129 /* RX completion queue to receive packets from HW. */
166 /* Linked list index to next element in the list, or -1 if none */
170 /* `head` and `tail` are indices into an array, or -1 if empty. */
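The buffer-state lists above are index-based: elements chain through array indices rather than pointers, with -1 as the empty/end sentinel. A minimal sketch of that pattern (illustrative names, not the driver's helpers):

struct example_node {
	s16 next; /* index of next element, or -1 */
};

struct example_list {
	s16 head; /* -1 if empty */
	s16 tail; /* -1 if empty */
};

static void example_list_add_tail(struct example_list *list,
				  struct example_node *nodes, s16 idx)
{
	nodes[idx].next = -1;
	if (list->head == -1)
		list->head = idx;
	else
		nodes[list->tail].next = idx;
	list->tail = idx;
}

static s16 example_list_pop_head(struct example_list *list,
				 struct example_node *nodes)
{
	s16 idx = list->head;

	if (idx != -1) {
		list->head = nodes[idx].next;
		if (list->head == -1)
			list->tail = -1; /* list is now empty */
	}
	return idx;
}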
196 /* Contains datapath state used to represent an RX queue. */
223 * buf_states, or -1 if empty.
228 * buf_states, or -1 if empty.
240 * buf_states, or -1 if empty.
258 u64 rbytes; /* free-running bytes received */
259 u64 rpackets; /* free-running packets received */
260 u32 cnt; /* free-running total number of completed packets */
261 u32 fill_cnt; /* free-running total number of descs and buffs posted */
263 u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
264 u64 rx_copied_pkt; /* free-running total number of copied packets */
265 u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
266 u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
267 u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
268 u64 rx_cont_packet_cnt; /* free-running count of multi-fragment packets received */
269 u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
270 u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
271 u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
284 /* XDP state */
288 struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
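When a frame has to be copied for XDP transmit, a buffer is carved out of this page_frag_cache. A hedged sketch using the standard page_frag allocator (the length handling here is illustrative):

static void *example_xdp_buf_alloc(struct gve_rx_ring *rx, unsigned int len)
{
	/* page_frag_alloc() hands back a fragment of a cached page;
	 * it returns NULL when no page can be allocated.
	 */
	return page_frag_alloc(&rx->page_cache, len, GFP_ATOMIC);
}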
294 struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
314 u16 size; /* size of xmitted xdp pkt */
316 } xdp; member
326 /* A TX buffer - each queue has one */
350 * re-injection completion.
376 /* Linked list index to next element in the list, or -1 if none */
379 /* Linked list index to prev element in the list, or -1 if none.
391 * freed if the corresponding re-injection completion is not received
399 /* Cacheline 0 -- Accessed & dirtied during transmit */
411 * pending_packets, or -1 if empty.
437 * tx_qpl_buf_next, or -1 if empty.
457 /* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
463 /* Spinlock for XDP tx traffic */
475 * pending_packets, or -1 if empty.
501 * tx_qpl_buf_next, or -1 if empty.
517 u64 pkt_done; /* free-running - total packets completed */
518 u64 bytes_done; /* free-running - total bytes completed */
519 u64 dropped_pkt; /* free-running - total packets dropped */
522 /* Cacheline 2 -- Read-mostly fields */
564 /* Slow-path fields */
592 struct gve_rx_ring *rx; /* rx rings on this block */ member
640 struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */ member
655 u16 rx_pages_per_qpl; /* number of pages per RX qpl suggested by the NIC */
656 u16 rx_data_slot_cnt; /* number of rx data slots (buffers) per ring */
659 struct bpf_prog *xdp_prog; /* XDP BPF program */
667 u32 num_ntfy_blks; /* split between TX and RX so must be even */
677 /* Admin queue - see gve_adminq.h */
682 u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
683 u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
684 u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
685 /* free-running counts of AQ cmds executed, per cmd type */
707 u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
761 return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags); in gve_get_do_reset()
766 set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags); in gve_set_do_reset()
771 clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags); in gve_clear_do_reset()
777 &priv->service_task_flags); in gve_get_reset_in_progress()
782 set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags); in gve_set_reset_in_progress()
787 clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags); in gve_clear_reset_in_progress()
793 &priv->service_task_flags); in gve_get_probe_in_progress()
798 set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags); in gve_set_probe_in_progress()
803 clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags); in gve_clear_probe_in_progress()
809 &priv->service_task_flags); in gve_get_do_report_stats()
814 set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags); in gve_set_do_report_stats()
819 clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags); in gve_clear_do_report_stats()
824 return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags); in gve_get_admin_queue_ok()
829 set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags); in gve_set_admin_queue_ok()
834 clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags); in gve_clear_admin_queue_ok()
839 return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags); in gve_get_device_resources_ok()
844 set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags); in gve_set_device_resources_ok()
849 clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags); in gve_clear_device_resources_ok()
854 return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags); in gve_get_device_rings_ok()
859 set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags); in gve_set_device_rings_ok()
864 clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags); in gve_clear_device_rings_ok()
869 return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags); in gve_get_napi_enabled()
874 set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags); in gve_set_napi_enabled()
879 clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags); in gve_clear_napi_enabled()
884 return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags); in gve_get_report_stats()
889 clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags); in gve_clear_report_stats()
897 return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)]; in gve_irq_doorbell()
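gve_irq_doorbell() resolves a notification block to its mapped doorbell register. A sketch of a typical write through it, assuming the driver's big-endian doorbell convention and a GVE_IRQ_MASK-style constant:

static void example_mask_irq(struct gve_priv *priv,
			     struct gve_notify_block *block)
{
	/* doorbell registers are written big-endian */
	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
}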
907 /* Returns the index into ntfy_blocks of the given rx ring's block
911 return (priv->num_ntfy_blks / 2) + queue_idx; in gve_rx_idx_to_ntfy()
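Since the blocks are split evenly (see num_ntfy_blks above), TX queues presumably take the first half and RX queues the second. A sketch of the complementary TX mapping under that assumption (illustrative name):

static inline u32 example_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	/* TX queue i uses block i in the first half;
	 * RX queue j uses block (num_ntfy_blks / 2) + j.
	 */
	return queue_idx;
}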
916 return priv->queue_format == GVE_GQI_QPL_FORMAT || in gve_is_qpl()
917 priv->queue_format == GVE_DQO_QPL_FORMAT; in gve_is_qpl()
927 return priv->tx_cfg.num_queues + priv->num_xdp_queues; in gve_num_tx_qpls()
930 /* Returns the number of XDP tx queue page lists
934 if (priv->queue_format != GVE_GQI_QPL_FORMAT) in gve_num_xdp_qpls()
937 return priv->num_xdp_queues; in gve_num_xdp_qpls()
940 /* Returns the number of rx queue page lists
947 return priv->rx_cfg.num_queues; in gve_num_rx_qpls()
957 return priv->tx_cfg.max_queues + rx_qid; in gve_rx_qpl_id()
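The QPL id space is laid out TX-first: TX queue i maps to qpl id i, and RX ids begin at tx_cfg.max_queues. A sketch of the TX half implied by the RX helper above (illustrative name):

static inline u32 example_tx_qpl_id(struct gve_priv *priv, int tx_qid)
{
	return tx_qid; /* ids [0, max_queues) belong to TX */
}

/* e.g. with max_queues = 16, RX queue 3 uses qpl id 16 + 3 = 19 */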
978 if (test_bit(id, priv->qpl_cfg.qpl_id_map)) in gve_assign_tx_qpl()
981 set_bit(id, priv->qpl_cfg.qpl_id_map); in gve_assign_tx_qpl()
982 return &priv->qpls[id]; in gve_assign_tx_qpl()
985 /* Returns a pointer to the next available rx qpl in the list of qpls
993 if (test_bit(id, priv->qpl_cfg.qpl_id_map)) in gve_assign_rx_qpl()
996 set_bit(id, priv->qpl_cfg.qpl_id_map); in gve_assign_rx_qpl()
997 return &priv->qpls[id]; in gve_assign_rx_qpl()
1004 clear_bit(id, priv->qpl_cfg.qpl_id_map); in gve_unassign_qpl()
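Taken together, the assign/unassign helpers implement a simple bitmap claim/release protocol over qpl_id_map: test_bit() guards against double-assignment, set_bit()/clear_bit() toggle ownership. A condensed sketch of that pattern (illustrative wrappers, not the driver's API):

static struct gve_queue_page_list *example_claim_qpl(struct gve_priv *priv,
						     int id)
{
	if (test_bit(id, priv->qpl_cfg.qpl_id_map))
		return NULL; /* id already claimed */
	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

static void example_release_qpl(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}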
1007 /* Returns the correct dma direction for tx and rx qpls
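Given the TX-first id layout above, the direction presumably follows from the id alone: TX QPL pages are mapped device-bound, RX pages device-to-host. A sketch under that assumption (illustrative name):

static inline enum dma_data_direction example_qpl_dma_dir(struct gve_priv *priv,
							  int id)
{
	if (id < priv->tx_cfg.max_queues)
		return DMA_TO_DEVICE;	/* TX qpl ids come first */
	return DMA_FROM_DEVICE;		/* RX qpl ids start at max_queues */
}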
1020 return priv->queue_format == GVE_GQI_RDA_FORMAT || in gve_is_gqi()
1021 priv->queue_format == GVE_GQI_QPL_FORMAT; in gve_is_gqi()
1026 return priv->tx_cfg.num_queues + priv->num_xdp_queues; in gve_num_tx_queues()
1031 return priv->tx_cfg.num_queues + queue_id; in gve_xdp_tx_queue_id()
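XDP TX queues are appended after the regular TX queues, which is why gve_num_tx_queues() sums the two counts. A sketch of looking up the ring behind an XDP queue id, assuming the usual priv->tx ring array:

static inline struct gve_tx_ring *example_xdp_tx_ring(struct gve_priv *priv,
						      u32 queue_id)
{
	/* XDP queue 0 rides on hardware TX ring tx_cfg.num_queues */
	return &priv->tx[gve_xdp_tx_queue_id(priv, queue_id)];
}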
1059 /* rx handling */
1060 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
1062 bool gve_rx_work_pending(struct gve_rx_ring *rx);
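A hedged sketch of how these two prototypes pair up in a poll loop, assuming the ring keeps a back-pointer to its gve_priv; the completion processing and budget handling are elided:

static bool example_rx_poll(struct gve_rx_ring *rx, int budget)
{
	struct gve_priv *priv = rx->gve;

	/* ... process up to `budget` completions and repost buffers ... */

	gve_rx_write_doorbell(priv, rx); /* publish newly posted buffers */
	return gve_rx_work_pending(rx);	 /* true: more work, stay scheduled */
}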