/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */

#ifndef _HINIC3_RX_H_
#define _HINIC3_RX_H_

#include <linux/bitfield.h>
#include <linux/netdevice.h>

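/* Fields of the CQE offload_type word: packet type, IP type, tunnel packet
 * format and a VLAN indication reported by HW.
 */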
#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK           GENMASK(4, 0)
#define RQ_CQE_OFFOLAD_TYPE_IP_TYPE_MASK            GENMASK(6, 5)
#define RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_MASK  GENMASK(11, 8)
#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK            BIT(21)
#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) \
	FIELD_GET(RQ_CQE_OFFOLAD_TYPE_##member##_MASK, val)

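/* Fields of the CQE vlan_len word: VLAN tag in the low 16 bits, received
 * length in the high 16 bits.
 */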
#define RQ_CQE_SGE_VLAN_MASK  GENMASK(15, 0)
#define RQ_CQE_SGE_LEN_MASK   GENMASK(31, 16)
#define RQ_CQE_SGE_GET(val, member) \
	FIELD_GET(RQ_CQE_SGE_##member##_MASK, val)

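/* Fields of the CQE status word: checksum error flags, number of packets
 * coalesced by LRO and the RXDONE (completion valid) bit.
 */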
#define RQ_CQE_STATUS_CSUM_ERR_MASK  GENMASK(15, 0)
#define RQ_CQE_STATUS_NUM_LRO_MASK   GENMASK(23, 16)
#define RQ_CQE_STATUS_RXDONE_MASK    BIT(31)
#define RQ_CQE_STATUS_GET(val, member) \
	FIELD_GET(RQ_CQE_STATUS_##member##_MASK, val)

/* RX Completion information that is provided by HW for a specific RX WQE */
struct hinic3_rq_cqe {
	u32 status;
	u32 vlan_len;
	u32 offload_type;
	u32 rsvd3;
	u32 rsvd4;
	u32 rsvd5;
	u32 rsvd6;
	u32 pkt_info;
};

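/*
 * Illustrative sketch only (not part of the driver): how the accessor macros
 * above might be used to decode a completed CQE. Whether the CQE words need an
 * endianness conversion before use is an assumption this header does not
 * settle.
 *
 *	if (RQ_CQE_STATUS_GET(cqe->status, RXDONE)) {
 *		u16 len      = RQ_CQE_SGE_GET(cqe->vlan_len, LEN);
 *		u16 vlan_tag = RQ_CQE_SGE_GET(cqe->vlan_len, VLAN);
 *		u32 pkt_type = RQ_CQE_OFFOLAD_TYPE_GET(cqe->offload_type,
 *						       PKT_TYPE);
 *		u32 csum_err = RQ_CQE_STATUS_GET(cqe->status, CSUM_ERR);
 *		...
 *	}
 */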

/* RX WQE as posted to HW: high/low 32-bit halves of the DMA addresses of the
 * packet buffer and of the CQE that HW writes back on completion.
 */
struct hinic3_rq_wqe {
	u32 buf_hi_addr;
	u32 buf_lo_addr;
	u32 cqe_hi_addr;
	u32 cqe_lo_addr;
};
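/*
 * Illustrative sketch only: a 64-bit DMA address would be split across the
 * hi/lo fields along these lines (any byte-order conversion the hardware
 * expects is omitted here as an unknown):
 *
 *	wqe->buf_hi_addr = upper_32_bits(buf_dma);
 *	wqe->buf_lo_addr = lower_32_bits(buf_dma);
 *	wqe->cqe_hi_addr = upper_32_bits(cqe_dma);
 *	wqe->cqe_lo_addr = lower_32_bits(cqe_dma);
 */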

/* Bookkeeping for the receive buffer behind each RQ WQE: the page that
 * supplies the buffer and the offset of the buffer within that page.
 */
struct hinic3_rx_info {
	struct page      *page;
	u32              page_offset;
};

/* Receive queue context for a single RQ. */
struct hinic3_rxq {
	struct net_device       *netdev;

	u16                     q_id;
	u32                     q_depth;
	u32                     q_mask;

	u16                     buf_len;
	u32                     buf_len_shift;

	u32                     cons_idx;
	u32                     delta;

	u32                     irq_id;
	u16                     msix_entry_idx;

	/* cqe_arr and rx_info are arrays of rq_depth elements. Each element is
	 * statically associated (by index) to a specific rq_wqe.
	 */
	struct hinic3_rq_cqe   *cqe_arr;
	struct hinic3_rx_info  *rx_info;
	struct page_pool       *page_pool;

	struct hinic3_io_queue *rq;

	struct hinic3_irq_cfg  *irq_cfg;
	u16                    next_to_alloc;
	u16                    next_to_update;
	struct device          *dev; /* device for DMA mapping */

	dma_addr_t             cqe_start_paddr;
} ____cacheline_aligned;
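/*
 * Reading aid only, not taken from the driver: q_mask and buf_len_shift
 * suggest the usual power-of-two ring conventions, roughly
 *
 *	q_mask  == q_depth - 1          (index wrap: cons_idx & q_mask)
 *	buf_len == 1 << buf_len_shift
 *
 * The actual RX path may maintain these fields differently.
 */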

int hinic3_alloc_rxqs(struct net_device *netdev);
void hinic3_free_rxqs(struct net_device *netdev);

int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget);
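/*
 * Illustrative sketch only: a plausible call site for hinic3_rx_poll() is a
 * NAPI poll handler, assuming it returns the number of packets processed
 * (standard NAPI convention, not confirmed by this header):
 *
 *	static int napi_poll_sketch(struct napi_struct *napi, int budget)
 *	{
 *		struct hinic3_rxq *rxq = ...;   // hypothetical lookup
 *		int pkts = hinic3_rx_poll(rxq, budget);
 *
 *		if (pkts < budget)
 *			napi_complete_done(napi, pkts);
 *
 *		return pkts;
 *	}
 */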

#endif