/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */

#ifndef _HINIC3_RX_H_
#define _HINIC3_RX_H_

#include <linux/bitfield.h>
#include <linux/dim.h>
#include <linux/netdevice.h>

/* Field masks for the RQ CQE 'offload_type' dword, extracted with FIELD_GET.
 * NOTE(review): "OFFOLAD" is a misspelling of "OFFLOAD"; kept verbatim since
 * the driver's .c files reference these macro names -- renaming here would
 * break every caller.
 */
#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK          GENMASK(4, 0)
#define RQ_CQE_OFFOLAD_TYPE_IP_TYPE_MASK           GENMASK(6, 5)
#define RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_MASK GENMASK(11, 8)
#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK           BIT(21)
#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) \
	FIELD_GET(RQ_CQE_OFFOLAD_TYPE_##member##_MASK, val)

/* Field masks for the RQ CQE 'vlan_len' dword: VLAN tag in the low half,
 * packet length in the high half.
 */
#define RQ_CQE_SGE_VLAN_MASK GENMASK(15, 0)
#define RQ_CQE_SGE_LEN_MASK  GENMASK(31, 16)
#define RQ_CQE_SGE_GET(val, member) \
	FIELD_GET(RQ_CQE_SGE_##member##_MASK, val)

/* Field masks for the RQ CQE 'status' dword. RXDONE (bit 31) indicates HW
 * has finished writing the completion.
 */
#define RQ_CQE_STATUS_CSUM_ERR_MASK GENMASK(15, 0)
#define RQ_CQE_STATUS_NUM_LRO_MASK  GENMASK(23, 16)
#define RQ_CQE_STATUS_RXDONE_MASK   BIT(31)
#define RQ_CQE_STATUS_GET(val, member) \
	FIELD_GET(RQ_CQE_STATUS_##member##_MASK, val)

/* Per-RX-queue software counters. Readers/writers synchronize through
 * 'syncp' (u64_stats_sync), allowing consistent 64-bit reads on 32-bit
 * architectures.
 * NOTE(review): struct u64_stats_sync is only reached transitively via
 * <linux/netdevice.h>; consider including <linux/u64_stats_sync.h>
 * directly (include-what-you-use) -- confirm against tree conventions.
 */
struct hinic3_rxq_stats {
	u64                   packets;
	u64                   bytes;
	u64                   errors;
	u64                   csum_errors;
	u64                   other_errors;
	u64                   dropped;
	u64                   rx_buf_empty;
	u64                   alloc_skb_err;
	u64                   alloc_rx_buf_err;
	u64                   restore_drop_sge;
	struct u64_stats_sync syncp;
};

/* RX Completion information that is provided by HW for a specific RX WQE.
 * All fields are little-endian as written by the device; decode with the
 * RQ_CQE_*_GET accessors above.
 */
struct hinic3_rq_cqe {
	__le32 status;
	__le32 vlan_len;
	__le32 offload_type;
	__le32 rsvd3;
	__le32 rsvd4;
	__le32 rsvd5;
	__le32 rsvd6;
	__le32 pkt_info;
};

/* RX work-queue entry handed to HW: DMA address of the receive buffer and
 * of the CQE to write back, each split into hi/lo 32-bit halves.
 */
struct hinic3_rq_wqe {
	__le32 buf_hi_addr;
	__le32 buf_lo_addr;
	__le32 cqe_hi_addr;
	__le32 cqe_lo_addr;
};

/* Software bookkeeping for one RX buffer: the backing page (from the
 * page_pool) and the offset of this buffer within it.
 */
struct hinic3_rx_info {
	struct page *page;
	u32         page_offset;
};

/* Per-queue RX state. */
struct hinic3_rxq {
	struct net_device *netdev;

	u16 q_id;       /* queue index */
	u32 q_depth;    /* number of WQEs in the ring */
	u32 q_mask;     /* q_depth - 1; presumably depth is a power of two -- TODO confirm */

	u16 buf_len;
	u32 buf_len_shift; /* NOTE(review): looks like log2(buf_len) -- confirm in rx.c */

	struct hinic3_rxq_stats rxq_stats;
	u32 cons_idx;
	u32 delta;

	u32 irq_id;
	u16 msix_entry_idx;

	/* cqe_arr and rx_info are arrays of rq_depth elements. Each element is
	 * statically associated (by index) to a specific rq_wqe.
	 */
	struct hinic3_rq_cqe  *cqe_arr;
	struct hinic3_rx_info *rx_info;
	struct page_pool      *page_pool;

	struct hinic3_io_queue *rq;

	struct hinic3_irq_cfg *irq_cfg;
	u16                   next_to_alloc;
	u16                   next_to_update;
	struct device         *dev; /* device for DMA mapping */

	dma_addr_t cqe_start_paddr; /* DMA base of the cqe_arr allocation */

	struct dim dim; /* dynamic interrupt moderation state */

	/* Last interrupt-coalescing values programmed, to avoid redundant
	 * reconfiguration (presumably -- verify against the DIM worker).
	 */
	u8 last_coalesc_timer_cfg;
	u8 last_pending_limit;
} ____cacheline_aligned;

/* Per-queue resources staged during queue (re)configuration, before being
 * installed into a live hinic3_rxq.
 */
struct hinic3_dyna_rxq_res {
	u16                   next_to_alloc;
	struct hinic3_rx_info *rx_info;
	dma_addr_t            cqe_start_paddr;
	void                  *cqe_start_vaddr;
	struct page_pool      *page_pool;
};

/* Allocate/free the array of RX queue structures for this netdev. */
int hinic3_alloc_rxqs(struct net_device *netdev);
void hinic3_free_rxqs(struct net_device *netdev);

/* Allocate, free, or program per-queue resources (buffers, CQEs, pools)
 * described by 'rxqs_res' for 'num_rq' queues of 'rq_depth' entries each.
 */
int hinic3_alloc_rxqs_res(struct net_device *netdev, u16 num_rq,
			  u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
void hinic3_free_rxqs_res(struct net_device *netdev, u16 num_rq,
			  u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
int hinic3_configure_rxqs(struct net_device *netdev, u16 num_rq,
			  u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);

/* NAPI poll handler for one RX queue; processes up to 'budget' packets. */
int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget);

#endif