// SPDX-License-Identifier: GPL-2.0
// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <net/gro.h>
#include <net/page_pool/helpers.h>

#include "hinic3_hwdev.h"
#include "hinic3_nic_dev.h"
#include "hinic3_nic_io.h"
#include "hinic3_rx.h"

#define HINIC3_RX_HDR_SIZE        256
#define HINIC3_RX_BUFFER_WRITE    16

#define HINIC3_RX_TCP_PKT         0x3
#define HINIC3_RX_UDP_PKT         0x4
#define HINIC3_RX_SCTP_PKT        0x7

#define HINIC3_RX_IPV4_PKT        0
#define HINIC3_RX_IPV6_PKT        1
#define HINIC3_RX_INVALID_IP_TYPE 2

#define HINIC3_RX_PKT_FORMAT_NON_TUNNEL 0
#define HINIC3_RX_PKT_FORMAT_VXLAN      1

#define HINIC3_LRO_PKT_HDR_LEN_IPV4 66
#define HINIC3_LRO_PKT_HDR_LEN_IPV6 86
#define HINIC3_LRO_PKT_HDR_LEN(cqe) \
	(RQ_CQE_OFFOLAD_TYPE_GET((cqe)->offload_type, IP_TYPE) == \
	 HINIC3_RX_IPV6_PKT ? HINIC3_LRO_PKT_HDR_LEN_IPV6 : \
	 HINIC3_LRO_PKT_HDR_LEN_IPV4)

int hinic3_alloc_rxqs(struct net_device *netdev)
{
	/* Completed by later submission due to LoC limit. */
	return -EFAULT;
}

void hinic3_free_rxqs(struct net_device *netdev)
{
	/* Completed by later submission due to LoC limit. */
}

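/* Allocate a buffer fragment from the page pool and record its page and
 * offset in the per-descriptor rx_info.
 */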
static int rx_alloc_mapped_page(struct page_pool *page_pool,
				struct hinic3_rx_info *rx_info, u16 buf_len)
{
	struct page *page;
	u32 page_offset;

	page = page_pool_dev_alloc_frag(page_pool, &page_offset, buf_len);
	if (unlikely(!page))
		return -ENOMEM;

	rx_info->page = page;
	rx_info->page_offset = page_offset;

	return 0;
}

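/* Write the DMA address of a receive buffer into the RQ WQE at wqe_idx. */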
static void rq_wqe_buf_set(struct hinic3_io_queue *rq, u32 wqe_idx,
			   dma_addr_t dma_addr, u16 len)
{
	struct hinic3_rq_wqe *rq_wqe;

	rq_wqe = get_q_element(&rq->wq.qpages, wqe_idx, NULL);
	rq_wqe->buf_hi_addr = upper_32_bits(dma_addr);
	rq_wqe->buf_lo_addr = lower_32_bits(dma_addr);
}

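/* Refill free RQ WQEs with page pool buffers and ring the RQ doorbell for
 * the newly posted descriptors. Returns the number of buffers posted.
 */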
static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq)
{
	u32 i, free_wqebbs = rxq->delta - 1;
	struct hinic3_rx_info *rx_info;
	dma_addr_t dma_addr;
	int err;

	for (i = 0; i < free_wqebbs; i++) {
		rx_info = &rxq->rx_info[rxq->next_to_update];

		err = rx_alloc_mapped_page(rxq->page_pool, rx_info,
					   rxq->buf_len);
		if (unlikely(err))
			break;

		dma_addr = page_pool_get_dma_addr(rx_info->page) +
			   rx_info->page_offset;
		rq_wqe_buf_set(rxq->rq, rxq->next_to_update, dma_addr,
			       rxq->buf_len);
		rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask;
	}

	if (likely(i)) {
		hinic3_write_db(rxq->rq, rxq->q_id & 3, DB_CFLAG_DP_RQ,
				rxq->next_to_update << HINIC3_NORMAL_RQ_WQE);
		rxq->delta -= i;
		rxq->next_to_alloc = rxq->next_to_update;
	}

	return i;
}

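/* Attach a received buffer to the skb. Small packets that still fit in the
 * linear area are copied and the page is returned to the pool immediately;
 * larger buffers are added as page frags and recycled via skb recycling.
 */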
static void hinic3_add_rx_frag(struct hinic3_rxq *rxq,
			       struct hinic3_rx_info *rx_info,
			       struct sk_buff *skb, u32 size)
{
	struct page *page;
	u8 *va;

	page = rx_info->page;
	va = (u8 *)page_address(page) + rx_info->page_offset;
	net_prefetch(va);

	page_pool_dma_sync_for_cpu(rxq->page_pool, page, rx_info->page_offset,
				   rxq->buf_len);

	if (size <= HINIC3_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
		memcpy(__skb_put(skb, size), va,
		       ALIGN(size, sizeof(long)));
		page_pool_put_full_page(rxq->page_pool, page, false);

		return;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_info->page_offset, size, rxq->buf_len);
	skb_mark_for_recycle(skb);
}

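/* Walk the sge_num consumed RX buffers that make up one packet, starting at
 * the current consumer index, and add each of them to the skb.
 */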
static void packaging_skb(struct hinic3_rxq *rxq, struct sk_buff *skb,
			  u32 sge_num, u32 pkt_len)
{
	struct hinic3_rx_info *rx_info;
	u32 temp_pkt_len = pkt_len;
	u32 temp_sge_num = sge_num;
	u32 sw_ci;
	u32 size;

	sw_ci = rxq->cons_idx & rxq->q_mask;
	while (temp_sge_num) {
		rx_info = &rxq->rx_info[sw_ci];
		sw_ci = (sw_ci + 1) & rxq->q_mask;
		if (unlikely(temp_pkt_len > rxq->buf_len)) {
			size = rxq->buf_len;
			temp_pkt_len -= rxq->buf_len;
		} else {
			size = temp_pkt_len;
		}

		hinic3_add_rx_frag(rxq, rx_info, skb, size);

		/* clear contents of buffer_info */
		rx_info->page = NULL;
		temp_sge_num--;
	}
}

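/* Number of receive buffers (SGEs) needed to hold a packet of pkt_len bytes,
 * i.e. pkt_len divided by buf_len, rounded up.
 */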
static u32 hinic3_get_sge_num(struct hinic3_rxq *rxq, u32 pkt_len)
{
	u32 sge_num;

	sge_num = pkt_len >> rxq->buf_len_shift;
	sge_num += (pkt_len & (rxq->buf_len - 1)) ? 1 : 0;

	return sge_num;
}

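/* Build an skb for the packet at the current consumer index and advance the
 * consumer index past all buffers the packet occupies.
 */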
static struct sk_buff *hinic3_fetch_rx_buffer(struct hinic3_rxq *rxq,
					      u32 pkt_len)
{
	struct sk_buff *skb;
	u32 sge_num;

	skb = napi_alloc_skb(&rxq->irq_cfg->napi, HINIC3_RX_HDR_SIZE);
	if (unlikely(!skb))
		return NULL;

	sge_num = hinic3_get_sge_num(rxq, pkt_len);

	net_prefetchw(skb->data);
	packaging_skb(rxq, skb, sge_num, pkt_len);

	rxq->cons_idx += sge_num;
	rxq->delta += sge_num;

	return skb;
}

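/* Pull the packet headers out of the first page frag and into the skb linear
 * area so the stack can parse them directly.
 */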
static void hinic3_pull_tail(struct sk_buff *skb)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int pull_len;
	unsigned char *va;

	va = skb_frag_address(frag);

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(skb->dev, va, HINIC3_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	skb_frag_off_add(frag, pull_len);

	skb->data_len -= pull_len;
	skb->tail += pull_len;
}

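/* Set skb->ip_summed from the checksum information reported in the CQE.
 * CHECKSUM_UNNECESSARY is only set for recognized TCP/UDP/SCTP packets over
 * IPv4/IPv6, either non-tunneled or VXLAN encapsulated, with no checksum
 * error flagged by HW.
 */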
static void hinic3_rx_csum(struct hinic3_rxq *rxq, u32 offload_type,
			   u32 status, struct sk_buff *skb)
{
	u32 pkt_fmt = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, TUNNEL_PKT_FORMAT);
	u32 pkt_type = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE);
	u32 ip_type = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, IP_TYPE);
	u32 csum_err = RQ_CQE_STATUS_GET(status, CSUM_ERR);
	struct net_device *netdev = rxq->netdev;

	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	if (unlikely(csum_err)) {
		/* pkt type is recognized by HW, and csum is wrong */
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	if (ip_type == HINIC3_RX_INVALID_IP_TYPE ||
	    !(pkt_fmt == HINIC3_RX_PKT_FORMAT_NON_TUNNEL ||
	      pkt_fmt == HINIC3_RX_PKT_FORMAT_VXLAN)) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	switch (pkt_type) {
	case HINIC3_RX_TCP_PKT:
	case HINIC3_RX_UDP_PKT:
	case HINIC3_RX_SCTP_PKT:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		skb->ip_summed = CHECKSUM_NONE;
		break;
	}
}

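/* For LRO-aggregated packets, fill in GSO size, type and segment count so
 * the stack can resegment the coalesced skb correctly.
 */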
static void hinic3_lro_set_gso_params(struct sk_buff *skb, u16 num_lro)
{
	struct ethhdr *eth = (struct ethhdr *)(skb->data);
	__be16 proto;

	proto = __vlan_get_protocol(skb, eth->h_proto, NULL);

	skb_shinfo(skb)->gso_size = DIV_ROUND_UP(skb->len - skb_headlen(skb),
						 num_lro);
	skb_shinfo(skb)->gso_type = proto == htons(ETH_P_IP) ?
				    SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
	skb_shinfo(skb)->gso_segs = num_lro;
}

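/* Build an skb for one completed CQE, apply checksum and LRO metadata, and
 * hand the packet to the network stack (via GRO when possible).
 */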
static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe,
			u32 pkt_len, u32 vlan_len, u32 status)
{
	struct net_device *netdev = rxq->netdev;
	struct sk_buff *skb;
	u32 offload_type;
	u16 num_lro;

	skb = hinic3_fetch_rx_buffer(rxq, pkt_len);
	if (unlikely(!skb))
		return -ENOMEM;

	/* place header in linear portion of buffer */
	if (skb_is_nonlinear(skb))
		hinic3_pull_tail(skb);

	offload_type = rx_cqe->offload_type;
	hinic3_rx_csum(rxq, offload_type, status, skb);

	num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO);
	if (num_lro)
		hinic3_lro_set_gso_params(skb, num_lro);

	skb_record_rx_queue(skb, rxq->q_id);
	skb->protocol = eth_type_trans(skb, netdev);

	if (skb_has_frag_list(skb)) {
		napi_gro_flush(&rxq->irq_cfg->napi, false);
		netif_receive_skb(skb);
	} else {
		napi_gro_receive(&rxq->irq_cfg->napi, skb);
	}

	return 0;
}

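/* NAPI receive poll: process completed CQEs until the budget is exhausted or
 * the LRO replenish threshold is reached, then refill RX buffers if enough
 * descriptors have been consumed. Returns the number of packets received.
 */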
int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev);
	u32 sw_ci, status, pkt_len, vlan_len;
	struct hinic3_rq_cqe *rx_cqe;
	u32 num_wqe = 0;
	int nr_pkts = 0;
	u16 num_lro;

	while (likely(nr_pkts < budget)) {
		sw_ci = rxq->cons_idx & rxq->q_mask;
		rx_cqe = rxq->cqe_arr + sw_ci;
		status = rx_cqe->status;
		if (!RQ_CQE_STATUS_GET(status, RXDONE))
			break;

		/* make sure we read rx_done before packet length */
		rmb();

		vlan_len = rx_cqe->vlan_len;
		pkt_len = RQ_CQE_SGE_GET(vlan_len, LEN);
		if (recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status))
			break;

		nr_pkts++;
		num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO);
		if (num_lro)
			num_wqe += hinic3_get_sge_num(rxq, pkt_len);

		rx_cqe->status = 0;

		if (num_wqe >= nic_dev->lro_replenish_thld)
			break;
	}

	if (rxq->delta >= HINIC3_RX_BUFFER_WRITE)
		hinic3_rx_fill_buffers(rxq);

	return nr_pkts;
}