1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /* Copyright (c) 2019 Mellanox Technologies. */
3
4 #ifndef __MLX5_EN_TXRX_H___
5 #define __MLX5_EN_TXRX_H___
6
7 #include "en.h"
8 #include <linux/indirect_call_wrapper.h>
9 #include <net/ip6_checksum.h>
10 #include <net/tcp.h>
11
/* Number of 16-byte data segments (DS) occupied by an empty TX WQE
 * (control + eth segments, no payload descriptors).
 */
#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)

/* Size of the inline-header "start" field inside the ETH segment. */
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

/* IPSEC inline data includes:
 * 1. ESP trailer: up to 255 bytes of padding, 1 byte for pad length, 1 byte for
 * next header.
 * 2. ESP authentication data: 16 bytes for ICV.
 */
#define MLX5E_MAX_TX_IPSEC_DS DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + \
					   255 + 1 + 1 + 16, MLX5_SEND_WQE_DS)

/* 366 should be big enough to cover all L2, L3 and L4 headers with possible
 * encapsulations.
 */
#define MLX5E_MAX_TX_INLINE_DS DIV_ROUND_UP(366 - INL_HDR_START_SZ + VLAN_HLEN, \
					    MLX5_SEND_WQE_DS)

/* Sync the calculation with mlx5e_sq_calc_wqe_attr. */
#define MLX5E_MAX_TX_WQEBBS DIV_ROUND_UP(MLX5E_TX_WQE_EMPTY_DS_COUNT + \
					 MLX5E_MAX_TX_INLINE_DS + \
					 MLX5E_MAX_TX_IPSEC_DS + \
					 MAX_SKB_FRAGS + 1, \
					 MLX5_SEND_WQEBB_NUM_DS)

/* True when the CQE reports anything other than a successful RESP_SEND. */
#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)

/* Byte size of a KSM UMR WQE carrying sgl_len KSM entries. */
#define MLX5E_KSM_UMR_WQE_SZ(sgl_len)\
	(sizeof(struct mlx5e_umr_wqe) +\
	(sizeof(struct mlx5_ksm) * (sgl_len)))

/* Number of WQEBBs (basic blocks) a KSM UMR WQE occupies. */
#define MLX5E_KSM_UMR_WQEBBS(ksm_entries) \
	(DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_BB))

/* Number of 16-byte data segments a KSM UMR WQE occupies. */
#define MLX5E_KSM_UMR_DS_CNT(ksm_entries)\
	(DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_DS))

/* Max KSM entries that fit in a WQE of wqe_size bytes (no alignment). */
#define MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size)\
	(((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_ksm))

/* Same, rounded down to the HW-required KSM entry alignment. */
#define MLX5E_KSM_ENTRIES_PER_WQE(wqe_size)\
	ALIGN_DOWN(MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT)

/* Max KSM entries per WQE for this device's largest aligned SQ WQE. */
#define MLX5E_MAX_KSM_PER_WQE(mdev) \
	MLX5E_KSM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
57
/* Convert a raw CQE timestamp to ktime using the given conversion callback.
 * INDIRECT_CALL_2() lets the compiler devirtualize the two known
 * implementations (real-time clock vs. free-running timecounter), avoiding
 * an indirect call on retpoline kernels.
 */
static inline
ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts)
{
	return INDIRECT_CALL_2(func, mlx5_real_time_cyc2time, mlx5_timecounter_cyc2time,
			       clock, cqe_ts);
}
64
/* Type tag for work posted to the internal control SQ (ICOSQ); stored in
 * struct mlx5e_icosq_wqe_info and used to dispatch completion handling.
 */
enum mlx5e_icosq_wqe_type {
	MLX5E_ICOSQ_WQE_NOP,      /* padding NOP, no completion work */
	MLX5E_ICOSQ_WQE_UMR_RX,   /* UMR posted on behalf of an RQ */
#ifdef CONFIG_MLX5_EN_TLS
	MLX5E_ICOSQ_WQE_UMR_TLS,
	MLX5E_ICOSQ_WQE_SET_PSV_TLS,
	MLX5E_ICOSQ_WQE_GET_PSV_TLS,
#endif
};
74
75 /* General */
mlx5e_skb_is_multicast(struct sk_buff * skb)76 static inline bool mlx5e_skb_is_multicast(struct sk_buff *skb)
77 {
78 return skb->pkt_type == PACKET_MULTICAST || skb->pkt_type == PACKET_BROADCAST;
79 }
80
81 void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
82 void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
83 void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
84 int mlx5e_napi_poll(struct napi_struct *napi, int budget);
85 int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
86
87 /* RX */
88 INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
89 INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
90 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
91 void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
92 void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq);
93
/* True when RX hardware timestamping is enabled for all incoming packets. */
static inline bool mlx5e_rx_hw_stamp(struct kernel_hwtstamp_config *config)
{
	return config->rx_filter == HWTSTAMP_FILTER_ALL;
}
98
99 /* TX */
/* Descriptor for a single contiguous TX buffer. */
struct mlx5e_xmit_data {
	dma_addr_t dma_addr; /* DMA address of the buffer */
	void *data;          /* CPU address of the buffer */
	u32 len : 31;        /* buffer length in bytes (31-bit limit) */
	u32 has_frags : 1;   /* set when this is really a mlx5e_xmit_data_frags */
};
106
/* Extension of mlx5e_xmit_data for multi-fragment buffers: the linear part
 * in @xd plus scatter fragments described by @sinfo, with their DMA
 * addresses in @dma_arr.
 */
struct mlx5e_xmit_data_frags {
	struct mlx5e_xmit_data xd;      /* linear part; xd.has_frags is set */
	struct skb_shared_info *sinfo;  /* fragment descriptors */
	dma_addr_t *dma_arr;            /* per-fragment DMA addresses */
};
112
113 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
114 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
115 void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
116
/* True when at least one free slot remains in the skb fifo.
 * pc/cc are free-running counters; the u16 cast makes the in-flight count
 * (pc - cc) correct across wraparound. Capacity is mask + 1, so up to
 * "mask" entries may be outstanding before a push.
 */
static inline bool
mlx5e_skb_fifo_has_room(struct mlx5e_skb_fifo *fifo)
{
	return (u16)(*fifo->pc - *fifo->cc) <= fifo->mask;
}
122
123 static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc * wq,u16 cc,u16 pc,u16 n)124 mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
125 {
126 return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
127 }
128
/* Return the WQE at producer index @pi, zeroed over @wqe_size bytes so the
 * caller can fill it in from a clean state.
 */
static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size)
{
	/* memset() returns its destination, i.e. the fetched WQE. */
	return memset(mlx5_wq_cyc_get_wqe(wq, pi), 0, wqe_size);
}
138
/* Fetch and zero a regular TX WQE from a TXQ SQ at producer index @pi. */
#define MLX5E_TX_FETCH_WQE(sq, pi) \
	((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))
141
/* Post a single-WQEBB NOP at the current producer position and advance the
 * producer counter. Returns the WQE so the caller may ring the doorbell with
 * its control segment.
 */
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	/* Big-endian HW layout: {opmod, wqe_index, opcode} and {sqn, ds count}.
	 * A NOP consists of a single data segment (0x01).
	 */
	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}
158
/* Same as mlx5e_post_nop(), but with the initiator-small fence mode set so
 * the NOP also acts as an ordering fence for subsequent WQEs.
 */
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);
	cseg->fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL; /* the only difference vs. plain NOP */

	(*pc)++;

	return wqe;
}
176
/* Per-WQE bookkeeping for the TXQ SQ, consumed when the completion arrives. */
struct mlx5e_tx_wqe_info {
	struct sk_buff *skb;  /* skb to release on completion; may be NULL (e.g. NOPs) */
	u32 num_bytes;
	u8 num_wqebbs;        /* WQEBBs this WQE occupies (1 for NOP fills) */
	u8 num_dma;           /* dma_fifo entries to unmap on completion */
	u8 num_fifo_pkts;     /* skbs pushed to the skb fifo for this WQE */
#ifdef CONFIG_MLX5_EN_TLS
	struct page *resync_dump_frag_page;
#endif
};
187
/* Return a producer index with at least @size contiguous WQEBBs available.
 * If the remaining space before the WQ's frag (page) edge is too small, the
 * gap is filled with NOP WQEs (each recorded as a 1-WQEBB wqe_info so the
 * completion path can account for them) and the index after the edge is
 * returned. The caller must have reserved stop room for this padding.
 */
static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_tx_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_tx_wqe_info) {
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		sq->stats->nop += contig_wqebbs;

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}
215
/* Like mlx5e_txqsq_get_next_pi(), but never pads with NOPs: returns the
 * current producer index and reports in @size how many contiguous WQEBBs
 * are usable there, capped at the SQ's MPWQE limit.
 */
static inline u16 mlx5e_txqsq_get_next_pi_anysize(struct mlx5e_txqsq *sq,
						  u16 *size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	u16 contig = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);

	*size = min_t(u16, contig, sq->max_sq_mpw_wqebbs);
	return pi;
}
228
229 void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq);
230
/* Extract the SHAMPO header-buffer index from a CQE, wrapped to the
 * header-per-WQ ring size (hd_per_wq is a power of two, so mask with -1).
 */
static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
}
235
/* Auxiliary data for a SHAMPO UMR posted on the ICOSQ. */
struct mlx5e_shampo_umr {
	u16 len; /* number of entries covered by the UMR — see icosq completion path */
};
239
/* Per-WQE bookkeeping for the ICOSQ; @wqe_type selects the union member. */
struct mlx5e_icosq_wqe_info {
	u8 wqe_type;   /* enum mlx5e_icosq_wqe_type */
	u8 num_wqebbs; /* WQEBBs this WQE occupies */

	/* Auxiliary data for different wqe types. */
	union {
		struct {
			struct mlx5e_rq *rq; /* RQ the UMR was posted for (UMR_RX) */
		} umr;
		struct mlx5e_shampo_umr shampo;
#ifdef CONFIG_MLX5_EN_TLS
		struct {
			struct mlx5e_ktls_offload_context_rx *priv_rx;
		} tls_set_params; /* SET_PSV_TLS */
		struct {
			struct mlx5e_ktls_rx_resync_buf *buf;
		} tls_get_params; /* GET_PSV_TLS */
#endif
	};
};
260
261 void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq);
262
/* ICOSQ counterpart of mlx5e_txqsq_get_next_pi(): return a producer index
 * with @size contiguous WQEBBs, padding up to the frag edge with NOPs
 * (tagged MLX5E_ICOSQ_WQE_NOP so the completion handler skips them).
 */
static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_icosq_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_icosq_wqe_info) {
				.wqe_type = MLX5E_ICOSQ_WQE_NOP,
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}
290
/* Ring the SQ doorbell for the WQE whose control segment is @ctrl.
 * The barrier sequence is order-critical: the WQE must be globally visible
 * before the doorbell record update, and the record before the BlueFlame/UAR
 * write — do not reorder or weaken these barriers.
 */
static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
		struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE; /* request a CQE for this WQE */
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}
308
/* Re-arm the CQ so the device raises an event on the next completion. */
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, cq->uar->map, cq->wq.cc);
}
313
314 static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq * sq,u32 i)315 mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
316 {
317 return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
318 }
319
320 static inline void
mlx5e_dma_push_single(struct mlx5e_txqsq * sq,dma_addr_t addr,u32 size)321 mlx5e_dma_push_single(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size)
322 {
323 struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);
324
325 dma->addr = addr;
326 dma->size = size;
327 dma->type = MLX5E_DMA_MAP_SINGLE;
328 }
329
330 static inline void
mlx5e_dma_push_netmem(struct mlx5e_txqsq * sq,netmem_ref netmem,dma_addr_t addr,u32 size)331 mlx5e_dma_push_netmem(struct mlx5e_txqsq *sq, netmem_ref netmem,
332 dma_addr_t addr, u32 size)
333 {
334 struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);
335
336 netmem_dma_unmap_addr_set(netmem, dma, addr, addr);
337 dma->size = size;
338 dma->type = MLX5E_DMA_MAP_PAGE;
339 }
340
341 static inline
mlx5e_skb_fifo_get(struct mlx5e_skb_fifo * fifo,u16 i)342 struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_skb_fifo *fifo, u16 i)
343 {
344 return &fifo->fifo[i & fifo->mask];
345 }
346
347 static inline
mlx5e_skb_fifo_push(struct mlx5e_skb_fifo * fifo,struct sk_buff * skb)348 void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb)
349 {
350 struct sk_buff **skb_item = mlx5e_skb_fifo_get(fifo, (*fifo->pc)++);
351
352 *skb_item = skb;
353 }
354
355 static inline
mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo * fifo)356 struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo)
357 {
358 WARN_ON_ONCE(*fifo->pc == *fifo->cc);
359
360 return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++);
361 }
362
363 static inline void
mlx5e_tx_dma_unmap(struct device * pdev,struct mlx5e_sq_dma * dma)364 mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
365 {
366 switch (dma->type) {
367 case MLX5E_DMA_MAP_SINGLE:
368 dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
369 break;
370 case MLX5E_DMA_MAP_PAGE:
371 netmem_dma_unmap_page_attrs(pdev, dma->addr, dma->size,
372 DMA_TO_DEVICE, 0);
373 break;
374 default:
375 WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
376 }
377 }
378
379 void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);
380
/* True when the open MPWQE session has consumed all of its data segments. */
static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
{
	return session->ds_count == session->ds_count_max;
}
385
mlx5e_rqwq_reset(struct mlx5e_rq * rq)386 static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
387 {
388 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
389 mlx5_wq_ll_reset(&rq->mpwqe.wq);
390 rq->mpwqe.actual_wq_head = 0;
391 } else {
392 mlx5_wq_cyc_reset(&rq->wqe.wq);
393 }
394 }
395
/* Log details of an error CQE on queue @qn and dump its contents.
 * The index is wq->cc - 1 because the consumer counter was already advanced
 * past the CQE being reported.
 */
static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn,
					struct mlx5_err_cqe *err_cqe)
{
	struct mlx5_cqwq *wq = &cq->wq;
	u32 ci;

	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);

	netdev_err(cq->netdev,
		   "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
		   cq->mcq.cqn, ci, qn,
		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
		   err_cqe->syndrome, err_cqe->vendor_err_synd);
	mlx5_dump_err_cqe(cq->mdev, err_cqe);
}
411
mlx5e_rqwq_get_size(struct mlx5e_rq * rq)412 static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
413 {
414 switch (rq->wq_type) {
415 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
416 return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
417 default:
418 return mlx5_wq_cyc_get_size(&rq->wqe.wq);
419 }
420 }
421
mlx5e_rqwq_get_cur_sz(struct mlx5e_rq * rq)422 static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
423 {
424 switch (rq->wq_type) {
425 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
426 return rq->mpwqe.wq.cur_sz;
427 default:
428 return rq->wqe.wq.cur_sz;
429 }
430 }
431
mlx5e_rqwq_get_head(struct mlx5e_rq * rq)432 static inline u16 mlx5e_rqwq_get_head(struct mlx5e_rq *rq)
433 {
434 switch (rq->wq_type) {
435 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
436 return mlx5_wq_ll_get_head(&rq->mpwqe.wq);
437 default:
438 return mlx5_wq_cyc_get_head(&rq->wqe.wq);
439 }
440 }
441
mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq * rq)442 static inline u16 mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq)
443 {
444 switch (rq->wq_type) {
445 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
446 return mlx5_wq_ll_get_counter(&rq->mpwqe.wq);
447 default:
448 return mlx5_wq_cyc_get_counter(&rq->wqe.wq);
449 }
450 }
451
452 /* SW parser related functions */
453
/* Protocol description used to program the device's software parser (SWP)
 * offsets/flags in the ETH segment — see mlx5e_set_eseg_swp().
 */
struct mlx5e_swp_spec {
	__be16 l3_proto;     /* outer L3 ethertype (network order) */
	u8 l4_proto;         /* outer L4 IP protocol; 0 if none */
	u8 is_tun;           /* nonzero when the packet is tunneled */
	__be16 tun_l3_proto; /* inner L3 ethertype when tunneled */
	u8 tun_l4_proto;     /* inner L4 IP protocol when tunneled */
};
461
mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg * eseg)462 static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
463 {
464 /* SWP offsets are in 2-bytes words */
465 eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
466 eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
467 eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
468 eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
469 }
470
471 static inline void
mlx5e_set_eseg_swp(struct sk_buff * skb,struct mlx5_wqe_eth_seg * eseg,struct mlx5e_swp_spec * swp_spec)472 mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
473 struct mlx5e_swp_spec *swp_spec)
474 {
475 /* SWP offsets are in 2-bytes words */
476 eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
477 if (swp_spec->l3_proto == htons(ETH_P_IPV6))
478 eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
479 if (swp_spec->l4_proto) {
480 eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
481 if (swp_spec->l4_proto == IPPROTO_UDP)
482 eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
483 }
484
485 if (swp_spec->is_tun) {
486 eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
487 if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
488 eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
489 } else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
490 eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
491 if (swp_spec->l3_proto == htons(ETH_P_IPV6))
492 eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
493 }
494 switch (swp_spec->tun_l4_proto) {
495 case IPPROTO_UDP:
496 eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
497 fallthrough;
498 case IPPROTO_TCP:
499 eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
500 break;
501 }
502 }
503
504 static inline void
mlx5e_swp_encap_csum_partial(struct mlx5_core_dev * mdev,struct sk_buff * skb,bool tunnel)505 mlx5e_swp_encap_csum_partial(struct mlx5_core_dev *mdev, struct sk_buff *skb, bool tunnel)
506 {
507 const struct iphdr *ip = tunnel ? inner_ip_hdr(skb) : ip_hdr(skb);
508 const struct ipv6hdr *ip6;
509 struct tcphdr *th;
510 struct udphdr *uh;
511 int len;
512
513 if (!MLX5_CAP_ETH(mdev, swp_csum_l4_partial) || !skb_is_gso(skb))
514 return;
515
516 if (skb_is_gso_tcp(skb)) {
517 th = inner_tcp_hdr(skb);
518 len = skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb);
519
520 if (ip->version == 4) {
521 th->check = ~tcp_v4_check(len, ip->saddr, ip->daddr, 0);
522 } else {
523 ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
524 th->check = ~tcp_v6_check(len, &ip6->saddr, &ip6->daddr, 0);
525 }
526 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
527 uh = (struct udphdr *)skb_inner_transport_header(skb);
528 len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
529
530 if (ip->version == 4) {
531 uh->check = ~udp_v4_check(len, ip->saddr, ip->daddr, 0);
532 } else {
533 ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
534 uh->check = ~udp_v6_check(len, &ip6->saddr, &ip6->daddr, 0);
535 }
536 }
537 }
538
/* Stop room for one WQE of @wqebbs: the WQE itself plus up to wqebbs - 1
 * WQEBBs of NOP padding at a page edge (see mlx5e_stop_room_for_wqe).
 */
#define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1)
540
/* Return the stop room (in WQEBBs) that must remain free before posting a
 * WQE of @wqe_size WQEBBs, warning if the size violates device/page limits.
 */
static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
{
	WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < (u16)mlx5e_get_max_sq_wqebbs(mdev));

	/* A WQE must not cross the page boundary, hence two conditions:
	 * 1. Its size must not exceed the page size.
	 * 2. If the WQE size is X, and the space remaining in a page is less
	 *    than X, this space needs to be padded with NOPs. So, one WQE of
	 *    size X may require up to X-1 WQEBBs of padding, which makes the
	 *    stop room of X-1 + X.
	 * WQE size is also limited by the hardware limit.
	 */
	WARN_ONCE(wqe_size > mlx5e_get_max_sq_wqebbs(mdev),
		  "wqe_size %u is greater than max SQ WQEBBs %u",
		  wqe_size, mlx5e_get_max_sq_wqebbs(mdev));

	return MLX5E_STOP_ROOM(wqe_size);
}
559
mlx5e_stop_room_for_max_wqe(struct mlx5_core_dev * mdev)560 static inline u16 mlx5e_stop_room_for_max_wqe(struct mlx5_core_dev *mdev)
561 {
562 return MLX5E_STOP_ROOM(mlx5e_get_max_sq_wqebbs(mdev));
563 }
564
mlx5e_stop_room_for_mpwqe(struct mlx5_core_dev * mdev)565 static inline u16 mlx5e_stop_room_for_mpwqe(struct mlx5_core_dev *mdev)
566 {
567 u8 mpwqe_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
568
569 return mlx5e_stop_room_for_wqe(mdev, mpwqe_wqebbs);
570 }
571
mlx5e_icosq_can_post_wqe(struct mlx5e_icosq * sq,u16 wqe_size)572 static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
573 {
574 u16 room = sq->reserved_room + MLX5E_STOP_ROOM(wqe_size);
575
576 return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
577 }
578
/* Return the i-th MPWQE info entry. Entries are variable-sized — each
 * carries pages_per_wqe trailing alloc_units.frag_pages — so plain array
 * indexing cannot be used; the byte offset is computed explicitly with
 * overflow-checked struct_size()/array_size().
 */
static inline struct mlx5e_mpw_info *mlx5e_get_mpw_info(struct mlx5e_rq *rq, int i)
{
	size_t isz = struct_size(rq->mpwqe.info, alloc_units.frag_pages, rq->mpwqe.pages_per_wqe);

	return (struct mlx5e_mpw_info *)((char *)rq->mpwqe.info + array_size(i, isz));
}
585 #endif
586