1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
3 #ifndef IRDMA_TYPE_H
4 #define IRDMA_TYPE_H
5 #include "osdep.h"
6 #include "irdma.h"
7 #include "user.h"
8 #include "hmc.h"
9 #include "uda.h"
10 #include "ws.h"
11 #include "virtchnl.h"
12
13 #define IRDMA_DEBUG_ERR "ERR"
14 #define IRDMA_DEBUG_INIT "INIT"
15 #define IRDMA_DEBUG_DEV "DEV"
16 #define IRDMA_DEBUG_CM "CM"
17 #define IRDMA_DEBUG_VERBS "VERBS"
18 #define IRDMA_DEBUG_PUDA "PUDA"
19 #define IRDMA_DEBUG_ILQ "ILQ"
20 #define IRDMA_DEBUG_IEQ "IEQ"
21 #define IRDMA_DEBUG_QP "QP"
22 #define IRDMA_DEBUG_CQ "CQ"
23 #define IRDMA_DEBUG_MR "MR"
24 #define IRDMA_DEBUG_PBLE "PBLE"
25 #define IRDMA_DEBUG_WQE "WQE"
26 #define IRDMA_DEBUG_AEQ "AEQ"
27 #define IRDMA_DEBUG_CQP "CQP"
28 #define IRDMA_DEBUG_HMC "HMC"
29 #define IRDMA_DEBUG_USER "USER"
30 #define IRDMA_DEBUG_VIRT "VIRT"
31 #define IRDMA_DEBUG_DCB "DCB"
32 #define IRDMA_DEBUG_CQE "CQE"
33 #define IRDMA_DEBUG_CLNT "CLNT"
34 #define IRDMA_DEBUG_WS "WS"
35 #define IRDMA_DEBUG_STATS "STATS"
36
/* Page sizes the hardware supports for memory registration.
 * Values are HW encodings — do not reorder.
 */
enum irdma_page_size {
	IRDMA_PAGE_SIZE_4K = 0,
	IRDMA_PAGE_SIZE_2M,
	IRDMA_PAGE_SIZE_1G,
};

/* Flag bits for the hdrct field of struct irdma_terminate_hdr */
enum irdma_hdrct_flags {
	DDP_LEN_FLAG = 0x80,
	DDP_HDR_FLAG = 0x40,
	RDMA_HDR_FLAG = 0x20,
};

/* Protocol layer that originated a terminate message */
enum irdma_term_layers {
	LAYER_RDMA = 0,
	LAYER_DDP = 1,
	LAYER_MPA = 2,
};
54
/* Terminate error type codes. RDMAP_* and DDP_* values overlap
 * intentionally: they are qualified by the layer (enum irdma_term_layers)
 * carried alongside them, so each group is its own namespace.
 */
enum irdma_term_error_types {
	RDMAP_REMOTE_PROT = 1,
	RDMAP_REMOTE_OP = 2,
	DDP_CATASTROPHIC = 0,
	DDP_TAGGED_BUF = 1,
	DDP_UNTAGGED_BUF = 2,
	DDP_LLP = 3,
};

/* RDMAP-layer terminate error codes (iWARP terminate header) */
enum irdma_term_rdma_errors {
	RDMAP_INV_STAG = 0x00,
	RDMAP_INV_BOUNDS = 0x01,
	RDMAP_ACCESS = 0x02,
	RDMAP_UNASSOC_STAG = 0x03,
	RDMAP_TO_WRAP = 0x04,
	RDMAP_INV_RDMAP_VER = 0x05,
	RDMAP_UNEXPECTED_OP = 0x06,
	RDMAP_CATASTROPHIC_LOCAL = 0x07,
	RDMAP_CATASTROPHIC_GLOBAL = 0x08,
	RDMAP_CANT_INV_STAG = 0x09,
	RDMAP_UNSPECIFIED = 0xff,
};

/* DDP-layer terminate error codes. Tagged and untagged codes share
 * values intentionally; they are disambiguated by the error type
 * (DDP_TAGGED_BUF vs DDP_UNTAGGED_BUF).
 */
enum irdma_term_ddp_errors {
	DDP_CATASTROPHIC_LOCAL = 0x00,
	DDP_TAGGED_INV_STAG = 0x00,
	DDP_TAGGED_BOUNDS = 0x01,
	DDP_TAGGED_UNASSOC_STAG = 0x02,
	DDP_TAGGED_TO_WRAP = 0x03,
	DDP_TAGGED_INV_DDP_VER = 0x04,
	DDP_UNTAGGED_INV_QN = 0x01,
	DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
	DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
	DDP_UNTAGGED_INV_MO = 0x04,
	DDP_UNTAGGED_INV_TOO_LONG = 0x05,
	DDP_UNTAGGED_INV_DDP_VER = 0x06,
};

/* MPA-layer terminate error codes */
enum irdma_term_mpa_errors {
	MPA_CLOSED = 0x01,
	MPA_CRC = 0x02,
	MPA_MARKER = 0x03,
	MPA_REQ_RSP = 0x04,
};
99
/* Indices into the HW statistics buffer, grouped by hardware generation
 * and counter width. The *_MAX_GEN_n markers deliberately alias the first
 * index of the following generation, so each generation's range is
 * [0, MAX_GEN_n).
 */
enum irdma_hw_stats_index {
	/* gen1 - 32-bit */
	IRDMA_HW_STAT_INDEX_IP4RXDISCARD = 0,
	IRDMA_HW_STAT_INDEX_IP4RXTRUNC = 1,
	IRDMA_HW_STAT_INDEX_IP4TXNOROUTE = 2,
	IRDMA_HW_STAT_INDEX_IP6RXDISCARD = 3,
	IRDMA_HW_STAT_INDEX_IP6RXTRUNC = 4,
	IRDMA_HW_STAT_INDEX_IP6TXNOROUTE = 5,
	IRDMA_HW_STAT_INDEX_TCPRTXSEG = 6,
	IRDMA_HW_STAT_INDEX_TCPRXOPTERR = 7,
	IRDMA_HW_STAT_INDEX_TCPRXPROTOERR = 8,
	IRDMA_HW_STAT_INDEX_RXVLANERR = 9,
	/* gen1 - 64-bit */
	IRDMA_HW_STAT_INDEX_IP4RXOCTS = 10,
	IRDMA_HW_STAT_INDEX_IP4RXPKTS = 11,
	IRDMA_HW_STAT_INDEX_IP4RXFRAGS = 12,
	IRDMA_HW_STAT_INDEX_IP4RXMCPKTS = 13,
	IRDMA_HW_STAT_INDEX_IP4TXOCTS = 14,
	IRDMA_HW_STAT_INDEX_IP4TXPKTS = 15,
	IRDMA_HW_STAT_INDEX_IP4TXFRAGS = 16,
	IRDMA_HW_STAT_INDEX_IP4TXMCPKTS = 17,
	IRDMA_HW_STAT_INDEX_IP6RXOCTS = 18,
	IRDMA_HW_STAT_INDEX_IP6RXPKTS = 19,
	IRDMA_HW_STAT_INDEX_IP6RXFRAGS = 20,
	IRDMA_HW_STAT_INDEX_IP6RXMCPKTS = 21,
	IRDMA_HW_STAT_INDEX_IP6TXOCTS = 22,
	IRDMA_HW_STAT_INDEX_IP6TXPKTS = 23,
	IRDMA_HW_STAT_INDEX_IP6TXFRAGS = 24,
	IRDMA_HW_STAT_INDEX_IP6TXMCPKTS = 25,
	IRDMA_HW_STAT_INDEX_TCPRXSEGS = 26,
	IRDMA_HW_STAT_INDEX_TCPTXSEG = 27,
	IRDMA_HW_STAT_INDEX_RDMARXRDS = 28,
	IRDMA_HW_STAT_INDEX_RDMARXSNDS = 29,
	IRDMA_HW_STAT_INDEX_RDMARXWRS = 30,
	IRDMA_HW_STAT_INDEX_RDMATXRDS = 31,
	IRDMA_HW_STAT_INDEX_RDMATXSNDS = 32,
	IRDMA_HW_STAT_INDEX_RDMATXWRS = 33,
	IRDMA_HW_STAT_INDEX_RDMAVBND = 34,
	IRDMA_HW_STAT_INDEX_RDMAVINV = 35,
	IRDMA_HW_STAT_INDEX_IP4RXMCOCTS = 36,
	IRDMA_HW_STAT_INDEX_IP4TXMCOCTS = 37,
	IRDMA_HW_STAT_INDEX_IP6RXMCOCTS = 38,
	IRDMA_HW_STAT_INDEX_IP6TXMCOCTS = 39,
	IRDMA_HW_STAT_INDEX_UDPRXPKTS = 40,
	IRDMA_HW_STAT_INDEX_UDPTXPKTS = 41,
	IRDMA_HW_STAT_INDEX_MAX_GEN_1 = 42, /* Must be same value as next entry */
	/* gen2 - 64-bit */
	IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 42,
	/* gen2 - 32-bit */
	IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED = 43,
	IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 44,
	IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 45,
	IRDMA_HW_STAT_INDEX_MAX_GEN_2 = 46, /* Must be same value as first gen3 entry */

	/* gen3 */
	IRDMA_HW_STAT_INDEX_RNR_SENT = 46,
	IRDMA_HW_STAT_INDEX_RNR_RCVD = 47,
	IRDMA_HW_STAT_INDEX_RDMAORDLMTCNT = 48,
	IRDMA_HW_STAT_INDEX_RDMAIRDLMTCNT = 49,
	IRDMA_HW_STAT_INDEX_RDMARXATS = 50,
	IRDMA_HW_STAT_INDEX_RDMATXATS = 51,
	IRDMA_HW_STAT_INDEX_NAKSEQERR = 52,
	IRDMA_HW_STAT_INDEX_NAKSEQERR_IMPLIED = 53,
	IRDMA_HW_STAT_INDEX_RTO = 54,
	IRDMA_HW_STAT_INDEX_RXOOOPKTS = 55,
	IRDMA_HW_STAT_INDEX_ICRCERR = 56,

	IRDMA_HW_STAT_INDEX_MAX_GEN_3 = 57,
};
169
/* Indices into the firmware feature-info array (see feature_info[] in
 * struct irdma_sc_dev). Values are FW-defined — gaps (14-21, 28-31) are
 * reserved; do not renumber.
 */
enum irdma_feature_type {
	IRDMA_FEATURE_FW_INFO = 0,
	IRDMA_HW_VERSION_INFO = 1,
	IRDMA_QP_MAX_INCR = 2,
	IRDMA_CQ_MAX_INCR = 3,
	IRDMA_CEQ_MAX_INCR = 4,
	IRDMA_SD_MAX_INCR = 5,
	IRDMA_MR_MAX_INCR = 6,
	IRDMA_Q1_MAX_INCR = 7,
	IRDMA_AH_MAX_INCR = 8,
	IRDMA_SRQ_MAX_INCR = 9,
	IRDMA_TIMER_MAX_INCR = 10,
	IRDMA_XF_MAX_INCR = 11,
	IRDMA_RRF_MAX_INCR = 12,
	IRDMA_PBLE_MAX_INCR = 13,
	IRDMA_OBJ_1 = 22,
	IRDMA_OBJ_2 = 23,
	IRDMA_ENDPT_TRK = 24,
	IRDMA_FTN_INLINE_MAX = 25,
	IRDMA_QSETS_MAX = 26,
	IRDMA_ASO = 27,
	IRDMA_FTN_FLAGS = 32,
	IRDMA_FTN_NOP = 33,
	IRDMA_MAX_FEATURES, /* Must be last entry */
};
195
/* Work-scheduler node arbitration modes */
enum irdma_sched_prio_type {
	IRDMA_PRIO_WEIGHTED_RR = 1,
	IRDMA_PRIO_STRICT = 2,
	IRDMA_PRIO_WEIGHTED_STRICT = 3,
};

/* Whether a function owner is a VF, a VM, or the PF itself */
enum irdma_vm_vf_type {
	IRDMA_VF_TYPE = 0,
	IRDMA_VM_TYPE,
	IRDMA_PF_TYPE,
};

/* HMC resource partitioning policy between PF and VFs */
enum irdma_cqp_hmc_profile {
	IRDMA_HMC_PROFILE_DEFAULT = 1,
	IRDMA_HMC_PROFILE_FAVOR_VF = 2,
	IRDMA_HMC_PROFILE_EQUAL = 3,
};

/* Connection type of a quad-hash (4-tuple) table entry */
enum irdma_quad_entry_type {
	IRDMA_QHASH_TYPE_TCP_ESTABLISHED = 1,
	IRDMA_QHASH_TYPE_TCP_SYN,
	IRDMA_QHASH_TYPE_UDP_UNICAST,
	IRDMA_QHASH_TYPE_UDP_MCAST,
	IRDMA_QHASH_TYPE_ROCE_MCAST,
	IRDMA_QHASH_TYPE_ROCEV2_HW,
};

/* Operation to perform on a quad-hash table entry */
enum irdma_quad_hash_manage_type {
	IRDMA_QHASH_MANAGE_TYPE_DELETE = 0,
	IRDMA_QHASH_MANAGE_TYPE_ADD,
	IRDMA_QHASH_MANAGE_TYPE_MODIFY,
};

/* Where SYN/RST segments are handled: in HW or punted to FW */
enum irdma_syn_rst_handling {
	IRDMA_SYN_RST_HANDLING_HW_TCP_SECURE = 0,
	IRDMA_SYN_RST_HANDLING_HW_TCP,
	IRDMA_SYN_RST_HANDLING_FW_TCP_SECURE,
	IRDMA_SYN_RST_HANDLING_FW_TCP,
};

/* Queue flavor, used where SQ/RQ, CQP and SRQ share a code path */
enum irdma_queue_type {
	IRDMA_QUEUE_TYPE_SQ_RQ = 0,
	IRDMA_QUEUE_TYPE_CQP,
	IRDMA_QUEUE_TYPE_SRQ,
};

/* CQ IDs reserved for driver-internal queues */
enum irdma_rsvd_cq_id {
	IRDMA_RSVD_CQ_ID_CQP,
	IRDMA_RSVD_CQ_ID_ILQ,
	IRDMA_RSVD_CQ_ID_IEQ,
};

/* QP IDs reserved for driver-internal queues (ID 1 doubles as GSI/ILQ) */
enum irdma_rsvd_qp_id {
	IRDMA_RSVD_QP_ID_0,
	IRDMA_RSVD_QP_ID_GSI_ILQ,
	IRDMA_RSVD_QP_ID_IEQ,
};
253
/* Forward declarations to avoid circular includes */
struct irdma_sc_dev;
struct irdma_vsi_pestat;

/* DCQCN congestion-control tunables programmed into the CQP.
 * Field semantics follow the DCQCN algorithm parameters (rate-increase
 * factors, byte counter, timer) — presumably in FW-defined units;
 * confirm against the CQP programming spec.
 */
struct irdma_dcqcn_cc_params {
	u8 cc_cfg_valid;
	u8 min_dec_factor;
	u8 min_rate;
	u8 dcqcn_f;
	u16 rai_factor;
	u16 hai_factor;
	u16 dcqcn_t;
	u32 dcqcn_b;
	u32 rreduce_mperiod;
};
268
/* Parameters for irdma_sc_cqp_init(): everything needed to construct a
 * Control QP — SQ memory, host context, scratch tracking, and the block
 * sizes / VF settings the CQP create command programs into HW.
 */
struct irdma_cqp_init_info {
	u64 cqp_compl_ctx;
	u64 host_ctx_pa;		/* physical address of host_ctx */
	u64 sq_pa;			/* physical address of sq */
	struct irdma_sc_dev *dev;
	struct irdma_cqp_quanta *sq;	/* CQP SQ base (virtual) */
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;		/* per-WQE caller scratch, sq_size entries */
	u32 sq_size;
	struct irdma_ooo_cqp_op *ooo_op_array; /* pool for deferred completions */
	u32 pe_en_vf_cnt;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 hmc_profile;			/* enum irdma_cqp_hmc_profile */
	u8 ena_vf_count;
	u8 ceqs_per_vf;
	u8 ooisc_blksize;
	u8 rrsp_blksize;
	u8 q1_blksize;
	u8 xmit_blksize;
	u8 ts_override;
	u8 ts_shift;
	u8 en_fine_grained_timers;
	u8 blksizes_valid;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	enum irdma_protocol_used protocol_used;
};
300
/* iWARP terminate message header as placed in Q2; layer_etype packs the
 * layer (enum irdma_term_layers) and error type, hdrct carries
 * enum irdma_hdrct_flags bits.
 */
struct irdma_terminate_hdr {
	u8 layer_etype;
	u8 error_code;
	u8 hdrct;
	u8 rsvd;
};

/* Fixed-size HW descriptor wrappers; sizes are in 64-bit words and
 * defined by the device interface.
 */
struct irdma_cqp_sq_wqe {
	__le64 buf[IRDMA_CQP_WQE_SIZE];
};

struct irdma_sc_aeqe {
	__le64 buf[IRDMA_AEQE_SIZE];
};

struct irdma_ceqe {
	__le64 buf[IRDMA_CEQE_SIZE];
};

struct irdma_cqp_ctx {
	__le64 buf[IRDMA_CQP_CTX_SIZE];
};

struct irdma_cq_shadow_area {
	__le64 buf[IRDMA_SHADOW_AREA_SIZE];
};

/* Per-counter register offsets, gen1 layout only */
struct irdma_dev_hw_stats_offsets {
	u32 stats_offset[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
};

/* Decoded 64-bit counter values, indexed by enum irdma_hw_stats_index */
struct irdma_dev_hw_stats {
	u64 stats_val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
};

/* Raw stats buffer as DMA'd by HW during a stats gather */
struct irdma_gather_stats {
	u64 val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
};

/* Describes where one counter lives inside the gather buffer:
 * byte offset, bit offset, and the mask of valid bits.
 */
struct irdma_hw_stat_map {
	u16 byteoff;
	u8 bitoff;
	u64 bitmask;
};
345
/* State for a HW statistics gather: DMA buffer plus the two
 * last/current snapshot pointers used to compute deltas.
 */
struct irdma_stats_gather_info {
	bool use_hmc_fcn_index:1;
	bool use_stats_inst:1;
	u8 hmc_fcn_index;
	u8 stats_inst_index;
	struct irdma_dma_mem stats_buff_mem;
	void *gather_stats_va;		/* current snapshot within stats_buff_mem */
	void *last_gather_stats_va;	/* previous snapshot, for delta accumulation */
};

/* Per-VSI statistics context: accumulated values, gather state and the
 * periodic timer that triggers gathers.
 */
struct irdma_vsi_pestat {
	struct irdma_hw *hw;
	struct irdma_dev_hw_stats hw_stats;
	struct irdma_stats_gather_info gather_info;
	struct timer_list stats_timer;
	struct irdma_sc_vsi *vsi;
	struct irdma_dev_hw_stats last_hw_stats;
	spinlock_t lock; /* rdma stats lock */
};
365
/* One mapped MMIO window: kernel virtual base plus BAR length/offset */
struct irdma_mmio_region {
	u8 __iomem *addr;
	resource_size_t len;
	resource_size_t offset;
};

/* Low-level HW handle. The union reflects two mapping schemes:
 * either a single flat hw_addr, or a dedicated RDMA region plus an
 * array of additional non-RDMA MMIO regions — presumably selected by
 * HW generation; confirm against the mapping code.
 */
struct irdma_hw {
	union {
		u8 __iomem *hw_addr;
		struct {
			struct irdma_mmio_region rdma_reg; /* RDMA region */
			struct irdma_mmio_region *io_regs; /* Non-RDMA MMIO regions */
			u16 num_io_regions; /* Number of Non-RDMA MMIO regions */
		};
	};
	struct device *device;
	struct irdma_hmc_info hmc;
};
384
/* Partial-FPDU reassembly state used by the IEQ when MPA FPDUs arrive
 * fragmented. Holds the receive window bookkeeping, the list of queued
 * buffers, and diagnostic counters.
 */
struct irdma_pfpdu {
	struct list_head rxlist;	/* buffers awaiting reassembly */
	u32 rcv_nxt;
	u32 fps;
	u32 max_fpdu_data;
	u32 nextseqnum;
	u32 rcv_start_seq;
	bool mode:1;			/* true once partial-FPDU mode is active */
	bool mpa_crc_err:1;
	u8 marker_len;
	/* diagnostic counters */
	u64 total_ieq_bufs;
	u64 fpdu_processed;
	u64 bad_seq_num;
	u64 crc_err;
	u64 no_tx_bufs;
	u64 tx_err;
	u64 out_of_order;
	u64 pmode_count;
	struct irdma_sc_ah *ah;
	struct irdma_puda_buf *ah_buf;
	spinlock_t lock; /* fpdu processing lock */
	struct irdma_puda_buf *lastrcv_buf;
};
408
/* Shared-code protection domain */
struct irdma_sc_pd {
	struct irdma_sc_dev *dev;
	u32 pd_id;
	int abi_ver;	/* user/kernel ABI version negotiated at alloc time */
};

/* One CQP SQ WQE worth of space */
struct irdma_cqp_quanta {
	__le64 elem[IRDMA_CQP_WQE_SIZE];
};

/* Tracks one CQP operation that may complete out of order; lives on
 * either the available or pending list of struct irdma_sc_cqp.
 */
struct irdma_ooo_cqp_op {
	struct list_head list_entry;
	u64 scratch;		/* caller cookie for the deferred op */
	u32 def_info;
	u32 sw_def_info;
	u32 wqe_idx;
	bool deferred:1;
};
427
/* Shared-code Control QP: the command channel to the device. Mirrors
 * struct irdma_cqp_init_info plus runtime state (ring, out-of-order
 * completion tracking, op counters).
 */
struct irdma_sc_cqp {
	spinlock_t ooo_list_lock; /* protects list of pending completions */
	struct list_head ooo_avail;	/* free irdma_ooo_cqp_op entries */
	struct list_head ooo_pnd;	/* ops awaiting deferred completion */
	u32 last_def_cmpl_ticket;
	u32 sw_def_cmpl_ticket;
	u32 size;
	u64 sq_pa;
	u64 host_ctx_pa;
	void *back_cqp;			/* OS-layer back-pointer */
	struct irdma_sc_dev *dev;
	int (*process_cqp_sds)(struct irdma_sc_dev *dev,
			       struct irdma_update_sds_info *info);
	struct irdma_dma_mem sdbuf;
	struct irdma_ring sq_ring;
	struct irdma_cqp_quanta *sq_base;
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;		/* per-WQE caller scratch, sq_size entries */
	u64 requested_ops;
	atomic64_t completed_ops;
	struct irdma_ooo_cqp_op *ooo_op_array;
	u32 cqp_id;
	u32 sq_size;
	u32 pe_en_vf_cnt;
	u32 hw_sq_size;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 polarity;			/* valid-bit phase for ring wrap detection */
	u8 hmc_profile;
	u8 ena_vf_count;
	u8 timeout_count;
	u8 ceqs_per_vf;
	u8 ooisc_blksize;
	u8 rrsp_blksize;
	u8 q1_blksize;
	u8 xmit_blksize;
	u8 ts_override;
	u8 ts_shift;
	u8 en_fine_grained_timers;
	u8 blksizes_valid;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	enum irdma_protocol_used protocol_used;
};
475
/* Shared-code Asynchronous Event Queue (one per device) */
struct irdma_sc_aeq {
	u32 size;
	u64 aeq_elem_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_aeqe *aeqe_base;
	void *pbl_list;
	u32 elem_cnt;
	struct irdma_ring aeq_ring;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	u32 msix_idx;
	u8 polarity;		/* valid-bit phase for ring wrap detection */
	bool virtual_map:1;	/* queue memory described by PBLEs, not contiguous */
	bool pasid_valid:1;
	u32 pasid;
};

/* Shared-code Completion Event Queue (one per interrupt vector) */
struct irdma_sc_ceq {
	u32 size;
	u64 ceq_elem_pa;
	struct irdma_sc_dev *dev;
	struct irdma_ceqe *ceqe_base;
	void *pbl_list;
	u32 ceq_id;
	u32 elem_cnt;
	struct irdma_ring ceq_ring;
	u8 pbl_chunk_size;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	u8 polarity;		/* valid-bit phase for ring wrap detection */
	u16 vsi_idx;
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
	bool pasid_valid:1;
	u32 pasid;
};
513
514 struct irdma_sc_cq {
515 struct irdma_cq_uk cq_uk;
516 u64 cq_pa;
517 u64 shadow_area_pa;
518 struct irdma_sc_dev *dev;
519 u16 vsi_idx;
520 struct irdma_sc_vsi *vsi;
521 void *pbl_list;
522 void *back_cq;
523 u32 ceq_id;
524 u32 shadow_read_threshold;
525 u8 pbl_chunk_size;
526 u8 cq_type;
527 u8 tph_val;
528 u32 first_pm_pbl_idx;
529 bool ceqe_mask:1;
530 bool virtual_map:1;
531 bool check_overflow:1;
532 bool ceq_id_valid:1;
533 bool tph_en;
534 };
535
/* Shared-code Queue Pair: DMA addresses, contexts, IEQ pass-through
 * state and flush/error bookkeeping for one QP.
 */
struct irdma_sc_qp {
	struct irdma_qp_uk qp_uk;
	u64 sq_pa;
	u64 rq_pa;
	u64 hw_host_ctx_pa;
	u64 shadow_area_pa;
	u64 q2_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_pd *pd;
	__le64 *hw_host_ctx;
	void *llp_stream_handle;
	struct irdma_pfpdu pfpdu;	/* partial-FPDU reassembly state */
	u32 ieq_qp;
	u8 *q2_buf;			/* terminate/AE scratch area */
	u64 qp_compl_ctx;
	u32 push_idx;
	u16 qs_handle;
	u16 push_offset;
	u8 flush_wqes_count;
	u8 sq_tph_val;
	u8 rq_tph_val;
	u8 qp_state;
	u8 hw_sq_size;
	u8 hw_rq_size;
	u8 src_mac_addr_idx;
	bool on_qoslist:1;
	bool ieq_pass_thru:1;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
	bool flush_sq:1;
	bool flush_rq:1;
	bool err_sq_idx_valid:1;
	bool err_rq_idx_valid:1;
	u32 err_sq_idx;			/* SQ WQE index that caused the error */
	u32 err_rq_idx;			/* RQ WQE index that caused the error */
	bool sq_flush_code:1;
	bool rq_flush_code:1;
	u32 pkt_limit;
	enum irdma_flush_opcode flush_code;
	enum irdma_qp_event_type event_type;
	u8 term_flags;
	u8 user_pri;
	struct list_head list;		/* linkage on the QoS-level qplist */
};
584
/* Arguments for allocating/freeing a HW statistics instance */
struct irdma_stats_inst_info {
	bool use_hmc_fcn_index;
	u8 hmc_fn_id;
	u16 stats_idx;
};

/* Arguments for the CQP "manage user priority" command */
struct irdma_up_info {
	u8 map[8];		/* user-priority -> TC map, one entry per UP */
	u8 cnp_up_override;
	u16 hmc_fcn_idx;
	bool use_vlan:1;
	bool use_cnp_up_override:1;
};

#define IRDMA_MAX_WS_NODES	0x3FF
#define IRDMA_WS_NODE_INVALID	0xFFFF

/* Arguments for the CQP work-scheduler node add/modify/delete command */
struct irdma_ws_node_info {
	u16 id;
	u16 vsi;
	u16 parent_id;
	u16 qs_handle;
	bool type_leaf:1;
	bool enable:1;
	u8 prio_type;		/* enum irdma_sched_prio_type */
	u8 tc;
	u8 weight;
};
613
/* Miscellaneous values parsed from the FPM (Function Private Memory)
 * query buffer: resource ceilings and per-object block sizes.
 */
struct irdma_hmc_fpm_misc {
	u32 max_ceqs;
	u32 max_sds;
	u32 loc_mem_pages;
	u8 ird;
	u32 xf_block_size;
	u32 q1_block_size;
	u32 ht_multiplier;
	u32 timer_bucket;
	u32 rrf_block_size;
	u32 ooiscf_block_size;
};

#define IRDMA_VCHNL_MAX_MSG_SIZE	512
/* Default relative bandwidth for work-scheduler leaf/parent nodes */
#define IRDMA_LEAF_DEFAULT_REL_BW	64
#define IRDMA_PARENT_DEFAULT_REL_BW	1
630
/* Per-user-priority QoS level: the QPs assigned to it, its LAN
 * scheduler handles, and its traffic-class attributes.
 */
struct irdma_qos {
	struct list_head qplist;	/* QPs currently on this QoS level */
	struct mutex qos_mutex; /* protect QoS attributes per QoS level */
	u64 lan_qos_handle;
	u32 l2_sched_node_id;
	u16 qs_handle;
	u8 traffic_class;
	u8 rel_bw;
	u8 prio_type;
	bool valid;
};
642
#define IRDMA_INVALID_STATS_IDX 0xff
/* Shared-code VSI (virtual station interface): per-port context holding
 * the ILQ/IEQ PUDA resources, QoS levels, stats instance and the
 * callbacks used to (un)register qsets with the LAN driver.
 */
struct irdma_sc_vsi {
	u16 vsi_idx;
	struct irdma_sc_dev *dev;
	void *back_vsi;			/* OS-layer back-pointer */
	u32 ilq_count;
	struct irdma_virt_mem ilq_mem;
	struct irdma_puda_rsrc *ilq;	/* inbound listen queue (CM traffic) */
	u32 ieq_count;
	struct irdma_virt_mem ieq_mem;
	struct irdma_puda_rsrc *ieq;	/* iWARP exception queue */
	u32 exception_lan_q;
	u16 mtu;
	u16 vm_id;
	enum irdma_vm_vf_type vm_vf_type;
	bool stats_inst_alloc:1;
	bool tc_change_pending:1;
	struct irdma_vsi_pestat *pestat;
	atomic_t qp_suspend_reqs;
	int (*register_qset)(struct irdma_sc_vsi *vsi,
			     struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
	u8 qos_rel_bw;
	u8 qos_prio_type;
	u8 stats_idx;			/* IRDMA_INVALID_STATS_IDX when unassigned */
	u8 dscp_map[DSCP_MAX];
	struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
	u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
	bool dscp_mode:1;
};
674
/* Shared-code device: the root object tying together the CQP, AEQ,
 * CEQs, CCQ, HMC/FPM state, register maps and virtual-channel state
 * for one RDMA function (PF or VF).
 */
struct irdma_sc_dev {
	struct list_head cqp_cmd_head; /* head of the CQP command list */
	spinlock_t cqp_lock; /* protect CQP list access */
	bool stats_idx_array[IRDMA_MAX_STATS_COUNT_GEN_1];	/* stats-instance allocation map */
	struct irdma_dma_mem vf_fpm_query_buf[IRDMA_MAX_PE_ENA_VF_COUNT];
	u64 fpm_query_buf_pa;
	u64 fpm_commit_buf_pa;
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	/* doorbell and register mappings */
	u8 __iomem *db_addr;
	u32 __iomem *wqe_alloc_db;
	u32 __iomem *cq_arm_db;
	u32 __iomem *aeq_alloc_db;
	u32 __iomem *cqp_db;
	u32 __iomem *cq_ack_db;
	u32 __iomem *ceq_itr_mask_db;
	u32 __iomem *aeq_itr_mask_db;
	u32 __iomem *hw_regs[IRDMA_MAX_REGS];
	u32 ceq_itr; /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */
	/* per-generation register field encodings */
	u64 hw_masks[IRDMA_MAX_MASKS];
	u64 hw_shifts[IRDMA_MAX_SHIFTS];
	const struct irdma_hw_stat_map *hw_stats_map;
	u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
	u64 feature_info[IRDMA_MAX_FEATURES];	/* indexed by enum irdma_feature_type */
	u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
	struct irdma_hw_attrs hw_attrs;
	struct irdma_hmc_info *hmc_info;
	struct irdma_vchnl_rdma_caps vc_caps;
	u8 vc_recv_buf[IRDMA_VCHNL_MAX_MSG_SIZE];
	u16 vc_recv_len;
	struct irdma_sc_cqp *cqp;
	struct irdma_sc_aeq *aeq;
	struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
	struct irdma_sc_cq *ccq;
	spinlock_t puda_cq_lock;
	struct irdma_sc_cq *ilq_cq;
	struct irdma_sc_cq *ieq_cq;
	const struct irdma_irq_ops *irq_ops;
	struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
	struct irdma_hmc_fpm_misc hmc_fpm_misc;
	struct irdma_ws_node *ws_tree_root;
	struct mutex ws_mutex; /* ws tree mutex */
	u32 vchnl_ver;
	u16 num_vfs;
	u16 hmc_fn_id;
	u16 vf_id;
	bool privileged:1;
	bool vchnl_up:1;
	bool ceq_valid:1;
	bool is_pf:1;
	u8 protocol_used;
	struct mutex vchnl_mutex; /* mutex to synchronize RDMA virtual channel messages */
	u8 pci_rev;
	/* work-scheduler hooks, set by the OS layer */
	int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_reset)(struct irdma_sc_vsi *vsi);
};
733
734 struct irdma_modify_cq_info {
735 u64 cq_pa;
736 struct irdma_cqe *cq_base;
737 u32 cq_size;
738 u32 shadow_read_threshold;
739 u8 pbl_chunk_size;
740 u32 first_pm_pbl_idx;
741 bool virtual_map:1;
742 bool check_overflow;
743 bool cq_resize:1;
744 };
745
/* Parameters for initializing a shared receive queue */
struct irdma_srq_init_info {
	struct irdma_sc_pd *pd;
	struct irdma_sc_vsi *vsi;
	u64 srq_pa;
	u64 shadow_area_pa;
	u32 first_pm_pbl_idx;
	u32 pasid;
	u32 srq_size;
	u16 srq_limit;		/* watermark that triggers the limit event */
	u8 pasid_valid;
	u8 wqe_size;
	u8 leaf_pbl_size;
	u8 virtual_map;
	u8 tph_en;
	u8 arm_limit_event;
	u8 tph_value;
	u8 pbl_chunk_size;
	struct irdma_srq_uk_init_info srq_uk_init_info;
};

/* Shared-code shared receive queue */
struct irdma_sc_srq {
	struct irdma_sc_dev *dev;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_pd *pd;
	struct irdma_srq_uk srq_uk;
	void *back_srq;		/* OS-layer back-pointer */
	u64 srq_pa;
	u64 shadow_area_pa;
	u32 first_pm_pbl_idx;
	u32 pasid;
	u32 hw_srq_size;
	u16 srq_limit;
	u8 pasid_valid;
	u8 leaf_pbl_size;
	u8 virtual_map;
	u8 tph_en;
	u8 arm_limit_event;
	u8 tph_val;
};

/* Arguments for the CQP modify-SRQ command */
struct irdma_modify_srq_info {
	u16 srq_limit;
	u8 arm_limit_event;
};
790
791 struct irdma_create_qp_info {
792 bool ord_valid:1;
793 bool tcp_ctx_valid:1;
794 bool cq_num_valid:1;
795 bool arp_cache_idx_valid:1;
796 bool mac_valid:1;
797 bool force_lpb;
798 u8 next_iwarp_state;
799 };
800
/* Arguments for the CQP modify-QP command; *_valid flags gate which
 * context fields the command updates.
 */
struct irdma_modify_qp_info {
	u64 rx_win0;
	u64 rx_win1;
	u16 new_mss;
	u8 next_iwarp_state;
	u8 curr_iwarp_state;
	u8 termlen;		/* terminate message length, when sending a term */
	bool ord_valid:1;
	bool tcp_ctx_valid:1;
	bool udp_ctx_valid:1;
	bool cq_num_valid:1;
	bool arp_cache_idx_valid:1;
	bool reset_tcp_conn:1;
	bool remove_hash_idx:1;
	bool dont_send_term:1;
	bool dont_send_fin:1;
	bool cached_var_valid:1;
	bool mss_change:1;
	bool force_lpb:1;
	bool mac_valid:1;
};
822
/* Decoded CCQ completion: identifies the CQP op that finished and its
 * result/error codes.
 */
struct irdma_ccq_cqe_info {
	struct irdma_sc_cqp *cqp;
	u64 scratch;		/* caller cookie supplied when the WQE was posted */
	u32 op_ret_val;
	u16 maj_err_code;
	u16 min_err_code;
	u8 op_code;
	bool error:1;
	bool pending:1;		/* deferred completion, finishes later via AE */
	};

/* One DCB application-priority table entry (selector + protocol id) */
struct irdma_dcb_app_info {
	u8 priority;
	u8 selector;
	u16 prot_id;
};

/* Per-traffic-class QoS attributes delivered from the LAN driver */
struct irdma_qos_tc_info {
	u64 tc_ctx;
	u8 rel_bw;
	u8 prio_type;
	u8 egress_virt_up;
	u8 ingress_virt_up;
};
847
/* L2/DCB parameters pushed from the LAN driver: TC layout, UP->TC and
 * DSCP maps, MTU, and change flags for runtime updates.
 */
struct irdma_l2params {
	struct irdma_qos_tc_info tc_info[IRDMA_MAX_USER_PRIORITY];
	struct irdma_dcb_app_info apps[IRDMA_MAX_APPS];
	u32 num_apps;
	u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY];
	u16 mtu;
	u8 up2tc[IRDMA_MAX_USER_PRIORITY];	/* user priority -> traffic class */
	u8 dscp_map[DSCP_MAX];
	u8 num_tc;
	u8 vsi_rel_bw;
	u8 vsi_prio_type;
	bool mtu_changed:1;
	bool tc_changed:1;
	bool dscp_mode:1;
};

/* Parameters for irdma_sc_vsi_init() */
struct irdma_vsi_init_info {
	struct irdma_sc_dev *dev;
	void *back_vsi;
	struct irdma_l2params *params;
	u16 exception_lan_q;
	u16 pf_data_vsi_num;
	enum irdma_vm_vf_type vm_vf_type;
	u16 vm_id;
	int (*register_qset)(struct irdma_sc_vsi *vsi,
			     struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
};

/* Parameters for initializing per-VSI statistics collection */
struct irdma_vsi_stats_info {
	struct irdma_vsi_pestat *pestat;
	u16 fcn_id;
	bool alloc_stats_inst;
};
883
/* Parameters for irdma_sc_dev_init(): FPM buffers, HW handle, BAR
 * mapping and protocol selection.
 */
struct irdma_device_init_info {
	u64 fpm_query_buf_pa;
	u64 fpm_commit_buf_pa;
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	void __iomem *bar0;
	enum irdma_protocol_used protocol_used;
	u16 hmc_fn_id;
};

/* Parameters for irdma_sc_ceq_init() */
struct irdma_ceq_init_info {
	u64 ceqe_pa;
	struct irdma_sc_dev *dev;
	u64 *ceqe_base;
	void *pbl_list;
	u32 elem_cnt;
	u32 ceq_id;
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
	u8 pbl_chunk_size;
	u8 tph_val;
	u16 vsi_idx;
	u32 first_pm_pbl_idx;
};

/* Parameters for irdma_sc_aeq_init() */
struct irdma_aeq_init_info {
	u64 aeq_elem_pa;
	struct irdma_sc_dev *dev;
	u32 *aeqe_base;
	void *pbl_list;
	u32 elem_cnt;
	bool virtual_map;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	u32 msix_idx;
};

/* Parameters for irdma_sc_ccq_init() (the CQ that completes CQP ops) */
struct irdma_ccq_init_info {
	u64 cq_pa;
	u64 shadow_area_pa;
	struct irdma_sc_dev *dev;
	struct irdma_cqe *cq_base;
	__le64 *shadow_area;
	void *pbl_list;
	u32 num_elem;
	u32 ceq_id;
	u32 shadow_read_threshold;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool avoid_mem_cflct:1;
	bool virtual_map:1;
	bool tph_en:1;
	u8 tph_val;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	struct irdma_sc_vsi *vsi;
};
943
/* UDP transport context for a RoCEv2 QP, programmed into the QP host
 * context (addresses, PSN state, RNR timers).
 */
struct irdma_udp_offload_info {
	bool ipv4:1;
	bool insert_vlan_tag:1;
	u8 ttl;
	u8 tos;
	u16 src_port;
	u16 dst_port;
	u32 dest_ip_addr[4];	/* IPv4 uses one word; IPv6 all four */
	u32 snd_mss;
	u16 vlan_tag;
	u16 arp_idx;
	u32 flow_label;
	u8 udp_state;
	u32 psn_nxt;
	u32 lsn;
	u32 epsn;
	u32 psn_max;
	u32 psn_una;
	u32 local_ipaddr[4];
	u32 cwnd;
	u8 rexmit_thresh;
	u8 rnr_nak_thresh;
	u8 rnr_nak_tmr;
	u8 min_rnr_timer;
};

/* RoCE-specific QP context: keys, credits, ORD/IRD and feature flags */
struct irdma_roce_offload_info {
	u16 p_key;
	u16 err_rq_idx;
	u32 qkey;
	u32 dest_qp;
	u8 roce_tver;
	u8 ack_credits;
	u8 err_rq_idx_valid;
	u32 pd_id;
	u16 ord_size;
	u16 ird_size;
	bool is_qp1:1;
	bool udprivcq_en:1;
	bool dcqcn_en:1;
	bool rcv_no_icrc:1;
	bool wr_rdresp_en:1;
	bool bind_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool dctcp_en:1;
	bool fw_cc_enable:1;
	bool use_stats_inst:1;
	u8 local_ack_timeout;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETH_ALEN];
	u8 rtomin;
};
1001
/* iWARP-specific QP context: MPA marker/CRC options, ORD/IRD and
 * feature flags.
 */
struct irdma_iwarp_offload_info {
	u16 rcv_mark_offset;
	u16 snd_mark_offset;
	u8 ddp_ver;
	u8 rdmap_ver;
	u8 iwarp_mode;
	u16 err_rq_idx;
	u32 pd_id;
	u16 ord_size;
	u16 ird_size;
	bool ib_rd_en:1;
	bool align_hdrs:1;
	bool rcv_no_mpa_crc:1;
	bool err_rq_idx_valid:1;
	bool snd_mark_en:1;
	bool rcv_mark_en:1;
	bool wr_rdresp_en:1;
	bool bind_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool use_stats_inst:1;
	bool ecn_en:1;
	bool dctcp_en:1;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETH_ALEN];
	u8 rtomin;
};

/* TCP transport context for an iWARP QP: the full TCP connection state
 * (sequence numbers, windows, RTT estimates) handed to HW offload.
 */
struct irdma_tcp_offload_info {
	bool ipv4:1;
	bool no_nagle:1;
	bool insert_vlan_tag:1;
	bool time_stamp:1;
	bool drop_ooo_seg:1;
	bool avoid_stretch_ack:1;
	bool wscale:1;
	bool ignore_tcp_opt:1;
	bool ignore_tcp_uns_opt:1;
	u8 cwnd_inc_limit;
	u8 dup_ack_thresh;
	u8 ttl;
	u8 src_mac_addr_idx;
	u8 tos;
	u16 src_port;
	u16 dst_port;
	u32 dest_ip_addr[4];	/* IPv4 uses one word; IPv6 all four */
	u32 snd_mss;
	u16 syn_rst_handling;	/* enum irdma_syn_rst_handling */
	u16 vlan_tag;
	u16 arp_idx;
	u32 flow_label;
	u8 tcp_state;
	u8 snd_wscale;
	u8 rcv_wscale;
	u32 time_stamp_recent;
	u32 time_stamp_age;
	u32 snd_nxt;
	u32 snd_wnd;
	u32 rcv_nxt;
	u32 rcv_wnd;
	u32 snd_max;
	u32 snd_una;
	u32 srtt;
	u32 rtt_var;
	u32 ss_thresh;
	u32 cwnd;
	u32 snd_wl1;
	u32 snd_wl2;
	u32 max_snd_window;
	u8 rexmit_thresh;
	u32 local_ipaddr[4];
};
1082
/* Aggregate of all context pieces needed to build a QP host context.
 * The unions select per-transport info: tcp/iwarp for iWARP QPs,
 * udp/roce for RoCEv2 QPs (gated by tcp_info_valid/iwarp_info_valid).
 */
struct irdma_qp_host_ctx_info {
	u64 qp_compl_ctx;
	union {
		struct irdma_tcp_offload_info *tcp_info;
		struct irdma_udp_offload_info *udp_info;
	};
	union {
		struct irdma_iwarp_offload_info *iwarp_info;
		struct irdma_roce_offload_info *roce_info;
	};
	u32 send_cq_num;
	u32 rcv_cq_num;
	u32 srq_id;
	u32 rem_endpoint_idx;
	u16 stats_idx;
	bool remote_atomics_en:1;
	bool srq_valid:1;
	bool tcp_info_valid:1;
	bool iwarp_info_valid:1;
	bool stats_idx_valid:1;
	u8 user_pri;
};

/* Decoded asynchronous event queue entry */
struct irdma_aeqe_info {
	u64 compl_ctx;
	u32 qp_cq_id;
	u32 def_info; /* only valid for DEF_CMPL */
	u16 ae_id;
	u16 wqe_idx;
	u8 tcp_state;
	u8 iwarp_state;
	/* which object class the event refers to */
	bool qp:1;
	bool cq:1;
	bool sq:1;
	bool rq:1;
	bool srq:1;
	bool in_rdrsp_wr:1;
	bool out_rdrsp:1;
	bool aeqe_overflow:1;
	bool err_rq_idx_valid:1;
	u8 q2_data_written;
	u8 ae_src;
};
1126
/* Arguments for the CQP allocate-STag command (MR without pages yet) */
struct irdma_allocate_stag_info {
	u64 total_len;
	u64 first_pm_pbl_idx;
	u32 chunk_size;
	u32 stag_idx;
	u32 page_size;
	u32 pd_id;
	u16 access_rights;
	bool remote_access:1;
	bool use_hmc_fcn_index:1;
	bool use_pf_rid:1;
	bool all_memory:1;	/* zero-based MR covering all memory */
	bool remote_atomics_en:1;
	u16 hmc_fcn_index;
};

/* Arguments for the CQP allocate-memory-window command */
struct irdma_mw_alloc_info {
	u32 mw_stag_index;
	u32 page_size;
	u32 pd_id;
	bool remote_access:1;
	bool mw_wide:1;		/* type-1 (wide) vs type-2 window */
	bool mw1_bind_dont_vldt_key:1;
};

/* Arguments for the CQP register-MR (non-shared STag) command */
struct irdma_reg_ns_stag_info {
	u64 reg_addr_pa;
	u64 va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool use_hmc_fcn_index:1;
	u8 hmc_fcn_index;
	bool use_pf_rid:1;
	bool all_memory:1;
	bool remote_atomics_en:1;
};
1170
/* Arguments for a fast-register WQE posted on a QP's SQ */
struct irdma_fast_reg_stag_info {
	u64 wr_id;
	u64 reg_addr_pa;
	u64 fbo;		/* first byte offset into the first page */
	void *va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool local_fence:1;
	bool read_fence:1;
	bool signaled:1;
	bool use_hmc_fcn_index:1;
	u8 hmc_fcn_index;
	bool use_pf_rid:1;
	bool defer_flag:1;
	bool remote_atomics_en:1;
};

/* Arguments for the CQP deallocate-STag command */
struct irdma_dealloc_stag_info {
	u32 stag_idx;
	u32 pd_id;
	bool mr:1;		/* true for MR, false for MW */
	bool dealloc_pbl:1;
};

/* Arguments for registering a shared MR (new STag aliasing a parent) */
struct irdma_register_shared_stag {
	u64 va;
	enum irdma_addressing_type addr_type;
	irdma_stag_index new_stag_idx;
	irdma_stag_index parent_stag_idx;
	u32 access_rights;
	u32 pd_id;
	u32 page_size;
	irdma_stag_key new_stag_key;
};
1212
/* Parameters for irdma_sc_qp_init() */
struct irdma_qp_init_info {
	struct irdma_qp_uk_init_info qp_uk_init_info;
	struct irdma_sc_pd *pd;
	struct irdma_sc_vsi *vsi;
	__le64 *host_ctx;
	u8 *q2;			/* terminate/AE scratch area */
	u64 sq_pa;
	u64 rq_pa;
	u64 host_ctx_pa;
	u64 q2_pa;
	u64 shadow_area_pa;
	u8 sq_tph_val;
	u8 rq_tph_val;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
};

/* Parameters for irdma_sc_cq_init() */
struct irdma_cq_init_info {
	struct irdma_sc_dev *dev;
	u64 cq_base_pa;
	u64 shadow_area_pa;
	u32 ceq_id;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	bool virtual_map:1;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool tph_en:1;
	u8 tph_val;
	u8 type;
	struct irdma_cq_uk_init_info cq_uk_init_info;
	struct irdma_sc_vsi *vsi;
};

/* Arguments for the CQP upload-context command (debug QP snapshot) */
struct irdma_upload_context_info {
	u64 buf_pa;
	u32 qp_id;
	u8 qp_type;
	bool freeze_qp:1;
	bool raw_format:1;
};
1258
1259 struct irdma_local_mac_entry_info {
1260 u8 mac_addr[6];
1261 u16 entry_idx;
1262 };
1263
/* Arguments for the CQP add-ARP-cache-entry command */
struct irdma_add_arp_cache_entry_info {
	u8 mac_addr[ETH_ALEN];
	u32 reach_max;
	u16 arp_index;
	bool permanent;
};

/* Arguments for the CQP APBVT (accelerated port table) command */
struct irdma_apbvt_info {
	u16 port;
	bool add;		/* true to add the port, false to remove */
};

/* Arguments for the CQP manage-quad-hash command: the 4-tuple (plus
 * VLAN/MAC) identifying a connection to add/modify/delete.
 */
struct irdma_qhash_table_info {
	struct irdma_sc_vsi *vsi;
	enum irdma_quad_hash_manage_type manage;
	enum irdma_quad_entry_type entry_type;
	bool vlan_valid:1;
	bool ipv4_valid:1;
	u8 mac_addr[ETH_ALEN];
	u16 vlan_id;
	u8 user_pri;
	u32 qp_num;
	u32 dest_ip[4];
	u32 src_ip[4];
	u16 dest_port;
	u16 src_port;
};
1291
/* Arguments for the CQP manage-push-page command */
struct irdma_cqp_manage_push_page_info {
	u32 push_idx;
	u16 qs_handle;
	u8 free_page;		/* nonzero to free instead of allocate */
	u8 push_page_type;
};

/* Arguments for the CQP flush-WQEs command issued on QP errors */
struct irdma_qp_flush_info {
	u32 err_sq_idx;
	u32 err_rq_idx;
	u16 sq_minor_code;
	u16 sq_major_code;
	u16 rq_minor_code;
	u16 rq_major_code;
	u16 ae_code;
	u8 ae_src;
	bool sq:1;		/* flush the SQ */
	bool rq:1;		/* flush the RQ */
	bool userflushcode:1;	/* use caller-supplied major/minor codes */
	bool generate_ae:1;
	bool err_sq_idx_valid:1;
	bool err_rq_idx_valid:1;
};

/* Arguments for the CQP generate-AE command */
struct irdma_gen_ae_info {
	u16 ae_code;
	u8 ae_src;
};

/* Snapshot used to detect a stalled CQP: count grows while the
 * completed-command total stays unchanged.
 */
struct irdma_cqp_timeout {
	u64 compl_cqp_cmds;
	u32 count;
};
1325
/* Interrupt configuration hooks supplied by the OS/platform layer;
 * idx is the MSI-X vector index.
 */
struct irdma_irq_ops {
	void (*irdma_cfg_aeq)(struct irdma_sc_dev *dev, u32 idx, bool enable);
	void (*irdma_cfg_ceq)(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
			      bool enable);
	void (*irdma_dis_irq)(struct irdma_sc_dev *dev, u32 idx);
	void (*irdma_en_irq)(struct irdma_sc_dev *dev, u32 idx);
};
1333
/* control CQ (CCQ) — completion queue for CQP commands */
void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq);
int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
			bool check_overflow, bool post_sq);
int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq);
int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
			      struct irdma_ccq_cqe_info *info);
int irdma_sc_ccq_init(struct irdma_sc_cq *ccq,
		      struct irdma_ccq_init_info *info);
1342
/* CEQ (completion event queue) management */
int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch);
int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);

int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq);
int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
		      struct irdma_ceq_init_info *info);
void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq);
bool irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq,
			  u32 *cq_idx);
1352
/* AEQ (asynchronous event queue) management */
int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
		      struct irdma_aeq_init_info *info);
int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
			   struct irdma_aeqe_info *info);
void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);
1358
/* PD, CQP, QP, CQ, SRQ and VSI control-path entry points */
void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
		      int abi_ver);
void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
			      struct irdma_sc_dev *dev);
void irdma_sc_cqp_def_cmpl_ae_handler(struct irdma_sc_dev *dev,
				      struct irdma_aeqe_info *info,
				      bool first, u64 *scratch,
				      u32 *sw_def_info);
u64 irdma_sc_cqp_cleanup_handler(struct irdma_sc_dev *dev);
int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
		      struct irdma_cqp_init_info *info);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
				  struct irdma_ccq_cqe_info *cmpl_info);
int irdma_sc_fast_register(struct irdma_sc_qp *qp,
			   struct irdma_fast_reg_stag_info *info, bool post_sq);
int irdma_sc_qp_create(struct irdma_sc_qp *qp,
		       struct irdma_create_qp_info *info, u64 scratch,
		       bool post_sq);
int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
			bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq);
int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
			   struct irdma_qp_flush_info *info, u64 scratch,
			   bool post_sq);
int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info);
int irdma_sc_qp_modify(struct irdma_sc_qp *qp,
		       struct irdma_modify_qp_info *info, u64 scratch,
		       bool post_sq);
void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
			irdma_stag stag);

void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			struct irdma_qp_host_ctx_info *info);
void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			     struct irdma_qp_host_ctx_info *info);
int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq);
int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info);
void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info);
int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
					u8 hmc_fn_id, bool post_sq,
					bool poll_registers);
int irdma_sc_srq_init(struct irdma_sc_srq *srq,
		      struct irdma_srq_init_info *info);

/* NOTE(review): lacks the irdma_ prefix used by every other export here */
void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
/*
 * Per-command argument bundle for a queued CQP (control QP) operation.
 * Exactly one union arm is populated, selected by the cqp_cmd field of
 * the enclosing cqp_cmds_info.  Each arm carries the target object, the
 * operation-specific info struct, and a scratch value that the CQP
 * returns in the command's completion.
 */
struct cqp_info {
	union {
		struct {
			struct irdma_sc_qp *qp;
			struct irdma_create_qp_info info;
			u64 scratch;
		} qp_create;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_modify_qp_info info;
			u64 scratch;
		} qp_modify;

		struct {
			struct irdma_sc_qp *qp;
			u64 scratch;
			bool remove_hash_idx;
			bool ignore_mw_bnd;
		} qp_destroy;

		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
			bool check_overflow;
		} cq_create;

		struct {
			struct irdma_sc_cq *cq;
			struct irdma_modify_cq_info info;
			u64 scratch;
		} cq_modify;

		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
		} cq_destroy;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_allocate_stag_info info;
			u64 scratch;
		} alloc_stag;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_mw_alloc_info info;
			u64 scratch;
		} mw_alloc;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_reg_ns_stag_info info;
			u64 scratch;
		} mr_reg_non_shared;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_dealloc_stag_info info;
			u64 scratch;
		} dealloc_stag;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_add_arp_cache_entry_info info;
			u64 scratch;
		} add_arp_cache_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u16 arp_index;
		} del_arp_cache_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_local_mac_entry_info info;
			u64 scratch;
		} add_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u8 entry_idx;
			u8 ignore_ref_count;
		} del_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
		} alloc_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_cqp_manage_push_page_info info;
			u64 scratch;
		} manage_push_page;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_upload_context_info info;
			u64 scratch;
		} qp_upload_context;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_hmc_fcn_info info;
			u64 scratch;
		} manage_hmc_pm;

		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_create;

		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_destroy;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_create;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_destroy;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_qp_flush_info info;
			u64 scratch;
		} qp_flush_wqes;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_gen_ae_info info;
			u64 scratch;
		} gen_ae;

		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u8 hmc_fn_id;
			u64 scratch;
		} query_fpm_val;

		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u8 hmc_fn_id;
			u64 scratch;
		} commit_fpm_val;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_apbvt_info info;
			u64 scratch;
		} manage_apbvt_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_qhash_table_info info;
			u64 scratch;
		} manage_qhash_table_entry;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_update_sds_info info;
			u64 scratch;
		} update_pe_sds;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_sc_qp *qp;
			u64 scratch;
		} suspend_resume;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info info;
			u64 scratch;
		} ah_create;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info info;
			u64 scratch;
		} ah_destroy;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_create;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_destroy;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_modify;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_stats_inst_info info;
			u64 scratch;
		} stats_manage;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_stats_gather_info info;
			u64 scratch;
		} stats_gather;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ws_node_info info;
			u64 scratch;
		} ws_node;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_up_info info;
			u64 scratch;
		} up_map;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_dma_mem query_buff_mem;
			u64 scratch;
		} query_rdma;

		struct {
			struct irdma_sc_srq *srq;
			u64 scratch;
		} srq_create;

		struct {
			struct irdma_sc_srq *srq;
			struct irdma_modify_srq_info info;
			u64 scratch;
		} srq_modify;

		struct {
			struct irdma_sc_srq *srq;
			u64 scratch;
		} srq_destroy;

	} u;
};
1668
/* one queued CQP command: list linkage, opcode and its argument bundle */
struct cqp_cmds_info {
	struct list_head cqp_cmd_entry;	/* linkage on the pending-command list */
	u8 cqp_cmd;			/* command opcode; selects the union arm of @in */
	u8 post_sq;			/* nonzero = ring the CQP SQ doorbell after posting */
	struct cqp_info in;		/* operation-specific input arguments */
};
1675
/* reserve the next CQP SQ WQE and report its index through @wqe_idx */
__le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
					   u32 *wqe_idx);
1678
1679 /**
1680 * irdma_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
1681 * @cqp: struct for cqp hw
1682 * @scratch: private data for CQP WQE
1683 */
irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp * cqp,u64 scratch)1684 static inline __le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch)
1685 {
1686 u32 wqe_idx;
1687
1688 return irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
1689 }
1690 #endif /* IRDMA_TYPE_H */
1691