1 /*-
2 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
3 *
4 * Copyright (c) 2015 - 2026 Intel Corporation
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenFabrics.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35 #ifndef IRDMA_TYPE_H
36 #define IRDMA_TYPE_H
37
38 #include "osdep.h"
39
40 #include "irdma.h"
41 #include "irdma_user.h"
42 #include "irdma_hmc.h"
43 #include "irdma_uda.h"
44 #include "irdma_ws.h"
45 #include "irdma_pble.h"
46
/* Bit flags selecting debug-print categories; OR'ed together to form a
 * mask (see irdma_sc_dev.debug_mask / irdma_device_init_info.debug_mask).
 */
enum irdma_debug_flag {
	IRDMA_DEBUG_NONE = 0x00000000,
	IRDMA_DEBUG_ERR = 0x00000001,
	IRDMA_DEBUG_INIT = 0x00000002,
	IRDMA_DEBUG_DEV = 0x00000004,
	IRDMA_DEBUG_CM = 0x00000008,
	IRDMA_DEBUG_VERBS = 0x00000010,
	IRDMA_DEBUG_PUDA = 0x00000020,
	IRDMA_DEBUG_ILQ = 0x00000040,
	IRDMA_DEBUG_IEQ = 0x00000080,
	IRDMA_DEBUG_QP = 0x00000100,
	IRDMA_DEBUG_CQ = 0x00000200,
	IRDMA_DEBUG_MR = 0x00000400,
	IRDMA_DEBUG_PBLE = 0x00000800,
	IRDMA_DEBUG_WQE = 0x00001000,
	IRDMA_DEBUG_AEQ = 0x00002000,
	IRDMA_DEBUG_CQP = 0x00004000,
	IRDMA_DEBUG_HMC = 0x00008000,
	IRDMA_DEBUG_USER = 0x00010000,
	IRDMA_DEBUG_VIRT = 0x00020000,
	IRDMA_DEBUG_DCB = 0x00040000,
	/* note: bits 0x00080000-0x00400000 are intentionally unassigned here */
	IRDMA_DEBUG_CQE = 0x00800000,
	IRDMA_DEBUG_CLNT = 0x01000000,
	IRDMA_DEBUG_WS = 0x02000000,
	IRDMA_DEBUG_STATS = 0x04000000,
	IRDMA_DEBUG_ALL = 0xFFFFFFFF,
};
74
/* Sentinel for an unassigned/reserved doorbell or register offset. */
#define RSVD_OFFSET 0xFFFFFFFF

/* HW page-size encodings for memory registration. */
enum irdma_page_size {
	IRDMA_PAGE_SIZE_4K = 0,
	IRDMA_PAGE_SIZE_2M,
	IRDMA_PAGE_SIZE_1G,
};

/* Header-control flags carried in irdma_terminate_hdr.hdrct. */
enum irdma_hdrct_flags {
	DDP_LEN_FLAG = 0x80,
	DDP_HDR_FLAG = 0x40,
	RDMA_HDR_FLAG = 0x20,
};

/* Protocol layer that originated an iWARP terminate message. */
enum irdma_term_layers {
	LAYER_RDMA = 0,
	LAYER_DDP = 1,
	LAYER_MPA = 2,
};

/* Terminate error-type codes; values overlap because they are scoped
 * per layer (RDMAP_* vs DDP_*).
 */
enum irdma_term_error_types {
	RDMAP_REMOTE_PROT = 1,
	RDMAP_REMOTE_OP = 2,
	DDP_CATASTROPHIC = 0,
	DDP_TAGGED_BUF = 1,
	DDP_UNTAGGED_BUF = 2,
	DDP_LLP = 3,
};
103
/* RDMAP-layer terminate error codes (per iWARP RDMAP spec, RFC 5040). */
enum irdma_term_rdma_errors {
	RDMAP_INV_STAG = 0x00,
	RDMAP_INV_BOUNDS = 0x01,
	RDMAP_ACCESS = 0x02,
	RDMAP_UNASSOC_STAG = 0x03,
	RDMAP_TO_WRAP = 0x04,
	RDMAP_INV_RDMAP_VER = 0x05,
	RDMAP_UNEXPECTED_OP = 0x06,
	RDMAP_CATASTROPHIC_LOCAL = 0x07,
	RDMAP_CATASTROPHIC_GLOBAL = 0x08,
	RDMAP_CANT_INV_STAG = 0x09,
	RDMAP_UNSPECIFIED = 0xff,
};

/* DDP-layer terminate error codes (per RFC 5041); tagged and untagged
 * code spaces overlap by design.
 */
enum irdma_term_ddp_errors {
	DDP_CATASTROPHIC_LOCAL = 0x00,
	DDP_TAGGED_INV_STAG = 0x00,
	DDP_TAGGED_BOUNDS = 0x01,
	DDP_TAGGED_UNASSOC_STAG = 0x02,
	DDP_TAGGED_TO_WRAP = 0x03,
	DDP_TAGGED_INV_DDP_VER = 0x04,
	DDP_UNTAGGED_INV_QN = 0x01,
	DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
	DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
	DDP_UNTAGGED_INV_MO = 0x04,
	DDP_UNTAGGED_INV_TOO_LONG = 0x05,
	DDP_UNTAGGED_INV_DDP_VER = 0x06,
};

/* MPA-layer terminate error codes (per RFC 5044). */
enum irdma_term_mpa_errors {
	MPA_CLOSED = 0x01,
	MPA_CRC = 0x02,
	MPA_MARKER = 0x03,
	MPA_REQ_RSP = 0x04,
};
139
/* Indices into the HW statistics gather buffer. Order is fixed by the
 * device statistics layout (see irdma_hw_stat_map); do not reorder.
 */
enum irdma_hw_stats_index {
	/* gen1 - 32-bit */
	IRDMA_HW_STAT_INDEX_IP4RXDISCARD = 0,
	IRDMA_HW_STAT_INDEX_IP4RXTRUNC = 1,
	IRDMA_HW_STAT_INDEX_IP4TXNOROUTE = 2,
	IRDMA_HW_STAT_INDEX_IP6RXDISCARD = 3,
	IRDMA_HW_STAT_INDEX_IP6RXTRUNC = 4,
	IRDMA_HW_STAT_INDEX_IP6TXNOROUTE = 5,
	IRDMA_HW_STAT_INDEX_TCPRTXSEG = 6,
	IRDMA_HW_STAT_INDEX_TCPRXOPTERR = 7,
	IRDMA_HW_STAT_INDEX_TCPRXPROTOERR = 8,
	IRDMA_HW_STAT_INDEX_RXVLANERR = 9,
	/* gen1 - 64-bit */
	IRDMA_HW_STAT_INDEX_IP4RXOCTS = 10,
	IRDMA_HW_STAT_INDEX_IP4RXPKTS = 11,
	IRDMA_HW_STAT_INDEX_IP4RXFRAGS = 12,
	IRDMA_HW_STAT_INDEX_IP4RXMCPKTS = 13,
	IRDMA_HW_STAT_INDEX_IP4TXOCTS = 14,
	IRDMA_HW_STAT_INDEX_IP4TXPKTS = 15,
	IRDMA_HW_STAT_INDEX_IP4TXFRAGS = 16,
	IRDMA_HW_STAT_INDEX_IP4TXMCPKTS = 17,
	IRDMA_HW_STAT_INDEX_IP6RXOCTS = 18,
	IRDMA_HW_STAT_INDEX_IP6RXPKTS = 19,
	IRDMA_HW_STAT_INDEX_IP6RXFRAGS = 20,
	IRDMA_HW_STAT_INDEX_IP6RXMCPKTS = 21,
	IRDMA_HW_STAT_INDEX_IP6TXOCTS = 22,
	IRDMA_HW_STAT_INDEX_IP6TXPKTS = 23,
	IRDMA_HW_STAT_INDEX_IP6TXFRAGS = 24,
	IRDMA_HW_STAT_INDEX_IP6TXMCPKTS = 25,
	IRDMA_HW_STAT_INDEX_TCPRXSEGS = 26,
	IRDMA_HW_STAT_INDEX_TCPTXSEG = 27,
	IRDMA_HW_STAT_INDEX_RDMARXRDS = 28,
	IRDMA_HW_STAT_INDEX_RDMARXSNDS = 29,
	IRDMA_HW_STAT_INDEX_RDMARXWRS = 30,
	IRDMA_HW_STAT_INDEX_RDMATXRDS = 31,
	IRDMA_HW_STAT_INDEX_RDMATXSNDS = 32,
	IRDMA_HW_STAT_INDEX_RDMATXWRS = 33,
	IRDMA_HW_STAT_INDEX_RDMAVBND = 34,
	IRDMA_HW_STAT_INDEX_RDMAVINV = 35,
	IRDMA_HW_STAT_INDEX_IP4RXMCOCTS = 36,
	IRDMA_HW_STAT_INDEX_IP4TXMCOCTS = 37,
	IRDMA_HW_STAT_INDEX_IP6RXMCOCTS = 38,
	IRDMA_HW_STAT_INDEX_IP6TXMCOCTS = 39,
	IRDMA_HW_STAT_INDEX_UDPRXPKTS = 40,
	IRDMA_HW_STAT_INDEX_UDPTXPKTS = 41,
	IRDMA_HW_STAT_INDEX_MAX_GEN_1 = 42, /* Must be same value as next entry */

	/* gen2 - 64-bit */
	IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 42,

	/* gen2 - 32-bit */
	IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED = 43,
	IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 44,
	IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 45,
	IRDMA_HW_STAT_INDEX_MAX_GEN_2 = 46,
};
196
/* Minimum number of entries expected in the FW feature-info table. */
#define IRDMA_MIN_FEATURES 2

/* Indices into irdma_sc_dev.feature_info[], reported by firmware. */
enum irdma_feature_type {
	IRDMA_FEATURE_FW_INFO = 0,
	IRDMA_HW_VERSION_INFO = 1,
	IRDMA_QSETS_MAX = 26,
	IRDMA_MAX_FEATURES, /* Must be last entry */
};

/* Work-scheduler node arbitration policy. */
enum irdma_sched_prio_type {
	IRDMA_PRIO_WEIGHTED_RR = 1,
	IRDMA_PRIO_STRICT = 2,
	IRDMA_PRIO_WEIGHTED_STRICT = 3,
};

/* Ownership type of a VSI: VF, VM, or PF. */
enum irdma_vm_vf_type {
	IRDMA_VF_TYPE = 0,
	IRDMA_VM_TYPE,
	IRDMA_PF_TYPE,
};

/* HMC resource-partitioning profile selected at CQP init. */
enum irdma_cqp_hmc_profile {
	IRDMA_HMC_PROFILE_DEFAULT = 1,
	IRDMA_HMC_PROFILE_FAVOR_VF = 2,
	IRDMA_HMC_PROFILE_EQUAL = 3,
};

/* Entry classification for the quad-hash (connection steering) table. */
enum irdma_quad_entry_type {
	IRDMA_QHASH_TYPE_TCP_ESTABLISHED = 1,
	IRDMA_QHASH_TYPE_TCP_SYN,
	IRDMA_QHASH_TYPE_UDP_UNICAST,
	IRDMA_QHASH_TYPE_UDP_MCAST,
	IRDMA_QHASH_TYPE_ROCE_MCAST,
	IRDMA_QHASH_TYPE_ROCEV2_HW,
};

/* Operation to perform on a quad-hash table entry. */
enum irdma_quad_hash_manage_type {
	IRDMA_QHASH_MANAGE_TYPE_DELETE = 0,
	IRDMA_QHASH_MANAGE_TYPE_ADD,
	IRDMA_QHASH_MANAGE_TYPE_MODIFY,
};

/* Who handles TCP SYN/RST processing (HW vs FW, secure variants). */
enum irdma_syn_rst_handling {
	IRDMA_SYN_RST_HANDLING_HW_TCP_SECURE = 0,
	IRDMA_SYN_RST_HANDLING_HW_TCP,
	IRDMA_SYN_RST_HANDLING_FW_TCP_SECURE,
	IRDMA_SYN_RST_HANDLING_FW_TCP,
};

/* Queue class used when distinguishing SQ/RQ work from CQP work. */
enum irdma_queue_type {
	IRDMA_QUEUE_TYPE_SQ_RQ = 0,
	IRDMA_QUEUE_TYPE_CQP,
};
250
struct irdma_sc_dev;
struct irdma_vsi_pestat;

/* DCQCN congestion-control tuning parameters passed to firmware. */
struct irdma_dcqcn_cc_params {
	u8 cc_cfg_valid;
	u8 min_dec_factor;
	u8 min_rate;
	u8 dcqcn_f;
	u16 rai_factor;
	u16 hai_factor;
	u16 dcqcn_t;
	u32 dcqcn_b;
	u32 rreduce_mperiod;
};

/* Caller-supplied parameters for irdma_sc_cqp_init(); mirrors the
 * corresponding fields kept in struct irdma_sc_cqp.
 */
struct irdma_cqp_init_info {
	u64 cqp_compl_ctx;
	u64 host_ctx_pa;	/* physical address of host context */
	u64 sq_pa;		/* physical address of the CQP SQ */
	struct irdma_sc_dev *dev;
	struct irdma_cqp_quanta *sq;
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;
	u32 sq_size;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 hmc_profile;		/* enum irdma_cqp_hmc_profile value */
	u8 ena_vf_count;
	u8 ceqs_per_vf;
	u8 timer_slots;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	bool en_rem_endpoint_trk:1;
	enum irdma_protocol_used protocol_used;
};

/* Wire-format header of an iWARP terminate message built by the driver. */
struct irdma_terminate_hdr {
	u8 layer_etype;	/* enum irdma_term_layers | error type */
	u8 error_code;
	u8 hdrct;	/* enum irdma_hdrct_flags bits */
	u8 rsvd;
};
296
/* Fixed-size, little-endian HW descriptor formats. Sizes come from the
 * IRDMA_*_SIZE constants defined in the included irdma headers.
 */
struct irdma_cqp_sq_wqe {
	__le64 buf[IRDMA_CQP_WQE_SIZE];
};

struct irdma_sc_aeqe {
	__le64 buf[IRDMA_AEQE_SIZE];
};

struct irdma_ceqe {
	__le64 buf[IRDMA_CEQE_SIZE];
};

struct irdma_cqp_ctx {
	__le64 buf[IRDMA_CQP_CTX_SIZE];
};

struct irdma_cq_shadow_area {
	__le64 buf[IRDMA_SHADOW_AREA_SIZE];
};

/* Accumulated (driver-maintained) statistics values, indexed by
 * enum irdma_hw_stats_index.
 */
struct irdma_dev_hw_stats {
	u64 stats_val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
};

/* Raw snapshot buffer written by HW during a stats-gather operation. */
struct irdma_gather_stats {
	u64 val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
};

/* Describes where one statistic lives in the gather buffer. */
struct irdma_hw_stat_map {
	u16 byteoff;	/* byte offset into the gather buffer */
	u8 bitoff;	/* bit offset within that word */
	u64 bitmask;	/* mask of valid bits for this counter */
};
330
/* Parameters and DMA buffers for a HW statistics-gather request. */
struct irdma_stats_gather_info {
	bool use_hmc_fcn_index:1;
	bool use_stats_inst:1;
	u16 hmc_fcn_index;
	u16 stats_inst_index;
	struct irdma_dma_mem stats_buff_mem;
	void *gather_stats_va;		/* current snapshot */
	void *last_gather_stats_va;	/* previous snapshot, for deltas */
};

/* Per-VSI protocol-engine statistics state, refreshed by a timer. */
struct irdma_vsi_pestat {
	struct irdma_hw *hw;
	struct irdma_dev_hw_stats hw_stats;
	struct irdma_stats_gather_info gather_info;
	struct OS_TIMER stats_timer;
	struct irdma_sc_vsi *vsi;
	spinlock_t lock; /* rdma stats lock */
};

/* MMIO mapping and HMC state for one HW instance. */
struct irdma_hw {
	u8 IOMEM *hw_addr;
	u8 IOMEM *priv_hw_addr;
	void *dev_context;
	struct irdma_hmc_info hmc;
};
356
/* Partial-FPDU reassembly state for the IEQ (iWARP exception queue),
 * plus counters tracking reassembly errors and activity.
 */
struct irdma_pfpdu {
	struct list_head rxlist;	/* buffers awaiting reassembly */
	u32 rcv_nxt;
	u32 fps;
	u32 max_fpdu_data;
	u32 nextseqnum;
	u32 rcv_start_seq;
	bool mode:1;
	bool mpa_crc_err:1;
	u8 marker_len;
	u64 total_ieq_bufs;
	u64 fpdu_processed;
	u64 bad_seq_num;
	u64 crc_err;
	u64 no_tx_bufs;
	u64 tx_err;
	u64 out_of_order;
	u64 pmode_count;
	struct irdma_sc_ah *ah;
	struct irdma_puda_buf *ah_buf;
	spinlock_t lock; /* fpdu processing lock */
	struct irdma_puda_buf *lastrcv_buf;
};

/* Protection domain control object. */
struct irdma_sc_pd {
	struct irdma_sc_dev *dev;
	u32 pd_id;
	int abi_ver;	/* user/kernel ABI version negotiated at alloc */
};

/* One CQP SQ entry expressed as raw quanta. */
struct irdma_cqp_quanta {
	__le64 elem[IRDMA_CQP_WQE_SIZE];
};
390
/* Control QP (CQP) state: the admin queue used to submit control
 * operations to firmware. Initialized from irdma_cqp_init_info.
 */
struct irdma_sc_cqp {
	u32 size;
	u64 sq_pa;
	u64 host_ctx_pa;
	void *back_cqp;		/* back-pointer to the OS-level cqp wrapper */
	struct irdma_sc_dev *dev;
	int (*process_cqp_sds)(struct irdma_sc_dev *dev,
			       struct irdma_update_sds_info *info);
	struct irdma_dma_mem sdbuf;
	struct irdma_ring sq_ring;
	struct irdma_cqp_quanta *sq_base;
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;
	u64 requested_ops;		/* ops posted to the CQP SQ */
	atomic64_t completed_ops;	/* ops completed by firmware */
	u32 cqp_id;
	u32 sq_size;
	u32 hw_sq_size;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 polarity;	/* valid-bit polarity for the current ring pass */
	u8 hmc_profile;
	u8 ena_vf_count;
	u8 timeout_count;
	u8 ceqs_per_vf;
	u8 timer_slots;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	bool en_rem_endpoint_trk:1;
	enum irdma_protocol_used protocol_used;
};
425
/* Asynchronous event queue (AEQ) control state. */
struct irdma_sc_aeq {
	u32 size;
	u64 aeq_elem_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_aeqe *aeqe_base;
	void *pbl_list;
	u32 elem_cnt;
	struct irdma_ring aeq_ring;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	u32 msix_idx;
	u8 polarity;	/* valid-bit polarity for the current ring pass */
	bool virtual_map:1;
};

/* Completion event queue (CEQ) control state; a CEQ aggregates
 * completion notifications from the CQs registered on it.
 */
struct irdma_sc_ceq {
	u32 size;
	u64 ceq_elem_pa;
	struct irdma_sc_dev *dev;
	struct irdma_ceqe *ceqe_base;
	void *pbl_list;
	u32 elem_cnt;
	u16 ceq_id;
	struct irdma_ring ceq_ring;
	u8 pbl_chunk_size;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	u8 polarity;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_cq **reg_cq;	/* CQs registered on this CEQ */
	u32 reg_cq_size;
	spinlock_t req_cq_lock; /* protect access to reg_cq array */
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
};
462
/* Completion queue control state; embeds the user-space-shared CQ
 * ring state (cq_uk).
 */
struct irdma_sc_cq {
	struct irdma_cq_uk cq_uk;
	u64 cq_pa;
	u64 shadow_area_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_vsi *vsi;
	void *pbl_list;
	void *back_cq;	/* back-pointer to the OS-level cq wrapper */
	u32 shadow_read_threshold;
	u16 ceq_id;
	u8 pbl_chunk_size;
	u8 cq_type;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	bool ceqe_mask:1;
	bool virtual_map:1;
	bool check_overflow:1;
	bool ceq_id_valid:1;
	bool tph_en:1;
};

/* Queue pair control state; embeds the user-space-shared QP ring
 * state (qp_uk) plus IEQ/terminate bookkeeping.
 */
struct irdma_sc_qp {
	struct irdma_qp_uk qp_uk;
	u64 sq_pa;
	u64 rq_pa;
	u64 hw_host_ctx_pa;
	u64 shadow_area_pa;
	u64 q2_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_pd *pd;
	__le64 *hw_host_ctx;
	void *llp_stream_handle;
	struct irdma_pfpdu pfpdu;	/* IEQ partial-FPDU reassembly state */
	u32 ieq_qp;
	u8 *q2_buf;
	u64 qp_compl_ctx;
	u32 push_idx;
	u16 qs_handle;
	u16 push_offset;
	u8 flush_wqes_count;
	u8 sq_tph_val;
	u8 rq_tph_val;
	u8 qp_state;
	u8 hw_sq_size;
	u8 hw_rq_size;
	u8 src_mac_addr_idx;
	bool suspended:1;
	bool on_qoslist:1;
	bool ieq_pass_thru:1;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
	bool flush_sq:1;
	bool flush_rq:1;
	bool sq_flush_code:1;
	bool rq_flush_code:1;
	enum irdma_flush_opcode flush_code;
	enum irdma_qp_event_type event_type;
	u8 term_flags;
	u8 user_pri;
	struct list_head list;	/* linkage on the per-QoS qplist */
};
528
/* User-priority (UP) mapping update sent via CQP. */
struct irdma_up_info {
	u8 map[8];	/* UP -> UP/TC mapping table */
	u8 cnp_up_override;
	u16 hmc_fcn_idx;
	bool use_vlan:1;
	bool use_cnp_up_override:1;
};

#define IRDMA_MAX_WS_NODES	0x3FF
#define IRDMA_WS_NODE_INVALID	0xFFFF

/* Batch of work-scheduler nodes to move to a new port. */
struct irdma_ws_move_node_info {
	u16 node_id[16];
	u8 num_nodes;	/* count of valid entries in node_id[] */
	u8 target_port;
	bool resume_traffic:1;
};

/* Description of a single work-scheduler tree node. */
struct irdma_ws_node_info {
	u16 id;
	u16 vsi;
	u16 parent_id;
	u16 qs_handle;
	bool type_leaf:1;
	bool enable:1;
	u8 prio_type;	/* enum irdma_sched_prio_type value */
	u8 tc;
	u8 weight;
};

/* Miscellaneous HMC FPM sizing values parsed from the query buffer. */
struct irdma_hmc_fpm_misc {
	u32 max_ceqs;
	u32 max_sds;
	u32 xf_block_size;
	u32 q1_block_size;
	u32 ht_multiplier;
	u32 timer_bucket;
	u32 rrf_block_size;
	u32 ooiscf_block_size;
};

/* Default relative bandwidth for WS leaf/parent nodes. */
#define IRDMA_LEAF_DEFAULT_REL_BW	64
#define IRDMA_PARENT_DEFAULT_REL_BW	1
572
/* Per-user-priority QoS level: its QP list and scheduler attributes. */
struct irdma_qos {
	struct list_head qplist;	/* QPs assigned to this QoS level */
	struct mutex qos_mutex; /* protect QoS attributes per QoS level */
	u32 l2_sched_node_id;
	u16 qs_handle;
	u8 traffic_class;
	u8 rel_bw;
	u8 prio_type;
	bool valid:1;
};

/* Cached flow-control/PFC configuration check result per priority. */
struct irdma_config_check {
	bool config_ok:1;
	bool lfc_set:1;		/* link-level flow control enabled */
	bool pfc_set:1;		/* priority flow control enabled */
	u8 traffic_class;
	u8 prio;
	u16 qs_handle;
};
592
#define IRDMA_INVALID_STATS_IDX 0xff
/* Per-VSI (virtual station interface) state: PUDA resources (ILQ/IEQ),
 * QoS levels, statistics, and qset registration callbacks.
 */
struct irdma_sc_vsi {
	u16 vsi_idx;
	struct irdma_sc_dev *dev;
	void *back_vsi;		/* back-pointer to the OS-level vsi wrapper */
	u32 ilq_count;
	struct irdma_virt_mem ilq_mem;
	struct irdma_puda_rsrc *ilq;	/* inbound listen queue */
	u32 ieq_count;
	struct irdma_virt_mem ieq_mem;
	struct irdma_puda_rsrc *ieq;	/* iWARP exception queue */
	u32 exception_lan_q;
	u16 mtu;
	enum irdma_vm_vf_type vm_vf_type;
	bool tc_change_pending:1;
	bool mtu_change_pending:1;
	struct irdma_vsi_pestat *pestat;
	atomic_t qp_suspend_reqs;
	int (*register_qset)(struct irdma_sc_vsi *vsi,
			     struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
	struct irdma_config_check cfg_check[IRDMA_MAX_USER_PRIORITY];
	bool tc_print_warning[IRDMA_MAX_TRAFFIC_CLASS];
	u8 qos_rel_bw;
	u8 qos_prio_type;
	u16 stats_idx;	/* IRDMA_INVALID_STATS_IDX when unassigned */
	u8 dscp_map[IRDMA_DSCP_NUM_VAL];
	struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
	bool dscp_mode:1;
};
624
/* Top-level device control object: FPM buffers, doorbell mappings,
 * control queues (CQP/CCQ/AEQ/CEQs), HMC info, and WS tree.
 */
struct irdma_sc_dev {
	struct list_head cqp_cmd_head; /* head of the CQP command list */
	spinlock_t cqp_lock; /* protect CQP list access */
	struct irdma_dma_mem vf_fpm_query_buf[IRDMA_MAX_PE_ENA_VF_COUNT];
	u64 fpm_query_buf_pa;
	u64 fpm_commit_buf_pa;
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	u32 IOMEM *wqe_alloc_db;	/* WQE-allocate doorbell */
	u32 IOMEM *cq_arm_db;		/* CQ arm doorbell */
	u32 IOMEM *aeq_alloc_db;	/* AEQ-allocate doorbell */
	u32 IOMEM *cqp_db;		/* CQP doorbell */
	u32 IOMEM *cq_ack_db;		/* CQ acknowledge doorbell */
	u32 IOMEM *hw_regs[IRDMA_MAX_REGS];
	u32 ceq_itr; /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */
	u64 hw_masks[IRDMA_MAX_MASKS];
	u8 hw_shifts[IRDMA_MAX_SHIFTS];
	const struct irdma_hw_stat_map *hw_stats_map;
	u64 feature_info[IRDMA_MAX_FEATURES];	/* indexed by enum irdma_feature_type */
	u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
	struct irdma_hw_attrs hw_attrs;
	struct irdma_hmc_info *hmc_info;
	struct irdma_sc_cqp *cqp;
	struct irdma_sc_aeq *aeq;
	struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
	struct irdma_sc_cq *ccq;
	const struct irdma_irq_ops *irq_ops;
	struct irdma_hmc_fpm_misc hmc_fpm_misc;
	struct irdma_ws_node *ws_tree_root;
	struct mutex ws_mutex; /* ws tree mutex */
	u32 debug_mask;		/* enum irdma_debug_flag bits */
	u16 num_vfs;
	u16 hmc_fn_id;
	bool ceq_valid:1;
	int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_reset)(struct irdma_sc_vsi *vsi);
};
664
/* Parameters for a CQ modify/resize CQP operation. */
struct irdma_modify_cq_info {
	u64 cq_pa;
	struct irdma_cqe *cq_base;
	u32 cq_size;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	bool virtual_map:1;
	bool check_overflow:1;
	bool cq_resize:1;
};

/* Parameters for a QP create CQP operation. */
struct irdma_create_qp_info {
	bool ord_valid:1;
	bool tcp_ctx_valid:1;
	bool cq_num_valid:1;
	bool arp_cache_idx_valid:1;
	bool mac_valid:1;
	bool force_lpb:1;	/* force loopback */
	u8 next_iwarp_state;
};

/* Parameters for a QP modify CQP operation (state transitions,
 * terminate handling, TCP context updates).
 */
struct irdma_modify_qp_info {
	u64 rx_win0;
	u64 rx_win1;
	u16 new_mss;
	u8 next_iwarp_state;
	u8 curr_iwarp_state;
	u8 termlen;
	bool ord_valid:1;
	bool tcp_ctx_valid:1;
	bool udp_ctx_valid:1;
	bool cq_num_valid:1;
	bool arp_cache_idx_valid:1;
	bool reset_tcp_conn:1;
	bool remove_hash_idx:1;
	bool dont_send_term:1;
	bool dont_send_fin:1;
	bool cached_var_valid:1;
	bool mss_change:1;
	bool force_lpb:1;
	bool mac_valid:1;
};

/* Decoded CQP completion (CCQ CQE) information. */
struct irdma_ccq_cqe_info {
	struct irdma_sc_cqp *cqp;
	u64 scratch;		/* caller cookie from the original request */
	u32 op_ret_val;
	u16 maj_err_code;
	u16 min_err_code;
	u8 op_code;
	bool error:1;
};

/* Per-traffic-class QoS attributes delivered in L2 parameter updates. */
struct irdma_qos_tc_info {
	u64 tc_ctx;
	u8 rel_bw;
	u8 prio_type;
	u8 egress_virt_up;
	u8 ingress_virt_up;
};
726
/* L2/DCB parameters pushed down from the LAN driver (TC maps, MTU,
 * DSCP mapping, per-TC QoS info).
 */
struct irdma_l2params {
	struct irdma_qos_tc_info tc_info[IRDMA_MAX_USER_PRIORITY];
	u32 num_apps;
	u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY];
	u16 mtu;
	u8 up2tc[IRDMA_MAX_USER_PRIORITY];	/* user priority -> TC map */
	u8 dscp_map[IRDMA_DSCP_NUM_VAL];
	u8 num_tc;
	u8 vsi_rel_bw;
	u8 vsi_prio_type;
	bool mtu_changed:1;
	bool tc_changed:1;
	bool dscp_mode:1;
};

/* Caller-supplied parameters for VSI initialization. */
struct irdma_vsi_init_info {
	struct irdma_sc_dev *dev;
	void *back_vsi;
	struct irdma_l2params *params;
	u16 exception_lan_q;
	u16 pf_data_vsi_num;
	enum irdma_vm_vf_type vm_vf_type;
	int (*register_qset)(struct irdma_sc_vsi *vsi,
			     struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
};

/* Parameters for attaching a statistics instance to a VSI. */
struct irdma_vsi_stats_info {
	struct irdma_vsi_pestat *pestat;
	u8 fcn_id;
	bool alloc_stats_inst:1;
};

/* Caller-supplied parameters for irdma_sc_dev initialization. */
struct irdma_device_init_info {
	u64 fpm_query_buf_pa;
	u64 fpm_commit_buf_pa;
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	void IOMEM *bar0;
	u16 max_vfs;
	u16 hmc_fn_id;
	u32 debug_mask;		/* enum irdma_debug_flag bits */
};
772
/* Caller-supplied parameters for irdma_sc_ceq_init(). */
struct irdma_ceq_init_info {
	u64 ceqe_pa;
	struct irdma_sc_dev *dev;
	u64 *ceqe_base;
	void *pbl_list;
	u32 elem_cnt;
	u16 ceq_id;
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
	u8 pbl_chunk_size;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_cq **reg_cq;
};

/* Caller-supplied parameters for irdma_sc_aeq_init(). */
struct irdma_aeq_init_info {
	u64 aeq_elem_pa;
	struct irdma_sc_dev *dev;
	u32 *aeqe_base;
	void *pbl_list;
	u32 elem_cnt;
	bool virtual_map:1;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	u32 msix_idx;
};

/* Caller-supplied parameters for irdma_sc_ccq_init(). */
struct irdma_ccq_init_info {
	u64 cq_pa;
	u64 shadow_area_pa;
	struct irdma_sc_dev *dev;
	struct irdma_cqe *cq_base;
	__le64 *shadow_area;
	void *pbl_list;
	u32 num_elem;
	u32 shadow_read_threshold;
	u16 ceq_id;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool avoid_mem_cflct:1;
	bool virtual_map:1;
	bool tph_en:1;
	u8 tph_val;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	struct irdma_sc_vsi *vsi;
};
822
/* UDP transport context pushed into the HW QP context for RoCEv2 QPs. */
struct irdma_udp_offload_info {
	bool ipv4:1;
	bool insert_vlan_tag:1;
	u8 ttl;
	u8 tos;
	u16 src_port;
	u16 dst_port;
	u32 dest_ip_addr[4];	/* IPv6-sized; IPv4 usage per ipv4 flag */
	u32 snd_mss;
	u16 vlan_tag;
	u16 arp_idx;
	u32 flow_label;
	u8 udp_state;
	u32 psn_nxt;
	u32 lsn;
	u32 epsn;
	u32 psn_max;
	u32 psn_una;
	u32 local_ipaddr[4];
	u32 cwnd;
	u8 rexmit_thresh;
	u8 rnr_nak_thresh;
};

/* RoCE-specific QP context attributes (keys, credits, CC options). */
struct irdma_roce_offload_info {
	u16 p_key;
	u32 err_rq_idx;
	u32 qkey;
	u32 dest_qp;
	u8 roce_tver;
	u8 ack_credits;
	u8 err_rq_idx_valid;
	u32 pd_id;
	u16 ord_size;
	u16 ird_size;
	bool is_qp1:1;
	bool udprivcq_en:1;
	bool dcqcn_en:1;
	bool rcv_no_icrc:1;
	bool wr_rdresp_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool dctcp_en:1;
	bool fw_cc_enable:1;
	bool use_stats_inst:1;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETHER_ADDR_LEN];
	u8 rtomin;
};

/* iWARP-specific QP context attributes (MPA markers, DDP/RDMAP
 * versions, access enables).
 */
struct irdma_iwarp_offload_info {
	u16 rcv_mark_offset;
	u16 snd_mark_offset;
	u8 ddp_ver;
	u8 rdmap_ver;
	u8 iwarp_mode;
	u32 err_rq_idx;
	u32 pd_id;
	u16 ord_size;
	u16 ird_size;
	bool ib_rd_en:1;
	bool align_hdrs:1;
	bool rcv_no_mpa_crc:1;
	bool err_rq_idx_valid:1;
	bool snd_mark_en:1;
	bool rcv_mark_en:1;
	bool wr_rdresp_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool use_stats_inst:1;
	bool ecn_en:1;
	bool dctcp_en:1;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETHER_ADDR_LEN];
	u8 rtomin;
};
907
908 struct irdma_tcp_offload_info {
909 bool ipv4:1;
910 bool no_nagle:1;
911 bool insert_vlan_tag:1;
912 bool time_stamp:1;
913 bool drop_ooo_seg:1;
914 bool avoid_stretch_ack:1;
915 bool wscale:1;
916 bool ignore_tcp_opt:1;
917 bool ignore_tcp_uns_opt:1;
918 u8 cwnd_inc_limit;
919 u8 dup_ack_thresh;
920 u8 ttl;
921 u8 src_mac_addr_idx;
922 u8 tos;
923 u16 src_port;
924 u16 dst_port;
925 u32 dest_ip_addr[4];
926 //u32 dest_ip_addr0;
927 //u32 dest_ip_addr1;
928 //u32 dest_ip_addr2;
929 //u32 dest_ip_addr3;
930 u32 snd_mss;
931 u16 syn_rst_handling;
932 u16 vlan_tag;
933 u16 arp_idx;
934 u32 flow_label;
935 u8 tcp_state;
936 u8 snd_wscale;
937 u8 rcv_wscale;
938 u32 time_stamp_recent;
939 u32 time_stamp_age;
940 u32 snd_nxt;
941 u32 snd_wnd;
942 u32 rcv_nxt;
943 u32 rcv_wnd;
944 u32 snd_max;
945 u32 snd_una;
946 u32 srtt;
947 u32 rtt_var;
948 u32 ss_thresh;
949 u32 cwnd;
950 u32 snd_wl1;
951 u32 snd_wl2;
952 u32 max_snd_window;
953 u8 rexmit_thresh;
954 u32 local_ipaddr[4];
955 };
956
/* Aggregated inputs used to build the HW QP host context; exactly one
 * member of each union is valid, selected by the QP's protocol
 * (tcp/iwarp for iWARP, udp/roce for RoCE).
 */
struct irdma_qp_host_ctx_info {
	u64 qp_compl_ctx;
	union {
		struct irdma_tcp_offload_info *tcp_info;
		struct irdma_udp_offload_info *udp_info;
	};
	union {
		struct irdma_iwarp_offload_info *iwarp_info;
		struct irdma_roce_offload_info *roce_info;
	};
	u32 send_cq_num;
	u32 rcv_cq_num;
	u32 rem_endpoint_idx;
	u16 stats_idx;
	bool tcp_info_valid:1;
	bool iwarp_info_valid:1;
	bool stats_idx_valid:1;
	u8 user_pri;
};

/* Decoded asynchronous event queue entry. */
struct irdma_aeqe_info {
	u64 compl_ctx;
	u32 qp_cq_id;	/* QP or CQ id, per qp/cq flags below */
	u32 wqe_idx;
	u16 ae_id;
	u8 tcp_state;
	u8 iwarp_state;
	bool qp:1;
	bool cq:1;
	bool sq:1;
	bool rq:1;
	bool in_rdrsp_wr:1;
	bool out_rdrsp:1;
	bool aeqe_overflow:1;
	/* This flag is used to determine if we should pass the rq tail
	 * in the QP context for FW/HW. It is set when ae_src is rq for GEN1/GEN2
	 * And additionally set for inbound atomic, read and write for GEN3
	 */
	bool err_rq_idx_valid:1;
	u8 q2_data_written;
	u8 ae_src;
};
999
/* Parameters for an allocate-STag CQP operation. */
struct irdma_allocate_stag_info {
	u64 total_len;
	u64 first_pm_pbl_idx;
	u32 chunk_size;
	u32 stag_idx;
	u32 page_size;
	u32 pd_id;
	u16 access_rights;
	bool remote_access:1;
	bool use_hmc_fcn_index:1;
	bool all_memory:1;
	u16 hmc_fcn_index;
};

/* Parameters for a memory-window allocation CQP operation. */
struct irdma_mw_alloc_info {
	u32 mw_stag_index;
	u32 page_size;
	u32 pd_id;
	bool remote_access:1;
	bool mw_wide:1;		/* type-1 wide window */
	bool mw1_bind_dont_vldt_key:1;
};

/* Parameters for a non-shared memory-region registration. */
struct irdma_reg_ns_stag_info {
	u64 reg_addr_pa;
	u64 va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool use_hmc_fcn_index:1;
	u16 hmc_fcn_index;
	bool all_memory:1;
};

/* Parameters for a fast-register (FMR-style) work request. */
struct irdma_fast_reg_stag_info {
	u64 wr_id;
	u64 reg_addr_pa;
	u64 fbo;	/* first byte offset */
	void *va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool local_fence:1;
	bool read_fence:1;
	bool signaled:1;
	bool push_wqe:1;
	bool use_hmc_fcn_index:1;
	u16 hmc_fcn_index;
	bool defer_flag:1;
};

/* Parameters for a deallocate-STag CQP operation. */
struct irdma_dealloc_stag_info {
	u32 stag_idx;
	u32 pd_id;
	bool mr:1;	/* STag belongs to an MR (vs a MW) */
	bool dealloc_pbl:1;
	bool skip_flush_markers:1;
};

/* Parameters for registering a shared memory region. */
struct irdma_register_shared_stag {
	u64 va;
	enum irdma_addressing_type addr_type;
	irdma_stag_index new_stag_idx;
	irdma_stag_index parent_stag_idx;
	u32 access_rights;
	u32 pd_id;
	u32 page_size;
	irdma_stag_key new_stag_key;
};
1081
/* Caller-supplied parameters for irdma_sc_qp_init(). */
struct irdma_qp_init_info {
	struct irdma_qp_uk_init_info qp_uk_init_info;
	struct irdma_sc_pd *pd;
	struct irdma_sc_vsi *vsi;
	__le64 *host_ctx;
	u8 *q2;
	u64 sq_pa;
	u64 rq_pa;
	u64 host_ctx_pa;
	u64 q2_pa;
	u64 shadow_area_pa;
	u8 sq_tph_val;
	u8 rq_tph_val;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
};

/* Caller-supplied parameters for irdma_sc_cq_init(). */
struct irdma_cq_init_info {
	struct irdma_sc_dev *dev;
	u64 cq_base_pa;
	u64 shadow_area_pa;
	u32 shadow_read_threshold;
	u16 ceq_id;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	bool virtual_map:1;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool tph_en:1;
	u8 tph_val;
	u8 type;
	struct irdma_cq_uk_init_info cq_uk_init_info;
	struct irdma_sc_vsi *vsi;
};

/* Parameters for an upload-QP-context CQP operation (debug capture). */
struct irdma_upload_context_info {
	u64 buf_pa;
	u32 qp_id;
	u8 qp_type;
	bool freeze_qp:1;
	bool raw_format:1;
};
1127
/* Parameters for managing a local MAC table entry. */
struct irdma_local_mac_entry_info {
	u8 mac_addr[6];
	u16 entry_idx;
};

/* Parameters for an add-ARP-cache-entry CQP operation. */
struct irdma_add_arp_cache_entry_info {
	u8 mac_addr[ETHER_ADDR_LEN];
	u32 reach_max;
	u16 arp_index;
	bool permanent:1;
};

/* Parameters for an APBVT (accelerated port table) CQP operation. */
struct irdma_apbvt_info {
	u16 port;
	bool add:1;	/* true = add entry, false = remove */
};

/* Parameters for a quad-hash (connection steering) table operation. */
struct irdma_qhash_table_info {
	struct irdma_sc_vsi *vsi;
	enum irdma_quad_hash_manage_type manage;
	enum irdma_quad_entry_type entry_type;
	bool vlan_valid:1;
	bool ipv4_valid:1;
	u8 mac_addr[ETHER_ADDR_LEN];
	u16 vlan_id;
	u8 user_pri;
	u32 qp_num;
	u32 dest_ip[4];
	u32 src_ip[4];
	u16 dest_port;
	u16 src_port;
};

/* Parameters for a manage-push-page CQP operation. */
struct irdma_cqp_manage_push_page_info {
	u32 push_idx;
	u16 qs_handle;
	u8 free_page;
	u8 push_page_type;
};

/* Parameters for a QP flush-WQEs CQP operation. */
struct irdma_qp_flush_info {
	u16 sq_minor_code;
	u16 sq_major_code;
	u16 rq_minor_code;
	u16 rq_major_code;
	u16 ae_code;
	u8 ae_src;
	bool sq:1;
	bool rq:1;
	bool userflushcode:1;
	bool generate_ae:1;
};

/* Parameters for a generate-AE CQP operation. */
struct irdma_gen_ae_info {
	u16 ae_code;
	u8 ae_src;
};

/* Tracks CQP completion progress across timeout polls. */
struct irdma_cqp_timeout {
	u64 compl_cqp_cmds;	/* completed-ops count at last check */
	u32 count;		/* consecutive checks with no progress */
};

/* HW-generation-specific interrupt configuration callbacks. */
struct irdma_irq_ops {
	void (*irdma_cfg_aeq)(struct irdma_sc_dev *dev, u32 idx, bool enable);
	void (*irdma_cfg_ceq)(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
			      bool enable);
	void (*irdma_dis_irq)(struct irdma_sc_dev *dev, u32 idx);
	void (*irdma_en_irq)(struct irdma_sc_dev *dev, u32 idx);
};
1198
/* CCQ (CQP completion queue) control */
void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq);
int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
			bool check_overflow, bool post_sq);
int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq);
int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
			      struct irdma_ccq_cqe_info *info);
int irdma_sc_ccq_init(struct irdma_sc_cq *ccq,
		      struct irdma_ccq_init_info *info);

/* CEQ control */
int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq);
int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);

int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq);
int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
		      struct irdma_ceq_init_info *info);
void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq);
void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq);

/* AEQ control */
int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
		      struct irdma_aeq_init_info *info);
int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
			   struct irdma_aeqe_info *info);
void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);

/* PD, CQP, QP and CQ control */
void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
		      int abi_ver);
void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
			      struct irdma_sc_dev *dev);
int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp, bool free_hwcqp);
int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
		      struct irdma_cqp_init_info *info);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
				  struct irdma_ccq_cqe_info *cmpl_info);
int irdma_sc_qp_create(struct irdma_sc_qp *qp,
		       struct irdma_create_qp_info *info, u64 scratch,
		       bool post_sq);
int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
			bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq);
int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
			   struct irdma_qp_flush_info *info, u64 scratch,
			   bool post_sq);
int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info);
int irdma_sc_qp_modify(struct irdma_sc_qp *qp,
		       struct irdma_modify_qp_info *info, u64 scratch,
		       bool post_sq);
void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
			irdma_stag stag);
void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			struct irdma_qp_host_ctx_info *info);
void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			     struct irdma_qp_host_ctx_info *info);
int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq);
int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info);
void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info);
int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch, bool post_sq);
int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
					u16 hmc_fn_id, bool post_sq,
					bool poll_registers);

/* Statistics */
void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
/*
 * cqp_info - argument bundle for one deferred Control-QP (CQP) command.
 *
 * Each arm of the union carries the parameters for a single CQP
 * operation: typically a pointer to the object being operated on, an
 * operation-specific info struct, and a @scratch cookie (private data
 * for the CQP WQE, handed back with the completion).  Only the arm
 * matching the issued command (see cqp_cmds_info.cqp_cmd) is valid.
 */
struct cqp_info {
	union {
		struct {
			struct irdma_sc_qp *qp;
			struct irdma_create_qp_info info;
			u64 scratch;
		} qp_create;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_modify_qp_info info;
			u64 scratch;
		} qp_modify;

		struct {
			struct irdma_sc_qp *qp;
			u64 scratch;
			/* NOTE(review): presumably drop the QP from the
			 * connection hash on destroy — confirm in .c file */
			bool remove_hash_idx;
			/* NOTE(review): looks like "ignore bound memory
			 * windows" — confirm against irdma_sc_qp_destroy() */
			bool ignore_mw_bnd;
		} qp_destroy;

		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
			/* NOTE(review): assumed to enable HW CQ overflow
			 * detection — confirm */
			bool check_overflow;
		} cq_create;

		struct {
			struct irdma_sc_cq *cq;
			struct irdma_modify_cq_info info;
			u64 scratch;
		} cq_modify;

		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
		} cq_destroy;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_allocate_stag_info info;
			u64 scratch;
		} alloc_stag;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_mw_alloc_info info;
			u64 scratch;
		} mw_alloc;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_reg_ns_stag_info info;
			u64 scratch;
		} mr_reg_non_shared;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_dealloc_stag_info info;
			u64 scratch;
		} dealloc_stag;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_add_arp_cache_entry_info info;
			u64 scratch;
		} add_arp_cache_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u16 arp_index;
		} del_arp_cache_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_local_mac_entry_info info;
			u64 scratch;
		} add_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u8 entry_idx;
			/* NOTE(review): presumably forces deletion even when
			 * the entry is still referenced — confirm */
			u8 ignore_ref_count;
		} del_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
		} alloc_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_cqp_manage_push_page_info info;
			u64 scratch;
		} manage_push_page;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_upload_context_info info;
			u64 scratch;
		} qp_upload_context;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_hmc_fcn_info info;
			u64 scratch;
		} manage_hmc_pm;

		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_create;

		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_destroy;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_create;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_destroy;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_qp_flush_info info;
			u64 scratch;
		} qp_flush_wqes;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_gen_ae_info info;
			u64 scratch;
		} gen_ae;

		/* FPM (function private memory) value query/commit: buffer is
		 * described by both its kernel VA and DMA/PA — assumed DMA
		 * address for the HW, confirm with callers */
		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u16 hmc_fn_id;
			u64 scratch;
		} query_fpm_val;

		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u16 hmc_fn_id;
			u64 scratch;
		} commit_fpm_val;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_apbvt_info info;
			u64 scratch;
		} manage_apbvt_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_qhash_table_info info;
			u64 scratch;
		} manage_qhash_table_entry;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_update_sds_info info;
			u64 scratch;
		} update_pe_sds;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_sc_qp *qp;
			u64 scratch;
		} suspend_resume;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info info;
			u64 scratch;
		} ah_create;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info info;
			u64 scratch;
		} ah_destroy;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_create;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_destroy;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_modify;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_stats_gather_info info;
			u64 scratch;
		} stats_gather;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ws_node_info info;
			u64 scratch;
		} ws_node;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ws_move_node_info info;
			u64 scratch;
		} ws_move_node;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_up_info info;
			u64 scratch;
		} up_map;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_dma_mem query_buff_mem;
			u64 scratch;
		} query_rdma;
	} u;
};
1506
/*
 * cqp_cmds_info - one node on the deferred CQP command list.
 */
struct cqp_cmds_info {
	struct list_head cqp_cmd_entry;	/* linkage on the pending-command list */
	u8 cqp_cmd;			/* command opcode; selects the valid arm of @in.u */
	u8 post_sq;			/* nonzero: post to the CQP SQ immediately */
	struct cqp_info in;		/* per-command argument bundle */
	int cqp_cmd_exec_status;	/* status recorded after execution */
	/* NOTE(review): presumably distinguishes create vs. destroy flavors
	 * of paired commands (AH/MC) — confirm against dispatch code */
	bool create;
};
1515
/* Get the next CQP SQ WQE; the WQE's index is returned through @wqe_idx. */
__le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
					   u32 *wqe_idx);
1518
1519 /**
1520 * irdma_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
1521 * @cqp: struct for cqp hw
1522 * @scratch: private data for CQP WQE
1523 */
irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp * cqp,u64 scratch)1524 static inline __le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch)
1525 {
1526 u32 wqe_idx;
1527
1528 return irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
1529 }
1530 #endif /* IRDMA_TYPE_H */
1531