/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
 * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef _EFA_ADMIN_CMDS_H_
#define _EFA_ADMIN_CMDS_H_

#define EFA_ADMIN_API_VERSION_MAJOR          0
#define EFA_ADMIN_API_VERSION_MINOR          1

/* EFA admin queue opcodes */
enum efa_admin_aq_opcode {
	EFA_ADMIN_CREATE_QP                         = 1,
	EFA_ADMIN_MODIFY_QP                         = 2,
	EFA_ADMIN_QUERY_QP                          = 3,
	EFA_ADMIN_DESTROY_QP                        = 4,
	EFA_ADMIN_CREATE_AH                         = 5,
	EFA_ADMIN_DESTROY_AH                        = 6,
	EFA_ADMIN_REG_MR                            = 7,
	EFA_ADMIN_DEREG_MR                          = 8,
	EFA_ADMIN_CREATE_CQ                         = 9,
	EFA_ADMIN_DESTROY_CQ                        = 10,
	EFA_ADMIN_GET_FEATURE                       = 11,
	EFA_ADMIN_SET_FEATURE                       = 12,
	EFA_ADMIN_GET_STATS                         = 13,
	EFA_ADMIN_ALLOC_PD                          = 14,
	EFA_ADMIN_DEALLOC_PD                        = 15,
	EFA_ADMIN_ALLOC_UAR                         = 16,
	EFA_ADMIN_DEALLOC_UAR                       = 17,
	EFA_ADMIN_CREATE_EQ                         = 18,
	EFA_ADMIN_DESTROY_EQ                        = 19,
	EFA_ADMIN_ALLOC_MR                          = 20,
	EFA_ADMIN_MAX_OPCODE                        = 20,
};

enum efa_admin_aq_feature_id {
	EFA_ADMIN_DEVICE_ATTR                       = 1,
	EFA_ADMIN_AENQ_CONFIG                       = 2,
	EFA_ADMIN_NETWORK_ATTR                      = 3,
	EFA_ADMIN_QUEUE_ATTR                        = 4,
	EFA_ADMIN_HW_HINTS                          = 5,
	EFA_ADMIN_HOST_INFO                         = 6,
	EFA_ADMIN_EVENT_QUEUE_ATTR                  = 7,
};

/* QP transport type */
enum efa_admin_qp_type {
	/* Unreliable Datagram */
	EFA_ADMIN_QP_TYPE_UD                        = 1,
	/* Scalable Reliable Datagram */
	EFA_ADMIN_QP_TYPE_SRD                       = 2,
};

/* QP state */
enum efa_admin_qp_state {
	EFA_ADMIN_QP_STATE_RESET                    = 0,
	EFA_ADMIN_QP_STATE_INIT                     = 1,
	EFA_ADMIN_QP_STATE_RTR                      = 2,
	EFA_ADMIN_QP_STATE_RTS                      = 3,
	EFA_ADMIN_QP_STATE_SQD                      = 4,
	EFA_ADMIN_QP_STATE_SQE                      = 5,
	EFA_ADMIN_QP_STATE_ERR                      = 6,
};

enum efa_admin_get_stats_type {
	EFA_ADMIN_GET_STATS_TYPE_BASIC              = 0,
	EFA_ADMIN_GET_STATS_TYPE_MESSAGES           = 1,
	EFA_ADMIN_GET_STATS_TYPE_RDMA_READ          = 2,
	EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE         = 3,
	EFA_ADMIN_GET_STATS_TYPE_NETWORK            = 4,
};

enum efa_admin_get_stats_scope {
	EFA_ADMIN_GET_STATS_SCOPE_ALL               = 0,
	EFA_ADMIN_GET_STATS_SCOPE_QUEUE             = 1,
};

/*
 * QP allocation sizes, derived from the QP capabilities by the fabric
 * QueuePair (QP) create command.
 */
struct efa_admin_qp_alloc_size {
	/* Send descriptor ring size in bytes */
	u32 send_queue_ring_size;

	/* Max number of WQEs that can be outstanding on send queue. */
	u32 send_queue_depth;

	/*
	 * Recv descriptor ring size in bytes, sufficient for user-provided
	 * number of WQEs
	 */
	u32 recv_queue_ring_size;

	/* Max number of WQEs that can be outstanding on recv queue */
	u32 recv_queue_depth;
};

struct efa_admin_create_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Protection Domain associated with this QP */
	u16 pd;

	/* QP type */
	u8 qp_type;

	/*
	 * 0 : sq_virt - If set, SQ ring base address is
	 *    virtual (IOVA returned by MR registration)
	 * 1 : rq_virt - If set, RQ ring base address is
	 *    virtual (IOVA returned by MR registration)
	 * 2 : unsolicited_write_recv - If set, work requests
	 *    will not be consumed for incoming RDMA write with
	 *    immediate
	 * 7:3 : reserved - MBZ
	 */
	u8 flags;

	/*
	 * Send queue (SQ) ring base physical address. This field is not
	 * used if this is a Low Latency Queue (LLQ).
	 */
	u64 sq_base_addr;

	/* Receive queue (RQ) ring base address. */
	u64 rq_base_addr;

	/* Index of CQ to be associated with Send Queue completions */
	u32 send_cq_idx;

	/* Index of CQ to be associated with Recv Queue completions */
	u32 recv_cq_idx;

	/*
	 * Memory registration key for the SQ ring, used only when not in
	 * LLQ mode and base address is virtual
	 */
	u32 sq_l_key;

	/*
	 * Memory registration key for the RQ ring, used only when base
	 * address is virtual
	 */
	u32 rq_l_key;

	/* Requested QP allocation sizes */
	struct efa_admin_qp_alloc_size qp_alloc_size;

	/* UAR number */
	u16 uar;

	/* Requested service level for the QP, 0 is the default SL */
	u8 sl;

	/* MBZ */
	u8 reserved;

	/* MBZ */
	u32 reserved2;
};

struct efa_admin_create_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/*
	 * Opaque handle to be used for subsequent admin operations on the
	 * QP
	 */
	u32 qp_handle;

	/*
	 * QP number in the given EFA virtual device. Least-significant bits (as
	 * needed according to max_qp) carry unique QP ID
	 */
	u16 qp_num;

	/* MBZ */
	u16 reserved;

	/* Index of sub-CQ for Send Queue completions */
	u16 send_sub_cq_idx;

	/* Index of sub-CQ for Receive Queue completions */
	u16 recv_sub_cq_idx;

	/* SQ doorbell address, as offset to PCIe DB BAR */
	u32 sq_db_offset;

	/* RQ doorbell address, as offset to PCIe DB BAR */
	u32 rq_db_offset;

	/*
	 * low latency send queue ring base address as an offset to PCIe
	 * MMIO LLQ_MEM BAR
	 */
	u32 llq_descriptors_offset;
};

struct efa_admin_modify_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/*
	 * Mask indicating which fields should be updated
	 * 0 : qp_state
	 * 1 : cur_qp_state
	 * 2 : qkey
	 * 3 : sq_psn
	 * 4 : sq_drained_async_notify
	 * 5 : rnr_retry
	 * 31:6 : reserved
	 */
	u32 modify_mask;

	/* QP handle returned by create_qp command */
	u32 qp_handle;

	/* QP state */
	u32 qp_state;

	/* Override current QP state (before applying the transition) */
	u32 cur_qp_state;

	/* QKey */
	u32 qkey;

	/* SQ PSN */
	u32 sq_psn;

	/* Enable async notification when SQ is drained */
	u8 sq_drained_async_notify;

	/* Number of RNR retries (valid only for SRD QPs) */
	u8 rnr_retry;

	/* MBZ */
	u16 reserved2;
};

struct efa_admin_modify_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

struct efa_admin_query_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* QP handle returned by create_qp command */
	u32 qp_handle;
};

struct efa_admin_query_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* QP state */
	u32 qp_state;

	/* QKey */
	u32 qkey;

	/* SQ PSN */
	u32 sq_psn;

	/* Indicates that draining is in progress */
	u8 sq_draining;

	/* Number of RNR retries (valid only for SRD QPs) */
	u8 rnr_retry;

	/* MBZ */
	u16 reserved2;
};

struct efa_admin_destroy_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* QP handle returned by create_qp command */
	u32 qp_handle;
};

struct efa_admin_destroy_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

/*
 * Create Address Handle command parameters. Must not be called more than
 * once for the same destination
 */
struct efa_admin_create_ah_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Destination address in network byte order */
	u8 dest_addr[16];

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_create_ah_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* Target interface address handle (opaque) */
	u16 ah;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_destroy_ah_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Target interface address handle (opaque) */
	u16 ah;

	/* PD number */
	u16 pd;
};

struct efa_admin_destroy_ah_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

/*
 * Registration of a Memory Region, required for QPs working with virtual
 * addresses. In standard verbs semantics, region length is limited to 2GB,
 * but EFA supports larger MRs to accommodate users working with very large
 * datasets (e.g. a full GPU memory mapping).
 */
struct efa_admin_reg_mr_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Protection Domain */
	u16 pd;

	/* MBZ */
	u16 reserved16_w1;

	/* Physical Buffer List, each element is page-aligned. */
	union {
		/*
		 * Inline array of guest-physical page addresses of user
		 * memory pages (optimization for short region
		 * registrations)
		 */
		u64 inline_pbl_array[4];

		/* points to PBL (direct or indirect, chained if needed) */
		struct efa_admin_ctrl_buff_info pbl;
	} pbl;

	/* Memory region length, in bytes. */
	u64 mr_length;

	/*
	 * flags and page size
	 * 4:0 : phys_page_size_shift - page size is (1 <<
	 *    phys_page_size_shift). Page size is used for
	 *    building the Virtual to Physical address mapping
	 * 6:5 : reserved - MBZ
	 * 7 : mem_addr_phy_mode_en - Enable bit for physical
	 *    memory registration (no translation), can be used
	 *    only by privileged clients. If set, PBL must
	 *    contain a single entry.
	 */
	u8 flags;

	/*
	 * permissions
	 * 0 : local_write_enable - Local write permissions:
	 *    must be set for RQ buffers and buffers posted for
	 *    RDMA Read requests
	 * 1 : remote_write_enable - Remote write
	 *    permissions: must be set to enable RDMA write to
	 *    the region
	 * 2 : remote_read_enable - Remote read permissions:
	 *    must be set to enable RDMA read from the region
	 * 7:3 : reserved2 - MBZ
	 */
	u8 permissions;

	/* MBZ */
	u16 reserved16_w5;

	/* number of pages in PBL (redundant, could be calculated) */
	u32 page_num;

	/*
	 * IO Virtual Address associated with this MR. If
	 * mem_addr_phy_mode_en is set, contains the physical address of
	 * the region.
	 */
	u64 iova;
};

struct efa_admin_reg_mr_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/*
	 * L_Key, to be used in conjunction with local buffer references in
	 * SQ and RQ WQE, or with virtual RQ/CQ rings
	 */
	u32 l_key;

	/*
	 * R_Key, to be used in RDMA messages to refer to remotely accessed
	 * memory region
	 */
	u32 r_key;

	/*
	 * Mask indicating which fields have valid values
	 * 0 : recv_ic_id
	 * 1 : rdma_read_ic_id
	 * 2 : rdma_recv_ic_id
	 */
	u8 validity;

	/*
	 * Physical interconnect used by the device to reach the MR for receive
	 * operation
	 */
	u8 recv_ic_id;

	/*
	 * Physical interconnect used by the device to reach the MR for RDMA
	 * read operation
	 */
	u8 rdma_read_ic_id;

	/*
	 * Physical interconnect used by the device to reach the MR for RDMA
	 * write receive
	 */
	u8 rdma_recv_ic_id;
};

struct efa_admin_dereg_mr_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* L_Key, memory region's l_key */
	u32 l_key;
};

struct efa_admin_dereg_mr_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

/*
 * Allocation of a Memory Region, required for QPs working with virtual
 * addresses in kernel verbs semantics, ready for fast-registration use.
 */
struct efa_admin_alloc_mr_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Protection Domain */
	u16 pd;

	/* MBZ */
	u16 reserved1;

	/* Maximum number of pages this MR supports. */
	u32 max_pages;
};

struct efa_admin_alloc_mr_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/*
	 * L_Key, to be used in conjunction with local buffer references in
	 * SQ and RQ WQE, or with virtual RQ/CQ rings
	 */
	u32 l_key;

	/*
	 * R_Key, to be used in RDMA messages to refer to remotely accessed
	 * memory region
	 */
	u32 r_key;
};

struct efa_admin_create_cq_cmd {
	struct efa_admin_aq_common_desc aq_common_desc;

	/*
	 * 4:0 : reserved5 - MBZ
	 * 5 : interrupt_mode_enabled - if set, cq operates
	 *    in interrupt mode (i.e. CQ events and EQ elements
	 *    are generated), otherwise - polling
	 * 6 : virt - If set, ring base address is virtual
	 *    (IOVA returned by MR registration)
	 * 7 : reserved6 - MBZ
	 */
	u8 cq_caps_1;

	/*
	 * 4:0 : cq_entry_size_words - size of CQ entry in
	 *    32-bit words, valid values: 4, 8.
	 * 5 : set_src_addr - If set, source address will be
	 *    filled on RX completions from unknown senders.
	 *    Requires 8 words CQ entry size.
	 * 7:6 : reserved7 - MBZ
	 */
	u8 cq_caps_2;

	/* Sub completion queue depth in # of entries. Must be a power of 2 */
	u16 sub_cq_depth;

	/* EQ number assigned to this cq */
	u16 eqn;

	/* MBZ */
	u16 reserved;

	/*
	 * CQ ring base address, virtual or physical depending on 'virt'
	 * flag
	 */
	struct efa_common_mem_addr cq_ba;

	/*
	 * Memory registration key for the ring, used only when base
	 * address is virtual
	 */
	u32 l_key;

	/*
	 * number of sub cqs - must be equal to sub_cqs_per_cq of queue
	 * attributes.
	 */
	u16 num_sub_cqs;

	/* UAR number */
	u16 uar;
};

struct efa_admin_create_cq_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	u16 cq_idx;

	/* actual sub cq depth in number of entries */
	u16 sub_cq_actual_depth;

	/* CQ doorbell address, as offset to PCIe DB BAR */
	u32 db_offset;

	/*
	 * 0 : db_valid - If set, doorbell offset is valid.
	 *    Always set when interrupts are requested.
	 */
	u32 flags;
};

struct efa_admin_destroy_cq_cmd {
	struct efa_admin_aq_common_desc aq_common_desc;

	u16 cq_idx;

	/* MBZ */
	u16 reserved1;
};

struct efa_admin_destroy_cq_resp {
	struct efa_admin_acq_common_desc acq_common_desc;
};

/*
 * EFA AQ Get Statistics command. Extended statistics are placed in control
 * buffer pointed by AQ entry
 */
struct efa_admin_aq_get_stats_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	union {
		/* command specific inline data */
		u32 inline_data_w1[3];

		struct efa_admin_ctrl_buff_info control_buffer;
	} u;

	/* stats type as defined in enum efa_admin_get_stats_type */
	u8 type;

	/* stats scope defined in enum efa_admin_get_stats_scope */
	u8 scope;

	u16 scope_modifier;
};

struct efa_admin_basic_stats {
	u64 tx_bytes;

	u64 tx_pkts;

	u64 rx_bytes;

	u64 rx_pkts;

	u64 rx_drops;

	u64 qkey_viol;
};

struct efa_admin_messages_stats {
	u64 send_bytes;

	u64 send_wrs;

	u64 recv_bytes;

	u64 recv_wrs;
};

struct efa_admin_rdma_read_stats {
	u64 read_wrs;

	u64 read_bytes;

	u64 read_wr_err;

	u64 read_resp_bytes;
};

struct efa_admin_rdma_write_stats {
	u64 write_wrs;

	u64 write_bytes;

	u64 write_wr_err;

	u64 write_recv_bytes;
};

struct efa_admin_network_stats {
	u64 retrans_bytes;

	u64 retrans_pkts;

	u64 retrans_timeout_events;

	u64 unresponsive_remote_events;

	u64 impaired_remote_conn_events;
};

struct efa_admin_acq_get_stats_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		struct efa_admin_basic_stats basic_stats;

		struct efa_admin_messages_stats messages_stats;

		struct efa_admin_rdma_read_stats rdma_read_stats;

		struct efa_admin_rdma_write_stats rdma_write_stats;

		struct efa_admin_network_stats network_stats;
	} u;
};

struct efa_admin_get_set_feature_common_desc {
	/* MBZ */
	u8 reserved0;

	/* as appears in efa_admin_aq_feature_id */
	u8 feature_id;

	/* MBZ */
	u16 reserved16;
};

struct efa_admin_feature_device_attr_desc {
	/* Bitmap of efa_admin_aq_feature_id */
	u64 supported_features;

	/* Bitmap of supported page sizes in MR registrations */
	u64 page_size_cap;

	u32 fw_version;

	u32 admin_api_version;

	u32 device_version;

	/* Bar used for SQ and RQ doorbells */
	u16 db_bar;

	/* Indicates how many bits are used on physical address access */
	u8 phys_addr_width;

	/* Indicates how many bits are used on virtual address access */
	u8 virt_addr_width;

	/*
	 * 0 : rdma_read - If set, RDMA Read is supported on
	 *    TX queues
	 * 1 : rnr_retry - If set, RNR retry is supported on
	 *    modify QP command
	 * 2 : data_polling_128 - If set, 128 bytes data
	 *    polling is supported
	 * 3 : rdma_write - If set, RDMA Write is supported
	 *    on TX queues
	 * 4 : unsolicited_write_recv - If set, unsolicited
	 *    write with imm. receive is supported
	 * 31:5 : reserved - MBZ
	 */
	u32 device_caps;

	/* Max RDMA transfer size in bytes */
	u32 max_rdma_size;

	/* Unique global ID for an EFA device */
	u64 guid;

	/* The device maximum link speed in Gbit/sec */
	u16 max_link_speed_gbps;

	/* MBZ */
	u16 reserved0;

	/* MBZ */
	u32 reserved1;
};

struct efa_admin_feature_queue_attr_desc {
	/* The maximum number of queue pairs supported */
	u32 max_qp;

	/* Maximum number of WQEs per Send Queue */
	u32 max_sq_depth;

	/* Maximum size of data that can be sent inline in a Send WQE */
	u32 inline_buf_size;

	/* Maximum number of buffer descriptors per Recv Queue */
	u32 max_rq_depth;

	/* The maximum number of completion queues supported per VF */
	u32 max_cq;

	/* Maximum number of CQEs per Completion Queue */
	u32 max_cq_depth;

	/* Number of sub-CQs to be created for each CQ */
	u16 sub_cqs_per_cq;

	/* Minimum number of WQEs per SQ */
	u16 min_sq_depth;

	/* Maximum number of SGEs (buffers) allowed for a single send WQE */
	u16 max_wr_send_sges;

	/* Maximum number of SGEs allowed for a single recv WQE */
	u16 max_wr_recv_sges;

	/* The maximum number of memory regions supported */
	u32 max_mr;

	/* The maximum number of pages that can be registered */
	u32 max_mr_pages;

	/* The maximum number of protection domains supported */
	u32 max_pd;

	/* The maximum number of address handles supported */
	u32 max_ah;

	/* The maximum size of LLQ in bytes */
	u32 max_llq_size;

	/* Maximum number of SGEs for a single RDMA read/write WQE */
	u16 max_wr_rdma_sges;

	/*
	 * Maximum number of bytes that can be written to SQ between two
	 * consecutive doorbells (in units of 64B). Driver must ensure that only
	 * complete WQEs are written to queue before issuing a doorbell.
	 * Examples: max_tx_batch=16 and WQE size = 64B, means up to 16 WQEs can
	 * be written to SQ between two consecutive doorbells. max_tx_batch=11
	 * and WQE size = 128B, means up to 5 WQEs can be written to SQ between
	 * two consecutive doorbells. Zero means unlimited.
	 */
	u16 max_tx_batch;
};
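
/*
 * Illustrative sketch (not part of the device ABI or the efa driver): how a
 * driver might turn the max_tx_batch hint above into a WQE budget between
 * doorbells. The helper name and the "0 means unlimited" return convention
 * are hypothetical and mirror the hint's own semantics.
 */
static inline u32 efa_example_wqes_per_doorbell(u16 max_tx_batch,
						u32 wqe_size_bytes)
{
	/* 0 means the device imposes no batching limit */
	if (!max_tx_batch)
		return 0;

	/* Hint is in 64B units; only complete WQEs may be written */
	return (max_tx_batch * 64U) / wqe_size_bytes;
}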

struct efa_admin_event_queue_attr_desc {
	/* The maximum number of event queues supported */
	u32 max_eq;

	/* Maximum number of EQEs per Event Queue */
	u32 max_eq_depth;

	/* Supported events bitmask */
	u32 event_bitmask;
};

struct efa_admin_feature_aenq_desc {
	/* bitmask for AENQ groups the device can report */
	u32 supported_groups;

	/* bitmask for AENQ groups to report */
	u32 enabled_groups;
};

struct efa_admin_feature_network_attr_desc {
	/* Raw address data in network byte order */
	u8 addr[16];

	/* max packet payload size in bytes */
	u32 mtu;
};

/*
 * When a hint value is 0, the hint capability is not supported or the
 * driver should use its own predefined value
 */
struct efa_admin_hw_hints {
	/* value in ms */
	u16 mmio_read_timeout;

	/* value in ms */
	u16 driver_watchdog_timeout;

	/* value in ms */
	u16 admin_completion_timeout;

	/* poll interval in ms */
	u16 poll_interval;
};

struct efa_admin_get_feature_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	struct efa_admin_ctrl_buff_info control_buffer;

	struct efa_admin_get_set_feature_common_desc feature_common;

	u32 raw[11];
};

struct efa_admin_get_feature_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		u32 raw[14];

		struct efa_admin_feature_device_attr_desc device_attr;

		struct efa_admin_feature_aenq_desc aenq;

		struct efa_admin_feature_network_attr_desc network_attr;

		struct efa_admin_feature_queue_attr_desc queue_attr;

		struct efa_admin_event_queue_attr_desc event_queue_attr;

		struct efa_admin_hw_hints hw_hints;
	} u;
};

struct efa_admin_set_feature_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	struct efa_admin_ctrl_buff_info control_buffer;

	struct efa_admin_get_set_feature_common_desc feature_common;

	union {
		u32 raw[11];

		/* AENQ configuration */
		struct efa_admin_feature_aenq_desc aenq;
	} u;
};

struct efa_admin_set_feature_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		u32 raw[14];
	} u;
};

struct efa_admin_alloc_pd_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;
};

struct efa_admin_alloc_pd_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_dealloc_pd_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_dealloc_pd_resp {
	struct efa_admin_acq_common_desc acq_common_desc;
};

struct efa_admin_alloc_uar_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;
};

struct efa_admin_alloc_uar_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	/* UAR number */
	u16 uar;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_dealloc_uar_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* UAR number */
	u16 uar;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_dealloc_uar_resp {
	struct efa_admin_acq_common_desc acq_common_desc;
};

struct efa_admin_create_eq_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* Size of the EQ in entries, must be power of 2 */
	u16 depth;

	/* MSI-X table entry index */
	u8 msix_vec;

	/*
	 * 4:0 : entry_size_words - size of EQ entry in
	 *    32-bit words
	 * 7:5 : reserved - MBZ
	 */
	u8 caps;

	/* EQ ring base address */
	struct efa_common_mem_addr ba;

	/*
	 * Enabled events on this EQ
	 * 0 : completion_events - Enable completion events
	 * 31:1 : reserved - MBZ
	 */
	u32 event_bitmask;

	/* MBZ */
	u32 reserved;
};

struct efa_admin_create_eq_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	/* EQ number */
	u16 eqn;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_destroy_eq_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* EQ number */
	u16 eqn;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_destroy_eq_resp {
	struct efa_admin_acq_common_desc acq_common_desc;
};

/* asynchronous event notification groups */
enum efa_admin_aenq_group {
	EFA_ADMIN_FATAL_ERROR                       = 1,
	EFA_ADMIN_WARNING                           = 2,
	EFA_ADMIN_NOTIFICATION                      = 3,
	EFA_ADMIN_KEEP_ALIVE                        = 4,
	EFA_ADMIN_AENQ_GROUPS_NUM                   = 5,
};

struct efa_admin_mmio_req_read_less_resp {
	u16 req_id;

	u16 reg_off;

	/* value is valid when poll is cleared */
	u32 reg_val;
};

enum efa_admin_os_type {
	EFA_ADMIN_OS_LINUX                          = 0,
};

struct efa_admin_host_info {
	/* OS distribution string format */
	u8 os_dist_str[128];

	/* Defined in enum efa_admin_os_type */
	u32 os_type;

	/* Kernel version string format */
	u8 kernel_ver_str[32];

	/* Kernel version numeric format */
	u32 kernel_ver;

	/*
	 * 7:0 : driver_module_type
	 * 15:8 : driver_sub_minor
	 * 23:16 : driver_minor
	 * 31:24 : driver_major
	 */
	u32 driver_ver;

	/*
	 * Device's Bus, Device and Function
	 * 2:0 : function
	 * 7:3 : device
	 * 15:8 : bus
	 */
	u16 bdf;

	/*
	 * Spec version
	 * 7:0 : spec_minor
	 * 15:8 : spec_major
	 */
	u16 spec_ver;

	/*
	 * 0 : intree - Intree driver
	 * 1 : gdr - GPUDirect RDMA supported
	 * 31:2 : reserved2
	 */
	u32 flags;
};

/* create_qp_cmd */
#define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK                BIT(0)
#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK                BIT(1)
#define EFA_ADMIN_CREATE_QP_CMD_UNSOLICITED_WRITE_RECV_MASK BIT(2)
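
/*
 * Illustrative sketch (not part of the ABI or the efa driver): composing the
 * create_qp_cmd 'flags' byte from the masks above, e.g. for a QP whose SQ
 * and RQ rings were registered as MRs (virtual base addresses). The helper
 * name and parameters are hypothetical.
 */
static inline void efa_example_set_create_qp_flags(struct efa_admin_create_qp_cmd *cmd,
						    bool sq_virt, bool rq_virt)
{
	cmd->flags = 0;
	if (sq_virt)
		cmd->flags |= EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK;
	if (rq_virt)
		cmd->flags |= EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK;
	/* bits 7:3 are reserved and must stay zero */
}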

/* modify_qp_cmd */
#define EFA_ADMIN_MODIFY_QP_CMD_QP_STATE_MASK               BIT(0)
#define EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE_MASK           BIT(1)
#define EFA_ADMIN_MODIFY_QP_CMD_QKEY_MASK                   BIT(2)
#define EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN_MASK                 BIT(3)
#define EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY_MASK BIT(4)
#define EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY_MASK              BIT(5)
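
/*
 * Illustrative sketch (not part of the ABI or the efa driver): a
 * modify_qp_cmd that moves a QP to a new state and updates its QKey sets
 * only the corresponding modify_mask bits, so the device ignores the
 * remaining fields. The helper name is hypothetical.
 */
static inline void efa_example_fill_modify_qp(struct efa_admin_modify_qp_cmd *cmd,
					      u32 qp_handle, u32 qp_state, u32 qkey)
{
	cmd->qp_handle = qp_handle;
	cmd->qp_state = qp_state;
	cmd->qkey = qkey;
	cmd->modify_mask = EFA_ADMIN_MODIFY_QP_CMD_QP_STATE_MASK |
			   EFA_ADMIN_MODIFY_QP_CMD_QKEY_MASK;
}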

/* reg_mr_cmd */
#define EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK      GENMASK(4, 0)
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK      BIT(7)
#define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK        BIT(0)
#define EFA_ADMIN_REG_MR_CMD_REMOTE_WRITE_ENABLE_MASK       BIT(1)
#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK        BIT(2)
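
/*
 * Illustrative sketch (not part of the ABI or the efa driver): filling the
 * reg_mr_cmd 'flags' and 'permissions' bytes with the masks above. Assumes
 * FIELD_PREP() from <linux/bitfield.h> is available in the including
 * translation unit; the helper name and parameters are hypothetical.
 */
static inline void efa_example_fill_reg_mr_access(struct efa_admin_reg_mr_cmd *cmd,
						  u8 page_shift, bool remote_read)
{
	/* e.g. page_shift = 12 for 4KiB pages */
	cmd->flags = FIELD_PREP(EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK,
				page_shift);

	/* local write is required for RQ buffers and RDMA read targets */
	cmd->permissions = EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK;
	if (remote_read)
		cmd->permissions |= EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK;
}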

/* reg_mr_resp */
#define EFA_ADMIN_REG_MR_RESP_RECV_IC_ID_MASK               BIT(0)
#define EFA_ADMIN_REG_MR_RESP_RDMA_READ_IC_ID_MASK          BIT(1)
#define EFA_ADMIN_REG_MR_RESP_RDMA_RECV_IC_ID_MASK          BIT(2)

/* create_cq_cmd */
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK                   BIT(6)
#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK    GENMASK(4, 0)
#define EFA_ADMIN_CREATE_CQ_CMD_SET_SRC_ADDR_MASK           BIT(5)
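
/*
 * Illustrative sketch (not part of the ABI or the efa driver): composing the
 * create_cq_cmd capability bytes for a CQ with 4-word (16-byte) entries that
 * optionally generates EQ events. Assumes FIELD_PREP() from
 * <linux/bitfield.h>; the helper name is hypothetical.
 */
static inline void efa_example_fill_cq_caps(struct efa_admin_create_cq_cmd *cmd,
					    bool interrupts)
{
	cmd->cq_caps_1 = 0;
	if (interrupts)
		cmd->cq_caps_1 |= EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	/* valid entry sizes are 4 or 8 words; 8 is required for set_src_addr */
	cmd->cq_caps_2 = FIELD_PREP(EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK, 4);
}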

/* create_cq_resp */
#define EFA_ADMIN_CREATE_CQ_RESP_DB_VALID_MASK              BIT(0)

/* feature_device_attr_desc */
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK   BIT(0)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RNR_RETRY_MASK   BIT(1)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_DATA_POLLING_128_MASK BIT(2)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_WRITE_MASK  BIT(3)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_UNSOLICITED_WRITE_RECV_MASK BIT(4)
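
/*
 * Illustrative sketch (not part of the ABI or the efa driver): a driver
 * would gate optional features on the device_caps bits reported in
 * struct efa_admin_feature_device_attr_desc, for example RDMA write
 * support. The helper name is hypothetical.
 */
static inline bool efa_example_has_rdma_write(const struct efa_admin_feature_device_attr_desc *attr)
{
	return !!(attr->device_caps &
		  EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_WRITE_MASK);
}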

/* create_eq_cmd */
#define EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS_MASK       GENMASK(4, 0)
#define EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS_MASK      BIT(0)

/* host_info */
#define EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE_MASK         GENMASK(7, 0)
#define EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR_MASK           GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_DRIVER_MINOR_MASK               GENMASK(23, 16)
#define EFA_ADMIN_HOST_INFO_DRIVER_MAJOR_MASK               GENMASK(31, 24)
#define EFA_ADMIN_HOST_INFO_FUNCTION_MASK                   GENMASK(2, 0)
#define EFA_ADMIN_HOST_INFO_DEVICE_MASK                     GENMASK(7, 3)
#define EFA_ADMIN_HOST_INFO_BUS_MASK                        GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_SPEC_MINOR_MASK                 GENMASK(7, 0)
#define EFA_ADMIN_HOST_INFO_SPEC_MAJOR_MASK                 GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_INTREE_MASK                     BIT(0)
#define EFA_ADMIN_HOST_INFO_GDR_MASK                        BIT(1)
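
/*
 * Illustrative sketch (not part of the ABI or the efa driver): packing the
 * host_info version and BDF fields with the masks above. Assumes
 * FIELD_PREP() from <linux/bitfield.h>; the helper name and the version
 * numbers are hypothetical.
 */
static inline void efa_example_fill_host_info(struct efa_admin_host_info *hinf,
					      u8 bus, u8 dev, u8 func)
{
	hinf->driver_ver = FIELD_PREP(EFA_ADMIN_HOST_INFO_DRIVER_MAJOR_MASK, 2) |
			   FIELD_PREP(EFA_ADMIN_HOST_INFO_DRIVER_MINOR_MASK, 1) |
			   FIELD_PREP(EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR_MASK, 0);

	hinf->bdf = FIELD_PREP(EFA_ADMIN_HOST_INFO_BUS_MASK, bus) |
		    FIELD_PREP(EFA_ADMIN_HOST_INFO_DEVICE_MASK, dev) |
		    FIELD_PREP(EFA_ADMIN_HOST_INFO_FUNCTION_MASK, func);

	/* mark the driver as in-tree */
	hinf->flags |= EFA_ADMIN_HOST_INFO_INTREE_MASK;
}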

#endif /* _EFA_ADMIN_CMDS_H_ */