1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2 /* Copyright (c) 2021, Microsoft Corporation. */
3
4 #ifndef _GDMA_H
5 #define _GDMA_H
6
7 #include <linux/dma-mapping.h>
8 #include <linux/netdevice.h>
9
10 #include "shm_channel.h"
11
/* HWC response status codes (reported in gdma_resp_hdr.status). */
#define GDMA_STATUS_MORE_ENTRIES 0x00000105
/* NOTE(review): presumably a driver-side sentinel for commands the FW does
 * not recognize — confirm against the HWC handling code.
 */
#define GDMA_STATUS_CMD_UNSUPPORTED 0xffffffff

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */
18
/* HWC message codes. The numeric values are part of the firmware protocol
 * and must not change (hence the gaps in the sequence).
 */
enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION	= 1,
	GDMA_QUERY_MAX_RESOURCES	= 2,
	GDMA_LIST_DEVICES		= 3,
	GDMA_REGISTER_DEVICE		= 4,
	GDMA_DEREGISTER_DEVICE		= 5,
	GDMA_GENERATE_TEST_EQE		= 10,
	GDMA_CREATE_QUEUE		= 12,
	GDMA_DISABLE_QUEUE		= 13,
	GDMA_ALLOCATE_RESOURCE_RANGE	= 22,
	GDMA_DESTROY_RESOURCE_RANGE	= 24,
	GDMA_CREATE_DMA_REGION		= 25,
	GDMA_DMA_REGION_ADD_PAGES	= 26,
	GDMA_DESTROY_DMA_REGION		= 27,
	GDMA_CREATE_PD			= 29,
	GDMA_DESTROY_PD			= 30,
	GDMA_CREATE_MR			= 31,
	GDMA_DESTROY_MR			= 32,
	GDMA_QUERY_HWC_TIMEOUT		= 84, /* 0x54 */
	GDMA_ALLOC_DM			= 96, /* 0x60 */
	GDMA_DESTROY_DM			= 97, /* 0x61 */
};
41
/* Resource type used with GDMA_ALLOCATE/DESTROY_RESOURCE_RANGE requests. */
#define GDMA_RESOURCE_DOORBELL_PAGE	27

/* The four hardware queue kinds (plus an invalid marker). */
enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

/* Flags for gdma_wqe_request.flags. */
enum gdma_work_request_flags {
	GDMA_WR_NONE			= 0,
	GDMA_WR_OOB_IN_SGL		= BIT(0),
	GDMA_WR_PAD_BY_SGE0		= BIT(1),
};
57
/* Event types delivered in EQ entries; values are fixed by the FW. */
enum gdma_eqe_type {
	GDMA_EQE_COMPLETION		= 3,
	GDMA_EQE_TEST_EVENT		= 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB	= 129,
	GDMA_EQE_HWC_INIT_DATA		= 130,
	GDMA_EQE_HWC_INIT_DONE		= 131,
	GDMA_EQE_HWC_FPGA_RECONFIG	= 132,
	GDMA_EQE_HWC_SOC_RECONFIG_DATA	= 133,
	GDMA_EQE_HWC_SOC_SERVICE	= 134,
	GDMA_EQE_HWC_RESET_REQUEST	= 135,
	GDMA_EQE_RNIC_QP_FATAL		= 176,
};

/* Client device types within a GDMA context (gdma_dev_id.type). */
enum {
	GDMA_DEVICE_NONE	= 0,
	GDMA_DEVICE_HWC		= 1,
	GDMA_DEVICE_MANA	= 2,
	GDMA_DEVICE_MANA_IB	= 3,
};

/* Service events (see mana_rdma_service_event()). */
enum gdma_service_type {
	GDMA_SERVICE_TYPE_NONE		= 0,
	GDMA_SERVICE_TYPE_RDMA_SUSPEND	= 1,
	GDMA_SERVICE_TYPE_RDMA_RESUME	= 2,
};

/* Deferred-work wrapper carrying a service event for a gdma_dev.
 * NOTE(review): presumably queued on gdma_context.service_wq — confirm in
 * the .c files.
 */
struct mana_service_work {
	struct work_struct work;
	struct gdma_dev *gdma_dev;
	enum gdma_service_type event;
};
89
/* A bitmap-based ID allocator. */
struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};

/* 64-bit value written to the doorbell page; the layout differs per queue
 * type. Do not change the bit-field widths: this is hardware layout.
 */
union gdma_doorbell_entry {
	u64 as_uint64;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} cq;

	struct {
		u64 id		: 24;
		u64 wqe_cnt	: 8;
		u64 tail_ptr	: 32;
	} rq;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 32;
	} sq;

	struct {
		u64 id		: 16;
		u64 reserved	: 16;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} eq;
}; /* HW DATA */
130
/* Common header prefixed to every HWC message. */
struct gdma_msg_hdr {
	u32 hdr_type;
	u32 msg_type;	/* enum gdma_request_type */
	u16 msg_version;
	u16 hwc_msg_id;
	u32 msg_size;	/* Total message size, in bytes */
}; /* HW DATA */

/* Identifies a client device: a (type, instance) pair. */
struct gdma_dev_id {
	union {
		struct {
			u16 type;	/* GDMA_DEVICE_* */
			u16 instance;
		};

		u32 as_uint32;
	};
}; /* HW DATA */

/* Header of every HWC request. */
struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
}; /* HW DATA */

/* Header of every HWC response. */
struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;	/* 0 on success; see GDMA_STATUS_* */
	u32 reserved;
}; /* HW DATA */

/* A request carrying no payload beyond the header. */
struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

/* Message versions (gdma_msg_hdr.msg_version). */
#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2
#define GDMA_MESSAGE_V3 3
#define GDMA_MESSAGE_V4 4

/* A response carrying no payload beyond the header. */
struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0
179
/* Initialize the request header and the expected-response header of an HWC
 * message.
 * @hdr: the request header to fill in.
 * @code: the message type (enum gdma_request_type).
 * @req_size: size in bytes of the request message.
 * @resp_size: size in bytes of the expected response message.
 */
static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{
	struct gdma_msg_hdr *req = &hdr->req;
	struct gdma_msg_hdr *resp = &hdr->resp;

	req->hdr_type = GDMA_STANDARD_HEADER_TYPE;
	req->msg_type = code;
	req->msg_version = GDMA_MESSAGE_V1;
	req->msg_size = req_size;

	resp->hdr_type = GDMA_STANDARD_HEADER_TYPE;
	resp->msg_type = code;
	resp->msg_version = GDMA_MESSAGE_V1;
	resp->msg_size = resp_size;
}
193
/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	u64 address;
	u32 mem_key;
	u32 size;
}; /* HW DATA */

/* Driver-side description of a work request to be posted as a WQE. */
struct gdma_wqe_request {
	struct gdma_sge *sgl;	/* Scatter-gather list */
	u32 num_sge;		/* Number of entries in @sgl */

	u32 inline_oob_size;	/* Size of the inline out-of-band data */
	const void *inline_oob_data;

	u32 flags;		/* enum gdma_work_request_flags */
	u32 client_data_unit;
};

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

/* A DMA-coherent buffer plus its PF-assigned DMA region handle. */
struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	u64 dma_region_handle;
};
228
#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

/* A client device (HWC, MANA Ethernet, or MANA RDMA) registered within a
 * GDMA context.
 */
struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;	/* Protection domain ID */
	u32 doorbell;
	u32 gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;

	struct auxiliary_device *adev;
	bool is_suspended;
	bool rdma_teardown;
};

/* MANA_PAGE_SIZE is the DMA unit */
#define MANA_PAGE_SHIFT 12
#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)

/* Required by HW */
#define MANA_MIN_QSIZE MANA_PAGE_SIZE

/* Fixed entry sizes, in bytes. */
#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32

/* "Not yet assigned" markers for the various IDs/keys. */
#define INVALID_PDID		UINT_MAX
#define INVALID_DOORBELL	UINT_MAX
#define INVALID_MEM_KEY		UINT_MAX
#define INVALID_QUEUE_ID	UINT_MAX
#define INVALID_PCI_MSIX_INDEX  UINT_MAX

/* A completion as consumed from a CQ by the driver. */
struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;
	bool is_sq;
};

/* An event as consumed from an EQ by the driver. */
struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8  type;	/* enum gdma_eqe_type */
};
286
struct gdma_queue;

/* An EQ plus its debugfs directory. */
struct mana_eq {
	struct gdma_queue *eq;
	struct dentry *mana_eq_debugfs;
};

/* Invoked for each event consumed from an EQ. */
typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

/* Invoked when a CQ has completions to process. */
typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
/* A hardware queue (SQ/RQ/CQ/EQ); see the head/tail comment above. */
struct gdma_queue {
	struct gdma_dev *gdma_dev;	/* Owning client device */

	enum gdma_queue_type type;
	u32 id;				/* HW-assigned queue ID */

	struct gdma_mem_info mem_info;	/* Backing DMA memory */

	void *queue_mem_ptr;
	u32 queue_size;			/* In bytes */

	bool monitor_avl_buf;

	u32 head;
	u32 tail;
	struct list_head entry;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};

/* Parameters for creating a gdma_queue. */
struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;	/* In bytes */

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;
			unsigned int msix_index;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;

		} cq;
	};
};
376
#define MANA_IRQ_NAME_SZ 32

/* Per-IRQ state: the handler and the EQs served by this interrupt. */
struct gdma_irq_context {
	void (*handler)(void *arg);
	/* Protect the eq_list */
	spinlock_t lock;
	struct list_head eq_list;
	char name[MANA_IRQ_NAME_SZ];
};

/* Bit numbers for gdma_context.flags. */
enum gdma_context_flags {
	GC_PROBE_SUCCEEDED = 0,
};

/* Top-level per-PCI-device state for the GDMA driver. */
struct gdma_context {
	struct device		*dev;
	struct dentry		*mana_pci_debugfs;

	/* Per-vPort max number of queues */
	unsigned int		max_num_queues;
	unsigned int		max_num_msix;
	unsigned int		num_msix_usable;
	struct xarray		irq_contexts;

	/* L2 MTU */
	u16 adapter_mtu;

	/* This maps a CQ index to the queue structure. */
	unsigned int		max_num_cqs;
	struct gdma_queue	**cq_table;

	/* Protect eq_test_event and test_event_eq_id  */
	struct mutex		eq_test_event_mutex;
	struct completion	eq_test_event;
	u32			test_event_eq_id;

	bool			is_pf;
	bool			in_service;

	phys_addr_t		bar0_pa;
	void __iomem		*bar0_va;
	void __iomem		*shm_base;
	void __iomem		*db_page_base;
	phys_addr_t		phys_db_page_base;
	u32 db_page_size;
	int                     numa_node;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel	shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev		hwc;

	/* Azure network adapter */
	struct gdma_dev		mana;

	/* Azure RDMA adapter */
	struct gdma_dev		mana_ib;

	u64 pf_cap_flags1;	/* Capability flags reported by the PF */

	struct workqueue_struct *service_wq;

	unsigned long flags;	/* enum gdma_context_flags bits */
};
442
mana_gd_is_mana(struct gdma_dev * gd)443 static inline bool mana_gd_is_mana(struct gdma_dev *gd)
444 {
445 return gd->dev_id.type == GDMA_DEVICE_MANA;
446 }
447
mana_gd_is_hwc(struct gdma_dev * gd)448 static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
449 {
450 return gd->dev_id.type == GDMA_DEVICE_HWC;
451 }
452
/* Pointer into a WQ's ring buffer at @wqe_offset. */
u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
/* Free space in the WQ, in bytes. */
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

/* Ask the FW to generate a test event on @eq and wait for it. */
int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

/* Consume up to @num_cqe completions from @cq; returns the number polled. */
int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

/* Ring the CQ doorbell; @arm_bit re-arms the interrupt when set. */
void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);
475
/* The fixed header of a WQE; bit-field layout is hardware-defined. */
struct gdma_wqe {
	u32 reserved	:24;
	u32 last_vbytes	:8;

	union {
		u32 flags;

		struct {
			u32 num_sge		:8;
			u32 inline_oob_size_div4:3;
			u32 client_oob_in_sgl	:1;
			u32 reserved1		:4;
			u32 client_data_unit	:14;
			u32 reserved2		:2;
		};
	};
}; /* HW DATA */

/* Valid inline OOB sizes, in bytes. */
#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MANA_MAX_TX_WQE_SGL_ENTRIES	30

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

/* Max SGEs that fit in a max-size SQ/RQ WQE. */
#define MAX_TX_WQE_SGL_ENTRIES	((GDMA_MAX_SQE_SIZE -			   \
			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
			sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES	((GDMA_MAX_RQE_SIZE -			   \
			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
508
/* A completion queue entry as written by the hardware. */
struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num	: 24;
			u32 is_sq	: 1;
			u32 reserved	: 4;
			u32 owner_bits	: 3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

/* Decoded view of gdma_eqe.eqe_info. */
union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type	: 8;	/* enum gdma_eqe_type */
		u32 reserved1	: 8;
		u32 client_id	: 2;
		u32 reserved2	: 11;
		u32 owner_bits	: 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))

/* An event queue entry as written by the hardware. */
struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;	/* union gdma_eqe_info */
}; /* HW DATA */

/* BAR0 register offsets (VF). */
#define GDMA_REG_DB_PAGE_OFFSET	8
#define GDMA_REG_DB_PAGE_SIZE	0x10
#define GDMA_REG_SHM_OFFSET	0x18

/* BAR0 register offsets (PF). */
#define GDMA_PF_REG_DB_PAGE_SIZE	0xD0
#define GDMA_PF_REG_DB_PAGE_OFF		0xC8
#define GDMA_PF_REG_SHM_OFF		0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF	0x108

/* PCI device IDs. */
#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

/* Output of posting a WQE: its size in 32-byte Basic Units. */
struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};
568
/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1	= 1,
	GDMA_PROTOCOL_FIRST	= GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST	= GDMA_PROTOCOL_V1,
};

/* Driver capability bits advertised to the NIC firmware. The bit positions
 * are fixed by the protocol, hence the non-sequential ordering below.
 */
#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
 * so the driver is able to reliably support features like busy_poll.
 */
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
#define GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB BIT(4)
#define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)

/* Driver can handle holes (zeros) in the device list */
#define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11)

/* Driver supports dynamic MSI-X vector allocation */
#define GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT BIT(13)

/* Driver can self reset on EQE notification */
#define GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE BIT(14)

/* Driver can self reset on FPGA Reconfig EQE notification */
#define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17)

/* Driver detects stalled send queues and recovers them */
#define GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY BIT(18)

#define GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE BIT(6)

/* Driver supports linearizing the skb when num_sge exceeds hardware limit */
#define GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE BIT(20)

/* Driver can send HWC periodically to query stats */
#define GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY BIT(21)

/* Driver can handle hardware recovery events during probe */
#define GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY BIT(22)

/* The set of capabilities this driver advertises; note that BIT(4)
 * (4MB/1GB/2GB pages) is intentionally not included here.
 */
#define GDMA_DRV_CAP_FLAGS1 \
	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
	 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
	 GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \
	 GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP | \
	 GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \
	 GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE | \
	 GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE | \
	 GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE | \
	 GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY | \
	 GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE | \
	 GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY | \
	 GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY)

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0
638
/* GDMA_VERIFY_VF_DRIVER_VERSION request: negotiate protocol version and
 * exchange capability flags with the PF.
 */
struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;

	/* Gdma Driver Capability Flags */
	u64 gd_drv_cap_flags1;
	u64 gd_drv_cap_flags2;
	u64 gd_drv_cap_flags3;
	u64 gd_drv_cap_flags4;

	/* Advisory fields */
	u64 drv_ver;
	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	u32 reserved;
	u32 os_ver_major;
	u32 os_ver_minor;
	u32 os_ver_build;
	u32 os_ver_platform;
	u64 reserved_2;
	u8 os_ver_str1[128];
	u8 os_ver_str2[128];
	u8 os_ver_str3[128];
	u8 os_ver_str4[128];
}; /* HW DATA */

/* Response: the agreed protocol version and the PF's capability flags. */
struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;
}; /* HW DATA */
675
/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	u32 status;
	u32 max_sq;
	u32 max_rq;
	u32 max_cq;
	u32 max_eq;
	u32 max_db;
	u32 max_mst;
	u32 max_cq_mod_ctx;
	u32 max_mod_cq;
	u32 max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
#define GDMA_DEV_LIST_SIZE 64
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[GDMA_DEV_LIST_SIZE];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	u32 pdid;
	u32 gpa_mkey;
	u32 db_id;
}; /* HW DATA */

/* GDMA_ALLOCATE_RESOURCE_RANGE */
struct gdma_allocate_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;	/* e.g. GDMA_RESOURCE_DOORBELL_PAGE */
	u32 num_resources;
	u32 alignment;
	u32 allocated_resources;
};

struct gdma_allocate_resource_range_resp {
	struct gdma_resp_hdr hdr;
	u32 allocated_resources;
};

/* GDMA_DESTROY_RESOURCE_RANGE */
struct gdma_destroy_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 allocated_resources;
};
727
/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;	/* enum gdma_queue_type */
	u32 reserved1;
	u32 pdid;
	u32 doolbell_id;	/* sic: misspelling kept; used by other files */
	u64 gdma_region;
	u32 reserved2;
	u32 queue_size;
	u32 log2_throttle_limit;
	u32 eq_pci_msix_index;
	u32 cq_mod_ctx_id;
	u32 cq_parent_eq_id;
	u8  rq_drop_on_overrun;
	u8  rq_err_on_wqe_overflow;
	u8  rq_chain_rec_wqes;
	u8  sq_hw_db;
	u32 reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;	/* enum gdma_queue_type */
	u32 queue_index;
	u32 alloc_res_id_on_creation;
}; /* HW DATA */

/* GDMA_QUERY_HWC_TIMEOUT */
struct gdma_query_hwc_timeout_req {
	struct gdma_req_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

struct gdma_query_hwc_timeout_resp {
	struct gdma_resp_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};
774
/* Access rights for memory regions. */
enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
	GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];	/* Flexible array of page addresses */
}; /* HW DATA */

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	u64 dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;
}; /* HW DATA */

/* Flags for GDMA_CREATE_PD. */
enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
	GDMA_PD_FLAG_ALLOW_GPA_MR = 1,
};
835
/* GDMA_CREATE_PD */
struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	u32 reserved;
};/* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	u64 pd_handle;
	u32 pd_id;
	u32 reserved;
};/* HW DATA */

/* GDMA_DESTROY_PD */
struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
};/* HW DATA */

/* sic: "destory" misspelling kept -- the name is used by other files. */
struct gdma_destory_pd_resp {
	struct gdma_resp_hdr hdr;
};/* HW DATA */

enum gdma_mr_type {
	/*
	 * Guest Physical Address - MRs of this type allow access
	 * to any DMA-mapped memory using bus-logical address
	 */
	GDMA_MR_TYPE_GPA = 1,
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST
	 */
	GDMA_MR_TYPE_GVA = 2,
	/* Guest zero-based address MRs */
	GDMA_MR_TYPE_ZBVA = 4,
	/* Device address MRs */
	GDMA_MR_TYPE_DM = 5,
};
874
/* Driver-side parameters for creating a memory region; the union arm in
 * use is selected by @mr_type.
 */
struct gdma_create_mr_params {
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
		struct {
			u64 dma_region_handle;
			enum gdma_mr_access_flags access_flags;
		} zbva;
		struct {
			u64 dm_handle;
			u64 offset;
			u64 length;
			enum gdma_mr_access_flags access_flags;
		} da;
	};
};

/* GDMA_CREATE_MR. The inner union is __packed because its arms are not
 * naturally aligned relative to each other.
 */
struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	u32 reserved_1;

	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} __packed gva;
		struct {
			u64 dma_region_handle;
			enum gdma_mr_access_flags access_flags;
		} __packed zbva;
		struct {
			u64 dm_handle;
			u64 offset;
			enum gdma_mr_access_flags access_flags;
		} __packed da;
	} __packed;
	u32 reserved_2;
	union {
		struct {
			u64 length;
		} da_ext;
	};
};/* HW DATA */

struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	u64 mr_handle;
	u32 lkey;
	u32 rkey;
};/* HW DATA */

/* GDMA_DESTROY_MR */
struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	u64 mr_handle;
};/* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
};/* HW DATA */

/* GDMA_ALLOC_DM */
struct gdma_alloc_dm_req {
	struct gdma_req_hdr hdr;
	u64 length;
	u32 alignment;
	u32 flags;
}; /* HW Data */

struct gdma_alloc_dm_resp {
	struct gdma_resp_hdr hdr;
	u64 dm_handle;
}; /* HW Data */

/* GDMA_DESTROY_DM */
struct gdma_destroy_dm_req {
	struct gdma_req_hdr hdr;
	u64 dm_handle;
}; /* HW Data */

struct gdma_destroy_dm_resp {
	struct gdma_resp_hdr hdr;
}; /* HW Data */
963
/* Negotiate the GDMA protocol version with the PF; 0 on success. */
int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

/* Post @wqe_req to @wq; fills @wqe_info (may be NULL per callers' use --
 * TODO confirm).
 */
int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

/* Post the work request and ring the queue's doorbell. */
int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);

/* Initialize/free a gdma_resource bitmap of @res_avail bits. */
int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

/* Allocate/free DMA-coherent memory described by @gmi. */
int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

/* Send an HWC request and receive its response; 0 on success. */
int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);
void mana_register_debugfs(void);
void mana_unregister_debugfs(void);

int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event);

int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state);
int mana_gd_resume(struct pci_dev *pdev);

/* Whether @err deserves a log message (rate/severity policy lives in .c). */
bool mana_need_log(struct gdma_context *gc, int err);
1001
1002 #endif /* _GDMA_H */
1003