/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES 0x00000105

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION = 1,
	GDMA_QUERY_MAX_RESOURCES = 2,
	GDMA_LIST_DEVICES = 3,
	GDMA_REGISTER_DEVICE = 4,
	GDMA_DEREGISTER_DEVICE = 5,
	GDMA_GENERATE_TEST_EQE = 10,
	GDMA_CREATE_QUEUE = 12,
	GDMA_DISABLE_QUEUE = 13,
	GDMA_ALLOCATE_RESOURCE_RANGE = 22,
	GDMA_DESTROY_RESOURCE_RANGE = 24,
	GDMA_CREATE_DMA_REGION = 25,
	GDMA_DMA_REGION_ADD_PAGES = 26,
	GDMA_DESTROY_DMA_REGION = 27,
	GDMA_CREATE_PD = 29,
	GDMA_DESTROY_PD = 30,
	GDMA_CREATE_MR = 31,
	GDMA_DESTROY_MR = 32,
	GDMA_QUERY_HWC_TIMEOUT = 84, /* 0x54 */
};

#define GDMA_RESOURCE_DOORBELL_PAGE 27

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE = 0,
	GDMA_WR_OOB_IN_SGL = BIT(0),
	GDMA_WR_PAD_BY_SGE0 = BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION = 3,
	GDMA_EQE_TEST_EVENT = 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
	GDMA_EQE_HWC_INIT_DATA = 130,
	GDMA_EQE_HWC_INIT_DONE = 131,
	GDMA_EQE_HWC_SOC_RECONFIG = 132,
	GDMA_EQE_HWC_SOC_RECONFIG_DATA = 133,
	GDMA_EQE_HWC_SOC_SERVICE = 134,
	GDMA_EQE_RNIC_QP_FATAL = 176,
};

enum {
	GDMA_DEVICE_NONE = 0,
	GDMA_DEVICE_HWC = 1,
	GDMA_DEVICE_MANA = 2,
	GDMA_DEVICE_MANA_IB = 3,
};

enum gdma_service_type {
	GDMA_SERVICE_TYPE_NONE = 0,
	GDMA_SERVICE_TYPE_RDMA_SUSPEND = 1,
	GDMA_SERVICE_TYPE_RDMA_RESUME = 2,
};

struct mana_service_work {
	struct work_struct work;
	struct gdma_dev *gdma_dev;
	enum gdma_service_type event;
};

struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};

union gdma_doorbell_entry {
	u64 as_uint64;

	struct {
		u64 id : 24;
		u64 reserved : 8;
		u64 tail_ptr : 31;
		u64 arm : 1;
	} cq;

	struct {
		u64 id : 24;
		u64 wqe_cnt : 8;
		u64 tail_ptr : 32;
	} rq;

	struct {
		u64 id : 24;
		u64 reserved : 8;
		u64 tail_ptr : 32;
	} sq;

	struct {
		u64 id : 16;
		u64 reserved : 16;
		u64 tail_ptr : 31;
		u64 arm : 1;
	} eq;
}; /* HW DATA */
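
/* Illustrative sketch (editorial addition, not part of the driver): ringing a
 * doorbell means composing one of the layouts above and writing the 64-bit
 * value to the queue's doorbell address. The helper name and arguments below
 * are hypothetical; the in-tree logic is behind mana_gd_wq_ring_doorbell()
 * and mana_gd_ring_cq(), declared later in this header.
 *
 *	static void example_arm_cq(void __iomem *db_addr, u32 cq_id,
 *				   u32 tail_ptr, u8 arm_bit)
 *	{
 *		union gdma_doorbell_entry e = {};
 *
 *		e.cq.id = cq_id;
 *		e.cq.tail_ptr = tail_ptr;
 *		e.cq.arm = arm_bit;
 *
 *		wmb();	// Make queue updates visible before the doorbell.
 *		writeq(e.as_uint64, db_addr);
 *	}
 */
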
struct gdma_msg_hdr {
	u32 hdr_type;
	u32 msg_type;
	u16 msg_version;
	u16 hwc_msg_id;
	u32 msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			u16 type;
			u16 instance;
		};

		u32 as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;
	u32 reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2
#define GDMA_MESSAGE_V3 3
#define GDMA_MESSAGE_V4 4

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}
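
/* Illustrative sketch (editorial addition, not part of the driver): the
 * request/response pattern used throughout this file. The caller zeroes a
 * request and a response, initializes the header with the message code and
 * both sizes, posts it with mana_gd_send_request() (declared later in this
 * header), and then checks both the return value and resp.hdr.status.
 * The function name below is hypothetical.
 *
 *	static int example_query_max_resources(struct gdma_context *gc)
 *	{
 *		struct gdma_query_max_resources_resp resp = {};
 *		struct gdma_general_req req = {};
 *		int err;
 *
 *		mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
 *				     sizeof(req), sizeof(resp));
 *
 *		err = mana_gd_send_request(gc, sizeof(req), &req,
 *					   sizeof(resp), &resp);
 *		if (err || resp.hdr.status)
 *			return err ? err : -EPROTO;
 *
 *		// resp.max_sq, resp.max_rq, resp.max_eq, ... are now valid.
 *		return 0;
 *	}
 */
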
/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	u64 address;
	u32 mem_key;
	u32 size;
}; /* HW DATA */

struct gdma_wqe_request {
	struct gdma_sge *sgl;
	u32 num_sge;

	u32 inline_oob_size;
	const void *inline_oob_data;

	u32 flags;
	u32 client_data_unit;
};

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	u64 dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;
	u32 doorbell;
	u32 gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;

	struct auxiliary_device *adev;
	bool is_suspended;
	bool rdma_teardown;
};

/* MANA_PAGE_SIZE is the DMA unit */
#define MANA_PAGE_SHIFT 12
#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)

/* Required by HW */
#define MANA_MIN_QSIZE MANA_PAGE_SIZE

#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32

#define INVALID_PDID UINT_MAX
#define INVALID_DOORBELL UINT_MAX
#define INVALID_MEM_KEY UINT_MAX
#define INVALID_QUEUE_ID UINT_MAX
#define INVALID_PCI_MSIX_INDEX UINT_MAX

struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;
	bool is_sq;
};

struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8 type;
};

struct gdma_queue;

struct mana_eq {
	struct gdma_queue *eq;
	struct dentry *mana_eq_debugfs;
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
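
/* Illustrative example (editorial addition): since the SQ/RQ 'head' counts
 * 32-byte Basic Units, posting a 96-byte WQE advances 'head' by
 * 96 / GDMA_WQE_BU_SIZE = 3, and the corresponding wqe_size_in_bu reported
 * in struct gdma_posted_wqe_info would also be 3.
 */
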
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	u32 id;

	struct gdma_mem_info mem_info;

	void *queue_mem_ptr;
	u32 queue_size;

	bool monitor_avl_buf;

	u32 head;
	u32 tail;
	struct list_head entry;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};

struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;
			unsigned int msix_index;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;
		} cq;
	};
};

#define MANA_IRQ_NAME_SZ 32

struct gdma_irq_context {
	void (*handler)(void *arg);
	/* Protect the eq_list */
	spinlock_t lock;
	struct list_head eq_list;
	char name[MANA_IRQ_NAME_SZ];
};

struct gdma_context {
	struct device *dev;
	struct dentry *mana_pci_debugfs;

	/* Per-vPort max number of queues */
	unsigned int max_num_queues;
	unsigned int max_num_msix;
	unsigned int num_msix_usable;
	struct gdma_irq_context *irq_contexts;

	/* L2 MTU */
	u16 adapter_mtu;

	/* This maps a CQ index to the queue structure. */
	unsigned int max_num_cqs;
	struct gdma_queue **cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex eq_test_event_mutex;
	struct completion eq_test_event;
	u32 test_event_eq_id;

	bool is_pf;
	phys_addr_t bar0_pa;
	void __iomem *bar0_va;
	void __iomem *shm_base;
	void __iomem *db_page_base;
	phys_addr_t phys_db_page_base;
	u32 db_page_size;
	int numa_node;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev hwc;

	/* Azure network adapter */
	struct gdma_dev mana;

	/* Azure RDMA adapter */
	struct gdma_dev mana_ib;

	u64 pf_cap_flags1;

	struct workqueue_struct *service_wq;
};

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp,
		    int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);
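
/* Illustrative sketch (editorial addition, not part of the driver): a CQ
 * consumer typically drains completions in batches and then re-arms the CQ
 * so the hardware raises a new EQE when further CQEs arrive.
 * EXAMPLE_CQE_BATCH and the function name are hypothetical.
 *
 *	#define EXAMPLE_CQE_BATCH 8
 *
 *	static void example_drain_cq(struct gdma_queue *cq)
 *	{
 *		struct gdma_comp comps[EXAMPLE_CQE_BATCH];
 *		int i, n;
 *
 *		do {
 *			n = mana_gd_poll_cq(cq, comps, EXAMPLE_CQE_BATCH);
 *			// n < 0 would indicate an error; not handled here.
 *			for (i = 0; i < n; i++) {
 *				// comps[i].wq_num and comps[i].is_sq identify
 *				// the work queue; cqe_data[] carries the
 *				// device-specific completion OOB.
 *			}
 *		} while (n == EXAMPLE_CQE_BATCH);
 *
 *		mana_gd_ring_cq(cq, SET_ARM_BIT);
 *	}
 */
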
struct gdma_wqe {
	u32 reserved :24;
	u32 last_vbytes :8;

	union {
		u32 flags;

		struct {
			u32 num_sge :8;
			u32 inline_oob_size_div4 :3;
			u32 client_oob_in_sgl :1;
			u32 reserved1 :4;
			u32 client_data_unit :14;
			u32 reserved2 :2;
		};
	};
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

#define MAX_TX_WQE_SGL_ENTRIES ((GDMA_MAX_SQE_SIZE - \
			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
			sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES ((GDMA_MAX_RQE_SIZE - \
			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))

struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num : 24;
			u32 is_sq : 1;
			u32 reserved : 4;
			u32 owner_bits : 3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type : 8;
		u32 reserved1 : 8;
		u32 client_id : 2;
		u32 reserved2 : 11;
		u32 owner_bits : 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))

struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;
}; /* HW DATA */

#define GDMA_REG_DB_PAGE_OFFSET 8
#define GDMA_REG_DB_PAGE_SIZE 0x10
#define GDMA_REG_SHM_OFFSET 0x18

#define GDMA_PF_REG_DB_PAGE_SIZE 0xD0
#define GDMA_PF_REG_DB_PAGE_OFF 0xC8
#define GDMA_PF_REG_SHM_OFF 0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF 0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1 = 1,
	GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
 * so the driver is able to reliably support features like busy_poll.
572 */ 573 #define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2) 574 #define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3) 575 #define GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB BIT(4) 576 #define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5) 577 578 /* Driver can handle holes (zeros) in the device list */ 579 #define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11) 580 581 #define GDMA_DRV_CAP_FLAGS1 \ 582 (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \ 583 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \ 584 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \ 585 GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \ 586 GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP) 587 588 #define GDMA_DRV_CAP_FLAGS2 0 589 590 #define GDMA_DRV_CAP_FLAGS3 0 591 592 #define GDMA_DRV_CAP_FLAGS4 0 593 594 struct gdma_verify_ver_req { 595 struct gdma_req_hdr hdr; 596 597 /* Mandatory fields required for protocol establishment */ 598 u64 protocol_ver_min; 599 u64 protocol_ver_max; 600 601 /* Gdma Driver Capability Flags */ 602 u64 gd_drv_cap_flags1; 603 u64 gd_drv_cap_flags2; 604 u64 gd_drv_cap_flags3; 605 u64 gd_drv_cap_flags4; 606 607 /* Advisory fields */ 608 u64 drv_ver; 609 u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */ 610 u32 reserved; 611 u32 os_ver_major; 612 u32 os_ver_minor; 613 u32 os_ver_build; 614 u32 os_ver_platform; 615 u64 reserved_2; 616 u8 os_ver_str1[128]; 617 u8 os_ver_str2[128]; 618 u8 os_ver_str3[128]; 619 u8 os_ver_str4[128]; 620 }; /* HW DATA */ 621 622 struct gdma_verify_ver_resp { 623 struct gdma_resp_hdr hdr; 624 u64 gdma_protocol_ver; 625 u64 pf_cap_flags1; 626 u64 pf_cap_flags2; 627 u64 pf_cap_flags3; 628 u64 pf_cap_flags4; 629 }; /* HW DATA */ 630 631 /* GDMA_QUERY_MAX_RESOURCES */ 632 struct gdma_query_max_resources_resp { 633 struct gdma_resp_hdr hdr; 634 u32 status; 635 u32 max_sq; 636 u32 max_rq; 637 u32 max_cq; 638 u32 max_eq; 639 u32 max_db; 640 u32 max_mst; 641 u32 max_cq_mod_ctx; 642 u32 max_mod_cq; 643 u32 max_msix; 644 }; /* HW DATA */ 645 646 /* GDMA_LIST_DEVICES */ 647 #define GDMA_DEV_LIST_SIZE 64 648 struct gdma_list_devices_resp { 649 struct gdma_resp_hdr hdr; 650 u32 num_of_devs; 651 u32 reserved; 652 struct gdma_dev_id devs[GDMA_DEV_LIST_SIZE]; 653 }; /* HW DATA */ 654 655 /* GDMA_REGISTER_DEVICE */ 656 struct gdma_register_device_resp { 657 struct gdma_resp_hdr hdr; 658 u32 pdid; 659 u32 gpa_mkey; 660 u32 db_id; 661 }; /* HW DATA */ 662 663 struct gdma_allocate_resource_range_req { 664 struct gdma_req_hdr hdr; 665 u32 resource_type; 666 u32 num_resources; 667 u32 alignment; 668 u32 allocated_resources; 669 }; 670 671 struct gdma_allocate_resource_range_resp { 672 struct gdma_resp_hdr hdr; 673 u32 allocated_resources; 674 }; 675 676 struct gdma_destroy_resource_range_req { 677 struct gdma_req_hdr hdr; 678 u32 resource_type; 679 u32 num_resources; 680 u32 allocated_resources; 681 }; 682 683 /* GDMA_CREATE_QUEUE */ 684 struct gdma_create_queue_req { 685 struct gdma_req_hdr hdr; 686 u32 type; 687 u32 reserved1; 688 u32 pdid; 689 u32 doolbell_id; 690 u64 gdma_region; 691 u32 reserved2; 692 u32 queue_size; 693 u32 log2_throttle_limit; 694 u32 eq_pci_msix_index; 695 u32 cq_mod_ctx_id; 696 u32 cq_parent_eq_id; 697 u8 rq_drop_on_overrun; 698 u8 rq_err_on_wqe_overflow; 699 u8 rq_chain_rec_wqes; 700 u8 sq_hw_db; 701 u32 reserved3; 702 }; /* HW DATA */ 703 704 struct gdma_create_queue_resp { 705 struct gdma_resp_hdr hdr; 706 u32 queue_index; 707 }; /* HW DATA */ 708 709 /* GDMA_DISABLE_QUEUE */ 710 struct gdma_disable_queue_req { 711 struct gdma_req_hdr hdr; 712 
/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 queue_index;
	u32 alloc_res_id_on_creation;
}; /* HW DATA */

/* GDMA_QUERY_HWC_TIMEOUT */
struct gdma_query_hwc_timeout_req {
	struct gdma_req_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

struct gdma_query_hwc_timeout_resp {
	struct gdma_resp_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
	GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];
}; /* HW DATA */

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	u64 dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;
}; /* HW DATA */

enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
	GDMA_PD_FLAG_ALLOW_GPA_MR = 1,
};

struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	u32 reserved;
};/* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	u64 pd_handle;
	u32 pd_id;
	u32 reserved;
};/* HW DATA */

struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
};/* HW DATA */

struct gdma_destory_pd_resp {
	struct gdma_resp_hdr hdr;
};/* HW DATA */

enum gdma_mr_type {
	/*
	 * Guest Physical Address - MRs of this type allow access
	 * to any DMA-mapped memory using bus-logical address
	 */
	GDMA_MR_TYPE_GPA = 1,
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST
	 */
	GDMA_MR_TYPE_GVA = 2,
	/* Guest zero-based address MRs */
	GDMA_MR_TYPE_ZBVA = 4,
};

struct gdma_create_mr_params {
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
		struct {
			u64 dma_region_handle;
			enum gdma_mr_access_flags access_flags;
		} zbva;
	};
};

struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	u32 reserved_1;

	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
		struct {
			u64 dma_region_handle;
			enum gdma_mr_access_flags access_flags;
		} zbva;
	};
	u32 reserved_2;
};/* HW DATA */
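
/* Illustrative sketch (editorial addition, not part of the driver): the
 * request mirrors struct gdma_create_mr_params; the caller copies the union
 * member that matches mr_type, e.g. for a GVA MR ('params' is hypothetical):
 *
 *	struct gdma_create_mr_request req = {};
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
 *			     sizeof(struct gdma_create_mr_response));
 *	req.pd_handle = params->pd_handle;
 *	req.mr_type = params->mr_type;
 *	req.gva.dma_region_handle = params->gva.dma_region_handle;
 *	req.gva.virtual_address = params->gva.virtual_address;
 *	req.gva.access_flags = params->gva.access_flags;
 */
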
struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	u64 mr_handle;
	u32 lkey;
	u32 rkey;
};/* HW DATA */

struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	u64 mr_handle;
};/* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
};/* HW DATA */

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);
void mana_register_debugfs(void);
void mana_unregister_debugfs(void);

int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event);

#endif /* _GDMA_H */