/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include <net/xdp.h>

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION 0
#define MANA_MINOR_VERSION 1
#define MANA_MICRO_VERSION 1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

enum TRI_STATE {
        TRI_STATE_UNKNOWN = -1,
        TRI_STATE_FALSE = 0,
        TRI_STATE_TRUE = 1
};

/* The number of entries in the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_MAX_SIZE 512
#define MANA_INDIRECT_TABLE_DEF_SIZE 64

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40

#define COMP_ENTRY_SIZE 64

/* This max value for RX buffers is derived from __alloc_pages()'s max page
 * allocation calculation. It allows a maximum of 2^(MAX_ORDER - 1) pages; an
 * RX buffer size beyond this value gets rejected by the __alloc_pages() call.
 */
#define MAX_RX_BUFFERS_PER_QUEUE 8192
#define DEF_RX_BUFFERS_PER_QUEUE 1024
#define MIN_RX_BUFFERS_PER_QUEUE 128

/* This max value for TX buffers is derived, through testing, as the maximum
 * number of allocatable pages supported on the host per guest. TX buffer
 * sizes beyond this value are rejected by the hardware.
 */
#define MAX_TX_BUFFERS_PER_QUEUE 16384
#define DEF_TX_BUFFERS_PER_QUEUE 256
#define MIN_TX_BUFFERS_PER_QUEUE 128

#define EQ_SIZE (8 * MANA_PAGE_SIZE)

#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256

/* Update this count whenever the respective structures are changed */
#define MANA_STATS_RX_COUNT 5
#define MANA_STATS_TX_COUNT 11

struct mana_stats_rx {
        u64 packets;
        u64 bytes;
        u64 xdp_drop;
        u64 xdp_tx;
        u64 xdp_redirect;
        struct u64_stats_sync syncp;
};

struct mana_stats_tx {
        u64 packets;
        u64 bytes;
        u64 xdp_xmit;
        u64 tso_packets;
        u64 tso_bytes;
        u64 tso_inner_packets;
        u64 tso_inner_bytes;
        u64 short_pkt_fmt;
        u64 long_pkt_fmt;
        u64 csum_partial;
        u64 mana_map_err;
        struct u64_stats_sync syncp;
};

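/* Illustrative sketch (not part of this header): readers of the per-queue
 * stats above should take a u64_stats snapshot, so the 64-bit counters are
 * consistent even on 32-bit kernels, roughly:
 *
 *      unsigned int start;
 *      u64 packets, bytes;
 *
 *      do {
 *              start = u64_stats_fetch_begin(&stats->syncp);
 *              packets = stats->packets;
 *              bytes = stats->bytes;
 *      } while (u64_stats_fetch_retry(&stats->syncp, start));
 */
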
struct mana_txq {
        struct gdma_queue *gdma_sq;

        union {
                u32 gdma_txq_id;
                struct {
                        u32 reserved1 : 10;
                        u32 vsq_frame : 14;
                        u32 reserved2 : 8;
                };
        };

        u16 vp_offset;

        struct net_device *ndev;

        /* The SKBs are sent to the HW and we are waiting for the CQEs. */
        struct sk_buff_head pending_skbs;
        struct netdev_queue *net_txq;

        atomic_t pending_sends;

        bool napi_initialized;

        struct mana_stats_tx stats;
};

/* skb data and frags dma mappings */
struct mana_skb_head {
        /* GSO pkts may have 2 SGEs for the linear part */
        dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];

        u32 size[MAX_SKB_FRAGS + 2];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)

enum mana_tx_pkt_format {
        MANA_SHORT_PKT_FMT = 0,
        MANA_LONG_PKT_FMT = 1,
};

struct mana_tx_short_oob {
        u32 pkt_fmt : 2;
        u32 is_outer_ipv4 : 1;
        u32 is_outer_ipv6 : 1;
        u32 comp_iphdr_csum : 1;
        u32 comp_tcp_csum : 1;
        u32 comp_udp_csum : 1;
        u32 supress_txcqe_gen : 1;
        u32 vcq_num : 24;

        u32 trans_off : 10; /* Transport header offset */
        u32 vsq_frame : 14;
        u32 short_vp_offset : 8;
}; /* HW DATA */

struct mana_tx_long_oob {
        u32 is_encap : 1;
        u32 inner_is_ipv6 : 1;
        u32 inner_tcp_opt : 1;
        u32 inject_vlan_pri_tag : 1;
        u32 reserved1 : 12;
        u32 pcp : 3; /* 802.1Q */
        u32 dei : 1; /* 802.1Q */
        u32 vlan_id : 12; /* 802.1Q */

        u32 inner_frame_offset : 10;
        u32 inner_ip_rel_offset : 6;
        u32 long_vp_offset : 12;
        u32 reserved2 : 4;

        u32 reserved3;
        u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
        struct mana_tx_short_oob s_oob;
        struct mana_tx_long_oob l_oob;
}; /* HW DATA */

enum mana_cq_type {
        MANA_CQ_TYPE_RX,
        MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
        CQE_INVALID = 0,
        CQE_RX_OKAY = 1,
        CQE_RX_COALESCED_4 = 2,
        CQE_RX_OBJECT_FENCE = 3,
        CQE_RX_TRUNCATED = 4,

        CQE_TX_OKAY = 32,
        CQE_TX_SA_DROP = 33,
        CQE_TX_MTU_DROP = 34,
        CQE_TX_INVALID_OOB = 35,
        CQE_TX_INVALID_ETH_TYPE = 36,
        CQE_TX_HDR_PROCESSING_ERROR = 37,
        CQE_TX_VF_DISABLED = 38,
        CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39,
        CQE_TX_VPORT_DISABLED = 40,
        CQE_TX_VLAN_TAGGING_VIOLATION = 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
        u32 cqe_type : 6;
        u32 client_type : 2;
        u32 vendor_err : 24;
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4 BIT(0)
#define NDIS_HASH_TCP_IPV4 BIT(1)
#define NDIS_HASH_UDP_IPV4 BIT(2)
#define NDIS_HASH_IPV6 BIT(3)
#define NDIS_HASH_TCP_IPV6 BIT(4)
#define NDIS_HASH_UDP_IPV6 BIT(5)
#define NDIS_HASH_IPV6_EX BIT(6)
#define NDIS_HASH_TCP_IPV6_EX BIT(7)
#define NDIS_HASH_UDP_IPV6_EX BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4                                                         \
        (NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \
         NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)

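/* Illustrative sketch (mirroring what a MANA RX path would do with these
 * masks, not a definition in this header): the 9-bit hash type reported in
 * the RX completion OOB selects the hash level passed to the stack:
 *
 *      if (rx_hashtype & MANA_HASH_L4)
 *              skb_set_hash(skb, pkt_hash, PKT_HASH_TYPE_L4);
 *      else if (rx_hashtype & MANA_HASH_L3)
 *              skb_set_hash(skb, pkt_hash, PKT_HASH_TYPE_L3);
 *      else
 *              skb_set_hash(skb, pkt_hash, PKT_HASH_TYPE_NONE);
 */
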
struct mana_rxcomp_perpkt_info {
        u32 pkt_len : 16;
        u32 reserved1 : 16;
        u32 reserved2;
        u32 pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
        struct mana_cqe_header cqe_hdr;

        u32 rx_vlan_id : 12;
        u32 rx_vlantag_present : 1;
        u32 rx_outer_iphdr_csum_succeed : 1;
        u32 rx_outer_iphdr_csum_fail : 1;
        u32 reserved1 : 1;
        u32 rx_hashtype : 9;
        u32 rx_iphdr_csum_succeed : 1;
        u32 rx_iphdr_csum_fail : 1;
        u32 rx_tcp_csum_succeed : 1;
        u32 rx_tcp_csum_fail : 1;
        u32 rx_udp_csum_succeed : 1;
        u32 rx_udp_csum_fail : 1;
        u32 reserved2 : 1;

        struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

        u32 rx_wqe_offset;
}; /* HW DATA */

struct mana_tx_comp_oob {
        struct mana_cqe_header cqe_hdr;

        u32 tx_data_offset;

        u32 tx_sgl_offset : 5;
        u32 tx_wqe_offset : 27;

        u32 reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
        struct gdma_queue *gdma_cq;

        /* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
        u32 gdma_id;

        /* Type of the CQ: TX or RX */
        enum mana_cq_type type;

        /* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
         * Non-NULL if and only if the type is MANA_CQ_TYPE_RX.
         */
        struct mana_rxq *rxq;

        /* Pointer to the mana_txq that is pushing TX CQEs to the queue.
         * Non-NULL if and only if the type is MANA_CQ_TYPE_TX.
         */
        struct mana_txq *txq;

        /* Buffer which the CQ handler can copy the CQEs into. */
        struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

        /* NAPI data */
        struct napi_struct napi;
        int work_done;
        int work_done_since_doorbell;
        int budget;
};

struct mana_recv_buf_oob {
        /* A valid GDMA work request representing the data buffer. */
        struct gdma_wqe_request wqe_req;

        void *buf_va;
        bool from_pool; /* allocated from a page pool */

        /* SGL of the buffer to be posted as part of the work request. */
        u32 num_sge;
        struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

        /* Required to store the result of mana_gd_post_work_request().
         * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
         * work queue when the WQE is consumed.
         */
        struct gdma_posted_wqe_info wqe_inf;
};

#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
                        + ETH_HLEN)

#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)

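/* Illustrative arithmetic (assuming a 4096-byte PAGE_SIZE, the 256-byte
 * XDP_PACKET_HEADROOM, and SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 * == 320, which is typical for x86-64 but config-dependent): each XDP frame
 * must fit in one page together with the headroom and shared-info tail, so
 *
 *      MANA_RXBUF_PAD   = 320 + ETH_HLEN(14)   = 334
 *      MANA_XDP_MTU_MAX = 4096 - 334 - 256     = 3506
 */
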
struct mana_rxq {
        struct gdma_queue *gdma_rq;
        /* Cache the gdma receive queue id */
        u32 gdma_id;

        /* Index of RQ in the vPort, not gdma receive queue id */
        u32 rxq_idx;

        u32 datasize;
        u32 alloc_size;
        u32 headroom;

        mana_handle_t rxobj;

        struct mana_cq rx_cq;

        struct completion fence_event;

        struct net_device *ndev;

        /* Total number of receive buffers to be allocated */
        u32 num_rx_buf;

        u32 buf_index;

        struct mana_stats_rx stats;

        struct bpf_prog __rcu *bpf_prog;
        struct xdp_rxq_info xdp_rxq;
        void *xdp_save_va; /* for reusing */
        bool xdp_flush;
        int xdp_rc; /* XDP redirect return code */

        struct page_pool *page_pool;
        struct dentry *mana_rx_debugfs;

        /* MUST BE THE LAST MEMBER:
         * Each receive buffer has an associated mana_recv_buf_oob.
         */
        struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf);
};

struct mana_tx_qp {
        struct mana_txq txq;

        struct mana_cq tx_cq;

        mana_handle_t tx_object;

        struct dentry *mana_tx_debugfs;
};

struct mana_ethtool_stats {
        u64 stop_queue;
        u64 wake_queue;
        u64 hc_rx_discards_no_wqe;
        u64 hc_rx_err_vport_disabled;
        u64 hc_rx_bytes;
        u64 hc_rx_ucast_pkts;
        u64 hc_rx_ucast_bytes;
        u64 hc_rx_bcast_pkts;
        u64 hc_rx_bcast_bytes;
        u64 hc_rx_mcast_pkts;
        u64 hc_rx_mcast_bytes;
        u64 hc_tx_err_gf_disabled;
        u64 hc_tx_err_vport_disabled;
        u64 hc_tx_err_inval_vportoffset_pkt;
        u64 hc_tx_err_vlan_enforcement;
        u64 hc_tx_err_eth_type_enforcement;
        u64 hc_tx_err_sa_enforcement;
        u64 hc_tx_err_sqpdid_enforcement;
        u64 hc_tx_err_cqpdid_enforcement;
        u64 hc_tx_err_mtu_violation;
        u64 hc_tx_err_inval_oob;
        u64 hc_tx_bytes;
        u64 hc_tx_ucast_pkts;
        u64 hc_tx_ucast_bytes;
        u64 hc_tx_bcast_pkts;
        u64 hc_tx_bcast_bytes;
        u64 hc_tx_mcast_pkts;
        u64 hc_tx_mcast_bytes;
        u64 hc_tx_err_gdma;
        u64 tx_cqe_err;
        u64 tx_cqe_unknown_type;
        u64 rx_coalesced_err;
        u64 rx_cqe_unknown_type;
};

struct mana_context {
        struct gdma_dev *gdma_dev;

        u16 num_ports;
        u8 bm_hostmode;

        struct mana_eq *eqs;
        struct dentry *mana_eqs_debugfs;

        struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};

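/* Illustrative sketch (assuming, as in the MANA driver proper, that each
 * port's mana_port_context below lives in the net_device private area):
 *
 *      struct net_device *ndev = ac->ports[port_idx];
 *      struct mana_port_context *apc = netdev_priv(ndev);
 */
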
struct mana_port_context {
        struct mana_context *ac;
        struct net_device *ndev;

        u8 mac_addr[ETH_ALEN];

        enum TRI_STATE rss_state;

        mana_handle_t default_rxobj;
        bool tx_shortform_allowed;
        u16 tx_vp_offset;

        struct mana_tx_qp *tx_qp;

        /* Indirection Table for RX & TX. The values are queue indexes */
        u32 *indir_table;
        u32 indir_table_sz;

        /* Indirection table containing RxObject Handles */
        mana_handle_t *rxobj_table;

        /* Hash key used by the NIC */
        u8 hashkey[MANA_HASH_KEY_SIZE];

        /* This points to an array of num_queues of RQ pointers. */
        struct mana_rxq **rxqs;

        /* pre-allocated rx buffer array */
        void **rxbufs_pre;
        dma_addr_t *das_pre;
        int rxbpre_total;
        u32 rxbpre_datasize;
        u32 rxbpre_alloc_size;
        u32 rxbpre_headroom;

        struct bpf_prog *bpf_prog;

        /* The driver creates num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs. */
        unsigned int max_queues;
        unsigned int num_queues;

        unsigned int rx_queue_size;
        unsigned int tx_queue_size;

        mana_handle_t port_handle;
        mana_handle_t pf_filter_handle;

        /* Mutex for sharing access to vport_use_count */
        struct mutex vport_mutex;
        int vport_use_count;

        u16 port_idx;

        bool port_is_up;
        bool port_st_save; /* Saved port state */

        struct mana_ethtool_stats eth_stats;

        /* Debugfs */
        struct dentry *mana_port_debugfs;
};

netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
                    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

int mana_rdma_probe(struct gdma_dev *gd);
void mana_rdma_remove(struct gdma_dev *gd);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
                  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
                 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
void mana_query_gf_stats(struct mana_port_context *apc);
int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);

extern const struct ethtool_ops mana_ethtool_ops;
extern struct dentry *mana_debugfs_root;

/* A CQ can be created without being associated with any EQ */
#define GDMA_CQ_NO_EQ 0xffff

struct mana_obj_spec {
        u32 queue_index;
        u64 gdma_region;
        u32 queue_size;
        u32 attached_eq;
        u32 modr_ctx_id;
};

enum mana_command_code {
        MANA_QUERY_DEV_CONFIG = 0x20001,
        MANA_QUERY_GF_STAT = 0x20002,
        MANA_CONFIG_VPORT_TX = 0x20003,
        MANA_CREATE_WQ_OBJ = 0x20004,
        MANA_DESTROY_WQ_OBJ = 0x20005,
        MANA_FENCE_RQ = 0x20006,
        MANA_CONFIG_VPORT_RX = 0x20007,
        MANA_QUERY_VPORT_CONFIG = 0x20008,

        /* Privileged commands for the PF mode */
        MANA_REGISTER_FILTER = 0x28000,
        MANA_DEREGISTER_FILTER = 0x28001,
        MANA_REGISTER_HW_PORT = 0x28003,
        MANA_DEREGISTER_HW_PORT = 0x28004,
};

/* Query Device Configuration */
struct mana_query_device_cfg_req {
        struct gdma_req_hdr hdr;

        /* MANA NIC driver capability flags */
        u64 mn_drv_cap_flags1;
        u64 mn_drv_cap_flags2;
        u64 mn_drv_cap_flags3;
        u64 mn_drv_cap_flags4;

        u32 proto_major_ver;
        u32 proto_minor_ver;
        u32 proto_micro_ver;

        u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
        struct gdma_resp_hdr hdr;

        u64 pf_cap_flags1;
        u64 pf_cap_flags2;
        u64 pf_cap_flags3;
        u64 pf_cap_flags4;

        u16 max_num_vports;
        u8 bm_hostmode; /* response v3: Bare Metal Host Mode */
        u8 reserved;
        u32 max_num_eqs;

        /* response v2: */
        u16 adapter_mtu;
        u16 reserved2;
        u32 reserved3;
}; /* HW DATA */

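/* Illustrative sketch (using the GDMA helpers declared in gdma.h; error
 * handling elided): a device-configuration query advertises the driver's
 * protocol version and is sent over the HW channel:
 *
 *      struct mana_query_device_cfg_resp resp = {};
 *      struct mana_query_device_cfg_req req = {};
 *
 *      mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
 *                           sizeof(req), sizeof(resp));
 *      req.proto_major_ver = MANA_MAJOR_VERSION;
 *      req.proto_minor_ver = MANA_MINOR_VERSION;
 *      req.proto_micro_ver = MANA_MICRO_VERSION;
 *
 *      err = mana_gd_send_request(gc, sizeof(req), &req,
 *                                 sizeof(resp), &resp);
 */
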
/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
        struct gdma_req_hdr hdr;
        u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
        struct gdma_resp_hdr hdr;
        u32 max_num_sq;
        u32 max_num_rq;
        u32 num_indirection_ent;
        u32 reserved1;
        u8 mac_addr[6];
        u8 reserved2[2];
        mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
        struct gdma_req_hdr hdr;
        mana_handle_t vport;
        u32 pdid;
        u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
        struct gdma_resp_hdr hdr;
        u16 tx_vport_offset;
        u8 short_form_allowed;
        u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
        struct gdma_req_hdr hdr;
        mana_handle_t vport;
        u32 wq_type;
        u32 reserved;
        u64 wq_gdma_region;
        u64 cq_gdma_region;
        u32 wq_size;
        u32 cq_size;
        u32 cq_moderation_ctx_id;
        u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
        struct gdma_resp_hdr hdr;
        u32 wq_id;
        u32 cq_id;
        mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
        struct gdma_req_hdr hdr;
        u32 wq_type;
        u32 reserved;
        mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
        struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
        struct gdma_req_hdr hdr;
        mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
        struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Query GF stats */
struct mana_query_gf_stat_req {
        struct gdma_req_hdr hdr;
        u64 req_stats;
}; /* HW DATA */

struct mana_query_gf_stat_resp {
        struct gdma_resp_hdr hdr;
        u64 reported_stats;
        /* rx errors/discards */
        u64 rx_discards_nowqe;
        u64 rx_err_vport_disabled;
        /* rx bytes/packets */
        u64 hc_rx_bytes;
        u64 hc_rx_ucast_pkts;
        u64 hc_rx_ucast_bytes;
        u64 hc_rx_bcast_pkts;
        u64 hc_rx_bcast_bytes;
        u64 hc_rx_mcast_pkts;
        u64 hc_rx_mcast_bytes;
        /* tx errors */
        u64 tx_err_gf_disabled;
        u64 tx_err_vport_disabled;
        u64 tx_err_inval_vport_offset_pkt;
        u64 tx_err_vlan_enforcement;
        u64 tx_err_ethtype_enforcement;
        u64 tx_err_SA_enforcement;
        u64 tx_err_SQPDID_enforcement;
        u64 tx_err_CQPDID_enforcement;
        u64 tx_err_mtu_violation;
        u64 tx_err_inval_oob;
        /* tx bytes/packets */
        u64 hc_tx_bytes;
        u64 hc_tx_ucast_pkts;
        u64 hc_tx_ucast_bytes;
        u64 hc_tx_bcast_pkts;
        u64 hc_tx_bcast_bytes;
        u64 hc_tx_mcast_pkts;
        u64 hc_tx_mcast_bytes;
        /* tx error */
        u64 tx_err_gdma;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
        struct gdma_req_hdr hdr;
        mana_handle_t vport;
        u16 num_indir_entries;
        u16 indir_tab_offset;
        u32 rx_enable;
        u32 rss_enable;
        u8 update_default_rxobj;
        u8 update_hashkey;
        u8 update_indir_tab;
        u8 reserved;
        mana_handle_t default_rxobj;
        u8 hashkey[MANA_HASH_KEY_SIZE];
        u8 cqe_coalescing_enable;
        u8 reserved2[7];
        mana_handle_t indir_tab[] __counted_by(num_indir_entries);
}; /* HW DATA */

struct mana_cfg_rx_steer_resp {
        struct gdma_resp_hdr hdr;
}; /* HW DATA */

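/* Illustrative sketch (helpers from linux/overflow.h and linux/stddef.h;
 * error handling elided): because indir_tab is a flexible array counted by
 * num_indir_entries, the steering request must be sized dynamically:
 *
 *      req_buf_size = struct_size(req, indir_tab, num_indir_entries);
 *      req = kzalloc(req_buf_size, GFP_KERNEL);
 *      req->num_indir_entries = num_indir_entries;
 *      req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
 *                                       indir_tab);
 */
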
/* Register HW vPort */
struct mana_register_hw_vport_req {
        struct gdma_req_hdr hdr;
        u16 attached_gfid;
        u8 is_pf_default_vport;
        u8 reserved1;
        u8 allow_all_ether_types;
        u8 reserved2;
        u8 reserved3;
        u8 reserved4;
}; /* HW DATA */

struct mana_register_hw_vport_resp {
        struct gdma_resp_hdr hdr;
        mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
        struct gdma_req_hdr hdr;
        mana_handle_t hw_vport_handle;
}; /* HW DATA */

struct mana_deregister_hw_vport_resp {
        struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register filter */
struct mana_register_filter_req {
        struct gdma_req_hdr hdr;
        mana_handle_t vport;
        u8 mac_addr[6];
        u8 reserved1;
        u8 reserved2;
        u8 reserved3;
        u8 reserved4;
        u16 reserved5;
        u32 reserved6;
        u32 reserved7;
        u32 reserved8;
}; /* HW DATA */

struct mana_register_filter_resp {
        struct gdma_resp_hdr hdr;
        mana_handle_t filter_handle;
}; /* HW DATA */

/* Deregister filter */
struct mana_deregister_filter_req {
        struct gdma_req_hdr hdr;
        mana_handle_t filter_handle;
}; /* HW DATA */

struct mana_deregister_filter_resp {
        struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Requested GF stats Flags */
/* Rx discards/Errors */
#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE 0x0000000000000001
#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED 0x0000000000000002
/* Rx bytes/pkts */
#define STATISTICS_FLAGS_HC_RX_BYTES 0x0000000000000004
#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS 0x0000000000000008
#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES 0x0000000000000010
#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS 0x0000000000000020
#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES 0x0000000000000040
#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS 0x0000000000000080
#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES 0x0000000000000100
/* Tx errors */
#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED 0x0000000000000200
#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED 0x0000000000000400
#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS \
        0x0000000000000800
#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT 0x0000000000001000
#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT \
        0x0000000000002000
#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT 0x0000000000004000
#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT 0x0000000000008000
#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT 0x0000000000010000
#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION 0x0000000000020000
#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB 0x0000000000040000
/* Tx bytes/pkts */
#define STATISTICS_FLAGS_HC_TX_BYTES 0x0000000000080000
#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS 0x0000000000100000
#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES 0x0000000000200000
#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS 0x0000000000400000
#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES 0x0000000000800000
#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS 0x0000000001000000
#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES 0x0000000002000000
/* Tx error */
#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR 0x0000000004000000

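/* Illustrative sketch: a GF-stats query selects counters by OR-ing the
 * corresponding STATISTICS_FLAGS_* bits into mana_query_gf_stat_req's
 * req_stats field, e.g.:
 *
 *      req.req_stats = STATISTICS_FLAGS_HC_RX_BYTES |
 *                      STATISTICS_FLAGS_HC_TX_BYTES |
 *                      STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR;
 *
 * The reported_stats field of the response then reflects which of the
 * requested counters the device actually filled in.
 */
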
#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)

struct mana_tx_package {
        struct gdma_wqe_request wqe_req;
        struct gdma_sge sgl_array[5];
        struct gdma_sge *sgl_ptr;

        struct mana_tx_oob tx_oob;

        struct gdma_posted_wqe_info wqe_info;
};

int mana_create_wq_obj(struct mana_port_context *apc,
                       mana_handle_t vport,
                       u32 wq_type, struct mana_obj_spec *wq_spec,
                       struct mana_obj_spec *cq_spec,
                       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
                         mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
                   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);

struct net_device *mana_get_primary_netdev(struct mana_context *ac,
                                           u32 port_index,
                                           netdevice_tracker *tracker);
#endif /* _MANA_H */