/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include <net/xdp.h>
#include <net/net_shaper.h>

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION 0
#define MANA_MINOR_VERSION 1
#define MANA_MICRO_VERSION 1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* The number of entries in the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_MAX_SIZE 512
#define MANA_INDIRECT_TABLE_DEF_SIZE 64

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40

#define COMP_ENTRY_SIZE 64

/* This max value for RX buffers is derived from __alloc_pages()'s max page
 * allocation calculation, which allows at most 2^(MAX_ORDER - 1) pages. RX
 * buffer sizes beyond this value are rejected by the __alloc_pages() call.
 */
#define MAX_RX_BUFFERS_PER_QUEUE 8192
#define DEF_RX_BUFFERS_PER_QUEUE 1024
#define MIN_RX_BUFFERS_PER_QUEUE 128

/* This max value for TX buffers was derived, through testing, as the maximum
 * number of allocatable pages supported on the host per guest. TX buffer
 * sizes beyond this value are rejected by the hardware.
 */
#define MAX_TX_BUFFERS_PER_QUEUE 16384
#define DEF_TX_BUFFERS_PER_QUEUE 256
#define MIN_TX_BUFFERS_PER_QUEUE 128

#define EQ_SIZE (8 * MANA_PAGE_SIZE)

#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256

/* Maximum number of packets per coalesced CQE */
#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Update these counts whenever the respective structures are changed */
#define MANA_STATS_RX_COUNT (6 + MANA_RXCOMP_OOB_NUM_PPI - 1)
#define MANA_STATS_TX_COUNT 11

#define MANA_RX_FRAG_ALIGNMENT 64

struct mana_stats_rx {
	u64 packets;
	u64 bytes;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 xdp_redirect;
	u64 pkt_len0_err;
	u64 coalesced_cqe[MANA_RXCOMP_OOB_NUM_PPI - 1];
	struct u64_stats_sync syncp;
};

struct mana_stats_tx {
	u64 packets;
	u64 bytes;
	u64 xdp_xmit;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 short_pkt_fmt;
	u64 long_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	struct u64_stats_sync syncp;
};

struct mana_txq {
	struct gdma_queue *gdma_sq;

	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1 : 10;
			u32 vsq_frame : 14;
			u32 reserved2 : 8;
		};
	};

	u16 vp_offset;

	struct net_device *ndev;

	/* The SKBs are sent to the HW and we are waiting for the CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	bool napi_initialized;

	struct mana_stats_tx stats;
};
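
/* Example (illustrative sketch, not part of the driver): the counters in
 * mana_stats_rx/mana_stats_tx are protected by the u64_stats_sync member,
 * so a reader takes a consistent snapshot with the standard retry loop,
 * e.g. for a TX queue:
 *
 *	struct mana_stats_tx *st = &txq->stats;
 *	unsigned int start;
 *	u64 packets, bytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&st->syncp);
 *		packets = st->packets;
 *		bytes = st->bytes;
 *	} while (u64_stats_fetch_retry(&st->syncp, start));
 */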

/* skb data and frags dma mappings */
struct mana_skb_head {
	/* GSO pkts may have 2 SGEs for the linear part */
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];

	u32 size[MAX_SKB_FRAGS + 2];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT = 0,
	MANA_LONG_PKT_FMT = 1,
};

struct mana_tx_short_oob {
	u32 pkt_fmt : 2;
	u32 is_outer_ipv4 : 1;
	u32 is_outer_ipv6 : 1;
	u32 comp_iphdr_csum : 1;
	u32 comp_tcp_csum : 1;
	u32 comp_udp_csum : 1;
	u32 supress_txcqe_gen : 1;
	u32 vcq_num : 24;

	u32 trans_off : 10; /* Transport header offset */
	u32 vsq_frame : 14;
	u32 short_vp_offset : 8;
}; /* HW DATA */

struct mana_tx_long_oob {
	u32 is_encap : 1;
	u32 inner_is_ipv6 : 1;
	u32 inner_tcp_opt : 1;
	u32 inject_vlan_pri_tag : 1;
	u32 reserved1 : 12;
	u32 pcp : 3; /* 802.1Q */
	u32 dei : 1; /* 802.1Q */
	u32 vlan_id : 12; /* 802.1Q */

	u32 inner_frame_offset : 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset : 12;
	u32 reserved2 : 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */

enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID = 0,
	CQE_RX_OKAY = 1,
	CQE_RX_COALESCED_4 = 2,
	CQE_RX_OBJECT_FENCE = 3,
	CQE_RX_TRUNCATED = 4,

	CQE_TX_OKAY = 32,
	CQE_TX_SA_DROP = 33,
	CQE_TX_MTU_DROP = 34,
	CQE_TX_INVALID_OOB = 35,
	CQE_TX_INVALID_ETH_TYPE = 36,
	CQE_TX_HDR_PROCESSING_ERROR = 37,
	CQE_TX_VF_DISABLED = 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39,
	CQE_TX_VPORT_DISABLED = 40,
	CQE_TX_VLAN_TAGGING_VIOLATION = 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
	u32 cqe_type : 6;
	u32 client_type : 2;
	u32 vendor_err : 24;
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4		BIT(0)
#define NDIS_HASH_TCP_IPV4	BIT(1)
#define NDIS_HASH_UDP_IPV4	BIT(2)
#define NDIS_HASH_IPV6		BIT(3)
#define NDIS_HASH_TCP_IPV6	BIT(4)
#define NDIS_HASH_UDP_IPV6	BIT(5)
#define NDIS_HASH_IPV6_EX	BIT(6)
#define NDIS_HASH_TCP_IPV6_EX	BIT(7)
#define NDIS_HASH_UDP_IPV6_EX	BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4 \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)

struct mana_rxcomp_perpkt_info {
	u32 pkt_len : 16;
	u32 reserved1 : 16;
	u32 reserved2;
	u32 pkt_hash;
}; /* HW DATA */

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id : 12;
	u32 rx_vlantag_present : 1;
	u32 rx_outer_iphdr_csum_succeed : 1;
	u32 rx_outer_iphdr_csum_fail : 1;
	u32 reserved1 : 1;
	u32 rx_hashtype : 9;
	u32 rx_iphdr_csum_succeed : 1;
	u32 rx_iphdr_csum_fail : 1;
	u32 rx_tcp_csum_succeed : 1;
	u32 rx_tcp_csum_fail : 1;
	u32 rx_udp_csum_succeed : 1;
	u32 rx_udp_csum_fail : 1;
	u32 reserved2 : 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;
}; /* HW DATA */
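
/* Example (illustrative sketch): rx_hashtype above carries the NDIS_HASH_*
 * bits, so a receive handler can map it to the kernel's packet hash level
 * before setting the skb hash from ppi[0].pkt_hash (oob and skb stand in
 * for a completion and the skb being built from it):
 *
 *	if (oob->rx_hashtype & MANA_HASH_L4)
 *		skb_set_hash(skb, oob->ppi[0].pkt_hash, PKT_HASH_TYPE_L4);
 *	else if (oob->rx_hashtype & MANA_HASH_L3)
 *		skb_set_hash(skb, oob->ppi[0].pkt_hash, PKT_HASH_TYPE_L3);
 */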

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset : 5;
	u32 tx_wqe_offset : 27;

	u32 reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify that each CQE comes to the right CQ) */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Set if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Set if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer which the CQ handler can copy the CQEs into. */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	/* NAPI data */
	struct napi_struct napi;
	int work_done;
	int work_done_since_doorbell;
	int budget;
};

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	void *buf_va;
	bool from_pool; /* allocated from a page pool */

	/* SGL of the buffer to be sent as part of the work request. */
	u32 num_sge;
	struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
};

#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
			+ ETH_HLEN)

#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)

struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not the gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;
	u32 alloc_size;
	u32 headroom;
	u32 frag_count;

	mana_handle_t rxobj;

	struct mana_cq rx_cq;

	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;

	struct mana_stats_rx stats;

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	void *xdp_save_va; /* for reusing */
	bool xdp_flush;
	int xdp_rc; /* XDP redirect return code */

	struct page_pool *page_pool;
	struct dentry *mana_rx_debugfs;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf);
};
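
/* Example (illustrative sketch): because rx_oobs[] is a flexible array
 * annotated with __counted_by(num_rx_buf), an RX queue is allocated in one
 * shot with struct_size() and num_rx_buf is set before the array is used:
 *
 *	rxq = kzalloc(struct_size(rxq, rx_oobs, num_rx_buf), GFP_KERNEL);
 *	if (!rxq)
 *		return NULL;
 *	rxq->num_rx_buf = num_rx_buf;
 */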

struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;

	struct dentry *mana_tx_debugfs;
};

struct mana_ethtool_stats {
	u64 stop_queue;
	u64 wake_queue;
	u64 tx_cqe_err;
	u64 tx_cqe_unknown_type;
	u64 tx_linear_pkt_cnt;
	u64 rx_cqe_unknown_type;
};

struct mana_ethtool_hc_stats {
	u64 hc_rx_discards_no_wqe;
	u64 hc_rx_err_vport_disabled;
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	u64 hc_tx_err_gf_disabled;
	u64 hc_tx_err_vport_disabled;
	u64 hc_tx_err_inval_vportoffset_pkt;
	u64 hc_tx_err_vlan_enforcement;
	u64 hc_tx_err_eth_type_enforcement;
	u64 hc_tx_err_sa_enforcement;
	u64 hc_tx_err_sqpdid_enforcement;
	u64 hc_tx_err_cqpdid_enforcement;
	u64 hc_tx_err_mtu_violation;
	u64 hc_tx_err_inval_oob;
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	u64 hc_tx_err_gdma;
};

struct mana_ethtool_phy_stats {
	/* Drop counters */
	u64 rx_pkt_drop_phy;
	u64 tx_pkt_drop_phy;

	/* Per-TC traffic counters */
	u64 rx_pkt_tc0_phy;
	u64 tx_pkt_tc0_phy;
	u64 rx_pkt_tc1_phy;
	u64 tx_pkt_tc1_phy;
	u64 rx_pkt_tc2_phy;
	u64 tx_pkt_tc2_phy;
	u64 rx_pkt_tc3_phy;
	u64 tx_pkt_tc3_phy;
	u64 rx_pkt_tc4_phy;
	u64 tx_pkt_tc4_phy;
	u64 rx_pkt_tc5_phy;
	u64 tx_pkt_tc5_phy;
	u64 rx_pkt_tc6_phy;
	u64 tx_pkt_tc6_phy;
	u64 rx_pkt_tc7_phy;
	u64 tx_pkt_tc7_phy;

	u64 rx_byte_tc0_phy;
	u64 tx_byte_tc0_phy;
	u64 rx_byte_tc1_phy;
	u64 tx_byte_tc1_phy;
	u64 rx_byte_tc2_phy;
	u64 tx_byte_tc2_phy;
	u64 rx_byte_tc3_phy;
	u64 tx_byte_tc3_phy;
	u64 rx_byte_tc4_phy;
	u64 tx_byte_tc4_phy;
	u64 rx_byte_tc5_phy;
	u64 tx_byte_tc5_phy;
	u64 rx_byte_tc6_phy;
	u64 tx_byte_tc6_phy;
	u64 rx_byte_tc7_phy;
	u64 tx_byte_tc7_phy;

	/* Per-TC pause counters */
	u64 rx_pause_tc0_phy;
	u64 tx_pause_tc0_phy;
	u64 rx_pause_tc1_phy;
	u64 tx_pause_tc1_phy;
	u64 rx_pause_tc2_phy;
	u64 tx_pause_tc2_phy;
	u64 rx_pause_tc3_phy;
	u64 tx_pause_tc3_phy;
	u64 rx_pause_tc4_phy;
	u64 tx_pause_tc4_phy;
	u64 rx_pause_tc5_phy;
	u64 tx_pause_tc5_phy;
	u64 rx_pause_tc6_phy;
	u64 tx_pause_tc6_phy;
	u64 rx_pause_tc7_phy;
	u64 tx_pause_tc7_phy;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;
	u8 bm_hostmode;

	struct mana_ethtool_hc_stats hc_stats;
	struct mana_eq *eqs;
	struct dentry *mana_eqs_debugfs;
	struct workqueue_struct *per_port_queue_reset_wq;
	/* Delayed work for querying hardware stats */
	struct delayed_work gf_stats_work;
	bool hwc_timeout_occurred;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];

	/* Link state change work */
	struct work_struct link_change_work;
	u32 link_event;
};
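
/* Example (illustrative sketch): ports[] holds up to num_ports net devices
 * (num_ports <= MAX_PORTS_IN_MANA_DEV), so a per-device walk guards against
 * ports that have no netdev attached:
 *
 *	for (i = 0; i < ac->num_ports; i++) {
 *		struct net_device *ndev = ac->ports[i];
 *
 *		if (!ndev)
 *			continue;
 *		... per-port work ...
 *	}
 */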

struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;
	struct work_struct queue_reset_work;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;

	/* Indirection table for RX & TX. The values are queue indexes */
	u32 *indir_table;
	u32 indir_table_sz;

	/* Indirection table containing RxObject handles */
	mana_handle_t *rxobj_table;

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues RQ pointers. */
	struct mana_rxq **rxqs;

	/* Pre-allocated RX buffer arrays */
	void **rxbufs_pre;
	dma_addr_t *das_pre;
	int rxbpre_total;
	u32 rxbpre_datasize;
	u32 rxbpre_alloc_size;
	u32 rxbpre_headroom;
	u32 rxbpre_frag_count;

	struct bpf_prog *bpf_prog;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int max_queues;
	unsigned int num_queues;

	unsigned int rx_queue_size;
	unsigned int tx_queue_size;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	/* Mutex protecting vport_use_count */
	struct mutex vport_mutex;
	int vport_use_count;

	/* Net shaper handle */
	struct net_shaper_handle handle;

	u16 port_idx;
	/* Currently configured speed (Mbps) */
	u32 speed;
	/* Maximum speed supported by the SKU (Mbps) */
	u32 max_speed;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	u8 cqe_coalescing_enable;
	u32 cqe_coalescing_timeout_ns;

	struct mana_ethtool_stats eth_stats;

	struct mana_ethtool_phy_stats phy_stats;

	/* Debugfs */
	struct dentry *mana_port_debugfs;
};

netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

int mana_rdma_probe(struct gdma_dev *gd);
void mana_rdma_remove(struct gdma_dev *gd);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
int mana_query_gf_stats(struct mana_context *ac);
int mana_query_link_cfg(struct mana_port_context *apc);
int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
		      int enable_clamping);
void mana_query_phy_stats(struct mana_port_context *apc);
int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);
void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc);

extern const struct ethtool_ops mana_ethtool_ops;
extern struct dentry *mana_debugfs_root;
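
/* Example (illustrative sketch): refreshing RSS state with the helpers
 * declared above. A default indirection table spreads entries across the
 * active queues round-robin; new_key stands in for a caller-supplied
 * MANA_HASH_KEY_SIZE-byte Toeplitz key:
 *
 *	for (i = 0; i < apc->indir_table_sz; i++)
 *		apc->indir_table[i] = i % apc->num_queues;
 *	memcpy(apc->hashkey, new_key, MANA_HASH_KEY_SIZE);
 *
 *	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
 */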

/* A CQ can be created without being associated with any EQ */
#define GDMA_CQ_NO_EQ 0xffff

struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;
	u32 modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG	= 0x20001,
	MANA_QUERY_GF_STAT	= 0x20002,
	MANA_CONFIG_VPORT_TX	= 0x20003,
	MANA_CREATE_WQ_OBJ	= 0x20004,
	MANA_DESTROY_WQ_OBJ	= 0x20005,
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,
	MANA_QUERY_LINK_CONFIG	= 0x2000A,
	MANA_SET_BW_CLAMP	= 0x2000B,
	MANA_QUERY_PHY_STAT	= 0x2000C,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER	= 0x28000,
	MANA_DEREGISTER_FILTER	= 0x28001,
	MANA_REGISTER_HW_PORT	= 0x28003,
	MANA_DEREGISTER_HW_PORT	= 0x28004,
};

/* Query Link Configuration */
struct mana_query_link_config_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
}; /* HW DATA */

struct mana_query_link_config_resp {
	struct gdma_resp_hdr hdr;
	u32 qos_speed_mbps;
	u8 qos_unconfigured;
	u8 reserved1[3];
	u32 link_speed_mbps;
	u8 reserved2[4];
}; /* HW DATA */

/* Set Bandwidth Clamp */
struct mana_set_bw_clamp_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	enum TRI_STATE enable_clamping;
	u32 link_speed_mbps;
}; /* HW DATA */

struct mana_set_bw_clamp_resp {
	struct gdma_resp_hdr hdr;
	u8 qos_unconfigured;
	u8 reserved[7];
}; /* HW DATA */
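
/* Example (illustrative sketch): the TRI_STATE argument of the
 * mana_set_bw_clamp() helper declared earlier selects between applying and
 * removing the clamp; speeds are in Mbps, matching link_speed_mbps above.
 * The first call clamps the port to 1 Gbps, the second removes the clamp:
 *
 *	err = mana_set_bw_clamp(apc, 1000, TRI_STATE_TRUE);
 *	...
 *	err = mana_set_bw_clamp(apc, 0, TRI_STATE_FALSE);
 */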

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA NIC driver capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u8 bm_hostmode; /* response v3: Bare Metal Host Mode */
	u8 reserved;
	u32 max_num_eqs;

	/* response v2: */
	u16 adapter_mtu;
	u16 reserved2;
	u32 reserved3;
}; /* HW DATA */

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;
	u8 short_form_allowed;
	u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Query GF stats */
struct mana_query_gf_stat_req {
	struct gdma_req_hdr hdr;
	u64 req_stats;
}; /* HW DATA */

struct mana_query_gf_stat_resp {
	struct gdma_resp_hdr hdr;
	u64 reported_stats;
	/* rx errors/discards */
	u64 rx_discards_nowqe;
	u64 rx_err_vport_disabled;
	/* rx bytes/packets */
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	/* tx errors */
	u64 tx_err_gf_disabled;
	u64 tx_err_vport_disabled;
	u64 tx_err_inval_vport_offset_pkt;
	u64 tx_err_vlan_enforcement;
	u64 tx_err_ethtype_enforcement;
	u64 tx_err_SA_enforcement;
	u64 tx_err_SQPDID_enforcement;
	u64 tx_err_CQPDID_enforcement;
	u64 tx_err_mtu_violation;
	u64 tx_err_inval_oob;
	/* tx bytes/packets */
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	/* tx error */
	u64 tx_err_gdma;
}; /* HW DATA */

/* Query phy stats */
struct mana_query_phy_stat_req {
	struct gdma_req_hdr hdr;
	u64 req_stats;
}; /* HW DATA */

struct mana_query_phy_stat_resp {
	struct gdma_resp_hdr hdr;
	u64 reported_stats;

	/* Aggregate drop counters */
	u64 rx_pkt_drop_phy;
	u64 tx_pkt_drop_phy;

	/* Per-TC (traffic class) traffic counters */
	u64 rx_pkt_tc0_phy;
	u64 tx_pkt_tc0_phy;
	u64 rx_pkt_tc1_phy;
	u64 tx_pkt_tc1_phy;
	u64 rx_pkt_tc2_phy;
	u64 tx_pkt_tc2_phy;
	u64 rx_pkt_tc3_phy;
	u64 tx_pkt_tc3_phy;
	u64 rx_pkt_tc4_phy;
	u64 tx_pkt_tc4_phy;
	u64 rx_pkt_tc5_phy;
	u64 tx_pkt_tc5_phy;
	u64 rx_pkt_tc6_phy;
	u64 tx_pkt_tc6_phy;
	u64 rx_pkt_tc7_phy;
	u64 tx_pkt_tc7_phy;

	u64 rx_byte_tc0_phy;
	u64 tx_byte_tc0_phy;
	u64 rx_byte_tc1_phy;
	u64 tx_byte_tc1_phy;
	u64 rx_byte_tc2_phy;
	u64 tx_byte_tc2_phy;
	u64 rx_byte_tc3_phy;
	u64 tx_byte_tc3_phy;
	u64 rx_byte_tc4_phy;
	u64 tx_byte_tc4_phy;
	u64 rx_byte_tc5_phy;
	u64 tx_byte_tc5_phy;
	u64 rx_byte_tc6_phy;
	u64 tx_byte_tc6_phy;
	u64 rx_byte_tc7_phy;
	u64 tx_byte_tc7_phy;

	/* Per-TC (traffic class) pause counters */
	u64 rx_pause_tc0_phy;
	u64 tx_pause_tc0_phy;
	u64 rx_pause_tc1_phy;
	u64 tx_pause_tc1_phy;
	u64 rx_pause_tc2_phy;
	u64 tx_pause_tc2_phy;
	u64 rx_pause_tc3_phy;
	u64 tx_pause_tc3_phy;
	u64 rx_pause_tc4_phy;
	u64 tx_pause_tc4_phy;
	u64 rx_pause_tc5_phy;
	u64 tx_pause_tc5_phy;
	u64 rx_pause_tc6_phy;
	u64 tx_pause_tc6_phy;
	u64 rx_pause_tc7_phy;
	u64 tx_pause_tc7_phy;
}; /* HW DATA */

/* Configure vPort RX Steering */
struct mana_cfg_rx_steer_req_v2 {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
	u8 cqe_coalescing_enable;
	u8 reserved2[7];
	mana_handle_t indir_tab[] __counted_by(num_indir_entries);
}; /* HW DATA */
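
/* Example (illustrative sketch): the steering request is variable-length,
 * so callers size it with struct_size() and point indir_tab_offset at the
 * trailing array (num_entries stands in for the table size being sent):
 *
 *	req = kzalloc(struct_size(req, indir_tab, num_entries), GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *	req->num_indir_entries = num_entries;
 *	req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
 *					 indir_tab);
 */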

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;

	/* V2 */
	u32 cqe_coalescing_timeout_ns;
	u32 reserved1;
}; /* HW DATA */

/* Register HW vPort */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u16 attached_gfid;
	u8 is_pf_default_vport;
	u8 reserved1;
	u8 allow_all_ether_types;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
}; /* HW DATA */

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register filter */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u8 mac_addr[6];
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
	u16 reserved5;
	u32 reserved6;
	u32 reserved7;
	u32 reserved8;
}; /* HW DATA */

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

/* Deregister filter */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Requested GF stats flags */
/* RX discards/errors */
#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE		0x0000000000000001
#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED	0x0000000000000002
/* RX bytes/pkts */
#define STATISTICS_FLAGS_HC_RX_BYTES			0x0000000000000004
#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS		0x0000000000000008
#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES		0x0000000000000010
#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS		0x0000000000000020
#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES		0x0000000000000040
#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS		0x0000000000000080
#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES		0x0000000000000100
/* TX errors */
#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED		0x0000000000000200
#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED	0x0000000000000400
#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS \
							0x0000000000000800
#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT	0x0000000000001000
#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT \
							0x0000000000002000
#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT	0x0000000000004000
#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT	0x0000000000008000
#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT	0x0000000000010000
#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION	0x0000000000020000
#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB		0x0000000000040000
/* TX bytes/pkts */
#define STATISTICS_FLAGS_HC_TX_BYTES			0x0000000000080000
#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS		0x0000000000100000
#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES		0x0000000000200000
#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS		0x0000000000400000
#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES		0x0000000000800000
#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS		0x0000000001000000
#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES		0x0000000002000000
/* TX error */
#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR		0x0000000004000000
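
/* Example (illustrative sketch): req_stats in mana_query_gf_stat_req is a
 * bitmask built from the flags above; requesting only the hardware RX
 * byte/packet counters looks like:
 *
 *	req.req_stats = STATISTICS_FLAGS_HC_RX_BYTES |
 *			STATISTICS_FLAGS_HC_RX_UCAST_PACKETS |
 *			STATISTICS_FLAGS_HC_RX_UCAST_BYTES;
 */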

#define MANA_MAX_NUM_QUEUES 64
#define MANA_DEF_NUM_QUEUES 16

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)

struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};

int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);

struct net_device *mana_get_primary_netdev(struct mana_context *ac,
					   u32 port_index,
					   netdevice_tracker *tracker);
#endif /* _MANA_H */