/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#ifndef OTX2_COMMON_H
#define OTX2_COMMON_H

#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/macsec.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>
#include <linux/time64.h>
#include <linux/dim.h>
#include <uapi/linux/if_macsec.h>
#include <net/page_pool/helpers.h>

#include <mbox.h>
#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include "otx2_devlink.h"
#include <rvu_trace.h>
#include "qos.h"
#include "rep.h"
#include "cn10k_ipsec.h"

/* IPv4 flag more fragment bit */
#define IPV4_FLAG_MORE	0x20

/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF	0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF	0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AFVF	0xA0F8

#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF		0xB200
#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF	0xB900
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF	0xBD00

#define PCI_DEVID_OCTEONTX2_SDP_REP	0xA0F7

/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM	2
#define PCI_MBOX_BAR_NUM	4

#define NAME_SIZE	32

#ifdef CONFIG_DCB
/* Max priority supported for PFC */
#define NIX_PF_PFC_PRIO_MAX	8
#endif

/* Number of segments per SG structure */
#define MAX_SEGS_PER_SG	3

enum arua_mapped_qtypes {
	AURA_NIX_RQ,
	AURA_NIX_SQ,
};

/* NIX LF interrupts range */
#define NIX_LF_QINT_VEC_START	0x00
#define NIX_LF_CINT_VEC_START	0x40
#define NIX_LF_GINT_VEC		0x80
#define NIX_LF_ERR_VEC		0x81
#define NIX_LF_POISON_VEC	0x82

/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID	2000

#define OTX2_GET_RX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
#define OTX2_GET_TX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_TX_STATX(reg))
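
/* Usage note: these wrappers expect a local 'pfvf' pointer in the calling
 * scope; e.g. OTX2_GET_RX_STATS(reg) expands to
 * otx2_read64(pfvf, NIX_LF_RX_STATX(reg)) and is typically used when
 * refreshing the counters in struct otx2_dev_stats below.
 */
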
struct otx2_lmt_info {
	u64 lmt_addr;
	u16 lmt_id;
};
/* RSS configuration */
struct otx2_rss_ctx {
	u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
};

struct otx2_rss_info {
	u8 enable;
	u32 flowkey_cfg;
	u16 rss_size;
#define RSS_HASH_KEY_SIZE	44   /* 352 bit key */
	u8 key[RSS_HASH_KEY_SIZE];
	struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS];
};

/* NIX (or NPC) RX errors */
enum otx2_errlvl {
	NPC_ERRLVL_RE,
	NPC_ERRLVL_LID_LA,
	NPC_ERRLVL_LID_LB,
	NPC_ERRLVL_LID_LC,
	NPC_ERRLVL_LID_LD,
	NPC_ERRLVL_LID_LE,
	NPC_ERRLVL_LID_LF,
	NPC_ERRLVL_LID_LG,
	NPC_ERRLVL_LID_LH,
	NPC_ERRLVL_NIX = 0x0F,
};

enum otx2_errcodes_re {
	/* NPC_ERRLVL_RE errcodes */
	ERRCODE_FCS = 0x7,
	ERRCODE_FCS_RCV = 0x8,
	ERRCODE_UNDERSIZE = 0x10,
	ERRCODE_OVERSIZE = 0x11,
	ERRCODE_OL2_LEN_MISMATCH = 0x12,
	/* NPC_ERRLVL_NIX errcodes */
	ERRCODE_OL3_LEN = 0x10,
	ERRCODE_OL4_LEN = 0x11,
	ERRCODE_OL4_CSUM = 0x12,
	ERRCODE_IL3_LEN = 0x20,
	ERRCODE_IL4_LEN = 0x21,
	ERRCODE_IL4_CSUM = 0x22,
};

enum otx2_xdp_action {
	OTX2_XDP_TX	  = BIT(0),
	OTX2_XDP_REDIRECT = BIT(1),
	OTX2_AF_XDP_FRAME = BIT(2),
};

struct otx2_dev_stats {
	u64 rx_bytes;
	u64 rx_frames;
	u64 rx_ucast_frames;
	u64 rx_bcast_frames;
	u64 rx_mcast_frames;
	u64 rx_drops;

	u64 tx_bytes;
	u64 tx_frames;
	u64 tx_ucast_frames;
	u64 tx_bcast_frames;
	u64 tx_mcast_frames;
	u64 tx_drops;
};

/* Driver counted stats */
struct otx2_drv_stats {
	atomic_t rx_fcs_errs;
	atomic_t rx_oversize_errs;
	atomic_t rx_undersize_errs;
	atomic_t rx_csum_errs;
	atomic_t rx_len_errs;
	atomic_t rx_other_errs;
};

struct mbox {
	struct otx2_mbox mbox;
	struct work_struct mbox_wrk;
	struct otx2_mbox mbox_up;
	struct work_struct mbox_up_wrk;
	struct otx2_nic *pfvf;
	void *bbuf_base; /* Bounce buffer for mbox memory */
	struct mutex lock; /* serialize mailbox access */
	int num_msgs; /* mbox number of messages */
	int up_num_msgs; /* mbox_up number of messages */
};

/* Egress rate limiting definitions */
#define MAX_BURST_EXPONENT		0x0FULL
#define MAX_BURST_MANTISSA		0xFFULL
#define MAX_BURST_SIZE			130816ULL
#define MAX_RATE_DIVIDER_EXPONENT	12ULL
#define MAX_RATE_EXPONENT		0x0FULL
#define MAX_RATE_MANTISSA		0xFFULL

/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA		GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT		GENMASK_ULL(12, 9)
#define TLX_RATE_DIVIDER_EXPONENT	GENMASK_ULL(16, 13)
#define TLX_BURST_MANTISSA		GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT		GENMASK_ULL(40, 37)
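
/* Worked example (assuming the burst encoding
 *	burst = ((256 + BURST_MANTISSA) << (BURST_EXPONENT + 1)) / 256
 * when programming NIX_TLX_PIR): the maximum mantissa (0xFF) and
 * exponent (0xF) give ((256 + 255) << 16) / 256 = 130816 bytes, which
 * matches MAX_BURST_SIZE above.
 */
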
struct otx2_hw {
	struct pci_dev *pdev;
	struct otx2_rss_info rss_info;
	u16 rx_queues;
	u16 tx_queues;
	u16 xdp_queues;
	u16 tc_tx_queues;
	u16 non_qos_queues; /* tx queues plus xdp queues */
	u16 max_queues;
	u16 pool_cnt;
	u16 rqpool_cnt;
	u16 sqpool_cnt;

#define OTX2_DEFAULT_RBUF_LEN	2048
	u16 rbuf_len;
	u32 xqe_size;

	/* NPA */
	u32 stack_pg_ptrs;  /* No of ptrs per stack page */
	u32 stack_pg_bytes; /* Size of stack page */
	u16 sqb_size;

	/* NIX */
	u8 txschq_link_cfg_lvl;
	u8 txschq_cnt[NIX_TXSCH_LVL_CNT];
	u8 txschq_aggr_lvl_rr_prio;
	u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	u16 matchall_ipolicer;
	u32 dwrr_mtu;
	u32 max_mtu;
	u8 smq_link_type;

	/* HW settings, coalescing etc */
	u16 rx_chan_base;
	u16 tx_chan_base;
	u8 rx_chan_cnt;
	u8 tx_chan_cnt;
	u16 cq_qcount_wait;
	u16 cq_ecount_wait;
	u16 rq_skid;
	u8 cq_time_wait;

	/* Segmentation */
	u8 lso_tsov4_idx;
	u8 lso_tsov6_idx;
	u8 lso_udpv4_idx;
	u8 lso_udpv6_idx;

	/* RSS */
	u8 flowkey_alg_idx;

	/* MSI-X */
	u8 cint_cnt; /* CQ interrupt count */
	u16 npa_msixoff; /* Offset of NPA vectors */
	u16 nix_msixoff; /* Offset of NIX vectors */
	char *irq_name;
	cpumask_var_t *affinity_mask;

	/* Stats */
	struct otx2_dev_stats dev_stats;
	struct otx2_drv_stats drv_stats;
	u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
	u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
	u64 cgx_fec_corr_blks;
	u64 cgx_fec_uncorr_blks;
	u8 cgx_links;  /* No. of CGX links present in HW */
	u8 lbk_links;  /* No. of LBK links present in HW */
	u8 tx_link;    /* Transmit channel link number */
#define HW_TSO			0
#define CN10K_MBOX		1
#define CN10K_LMTST		2
#define CN10K_RPM		3
#define CN10K_PTP_ONESTEP	4
#define CN10K_HW_MACSEC		5
#define QOS_CIR_PIR_SUPPORT	6
	unsigned long cap_flag;

#define LMT_LINE_SIZE	128
#define LMT_BURST_SIZE	32 /* 32 LMTST lines for burst SQE flush */
	u64 *lmt_base;
	struct otx2_lmt_info __percpu *lmt_info;
};

enum vfperm {
	OTX2_RESET_VF_PERM,
	OTX2_TRUSTED_VF,
};

struct otx2_vf_config {
	struct otx2_nic *pf;
	struct delayed_work link_event_work;
	bool intf_down; /* interface is currently down */
	u8 mac[ETH_ALEN];
	u16 vlan;
	int tx_vtag_idx;
	bool trusted;
};

struct flr_work {
	struct work_struct work;
	struct otx2_nic *pf;
};

struct refill_work {
	struct delayed_work pool_refill_work;
	struct otx2_nic *pf;
	struct napi_struct *napi;
};

/* PTPv2 originTimestamp structure */
struct ptpv2_tstamp {
	__be16 seconds_msb; /* 16 bits + */
	__be32 seconds_lsb; /* 32 bits = 48 bits */
	__be32 nanoseconds;
} __packed;

struct otx2_ptp {
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	struct otx2_nic *nic;

	struct cyclecounter cycle_counter;
	struct timecounter time_counter;

	struct delayed_work extts_work;
	u64 last_extts;
	u64 thresh;

	struct ptp_pin_desc extts_config;
	u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
	u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
	u64 (*ptp_tstamp2nsec)(const struct timecounter *time_counter, u64 timestamp);
	struct delayed_work synctstamp_work;
	u64 tstamp;
	u32 base_ns;
};

#define OTX2_HW_TIMESTAMP_LEN	8

struct otx2_mac_table {
	u8 addr[ETH_ALEN];
	u16 mcam_entry;
	bool inuse;
};

struct otx2_flow_config {
	u16 *flow_ent;
	u16 *def_ent;
	u16 nr_flows;
#define OTX2_DEFAULT_FLOWCOUNT		16
#define OTX2_DEFAULT_UNICAST_FLOWS	4
#define OTX2_MAX_VLAN_FLOWS		1
#define OTX2_MAX_TC_FLOWS		OTX2_DEFAULT_FLOWCOUNT
	u16 unicast_offset;
	u16 rx_vlan_offset;
	u16 vf_vlan_offset;
#define OTX2_PER_VF_VLAN_FLOWS	2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX	0
#define OTX2_VF_VLAN_TX_INDEX	1
	u32 *bmap_to_dmacindex;
	unsigned long *dmacflt_bmap;
	struct list_head flow_list;
	u32 dmacflt_max_flows;
	u16 max_flows;
	refcount_t mark_flows;
	struct list_head flow_list_tc;
	u8 ucast_flt_cnt;
	bool ntuple;
	u16 ntuple_cnt;
};

struct dev_hw_ops {
	int (*sq_aq_init)(void *dev, u16 qidx, u8 chan_offset,
			  u16 sqb_aura);
	void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
			  int size, int qidx);
	int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
	void (*aura_freeptr)(void *dev, int aura, u64 buf);
};
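
/* These callbacks abstract the differences between OcteonTX2 and CN10K
 * silicon; for example sq_aq_init is expected to point at otx2_sq_aq_init()
 * or cn10k_sq_aq_init(), and aura_freeptr at otx2_aura_freeptr() or
 * cn10k_aura_freeptr(), all of which are provided later in this header.
 */
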
#define CN10K_MCS_SA_PER_SC	4

/* Stats which need to be accumulated in software because
 * of shared counters in hardware.
 */
struct cn10k_txsc_stats {
	u64 InPktsUntagged;
	u64 InPktsNoTag;
	u64 InPktsBadTag;
	u64 InPktsUnknownSCI;
	u64 InPktsNoSCI;
	u64 InPktsOverrun;
};

struct cn10k_rxsc_stats {
	u64 InOctetsValidated;
	u64 InOctetsDecrypted;
	u64 InPktsUnchecked;
	u64 InPktsDelayed;
	u64 InPktsOK;
	u64 InPktsInvalid;
	u64 InPktsLate;
	u64 InPktsNotValid;
	u64 InPktsNotUsingSA;
	u64 InPktsUnusedSA;
};

struct cn10k_mcs_txsc {
	struct macsec_secy *sw_secy;
	struct cn10k_txsc_stats stats;
	struct list_head entry;
	enum macsec_validation_type last_validate_frames;
	bool last_replay_protect;
	u16 hw_secy_id_tx;
	u16 hw_secy_id_rx;
	u16 hw_flow_id;
	u16 hw_sc_id;
	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
	u8 sa_bmap;
	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
	u8 encoding_sa;
	u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
	ssci_t ssci[CN10K_MCS_SA_PER_SC];
	bool vlan_dev; /* macsec running on VLAN ? */
};

struct cn10k_mcs_rxsc {
	struct macsec_secy *sw_secy;
	struct macsec_rx_sc *sw_rxsc;
	struct cn10k_rxsc_stats stats;
	struct list_head entry;
	u16 hw_flow_id;
	u16 hw_sc_id;
	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
	u8 sa_bmap;
	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
	u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
	ssci_t ssci[CN10K_MCS_SA_PER_SC];
};

struct cn10k_mcs_cfg {
	struct list_head txsc_list;
	struct list_head rxsc_list;
};

struct otx2_nic {
	void __iomem *reg_base;
	struct net_device *netdev;
	struct dev_hw_ops *hw_ops;
	void *iommu_domain;
	u16 tx_max_pktlen;
	u16 rbsize; /* Receive buffer size */

#define OTX2_FLAG_RX_TSTAMP_ENABLED		BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED		BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN			BIT_ULL(2)
#define OTX2_FLAG_MCAM_ENTRIES_ALLOC		BIT_ULL(3)
#define OTX2_FLAG_NTUPLE_SUPPORT		BIT_ULL(4)
#define OTX2_FLAG_UCAST_FLTR_SUPPORT		BIT_ULL(5)
#define OTX2_FLAG_RX_VLAN_SUPPORT		BIT_ULL(6)
#define OTX2_FLAG_VF_VLAN_SUPPORT		BIT_ULL(7)
#define OTX2_FLAG_PF_SHUTDOWN			BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED		BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED		BIT_ULL(10)
#define OTX2_FLAG_TC_FLOWER_SUPPORT		BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED	BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED	BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT		BIT_ULL(14)
#define OTX2_FLAG_PTP_ONESTEP_SYNC		BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED	BIT_ULL(16)
#define OTX2_FLAG_TC_MARK_ENABLED		BIT_ULL(17)
#define OTX2_FLAG_REP_MODE_ENABLED		BIT_ULL(18)
#define OTX2_FLAG_PORT_UP			BIT_ULL(19)
#define OTX2_FLAG_IPSEC_OFFLOAD_ENABLED		BIT_ULL(20)
	u64 flags;
	u64 *cq_op_addr;

	struct bpf_prog *xdp_prog;
	struct otx2_qset qset;
	struct otx2_hw hw;
	struct pci_dev *pdev;
	struct device *dev;

	/* Mbox */
	struct mbox mbox;
	struct mbox *mbox_pfvf;
	struct workqueue_struct *mbox_wq;
	struct workqueue_struct *mbox_pfvf_wq;

	u8 total_vfs;
	u16 pcifunc; /* RVU PF_FUNC */
	u16 bpid[NIX_MAX_BPID_CHAN];
	struct otx2_vf_config *vf_configs;
	struct cgx_link_user_info linfo;

	/* NPC MCAM */
	struct otx2_flow_config *flow_cfg;
	struct otx2_mac_table *mac_table;

	u64 reset_count;
	struct work_struct reset_task;
	struct workqueue_struct *flr_wq;
	struct flr_work *flr_wrk;
	struct refill_work *refill_wrk;
	struct workqueue_struct *otx2_wq;
	struct work_struct rx_mode_work;

	/* Ethtool stuff */
	u32 msg_enable;

	/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
	int nix_blkaddr;
	/* LMTST Lines info */
	struct qmem *dync_lmt;
	u16 tot_lmt_lines;
	u16 npa_lmt_lines;
	u32 nix_lmt_size;

	struct otx2_ptp *ptp;
	struct hwtstamp_config tstamp;

	unsigned long rq_bmap;

	/* Devlink */
	struct otx2_devlink *dl;
	/* PFC */
	u8 pfc_en;
#ifdef CONFIG_DCB
	u8 *queue_to_pfc_map;
	u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
#endif
	/* qos */
	struct otx2_qos qos;

	/* napi event count. It is needed for adaptive irq coalescing. */
	u32 napi_events;

#if IS_ENABLED(CONFIG_MACSEC)
	struct cn10k_mcs_cfg *macsec_cfg;
#endif

#if IS_ENABLED(CONFIG_RVU_ESWITCH)
	struct rep_dev **reps;
	int rep_cnt;
	u16 rep_pf_map[RVU_MAX_REP];
	u16 esw_mode;
#endif

	/* Inline ipsec */
	struct cn10k_ipsec ipsec;
	/* af_xdp zero-copy */
	unsigned long *af_xdp_zc_qidx;
};

static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
	return (pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF) ||
	       (pdev->device == PCI_DEVID_RVU_REP);
}

static inline bool is_96xx_A0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x00) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_96xx_B0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x01) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_otx2_sdp_rep(struct pci_dev *pdev)
{
	return pdev->device == PCI_DEVID_OCTEONTX2_SDP_REP;
}

/* REVID for PCIe devices.
 * Bits 0..1: minor pass, bits 3..2: major pass
 * bits 7..4: midr id
 */
#define PCI_REVISION_ID_96XX	0x00
#define PCI_REVISION_ID_95XX	0x10
#define PCI_REVISION_ID_95XXN	0x20
#define PCI_REVISION_ID_98XX	0x30
#define PCI_REVISION_ID_95XXMM	0x40
#define PCI_REVISION_ID_95XXO	0xE0

static inline bool is_dev_otx2(struct pci_dev *pdev)
{
	u8 midr = pdev->revision & 0xF0;

	return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
		midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
		midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}

static inline bool is_dev_cn10kb(struct pci_dev *pdev)
{
	return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
}

static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
{
	if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF &&
	    (pdev->revision & 0xFF) == 0x54)
		return true;

	return false;
}

static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;

	pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
	pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
	pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;

	__set_bit(HW_TSO, &hw->cap_flag);

	if (is_96xx_A0(pfvf->pdev)) {
		__clear_bit(HW_TSO, &hw->cap_flag);

		/* Time based irq coalescing is not supported */
		pfvf->hw.cq_qcount_wait = 0x0;

		/* Due to a HW issue, previous silicons require a minimum of
		 * 600 unused CQEs to avoid CQ overflow.
		 */
		pfvf->hw.rq_skid = 600;
		pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
	}
	if (is_96xx_B0(pfvf->pdev))
		__clear_bit(HW_TSO, &hw->cap_flag);

	if (!is_dev_otx2(pfvf->pdev)) {
		__set_bit(CN10K_MBOX, &hw->cap_flag);
		__set_bit(CN10K_LMTST, &hw->cap_flag);
		__set_bit(CN10K_RPM, &hw->cap_flag);
		__set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
		__set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag);
	}

	if (is_dev_cn10kb(pfvf->pdev))
		__set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
}

/* Register read/write APIs */
static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
{
	u64 blkaddr;

	switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
	case BLKTYPE_NIX:
		blkaddr = nic->nix_blkaddr;
		break;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		break;
	case BLKTYPE_CPT:
		blkaddr = BLKADDR_CPT0;
		break;
	default:
		blkaddr = BLKADDR_RVUM;
		break;
	}

	offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
	offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);

	return nic->reg_base + offset;
}
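
/* Note: register offsets passed to the accessors below embed an RVU block
 * type; otx2_get_regaddr() above replaces that field with the block address
 * actually in use (e.g. nix_blkaddr may be BLKADDR_NIX0 or BLKADDR_NIX1)
 * before adding the offset to reg_base.
 */
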
static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	writeq(val, addr);
}

static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	return readq(addr);
}

/* Mbox bounce buffer APIs */
static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
{
	struct otx2_mbox *otx2_mbox;
	struct otx2_mbox_dev *mdev;

	mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
	if (!mbox->bbuf_base)
		return -ENOMEM;

	/* Overwrite mbox mbase to point to the bounce buffer, so that the
	 * PF/VF prepares all mbox messages in the bounce buffer instead of
	 * directly in HW mbox memory.
	 */
	otx2_mbox = &mbox->mbox;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;

	otx2_mbox = &mbox->mbox_up;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;
	return 0;
}

static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
{
	u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *hdr;
	u64 msg_size;

	if (mdev->mbase == hw_mbase)
		return;

	hdr = hw_mbase + mbox->rx_start;
	msg_size = hdr->msg_size;

	if (msg_size > mbox->rx_size - msgs_offset)
		msg_size = mbox->rx_size - msgs_offset;

	/* Copy mbox messages from mbox memory to bounce buffer */
	memcpy(mdev->mbase + mbox->rx_start,
	       hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}

/* In the absence of an API for 128-bit IO memory access on arm64,
 * implement the required operations in place.
 */
#if defined(CONFIG_ARM64)
static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
{
	__asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
			 ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}

static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
{
	u64 result;

	__asm__ volatile(".cpu generic+lse\n"
			 "ldadd %x[i], %x[r], [%[b]]"
			 : [r]"=r"(result), "+m"(*ptr)
			 : [i]"r"(incr), [b]"r"(ptr)
			 : "memory");
	return result;
}

#else
#define otx2_write128(lo, hi, addr)	writeq((hi) | (lo), addr)
#define otx2_atomic64_add(incr, ptr)	({ *ptr += incr; })
#endif

static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
					u64 *ptrs, u64 num_ptrs)
{
	struct otx2_lmt_info *lmt_info;
	u64 size = 0, count_eot = 0;
	u64 tar_addr, val = 0;

	lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
	tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
	/* LMTID is the same as the AURA Id */
	val = (lmt_info->lmt_id & 0x7FF) | BIT_ULL(63);
	/* Set if [127:64] of last 128bit word has a valid pointer */
	count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
	/* Set AURA ID to free pointer */
	ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
	/* Target address for LMTST flush tells HW how many 128bit
	 * words are valid from NPA_LF_AURA_BATCH_FREE0.
	 *
	 * tar_addr[6:4] is LMTST size-1 in units of 128b.
	 */
	if (num_ptrs > 2) {
		size = (sizeof(u64) * num_ptrs) / 16;
		if (!count_eot)
			size++;
		tar_addr |= ((size - 1) & 0x7) << 4;
	}
	dma_wmb();
	memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
	/* Perform LMTST flush */
	cn10k_lmt_flush(val, tar_addr);
}

static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	u64 ptrs[2];

	ptrs[1] = buf;
	get_cpu();
	/* Free only one buffer at a time during init and teardown */
	__cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
	put_cpu();
}
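
/* Worked example for __cn10k_aura_freeptr(): with num_ptrs = 5 (the control
 * word in ptrs[0] plus four buffer pointers), count_eot = 0 since the count
 * is odd, so size = (5 * 8) / 16 + 1 = 3 and tar_addr[6:4] is set to 2,
 * i.e. three 128-bit words are flushed from the LMT line.
 */
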
/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
	u64 *ptr = (__force u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
	u64 incr = (u64)aura | BIT_ULL(63);

	return otx2_atomic64_add(incr, ptr);
}

/* Free pointer to a pool/aura */
static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	void __iomem *addr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0);

	otx2_write128(buf, (u64)aura | BIT_ULL(63), addr);
}

static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
	if (type == AURA_NIX_SQ)
		return pfvf->hw.rqpool_cnt + idx;

	/* AURA_NIX_RQ */
	return idx;
}

/* Mbox APIs */
static inline int otx2_sync_mbox_msg(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
		return 0;
	otx2_mbox_msg_send_up(&mbox->mbox_up, devid);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
}

/* Use this API to send mbox msgs in atomic context
 * where sleeping is not allowed
 */
static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox)			\
{									\
	struct _req_type *req;						\
	u16 pcifunc = mbox->pfvf->pcifunc;				\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&mbox->mbox, 0, sizeof(struct _req_type),		\
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	req->hdr.pcifunc = pcifunc;					\
	trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req), pcifunc); \
	return req;							\
}

MBOX_MESSAGES
#undef M

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int									\
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
				  struct _req_type *req,		\
				  struct _rsp_type *rsp);		\

MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M
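
/* A typical request/response sequence built on the helpers generated above
 * (sketch only; mbox->lock serializes access as noted in struct mbox):
 *
 *	mutex_lock(&pfvf->mbox.lock);
 *	req = otx2_mbox_alloc_msg_ready(&pfvf->mbox);
 *	if (!req) {
 *		mutex_unlock(&pfvf->mbox.lock);
 *		return -ENOMEM;
 *	}
 *	err = otx2_sync_mbox_msg(&pfvf->mbox);
 *	mutex_unlock(&pfvf->mbox.lock);
 */
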
/* Time to wait before watchdog kicks in */
#define OTX2_TX_TIMEOUT		(100 * HZ)

#define RVU_PFVF_PF_SHIFT	10
#define RVU_PFVF_PF_MASK	0x3F
#define RVU_PFVF_FUNC_SHIFT	0
#define RVU_PFVF_FUNC_MASK	0x3FF

static inline bool is_otx2_vf(u16 pcifunc)
{
	return !!(pcifunc & RVU_PFVF_FUNC_MASK);
}

static inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}
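
/* Example decode: for pcifunc 0x0401, FUNC = (0x0401 & RVU_PFVF_FUNC_MASK) = 1,
 * so is_otx2_vf() returns true, and rvu_get_pf() returns
 * (0x0401 >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK = 1.
 */
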
static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
					   struct page *page,
					   size_t offset, size_t size,
					   enum dma_data_direction dir)
{
	dma_addr_t iova;

	iova = dma_map_page_attrs(pfvf->dev, page,
				  offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(pfvf->dev, iova)))
		return (dma_addr_t)NULL;
	return iova;
}

static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
				       dma_addr_t addr, size_t size,
				       enum dma_data_direction dir)
{
	dma_unmap_page_attrs(pfvf->dev, addr, size,
			     dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
{
	u16 smq;
	int idx;

#ifdef CONFIG_DCB
	if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
		return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
#endif
	/* check if qidx falls under QOS queues */
	if (qidx >= pfvf->hw.non_qos_queues) {
		smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];
	} else {
		idx = qidx % pfvf->hw.txschq_cnt[NIX_TXSCH_LVL_SMQ];
		smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][idx];
	}

	return smq;
}

static inline u16 otx2_get_total_tx_queues(struct otx2_nic *pfvf)
{
	return pfvf->hw.non_qos_queues + pfvf->hw.tc_tx_queues;
}

static inline u64 otx2_convert_rate(u64 rate)
{
	u64 converted_rate;

	/* Convert bytes per second to Mbps */
	converted_rate = rate * 8;
	converted_rate = max_t(u64, converted_rate / 1000000, 1);

	return converted_rate;
}

static inline int otx2_tc_flower_rule_cnt(struct otx2_nic *pfvf)
{
	/* return here if MCAM entries not allocated */
	if (!pfvf->flow_cfg)
		return 0;

	return pfvf->flow_cfg->nr_flows;
}

/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);
int otx2_reset_mac_stats(struct otx2_nic *pfvf);

/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
int otx2_detach_resources(struct mbox *mbox);
int otx2_config_npa(struct otx2_nic *pfvf);
int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
void otx2_aura_pool_free(struct otx2_nic *pfvf);
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
void otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
void otx2_free_pending_sqe(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    dma_addr_t *dma, int qidx, int idx);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
		      dma_addr_t *dma);
int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
		   int stack_pages, int numptrs, int buf_size, int type);
int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
		   int pool_id, int numptrs);
int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf);
void otx2_free_queue_mem(struct otx2_qset *qset);
int otx2_alloc_queue_mem(struct otx2_nic *pf);
int otx2_init_hw_resources(struct otx2_nic *pfvf);
void otx2_free_hw_resources(struct otx2_nic *pf);
int otx2_wq_init(struct otx2_nic *pf);
int otx2_check_pf_usable(struct otx2_nic *pf);
int otx2_pfaf_mbox_init(struct otx2_nic *pf);
int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af);
int otx2_realloc_msix_vectors(struct otx2_nic *pf);
void otx2_pfaf_mbox_destroy(struct otx2_nic *pf);
void otx2_disable_mbox_intr(struct otx2_nic *pf);
void otx2_disable_napi(struct otx2_nic *pf);
irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura);
int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx);

/* RSS configuration APIs */
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);

/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp);
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp);
void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
				struct cgx_fec_stats_rsp *rsp);
void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
				struct nix_bp_cfg_rsp *rsp);

/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
void otx2vf_set_ethtool_ops(struct net_device *netdev);

int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues);
int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd);
int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr);

/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count);
void otx2_mcam_flow_del(struct otx2_nic *pf);
int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
int otx2_get_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc, u32 location);
int otx2_get_all_flows(struct otx2_nic *pfvf,
		       struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg);
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
			    u64 iova, int len, u16 qidx, u16 flags);
void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf,
			 u64 dma_addr, int len, int *offset, u16 flags);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
				   netdev_features_t features);
int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    u64 iova, int size);
int otx2_mcam_entry_init(struct otx2_nic *pfvf);

/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data);
void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);

/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);

#ifdef CONFIG_DCB
/* DCB support */
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
int otx2_dcbnl_set_ops(struct net_device *dev);
/* PFC support */
int otx2_pfc_txschq_config(struct otx2_nic *pfvf);
int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);
#endif

#if IS_ENABLED(CONFIG_MACSEC)
/* MACSEC offload support */
int cn10k_mcs_init(struct otx2_nic *pfvf);
void cn10k_mcs_free(struct otx2_nic *pfvf);
void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
#else
static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
					  struct mcs_intr_info *event)
{}
#endif /* CONFIG_MACSEC */

/* qos support */
static inline void otx2_qos_init(struct otx2_nic *pfvf, int qos_txqs)
{
	struct otx2_hw *hw = &pfvf->hw;

	hw->tc_tx_queues = qos_txqs;
	INIT_LIST_HEAD(&pfvf->qos.qos_tree);
	mutex_init(&pfvf->qos.qos_lock);
}

static inline void otx2_shutdown_qos(struct otx2_nic *pfvf)
{
	mutex_destroy(&pfvf->qos.qos_lock);
}

u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
		      struct net_device *sb_dev);
int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid);
void otx2_qos_config_txschq(struct otx2_nic *pfvf);
void otx2_clean_qos_queues(struct otx2_nic *pfvf);
int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info);
int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
			     struct flow_cls_offload *cls_flower);

static inline int mcam_entry_cmp(const void *a, const void *b)
{
	return *(u16 *)a - *(u16 *)b;
}

dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
				 struct sk_buff *skb, int seg, int *len);
void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx);
#endif /* OTX2_COMMON_H */