/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#ifndef OTX2_COMMON_H
#define OTX2_COMMON_H

#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/soc/marvell/silicons.h>
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/macsec.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>
#include <linux/time64.h>
#include <linux/dim.h>
#include <uapi/linux/if_macsec.h>
#include <net/page_pool/helpers.h>

#include <mbox.h>
#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include "otx2_devlink.h"
#include <rvu.h>
#include <rvu_trace.h>
#include "qos.h"
#include "rep.h"
#include "cn10k_ipsec.h"
#include "cn20k.h"

/* IPv4 flag more fragment bit */
#define IPV4_FLAG_MORE				0x20

/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF		0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF		0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AFVF		0xA0F8

#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF		0xB200
#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF	0xB900
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF	0xBD00

#define PCI_DEVID_OCTEONTX2_SDP_REP		0xA0F7

/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM			2
#define PCI_MBOX_BAR_NUM			4

#define NAME_SIZE				32

#ifdef CONFIG_DCB
/* Max priority supported for PFC */
#define NIX_PF_PFC_PRIO_MAX			8
#endif

/* Number of segments per SG structure */
#define MAX_SEGS_PER_SG				3

/* AF mailbox interrupt handlers; cn20k_* variants serve newer silicon
 * (selected via dev_hw_ops below).
 */
irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq);
irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq);
irqreturn_t cn20k_vfaf_mbox_intr_handler(int irq, void *vf_irq);
irqreturn_t cn20k_pfvf_mbox_intr_handler(int irq, void *pf_irq);
irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq);

/* Queue types mapped onto NPA auras.
 * NOTE(review): "arua" looks like a typo for "aura", but the enum tag is
 * part of the driver-wide interface — do not rename in isolation.
 */
enum arua_mapped_qtypes {
	AURA_NIX_RQ,
	AURA_NIX_SQ,
};

/* NIX LF interrupts range */
#define NIX_LF_QINT_VEC_START			0x00
#define NIX_LF_CINT_VEC_START			0x40
#define NIX_LF_GINT_VEC				0x80
#define NIX_LF_ERR_VEC				0x81
#define NIX_LF_POISON_VEC			0x82

/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID	2000

#define OTX2_GET_RX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
#define OTX2_GET_TX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_TX_STATX(reg))

/* Per-CPU LMTST line bookkeeping (see otx2_hw::lmt_info) */
struct otx2_lmt_info {
	u64 lmt_addr;
	u16 lmt_id;
};

/* RSS state: flow key config, hash key and indirection table */
struct otx2_rss_info {
	u8 enable;
	u32 flowkey_cfg;
	u16 rss_size;
#define RSS_HASH_KEY_SIZE	44   /* 352 bit key */
	u8  key[RSS_HASH_KEY_SIZE];
	u32 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
};

/* NIX (or NPC) RX errors */
enum otx2_errlvl {
	NPC_ERRLVL_RE,
	NPC_ERRLVL_LID_LA,
	NPC_ERRLVL_LID_LB,
	NPC_ERRLVL_LID_LC,
	NPC_ERRLVL_LID_LD,
	NPC_ERRLVL_LID_LE,
	NPC_ERRLVL_LID_LF,
	NPC_ERRLVL_LID_LG,
	NPC_ERRLVL_LID_LH,
	NPC_ERRLVL_NIX = 0x0F,
};

enum otx2_errcodes_re {
	/* NPC_ERRLVL_RE errcodes */
	ERRCODE_FCS = 0x7,
	ERRCODE_FCS_RCV = 0x8,
	ERRCODE_UNDERSIZE = 0x10,
	ERRCODE_OVERSIZE = 0x11,
	ERRCODE_OL2_LEN_MISMATCH = 0x12,
	/* NPC_ERRLVL_NIX errcodes */
	ERRCODE_OL3_LEN = 0x10,
	ERRCODE_OL4_LEN = 0x11,
	ERRCODE_OL4_CSUM = 0x12,
	ERRCODE_IL3_LEN = 0x20,
	ERRCODE_IL4_LEN = 0x21,
	ERRCODE_IL4_CSUM = 0x22,
};

/* XDP verdict flags (bitmask, may be combined) */
enum otx2_xdp_action {
	OTX2_XDP_TX	  = BIT(0),
	OTX2_XDP_REDIRECT = BIT(1),
	OTX2_AF_XDP_FRAME = BIT(2),
};

/* Interface Rx/Tx packet, byte and drop counters */
struct otx2_dev_stats {
	u64 rx_bytes;
	u64 rx_frames;
	u64 rx_ucast_frames;
	u64 rx_bcast_frames;
	u64 rx_mcast_frames;
	u64 rx_drops;

	u64 tx_bytes;
	u64 tx_frames;
	u64 tx_ucast_frames;
	u64 tx_bcast_frames;
	u64 tx_mcast_frames;
	u64 tx_drops;
	/* atomic: updated outside the usual stats-sync path */
	atomic_long_t tx_discards;
};

/* Driver counted stats */
struct otx2_drv_stats {
	atomic_t rx_fcs_errs;
	atomic_t rx_oversize_errs;
	atomic_t rx_undersize_errs;
	atomic_t rx_csum_errs;
	atomic_t rx_len_errs;
	atomic_t rx_other_errs;
};

/* Mailbox channel state: down-direction (mbox) and up-direction (mbox_up)
 * queues with their deferred-work handlers.
 */
struct mbox {
	struct otx2_mbox	mbox;
	struct work_struct	mbox_wrk;
	struct otx2_mbox	mbox_up;
	struct work_struct	mbox_up_wrk;
	struct otx2_nic		*pfvf;
	void			*bbuf_base; /* Bounce buffer for mbox memory */
	struct mutex		lock;	/* serialize mailbox access */
	int			num_msgs; /* mbox number of messages */
	int			up_num_msgs; /* mbox_up number of messages */
};

/* Egress rate limiting definitions */
#define MAX_BURST_EXPONENT		0x0FULL
#define MAX_BURST_MANTISSA		0xFFULL
#define MAX_BURST_SIZE			130816ULL
#define MAX_RATE_DIVIDER_EXPONENT	12ULL
#define MAX_RATE_EXPONENT		0x0FULL
#define MAX_RATE_MANTISSA		0xFFULL

/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA		GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT		GENMASK_ULL(12, 9)
#define TLX_RATE_DIVIDER_EXPONENT	GENMASK_ULL(16, 13)
#define TLX_BURST_MANTISSA		GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT		GENMASK_ULL(40, 37)

/* Per-device hardware resources, capabilities and statistics */
struct otx2_hw {
	struct pci_dev		*pdev;
	struct otx2_rss_info	rss_info;
	u16			rx_queues;
	u16			tx_queues;
	u16			xdp_queues;
	u16			tc_tx_queues;
	u16			non_qos_queues; /* tx queues plus xdp queues */
	u16			max_queues;
	u16			pool_cnt;
	u16			rqpool_cnt;
	u16			sqpool_cnt;

#define OTX2_DEFAULT_RBUF_LEN	2048
	u16			rbuf_len;
	u32			xqe_size;

	/* NPA */
	u32			stack_pg_ptrs;  /* No of ptrs per stack page */
	u32			stack_pg_bytes; /* Size of stack page */
	u16			sqb_size;

	/* NIX */
	u8			txschq_link_cfg_lvl;
	u8			txschq_cnt[NIX_TXSCH_LVL_CNT];
	u8			txschq_aggr_lvl_rr_prio;
	u16			txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	u16			matchall_ipolicer;
	u32			dwrr_mtu;
	u32			max_mtu;
	u8			smq_link_type;

	/* HW settings, coalescing etc */
	u16			rx_chan_base;
	u16			tx_chan_base;
	u8			rx_chan_cnt;
	u8			tx_chan_cnt;
	u16			cq_qcount_wait;
	u16			cq_ecount_wait;
	u16			rq_skid;
	u8			cq_time_wait;

	/* Segmentation */
	u8			lso_tsov4_idx;
	u8			lso_tsov6_idx;
	u8			lso_udpv4_idx;
	u8			lso_udpv6_idx;

	/* RSS */
	u8			flowkey_alg_idx;

	/* MSI-X */
	u8			cint_cnt; /* CQ interrupt count */
	u16			npa_msixoff; /* Offset of NPA vectors */
	u16			nix_msixoff; /* Offset of NIX vectors */
	char			*irq_name;
	cpumask_var_t           *affinity_mask;
	struct pf_irq_data	*pfvf_irq_devid[4];

	/* Stats */
	struct otx2_dev_stats	dev_stats;
	struct otx2_drv_stats	drv_stats;
	u64			cgx_rx_stats[CGX_RX_STATS_COUNT];
	u64			cgx_tx_stats[CGX_TX_STATS_COUNT];
	u64			cgx_fec_corr_blks;
	u64			cgx_fec_uncorr_blks;
	u8			cgx_links;  /* No. of CGX links present in HW */
	u8			lbk_links;  /* No. of LBK links present in HW */
	u8			tx_link;    /* Transmit channel link number */
	/* Bit numbers for cap_flag below */
#define HW_TSO			0
#define CN10K_MBOX		1
#define CN10K_LMTST		2
#define CN10K_RPM		3
#define CN10K_PTP_ONESTEP	4
#define CN10K_HW_MACSEC		5
#define QOS_CIR_PIR_SUPPORT	6
	unsigned long		cap_flag;

#define LMT_LINE_SIZE		128
#define LMT_BURST_SIZE		32 /* 32 LMTST lines for burst SQE flush */
	u64			*lmt_base;
	struct otx2_lmt_info	__percpu *lmt_info;
};

/* VF permission/trust states tracked by the PF */
enum vfperm {
	OTX2_RESET_VF_PERM,
	OTX2_TRUSTED_VF,
};

/* Per-VF configuration maintained by the PF (see otx2_nic::vf_configs) */
struct otx2_vf_config {
	struct otx2_nic *pf;
	struct delayed_work link_event_work;
	bool intf_down; /* interface was either configured or not */
	u8 mac[ETH_ALEN];
	u16 vlan;
	int tx_vtag_idx;
	bool trusted;
};

/* Work context for handling a VF FLR (function level reset) */
struct flr_work {
	struct work_struct work;
	struct otx2_nic *pf;
};

/* Work context for deferred Rx pool buffer refill */
struct refill_work {
	struct delayed_work pool_refill_work;
	struct otx2_nic *pf;
	struct napi_struct *napi;
};

/* PTPv2 originTimestamp structure */
struct ptpv2_tstamp {
	__be16 seconds_msb; /* 16 bits + */
	__be32 seconds_lsb; /* 32 bits = 48 bits*/
	__be32 nanoseconds;
} __packed;

/* PTP clock state: cycle/time counters, external timestamp (extts) and
 * periodic sync-timestamp work, plus silicon-specific tstamp converters.
 */
struct otx2_ptp {
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	struct otx2_nic *nic;

	struct cyclecounter cycle_counter;
	struct timecounter time_counter;

	struct delayed_work extts_work;
	u64 last_extts;
	u64 thresh;

	struct ptp_pin_desc extts_config;
	u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
	u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
	u64 (*ptp_tstamp2nsec)(const struct timecounter *time_counter, u64 timestamp);
	struct delayed_work synctstamp_work;
	u64 tstamp;
	u32 base_ns;
};

#define OTX2_HW_TIMESTAMP_LEN	8

/* MAC filter entry backed by an NPC MCAM rule */
struct otx2_mac_table {
	u8 addr[ETH_ALEN];
	u16 mcam_entry;
	bool inuse;
};

/* NPC MCAM flow-steering configuration: entry allocation layout
 * (unicast / Rx VLAN / per-VF VLAN offsets), DMAC filter bitmap and the
 * ntuple/TC flow lists.
 */
struct otx2_flow_config {
	u16			*flow_ent;
	u16			*def_ent;
	u16			nr_flows;
#define OTX2_DEFAULT_FLOWCOUNT		16
#define OTX2_DEFAULT_UNICAST_FLOWS	4
#define OTX2_MAX_VLAN_FLOWS		1
#define OTX2_MAX_TC_FLOWS	OTX2_DEFAULT_FLOWCOUNT
	u16			unicast_offset;
	u16			rx_vlan_offset;
	u16			vf_vlan_offset;
#define OTX2_PER_VF_VLAN_FLOWS	2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX	0
#define OTX2_VF_VLAN_TX_INDEX	1
	u32			*bmap_to_dmacindex;
	unsigned long		*dmacflt_bmap;
	struct list_head	flow_list;
	u32			dmacflt_max_flows;
	u16                     max_flows;
	refcount_t		mark_flows;
	struct list_head	flow_list_tc;
	u8			ucast_flt_cnt;
	bool			ntuple;
	u16			ntuple_cnt;
};

struct otx2_tc_flow_stats {
	u64 bytes;
	u64 pkts;
	u64 used;
};

/* One offloaded TC flower rule and its per-rule stats/policer state */
struct otx2_tc_flow {
	struct list_head		list;
	unsigned long			cookie;
	struct rcu_head			rcu;
	struct otx2_tc_flow_stats	stats;
	spinlock_t			lock; /* lock for stats */
	u16				rq;
	u16				entry;
	u16				leaf_profile;
	bool				is_act_police;
	u32				prio;
	struct npc_install_flow_req	req;
	u64				rate;
	u32				burst;
	u32				mcast_grp_idx;
	bool				is_pps;
	u8				kw_type; /* X2/X4 */
};

/* Silicon-specific operation table (OTx2 vs CN10K vs CN20K variants) */
struct dev_hw_ops {
	int	(*sq_aq_init)(void *dev, u16 qidx, u8 chan_offset,
			      u16 sqb_aura);
	void	(*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
			     int size, int qidx);
	int	(*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
	void	(*aura_freeptr)(void *dev, int aura, u64 buf);
	irqreturn_t (*pfaf_mbox_intr_handler)(int irq, void *pf_irq);
	irqreturn_t (*vfaf_mbox_intr_handler)(int irq, void *pf_irq);
	irqreturn_t (*pfvf_mbox_intr_handler)(int irq, void *pf_irq);
	int	(*aura_aq_init)(struct otx2_nic *pfvf, int aura_id,
				int pool_id, int numptrs);
	int	(*pool_aq_init)(struct otx2_nic *pfvf, u16 pool_id,
				int stack_pages, int numptrs, int buf_size,
				int type);
};

#define CN10K_MCS_SA_PER_SC	4

/* Stats which need to be accumulated in software because
 * of shared counters in hardware.
 */
struct cn10k_txsc_stats {
	u64 InPktsUntagged;
	u64 InPktsNoTag;
	u64 InPktsBadTag;
	u64 InPktsUnknownSCI;
	u64 InPktsNoSCI;
	u64 InPktsOverrun;
};

struct cn10k_rxsc_stats {
	u64 InOctetsValidated;
	u64 InOctetsDecrypted;
	u64 InPktsUnchecked;
	u64 InPktsDelayed;
	u64 InPktsOK;
	u64 InPktsInvalid;
	u64 InPktsLate;
	u64 InPktsNotValid;
	u64 InPktsNotUsingSA;
	u64 InPktsUnusedSA;
};

/* HW MACsec Tx secure channel state mapped to a software SecY */
struct cn10k_mcs_txsc {
	struct macsec_secy *sw_secy;
	struct cn10k_txsc_stats stats;
	struct list_head entry;
	enum macsec_validation_type last_validate_frames;
	bool last_replay_protect;
	u16 hw_secy_id_tx;
	u16 hw_secy_id_rx;
	u16 hw_flow_id;
	u16 hw_sc_id;
	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
	u8 sa_bmap;
	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
	u8 encoding_sa;
	u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
	ssci_t ssci[CN10K_MCS_SA_PER_SC];
	bool vlan_dev; /* macsec running on VLAN ? */
};

/* HW MACsec Rx secure channel state mapped to a software Rx SC */
struct cn10k_mcs_rxsc {
	struct macsec_secy *sw_secy;
	struct macsec_rx_sc *sw_rxsc;
	struct cn10k_rxsc_stats stats;
	struct list_head entry;
	u16 hw_flow_id;
	u16 hw_sc_id;
	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
	u8 sa_bmap;
	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
	u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
	ssci_t ssci[CN10K_MCS_SA_PER_SC];
};

struct cn10k_mcs_cfg {
	struct list_head txsc_list;
	struct list_head rxsc_list;
};

/* Context handed to PF mbox interrupt handlers: interrupt status plus the
 * callback that queues the per-device mbox work for the signalled devices.
 */
struct pf_irq_data {
	u64 intr_status;
	void (*pf_queue_work_hdlr)(struct mbox *mb, struct workqueue_struct *mw,
				   int first, int mdevs, u64 intr);
	struct otx2_nic *pf;
	int vec_num;
	int start;
	int mdevs;
};

/* Main per-netdev driver state for a PF or VF */
struct otx2_nic {
	void __iomem		*reg_base;
	struct net_device	*netdev;
	struct dev_hw_ops	*hw_ops;
	void			*iommu_domain;
	u16			tx_max_pktlen;
	u16			rbsize; /* Receive buffer size */

	/* Bit numbers for the "flags" field below */
#define OTX2_FLAG_RX_TSTAMP_ENABLED		BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED		BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN			BIT_ULL(2)
#define OTX2_FLAG_MCAM_ENTRIES_ALLOC		BIT_ULL(3)
#define OTX2_FLAG_NTUPLE_SUPPORT		BIT_ULL(4)
#define OTX2_FLAG_UCAST_FLTR_SUPPORT		BIT_ULL(5)
#define OTX2_FLAG_RX_VLAN_SUPPORT		BIT_ULL(6)
#define OTX2_FLAG_VF_VLAN_SUPPORT		BIT_ULL(7)
#define OTX2_FLAG_PF_SHUTDOWN			BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED		BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED		BIT_ULL(10)
#define OTX2_FLAG_TC_FLOWER_SUPPORT		BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED	BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED	BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT		BIT_ULL(14)
#define OTX2_FLAG_PTP_ONESTEP_SYNC		BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED	BIT_ULL(16)
#define OTX2_FLAG_TC_MARK_ENABLED		BIT_ULL(17)
#define OTX2_FLAG_REP_MODE_ENABLED		BIT_ULL(18)
#define OTX2_FLAG_PORT_UP			BIT_ULL(19)
#define OTX2_FLAG_IPSEC_OFFLOAD_ENABLED		BIT_ULL(20)
	u64			flags;
	u64			*cq_op_addr;

	struct bpf_prog		*xdp_prog;
	struct otx2_qset	qset;
	struct otx2_hw		hw;
	struct pci_dev		*pdev;
	struct device		*dev;

	/* Mbox */
	struct mbox		mbox;
	struct mbox		*mbox_pfvf;
	struct workqueue_struct *mbox_wq;
	struct workqueue_struct *mbox_pfvf_wq;
	struct qmem		*pfvf_mbox_addr;

	u8			total_vfs;
	u16			pcifunc; /* RVU PF_FUNC */
	u16			bpid[NIX_MAX_BPID_CHAN];
	struct otx2_vf_config	*vf_configs;
	struct cgx_link_user_info linfo;

	/* NPC MCAM */
	struct otx2_flow_config	*flow_cfg;
	struct otx2_mac_table	*mac_table;

	u64			reset_count;
	struct work_struct	reset_task;
	struct workqueue_struct	*flr_wq;
	struct flr_work		*flr_wrk;
	struct refill_work	*refill_wrk;
	struct workqueue_struct	*otx2_wq;
	struct work_struct	rx_mode_work;

	/* Ethtool stuff */
	u32			msg_enable;

	/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
	int			nix_blkaddr;
	/* LMTST Lines info */
	struct qmem		*dync_lmt;
	u16			tot_lmt_lines;
	u16			npa_lmt_lines;
	u32			nix_lmt_size;

	struct otx2_ptp		*ptp;
	struct kernel_hwtstamp_config	tstamp;

	unsigned long		rq_bmap;

	/* Devlink */
	struct otx2_devlink	*dl;
	/* PFC */
	u8			pfc_en;
#ifdef CONFIG_DCB
	u8			*queue_to_pfc_map;
	u16			pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	bool			pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
#endif
	/* qos */
	struct otx2_qos		qos;

	/* napi event count. It is needed for adaptive irq coalescing. */
	u32 napi_events;

#if IS_ENABLED(CONFIG_MACSEC)
	struct cn10k_mcs_cfg	*macsec_cfg;
#endif

#if IS_ENABLED(CONFIG_RVU_ESWITCH)
	struct rep_dev		**reps;
	int			rep_cnt;
	u16			rep_pf_map[RVU_MAX_REP];
	u16			esw_mode;
#endif

	/* Inline ipsec */
	struct cn10k_ipsec	ipsec;
	/* af_xdp zero-copy */
	unsigned long		*af_xdp_zc_qidx;
};

/* True for AF-provisioned loopback VFs and representor devices */
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
	return (pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF) ||
		(pdev->device == PCI_DEVID_RVU_REP);
}

/* Silicon pass checks: revision 0x00 is 96xx pass A0, 0x01 is pass B0 */
static inline bool is_96xx_A0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x00) &&
		(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_96xx_B0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x01) &&
		(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_otx2_sdp_rep(struct pci_dev *pdev)
{
	return pdev->device == PCI_DEVID_OCTEONTX2_SDP_REP;
}

/* REVID for PCIe devices.
 * Bits 0..1: minor pass, bit 3..2: major pass
 * bits 7..4: midr id
 */
#define PCI_REVISION_ID_96XX		0x00
#define PCI_REVISION_ID_95XX		0x10
#define PCI_REVISION_ID_95XXN		0x20
#define PCI_REVISION_ID_98XX		0x30
#define PCI_REVISION_ID_95XXMM		0x40
#define PCI_REVISION_ID_95XXO		0xE0

/* True for first-generation (OcteonTX2 9xx-family) silicon, identified by
 * the midr-id nibble of the PCI revision.
 */
static inline bool is_dev_otx2(struct pci_dev *pdev)
{
	u8 midr = pdev->revision & 0xF0;

	return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
		midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
		midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}

static inline bool is_dev_cn10kb(struct pci_dev *pdev)
{
	return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
}

/* CN10KA pass B0 is distinguished by revision 0x54 on the CN10K-A subsystem */
static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
{
	if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF &&
	    (pdev->revision & 0xFF) == 0x54)
		return true;

	return false;
}

/* Apply silicon-specific defaults and capability bits for this device:
 * CQ coalescing defaults, 96xx errata workarounds and CN10K feature flags.
 */
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;

	pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
	pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
	pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;

	__set_bit(HW_TSO, &hw->cap_flag);

	if (is_96xx_A0(pfvf->pdev)) {
		__clear_bit(HW_TSO, &hw->cap_flag);

		/* Time based irq coalescing is not supported */
		pfvf->hw.cq_qcount_wait = 0x0;

		/* Due to HW issue previous silicons required minimum
		 * 600 unused CQE to avoid CQ overflow.
		 */
		pfvf->hw.rq_skid = 600;
		pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
	}
	if (is_96xx_B0(pfvf->pdev))
		__clear_bit(HW_TSO, &hw->cap_flag);

	if (!is_dev_otx2(pfvf->pdev)) {
		__set_bit(CN10K_MBOX, &hw->cap_flag);
		__set_bit(CN10K_LMTST, &hw->cap_flag);
		__set_bit(CN10K_RPM, &hw->cap_flag);
		__set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
		__set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag);
	}
}

/* Register read/write APIs */

/* Translate a register offset's block *type* field into the actual block
 * *address* for this device (e.g. NIX0 vs NIX1) and return the ioremapped
 * address to access.
 */
static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
{
	u64 blkaddr;

	switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
	case BLKTYPE_NIX:
		blkaddr = nic->nix_blkaddr;
		break;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		break;
	case BLKTYPE_CPT:
		blkaddr = BLKADDR_CPT0;
		break;
	default:
		blkaddr = BLKADDR_RVUM;
		break;
	}

	offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
	offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);

	return nic->reg_base + offset;
}

static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	writeq(val, addr);
}

static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	return readq(addr);
}

/* Mbox bounce buffer APIs */
static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
{
	struct otx2_mbox *otx2_mbox;
	struct otx2_mbox_dev *mdev;

	mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
	if (!mbox->bbuf_base)
		return -ENOMEM;

	/* Overwrite mbox mbase to point to bounce buffer, so that PF/VF
	 * prepare all mbox messages in bounce buffer instead of directly
	 * in hw mbox memory.
	 */
	otx2_mbox = &mbox->mbox;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;

	otx2_mbox = &mbox->mbox_up;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;
	return 0;
}

/* Copy received messages from HW mbox memory into this device's bounce
 * buffer; the copy length is clamped to the Rx region size. No-op when the
 * device is not using a bounce buffer (mbase already points at HW memory).
 */
static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
{
	u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *hdr;
	u64 msg_size;

	if (mdev->mbase == hw_mbase)
		return;

	hdr = hw_mbase + mbox->rx_start;
	msg_size = hdr->msg_size;

	if (msg_size > mbox->rx_size - msgs_offset)
		msg_size = mbox->rx_size - msgs_offset;

	/* Copy mbox messages from mbox memory to bounce buffer */
	memcpy(mdev->mbase + mbox->rx_start,
	       hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}

/* With the absence of API for 128-bit IO memory access for arm64,
 * implement required operations at place.
 */
#if defined(CONFIG_ARM64)
static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
{
	__asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
			 ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}

/* Atomic fetch-and-add on IO memory using the ARMv8.1 LSE LDADD instruction;
 * returns the value at *addr prior to the addition.
 */
static inline u64 otx2_atomic64_add(u64 incr, void __iomem *addr)
{
	u64 __iomem *ptr = addr;
	u64 result;

	__asm__ volatile(".cpu   generic+lse\n"
			 "ldadd %x[i], %x[r], [%[b]]"
			 : [r]"=r"(result), "+m"(*ptr)
			 : [i]"r"(incr), [b]"r"(ptr)
			 : "memory");
	return result;
}

#else
#define otx2_write128(lo, hi, addr)		writeq((hi) | (lo), addr)

/* Non-arm64 stub: the atomic alloc path is only exercised on arm64 targets */
static inline u64 otx2_atomic64_add(u64 incr, void __iomem *addr)
{
	return 0;
}
#endif

/* Free a batch of buffer pointers to an aura via an LMTST burst write.
 * Caller must hold the CPU (see cn10k_aura_freeptr) since the per-CPU
 * LMT line is used.
 */
static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
					u64 *ptrs, u64 num_ptrs)
{
	struct otx2_lmt_info *lmt_info;
	u64 size = 0, count_eot = 0;
	u64 tar_addr, val = 0;

	lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
	tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
	/* LMTID is same as AURA Id */
	val = (lmt_info->lmt_id & 0x7FF) | BIT_ULL(63);
	/* Meaning of count_eot
	 * CN10K: count_eot = 0 if the number of pointers to free is even,
	 *        count_eot = 1 if the number of pointers to free is odd.
	 *
	 * CN20K: count_eot represents the least significant 2 bits of the
	 *        total number of valid pointers to free.
	 *        Example: if 7 pointers are freed (0b111), count_eot = 0b11.
	 */
	count_eot = (num_ptrs - 1) & 0x3ULL;
	/* Set AURA ID to free pointer */
	ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
	/* Target address for LMTST flush tells HW how many 128bit
	 * words are valid from NPA_LF_AURA_BATCH_FREE0.
	 *
	 * tar_addr[6:4] is LMTST size-1 in units of 128b.
	 */
	if (num_ptrs > 2) {
		size = (sizeof(u64) * num_ptrs) / 16;
		if (!(count_eot & 1))
			size++;
		tar_addr |= ((size - 1) & 0x7) << 4;
	}
	dma_wmb();
	memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
	/* Perform LMTST flush */
	cn10k_lmt_flush(val, tar_addr);
}

static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	u64 ptrs[2];

	ptrs[1] = buf;
	get_cpu();
	/* Free only one buffer at time during init and teardown */
	__cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
	put_cpu();
}

/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
	void __iomem *ptr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
	u64 incr = (u64)aura | BIT_ULL(63);

	return otx2_atomic64_add(incr, ptr);
}

/* Free pointer to a pool/aura */
static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	void __iomem *addr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0);

	otx2_write128(buf, (u64)aura | BIT_ULL(63), addr);
}

/* Map (queue type, queue index) to its NPA pool index; SQ pools are laid
 * out after all RQ pools.
 */
static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
	if (type == AURA_NIX_SQ)
		return pfvf->hw.rqpool_cnt + idx;

	/* AURA_NIX_RQ */
	return idx;
}

/* Mbox APIs */

/* Send any pending down-direction mbox messages and sleep-wait for the
 * responses; returns 0 when there was nothing to send.
 */
static inline int otx2_sync_mbox_msg(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

/* As otx2_sync_mbox_msg() but for the up-direction mailbox of "devid" */
static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
		return 0;
	otx2_mbox_msg_send_up(&mbox->mbox_up, devid);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
}

/* Use this API to send mbox msgs in atomic context
 * where sleeping is not allowed
 */
static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

/* Generate one otx2_mbox_alloc_msg_<name>() allocator per mailbox message
 * type listed in MBOX_MESSAGES.
 */
#define M(_name, _id, _fn_name, _req_type, _rsp_type)                   \
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox)                    \
{									\
	struct _req_type *req;						\
	u16 pcifunc = mbox->pfvf->pcifunc;				\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&mbox->mbox, 0, sizeof(struct _req_type),		\
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	req->hdr.pcifunc = pcifunc;					\
	trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req), pcifunc); \
	return req;							\
}

MBOX_MESSAGES
#undef M

/* Declare one otx2_mbox_up_handler_<name>() prototype per up-direction
 * message type; the implementations live in the .c files.
 */
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int									\
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
				struct _req_type *req,			\
				struct _rsp_type *rsp);			\

MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M

/* Time to wait before watchdog kicks off */
#define OTX2_TX_TIMEOUT		(100 * HZ)

/* A nonzero FUNC field in pcifunc identifies a VF (zero means PF) */
static inline bool is_otx2_vf(u16 pcifunc)
{
	return !!(pcifunc & RVU_PFVF_FUNC_MASK);
}

/* DMA map/unmap skipping CPU sync; buffers are synced explicitly where
 * needed on the datapath.
 */
static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
					   struct page *page,
					   size_t offset, size_t size,
					   enum dma_data_direction dir)
{
	return dma_map_page_attrs(pfvf->dev, page,
				  offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
				       dma_addr_t addr, size_t size,
				       enum dma_data_direction dir)
{
	dma_unmap_page_attrs(pfvf->dev, addr, size,
			     dir, DMA_ATTR_SKIP_CPU_SYNC);
}

/* Resolve the SMQ (send meta queue) for a Tx queue index, honouring, in
 * order: the PFC per-priority SMQ list, the QoS qid map for queues beyond
 * the non-QoS range, and otherwise the regular SMQ list.
 */
static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
{
	u16 smq;
	int idx;

#ifdef CONFIG_DCB
	if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
		return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
#endif
	/* check if qidx falls under QOS queues */
	if (qidx >= pfvf->hw.non_qos_queues) {
		smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];
	} else {
		idx = qidx % pfvf->hw.txschq_cnt[NIX_TXSCH_LVL_SMQ];
		smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][idx];
	}

	return smq;
}

static inline u16 otx2_get_total_tx_queues(struct otx2_nic *pfvf)
{
	return pfvf->hw.non_qos_queues + pfvf->hw.tc_tx_queues;
}

static inline u64 otx2_convert_rate(u64 rate)
{
	u64 converted_rate;

	/* Convert bytes per second to Mbps */
	converted_rate = rate * 8;
	converted_rate = max_t(u64, converted_rate / 1000000, 1);

	return converted_rate;
}

static inline int otx2_tc_flower_rule_cnt(struct otx2_nic *pfvf)
{
	/* return here if MCAM entries not allocated */
	if (!pfvf->flow_cfg)
		return 0;

	return pfvf->flow_cfg->nr_flows;
}

/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);
int otx2_reset_mac_stats(struct otx2_nic *pfvf);

/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
int otx2_detach_resources(struct mbox *mbox);
int otx2_config_npa(struct otx2_nic *pfvf);
int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
void otx2_aura_pool_free(struct otx2_nic *pfvf);
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
void otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
void otx2_free_pending_sqe(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    dma_addr_t *dma, int qidx, int idx);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
		      dma_addr_t *dma);
int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
		   int stack_pages, int numptrs, int buf_size, int type);
int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
		   int pool_id, int numptrs);
int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf);
void otx2_free_queue_mem(struct otx2_qset *qset);
int otx2_alloc_queue_mem(struct otx2_nic *pf);
int otx2_init_hw_resources(struct otx2_nic *pfvf);
void otx2_free_hw_resources(struct otx2_nic *pf);
int otx2_wq_init(struct otx2_nic *pf);
int otx2_check_pf_usable(struct otx2_nic *pf);
int otx2_pfaf_mbox_init(struct otx2_nic *pf);
int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af);
int otx2_realloc_msix_vectors(struct otx2_nic *pf);
void otx2_pfaf_mbox_destroy(struct otx2_nic *pf);
void otx2_disable_mbox_intr(struct otx2_nic *pf);
void otx2_disable_napi(struct otx2_nic *pf);
irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura);
int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx);
int otx2_set_hw_capabilities(struct otx2_nic *pfvf);
int otx2_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
		      int pool_id, int numptrs);
int otx2_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
		      int stack_pages, int numptrs, int buf_size, int type);

/* RSS configuration APIs*/
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id, const u32 *ind_tbl);

/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp);
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp);
void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
				struct cgx_fec_stats_rsp *rsp);
void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
				struct nix_bp_cfg_rsp *rsp);

/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
/* Ethtool ops registration for PF and VF netdevs */
void otx2_set_ethtool_ops(struct net_device *netdev);
void otx2vf_set_ethtool_ops(struct net_device *netdev);

/* Netdev open/stop and queue/timestamp configuration */
int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues);
int otx2_config_hwtstamp_get(struct net_device *netdev,
			     struct kernel_hwtstamp_config *config);
int otx2_config_hwtstamp_set(struct net_device *netdev,
			     struct kernel_hwtstamp_config *config,
			     struct netlink_ext_ack *extack);

/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count);
void otx2_mcam_flow_del(struct otx2_nic *pf);
int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
/* Ethtool ntuple (rxnfc) flow rule management */
int otx2_get_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc, u32 location);
int otx2_get_all_flows(struct otx2_nic *pfvf,
		       struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg);
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
/* XDP transmit helpers */
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
			    u64 iova, int len, u16 qidx, u16 flags);
void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf,
			 u64 dma_addr, int len, int *offset, u16 flags);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
				   netdev_features_t features);
int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    u64 iova, int size);
int otx2_mcam_entry_init(struct otx2_nic *pfvf);

/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data);
void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);

/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);

#ifdef CONFIG_DCB
/* DCB support */
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
int
otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
int otx2_dcbnl_set_ops(struct net_device *dev);
/* PFC support */
int otx2_pfc_txschq_config(struct otx2_nic *pfvf);
int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);
#endif

#if IS_ENABLED(CONFIG_MACSEC)
/* MACSEC offload support */
int cn10k_mcs_init(struct otx2_nic *pfvf);
void cn10k_mcs_free(struct otx2_nic *pfvf);
void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
#else
/* No-op stubs so callers need not be wrapped in #ifdefs when MACSEC is off */
static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
					  struct mcs_intr_info *event)
{}
#endif /* CONFIG_MACSEC */

/* qos support */
/* Record the number of QoS Tx queues and initialize the qos tree list and
 * its protecting mutex. Called once before qos queues are used.
 */
static inline void otx2_qos_init(struct otx2_nic *pfvf, int qos_txqs)
{
	struct otx2_hw *hw = &pfvf->hw;

	hw->tc_tx_queues = qos_txqs;
	INIT_LIST_HEAD(&pfvf->qos.qos_tree);
	mutex_init(&pfvf->qos.qos_lock);
}

/* Counterpart of otx2_qos_init(): release the qos mutex on teardown */
static inline void otx2_shutdown_qos(struct otx2_nic *pfvf)
{
	mutex_destroy(&pfvf->qos.qos_lock);
}

u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
		      struct net_device *sb_dev);
int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid);
void otx2_qos_config_txschq(struct otx2_nic *pfvf);
void otx2_clean_qos_queues(struct otx2_nic *pfvf);
int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info);
int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
			     struct flow_cls_offload *cls_flower);

/* Ascending sort()-style comparator for arrays of u16 entry indices.
 * u16 operands promote to int, so the subtraction cannot overflow.
 */
static inline int mcam_entry_cmp(const void *a, const void *b)
{
	return *(u16 *)a - *(u16 *)b;
}

dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
				 struct sk_buff *skb, int seg, int *len);
void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx);
void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
			int first, int mdevs, u64 intr);
int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry,
			     u16 *cntr_val);
int otx2_add_mcam_flow_entry(struct otx2_nic *nic,
			     struct npc_install_flow_req *req);
int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
			     struct otx2_tc_flow *node);

struct otx2_tc_flow *
otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg,
			   int index);
#endif /* OTX2_COMMON_H */