// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "mcs.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_npc_hash.h"

static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id);
static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
			       int type, bool add);
static int nix_setup_ipolicers(struct rvu *rvu,
			       struct nix_hw *nix_hw, int blkaddr);
static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc);
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof);
static const char *nix_get_ctx_name(int ctype);
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc);

enum mc_tbl_sz {
	MC_TBL_SZ_256,
	MC_TBL_SZ_512,
	MC_TBL_SZ_1K,
	MC_TBL_SZ_2K,
	MC_TBL_SZ_4K,
	MC_TBL_SZ_8K,
	MC_TBL_SZ_16K,
	MC_TBL_SZ_32K,
	MC_TBL_SZ_64K,
};

enum mc_buf_cnt {
	MC_BUF_CNT_8,
	MC_BUF_CNT_16,
	MC_BUF_CNT_32,
	MC_BUF_CNT_64,
	MC_BUF_CNT_128,
	MC_BUF_CNT_256,
	MC_BUF_CNT_512,
	MC_BUF_CNT_1024,
	MC_BUF_CNT_2048,
};

enum nix_makr_fmt_indexes {
	NIX_MARK_CFG_IP_DSCP_RED,
	NIX_MARK_CFG_IP_DSCP_YELLOW,
	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
	NIX_MARK_CFG_IP_ECN_RED,
	NIX_MARK_CFG_IP_ECN_YELLOW,
	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
	NIX_MARK_CFG_VLAN_DEI_RED,
	NIX_MARK_CFG_VLAN_DEI_YELLOW,
	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
	NIX_MARK_CFG_MAX,
};

/* For now considering MC resources needed for broadcast
 * pkt replication only. i.e 256 HWVFs + 12 PFs.
 */
#define MC_TBL_SIZE	MC_TBL_SZ_2K
#define MC_BUF_CNT	MC_BUF_CNT_1024

#define MC_TX_MAX	2048

struct mce {
	struct hlist_node node;
	u32 rq_rss_index;
	u16 pcifunc;
	u16 channel;
	u8 dest_type;
	u8 is_active;
	u8 reserved[2];
};

int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
{
	int i = 0;

	/* If blkaddr is 0, return the first nix block address */
	if (blkaddr == 0)
		return rvu->nix_blkaddr[blkaddr];

	while (i + 1 < MAX_NIX_BLKS) {
		if (rvu->nix_blkaddr[i] == blkaddr)
			return rvu->nix_blkaddr[i + 1];
		i++;
	}

	return 0;
}

bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return false;
	return true;
}

int rvu_get_nixlf_count(struct rvu *rvu)
{
	int blkaddr = 0, max = 0;
	struct rvu_block *block;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &rvu->hw->block[blkaddr];
		max += block->lf.max;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
	return max;
}

int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (*nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (nix_blkaddr)
		*nix_blkaddr = blkaddr;

	return 0;
}

int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
			struct nix_hw **nix_hw, int *blkaddr)
{
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || *blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
	if (!*nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;
	return 0;
}

static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
	INIT_HLIST_HEAD(&list->head);
	list->count = 0;
	list->max = max;
}

static int nix_alloc_mce_list(struct nix_mcast *mcast, int count, u8 dir)
{
	struct rsrc_bmap *mce_counter;
	int idx;

	if (!mcast)
		return -EINVAL;

	mce_counter = &mcast->mce_counter[dir];
	if (!rvu_rsrc_check_contig(mce_counter, count))
		return -ENOSPC;

	idx = rvu_alloc_rsrc_contig(mce_counter, count);
	return idx;
}

static void nix_free_mce_list(struct nix_mcast *mcast, int count, int start, u8 dir)
{
	struct rsrc_bmap *mce_counter;

	if (!mcast)
		return;

	mce_counter = &mcast->mce_counter[dir];
	rvu_free_rsrc_contig(mce_counter, count, start);
}

struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	int nix_blkaddr = 0, i = 0;
	struct rvu *rvu = hw->rvu;

	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
	while (nix_blkaddr) {
		if (blkaddr == nix_blkaddr && hw->nix)
			return &hw->nix[i];
		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
		i++;
	}
	return NULL;
}

int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type)
{
	if (hw->cap.nix_multiple_dwrr_mtu)
		return NIX_AF_DWRR_MTUX(smq_link_type);

	if (smq_link_type == SMQ_LINK_TYPE_SDP)
		return NIX_AF_DWRR_SDP_MTU;

	/* Here it's same reg for RPM and LBK */
	return NIX_AF_DWRR_RPM_MTU;
}

u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
{
	dwrr_mtu &= 0x1FULL;

	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	switch (dwrr_mtu) {
	case 4:
		return 9728;
	case 5:
		return 10240;
	default:
		return BIT_ULL(dwrr_mtu);
	}

	return 0;
}

u32 convert_bytes_to_dwrr_mtu(u32 bytes)
{
	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	if (bytes > BIT_ULL(16))
		return 0;

	switch (bytes) {
	case 9728:
		return 4;
	case 10240:
		return 5;
	default:
		return ilog2(bytes);
	}

	return 0;
}
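
/* Illustrative round-trip of the encoding used by the two helpers above; these
 * numbers follow from the code itself, they are not quoted from the hardware spec:
 * convert_bytes_to_dwrr_mtu(9728) returns the reserved value 4 and
 * convert_dwrr_mtu_to_bytes(4) gives 9728 back, while a non-reserved size such
 * as 8192 maps to ilog2(8192) = 13 and back to BIT_ULL(13) = 8192.
 */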

static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
	int err;

	/* Sync all in flight RX packets to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");

	/* SW_SYNC ensures all existing transactions are finished and pkts
	 * are written to LLC/DRAM, queues should be torn down after
	 * successful SW_SYNC. Due to a HW errata, in some rare scenarios
	 * an existing transaction might end after SW_SYNC operation. To
	 * ensure operation is fully done, do the SW_SYNC twice.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
}

static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return false;

	txsch = &nix_hw->txsch[lvl];
	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	mutex_lock(&rvu->rsrc_lock);
	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
	mutex_unlock(&rvu->rsrc_lock);

	/* TLs aggregating traffic are shared across PF and VFs */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if ((nix_get_tx_link(rvu, map_func) !=
		     nix_get_tx_link(rvu, pcifunc)) &&
		    (rvu_get_pf(rvu->pdev, map_func) !=
		     rvu_get_pf(rvu->pdev, pcifunc)))
			return false;
		else
			return true;
	}

	if (map_func != pcifunc)
		return false;

	return true;
}

static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
			      struct nix_lf_alloc_rsp *rsp, bool loop)
{
	struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u16 req_chan_base, req_chan_end, req_chan_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	struct sdp_node_info *sdp_info;
	int pkind, pf, vf, lbkid, vfid;
	u8 cgx_id, lmac_id;
	bool from_vf;
	int err;

	pf = rvu_get_pf(rvu->pdev, pcifunc);
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	switch (type) {
	case NIX_INTF_TYPE_CGX:
		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);

		pkind = rvu_npc_get_pkind(rvu, pf);
		if (pkind < 0) {
			dev_err(rvu->dev,
				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
			return -EINVAL;
		}
		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;

		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
		rvu_npc_set_pkind(rvu, pkind, pfvf);
		break;
	case NIX_INTF_TYPE_LBK:
		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;

		/* If NIX1 block is present on the silicon then NIXes are
		 * assigned alternatively for lbk interfaces. NIX0 should
		 * send packets on lbk link 1 channels and NIX1 should send
		 * on lbk link 0 channels for the communication between
		 * NIX0 and NIX1.
		 */
		lbkid = 0;
		if (rvu->hw->lbk_links > 1)
			lbkid = vf & 0x1 ? 0 : 1;

		/* By default NIX0 is configured to send packet on lbk link 1
		 * (which corresponds to LBK1), same packet will receive on
		 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0
		 * (which corresponds to LBK2) packet will receive on NIX0 lbk
		 * link 1.
		 * But if lbk links for NIX0 and NIX1 are negated, i.e NIX0
		 * transmits and receives on lbk link 0, which corresponds
		 * to LBK1 block, back to back connectivity between NIX and
		 * LBK can be achieved (which is similar to 96xx)
		 *
		 *			RX		TX
		 * NIX0 lbk link	1 (LBK2)	1 (LBK1)
		 * NIX0 lbk link	0 (LBK0)	0 (LBK0)
		 * NIX1 lbk link	0 (LBK1)	0 (LBK2)
		 * NIX1 lbk link	1 (LBK3)	1 (LBK3)
		 */
		if (loop)
			lbkid = !lbkid;

		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore if odd number of AF VFs are
		 * enabled then the last VF remains with no pair.
		 */
		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
		pfvf->tx_chan_base = vf & 0x1 ?
					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = hw->cgx_links + lbkid;
		pfvf->lbkid = lbkid;
		rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);

		break;
	case NIX_INTF_TYPE_SDP:
		from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
		parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)];
		sdp_info = parent_pf->sdp_info;
		if (!sdp_info) {
			dev_err(rvu->dev, "Invalid sdp_info pointer\n");
			return -EINVAL;
		}
		if (from_vf) {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
					sdp_info->num_pf_rings;
			vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
			for (vfid = 0; vfid < vf; vfid++)
				req_chan_base += sdp_info->vf_rings[vfid];
			req_chan_cnt = sdp_info->vf_rings[vf];
			req_chan_end = req_chan_base + req_chan_cnt - 1;
			if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
			    req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
				dev_err(rvu->dev,
					"PF_Func 0x%x: Invalid channel base and count\n",
					pcifunc);
				return -EINVAL;
			}
		} else {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
			req_chan_cnt = sdp_info->num_pf_rings;
		}

		pfvf->rx_chan_base = req_chan_base;
		pfvf->rx_chan_cnt = req_chan_cnt;
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;

		rsp->tx_link = hw->cgx_links + hw->lbk_links;
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);
		break;
	}

	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
	 * RVU PF/VF's MAC address.
	 */
	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, pfvf->mac_addr);

	/* Add this PF_FUNC to bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to enable PF_FUNC 0x%x\n",
			pcifunc);
		return err;
	}
	/* Install MCAM rule matching Ethernet broadcast mac address */
	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
					  nixlf, pfvf->rx_chan_base);

	pfvf->maxlen = NIC_HW_MIN_FRS;
	pfvf->minlen = NIC_HW_MIN_FRS;

	return 0;
}

static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int err;

	pfvf->maxlen = 0;
	pfvf->minlen = 0;

	/* Remove this PF_FUNC from bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to disable PF_FUNC 0x%x\n",
			pcifunc);
	}

	/* Free and disable any MCAM entries used by this NIX LF */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);

	/* Disable DMAC filters used */
	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}

#define NIX_BPIDS_PER_LMAC	8
#define NIX_BPIDS_PER_CPT	1
static int nix_setup_bpids(struct rvu *rvu, struct nix_hw *hw, int blkaddr)
{
	struct nix_bp *bp = &hw->bp;
	int err, max_bpids;
	u64 cfg;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	max_bpids = FIELD_GET(NIX_CONST_MAX_BPIDS, cfg);

	/* Reserve the BPIds for CGX and SDP */
	bp->cgx_bpid_cnt = rvu->hw->cgx_links * NIX_BPIDS_PER_LMAC;
	bp->sdp_bpid_cnt = rvu->hw->sdp_links * FIELD_GET(NIX_CONST_SDP_CHANS, cfg);
	bp->free_pool_base = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt +
			     NIX_BPIDS_PER_CPT;
	bp->bpids.max = max_bpids - bp->free_pool_base;

	err = rvu_alloc_bitmap(&bp->bpids);
	if (err)
		return err;

	bp->fn_map = devm_kcalloc(rvu->dev, bp->bpids.max,
				  sizeof(u16), GFP_KERNEL);
	if (!bp->fn_map)
		return -ENOMEM;

	bp->intf_map = devm_kcalloc(rvu->dev, bp->bpids.max,
				    sizeof(u8), GFP_KERNEL);
	if (!bp->intf_map)
		return -ENOMEM;

	bp->ref_cnt = devm_kcalloc(rvu->dev, bp->bpids.max,
				   sizeof(u8), GFP_KERNEL);
	if (!bp->ref_cnt)
		return -ENOMEM;

	return 0;
}

void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, bpid, err;
	struct nix_hw *nix_hw;
	struct nix_bp *bp;

	if (!is_lbk_vf(rvu, pcifunc))
		return;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return;

	bp = &nix_hw->bp;

	mutex_lock(&rvu->rsrc_lock);
	for (bpid = 0; bpid < bp->bpids.max; bpid++) {
		if (bp->fn_map[bpid] == pcifunc) {
			bp->ref_cnt[bpid]--;
			if (bp->ref_cnt[bpid])
				continue;
			rvu_free_rsrc(&bp->bpids, bpid);
			bp->fn_map[bpid] = 0;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
}

static u16 nix_get_channel(u16 chan, bool cpt_link)
{
	/* CPT channel for a given link channel is always
	 * assumed to be BIT(11) set in link channel.
	 */
	return cpt_link ? chan | BIT(11) : chan;
}
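
/* Illustrative example of the helper above (derived from the code, not from the
 * hardware spec): nix_get_channel(0x2a, true) returns 0x82a since BIT(11) is set
 * for the CPT view of the channel, while nix_get_channel(0x2a, false) returns 0x2a.
 */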

static int nix_bp_disable(struct rvu *rvu,
			  struct nix_bp_cfg_req *req,
			  struct msg_rsp *rsp, bool cpt_link)
{
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, pf, type, err;
	u16 chan_base, chan, bpid;
	struct rvu_pfvf *pfvf;
	struct nix_hw *nix_hw;
	struct nix_bp *bp;
	u16 chan_v;
	u64 cfg;

	pf = rvu_get_pf(rvu->pdev, pcifunc);
	type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	if (is_sdp_pfvf(rvu, pcifunc))
		type = NIX_INTF_TYPE_SDP;

	if (cpt_link && !rvu->hw->cpt_links)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	bp = &nix_hw->bp;
	chan_base = pfvf->rx_chan_base + req->chan_base;
	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		chan_v = nix_get_channel(chan, cpt_link);
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
			    cfg & ~BIT_ULL(16));

		if (type == NIX_INTF_TYPE_LBK) {
			bpid = cfg & GENMASK(8, 0);
			mutex_lock(&rvu->rsrc_lock);
			rvu_free_rsrc(&bp->bpids, bpid - bp->free_pool_base);
			for (bpid = 0; bpid < bp->bpids.max; bpid++) {
				if (bp->fn_map[bpid] == pcifunc) {
					bp->fn_map[bpid] = 0;
					bp->ref_cnt[bpid] = 0;
				}
			}
			mutex_unlock(&rvu->rsrc_lock);
		}
	}
	return 0;
}

int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
				    struct nix_bp_cfg_req *req,
				    struct msg_rsp *rsp)
{
	return nix_bp_disable(rvu, req, rsp, false);
}

int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu,
					struct nix_bp_cfg_req *req,
					struct msg_rsp *rsp)
{
	return nix_bp_disable(rvu, req, rsp, true);
}

static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id)
{
	int bpid, blkaddr, sdp_chan_base, err;
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_pfvf *pfvf;
	struct nix_hw *nix_hw;
	u8 cgx_id, lmac_id;
	struct nix_bp *bp;

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	bp = &nix_hw->bp;

	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * Lmac channels and bpids mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
	 */
	switch (type) {
	case NIX_INTF_TYPE_CGX:
		if ((req->chan_base + req->chan_cnt) > NIX_BPIDS_PER_LMAC)
			return NIX_AF_ERR_INVALID_BPID_REQ;
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
		/* Assign bpid based on cgx, lmac and chan id */
		bpid = (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) +
		       (lmac_id * NIX_BPIDS_PER_LMAC) + req->chan_base;

		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > bp->cgx_bpid_cnt)
			return NIX_AF_ERR_INVALID_BPID;
		break;

	case NIX_INTF_TYPE_LBK:
		/* Alloc bpid from the free pool */
		mutex_lock(&rvu->rsrc_lock);
		bpid = rvu_alloc_rsrc(&bp->bpids);
		if (bpid < 0) {
			mutex_unlock(&rvu->rsrc_lock);
			return NIX_AF_ERR_INVALID_BPID;
		}
		bp->fn_map[bpid] = req->hdr.pcifunc;
		bp->ref_cnt[bpid]++;
		bpid += bp->free_pool_base;
		mutex_unlock(&rvu->rsrc_lock);
		break;
	case NIX_INTF_TYPE_SDP:
		if ((req->chan_base + req->chan_cnt) > bp->sdp_bpid_cnt)
			return NIX_AF_ERR_INVALID_BPID_REQ;

		/* Handle usecase of 2 SDP blocks */
		if (!hw->cap.programmable_chans)
			sdp_chan_base = pfvf->rx_chan_base - NIX_CHAN_SDP_CH_START;
		else
			sdp_chan_base = pfvf->rx_chan_base - hw->sdp_chan_base;

		bpid = bp->cgx_bpid_cnt + req->chan_base + sdp_chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;

		if (bpid > (bp->cgx_bpid_cnt + bp->sdp_bpid_cnt))
			return NIX_AF_ERR_INVALID_BPID;
		break;
	default:
		return -EINVAL;
	}
	return bpid;
}

static int nix_bp_enable(struct rvu *rvu,
			 struct nix_bp_cfg_req *req,
			 struct nix_bp_cfg_rsp *rsp,
			 bool cpt_link)
{
	int blkaddr, pf, type, chan_id = 0;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	u16 chan_base, chan;
	s16 bpid, bpid_base;
	u16 chan_v;
	u64 cfg;

	pf = rvu_get_pf(rvu->pdev, pcifunc);
	type = is_lbk_vf(rvu, pcifunc) ?
		NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (is_sdp_pfvf(rvu, pcifunc))
		type = NIX_INTF_TYPE_SDP;

	/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	if (cpt_link && !rvu->hw->cpt_links)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
	chan_base = pfvf->rx_chan_base + req->chan_base;
	bpid = bpid_base;

	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		if (bpid < 0) {
			dev_warn(rvu->dev, "Fail to enable backpressure\n");
			return -EINVAL;
		}

		chan_v = nix_get_channel(chan, cpt_link);

		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
		cfg &= ~GENMASK_ULL(8, 0);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
			    cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
		chan_id++;
		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
	}

	for (chan = 0; chan < req->chan_cnt; chan++) {
		/* Map channel and bpid assign to it */
		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
				       (bpid_base & 0x3FF);
		if (req->bpid_per_chan)
			bpid_base++;
	}
	rsp->chan_cnt = req->chan_cnt;

	return 0;
}

int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
				   struct nix_bp_cfg_req *req,
				   struct nix_bp_cfg_rsp *rsp)
{
	return nix_bp_enable(rvu, req, rsp, false);
}

int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu,
				       struct nix_bp_cfg_req *req,
				       struct nix_bp_cfg_rsp *rsp)
{
	return nix_bp_enable(rvu, req, rsp, true);
}

static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* IP's Length field */
	field.layer = NIX_TXLAYER_OL3;
	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
	field.offset = v4 ?
			2 : 4;
	field.sizem1 = 1; /* i.e 2 bytes */
	field.alg = NIX_LSOALG_ADD_PAYLEN;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* No ID field in IPv6 header */
	if (!v4)
		return;

	/* IP's ID field */
	field.layer = NIX_TXLAYER_OL3;
	field.offset = 4;
	field.sizem1 = 1; /* i.e 2 bytes */
	field.alg = NIX_LSOALG_ADD_SEGNUM;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* TCP's sequence number field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 4;
	field.sizem1 = 3; /* i.e 4 bytes */
	field.alg = NIX_LSOALG_ADD_OFFSET;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* TCP's flags field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 12;
	field.sizem1 = 1; /* 2 bytes */
	field.alg = NIX_LSOALG_TCP_FLAGS;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	u64 cfg, idx, fidx = 0;

	/* Get max HW supported format indices */
	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
	nix_hw->lso.total = cfg;

	/* Enable LSO */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
	/* For TSO, set first and middle segment flags to
	 * mask out PSH, RST & FIN flags in TCP packet
	 */
	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));

	/* Setup default static LSO formats
	 *
	 * Configure format fields for TCPv4 segmentation offload
	 */
	idx = NIX_LSO_FORMAT_IDX_TSOV4;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;

	/* Configure format fields for TCPv6 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV6;
	fidx = 0;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;
}

static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->rq_bmap);
	kfree(pfvf->sq_bmap);
	kfree(pfvf->cq_bmap);
	if (pfvf->rq_ctx)
		qmem_free(rvu->dev, pfvf->rq_ctx);
	if (pfvf->sq_ctx)
		qmem_free(rvu->dev, pfvf->sq_ctx);
	if (pfvf->cq_ctx)
		qmem_free(rvu->dev, pfvf->cq_ctx);
	if (pfvf->rss_ctx)
		qmem_free(rvu->dev, pfvf->rss_ctx);
	if (pfvf->nix_qints_ctx)
		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
	if (pfvf->cq_ints_ctx)
		qmem_free(rvu->dev, pfvf->cq_ints_ctx);

	pfvf->rq_bmap = NULL;
	pfvf->cq_bmap = NULL;
	pfvf->sq_bmap = NULL;
	pfvf->rq_ctx = NULL;
	pfvf->sq_ctx = NULL;
	pfvf->cq_ctx = NULL;
	pfvf->rss_ctx = NULL;
	pfvf->nix_qints_ctx = NULL;
	pfvf->cq_ints_ctx = NULL;
}
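
/* Illustrative example of the RSS group programming done in the function below
 * (worked out from the register write itself, not quoted from the TRM): with
 * rss_sz = 256 and rss_grps = 2, group 1 gets offset 256 * 1 = 256 and a size
 * field of ilog2(256) - 1 = 7.
 */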
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size,
			      u64 way_mask, bool tag_lsb_as_adder)
{
	int err, grp, num_indices;
	u64 val;

	/* RSS is not requested for this NIXLF */
	if (!rss_sz)
		return 0;
	num_indices = rss_sz * rss_grps;

	/* Alloc NIX RSS HW context memory and config the base */
	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
		    (u64)pfvf->rss_ctx->iova);

	/* Config full RSS table size, enable RSS and caching */
	val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
	      ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);

	if (tag_lsb_as_adder)
		val |= BIT_ULL(5);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
	/* Config RSS group offset and sizes */
	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
	return 0;
}

static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct nix_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct nix_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;
	int ret;

	result = (struct nix_aq_res_s *)aq->res->base;

	/* Get current head pointer where to append this instruction */
	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NIX_AQ_COMP_GOOD) {
		/* TODO: Replace this with some error code */
		if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
		    result->compcode == NIX_AQ_COMP_LOCKERR ||
		    result->compcode == NIX_AQ_COMP_CTX_POISON) {
			ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
			if (ret)
				dev_err(rvu->dev,
					"%s: Not able to unlock cachelines\n", __func__);
		}

		return -EBUSY;
	}

	return 0;
}

static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
			       u16 *smq, u16 *smq_mask)
{
	struct nix_cn10k_aq_enq_req *aq_req;

	if (is_cn20k(rvu->pdev)) {
		*smq = ((struct nix_cn20k_aq_enq_req *)req)->sq.smq;
		*smq_mask = ((struct nix_cn20k_aq_enq_req *)req)->sq_mask.smq;
		return;
	}

	if (!is_rvu_otx2(rvu)) {
		aq_req = (struct nix_cn10k_aq_enq_req *)req;
		*smq = aq_req->sq.smq;
		*smq_mask = aq_req->sq_mask.smq;
	} else {
		*smq = req->sq.smq;
		*smq_mask = req->sq_mask.smq;
	}
}

static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
				   struct nix_aq_enq_req *req,
				   struct nix_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, blkaddr, rc = 0;
	struct nix_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	u16 smq, smq_mask;
	void *ctx, *mask;
	bool ena;
	u64 cfg;

	blkaddr = nix_hw->blkaddr;
	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
		return NIX_AF_ERR_AQ_ENQUEUE;
	}

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);

	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
	 * operations done by AF itself.
	 */
	if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
	      (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
		if (!pfvf->nixlf || nixlf < 0)
			return NIX_AF_ERR_AF_LF_INVALID;
	}

	switch (req->ctype) {
	case NIX_AQ_CTYPE_RQ:
		/* Check if index exceeds max no of queues */
		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_RSS:
		/* Check if RSS is enabled and qidx is within range */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_MCE:
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);

		/* Check if index exceeds MCE list length */
		if (!nix_hw->mcast.mce_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;

		/* Adding multicast lists for requests from PF/VFs is not
		 * yet supported, so ignore this.
		 */
		if (rsp)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_BANDPROF:
		if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
					nix_hw, pcifunc))
			rc = NIX_AF_ERR_INVALID_BANDPROF;
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
	}

	if (rc)
		return rc;

	nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
	if (req->ctype == NIX_AQ_CTYPE_SQ &&
	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
	     (req->op == NIX_AQ_INSTOP_WRITE &&
	      req->sq_mask.ena && req->sq.ena && smq_mask))) {
		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
				     pcifunc, smq))
			return NIX_AF_ERR_AQ_ENQUEUE;
	}

	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
	inst.lf = nixlf;
	inst.cindex = req->qidx;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose first entry in result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Hardware uses same aq->res->base for updating result of
	 * previous instruction hence wait here till it is done.
	 */
	spin_lock(&aq->lock);

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	switch (req->op) {
	case NIX_AQ_INSTOP_WRITE:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(mask, &req->rq_mask,
			       NIX_MAX_CTX_SIZE);
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(mask, &req->sq_mask,
			       NIX_MAX_CTX_SIZE);
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(mask, &req->cq_mask,
			       NIX_MAX_CTX_SIZE);
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(mask, &req->rss_mask,
			       NIX_MAX_CTX_SIZE);
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(mask, &req->mce_mask,
			       NIX_MAX_CTX_SIZE);
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(mask, &req->prof_mask,
			       NIX_MAX_CTX_SIZE);
		fallthrough;
	case NIX_AQ_INSTOP_INIT:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(ctx, &req->rq, NIX_MAX_CTX_SIZE);
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(ctx, &req->sq, NIX_MAX_CTX_SIZE);
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(ctx, &req->cq, NIX_MAX_CTX_SIZE);
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(ctx, &req->rss, NIX_MAX_CTX_SIZE);
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(ctx, &req->mce, NIX_MAX_CTX_SIZE);
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(ctx, &req->prof, NIX_MAX_CTX_SIZE);
		break;
	case NIX_AQ_INSTOP_NOP:
	case NIX_AQ_INSTOP_READ:
	case NIX_AQ_INSTOP_LOCK:
	case NIX_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Submit the instruction to AQ */
	rc = nix_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
	if (req->op == NIX_AQ_INSTOP_INIT) {
		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
			__set_bit(req->qidx, pfvf->rq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
			__set_bit(req->qidx, pfvf->sq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
			__set_bit(req->qidx, pfvf->cq_bmap);
	}

	if (req->op == NIX_AQ_INSTOP_WRITE) {
		if (req->ctype == NIX_AQ_CTYPE_RQ) {
			ena = (req->rq.ena & req->rq_mask.ena) |
			      (test_bit(req->qidx, pfvf->rq_bmap) &
			       ~req->rq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->rq_bmap);
			else
				__clear_bit(req->qidx, pfvf->rq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->rq.ena & req->sq_mask.ena) |
			      (test_bit(req->qidx, pfvf->sq_bmap) &
			       ~req->sq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->sq_bmap);
			else
				__clear_bit(req->qidx, pfvf->sq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->rq.ena & req->cq_mask.ena) |
			      (test_bit(req->qidx, pfvf->cq_bmap) &
			       ~req->cq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->cq_bmap);
			else
				__clear_bit(req->qidx, pfvf->cq_bmap);
		}
	}

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NIX_AQ_INSTOP_READ) {
			if (req->ctype == NIX_AQ_CTYPE_RQ)
				memcpy(&rsp->rq, ctx,
				       NIX_MAX_CTX_SIZE);
			else if (req->ctype == NIX_AQ_CTYPE_SQ)
				memcpy(&rsp->sq, ctx,
				       NIX_MAX_CTX_SIZE);
			else if (req->ctype == NIX_AQ_CTYPE_CQ)
				memcpy(&rsp->cq, ctx,
				       NIX_MAX_CTX_SIZE);
			else if (req->ctype == NIX_AQ_CTYPE_RSS)
				memcpy(&rsp->rss, ctx,
				       NIX_MAX_CTX_SIZE);
			else if (req->ctype == NIX_AQ_CTYPE_MCE)
				memcpy(&rsp->mce, ctx,
				       NIX_MAX_CTX_SIZE);
			else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
				memcpy(&rsp->prof, ctx,
				       NIX_MAX_CTX_SIZE);
		}
	}

	spin_unlock(&aq->lock);
	return 0;
}

static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
				 struct nix_aq_enq_req *req, u8 ctype)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	int rc, word;

	if (req->ctype != NIX_AQ_CTYPE_CQ)
		return 0;

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
				 req->hdr.pcifunc, ctype, req->qidx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
			__func__, nix_get_ctx_name(ctype), req->qidx,
			req->hdr.pcifunc);
		return rc;
	}

	/* Make copy of original context & mask which are required
	 * for resubmission
	 */
	memcpy(&aq_req.cq_mask, &req->cq_mask, NIX_MAX_CTX_SIZE);
	memcpy(&aq_req.cq, &req->cq, NIX_MAX_CTX_SIZE);

	/* exclude fields which HW can update */
	aq_req.cq_mask.cq_err = 0;
	aq_req.cq_mask.wrptr = 0;
	aq_req.cq_mask.tail = 0;
	aq_req.cq_mask.head = 0;
	aq_req.cq_mask.avg_level = 0;
	aq_req.cq_mask.update_time = 0;
	aq_req.cq_mask.substream = 0;

	/* Context mask (cq_mask) holds mask value of fields which
	 * are changed in AQ WRITE operation.
	 * for example cq.drop = 0xa;
	 * cq_mask.drop = 0xff;
	 * Below logic performs '&' between cq and cq_mask so that non
	 * updated fields are masked out for request and response
	 * comparison
	 */
	for (word = 0; word < NIX_MAX_CTX_SIZE / sizeof(u64);
	     word++) {
		*(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
		*(u64 *)((u8 *)&aq_req.cq + word * 8) &=
			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
	}

	if (memcmp(&aq_req.cq, &aq_rsp.cq, NIX_MAX_CTX_SIZE))
		return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;

	return 0;
}

int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
			struct nix_aq_enq_rsp *rsp)
{
	struct nix_hw *nix_hw;
	int err, retries = 5;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

retry:
	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);

	/* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'
	 * As a workaround perform CQ context read after each AQ write. If AQ
	 * read shows AQ write is not updated perform AQ write again.
	 */
	if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
		err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
		if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
			if (retries--)
				goto retry;
			else
				return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
		}
	}

	return err;
}

static const char *nix_get_ctx_name(int ctype)
{
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		return "CQ";
	case NIX_AQ_CTYPE_SQ:
		return "SQ";
	case NIX_AQ_CTYPE_RQ:
		return "RQ";
	case NIX_AQ_CTYPE_RSS:
		return "RSS";
	}
	return "";
}

static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct nix_aq_enq_req aq_req;
	unsigned long *bmap;
	int qidx, q_cnt = 0;
	int err = 0, rc;

	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
		return NIX_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NIX_AQ_CTYPE_CQ) {
		aq_req.cq.ena = 0;
		aq_req.cq_mask.ena = 1;
		aq_req.cq.bp_ena = 0;
		aq_req.cq_mask.bp_ena = 1;
		q_cnt = pfvf->cq_ctx->qsize;
		bmap = pfvf->cq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_SQ) {
		aq_req.sq.ena = 0;
		aq_req.sq_mask.ena = 1;
		q_cnt = pfvf->sq_ctx->qsize;
		bmap = pfvf->sq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_RQ) {
		aq_req.rq.ena = 0;
		aq_req.rq_mask.ena = 1;
		q_cnt = pfvf->rq_ctx->qsize;
		bmap = pfvf->rq_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NIX_AQ_INSTOP_WRITE;

	for (qidx = 0; qidx < q_cnt; qidx++) {
		if (!test_bit(qidx, bmap))
			continue;
		aq_req.qidx = qidx;
		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				nix_get_ctx_name(req->ctype), qidx);
		}
	}

	return err;
}

#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
{
	struct nix_aq_enq_req lock_ctx_req;
	int err;

	if (req->op != NIX_AQ_INSTOP_INIT)
		return 0;

	if (req->ctype == NIX_AQ_CTYPE_MCE ||
	    req->ctype == NIX_AQ_CTYPE_DYNO)
		return 0;

	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
	lock_ctx_req.ctype = req->ctype;
	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
	lock_ctx_req.qidx = req->qidx;
	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
	if (err)
		dev_err(rvu->dev,
			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
			req->hdr.pcifunc,
			nix_get_ctx_name(req->ctype), req->qidx);
	return err;
}

int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	int err;

	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
	if (!err)
		err = nix_lf_hwctx_lockdown(rvu, req);
	return err;
}
#else

int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif

/* CN10K mbox handler */
int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
				      struct nix_cn10k_aq_enq_req *req,
				      struct nix_cn10k_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
				   (struct nix_aq_enq_rsp *)rsp);
}

int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return nix_lf_hwctx_disable(rvu, req);
}

int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp)
{
	int nixlf, qints, hwctx_size, intf, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
		return NIX_AF_ERR_PARAM;

	if (req->way_mask)
		req->way_mask &= 0xFFFF;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
	if (req->npa_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
			req->npa_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
			return NIX_AF_INVAL_NPA_PF_FUNC;
	}

	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
	if (req->sso_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
			req->sso_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
			return NIX_AF_INVAL_SSO_PF_FUNC;
	}

	/* If RSS is being enabled, check if requested config is valid.
	 * RSS table size should be power of two, otherwise
	 * RSS_GRP::OFFSET + adder might go beyond that group or
	 * won't be able to use entire table.
	 */
	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
			    !is_power_of_2(req->rss_sz)))
		return NIX_AF_ERR_RSS_SIZE_INVALID;

	if (req->rss_sz &&
	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
		return NIX_AF_ERR_RSS_GRPS_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);

	/* Alloc NIX RQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->rq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
		    (u64)pfvf->rq_ctx->iova);

	/* Set caching and queue count in HW */
	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);

	/* Alloc NIX SQ HW context memory and config the base */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->sq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
		    (u64)pfvf->sq_ctx->iova);

	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);

	/* Alloc NIX CQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->cq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
		    (u64)pfvf->cq_ctx->iova);

	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);

	/* Initialize receive side scaling (RSS) */
	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
				 req->rss_grps, hwctx_size, req->way_mask,
				 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
	if (err)
		goto free_mem;

	/* Alloc memory for CQINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 24) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
		    (u64)pfvf->cq_ints_ctx->iova);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Alloc memory for QINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 12) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
		    (u64)pfvf->nix_qints_ctx->iova);
	rvu_write64(rvu,
		    blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Setup VLANX TPID's.
	 * Use VLAN1 for 802.1Q
	 * and VLAN0 for 802.1AD.
	 */
	cfg = (0x8100ULL << 16) | 0x88A8ULL;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

	/* Enable LMTST for this NIX LF */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));

	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
	if (req->npa_func)
		cfg = req->npa_func;
	if (req->sso_func)
		cfg |= (u64)req->sso_func << 16;

	cfg |= (u64)req->xqe_sz << 33;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);

	/* Config Rx pkt length, csum checks and apad enable / disable */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);

	/* Configure pkind for TX parse config */
	cfg = NPC_TX_DEF_PKIND;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);

	if (is_rep_dev(rvu, pcifunc)) {
		pfvf->tx_chan_base = RVU_SWITCH_LBK_CHAN;
		pfvf->tx_chan_cnt = 1;
		goto exit;
	}

	intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (is_sdp_pfvf(rvu, pcifunc))
		intf = NIX_INTF_TYPE_SDP;

	err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
				 !!(req->flags & NIX_LF_LBK_BLK_SEL));
	if (err)
		goto free_mem;

	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);

	/* Configure RX VTAG Type 7 (strip) for vf vlan */
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
		    VTAGSIZE_T4 | VTAG_STRIP);

	goto exit;

free_mem:
	nix_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* Set macaddr of this PF/VF */
	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	/* set SQB size info */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
	rsp->rx_chan_base = pfvf->rx_chan_base;
	rsp->tx_chan_base = pfvf->tx_chan_base;
	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
	/* Get HW supported stat count */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
	/* Get count of CQ IRQs and error IRQs supported per LF */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	rsp->qints = ((cfg >> 12) & 0xFFF);
	rsp->cints = ((cfg >> 24) & 0xFFF);
	rsp->cgx_links = hw->cgx_links;
	rsp->lbk_links = hw->lbk_links;
	rsp->sdp_links = hw->sdp_links;

	return rc;
}

int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (is_rep_dev(rvu, pcifunc))
		goto free_lf;

	if (req->flags & NIX_LF_DISABLE_FLOWS)
		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
	else
		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);

	/* Free any tx vtag def entries used by this NIX LF */
	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
		nix_free_tx_vtag_entries(rvu, pcifunc);

	nix_interface_deinit(rvu, pcifunc, nixlf);

free_lf:
	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	nix_ctx_free(rvu, pfvf);

	return 0;
}

int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
					 struct nix_mark_format_cfg *req,
					 struct nix_mark_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, rc;
	u32 cfg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	cfg = (((u32)req->offset & 0x7) << 16) |
	      (((u32)req->y_mask & 0xF) << 12) |
	      (((u32)req->y_val & 0xF) << 8) |
	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);

	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
	if (rc < 0) {
		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
			rvu_get_pf(rvu->pdev, pcifunc),
			pcifunc & RVU_PFVF_FUNC_MASK);
		return NIX_AF_ERR_MARK_CFG_FAIL;
	}

	rsp->mark_format_idx = rc;
	return 0;
}

/* Handle shaper update specially for a few revisions */
static bool
handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
			    int lvl, u64 reg, u64 regval)
{
	u64 regbase, oldval, sw_xoff = 0;
	u64 dbgval, md_debug0 = 0;
	unsigned long poll_tmo;
	bool rate_reg = 0;
	u32 schq;

	regbase = reg & 0xFFFF;
	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);

	/* Check for rate register */
	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);

		rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
		break;
	case NIX_TXSCH_LVL_TL2:
		md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
			    regbase == NIX_AF_TL2X_PIR(0));
		break;
	case NIX_TXSCH_LVL_TL3:
		md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
			    regbase == NIX_AF_TL3X_PIR(0));
		break;
	case NIX_TXSCH_LVL_TL4:
		md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
			    regbase == NIX_AF_TL4X_PIR(0));
		break;
	case NIX_TXSCH_LVL_MDQ:
		sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
		rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
			    regbase == NIX_AF_MDQX_PIR(0));
		break;
	}

	if (!rate_reg)
		return false;

	/* Nothing special to do when state is not toggled */
	oldval = rvu_read64(rvu, blkaddr, reg);
	if ((oldval & 0x1) == (regval & 0x1)) {
		rvu_write64(rvu, blkaddr, reg, regval);
		return true;
	}

	/* PIR/CIR disable */
	if (!(regval & 0x1)) {
		rvu_write64(rvu, blkaddr, sw_xoff, 1);
		rvu_write64(rvu, blkaddr, reg, 0);
		udelay(4);
		rvu_write64(rvu, blkaddr, sw_xoff, 0);
		return true;
	}

	/* PIR/CIR enable */
	rvu_write64(rvu, blkaddr, sw_xoff, 1);
	if (md_debug0) {
		poll_tmo = jiffies + usecs_to_jiffies(10000);
		/* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
		do {
			if (time_after(jiffies, poll_tmo)) {
				dev_err(rvu->dev,
					"NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
					nixlf, schq, lvl);
				goto exit;
			}
			usleep_range(1, 5);
			dbgval = rvu_read64(rvu, blkaddr, md_debug0);
		} while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
	}
	rvu_write64(rvu, blkaddr, reg, regval);
exit:
	rvu_write64(rvu, blkaddr, sw_xoff, 0);
	return true;
}

static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
				  int lvl, int schq)
{
	u64 tlx_parent = 0, tlx_schedule = 0;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL2:
		tlx_parent = NIX_AF_TL2X_PARENT(schq);
		tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		tlx_parent = NIX_AF_TL3X_PARENT(schq);
		tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		tlx_parent = NIX_AF_TL4X_PARENT(schq);
		tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		/* no need to reset SMQ_CFG as HW clears this CSR
		 * on SMQ flush
		 */
		tlx_parent = NIX_AF_MDQX_PARENT(schq);
		tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
		break;
	default:
		return;
	}

	if (tlx_parent)
		rvu_write64(rvu, blkaddr, tlx_parent, 0x0);

	if (tlx_schedule)
		rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
}

/* Disable shaping of pkts by a scheduler queue
 * at a given scheduler level.
1946 */ 1947 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, 1948 int nixlf, int lvl, int schq) 1949 { 1950 struct rvu_hwinfo *hw = rvu->hw; 1951 u64 cir_reg = 0, pir_reg = 0; 1952 u64 cfg; 1953 1954 switch (lvl) { 1955 case NIX_TXSCH_LVL_TL1: 1956 cir_reg = NIX_AF_TL1X_CIR(schq); 1957 pir_reg = 0; /* PIR not available at TL1 */ 1958 break; 1959 case NIX_TXSCH_LVL_TL2: 1960 cir_reg = NIX_AF_TL2X_CIR(schq); 1961 pir_reg = NIX_AF_TL2X_PIR(schq); 1962 break; 1963 case NIX_TXSCH_LVL_TL3: 1964 cir_reg = NIX_AF_TL3X_CIR(schq); 1965 pir_reg = NIX_AF_TL3X_PIR(schq); 1966 break; 1967 case NIX_TXSCH_LVL_TL4: 1968 cir_reg = NIX_AF_TL4X_CIR(schq); 1969 pir_reg = NIX_AF_TL4X_PIR(schq); 1970 break; 1971 case NIX_TXSCH_LVL_MDQ: 1972 cir_reg = NIX_AF_MDQX_CIR(schq); 1973 pir_reg = NIX_AF_MDQX_PIR(schq); 1974 break; 1975 } 1976 1977 /* Shaper state toggle needs wait/poll */ 1978 if (hw->cap.nix_shaper_toggle_wait) { 1979 if (cir_reg) 1980 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 1981 lvl, cir_reg, 0); 1982 if (pir_reg) 1983 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 1984 lvl, pir_reg, 0); 1985 return; 1986 } 1987 1988 if (!cir_reg) 1989 return; 1990 cfg = rvu_read64(rvu, blkaddr, cir_reg); 1991 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0)); 1992 1993 if (!pir_reg) 1994 return; 1995 cfg = rvu_read64(rvu, blkaddr, pir_reg); 1996 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0)); 1997 } 1998 1999 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, 2000 int lvl, int schq) 2001 { 2002 struct rvu_hwinfo *hw = rvu->hw; 2003 int link_level; 2004 int link; 2005 2006 if (lvl >= hw->cap.nix_tx_aggr_lvl) 2007 return; 2008 2009 /* Reset TL4's SDP link config */ 2010 if (lvl == NIX_TXSCH_LVL_TL4) 2011 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); 2012 2013 link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 
2014 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 2015 if (lvl != link_level) 2016 return; 2017 2018 /* Reset TL2's CGX or LBK link config */ 2019 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) 2020 rvu_write64(rvu, blkaddr, 2021 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); 2022 } 2023 2024 static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr, 2025 int lvl, int schq) 2026 { 2027 struct rvu_hwinfo *hw = rvu->hw; 2028 u64 reg; 2029 2030 /* Skip this if shaping is not supported */ 2031 if (!hw->cap.nix_shaping) 2032 return; 2033 2034 /* Clear level specific SW_XOFF */ 2035 switch (lvl) { 2036 case NIX_TXSCH_LVL_TL1: 2037 reg = NIX_AF_TL1X_SW_XOFF(schq); 2038 break; 2039 case NIX_TXSCH_LVL_TL2: 2040 reg = NIX_AF_TL2X_SW_XOFF(schq); 2041 break; 2042 case NIX_TXSCH_LVL_TL3: 2043 reg = NIX_AF_TL3X_SW_XOFF(schq); 2044 break; 2045 case NIX_TXSCH_LVL_TL4: 2046 reg = NIX_AF_TL4X_SW_XOFF(schq); 2047 break; 2048 case NIX_TXSCH_LVL_MDQ: 2049 reg = NIX_AF_MDQX_SW_XOFF(schq); 2050 break; 2051 default: 2052 return; 2053 } 2054 2055 rvu_write64(rvu, blkaddr, reg, 0x0); 2056 } 2057 2058 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) 2059 { 2060 struct rvu_hwinfo *hw = rvu->hw; 2061 int pf = rvu_get_pf(rvu->pdev, pcifunc); 2062 u8 cgx_id = 0, lmac_id = 0; 2063 2064 if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */ 2065 return hw->cgx_links; 2066 } else if (is_pf_cgxmapped(rvu, pf)) { 2067 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 2068 return (cgx_id * hw->lmac_per_cgx) + lmac_id; 2069 } 2070 2071 /* SDP link */ 2072 return hw->cgx_links + hw->lbk_links; 2073 } 2074 2075 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, 2076 int link, int *start, int *end) 2077 { 2078 struct rvu_hwinfo *hw = rvu->hw; 2079 int pf = rvu_get_pf(rvu->pdev, pcifunc); 2080 2081 /* LBK links */ 2082 if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) { 2083 *start = hw->cap.nix_txsch_per_cgx_lmac * link; 2084 *end = *start + hw->cap.nix_txsch_per_lbk_lmac; 2085 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */ 2086 *start = hw->cap.nix_txsch_per_cgx_lmac * link; 2087 *end = *start + hw->cap.nix_txsch_per_cgx_lmac; 2088 } else { /* SDP link */ 2089 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) + 2090 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links); 2091 *end = *start + hw->cap.nix_txsch_per_sdp_lmac; 2092 } 2093 } 2094 2095 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc, 2096 struct nix_hw *nix_hw, 2097 struct nix_txsch_alloc_req *req) 2098 { 2099 struct rvu_hwinfo *hw = rvu->hw; 2100 int schq, req_schq, free_cnt; 2101 struct nix_txsch *txsch; 2102 int link, start, end; 2103 2104 txsch = &nix_hw->txsch[lvl]; 2105 req_schq = req->schq_contig[lvl] + req->schq[lvl]; 2106 2107 if (!req_schq) 2108 return 0; 2109 2110 link = nix_get_tx_link(rvu, pcifunc); 2111 2112 /* For traffic aggregating scheduler level, one queue is enough */ 2113 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 2114 if (req_schq != 1) 2115 return NIX_AF_ERR_TLX_ALLOC_FAIL; 2116 return 0; 2117 } 2118 2119 /* Get free SCHQ count and check if request can be accomodated */ 2120 if (hw->cap.nix_fixed_txschq_mapping) { 2121 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 2122 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK); 2123 if (end <= txsch->schq.max && schq < end && 2124 !test_bit(schq, txsch->schq.bmap)) 2125 free_cnt = 1; 2126 else 2127 free_cnt = 0; 2128 } else { 2129 free_cnt = rvu_rsrc_free_count(&txsch->schq); 2130 } 2131 2132 if (free_cnt < req_schq || 
	    req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
	    req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	/* If contiguous queues are needed, check for availability */
	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	return 0;
}

static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
			    struct nix_txsch_alloc_rsp *rsp,
			    int lvl, int start, int end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = rsp->hdr.pcifunc;
	int idx, schq;

	/* For traffic aggregating levels, queue alloc is based
	 * on the transmit link to which the PF_FUNC is mapped.
	 */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		/* A single TL queue is allocated */
		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			rsp->schq_contig_list[lvl][0] = start;
		}

		/* Both contig and non-contig reqs don't make sense here */
		if (rsp->schq_contig[lvl])
			rsp->schq[lvl] = 0;

		if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			rsp->schq_list[lvl][0] = start;
		}
		return;
	}

	/* Adjust the queue request count if HW supports
	 * only one queue per level configuration.
	 */
	if (hw->cap.nix_fixed_txschq_mapping) {
		idx = pcifunc & RVU_PFVF_FUNC_MASK;
		schq = start + idx;
		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
			rsp->schq_contig[lvl] = 0;
			rsp->schq[lvl] = 0;
			return;
		}

		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][0] = schq;
			rsp->schq[lvl] = 0;
		} else if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_list[lvl][0] = schq;
		}
		return;
	}

	/* Allocate the requested contiguous queue indices first */
	if (rsp->schq_contig[lvl]) {
		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
						  txsch->schq.max, start,
						  rsp->schq_contig[lvl], 0);
		if (schq >= end)
			rsp->schq_contig[lvl] = 0;
		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][idx] = schq;
			schq++;
		}
	}

	/* Allocate non-contiguous queue indices */
	if (rsp->schq[lvl]) {
		idx = 0;
		for (schq = start; schq < end; schq++) {
			if (!test_bit(schq, txsch->schq.bmap)) {
				set_bit(schq, txsch->schq.bmap);
				rsp->schq_list[lvl][idx++] = schq;
			}
			if (idx == rsp->schq[lvl])
				break;
		}
		/* Update how many were allocated */
		rsp->schq[lvl] = idx;
	}
}

int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
				     struct nix_txsch_alloc_req *req,
				     struct nix_txsch_alloc_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int link, blkaddr, rc = 0;
	int lvl, idx, start, end;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;
	int nixlf;
	u16 schq;

	rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (rc)
		return rc;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	mutex_lock(&rvu->rsrc_lock);

	/* Check if request is valid as per HW capabilities
	 * and can be accommodated.
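	 * All levels are validated first; queues are allocated only after
	 * every level's request passes the check.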
2254 */ 2255 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2256 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req); 2257 if (rc) 2258 goto err; 2259 } 2260 2261 /* Allocate requested Tx scheduler queues */ 2262 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2263 txsch = &nix_hw->txsch[lvl]; 2264 pfvf_map = txsch->pfvf_map; 2265 2266 if (!req->schq[lvl] && !req->schq_contig[lvl]) 2267 continue; 2268 2269 rsp->schq[lvl] = req->schq[lvl]; 2270 rsp->schq_contig[lvl] = req->schq_contig[lvl]; 2271 2272 link = nix_get_tx_link(rvu, pcifunc); 2273 2274 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 2275 start = link; 2276 end = link; 2277 } else if (hw->cap.nix_fixed_txschq_mapping) { 2278 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 2279 } else { 2280 start = 0; 2281 end = txsch->schq.max; 2282 } 2283 2284 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end); 2285 2286 /* Reset queue config */ 2287 for (idx = 0; idx < req->schq_contig[lvl]; idx++) { 2288 schq = rsp->schq_contig_list[lvl][idx]; 2289 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 2290 NIX_TXSCHQ_CFG_DONE)) 2291 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 2292 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2293 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2294 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2295 } 2296 2297 for (idx = 0; idx < req->schq[lvl]; idx++) { 2298 schq = rsp->schq_list[lvl][idx]; 2299 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 2300 NIX_TXSCHQ_CFG_DONE)) 2301 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 2302 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2303 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2304 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2305 } 2306 } 2307 2308 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl; 2309 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO; 2310 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr, 2311 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 
2312 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 2313 goto exit; 2314 err: 2315 rc = NIX_AF_ERR_TLX_ALLOC_FAIL; 2316 exit: 2317 mutex_unlock(&rvu->rsrc_lock); 2318 return rc; 2319 } 2320 2321 static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq, 2322 struct nix_smq_flush_ctx *smq_flush_ctx) 2323 { 2324 struct nix_smq_tree_ctx *smq_tree_ctx; 2325 u64 parent_off, regval; 2326 u16 schq; 2327 int lvl; 2328 2329 smq_flush_ctx->smq = smq; 2330 2331 schq = smq; 2332 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { 2333 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; 2334 smq_tree_ctx->schq = schq; 2335 if (lvl == NIX_TXSCH_LVL_TL1) { 2336 smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq); 2337 smq_tree_ctx->pir_off = 0; 2338 smq_tree_ctx->pir_val = 0; 2339 parent_off = 0; 2340 } else if (lvl == NIX_TXSCH_LVL_TL2) { 2341 smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq); 2342 smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq); 2343 parent_off = NIX_AF_TL2X_PARENT(schq); 2344 } else if (lvl == NIX_TXSCH_LVL_TL3) { 2345 smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq); 2346 smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq); 2347 parent_off = NIX_AF_TL3X_PARENT(schq); 2348 } else if (lvl == NIX_TXSCH_LVL_TL4) { 2349 smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq); 2350 smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq); 2351 parent_off = NIX_AF_TL4X_PARENT(schq); 2352 } else if (lvl == NIX_TXSCH_LVL_MDQ) { 2353 smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq); 2354 smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq); 2355 parent_off = NIX_AF_MDQX_PARENT(schq); 2356 } 2357 /* save cir/pir register values */ 2358 smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off); 2359 if (smq_tree_ctx->pir_off) 2360 smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off); 2361 2362 /* get parent txsch node */ 2363 if (parent_off) { 2364 regval = rvu_read64(rvu, blkaddr, parent_off); 2365 schq = (regval >> 16) & 0x1FF; 2366 } 2367 } 2368 } 2369 2370 static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr, 2371 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable) 2372 { 2373 struct nix_txsch *txsch; 2374 struct nix_hw *nix_hw; 2375 int tl2, tl2_schq; 2376 u64 regoff; 2377 2378 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2379 if (!nix_hw) 2380 return; 2381 2382 /* loop through all TL2s with matching PF_FUNC */ 2383 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; 2384 tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq; 2385 for (tl2 = 0; tl2 < txsch->schq.max; tl2++) { 2386 /* skip the smq(flush) TL2 */ 2387 if (tl2 == tl2_schq) 2388 continue; 2389 /* skip unused TL2s */ 2390 if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE) 2391 continue; 2392 /* skip if PF_FUNC doesn't match */ 2393 if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) != 2394 (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] & 2395 ~RVU_PFVF_FUNC_MASK))) 2396 continue; 2397 /* enable/disable XOFF */ 2398 regoff = NIX_AF_TL2X_SW_XOFF(tl2); 2399 if (enable) 2400 rvu_write64(rvu, blkaddr, regoff, 0x1); 2401 else 2402 rvu_write64(rvu, blkaddr, regoff, 0x0); 2403 } 2404 } 2405 2406 static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr, 2407 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable) 2408 { 2409 u64 cir_off, pir_off, cir_val, pir_val; 2410 struct nix_smq_tree_ctx *smq_tree_ctx; 2411 int lvl; 2412 2413 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { 2414 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; 2415 cir_off = smq_tree_ctx->cir_off; 2416 cir_val = 
smq_tree_ctx->cir_val; 2417 pir_off = smq_tree_ctx->pir_off; 2418 pir_val = smq_tree_ctx->pir_val; 2419 2420 if (enable) { 2421 rvu_write64(rvu, blkaddr, cir_off, cir_val); 2422 if (lvl != NIX_TXSCH_LVL_TL1) 2423 rvu_write64(rvu, blkaddr, pir_off, pir_val); 2424 } else { 2425 rvu_write64(rvu, blkaddr, cir_off, 0x0); 2426 if (lvl != NIX_TXSCH_LVL_TL1) 2427 rvu_write64(rvu, blkaddr, pir_off, 0x0); 2428 } 2429 } 2430 } 2431 2432 static int nix_smq_flush(struct rvu *rvu, int blkaddr, 2433 int smq, u16 pcifunc, int nixlf) 2434 { 2435 struct nix_smq_flush_ctx *smq_flush_ctx; 2436 int err, restore_tx_en = 0, i; 2437 int pf = rvu_get_pf(rvu->pdev, pcifunc); 2438 u8 cgx_id = 0, lmac_id = 0; 2439 u16 tl2_tl3_link_schq; 2440 u8 link, link_level; 2441 u64 cfg, bmap = 0; 2442 2443 if (!is_rvu_otx2(rvu)) { 2444 /* Skip SMQ flush if pkt count is zero */ 2445 cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq)); 2446 if (!cfg) 2447 return 0; 2448 } 2449 2450 /* enable cgx tx if disabled */ 2451 if (is_pf_cgxmapped(rvu, pf)) { 2452 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 2453 restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), 2454 lmac_id, true); 2455 } 2456 2457 /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */ 2458 smq_flush_ctx = kzalloc_obj(*smq_flush_ctx); 2459 if (!smq_flush_ctx) 2460 return -ENOMEM; 2461 nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx); 2462 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true); 2463 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false); 2464 2465 /* Disable backpressure from physical link, 2466 * otherwise SMQ flush may stall. 2467 */ 2468 rvu_cgx_enadis_rx_bp(rvu, pf, false); 2469 2470 link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 
2471 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 2472 tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq; 2473 link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq; 2474 2475 /* SMQ set enqueue xoff */ 2476 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); 2477 cfg |= BIT_ULL(50); 2478 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); 2479 2480 /* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */ 2481 for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) { 2482 cfg = rvu_read64(rvu, blkaddr, 2483 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link)); 2484 if (!(cfg & BIT_ULL(12))) 2485 continue; 2486 bmap |= BIT_ULL(i); 2487 cfg &= ~BIT_ULL(12); 2488 rvu_write64(rvu, blkaddr, 2489 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg); 2490 } 2491 2492 /* Do SMQ flush and set enqueue xoff */ 2493 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); 2494 cfg |= BIT_ULL(50) | BIT_ULL(49); 2495 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); 2496 2497 /* Wait for flush to complete */ 2498 err = rvu_poll_reg(rvu, blkaddr, 2499 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true); 2500 if (err) 2501 dev_info(rvu->dev, 2502 "NIXLF%d: SMQ%d flush failed, txlink might be busy\n", 2503 nixlf, smq); 2504 2505 /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */ 2506 for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) { 2507 if (!(bmap & BIT_ULL(i))) 2508 continue; 2509 cfg = rvu_read64(rvu, blkaddr, 2510 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link)); 2511 cfg |= BIT_ULL(12); 2512 rvu_write64(rvu, blkaddr, 2513 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg); 2514 } 2515 2516 /* clear XOFF on TL2s */ 2517 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true); 2518 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false); 2519 kfree(smq_flush_ctx); 2520 2521 rvu_cgx_enadis_rx_bp(rvu, pf, true); 2522 /* restore cgx tx state */ 2523 if (restore_tx_en) 2524 rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); 2525 return err; 2526 } 2527 2528 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) 2529 { 2530 int blkaddr, nixlf, lvl, schq, err; 2531 struct rvu_hwinfo *hw = rvu->hw; 2532 struct nix_txsch *txsch; 2533 struct nix_hw *nix_hw; 2534 u16 map_func; 2535 2536 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2537 if (blkaddr < 0) 2538 return NIX_AF_ERR_AF_LF_INVALID; 2539 2540 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2541 if (!nix_hw) 2542 return NIX_AF_ERR_INVALID_NIXBLK; 2543 2544 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 2545 if (nixlf < 0) 2546 return NIX_AF_ERR_AF_LF_INVALID; 2547 2548 /* Disable TL2/3 queue links and all XOFF's before SMQ flush*/ 2549 mutex_lock(&rvu->rsrc_lock); 2550 for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2551 txsch = &nix_hw->txsch[lvl]; 2552 2553 if (lvl >= hw->cap.nix_tx_aggr_lvl) 2554 continue; 2555 2556 for (schq = 0; schq < txsch->schq.max; schq++) { 2557 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2558 continue; 2559 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2560 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); 2561 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2562 } 2563 } 2564 nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1, 2565 nix_get_tx_link(rvu, pcifunc)); 2566 2567 /* On PF cleanup, clear cfg done flag as 2568 * PF would have changed default config. 
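 * Clearing the flag lets nix_tl1_default_cfg() reapply the TL1 defaults
 * for the next user of this link.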
2569 */ 2570 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { 2571 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1]; 2572 schq = nix_get_tx_link(rvu, pcifunc); 2573 /* Do not clear pcifunc in txsch->pfvf_map[schq] because 2574 * VF might be using this TL1 queue 2575 */ 2576 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); 2577 txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0); 2578 } 2579 2580 /* Flush SMQs */ 2581 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; 2582 for (schq = 0; schq < txsch->schq.max; schq++) { 2583 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2584 continue; 2585 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 2586 } 2587 2588 /* Now free scheduler queues to free pool */ 2589 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2590 /* TLs above aggregation level are shared across all PF 2591 * and it's VFs, hence skip freeing them. 2592 */ 2593 if (lvl >= hw->cap.nix_tx_aggr_lvl) 2594 continue; 2595 2596 txsch = &nix_hw->txsch[lvl]; 2597 for (schq = 0; schq < txsch->schq.max; schq++) { 2598 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2599 continue; 2600 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2601 rvu_free_rsrc(&txsch->schq, schq); 2602 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2603 } 2604 } 2605 mutex_unlock(&rvu->rsrc_lock); 2606 2607 err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC); 2608 if (err) 2609 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); 2610 2611 return 0; 2612 } 2613 2614 static int nix_txschq_free_one(struct rvu *rvu, 2615 struct nix_txsch_free_req *req) 2616 { 2617 struct rvu_hwinfo *hw = rvu->hw; 2618 u16 pcifunc = req->hdr.pcifunc; 2619 int lvl, schq, nixlf, blkaddr; 2620 struct nix_txsch *txsch; 2621 struct nix_hw *nix_hw; 2622 u32 *pfvf_map; 2623 int rc; 2624 2625 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2626 if (blkaddr < 0) 2627 return NIX_AF_ERR_AF_LF_INVALID; 2628 2629 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2630 if (!nix_hw) 2631 return NIX_AF_ERR_INVALID_NIXBLK; 2632 2633 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 2634 if (nixlf < 0) 2635 return NIX_AF_ERR_AF_LF_INVALID; 2636 2637 lvl = req->schq_lvl; 2638 schq = req->schq; 2639 txsch = &nix_hw->txsch[lvl]; 2640 2641 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max) 2642 return 0; 2643 2644 pfvf_map = txsch->pfvf_map; 2645 mutex_lock(&rvu->rsrc_lock); 2646 2647 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) { 2648 rc = NIX_AF_ERR_TLX_INVALID; 2649 goto err; 2650 } 2651 2652 /* Clear SW_XOFF of this resource only. 2653 * For SMQ level, all path XOFF's 2654 * need to be made clear by user 2655 */ 2656 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); 2657 2658 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2659 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2660 2661 /* Flush if it is a SMQ. 
Onus of disabling 2662 * TL2/3 queue links before SMQ flush is on user 2663 */ 2664 if (lvl == NIX_TXSCH_LVL_SMQ && 2665 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) { 2666 rc = NIX_AF_SMQ_FLUSH_FAILED; 2667 goto err; 2668 } 2669 2670 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2671 2672 /* Free the resource */ 2673 rvu_free_rsrc(&txsch->schq, schq); 2674 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2675 mutex_unlock(&rvu->rsrc_lock); 2676 return 0; 2677 err: 2678 mutex_unlock(&rvu->rsrc_lock); 2679 return rc; 2680 } 2681 2682 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, 2683 struct nix_txsch_free_req *req, 2684 struct msg_rsp *rsp) 2685 { 2686 if (req->flags & TXSCHQ_FREE_ALL) 2687 return nix_txschq_free(rvu, req->hdr.pcifunc); 2688 else 2689 return nix_txschq_free_one(rvu, req); 2690 } 2691 2692 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, 2693 int lvl, u64 reg, u64 regval) 2694 { 2695 u64 regbase = reg & 0xFFFF; 2696 u16 schq, parent; 2697 2698 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg)) 2699 return false; 2700 2701 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2702 /* Check if this schq belongs to this PF/VF or not */ 2703 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq)) 2704 return false; 2705 2706 parent = (regval >> 16) & 0x1FF; 2707 /* Validate MDQ's TL4 parent */ 2708 if (regbase == NIX_AF_MDQX_PARENT(0) && 2709 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent)) 2710 return false; 2711 2712 /* Validate TL4's TL3 parent */ 2713 if (regbase == NIX_AF_TL4X_PARENT(0) && 2714 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent)) 2715 return false; 2716 2717 /* Validate TL3's TL2 parent */ 2718 if (regbase == NIX_AF_TL3X_PARENT(0) && 2719 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent)) 2720 return false; 2721 2722 /* Validate TL2's TL1 parent */ 2723 if (regbase == NIX_AF_TL2X_PARENT(0) && 2724 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent)) 2725 return false; 2726 2727 return true; 2728 } 2729 2730 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) 2731 { 2732 u64 regbase; 2733 2734 if (hw->cap.nix_shaping) 2735 return true; 2736 2737 /* If shaping and coloring is not supported, then 2738 * *_CIR and *_PIR registers should not be configured. 
2739 */ 2740 regbase = reg & 0xFFFF; 2741 2742 switch (lvl) { 2743 case NIX_TXSCH_LVL_TL1: 2744 if (regbase == NIX_AF_TL1X_CIR(0)) 2745 return false; 2746 break; 2747 case NIX_TXSCH_LVL_TL2: 2748 if (regbase == NIX_AF_TL2X_CIR(0) || 2749 regbase == NIX_AF_TL2X_PIR(0)) 2750 return false; 2751 break; 2752 case NIX_TXSCH_LVL_TL3: 2753 if (regbase == NIX_AF_TL3X_CIR(0) || 2754 regbase == NIX_AF_TL3X_PIR(0)) 2755 return false; 2756 break; 2757 case NIX_TXSCH_LVL_TL4: 2758 if (regbase == NIX_AF_TL4X_CIR(0) || 2759 regbase == NIX_AF_TL4X_PIR(0)) 2760 return false; 2761 break; 2762 case NIX_TXSCH_LVL_MDQ: 2763 if (regbase == NIX_AF_MDQX_CIR(0) || 2764 regbase == NIX_AF_MDQX_PIR(0)) 2765 return false; 2766 break; 2767 } 2768 return true; 2769 } 2770 2771 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, 2772 u16 pcifunc, int blkaddr) 2773 { 2774 u32 *pfvf_map; 2775 int schq; 2776 2777 schq = nix_get_tx_link(rvu, pcifunc); 2778 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map; 2779 /* Skip if PF has already done the config */ 2780 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE) 2781 return; 2782 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), 2783 (TXSCH_TL1_DFLT_RR_PRIO << 1)); 2784 2785 /* On OcteonTx2 the config was in bytes and newer silcons 2786 * it's changed to weight. 2787 */ 2788 if (!rvu->hw->cap.nix_common_dwrr_mtu) 2789 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), 2790 TXSCH_TL1_DFLT_RR_QTM); 2791 else 2792 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), 2793 CN10K_MAX_DWRR_WEIGHT); 2794 2795 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00); 2796 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); 2797 } 2798 2799 /* Register offset - [15:0] 2800 * Scheduler Queue number - [25:16] 2801 */ 2802 #define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0) 2803 2804 static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw, 2805 int blkaddr, struct nix_txschq_config *req, 2806 struct nix_txschq_config *rsp) 2807 { 2808 u16 pcifunc = req->hdr.pcifunc; 2809 int idx, schq; 2810 u64 reg; 2811 2812 for (idx = 0; idx < req->num_regs; idx++) { 2813 reg = req->reg[idx]; 2814 reg &= NIX_TX_SCHQ_MASK; 2815 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2816 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) || 2817 !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq)) 2818 return NIX_AF_INVAL_TXSCHQ_CFG; 2819 rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg); 2820 } 2821 rsp->lvl = req->lvl; 2822 rsp->num_regs = req->num_regs; 2823 return 0; 2824 } 2825 2826 void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc, 2827 struct nix_txsch *txsch, bool enable) 2828 { 2829 struct rvu_hwinfo *hw = rvu->hw; 2830 int lbk_link_start, lbk_links; 2831 u8 pf = rvu_get_pf(rvu->pdev, pcifunc); 2832 int schq; 2833 u64 cfg; 2834 2835 if (!is_pf_cgxmapped(rvu, pf) && !is_rep_dev(rvu, pcifunc)) 2836 return; 2837 2838 cfg = enable ? 
(BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0; 2839 lbk_link_start = hw->cgx_links; 2840 2841 for (schq = 0; schq < txsch->schq.max; schq++) { 2842 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2843 continue; 2844 /* Enable all LBK links with channel 63 by default so that 2845 * packets can be sent to LBK with a NPC TX MCAM rule 2846 */ 2847 lbk_links = hw->lbk_links; 2848 while (lbk_links--) 2849 rvu_write64(rvu, blkaddr, 2850 NIX_AF_TL3_TL2X_LINKX_CFG(schq, 2851 lbk_link_start + 2852 lbk_links), cfg); 2853 } 2854 } 2855 2856 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, 2857 struct nix_txschq_config *req, 2858 struct nix_txschq_config *rsp) 2859 { 2860 u64 reg, val, regval, schq_regbase, val_mask; 2861 struct rvu_hwinfo *hw = rvu->hw; 2862 u16 pcifunc = req->hdr.pcifunc; 2863 struct nix_txsch *txsch; 2864 struct nix_hw *nix_hw; 2865 int blkaddr, idx, err; 2866 int nixlf, schq; 2867 u32 *pfvf_map; 2868 2869 if (req->lvl >= NIX_TXSCH_LVL_CNT || 2870 req->num_regs > MAX_REGS_PER_MBOX_MSG) 2871 return NIX_AF_INVAL_TXSCHQ_CFG; 2872 2873 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2874 if (err) 2875 return err; 2876 2877 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2878 if (!nix_hw) 2879 return NIX_AF_ERR_INVALID_NIXBLK; 2880 2881 if (req->read) 2882 return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp); 2883 2884 txsch = &nix_hw->txsch[req->lvl]; 2885 pfvf_map = txsch->pfvf_map; 2886 2887 if (req->lvl >= hw->cap.nix_tx_aggr_lvl && 2888 pcifunc & RVU_PFVF_FUNC_MASK) { 2889 mutex_lock(&rvu->rsrc_lock); 2890 if (req->lvl == NIX_TXSCH_LVL_TL1) 2891 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr); 2892 mutex_unlock(&rvu->rsrc_lock); 2893 return 0; 2894 } 2895 2896 for (idx = 0; idx < req->num_regs; idx++) { 2897 reg = req->reg[idx]; 2898 reg &= NIX_TX_SCHQ_MASK; 2899 regval = req->regval[idx]; 2900 schq_regbase = reg & 0xFFFF; 2901 val_mask = req->regval_mask[idx]; 2902 2903 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, 2904 txsch->lvl, reg, regval)) 2905 return NIX_AF_INVAL_TXSCHQ_CFG; 2906 2907 /* Check if shaping and coloring is supported */ 2908 if (!is_txschq_shaping_valid(hw, req->lvl, reg)) 2909 continue; 2910 2911 val = rvu_read64(rvu, blkaddr, reg); 2912 regval = (val & val_mask) | (regval & ~val_mask); 2913 2914 /* Handle shaping state toggle specially */ 2915 if (hw->cap.nix_shaper_toggle_wait && 2916 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 2917 req->lvl, reg, regval)) 2918 continue; 2919 2920 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ 2921 if (schq_regbase == NIX_AF_SMQX_CFG(0)) { 2922 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], 2923 pcifunc, 0); 2924 regval &= ~(0x7FULL << 24); 2925 regval |= ((u64)nixlf << 24); 2926 } 2927 2928 /* Clear 'BP_ENA' config, if it's not allowed */ 2929 if (!hw->cap.nix_tx_link_bp) { 2930 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) || 2931 (schq_regbase & 0xFF00) == 2932 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0)) 2933 regval &= ~BIT_ULL(13); 2934 } 2935 2936 /* Mark config as done for TL1 by PF */ 2937 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) && 2938 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) { 2939 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2940 mutex_lock(&rvu->rsrc_lock); 2941 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], 2942 NIX_TXSCHQ_CFG_DONE); 2943 mutex_unlock(&rvu->rsrc_lock); 2944 } 2945 2946 /* SMQ flush is special hence split register writes such 2947 * that flush first and write rest of the bits later. 
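 * The flush itself is done via nix_smq_flush() and the flush bit (49) is
 * then cleared from regval before the final register write below.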
2948 */ 2949 if (schq_regbase == NIX_AF_SMQX_CFG(0) && 2950 (regval & BIT_ULL(49))) { 2951 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2952 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 2953 regval &= ~BIT_ULL(49); 2954 } 2955 rvu_write64(rvu, blkaddr, reg, regval); 2956 } 2957 2958 return 0; 2959 } 2960 2961 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, 2962 struct nix_vtag_config *req) 2963 { 2964 u64 regval = req->vtag_size; 2965 2966 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 || 2967 req->vtag_size > VTAGSIZE_T8) 2968 return -EINVAL; 2969 2970 /* RX VTAG Type 7 reserved for vf vlan */ 2971 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7) 2972 return NIX_AF_ERR_RX_VTAG_INUSE; 2973 2974 if (req->rx.capture_vtag) 2975 regval |= BIT_ULL(5); 2976 if (req->rx.strip_vtag) 2977 regval |= BIT_ULL(4); 2978 2979 rvu_write64(rvu, blkaddr, 2980 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval); 2981 return 0; 2982 } 2983 2984 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr, 2985 u16 pcifunc, int index) 2986 { 2987 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2988 struct nix_txvlan *vlan; 2989 2990 if (!nix_hw) 2991 return NIX_AF_ERR_INVALID_NIXBLK; 2992 2993 vlan = &nix_hw->txvlan; 2994 if (vlan->entry2pfvf_map[index] != pcifunc) 2995 return NIX_AF_ERR_PARAM; 2996 2997 rvu_write64(rvu, blkaddr, 2998 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull); 2999 rvu_write64(rvu, blkaddr, 3000 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull); 3001 3002 vlan->entry2pfvf_map[index] = 0; 3003 rvu_free_rsrc(&vlan->rsrc, index); 3004 3005 return 0; 3006 } 3007 3008 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc) 3009 { 3010 struct nix_txvlan *vlan; 3011 struct nix_hw *nix_hw; 3012 int index, blkaddr; 3013 3014 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3015 if (blkaddr < 0) 3016 return; 3017 3018 nix_hw = get_nix_hw(rvu->hw, blkaddr); 3019 if (!nix_hw) 3020 return; 3021 3022 vlan = &nix_hw->txvlan; 3023 3024 mutex_lock(&vlan->rsrc_lock); 3025 /* Scan all the entries and free the ones mapped to 'pcifunc' */ 3026 for (index = 0; index < vlan->rsrc.max; index++) { 3027 if (vlan->entry2pfvf_map[index] == pcifunc) 3028 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index); 3029 } 3030 mutex_unlock(&vlan->rsrc_lock); 3031 } 3032 3033 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr, 3034 u64 vtag, u8 size) 3035 { 3036 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 3037 struct nix_txvlan *vlan; 3038 u64 regval; 3039 int index; 3040 3041 if (!nix_hw) 3042 return NIX_AF_ERR_INVALID_NIXBLK; 3043 3044 vlan = &nix_hw->txvlan; 3045 3046 mutex_lock(&vlan->rsrc_lock); 3047 3048 index = rvu_alloc_rsrc(&vlan->rsrc); 3049 if (index < 0) { 3050 mutex_unlock(&vlan->rsrc_lock); 3051 return index; 3052 } 3053 3054 mutex_unlock(&vlan->rsrc_lock); 3055 3056 regval = size ? 
vtag : vtag << 32; 3057 3058 rvu_write64(rvu, blkaddr, 3059 NIX_AF_TX_VTAG_DEFX_DATA(index), regval); 3060 rvu_write64(rvu, blkaddr, 3061 NIX_AF_TX_VTAG_DEFX_CTL(index), size); 3062 3063 return index; 3064 } 3065 3066 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, 3067 struct nix_vtag_config *req) 3068 { 3069 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 3070 u16 pcifunc = req->hdr.pcifunc; 3071 int idx0 = req->tx.vtag0_idx; 3072 int idx1 = req->tx.vtag1_idx; 3073 struct nix_txvlan *vlan; 3074 int err = 0; 3075 3076 if (!nix_hw) 3077 return NIX_AF_ERR_INVALID_NIXBLK; 3078 3079 vlan = &nix_hw->txvlan; 3080 if (req->tx.free_vtag0 && req->tx.free_vtag1) 3081 if (vlan->entry2pfvf_map[idx0] != pcifunc || 3082 vlan->entry2pfvf_map[idx1] != pcifunc) 3083 return NIX_AF_ERR_PARAM; 3084 3085 mutex_lock(&vlan->rsrc_lock); 3086 3087 if (req->tx.free_vtag0) { 3088 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0); 3089 if (err) 3090 goto exit; 3091 } 3092 3093 if (req->tx.free_vtag1) 3094 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1); 3095 3096 exit: 3097 mutex_unlock(&vlan->rsrc_lock); 3098 return err; 3099 } 3100 3101 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, 3102 struct nix_vtag_config *req, 3103 struct nix_vtag_config_rsp *rsp) 3104 { 3105 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 3106 struct nix_txvlan *vlan; 3107 u16 pcifunc = req->hdr.pcifunc; 3108 3109 if (!nix_hw) 3110 return NIX_AF_ERR_INVALID_NIXBLK; 3111 3112 vlan = &nix_hw->txvlan; 3113 if (req->tx.cfg_vtag0) { 3114 rsp->vtag0_idx = 3115 nix_tx_vtag_alloc(rvu, blkaddr, 3116 req->tx.vtag0, req->vtag_size); 3117 3118 if (rsp->vtag0_idx < 0) 3119 return NIX_AF_ERR_TX_VTAG_NOSPC; 3120 3121 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc; 3122 } 3123 3124 if (req->tx.cfg_vtag1) { 3125 rsp->vtag1_idx = 3126 nix_tx_vtag_alloc(rvu, blkaddr, 3127 req->tx.vtag1, req->vtag_size); 3128 3129 if (rsp->vtag1_idx < 0) 3130 goto err_free; 3131 3132 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc; 3133 } 3134 3135 return 0; 3136 3137 err_free: 3138 if (req->tx.cfg_vtag0) 3139 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx); 3140 3141 return NIX_AF_ERR_TX_VTAG_NOSPC; 3142 } 3143 3144 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, 3145 struct nix_vtag_config *req, 3146 struct nix_vtag_config_rsp *rsp) 3147 { 3148 u16 pcifunc = req->hdr.pcifunc; 3149 int blkaddr, nixlf, err; 3150 3151 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3152 if (err) 3153 return err; 3154 3155 if (req->cfg_type) { 3156 /* rx vtag configuration */ 3157 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); 3158 if (err) 3159 return NIX_AF_ERR_PARAM; 3160 } else { 3161 /* tx vtag configuration */ 3162 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) && 3163 (req->tx.free_vtag0 || req->tx.free_vtag1)) 3164 return NIX_AF_ERR_PARAM; 3165 3166 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1) 3167 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp); 3168 3169 if (req->tx.free_vtag0 || req->tx.free_vtag1) 3170 return nix_tx_vtag_decfg(rvu, blkaddr, req); 3171 } 3172 3173 return 0; 3174 } 3175 3176 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, 3177 int mce, u8 op, u16 pcifunc, int next, 3178 int index, u8 mce_op, bool eol) 3179 { 3180 struct nix_aq_enq_req aq_req; 3181 int err; 3182 3183 aq_req.hdr.pcifunc = 0; 3184 aq_req.ctype = NIX_AQ_CTYPE_MCE; 3185 aq_req.op = op; 3186 aq_req.qidx = mce; 3187 3188 /* Use RSS with RSS index 0 */ 3189 aq_req.mce.op = mce_op; 3190 aq_req.mce.index = index; 3191 aq_req.mce.eol = eol; 
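	/* Destination PF_FUNC and index of the next MCE in the replication chain */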
3192 aq_req.mce.pf_func = pcifunc; 3193 aq_req.mce.next = next; 3194 3195 /* All fields valid */ 3196 *(u64 *)(&aq_req.mce_mask) = ~0ULL; 3197 3198 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); 3199 if (err) { 3200 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", 3201 rvu_get_pf(rvu->pdev, pcifunc), 3202 pcifunc & RVU_PFVF_FUNC_MASK); 3203 return err; 3204 } 3205 return 0; 3206 } 3207 3208 static void nix_delete_mcast_mce_list(struct nix_mce_list *mce_list) 3209 { 3210 struct hlist_node *tmp; 3211 struct mce *mce; 3212 3213 /* Scan through the current list */ 3214 hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) { 3215 hlist_del(&mce->node); 3216 kfree(mce); 3217 } 3218 3219 mce_list->count = 0; 3220 mce_list->max = 0; 3221 } 3222 3223 static int nix_get_last_mce_list_index(struct nix_mcast_grp_elem *elem) 3224 { 3225 return elem->mce_start_index + elem->mcast_mce_list.count - 1; 3226 } 3227 3228 static int nix_update_ingress_mce_list_hw(struct rvu *rvu, 3229 struct nix_hw *nix_hw, 3230 struct nix_mcast_grp_elem *elem) 3231 { 3232 int idx, last_idx, next_idx, err; 3233 struct nix_mce_list *mce_list; 3234 struct mce *mce, *prev_mce; 3235 3236 mce_list = &elem->mcast_mce_list; 3237 idx = elem->mce_start_index; 3238 last_idx = nix_get_last_mce_list_index(elem); 3239 hlist_for_each_entry(mce, &mce_list->head, node) { 3240 if (idx > last_idx) 3241 break; 3242 3243 if (!mce->is_active) { 3244 if (idx == elem->mce_start_index) { 3245 idx++; 3246 prev_mce = mce; 3247 elem->mce_start_index = idx; 3248 continue; 3249 } else if (idx == last_idx) { 3250 err = nix_blk_setup_mce(rvu, nix_hw, idx - 1, NIX_AQ_INSTOP_WRITE, 3251 prev_mce->pcifunc, next_idx, 3252 prev_mce->rq_rss_index, 3253 prev_mce->dest_type, 3254 false); 3255 if (err) 3256 return err; 3257 3258 break; 3259 } 3260 } 3261 3262 next_idx = idx + 1; 3263 /* EOL should be set in last MCE */ 3264 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, 3265 mce->pcifunc, next_idx, 3266 mce->rq_rss_index, mce->dest_type, 3267 (next_idx > last_idx) ? 
true : false); 3268 if (err) 3269 return err; 3270 3271 idx++; 3272 prev_mce = mce; 3273 } 3274 3275 return 0; 3276 } 3277 3278 static void nix_update_egress_mce_list_hw(struct rvu *rvu, 3279 struct nix_hw *nix_hw, 3280 struct nix_mcast_grp_elem *elem) 3281 { 3282 struct nix_mce_list *mce_list; 3283 int idx, last_idx, next_idx; 3284 struct mce *mce, *prev_mce; 3285 u64 regval; 3286 u8 eol; 3287 3288 mce_list = &elem->mcast_mce_list; 3289 idx = elem->mce_start_index; 3290 last_idx = nix_get_last_mce_list_index(elem); 3291 hlist_for_each_entry(mce, &mce_list->head, node) { 3292 if (idx > last_idx) 3293 break; 3294 3295 if (!mce->is_active) { 3296 if (idx == elem->mce_start_index) { 3297 idx++; 3298 prev_mce = mce; 3299 elem->mce_start_index = idx; 3300 continue; 3301 } else if (idx == last_idx) { 3302 regval = (next_idx << 16) | (1 << 12) | prev_mce->channel; 3303 rvu_write64(rvu, nix_hw->blkaddr, 3304 NIX_AF_TX_MCASTX(idx - 1), 3305 regval); 3306 break; 3307 } 3308 } 3309 3310 eol = 0; 3311 next_idx = idx + 1; 3312 /* EOL should be set in last MCE */ 3313 if (next_idx > last_idx) 3314 eol = 1; 3315 3316 regval = (next_idx << 16) | (eol << 12) | mce->channel; 3317 rvu_write64(rvu, nix_hw->blkaddr, 3318 NIX_AF_TX_MCASTX(idx), 3319 regval); 3320 idx++; 3321 prev_mce = mce; 3322 } 3323 } 3324 3325 static int nix_del_mce_list_entry(struct rvu *rvu, 3326 struct nix_hw *nix_hw, 3327 struct nix_mcast_grp_elem *elem, 3328 struct nix_mcast_grp_update_req *req) 3329 { 3330 u32 num_entry = req->num_mce_entry; 3331 struct nix_mce_list *mce_list; 3332 struct mce *mce; 3333 bool is_found; 3334 int i; 3335 3336 mce_list = &elem->mcast_mce_list; 3337 for (i = 0; i < num_entry; i++) { 3338 is_found = false; 3339 hlist_for_each_entry(mce, &mce_list->head, node) { 3340 /* If already exists, then delete */ 3341 if (mce->pcifunc == req->pcifunc[i]) { 3342 hlist_del(&mce->node); 3343 kfree(mce); 3344 mce_list->count--; 3345 is_found = true; 3346 break; 3347 } 3348 } 3349 3350 if (!is_found) 3351 return NIX_AF_ERR_INVALID_MCAST_DEL_REQ; 3352 } 3353 3354 mce_list->max = mce_list->count; 3355 /* Dump the updated list to HW */ 3356 if (elem->dir == NIX_MCAST_INGRESS) 3357 return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem); 3358 3359 nix_update_egress_mce_list_hw(rvu, nix_hw, elem); 3360 return 0; 3361 } 3362 3363 static int nix_add_mce_list_entry(struct rvu *rvu, 3364 struct nix_hw *nix_hw, 3365 struct nix_mcast_grp_elem *elem, 3366 struct nix_mcast_grp_update_req *req) 3367 { 3368 u32 num_entry = req->num_mce_entry; 3369 struct nix_mce_list *mce_list; 3370 struct hlist_node *tmp; 3371 struct mce *mce; 3372 int i; 3373 3374 mce_list = &elem->mcast_mce_list; 3375 for (i = 0; i < num_entry; i++) { 3376 mce = kzalloc_obj(*mce); 3377 if (!mce) 3378 goto free_mce; 3379 3380 mce->pcifunc = req->pcifunc[i]; 3381 mce->channel = req->channel[i]; 3382 mce->rq_rss_index = req->rq_rss_index[i]; 3383 mce->dest_type = req->dest_type[i]; 3384 mce->is_active = 1; 3385 hlist_add_head(&mce->node, &mce_list->head); 3386 mce_list->count++; 3387 } 3388 3389 mce_list->max += num_entry; 3390 3391 /* Dump the updated list to HW */ 3392 if (elem->dir == NIX_MCAST_INGRESS) 3393 return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem); 3394 3395 nix_update_egress_mce_list_hw(rvu, nix_hw, elem); 3396 return 0; 3397 3398 free_mce: 3399 hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) { 3400 hlist_del(&mce->node); 3401 kfree(mce); 3402 mce_list->count--; 3403 } 3404 3405 return -ENOMEM; 3406 } 3407 3408 static int 
nix_update_mce_list_entry(struct nix_mce_list *mce_list, 3409 u16 pcifunc, bool add) 3410 { 3411 struct mce *mce, *tail = NULL; 3412 bool delete = false; 3413 3414 /* Scan through the current list */ 3415 hlist_for_each_entry(mce, &mce_list->head, node) { 3416 /* If already exists, then delete */ 3417 if (mce->pcifunc == pcifunc && !add) { 3418 delete = true; 3419 break; 3420 } else if (mce->pcifunc == pcifunc && add) { 3421 /* entry already exists */ 3422 return 0; 3423 } 3424 tail = mce; 3425 } 3426 3427 if (delete) { 3428 hlist_del(&mce->node); 3429 kfree(mce); 3430 mce_list->count--; 3431 return 0; 3432 } 3433 3434 if (!add) 3435 return 0; 3436 3437 /* Add a new one to the list, at the tail */ 3438 mce = kzalloc_obj(*mce); 3439 if (!mce) 3440 return -ENOMEM; 3441 mce->pcifunc = pcifunc; 3442 if (!tail) 3443 hlist_add_head(&mce->node, &mce_list->head); 3444 else 3445 hlist_add_behind(&mce->node, &tail->node); 3446 mce_list->count++; 3447 return 0; 3448 } 3449 3450 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, 3451 struct nix_mce_list *mce_list, 3452 int mce_idx, int mcam_index, bool add) 3453 { 3454 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr; 3455 struct npc_mcam *mcam = &rvu->hw->mcam; 3456 struct nix_mcast *mcast; 3457 struct nix_hw *nix_hw; 3458 struct mce *mce; 3459 3460 if (!mce_list) 3461 return -EINVAL; 3462 3463 /* Get this PF/VF func's MCE index */ 3464 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); 3465 3466 if (idx > (mce_idx + mce_list->max)) { 3467 dev_err(rvu->dev, 3468 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", 3469 __func__, idx, mce_list->max, 3470 rvu_get_pf(rvu->pdev, pcifunc)); 3471 return -EINVAL; 3472 } 3473 3474 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 3475 if (err) 3476 return err; 3477 3478 mcast = &nix_hw->mcast; 3479 mutex_lock(&mcast->mce_lock); 3480 3481 err = nix_update_mce_list_entry(mce_list, pcifunc, add); 3482 if (err) 3483 goto end; 3484 3485 /* Disable MCAM entry in NPC */ 3486 if (!mce_list->count) { 3487 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3488 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false); 3489 goto end; 3490 } 3491 3492 /* Dump the updated list to HW */ 3493 idx = mce_idx; 3494 last_idx = idx + mce_list->count - 1; 3495 hlist_for_each_entry(mce, &mce_list->head, node) { 3496 if (idx > last_idx) 3497 break; 3498 3499 next_idx = idx + 1; 3500 /* EOL should be set in last MCE */ 3501 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, 3502 mce->pcifunc, next_idx, 3503 0, 1, 3504 (next_idx > last_idx) ? 
true : false); 3505 if (err) 3506 goto end; 3507 idx++; 3508 } 3509 3510 end: 3511 mutex_unlock(&mcast->mce_lock); 3512 return err; 3513 } 3514 3515 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, 3516 struct nix_mce_list **mce_list, int *mce_idx) 3517 { 3518 struct rvu_hwinfo *hw = rvu->hw; 3519 struct rvu_pfvf *pfvf; 3520 3521 if (!hw->cap.nix_rx_multicast || 3522 !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, 3523 pcifunc & ~RVU_PFVF_FUNC_MASK))) { 3524 *mce_list = NULL; 3525 *mce_idx = 0; 3526 return; 3527 } 3528 3529 /* Get this PF/VF func's MCE index */ 3530 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); 3531 3532 if (type == NIXLF_BCAST_ENTRY) { 3533 *mce_list = &pfvf->bcast_mce_list; 3534 *mce_idx = pfvf->bcast_mce_idx; 3535 } else if (type == NIXLF_ALLMULTI_ENTRY) { 3536 *mce_list = &pfvf->mcast_mce_list; 3537 *mce_idx = pfvf->mcast_mce_idx; 3538 } else if (type == NIXLF_PROMISC_ENTRY) { 3539 *mce_list = &pfvf->promisc_mce_list; 3540 *mce_idx = pfvf->promisc_mce_idx; 3541 } else { 3542 *mce_list = NULL; 3543 *mce_idx = 0; 3544 } 3545 } 3546 3547 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, 3548 int type, bool add) 3549 { 3550 int err = 0, nixlf, blkaddr, mcam_index, mce_idx; 3551 struct npc_mcam *mcam = &rvu->hw->mcam; 3552 struct rvu_hwinfo *hw = rvu->hw; 3553 struct nix_mce_list *mce_list; 3554 int pf; 3555 3556 /* skip multicast pkt replication for AF's VFs & SDP links */ 3557 if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(rvu, pcifunc)) 3558 return 0; 3559 3560 if (!hw->cap.nix_rx_multicast) 3561 return 0; 3562 3563 pf = rvu_get_pf(rvu->pdev, pcifunc); 3564 if (!is_pf_cgxmapped(rvu, pf)) 3565 return 0; 3566 3567 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3568 if (blkaddr < 0) 3569 return -EINVAL; 3570 3571 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 3572 if (nixlf < 0) 3573 return -EINVAL; 3574 3575 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx); 3576 3577 mcam_index = npc_get_nixlf_mcam_index(mcam, 3578 pcifunc & ~RVU_PFVF_FUNC_MASK, 3579 nixlf, type); 3580 err = nix_update_mce_list(rvu, pcifunc, mce_list, 3581 mce_idx, mcam_index, add); 3582 return err; 3583 } 3584 3585 static void nix_setup_mcast_grp(struct nix_hw *nix_hw) 3586 { 3587 struct nix_mcast_grp *mcast_grp = &nix_hw->mcast_grp; 3588 3589 INIT_LIST_HEAD(&mcast_grp->mcast_grp_head); 3590 mutex_init(&mcast_grp->mcast_grp_lock); 3591 mcast_grp->next_grp_index = 1; 3592 mcast_grp->count = 0; 3593 } 3594 3595 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) 3596 { 3597 struct nix_mcast *mcast = &nix_hw->mcast; 3598 int err, pf, numvfs, idx; 3599 struct rvu_pfvf *pfvf; 3600 u16 pcifunc; 3601 u64 cfg; 3602 3603 /* Skip PF0 (i.e AF) */ 3604 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) { 3605 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 3606 /* If PF is not enabled, nothing to do */ 3607 if (!((cfg >> 20) & 0x01)) 3608 continue; 3609 /* Get numVFs attached to this PF */ 3610 numvfs = (cfg >> 12) & 0xFF; 3611 3612 pfvf = &rvu->pf[pf]; 3613 3614 /* This NIX0/1 block mapped to PF ? 
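 * If not, skip it and move on to the next PF.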
*/ 3615 if (pfvf->nix_blkaddr != nix_hw->blkaddr) 3616 continue; 3617 3618 /* save start idx of broadcast mce list */ 3619 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS); 3620 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1); 3621 3622 /* save start idx of multicast mce list */ 3623 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS); 3624 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1); 3625 3626 /* save the start idx of promisc mce list */ 3627 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS); 3628 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1); 3629 3630 for (idx = 0; idx < (numvfs + 1); idx++) { 3631 /* idx-0 is for PF, followed by VFs */ 3632 pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); 3633 pcifunc |= idx; 3634 /* Add dummy entries now, so that we don't have to check 3635 * for whether AQ_OP should be INIT/WRITE later on. 3636 * Will be updated when a NIXLF is attached/detached to 3637 * these PF/VFs. 3638 */ 3639 err = nix_blk_setup_mce(rvu, nix_hw, 3640 pfvf->bcast_mce_idx + idx, 3641 NIX_AQ_INSTOP_INIT, 3642 pcifunc, 0, 0, 1, true); 3643 if (err) 3644 return err; 3645 3646 /* add dummy entries to multicast mce list */ 3647 err = nix_blk_setup_mce(rvu, nix_hw, 3648 pfvf->mcast_mce_idx + idx, 3649 NIX_AQ_INSTOP_INIT, 3650 pcifunc, 0, 0, 1, true); 3651 if (err) 3652 return err; 3653 3654 /* add dummy entries to promisc mce list */ 3655 err = nix_blk_setup_mce(rvu, nix_hw, 3656 pfvf->promisc_mce_idx + idx, 3657 NIX_AQ_INSTOP_INIT, 3658 pcifunc, 0, 0, 1, true); 3659 if (err) 3660 return err; 3661 } 3662 } 3663 return 0; 3664 } 3665 3666 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 3667 { 3668 struct nix_mcast *mcast = &nix_hw->mcast; 3669 struct rvu_hwinfo *hw = rvu->hw; 3670 int err, size; 3671 3672 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F; 3673 size = BIT_ULL(size); 3674 3675 /* Allocate bitmap for rx mce entries */ 3676 mcast->mce_counter[NIX_MCAST_INGRESS].max = 256UL << MC_TBL_SIZE; 3677 err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); 3678 if (err) 3679 return -ENOMEM; 3680 3681 /* Allocate bitmap for tx mce entries */ 3682 mcast->mce_counter[NIX_MCAST_EGRESS].max = MC_TX_MAX; 3683 err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]); 3684 if (err) { 3685 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); 3686 return -ENOMEM; 3687 } 3688 3689 /* Alloc memory for multicast/mirror replication entries */ 3690 err = qmem_alloc(rvu->dev, &mcast->mce_ctx, 3691 mcast->mce_counter[NIX_MCAST_INGRESS].max, size); 3692 if (err) { 3693 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); 3694 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]); 3695 return -ENOMEM; 3696 } 3697 3698 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE, 3699 (u64)mcast->mce_ctx->iova); 3700 3701 /* Set max list length equal to max no of VFs per PF + PF itself */ 3702 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG, 3703 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE); 3704 3705 /* Alloc memory for multicast replication buffers */ 3706 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF; 3707 err = qmem_alloc(rvu->dev, &mcast->mcast_buf, 3708 (8UL << MC_BUF_CNT), size); 3709 if (err) { 3710 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); 3711 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]); 3712 return -ENOMEM; 3713 } 3714 3715 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE, 3716 
(u64)mcast->mcast_buf->iova); 3717 3718 /* Alloc pkind for NIX internal RX multicast/mirror replay */ 3719 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc); 3720 3721 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG, 3722 BIT_ULL(63) | (mcast->replay_pkind << 24) | 3723 BIT_ULL(20) | MC_BUF_CNT); 3724 3725 mutex_init(&mcast->mce_lock); 3726 3727 nix_setup_mcast_grp(nix_hw); 3728 3729 return nix_setup_mce_tables(rvu, nix_hw); 3730 } 3731 3732 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw) 3733 { 3734 struct nix_txvlan *vlan = &nix_hw->txvlan; 3735 int err; 3736 3737 /* Allocate resource bimap for tx vtag def registers*/ 3738 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX; 3739 err = rvu_alloc_bitmap(&vlan->rsrc); 3740 if (err) 3741 return -ENOMEM; 3742 3743 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ 3744 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max, 3745 sizeof(u16), GFP_KERNEL); 3746 if (!vlan->entry2pfvf_map) 3747 goto free_mem; 3748 3749 mutex_init(&vlan->rsrc_lock); 3750 return 0; 3751 3752 free_mem: 3753 kfree(vlan->rsrc.bmap); 3754 return -ENOMEM; 3755 } 3756 3757 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 3758 { 3759 struct nix_txsch *txsch; 3760 int err, lvl, schq; 3761 u64 cfg, reg; 3762 3763 /* Get scheduler queue count of each type and alloc 3764 * bitmap for each for alloc/free/attach operations. 3765 */ 3766 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 3767 txsch = &nix_hw->txsch[lvl]; 3768 txsch->lvl = lvl; 3769 switch (lvl) { 3770 case NIX_TXSCH_LVL_SMQ: 3771 reg = NIX_AF_MDQ_CONST; 3772 break; 3773 case NIX_TXSCH_LVL_TL4: 3774 reg = NIX_AF_TL4_CONST; 3775 break; 3776 case NIX_TXSCH_LVL_TL3: 3777 reg = NIX_AF_TL3_CONST; 3778 break; 3779 case NIX_TXSCH_LVL_TL2: 3780 reg = NIX_AF_TL2_CONST; 3781 break; 3782 case NIX_TXSCH_LVL_TL1: 3783 reg = NIX_AF_TL1_CONST; 3784 break; 3785 } 3786 cfg = rvu_read64(rvu, blkaddr, reg); 3787 txsch->schq.max = cfg & 0xFFFF; 3788 err = rvu_alloc_bitmap(&txsch->schq); 3789 if (err) 3790 return err; 3791 3792 /* Allocate memory for scheduler queues to 3793 * PF/VF pcifunc mapping info. 
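 * Each entry packs the owning pcifunc and queue state flags via
 * TXSCH_MAP(); all queues start out as NIX_TXSCHQ_FREE.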
3794 */ 3795 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max, 3796 sizeof(u32), GFP_KERNEL); 3797 if (!txsch->pfvf_map) 3798 return -ENOMEM; 3799 for (schq = 0; schq < txsch->schq.max; schq++) 3800 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 3801 } 3802 3803 /* Setup a default value of 8192 as DWRR MTU */ 3804 if (rvu->hw->cap.nix_common_dwrr_mtu || 3805 rvu->hw->cap.nix_multiple_dwrr_mtu) { 3806 rvu_write64(rvu, blkaddr, 3807 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM), 3808 convert_bytes_to_dwrr_mtu(8192)); 3809 rvu_write64(rvu, blkaddr, 3810 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK), 3811 convert_bytes_to_dwrr_mtu(8192)); 3812 rvu_write64(rvu, blkaddr, 3813 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP), 3814 convert_bytes_to_dwrr_mtu(8192)); 3815 } 3816 3817 return 0; 3818 } 3819 3820 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, 3821 int blkaddr, u32 cfg) 3822 { 3823 int fmt_idx; 3824 3825 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) { 3826 if (nix_hw->mark_format.cfg[fmt_idx] == cfg) 3827 return fmt_idx; 3828 } 3829 if (fmt_idx >= nix_hw->mark_format.total) 3830 return -ERANGE; 3831 3832 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg); 3833 nix_hw->mark_format.cfg[fmt_idx] = cfg; 3834 nix_hw->mark_format.in_use++; 3835 return fmt_idx; 3836 } 3837 3838 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw, 3839 int blkaddr) 3840 { 3841 u64 cfgs[] = { 3842 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003, 3843 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200, 3844 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203, 3845 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c, 3846 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00, 3847 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c, 3848 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008, 3849 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800, 3850 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808, 3851 }; 3852 int i, rc; 3853 u64 total; 3854 3855 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8; 3856 nix_hw->mark_format.total = (u8)total; 3857 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32), 3858 GFP_KERNEL); 3859 if (!nix_hw->mark_format.cfg) 3860 return -ENOMEM; 3861 for (i = 0; i < NIX_MARK_CFG_MAX; i++) { 3862 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]); 3863 if (rc < 0) 3864 dev_err(rvu->dev, "Err %d in setup mark format %d\n", 3865 i, rc); 3866 } 3867 3868 return 0; 3869 } 3870 3871 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu) 3872 { 3873 /* CN10K supports LBK FIFO size 72 KB */ 3874 if (rvu->hw->lbk_bufsize == 0x12000) 3875 *max_mtu = CN10K_LBK_LINK_MAX_FRS; 3876 else 3877 *max_mtu = NIC_HW_MAX_FRS; 3878 } 3879 3880 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu) 3881 { 3882 int fifo_size = rvu_cgx_get_fifolen(rvu); 3883 3884 /* RPM supports FIFO len 128 KB and RPM2 supports double the 3885 * FIFO len to accommodate 8 LMACS 3886 */ 3887 if (fifo_size == 0x20000 || fifo_size == 0x40000) 3888 *max_mtu = CN10K_LMAC_LINK_MAX_FRS; 3889 else 3890 *max_mtu = NIC_HW_MAX_FRS; 3891 } 3892 3893 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, 3894 struct nix_hw_info *rsp) 3895 { 3896 u16 pcifunc = req->hdr.pcifunc; 3897 u64 dwrr_mtu; 3898 int blkaddr; 3899 3900 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3901 if (blkaddr < 0) 3902 return NIX_AF_ERR_AF_LF_INVALID; 3903 3904 if (is_lbk_vf(rvu, pcifunc)) 3905 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu); 3906 else 3907 
rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu); 3908 3909 rsp->min_mtu = NIC_HW_MIN_FRS; 3910 3911 if (!rvu->hw->cap.nix_common_dwrr_mtu && 3912 !rvu->hw->cap.nix_multiple_dwrr_mtu) { 3913 /* Return '1' on OTx2 */ 3914 rsp->rpm_dwrr_mtu = 1; 3915 rsp->sdp_dwrr_mtu = 1; 3916 rsp->lbk_dwrr_mtu = 1; 3917 return 0; 3918 } 3919 3920 /* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */ 3921 dwrr_mtu = rvu_read64(rvu, blkaddr, 3922 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM)); 3923 rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3924 3925 dwrr_mtu = rvu_read64(rvu, blkaddr, 3926 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP)); 3927 rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3928 3929 dwrr_mtu = rvu_read64(rvu, blkaddr, 3930 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK)); 3931 rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3932 3933 return 0; 3934 } 3935 3936 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, 3937 struct msg_rsp *rsp) 3938 { 3939 u16 pcifunc = req->hdr.pcifunc; 3940 int i, nixlf, blkaddr, err; 3941 u64 stats; 3942 3943 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3944 if (err) 3945 return err; 3946 3947 /* Get stats count supported by HW */ 3948 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 3949 3950 /* Reset tx stats */ 3951 for (i = 0; i < ((stats >> 24) & 0xFF); i++) 3952 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0); 3953 3954 /* Reset rx stats */ 3955 for (i = 0; i < ((stats >> 32) & 0xFF); i++) 3956 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0); 3957 3958 return 0; 3959 } 3960 3961 /* Returns the ALG index to be set into NPC_RX_ACTION */ 3962 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) 3963 { 3964 int i; 3965 3966 /* Scan over exiting algo entries to find a match */ 3967 for (i = 0; i < nix_hw->flowkey.in_use; i++) 3968 if (nix_hw->flowkey.flowkey[i] == flow_cfg) 3969 return i; 3970 3971 return -ERANGE; 3972 } 3973 3974 /* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */ 3975 #define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf) 3976 /* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */ 3977 #define NPC_LT_LC_IP_MATCH_MSK ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf) 3978 3979 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) 3980 { 3981 int idx, nr_field, key_off, field_marker, keyoff_marker; 3982 int max_key_off, max_bit_pos, group_member; 3983 struct nix_rx_flowkey_alg *field; 3984 struct nix_rx_flowkey_alg tmp; 3985 u32 key_type, valid_key; 3986 u32 l3_l4_src_dst; 3987 int l4_key_offset = 0; 3988 3989 if (!alg) 3990 return -EINVAL; 3991 3992 #define FIELDS_PER_ALG 5 3993 #define MAX_KEY_OFF 40 3994 /* Clear all fields */ 3995 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG); 3996 3997 /* Each of the 32 possible flow key algorithm definitions should 3998 * fall into above incremental config (except ALG0). Otherwise a 3999 * single NPC MCAM entry is not sufficient for supporting RSS. 4000 * 4001 * If a different definition or combination needed then NPC MCAM 4002 * has to be programmed to filter such pkts and it's action should 4003 * point to this definition to calculate flowtag or hash. 4004 * 4005 * The `for loop` goes over _all_ protocol field and the following 4006 * variables depicts the state machine forward progress logic. 4007 * 4008 * keyoff_marker - Enabled when hash byte length needs to be accounted 4009 * in field->key_offset update. 
4010 * field_marker - Enabled when a new field needs to be selected. 4011 * group_member - Enabled when protocol is part of a group. 4012 */ 4013 4014 /* Last 4 bits (31:28) are reserved to specify SRC, DST 4015 * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST, 4016 * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST 4017 * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST 4018 */ 4019 l3_l4_src_dst = flow_cfg; 4020 /* Reset these 4 bits, so that these won't be part of key */ 4021 flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK; 4022 4023 keyoff_marker = 0; max_key_off = 0; group_member = 0; 4024 nr_field = 0; key_off = 0; field_marker = 1; 4025 field = &tmp; max_bit_pos = fls(flow_cfg); 4026 for (idx = 0; 4027 idx < max_bit_pos && nr_field < FIELDS_PER_ALG && 4028 key_off < MAX_KEY_OFF; idx++) { 4029 key_type = BIT(idx); 4030 valid_key = flow_cfg & key_type; 4031 /* Found a field marker, reset the field values */ 4032 if (field_marker) 4033 memset(&tmp, 0, sizeof(tmp)); 4034 4035 field_marker = true; 4036 keyoff_marker = true; 4037 switch (key_type) { 4038 case NIX_FLOW_KEY_TYPE_PORT: 4039 field->sel_chan = true; 4040 /* This should be set to 1, when SEL_CHAN is set */ 4041 field->bytesm1 = 1; 4042 break; 4043 case NIX_FLOW_KEY_TYPE_IPV4_PROTO: 4044 field->lid = NPC_LID_LC; 4045 field->hdr_offset = 9; /* offset */ 4046 field->bytesm1 = 0; /* 1 byte */ 4047 field->ltype_match = NPC_LT_LC_IP; 4048 field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK; 4049 break; 4050 case NIX_FLOW_KEY_TYPE_IPV4: 4051 case NIX_FLOW_KEY_TYPE_INNR_IPV4: 4052 field->lid = NPC_LID_LC; 4053 field->ltype_match = NPC_LT_LC_IP; 4054 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) { 4055 field->lid = NPC_LID_LG; 4056 field->ltype_match = NPC_LT_LG_TU_IP; 4057 } 4058 field->hdr_offset = 12; /* SIP offset */ 4059 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ 4060 4061 /* Only SIP */ 4062 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) 4063 field->bytesm1 = 3; /* SIP, 4 bytes */ 4064 4065 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { 4066 /* Both SIP + DIP */ 4067 if (field->bytesm1 == 3) { 4068 field->bytesm1 = 7; /* SIP + DIP, 8B */ 4069 } else { 4070 /* Only DIP */ 4071 field->hdr_offset = 16; /* DIP off */ 4072 field->bytesm1 = 3; /* DIP, 4 bytes */ 4073 } 4074 } 4075 field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK; 4076 keyoff_marker = false; 4077 break; 4078 case NIX_FLOW_KEY_TYPE_IPV6: 4079 case NIX_FLOW_KEY_TYPE_INNR_IPV6: 4080 field->lid = NPC_LID_LC; 4081 field->ltype_match = NPC_LT_LC_IP6; 4082 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) { 4083 field->lid = NPC_LID_LG; 4084 field->ltype_match = NPC_LT_LG_TU_IP6; 4085 } 4086 field->hdr_offset = 8; /* SIP offset */ 4087 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ 4088 4089 /* Only SIP */ 4090 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) 4091 field->bytesm1 = 15; /* SIP, 16 bytes */ 4092 4093 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { 4094 /* Both SIP + DIP */ 4095 if (field->bytesm1 == 15) { 4096 /* SIP + DIP, 32 bytes */ 4097 field->bytesm1 = 31; 4098 } else { 4099 /* Only DIP */ 4100 field->hdr_offset = 24; /* DIP off */ 4101 field->bytesm1 = 15; /* DIP,16 bytes */ 4102 } 4103 } 4104 field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK; 4105 break; 4106 case NIX_FLOW_KEY_TYPE_TCP: 4107 case NIX_FLOW_KEY_TYPE_UDP: 4108 case NIX_FLOW_KEY_TYPE_SCTP: 4109 case NIX_FLOW_KEY_TYPE_INNR_TCP: 4110 case NIX_FLOW_KEY_TYPE_INNR_UDP: 4111 case NIX_FLOW_KEY_TYPE_INNR_SCTP: 4112 field->lid = NPC_LID_LD; 4113 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP || 4114 key_type == 
NIX_FLOW_KEY_TYPE_INNR_UDP || 4115 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) 4116 field->lid = NPC_LID_LH; 4117 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ 4118 4119 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY) 4120 field->bytesm1 = 1; /* SRC, 2 bytes */ 4121 4122 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) { 4123 /* Both SRC + DST */ 4124 if (field->bytesm1 == 1) { 4125 /* SRC + DST, 4 bytes */ 4126 field->bytesm1 = 3; 4127 } else { 4128 /* Only DIP */ 4129 field->hdr_offset = 2; /* DST off */ 4130 field->bytesm1 = 1; /* DST, 2 bytes */ 4131 } 4132 } 4133 4134 /* Enum values for NPC_LID_LD and NPC_LID_LG are same, 4135 * so no need to change the ltype_match, just change 4136 * the lid for inner protocols 4137 */ 4138 BUILD_BUG_ON((int)NPC_LT_LD_TCP != 4139 (int)NPC_LT_LH_TU_TCP); 4140 BUILD_BUG_ON((int)NPC_LT_LD_UDP != 4141 (int)NPC_LT_LH_TU_UDP); 4142 BUILD_BUG_ON((int)NPC_LT_LD_SCTP != 4143 (int)NPC_LT_LH_TU_SCTP); 4144 4145 if ((key_type == NIX_FLOW_KEY_TYPE_TCP || 4146 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) && 4147 valid_key) { 4148 field->ltype_match |= NPC_LT_LD_TCP; 4149 group_member = true; 4150 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP || 4151 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) && 4152 valid_key) { 4153 field->ltype_match |= NPC_LT_LD_UDP; 4154 group_member = true; 4155 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP || 4156 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) && 4157 valid_key) { 4158 field->ltype_match |= NPC_LT_LD_SCTP; 4159 group_member = true; 4160 } 4161 field->ltype_mask = ~field->ltype_match; 4162 if (key_type == NIX_FLOW_KEY_TYPE_SCTP || 4163 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) { 4164 /* Handle the case where any of the group item 4165 * is enabled in the group but not the final one 4166 */ 4167 if (group_member) { 4168 valid_key = true; 4169 group_member = false; 4170 } 4171 } else { 4172 field_marker = false; 4173 keyoff_marker = false; 4174 } 4175 4176 /* TCP/UDP/SCTP and ESP/AH falls at same offset so 4177 * remember the TCP key offset of 40 byte hash key. 
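 *
 * Worked example (illustrative, assuming the default IPV4 | IPV6 | TCP
 * algorithm programmed by nix_rx_flowkey_alg_cfg()): IPv4 and IPv6
 * addresses share key bytes 0..31 since a packet can match only one of
 * them, the TCP ports then land at key offset 32, so l4_key_offset
 * remembers 32 and a later ESP/AH field hashes at that same offset
 * instead of consuming fresh key space.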
4178 */ 4179 if (key_type == NIX_FLOW_KEY_TYPE_TCP) 4180 l4_key_offset = key_off; 4181 break; 4182 case NIX_FLOW_KEY_TYPE_NVGRE: 4183 field->lid = NPC_LID_LD; 4184 field->hdr_offset = 4; /* VSID offset */ 4185 field->bytesm1 = 2; 4186 field->ltype_match = NPC_LT_LD_NVGRE; 4187 field->ltype_mask = 0xF; 4188 break; 4189 case NIX_FLOW_KEY_TYPE_VXLAN: 4190 case NIX_FLOW_KEY_TYPE_GENEVE: 4191 field->lid = NPC_LID_LE; 4192 field->bytesm1 = 2; 4193 field->hdr_offset = 4; 4194 field->ltype_mask = 0xF; 4195 field_marker = false; 4196 keyoff_marker = false; 4197 4198 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) { 4199 field->ltype_match |= NPC_LT_LE_VXLAN; 4200 group_member = true; 4201 } 4202 4203 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) { 4204 field->ltype_match |= NPC_LT_LE_GENEVE; 4205 group_member = true; 4206 } 4207 4208 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) { 4209 if (group_member) { 4210 field->ltype_mask = ~field->ltype_match; 4211 field_marker = true; 4212 keyoff_marker = true; 4213 valid_key = true; 4214 group_member = false; 4215 } 4216 } 4217 break; 4218 case NIX_FLOW_KEY_TYPE_ETH_DMAC: 4219 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC: 4220 field->lid = NPC_LID_LA; 4221 field->ltype_match = NPC_LT_LA_ETHER; 4222 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) { 4223 field->lid = NPC_LID_LF; 4224 field->ltype_match = NPC_LT_LF_TU_ETHER; 4225 } 4226 field->hdr_offset = 0; 4227 field->bytesm1 = 5; /* DMAC 6 Byte */ 4228 field->ltype_mask = 0xF; 4229 break; 4230 case NIX_FLOW_KEY_TYPE_IPV6_EXT: 4231 field->lid = NPC_LID_LC; 4232 field->hdr_offset = 40; /* IPV6 hdr */ 4233 field->bytesm1 = 0; /* 1 Byte ext hdr*/ 4234 field->ltype_match = NPC_LT_LC_IP6_EXT; 4235 field->ltype_mask = 0xF; 4236 break; 4237 case NIX_FLOW_KEY_TYPE_GTPU: 4238 field->lid = NPC_LID_LE; 4239 field->hdr_offset = 4; 4240 field->bytesm1 = 3; /* 4 bytes TID*/ 4241 field->ltype_match = NPC_LT_LE_GTPU; 4242 field->ltype_mask = 0xF; 4243 break; 4244 case NIX_FLOW_KEY_TYPE_CUSTOM0: 4245 field->lid = NPC_LID_LC; 4246 field->hdr_offset = 6; 4247 field->bytesm1 = 1; /* 2 Bytes*/ 4248 field->ltype_match = NPC_LT_LC_CUSTOM0; 4249 field->ltype_mask = 0xF; 4250 break; 4251 case NIX_FLOW_KEY_TYPE_VLAN: 4252 field->lid = NPC_LID_LB; 4253 field->hdr_offset = 2; /* Skip TPID (2-bytes) */ 4254 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */ 4255 field->ltype_match = NPC_LT_LB_CTAG; 4256 field->ltype_mask = 0xF; 4257 field->fn_mask = 1; /* Mask out the first nibble */ 4258 break; 4259 case NIX_FLOW_KEY_TYPE_AH: 4260 case NIX_FLOW_KEY_TYPE_ESP: 4261 field->hdr_offset = 0; 4262 field->bytesm1 = 7; /* SPI + sequence number */ 4263 field->ltype_mask = 0xF; 4264 field->lid = NPC_LID_LE; 4265 field->ltype_match = NPC_LT_LE_ESP; 4266 if (key_type == NIX_FLOW_KEY_TYPE_AH) { 4267 field->lid = NPC_LID_LD; 4268 field->ltype_match = NPC_LT_LD_AH; 4269 field->hdr_offset = 4; 4270 keyoff_marker = false; 4271 } 4272 break; 4273 } 4274 field->ena = 1; 4275 4276 /* Found a valid flow key type */ 4277 if (valid_key) { 4278 /* Use the key offset of TCP/UDP/SCTP fields 4279 * for ESP/AH fields. 
 */
			if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
			    key_type == NIX_FLOW_KEY_TYPE_AH)
				key_off = l4_key_offset;
			field->key_offset = key_off;
			memcpy(&alg[nr_field], field, sizeof(*field));
			max_key_off = max(max_key_off, field->bytesm1 + 1);

			/* Found a field marker, get the next field */
			if (field_marker)
				nr_field++;
		}

		/* Found a keyoff marker, update the new key_off */
		if (keyoff_marker) {
			key_off += max_key_off;
			max_key_off = 0;
		}
	}
	/* Processed all the flow key types */
	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
		return 0;
	else
		return NIX_AF_ERR_RSS_NOSPC_FIELD;
}

static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
{
	u64 field[FIELDS_PER_ALG];
	struct nix_hw *hw;
	int fid, rc;

	hw = get_nix_hw(rvu->hw, blkaddr);
	if (!hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	/* No room to add new flow hash algorithm */
	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
		return NIX_AF_ERR_RSS_NOSPC_ALGO;

	/* Generate algo fields for the given flow_cfg */
	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
	if (rc)
		return rc;

	/* Update ALGX_FIELDX register with generated fields */
	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
							   fid), field[fid]);

	/* Store the flow_cfg for further lookup */
	rc = hw->flowkey.in_use;
	hw->flowkey.flowkey[rc] = flow_cfg;
	hw->flowkey.in_use++;

	return rc;
}

int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
					 struct nix_rss_flowkey_cfg *req,
					 struct nix_rss_flowkey_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int alg_idx, nixlf, blkaddr;
	struct nix_hw *nix_hw;
	int err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
	/* Failed to get algo index from the existing list, reserve new */
	if (alg_idx < 0) {
		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
						  req->flowkey_cfg);
		if (alg_idx < 0)
			return alg_idx;
	}
	rsp->alg_idx = alg_idx;
	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
				       alg_idx, req->mcam_index);
	return 0;
}

static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
{
	u32 flowkey_cfg, minkey_cfg;
	int alg, fid, rc;

	/* Disable all flow key algx fieldx */
	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
			rvu_write64(rvu, blkaddr,
				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
				    0);
	}

	/* IPv4/IPv6 SIP/DIPs */
	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
	minkey_cfg = flowkey_cfg;
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
	rc =
reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4399 if (rc < 0) 4400 return rc; 4401 4402 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 4403 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP; 4404 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4405 if (rc < 0) 4406 return rc; 4407 4408 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */ 4409 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 4410 NIX_FLOW_KEY_TYPE_UDP; 4411 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4412 if (rc < 0) 4413 return rc; 4414 4415 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 4416 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 4417 NIX_FLOW_KEY_TYPE_SCTP; 4418 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4419 if (rc < 0) 4420 return rc; 4421 4422 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 4423 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP | 4424 NIX_FLOW_KEY_TYPE_SCTP; 4425 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4426 if (rc < 0) 4427 return rc; 4428 4429 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 4430 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 4431 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP; 4432 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4433 if (rc < 0) 4434 return rc; 4435 4436 return 0; 4437 } 4438 4439 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, 4440 struct nix_set_mac_addr *req, 4441 struct msg_rsp *rsp) 4442 { 4443 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK; 4444 u16 pcifunc = req->hdr.pcifunc; 4445 int blkaddr, nixlf, err; 4446 struct rvu_pfvf *pfvf; 4447 4448 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 4449 if (err) 4450 return err; 4451 4452 pfvf = rvu_get_pfvf(rvu, pcifunc); 4453 4454 /* untrusted VF can't overwrite admin(PF) changes */ 4455 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 4456 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) { 4457 dev_warn(rvu->dev, 4458 "MAC address set by admin(PF) cannot be overwritten by untrusted VF"); 4459 return -EPERM; 4460 } 4461 4462 ether_addr_copy(pfvf->mac_addr, req->mac_addr); 4463 4464 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 4465 pfvf->rx_chan_base, req->mac_addr); 4466 4467 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) 4468 ether_addr_copy(pfvf->default_mac, req->mac_addr); 4469 4470 return 0; 4471 } 4472 4473 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, 4474 struct msg_req *req, 4475 struct nix_get_mac_addr_rsp *rsp) 4476 { 4477 u16 pcifunc = req->hdr.pcifunc; 4478 struct rvu_pfvf *pfvf; 4479 4480 if (!is_nixlf_attached(rvu, pcifunc)) 4481 return NIX_AF_ERR_AF_LF_INVALID; 4482 4483 pfvf = rvu_get_pfvf(rvu, pcifunc); 4484 4485 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 4486 4487 return 0; 4488 } 4489 4490 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, 4491 struct msg_rsp *rsp) 4492 { 4493 bool allmulti, promisc, nix_rx_multicast; 4494 u16 pcifunc = req->hdr.pcifunc; 4495 struct rvu_pfvf *pfvf; 4496 int nixlf, err; 4497 4498 pfvf = rvu_get_pfvf(rvu, pcifunc); 4499 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false; 4500 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false; 4501 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? 
true : false; 4502 4503 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list; 4504 4505 if (is_vf(pcifunc) && !nix_rx_multicast && 4506 (promisc || allmulti)) { 4507 dev_warn_ratelimited(rvu->dev, 4508 "VF promisc/multicast not supported\n"); 4509 return 0; 4510 } 4511 4512 /* untrusted VF can't configure promisc/allmulti */ 4513 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 4514 (promisc || allmulti)) 4515 return 0; 4516 4517 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 4518 if (err) 4519 return err; 4520 4521 if (nix_rx_multicast) { 4522 /* add/del this PF_FUNC to/from mcast pkt replication list */ 4523 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY, 4524 allmulti); 4525 if (err) { 4526 dev_err(rvu->dev, 4527 "Failed to update pcifunc 0x%x to multicast list\n", 4528 pcifunc); 4529 return err; 4530 } 4531 4532 /* add/del this PF_FUNC to/from promisc pkt replication list */ 4533 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY, 4534 promisc); 4535 if (err) { 4536 dev_err(rvu->dev, 4537 "Failed to update pcifunc 0x%x to promisc list\n", 4538 pcifunc); 4539 return err; 4540 } 4541 } 4542 4543 /* install/uninstall allmulti entry */ 4544 if (allmulti) { 4545 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf, 4546 pfvf->rx_chan_base); 4547 } else { 4548 if (!nix_rx_multicast) 4549 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false); 4550 } 4551 4552 /* install/uninstall promisc entry */ 4553 if (promisc) 4554 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 4555 pfvf->rx_chan_base, 4556 pfvf->rx_chan_cnt); 4557 else 4558 if (!nix_rx_multicast) 4559 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false); 4560 4561 return 0; 4562 } 4563 4564 static void nix_find_link_frs(struct rvu *rvu, 4565 struct nix_frs_cfg *req, u16 pcifunc) 4566 { 4567 int pf = rvu_get_pf(rvu->pdev, pcifunc); 4568 struct rvu_pfvf *pfvf; 4569 int maxlen, minlen; 4570 int numvfs, hwvf; 4571 int vf; 4572 4573 /* Update with requester's min/max lengths */ 4574 pfvf = rvu_get_pfvf(rvu, pcifunc); 4575 pfvf->maxlen = req->maxlen; 4576 if (req->update_minlen) 4577 pfvf->minlen = req->minlen; 4578 4579 maxlen = req->maxlen; 4580 minlen = req->update_minlen ? 
req->minlen : 0; 4581 4582 /* Get this PF's numVFs and starting hwvf */ 4583 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); 4584 4585 /* For each VF, compare requested max/minlen */ 4586 for (vf = 0; vf < numvfs; vf++) { 4587 pfvf = &rvu->hwvf[hwvf + vf]; 4588 if (pfvf->maxlen > maxlen) 4589 maxlen = pfvf->maxlen; 4590 if (req->update_minlen && 4591 pfvf->minlen && pfvf->minlen < minlen) 4592 minlen = pfvf->minlen; 4593 } 4594 4595 /* Compare requested max/minlen with PF's max/minlen */ 4596 pfvf = &rvu->pf[pf]; 4597 if (pfvf->maxlen > maxlen) 4598 maxlen = pfvf->maxlen; 4599 if (req->update_minlen && 4600 pfvf->minlen && pfvf->minlen < minlen) 4601 minlen = pfvf->minlen; 4602 4603 /* Update the request with max/min PF's and it's VF's max/min */ 4604 req->maxlen = maxlen; 4605 if (req->update_minlen) 4606 req->minlen = minlen; 4607 } 4608 4609 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, 4610 struct msg_rsp *rsp) 4611 { 4612 struct rvu_hwinfo *hw = rvu->hw; 4613 u16 pcifunc = req->hdr.pcifunc; 4614 int pf = rvu_get_pf(rvu->pdev, pcifunc); 4615 int blkaddr, link = -1; 4616 struct nix_hw *nix_hw; 4617 struct rvu_pfvf *pfvf; 4618 u8 cgx = 0, lmac = 0; 4619 u16 max_mtu; 4620 u64 cfg; 4621 4622 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 4623 if (blkaddr < 0) 4624 return NIX_AF_ERR_AF_LF_INVALID; 4625 4626 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4627 if (!nix_hw) 4628 return NIX_AF_ERR_INVALID_NIXBLK; 4629 4630 if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) 4631 rvu_get_lbk_link_max_frs(rvu, &max_mtu); 4632 else 4633 rvu_get_lmac_link_max_frs(rvu, &max_mtu); 4634 4635 if (!req->sdp_link && req->maxlen > max_mtu) 4636 return NIX_AF_ERR_FRS_INVALID; 4637 4638 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) 4639 return NIX_AF_ERR_FRS_INVALID; 4640 4641 /* Check if config is for SDP link */ 4642 if (req->sdp_link) { 4643 if (!hw->sdp_links) 4644 return NIX_AF_ERR_RX_LINK_INVALID; 4645 link = hw->cgx_links + hw->lbk_links; 4646 goto linkcfg; 4647 } 4648 4649 /* Check if the request is from CGX mapped RVU PF */ 4650 if (is_pf_cgxmapped(rvu, pf)) { 4651 /* Get CGX and LMAC to which this PF is mapped and find link */ 4652 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac); 4653 link = (cgx * hw->lmac_per_cgx) + lmac; 4654 } else if (pf == 0) { 4655 /* For VFs of PF0 ingress is LBK port, so config LBK link */ 4656 pfvf = rvu_get_pfvf(rvu, pcifunc); 4657 link = hw->cgx_links + pfvf->lbkid; 4658 } else if (is_rep_dev(rvu, pcifunc)) { 4659 link = hw->cgx_links + 0; 4660 } 4661 4662 if (link < 0) 4663 return NIX_AF_ERR_RX_LINK_INVALID; 4664 4665 linkcfg: 4666 nix_find_link_frs(rvu, req, pcifunc); 4667 4668 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); 4669 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); 4670 if (req->update_minlen) 4671 cfg = (cfg & ~0xFFFFULL) | req->minlen; 4672 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); 4673 4674 return 0; 4675 } 4676 4677 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, 4678 struct msg_rsp *rsp) 4679 { 4680 int nixlf, blkaddr, err; 4681 u64 cfg; 4682 4683 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr); 4684 if (err) 4685 return err; 4686 4687 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf)); 4688 /* Set the interface configuration */ 4689 if (req->len_verify & BIT(0)) 4690 cfg |= BIT_ULL(41); 4691 else 4692 cfg &= ~BIT_ULL(41); 4693 4694 if (req->len_verify & BIT(1)) 4695 cfg |= BIT_ULL(40); 4696 else 4697 cfg &= 
~BIT_ULL(40); 4698 4699 if (req->len_verify & NIX_RX_DROP_RE) 4700 cfg |= BIT_ULL(32); 4701 else 4702 cfg &= ~BIT_ULL(32); 4703 4704 if (req->csum_verify & BIT(0)) 4705 cfg |= BIT_ULL(37); 4706 else 4707 cfg &= ~BIT_ULL(37); 4708 4709 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg); 4710 4711 return 0; 4712 } 4713 4714 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs) 4715 { 4716 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ 4717 } 4718 4719 static void nix_link_config(struct rvu *rvu, int blkaddr, 4720 struct nix_hw *nix_hw) 4721 { 4722 struct rvu_hwinfo *hw = rvu->hw; 4723 int cgx, lmac_cnt, slink, link; 4724 u16 lbk_max_frs, lmac_max_frs; 4725 unsigned long lmac_bmap; 4726 u64 tx_credits, cfg; 4727 u64 lmac_fifo_len; 4728 int iter; 4729 4730 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); 4731 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); 4732 4733 /* Set SDP link credit */ 4734 rvu_write64(rvu, blkaddr, NIX_AF_SDP_LINK_CREDIT, SDP_LINK_CREDIT); 4735 4736 /* Set default min/max packet lengths allowed on NIX Rx links. 4737 * 4738 * With HW reset minlen value of 60byte, HW will treat ARP pkts 4739 * as undersize and report them to SW as error pkts, hence 4740 * setting it to 40 bytes. 4741 */ 4742 for (link = 0; link < hw->cgx_links; link++) { 4743 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4744 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS); 4745 } 4746 4747 for (link = hw->cgx_links; link < hw->lbk_links; link++) { 4748 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4749 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS); 4750 } 4751 if (hw->sdp_links) { 4752 link = hw->cgx_links + hw->lbk_links; 4753 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4754 SDP_HW_MAX_FRS << 16 | SDP_HW_MIN_FRS); 4755 } 4756 4757 /* Get MCS external bypass status for CN10K-B */ 4758 if (mcs_get_blkcnt() == 1) { 4759 /* Adjust for 2 credits when external bypass is disabled */ 4760 nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2; 4761 } 4762 4763 /* Set credits for Tx links assuming max packet length allowed. 4764 * This will be reconfigured based on MTU set for PF/VF. 
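 *
 * Rough worked example (hypothetical numbers; the divide by 16 below
 * suggests one credit per 16 bytes of FIFO): a 16 KiB per-LMAC FIFO and
 * a 9216 byte max frame give (16384 - 9216) / 16 = 448 credits, which
 * are then programmed as
 *
 *   cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
 *
 * i.e. credit count from bit 12, max credit packet count in the low
 * bits and bit 1 as the credit enable.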
4765 */ 4766 for (cgx = 0; cgx < hw->cgx; cgx++) { 4767 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 4768 /* Skip when cgx is not available or lmac cnt is zero */ 4769 if (lmac_cnt <= 0) 4770 continue; 4771 slink = cgx * hw->lmac_per_cgx; 4772 4773 /* Get LMAC id's from bitmap */ 4774 lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu)); 4775 for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) { 4776 lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter); 4777 if (!lmac_fifo_len) { 4778 dev_err(rvu->dev, 4779 "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n", 4780 __func__, cgx, iter); 4781 continue; 4782 } 4783 tx_credits = (lmac_fifo_len - lmac_max_frs) / 16; 4784 /* Enable credits and set credit pkt count to max allowed */ 4785 cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 4786 cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt); 4787 4788 link = iter + slink; 4789 nix_hw->tx_credits[link] = tx_credits; 4790 rvu_write64(rvu, blkaddr, 4791 NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); 4792 } 4793 } 4794 4795 /* Set Tx credits for LBK link */ 4796 slink = hw->cgx_links; 4797 for (link = slink; link < (slink + hw->lbk_links); link++) { 4798 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs); 4799 nix_hw->tx_credits[link] = tx_credits; 4800 /* Enable credits and set credit pkt count to max allowed */ 4801 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 4802 rvu_write64(rvu, blkaddr, 4803 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); 4804 } 4805 } 4806 4807 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) 4808 { 4809 int idx, err; 4810 u64 status; 4811 4812 /* Start X2P bus calibration */ 4813 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4814 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9)); 4815 /* Wait for calibration to complete */ 4816 err = rvu_poll_reg(rvu, blkaddr, 4817 NIX_AF_STATUS, BIT_ULL(10), false); 4818 if (err) { 4819 dev_err(rvu->dev, "NIX X2P bus calibration failed\n"); 4820 return err; 4821 } 4822 4823 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS); 4824 /* Check if CGX devices are ready */ 4825 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) { 4826 /* Skip when cgx port is not available */ 4827 if (!rvu_cgx_pdata(idx, rvu) || 4828 (status & (BIT_ULL(16 + idx)))) 4829 continue; 4830 dev_err(rvu->dev, 4831 "CGX%d didn't respond to NIX X2P calibration\n", idx); 4832 err = -EBUSY; 4833 } 4834 4835 /* Check if LBK is ready */ 4836 if (!(status & BIT_ULL(19))) { 4837 dev_err(rvu->dev, 4838 "LBK didn't respond to NIX X2P calibration\n"); 4839 err = -EBUSY; 4840 } 4841 4842 /* Clear 'calibrate_x2p' bit */ 4843 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4844 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9)); 4845 if (err || (status & 0x3FFULL)) 4846 dev_err(rvu->dev, 4847 "NIX X2P calibration failed, status 0x%llx\n", status); 4848 if (err) 4849 return err; 4850 return 0; 4851 } 4852 4853 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) 4854 { 4855 u64 cfg; 4856 int err; 4857 4858 /* Set admin queue endianness */ 4859 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG); 4860 #ifdef __BIG_ENDIAN 4861 cfg |= BIT_ULL(8); 4862 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 4863 #else 4864 cfg &= ~BIT_ULL(8); 4865 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 4866 #endif 4867 4868 /* Do not bypass NDC cache */ 4869 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG); 4870 cfg &= ~0x3FFEULL; 4871 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 4872 /* Disable caching of SQB aka SQEs */ 4873 cfg |= 0x04ULL; 4874 #endif 4875 
rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg); 4876 4877 /* Result structure can be followed by RQ/SQ/CQ context at 4878 * RES + 128bytes and a write mask at RES + 256 bytes, depending on 4879 * operation type. Alloc sufficient result memory for all operations. 4880 */ 4881 err = rvu_aq_alloc(rvu, &block->aq, 4882 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s), 4883 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256); 4884 if (err) 4885 return err; 4886 4887 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE); 4888 rvu_write64(rvu, block->addr, 4889 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova); 4890 return 0; 4891 } 4892 4893 static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr) 4894 { 4895 struct rvu_hwinfo *hw = rvu->hw; 4896 u64 hw_const; 4897 4898 hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 4899 4900 /* On OcteonTx2 DWRR quantum is directly configured into each of 4901 * the transmit scheduler queues. And PF/VF drivers were free to 4902 * config any value upto 2^24. 4903 * On CN10K, HW is modified, the quantum configuration at scheduler 4904 * queues is in terms of weight. And SW needs to setup a base DWRR MTU 4905 * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do 4906 * 'DWRR MTU * weight' to get the quantum. 4907 * 4908 * Check if HW uses a common MTU for all DWRR quantum configs. 4909 * On OcteonTx2 this register field is '0'. 4910 */ 4911 if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61))) 4912 hw->cap.nix_common_dwrr_mtu = true; 4913 4914 if (hw_const & BIT_ULL(61)) 4915 hw->cap.nix_multiple_dwrr_mtu = true; 4916 } 4917 4918 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) 4919 { 4920 const struct npc_lt_def_cfg *ltdefs; 4921 struct rvu_hwinfo *hw = rvu->hw; 4922 int blkaddr = nix_hw->blkaddr; 4923 struct rvu_block *block; 4924 int err; 4925 u64 cfg; 4926 4927 block = &hw->block[blkaddr]; 4928 4929 if (is_rvu_96xx_B0(rvu)) { 4930 /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt 4931 * internal state when conditional clocks are turned off. 4932 * Hence enable them. 4933 */ 4934 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4935 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL); 4936 } 4937 4938 /* Set chan/link to backpressure TL3 instead of TL2 */ 4939 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01); 4940 4941 /* Disable SQ manager's sticky mode operation (set TM6 = 0, TM11 = 0) 4942 * This sticky mode is known to cause SQ stalls when multiple 4943 * SQs are mapped to same SMQ and transmitting pkts simultaneously. 4944 * NIX PSE may deadlock when there are any sticky to non-sticky 4945 * transmission. Hence disable it (TM5 = 0). 4946 */ 4947 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); 4948 cfg &= ~(BIT_ULL(15) | BIT_ULL(14) | BIT_ULL(23)); 4949 /* NIX may drop credits when condition clocks are turned off. 4950 * Hence enable control flow clk (set TM9 = 1). 
4951 */ 4952 cfg |= BIT_ULL(21); 4953 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); 4954 4955 ltdefs = rvu->kpu.lt_def; 4956 /* Calibrate X2P bus to check if CGX/LBK links are fine */ 4957 err = nix_calibrate_x2p(rvu, blkaddr); 4958 if (err) 4959 return err; 4960 4961 /* Setup capabilities of the NIX block */ 4962 rvu_nix_setup_capabilities(rvu, blkaddr); 4963 4964 /* Initialize admin queue */ 4965 err = nix_aq_init(rvu, block); 4966 if (err) 4967 return err; 4968 4969 /* Restore CINT timer delay to HW reset values */ 4970 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); 4971 4972 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG); 4973 4974 /* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */ 4975 cfg |= 1ULL; 4976 if (!is_rvu_otx2(rvu)) 4977 cfg |= NIX_PTP_1STEP_EN; 4978 4979 rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg); 4980 4981 if (!is_rvu_otx2(rvu)) 4982 rvu_nix_block_cn10k_init(rvu, nix_hw); 4983 4984 if (is_block_implemented(hw, blkaddr)) { 4985 err = nix_setup_txschq(rvu, nix_hw, blkaddr); 4986 if (err) 4987 return err; 4988 4989 err = nix_setup_ipolicers(rvu, nix_hw, blkaddr); 4990 if (err) 4991 return err; 4992 4993 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr); 4994 if (err) 4995 return err; 4996 4997 err = nix_setup_mcast(rvu, nix_hw, blkaddr); 4998 if (err) 4999 return err; 5000 5001 err = nix_setup_txvlan(rvu, nix_hw); 5002 if (err) 5003 return err; 5004 5005 err = nix_setup_bpids(rvu, nix_hw, blkaddr); 5006 if (err) 5007 return err; 5008 5009 /* Configure segmentation offload formats */ 5010 nix_setup_lso(rvu, nix_hw, blkaddr); 5011 5012 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info. 5013 * This helps HW protocol checker to identify headers 5014 * and validate length and checksums. 
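 *
 * Each NIX_AF_RX_DEF_* write below packs the same three fields taken
 * from the KPU lt_def profile:
 *
 *   value = (lid << 8) | (ltype_match << 4) | ltype_mask;
 *
 * e.g. a hypothetical rx_oip4 entry with lid = NPC_LID_LC,
 * ltype_match = NPC_LT_LC_IP and ltype_mask = 0xF tells the checker
 * that outer IPv4 is carried in NPC layer C.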
5015 */ 5016 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2, 5017 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) | 5018 ltdefs->rx_ol2.ltype_mask); 5019 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4, 5020 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) | 5021 ltdefs->rx_oip4.ltype_mask); 5022 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4, 5023 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) | 5024 ltdefs->rx_iip4.ltype_mask); 5025 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6, 5026 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) | 5027 ltdefs->rx_oip6.ltype_mask); 5028 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6, 5029 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) | 5030 ltdefs->rx_iip6.ltype_mask); 5031 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP, 5032 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) | 5033 ltdefs->rx_otcp.ltype_mask); 5034 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP, 5035 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) | 5036 ltdefs->rx_itcp.ltype_mask); 5037 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP, 5038 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) | 5039 ltdefs->rx_oudp.ltype_mask); 5040 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP, 5041 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) | 5042 ltdefs->rx_iudp.ltype_mask); 5043 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP, 5044 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) | 5045 ltdefs->rx_osctp.ltype_mask); 5046 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP, 5047 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) | 5048 ltdefs->rx_isctp.ltype_mask); 5049 5050 if (!is_rvu_otx2(rvu)) { 5051 /* Enable APAD calculation for other protocols 5052 * matching APAD0 and APAD1 lt def registers. 5053 */ 5054 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0, 5055 (ltdefs->rx_apad0.valid << 11) | 5056 (ltdefs->rx_apad0.lid << 8) | 5057 (ltdefs->rx_apad0.ltype_match << 4) | 5058 ltdefs->rx_apad0.ltype_mask); 5059 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1, 5060 (ltdefs->rx_apad1.valid << 11) | 5061 (ltdefs->rx_apad1.lid << 8) | 5062 (ltdefs->rx_apad1.ltype_match << 4) | 5063 ltdefs->rx_apad1.ltype_mask); 5064 5065 /* Receive ethertype definition register defines layer 5066 * information in NPC_RESULT_S to identify the Ethertype 5067 * location in L2 header. Used for Ethertype overwriting 5068 * in inline IPsec flow. 
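 *
 * The NIX_AF_RX_DEF_ET() writes below extend the packing used above
 * with two more fields:
 *
 *   value = (offset << 12) | (valid << 11) | (lid << 8) |
 *           (ltype_match << 4) | ltype_mask;
 *
 * where 'offset' is presumably the Ethertype's byte offset within the
 * matched layer and 'valid' enables the definition.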
5069 */ 5070 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0), 5071 (ltdefs->rx_et[0].offset << 12) | 5072 (ltdefs->rx_et[0].valid << 11) | 5073 (ltdefs->rx_et[0].lid << 8) | 5074 (ltdefs->rx_et[0].ltype_match << 4) | 5075 ltdefs->rx_et[0].ltype_mask); 5076 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1), 5077 (ltdefs->rx_et[1].offset << 12) | 5078 (ltdefs->rx_et[1].valid << 11) | 5079 (ltdefs->rx_et[1].lid << 8) | 5080 (ltdefs->rx_et[1].ltype_match << 4) | 5081 ltdefs->rx_et[1].ltype_mask); 5082 } 5083 5084 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); 5085 if (err) 5086 return err; 5087 5088 nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links, 5089 sizeof(u64), GFP_KERNEL); 5090 if (!nix_hw->tx_credits) 5091 return -ENOMEM; 5092 5093 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ 5094 nix_link_config(rvu, blkaddr, nix_hw); 5095 5096 /* Enable Channel backpressure */ 5097 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); 5098 } 5099 return 0; 5100 } 5101 5102 int rvu_nix_init(struct rvu *rvu) 5103 { 5104 struct rvu_hwinfo *hw = rvu->hw; 5105 struct nix_hw *nix_hw; 5106 int blkaddr = 0, err; 5107 int i = 0; 5108 5109 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw), 5110 GFP_KERNEL); 5111 if (!hw->nix) 5112 return -ENOMEM; 5113 5114 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 5115 while (blkaddr) { 5116 nix_hw = &hw->nix[i]; 5117 nix_hw->rvu = rvu; 5118 nix_hw->blkaddr = blkaddr; 5119 err = rvu_nix_block_init(rvu, nix_hw); 5120 if (err) 5121 return err; 5122 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 5123 i++; 5124 } 5125 5126 return 0; 5127 } 5128 5129 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, 5130 struct rvu_block *block) 5131 { 5132 struct nix_txsch *txsch; 5133 struct nix_mcast *mcast; 5134 struct nix_txvlan *vlan; 5135 struct nix_hw *nix_hw; 5136 int lvl; 5137 5138 rvu_aq_free(rvu, block->aq); 5139 5140 if (is_block_implemented(rvu->hw, blkaddr)) { 5141 nix_hw = get_nix_hw(rvu->hw, blkaddr); 5142 if (!nix_hw) 5143 return; 5144 5145 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 5146 txsch = &nix_hw->txsch[lvl]; 5147 kfree(txsch->schq.bmap); 5148 } 5149 5150 kfree(nix_hw->tx_credits); 5151 5152 nix_ipolicer_freemem(rvu, nix_hw); 5153 5154 vlan = &nix_hw->txvlan; 5155 kfree(vlan->rsrc.bmap); 5156 mutex_destroy(&vlan->rsrc_lock); 5157 5158 mcast = &nix_hw->mcast; 5159 qmem_free(rvu->dev, mcast->mce_ctx); 5160 qmem_free(rvu->dev, mcast->mcast_buf); 5161 mutex_destroy(&mcast->mce_lock); 5162 } 5163 } 5164 5165 void rvu_nix_freemem(struct rvu *rvu) 5166 { 5167 struct rvu_hwinfo *hw = rvu->hw; 5168 struct rvu_block *block; 5169 int blkaddr = 0; 5170 5171 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 5172 while (blkaddr) { 5173 block = &hw->block[blkaddr]; 5174 rvu_nix_block_freemem(rvu, blkaddr, block); 5175 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 5176 } 5177 } 5178 5179 static void nix_mcast_update_action(struct rvu *rvu, 5180 struct nix_mcast_grp_elem *elem) 5181 { 5182 struct npc_mcam *mcam = &rvu->hw->mcam; 5183 struct nix_rx_action rx_action = { 0 }; 5184 struct nix_tx_action tx_action = { 0 }; 5185 int npc_blkaddr; 5186 5187 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 5188 if (elem->dir == NIX_MCAST_INGRESS) { 5189 *(u64 *)&rx_action = npc_get_mcam_action(rvu, mcam, 5190 npc_blkaddr, 5191 elem->mcam_index); 5192 rx_action.index = elem->mce_start_index; 5193 npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index, 5194 *(u64 *)&rx_action); 5195 } else { 5196 *(u64 *)&tx_action = 
npc_get_mcam_action(rvu, mcam, 5197 npc_blkaddr, 5198 elem->mcam_index); 5199 tx_action.index = elem->mce_start_index; 5200 npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index, 5201 *(u64 *)&tx_action); 5202 } 5203 } 5204 5205 static void nix_mcast_update_mce_entry(struct rvu *rvu, u16 pcifunc, u8 is_active) 5206 { 5207 struct nix_mcast_grp_elem *elem; 5208 struct nix_mcast_grp *mcast_grp; 5209 struct nix_hw *nix_hw; 5210 int blkaddr; 5211 5212 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 5213 nix_hw = get_nix_hw(rvu->hw, blkaddr); 5214 if (!nix_hw) 5215 return; 5216 5217 mcast_grp = &nix_hw->mcast_grp; 5218 5219 mutex_lock(&mcast_grp->mcast_grp_lock); 5220 list_for_each_entry(elem, &mcast_grp->mcast_grp_head, list) { 5221 struct nix_mce_list *mce_list; 5222 struct mce *mce; 5223 5224 /* Iterate the group elements and disable the element which 5225 * received the disable request. 5226 */ 5227 mce_list = &elem->mcast_mce_list; 5228 hlist_for_each_entry(mce, &mce_list->head, node) { 5229 if (mce->pcifunc == pcifunc) { 5230 mce->is_active = is_active; 5231 break; 5232 } 5233 } 5234 5235 /* Dump the updated list to HW */ 5236 if (elem->dir == NIX_MCAST_INGRESS) 5237 nix_update_ingress_mce_list_hw(rvu, nix_hw, elem); 5238 else 5239 nix_update_egress_mce_list_hw(rvu, nix_hw, elem); 5240 5241 /* Update the multicast index in NPC rule */ 5242 nix_mcast_update_action(rvu, elem); 5243 } 5244 mutex_unlock(&mcast_grp->mcast_grp_lock); 5245 } 5246 5247 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, 5248 struct msg_rsp *rsp) 5249 { 5250 u16 pcifunc = req->hdr.pcifunc; 5251 struct rvu_pfvf *pfvf; 5252 int nixlf, err, pf; 5253 5254 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 5255 if (err) 5256 return err; 5257 5258 /* Enable the interface if it is in any multicast list */ 5259 nix_mcast_update_mce_entry(rvu, pcifunc, 1); 5260 5261 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); 5262 5263 npc_mcam_enable_flows(rvu, pcifunc); 5264 5265 pfvf = rvu_get_pfvf(rvu, pcifunc); 5266 set_bit(NIXLF_INITIALIZED, &pfvf->flags); 5267 5268 rvu_switch_update_rules(rvu, pcifunc, true); 5269 5270 pf = rvu_get_pf(rvu->pdev, pcifunc); 5271 if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) 5272 rvu_rep_notify_pfvf_state(rvu, pcifunc, true); 5273 5274 return rvu_cgx_start_stop_io(rvu, pcifunc, true); 5275 } 5276 5277 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, 5278 struct msg_rsp *rsp) 5279 { 5280 u16 pcifunc = req->hdr.pcifunc; 5281 struct rvu_pfvf *pfvf; 5282 int nixlf, err, pf; 5283 5284 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 5285 if (err) 5286 return err; 5287 5288 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 5289 /* Disable the interface if it is in any multicast list */ 5290 nix_mcast_update_mce_entry(rvu, pcifunc, 0); 5291 5292 5293 pfvf = rvu_get_pfvf(rvu, pcifunc); 5294 clear_bit(NIXLF_INITIALIZED, &pfvf->flags); 5295 5296 err = rvu_cgx_start_stop_io(rvu, pcifunc, false); 5297 if (err) 5298 return err; 5299 5300 rvu_switch_update_rules(rvu, pcifunc, false); 5301 rvu_cgx_tx_enable(rvu, pcifunc, true); 5302 5303 pf = rvu_get_pf(rvu->pdev, pcifunc); 5304 if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) 5305 rvu_rep_notify_pfvf_state(rvu, pcifunc, false); 5306 return 0; 5307 } 5308 5309 #define RX_SA_BASE GENMASK_ULL(52, 7) 5310 5311 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) 5312 { 5313 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 5314 struct hwctx_disable_req ctx_req; 5315 int pf = 
rvu_get_pf(rvu->pdev, pcifunc); 5316 struct mac_ops *mac_ops; 5317 u8 cgx_id, lmac_id; 5318 u64 sa_base; 5319 void *cgxd; 5320 int err; 5321 5322 ctx_req.hdr.pcifunc = pcifunc; 5323 5324 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */ 5325 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 5326 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); 5327 nix_interface_deinit(rvu, pcifunc, nixlf); 5328 nix_rx_sync(rvu, blkaddr); 5329 nix_txschq_free(rvu, pcifunc); 5330 5331 clear_bit(NIXLF_INITIALIZED, &pfvf->flags); 5332 5333 if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) 5334 rvu_rep_notify_pfvf_state(rvu, pcifunc, false); 5335 5336 rvu_cgx_start_stop_io(rvu, pcifunc, false); 5337 5338 if (pfvf->sq_ctx) { 5339 ctx_req.ctype = NIX_AQ_CTYPE_SQ; 5340 err = nix_lf_hwctx_disable(rvu, &ctx_req); 5341 if (err) 5342 dev_err(rvu->dev, "SQ ctx disable failed\n"); 5343 } 5344 5345 if (pfvf->rq_ctx) { 5346 ctx_req.ctype = NIX_AQ_CTYPE_RQ; 5347 err = nix_lf_hwctx_disable(rvu, &ctx_req); 5348 if (err) 5349 dev_err(rvu->dev, "RQ ctx disable failed\n"); 5350 } 5351 5352 if (pfvf->cq_ctx) { 5353 ctx_req.ctype = NIX_AQ_CTYPE_CQ; 5354 err = nix_lf_hwctx_disable(rvu, &ctx_req); 5355 if (err) 5356 dev_err(rvu->dev, "CQ ctx disable failed\n"); 5357 } 5358 5359 /* reset HW config done for Switch headers */ 5360 rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT, 5361 (PKIND_TX | PKIND_RX), 0, 0, 0, 0); 5362 5363 /* Disabling CGX and NPC config done for PTP */ 5364 if (pfvf->hw_rx_tstamp_en) { 5365 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 5366 cgxd = rvu_cgx_pdata(cgx_id, rvu); 5367 mac_ops = get_mac_ops(cgxd); 5368 mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false); 5369 /* Undo NPC config done for PTP */ 5370 if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false)) 5371 dev_err(rvu->dev, "NPC config for PTP failed\n"); 5372 pfvf->hw_rx_tstamp_en = false; 5373 } 5374 5375 /* reset priority flow control config */ 5376 rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0); 5377 5378 /* reset 802.3x flow control config */ 5379 rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0); 5380 5381 nix_ctx_free(rvu, pfvf); 5382 5383 nix_free_all_bandprof(rvu, pcifunc); 5384 5385 sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf)); 5386 if (FIELD_GET(RX_SA_BASE, sa_base)) { 5387 err = rvu_cpt_ctx_flush(rvu, pcifunc); 5388 if (err) 5389 dev_err(rvu->dev, 5390 "CPT ctx flush failed with error: %d\n", err); 5391 } 5392 } 5393 5394 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) 5395 5396 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) 5397 { 5398 struct rvu_hwinfo *hw = rvu->hw; 5399 struct rvu_block *block; 5400 int blkaddr, pf; 5401 int nixlf; 5402 u64 cfg; 5403 5404 pf = rvu_get_pf(rvu->pdev, pcifunc); 5405 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) 5406 return 0; 5407 5408 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 5409 if (blkaddr < 0) 5410 return NIX_AF_ERR_AF_LF_INVALID; 5411 5412 block = &hw->block[blkaddr]; 5413 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 5414 if (nixlf < 0) 5415 return NIX_AF_ERR_AF_LF_INVALID; 5416 5417 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf)); 5418 5419 if (enable) 5420 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN; 5421 else 5422 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN; 5423 5424 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 5425 5426 return 0; 5427 } 5428 5429 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req, 5430 struct msg_rsp *rsp) 5431 { 5432 return 
rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true); 5433 } 5434 5435 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req, 5436 struct msg_rsp *rsp) 5437 { 5438 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false); 5439 } 5440 5441 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, 5442 struct nix_lso_format_cfg *req, 5443 struct nix_lso_format_cfg_rsp *rsp) 5444 { 5445 u16 pcifunc = req->hdr.pcifunc; 5446 struct nix_hw *nix_hw; 5447 struct rvu_pfvf *pfvf; 5448 int blkaddr, idx, f; 5449 u64 reg; 5450 5451 pfvf = rvu_get_pfvf(rvu, pcifunc); 5452 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 5453 if (!pfvf->nixlf || blkaddr < 0) 5454 return NIX_AF_ERR_AF_LF_INVALID; 5455 5456 nix_hw = get_nix_hw(rvu->hw, blkaddr); 5457 if (!nix_hw) 5458 return NIX_AF_ERR_INVALID_NIXBLK; 5459 5460 /* Find existing matching LSO format, if any */ 5461 for (idx = 0; idx < nix_hw->lso.in_use; idx++) { 5462 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) { 5463 reg = rvu_read64(rvu, blkaddr, 5464 NIX_AF_LSO_FORMATX_FIELDX(idx, f)); 5465 if (req->fields[f] != (reg & req->field_mask)) 5466 break; 5467 } 5468 5469 if (f == NIX_LSO_FIELD_MAX) 5470 break; 5471 } 5472 5473 if (idx < nix_hw->lso.in_use) { 5474 /* Match found */ 5475 rsp->lso_format_idx = idx; 5476 return 0; 5477 } 5478 5479 if (nix_hw->lso.in_use == nix_hw->lso.total) 5480 return NIX_AF_ERR_LSO_CFG_FAIL; 5481 5482 rsp->lso_format_idx = nix_hw->lso.in_use++; 5483 5484 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) 5485 rvu_write64(rvu, blkaddr, 5486 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f), 5487 req->fields[f]); 5488 5489 return 0; 5490 } 5491 5492 #define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48) 5493 #define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32) 5494 #define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16) 5495 #define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0) 5496 5497 #define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24) 5498 #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8) 5499 #define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0) 5500 5501 #define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32) 5502 #define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22) 5503 #define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0) 5504 5505 static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req, 5506 int blkaddr) 5507 { 5508 u8 cpt_idx, cpt_blkaddr; 5509 u64 val; 5510 5511 cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1; 5512 if (req->enable) { 5513 val = 0; 5514 /* Enable context prefetching */ 5515 if (!is_rvu_otx2(rvu)) 5516 val |= BIT_ULL(51); 5517 5518 /* Set OPCODE and EGRP */ 5519 val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp); 5520 val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode); 5521 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1); 5522 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2); 5523 5524 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val); 5525 5526 /* Set CPT queue for inline IPSec */ 5527 val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot); 5528 val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC, 5529 req->inst_qsel.cpt_pf_func); 5530 5531 if (!is_rvu_otx2(rvu)) { 5532 cpt_blkaddr = (cpt_idx == 0) ? 
BLKADDR_CPT0 : 5533 BLKADDR_CPT1; 5534 val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr); 5535 } 5536 5537 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), 5538 val); 5539 5540 /* Set CPT credit */ 5541 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); 5542 if ((val & 0x3FFFFF) != 0x3FFFFF) 5543 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), 5544 0x3FFFFF - val); 5545 5546 val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit); 5547 val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid); 5548 val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th); 5549 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val); 5550 } else { 5551 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0); 5552 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), 5553 0x0); 5554 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); 5555 if ((val & 0x3FFFFF) != 0x3FFFFF) 5556 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), 5557 0x3FFFFF - val); 5558 } 5559 } 5560 5561 int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu, 5562 struct nix_inline_ipsec_cfg *req, 5563 struct msg_rsp *rsp) 5564 { 5565 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5566 return 0; 5567 5568 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0); 5569 if (is_block_implemented(rvu->hw, BLKADDR_CPT1)) 5570 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1); 5571 5572 return 0; 5573 } 5574 5575 int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu, 5576 struct msg_req *req, 5577 struct nix_inline_ipsec_cfg *rsp) 5578 5579 { 5580 u64 val; 5581 5582 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5583 return 0; 5584 5585 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG); 5586 rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val); 5587 rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val); 5588 rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val); 5589 rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val); 5590 5591 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0)); 5592 rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val); 5593 rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val); 5594 rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val); 5595 5596 return 0; 5597 } 5598 5599 int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu, 5600 struct nix_inline_ipsec_lf_cfg *req, 5601 struct msg_rsp *rsp) 5602 { 5603 int lf, blkaddr, err; 5604 u64 val; 5605 5606 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5607 return 0; 5608 5609 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr); 5610 if (err) 5611 return err; 5612 5613 if (req->enable) { 5614 /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */ 5615 val = (u64)req->ipsec_cfg0.tt << 44 | 5616 (u64)req->ipsec_cfg0.tag_const << 20 | 5617 (u64)req->ipsec_cfg0.sa_pow2_size << 16 | 5618 req->ipsec_cfg0.lenm1_max; 5619 5620 if (blkaddr == BLKADDR_NIX1) 5621 val |= BIT_ULL(46); 5622 5623 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val); 5624 5625 /* Set SA_IDX_W and SA_IDX_MAX */ 5626 val = (u64)req->ipsec_cfg1.sa_idx_w << 32 | 5627 req->ipsec_cfg1.sa_idx_max; 5628 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val); 5629 5630 /* Set SA base address */ 5631 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), 5632 req->sa_base_addr); 5633 } else { 5634 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0); 5635 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0); 5636 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), 5637 0x0); 5638 
} 5639 5640 return 0; 5641 } 5642 5643 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) 5644 { 5645 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); 5646 5647 /* overwrite vf mac address with default_mac */ 5648 if (from_vf) 5649 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac); 5650 } 5651 5652 /* NIX ingress policers or bandwidth profiles APIs */ 5653 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr) 5654 { 5655 struct npc_lt_def_cfg defs, *ltdefs; 5656 5657 ltdefs = &defs; 5658 memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg)); 5659 5660 /* Extract PCP and DEI fields from outer VLAN from byte offset 5661 * 2 from the start of LB_PTR (ie TAG). 5662 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN 5663 * fields are considered when 'Tunnel enable' is set in profile. 5664 */ 5665 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI, 5666 (2UL << 12) | (ltdefs->ovlan.lid << 8) | 5667 (ltdefs->ovlan.ltype_match << 4) | 5668 ltdefs->ovlan.ltype_mask); 5669 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI, 5670 (2UL << 12) | (ltdefs->ivlan.lid << 8) | 5671 (ltdefs->ivlan.ltype_match << 4) | 5672 ltdefs->ivlan.ltype_mask); 5673 5674 /* DSCP field in outer and tunneled IPv4 packets */ 5675 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP, 5676 (1UL << 12) | (ltdefs->rx_oip4.lid << 8) | 5677 (ltdefs->rx_oip4.ltype_match << 4) | 5678 ltdefs->rx_oip4.ltype_mask); 5679 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP, 5680 (1UL << 12) | (ltdefs->rx_iip4.lid << 8) | 5681 (ltdefs->rx_iip4.ltype_match << 4) | 5682 ltdefs->rx_iip4.ltype_mask); 5683 5684 /* DSCP field (traffic class) in outer and tunneled IPv6 packets */ 5685 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP, 5686 (1UL << 11) | (ltdefs->rx_oip6.lid << 8) | 5687 (ltdefs->rx_oip6.ltype_match << 4) | 5688 ltdefs->rx_oip6.ltype_mask); 5689 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP, 5690 (1UL << 11) | (ltdefs->rx_iip6.lid << 8) | 5691 (ltdefs->rx_iip6.ltype_match << 4) | 5692 ltdefs->rx_iip6.ltype_mask); 5693 } 5694 5695 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw, 5696 int layer, int prof_idx) 5697 { 5698 struct nix_cn10k_aq_enq_req aq_req; 5699 int rc; 5700 5701 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 5702 5703 aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14); 5704 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; 5705 aq_req.op = NIX_AQ_INSTOP_INIT; 5706 5707 /* Context is all zeros, submit to AQ */ 5708 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 5709 (struct nix_aq_enq_req *)&aq_req, NULL); 5710 if (rc) 5711 dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n", 5712 layer, prof_idx); 5713 return rc; 5714 } 5715 5716 static int nix_setup_ipolicers(struct rvu *rvu, 5717 struct nix_hw *nix_hw, int blkaddr) 5718 { 5719 struct rvu_hwinfo *hw = rvu->hw; 5720 struct nix_ipolicer *ipolicer; 5721 int err, layer, prof_idx; 5722 u64 cfg; 5723 5724 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 5725 if (!(cfg & BIT_ULL(61))) { 5726 hw->cap.ipolicer = false; 5727 return 0; 5728 } 5729 5730 hw->cap.ipolicer = true; 5731 nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS, 5732 sizeof(*ipolicer), GFP_KERNEL); 5733 if (!nix_hw->ipolicer) 5734 return -ENOMEM; 5735 5736 cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST); 5737 5738 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5739 ipolicer = &nix_hw->ipolicer[layer]; 5740 switch (layer) { 5741 case BAND_PROF_LEAF_LAYER: 5742 ipolicer->band_prof.max = cfg & 
0XFFFF; 5743 break; 5744 case BAND_PROF_MID_LAYER: 5745 ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF; 5746 break; 5747 case BAND_PROF_TOP_LAYER: 5748 ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF; 5749 break; 5750 } 5751 5752 if (!ipolicer->band_prof.max) 5753 continue; 5754 5755 err = rvu_alloc_bitmap(&ipolicer->band_prof); 5756 if (err) 5757 return err; 5758 5759 ipolicer->pfvf_map = devm_kcalloc(rvu->dev, 5760 ipolicer->band_prof.max, 5761 sizeof(u16), GFP_KERNEL); 5762 if (!ipolicer->pfvf_map) 5763 return -ENOMEM; 5764 5765 ipolicer->match_id = devm_kcalloc(rvu->dev, 5766 ipolicer->band_prof.max, 5767 sizeof(u16), GFP_KERNEL); 5768 if (!ipolicer->match_id) 5769 return -ENOMEM; 5770 5771 for (prof_idx = 0; 5772 prof_idx < ipolicer->band_prof.max; prof_idx++) { 5773 /* Set AF as current owner for INIT ops to succeed */ 5774 ipolicer->pfvf_map[prof_idx] = 0x00; 5775 5776 /* There is no enable bit in the profile context, 5777 * so no context disable. So let's INIT them here 5778 * so that PF/VF later on have to just do WRITE to 5779 * setup policer rates and config. 5780 */ 5781 err = nix_init_policer_context(rvu, nix_hw, 5782 layer, prof_idx); 5783 if (err) 5784 return err; 5785 } 5786 5787 /* Allocate memory for maintaining ref_counts for MID level 5788 * profiles, this will be needed for leaf layer profiles' 5789 * aggregation. 5790 */ 5791 if (layer != BAND_PROF_MID_LAYER) 5792 continue; 5793 5794 ipolicer->ref_count = devm_kcalloc(rvu->dev, 5795 ipolicer->band_prof.max, 5796 sizeof(u16), GFP_KERNEL); 5797 if (!ipolicer->ref_count) 5798 return -ENOMEM; 5799 } 5800 5801 /* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */ 5802 rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19); 5803 5804 nix_config_rx_pkt_policer_precolor(rvu, blkaddr); 5805 5806 return 0; 5807 } 5808 5809 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw) 5810 { 5811 struct nix_ipolicer *ipolicer; 5812 int layer; 5813 5814 if (!rvu->hw->cap.ipolicer) 5815 return; 5816 5817 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5818 ipolicer = &nix_hw->ipolicer[layer]; 5819 5820 if (!ipolicer->band_prof.max) 5821 continue; 5822 5823 kfree(ipolicer->band_prof.bmap); 5824 } 5825 } 5826 5827 #define NIX_BW_PROF_HI_MASK GENMASK(10, 7) 5828 5829 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, 5830 struct nix_hw *nix_hw, u16 pcifunc) 5831 { 5832 struct nix_ipolicer *ipolicer; 5833 int layer, hi_layer, prof_idx; 5834 5835 /* Bits [15:14] in profile index represent layer */ 5836 layer = (req->qidx >> 14) & 0x03; 5837 prof_idx = req->qidx & 0x3FFF; 5838 5839 ipolicer = &nix_hw->ipolicer[layer]; 5840 if (prof_idx >= ipolicer->band_prof.max) 5841 return -EINVAL; 5842 5843 /* Check if the profile is allocated to the requesting PCIFUNC or not 5844 * with the exception of AF. AF is allowed to read and update contexts. 5845 */ 5846 if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc) 5847 return -EINVAL; 5848 5849 /* If this profile is linked to higher layer profile then check 5850 * if that profile is also allocated to the requesting PCIFUNC 5851 * or not. 5852 */ 5853 if (!req->prof.hl_en) 5854 return 0; 5855 5856 /* Leaf layer profile can link only to mid layer and 5857 * mid layer to top layer. 
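* The combined hardware profile index is 11 bits wide: prof.band_prof_id
* carries bits [6:0] and prof.band_prof_id_h carries bits [10:7]
* (NIX_BW_PROF_HI_MASK); e.g. index 0x2A5 splits into band_prof_id 0x25
* and band_prof_id_h 0x5. The linked higher layer profile's index is
* reassembled below with FIELD_PREP() before the range and ownership
* checks are applied.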
5858 */ 5859 if (layer == BAND_PROF_LEAF_LAYER) 5860 hi_layer = BAND_PROF_MID_LAYER; 5861 else if (layer == BAND_PROF_MID_LAYER) 5862 hi_layer = BAND_PROF_TOP_LAYER; 5863 else 5864 return -EINVAL; 5865 5866 ipolicer = &nix_hw->ipolicer[hi_layer]; 5867 prof_idx = FIELD_PREP(NIX_BW_PROF_HI_MASK, req->prof.band_prof_id_h); 5868 prof_idx |= req->prof.band_prof_id; 5869 if (prof_idx >= ipolicer->band_prof.max || 5870 ipolicer->pfvf_map[prof_idx] != pcifunc) 5871 return -EINVAL; 5872 5873 return 0; 5874 } 5875 5876 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu, 5877 struct nix_bandprof_alloc_req *req, 5878 struct nix_bandprof_alloc_rsp *rsp) 5879 { 5880 int blkaddr, layer, prof, idx, err; 5881 u16 pcifunc = req->hdr.pcifunc; 5882 struct nix_ipolicer *ipolicer; 5883 struct nix_hw *nix_hw; 5884 5885 if (!rvu->hw->cap.ipolicer) 5886 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5887 5888 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 5889 if (err) 5890 return err; 5891 5892 mutex_lock(&rvu->rsrc_lock); 5893 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5894 if (layer == BAND_PROF_INVAL_LAYER) 5895 continue; 5896 if (!req->prof_count[layer]) 5897 continue; 5898 5899 ipolicer = &nix_hw->ipolicer[layer]; 5900 for (idx = 0; idx < req->prof_count[layer]; idx++) { 5901 /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */ 5902 if (idx == MAX_BANDPROF_PER_PFFUNC) 5903 break; 5904 5905 prof = rvu_alloc_rsrc(&ipolicer->band_prof); 5906 if (prof < 0) 5907 break; 5908 rsp->prof_count[layer]++; 5909 rsp->prof_idx[layer][idx] = prof; 5910 ipolicer->pfvf_map[prof] = pcifunc; 5911 } 5912 } 5913 mutex_unlock(&rvu->rsrc_lock); 5914 return 0; 5915 } 5916 5917 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc) 5918 { 5919 int blkaddr, layer, prof_idx, err; 5920 struct nix_ipolicer *ipolicer; 5921 struct nix_hw *nix_hw; 5922 5923 if (!rvu->hw->cap.ipolicer) 5924 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5925 5926 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 5927 if (err) 5928 return err; 5929 5930 mutex_lock(&rvu->rsrc_lock); 5931 /* Free all the profiles allocated to the PCIFUNC */ 5932 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5933 if (layer == BAND_PROF_INVAL_LAYER) 5934 continue; 5935 ipolicer = &nix_hw->ipolicer[layer]; 5936 5937 for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) { 5938 if (ipolicer->pfvf_map[prof_idx] != pcifunc) 5939 continue; 5940 5941 /* Clear ratelimit aggregation, if any */ 5942 if (layer == BAND_PROF_LEAF_LAYER && 5943 ipolicer->match_id[prof_idx]) 5944 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); 5945 5946 ipolicer->pfvf_map[prof_idx] = 0x00; 5947 ipolicer->match_id[prof_idx] = 0; 5948 rvu_free_rsrc(&ipolicer->band_prof, prof_idx); 5949 } 5950 } 5951 mutex_unlock(&rvu->rsrc_lock); 5952 return 0; 5953 } 5954 5955 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu, 5956 struct nix_bandprof_free_req *req, 5957 struct msg_rsp *rsp) 5958 { 5959 int blkaddr, layer, prof_idx, idx, err; 5960 u16 pcifunc = req->hdr.pcifunc; 5961 struct nix_ipolicer *ipolicer; 5962 struct nix_hw *nix_hw; 5963 5964 if (req->free_all) 5965 return nix_free_all_bandprof(rvu, pcifunc); 5966 5967 if (!rvu->hw->cap.ipolicer) 5968 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5969 5970 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 5971 if (err) 5972 return err; 5973 5974 mutex_lock(&rvu->rsrc_lock); 5975 /* Free the requested profile indices */ 5976 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5977 if (layer == 
BAND_PROF_INVAL_LAYER) 5978 continue; 5979 if (!req->prof_count[layer]) 5980 continue; 5981 5982 ipolicer = &nix_hw->ipolicer[layer]; 5983 for (idx = 0; idx < req->prof_count[layer]; idx++) { 5984 if (idx == MAX_BANDPROF_PER_PFFUNC) 5985 break; 5986 prof_idx = req->prof_idx[layer][idx]; 5987 if (prof_idx >= ipolicer->band_prof.max || 5988 ipolicer->pfvf_map[prof_idx] != pcifunc) 5989 continue; 5990 5991 /* Clear ratelimit aggregation, if any */ 5992 if (layer == BAND_PROF_LEAF_LAYER && 5993 ipolicer->match_id[prof_idx]) 5994 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); 5995 5996 ipolicer->pfvf_map[prof_idx] = 0x00; 5997 ipolicer->match_id[prof_idx] = 0; 5998 rvu_free_rsrc(&ipolicer->band_prof, prof_idx); 5999 } 6000 } 6001 mutex_unlock(&rvu->rsrc_lock); 6002 return 0; 6003 } 6004 6005 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw, 6006 struct nix_cn10k_aq_enq_req *aq_req, 6007 struct nix_cn10k_aq_enq_rsp *aq_rsp, 6008 u16 pcifunc, u8 ctype, u32 qidx) 6009 { 6010 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 6011 aq_req->hdr.pcifunc = pcifunc; 6012 aq_req->ctype = ctype; 6013 aq_req->op = NIX_AQ_INSTOP_READ; 6014 aq_req->qidx = qidx; 6015 6016 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 6017 (struct nix_aq_enq_req *)aq_req, 6018 (struct nix_aq_enq_rsp *)aq_rsp); 6019 } 6020 6021 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu, 6022 struct nix_hw *nix_hw, 6023 struct nix_cn10k_aq_enq_req *aq_req, 6024 struct nix_cn10k_aq_enq_rsp *aq_rsp, 6025 u32 leaf_prof, u16 mid_prof) 6026 { 6027 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 6028 aq_req->hdr.pcifunc = 0x00; 6029 aq_req->ctype = NIX_AQ_CTYPE_BANDPROF; 6030 aq_req->op = NIX_AQ_INSTOP_WRITE; 6031 aq_req->qidx = leaf_prof; 6032 6033 aq_req->prof.band_prof_id = mid_prof & 0x7F; 6034 aq_req->prof_mask.band_prof_id = GENMASK(6, 0); 6035 aq_req->prof.band_prof_id_h = FIELD_GET(NIX_BW_PROF_HI_MASK, mid_prof); 6036 aq_req->prof_mask.band_prof_id_h = GENMASK(3, 0); 6037 aq_req->prof.hl_en = 1; 6038 aq_req->prof_mask.hl_en = 1; 6039 6040 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 6041 (struct nix_aq_enq_req *)aq_req, 6042 (struct nix_aq_enq_rsp *)aq_rsp); 6043 } 6044 6045 #define NIX_RQ_PROF_HI_MASK GENMASK(13, 10) 6046 6047 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, 6048 u16 rq_idx, u16 match_id) 6049 { 6050 int leaf_prof, mid_prof, leaf_match; 6051 struct nix_cn10k_aq_enq_req aq_req; 6052 struct nix_cn10k_aq_enq_rsp aq_rsp; 6053 struct nix_ipolicer *ipolicer; 6054 struct nix_hw *nix_hw; 6055 int blkaddr, idx, rc; 6056 6057 if (!rvu->hw->cap.ipolicer) 6058 return 0; 6059 6060 rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 6061 if (rc) 6062 return rc; 6063 6064 /* Fetch the RQ's context to see if policing is enabled */ 6065 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc, 6066 NIX_AQ_CTYPE_RQ, rq_idx); 6067 if (rc) { 6068 dev_err(rvu->dev, 6069 "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n", 6070 __func__, rq_idx, pcifunc); 6071 return rc; 6072 } 6073 6074 if (!aq_rsp.rq.policer_ena) 6075 return 0; 6076 6077 /* Get the bandwidth profile ID mapped to this RQ */ 6078 leaf_prof = FIELD_PREP(NIX_RQ_PROF_HI_MASK, aq_rsp.rq.band_prof_id_h); 6079 leaf_prof |= aq_rsp.rq.band_prof_id; 6080 6081 ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER]; 6082 ipolicer->match_id[leaf_prof] = match_id; 6083 6084 /* Check if any other leaf profile is marked with same match_id */ 6085 for (idx = 0; idx < ipolicer->band_prof.max; idx++) { 6086 if (idx == leaf_prof) 6087 
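/* Skip the leaf profile tagged just above; only other leaf profiles carrying the same match_id are of interest */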
continue; 6088 if (ipolicer->match_id[idx] != match_id) 6089 continue; 6090 6091 leaf_match = idx; 6092 break; 6093 } 6094 6095 if (idx == ipolicer->band_prof.max) 6096 return 0; 6097 6098 /* Fetch the matching profile's context to check if it's already 6099 * mapped to a mid level profile. 6100 */ 6101 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, 6102 NIX_AQ_CTYPE_BANDPROF, leaf_match); 6103 if (rc) { 6104 dev_err(rvu->dev, 6105 "%s: Failed to fetch context of leaf profile %d\n", 6106 __func__, leaf_match); 6107 return rc; 6108 } 6109 6110 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; 6111 if (aq_rsp.prof.hl_en) { 6112 /* Get the mid layer profile index and map the leaf_prof 6113 * index to it as well, so that flows steered to different 6114 * RQs but marked with the same match_id are rate limited 6115 * in an aggregate fashion. 6116 */ 6117 mid_prof = FIELD_PREP(NIX_BW_PROF_HI_MASK, 6118 aq_rsp.prof.band_prof_id_h); 6119 mid_prof |= aq_rsp.prof.band_prof_id; 6120 6121 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, 6122 &aq_req, &aq_rsp, 6123 leaf_prof, mid_prof); 6124 if (rc) { 6125 dev_err(rvu->dev, 6126 "%s: Failed to map leaf(%d) and mid(%d) profiles\n", 6127 __func__, leaf_prof, mid_prof); 6128 goto exit; 6129 } 6130 6131 mutex_lock(&rvu->rsrc_lock); 6132 ipolicer->ref_count[mid_prof]++; 6133 mutex_unlock(&rvu->rsrc_lock); 6134 goto exit; 6135 } 6136 6137 /* Allocate a mid layer profile and 6138 * map both 'leaf_prof' and 'leaf_match' profiles to it. 6139 */ 6140 mutex_lock(&rvu->rsrc_lock); 6141 mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof); 6142 if (mid_prof < 0) { 6143 dev_err(rvu->dev, 6144 "%s: Unable to allocate mid layer profile\n", __func__); 6145 mutex_unlock(&rvu->rsrc_lock); 6146 goto exit; 6147 } 6148 mutex_unlock(&rvu->rsrc_lock); 6149 ipolicer->pfvf_map[mid_prof] = 0x00; 6150 ipolicer->ref_count[mid_prof] = 0; 6151 6152 /* Initialize mid layer profile same as 'leaf_prof' */ 6153 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, 6154 NIX_AQ_CTYPE_BANDPROF, leaf_prof); 6155 if (rc) { 6156 dev_err(rvu->dev, 6157 "%s: Failed to fetch context of leaf profile %d\n", 6158 __func__, leaf_prof); 6159 goto exit; 6160 } 6161 6162 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 6163 aq_req.hdr.pcifunc = 0x00; 6164 aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14); 6165 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; 6166 aq_req.op = NIX_AQ_INSTOP_WRITE; 6167 memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s)); 6168 memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s)); 6169 /* Clear higher layer enable bit in the mid profile, just in case */ 6170 aq_req.prof.hl_en = 0; 6171 aq_req.prof_mask.hl_en = 1; 6172 6173 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 6174 (struct nix_aq_enq_req *)&aq_req, NULL); 6175 if (rc) { 6176 dev_err(rvu->dev, 6177 "%s: Failed to INIT context of mid layer profile %d\n", 6178 __func__, mid_prof); 6179 goto exit; 6180 } 6181 6182 /* Map both leaf profiles to this mid layer profile */ 6183 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, 6184 &aq_req, &aq_rsp, 6185 leaf_prof, mid_prof); 6186 if (rc) { 6187 dev_err(rvu->dev, 6188 "%s: Failed to map leaf(%d) and mid(%d) profiles\n", 6189 __func__, leaf_prof, mid_prof); 6190 goto exit; 6191 } 6192 6193 mutex_lock(&rvu->rsrc_lock); 6194 ipolicer->ref_count[mid_prof]++; 6195 mutex_unlock(&rvu->rsrc_lock); 6196 6197 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, 6198 &aq_req, &aq_rsp, 6199 leaf_match, mid_prof); 6200 if (rc) { 6201
dev_err(rvu->dev, 6202 "%s: Failed to map leaf(%d) and mid(%d) profiles\n", 6203 __func__, leaf_match, mid_prof); 6204 ipolicer->ref_count[mid_prof]--; 6205 goto exit; 6206 } 6207 6208 mutex_lock(&rvu->rsrc_lock); 6209 ipolicer->ref_count[mid_prof]++; 6210 mutex_unlock(&rvu->rsrc_lock); 6211 6212 exit: 6213 return rc; 6214 } 6215 6216 /* Called with mutex rsrc_lock */ 6217 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, 6218 u32 leaf_prof) 6219 { 6220 struct nix_cn10k_aq_enq_req aq_req; 6221 struct nix_cn10k_aq_enq_rsp aq_rsp; 6222 struct nix_ipolicer *ipolicer; 6223 u16 mid_prof; 6224 int rc; 6225 6226 mutex_unlock(&rvu->rsrc_lock); 6227 6228 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, 6229 NIX_AQ_CTYPE_BANDPROF, leaf_prof); 6230 6231 mutex_lock(&rvu->rsrc_lock); 6232 if (rc) { 6233 dev_err(rvu->dev, 6234 "%s: Failed to fetch context of leaf profile %d\n", 6235 __func__, leaf_prof); 6236 return; 6237 } 6238 6239 if (!aq_rsp.prof.hl_en) 6240 return; 6241 6242 mid_prof = FIELD_PREP(NIX_BW_PROF_HI_MASK, aq_rsp.prof.band_prof_id_h); 6243 mid_prof |= aq_rsp.prof.band_prof_id; 6244 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; 6245 ipolicer->ref_count[mid_prof]--; 6246 /* If ref_count is zero, free mid layer profile */ 6247 if (!ipolicer->ref_count[mid_prof]) { 6248 ipolicer->pfvf_map[mid_prof] = 0x00; 6249 rvu_free_rsrc(&ipolicer->band_prof, mid_prof); 6250 } 6251 } 6252 6253 int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req, 6254 struct nix_bandprof_get_hwinfo_rsp *rsp) 6255 { 6256 struct nix_ipolicer *ipolicer; 6257 int blkaddr, layer, err; 6258 struct nix_hw *nix_hw; 6259 u64 tu; 6260 6261 if (!rvu->hw->cap.ipolicer) 6262 return NIX_AF_ERR_IPOLICER_NOTSUPP; 6263 6264 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); 6265 if (err) 6266 return err; 6267 6268 /* Return number of bandwidth profiles free at each layer */ 6269 mutex_lock(&rvu->rsrc_lock); 6270 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 6271 if (layer == BAND_PROF_INVAL_LAYER) 6272 continue; 6273 6274 ipolicer = &nix_hw->ipolicer[layer]; 6275 rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof); 6276 } 6277 mutex_unlock(&rvu->rsrc_lock); 6278 6279 /* Set the policer timeunit in nanosec */ 6280 tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0); 6281 rsp->policer_timeunit = (tu + 1) * 100; 6282 6283 return 0; 6284 } 6285 6286 static struct nix_mcast_grp_elem *rvu_nix_mcast_find_grp_elem(struct nix_mcast_grp *mcast_grp, 6287 u32 mcast_grp_idx) 6288 { 6289 struct nix_mcast_grp_elem *iter; 6290 bool is_found = false; 6291 6292 list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list) { 6293 if (iter->mcast_grp_idx == mcast_grp_idx) { 6294 is_found = true; 6295 break; 6296 } 6297 } 6298 6299 if (is_found) 6300 return iter; 6301 6302 return NULL; 6303 } 6304 6305 int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx) 6306 { 6307 struct nix_mcast_grp_elem *elem; 6308 struct nix_mcast_grp *mcast_grp; 6309 struct nix_hw *nix_hw; 6310 int blkaddr, ret; 6311 6312 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 6313 nix_hw = get_nix_hw(rvu->hw, blkaddr); 6314 if (!nix_hw) 6315 return NIX_AF_ERR_INVALID_NIXBLK; 6316 6317 mcast_grp = &nix_hw->mcast_grp; 6318 mutex_lock(&mcast_grp->mcast_grp_lock); 6319 elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx); 6320 if (!elem) 6321 ret = NIX_AF_ERR_INVALID_MCAST_GRP; 6322 else 6323 ret = elem->mce_start_index; 6324 
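/* ret now holds either the group's MCE start index or the
 * NIX_AF_ERR_INVALID_MCAST_GRP error code; drop the group lock
 * before handing it back to the caller.
 */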
6325 mutex_unlock(&mcast_grp->mcast_grp_lock); 6326 return ret; 6327 } 6328 6329 void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc) 6330 { 6331 struct nix_mcast_grp_destroy_req dreq = { 0 }; 6332 struct nix_mcast_grp_update_req ureq = { 0 }; 6333 struct nix_mcast_grp_update_rsp ursp = { 0 }; 6334 struct nix_mcast_grp_elem *elem, *tmp; 6335 struct nix_mcast_grp *mcast_grp; 6336 struct nix_hw *nix_hw; 6337 int blkaddr; 6338 6339 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 6340 nix_hw = get_nix_hw(rvu->hw, blkaddr); 6341 if (!nix_hw) 6342 return; 6343 6344 mcast_grp = &nix_hw->mcast_grp; 6345 6346 mutex_lock(&mcast_grp->mcast_grp_lock); 6347 list_for_each_entry_safe(elem, tmp, &mcast_grp->mcast_grp_head, list) { 6348 struct nix_mce_list *mce_list; 6349 struct hlist_node *tmp; 6350 struct mce *mce; 6351 6352 /* If the pcifunc which created the multicast/mirror 6353 * group received an FLR, then delete the entire group. 6354 */ 6355 if (elem->pcifunc == pcifunc) { 6356 /* Delete group */ 6357 dreq.hdr.pcifunc = elem->pcifunc; 6358 dreq.mcast_grp_idx = elem->mcast_grp_idx; 6359 dreq.is_af = 1; 6360 rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL); 6361 continue; 6362 } 6363 6364 /* Iterate the group elements and delete the element which 6365 * received the FLR. 6366 */ 6367 mce_list = &elem->mcast_mce_list; 6368 hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) { 6369 if (mce->pcifunc == pcifunc) { 6370 ureq.hdr.pcifunc = pcifunc; 6371 ureq.num_mce_entry = 1; 6372 ureq.mcast_grp_idx = elem->mcast_grp_idx; 6373 ureq.op = NIX_MCAST_OP_DEL_ENTRY; 6374 ureq.pcifunc[0] = pcifunc; 6375 ureq.is_af = 1; 6376 rvu_mbox_handler_nix_mcast_grp_update(rvu, &ureq, &ursp); 6377 break; 6378 } 6379 } 6380 } 6381 mutex_unlock(&mcast_grp->mcast_grp_lock); 6382 } 6383 6384 int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc, 6385 u32 mcast_grp_idx, u16 mcam_index) 6386 { 6387 struct nix_mcast_grp_elem *elem; 6388 struct nix_mcast_grp *mcast_grp; 6389 struct nix_hw *nix_hw; 6390 int blkaddr, ret = 0; 6391 6392 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 6393 nix_hw = get_nix_hw(rvu->hw, blkaddr); 6394 if (!nix_hw) 6395 return NIX_AF_ERR_INVALID_NIXBLK; 6396 6397 mcast_grp = &nix_hw->mcast_grp; 6398 mutex_lock(&mcast_grp->mcast_grp_lock); 6399 elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx); 6400 if (!elem) 6401 ret = NIX_AF_ERR_INVALID_MCAST_GRP; 6402 else 6403 elem->mcam_index = mcam_index; 6404 6405 mutex_unlock(&mcast_grp->mcast_grp_lock); 6406 return ret; 6407 } 6408 6409 int rvu_mbox_handler_nix_mcast_grp_create(struct rvu *rvu, 6410 struct nix_mcast_grp_create_req *req, 6411 struct nix_mcast_grp_create_rsp *rsp) 6412 { 6413 struct nix_mcast_grp_elem *elem; 6414 struct nix_mcast_grp *mcast_grp; 6415 struct nix_hw *nix_hw; 6416 int blkaddr, err; 6417 6418 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); 6419 if (err) 6420 return err; 6421 6422 mcast_grp = &nix_hw->mcast_grp; 6423 elem = kzalloc(sizeof(*elem), GFP_KERNEL); 6424 if (!elem) 6425 return -ENOMEM; 6426 6427 INIT_HLIST_HEAD(&elem->mcast_mce_list.head); 6428 elem->mcam_index = -1; 6429 elem->mce_start_index = -1; 6430 elem->pcifunc = req->hdr.pcifunc; 6431 elem->dir = req->dir; 6432 elem->mcast_grp_idx = mcast_grp->next_grp_index++; 6433 6434 mutex_lock(&mcast_grp->mcast_grp_lock); 6435 list_add_tail(&elem->list, &mcast_grp->mcast_grp_head); 6436 mcast_grp->count++; 6437 mutex_unlock(&mcast_grp->mcast_grp_lock); 6438 6439 rsp->mcast_grp_idx = elem->mcast_grp_idx; 6440
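/* A new group starts out empty: MCE entries are attached later via the
 * NIX_MCAST_GRP update mailbox and the MCAM entry via
 * rvu_nix_mcast_update_mcam_entry().
 */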
return 0; 6441 } 6442 6443 int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu, 6444 struct nix_mcast_grp_destroy_req *req, 6445 struct msg_rsp *rsp) 6446 { 6447 struct npc_delete_flow_req uninstall_req = { 0 }; 6448 struct npc_delete_flow_rsp uninstall_rsp = { 0 }; 6449 struct nix_mcast_grp_elem *elem; 6450 struct nix_mcast_grp *mcast_grp; 6451 int blkaddr, err, ret = 0; 6452 struct nix_mcast *mcast; 6453 struct nix_hw *nix_hw; 6454 6455 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); 6456 if (err) 6457 return err; 6458 6459 mcast_grp = &nix_hw->mcast_grp; 6460 6461 /* If AF itself is requesting the deletion, 6462 * it already holds the lock 6463 */ 6464 if (!req->is_af) 6465 mutex_lock(&mcast_grp->mcast_grp_lock); 6466 6467 elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx); 6468 if (!elem) { 6469 ret = NIX_AF_ERR_INVALID_MCAST_GRP; 6470 goto unlock_grp; 6471 } 6472 6473 /* If no mce entries are associated with the group 6474 * then just remove it from the global list. 6475 */ 6476 if (!elem->mcast_mce_list.count) 6477 goto delete_grp; 6478 6479 /* Delete the associated mcam entry and 6480 * remove all mce entries from the group 6481 */ 6482 mcast = &nix_hw->mcast; 6483 mutex_lock(&mcast->mce_lock); 6484 if (elem->mcam_index != -1) { 6485 uninstall_req.hdr.pcifunc = req->hdr.pcifunc; 6486 uninstall_req.entry = elem->mcam_index; 6487 rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp); 6488 } 6489 6490 nix_free_mce_list(mcast, elem->mcast_mce_list.count, 6491 elem->mce_start_index, elem->dir); 6492 nix_delete_mcast_mce_list(&elem->mcast_mce_list); 6493 mutex_unlock(&mcast->mce_lock); 6494 6495 delete_grp: 6496 list_del(&elem->list); 6497 kfree(elem); 6498 mcast_grp->count--; 6499 6500 unlock_grp: 6501 if (!req->is_af) 6502 mutex_unlock(&mcast_grp->mcast_grp_lock); 6503 6504 return ret; 6505 } 6506 6507 int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu, 6508 struct nix_mcast_grp_update_req *req, 6509 struct nix_mcast_grp_update_rsp *rsp) 6510 { 6511 struct nix_mcast_grp_destroy_req dreq = { 0 }; 6512 struct npc_mcam *mcam = &rvu->hw->mcam; 6513 struct nix_mcast_grp_elem *elem; 6514 struct nix_mcast_grp *mcast_grp; 6515 int blkaddr, err, npc_blkaddr; 6516 u16 prev_count, new_count; 6517 struct nix_mcast *mcast; 6518 struct nix_hw *nix_hw; 6519 int i, ret; 6520 6521 if (!req->num_mce_entry) 6522 return 0; 6523 6524 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); 6525 if (err) 6526 return err; 6527 6528 mcast_grp = &nix_hw->mcast_grp; 6529 6530 /* If AF itself is requesting the update, 6531 * it already holds the lock 6532 */ 6533 if (!req->is_af) 6534 mutex_lock(&mcast_grp->mcast_grp_lock); 6535 6536 elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx); 6537 if (!elem) { 6538 ret = NIX_AF_ERR_INVALID_MCAST_GRP; 6539 goto unlock_grp; 6540 } 6541 6542 /* If any pcifunc matches the group owner's pcifunc, then we can 6543 * delete the entire group.
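* (this mirrors the FLR cleanup in rvu_nix_mcast_flr_free_entries(),
* where an FLR on the owner also tears down the whole group)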
6544 */ 6545 if (req->op == NIX_MCAST_OP_DEL_ENTRY) { 6546 for (i = 0; i < req->num_mce_entry; i++) { 6547 if (elem->pcifunc == req->pcifunc[i]) { 6548 /* Delete group */ 6549 dreq.hdr.pcifunc = elem->pcifunc; 6550 dreq.mcast_grp_idx = elem->mcast_grp_idx; 6551 dreq.is_af = 1; 6552 rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL); 6553 ret = 0; 6554 goto unlock_grp; 6555 } 6556 } 6557 } 6558 6559 mcast = &nix_hw->mcast; 6560 mutex_lock(&mcast->mce_lock); 6561 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 6562 if (elem->mcam_index != -1) 6563 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, false); 6564 6565 prev_count = elem->mcast_mce_list.count; 6566 if (req->op == NIX_MCAST_OP_ADD_ENTRY) { 6567 new_count = prev_count + req->num_mce_entry; 6568 if (prev_count) 6569 nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir); 6570 6571 elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir); 6572 6573 /* It is possible not to get contiguous memory */ 6574 if (elem->mce_start_index < 0) { 6575 if (elem->mcam_index != -1) { 6576 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, 6577 elem->mcam_index, true); 6578 ret = NIX_AF_ERR_NON_CONTIG_MCE_LIST; 6579 goto unlock_mce; 6580 } 6581 } 6582 6583 ret = nix_add_mce_list_entry(rvu, nix_hw, elem, req); 6584 if (ret) { 6585 nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir); 6586 if (prev_count) 6587 elem->mce_start_index = nix_alloc_mce_list(mcast, 6588 prev_count, 6589 elem->dir); 6590 6591 if (elem->mcam_index != -1) 6592 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, 6593 elem->mcam_index, true); 6594 6595 goto unlock_mce; 6596 } 6597 } else { 6598 if (!prev_count || prev_count < req->num_mce_entry) { 6599 if (elem->mcam_index != -1) 6600 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, 6601 elem->mcam_index, true); 6602 ret = NIX_AF_ERR_INVALID_MCAST_DEL_REQ; 6603 goto unlock_mce; 6604 } 6605 6606 nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir); 6607 new_count = prev_count - req->num_mce_entry; 6608 elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir); 6609 ret = nix_del_mce_list_entry(rvu, nix_hw, elem, req); 6610 if (ret) { 6611 nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir); 6612 elem->mce_start_index = nix_alloc_mce_list(mcast, prev_count, elem->dir); 6613 if (elem->mcam_index != -1) 6614 npc_enable_mcam_entry(rvu, mcam, 6615 npc_blkaddr, 6616 elem->mcam_index, 6617 true); 6618 6619 goto unlock_mce; 6620 } 6621 } 6622 6623 if (elem->mcam_index == -1) { 6624 rsp->mce_start_index = elem->mce_start_index; 6625 ret = 0; 6626 goto unlock_mce; 6627 } 6628 6629 nix_mcast_update_action(rvu, elem); 6630 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, true); 6631 rsp->mce_start_index = elem->mce_start_index; 6632 ret = 0; 6633 6634 unlock_mce: 6635 mutex_unlock(&mcast->mce_lock); 6636 6637 unlock_grp: 6638 if (!req->is_af) 6639 mutex_unlock(&mcast_grp->mcast_grp_lock); 6640 6641 return ret; 6642 } 6643 6644 /* On CN10K and older silicons, hardware may incorrectly 6645 * assert XOFF on certain channels. Issue a write on NIX_AF_RX_CHANX_CFG 6646 * to broadcast XON on those channels.
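* The value read from NIX_AF_RX_CHANX_CFG(0) is written back unchanged;
* the write access itself is what retriggers the XON broadcast.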
6647 */ 6648 void rvu_block_bcast_xon(struct rvu *rvu, int blkaddr) 6649 { 6650 struct rvu_block *block = &rvu->hw->block[blkaddr]; 6651 u64 cfg; 6652 6653 if (!block->implemented || is_cn20k(rvu->pdev)) 6654 return; 6655 6656 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(0)); 6657 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(0), cfg); 6658 } 6659