// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */

#include <linux/unaligned.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/pcs-lynx.h>
#include "enetc_ierb.h"
#include "enetc_pf_common.h"

#define ENETC_DRV_NAME_STR "ENETC PF driver"

static void enetc_pf_get_primary_mac_addr(struct enetc_hw *hw, int si, u8 *addr)
{
	u32 upper = __raw_readl(hw->port + ENETC_PSIPMAR0(si));
	u16 lower = __raw_readw(hw->port + ENETC_PSIPMAR1(si));

	put_unaligned_le32(upper, addr);
	put_unaligned_le16(lower, addr + 4);
}

static void enetc_pf_set_primary_mac_addr(struct enetc_hw *hw, int si,
					  const u8 *addr)
{
	u32 upper = get_unaligned_le32(addr);
	u16 lower = get_unaligned_le16(addr + 4);

	__raw_writel(upper, hw->port + ENETC_PSIPMAR0(si));
	__raw_writew(lower, hw->port + ENETC_PSIPMAR1(si));
}

static struct phylink_pcs *enetc_pf_create_pcs(struct enetc_pf *pf,
					       struct mii_bus *bus)
{
	return lynx_pcs_create_mdiodev(bus, 0);
}

static void enetc_pf_destroy_pcs(struct phylink_pcs *pcs)
{
	lynx_pcs_destroy(pcs);
}

static void enetc_set_vlan_promisc(struct enetc_hw *hw, char si_map)
{
	u32 val = enetc_port_rd(hw, ENETC_PSIPVMR);

	val &= ~ENETC_PSIPVMR_SET_VP(ENETC_VLAN_PROMISC_MAP_ALL);
	enetc_port_wr(hw, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VP(si_map) | val);
}

static void enetc_enable_si_vlan_promisc(struct enetc_pf *pf, int si_idx)
{
	pf->vlan_promisc_simap |= BIT(si_idx);
	enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap);
}

static void enetc_disable_si_vlan_promisc(struct enetc_pf *pf, int si_idx)
{
	pf->vlan_promisc_simap &= ~BIT(si_idx);
	enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap);
}

static void enetc_set_isol_vlan(struct enetc_hw *hw, int si, u16 vlan, u8 qos)
{
	u32 val = 0;

	if (vlan)
		val = ENETC_PSIVLAN_EN | ENETC_PSIVLAN_SET_QOS(qos) | vlan;

	enetc_port_wr(hw, ENETC_PSIVLANR(si), val);
}

static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter,
					 const unsigned char *addr)
{
	/* add exact match addr */
	ether_addr_copy(filter->mac_addr, addr);
	filter->mac_addr_cnt++;
}

static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type)
{
	bool err = si->errata & ENETC_ERR_UCMCSWP;

	if (type == UC) {
		enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), 0);
		enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), 0);
	} else { /* MC */
		enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), 0);
		enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), 0);
	}
}

static void enetc_set_mac_ht_flt(struct enetc_si *si, int si_idx, int type,
				 unsigned long hash)
{
	bool err = si->errata & ENETC_ERR_UCMCSWP;

	if (type == UC) {
		enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err),
			      lower_32_bits(hash));
		enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx),
			      upper_32_bits(hash));
	} else { /* MC */
		enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err),
			      lower_32_bits(hash));
		enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx),
			      upper_32_bits(hash));
	}
}
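
/* Flush the PF's software MAC filter state to hardware: a single unicast
 * address is programmed as an exact-match entry, otherwise the 64-bit hash
 * table filter is used and any stale exact-match entry is cleared.
 */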
static void enetc_sync_mac_filters(struct enetc_pf *pf)
{
	struct enetc_mac_filter *f = pf->mac_filter;
	struct enetc_si *si = pf->si;
	int i, pos;

	pos = EMETC_MAC_ADDR_FILT_RES;

	for (i = 0; i < MADDR_TYPE; i++, f++) {
		bool em = (f->mac_addr_cnt == 1) && (i == UC);
		bool clear = !f->mac_addr_cnt;

		if (clear) {
			if (i == UC)
				enetc_clear_mac_flt_entry(si, pos);

			enetc_clear_mac_ht_flt(si, 0, i);
			continue;
		}

		/* exact match filter */
		if (em) {
			int err;

			enetc_clear_mac_ht_flt(si, 0, UC);

			err = enetc_set_mac_flt_entry(si, pos, f->mac_addr,
						      BIT(0));
			if (!err)
				continue;

			/* fallback to HT filtering */
			dev_warn(&si->pdev->dev, "fallback to HT filt (%d)\n",
				 err);
		}

		/* hash table filter, clear EM filter for UC entries */
		if (i == UC)
			enetc_clear_mac_flt_entry(si, pos);

		enetc_set_mac_ht_flt(si, 0, i, *f->mac_hash_table);
	}
}

static void enetc_pf_set_rx_mode(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_pf *pf = enetc_si_priv(priv->si);
	struct enetc_hw *hw = &priv->si->hw;
	bool uprom = false, mprom = false;
	struct enetc_mac_filter *filter;
	struct netdev_hw_addr *ha;
	u32 psipmr = 0;
	bool em;

	if (ndev->flags & IFF_PROMISC) {
		/* enable promisc mode for SI0 (PF) */
		psipmr = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
		uprom = true;
		mprom = true;
	} else if (ndev->flags & IFF_ALLMULTI) {
		/* enable multi cast promisc mode for SI0 (PF) */
		psipmr = ENETC_PSIPMR_SET_MP(0);
		mprom = true;
	}

	/* first 2 filter entries belong to PF */
	if (!uprom) {
		/* Update unicast filters */
		filter = &pf->mac_filter[UC];
		enetc_reset_mac_addr_filter(filter);

		em = (netdev_uc_count(ndev) == 1);
		netdev_for_each_uc_addr(ha, ndev) {
			if (em) {
				enetc_add_mac_addr_em_filter(filter, ha->addr);
				break;
			}

			enetc_add_mac_addr_ht_filter(filter, ha->addr);
		}
	}

	if (!mprom) {
		/* Update multicast filters */
		filter = &pf->mac_filter[MC];
		enetc_reset_mac_addr_filter(filter);

		netdev_for_each_mc_addr(ha, ndev) {
			if (!is_multicast_ether_addr(ha->addr))
				continue;

			enetc_add_mac_addr_ht_filter(filter, ha->addr);
		}
	}

	if (!uprom || !mprom)
		/* update PF entries */
		enetc_sync_mac_filters(pf);

	psipmr |= enetc_port_rd(hw, ENETC_PSIPMR) &
		  ~(ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0));
	enetc_port_wr(hw, ENETC_PSIPMR, psipmr);
}
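
/* Toggle MAC-level loopback for the NETIF_F_LOOPBACK feature: RGMII links
 * use the RLP bit in PM0_IF_MODE, other (serial) interfaces use the XGLP
 * and PHY_TX_EN bits in PM0_CMD_CFG.
 */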
static void enetc_set_loopback(struct net_device *ndev, bool en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_si *si = priv->si;
	u32 reg;

	reg = enetc_port_mac_rd(si, ENETC_PM0_IF_MODE);
	if (reg & ENETC_PM0_IFM_RG) {
		/* RGMII mode */
		reg = (reg & ~ENETC_PM0_IFM_RLP) |
		      (en ? ENETC_PM0_IFM_RLP : 0);
		enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, reg);
	} else {
		/* assume SGMII mode */
		reg = enetc_port_mac_rd(si, ENETC_PM0_CMD_CFG);
		reg = (reg & ~ENETC_PM0_CMD_XGLP) |
		      (en ? ENETC_PM0_CMD_XGLP : 0);
		reg = (reg & ~ENETC_PM0_CMD_PHY_TX_EN) |
		      (en ? ENETC_PM0_CMD_PHY_TX_EN : 0);
		enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, reg);
	}
}

static int enetc_pf_set_vf_mac(struct net_device *ndev, int vf, u8 *mac)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_pf *pf = enetc_si_priv(priv->si);
	struct enetc_vf_state *vf_state;

	if (vf >= pf->total_vfs)
		return -EINVAL;

	if (!is_valid_ether_addr(mac))
		return -EADDRNOTAVAIL;

	vf_state = &pf->vf_state[vf];
	vf_state->flags |= ENETC_VF_FLAG_PF_SET_MAC;
	enetc_pf_set_primary_mac_addr(&priv->si->hw, vf + 1, mac);
	return 0;
}

static int enetc_pf_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan,
				u8 qos, __be16 proto)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_pf *pf = enetc_si_priv(priv->si);

	if (priv->si->errata & ENETC_ERR_VLAN_ISOL)
		return -EOPNOTSUPP;

	if (vf >= pf->total_vfs)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		/* only C-tags supported for now */
		return -EPROTONOSUPPORT;

	enetc_set_isol_vlan(&priv->si->hw, vf + 1, vlan, qos);
	return 0;
}

static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_pf *pf = enetc_si_priv(priv->si);
	u32 cfgr;

	if (vf >= pf->total_vfs)
		return -EINVAL;

	cfgr = enetc_port_rd(&priv->si->hw, ENETC_PSICFGR0(vf + 1));
	cfgr = (cfgr & ~ENETC_PSICFGR0_ASE) | (en ? ENETC_PSICFGR0_ASE : 0);
	enetc_port_wr(&priv->si->hw, ENETC_PSICFGR0(vf + 1), cfgr);

	return 0;
}

static void enetc_port_assign_rfs_entries(struct enetc_si *si)
{
	struct enetc_pf *pf = enetc_si_priv(si);
	struct enetc_hw *hw = &si->hw;
	int num_entries, vf_entries, i;
	u32 val;

	/* split RFS entries between functions */
	val = enetc_port_rd(hw, ENETC_PRFSCAPR);
	num_entries = ENETC_PRFSCAPR_GET_NUM_RFS(val);
	vf_entries = num_entries / (pf->total_vfs + 1);

	for (i = 0; i < pf->total_vfs; i++)
		enetc_port_wr(hw, ENETC_PSIRFSCFGR(i + 1), vf_entries);
	enetc_port_wr(hw, ENETC_PSIRFSCFGR(0),
		      num_entries - vf_entries * pf->total_vfs);

	/* enable RFS on port */
	enetc_port_wr(hw, ENETC_PRFSMR, ENETC_PRFSMR_RFSE);
}

static void enetc_port_get_caps(struct enetc_si *si)
{
	struct enetc_hw *hw = &si->hw;
	u32 val;

	val = enetc_port_rd(hw, ENETC_PCAPR0);

	if (val & ENETC_PCAPR0_QBV)
		si->hw_features |= ENETC_SI_F_QBV;

	if (val & ENETC_PCAPR0_QBU)
		si->hw_features |= ENETC_SI_F_QBU;

	if (val & ENETC_PCAPR0_PSFP)
		si->hw_features |= ENETC_SI_F_PSFP;
}
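
/* Partition the port's Rx/Tx BD rings between SI0 (the PF) and the VF SIs,
 * and apply the default per-SI and port-level VLAN settings.
 */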
static void enetc_port_si_configure(struct enetc_si *si)
{
	struct enetc_pf *pf = enetc_si_priv(si);
	struct enetc_hw *hw = &si->hw;
	int num_rings, i;
	u32 val;

	enetc_port_get_caps(si);

	val = enetc_port_rd(hw, ENETC_PCAPR0);
	num_rings = min(ENETC_PCAPR0_RXBDR(val), ENETC_PCAPR0_TXBDR(val));

	val = ENETC_PSICFGR0_SET_TXBDR(ENETC_PF_NUM_RINGS);
	val |= ENETC_PSICFGR0_SET_RXBDR(ENETC_PF_NUM_RINGS);

	if (unlikely(num_rings < ENETC_PF_NUM_RINGS)) {
		val = ENETC_PSICFGR0_SET_TXBDR(num_rings);
		val |= ENETC_PSICFGR0_SET_RXBDR(num_rings);

		dev_warn(&si->pdev->dev, "Found %d rings, expected %d!\n",
			 num_rings, ENETC_PF_NUM_RINGS);

		num_rings = 0;
	}

	/* Add default one-time settings for SI0 (PF) */
	val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);

	enetc_port_wr(hw, ENETC_PSICFGR0(0), val);

	if (num_rings)
		num_rings -= ENETC_PF_NUM_RINGS;

	/* Configure the SIs for each available VF */
	val = ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
	val |= ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE;

	if (num_rings) {
		num_rings /= pf->total_vfs;
		val |= ENETC_PSICFGR0_SET_TXBDR(num_rings);
		val |= ENETC_PSICFGR0_SET_RXBDR(num_rings);
	}

	for (i = 0; i < pf->total_vfs; i++)
		enetc_port_wr(hw, ENETC_PSICFGR0(i + 1), val);

	/* Port level VLAN settings */
	val = ENETC_PVCLCTR_OVTPIDL(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
	enetc_port_wr(hw, ENETC_PVCLCTR, val);
	/* use outer tag for VLAN filtering */
	enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
}

void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *max_sdu)
{
	int tc;

	for (tc = 0; tc < 8; tc++) {
		u32 val = ENETC_MAC_MAXFRM_SIZE;

		if (max_sdu[tc])
			val = max_sdu[tc] + VLAN_ETH_HLEN;

		enetc_port_wr(hw, ENETC_PTCMSDUR(tc), val);
	}
}

void enetc_reset_ptcmsdur(struct enetc_hw *hw)
{
	int tc;

	for (tc = 0; tc < 8; tc++)
		enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);
}

static void enetc_configure_port_mac(struct enetc_si *si)
{
	struct enetc_hw *hw = &si->hw;

	enetc_port_mac_wr(si, ENETC_PM0_MAXFRM,
			  ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));

	enetc_reset_ptcmsdur(hw);

	enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
			  ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);

	/* On LS1028A, the MAC RX FIFO defaults to 2, which is too high
	 * and may lead to RX lock-up under traffic. Set it to 1 instead,
	 * as recommended by the hardware team.
	 */
	enetc_port_mac_wr(si, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
}

static void enetc_mac_config(struct enetc_si *si, phy_interface_t phy_mode)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(phy_mode)) {
		val = enetc_port_mac_rd(si, ENETC_PM0_IF_MODE);
		val &= ~(ENETC_PM0_IFM_EN_AUTO | ENETC_PM0_IFM_IFMODE_MASK);
		val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG;
		enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, val);
	}

	if (phy_mode == PHY_INTERFACE_MODE_USXGMII) {
		val = ENETC_PM0_IFM_FULL_DPX | ENETC_PM0_IFM_IFMODE_XGMII;
		enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, val);
	}
}

static void enetc_mac_enable(struct enetc_si *si, bool en)
{
	u32 val = enetc_port_mac_rd(si, ENETC_PM0_CMD_CFG);

	val &= ~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
	val |= en ? (ENETC_PM0_TX_EN | ENETC_PM0_RX_EN) : 0;

	enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, val);
}
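
/* One-time port bring-up at probe: program MAC defaults, partition SI
 * resources, set the RSS key, split the RFS entries, default all SIs to
 * VLAN promiscuous mode, then enable the port.
 */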
static void enetc_configure_port(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;

	enetc_configure_port_mac(pf->si);

	enetc_port_si_configure(pf->si);

	/* set up hash key */
	enetc_set_default_rss_key(pf);

	/* split up RFS entries */
	enetc_port_assign_rfs_entries(pf->si);

	/* enforce VLAN promisc mode for all SIs */
	pf->vlan_promisc_simap = ENETC_VLAN_PROMISC_MAP_ALL;
	enetc_set_vlan_promisc(hw, pf->vlan_promisc_simap);

	enetc_port_wr(hw, ENETC_PSIPMR, 0);

	/* enable port */
	enetc_port_wr(hw, ENETC_PMR, ENETC_PMR_EN);
}

/* Messaging */
static u16 enetc_msg_pf_set_vf_primary_mac_addr(struct enetc_pf *pf,
						int vf_id)
{
	struct enetc_vf_state *vf_state = &pf->vf_state[vf_id];
	struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id];
	struct enetc_msg_cmd_set_primary_mac *cmd;
	struct device *dev = &pf->si->pdev->dev;
	u16 cmd_id;
	char *addr;

	cmd = (struct enetc_msg_cmd_set_primary_mac *)msg->vaddr;
	cmd_id = cmd->header.id;
	if (cmd_id != ENETC_MSG_CMD_MNG_ADD)
		return ENETC_MSG_CMD_STATUS_FAIL;

	addr = cmd->mac.sa_data;
	if (vf_state->flags & ENETC_VF_FLAG_PF_SET_MAC)
		dev_warn(dev, "Attempt to override PF set mac addr for VF%d\n",
			 vf_id);
	else
		enetc_pf_set_primary_mac_addr(&pf->si->hw, vf_id + 1, addr);

	return ENETC_MSG_CMD_STATUS_OK;
}

void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int vf_id, u16 *status)
{
	struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id];
	struct device *dev = &pf->si->pdev->dev;
	struct enetc_msg_cmd_header *cmd_hdr;
	u16 cmd_type;

	*status = ENETC_MSG_CMD_STATUS_OK;
	cmd_hdr = (struct enetc_msg_cmd_header *)msg->vaddr;
	cmd_type = cmd_hdr->type;

	switch (cmd_type) {
	case ENETC_MSG_CMD_MNG_MAC:
		*status = enetc_msg_pf_set_vf_primary_mac_addr(pf, vf_id);
		break;
	default:
		dev_err(dev, "command not supported (cmd_type: 0x%x)\n",
			cmd_type);
	}
}
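
/* PCI sriov_configure hook: the PSI-to-VSI message rings are set up before
 * the VFs are enabled so that VF requests (e.g. primary MAC changes) can be
 * served; num_vfs == 0 tears everything back down. Returns the number of
 * VFs enabled or a negative error code.
 */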
#ifdef CONFIG_PCI_IOV
static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct enetc_si *si = pci_get_drvdata(pdev);
	struct enetc_pf *pf = enetc_si_priv(si);
	int err;

	if (!num_vfs) {
		enetc_msg_psi_free(pf);
		pf->num_vfs = 0;
		pci_disable_sriov(pdev);
	} else {
		pf->num_vfs = num_vfs;

		err = enetc_msg_psi_init(pf);
		if (err) {
			dev_err(&pdev->dev, "enetc_msg_psi_init (%d)\n", err);
			goto err_msg_psi;
		}

		err = pci_enable_sriov(pdev, num_vfs);
		if (err) {
			dev_err(&pdev->dev, "pci_enable_sriov err %d\n", err);
			goto err_en_sriov;
		}
	}

	return num_vfs;

err_en_sriov:
	enetc_msg_psi_free(pf);
err_msg_psi:
	pf->num_vfs = 0;

	return err;
}
#else
#define enetc_sriov_configure(pdev, num_vfs) (void)0
#endif

static int enetc_pf_set_features(struct net_device *ndev,
				 netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int err;

	if (changed & NETIF_F_HW_TC) {
		err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
		if (err)
			return err;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
		struct enetc_pf *pf = enetc_si_priv(priv->si);

		if (!!(features & NETIF_F_HW_VLAN_CTAG_FILTER))
			enetc_disable_si_vlan_promisc(pf, 0);
		else
			enetc_enable_si_vlan_promisc(pf, 0);
	}

	if (changed & NETIF_F_LOOPBACK)
		enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));

	enetc_set_features(ndev, features);

	return 0;
}

static int enetc_pf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			     void *type_data)
{
	switch (type) {
	case TC_QUERY_CAPS:
		return enetc_qos_query_caps(ndev, type_data);
	case TC_SETUP_QDISC_MQPRIO:
		return enetc_setup_tc_mqprio(ndev, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return enetc_setup_tc_taprio(ndev, type_data);
	case TC_SETUP_QDISC_CBS:
		return enetc_setup_tc_cbs(ndev, type_data);
	case TC_SETUP_QDISC_ETF:
		return enetc_setup_tc_txtime(ndev, type_data);
	case TC_SETUP_BLOCK:
		return enetc_setup_tc_psfp(ndev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops enetc_ndev_ops = {
	.ndo_open		= enetc_open,
	.ndo_stop		= enetc_close,
	.ndo_start_xmit		= enetc_xmit,
	.ndo_get_stats		= enetc_get_stats,
	.ndo_set_mac_address	= enetc_pf_set_mac_addr,
	.ndo_set_rx_mode	= enetc_pf_set_rx_mode,
	.ndo_vlan_rx_add_vid	= enetc_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enetc_vlan_rx_del_vid,
	.ndo_set_vf_mac		= enetc_pf_set_vf_mac,
	.ndo_set_vf_vlan	= enetc_pf_set_vf_vlan,
	.ndo_set_vf_spoofchk	= enetc_pf_set_vf_spoofchk,
	.ndo_set_features	= enetc_pf_set_features,
	.ndo_eth_ioctl		= enetc_ioctl,
	.ndo_setup_tc		= enetc_pf_setup_tc,
	.ndo_bpf		= enetc_setup_bpf,
	.ndo_xdp_xmit		= enetc_xdp_xmit,
	.ndo_hwtstamp_get	= enetc_hwtstamp_get,
	.ndo_hwtstamp_set	= enetc_hwtstamp_set,
};

static struct phylink_pcs *
enetc_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
{
	struct enetc_pf *pf = phylink_to_enetc_pf(config);

	return pf->pcs;
}

static void enetc_pl_mac_config(struct phylink_config *config,
				unsigned int mode,
				const struct phylink_link_state *state)
{
	struct enetc_pf *pf = phylink_to_enetc_pf(config);

	enetc_mac_config(pf->si, state->interface);
}

static void enetc_force_rgmii_mac(struct enetc_si *si, int speed, int duplex)
{
	u32 old_val, val;

	old_val = val = enetc_port_mac_rd(si, ENETC_PM0_IF_MODE);

	if (speed == SPEED_1000) {
		val &= ~ENETC_PM0_IFM_SSP_MASK;
		val |= ENETC_PM0_IFM_SSP_1000;
	} else if (speed == SPEED_100) {
		val &= ~ENETC_PM0_IFM_SSP_MASK;
		val |= ENETC_PM0_IFM_SSP_100;
	} else if (speed == SPEED_10) {
		val &= ~ENETC_PM0_IFM_SSP_MASK;
		val |= ENETC_PM0_IFM_SSP_10;
	}

	if (duplex == DUPLEX_FULL)
		val |= ENETC_PM0_IFM_FULL_DPX;
	else
		val &= ~ENETC_PM0_IFM_FULL_DPX;

	if (val == old_val)
		return;

	enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, val);
}
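
/* phylink mac_link_up: force the RGMII speed/duplex when not using in-band
 * autoneg, set the CM bit on the Rx rings and the PAUSE quanta/thresholds
 * to match the negotiated flow control, then enable the MAC.
 */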
static void enetc_pl_mac_link_up(struct phylink_config *config,
				 struct phy_device *phy, unsigned int mode,
				 phy_interface_t interface, int speed,
				 int duplex, bool tx_pause, bool rx_pause)
{
	struct enetc_pf *pf = phylink_to_enetc_pf(config);
	u32 pause_off_thresh = 0, pause_on_thresh = 0;
	u32 init_quanta = 0, refresh_quanta = 0;
	struct enetc_hw *hw = &pf->si->hw;
	struct enetc_si *si = pf->si;
	struct enetc_ndev_priv *priv;
	u32 rbmr, cmd_cfg;
	int idx;

	priv = netdev_priv(pf->si->ndev);

	if (pf->si->hw_features & ENETC_SI_F_QBV)
		enetc_sched_speed_set(priv, speed);

	if (!phylink_autoneg_inband(mode) &&
	    phy_interface_mode_is_rgmii(interface))
		enetc_force_rgmii_mac(si, speed, duplex);

	/* Flow control */
	for (idx = 0; idx < priv->num_rx_rings; idx++) {
		rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR);

		if (tx_pause)
			rbmr |= ENETC_RBMR_CM;
		else
			rbmr &= ~ENETC_RBMR_CM;

		enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
	}

	if (tx_pause) {
		/* When the port first enters congestion, send a PAUSE request
		 * with the maximum number of quanta. When the port exits
		 * congestion, it will automatically send a PAUSE frame with
		 * zero quanta.
		 */
		init_quanta = 0xffff;

		/* Also, set up the refresh timer to send follow-up PAUSE
		 * frames at half the quanta value, in case the congestion
		 * condition persists.
		 */
		refresh_quanta = 0xffff / 2;

		/* Start emitting PAUSE frames when 3 large frames (or more
		 * smaller frames) have accumulated in the FIFO waiting to be
		 * DMAed to the RX ring.
		 */
		pause_on_thresh = 3 * ENETC_MAC_MAXFRM_SIZE;
		pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE;
	}

	enetc_port_mac_wr(si, ENETC_PM0_PAUSE_QUANTA, init_quanta);
	enetc_port_mac_wr(si, ENETC_PM0_PAUSE_THRESH, refresh_quanta);
	enetc_port_wr(hw, ENETC_PPAUONTR, pause_on_thresh);
	enetc_port_wr(hw, ENETC_PPAUOFFTR, pause_off_thresh);

	cmd_cfg = enetc_port_mac_rd(si, ENETC_PM0_CMD_CFG);

	if (rx_pause)
		cmd_cfg &= ~ENETC_PM0_PAUSE_IGN;
	else
		cmd_cfg |= ENETC_PM0_PAUSE_IGN;

	enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, cmd_cfg);

	enetc_mac_enable(si, true);

	if (si->hw_features & ENETC_SI_F_QBU)
		enetc_mm_link_state_update(priv, true);
}

static void enetc_pl_mac_link_down(struct phylink_config *config,
				   unsigned int mode,
				   phy_interface_t interface)
{
	struct enetc_pf *pf = phylink_to_enetc_pf(config);
	struct enetc_si *si = pf->si;
	struct enetc_ndev_priv *priv;

	priv = netdev_priv(si->ndev);

	if (si->hw_features & ENETC_SI_F_QBU)
		enetc_mm_link_state_update(priv, false);

	enetc_mac_enable(si, false);
}

static const struct phylink_mac_ops enetc_mac_phylink_ops = {
	.mac_select_pcs = enetc_pl_mac_select_pcs,
	.mac_config = enetc_pl_mac_config,
	.mac_link_up = enetc_pl_mac_link_up,
	.mac_link_down = enetc_pl_mac_link_down,
};

/* Initialize the entire shared memory for the flow steering entries
 * of this port (PF + VFs)
 */
static int enetc_init_port_rfs_memory(struct enetc_si *si)
{
	struct enetc_cmd_rfse rfse = {0};
	struct enetc_hw *hw = &si->hw;
	int num_rfs, i, err = 0;
	u32 val;

	val = enetc_port_rd(hw, ENETC_PRFSCAPR);
	num_rfs = ENETC_PRFSCAPR_GET_NUM_RFS(val);

	for (i = 0; i < num_rfs; i++) {
		err = enetc_set_fs_entry(si, &rfse, i);
		if (err)
			break;
	}

	return err;
}

static int enetc_init_port_rss_memory(struct enetc_si *si)
{
	struct enetc_hw *hw = &si->hw;
	int num_rss, err;
	int *rss_table;
	u32 val;

	val = enetc_port_rd(hw, ENETC_PRSSCAPR);
	num_rss = ENETC_PRSSCAPR_GET_NUM_RSS(val);
	if (!num_rss)
		return 0;

	rss_table = kcalloc(num_rss, sizeof(*rss_table), GFP_KERNEL);
	if (!rss_table)
		return -ENOMEM;

	err = enetc_set_rss_table(si, rss_table, num_rss);

	kfree(rss_table);

	return err;
}
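
/* Register this PF with the ENETC IERB (integrated endpoint register block)
 * driver bound to the "fsl,ls1028a-enetc-ierb" node. Returns -EPROBE_DEFER
 * if the IERB platform device has not probed yet, and -ENODEV if the node
 * is missing or disabled.
 */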
static int enetc_pf_register_with_ierb(struct pci_dev *pdev)
{
	struct platform_device *ierb_pdev;
	struct device_node *ierb_node;

	ierb_node = of_find_compatible_node(NULL, NULL,
					    "fsl,ls1028a-enetc-ierb");
	if (!ierb_node || !of_device_is_available(ierb_node))
		return -ENODEV;

	ierb_pdev = of_find_device_by_node(ierb_node);
	of_node_put(ierb_node);

	if (!ierb_pdev)
		return -EPROBE_DEFER;

	return enetc_ierb_register_pf(ierb_pdev, pdev);
}

static const struct enetc_si_ops enetc_psi_ops = {
	.get_rss_table = enetc_get_rss_table,
	.set_rss_table = enetc_set_rss_table,
};

static struct enetc_si *enetc_psi_create(struct pci_dev *pdev)
{
	struct enetc_si *si;
	int err;

	err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(struct enetc_pf));
	if (err) {
		dev_err_probe(&pdev->dev, err, "PCI probing failed\n");
		goto out;
	}

	si = pci_get_drvdata(pdev);
	if (!si->hw.port || !si->hw.global) {
		err = -ENODEV;
		dev_err(&pdev->dev, "could not map PF space, probing a VF?\n");
		goto out_pci_remove;
	}

	si->revision = enetc_get_ip_revision(&si->hw);
	si->ops = &enetc_psi_ops;
	err = enetc_get_driver_data(si);
	if (err) {
		dev_err(&pdev->dev, "Could not get PF driver data\n");
		goto out_pci_remove;
	}

	err = enetc_setup_cbdr(&pdev->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE,
			       &si->cbd_ring);
	if (err)
		goto out_pci_remove;

	err = enetc_init_port_rfs_memory(si);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
		goto out_teardown_cbdr;
	}

	err = enetc_init_port_rss_memory(si);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
		goto out_teardown_cbdr;
	}

	return si;

out_teardown_cbdr:
	enetc_teardown_cbdr(&si->cbd_ring);
out_pci_remove:
	enetc_pci_remove(pdev);
out:
	return ERR_PTR(err);
}

static void enetc_psi_destroy(struct pci_dev *pdev)
{
	struct enetc_si *si = pci_get_drvdata(pdev);

	enetc_teardown_cbdr(&si->cbd_ring);
	enetc_pci_remove(pdev);
}

static const struct enetc_pf_ops enetc_pf_ops = {
	.set_si_primary_mac = enetc_pf_set_primary_mac_addr,
	.get_si_primary_mac = enetc_pf_get_primary_mac_addr,
	.create_pcs = enetc_pf_create_pcs,
	.destroy_pcs = enetc_pf_destroy_pcs,
	.enable_psfp = enetc_psfp_enable,
};
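
/* Probe: create the PF station interface (PCI setup plus control BD ring),
 * configure the port, then build the netdev with its rings, MSI-X vectors,
 * MDIO bus and phylink instance before registering it.
 */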
static int enetc_pf_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent)
{
	struct device_node *node = pdev->dev.of_node;
	struct enetc_ndev_priv *priv;
	struct net_device *ndev;
	struct enetc_si *si;
	struct enetc_pf *pf;
	int err;

	err = enetc_pf_register_with_ierb(pdev);
	if (err == -EPROBE_DEFER)
		return err;
	if (err)
		dev_warn(&pdev->dev,
			 "Could not register with IERB driver: %pe, please update the device tree\n",
			 ERR_PTR(err));

	si = enetc_psi_create(pdev);
	if (IS_ERR(si)) {
		err = PTR_ERR(si);
		goto err_psi_create;
	}

	pf = enetc_si_priv(si);
	pf->si = si;
	pf->ops = &enetc_pf_ops;

	pf->total_vfs = pci_sriov_get_totalvfs(pdev);
	if (pf->total_vfs) {
		pf->vf_state = kcalloc(pf->total_vfs, sizeof(struct enetc_vf_state),
				       GFP_KERNEL);
		if (!pf->vf_state) {
			err = -ENOMEM;
			goto err_alloc_vf_state;
		}
	}

	err = enetc_setup_mac_addresses(node, pf);
	if (err)
		goto err_setup_mac_addresses;

	enetc_configure_port(pf);

	enetc_get_si_caps(si);

	ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS);
	if (!ndev) {
		err = -ENOMEM;
		dev_err(&pdev->dev, "netdev creation failed\n");
		goto err_alloc_netdev;
	}

	enetc_pf_netdev_setup(si, ndev, &enetc_ndev_ops);

	priv = netdev_priv(ndev);

	mutex_init(&priv->mm_lock);

	enetc_init_si_rings_params(priv);

	err = enetc_alloc_si_resources(priv);
	if (err) {
		dev_err(&pdev->dev, "SI resource alloc failed\n");
		goto err_alloc_si_res;
	}

	err = enetc_configure_si(priv);
	if (err) {
		dev_err(&pdev->dev, "Failed to configure SI\n");
		goto err_config_si;
	}

	err = enetc_alloc_msix(priv);
	if (err) {
		dev_err(&pdev->dev, "MSIX alloc failed\n");
		goto err_alloc_msix;
	}

	err = of_get_phy_mode(node, &pf->if_mode);
	if (err) {
		dev_err(&pdev->dev, "Failed to read PHY mode\n");
		goto err_phy_mode;
	}

	err = enetc_mdiobus_create(pf, node);
	if (err)
		goto err_mdiobus_create;

	err = enetc_phylink_create(priv, node, &enetc_mac_phylink_ops);
	if (err)
		goto err_phylink_create;

	err = register_netdev(ndev);
	if (err)
		goto err_reg_netdev;

	return 0;

err_reg_netdev:
	enetc_phylink_destroy(priv);
err_phylink_create:
	enetc_mdiobus_destroy(pf);
err_mdiobus_create:
err_phy_mode:
	enetc_free_msix(priv);
err_config_si:
err_alloc_msix:
	enetc_free_si_resources(priv);
err_alloc_si_res:
	si->ndev = NULL;
	free_netdev(ndev);
err_alloc_netdev:
err_setup_mac_addresses:
	kfree(pf->vf_state);
err_alloc_vf_state:
	enetc_psi_destroy(pdev);
err_psi_create:
	return err;
}

static void enetc_pf_remove(struct pci_dev *pdev)
{
	struct enetc_si *si = pci_get_drvdata(pdev);
	struct enetc_pf *pf = enetc_si_priv(si);
	struct enetc_ndev_priv *priv;

	priv = netdev_priv(si->ndev);

	if (pf->num_vfs)
		enetc_sriov_configure(pdev, 0);

	unregister_netdev(si->ndev);

	enetc_phylink_destroy(priv);
	enetc_mdiobus_destroy(pf);

	enetc_free_msix(priv);

	enetc_free_si_resources(priv);

	free_netdev(si->ndev);
	kfree(pf->vf_state);

	enetc_psi_destroy(pdev);
}

static void enetc_fixup_clear_rss_rfs(struct pci_dev *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct enetc_si *si;

	/* Only apply quirk for disabled functions. For the ones
	 * that are enabled, enetc_pf_probe() will apply it.
	 */
	if (node && of_device_is_available(node))
		return;

	si = enetc_psi_create(pdev);
	if (!IS_ERR(si))
		enetc_psi_destroy(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF,
			enetc_fixup_clear_rss_rfs);

static const struct pci_device_id enetc_pf_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF) },
	{ 0, } /* End of table. */
};
MODULE_DEVICE_TABLE(pci, enetc_pf_id_table);

static struct pci_driver enetc_pf_driver = {
	.name = KBUILD_MODNAME,
	.id_table = enetc_pf_id_table,
	.probe = enetc_pf_probe,
	.remove = enetc_pf_remove,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = enetc_sriov_configure,
#endif
};
module_pci_driver(enetc_pf_driver);

MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
MODULE_LICENSE("Dual BSD/GPL");