// SPDX-License-Identifier: GPL-2.0

/* Texas Instruments ICSSG Ethernet Driver
 *
 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 *
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/etherdevice.h>
#include <linux/genalloc.h>
#include <linux/if_hsr.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/property.h>
#include <linux/remoteproc/pruss.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <net/switchdev.h>

#include "icssg_prueth.h"
#include "icssg_mii_rt.h"
#include "icssg_switchdev.h"
#include "../k3-cppi-desc-pool.h"

#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"

#define DEFAULT_VID		1
#define DEFAULT_PORT_MASK	1
#define DEFAULT_UNTAG_MASK	1

#define NETIF_PRUETH_HSR_OFFLOAD_FEATURES	(NETIF_F_HW_HSR_FWD | \
						 NETIF_F_HW_HSR_DUP | \
						 NETIF_F_HW_HSR_TAG_INS | \
						 NETIF_F_HW_HSR_TAG_RM)

/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE		BIT(24)

static int emac_get_tx_ts(struct prueth_emac *emac,
			  struct emac_tx_ts_response *rsp)
{
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	int addr;

	addr = icssg_queue_pop(prueth, slice == 0 ?
			       ICSSG_TS_POP_SLICE0 : ICSSG_TS_POP_SLICE1);
	if (addr < 0)
		return addr;

	memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
	/* return buffer back to the pool */
	icssg_queue_push(prueth, slice == 0 ?
			 ICSSG_TS_PUSH_SLICE0 : ICSSG_TS_PUSH_SLICE1, addr);

	return 0;
}

static void tx_ts_work(struct prueth_emac *emac)
{
	struct skb_shared_hwtstamps ssh;
	struct emac_tx_ts_response tsr;
	struct sk_buff *skb;
	int ret = 0;
	u32 hi_sw;
	u64 ns;

	/* There may be more than one pending request */
	while (1) {
		ret = emac_get_tx_ts(emac, &tsr);
		if (ret) /* nothing more */
			break;

		if (tsr.cookie >= PRUETH_MAX_TX_TS_REQUESTS ||
		    !emac->tx_ts_skb[tsr.cookie]) {
			netdev_err(emac->ndev, "Invalid TX TS cookie 0x%x\n",
				   tsr.cookie);
			break;
		}

		skb = emac->tx_ts_skb[tsr.cookie];
		emac->tx_ts_skb[tsr.cookie] = NULL;	/* free slot */
		if (!skb) {
			netdev_err(emac->ndev, "Driver Bug! got NULL skb\n");
			break;
		}

		hi_sw = readl(emac->prueth->shram.va +
			      TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
		ns = icssg_ts_to_ns(hi_sw, tsr.hi_ts, tsr.lo_ts,
				    IEP_DEFAULT_CYCLE_TIME_NS);

		memset(&ssh, 0, sizeof(ssh));
		ssh.hwtstamp = ns_to_ktime(ns);

		skb_tstamp_tx(skb, &ssh);
		dev_consume_skb_any(skb);

		if (atomic_dec_and_test(&emac->tx_ts_pending))	/* no more? */
			break;
	}
}
static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
{
	struct prueth_emac *emac = dev_id;

	/* currently only TX timestamp is being returned */
	tx_ts_work(emac);

	return IRQ_HANDLED;
}

static struct icssg_firmwares icssg_hsr_firmwares[] = {
	{
		.pru = "ti-pruss/am65x-sr2-pru0-pruhsr-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu0-pruhsr-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru0-pruhsr-fw.elf",
	},
	{
		.pru = "ti-pruss/am65x-sr2-pru1-pruhsr-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu1-pruhsr-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru1-pruhsr-fw.elf",
	}
};

static struct icssg_firmwares icssg_switch_firmwares[] = {
	{
		.pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu0-prusw-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru0-prusw-fw.elf",
	},
	{
		.pru = "ti-pruss/am65x-sr2-pru1-prusw-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu1-prusw-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru1-prusw-fw.elf",
	}
};

static struct icssg_firmwares icssg_emac_firmwares[] = {
	{
		.pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu0-prueth-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru0-prueth-fw.elf",
	},
	{
		.pru = "ti-pruss/am65x-sr2-pru1-prueth-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu1-prueth-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru1-prueth-fw.elf",
	}
};

static int prueth_start(struct rproc *rproc, const char *fw_name)
{
	int ret;

	ret = rproc_set_firmware(rproc, fw_name);
	if (ret)
		return ret;
	return rproc_boot(rproc);
}

static void prueth_shutdown(struct rproc *rproc)
{
	rproc_shutdown(rproc);
}

static int prueth_emac_start(struct prueth *prueth)
{
	struct icssg_firmwares *firmwares;
	struct device *dev = prueth->dev;
	int ret, slice;

	if (prueth->is_switch_mode)
		firmwares = icssg_switch_firmwares;
	else if (prueth->is_hsr_offload_mode)
		firmwares = icssg_hsr_firmwares;
	else
		firmwares = icssg_emac_firmwares;

	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
		ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
		if (ret) {
			dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
			goto unwind_slices;
		}

		ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
		if (ret) {
			dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
			rproc_shutdown(prueth->pru[slice]);
			goto unwind_slices;
		}

		ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
		if (ret) {
			dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
			rproc_shutdown(prueth->rtu[slice]);
			rproc_shutdown(prueth->pru[slice]);
			goto unwind_slices;
		}
	}

	return 0;

unwind_slices:
	while (--slice >= 0) {
		prueth_shutdown(prueth->txpru[slice]);
		prueth_shutdown(prueth->rtu[slice]);
		prueth_shutdown(prueth->pru[slice]);
	}

	return ret;
}

static void prueth_emac_stop(struct prueth *prueth)
{
	int slice;

	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
		prueth_shutdown(prueth->txpru[slice]);
		prueth_shutdown(prueth->rtu[slice]);
		prueth_shutdown(prueth->pru[slice]);
	}
}

static int prueth_emac_common_start(struct prueth *prueth)
{
	struct prueth_emac *emac;
	int ret = 0;
	int slice;

	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
		return -EINVAL;

	/* clear SMEM and MSMC settings for all slices */
	memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
	memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);

	icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
	icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);

	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
		icssg_init_fw_offload_mode(prueth);
	else
		icssg_init_emac_mode(prueth);

	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
		emac = prueth->emac[slice];
		if (!emac)
			continue;
		ret = icssg_config(prueth, emac, slice);
		if (ret)
			goto disable_class;
	}

	ret = prueth_emac_start(prueth);
	if (ret)
		goto disable_class;

	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
	       prueth->emac[ICSS_SLICE1];
	ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
			    emac, IEP_DEFAULT_CYCLE_TIME_NS);
	if (ret) {
		dev_err(prueth->dev, "Failed to initialize IEP module\n");
		goto stop_pruss;
	}

	return 0;

stop_pruss:
	prueth_emac_stop(prueth);

disable_class:
	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);

	return ret;
}

static int prueth_emac_common_stop(struct prueth *prueth)
{
	struct prueth_emac *emac;

	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
		return -EINVAL;

	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);

	prueth_emac_stop(prueth);

	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
	       prueth->emac[ICSS_SLICE1];
	icss_iep_exit(emac->iep);

	return 0;
}

/* called back by PHY layer if there is a change in link state of hw port */
static void emac_adjust_link(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	struct prueth *prueth = emac->prueth;
	bool new_state = false;
	unsigned long flags;

	if (phydev->link) {
		/* check the mode of operation - full/half duplex */
		if (phydev->duplex != emac->duplex) {
			new_state = true;
			emac->duplex = phydev->duplex;
		}
		if (phydev->speed != emac->speed) {
			new_state = true;
			emac->speed = phydev->speed;
		}
		if (!emac->link) {
			new_state = true;
			emac->link = 1;
		}
	} else if (emac->link) {
		new_state = true;
		emac->link = 0;

		/* f/w should support 100 & 1000 */
		emac->speed = SPEED_1000;

		/* half duplex may not be supported by f/w */
		emac->duplex = DUPLEX_FULL;
	}

	if (new_state) {
		phy_print_status(phydev);

		/* update RGMII and MII configuration based on PHY negotiated
		 * values
		 */
		if (emac->link) {
			if (emac->duplex == DUPLEX_HALF)
				icssg_config_half_duplex(emac);
			/* Set the RGMII cfg for gig en and full duplex */
			icssg_update_rgmii_cfg(prueth->miig_rt, emac);

			/* update the Tx IPG based on 100M/1G speed */
			spin_lock_irqsave(&emac->lock, flags);
			icssg_config_ipg(emac);
			spin_unlock_irqrestore(&emac->lock, flags);
			icssg_config_set_speed(emac);
			icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);

		} else {
			icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
		}
	}

	if (emac->link) {
		/* reactivate the transmit queue */
		netif_tx_wake_all_queues(ndev);
	} else {
		netif_tx_stop_all_queues(ndev);
		prueth_cleanup_tx_ts(emac);
	}
}

static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
{
	struct prueth_emac *emac =
			container_of(timer, struct prueth_emac, rx_hrtimer);
	int rx_flow = PRUETH_RX_FLOW_DATA;

	enable_irq(emac->rx_chns.irq[rx_flow]);
	return HRTIMER_NORESTART;
}

static int emac_phy_connect(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	struct net_device *ndev = emac->ndev;
	/* connect PHY */
	ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
				      &emac_adjust_link, 0,
				      emac->phy_if);
	if (!ndev->phydev) {
		dev_err(prueth->dev, "couldn't connect to phy %s\n",
			emac->phy_node->full_name);
		return -ENODEV;
	}

	if (!emac->half_duplex) {
		dev_dbg(prueth->dev, "half duplex mode is not supported\n");
		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	}

	/* remove unsupported modes */
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);

	if (emac->phy_if == PHY_INTERFACE_MODE_MII)
		phy_set_max_speed(ndev->phydev, SPEED_100);

	return 0;
}

static u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts)
{
	u32 hi_rollover_count, hi_rollover_count_r;
	struct prueth_emac *emac = clockops_data;
	struct prueth *prueth = emac->prueth;
	void __iomem *fw_hi_r_count_addr;
	void __iomem *fw_count_hi_addr;
	u32 iepcount_hi, iepcount_hi_r;
	unsigned long flags;
	u32 iepcount_lo;
	u64 ts = 0;

	fw_count_hi_addr = prueth->shram.va + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET;
	fw_hi_r_count_addr = prueth->shram.va + TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET;

	local_irq_save(flags);
	do {
		iepcount_hi = icss_iep_get_count_hi(emac->iep);
		iepcount_hi += readl(fw_count_hi_addr);
		hi_rollover_count = readl(fw_hi_r_count_addr);
		ptp_read_system_prets(sts);
		iepcount_lo = icss_iep_get_count_low(emac->iep);
		ptp_read_system_postts(sts);

		iepcount_hi_r = icss_iep_get_count_hi(emac->iep);
		iepcount_hi_r += readl(fw_count_hi_addr);
		hi_rollover_count_r = readl(fw_hi_r_count_addr);
	} while ((iepcount_hi_r != iepcount_hi) ||
		 (hi_rollover_count != hi_rollover_count_r));
	local_irq_restore(flags);

	ts = ((u64)hi_rollover_count) << 23 | iepcount_hi;
	ts = ts * (u64)IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo;

	return ts;
}

static void prueth_iep_settime(void *clockops_data, u64 ns)
{
	struct icssg_setclock_desc __iomem *sc_descp;
	struct prueth_emac *emac = clockops_data;
	struct icssg_setclock_desc sc_desc;
	u64 cyclecount;
	u32 cycletime;
	int timeout;

	sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;

	cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
	cyclecount = ns / cycletime;

	memset(&sc_desc, 0, sizeof(sc_desc));
	sc_desc.margin = cycletime - 1000;
	sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
	sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
	sc_desc.iepcount_set = ns % cycletime;
	/* Count from 0 to (cycle time) - emac->iep->def_inc */
	sc_desc.CMP0_current = cycletime - emac->iep->def_inc;

	memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));

	writeb(1, &sc_descp->request);

	timeout = 5;	/* fw should take 2-3 ms */
	while (timeout--) {
		if (readb(&sc_descp->acknowledgment))
			return;

		usleep_range(500, 1000);
	}

	dev_err(emac->prueth->dev, "settime timeout\n");
}

static int prueth_perout_enable(void *clockops_data,
				struct ptp_perout_request *req, int on,
				u64 *cmp)
{
	struct prueth_emac *emac = clockops_data;
	u32 reduction_factor = 0, offset = 0;
	struct timespec64 ts;
	u64 current_cycle;
	u64 start_offset;
	u64 ns_period;

	if (!on)
		return 0;

	/* Any firmware specific stuff for PPS/PEROUT handling */
	ts.tv_sec = req->period.sec;
	ts.tv_nsec = req->period.nsec;
	ns_period = timespec64_to_ns(&ts);

	/* f/w doesn't support period less than cycle time */
	if (ns_period < IEP_DEFAULT_CYCLE_TIME_NS)
		return -ENXIO;

	reduction_factor = ns_period / IEP_DEFAULT_CYCLE_TIME_NS;
	offset = ns_period % IEP_DEFAULT_CYCLE_TIME_NS;

	/* f/w requires at least 1uS within a cycle so CMP
	 * can trigger after SYNC is enabled
	 */
	if (offset < 5 * NSEC_PER_USEC)
		offset = 5 * NSEC_PER_USEC;

	/* if offset is close to cycle time then we will miss
	 * the CMP event for last tick when IEP rolls over.
	 * In normal mode, IEP tick is 4ns.
	 * In slow compensation it could be 0ns or 8ns at
	 * every slow compensation cycle.
	 */
	if (offset > IEP_DEFAULT_CYCLE_TIME_NS - 8)
		offset = IEP_DEFAULT_CYCLE_TIME_NS - 8;

	/* we're in shadow mode so need to set upper 32-bits */
	*cmp = (u64)offset << 32;

	writel(reduction_factor, emac->prueth->shram.va +
	       TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);

	current_cycle = icssg_read_time(emac->prueth->shram.va +
					TIMESYNC_FW_WC_CYCLECOUNT_OFFSET);

	/* Rounding of current_cycle count to next second */
	start_offset = roundup(current_cycle, MSEC_PER_SEC);

	hi_lo_writeq(start_offset, emac->prueth->shram.va +
		     TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);

	return 0;
}

const struct icss_iep_clockops prueth_iep_clockops = {
	.settime = prueth_iep_settime,
	.gettime = prueth_iep_gettime,
	.perout_enable = prueth_perout_enable,
};

static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
{
	struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
	struct page_pool *pool = emac->rx_chns.pg_pool;
	int ret;

	ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
	if (ret)
		return ret;

	ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
	if (ret)
		xdp_rxq_info_unreg(rxq);

	return ret;
}

static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
{
	struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;

	if (!xdp_rxq_info_is_reg(rxq))
		return;

	xdp_rxq_info_unreg(rxq);
}

static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
{
	struct net_device *real_dev;
	struct prueth_emac *emac;
	int port_mask;
	u8 vlan_id;

	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
	emac = netdev_priv(real_dev);

	port_mask = BIT(emac->port_id) | icssg_fdb_lookup(emac, addr, vlan_id);
	icssg_fdb_add_del(emac, addr, vlan_id, port_mask, true);
	icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, true);

	return 0;
}

static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
{
	struct net_device *real_dev;
	struct prueth_emac *emac;
	int other_port_mask;
	int port_mask;
	u8 vlan_id;

	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
	emac = netdev_priv(real_dev);

	port_mask = BIT(emac->port_id);
	other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, vlan_id);

	icssg_fdb_add_del(emac, addr, vlan_id, port_mask, false);
	icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, false);

	if (other_port_mask) {
		icssg_fdb_add_del(emac, addr, vlan_id, other_port_mask, true);
		icssg_vtbl_modify(emac, vlan_id, other_port_mask,
				  other_port_mask, true);
	}

	return 0;
}

static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac,
					 const u8 *addr, u8 vid, bool add)
{
	icssg_fdb_add_del(emac, addr, vid,
			  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
			  ICSSG_FDB_ENTRY_BLOCK, add);

	if (add)
		icssg_vtbl_modify(emac, vid, BIT(emac->port_id),
				  BIT(emac->port_id), add);
}

static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
{
	struct net_device *real_dev;
	struct prueth_emac *emac;
	u8 vlan_id, i;

	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;

	if (is_hsr_master(real_dev)) {
		for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
			emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
			if (!emac)
				return -EINVAL;
			icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
						     true);
		}
	} else {
		emac = netdev_priv(real_dev);
		icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, true);
	}

	return 0;
}

static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
{
	struct net_device *real_dev;
	struct prueth_emac *emac;
	u8 vlan_id, i;

	vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
	real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;

	if (is_hsr_master(real_dev)) {
		for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
			emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
			if (!emac)
				return -EINVAL;
			icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
						     false);
		}
	} else {
		emac = netdev_priv(real_dev);
		icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, false);
	}

	return 0;
}

static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
				   void *args)
{
	struct prueth_emac *emac = args;

	if (!vdev || !vid)
		return 0;

	netif_addr_lock_bh(vdev);
	__hw_addr_sync_multiple(&emac->vlan_mcast_list[vid], &vdev->mc,
				vdev->addr_len);
	netif_addr_unlock_bh(vdev);

	if (emac->prueth->is_hsr_offload_mode)
		__hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
				   icssg_prueth_hsr_add_mcast,
				   icssg_prueth_hsr_del_mcast);
	else
		__hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
				   icssg_prueth_add_mcast,
				   icssg_prueth_del_mcast);

	return 0;
}

/**
 * emac_ndo_open - EMAC device open
 * @ndev: network adapter device
 *
 * Called when system wants to start the interface.
 *
 * Return: 0 for a successful open, or appropriate error code
 */
static int emac_ndo_open(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int ret, i, num_data_chn = emac->tx_ch_num;
	struct icssg_flow_cfg __iomem *flow_cfg;
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	struct device *dev = prueth->dev;
	int max_rx_flows;
	int rx_flow;

	/* set h/w MAC as user might have re-configured */
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
	icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
	if (ret) {
		dev_err(dev, "cannot set real number of tx queues\n");
		return ret;
	}

	init_completion(&emac->cmd_complete);
	ret = prueth_init_tx_chns(emac);
	if (ret) {
		dev_err(dev, "failed to init tx channel: %d\n", ret);
		return ret;
	}

	max_rx_flows = PRUETH_MAX_RX_FLOWS;
	ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
				  max_rx_flows, PRUETH_MAX_RX_DESC);
	if (ret) {
		dev_err(dev, "failed to init rx channel: %d\n", ret);
		goto cleanup_tx;
	}

	ret = prueth_ndev_add_tx_napi(emac);
	if (ret)
		goto cleanup_rx;

	/* we use only the highest priority flow for now i.e. @irq[3] */
	rx_flow = PRUETH_RX_FLOW_DATA;
	ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
			  IRQF_TRIGGER_HIGH, dev_name(dev), emac);
	if (ret) {
		dev_err(dev, "unable to request RX IRQ\n");
		goto cleanup_napi;
	}

	if (!prueth->emacs_initialized) {
		ret = prueth_emac_common_start(prueth);
		if (ret)
			goto free_rx_irq;
	}

	flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
	writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
	ret = emac_fdb_flow_id_updated(emac);

	if (ret) {
		netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
		goto stop;
	}

	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);

	ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
				   IRQF_ONESHOT, dev_name(dev), emac);
	if (ret)
		goto stop;

	/* Prepare RX */
	ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
	if (ret)
		goto free_tx_ts_irq;

	ret = prueth_create_xdp_rxqs(emac);
	if (ret)
		goto reset_rx_chn;

	ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
	if (ret)
		goto destroy_xdp_rxqs;

	for (i = 0; i < emac->tx_ch_num; i++) {
		ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
		if (ret)
			goto reset_tx_chan;
	}

	/* Enable NAPI in Tx and Rx direction */
	for (i = 0; i < emac->tx_ch_num; i++)
		napi_enable(&emac->tx_chns[i].napi_tx);
	napi_enable(&emac->napi_rx);

	/* start PHY */
	phy_start(ndev->phydev);

	prueth->emacs_initialized++;

	queue_work(system_long_wq, &emac->stats_work.work);

	return 0;

reset_tx_chan:
	/* Since the interface is not yet up, there wouldn't be
	 * any SKB for completion. So pass false for free_skb.
	 */
	prueth_reset_tx_chan(emac, i, false);
destroy_xdp_rxqs:
	prueth_destroy_xdp_rxqs(emac);
reset_rx_chn:
	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
free_tx_ts_irq:
	free_irq(emac->tx_ts_irq, emac);
stop:
	if (!prueth->emacs_initialized)
		prueth_emac_common_stop(prueth);
free_rx_irq:
	free_irq(emac->rx_chns.irq[rx_flow], emac);
cleanup_napi:
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
cleanup_rx:
	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
cleanup_tx:
	prueth_cleanup_tx_chns(emac);

	return ret;
}

/**
 * emac_ndo_stop - EMAC device stop
 * @ndev: network adapter device
 *
 * Called when system wants to stop or down the interface.
 *
 * Return: Always 0 (Success)
 */
static int emac_ndo_stop(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int rx_flow = PRUETH_RX_FLOW_DATA;
	int max_rx_flows;
	int ret, i;

	/* inform the upper layers. */
	netif_tx_stop_all_queues(ndev);

	/* block packets from wire */
	if (ndev->phydev)
		phy_stop(ndev->phydev);

	if (emac->prueth->is_hsr_offload_mode)
		__dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
	else
		__dev_mc_unsync(ndev, icssg_prueth_del_mcast);

	atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
	/* ensure new tdown_cnt value is visible */
	smp_mb__after_atomic();
	/* tear down and disable UDMA channels */
	reinit_completion(&emac->tdown_complete);
	for (i = 0; i < emac->tx_ch_num; i++)
		k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);

	ret = wait_for_completion_timeout(&emac->tdown_complete,
					  msecs_to_jiffies(1000));
	if (!ret)
		netdev_err(ndev, "tx teardown timeout\n");

	prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
	for (i = 0; i < emac->tx_ch_num; i++) {
		napi_disable(&emac->tx_chns[i].napi_tx);
		hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
	}

	max_rx_flows = PRUETH_MAX_RX_FLOWS;
	k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);

	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
	prueth_destroy_xdp_rxqs(emac);
	napi_disable(&emac->napi_rx);
	hrtimer_cancel(&emac->rx_hrtimer);

	cancel_work_sync(&emac->rx_mode_work);

	/* Destroying the queued work in ndo_stop() */
	cancel_delayed_work_sync(&emac->stats_work);

	/* stop PRUs */
	if (prueth->emacs_initialized == 1)
		prueth_emac_common_stop(prueth);

	free_irq(emac->tx_ts_irq, emac);

	free_irq(emac->rx_chns.irq[rx_flow], emac);
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);

	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
	prueth_cleanup_tx_chns(emac);

	prueth->emacs_initialized--;

	return 0;
}

static void emac_ndo_set_rx_mode_work(struct work_struct *work)
{
	struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
	struct net_device *ndev = emac->ndev;
	bool promisc, allmulti;

	if (!netif_running(ndev))
		return;

	promisc = ndev->flags & IFF_PROMISC;
	allmulti = ndev->flags & IFF_ALLMULTI;
	icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
	icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);

	if (promisc) {
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}

	if (allmulti) {
		icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}

	if (emac->prueth->is_hsr_offload_mode) {
		__dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
			      icssg_prueth_hsr_del_mcast);
		if (rtnl_trylock()) {
			vlan_for_each(emac->prueth->hsr_dev,
				      icssg_update_vlan_mcast, emac);
			rtnl_unlock();
		}
	} else {
		__dev_mc_sync(ndev, icssg_prueth_add_mcast,
			      icssg_prueth_del_mcast);
		if (rtnl_trylock()) {
			vlan_for_each(ndev, icssg_update_vlan_mcast, emac);
			rtnl_unlock();
		}
	}
}

/**
 * emac_ndo_set_rx_mode - EMAC set receive mode function
 * @ndev: The EMAC network adapter
 *
 * Called when system wants to set the receive mode of the device.
 *
 */
static void emac_ndo_set_rx_mode(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	queue_work(emac->cmd_wq, &emac->rx_mode_work);
}

static netdev_features_t emac_ndo_fix_features(struct net_device *ndev,
					       netdev_features_t features)
{
	/* hsr tag insertion offload and hsr dup offload are tightly coupled in
	 * firmware implementation. Both these features need to be enabled /
	 * disabled together.
	 */
	if (!(ndev->features & (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_TAG_INS)))
		if ((features & NETIF_F_HW_HSR_DUP) ||
		    (features & NETIF_F_HW_HSR_TAG_INS))
			features |= NETIF_F_HW_HSR_DUP |
				    NETIF_F_HW_HSR_TAG_INS;

	if ((ndev->features & NETIF_F_HW_HSR_DUP) ||
	    (ndev->features & NETIF_F_HW_HSR_TAG_INS))
		if (!(features & NETIF_F_HW_HSR_DUP) ||
		    !(features & NETIF_F_HW_HSR_TAG_INS))
			features &= ~(NETIF_F_HW_HSR_DUP |
				      NETIF_F_HW_HSR_TAG_INS);

	return features;
}

static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int port_mask = BIT(emac->port_id);
	int untag_mask = 0;

	if (prueth->is_hsr_offload_mode)
		port_mask |= BIT(PRUETH_PORT_HOST);

	__hw_addr_init(&emac->vlan_mcast_list[vid]);
	netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
		   vid, port_mask, untag_mask);

	icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
	icssg_set_pvid(emac->prueth, vid, emac->port_id);

	return 0;
}

static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int port_mask = BIT(emac->port_id);
	int untag_mask = 0;

	if (prueth->is_hsr_offload_mode)
		port_mask = BIT(PRUETH_PORT_HOST);

	netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n",
		   vid, port_mask, untag_mask);
	icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);

	return 0;
}

/**
 * emac_xdp_xmit - Implements ndo_xdp_xmit
 * @dev: netdev
 * @n: number of frames
 * @frames: array of XDP buffer pointers
 * @flags: XDP extra info
 *
 * Return: number of frames successfully sent. Failed frames
 * will be freed by XDP core.
 *
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 **/
static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
			 u32 flags)
{
	struct prueth_emac *emac = netdev_priv(dev);
	struct net_device *ndev = emac->ndev;
	struct netdev_queue *netif_txq;
	int cpu = smp_processor_id();
	struct xdp_frame *xdpf;
	unsigned int q_idx;
	int nxmit = 0;
	u32 err;
	int i;

	q_idx = cpu % emac->tx_ch_num;
	netif_txq = netdev_get_tx_queue(ndev, q_idx);

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	__netif_tx_lock(netif_txq, cpu);
	for (i = 0; i < n; i++) {
		xdpf = frames[i];
		err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx);
		if (err != ICSSG_XDP_TX) {
			ndev->stats.tx_dropped++;
			break;
		}
		nxmit++;
	}
	__netif_tx_unlock(netif_txq);

	return nxmit;
}

/**
 * emac_xdp_setup - add/remove an XDP program
 * @emac: emac device
 * @bpf: XDP program
 *
 * Return: Always 0 (Success)
 **/
static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->prog;

	if (!emac->xdpi.prog && !prog)
		return 0;

	WRITE_ONCE(emac->xdp_prog, prog);

	xdp_attachment_setup(&emac->xdpi, bpf);

	return 0;
}

/**
 * emac_ndo_bpf - implements ndo_bpf for icssg_prueth
 * @ndev: network adapter device
 * @bpf: XDP program
 *
 * Return: 0 on success, error code on failure.
 **/
static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return emac_xdp_setup(emac, bpf);
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops emac_netdev_ops = {
	.ndo_open = emac_ndo_open,
	.ndo_stop = emac_ndo_stop,
	.ndo_start_xmit = icssg_ndo_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = icssg_ndo_tx_timeout,
	.ndo_set_rx_mode = emac_ndo_set_rx_mode,
	.ndo_eth_ioctl = icssg_ndo_ioctl,
	.ndo_get_stats64 = icssg_ndo_get_stats64,
	.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
	.ndo_fix_features = emac_ndo_fix_features,
	.ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
	.ndo_bpf = emac_ndo_bpf,
	.ndo_xdp_xmit = emac_xdp_xmit,
};

static int prueth_netdev_init(struct prueth *prueth,
			      struct device_node *eth_node)
{
	int ret, num_tx_chn = PRUETH_MAX_TX_QUEUES;
	struct prueth_emac *emac;
	struct net_device *ndev;
	enum prueth_port port;
	const char *irq_name;
	enum prueth_mac mac;

	port = prueth_node_port(eth_node);
	if (port == PRUETH_PORT_INVALID)
		return -EINVAL;

	mac = prueth_node_mac(eth_node);
	if (mac == PRUETH_MAC_INVALID)
		return -EINVAL;

	ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
	if (!ndev)
		return -ENOMEM;

	emac = netdev_priv(ndev);
	emac->prueth = prueth;
	emac->ndev = ndev;
	emac->port_id = port;
	emac->xdp_prog = NULL;
	emac->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
	if (!emac->cmd_wq) {
		ret = -ENOMEM;
		goto free_ndev;
	}
	INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);

	INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);

	ret = pruss_request_mem_region(prueth->pruss,
				       port == PRUETH_PORT_MII0 ?
				       PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
				       &emac->dram);
	if (ret) {
		dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
		ret = -ENOMEM;
		goto free_wq;
	}

	emac->tx_ch_num = 1;

	irq_name = "tx_ts0";
	if (emac->port_id == PRUETH_PORT_MII1)
		irq_name = "tx_ts1";
	emac->tx_ts_irq = platform_get_irq_byname_optional(prueth->pdev, irq_name);
	if (emac->tx_ts_irq < 0) {
		ret = dev_err_probe(prueth->dev, emac->tx_ts_irq, "could not get tx_ts_irq\n");
		goto free;
	}

	SET_NETDEV_DEV(ndev, prueth->dev);
	spin_lock_init(&emac->lock);
	mutex_init(&emac->cmd_lock);

	emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
	if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
		dev_err(prueth->dev, "couldn't find phy-handle\n");
		ret = -ENODEV;
		goto free;
	} else if (of_phy_is_fixed_link(eth_node)) {
		ret = of_phy_register_fixed_link(eth_node);
		if (ret) {
			ret = dev_err_probe(prueth->dev, ret,
					    "failed to register fixed-link phy\n");
			goto free;
		}

		emac->phy_node = eth_node;
	}

	ret = of_get_phy_mode(eth_node, &emac->phy_if);
	if (ret) {
		dev_err(prueth->dev, "could not get phy-mode property\n");
		goto free;
	}

	if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
	    !phy_interface_mode_is_rgmii(emac->phy_if)) {
		dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
		ret = -EINVAL;
		goto free;
	}

	/* AM65 SR2.0 has TX Internal delay always enabled by hardware
	 * and it is not possible to disable TX Internal delay. The below
	 * switch case block describes how we handle different phy modes
	 * based on hardware restriction.
	 */
	switch (emac->phy_if) {
	case PHY_INTERFACE_MODE_RGMII_ID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
		ret = -EINVAL;
		goto free;
	default:
		break;
	}

	/* get mac address from DT and set private and netdev addr */
	ret = of_get_ethdev_address(eth_node, ndev);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		eth_hw_addr_random(ndev);
		dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
			 port, ndev->dev_addr);
	}
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	ndev->dev.of_node = eth_node;
	ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
	ndev->max_mtu = PRUETH_MAX_MTU;
	ndev->netdev_ops = &emac_netdev_ops;
	ndev->ethtool_ops = &icssg_ethtool_ops;
	ndev->hw_features = NETIF_F_SG;
	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
	xdp_set_features_flag(ndev,
			      NETDEV_XDP_ACT_BASIC |
			      NETDEV_XDP_ACT_REDIRECT |
			      NETDEV_XDP_ACT_NDO_XMIT);

	netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
	hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL_PINNED);
	prueth->emac[mac] = emac;

	return 0;

free:
	pruss_release_mem_region(prueth->pruss, &emac->dram);
free_wq:
	destroy_workqueue(emac->cmd_wq);
free_ndev:
	emac->ndev = NULL;
	prueth->emac[mac] = NULL;
	free_netdev(ndev);

	return ret;
}

bool prueth_dev_check(const struct net_device *ndev)
{
	if (ndev->netdev_ops == &emac_netdev_ops && netif_running(ndev)) {
		struct prueth_emac *emac = netdev_priv(ndev);

		return emac->prueth->is_switch_mode;
	}

	return false;
}

static void prueth_offload_fwd_mark_update(struct prueth *prueth)
{
	int set_val = 0;
	int i;

	if (prueth->br_members == (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1)))
		set_val = 1;

	dev_dbg(prueth->dev, "set offload_fwd_mark %d\n", set_val);

	for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
		struct prueth_emac *emac = prueth->emac[i];

		if (!emac || !emac->ndev)
			continue;

		emac->offload_fwd_mark = set_val;
	}
}

static int prueth_emac_restart(struct prueth *prueth)
{
	struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
	struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
	int ret;

	/* Detach the net_device for both PRUeth ports */
	if (netif_running(emac0->ndev))
		netif_device_detach(emac0->ndev);
	if (netif_running(emac1->ndev))
		netif_device_detach(emac1->ndev);

	/* Disable both PRUeth ports */
	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
	if (ret)
		return ret;

	/* Stop both pru cores for both PRUeth ports */
	ret = prueth_emac_common_stop(prueth);
	if (ret) {
		dev_err(prueth->dev, "Failed to stop the firmwares");
		return ret;
	}

	/* Start both pru cores for both PRUeth ports */
	ret = prueth_emac_common_start(prueth);
	if (ret) {
		dev_err(prueth->dev, "Failed to start the firmwares");
"Failed to start the firmwares"); 1378 return ret; 1379 } 1380 1381 /* Enable forwarding for both PRUeth ports */ 1382 ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD); 1383 ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD); 1384 1385 /* Attache net_device for both PRUeth ports */ 1386 netif_device_attach(emac0->ndev); 1387 netif_device_attach(emac1->ndev); 1388 1389 return ret; 1390 } 1391 1392 static void icssg_change_mode(struct prueth *prueth) 1393 { 1394 struct prueth_emac *emac; 1395 int mac, ret; 1396 1397 ret = prueth_emac_restart(prueth); 1398 if (ret) { 1399 dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process"); 1400 return; 1401 } 1402 1403 for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) { 1404 emac = prueth->emac[mac]; 1405 if (prueth->is_hsr_offload_mode) { 1406 if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM) 1407 icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE); 1408 else 1409 icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE); 1410 } 1411 1412 if (netif_running(emac->ndev)) { 1413 icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan, 1414 ICSSG_FDB_ENTRY_P0_MEMBERSHIP | 1415 ICSSG_FDB_ENTRY_P1_MEMBERSHIP | 1416 ICSSG_FDB_ENTRY_P2_MEMBERSHIP | 1417 ICSSG_FDB_ENTRY_BLOCK, 1418 true); 1419 icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID, 1420 BIT(emac->port_id) | DEFAULT_PORT_MASK, 1421 BIT(emac->port_id) | DEFAULT_UNTAG_MASK, 1422 true); 1423 if (prueth->is_hsr_offload_mode) 1424 icssg_vtbl_modify(emac, DEFAULT_VID, 1425 DEFAULT_PORT_MASK, 1426 DEFAULT_UNTAG_MASK, true); 1427 icssg_set_pvid(prueth, emac->port_vlan, emac->port_id); 1428 if (prueth->is_switch_mode) 1429 icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE); 1430 } 1431 } 1432 } 1433 1434 static int prueth_netdevice_port_link(struct net_device *ndev, 1435 struct net_device *br_ndev, 1436 struct netlink_ext_ack *extack) 1437 { 1438 struct prueth_emac *emac = netdev_priv(ndev); 1439 struct prueth *prueth = emac->prueth; 1440 int err; 1441 1442 if (!prueth->br_members) { 1443 prueth->hw_bridge_dev = br_ndev; 1444 } else { 1445 /* This is adding the port to a second bridge, this is 1446 * unsupported 1447 */ 1448 if (prueth->hw_bridge_dev != br_ndev) 1449 return -EOPNOTSUPP; 1450 } 1451 1452 err = switchdev_bridge_port_offload(br_ndev, ndev, emac, 1453 &prueth->prueth_switchdev_nb, 1454 &prueth->prueth_switchdev_bl_nb, 1455 false, extack); 1456 if (err) 1457 return err; 1458 1459 prueth->br_members |= BIT(emac->port_id); 1460 1461 if (!prueth->is_switch_mode) { 1462 if (prueth->br_members & BIT(PRUETH_PORT_MII0) && 1463 prueth->br_members & BIT(PRUETH_PORT_MII1)) { 1464 prueth->is_switch_mode = true; 1465 prueth->default_vlan = PRUETH_DFLT_VLAN_SW; 1466 emac->port_vlan = prueth->default_vlan; 1467 icssg_change_mode(prueth); 1468 } 1469 } 1470 1471 prueth_offload_fwd_mark_update(prueth); 1472 1473 return NOTIFY_DONE; 1474 } 1475 1476 static void prueth_netdevice_port_unlink(struct net_device *ndev) 1477 { 1478 struct prueth_emac *emac = netdev_priv(ndev); 1479 struct prueth *prueth = emac->prueth; 1480 int ret; 1481 1482 prueth->br_members &= ~BIT(emac->port_id); 1483 1484 if (prueth->is_switch_mode) { 1485 prueth->is_switch_mode = false; 1486 emac->port_vlan = 0; 1487 ret = prueth_emac_restart(prueth); 1488 if (ret) { 1489 dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process"); 1490 return; 1491 } 1492 } 1493 1494 prueth_offload_fwd_mark_update(prueth); 1495 1496 if (!prueth->br_members) 1497 
		prueth->hw_bridge_dev = NULL;
}

static int prueth_hsr_port_link(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	struct prueth_emac *emac0;
	struct prueth_emac *emac1;

	emac0 = prueth->emac[PRUETH_MAC0];
	emac1 = prueth->emac[PRUETH_MAC1];

	if (prueth->is_switch_mode)
		return -EOPNOTSUPP;

	prueth->hsr_members |= BIT(emac->port_id);
	if (!prueth->is_hsr_offload_mode) {
		if (prueth->hsr_members & BIT(PRUETH_PORT_MII0) &&
		    prueth->hsr_members & BIT(PRUETH_PORT_MII1)) {
			if (!(emac0->ndev->features &
			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
			    !(emac1->ndev->features &
			      NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
				return -EOPNOTSUPP;
			prueth->is_hsr_offload_mode = true;
			prueth->default_vlan = PRUETH_DFLT_VLAN_HSR;
			emac0->port_vlan = prueth->default_vlan;
			emac1->port_vlan = prueth->default_vlan;
			icssg_change_mode(prueth);
			netdev_dbg(ndev, "Enabling HSR offload mode\n");
		}
	}

	return 0;
}

static void prueth_hsr_port_unlink(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	struct prueth_emac *emac0;
	struct prueth_emac *emac1;
	int ret;

	emac0 = prueth->emac[PRUETH_MAC0];
	emac1 = prueth->emac[PRUETH_MAC1];

	prueth->hsr_members &= ~BIT(emac->port_id);
	if (prueth->is_hsr_offload_mode) {
		prueth->is_hsr_offload_mode = false;
		emac0->port_vlan = 0;
		emac1->port_vlan = 0;
		prueth->hsr_dev = NULL;
		ret = prueth_emac_restart(prueth);
		if (ret) {
			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
			return;
		}
		netdev_dbg(ndev, "Disabling HSR Offload mode\n");
	}
}

/* netdev notifier */
static int prueth_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int ret = NOTIFY_DONE;

	if (ndev->netdev_ops != &emac_netdev_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;

		if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
		    is_hsr_master(info->upper_dev)) {
			if (info->linking) {
				if (!prueth->hsr_dev) {
					prueth->hsr_dev = info->upper_dev;
					icssg_class_set_host_mac_addr(prueth->miig_rt,
								      prueth->hsr_dev->dev_addr);
				} else {
					if (prueth->hsr_dev != info->upper_dev) {
						netdev_dbg(ndev, "Both interfaces must be linked to same upper device\n");
						return -EOPNOTSUPP;
					}
				}
				prueth_hsr_port_link(ndev);
			} else {
				prueth_hsr_port_unlink(ndev);
			}
		}

		if (netif_is_bridge_master(info->upper_dev)) {
			if (info->linking)
				ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack);
			else
				prueth_netdevice_port_unlink(ndev);
		}
		break;
	default:
		return NOTIFY_DONE;
	}

	return notifier_from_errno(ret);
}

static int prueth_register_notifiers(struct prueth *prueth)
{
	int ret = 0;

	prueth->prueth_netdevice_nb.notifier_call = &prueth_netdevice_event;
	ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb);
	if (ret) {
		dev_err(prueth->dev, "can't register netdevice notifier\n");
		return ret;
	}

	ret = prueth_switchdev_register_notifiers(prueth);
	if (ret)
		unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);

	return ret;
}

static void prueth_unregister_notifiers(struct prueth *prueth)
{
	prueth_switchdev_unregister_notifiers(prueth);
	unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
}

static int prueth_probe(struct platform_device *pdev)
{
	struct device_node *eth_node, *eth_ports_node;
	struct device_node *eth0_node = NULL;
	struct device_node *eth1_node = NULL;
	struct genpool_data_align gp_data = {
		.align = SZ_64K,
	};
	struct device *dev = &pdev->dev;
	struct device_node *np;
	struct prueth *prueth;
	struct pruss *pruss;
	u32 msmc_ram_size;
	int i, ret;

	np = dev->of_node;

	BUILD_BUG_ON_MSG((sizeof(struct prueth_swdata) > PRUETH_NAV_SW_DATA_SIZE),
			 "insufficient SW_DATA size");

	prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
	if (!prueth)
		return -ENOMEM;

	dev_set_drvdata(dev, prueth);
	prueth->pdev = pdev;
	prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);

	prueth->dev = dev;
	eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
	if (!eth_ports_node)
		return -ENOENT;

	for_each_child_of_node(eth_ports_node, eth_node) {
		u32 reg;

		if (strcmp(eth_node->name, "port"))
			continue;
		ret = of_property_read_u32(eth_node, "reg", &reg);
		if (ret < 0) {
			dev_err(dev, "%pOF error reading port_id %d\n",
				eth_node, ret);
		}

		of_node_get(eth_node);

		if (reg == 0) {
			eth0_node = eth_node;
			if (!of_device_is_available(eth0_node)) {
				of_node_put(eth0_node);
				eth0_node = NULL;
			}
		} else if (reg == 1) {
			eth1_node = eth_node;
			if (!of_device_is_available(eth1_node)) {
				of_node_put(eth1_node);
				eth1_node = NULL;
			}
		} else {
			dev_err(dev, "port reg should be 0 or 1\n");
		}
	}

	of_node_put(eth_ports_node);

	/* At least one node must be present and available else we fail */
	if (!eth0_node && !eth1_node) {
		dev_err(dev, "neither port0 nor port1 node available\n");
		return -ENODEV;
	}

	if (eth0_node == eth1_node) {
		dev_err(dev, "port0 and port1 can't have same reg\n");
		of_node_put(eth0_node);
		return -ENODEV;
	}

	prueth->eth_node[PRUETH_MAC0] = eth0_node;
	prueth->eth_node[PRUETH_MAC1] = eth1_node;

	prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
	if (IS_ERR(prueth->miig_rt)) {
		dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
		return -ENODEV;
	}

	prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
	if (IS_ERR(prueth->mii_rt)) {
		dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
		return -ENODEV;
	}

	prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
	if (IS_ERR(prueth->pa_stats)) {
		dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n");
		prueth->pa_stats = NULL;
	}

	if (eth0_node || eth1_node) {
		ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
		if (ret)
			goto put_cores;
		ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
		if (ret)
			goto put_cores;
	}

	pruss = pruss_get(eth0_node ?
			  prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
	if (IS_ERR(pruss)) {
		ret = PTR_ERR(pruss);
		dev_err(dev, "unable to get pruss handle\n");
		goto put_cores;
	}

	prueth->pruss = pruss;

	ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
				       &prueth->shram);
	if (ret) {
		dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
		goto put_pruss;
	}

	prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
	if (!prueth->sram_pool) {
		dev_err(dev, "unable to get SRAM pool\n");
		ret = -ENODEV;

		goto put_mem;
	}

	msmc_ram_size = MSMC_RAM_SIZE;
	prueth->is_switchmode_supported = prueth->pdata.switch_mode;
	if (prueth->is_switchmode_supported)
		msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE;

	/* NOTE: FW bug needs buffer base to be 64KB aligned */
	prueth->msmcram.va =
		(void __iomem *)gen_pool_alloc_algo(prueth->sram_pool,
						    msmc_ram_size,
						    gen_pool_first_fit_align,
						    &gp_data);

	if (!prueth->msmcram.va) {
		ret = -ENOMEM;
		dev_err(dev, "unable to allocate MSMC resource\n");
		goto put_mem;
	}
	prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
						   (unsigned long)prueth->msmcram.va);
	prueth->msmcram.size = msmc_ram_size;
	memset_io(prueth->msmcram.va, 0, msmc_ram_size);
	dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
		prueth->msmcram.va, prueth->msmcram.size);

	prueth->iep0 = icss_iep_get_idx(np, 0);
	if (IS_ERR(prueth->iep0)) {
		ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), "iep0 get failed\n");
		prueth->iep0 = NULL;
		goto free_pool;
	}

	prueth->iep1 = icss_iep_get_idx(np, 1);
	if (IS_ERR(prueth->iep1)) {
		ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
		goto put_iep0;
	}

	if (prueth->pdata.quirk_10m_link_issue) {
		/* Enable IEP1 for FW in 64bit mode as W/A for 10M FD link detect issue under TX
		 * traffic.
		 */
		icss_iep_init_fw(prueth->iep1);
	}

	spin_lock_init(&prueth->vtbl_lock);
	spin_lock_init(&prueth->stats_lock);
	/* setup netdev interfaces */
	if (eth0_node) {
		ret = prueth_netdev_init(prueth, eth0_node);
		if (ret) {
			dev_err_probe(dev, ret, "netdev init %s failed\n",
				      eth0_node->name);
			goto exit_iep;
		}

		prueth->emac[PRUETH_MAC0]->half_duplex =
			of_property_read_bool(eth0_node, "ti,half-duplex-capable");

		prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
	}

	if (eth1_node) {
		ret = prueth_netdev_init(prueth, eth1_node);
		if (ret) {
			dev_err_probe(dev, ret, "netdev init %s failed\n",
				      eth1_node->name);
			goto netdev_exit;
		}

		prueth->emac[PRUETH_MAC1]->half_duplex =
			of_property_read_bool(eth1_node, "ti,half-duplex-capable");

		prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
	}

	/* register the network devices */
	if (eth0_node) {
		ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
		if (ret) {
			dev_err(dev, "can't register netdev for port MII0");
			goto netdev_exit;
		}

		prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;

		ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]);
		if (ret) {
			dev_err(dev,
				"can't connect to MII0 PHY, error %d", ret);
			goto netdev_unregister;
		}
		phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
	}

	if (eth1_node) {
		ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
		if (ret) {
			dev_err(dev, "can't register netdev for port MII1");
			goto netdev_unregister;
		}

		prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
		ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]);
		if (ret) {
			dev_err(dev,
				"can't connect to MII1 PHY, error %d", ret);
			goto netdev_unregister;
		}
		phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
	}

	if (prueth->is_switchmode_supported) {
		ret = prueth_register_notifiers(prueth);
		if (ret)
			goto netdev_unregister;

		sprintf(prueth->switch_id, "%s", dev_name(dev));
	}

	dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
		 (!eth0_node || !eth1_node) ? "single" : "dual");
"single" : "dual"); 1888 1889 if (eth1_node) 1890 of_node_put(eth1_node); 1891 if (eth0_node) 1892 of_node_put(eth0_node); 1893 return 0; 1894 1895 netdev_unregister: 1896 for (i = 0; i < PRUETH_NUM_MACS; i++) { 1897 if (!prueth->registered_netdevs[i]) 1898 continue; 1899 if (prueth->emac[i]->ndev->phydev) { 1900 phy_disconnect(prueth->emac[i]->ndev->phydev); 1901 prueth->emac[i]->ndev->phydev = NULL; 1902 } 1903 unregister_netdev(prueth->registered_netdevs[i]); 1904 } 1905 1906 netdev_exit: 1907 for (i = 0; i < PRUETH_NUM_MACS; i++) { 1908 eth_node = prueth->eth_node[i]; 1909 if (!eth_node) 1910 continue; 1911 1912 prueth_netdev_exit(prueth, eth_node); 1913 } 1914 1915 exit_iep: 1916 if (prueth->pdata.quirk_10m_link_issue) 1917 icss_iep_exit_fw(prueth->iep1); 1918 icss_iep_put(prueth->iep1); 1919 1920 put_iep0: 1921 icss_iep_put(prueth->iep0); 1922 prueth->iep0 = NULL; 1923 prueth->iep1 = NULL; 1924 1925 free_pool: 1926 gen_pool_free(prueth->sram_pool, 1927 (unsigned long)prueth->msmcram.va, msmc_ram_size); 1928 1929 put_mem: 1930 pruss_release_mem_region(prueth->pruss, &prueth->shram); 1931 1932 put_pruss: 1933 pruss_put(prueth->pruss); 1934 1935 put_cores: 1936 if (eth0_node || eth1_node) { 1937 prueth_put_cores(prueth, ICSS_SLICE0); 1938 of_node_put(eth0_node); 1939 1940 prueth_put_cores(prueth, ICSS_SLICE1); 1941 of_node_put(eth1_node); 1942 } 1943 1944 return ret; 1945 } 1946 1947 static void prueth_remove(struct platform_device *pdev) 1948 { 1949 struct prueth *prueth = platform_get_drvdata(pdev); 1950 struct device_node *eth_node; 1951 int i; 1952 1953 prueth_unregister_notifiers(prueth); 1954 1955 for (i = 0; i < PRUETH_NUM_MACS; i++) { 1956 if (!prueth->registered_netdevs[i]) 1957 continue; 1958 phy_stop(prueth->emac[i]->ndev->phydev); 1959 phy_disconnect(prueth->emac[i]->ndev->phydev); 1960 prueth->emac[i]->ndev->phydev = NULL; 1961 unregister_netdev(prueth->registered_netdevs[i]); 1962 } 1963 1964 for (i = 0; i < PRUETH_NUM_MACS; i++) { 1965 eth_node = prueth->eth_node[i]; 1966 if (!eth_node) 1967 continue; 1968 1969 prueth_netdev_exit(prueth, eth_node); 1970 } 1971 1972 if (prueth->pdata.quirk_10m_link_issue) 1973 icss_iep_exit_fw(prueth->iep1); 1974 1975 icss_iep_put(prueth->iep1); 1976 icss_iep_put(prueth->iep0); 1977 1978 gen_pool_free(prueth->sram_pool, 1979 (unsigned long)prueth->msmcram.va, 1980 MSMC_RAM_SIZE); 1981 1982 pruss_release_mem_region(prueth->pruss, &prueth->shram); 1983 1984 pruss_put(prueth->pruss); 1985 1986 if (prueth->eth_node[PRUETH_MAC1]) 1987 prueth_put_cores(prueth, ICSS_SLICE1); 1988 1989 if (prueth->eth_node[PRUETH_MAC0]) 1990 prueth_put_cores(prueth, ICSS_SLICE0); 1991 } 1992 1993 static const struct prueth_pdata am654_icssg_pdata = { 1994 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, 1995 .quirk_10m_link_issue = 1, 1996 .switch_mode = 1, 1997 }; 1998 1999 static const struct prueth_pdata am64x_icssg_pdata = { 2000 .fdqring_mode = K3_RINGACC_RING_MODE_RING, 2001 .quirk_10m_link_issue = 1, 2002 .switch_mode = 1, 2003 }; 2004 2005 static const struct of_device_id prueth_dt_match[] = { 2006 { .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata }, 2007 { .compatible = "ti,am642-icssg-prueth", .data = &am64x_icssg_pdata }, 2008 { /* sentinel */ } 2009 }; 2010 MODULE_DEVICE_TABLE(of, prueth_dt_match); 2011 2012 static struct platform_driver prueth_driver = { 2013 .probe = prueth_probe, 2014 .remove = prueth_remove, 2015 .driver = { 2016 .name = "icssg-prueth", 2017 .of_match_table = prueth_dt_match, 2018 .pm = &prueth_dev_pm_ops, 2019 }, 
};
module_platform_driver(prueth_driver);

MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver");
MODULE_LICENSE("GPL");