Lines Matching +full:pme +full:- +full:active +full:- +full:high
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2017-2024 Microchip Technology Inc.
14 #include <linux/platform_data/microchip-ksz.h>
205 * struct ksz_drive_strength - drive strength mapping
214 /* ksz9477_drive_strengths - Drive strength mapping for KSZ9477 variants
222 * - for high speed signals
232 * - for low speed signals
249 /* ksz88x3_drive_strengths - Drive strength mapping for KSZ8863, KSZ8873, ..
269 * ksz_phylink_mac_disable_tx_lpi() - Callback to signal LPI support (Dummy)
280 * ksz_phylink_mac_enable_tx_lpi() - Callback to signal LPI support (Dummy)
290 * For KSZ switch ports with integrated PHYs (e.g., KSZ9893R ports 1-2),
293 * the auto-negotiation results. (Note: While the datasheet describes EEE
298 * autonomously via strapping, means MAC-level software intervention is not
301 * EEE, also Sections 4.1.7 on Auto-Negotiation and 3.2.1 on Configuration
305 * lack documented MAC-level LPI control.
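The comment block above explains why these callbacks are deliberately empty: EEE/LPI is handled autonomously by the switch, so the driver only needs to tell phylink that LPI is supported. Below is a minimal sketch of such no-op callbacks; the function names are hypothetical and the callback signatures are assumed to follow the current phylink_mac_ops API.

#include <linux/phylink.h>

/* Hypothetical no-op LPI callbacks: accept phylink's requests without
 * touching the hardware, since EEE is managed autonomously by the switch.
 */
static void example_mac_disable_tx_lpi(struct phylink_config *config)
{
	/* Nothing to do: the hardware manages LPI on its own. */
}

static int example_mac_enable_tx_lpi(struct phylink_config *config,
				     u32 timer, bool tx_clk_stop)
{
	/* Report success so phylink/ethtool treat EEE as enabled. */
	return 0;
}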
1437 * port map is NOT continuous. The per-port register
2018 if (chip->chip_id == prod_num) in ksz_lookup_info()
2030 if (dev->pdata) { in ksz_check_device_id()
2031 expected_chip_id = dev->pdata->chip_id; in ksz_check_device_id()
2034 return -ENODEV; in ksz_check_device_id()
2036 expected_chip_data = of_device_get_match_data(dev->dev); in ksz_check_device_id()
2037 expected_chip_id = expected_chip_data->chip_id; in ksz_check_device_id()
2040 if (expected_chip_id != dev->chip_id) { in ksz_check_device_id()
2041 dev_err(dev->dev, in ksz_check_device_id()
2043 expected_chip_data->dev_name, dev->info->dev_name); in ksz_check_device_id()
2044 return -ENODEV; in ksz_check_device_id()
2053 struct ksz_device *dev = ds->priv; in ksz_phylink_get_caps()
2055 if (dev->info->supports_mii[port]) in ksz_phylink_get_caps()
2056 __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces); in ksz_phylink_get_caps()
2058 if (dev->info->supports_rmii[port]) in ksz_phylink_get_caps()
2060 config->supported_interfaces); in ksz_phylink_get_caps()
2062 if (dev->info->supports_rgmii[port]) in ksz_phylink_get_caps()
2063 phy_interface_set_rgmii(config->supported_interfaces); in ksz_phylink_get_caps()
2065 if (dev->info->internal_phy[port]) { in ksz_phylink_get_caps()
2067 config->supported_interfaces); in ksz_phylink_get_caps()
2069 * phy-mode property is absent in ksz_phylink_get_caps()
2072 config->supported_interfaces); in ksz_phylink_get_caps()
2075 if (dev->dev_ops->get_caps) in ksz_phylink_get_caps()
2076 dev->dev_ops->get_caps(dev, port, config); in ksz_phylink_get_caps()
2078 if (ds->ops->support_eee && ds->ops->support_eee(ds, port)) { in ksz_phylink_get_caps()
2079 memcpy(config->lpi_interfaces, config->supported_interfaces, in ksz_phylink_get_caps()
2080 sizeof(config->lpi_interfaces)); in ksz_phylink_get_caps()
2082 config->lpi_capabilities = MAC_100FD; in ksz_phylink_get_caps()
2083 if (dev->info->gbit_capable[port]) in ksz_phylink_get_caps()
2084 config->lpi_capabilities |= MAC_1000FD; in ksz_phylink_get_caps()
2087 config->eee_enabled_default = true; in ksz_phylink_get_caps()
2099 mib = &dev->ports[port].mib; in ksz_r_mib_stats64()
2100 stats = &mib->stats64; in ksz_r_mib_stats64()
2101 pstats = &mib->pause_stats; in ksz_r_mib_stats64()
2102 raw = (struct ksz_stats_raw *)mib->counters; in ksz_r_mib_stats64()
2104 spin_lock(&mib->stats64_lock); in ksz_r_mib_stats64()
2106 stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast + in ksz_r_mib_stats64()
2107 raw->rx_pause; in ksz_r_mib_stats64()
2108 stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast + in ksz_r_mib_stats64()
2109 raw->tx_pause; in ksz_r_mib_stats64()
2114 stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN; in ksz_r_mib_stats64()
2115 stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN; in ksz_r_mib_stats64()
2117 stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments + in ksz_r_mib_stats64()
2118 raw->rx_oversize; in ksz_r_mib_stats64()
2120 stats->rx_crc_errors = raw->rx_crc_err; in ksz_r_mib_stats64()
2121 stats->rx_frame_errors = raw->rx_align_err; in ksz_r_mib_stats64()
2122 stats->rx_dropped = raw->rx_discards; in ksz_r_mib_stats64()
2123 stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + in ksz_r_mib_stats64()
2124 stats->rx_frame_errors + stats->rx_dropped; in ksz_r_mib_stats64()
2126 stats->tx_window_errors = raw->tx_late_col; in ksz_r_mib_stats64()
2127 stats->tx_fifo_errors = raw->tx_discards; in ksz_r_mib_stats64()
2128 stats->tx_aborted_errors = raw->tx_exc_col; in ksz_r_mib_stats64()
2129 stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors + in ksz_r_mib_stats64()
2130 stats->tx_aborted_errors; in ksz_r_mib_stats64()
2132 stats->multicast = raw->rx_mcast; in ksz_r_mib_stats64()
2133 stats->collisions = raw->tx_total_col; in ksz_r_mib_stats64()
2135 pstats->tx_pause_frames = raw->tx_pause; in ksz_r_mib_stats64()
2136 pstats->rx_pause_frames = raw->rx_pause; in ksz_r_mib_stats64()
2138 spin_unlock(&mib->stats64_lock); in ksz_r_mib_stats64()
2140 if (dev->info->phy_errata_9477) { in ksz_r_mib_stats64()
2141 ret = ksz9477_errata_monitor(dev, port, raw->tx_late_col); in ksz_r_mib_stats64()
2143 dev_err(dev->dev, "Failed to monitor transmission halt\n"); in ksz_r_mib_stats64()
2154 mib = &dev->ports[port].mib; in ksz88xx_r_mib_stats64()
2155 stats = &mib->stats64; in ksz88xx_r_mib_stats64()
2156 pstats = &mib->pause_stats; in ksz88xx_r_mib_stats64()
2157 raw = (struct ksz88xx_stats_raw *)mib->counters; in ksz88xx_r_mib_stats64()
2159 spin_lock(&mib->stats64_lock); in ksz88xx_r_mib_stats64()
2161 stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast + in ksz88xx_r_mib_stats64()
2162 raw->rx_pause; in ksz88xx_r_mib_stats64()
2163 stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast + in ksz88xx_r_mib_stats64()
2164 raw->tx_pause; in ksz88xx_r_mib_stats64()
2169 stats->rx_bytes = raw->rx + raw->rx_hi - stats->rx_packets * ETH_FCS_LEN; in ksz88xx_r_mib_stats64()
2170 stats->tx_bytes = raw->tx + raw->tx_hi - stats->tx_packets * ETH_FCS_LEN; in ksz88xx_r_mib_stats64()
2172 stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments + in ksz88xx_r_mib_stats64()
2173 raw->rx_oversize; in ksz88xx_r_mib_stats64()
2175 stats->rx_crc_errors = raw->rx_crc_err; in ksz88xx_r_mib_stats64()
2176 stats->rx_frame_errors = raw->rx_align_err; in ksz88xx_r_mib_stats64()
2177 stats->rx_dropped = raw->rx_discards; in ksz88xx_r_mib_stats64()
2178 stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + in ksz88xx_r_mib_stats64()
2179 stats->rx_frame_errors + stats->rx_dropped; in ksz88xx_r_mib_stats64()
2181 stats->tx_window_errors = raw->tx_late_col; in ksz88xx_r_mib_stats64()
2182 stats->tx_fifo_errors = raw->tx_discards; in ksz88xx_r_mib_stats64()
2183 stats->tx_aborted_errors = raw->tx_exc_col; in ksz88xx_r_mib_stats64()
2184 stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors + in ksz88xx_r_mib_stats64()
2185 stats->tx_aborted_errors; in ksz88xx_r_mib_stats64()
2187 stats->multicast = raw->rx_mcast; in ksz88xx_r_mib_stats64()
2188 stats->collisions = raw->tx_total_col; in ksz88xx_r_mib_stats64()
2190 pstats->tx_pause_frames = raw->tx_pause; in ksz88xx_r_mib_stats64()
2191 pstats->rx_pause_frames = raw->rx_pause; in ksz88xx_r_mib_stats64()
2193 spin_unlock(&mib->stats64_lock); in ksz88xx_r_mib_stats64()
2199 struct ksz_device *dev = ds->priv; in ksz_get_stats64()
2202 mib = &dev->ports[port].mib; in ksz_get_stats64()
2204 spin_lock(&mib->stats64_lock); in ksz_get_stats64()
2205 memcpy(s, &mib->stats64, sizeof(*s)); in ksz_get_stats64()
2206 spin_unlock(&mib->stats64_lock); in ksz_get_stats64()
2212 struct ksz_device *dev = ds->priv; in ksz_get_pause_stats()
2215 mib = &dev->ports[port].mib; in ksz_get_pause_stats()
2217 spin_lock(&mib->stats64_lock); in ksz_get_pause_stats()
2218 memcpy(pause_stats, &mib->pause_stats, sizeof(*pause_stats)); in ksz_get_pause_stats()
2219 spin_unlock(&mib->stats64_lock); in ksz_get_pause_stats()
2225 struct ksz_device *dev = ds->priv; in ksz_get_strings()
2231 for (i = 0; i < dev->info->mib_cnt; i++) in ksz_get_strings()
2232 ethtool_puts(&buf, dev->info->mib_names[i].string); in ksz_get_strings()
2236 * ksz_update_port_member - Adjust port forwarding rules based on STP state and
2260 struct ksz_port *p = &dev->ports[port]; in ksz_update_port_member()
2261 struct dsa_switch *ds = dev->ds; in ksz_update_port_member()
2272 for (i = 0; i < ds->num_ports; i++) { in ksz_update_port_member()
2274 struct ksz_port *other_p = &dev->ports[i]; in ksz_update_port_member()
2283 if (other_p->stp_state != BR_STATE_FORWARDING) in ksz_update_port_member()
2292 if (p->stp_state == BR_STATE_FORWARDING && in ksz_update_port_member()
2293 !(p->isolated && other_p->isolated)) { in ksz_update_port_member()
2299 for (j = 0; j < ds->num_ports; j++) { in ksz_update_port_member()
2309 third_p = &dev->ports[j]; in ksz_update_port_member()
2310 if (third_p->stp_state != BR_STATE_FORWARDING) in ksz_update_port_member()
2324 !(other_p->isolated && third_p->isolated)) in ksz_update_port_member()
2328 dev->dev_ops->cfg_port_member(dev, i, val | cpu_port); in ksz_update_port_member()
2331 dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port); in ksz_update_port_member()
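The fragments above show the core rule of ksz_update_port_member(): a port forwards to another port only if both are in the forwarding STP state and they are not both isolated. A simplified, hypothetical helper illustrating just that rule (the real function also checks bridge membership and always adds the CPU port) could look like:

#include <linux/bits.h>
#include <linux/if_bridge.h>

/* Simplified illustration only; not the driver's actual implementation. */
static u8 example_port_member_mask(const struct ksz_port *ports,
				   int num_ports, int port)
{
	u8 mask = 0;
	int i;

	if (ports[port].stp_state != BR_STATE_FORWARDING)
		return 0;

	for (i = 0; i < num_ports; i++) {
		if (i == port)
			continue;
		if (ports[i].stp_state != BR_STATE_FORWARDING)
			continue;
		/* Two isolated ports must not forward to each other. */
		if (ports[port].isolated && ports[i].isolated)
			continue;
		mask |= BIT(i);
	}

	return mask;
}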
2336 struct ksz_device *dev = bus->priv; in ksz_sw_mdio_read()
2340 ret = dev->dev_ops->r_phy(dev, addr, regnum, &val); in ksz_sw_mdio_read()
2350 struct ksz_device *dev = bus->priv; in ksz_sw_mdio_write()
2352 return dev->dev_ops->w_phy(dev, addr, regnum, val); in ksz_sw_mdio_write()
2356 * ksz_parent_mdio_read - Read data from a PHY register on the parent MDIO bus.
2370 struct ksz_device *dev = bus->priv; in ksz_parent_mdio_read()
2372 return mdiobus_read_nested(dev->parent_mdio_bus, addr, regnum); in ksz_parent_mdio_read()
2376 * ksz_parent_mdio_write - Write data to a PHY register on the parent MDIO bus.
2391 struct ksz_device *dev = bus->priv; in ksz_parent_mdio_write()
2393 return mdiobus_write_nested(dev->parent_mdio_bus, addr, regnum, val); in ksz_parent_mdio_write()
2397 * ksz_phy_addr_to_port - Map a PHY address to the corresponding switch port.
2407 * Return: Port index on success, or -EINVAL if no matching port is found.
2411 struct dsa_switch *ds = dev->ds; in ksz_phy_addr_to_port()
2415 if (dev->info->internal_phy[dp->index] && in ksz_phy_addr_to_port()
2416 dev->phy_addr_map[dp->index] == addr) in ksz_phy_addr_to_port()
2417 return dp->index; in ksz_phy_addr_to_port()
2420 return -EINVAL; in ksz_phy_addr_to_port()
2424 * ksz_irq_phy_setup - Configure IRQs for PHYs in the KSZ device.
2427 * Sets up IRQs for each active PHY connected to the KSZ switch by mapping the
2435 struct dsa_switch *ds = dev->ds; in ksz_irq_phy_setup()
2441 if (BIT(phy) & ds->phys_mii_mask) { in ksz_irq_phy_setup()
2448 irq = irq_find_mapping(dev->ports[port].pirq.domain, in ksz_irq_phy_setup()
2454 ds->user_mii_bus->irq[phy] = irq; in ksz_irq_phy_setup()
2459 while (phy--) in ksz_irq_phy_setup()
2460 if (BIT(phy) & ds->phys_mii_mask) in ksz_irq_phy_setup()
2461 irq_dispose_mapping(ds->user_mii_bus->irq[phy]); in ksz_irq_phy_setup()
2467 * ksz_irq_phy_free - Release IRQ mappings for PHYs in the KSZ device.
2470 * Releases any IRQ mappings previously assigned to active PHYs in the KSZ
2475 struct dsa_switch *ds = dev->ds; in ksz_irq_phy_free()
2479 if (BIT(phy) & ds->phys_mii_mask) in ksz_irq_phy_free()
2480 irq_dispose_mapping(ds->user_mii_bus->irq[phy]); in ksz_irq_phy_free()
2484 * ksz_parse_dt_phy_config - Parse and validate PHY configuration from DT
2491 * `phy-handle` properties are correctly set and that the internal PHYs match
2506 dsa_switch_for_each_user_port(dp, dev->ds) { in ksz_parse_dt_phy_config()
2507 if (!dev->info->internal_phy[dp->index]) in ksz_parse_dt_phy_config()
2510 phy_node = of_parse_phandle(dp->dn, "phy-handle", 0); in ksz_parse_dt_phy_config()
2512 dev_err(dev->dev, "failed to parse phy-handle for port %d.\n", in ksz_parse_dt_phy_config()
2513 dp->index); in ksz_parse_dt_phy_config()
2520 dev_err(dev->dev, "failed to get PHY-parent node for port %d\n", in ksz_parse_dt_phy_config()
2521 dp->index); in ksz_parse_dt_phy_config()
2524 dev_err(dev->dev, "PHY-parent node mismatch for port %d, expected %pOF, got %pOF\n", in ksz_parse_dt_phy_config()
2525 dp->index, mdio_np, phy_parent_node); in ksz_parse_dt_phy_config()
2530 dev_err(dev->dev, "failed to read PHY address for port %d. Error %d\n", in ksz_parse_dt_phy_config()
2531 dp->index, ret); in ksz_parse_dt_phy_config()
2533 } else if (phy_addr != dev->phy_addr_map[dp->index]) { in ksz_parse_dt_phy_config()
2534 dev_err(dev->dev, "PHY address mismatch for port %d, expected 0x%x, got 0x%x\n", in ksz_parse_dt_phy_config()
2535 dp->index, dev->phy_addr_map[dp->index], in ksz_parse_dt_phy_config()
2539 bus->phy_mask |= BIT(phy_addr); in ksz_parse_dt_phy_config()
2548 return -EINVAL; in ksz_parse_dt_phy_config()
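ksz_parse_dt_phy_config() above validates that each user port's phy-handle points to a PHY node underneath the expected MDIO node and that its reg matches the PHY address the driver mapped for that port. A self-contained sketch of that validation pattern, using a hypothetical helper name, might be:

#include <linux/errno.h>
#include <linux/of.h>

/* Hypothetical helper: check that port_np's phy-handle lives under
 * mdio_np and carries the expected "reg" (PHY address).
 */
static int example_validate_phy_handle(struct device_node *port_np,
				       struct device_node *mdio_np,
				       u32 expected_addr)
{
	struct device_node *phy_np, *parent_np;
	u32 addr;
	int ret = -EINVAL;

	phy_np = of_parse_phandle(port_np, "phy-handle", 0);
	if (!phy_np)
		return -EINVAL;

	parent_np = of_get_parent(phy_np);
	if (parent_np == mdio_np &&
	    !of_property_read_u32(phy_np, "reg", &addr) &&
	    addr == expected_addr)
		ret = 0;

	of_node_put(parent_np);
	of_node_put(phy_np);
	return ret;
}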
2554 * ksz_mdio_register - Register and configure the MDIO bus for the KSZ device.
2560 * "mdio-parent-bus" device tree property to directly manage internal PHYs.
2569 struct dsa_switch *ds = dev->ds; in ksz_mdio_register()
2574 mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio"); in ksz_mdio_register()
2578 parent_bus_node = of_parse_phandle(mdio_np, "mdio-parent-bus", 0); in ksz_mdio_register()
2579 if (parent_bus_node && !dev->info->phy_side_mdio_supported) { in ksz_mdio_register()
2580 …dev_err(dev->dev, "Side MDIO bus is not supported for this HW, ignoring 'mdio-parent-bus' property… in ksz_mdio_register()
2581 ret = -EINVAL; in ksz_mdio_register()
2587 ret = -EPROBE_DEFER; in ksz_mdio_register()
2592 dev->parent_mdio_bus = parent_bus; in ksz_mdio_register()
2595 bus = devm_mdiobus_alloc(ds->dev); in ksz_mdio_register()
2597 ret = -ENOMEM; in ksz_mdio_register()
2601 if (dev->dev_ops->mdio_bus_preinit) { in ksz_mdio_register()
2602 ret = dev->dev_ops->mdio_bus_preinit(dev, !!parent_bus); in ksz_mdio_register()
2607 if (dev->dev_ops->create_phy_addr_map) { in ksz_mdio_register()
2608 ret = dev->dev_ops->create_phy_addr_map(dev, !!parent_bus); in ksz_mdio_register()
2612 for (i = 0; i < dev->info->port_cnt; i++) in ksz_mdio_register()
2613 dev->phy_addr_map[i] = i; in ksz_mdio_register()
2616 bus->priv = dev; in ksz_mdio_register()
2618 bus->read = ksz_parent_mdio_read; in ksz_mdio_register()
2619 bus->write = ksz_parent_mdio_write; in ksz_mdio_register()
2620 bus->name = "KSZ side MDIO"; in ksz_mdio_register()
2621 snprintf(bus->id, MII_BUS_ID_SIZE, "ksz-side-mdio-%d", in ksz_mdio_register()
2622 ds->index); in ksz_mdio_register()
2624 bus->read = ksz_sw_mdio_read; in ksz_mdio_register()
2625 bus->write = ksz_sw_mdio_write; in ksz_mdio_register()
2626 bus->name = "ksz user smi"; in ksz_mdio_register()
2627 if (ds->dst->index != 0) { in ksz_mdio_register()
2628 snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d-%d", ds->dst->index, ds->index); in ksz_mdio_register()
2630 snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index); in ksz_mdio_register()
2638 ds->phys_mii_mask = bus->phy_mask; in ksz_mdio_register()
2639 bus->parent = ds->dev; in ksz_mdio_register()
2641 ds->user_mii_bus = bus; in ksz_mdio_register()
2643 if (dev->irq > 0) { in ksz_mdio_register()
2649 ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np); in ksz_mdio_register()
2651 dev_err(ds->dev, "unable to register MDIO bus %s\n", in ksz_mdio_register()
2652 bus->id); in ksz_mdio_register()
2653 if (dev->irq > 0) in ksz_mdio_register()
2668 kirq->masked |= BIT(d->hwirq); in ksz_irq_mask()
2675 kirq->masked &= ~BIT(d->hwirq); in ksz_irq_unmask()
2682 mutex_lock(&kirq->dev->lock_irq); in ksz_irq_bus_lock()
2688 struct ksz_device *dev = kirq->dev; in ksz_irq_bus_sync_unlock()
2691 ret = ksz_write8(dev, kirq->reg_mask, kirq->masked); in ksz_irq_bus_sync_unlock()
2693 dev_err(dev->dev, "failed to change IRQ mask\n"); in ksz_irq_bus_sync_unlock()
2695 mutex_unlock(&dev->lock_irq); in ksz_irq_bus_sync_unlock()
2699 .name = "ksz-irq",
2709 irq_set_chip_data(irq, d->host_data); in ksz_irq_domain_map()
2725 free_irq(kirq->irq_num, kirq); in ksz_irq_free()
2727 for (irq = 0; irq < kirq->nirqs; irq++) { in ksz_irq_free()
2728 virq = irq_find_mapping(kirq->domain, irq); in ksz_irq_free()
2732 irq_domain_remove(kirq->domain); in ksz_irq_free()
2745 dev = kirq->dev; in ksz_irq_thread_fn()
2748 ret = ksz_read8(dev, kirq->reg_status, &data); in ksz_irq_thread_fn()
2752 for (n = 0; n < kirq->nirqs; ++n) { in ksz_irq_thread_fn()
2754 sub_irq = irq_find_mapping(kirq->domain, n); in ksz_irq_thread_fn()
2767 kirq->dev = dev; in ksz_irq_common_setup()
2768 kirq->masked = ~0; in ksz_irq_common_setup()
2770 kirq->domain = irq_domain_add_simple(dev->dev->of_node, kirq->nirqs, 0, in ksz_irq_common_setup()
2772 if (!kirq->domain) in ksz_irq_common_setup()
2773 return -ENOMEM; in ksz_irq_common_setup()
2775 for (n = 0; n < kirq->nirqs; n++) in ksz_irq_common_setup()
2776 irq_create_mapping(kirq->domain, n); in ksz_irq_common_setup()
2778 ret = request_threaded_irq(kirq->irq_num, NULL, ksz_irq_thread_fn, in ksz_irq_common_setup()
2779 IRQF_ONESHOT, kirq->name, kirq); in ksz_irq_common_setup()
2793 struct ksz_irq *girq = &dev->girq; in ksz_girq_setup()
2795 girq->nirqs = dev->info->port_cnt; in ksz_girq_setup()
2796 girq->reg_mask = REG_SW_PORT_INT_MASK__1; in ksz_girq_setup()
2797 girq->reg_status = REG_SW_PORT_INT_STATUS__1; in ksz_girq_setup()
2798 snprintf(girq->name, sizeof(girq->name), "global_port_irq"); in ksz_girq_setup()
2800 girq->irq_num = dev->irq; in ksz_girq_setup()
2807 struct ksz_irq *pirq = &dev->ports[p].pirq; in ksz_pirq_setup()
2809 pirq->nirqs = dev->info->port_nirqs; in ksz_pirq_setup()
2810 pirq->reg_mask = dev->dev_ops->get_port_addr(p, REG_PORT_INT_MASK); in ksz_pirq_setup()
2811 pirq->reg_status = dev->dev_ops->get_port_addr(p, REG_PORT_INT_STATUS); in ksz_pirq_setup()
2812 snprintf(pirq->name, sizeof(pirq->name), "port_irq-%d", p); in ksz_pirq_setup()
2814 pirq->irq_num = irq_find_mapping(dev->girq.domain, p); in ksz_pirq_setup()
2815 if (pirq->irq_num < 0) in ksz_pirq_setup()
2816 return pirq->irq_num; in ksz_pirq_setup()
2825 struct ksz_device *dev = ds->priv; in ksz_setup()
2831 regs = dev->info->regs; in ksz_setup()
2833 dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table), in ksz_setup()
2834 dev->info->num_vlans, GFP_KERNEL); in ksz_setup()
2835 if (!dev->vlan_cache) in ksz_setup()
2836 return -ENOMEM; in ksz_setup()
2838 ret = dev->dev_ops->reset(dev); in ksz_setup()
2840 dev_err(ds->dev, "failed to reset switch\n"); in ksz_setup()
2854 dev->dev_ops->config_cpu_port(ds); in ksz_setup()
2856 dev->dev_ops->enable_stp_addr(dev); in ksz_setup()
2858 ds->num_tx_queues = dev->info->num_tx_queues; in ksz_setup()
2865 ds->configure_vlan_while_not_filtering = false; in ksz_setup()
2866 ds->dscp_prio_mapping_is_global = true; in ksz_setup()
2868 if (dev->dev_ops->setup) { in ksz_setup()
2869 ret = dev->dev_ops->setup(ds); in ksz_setup()
2879 p = &dev->ports[dev->cpu_port]; in ksz_setup()
2880 p->learning = true; in ksz_setup()
2882 if (dev->irq > 0) { in ksz_setup()
2887 dsa_switch_for_each_user_port(dp, dev->ds) { in ksz_setup()
2888 ret = ksz_pirq_setup(dev, dp->index); in ksz_setup()
2892 if (dev->info->ptp_capable) { in ksz_setup()
2893 ret = ksz_ptp_irq_setup(ds, dp->index); in ksz_setup()
2900 if (dev->info->ptp_capable) { in ksz_setup()
2903 dev_err(dev->dev, "Failed to register PTP clock: %d\n", in ksz_setup()
2911 			dev_err(dev->dev, "failed to register the MDIO bus\n"); in ksz_setup()
2926 if (dev->info->ptp_capable) in ksz_setup()
2929 if (dev->irq > 0 && dev->info->ptp_capable) in ksz_setup()
2930 dsa_switch_for_each_user_port(dp, dev->ds) in ksz_setup()
2931 ksz_ptp_irq_free(ds, dp->index); in ksz_setup()
2933 if (dev->irq > 0) in ksz_setup()
2934 dsa_switch_for_each_user_port(dp, dev->ds) in ksz_setup()
2935 ksz_irq_free(&dev->ports[dp->index].pirq); in ksz_setup()
2937 if (dev->irq > 0) in ksz_setup()
2938 ksz_irq_free(&dev->girq); in ksz_setup()
2945 struct ksz_device *dev = ds->priv; in ksz_teardown()
2948 if (dev->info->ptp_capable) in ksz_teardown()
2951 if (dev->irq > 0) { in ksz_teardown()
2952 dsa_switch_for_each_user_port(dp, dev->ds) { in ksz_teardown()
2953 if (dev->info->ptp_capable) in ksz_teardown()
2954 ksz_ptp_irq_free(ds, dp->index); in ksz_teardown()
2956 ksz_irq_free(&dev->ports[dp->index].pirq); in ksz_teardown()
2959 ksz_irq_free(&dev->girq); in ksz_teardown()
2962 if (dev->dev_ops->teardown) in ksz_teardown()
2963 dev->dev_ops->teardown(ds); in ksz_teardown()
2968 struct ksz_port_mib *mib = &dev->ports[port].mib; in port_r_cnt()
2972 while (mib->cnt_ptr < dev->info->reg_mib_cnt) { in port_r_cnt()
2973 dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr, in port_r_cnt()
2974 &mib->counters[mib->cnt_ptr]); in port_r_cnt()
2975 ++mib->cnt_ptr; in port_r_cnt()
2979 dropped = &mib->counters[dev->info->mib_cnt]; in port_r_cnt()
2982 while (mib->cnt_ptr < dev->info->mib_cnt) { in port_r_cnt()
2983 dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr, in port_r_cnt()
2984 dropped, &mib->counters[mib->cnt_ptr]); in port_r_cnt()
2985 ++mib->cnt_ptr; in port_r_cnt()
2987 mib->cnt_ptr = 0; in port_r_cnt()
2998 for (i = 0; i < dev->info->port_cnt; i++) { in ksz_mib_read_work()
2999 if (dsa_is_unused_port(dev->ds, i)) in ksz_mib_read_work()
3002 p = &dev->ports[i]; in ksz_mib_read_work()
3003 mib = &p->mib; in ksz_mib_read_work()
3004 mutex_lock(&mib->cnt_mutex); in ksz_mib_read_work()
3009 if (!p->read) { in ksz_mib_read_work()
3010 const struct dsa_port *dp = dsa_to_port(dev->ds, i); in ksz_mib_read_work()
3012 if (!netif_carrier_ok(dp->user)) in ksz_mib_read_work()
3013 mib->cnt_ptr = dev->info->reg_mib_cnt; in ksz_mib_read_work()
3016 p->read = false; in ksz_mib_read_work()
3018 if (dev->dev_ops->r_mib_stat64) in ksz_mib_read_work()
3019 dev->dev_ops->r_mib_stat64(dev, i); in ksz_mib_read_work()
3021 mutex_unlock(&mib->cnt_mutex); in ksz_mib_read_work()
3024 schedule_delayed_work(&dev->mib_read, dev->mib_read_interval); in ksz_mib_read_work()
3031 INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work); in ksz_init_mib_timer()
3033 for (i = 0; i < dev->info->port_cnt; i++) { in ksz_init_mib_timer()
3034 struct ksz_port_mib *mib = &dev->ports[i].mib; in ksz_init_mib_timer()
3036 dev->dev_ops->port_init_cnt(dev, i); in ksz_init_mib_timer()
3038 mib->cnt_ptr = 0; in ksz_init_mib_timer()
3039 memset(mib->counters, 0, dev->info->mib_cnt * sizeof(u64)); in ksz_init_mib_timer()
3045 struct ksz_device *dev = ds->priv; in ksz_phy_read16()
3049 ret = dev->dev_ops->r_phy(dev, addr, reg, &val); in ksz_phy_read16()
3058 struct ksz_device *dev = ds->priv; in ksz_phy_write16()
3061 ret = dev->dev_ops->w_phy(dev, addr, reg, val); in ksz_phy_write16()
3070 struct ksz_device *dev = ds->priv; in ksz_get_phy_flags()
3072 switch (dev->chip_id) { in ksz_get_phy_flags()
3075 * Port 1 does not work with LinkMD Cable-Testing. in ksz_get_phy_flags()
3091 struct ksz_device *dev = dp->ds->priv; in ksz_phylink_mac_link_down()
3094 dev->ports[dp->index].read = true; in ksz_phylink_mac_link_down()
3096 if (dev->mib_read_interval) in ksz_phylink_mac_link_down()
3097 schedule_delayed_work(&dev->mib_read, 0); in ksz_phylink_mac_link_down()
3102 struct ksz_device *dev = ds->priv; in ksz_sset_count()
3107 return dev->info->mib_cnt; in ksz_sset_count()
3114 struct ksz_device *dev = ds->priv; in ksz_get_ethtool_stats()
3117 mib = &dev->ports[port].mib; in ksz_get_ethtool_stats()
3118 mutex_lock(&mib->cnt_mutex); in ksz_get_ethtool_stats()
3121 if (!netif_carrier_ok(dp->user)) in ksz_get_ethtool_stats()
3122 mib->cnt_ptr = dev->info->reg_mib_cnt; in ksz_get_ethtool_stats()
3124 memcpy(buf, mib->counters, dev->info->mib_cnt * sizeof(u64)); in ksz_get_ethtool_stats()
3125 mutex_unlock(&mib->cnt_mutex); in ksz_get_ethtool_stats()
3150 struct ksz_device *dev = ds->priv; in ksz_port_fast_age()
3152 dev->dev_ops->flush_dyn_mac_table(dev, port); in ksz_port_fast_age()
3157 struct ksz_device *dev = ds->priv; in ksz_set_ageing_time()
3159 if (!dev->dev_ops->set_ageing_time) in ksz_set_ageing_time()
3160 return -EOPNOTSUPP; in ksz_set_ageing_time()
3162 return dev->dev_ops->set_ageing_time(dev, msecs); in ksz_set_ageing_time()
3169 struct ksz_device *dev = ds->priv; in ksz_port_fdb_add()
3171 if (!dev->dev_ops->fdb_add) in ksz_port_fdb_add()
3172 return -EOPNOTSUPP; in ksz_port_fdb_add()
3174 return dev->dev_ops->fdb_add(dev, port, addr, vid, db); in ksz_port_fdb_add()
3181 struct ksz_device *dev = ds->priv; in ksz_port_fdb_del()
3183 if (!dev->dev_ops->fdb_del) in ksz_port_fdb_del()
3184 return -EOPNOTSUPP; in ksz_port_fdb_del()
3186 return dev->dev_ops->fdb_del(dev, port, addr, vid, db); in ksz_port_fdb_del()
3192 struct ksz_device *dev = ds->priv; in ksz_port_fdb_dump()
3194 if (!dev->dev_ops->fdb_dump) in ksz_port_fdb_dump()
3195 return -EOPNOTSUPP; in ksz_port_fdb_dump()
3197 return dev->dev_ops->fdb_dump(dev, port, cb, data); in ksz_port_fdb_dump()
3204 struct ksz_device *dev = ds->priv; in ksz_port_mdb_add()
3206 if (!dev->dev_ops->mdb_add) in ksz_port_mdb_add()
3207 return -EOPNOTSUPP; in ksz_port_mdb_add()
3209 return dev->dev_ops->mdb_add(dev, port, mdb, db); in ksz_port_mdb_add()
3216 struct ksz_device *dev = ds->priv; in ksz_port_mdb_del()
3218 if (!dev->dev_ops->mdb_del) in ksz_port_mdb_del()
3219 return -EOPNOTSUPP; in ksz_port_mdb_del()
3221 return dev->dev_ops->mdb_del(dev, port, mdb, db); in ksz_port_mdb_del()
3230 for (ipm = 0; ipm < dev->info->num_ipms; ipm++) { in ksz9477_set_default_prio_queue_mapping()
3237 queue = ieee8021q_tt_to_tc(ipm, dev->info->num_tx_queues); in ksz9477_set_default_prio_queue_mapping()
3249 struct ksz_device *dev = ds->priv; in ksz_port_setup()
3256 dev->dev_ops->port_setup(dev, port, false); in ksz_port_setup()
3273 struct ksz_device *dev = ds->priv; in ksz_port_stp_state_set()
3278 regs = dev->info->regs; in ksz_port_stp_state_set()
3283 p = &dev->ports[port]; in ksz_port_stp_state_set()
3294 if (!p->learning) in ksz_port_stp_state_set()
3299 if (!p->learning) in ksz_port_stp_state_set()
3306 dev_err(ds->dev, "invalid STP state: %d\n", state); in ksz_port_stp_state_set()
3312 p->stp_state = state; in ksz_port_stp_state_set()
3319 struct ksz_device *dev = ds->priv; in ksz_port_teardown()
3321 switch (dev->chip_id) { in ksz_port_teardown()
3341 return -EINVAL; in ksz_port_pre_bridge_flags()
3350 struct ksz_device *dev = ds->priv; in ksz_port_bridge_flags()
3351 struct ksz_port *p = &dev->ports[port]; in ksz_port_bridge_flags()
3355 p->learning = !!(flags.val & BR_LEARNING); in ksz_port_bridge_flags()
3358 p->isolated = !!(flags.val & BR_ISOLATED); in ksz_port_bridge_flags()
3361 ksz_port_stp_state_set(ds, port, p->stp_state); in ksz_port_bridge_flags()
3371 struct ksz_device *dev = ds->priv; in ksz_get_tag_protocol()
3377 if (dev->chip_id == KSZ88X3_CHIP_ID || in ksz_get_tag_protocol()
3378 dev->chip_id == KSZ8563_CHIP_ID || in ksz_get_tag_protocol()
3379 dev->chip_id == KSZ9893_CHIP_ID || in ksz_get_tag_protocol()
3380 dev->chip_id == KSZ9563_CHIP_ID) in ksz_get_tag_protocol()
3383 if (dev->chip_id == KSZ8567_CHIP_ID || in ksz_get_tag_protocol()
3384 dev->chip_id == KSZ9477_CHIP_ID || in ksz_get_tag_protocol()
3385 dev->chip_id == KSZ9896_CHIP_ID || in ksz_get_tag_protocol()
3386 dev->chip_id == KSZ9897_CHIP_ID || in ksz_get_tag_protocol()
3387 dev->chip_id == KSZ9567_CHIP_ID || in ksz_get_tag_protocol()
3388 dev->chip_id == LAN9646_CHIP_ID) in ksz_get_tag_protocol()
3409 tagger_data->xmit_work_fn = ksz_port_deferred_xmit; in ksz_connect_tag_protocol()
3412 return -EPROTONOSUPPORT; in ksz_connect_tag_protocol()
3419 struct ksz_device *dev = ds->priv; in ksz_port_vlan_filtering()
3421 if (!dev->dev_ops->vlan_filtering) in ksz_port_vlan_filtering()
3422 return -EOPNOTSUPP; in ksz_port_vlan_filtering()
3424 return dev->dev_ops->vlan_filtering(dev, port, flag, extack); in ksz_port_vlan_filtering()
3431 struct ksz_device *dev = ds->priv; in ksz_port_vlan_add()
3433 if (!dev->dev_ops->vlan_add) in ksz_port_vlan_add()
3434 return -EOPNOTSUPP; in ksz_port_vlan_add()
3436 return dev->dev_ops->vlan_add(dev, port, vlan, extack); in ksz_port_vlan_add()
3442 struct ksz_device *dev = ds->priv; in ksz_port_vlan_del()
3444 if (!dev->dev_ops->vlan_del) in ksz_port_vlan_del()
3445 return -EOPNOTSUPP; in ksz_port_vlan_del()
3447 return dev->dev_ops->vlan_del(dev, port, vlan); in ksz_port_vlan_del()
3454 struct ksz_device *dev = ds->priv; in ksz_port_mirror_add()
3456 if (!dev->dev_ops->mirror_add) in ksz_port_mirror_add()
3457 return -EOPNOTSUPP; in ksz_port_mirror_add()
3459 return dev->dev_ops->mirror_add(dev, port, mirror, ingress, extack); in ksz_port_mirror_add()
3465 struct ksz_device *dev = ds->priv; in ksz_port_mirror_del()
3467 if (dev->dev_ops->mirror_del) in ksz_port_mirror_del()
3468 dev->dev_ops->mirror_del(dev, port, mirror); in ksz_port_mirror_del()
3473 struct ksz_device *dev = ds->priv; in ksz_change_mtu()
3475 if (!dev->dev_ops->change_mtu) in ksz_change_mtu()
3476 return -EOPNOTSUPP; in ksz_change_mtu()
3478 return dev->dev_ops->change_mtu(dev, port, mtu); in ksz_change_mtu()
3483 struct ksz_device *dev = ds->priv; in ksz_max_mtu()
3485 switch (dev->chip_id) { in ksz_max_mtu()
3489 return KSZ8795_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN; in ksz_max_mtu()
3493 return KSZ8863_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN; in ksz_max_mtu()
3508 return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN; in ksz_max_mtu()
3511 return -EOPNOTSUPP; in ksz_max_mtu()
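As a worked example of the arithmetic above: VLAN_ETH_HLEN is 18 bytes and ETH_FCS_LEN is 4 bytes, so for a hypothetical 9000-byte maximum frame size (the real value comes from the chip-specific constant) the reported maximum MTU would be:

/* MTU = max_frame - VLAN_ETH_HLEN - ETH_FCS_LEN
 *     = 9000      - 18            - 4            = 8978 bytes
 */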
3515 * ksz_support_eee - Determine Energy Efficient Ethernet (EEE) support for a
3530 struct ksz_device *dev = ds->priv; in ksz_support_eee()
3532 if (!dev->info->internal_phy[port]) in ksz_support_eee()
3535 switch (dev->chip_id) { in ksz_support_eee()
3559 * controls. If not disabled, the PHY ports can auto-negotiate in ksz_support_eee()
3574 struct ksz_device *dev = ds->priv; in ksz_set_mac_eee()
3576 if (!e->tx_lpi_enabled) { in ksz_set_mac_eee()
3577 dev_err(dev->dev, "Disabling EEE Tx LPI is not supported\n"); in ksz_set_mac_eee()
3578 return -EINVAL; in ksz_set_mac_eee()
3581 if (e->tx_lpi_timer) { in ksz_set_mac_eee()
3582 dev_err(dev->dev, "Setting EEE Tx LPI timer is not supported\n"); in ksz_set_mac_eee()
3583 return -EINVAL; in ksz_set_mac_eee()
3592 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_set_xmii()
3593 struct ksz_port *p = &dev->ports[port]; in ksz_set_xmii()
3594 const u16 *regs = dev->info->regs; in ksz_set_xmii()
3617 /* On KSZ9893, disable RGMII in-band status support */ in ksz_set_xmii()
3618 if (dev->chip_id == KSZ9893_CHIP_ID || in ksz_set_xmii()
3619 dev->chip_id == KSZ8563_CHIP_ID || in ksz_set_xmii()
3620 dev->chip_id == KSZ9563_CHIP_ID || in ksz_set_xmii()
3625 dev_err(dev->dev, "Unsupported interface '%s' for port %d\n", in ksz_set_xmii()
3630 if (p->rgmii_tx_val) in ksz_set_xmii()
3633 if (p->rgmii_rx_val) in ksz_set_xmii()
3642 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_get_xmii()
3643 const u16 *regs = dev->info->regs; in ksz_get_xmii()
3678 struct ksz_device *dev = dp->ds->priv; in ksz88x3_phylink_mac_config()
3680 dev->ports[dp->index].manual_flow = !(state->pause & MLO_PAUSE_AN); in ksz88x3_phylink_mac_config()
3688 struct ksz_device *dev = dp->ds->priv; in ksz_phylink_mac_config()
3689 int port = dp->index; in ksz_phylink_mac_config()
3692 if (dev->info->internal_phy[port]) in ksz_phylink_mac_config()
3696 dev_err(dev->dev, "In-band AN not supported!\n"); in ksz_phylink_mac_config()
3700 ksz_set_xmii(dev, port, state->interface); in ksz_phylink_mac_config()
3702 if (dev->dev_ops->setup_rgmii_delay) in ksz_phylink_mac_config()
3703 dev->dev_ops->setup_rgmii_delay(dev, port); in ksz_phylink_mac_config()
3708 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_get_gbit()
3709 const u16 *regs = dev->info->regs; in ksz_get_gbit()
3726 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_set_gbit()
3727 const u16 *regs = dev->info->regs; in ksz_set_gbit()
3745 const u8 *bitval = dev->info->xmii_ctrl0; in ksz_set_100_10mbit()
3746 const u16 *regs = dev->info->regs; in ksz_set_100_10mbit()
3776 const u8 *bitval = dev->info->xmii_ctrl0; in ksz_duplex_flowctrl()
3777 const u32 *masks = dev->info->masks; in ksz_duplex_flowctrl()
3778 const u16 *regs = dev->info->regs; in ksz_duplex_flowctrl()
3807 struct ksz_device *dev = dp->ds->priv; in ksz9477_phylink_mac_link_up()
3808 int port = dp->index; in ksz9477_phylink_mac_link_up()
3811 p = &dev->ports[port]; in ksz9477_phylink_mac_link_up()
3814 if (dev->info->internal_phy[port]) in ksz9477_phylink_mac_link_up()
3817 p->phydev.speed = speed; in ksz9477_phylink_mac_link_up()
3844 dev->chip_id = KSZ8795_CHIP_ID; in ksz_switch_detect()
3848 dev->chip_id = KSZ8765_CHIP_ID; in ksz_switch_detect()
3850 dev->chip_id = KSZ8794_CHIP_ID; in ksz_switch_detect()
3852 return -ENODEV; in ksz_switch_detect()
3857 dev->chip_id = KSZ88X3_CHIP_ID; in ksz_switch_detect()
3859 return -ENODEV; in ksz_switch_detect()
3864 dev->chip_id = KSZ8895_CHIP_ID; in ksz_switch_detect()
3866 return -ENODEV; in ksz_switch_detect()
3871 dev->chip_id = KSZ8864_CHIP_ID; in ksz_switch_detect()
3878 dev->chip_rev = FIELD_GET(SW_REV_ID_M, id32); in ksz_switch_detect()
3894 if (dev->chip_id != LAN9646_CHIP_ID) in ksz_switch_detect()
3895 dev->chip_id = id32; in ksz_switch_detect()
3904 dev->chip_id = KSZ8563_CHIP_ID; in ksz_switch_detect()
3906 dev->chip_id = KSZ9563_CHIP_ID; in ksz_switch_detect()
3908 dev->chip_id = KSZ9893_CHIP_ID; in ksz_switch_detect()
3912 dev_err(dev->dev, in ksz_switch_detect()
3914 return -ENODEV; in ksz_switch_detect()
3923 struct ksz_device *dev = ds->priv; in ksz_cls_flower_add()
3925 switch (dev->chip_id) { in ksz_cls_flower_add()
3938 return -EOPNOTSUPP; in ksz_cls_flower_add()
3944 struct ksz_device *dev = ds->priv; in ksz_cls_flower_del()
3946 switch (dev->chip_id) { in ksz_cls_flower_del()
3959 return -EOPNOTSUPP; in ksz_cls_flower_del()
3963  * is converted to hexadecimal using the successive multiplication method. On
3974 txrate = idle_slope - send_slope; in cinc_cal()
3977 return -EINVAL; in cinc_cal()
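The cinc_cal() comment above refers to the "successive multiplication" method: a fraction is converted to hexadecimal digits by repeatedly multiplying by 16 and taking the integer part as the next digit. A small, generic illustration (not the driver's exact cinc_cal(), and assuming numerator < denominator with no overflow) is:

#include <linux/types.h>

/* Convert numerator/denominator (< 1) into 'digits' hex digits of a
 * fixed-point fraction, e.g. 1/4 over 4 digits -> 0x4000 (0.25 in Q0.16).
 */
static u32 example_fraction_to_hex(u32 numerator, u32 denominator, int digits)
{
	u32 result = 0;
	int i;

	for (i = 0; i < digits; i++) {
		numerator *= 16;			/* shift next digit up */
		result = (result << 4) | (numerator / denominator);
		numerator %= denominator;		/* keep the remainder */
	}

	return result;
}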
4008 struct ksz_device *dev = ds->priv; in ksz_setup_tc_cbs()
4012 if (!dev->info->tc_cbs_supported) in ksz_setup_tc_cbs()
4013 return -EOPNOTSUPP; in ksz_setup_tc_cbs()
4015 if (qopt->queue > dev->info->num_tx_queues) in ksz_setup_tc_cbs()
4016 return -EINVAL; in ksz_setup_tc_cbs()
4019 ret = ksz_pwrite32(dev, port, REG_PORT_MTI_QUEUE_INDEX__4, qopt->queue); in ksz_setup_tc_cbs()
4023 if (!qopt->enable) in ksz_setup_tc_cbs()
4027 /* High Credit */ in ksz_setup_tc_cbs()
4029 qopt->hicredit); in ksz_setup_tc_cbs()
4035 qopt->locredit); in ksz_setup_tc_cbs()
4040 ret = cinc_cal(qopt->idleslope, qopt->sendslope, &bw); in ksz_setup_tc_cbs()
4044 if (dev->dev_ops->tc_cbs_set_cinc) { in ksz_setup_tc_cbs()
4045 ret = dev->dev_ops->tc_cbs_set_cinc(dev, port, bw); in ksz_setup_tc_cbs()
4061 for (queue = 0; queue < dev->info->num_tx_queues; queue++) { in ksz_disable_egress_rate_limit()
4078 return p->bands - 1 - band; in ksz_ets_band_to_queue()
4127 for (band = 0; band < p->bands; band++) { in ksz_tc_ets_add()
4139 for (tc_prio = 0; tc_prio < ARRAY_SIZE(p->priomap); tc_prio++) { in ksz_tc_ets_add()
4142 if (tc_prio >= dev->info->num_ipms) in ksz_tc_ets_add()
4145 queue = ksz_ets_band_to_queue(p, p->priomap[tc_prio]); in ksz_tc_ets_add()
4159 for (queue = 0; queue < dev->info->num_tx_queues; queue++) { in ksz_tc_ets_del()
4166 /* Revert the queue mapping for TC-priority to its default setting on in ksz_tc_ets_del()
4180 if (p->bands != dev->info->num_tx_queues) { in ksz_tc_ets_validate()
4181 		dev_err(dev->dev, "Unsupported number of bands, should be %d\n", in ksz_tc_ets_validate()
4182 dev->info->num_tx_queues); in ksz_tc_ets_validate()
4183 return -EOPNOTSUPP; in ksz_tc_ets_validate()
4186 for (band = 0; band < p->bands; ++band) { in ksz_tc_ets_validate()
4199 if (p->quanta[band]) { in ksz_tc_ets_validate()
4200 dev_err(dev->dev, "Quanta/weights configuration is not supported.\n"); in ksz_tc_ets_validate()
4201 return -EOPNOTSUPP; in ksz_tc_ets_validate()
4211 struct ksz_device *dev = ds->priv; in ksz_tc_setup_qdisc_ets()
4215 return -EOPNOTSUPP; in ksz_tc_setup_qdisc_ets()
4217 if (qopt->parent != TC_H_ROOT) { in ksz_tc_setup_qdisc_ets()
4218 dev_err(dev->dev, "Parent should be \"root\"\n"); in ksz_tc_setup_qdisc_ets()
4219 return -EOPNOTSUPP; in ksz_tc_setup_qdisc_ets()
4222 switch (qopt->command) { in ksz_tc_setup_qdisc_ets()
4224 ret = ksz_tc_ets_validate(dev, port, &qopt->replace_params); in ksz_tc_setup_qdisc_ets()
4228 return ksz_tc_ets_add(dev, port, &qopt->replace_params); in ksz_tc_setup_qdisc_ets()
4233 return -EOPNOTSUPP; in ksz_tc_setup_qdisc_ets()
4236 return -EOPNOTSUPP; in ksz_tc_setup_qdisc_ets()
4248 return -EOPNOTSUPP; in ksz_setup_tc()
4253 * ksz_handle_wake_reason - Handle wake reason on a specified port.
4257 * This function reads the PME (Power Management Event) status register of a
4260 * "Magic Packet", "Link Up", or "Energy Detect" event. The PME status register
4267 const struct ksz_dev_ops *ops = dev->dev_ops; in ksz_handle_wake_reason()
4268 const u16 *regs = dev->info->regs; in ksz_handle_wake_reason()
4272 ret = ops->pme_pread8(dev, port, regs[REG_PORT_PME_STATUS], in ksz_handle_wake_reason()
4280 dev_dbg(dev->dev, "Wake event on port %d due to:%s%s%s\n", port, in ksz_handle_wake_reason()
4285 return ops->pme_pwrite8(dev, port, regs[REG_PORT_PME_STATUS], in ksz_handle_wake_reason()
4290 * ksz_get_wol - Get Wake-on-LAN settings for a specified port.
4293 * @wol: Pointer to ethtool Wake-on-LAN settings structure.
4295 * This function checks the device PME wakeup_source flag and chip_id.
4296 * If enabled and supported, it sets the supported and active WoL
4302 struct ksz_device *dev = ds->priv; in ksz_get_wol()
4303 const u16 *regs = dev->info->regs; in ksz_get_wol()
4310 if (!dev->wakeup_source) in ksz_get_wol()
4313 wol->supported = WAKE_PHY; in ksz_get_wol()
4319 if (ksz_is_port_mac_global_usable(dev->ds, port)) in ksz_get_wol()
4320 wol->supported |= WAKE_MAGIC; in ksz_get_wol()
4322 ret = dev->dev_ops->pme_pread8(dev, port, regs[REG_PORT_PME_CTRL], in ksz_get_wol()
4328 wol->wolopts |= WAKE_MAGIC; in ksz_get_wol()
4330 wol->wolopts |= WAKE_PHY; in ksz_get_wol()
4334 * ksz_set_wol - Set Wake-on-LAN settings for a specified port.
4337 * @wol: Pointer to ethtool Wake-on-LAN settings structure.
4339 * This function configures Wake-on-LAN (WoL) settings for a specified
4340 * port. It validates the provided WoL options, checks if PME is
4342 * the Magic Packet flag in the port's PME control register if
4351 struct ksz_device *dev = ds->priv; in ksz_set_wol()
4352 const u16 *regs = dev->info->regs; in ksz_set_wol()
4357 if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) in ksz_set_wol()
4358 return -EINVAL; in ksz_set_wol()
4361 return -EOPNOTSUPP; in ksz_set_wol()
4363 if (!dev->wakeup_source) in ksz_set_wol()
4364 return -EOPNOTSUPP; in ksz_set_wol()
4370 if (wol->wolopts & WAKE_MAGIC) in ksz_set_wol()
4372 if (wol->wolopts & WAKE_PHY) in ksz_set_wol()
4375 ret = dev->dev_ops->pme_pread8(dev, port, regs[REG_PORT_PME_CTRL], in ksz_set_wol()
4392 ret = ksz_switch_macaddr_get(dev->ds, port, NULL); in ksz_set_wol()
4396 ksz_switch_macaddr_put(dev->ds); in ksz_set_wol()
4399 ret = dev->dev_ops->pme_pwrite8(dev, port, regs[REG_PORT_PME_CTRL], in ksz_set_wol()
4403 ksz_switch_macaddr_put(dev->ds); in ksz_set_wol()
4411 * ksz_wol_pre_shutdown - Prepares the switch device for shutdown while
4412 * considering Wake-on-LAN (WoL) settings.
4418 * into account the Wake-on-LAN (WoL) settings on the user ports. It updates
4419 * the wol_enabled flag accordingly to reflect whether WoL is active on any
4424 const struct ksz_dev_ops *ops = dev->dev_ops; in ksz_wol_pre_shutdown()
4425 const u16 *regs = dev->info->regs; in ksz_wol_pre_shutdown()
4435 if (!dev->wakeup_source) in ksz_wol_pre_shutdown()
4438 dsa_switch_for_each_user_port(dp, dev->ds) { in ksz_wol_pre_shutdown()
4441 ret = ops->pme_pread8(dev, dp->index, in ksz_wol_pre_shutdown()
4449 ksz_handle_wake_reason(dev, dp->index); in ksz_wol_pre_shutdown()
4452 	/* Now we are safe to enable the PME pin. */ in ksz_wol_pre_shutdown()
4454 if (dev->pme_active_high) in ksz_wol_pre_shutdown()
4456 ops->pme_write8(dev, regs[REG_SW_PME_CTRL], pme_pin_en); in ksz_wol_pre_shutdown()
4468 if (dp->hsr_dev) { in ksz_port_set_mac_address()
4469 dev_err(ds->dev, in ksz_port_set_mac_address()
4470 "Cannot change MAC address on port %d with active HSR offload\n", in ksz_port_set_mac_address()
4472 return -EBUSY; in ksz_port_set_mac_address()
4480 ksz_get_wol(ds, dp->index, &wol); in ksz_port_set_mac_address()
4482 dev_err(ds->dev, in ksz_port_set_mac_address()
4483 "Cannot change MAC address on port %d with active Wake on Magic Packet\n", in ksz_port_set_mac_address()
4485 return -EBUSY; in ksz_port_set_mac_address()
4492 * ksz_is_port_mac_global_usable - Check if the MAC address on a given port
4505 struct net_device *user = dsa_to_port(ds, port)->user; in ksz_is_port_mac_global_usable()
4506 const unsigned char *addr = user->dev_addr; in ksz_is_port_mac_global_usable()
4508 struct ksz_device *dev = ds->priv; in ksz_is_port_mac_global_usable()
4512 switch_macaddr = dev->switch_macaddr; in ksz_is_port_mac_global_usable()
4513 if (switch_macaddr && !ether_addr_equal(switch_macaddr->addr, addr)) in ksz_is_port_mac_global_usable()
4520 * ksz_switch_macaddr_get - Program the switch's MAC address register.
4527 * multiple features like HSR self-address filtering and WoL. Other user ports
4537 struct net_device *user = dsa_to_port(ds, port)->user; in ksz_switch_macaddr_get()
4538 const unsigned char *addr = user->dev_addr; in ksz_switch_macaddr_get()
4540 struct ksz_device *dev = ds->priv; in ksz_switch_macaddr_get()
4541 const u16 *regs = dev->info->regs; in ksz_switch_macaddr_get()
4547 switch_macaddr = dev->switch_macaddr; in ksz_switch_macaddr_get()
4549 if (!ether_addr_equal(switch_macaddr->addr, addr)) { in ksz_switch_macaddr_get()
4552 switch_macaddr->addr); in ksz_switch_macaddr_get()
4553 return -EBUSY; in ksz_switch_macaddr_get()
4556 refcount_inc(&switch_macaddr->refcount); in ksz_switch_macaddr_get()
4562 return -ENOMEM; in ksz_switch_macaddr_get()
4564 ether_addr_copy(switch_macaddr->addr, addr); in ksz_switch_macaddr_get()
4565 refcount_set(&switch_macaddr->refcount, 1); in ksz_switch_macaddr_get()
4566 dev->switch_macaddr = switch_macaddr; in ksz_switch_macaddr_get()
4578 dev->switch_macaddr = NULL; in ksz_switch_macaddr_get()
4579 refcount_set(&switch_macaddr->refcount, 0); in ksz_switch_macaddr_get()
4588 struct ksz_device *dev = ds->priv; in ksz_switch_macaddr_put()
4589 const u16 *regs = dev->info->regs; in ksz_switch_macaddr_put()
4595 switch_macaddr = dev->switch_macaddr; in ksz_switch_macaddr_put()
4596 if (!refcount_dec_and_test(&switch_macaddr->refcount)) in ksz_switch_macaddr_put()
4602 dev->switch_macaddr = NULL; in ksz_switch_macaddr_put()
4609 struct ksz_device *dev = ds->priv; in ksz_hsr_join()
4617 if (dev->chip_id != KSZ9477_CHIP_ID) { in ksz_hsr_join()
4619 return -EOPNOTSUPP; in ksz_hsr_join()
4623 if (dev->hsr_dev && hsr != dev->hsr_dev) { in ksz_hsr_join()
4625 return -EOPNOTSUPP; in ksz_hsr_join()
4631 return -EOPNOTSUPP; in ksz_hsr_join()
4635 if (hweight8(dev->hsr_ports) >= 2) { in ksz_hsr_join()
4637 "Cannot offload more than two ports - using software HSR"); in ksz_hsr_join()
4638 return -EOPNOTSUPP; in ksz_hsr_join()
4649 dev->hsr_dev = hsr; in ksz_hsr_join()
4650 dev->hsr_ports |= BIT(port); in ksz_hsr_join()
4658 struct ksz_device *dev = ds->priv; in ksz_hsr_leave()
4660 WARN_ON(dev->chip_id != KSZ9477_CHIP_ID); in ksz_hsr_leave()
4663 dev->hsr_ports &= ~BIT(port); in ksz_hsr_leave()
4664 if (!dev->hsr_ports) in ksz_hsr_leave()
4665 dev->hsr_dev = NULL; in ksz_hsr_leave()
4674 struct ksz_device *dev = ds->priv; in ksz_suspend()
4676 cancel_delayed_work_sync(&dev->mib_read); in ksz_suspend()
4682 struct ksz_device *dev = ds->priv; in ksz_resume()
4684 if (dev->mib_read_interval) in ksz_resume()
4685 schedule_delayed_work(&dev->mib_read, dev->mib_read_interval); in ksz_resume()
4759 ds->dev = base; in ksz_switch_alloc()
4760 ds->num_ports = DSA_MAX_PORTS; in ksz_switch_alloc()
4761 ds->ops = &ksz_switch_ops; in ksz_switch_alloc()
4767 ds->priv = swdev; in ksz_switch_alloc()
4768 swdev->dev = base; in ksz_switch_alloc()
4770 swdev->ds = ds; in ksz_switch_alloc()
4771 swdev->priv = priv; in ksz_switch_alloc()
4778 * ksz_switch_shutdown - Shutdown routine for the switch device.
4793 if (dev->dev_ops->reset && !wol_enabled) in ksz_switch_shutdown()
4794 dev->dev_ops->reset(dev); in ksz_switch_shutdown()
4796 dsa_switch_shutdown(dev->ds); in ksz_switch_shutdown()
4803 phy_interface_t phy_mode = dev->ports[port_num].interface; in ksz_parse_rgmii_delay()
4804 int rx_delay = -1, tx_delay = -1; in ksz_parse_rgmii_delay()
4809 of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay); in ksz_parse_rgmii_delay()
4810 of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay); in ksz_parse_rgmii_delay()
4812 if (rx_delay == -1 && tx_delay == -1) { in ksz_parse_rgmii_delay()
4813 dev_warn(dev->dev, in ksz_parse_rgmii_delay()
4814 "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, " in ksz_parse_rgmii_delay()
4815 "please update device tree to specify \"rx-internal-delay-ps\" and " in ksz_parse_rgmii_delay()
4816 "\"tx-internal-delay-ps\"", in ksz_parse_rgmii_delay()
4833 dev->ports[port_num].rgmii_rx_val = rx_delay; in ksz_parse_rgmii_delay()
4834 dev->ports[port_num].rgmii_tx_val = tx_delay; in ksz_parse_rgmii_delay()
4838 * ksz_drive_strength_to_reg() - Convert drive strength value to corresponding
4848 * is returned. Otherwise, -EINVAL is returned indicating an invalid value.
4860 return -EINVAL; in ksz_drive_strength_to_reg()
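ksz_drive_strength_to_reg() above maps a requested drive strength in microamps onto a register value via per-chip tables such as ksz9477_drive_strengths and ksz88x3_drive_strengths. A hypothetical sketch of that table-lookup pattern (field names and table values here are illustrative, not the driver's actual struct ksz_drive_strength contents):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative mapping entry: requested current vs. register encoding. */
struct example_drive_strength {
	int microamp;	/* requested strength from DT, in microamps */
	u8 reg_val;	/* value programmed into the strength field */
};

static const struct example_drive_strength example_strengths[] = {
	{ 4000,  0x0 },	/* placeholder values, not taken from a datasheet */
	{ 8000,  0x1 },
	{ 16000, 0x2 },
};

static int example_strength_to_reg(int microamp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_strengths); i++)
		if (example_strengths[i].microamp == microamp)
			return example_strengths[i].reg_val;

	return -EINVAL;	/* unsupported value; caller reports the valid ones */
}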
4864 * ksz_drive_strength_error() - Report invalid drive strength value
4895 remaining_size -= added_len; in ksz_drive_strength_error()
4898 dev_err(dev->dev, "Invalid drive strength %d, supported values are %s\n", in ksz_drive_strength_error()
4903 * ksz9477_drive_strength_write() - Set the drive strength for specific KSZ9477
4910 * based on the provided properties. It handles chip-specific nuances and
4924 if (props[KSZ_DRIVER_STRENGTH_IO].value != -1) in ksz9477_drive_strength_write()
4925 dev_warn(dev->dev, "%s is not supported by this chip variant\n", in ksz9477_drive_strength_write()
4928 if (dev->chip_id == KSZ8795_CHIP_ID || in ksz9477_drive_strength_write()
4929 dev->chip_id == KSZ8794_CHIP_ID || in ksz9477_drive_strength_write()
4930 dev->chip_id == KSZ8765_CHIP_ID) in ksz9477_drive_strength_write()
4936 if (props[i].value == -1) in ksz9477_drive_strength_write()
4955 * ksz88x3_drive_strength_write() - Set the drive strength configuration for
4977 if (props[i].value == -1 || i == KSZ_DRIVER_STRENGTH_IO) in ksz88x3_drive_strength_write()
4980 dev_warn(dev->dev, "%s is not supported by this chip variant\n", in ksz88x3_drive_strength_write()
4998 * ksz_parse_drive_strength() - Extract and apply drive strength configurations
5013 .name = "microchip,hi-drive-strength-microamp", in ksz_parse_drive_strength()
5015 .value = -1, in ksz_parse_drive_strength()
5018 .name = "microchip,lo-drive-strength-microamp", in ksz_parse_drive_strength()
5020 .value = -1, in ksz_parse_drive_strength()
5023 .name = "microchip,io-drive-strength-microamp", in ksz_parse_drive_strength()
5025 .value = -1, in ksz_parse_drive_strength()
5028 struct device_node *np = dev->dev->of_node; in ksz_parse_drive_strength()
5035 if (ret && ret != -EINVAL) in ksz_parse_drive_strength()
5036 dev_warn(dev->dev, "Failed to read %s\n", in ksz_parse_drive_strength()
5047 switch (dev->chip_id) { in ksz_parse_drive_strength()
5067 if (of_props[i].value == -1) in ksz_parse_drive_strength()
5070 dev_warn(dev->dev, "%s is not supported by this chip variant\n", in ksz_parse_drive_strength()
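ksz_parse_drive_strength() above pre-sets each property's value to -1 and warns only when of_property_read_u32() fails with something other than -EINVAL, which is the "property not present" case. A small sketch of that optional-property pattern, with a hypothetical helper name:

#include <linux/of.h>
#include <linux/printk.h>

/* Read an optional u32 property; leave *value at -1 when it is absent and
 * warn only on genuine parse errors (anything other than -EINVAL).
 */
static void example_read_optional_u32(struct device_node *np,
				      const char *name, int *value)
{
	u32 val;
	int ret;

	*value = -1;
	ret = of_property_read_u32(np, name, &val);
	if (!ret)
		*value = val;
	else if (ret != -EINVAL)
		pr_warn("failed to read %s\n", name);
}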
5087 dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset", in ksz_switch_register()
5089 if (IS_ERR(dev->reset_gpio)) in ksz_switch_register()
5090 return PTR_ERR(dev->reset_gpio); in ksz_switch_register()
5092 if (dev->reset_gpio) { in ksz_switch_register()
5093 gpiod_set_value_cansleep(dev->reset_gpio, 1); in ksz_switch_register()
5095 gpiod_set_value_cansleep(dev->reset_gpio, 0); in ksz_switch_register()
5099 mutex_init(&dev->dev_mutex); in ksz_switch_register()
5100 mutex_init(&dev->regmap_mutex); in ksz_switch_register()
5101 mutex_init(&dev->alu_mutex); in ksz_switch_register()
5102 mutex_init(&dev->vlan_mutex); in ksz_switch_register()
5108 info = ksz_lookup_info(dev->chip_id); in ksz_switch_register()
5110 return -ENODEV; in ksz_switch_register()
5113 dev->info = info; in ksz_switch_register()
5115 dev_info(dev->dev, "found switch: %s, rev %i\n", in ksz_switch_register()
5116 dev->info->dev_name, dev->chip_rev); in ksz_switch_register()
5122 dev->dev_ops = dev->info->ops; in ksz_switch_register()
5124 ret = dev->dev_ops->init(dev); in ksz_switch_register()
5128 dev->ports = devm_kzalloc(dev->dev, in ksz_switch_register()
5129 dev->info->port_cnt * sizeof(struct ksz_port), in ksz_switch_register()
5131 if (!dev->ports) in ksz_switch_register()
5132 return -ENOMEM; in ksz_switch_register()
5134 for (i = 0; i < dev->info->port_cnt; i++) { in ksz_switch_register()
5135 spin_lock_init(&dev->ports[i].mib.stats64_lock); in ksz_switch_register()
5136 mutex_init(&dev->ports[i].mib.cnt_mutex); in ksz_switch_register()
5137 dev->ports[i].mib.counters = in ksz_switch_register()
5138 devm_kzalloc(dev->dev, in ksz_switch_register()
5139 sizeof(u64) * (dev->info->mib_cnt + 1), in ksz_switch_register()
5141 if (!dev->ports[i].mib.counters) in ksz_switch_register()
5142 return -ENOMEM; in ksz_switch_register()
5144 dev->ports[i].ksz_dev = dev; in ksz_switch_register()
5145 dev->ports[i].num = i; in ksz_switch_register()
5149 dev->ds->num_ports = dev->info->port_cnt; in ksz_switch_register()
5152 dev->ds->phylink_mac_ops = dev->info->phylink_mac_ops; in ksz_switch_register()
5157 for (port_num = 0; port_num < dev->info->port_cnt; ++port_num) in ksz_switch_register()
5158 dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA; in ksz_switch_register()
5159 if (dev->dev->of_node) { in ksz_switch_register()
5160 ret = of_get_phy_mode(dev->dev->of_node, &interface); in ksz_switch_register()
5162 dev->compat_interface = interface; in ksz_switch_register()
5163 ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports"); in ksz_switch_register()
5165 ports = of_get_child_by_name(dev->dev->of_node, "ports"); in ksz_switch_register()
5171 if (!(dev->port_mask & BIT(port_num))) { in ksz_switch_register()
5173 return -EINVAL; in ksz_switch_register()
5176 &dev->ports[port_num].interface); in ksz_switch_register()
5182 dev->synclko_125 = of_property_read_bool(dev->dev->of_node, in ksz_switch_register()
5183 "microchip,synclko-125"); in ksz_switch_register()
5184 dev->synclko_disable = of_property_read_bool(dev->dev->of_node, in ksz_switch_register()
5185 "microchip,synclko-disable"); in ksz_switch_register()
5186 if (dev->synclko_125 && dev->synclko_disable) { in ksz_switch_register()
5187 dev_err(dev->dev, "inconsistent synclko settings\n"); in ksz_switch_register()
5188 return -EINVAL; in ksz_switch_register()
5191 dev->wakeup_source = of_property_read_bool(dev->dev->of_node, in ksz_switch_register()
5192 "wakeup-source"); in ksz_switch_register()
5193 dev->pme_active_high = of_property_read_bool(dev->dev->of_node, in ksz_switch_register()
5194 "microchip,pme-active-high"); in ksz_switch_register()
5197 ret = dsa_register_switch(dev->ds); in ksz_switch_register()
5199 dev->dev_ops->exit(dev); in ksz_switch_register()
5204 dev->mib_read_interval = msecs_to_jiffies(5000); in ksz_switch_register()
5207 schedule_delayed_work(&dev->mib_read, 0); in ksz_switch_register()
5216 if (dev->mib_read_interval) { in ksz_switch_remove()
5217 dev->mib_read_interval = 0; in ksz_switch_remove()
5218 cancel_delayed_work_sync(&dev->mib_read); in ksz_switch_remove()
5221 dev->dev_ops->exit(dev); in ksz_switch_remove()
5222 dsa_unregister_switch(dev->ds); in ksz_switch_remove()
5224 if (dev->reset_gpio) in ksz_switch_remove()
5225 gpiod_set_value_cansleep(dev->reset_gpio, 1); in ksz_switch_remove()
5235 return dsa_switch_suspend(priv->ds); in ksz_switch_suspend()
5243 return dsa_switch_resume(priv->ds); in ksz_switch_resume()