Lines Matching +full:speed +full:- +full:bin

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
18 #include <linux/pcs/pcs-xpcs.h>
77 vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries; in sja1105_is_vlan_configured()
78 count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count; in sja1105_is_vlan_configured()
85 return -1; in sja1105_is_vlan_configured()
90 struct sja1105_private *priv = ds->priv; in sja1105_drop_untagged()
93 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_drop_untagged()
108 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_pvid_apply()
123 struct sja1105_private *priv = ds->priv; in sja1105_commit_pvid()
130 pvid = priv->bridge_pvid[port]; in sja1105_commit_pvid()
132 pvid = priv->tag_8021q_pvid[port]; in sja1105_commit_pvid()
139 * VLAN-aware bridge. When the tag_8021q pvid is used, we are in sja1105_commit_pvid()
145 if (pvid == priv->bridge_pvid[port]) { in sja1105_commit_pvid()
146 vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries; in sja1105_commit_pvid()
164 * Every queue i holds top[i] - base[i] frames. in sja1105_init_mac_settings()
165 * Sum of top[i] - base[i] is 511 (max hardware limit). in sja1105_init_mac_settings()
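To make that constraint concrete, here is a hypothetical even split of the frame budget (illustrative only, not the driver's actual default base/top values):

	/* Hypothetical even partition: each of the 8 priority queues holds
	 * 63 frames, i.e. base[i] = 64 * i and top[i] = base[i] + 63,
	 * so the sum of top[i] - base[i] is 8 * 63 = 504, within the 511 limit.
	 */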
172 /* Always put the MAC speed in automatic mode, where it can be in sja1105_init_mac_settings()
175 .speed = priv->info->port_speed[SJA1105_SPEED_AUTO], in sja1105_init_mac_settings()
176 /* No static correction for 1-step 1588 events */ in sja1105_init_mac_settings()
188 /* Don't drop double-tagged traffic */ in sja1105_init_mac_settings()
194 /* Disable learning and I/O on user ports by default - in sja1105_init_mac_settings()
202 struct dsa_switch *ds = priv->ds; in sja1105_init_mac_settings()
206 table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG]; in sja1105_init_mac_settings()
209 if (table->entry_count) { in sja1105_init_mac_settings()
210 kfree(table->entries); in sja1105_init_mac_settings()
211 table->entry_count = 0; in sja1105_init_mac_settings()
214 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_mac_settings()
215 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_mac_settings()
216 if (!table->entries) in sja1105_init_mac_settings()
217 return -ENOMEM; in sja1105_init_mac_settings()
219 table->entry_count = table->ops->max_entry_count; in sja1105_init_mac_settings()
221 mac = table->entries; in sja1105_init_mac_settings()
223 list_for_each_entry(dp, &ds->dst->ports, list) { in sja1105_init_mac_settings()
224 if (dp->ds != ds) in sja1105_init_mac_settings()
227 mac[dp->index] = default_mac; in sja1105_init_mac_settings()
230 * enabled for the DSA ports. CPU ports use software-assisted in sja1105_init_mac_settings()
233 * CPU ports in a cross-chip topology if multiple CPU ports in sja1105_init_mac_settings()
237 dp->learning = true; in sja1105_init_mac_settings()
243 mac[dp->index].drpuntag = true; in sja1105_init_mac_settings()
251 struct device *dev = &priv->spidev->dev; in sja1105_init_mii_settings()
253 struct dsa_switch *ds = priv->ds; in sja1105_init_mii_settings()
257 table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS]; in sja1105_init_mii_settings()
260 if (table->entry_count) { in sja1105_init_mii_settings()
261 kfree(table->entries); in sja1105_init_mii_settings()
262 table->entry_count = 0; in sja1105_init_mii_settings()
265 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_mii_settings()
266 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_mii_settings()
267 if (!table->entries) in sja1105_init_mii_settings()
268 return -ENOMEM; in sja1105_init_mii_settings()
271 table->entry_count = table->ops->max_entry_count; in sja1105_init_mii_settings()
273 mii = table->entries; in sja1105_init_mii_settings()
275 for (i = 0; i < ds->num_ports; i++) { in sja1105_init_mii_settings()
278 if (dsa_is_unused_port(priv->ds, i)) in sja1105_init_mii_settings()
281 switch (priv->phy_mode[i]) { in sja1105_init_mii_settings()
283 if (priv->info->internal_phy[i] == SJA1105_NO_PHY) in sja1105_init_mii_settings()
286 mii->xmii_mode[i] = XMII_MODE_MII; in sja1105_init_mii_settings()
287 if (priv->info->internal_phy[i] == SJA1105_PHY_BASE_TX) in sja1105_init_mii_settings()
288 mii->special[i] = true; in sja1105_init_mii_settings()
295 if (!priv->info->supports_mii[i]) in sja1105_init_mii_settings()
298 mii->xmii_mode[i] = XMII_MODE_MII; in sja1105_init_mii_settings()
304 if (!priv->info->supports_rmii[i]) in sja1105_init_mii_settings()
307 mii->xmii_mode[i] = XMII_MODE_RMII; in sja1105_init_mii_settings()
313 if (!priv->info->supports_rgmii[i]) in sja1105_init_mii_settings()
316 mii->xmii_mode[i] = XMII_MODE_RGMII; in sja1105_init_mii_settings()
319 if (!priv->info->supports_sgmii[i]) in sja1105_init_mii_settings()
322 mii->xmii_mode[i] = XMII_MODE_SGMII; in sja1105_init_mii_settings()
323 mii->special[i] = true; in sja1105_init_mii_settings()
326 if (!priv->info->supports_2500basex[i]) in sja1105_init_mii_settings()
329 mii->xmii_mode[i] = XMII_MODE_SGMII; in sja1105_init_mii_settings()
330 mii->special[i] = true; in sja1105_init_mii_settings()
335 phy_modes(priv->phy_mode[i]), i); in sja1105_init_mii_settings()
336 return -EINVAL; in sja1105_init_mii_settings()
339 mii->phy_mac[i] = role; in sja1105_init_mii_settings()
350 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; in sja1105_init_static_fdb()
353 * entries, except for a special entry at the end which is a catch-all in sja1105_init_static_fdb()
356 if (table->entry_count) { in sja1105_init_static_fdb()
357 kfree(table->entries); in sja1105_init_static_fdb()
358 table->entry_count = 0; in sja1105_init_static_fdb()
361 if (!priv->info->can_limit_mcast_flood) in sja1105_init_static_fdb()
364 table->entries = kcalloc(1, table->ops->unpacked_entry_size, in sja1105_init_static_fdb()
366 if (!table->entries) in sja1105_init_static_fdb()
367 return -ENOMEM; in sja1105_init_static_fdb()
369 table->entry_count = 1; in sja1105_init_static_fdb()
370 l2_lookup = table->entries; in sja1105_init_static_fdb()
376 l2_lookup[0].index = SJA1105_MAX_L2_LOOKUP_COUNT - 1; in sja1105_init_static_fdb()
379 for (port = 0; port < priv->ds->num_ports; port++) in sja1105_init_static_fdb()
380 if (!dsa_is_unused_port(priv->ds, port)) in sja1105_init_static_fdb()
391 /* All entries within a FDB bin are available for learning */ in sja1105_init_l2_lookup_params()
399 /* Don't discard management traffic based on ENFPORT - in sja1105_init_l2_lookup_params()
416 struct dsa_switch *ds = priv->ds; in sja1105_init_l2_lookup_params()
421 for (port = 0; port < ds->num_ports; port++) in sja1105_init_l2_lookup_params()
427 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_lookup_params()
434 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; in sja1105_init_l2_lookup_params()
436 if (table->entry_count) { in sja1105_init_l2_lookup_params()
437 kfree(table->entries); in sja1105_init_l2_lookup_params()
438 table->entry_count = 0; in sja1105_init_l2_lookup_params()
441 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_l2_lookup_params()
442 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_l2_lookup_params()
443 if (!table->entries) in sja1105_init_l2_lookup_params()
444 return -ENOMEM; in sja1105_init_l2_lookup_params()
446 table->entry_count = table->ops->max_entry_count; in sja1105_init_l2_lookup_params()
449 ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] = in sja1105_init_l2_lookup_params()
457 * All DT-defined ports are members of this VLAN, and there are no
474 struct dsa_switch *ds = priv->ds; in sja1105_init_static_vlan()
477 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; in sja1105_init_static_vlan()
479 if (table->entry_count) { in sja1105_init_static_vlan()
480 kfree(table->entries); in sja1105_init_static_vlan()
481 table->entry_count = 0; in sja1105_init_static_vlan()
484 table->entries = kzalloc(table->ops->unpacked_entry_size, in sja1105_init_static_vlan()
486 if (!table->entries) in sja1105_init_static_vlan()
487 return -ENOMEM; in sja1105_init_static_vlan()
489 table->entry_count = 1; in sja1105_init_static_vlan()
491 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_static_vlan()
500 priv->tag_8021q_pvid[port] = SJA1105_DEFAULT_VLAN; in sja1105_init_static_vlan()
501 priv->bridge_pvid[port] = SJA1105_DEFAULT_VLAN; in sja1105_init_static_vlan()
505 ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid; in sja1105_init_static_vlan()
512 struct dsa_switch *ds = priv->ds; in sja1105_init_l2_forwarding()
519 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING]; in sja1105_init_l2_forwarding()
521 if (table->entry_count) { in sja1105_init_l2_forwarding()
522 kfree(table->entries); in sja1105_init_l2_forwarding()
523 table->entry_count = 0; in sja1105_init_l2_forwarding()
526 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_l2_forwarding()
527 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_l2_forwarding()
528 if (!table->entries) in sja1105_init_l2_forwarding()
529 return -ENOMEM; in sja1105_init_l2_forwarding()
531 table->entry_count = table->ops->max_entry_count; in sja1105_init_l2_forwarding()
533 l2fwd = table->entries; in sja1105_init_l2_forwarding()
539 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_forwarding()
548 * only to the always-on domain (CPU port and DSA links) in sja1105_init_l2_forwarding()
550 for (from = 0; from < ds->num_ports; from++) { in sja1105_init_l2_forwarding()
554 for (to = 0; to < ds->num_ports; to++) { in sja1105_init_l2_forwarding()
567 * always-on domain). These can send packets to any enabled port except in sja1105_init_l2_forwarding()
570 for (from = 0; from < ds->num_ports; from++) { in sja1105_init_l2_forwarding()
574 for (to = 0; to < ds->num_ports; to++) { in sja1105_init_l2_forwarding()
596 dst = ds->dst; in sja1105_init_l2_forwarding()
598 list_for_each_entry(dl, &dst->rtable, list) { in sja1105_init_l2_forwarding()
599 if (dl->dp->ds != ds || dl->link_dp->cpu_dp == dl->dp->cpu_dp) in sja1105_init_l2_forwarding()
602 from = dl->dp->index; in sja1105_init_l2_forwarding()
605 dev_warn(ds->dev, in sja1105_init_l2_forwarding()
618 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_forwarding()
622 priv->ucast_egress_floods |= BIT(port); in sja1105_init_l2_forwarding()
623 priv->bcast_egress_floods |= BIT(port); in sja1105_init_l2_forwarding()
627 * Create a one-to-one mapping. in sja1105_init_l2_forwarding()
630 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_forwarding()
634 l2fwd[ds->num_ports + tc].vlan_pmap[port] = tc; in sja1105_init_l2_forwarding()
637 l2fwd[ds->num_ports + tc].type_egrpcp2outputq = true; in sja1105_init_l2_forwarding()
646 struct dsa_switch *ds = priv->ds; in sja1110_init_pcp_remapping()
650 table = &priv->static_config.tables[BLK_IDX_PCP_REMAPPING]; in sja1110_init_pcp_remapping()
653 if (!table->ops->max_entry_count) in sja1110_init_pcp_remapping()
656 if (table->entry_count) { in sja1110_init_pcp_remapping()
657 kfree(table->entries); in sja1110_init_pcp_remapping()
658 table->entry_count = 0; in sja1110_init_pcp_remapping()
661 table->entries = kcalloc(table->ops->max_entry_count, in sja1110_init_pcp_remapping()
662 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1110_init_pcp_remapping()
663 if (!table->entries) in sja1110_init_pcp_remapping()
664 return -ENOMEM; in sja1110_init_pcp_remapping()
666 table->entry_count = table->ops->max_entry_count; in sja1110_init_pcp_remapping()
668 pcp_remap = table->entries; in sja1110_init_pcp_remapping()
671 for (port = 0; port < ds->num_ports; port++) { in sja1110_init_pcp_remapping()
687 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS]; in sja1105_init_l2_forwarding_params()
689 if (table->entry_count) { in sja1105_init_l2_forwarding_params()
690 kfree(table->entries); in sja1105_init_l2_forwarding_params()
691 table->entry_count = 0; in sja1105_init_l2_forwarding_params()
694 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_l2_forwarding_params()
695 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_l2_forwarding_params()
696 if (!table->entries) in sja1105_init_l2_forwarding_params()
697 return -ENOMEM; in sja1105_init_l2_forwarding_params()
699 table->entry_count = table->ops->max_entry_count; in sja1105_init_l2_forwarding_params()
702 l2fwd_params = table->entries; in sja1105_init_l2_forwarding_params()
705 l2fwd_params->max_dynp = 0; in sja1105_init_l2_forwarding_params()
707 l2fwd_params->part_spc[0] = priv->info->max_frame_mem; in sja1105_init_l2_forwarding_params()
718 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS]; in sja1105_frame_memory_partitioning()
719 l2_fwd_params = table->entries; in sja1105_frame_memory_partitioning()
720 l2_fwd_params->part_spc[0] = SJA1105_MAX_FRAME_MEMORY; in sja1105_frame_memory_partitioning()
722 /* If we have any critical-traffic virtual links, we need to reserve in sja1105_frame_memory_partitioning()
725 * remaining for best-effort traffic. TODO: figure out a more flexible in sja1105_frame_memory_partitioning()
728 if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count) in sja1105_frame_memory_partitioning()
731 table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS]; in sja1105_frame_memory_partitioning()
732 vl_fwd_params = table->entries; in sja1105_frame_memory_partitioning()
734 l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY; in sja1105_frame_memory_partitioning()
735 vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY; in sja1105_frame_memory_partitioning()
741  * -----+----------------+---------------+---------------+---------------
743  *   1  |0, [5:10], retag|     [1:2]     |     [3:4]     |       -
744  *   2  |   0, [5:10]    |  [1:3], retag |       4       |       -
745  *   3  |   0, [5:10]    |[1:2], 4, retag|       3       |       -
746  *   4  |  0, 2, [5:10]  |    1, retag   |     [3:4]     |       -
747  *   5  |  0, 1, [5:10]  |    2, retag   |     [3:4]     |       -
748  *  14  |   0, [5:10]    |  [1:4], retag |       -       |       -
749  *  15  |     [5:10]     |  [0:4], retag |       -       |       -
760 if (priv->info->device_id != SJA1110_DEVICE_ID) in sja1110_select_tdmaconfigidx()
763 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; in sja1110_select_tdmaconfigidx()
764 general_params = table->entries; in sja1110_select_tdmaconfigidx()
769 port_1_is_base_tx = priv->phy_mode[1] == PHY_INTERFACE_MODE_INTERNAL; in sja1110_select_tdmaconfigidx()
770 port_3_is_2500 = priv->phy_mode[3] == PHY_INTERFACE_MODE_2500BASEX; in sja1110_select_tdmaconfigidx()
771 port_4_is_2500 = priv->phy_mode[4] == PHY_INTERFACE_MODE_2500BASEX; in sja1110_select_tdmaconfigidx()
789 general_params->tdmaconfigidx = tdmaconfigidx; in sja1110_select_tdmaconfigidx()
795 struct dsa_switch *ds = priv->ds; in sja1105_init_topology()
802 general_params->host_port = ds->num_ports; in sja1105_init_topology()
804 /* Link-local traffic received on casc_port will be forwarded in sja1105_init_topology()
814 if (!priv->info->multiple_cascade_ports) in sja1105_init_topology()
815 general_params->casc_port = ds->num_ports; in sja1105_init_topology()
817 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_topology()
822 * upstream-facing DSA links in sja1105_init_topology()
825 if (general_params->host_port == ds->num_ports) { in sja1105_init_topology()
826 general_params->host_port = port; in sja1105_init_topology()
828 dev_err(ds->dev, in sja1105_init_topology()
830 general_params->host_port, port); in sja1105_init_topology()
831 return -EINVAL; in sja1105_init_topology()
835 /* Cascade ports are downstream-facing DSA links */ in sja1105_init_topology()
837 if (priv->info->multiple_cascade_ports) { in sja1105_init_topology()
838 general_params->casc_port |= BIT(port); in sja1105_init_topology()
839 } else if (general_params->casc_port == ds->num_ports) { in sja1105_init_topology()
840 general_params->casc_port = port; in sja1105_init_topology()
842 dev_err(ds->dev, in sja1105_init_topology()
844 general_params->casc_port, port); in sja1105_init_topology()
845 return -EINVAL; in sja1105_init_topology()
850 if (general_params->host_port == ds->num_ports) { in sja1105_init_topology()
851 dev_err(ds->dev, "No host port configured\n"); in sja1105_init_topology()
852 return -EINVAL; in sja1105_init_topology()
863 .switchid = priv->ds->index, in sja1105_init_general_params()
864 /* Priority queue for link-local management frames in sja1105_init_general_params()
865 * (both ingress to and egress from CPU - PTP, STP etc) in sja1105_init_general_params()
877 .mirr_port = priv->ds->num_ports, in sja1105_init_general_params()
882 /* Only update correctionField for 1-step PTP (L2 transport) */ in sja1105_init_general_params()
902 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; in sja1105_init_general_params()
904 if (table->entry_count) { in sja1105_init_general_params()
905 kfree(table->entries); in sja1105_init_general_params()
906 table->entry_count = 0; in sja1105_init_general_params()
909 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_general_params()
910 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_general_params()
911 if (!table->entries) in sja1105_init_general_params()
912 return -ENOMEM; in sja1105_init_general_params()
914 table->entry_count = table->ops->max_entry_count; in sja1105_init_general_params()
916 general_params = table->entries; in sja1105_init_general_params()
931 table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS]; in sja1105_init_avb_params()
934 if (table->entry_count) { in sja1105_init_avb_params()
935 kfree(table->entries); in sja1105_init_avb_params()
936 table->entry_count = 0; in sja1105_init_avb_params()
939 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_avb_params()
940 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_avb_params()
941 if (!table->entries) in sja1105_init_avb_params()
942 return -ENOMEM; in sja1105_init_avb_params()
944 table->entry_count = table->ops->max_entry_count; in sja1105_init_avb_params()
946 avb = table->entries; in sja1105_init_avb_params()
949 avb->destmeta = SJA1105_META_DMAC; in sja1105_init_avb_params()
950 avb->srcmeta = SJA1105_META_SMAC; in sja1105_init_avb_params()
958 avb->cas_master = false; in sja1105_init_avb_params()
963 /* The L2 policing table is 2-stage. The table is looked up for each frame
 * [ASCII diagram of the two-stage policing table omitted; only its row separators appear in this listing]
999 * In this driver, we shall use policers 0-4 as statically allocated port
1006 #define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000) argument
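As a worked example of this conversion (an inference from the macro itself, not a datasheet statement), the hardware rate field appears to count in 1/64 Mbps (15.625 kbps) steps:

	/* SJA1105_RATE_MBPS(1000) expands to (1000 * 64000) / 1000 = 64000,
	 * i.e. 1000 Mbps expressed in 1/64 Mbps units; e.g. programming
	 * policing[port].rate = SJA1105_RATE_MBPS(1000) would correspond to
	 * full line rate on a gigabit port.
	 */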
1011 struct dsa_switch *ds = priv->ds; in sja1105_init_l2_policing()
1015 table = &priv->static_config.tables[BLK_IDX_L2_POLICING]; in sja1105_init_l2_policing()
1018 if (table->entry_count) { in sja1105_init_l2_policing()
1019 kfree(table->entries); in sja1105_init_l2_policing()
1020 table->entry_count = 0; in sja1105_init_l2_policing()
1023 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_l2_policing()
1024 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_l2_policing()
1025 if (!table->entries) in sja1105_init_l2_policing()
1026 return -ENOMEM; in sja1105_init_l2_policing()
1028 table->entry_count = table->ops->max_entry_count; in sja1105_init_l2_policing()
1030 policing = table->entries; in sja1105_init_l2_policing()
1033 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_policing()
1034 int mcast = (ds->num_ports * (SJA1105_NUM_TC + 1)) + port; in sja1105_init_l2_policing()
1035 int bcast = (ds->num_ports * SJA1105_NUM_TC) + port; in sja1105_init_l2_policing()
1042 if (mcast < table->ops->max_entry_count) in sja1105_init_l2_policing()
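For orientation, a worked instance of these index computations, assuming a 5-port, 8-traffic-class first-generation switch (hypothetical port number, sizes derived from the indexing itself):

	/* With ds->num_ports == 5 and SJA1105_NUM_TC == 8:
	 *   bcast index for port 2 = 5 * 8 + 2 = 42  (the last 5 entries, 40..44,
	 *                                             act as broadcast policers)
	 *   mcast index for port 2 = 5 * 9 + 2 = 47  (past a 45-entry table, so the
	 *                                             max_entry_count check skips it)
	 */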
1047 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_policing()
1066 sja1105_static_config_free(&priv->static_config); in sja1105_static_config_load()
1067 rc = sja1105_static_config_init(&priv->static_config, in sja1105_static_config_load()
1068 priv->info->static_ops, in sja1105_static_config_load()
1069 priv->info->device_id); in sja1105_static_config_load()
1113 * based on the explicit "rx-internal-delay-ps" and "tx-internal-delay-ps"
1118 * Previously we were acting upon the "phy-mode" property when we were
1119 * operating in fixed-link, basically acting as a PHY, but with a reversed
1125 * If the "{rx,tx}-internal-delay-ps" properties are not specified, we fall
1126 * back to the legacy behavior and apply delays on fixed-link ports based on
1127 * the reverse interpretation of the phy-mode. This is a deviation from the
1130 * "{rx,tx}-internal-delay-ps" with a value of 0.
1135 phy_interface_t phy_mode = priv->phy_mode[port]; in sja1105_parse_rgmii_delays()
1136 struct device *dev = &priv->spidev->dev; in sja1105_parse_rgmii_delays()
1137 int rx_delay = -1, tx_delay = -1; in sja1105_parse_rgmii_delays()
1142 of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay); in sja1105_parse_rgmii_delays()
1143 of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay); in sja1105_parse_rgmii_delays()
1145 if (rx_delay == -1 && tx_delay == -1 && priv->fixed_link[port]) { in sja1105_parse_rgmii_delays()
1147 "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, " in sja1105_parse_rgmii_delays()
1148 "please update device tree to specify \"rx-internal-delay-ps\" and " in sja1105_parse_rgmii_delays()
1149 "\"tx-internal-delay-ps\"", in sja1105_parse_rgmii_delays()
1166 if ((rx_delay || tx_delay) && !priv->info->setup_rgmii_delay) { in sja1105_parse_rgmii_delays()
1168 return -EINVAL; in sja1105_parse_rgmii_delays()
1178 return -ERANGE; in sja1105_parse_rgmii_delays()
1181 priv->rgmii_rx_delay_ps[port] = rx_delay; in sja1105_parse_rgmii_delays()
1182 priv->rgmii_tx_delay_ps[port] = tx_delay; in sja1105_parse_rgmii_delays()
1190 struct device *dev = &priv->spidev->dev; in sja1105_parse_ports_node()
1204 return -ENODEV; in sja1105_parse_ports_node()
1210 dev_err(dev, "Failed to read phy-mode or " in sja1105_parse_ports_node()
1211 "phy-interface-type property for port %d\n", in sja1105_parse_ports_node()
1214 return -ENODEV; in sja1105_parse_ports_node()
1217 phy_node = of_parse_phandle(child, "phy-handle", 0); in sja1105_parse_ports_node()
1220 dev_err(dev, "phy-handle or fixed-link " in sja1105_parse_ports_node()
1223 return -ENODEV; in sja1105_parse_ports_node()
1225 /* phy-handle is missing, but fixed-link isn't. in sja1105_parse_ports_node()
1228 priv->fixed_link[index] = true; in sja1105_parse_ports_node()
1233 priv->phy_mode[index] = phy_mode; in sja1105_parse_ports_node()
1247 struct device *dev = &priv->spidev->dev; in sja1105_parse_dt()
1248 struct device_node *switch_node = dev->of_node; in sja1105_parse_dt()
1254 ports_node = of_get_child_by_name(switch_node, "ethernet-ports"); in sja1105_parse_dt()
1257 return -ENODEV; in sja1105_parse_dt()
1266 /* Convert link speed from SJA1105 to ethtool encoding */
1268 u64 speed) in sja1105_port_speed_to_ethtool() argument
1270 if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS]) in sja1105_port_speed_to_ethtool()
1272 if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS]) in sja1105_port_speed_to_ethtool()
1274 if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) in sja1105_port_speed_to_ethtool()
1276 if (speed == priv->info->port_speed[SJA1105_SPEED_2500MBPS]) in sja1105_port_speed_to_ethtool()
1281 /* Set link speed in the MAC configuration for a specific port. */
1286 struct device *dev = priv->ds->dev; in sja1105_adjust_port_config()
1287 u64 speed; in sja1105_adjust_port_config() local
1296 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_adjust_port_config()
1301 * the state->interface, but AN has not completed and the in sja1105_adjust_port_config()
1302 * speed is not yet valid. UM10944.pdf says that setting in sja1105_adjust_port_config()
1304 * ok for power consumption in case AN will never complete - in sja1105_adjust_port_config()
1307 speed = priv->info->port_speed[SJA1105_SPEED_AUTO]; in sja1105_adjust_port_config()
1310 speed = priv->info->port_speed[SJA1105_SPEED_10MBPS]; in sja1105_adjust_port_config()
1313 speed = priv->info->port_speed[SJA1105_SPEED_100MBPS]; in sja1105_adjust_port_config()
1316 speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS]; in sja1105_adjust_port_config()
1319 speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS]; in sja1105_adjust_port_config()
1322 dev_err(dev, "Invalid speed %iMbps\n", speed_mbps); in sja1105_adjust_port_config()
1323 return -EINVAL; in sja1105_adjust_port_config()
1333 if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII) in sja1105_adjust_port_config()
1334 mac[port].speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS]; in sja1105_adjust_port_config()
1335 else if (priv->phy_mode[port] == PHY_INTERFACE_MODE_2500BASEX) in sja1105_adjust_port_config()
1336 mac[port].speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS]; in sja1105_adjust_port_config()
1338 mac[port].speed = speed; in sja1105_adjust_port_config()
1354 if (!phy_interface_mode_is_rgmii(priv->phy_mode[port])) in sja1105_adjust_port_config()
1363 struct sja1105_private *priv = ds->priv; in sja1105_mac_select_pcs()
1364 struct dw_xpcs *xpcs = priv->xpcs[port]; in sja1105_mac_select_pcs()
1367 return &xpcs->pcs; in sja1105_mac_select_pcs()
1376 sja1105_inhibit_tx(ds->priv, BIT(port), true); in sja1105_mac_link_down()
1383 int speed, int duplex, in sja1105_mac_link_up() argument
1386 struct sja1105_private *priv = ds->priv; in sja1105_mac_link_up()
1388 sja1105_adjust_port_config(priv, port, speed); in sja1105_mac_link_up()
1396 struct sja1105_private *priv = ds->priv; in sja1105_phylink_get_caps()
1400 phy_mode = priv->phy_mode[port]; in sja1105_phylink_get_caps()
1405 * changes between SGMII and 2500base-X. in sja1105_phylink_get_caps()
1407 if (priv->info->supports_sgmii[port]) in sja1105_phylink_get_caps()
1409 config->supported_interfaces); in sja1105_phylink_get_caps()
1411 if (priv->info->supports_2500basex[port]) in sja1105_phylink_get_caps()
1413 config->supported_interfaces); in sja1105_phylink_get_caps()
1419 __set_bit(phy_mode, config->supported_interfaces); in sja1105_phylink_get_caps()
1423 * support half-duplex traffic modes. in sja1105_phylink_get_caps()
1425 config->mac_capabilities = MAC_10FD | MAC_100FD; in sja1105_phylink_get_caps()
1427 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; in sja1105_phylink_get_caps()
1428 if (mii->xmii_mode[port] == XMII_MODE_RGMII || in sja1105_phylink_get_caps()
1429 mii->xmii_mode[port] == XMII_MODE_SGMII) in sja1105_phylink_get_caps()
1430 config->mac_capabilities |= MAC_1000FD; in sja1105_phylink_get_caps()
1432 if (priv->info->supports_2500basex[port]) in sja1105_phylink_get_caps()
1433 config->mac_capabilities |= MAC_2500FD; in sja1105_phylink_get_caps()
1444 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; in sja1105_find_static_fdb_entry()
1445 l2_lookup = table->entries; in sja1105_find_static_fdb_entry()
1447 for (i = 0; i < table->entry_count; i++) in sja1105_find_static_fdb_entry()
1448 if (l2_lookup[i].macaddr == requested->macaddr && in sja1105_find_static_fdb_entry()
1449 l2_lookup[i].vlanid == requested->vlanid && in sja1105_find_static_fdb_entry()
1453 return -1; in sja1105_find_static_fdb_entry()
1470 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; in sja1105_static_fdb_change()
1479 rc = sja1105_table_resize(table, table->entry_count + 1); in sja1105_static_fdb_change()
1483 match = table->entry_count - 1; in sja1105_static_fdb_change()
1487 l2_lookup = table->entries; in sja1105_static_fdb_change()
1503 l2_lookup[match] = l2_lookup[table->entry_count - 1]; in sja1105_static_fdb_change()
1504 return sja1105_table_resize(table, table->entry_count - 1); in sja1105_static_fdb_change()
1507 /* First-generation switches have a 4-way set associative TCAM that
1509 * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
1510 * For the placement of a newly learnt FDB entry, the switch selects the bin
1511 * based on a hash function, and the way within that bin incrementally.
1513 static int sja1105et_fdb_index(int bin, int way) in sja1105et_fdb_index() argument
1515 return bin * SJA1105ET_FDB_BIN_SIZE + way; in sja1105et_fdb_index()
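A quick worked example of the index arithmetic, assuming the 4-way associativity means SJA1105ET_FDB_BIN_SIZE is 4:

	/* sja1105et_fdb_index(5, 2) == 5 * 4 + 2 == 22,
	 * i.e. bin 5 occupies indices 20..23 and way 2 is its third slot.
	 */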
1518 static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin, in sja1105et_is_fdb_entry_in_bin() argument
1527 int index = sja1105et_fdb_index(bin, way); in sja1105et_is_fdb_entry_in_bin()
1547 return -1; in sja1105et_is_fdb_entry_in_bin()
1554 struct sja1105_private *priv = ds->priv; in sja1105et_fdb_add()
1555 struct device *dev = ds->dev; in sja1105et_fdb_add()
1556 int last_unused = -1; in sja1105et_fdb_add()
1558 int bin, way, rc; in sja1105et_fdb_add() local
1560 bin = sja1105et_fdb_hash(priv, addr, vid); in sja1105et_fdb_add()
1562 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid, in sja1105et_fdb_add()
1573 int index = sja1105et_fdb_index(bin, way); in sja1105et_fdb_add()
1585 /* Bin is full, need to evict somebody. in sja1105et_fdb_add()
1589 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly in sja1105et_fdb_add()
1593 dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n", in sja1105et_fdb_add()
1594 bin, addr, way); in sja1105et_fdb_add()
1601 l2_lookup.index = sja1105et_fdb_index(bin, way); in sja1105et_fdb_add()
1610 start = sja1105et_fdb_index(bin, 0); in sja1105et_fdb_add()
1611 end = sja1105et_fdb_index(bin, way); in sja1105et_fdb_add()
1616 if (rc == -ENOENT) in sja1105et_fdb_add()
1639 struct sja1105_private *priv = ds->priv; in sja1105et_fdb_del()
1640 int index, bin, way, rc; in sja1105et_fdb_del() local
1643 bin = sja1105et_fdb_hash(priv, addr, vid); in sja1105et_fdb_del()
1644 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid, in sja1105et_fdb_del()
1648 index = sja1105et_fdb_index(bin, way); in sja1105et_fdb_del()
1674 struct sja1105_private *priv = ds->priv; in sja1105pqrs_fdb_add()
1680 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); in sja1105pqrs_fdb_add()
1688 if (rc == 0 && tmp.index != SJA1105_MAX_L2_LOOKUP_COUNT - 1) { in sja1105pqrs_fdb_add()
1705 * This is slightly inefficient because the strategy is knock-knock at in sja1105pqrs_fdb_add()
1715 dev_err(ds->dev, "FDB is full, cannot add entry.\n"); in sja1105pqrs_fdb_add()
1716 return -EINVAL; in sja1105pqrs_fdb_add()
1745 dev_err(ds->dev, in sja1105pqrs_fdb_add()
1765 struct sja1105_private *priv = ds->priv; in sja1105pqrs_fdb_del()
1771 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); in sja1105pqrs_fdb_del()
1802 struct sja1105_private *priv = ds->priv; in sja1105_fdb_add()
1814 return -EOPNOTSUPP; in sja1105_fdb_add()
1818 mutex_lock(&priv->fdb_lock); in sja1105_fdb_add()
1819 rc = priv->info->fdb_add_cmd(ds, port, addr, vid); in sja1105_fdb_add()
1820 mutex_unlock(&priv->fdb_lock); in sja1105_fdb_add()
1829 struct sja1105_private *priv = ds->priv; in __sja1105_fdb_del()
1840 return -EOPNOTSUPP; in __sja1105_fdb_del()
1844 return priv->info->fdb_del_cmd(ds, port, addr, vid); in __sja1105_fdb_del()
1851 struct sja1105_private *priv = ds->priv; in sja1105_fdb_del()
1854 mutex_lock(&priv->fdb_lock); in sja1105_fdb_del()
1856 mutex_unlock(&priv->fdb_lock); in sja1105_fdb_del()
1864 struct sja1105_private *priv = ds->priv; in sja1105_fdb_dump()
1865 struct device *dev = ds->dev; in sja1105_fdb_dump()
1876 if (rc == -ENOENT) in sja1105_fdb_dump()
1886 * 1024-sized FDB table needs to be traversed 4 times through in sja1105_fdb_dump()
1913 struct sja1105_private *priv = ds->priv; in sja1105_fast_age()
1923 mutex_lock(&priv->fdb_lock); in sja1105_fast_age()
1933 if (rc == -ENOENT) in sja1105_fast_age()
1936 dev_err(ds->dev, "Failed to read FDB: %pe\n", in sja1105_fast_age()
1952 dev_err(ds->dev, in sja1105_fast_age()
1959 mutex_unlock(&priv->fdb_lock); in sja1105_fast_age()
1966 return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid, db); in sja1105_mdb_add()
1973 return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid, db); in sja1105_mdb_del()
1985 struct dsa_switch *ds = priv->ds; in sja1105_manage_flood_domains()
1988 l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries; in sja1105_manage_flood_domains()
1990 for (from = 0; from < ds->num_ports; from++) { in sja1105_manage_flood_domains()
1993 for (to = 0; to < priv->ds->num_ports; to++) { in sja1105_manage_flood_domains()
1997 if (priv->ucast_egress_floods & BIT(to)) in sja1105_manage_flood_domains()
1999 if (priv->bcast_egress_floods & BIT(to)) in sja1105_manage_flood_domains()
2024 struct sja1105_private *priv = ds->priv; in sja1105_bridge_member()
2027 l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries; in sja1105_bridge_member()
2029 for (i = 0; i < ds->num_ports; i++) { in sja1105_bridge_member()
2073 struct sja1105_private *priv = ds->priv; in sja1105_bridge_stp_state_set()
2076 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_bridge_stp_state_set()
2098 mac[port].dyn_learn = dp->learning; in sja1105_bridge_stp_state_set()
2103 mac[port].dyn_learn = dp->learning; in sja1105_bridge_stp_state_set()
2106 dev_err(ds->dev, "invalid STP state: %d\n", state); in sja1105_bridge_stp_state_set()
2144 #define SJA1110_FIXED_CBS(port, prio) ((((port) - 1) * SJA1105_NUM_TC) + (prio))
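A worked instance of the fixed mapping, assuming SJA1105_NUM_TC is 8:

	/* SJA1110_FIXED_CBS(3, 2) == (3 - 1) * 8 + 2 == 18.
	 * The "port - 1" suggests port 0 has no fixed shaper slot.
	 */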
2151 if (priv->info->fixed_cbs_mapping) { in sja1105_find_cbs_shaper()
2153 if (i >= 0 && i < priv->info->num_cbs_shapers) in sja1105_find_cbs_shaper()
2156 return -1; in sja1105_find_cbs_shaper()
2159 for (i = 0; i < priv->info->num_cbs_shapers; i++) in sja1105_find_cbs_shaper()
2160 if (priv->cbs[i].port == port && priv->cbs[i].prio == prio) in sja1105_find_cbs_shaper()
2163 return -1; in sja1105_find_cbs_shaper()
2170 if (priv->info->fixed_cbs_mapping) in sja1105_find_unused_cbs_shaper()
2171 return -1; in sja1105_find_unused_cbs_shaper()
2173 for (i = 0; i < priv->info->num_cbs_shapers; i++) in sja1105_find_unused_cbs_shaper()
2174 if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope) in sja1105_find_unused_cbs_shaper()
2177 return -1; in sja1105_find_unused_cbs_shaper()
2185 for (i = 0; i < priv->info->num_cbs_shapers; i++) { in sja1105_delete_cbs_shaper()
2186 struct sja1105_cbs_entry *cbs = &priv->cbs[i]; in sja1105_delete_cbs_shaper()
2188 if (cbs->port == port && cbs->prio == prio) { in sja1105_delete_cbs_shaper()
2201 struct sja1105_private *priv = ds->priv; in sja1105_setup_tc_cbs()
2206 if (!offload->enable) in sja1105_setup_tc_cbs()
2207 return sja1105_delete_cbs_shaper(priv, port, offload->queue); in sja1105_setup_tc_cbs()
2210 index = sja1105_find_cbs_shaper(priv, port, offload->queue); in sja1105_setup_tc_cbs()
2212 /* That isn't the case - see if we can allocate a new one */ in sja1105_setup_tc_cbs()
2215 return -ENOSPC; in sja1105_setup_tc_cbs()
2218 cbs = &priv->cbs[index]; in sja1105_setup_tc_cbs()
2219 cbs->port = port; in sja1105_setup_tc_cbs()
2220 cbs->prio = offload->queue; in sja1105_setup_tc_cbs()
2224 cbs->credit_hi = offload->hicredit; in sja1105_setup_tc_cbs()
2225 cbs->credit_lo = abs(offload->locredit); in sja1105_setup_tc_cbs()
2227 * link speed. Since the given offload->sendslope is good only for the in sja1105_setup_tc_cbs()
2228 * current link speed anyway, and user space is likely to reprogram it in sja1105_setup_tc_cbs()
2229 * when that changes, don't even bother to track the port's link speed, in sja1105_setup_tc_cbs()
2230 * but deduce the port transmit rate from idleslope - sendslope. in sja1105_setup_tc_cbs()
2232 port_transmit_rate_kbps = offload->idleslope - offload->sendslope; in sja1105_setup_tc_cbs()
2233 cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT, in sja1105_setup_tc_cbs()
2235 cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT), in sja1105_setup_tc_cbs()
2237 /* Convert the negative values from 64-bit 2's complement in sja1105_setup_tc_cbs()
2238 * to 32-bit 2's complement (for the case of 0x80000000 whose in sja1105_setup_tc_cbs()
2241 cbs->credit_lo &= GENMASK_ULL(31, 0); in sja1105_setup_tc_cbs()
2242 cbs->send_slope &= GENMASK_ULL(31, 0); in sja1105_setup_tc_cbs()
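A worked example of this normalization, assuming BYTES_PER_KBIT is 125 (1000 bits per kbit divided by 8 bits per byte) and hypothetical tc-cbs parameters:

	/* offload->idleslope = 20000 kbps, offload->sendslope = -80000 kbps
	 * port_transmit_rate_kbps = 20000 - (-80000) = 100000   (100 Mbps link)
	 * cbs->idle_slope = 20000 * 125 / 100000 = 25
	 * cbs->send_slope = |-80000| * 125 / 100000 = 100
	 * i.e. the 20%/80% bandwidth split scaled by BYTES_PER_KBIT.
	 */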
2255 if (!priv->cbs) in sja1105_reload_cbs()
2258 for (i = 0; i < priv->info->num_cbs_shapers; i++) { in sja1105_reload_cbs()
2259 struct sja1105_cbs_entry *cbs = &priv->cbs[i]; in sja1105_reload_cbs()
2261 if (!cbs->idle_slope && !cbs->send_slope) in sja1105_reload_cbs()
2276 [SJA1105_SCHEDULING] = "Time-aware scheduling",
2277 [SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
2295 struct dsa_switch *ds = priv->ds; in sja1105_static_config_reload()
2301 mutex_lock(&priv->fdb_lock); in sja1105_static_config_reload()
2302 mutex_lock(&priv->mgmt_lock); in sja1105_static_config_reload()
2304 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_static_config_reload()
2306 /* Back up the dynamic link speed changed by sja1105_adjust_port_config in sja1105_static_config_reload()
2307 * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the in sja1105_static_config_reload()
2311 for (i = 0; i < ds->num_ports; i++) { in sja1105_static_config_reload()
2313 mac[i].speed); in sja1105_static_config_reload()
2314 mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO]; in sja1105_static_config_reload()
2316 if (priv->xpcs[i]) in sja1105_static_config_reload()
2317 bmcr[i] = mdiobus_c45_read(priv->mdio_pcs, i, in sja1105_static_config_reload()
2322 mutex_lock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2326 mutex_unlock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2333 mutex_unlock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2339 mutex_unlock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2347 /* Mid point, corresponds to pre-reset PTPCLKVAL */ in sja1105_static_config_reload()
2348 t12 = t1 + (t2 - t1) / 2; in sja1105_static_config_reload()
2349 /* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */ in sja1105_static_config_reload()
2350 t34 = t3 + (t4 - t3) / 2; in sja1105_static_config_reload()
2352 now += (t34 - t12); in sja1105_static_config_reload()
2356 mutex_unlock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2358 dev_info(priv->ds->dev, in sja1105_static_config_reload()
2366 if (priv->info->clocking_setup) { in sja1105_static_config_reload()
2367 rc = priv->info->clocking_setup(priv); in sja1105_static_config_reload()
2372 for (i = 0; i < ds->num_ports; i++) { in sja1105_static_config_reload()
2373 struct dw_xpcs *xpcs = priv->xpcs[i]; in sja1105_static_config_reload()
2388 rc = xpcs_do_config(xpcs, priv->phy_mode[i], NULL, neg_mode); in sja1105_static_config_reload()
2393 int speed = SPEED_UNKNOWN; in sja1105_static_config_reload() local
2395 if (priv->phy_mode[i] == PHY_INTERFACE_MODE_2500BASEX) in sja1105_static_config_reload()
2396 speed = SPEED_2500; in sja1105_static_config_reload()
2398 speed = SPEED_1000; in sja1105_static_config_reload()
2400 speed = SPEED_100; in sja1105_static_config_reload()
2402 speed = SPEED_10; in sja1105_static_config_reload()
2404 xpcs_link_up(&xpcs->pcs, neg_mode, priv->phy_mode[i], in sja1105_static_config_reload()
2405 speed, DUPLEX_FULL); in sja1105_static_config_reload()
2413 mutex_unlock(&priv->mgmt_lock); in sja1105_static_config_reload()
2414 mutex_unlock(&priv->fdb_lock); in sja1105_static_config_reload()
2423 struct sja1105_private *priv = ds->priv; in sja1105_get_tag_protocol()
2425 return priv->info->tag_proto; in sja1105_get_tag_protocol()
2436 struct sja1105_private *priv = ds->priv; in sja1105_vlan_filtering()
2442 list_for_each_entry(rule, &priv->flow_block.rules, list) { in sja1105_vlan_filtering()
2443 if (rule->type == SJA1105_RULE_VL) { in sja1105_vlan_filtering()
2446 return -EBUSY; in sja1105_vlan_filtering()
2460 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; in sja1105_vlan_filtering()
2461 general_params = table->entries; in sja1105_vlan_filtering()
2462 /* EtherType used to identify inner tagged (C-tag) VLAN traffic */ in sja1105_vlan_filtering()
2463 general_params->tpid = tpid; in sja1105_vlan_filtering()
2464 /* EtherType used to identify outer tagged (S-tag) VLAN traffic */ in sja1105_vlan_filtering()
2465 general_params->tpid2 = tpid2; in sja1105_vlan_filtering()
2467 for (port = 0; port < ds->num_ports; port++) { in sja1105_vlan_filtering()
2490 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; in sja1105_vlan_add()
2494 rc = sja1105_table_resize(table, table->entry_count + 1); in sja1105_vlan_add()
2497 match = table->entry_count - 1; in sja1105_vlan_add()
2501 vlan = table->entries; in sja1105_vlan_add()
2528 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; in sja1105_vlan_del()
2536 vlan = table->entries; in sja1105_vlan_del()
2567 struct sja1105_private *priv = ds->priv; in sja1105_bridge_vlan_add()
2568 u16 flags = vlan->flags; in sja1105_bridge_vlan_add()
2573 if (vid_is_dsa_8021q(vlan->vid)) { in sja1105_bridge_vlan_add()
2575 "Range 3072-4095 reserved for dsa_8021q operation"); in sja1105_bridge_vlan_add()
2576 return -EBUSY; in sja1105_bridge_vlan_add()
2579 /* Always install bridge VLANs as egress-tagged on CPU and DSA ports */ in sja1105_bridge_vlan_add()
2583 rc = sja1105_vlan_add(priv, port, vlan->vid, flags, true); in sja1105_bridge_vlan_add()
2587 if (vlan->flags & BRIDGE_VLAN_INFO_PVID) in sja1105_bridge_vlan_add()
2588 priv->bridge_pvid[port] = vlan->vid; in sja1105_bridge_vlan_add()
2596 struct sja1105_private *priv = ds->priv; in sja1105_bridge_vlan_del()
2599 rc = sja1105_vlan_del(priv, port, vlan->vid); in sja1105_bridge_vlan_del()
2612 struct sja1105_private *priv = ds->priv; in sja1105_dsa_8021q_vlan_add()
2627 priv->tag_8021q_pvid[port] = vid; in sja1105_dsa_8021q_vlan_add()
2634 struct sja1105_private *priv = ds->priv; in sja1105_dsa_8021q_vlan_del()
2642 struct netlink_ext_ack *extack = info->info.extack; in sja1105_prechangeupper()
2643 struct net_device *upper = info->upper_dev; in sja1105_prechangeupper()
2644 struct dsa_switch_tree *dst = ds->dst; in sja1105_prechangeupper()
2649 return -EBUSY; in sja1105_prechangeupper()
2653 list_for_each_entry(dp, &dst->ports, list) { in sja1105_prechangeupper()
2658 "Only one VLAN-aware bridge is supported"); in sja1105_prechangeupper()
2659 return -EBUSY; in sja1105_prechangeupper()
2671 struct sja1105_private *priv = ds->priv; in sja1105_mgmt_xmit()
2678 mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest); in sja1105_mgmt_xmit()
2692 dsa_enqueue_skb(skb, dsa_to_port(ds, port)->user); in sja1105_mgmt_xmit()
2699 dev_err_ratelimited(priv->ds->dev, in sja1105_mgmt_xmit()
2709 } while (mgmt_route.enfport && --timeout); in sja1105_mgmt_xmit()
2712 /* Clean up the management route so that a follow-up in sja1105_mgmt_xmit()
2714 * This is only hardware supported on P/Q/R/S - on E/T it is in sja1105_mgmt_xmit()
2715 * a no-op and we are silently discarding the -EOPNOTSUPP. in sja1105_mgmt_xmit()
2719 dev_err_ratelimited(priv->ds->dev, "xmit timed out\n"); in sja1105_mgmt_xmit()
2735 struct sk_buff *clone, *skb = xmit_work->skb; in sja1105_port_deferred_xmit()
2736 struct dsa_switch *ds = xmit_work->dp->ds; in sja1105_port_deferred_xmit()
2737 struct sja1105_private *priv = ds->priv; in sja1105_port_deferred_xmit()
2738 int port = xmit_work->dp->index; in sja1105_port_deferred_xmit()
2740 clone = SJA1105_SKB_CB(skb)->clone; in sja1105_port_deferred_xmit()
2742 mutex_lock(&priv->mgmt_lock); in sja1105_port_deferred_xmit()
2750 mutex_unlock(&priv->mgmt_lock); in sja1105_port_deferred_xmit()
2758 struct sja1105_private *priv = ds->priv; in sja1105_connect_tag_protocol()
2761 if (proto != priv->info->tag_proto) in sja1105_connect_tag_protocol()
2762 return -EPROTONOSUPPORT; in sja1105_connect_tag_protocol()
2765 tagger_data->xmit_work_fn = sja1105_port_deferred_xmit; in sja1105_connect_tag_protocol()
2766 tagger_data->meta_tstamp_handler = sja1110_process_meta_tstamp; in sja1105_connect_tag_protocol()
2778 struct sja1105_private *priv = ds->priv; in sja1105_set_ageing_time()
2782 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; in sja1105_set_ageing_time()
2783 l2_lookup_params = table->entries; in sja1105_set_ageing_time()
2787 if (l2_lookup_params->maxage == maxage) in sja1105_set_ageing_time()
2790 l2_lookup_params->maxage = maxage; in sja1105_set_ageing_time()
2798 struct sja1105_private *priv = ds->priv; in sja1105_change_mtu()
2805 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; in sja1105_change_mtu()
2817 return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN; in sja1105_get_max_mtu()
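For reference, the arithmetic behind that return value, using the standard kernel constants:

	/* 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN = 2043 - 18 - 4 = 2021,
	 * the largest MTU that still fits the 2043-byte frame limit once the
	 * VLAN Ethernet header and FCS are added back.
	 */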
2830 return -EOPNOTSUPP; in sja1105_port_setup_tc()
2845 struct dsa_switch *ds = priv->ds; in sja1105_mirror_apply()
2851 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; in sja1105_mirror_apply()
2852 general_params = table->entries; in sja1105_mirror_apply()
2854 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_mirror_apply()
2856 already_enabled = (general_params->mirr_port != ds->num_ports); in sja1105_mirror_apply()
2857 if (already_enabled && enabled && general_params->mirr_port != to) { in sja1105_mirror_apply()
2858 dev_err(priv->ds->dev, in sja1105_mirror_apply()
2860 general_params->mirr_port); in sja1105_mirror_apply()
2861 return -EBUSY; in sja1105_mirror_apply()
2870 for (port = 0; port < ds->num_ports; port++) { in sja1105_mirror_apply()
2878 new_mirr_port = ds->num_ports; in sja1105_mirror_apply()
2880 if (new_mirr_port != general_params->mirr_port) { in sja1105_mirror_apply()
2881 general_params->mirr_port = new_mirr_port; in sja1105_mirror_apply()
2902 return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port, in sja1105_mirror_add()
2909 sja1105_mirror_apply(ds->priv, port, mirror->to_local_port, in sja1105_mirror_del()
2910 mirror->ingress, false); in sja1105_mirror_del()
2917 struct sja1105_private *priv = ds->priv; in sja1105_port_policer_add()
2919 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; in sja1105_port_policer_add()
2925 policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec, in sja1105_port_policer_add()
2927 policing[port].smax = policer->burst; in sja1105_port_policer_add()
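The 512 factor follows from the same 1/64 Mbps hardware unit suggested by SJA1105_RATE_MBPS above (a derivation sketch, not a datasheet statement):

	/* rate_bytes_per_sec * 8 bits/byte = bits per second
	 *                    / 10^6        = Mbit per second
	 *                    * 64          = 1/64 Mbps hardware units
	 * combined factor: 8 * 64 = 512, hence 512 * rate_bytes_per_sec / 1000000.
	 */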
2935 struct sja1105_private *priv = ds->priv; in sja1105_port_policer_del()
2937 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; in sja1105_port_policer_del()
2950 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_port_set_learning()
2963 priv->ucast_egress_floods |= BIT(to); in sja1105_port_ucast_bcast_flood()
2965 priv->ucast_egress_floods &= ~BIT(to); in sja1105_port_ucast_bcast_flood()
2970 priv->bcast_egress_floods |= BIT(to); in sja1105_port_ucast_bcast_flood()
2972 priv->bcast_egress_floods &= ~BIT(to); in sja1105_port_ucast_bcast_flood()
2986 mutex_lock(&priv->fdb_lock); in sja1105_port_mcast_flood()
2988 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; in sja1105_port_mcast_flood()
2989 l2_lookup = table->entries; in sja1105_port_mcast_flood()
2991 for (match = 0; match < table->entry_count; match++) in sja1105_port_mcast_flood()
2996 if (match == table->entry_count) { in sja1105_port_mcast_flood()
2999 rc = -ENOSPC; in sja1105_port_mcast_flood()
3012 mutex_unlock(&priv->fdb_lock); in sja1105_port_mcast_flood()
3021 struct sja1105_private *priv = ds->priv; in sja1105_port_pre_bridge_flags()
3025 return -EINVAL; in sja1105_port_pre_bridge_flags()
3028 !priv->info->can_limit_mcast_flood) { in sja1105_port_pre_bridge_flags()
3035 return -EINVAL; in sja1105_port_pre_bridge_flags()
3046 struct sja1105_private *priv = ds->priv; in sja1105_port_bridge_flags()
3067 if (flags.mask & BR_MCAST_FLOOD && priv->info->can_limit_mcast_flood) { in sja1105_port_bridge_flags()
3077 /* The programming model for the SJA1105 switch is "all-at-once" via static
3084 * Setting correct PHY link speed does not matter now.
3091 struct sja1105_private *priv = ds->priv; in sja1105_setup()
3094 if (priv->info->disable_microcontroller) { in sja1105_setup()
3095 rc = priv->info->disable_microcontroller(priv); in sja1105_setup()
3097 dev_err(ds->dev, in sja1105_setup()
3107 dev_err(ds->dev, "Failed to load static config: %d\n", rc); in sja1105_setup()
3112 if (priv->info->clocking_setup) { in sja1105_setup()
3113 rc = priv->info->clocking_setup(priv); in sja1105_setup()
3115 dev_err(ds->dev, in sja1105_setup()
3127 dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc); in sja1105_setup()
3133 dev_err(ds->dev, "Failed to register MDIO bus: %pe\n", in sja1105_setup()
3156 ds->vlan_filtering_is_global = true; in sja1105_setup()
3157 ds->untag_bridge_pvid = true; in sja1105_setup()
3158 ds->fdb_isolation = true; in sja1105_setup()
3160 ds->max_num_bridges = 7; in sja1105_setup()
3163 ds->num_tx_queues = SJA1105_NUM_TC; in sja1105_setup()
3165 ds->mtu_enforcement_ingress = true; in sja1105_setup()
3166 ds->assisted_learning_on_cpu_port = true; in sja1105_setup()
3180 sja1105_static_config_free(&priv->static_config); in sja1105_setup()
3187 struct sja1105_private *priv = ds->priv; in sja1105_teardown()
3198 sja1105_static_config_free(&priv->static_config); in sja1105_teardown()
3253 const struct sja1105_regs *regs = priv->info->regs; in sja1105_check_device_id()
3255 struct device *dev = &priv->spidev->dev; in sja1105_check_device_id()
3261 rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id, in sja1105_check_device_id()
3266 rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id, in sja1105_check_device_id()
3273 for (match = sja1105_dt_ids; match->compatible[0]; match++) { in sja1105_check_device_id()
3274 const struct sja1105_info *info = match->data; in sja1105_check_device_id()
3277 if (info->device_id != device_id || info->part_no != part_no) in sja1105_check_device_id()
3281 if (priv->info->device_id != device_id || in sja1105_check_device_id()
3282 priv->info->part_no != part_no) { in sja1105_check_device_id()
3284 priv->info->name, info->name); in sja1105_check_device_id()
3286 priv->info = info; in sja1105_check_device_id()
3295 return -ENODEV; in sja1105_check_device_id()
3300 struct device *dev = &spi->dev; in sja1105_probe()
3306 if (!dev->of_node) { in sja1105_probe()
3308 return -EINVAL; in sja1105_probe()
3317 return -ENOMEM; in sja1105_probe()
3322 priv->spidev = spi; in sja1105_probe()
3326 spi->bits_per_word = 8; in sja1105_probe()
3346 /* We need to send at least one 64-bit word of SPI payload per message in sja1105_probe()
3351 return -EINVAL; in sja1105_probe()
3354 priv->max_xfer_len = SJA1105_SIZE_SPI_MSG_MAXLEN; in sja1105_probe()
3355 if (priv->max_xfer_len > max_xfer) in sja1105_probe()
3356 priv->max_xfer_len = max_xfer; in sja1105_probe()
3357 if (priv->max_xfer_len > max_msg - SJA1105_SIZE_SPI_MSG_HEADER) in sja1105_probe()
3358 priv->max_xfer_len = max_msg - SJA1105_SIZE_SPI_MSG_HEADER; in sja1105_probe()
3360 priv->info = of_device_get_match_data(dev); in sja1105_probe()
3369 dev_info(dev, "Probed switch chip: %s\n", priv->info->name); in sja1105_probe()
3373 return -ENOMEM; in sja1105_probe()
3375 ds->dev = dev; in sja1105_probe()
3376 ds->num_ports = priv->info->num_ports; in sja1105_probe()
3377 ds->ops = &sja1105_switch_ops; in sja1105_probe()
3378 ds->priv = priv; in sja1105_probe()
3379 priv->ds = ds; in sja1105_probe()
3381 mutex_init(&priv->ptp_data.lock); in sja1105_probe()
3382 mutex_init(&priv->dynamic_config_lock); in sja1105_probe()
3383 mutex_init(&priv->mgmt_lock); in sja1105_probe()
3384 mutex_init(&priv->fdb_lock); in sja1105_probe()
3385 spin_lock_init(&priv->ts_id_lock); in sja1105_probe()
3389 dev_err(ds->dev, "Failed to parse DT: %d\n", rc); in sja1105_probe()
3394 priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers, in sja1105_probe()
3397 if (!priv->cbs) in sja1105_probe()
3398 return -ENOMEM; in sja1105_probe()
3401 return dsa_register_switch(priv->ds); in sja1105_probe()
3411 dsa_unregister_switch(priv->ds); in sja1105_remove()
3421 dsa_switch_shutdown(priv->ds); in sja1105_shutdown()
3471 MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");