1 /* 2 * B53 switch driver main logic 3 * 4 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org> 5 * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com> 6 * 7 * Permission to use, copy, modify, and/or distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include <linux/delay.h> 21 #include <linux/export.h> 22 #include <linux/gpio.h> 23 #include <linux/kernel.h> 24 #include <linux/math.h> 25 #include <linux/minmax.h> 26 #include <linux/module.h> 27 #include <linux/platform_data/b53.h> 28 #include <linux/phy.h> 29 #include <linux/phylink.h> 30 #include <linux/etherdevice.h> 31 #include <linux/if_bridge.h> 32 #include <linux/if_vlan.h> 33 #include <net/dsa.h> 34 35 #include "b53_regs.h" 36 #include "b53_priv.h" 37 38 struct b53_mib_desc { 39 u8 size; 40 u8 offset; 41 const char *name; 42 }; 43 44 /* BCM5365 MIB counters */ 45 static const struct b53_mib_desc b53_mibs_65[] = { 46 { 8, 0x00, "TxOctets" }, 47 { 4, 0x08, "TxDropPkts" }, 48 { 4, 0x10, "TxBroadcastPkts" }, 49 { 4, 0x14, "TxMulticastPkts" }, 50 { 4, 0x18, "TxUnicastPkts" }, 51 { 4, 0x1c, "TxCollisions" }, 52 { 4, 0x20, "TxSingleCollision" }, 53 { 4, 0x24, "TxMultipleCollision" }, 54 { 4, 0x28, "TxDeferredTransmit" }, 55 { 4, 0x2c, "TxLateCollision" }, 56 { 4, 0x30, "TxExcessiveCollision" }, 57 { 4, 0x38, "TxPausePkts" }, 58 { 8, 0x44, "RxOctets" }, 59 { 4, 0x4c, "RxUndersizePkts" }, 60 { 4, 0x50, "RxPausePkts" }, 61 { 4, 0x54, "Pkts64Octets" }, 62 { 4, 0x58, "Pkts65to127Octets" }, 63 { 4, 0x5c, "Pkts128to255Octets" }, 64 { 4, 0x60, "Pkts256to511Octets" }, 65 { 4, 0x64, "Pkts512to1023Octets" }, 66 { 4, 0x68, "Pkts1024to1522Octets" }, 67 { 4, 0x6c, "RxOversizePkts" }, 68 { 4, 0x70, "RxJabbers" }, 69 { 4, 0x74, "RxAlignmentErrors" }, 70 { 4, 0x78, "RxFCSErrors" }, 71 { 8, 0x7c, "RxGoodOctets" }, 72 { 4, 0x84, "RxDropPkts" }, 73 { 4, 0x88, "RxUnicastPkts" }, 74 { 4, 0x8c, "RxMulticastPkts" }, 75 { 4, 0x90, "RxBroadcastPkts" }, 76 { 4, 0x94, "RxSAChanges" }, 77 { 4, 0x98, "RxFragments" }, 78 }; 79 80 #define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65) 81 82 /* BCM63xx MIB counters */ 83 static const struct b53_mib_desc b53_mibs_63xx[] = { 84 { 8, 0x00, "TxOctets" }, 85 { 4, 0x08, "TxDropPkts" }, 86 { 4, 0x0c, "TxQoSPkts" }, 87 { 4, 0x10, "TxBroadcastPkts" }, 88 { 4, 0x14, "TxMulticastPkts" }, 89 { 4, 0x18, "TxUnicastPkts" }, 90 { 4, 0x1c, "TxCollisions" }, 91 { 4, 0x20, "TxSingleCollision" }, 92 { 4, 0x24, "TxMultipleCollision" }, 93 { 4, 0x28, "TxDeferredTransmit" }, 94 { 4, 0x2c, "TxLateCollision" }, 95 { 4, 0x30, "TxExcessiveCollision" }, 96 { 4, 0x38, "TxPausePkts" }, 97 { 8, 0x3c, "TxQoSOctets" }, 98 { 8, 0x44, "RxOctets" }, 99 { 4, 0x4c, "RxUndersizePkts" }, 100 { 4, 0x50, "RxPausePkts" }, 101 { 4, 0x54, "Pkts64Octets" }, 102 { 4, 0x58, "Pkts65to127Octets" }, 103 { 4, 0x5c, "Pkts128to255Octets" }, 104 { 4, 0x60, "Pkts256to511Octets" }, 105 { 4, 0x64, "Pkts512to1023Octets" }, 106 { 4, 0x68, "Pkts1024to1522Octets" }, 107 { 4, 0x6c, "RxOversizePkts" }, 108 { 4, 0x70, "RxJabbers" }, 109 { 4, 0x74, "RxAlignmentErrors" }, 110 { 4, 0x78, "RxFCSErrors" }, 111 { 8, 0x7c, "RxGoodOctets" }, 112 { 4, 0x84, "RxDropPkts" }, 113 { 4, 0x88, "RxUnicastPkts" }, 114 { 4, 0x8c, "RxMulticastPkts" }, 115 { 4, 0x90, "RxBroadcastPkts" }, 116 { 4, 0x94, "RxSAChanges" }, 117 { 4, 0x98, "RxFragments" }, 118 { 4, 0xa0, "RxSymbolErrors" }, 119 { 4, 0xa4, "RxQoSPkts" }, 120 { 8, 0xa8, "RxQoSOctets" }, 121 { 4, 0xb0, "Pkts1523to2047Octets" }, 122 { 4, 0xb4, "Pkts2048to4095Octets" }, 123 { 4, 0xb8, "Pkts4096to8191Octets" }, 124 { 4, 0xbc, "Pkts8192to9728Octets" }, 125 { 4, 0xc0, "RxDiscarded" }, 126 }; 127 128 #define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx) 129 130 /* MIB counters */ 131 static const struct b53_mib_desc b53_mibs[] = { 132 { 8, 0x00, "TxOctets" }, 133 { 4, 0x08, "TxDropPkts" }, 134 { 4, 0x10, "TxBroadcastPkts" }, 135 { 4, 0x14, "TxMulticastPkts" }, 136 { 4, 0x18, "TxUnicastPkts" }, 137 { 4, 0x1c, "TxCollisions" }, 138 { 4, 0x20, "TxSingleCollision" }, 139 { 4, 0x24, "TxMultipleCollision" }, 140 { 4, 0x28, "TxDeferredTransmit" }, 141 { 4, 0x2c, "TxLateCollision" }, 142 { 4, 0x30, "TxExcessiveCollision" }, 143 { 4, 0x38, "TxPausePkts" }, 144 { 8, 0x50, "RxOctets" }, 145 { 4, 0x58, "RxUndersizePkts" }, 146 { 4, 0x5c, "RxPausePkts" }, 147 { 4, 0x60, "Pkts64Octets" }, 148 { 4, 0x64, "Pkts65to127Octets" }, 149 { 4, 0x68, "Pkts128to255Octets" }, 150 { 4, 0x6c, "Pkts256to511Octets" }, 151 { 4, 0x70, "Pkts512to1023Octets" }, 152 { 4, 0x74, "Pkts1024to1522Octets" }, 153 { 4, 0x78, "RxOversizePkts" }, 154 { 4, 0x7c, "RxJabbers" }, 155 { 4, 0x80, "RxAlignmentErrors" }, 156 { 4, 0x84, "RxFCSErrors" }, 157 { 8, 0x88, "RxGoodOctets" }, 158 { 4, 0x90, "RxDropPkts" }, 159 { 4, 0x94, "RxUnicastPkts" }, 160 { 4, 0x98, "RxMulticastPkts" }, 161 { 4, 0x9c, "RxBroadcastPkts" }, 162 { 4, 0xa0, "RxSAChanges" }, 163 { 4, 0xa4, "RxFragments" }, 164 { 4, 0xa8, "RxJumboPkts" }, 165 { 4, 0xac, "RxSymbolErrors" }, 166 { 4, 0xc0, "RxDiscarded" }, 167 }; 168 169 #define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs) 170 171 static const struct b53_mib_desc b53_mibs_58xx[] = { 172 { 8, 0x00, "TxOctets" }, 173 { 4, 0x08, "TxDropPkts" }, 174 { 4, 0x0c, "TxQPKTQ0" }, 175 { 4, 0x10, "TxBroadcastPkts" }, 176 { 4, 0x14, "TxMulticastPkts" }, 177 { 4, 0x18, "TxUnicastPKts" }, 178 { 4, 0x1c, "TxCollisions" }, 179 { 4, 0x20, "TxSingleCollision" }, 180 { 4, 0x24, "TxMultipleCollision" }, 181 { 4, 0x28, "TxDeferredCollision" }, 182 { 4, 0x2c, "TxLateCollision" }, 183 { 4, 0x30, "TxExcessiveCollision" }, 184 { 4, 0x34, "TxFrameInDisc" }, 185 { 4, 0x38, "TxPausePkts" }, 186 { 4, 0x3c, "TxQPKTQ1" }, 187 { 4, 0x40, "TxQPKTQ2" }, 188 { 4, 0x44, "TxQPKTQ3" }, 189 { 4, 0x48, "TxQPKTQ4" }, 190 { 4, 0x4c, "TxQPKTQ5" }, 191 { 8, 0x50, "RxOctets" }, 192 { 4, 0x58, "RxUndersizePkts" }, 193 { 4, 0x5c, "RxPausePkts" }, 194 { 4, 0x60, "RxPkts64Octets" }, 195 { 4, 0x64, "RxPkts65to127Octets" }, 196 { 4, 0x68, "RxPkts128to255Octets" }, 197 { 4, 0x6c, "RxPkts256to511Octets" }, 198 { 4, 0x70, "RxPkts512to1023Octets" }, 199 { 4, 0x74, "RxPkts1024toMaxPktsOctets" }, 200 { 4, 0x78, "RxOversizePkts" }, 201 { 4, 0x7c, "RxJabbers" }, 202 { 4, 0x80, "RxAlignmentErrors" }, 203 { 4, 0x84, "RxFCSErrors" }, 204 { 8, 0x88, "RxGoodOctets" }, 205 { 4, 0x90, "RxDropPkts" }, 206 { 4, 0x94, "RxUnicastPkts" }, 207 { 4, 0x98, "RxMulticastPkts" }, 208 { 4, 0x9c, "RxBroadcastPkts" }, 209 { 4, 0xa0, "RxSAChanges" }, 210 { 4, 0xa4, "RxFragments" }, 211 { 4, 0xa8, "RxJumboPkt" }, 212 { 4, 0xac, "RxSymblErr" }, 213 { 4, 0xb0, "InRangeErrCount" }, 214 { 4, 0xb4, "OutRangeErrCount" }, 215 { 4, 0xb8, "EEELpiEvent" }, 216 { 4, 0xbc, "EEELpiDuration" }, 217 { 4, 0xc0, "RxDiscard" }, 218 { 4, 0xc8, "TxQPKTQ6" }, 219 { 4, 0xcc, "TxQPKTQ7" }, 220 { 4, 0xd0, "TxPkts64Octets" }, 221 { 4, 0xd4, "TxPkts65to127Octets" }, 222 { 4, 0xd8, "TxPkts128to255Octets" }, 223 { 4, 0xdc, "TxPkts256to511Ocets" }, 224 { 4, 0xe0, "TxPkts512to1023Ocets" }, 225 { 4, 0xe4, "TxPkts1024toMaxPktOcets" }, 226 }; 227 228 #define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx) 229 230 #define B53_MAX_MTU_25 (1536 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) 231 #define B53_MAX_MTU (9720 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) 232 233 static int b53_do_vlan_op(struct b53_device *dev, u8 op) 234 { 235 unsigned int i; 236 237 b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op); 238 239 for (i = 0; i < 10; i++) { 240 u8 vta; 241 242 b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta); 243 if (!(vta & VTA_START_CMD)) 244 return 0; 245 246 usleep_range(100, 200); 247 } 248 249 return -EIO; 250 } 251 252 static void b53_set_vlan_entry(struct b53_device *dev, u16 vid, 253 struct b53_vlan *vlan) 254 { 255 if (is5325(dev)) { 256 u32 entry = 0; 257 258 if (vlan->members) { 259 entry = ((vlan->untag & VA_UNTAG_MASK_25) << 260 VA_UNTAG_S_25) | vlan->members; 261 if (dev->core_rev >= 3) 262 entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S; 263 else 264 entry |= VA_VALID_25; 265 } 266 267 b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry); 268 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | 269 VTA_RW_STATE_WR | VTA_RW_OP_EN); 270 } else if (is5365(dev)) { 271 u16 entry = 0; 272 273 if (vlan->members) 274 entry = ((vlan->untag & VA_UNTAG_MASK_65) << 275 VA_UNTAG_S_65) | vlan->members | VA_VALID_65; 276 277 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry); 278 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | 279 VTA_RW_STATE_WR | VTA_RW_OP_EN); 280 } else { 281 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); 282 b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], 283 (vlan->untag << VTE_UNTAG_S) | vlan->members); 284 285 b53_do_vlan_op(dev, VTA_CMD_WRITE); 286 } 287 288 dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n", 289 vid, vlan->members, vlan->untag); 290 } 291 292 static void b53_get_vlan_entry(struct b53_device *dev, u16 vid, 293 struct b53_vlan *vlan) 294 { 295 if (is5325(dev)) { 296 u32 entry = 0; 297 298 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | 299 VTA_RW_STATE_RD | VTA_RW_OP_EN); 300 b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry); 301 302 if (dev->core_rev >= 3) 303 vlan->valid = !!(entry & VA_VALID_25_R4); 304 else 305 vlan->valid = !!(entry & VA_VALID_25); 306 vlan->members = entry & VA_MEMBER_MASK; 307 vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25; 308 309 } else if (is5365(dev)) { 310 u16 entry = 0; 311 312 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | 313 VTA_RW_STATE_WR | VTA_RW_OP_EN); 314 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry); 315 316 vlan->valid = !!(entry & VA_VALID_65); 317 vlan->members = entry & VA_MEMBER_MASK; 318 vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65; 319 } else { 320 u32 entry = 0; 321 322 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); 323 b53_do_vlan_op(dev, VTA_CMD_READ); 324 b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry); 325 vlan->members = entry & VTE_MEMBERS; 326 vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS; 327 vlan->valid = true; 328 } 329 } 330 331 static void b53_set_eap_mode(struct b53_device *dev, int port, int mode) 332 { 333 u64 eap_conf; 334 335 if (is5325(dev) || is5365(dev) || dev->chip_id == BCM5389_DEVICE_ID) 336 return; 337 338 b53_read64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), &eap_conf); 339 340 if (is63xx(dev)) { 341 eap_conf &= ~EAP_MODE_MASK_63XX; 342 eap_conf |= (u64)mode << EAP_MODE_SHIFT_63XX; 343 } else { 344 eap_conf &= ~EAP_MODE_MASK; 345 eap_conf |= (u64)mode << EAP_MODE_SHIFT; 346 } 347 348 b53_write64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), eap_conf); 349 } 350 351 static void b53_set_forwarding(struct b53_device *dev, int enable) 352 { 353 u8 mgmt; 354 355 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 356 357 if (enable) 358 mgmt |= SM_SW_FWD_EN; 359 else 360 mgmt &= ~SM_SW_FWD_EN; 361 362 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 363 364 /* Include IMP port in dumb forwarding mode 365 */ 366 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt); 367 mgmt |= B53_MII_DUMB_FWDG_EN; 368 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); 369 370 /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether 371 * frames should be flooded or not. 372 */ 373 b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); 374 mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN; 375 b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); 376 } 377 378 static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, 379 bool enable_filtering) 380 { 381 u8 mgmt, vc0, vc1, vc4 = 0, vc5; 382 383 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 384 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0); 385 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1); 386 387 if (is5325(dev) || is5365(dev)) { 388 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4); 389 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5); 390 } else if (is63xx(dev)) { 391 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4); 392 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5); 393 } else { 394 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4); 395 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5); 396 } 397 398 vc1 &= ~VC1_RX_MCST_FWD_EN; 399 400 if (enable) { 401 vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; 402 vc1 |= VC1_RX_MCST_UNTAG_EN; 403 vc4 &= ~VC4_ING_VID_CHECK_MASK; 404 if (enable_filtering) { 405 vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; 406 vc5 |= VC5_DROP_VTABLE_MISS; 407 } else { 408 vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S; 409 vc5 &= ~VC5_DROP_VTABLE_MISS; 410 } 411 412 if (is5325(dev)) 413 vc0 &= ~VC0_RESERVED_1; 414 415 if (is5325(dev) || is5365(dev)) 416 vc1 |= VC1_RX_MCST_TAG_EN; 417 418 } else { 419 vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID); 420 vc1 &= ~VC1_RX_MCST_UNTAG_EN; 421 vc4 &= ~VC4_ING_VID_CHECK_MASK; 422 vc5 &= ~VC5_DROP_VTABLE_MISS; 423 424 if (is5325(dev) || is5365(dev)) 425 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; 426 else 427 vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S; 428 429 if (is5325(dev) || is5365(dev)) 430 vc1 &= ~VC1_RX_MCST_TAG_EN; 431 } 432 433 if (!is5325(dev) && !is5365(dev)) 434 vc5 &= ~VC5_VID_FFF_EN; 435 436 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0); 437 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1); 438 439 if (is5325(dev) || is5365(dev)) { 440 /* enable the high 8 bit vid check on 5325 */ 441 if (is5325(dev) && enable) 442 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 443 VC3_HIGH_8BIT_EN); 444 else 445 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); 446 447 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4); 448 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5); 449 } else if (is63xx(dev)) { 450 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0); 451 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4); 452 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5); 453 } else { 454 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); 455 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4); 456 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5); 457 } 458 459 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 460 461 dev->vlan_enabled = enable; 462 463 dev_dbg(dev->dev, "Port %d VLAN enabled: %d, filtering: %d\n", 464 port, enable, enable_filtering); 465 } 466 467 static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) 468 { 469 u32 port_mask = 0; 470 u16 max_size = JMS_MIN_SIZE; 471 472 if (is5325(dev) || is5365(dev)) 473 return -EINVAL; 474 475 if (enable) { 476 port_mask = dev->enabled_ports; 477 max_size = JMS_MAX_SIZE; 478 if (allow_10_100) 479 port_mask |= JPM_10_100_JUMBO_EN; 480 } 481 482 b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask); 483 return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size); 484 } 485 486 static int b53_flush_arl(struct b53_device *dev, u8 mask) 487 { 488 unsigned int i; 489 490 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, 491 FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask); 492 493 for (i = 0; i < 10; i++) { 494 u8 fast_age_ctrl; 495 496 b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, 497 &fast_age_ctrl); 498 499 if (!(fast_age_ctrl & FAST_AGE_DONE)) 500 goto out; 501 502 msleep(1); 503 } 504 505 return -ETIMEDOUT; 506 out: 507 /* Only age dynamic entries (default behavior) */ 508 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC); 509 return 0; 510 } 511 512 static int b53_fast_age_port(struct b53_device *dev, int port) 513 { 514 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port); 515 516 return b53_flush_arl(dev, FAST_AGE_PORT); 517 } 518 519 static int b53_fast_age_vlan(struct b53_device *dev, u16 vid) 520 { 521 b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid); 522 523 return b53_flush_arl(dev, FAST_AGE_VLAN); 524 } 525 526 void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) 527 { 528 struct b53_device *dev = ds->priv; 529 unsigned int i; 530 u16 pvlan; 531 532 /* Enable the IMP port to be in the same VLAN as the other ports 533 * on a per-port basis such that we only have Port i and IMP in 534 * the same VLAN. 535 */ 536 b53_for_each_port(dev, i) { 537 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan); 538 pvlan |= BIT(cpu_port); 539 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan); 540 } 541 } 542 EXPORT_SYMBOL(b53_imp_vlan_setup); 543 544 static void b53_port_set_ucast_flood(struct b53_device *dev, int port, 545 bool unicast) 546 { 547 u16 uc; 548 549 b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc); 550 if (unicast) 551 uc |= BIT(port); 552 else 553 uc &= ~BIT(port); 554 b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc); 555 } 556 557 static void b53_port_set_mcast_flood(struct b53_device *dev, int port, 558 bool multicast) 559 { 560 u16 mc; 561 562 b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc); 563 if (multicast) 564 mc |= BIT(port); 565 else 566 mc &= ~BIT(port); 567 b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc); 568 569 b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc); 570 if (multicast) 571 mc |= BIT(port); 572 else 573 mc &= ~BIT(port); 574 b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc); 575 } 576 577 static void b53_port_set_learning(struct b53_device *dev, int port, 578 bool learning) 579 { 580 u16 reg; 581 582 b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, ®); 583 if (learning) 584 reg &= ~BIT(port); 585 else 586 reg |= BIT(port); 587 b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg); 588 } 589 590 static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable) 591 { 592 struct b53_device *dev = ds->priv; 593 u16 reg; 594 595 b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, ®); 596 if (enable) 597 reg |= BIT(port); 598 else 599 reg &= ~BIT(port); 600 b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg); 601 } 602 603 int b53_setup_port(struct dsa_switch *ds, int port) 604 { 605 struct b53_device *dev = ds->priv; 606 607 b53_port_set_ucast_flood(dev, port, true); 608 b53_port_set_mcast_flood(dev, port, true); 609 b53_port_set_learning(dev, port, false); 610 611 /* Force all traffic to go to the CPU port to prevent the ASIC from 612 * trying to forward to bridged ports on matching FDB entries, then 613 * dropping frames because it isn't allowed to forward there. 614 */ 615 if (dsa_is_user_port(ds, port)) 616 b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED); 617 618 return 0; 619 } 620 EXPORT_SYMBOL(b53_setup_port); 621 622 int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) 623 { 624 struct b53_device *dev = ds->priv; 625 unsigned int cpu_port; 626 int ret = 0; 627 u16 pvlan; 628 629 if (!dsa_is_user_port(ds, port)) 630 return 0; 631 632 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 633 634 if (dev->ops->irq_enable) 635 ret = dev->ops->irq_enable(dev, port); 636 if (ret) 637 return ret; 638 639 /* Clear the Rx and Tx disable bits and set to no spanning tree */ 640 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0); 641 642 /* Set this port, and only this one to be in the default VLAN, 643 * if member of a bridge, restore its membership prior to 644 * bringing down this port. 645 */ 646 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 647 pvlan &= ~0x1ff; 648 pvlan |= BIT(port); 649 pvlan |= dev->ports[port].vlan_ctl_mask; 650 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 651 652 b53_imp_vlan_setup(ds, cpu_port); 653 654 /* If EEE was enabled, restore it */ 655 if (dev->ports[port].eee.eee_enabled) 656 b53_eee_enable_set(ds, port, true); 657 658 return 0; 659 } 660 EXPORT_SYMBOL(b53_enable_port); 661 662 void b53_disable_port(struct dsa_switch *ds, int port) 663 { 664 struct b53_device *dev = ds->priv; 665 u8 reg; 666 667 /* Disable Tx/Rx for the port */ 668 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), ®); 669 reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE; 670 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); 671 672 if (dev->ops->irq_disable) 673 dev->ops->irq_disable(dev, port); 674 } 675 EXPORT_SYMBOL(b53_disable_port); 676 677 void b53_brcm_hdr_setup(struct dsa_switch *ds, int port) 678 { 679 struct b53_device *dev = ds->priv; 680 bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE); 681 u8 hdr_ctl, val; 682 u16 reg; 683 684 /* Resolve which bit controls the Broadcom tag */ 685 switch (port) { 686 case 8: 687 val = BRCM_HDR_P8_EN; 688 break; 689 case 7: 690 val = BRCM_HDR_P7_EN; 691 break; 692 case 5: 693 val = BRCM_HDR_P5_EN; 694 break; 695 default: 696 val = 0; 697 break; 698 } 699 700 /* Enable management mode if tagging is requested */ 701 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl); 702 if (tag_en) 703 hdr_ctl |= SM_SW_FWD_MODE; 704 else 705 hdr_ctl &= ~SM_SW_FWD_MODE; 706 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl); 707 708 /* Configure the appropriate IMP port */ 709 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl); 710 if (port == 8) 711 hdr_ctl |= GC_FRM_MGMT_PORT_MII; 712 else if (port == 5) 713 hdr_ctl |= GC_FRM_MGMT_PORT_M; 714 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl); 715 716 /* Enable Broadcom tags for IMP port */ 717 b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl); 718 if (tag_en) 719 hdr_ctl |= val; 720 else 721 hdr_ctl &= ~val; 722 b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl); 723 724 /* Registers below are only accessible on newer devices */ 725 if (!is58xx(dev)) 726 return; 727 728 /* Enable reception Broadcom tag for CPU TX (switch RX) to 729 * allow us to tag outgoing frames 730 */ 731 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, ®); 732 if (tag_en) 733 reg &= ~BIT(port); 734 else 735 reg |= BIT(port); 736 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg); 737 738 /* Enable transmission of Broadcom tags from the switch (CPU RX) to 739 * allow delivering frames to the per-port net_devices 740 */ 741 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, ®); 742 if (tag_en) 743 reg &= ~BIT(port); 744 else 745 reg |= BIT(port); 746 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg); 747 } 748 EXPORT_SYMBOL(b53_brcm_hdr_setup); 749 750 static void b53_enable_cpu_port(struct b53_device *dev, int port) 751 { 752 u8 port_ctrl; 753 754 /* BCM5325 CPU port is at 8 */ 755 if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25) 756 port = B53_CPU_PORT; 757 758 port_ctrl = PORT_CTRL_RX_BCST_EN | 759 PORT_CTRL_RX_MCST_EN | 760 PORT_CTRL_RX_UCST_EN; 761 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl); 762 763 b53_brcm_hdr_setup(dev->ds, port); 764 } 765 766 static void b53_enable_mib(struct b53_device *dev) 767 { 768 u8 gc; 769 770 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); 771 gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN); 772 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); 773 } 774 775 static void b53_enable_stp(struct b53_device *dev) 776 { 777 u8 gc; 778 779 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); 780 gc |= GC_RX_BPDU_EN; 781 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); 782 } 783 784 static u16 b53_default_pvid(struct b53_device *dev) 785 { 786 if (is5325(dev) || is5365(dev)) 787 return 1; 788 else 789 return 0; 790 } 791 792 static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port) 793 { 794 struct b53_device *dev = ds->priv; 795 796 return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port); 797 } 798 799 static bool b53_vlan_port_may_join_untagged(struct dsa_switch *ds, int port) 800 { 801 struct b53_device *dev = ds->priv; 802 struct dsa_port *dp; 803 804 if (!dev->vlan_filtering) 805 return true; 806 807 dp = dsa_to_port(ds, port); 808 809 if (dsa_port_is_cpu(dp)) 810 return true; 811 812 return dp->bridge == NULL; 813 } 814 815 int b53_configure_vlan(struct dsa_switch *ds) 816 { 817 struct b53_device *dev = ds->priv; 818 struct b53_vlan vl = { 0 }; 819 struct b53_vlan *v; 820 int i, def_vid; 821 u16 vid; 822 823 def_vid = b53_default_pvid(dev); 824 825 /* clear all vlan entries */ 826 if (is5325(dev) || is5365(dev)) { 827 for (i = def_vid; i < dev->num_vlans; i++) 828 b53_set_vlan_entry(dev, i, &vl); 829 } else { 830 b53_do_vlan_op(dev, VTA_CMD_CLEAR); 831 } 832 833 b53_enable_vlan(dev, -1, dev->vlan_enabled, dev->vlan_filtering); 834 835 /* Create an untagged VLAN entry for the default PVID in case 836 * CONFIG_VLAN_8021Q is disabled and there are no calls to 837 * dsa_user_vlan_rx_add_vid() to create the default VLAN 838 * entry. Do this only when the tagging protocol is not 839 * DSA_TAG_PROTO_NONE 840 */ 841 v = &dev->vlans[def_vid]; 842 b53_for_each_port(dev, i) { 843 if (!b53_vlan_port_may_join_untagged(ds, i)) 844 continue; 845 846 vl.members |= BIT(i); 847 if (!b53_vlan_port_needs_forced_tagged(ds, i)) 848 vl.untag = vl.members; 849 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(i), 850 def_vid); 851 } 852 b53_set_vlan_entry(dev, def_vid, &vl); 853 854 if (dev->vlan_filtering) { 855 /* Upon initial call we have not set-up any VLANs, but upon 856 * system resume, we need to restore all VLAN entries. 857 */ 858 for (vid = def_vid + 1; vid < dev->num_vlans; vid++) { 859 v = &dev->vlans[vid]; 860 861 if (!v->members) 862 continue; 863 864 b53_set_vlan_entry(dev, vid, v); 865 b53_fast_age_vlan(dev, vid); 866 } 867 868 b53_for_each_port(dev, i) { 869 if (!dsa_is_cpu_port(ds, i)) 870 b53_write16(dev, B53_VLAN_PAGE, 871 B53_VLAN_PORT_DEF_TAG(i), 872 dev->ports[i].pvid); 873 } 874 } 875 876 return 0; 877 } 878 EXPORT_SYMBOL(b53_configure_vlan); 879 880 static void b53_switch_reset_gpio(struct b53_device *dev) 881 { 882 int gpio = dev->reset_gpio; 883 884 if (gpio < 0) 885 return; 886 887 /* Reset sequence: RESET low(50ms)->high(20ms) 888 */ 889 gpio_set_value(gpio, 0); 890 mdelay(50); 891 892 gpio_set_value(gpio, 1); 893 mdelay(20); 894 895 dev->current_page = 0xff; 896 } 897 898 static int b53_switch_reset(struct b53_device *dev) 899 { 900 unsigned int timeout = 1000; 901 u8 mgmt, reg; 902 903 b53_switch_reset_gpio(dev); 904 905 if (is539x(dev)) { 906 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83); 907 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00); 908 } 909 910 /* This is specific to 58xx devices here, do not use is58xx() which 911 * covers the larger Starfigther 2 family, including 7445/7278 which 912 * still use this driver as a library and need to perform the reset 913 * earlier. 914 */ 915 if (dev->chip_id == BCM58XX_DEVICE_ID || 916 dev->chip_id == BCM583XX_DEVICE_ID) { 917 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, ®); 918 reg |= SW_RST | EN_SW_RST | EN_CH_RST; 919 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg); 920 921 do { 922 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, ®); 923 if (!(reg & SW_RST)) 924 break; 925 926 usleep_range(1000, 2000); 927 } while (timeout-- > 0); 928 929 if (timeout == 0) { 930 dev_err(dev->dev, 931 "Timeout waiting for SW_RST to clear!\n"); 932 return -ETIMEDOUT; 933 } 934 } 935 936 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 937 938 if (!(mgmt & SM_SW_FWD_EN)) { 939 mgmt &= ~SM_SW_FWD_MODE; 940 mgmt |= SM_SW_FWD_EN; 941 942 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 943 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 944 945 if (!(mgmt & SM_SW_FWD_EN)) { 946 dev_err(dev->dev, "Failed to enable switch!\n"); 947 return -EINVAL; 948 } 949 } 950 951 b53_enable_mib(dev); 952 b53_enable_stp(dev); 953 954 return b53_flush_arl(dev, FAST_AGE_STATIC); 955 } 956 957 static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg) 958 { 959 struct b53_device *priv = ds->priv; 960 u16 value = 0; 961 int ret; 962 963 if (priv->ops->phy_read16) 964 ret = priv->ops->phy_read16(priv, addr, reg, &value); 965 else 966 ret = b53_read16(priv, B53_PORT_MII_PAGE(addr), 967 reg * 2, &value); 968 969 return ret ? ret : value; 970 } 971 972 static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val) 973 { 974 struct b53_device *priv = ds->priv; 975 976 if (priv->ops->phy_write16) 977 return priv->ops->phy_write16(priv, addr, reg, val); 978 979 return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val); 980 } 981 982 static int b53_reset_switch(struct b53_device *priv) 983 { 984 /* reset vlans */ 985 memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans); 986 memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports); 987 988 priv->serdes_lane = B53_INVALID_LANE; 989 990 return b53_switch_reset(priv); 991 } 992 993 static int b53_apply_config(struct b53_device *priv) 994 { 995 /* disable switching */ 996 b53_set_forwarding(priv, 0); 997 998 b53_configure_vlan(priv->ds); 999 1000 /* enable switching */ 1001 b53_set_forwarding(priv, 1); 1002 1003 return 0; 1004 } 1005 1006 static void b53_reset_mib(struct b53_device *priv) 1007 { 1008 u8 gc; 1009 1010 b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); 1011 1012 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB); 1013 msleep(1); 1014 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB); 1015 msleep(1); 1016 } 1017 1018 static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev) 1019 { 1020 if (is5365(dev)) 1021 return b53_mibs_65; 1022 else if (is63xx(dev)) 1023 return b53_mibs_63xx; 1024 else if (is58xx(dev)) 1025 return b53_mibs_58xx; 1026 else 1027 return b53_mibs; 1028 } 1029 1030 static unsigned int b53_get_mib_size(struct b53_device *dev) 1031 { 1032 if (is5365(dev)) 1033 return B53_MIBS_65_SIZE; 1034 else if (is63xx(dev)) 1035 return B53_MIBS_63XX_SIZE; 1036 else if (is58xx(dev)) 1037 return B53_MIBS_58XX_SIZE; 1038 else 1039 return B53_MIBS_SIZE; 1040 } 1041 1042 static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port) 1043 { 1044 /* These ports typically do not have built-in PHYs */ 1045 switch (port) { 1046 case B53_CPU_PORT_25: 1047 case 7: 1048 case B53_CPU_PORT: 1049 return NULL; 1050 } 1051 1052 return mdiobus_get_phy(ds->user_mii_bus, port); 1053 } 1054 1055 void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset, 1056 uint8_t *data) 1057 { 1058 struct b53_device *dev = ds->priv; 1059 const struct b53_mib_desc *mibs = b53_get_mib(dev); 1060 unsigned int mib_size = b53_get_mib_size(dev); 1061 struct phy_device *phydev; 1062 unsigned int i; 1063 1064 if (stringset == ETH_SS_STATS) { 1065 for (i = 0; i < mib_size; i++) 1066 ethtool_puts(&data, mibs[i].name); 1067 } else if (stringset == ETH_SS_PHY_STATS) { 1068 phydev = b53_get_phy_device(ds, port); 1069 if (!phydev) 1070 return; 1071 1072 phy_ethtool_get_strings(phydev, data); 1073 } 1074 } 1075 EXPORT_SYMBOL(b53_get_strings); 1076 1077 void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) 1078 { 1079 struct b53_device *dev = ds->priv; 1080 const struct b53_mib_desc *mibs = b53_get_mib(dev); 1081 unsigned int mib_size = b53_get_mib_size(dev); 1082 const struct b53_mib_desc *s; 1083 unsigned int i; 1084 u64 val = 0; 1085 1086 if (is5365(dev) && port == 5) 1087 port = 8; 1088 1089 mutex_lock(&dev->stats_mutex); 1090 1091 for (i = 0; i < mib_size; i++) { 1092 s = &mibs[i]; 1093 1094 if (s->size == 8) { 1095 b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val); 1096 } else { 1097 u32 val32; 1098 1099 b53_read32(dev, B53_MIB_PAGE(port), s->offset, 1100 &val32); 1101 val = val32; 1102 } 1103 data[i] = (u64)val; 1104 } 1105 1106 mutex_unlock(&dev->stats_mutex); 1107 } 1108 EXPORT_SYMBOL(b53_get_ethtool_stats); 1109 1110 void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data) 1111 { 1112 struct phy_device *phydev; 1113 1114 phydev = b53_get_phy_device(ds, port); 1115 if (!phydev) 1116 return; 1117 1118 phy_ethtool_get_stats(phydev, NULL, data); 1119 } 1120 EXPORT_SYMBOL(b53_get_ethtool_phy_stats); 1121 1122 int b53_get_sset_count(struct dsa_switch *ds, int port, int sset) 1123 { 1124 struct b53_device *dev = ds->priv; 1125 struct phy_device *phydev; 1126 1127 if (sset == ETH_SS_STATS) { 1128 return b53_get_mib_size(dev); 1129 } else if (sset == ETH_SS_PHY_STATS) { 1130 phydev = b53_get_phy_device(ds, port); 1131 if (!phydev) 1132 return 0; 1133 1134 return phy_ethtool_get_sset_count(phydev); 1135 } 1136 1137 return 0; 1138 } 1139 EXPORT_SYMBOL(b53_get_sset_count); 1140 1141 enum b53_devlink_resource_id { 1142 B53_DEVLINK_PARAM_ID_VLAN_TABLE, 1143 }; 1144 1145 static u64 b53_devlink_vlan_table_get(void *priv) 1146 { 1147 struct b53_device *dev = priv; 1148 struct b53_vlan *vl; 1149 unsigned int i; 1150 u64 count = 0; 1151 1152 for (i = 0; i < dev->num_vlans; i++) { 1153 vl = &dev->vlans[i]; 1154 if (vl->members) 1155 count++; 1156 } 1157 1158 return count; 1159 } 1160 1161 int b53_setup_devlink_resources(struct dsa_switch *ds) 1162 { 1163 struct devlink_resource_size_params size_params; 1164 struct b53_device *dev = ds->priv; 1165 int err; 1166 1167 devlink_resource_size_params_init(&size_params, dev->num_vlans, 1168 dev->num_vlans, 1169 1, DEVLINK_RESOURCE_UNIT_ENTRY); 1170 1171 err = dsa_devlink_resource_register(ds, "VLAN", dev->num_vlans, 1172 B53_DEVLINK_PARAM_ID_VLAN_TABLE, 1173 DEVLINK_RESOURCE_ID_PARENT_TOP, 1174 &size_params); 1175 if (err) 1176 goto out; 1177 1178 dsa_devlink_resource_occ_get_register(ds, 1179 B53_DEVLINK_PARAM_ID_VLAN_TABLE, 1180 b53_devlink_vlan_table_get, dev); 1181 1182 return 0; 1183 out: 1184 dsa_devlink_resources_unregister(ds); 1185 return err; 1186 } 1187 EXPORT_SYMBOL(b53_setup_devlink_resources); 1188 1189 static int b53_setup(struct dsa_switch *ds) 1190 { 1191 struct b53_device *dev = ds->priv; 1192 struct b53_vlan *vl; 1193 unsigned int port; 1194 u16 pvid; 1195 int ret; 1196 1197 /* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set 1198 * which forces the CPU port to be tagged in all VLANs. 1199 */ 1200 ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE; 1201 1202 /* The switch does not tell us the original VLAN for untagged 1203 * packets, so keep the CPU port always tagged. 1204 */ 1205 ds->untag_vlan_aware_bridge_pvid = true; 1206 1207 /* Ageing time is set in seconds */ 1208 ds->ageing_time_min = 1 * 1000; 1209 ds->ageing_time_max = AGE_TIME_MAX * 1000; 1210 1211 ret = b53_reset_switch(dev); 1212 if (ret) { 1213 dev_err(ds->dev, "failed to reset switch\n"); 1214 return ret; 1215 } 1216 1217 /* setup default vlan for filtering mode */ 1218 pvid = b53_default_pvid(dev); 1219 vl = &dev->vlans[pvid]; 1220 b53_for_each_port(dev, port) { 1221 vl->members |= BIT(port); 1222 if (!b53_vlan_port_needs_forced_tagged(ds, port)) 1223 vl->untag |= BIT(port); 1224 } 1225 1226 b53_reset_mib(dev); 1227 1228 ret = b53_apply_config(dev); 1229 if (ret) { 1230 dev_err(ds->dev, "failed to apply configuration\n"); 1231 return ret; 1232 } 1233 1234 /* Configure IMP/CPU port, disable all other ports. Enabled 1235 * ports will be configured with .port_enable 1236 */ 1237 for (port = 0; port < dev->num_ports; port++) { 1238 if (dsa_is_cpu_port(ds, port)) 1239 b53_enable_cpu_port(dev, port); 1240 else 1241 b53_disable_port(ds, port); 1242 } 1243 1244 return b53_setup_devlink_resources(ds); 1245 } 1246 1247 static void b53_teardown(struct dsa_switch *ds) 1248 { 1249 dsa_devlink_resources_unregister(ds); 1250 } 1251 1252 static void b53_force_link(struct b53_device *dev, int port, int link) 1253 { 1254 u8 reg, val, off; 1255 1256 /* Override the port settings */ 1257 if (port == dev->imp_port) { 1258 off = B53_PORT_OVERRIDE_CTRL; 1259 val = PORT_OVERRIDE_EN; 1260 } else { 1261 off = B53_GMII_PORT_OVERRIDE_CTRL(port); 1262 val = GMII_PO_EN; 1263 } 1264 1265 b53_read8(dev, B53_CTRL_PAGE, off, ®); 1266 reg |= val; 1267 if (link) 1268 reg |= PORT_OVERRIDE_LINK; 1269 else 1270 reg &= ~PORT_OVERRIDE_LINK; 1271 b53_write8(dev, B53_CTRL_PAGE, off, reg); 1272 } 1273 1274 static void b53_force_port_config(struct b53_device *dev, int port, 1275 int speed, int duplex, 1276 bool tx_pause, bool rx_pause) 1277 { 1278 u8 reg, val, off; 1279 1280 /* Override the port settings */ 1281 if (port == dev->imp_port) { 1282 off = B53_PORT_OVERRIDE_CTRL; 1283 val = PORT_OVERRIDE_EN; 1284 } else { 1285 off = B53_GMII_PORT_OVERRIDE_CTRL(port); 1286 val = GMII_PO_EN; 1287 } 1288 1289 b53_read8(dev, B53_CTRL_PAGE, off, ®); 1290 reg |= val; 1291 if (duplex == DUPLEX_FULL) 1292 reg |= PORT_OVERRIDE_FULL_DUPLEX; 1293 else 1294 reg &= ~PORT_OVERRIDE_FULL_DUPLEX; 1295 1296 switch (speed) { 1297 case 2000: 1298 reg |= PORT_OVERRIDE_SPEED_2000M; 1299 fallthrough; 1300 case SPEED_1000: 1301 reg |= PORT_OVERRIDE_SPEED_1000M; 1302 break; 1303 case SPEED_100: 1304 reg |= PORT_OVERRIDE_SPEED_100M; 1305 break; 1306 case SPEED_10: 1307 reg |= PORT_OVERRIDE_SPEED_10M; 1308 break; 1309 default: 1310 dev_err(dev->dev, "unknown speed: %d\n", speed); 1311 return; 1312 } 1313 1314 if (rx_pause) 1315 reg |= PORT_OVERRIDE_RX_FLOW; 1316 if (tx_pause) 1317 reg |= PORT_OVERRIDE_TX_FLOW; 1318 1319 b53_write8(dev, B53_CTRL_PAGE, off, reg); 1320 } 1321 1322 static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port, 1323 phy_interface_t interface) 1324 { 1325 struct b53_device *dev = ds->priv; 1326 u8 rgmii_ctrl = 0; 1327 1328 b53_read8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), &rgmii_ctrl); 1329 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC); 1330 1331 if (is63268(dev)) 1332 rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE; 1333 1334 rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII; 1335 1336 b53_write8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), rgmii_ctrl); 1337 1338 dev_dbg(ds->dev, "Configured port %d for %s\n", port, 1339 phy_modes(interface)); 1340 } 1341 1342 static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port, 1343 phy_interface_t interface) 1344 { 1345 struct b53_device *dev = ds->priv; 1346 u8 rgmii_ctrl = 0, off; 1347 1348 if (port == dev->imp_port) 1349 off = B53_RGMII_CTRL_IMP; 1350 else 1351 off = B53_RGMII_CTRL_P(port); 1352 1353 /* Configure the port RGMII clock delay by DLL disabled and 1354 * tx_clk aligned timing (restoring to reset defaults) 1355 */ 1356 b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl); 1357 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC); 1358 1359 /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make 1360 * sure that we enable the port TX clock internal delay to 1361 * account for this internal delay that is inserted, otherwise 1362 * the switch won't be able to receive correctly. 1363 * 1364 * PHY_INTERFACE_MODE_RGMII means that we are not introducing 1365 * any delay neither on transmission nor reception, so the 1366 * BCM53125 must also be configured accordingly to account for 1367 * the lack of delay and introduce 1368 * 1369 * The BCM53125 switch has its RX clock and TX clock control 1370 * swapped, hence the reason why we modify the TX clock path in 1371 * the "RGMII" case 1372 */ 1373 if (interface == PHY_INTERFACE_MODE_RGMII_TXID) 1374 rgmii_ctrl |= RGMII_CTRL_DLL_TXC; 1375 if (interface == PHY_INTERFACE_MODE_RGMII) 1376 rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC; 1377 1378 if (dev->chip_id != BCM53115_DEVICE_ID) 1379 rgmii_ctrl |= RGMII_CTRL_TIMING_SEL; 1380 1381 b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl); 1382 1383 dev_info(ds->dev, "Configured port %d for %s\n", port, 1384 phy_modes(interface)); 1385 } 1386 1387 static void b53_adjust_5325_mii(struct dsa_switch *ds, int port) 1388 { 1389 struct b53_device *dev = ds->priv; 1390 u8 reg = 0; 1391 1392 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1393 ®); 1394 1395 /* reverse mii needs to be enabled */ 1396 if (!(reg & PORT_OVERRIDE_RV_MII_25)) { 1397 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1398 reg | PORT_OVERRIDE_RV_MII_25); 1399 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1400 ®); 1401 1402 if (!(reg & PORT_OVERRIDE_RV_MII_25)) { 1403 dev_err(ds->dev, 1404 "Failed to enable reverse MII mode\n"); 1405 return; 1406 } 1407 } 1408 } 1409 1410 void b53_port_event(struct dsa_switch *ds, int port) 1411 { 1412 struct b53_device *dev = ds->priv; 1413 bool link; 1414 u16 sts; 1415 1416 b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts); 1417 link = !!(sts & BIT(port)); 1418 dsa_port_phylink_mac_change(ds, port, link); 1419 } 1420 EXPORT_SYMBOL(b53_port_event); 1421 1422 static void b53_phylink_get_caps(struct dsa_switch *ds, int port, 1423 struct phylink_config *config) 1424 { 1425 struct b53_device *dev = ds->priv; 1426 1427 /* Internal ports need GMII for PHYLIB */ 1428 __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces); 1429 1430 /* These switches appear to support MII and RevMII too, but beyond 1431 * this, the code gives very few clues. FIXME: We probably need more 1432 * interface modes here. 1433 * 1434 * According to b53_srab_mux_init(), ports 3..5 can support: 1435 * SGMII, MII, GMII, RGMII or INTERNAL depending on the MUX setting. 1436 * However, the interface mode read from the MUX configuration is 1437 * not passed back to DSA, so phylink uses NA. 1438 * DT can specify RGMII for ports 0, 1. 1439 * For MDIO, port 8 can be RGMII_TXID. 1440 */ 1441 __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces); 1442 __set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces); 1443 1444 /* BCM63xx RGMII ports support RGMII */ 1445 if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) 1446 phy_interface_set_rgmii(config->supported_interfaces); 1447 1448 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | 1449 MAC_10 | MAC_100; 1450 1451 /* 5325/5365 are not capable of gigabit speeds, everything else is. 1452 * Note: the original code also exclulded Gigagbit for MII, RevMII 1453 * and 802.3z modes. MII and RevMII are not able to work above 100M, 1454 * so will be excluded by the generic validator implementation. 1455 * However, the exclusion of Gigabit for 802.3z just seems wrong. 1456 */ 1457 if (!(is5325(dev) || is5365(dev))) 1458 config->mac_capabilities |= MAC_1000; 1459 1460 /* Get the implementation specific capabilities */ 1461 if (dev->ops->phylink_get_caps) 1462 dev->ops->phylink_get_caps(dev, port, config); 1463 } 1464 1465 static struct phylink_pcs *b53_phylink_mac_select_pcs(struct phylink_config *config, 1466 phy_interface_t interface) 1467 { 1468 struct dsa_port *dp = dsa_phylink_to_port(config); 1469 struct b53_device *dev = dp->ds->priv; 1470 1471 if (!dev->ops->phylink_mac_select_pcs) 1472 return NULL; 1473 1474 return dev->ops->phylink_mac_select_pcs(dev, dp->index, interface); 1475 } 1476 1477 static void b53_phylink_mac_config(struct phylink_config *config, 1478 unsigned int mode, 1479 const struct phylink_link_state *state) 1480 { 1481 struct dsa_port *dp = dsa_phylink_to_port(config); 1482 phy_interface_t interface = state->interface; 1483 struct dsa_switch *ds = dp->ds; 1484 struct b53_device *dev = ds->priv; 1485 int port = dp->index; 1486 1487 if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) 1488 b53_adjust_63xx_rgmii(ds, port, interface); 1489 1490 if (mode == MLO_AN_FIXED) { 1491 if (is531x5(dev) && phy_interface_mode_is_rgmii(interface)) 1492 b53_adjust_531x5_rgmii(ds, port, interface); 1493 1494 /* configure MII port if necessary */ 1495 if (is5325(dev)) 1496 b53_adjust_5325_mii(ds, port); 1497 } 1498 } 1499 1500 static void b53_phylink_mac_link_down(struct phylink_config *config, 1501 unsigned int mode, 1502 phy_interface_t interface) 1503 { 1504 struct dsa_port *dp = dsa_phylink_to_port(config); 1505 struct b53_device *dev = dp->ds->priv; 1506 int port = dp->index; 1507 1508 if (mode == MLO_AN_PHY) 1509 return; 1510 1511 if (mode == MLO_AN_FIXED) { 1512 b53_force_link(dev, port, false); 1513 return; 1514 } 1515 1516 if (phy_interface_mode_is_8023z(interface) && 1517 dev->ops->serdes_link_set) 1518 dev->ops->serdes_link_set(dev, port, mode, interface, false); 1519 } 1520 1521 static void b53_phylink_mac_link_up(struct phylink_config *config, 1522 struct phy_device *phydev, 1523 unsigned int mode, 1524 phy_interface_t interface, 1525 int speed, int duplex, 1526 bool tx_pause, bool rx_pause) 1527 { 1528 struct dsa_port *dp = dsa_phylink_to_port(config); 1529 struct dsa_switch *ds = dp->ds; 1530 struct b53_device *dev = ds->priv; 1531 struct ethtool_keee *p = &dev->ports[dp->index].eee; 1532 int port = dp->index; 1533 1534 if (mode == MLO_AN_PHY) { 1535 /* Re-negotiate EEE if it was enabled already */ 1536 p->eee_enabled = b53_eee_init(ds, port, phydev); 1537 return; 1538 } 1539 1540 if (mode == MLO_AN_FIXED) { 1541 /* Force flow control on BCM5301x's CPU port */ 1542 if (is5301x(dev) && dsa_is_cpu_port(ds, port)) 1543 tx_pause = rx_pause = true; 1544 1545 b53_force_port_config(dev, port, speed, duplex, 1546 tx_pause, rx_pause); 1547 b53_force_link(dev, port, true); 1548 return; 1549 } 1550 1551 if (phy_interface_mode_is_8023z(interface) && 1552 dev->ops->serdes_link_set) 1553 dev->ops->serdes_link_set(dev, port, mode, interface, true); 1554 } 1555 1556 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, 1557 struct netlink_ext_ack *extack) 1558 { 1559 struct b53_device *dev = ds->priv; 1560 1561 if (dev->vlan_filtering != vlan_filtering) { 1562 dev->vlan_filtering = vlan_filtering; 1563 b53_apply_config(dev); 1564 } 1565 1566 return 0; 1567 } 1568 EXPORT_SYMBOL(b53_vlan_filtering); 1569 1570 static int b53_vlan_prepare(struct dsa_switch *ds, int port, 1571 const struct switchdev_obj_port_vlan *vlan) 1572 { 1573 struct b53_device *dev = ds->priv; 1574 1575 if ((is5325(dev) || is5365(dev)) && vlan->vid == 0) 1576 return -EOPNOTSUPP; 1577 1578 /* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of 1579 * receiving VLAN tagged frames at all, we can still allow the port to 1580 * be configured for egress untagged. 1581 */ 1582 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7 && 1583 !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) 1584 return -EINVAL; 1585 1586 if (vlan->vid >= dev->num_vlans) 1587 return -ERANGE; 1588 1589 b53_enable_vlan(dev, port, true, dev->vlan_filtering); 1590 1591 return 0; 1592 } 1593 1594 int b53_vlan_add(struct dsa_switch *ds, int port, 1595 const struct switchdev_obj_port_vlan *vlan, 1596 struct netlink_ext_ack *extack) 1597 { 1598 struct b53_device *dev = ds->priv; 1599 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1600 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; 1601 struct b53_vlan *vl; 1602 u16 old_pvid, new_pvid; 1603 int err; 1604 1605 err = b53_vlan_prepare(ds, port, vlan); 1606 if (err) 1607 return err; 1608 1609 if (vlan->vid == 0) 1610 return 0; 1611 1612 old_pvid = dev->ports[port].pvid; 1613 if (pvid) 1614 new_pvid = vlan->vid; 1615 else if (!pvid && vlan->vid == old_pvid) 1616 new_pvid = b53_default_pvid(dev); 1617 else 1618 new_pvid = old_pvid; 1619 dev->ports[port].pvid = new_pvid; 1620 1621 vl = &dev->vlans[vlan->vid]; 1622 1623 if (dsa_is_cpu_port(ds, port)) 1624 untagged = false; 1625 1626 vl->members |= BIT(port); 1627 if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) 1628 vl->untag |= BIT(port); 1629 else 1630 vl->untag &= ~BIT(port); 1631 1632 if (!dev->vlan_filtering) 1633 return 0; 1634 1635 b53_set_vlan_entry(dev, vlan->vid, vl); 1636 b53_fast_age_vlan(dev, vlan->vid); 1637 1638 if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) { 1639 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), 1640 new_pvid); 1641 b53_fast_age_vlan(dev, old_pvid); 1642 } 1643 1644 return 0; 1645 } 1646 EXPORT_SYMBOL(b53_vlan_add); 1647 1648 int b53_vlan_del(struct dsa_switch *ds, int port, 1649 const struct switchdev_obj_port_vlan *vlan) 1650 { 1651 struct b53_device *dev = ds->priv; 1652 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1653 struct b53_vlan *vl; 1654 u16 pvid; 1655 1656 if (vlan->vid == 0) 1657 return 0; 1658 1659 pvid = dev->ports[port].pvid; 1660 1661 vl = &dev->vlans[vlan->vid]; 1662 1663 vl->members &= ~BIT(port); 1664 1665 if (pvid == vlan->vid) 1666 pvid = b53_default_pvid(dev); 1667 dev->ports[port].pvid = pvid; 1668 1669 if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) 1670 vl->untag &= ~(BIT(port)); 1671 1672 if (!dev->vlan_filtering) 1673 return 0; 1674 1675 b53_set_vlan_entry(dev, vlan->vid, vl); 1676 b53_fast_age_vlan(dev, vlan->vid); 1677 1678 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid); 1679 b53_fast_age_vlan(dev, pvid); 1680 1681 return 0; 1682 } 1683 EXPORT_SYMBOL(b53_vlan_del); 1684 1685 /* Address Resolution Logic routines. Caller must hold &dev->arl_mutex. */ 1686 static int b53_arl_op_wait(struct b53_device *dev) 1687 { 1688 unsigned int timeout = 10; 1689 u8 reg; 1690 1691 do { 1692 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); 1693 if (!(reg & ARLTBL_START_DONE)) 1694 return 0; 1695 1696 usleep_range(1000, 2000); 1697 } while (timeout--); 1698 1699 dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg); 1700 1701 return -ETIMEDOUT; 1702 } 1703 1704 static int b53_arl_rw_op(struct b53_device *dev, unsigned int op) 1705 { 1706 u8 reg; 1707 1708 if (op > ARLTBL_RW) 1709 return -EINVAL; 1710 1711 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); 1712 reg |= ARLTBL_START_DONE; 1713 if (op) 1714 reg |= ARLTBL_RW; 1715 else 1716 reg &= ~ARLTBL_RW; 1717 if (dev->vlan_enabled) 1718 reg &= ~ARLTBL_IVL_SVL_SELECT; 1719 else 1720 reg |= ARLTBL_IVL_SVL_SELECT; 1721 b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg); 1722 1723 return b53_arl_op_wait(dev); 1724 } 1725 1726 static int b53_arl_read(struct b53_device *dev, u64 mac, 1727 u16 vid, struct b53_arl_entry *ent, u8 *idx) 1728 { 1729 DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES); 1730 unsigned int i; 1731 int ret; 1732 1733 ret = b53_arl_op_wait(dev); 1734 if (ret) 1735 return ret; 1736 1737 bitmap_zero(free_bins, dev->num_arl_bins); 1738 1739 /* Read the bins */ 1740 for (i = 0; i < dev->num_arl_bins; i++) { 1741 u64 mac_vid; 1742 u32 fwd_entry; 1743 1744 b53_read64(dev, B53_ARLIO_PAGE, 1745 B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid); 1746 b53_read32(dev, B53_ARLIO_PAGE, 1747 B53_ARLTBL_DATA_ENTRY(i), &fwd_entry); 1748 b53_arl_to_entry(ent, mac_vid, fwd_entry); 1749 1750 if (!(fwd_entry & ARLTBL_VALID)) { 1751 set_bit(i, free_bins); 1752 continue; 1753 } 1754 if ((mac_vid & ARLTBL_MAC_MASK) != mac) 1755 continue; 1756 if (dev->vlan_enabled && 1757 ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid) 1758 continue; 1759 *idx = i; 1760 return 0; 1761 } 1762 1763 *idx = find_first_bit(free_bins, dev->num_arl_bins); 1764 return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT; 1765 } 1766 1767 static int b53_arl_op(struct b53_device *dev, int op, int port, 1768 const unsigned char *addr, u16 vid, bool is_valid) 1769 { 1770 struct b53_arl_entry ent; 1771 u32 fwd_entry; 1772 u64 mac, mac_vid = 0; 1773 u8 idx = 0; 1774 int ret; 1775 1776 /* Convert the array into a 64-bit MAC */ 1777 mac = ether_addr_to_u64(addr); 1778 1779 /* Perform a read for the given MAC and VID */ 1780 b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac); 1781 b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); 1782 1783 /* Issue a read operation for this MAC */ 1784 ret = b53_arl_rw_op(dev, 1); 1785 if (ret) 1786 return ret; 1787 1788 ret = b53_arl_read(dev, mac, vid, &ent, &idx); 1789 1790 /* If this is a read, just finish now */ 1791 if (op) 1792 return ret; 1793 1794 switch (ret) { 1795 case -ETIMEDOUT: 1796 return ret; 1797 case -ENOSPC: 1798 dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n", 1799 addr, vid); 1800 return is_valid ? ret : 0; 1801 case -ENOENT: 1802 /* We could not find a matching MAC, so reset to a new entry */ 1803 dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n", 1804 addr, vid, idx); 1805 fwd_entry = 0; 1806 break; 1807 default: 1808 dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n", 1809 addr, vid, idx); 1810 break; 1811 } 1812 1813 /* For multicast address, the port is a bitmask and the validity 1814 * is determined by having at least one port being still active 1815 */ 1816 if (!is_multicast_ether_addr(addr)) { 1817 ent.port = port; 1818 ent.is_valid = is_valid; 1819 } else { 1820 if (is_valid) 1821 ent.port |= BIT(port); 1822 else 1823 ent.port &= ~BIT(port); 1824 1825 ent.is_valid = !!(ent.port); 1826 } 1827 1828 ent.vid = vid; 1829 ent.is_static = true; 1830 ent.is_age = false; 1831 memcpy(ent.mac, addr, ETH_ALEN); 1832 b53_arl_from_entry(&mac_vid, &fwd_entry, &ent); 1833 1834 b53_write64(dev, B53_ARLIO_PAGE, 1835 B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid); 1836 b53_write32(dev, B53_ARLIO_PAGE, 1837 B53_ARLTBL_DATA_ENTRY(idx), fwd_entry); 1838 1839 return b53_arl_rw_op(dev, 0); 1840 } 1841 1842 int b53_fdb_add(struct dsa_switch *ds, int port, 1843 const unsigned char *addr, u16 vid, 1844 struct dsa_db db) 1845 { 1846 struct b53_device *priv = ds->priv; 1847 int ret; 1848 1849 /* 5325 and 5365 require some more massaging, but could 1850 * be supported eventually 1851 */ 1852 if (is5325(priv) || is5365(priv)) 1853 return -EOPNOTSUPP; 1854 1855 mutex_lock(&priv->arl_mutex); 1856 ret = b53_arl_op(priv, 0, port, addr, vid, true); 1857 mutex_unlock(&priv->arl_mutex); 1858 1859 return ret; 1860 } 1861 EXPORT_SYMBOL(b53_fdb_add); 1862 1863 int b53_fdb_del(struct dsa_switch *ds, int port, 1864 const unsigned char *addr, u16 vid, 1865 struct dsa_db db) 1866 { 1867 struct b53_device *priv = ds->priv; 1868 int ret; 1869 1870 mutex_lock(&priv->arl_mutex); 1871 ret = b53_arl_op(priv, 0, port, addr, vid, false); 1872 mutex_unlock(&priv->arl_mutex); 1873 1874 return ret; 1875 } 1876 EXPORT_SYMBOL(b53_fdb_del); 1877 1878 static int b53_arl_search_wait(struct b53_device *dev) 1879 { 1880 unsigned int timeout = 1000; 1881 u8 reg; 1882 1883 do { 1884 b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, ®); 1885 if (!(reg & ARL_SRCH_STDN)) 1886 return 0; 1887 1888 if (reg & ARL_SRCH_VLID) 1889 return 0; 1890 1891 usleep_range(1000, 2000); 1892 } while (timeout--); 1893 1894 return -ETIMEDOUT; 1895 } 1896 1897 static void b53_arl_search_rd(struct b53_device *dev, u8 idx, 1898 struct b53_arl_entry *ent) 1899 { 1900 u64 mac_vid; 1901 u32 fwd_entry; 1902 1903 b53_read64(dev, B53_ARLIO_PAGE, 1904 B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid); 1905 b53_read32(dev, B53_ARLIO_PAGE, 1906 B53_ARL_SRCH_RSTL(idx), &fwd_entry); 1907 b53_arl_to_entry(ent, mac_vid, fwd_entry); 1908 } 1909 1910 static int b53_fdb_copy(int port, const struct b53_arl_entry *ent, 1911 dsa_fdb_dump_cb_t *cb, void *data) 1912 { 1913 if (!ent->is_valid) 1914 return 0; 1915 1916 if (port != ent->port) 1917 return 0; 1918 1919 return cb(ent->mac, ent->vid, ent->is_static, data); 1920 } 1921 1922 int b53_fdb_dump(struct dsa_switch *ds, int port, 1923 dsa_fdb_dump_cb_t *cb, void *data) 1924 { 1925 struct b53_device *priv = ds->priv; 1926 struct b53_arl_entry results[2]; 1927 unsigned int count = 0; 1928 int ret; 1929 u8 reg; 1930 1931 mutex_lock(&priv->arl_mutex); 1932 1933 /* Start search operation */ 1934 reg = ARL_SRCH_STDN; 1935 b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg); 1936 1937 do { 1938 ret = b53_arl_search_wait(priv); 1939 if (ret) 1940 break; 1941 1942 b53_arl_search_rd(priv, 0, &results[0]); 1943 ret = b53_fdb_copy(port, &results[0], cb, data); 1944 if (ret) 1945 break; 1946 1947 if (priv->num_arl_bins > 2) { 1948 b53_arl_search_rd(priv, 1, &results[1]); 1949 ret = b53_fdb_copy(port, &results[1], cb, data); 1950 if (ret) 1951 break; 1952 1953 if (!results[0].is_valid && !results[1].is_valid) 1954 break; 1955 } 1956 1957 } while (count++ < b53_max_arl_entries(priv) / 2); 1958 1959 mutex_unlock(&priv->arl_mutex); 1960 1961 return 0; 1962 } 1963 EXPORT_SYMBOL(b53_fdb_dump); 1964 1965 int b53_mdb_add(struct dsa_switch *ds, int port, 1966 const struct switchdev_obj_port_mdb *mdb, 1967 struct dsa_db db) 1968 { 1969 struct b53_device *priv = ds->priv; 1970 int ret; 1971 1972 /* 5325 and 5365 require some more massaging, but could 1973 * be supported eventually 1974 */ 1975 if (is5325(priv) || is5365(priv)) 1976 return -EOPNOTSUPP; 1977 1978 mutex_lock(&priv->arl_mutex); 1979 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true); 1980 mutex_unlock(&priv->arl_mutex); 1981 1982 return ret; 1983 } 1984 EXPORT_SYMBOL(b53_mdb_add); 1985 1986 int b53_mdb_del(struct dsa_switch *ds, int port, 1987 const struct switchdev_obj_port_mdb *mdb, 1988 struct dsa_db db) 1989 { 1990 struct b53_device *priv = ds->priv; 1991 int ret; 1992 1993 mutex_lock(&priv->arl_mutex); 1994 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false); 1995 mutex_unlock(&priv->arl_mutex); 1996 if (ret) 1997 dev_err(ds->dev, "failed to delete MDB entry\n"); 1998 1999 return ret; 2000 } 2001 EXPORT_SYMBOL(b53_mdb_del); 2002 2003 int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, 2004 bool *tx_fwd_offload, struct netlink_ext_ack *extack) 2005 { 2006 struct b53_device *dev = ds->priv; 2007 struct b53_vlan *vl; 2008 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 2009 u16 pvlan, reg, pvid; 2010 unsigned int i; 2011 2012 /* On 7278, port 7 which connects to the ASP should only receive 2013 * traffic from matching CFP rules. 2014 */ 2015 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7) 2016 return -EINVAL; 2017 2018 pvid = b53_default_pvid(dev); 2019 vl = &dev->vlans[pvid]; 2020 2021 if (dev->vlan_filtering) { 2022 /* Make this port leave the all VLANs join since we will have 2023 * proper VLAN entries from now on 2024 */ 2025 if (is58xx(dev)) { 2026 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, 2027 ®); 2028 reg &= ~BIT(port); 2029 if ((reg & BIT(cpu_port)) == BIT(cpu_port)) 2030 reg &= ~BIT(cpu_port); 2031 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, 2032 reg); 2033 } 2034 2035 b53_get_vlan_entry(dev, pvid, vl); 2036 vl->members &= ~BIT(port); 2037 b53_set_vlan_entry(dev, pvid, vl); 2038 } 2039 2040 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 2041 2042 b53_for_each_port(dev, i) { 2043 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) 2044 continue; 2045 2046 /* Add this local port to the remote port VLAN control 2047 * membership and update the remote port bitmask 2048 */ 2049 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); 2050 reg |= BIT(port); 2051 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); 2052 dev->ports[i].vlan_ctl_mask = reg; 2053 2054 pvlan |= BIT(i); 2055 } 2056 2057 /* Disable redirection of unknown SA to the CPU port */ 2058 b53_set_eap_mode(dev, port, EAP_MODE_BASIC); 2059 2060 /* Configure the local port VLAN control membership to include 2061 * remote ports and update the local port bitmask 2062 */ 2063 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 2064 dev->ports[port].vlan_ctl_mask = pvlan; 2065 2066 return 0; 2067 } 2068 EXPORT_SYMBOL(b53_br_join); 2069 2070 void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) 2071 { 2072 struct b53_device *dev = ds->priv; 2073 struct b53_vlan *vl; 2074 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 2075 unsigned int i; 2076 u16 pvlan, reg, pvid; 2077 2078 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 2079 2080 b53_for_each_port(dev, i) { 2081 /* Don't touch the remaining ports */ 2082 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) 2083 continue; 2084 2085 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); 2086 reg &= ~BIT(port); 2087 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); 2088 dev->ports[port].vlan_ctl_mask = reg; 2089 2090 /* Prevent self removal to preserve isolation */ 2091 if (port != i) 2092 pvlan &= ~BIT(i); 2093 } 2094 2095 /* Enable redirection of unknown SA to the CPU port */ 2096 b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED); 2097 2098 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 2099 dev->ports[port].vlan_ctl_mask = pvlan; 2100 2101 pvid = b53_default_pvid(dev); 2102 vl = &dev->vlans[pvid]; 2103 2104 if (dev->vlan_filtering) { 2105 /* Make this port join all VLANs without VLAN entries */ 2106 if (is58xx(dev)) { 2107 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, ®); 2108 reg |= BIT(port); 2109 if (!(reg & BIT(cpu_port))) 2110 reg |= BIT(cpu_port); 2111 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); 2112 } 2113 2114 b53_get_vlan_entry(dev, pvid, vl); 2115 vl->members |= BIT(port); 2116 b53_set_vlan_entry(dev, pvid, vl); 2117 } 2118 } 2119 EXPORT_SYMBOL(b53_br_leave); 2120 2121 void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state) 2122 { 2123 struct b53_device *dev = ds->priv; 2124 u8 hw_state; 2125 u8 reg; 2126 2127 switch (state) { 2128 case BR_STATE_DISABLED: 2129 hw_state = PORT_CTRL_DIS_STATE; 2130 break; 2131 case BR_STATE_LISTENING: 2132 hw_state = PORT_CTRL_LISTEN_STATE; 2133 break; 2134 case BR_STATE_LEARNING: 2135 hw_state = PORT_CTRL_LEARN_STATE; 2136 break; 2137 case BR_STATE_FORWARDING: 2138 hw_state = PORT_CTRL_FWD_STATE; 2139 break; 2140 case BR_STATE_BLOCKING: 2141 hw_state = PORT_CTRL_BLOCK_STATE; 2142 break; 2143 default: 2144 dev_err(ds->dev, "invalid STP state: %d\n", state); 2145 return; 2146 } 2147 2148 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), ®); 2149 reg &= ~PORT_CTRL_STP_STATE_MASK; 2150 reg |= hw_state; 2151 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); 2152 } 2153 EXPORT_SYMBOL(b53_br_set_stp_state); 2154 2155 void b53_br_fast_age(struct dsa_switch *ds, int port) 2156 { 2157 struct b53_device *dev = ds->priv; 2158 2159 if (b53_fast_age_port(dev, port)) 2160 dev_err(ds->dev, "fast ageing failed\n"); 2161 } 2162 EXPORT_SYMBOL(b53_br_fast_age); 2163 2164 int b53_br_flags_pre(struct dsa_switch *ds, int port, 2165 struct switchdev_brport_flags flags, 2166 struct netlink_ext_ack *extack) 2167 { 2168 if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING)) 2169 return -EINVAL; 2170 2171 return 0; 2172 } 2173 EXPORT_SYMBOL(b53_br_flags_pre); 2174 2175 int b53_br_flags(struct dsa_switch *ds, int port, 2176 struct switchdev_brport_flags flags, 2177 struct netlink_ext_ack *extack) 2178 { 2179 if (flags.mask & BR_FLOOD) 2180 b53_port_set_ucast_flood(ds->priv, port, 2181 !!(flags.val & BR_FLOOD)); 2182 if (flags.mask & BR_MCAST_FLOOD) 2183 b53_port_set_mcast_flood(ds->priv, port, 2184 !!(flags.val & BR_MCAST_FLOOD)); 2185 if (flags.mask & BR_LEARNING) 2186 b53_port_set_learning(ds->priv, port, 2187 !!(flags.val & BR_LEARNING)); 2188 2189 return 0; 2190 } 2191 EXPORT_SYMBOL(b53_br_flags); 2192 2193 static bool b53_possible_cpu_port(struct dsa_switch *ds, int port) 2194 { 2195 /* Broadcom switches will accept enabling Broadcom tags on the 2196 * following ports: 5, 7 and 8, any other port is not supported 2197 */ 2198 switch (port) { 2199 case B53_CPU_PORT_25: 2200 case 7: 2201 case B53_CPU_PORT: 2202 return true; 2203 } 2204 2205 return false; 2206 } 2207 2208 static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port, 2209 enum dsa_tag_protocol tag_protocol) 2210 { 2211 bool ret = b53_possible_cpu_port(ds, port); 2212 2213 if (!ret) { 2214 dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n", 2215 port); 2216 return ret; 2217 } 2218 2219 switch (tag_protocol) { 2220 case DSA_TAG_PROTO_BRCM: 2221 case DSA_TAG_PROTO_BRCM_PREPEND: 2222 dev_warn(ds->dev, 2223 "Port %d is stacked to Broadcom tag switch\n", port); 2224 ret = false; 2225 break; 2226 default: 2227 ret = true; 2228 break; 2229 } 2230 2231 return ret; 2232 } 2233 2234 enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port, 2235 enum dsa_tag_protocol mprot) 2236 { 2237 struct b53_device *dev = ds->priv; 2238 2239 if (!b53_can_enable_brcm_tags(ds, port, mprot)) { 2240 dev->tag_protocol = DSA_TAG_PROTO_NONE; 2241 goto out; 2242 } 2243 2244 /* Older models require a different 6 byte tag */ 2245 if (is5325(dev) || is5365(dev) || is63xx(dev)) { 2246 dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY; 2247 goto out; 2248 } 2249 2250 /* Broadcom BCM58xx chips have a flow accelerator on Port 8 2251 * which requires us to use the prepended Broadcom tag type 2252 */ 2253 if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) { 2254 dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND; 2255 goto out; 2256 } 2257 2258 dev->tag_protocol = DSA_TAG_PROTO_BRCM; 2259 out: 2260 return dev->tag_protocol; 2261 } 2262 EXPORT_SYMBOL(b53_get_tag_protocol); 2263 2264 int b53_mirror_add(struct dsa_switch *ds, int port, 2265 struct dsa_mall_mirror_tc_entry *mirror, bool ingress, 2266 struct netlink_ext_ack *extack) 2267 { 2268 struct b53_device *dev = ds->priv; 2269 u16 reg, loc; 2270 2271 if (ingress) 2272 loc = B53_IG_MIR_CTL; 2273 else 2274 loc = B53_EG_MIR_CTL; 2275 2276 b53_read16(dev, B53_MGMT_PAGE, loc, ®); 2277 reg |= BIT(port); 2278 b53_write16(dev, B53_MGMT_PAGE, loc, reg); 2279 2280 b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, ®); 2281 reg &= ~CAP_PORT_MASK; 2282 reg |= mirror->to_local_port; 2283 reg |= MIRROR_EN; 2284 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg); 2285 2286 return 0; 2287 } 2288 EXPORT_SYMBOL(b53_mirror_add); 2289 2290 void b53_mirror_del(struct dsa_switch *ds, int port, 2291 struct dsa_mall_mirror_tc_entry *mirror) 2292 { 2293 struct b53_device *dev = ds->priv; 2294 bool loc_disable = false, other_loc_disable = false; 2295 u16 reg, loc; 2296 2297 if (mirror->ingress) 2298 loc = B53_IG_MIR_CTL; 2299 else 2300 loc = B53_EG_MIR_CTL; 2301 2302 /* Update the desired ingress/egress register */ 2303 b53_read16(dev, B53_MGMT_PAGE, loc, ®); 2304 reg &= ~BIT(port); 2305 if (!(reg & MIRROR_MASK)) 2306 loc_disable = true; 2307 b53_write16(dev, B53_MGMT_PAGE, loc, reg); 2308 2309 /* Now look at the other one to know if we can disable mirroring 2310 * entirely 2311 */ 2312 if (mirror->ingress) 2313 b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, ®); 2314 else 2315 b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, ®); 2316 if (!(reg & MIRROR_MASK)) 2317 other_loc_disable = true; 2318 2319 b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, ®); 2320 /* Both no longer have ports, let's disable mirroring */ 2321 if (loc_disable && other_loc_disable) { 2322 reg &= ~MIRROR_EN; 2323 reg &= ~mirror->to_local_port; 2324 } 2325 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg); 2326 } 2327 EXPORT_SYMBOL(b53_mirror_del); 2328 2329 /* Returns 0 if EEE was not enabled, or 1 otherwise 2330 */ 2331 int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy) 2332 { 2333 int ret; 2334 2335 if (!b53_support_eee(ds, port)) 2336 return 0; 2337 2338 ret = phy_init_eee(phy, false); 2339 if (ret) 2340 return 0; 2341 2342 b53_eee_enable_set(ds, port, true); 2343 2344 return 1; 2345 } 2346 EXPORT_SYMBOL(b53_eee_init); 2347 2348 bool b53_support_eee(struct dsa_switch *ds, int port) 2349 { 2350 struct b53_device *dev = ds->priv; 2351 2352 return !is5325(dev) && !is5365(dev) && !is63xx(dev); 2353 } 2354 EXPORT_SYMBOL(b53_support_eee); 2355 2356 int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e) 2357 { 2358 struct b53_device *dev = ds->priv; 2359 struct ethtool_keee *p = &dev->ports[port].eee; 2360 2361 p->eee_enabled = e->eee_enabled; 2362 b53_eee_enable_set(ds, port, e->eee_enabled); 2363 2364 return 0; 2365 } 2366 EXPORT_SYMBOL(b53_set_mac_eee); 2367 2368 static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu) 2369 { 2370 struct b53_device *dev = ds->priv; 2371 bool enable_jumbo; 2372 bool allow_10_100; 2373 2374 if (is5325(dev) || is5365(dev)) 2375 return 0; 2376 2377 if (!dsa_is_cpu_port(ds, port)) 2378 return 0; 2379 2380 enable_jumbo = (mtu > ETH_DATA_LEN); 2381 allow_10_100 = !is63xx(dev); 2382 2383 return b53_set_jumbo(dev, enable_jumbo, allow_10_100); 2384 } 2385 2386 static int b53_get_max_mtu(struct dsa_switch *ds, int port) 2387 { 2388 struct b53_device *dev = ds->priv; 2389 2390 if (is5325(dev) || is5365(dev)) 2391 return B53_MAX_MTU_25; 2392 2393 return B53_MAX_MTU; 2394 } 2395 2396 int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) 2397 { 2398 struct b53_device *dev = ds->priv; 2399 u32 atc; 2400 int reg; 2401 2402 if (is63xx(dev)) 2403 reg = B53_AGING_TIME_CONTROL_63XX; 2404 else 2405 reg = B53_AGING_TIME_CONTROL; 2406 2407 atc = DIV_ROUND_CLOSEST(msecs, 1000); 2408 2409 if (!is5325(dev) && !is5365(dev)) 2410 atc |= AGE_CHANGE; 2411 2412 b53_write32(dev, B53_MGMT_PAGE, reg, atc); 2413 2414 return 0; 2415 } 2416 EXPORT_SYMBOL_GPL(b53_set_ageing_time); 2417 2418 static const struct phylink_mac_ops b53_phylink_mac_ops = { 2419 .mac_select_pcs = b53_phylink_mac_select_pcs, 2420 .mac_config = b53_phylink_mac_config, 2421 .mac_link_down = b53_phylink_mac_link_down, 2422 .mac_link_up = b53_phylink_mac_link_up, 2423 }; 2424 2425 static const struct dsa_switch_ops b53_switch_ops = { 2426 .get_tag_protocol = b53_get_tag_protocol, 2427 .setup = b53_setup, 2428 .teardown = b53_teardown, 2429 .get_strings = b53_get_strings, 2430 .get_ethtool_stats = b53_get_ethtool_stats, 2431 .get_sset_count = b53_get_sset_count, 2432 .get_ethtool_phy_stats = b53_get_ethtool_phy_stats, 2433 .phy_read = b53_phy_read16, 2434 .phy_write = b53_phy_write16, 2435 .phylink_get_caps = b53_phylink_get_caps, 2436 .port_setup = b53_setup_port, 2437 .port_enable = b53_enable_port, 2438 .port_disable = b53_disable_port, 2439 .support_eee = b53_support_eee, 2440 .set_mac_eee = b53_set_mac_eee, 2441 .set_ageing_time = b53_set_ageing_time, 2442 .port_bridge_join = b53_br_join, 2443 .port_bridge_leave = b53_br_leave, 2444 .port_pre_bridge_flags = b53_br_flags_pre, 2445 .port_bridge_flags = b53_br_flags, 2446 .port_stp_state_set = b53_br_set_stp_state, 2447 .port_fast_age = b53_br_fast_age, 2448 .port_vlan_filtering = b53_vlan_filtering, 2449 .port_vlan_add = b53_vlan_add, 2450 .port_vlan_del = b53_vlan_del, 2451 .port_fdb_dump = b53_fdb_dump, 2452 .port_fdb_add = b53_fdb_add, 2453 .port_fdb_del = b53_fdb_del, 2454 .port_mirror_add = b53_mirror_add, 2455 .port_mirror_del = b53_mirror_del, 2456 .port_mdb_add = b53_mdb_add, 2457 .port_mdb_del = b53_mdb_del, 2458 .port_max_mtu = b53_get_max_mtu, 2459 .port_change_mtu = b53_change_mtu, 2460 }; 2461 2462 struct b53_chip_data { 2463 u32 chip_id; 2464 const char *dev_name; 2465 u16 vlans; 2466 u16 enabled_ports; 2467 u8 imp_port; 2468 u8 cpu_port; 2469 u8 vta_regs[3]; 2470 u8 arl_bins; 2471 u16 arl_buckets; 2472 u8 duplex_reg; 2473 u8 jumbo_pm_reg; 2474 u8 jumbo_size_reg; 2475 }; 2476 2477 #define B53_VTA_REGS \ 2478 { B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY } 2479 #define B53_VTA_REGS_9798 \ 2480 { B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 } 2481 #define B53_VTA_REGS_63XX \ 2482 { B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX } 2483 2484 static const struct b53_chip_data b53_switch_chips[] = { 2485 { 2486 .chip_id = BCM5325_DEVICE_ID, 2487 .dev_name = "BCM5325", 2488 .vlans = 16, 2489 .enabled_ports = 0x3f, 2490 .arl_bins = 2, 2491 .arl_buckets = 1024, 2492 .imp_port = 5, 2493 .duplex_reg = B53_DUPLEX_STAT_FE, 2494 }, 2495 { 2496 .chip_id = BCM5365_DEVICE_ID, 2497 .dev_name = "BCM5365", 2498 .vlans = 256, 2499 .enabled_ports = 0x3f, 2500 .arl_bins = 2, 2501 .arl_buckets = 1024, 2502 .imp_port = 5, 2503 .duplex_reg = B53_DUPLEX_STAT_FE, 2504 }, 2505 { 2506 .chip_id = BCM5389_DEVICE_ID, 2507 .dev_name = "BCM5389", 2508 .vlans = 4096, 2509 .enabled_ports = 0x11f, 2510 .arl_bins = 4, 2511 .arl_buckets = 1024, 2512 .imp_port = 8, 2513 .vta_regs = B53_VTA_REGS, 2514 .duplex_reg = B53_DUPLEX_STAT_GE, 2515 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2516 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2517 }, 2518 { 2519 .chip_id = BCM5395_DEVICE_ID, 2520 .dev_name = "BCM5395", 2521 .vlans = 4096, 2522 .enabled_ports = 0x11f, 2523 .arl_bins = 4, 2524 .arl_buckets = 1024, 2525 .imp_port = 8, 2526 .vta_regs = B53_VTA_REGS, 2527 .duplex_reg = B53_DUPLEX_STAT_GE, 2528 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2529 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2530 }, 2531 { 2532 .chip_id = BCM5397_DEVICE_ID, 2533 .dev_name = "BCM5397", 2534 .vlans = 4096, 2535 .enabled_ports = 0x11f, 2536 .arl_bins = 4, 2537 .arl_buckets = 1024, 2538 .imp_port = 8, 2539 .vta_regs = B53_VTA_REGS_9798, 2540 .duplex_reg = B53_DUPLEX_STAT_GE, 2541 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2542 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2543 }, 2544 { 2545 .chip_id = BCM5398_DEVICE_ID, 2546 .dev_name = "BCM5398", 2547 .vlans = 4096, 2548 .enabled_ports = 0x17f, 2549 .arl_bins = 4, 2550 .arl_buckets = 1024, 2551 .imp_port = 8, 2552 .vta_regs = B53_VTA_REGS_9798, 2553 .duplex_reg = B53_DUPLEX_STAT_GE, 2554 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2555 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2556 }, 2557 { 2558 .chip_id = BCM53101_DEVICE_ID, 2559 .dev_name = "BCM53101", 2560 .vlans = 4096, 2561 .enabled_ports = 0x11f, 2562 .arl_bins = 4, 2563 .arl_buckets = 512, 2564 .vta_regs = B53_VTA_REGS, 2565 .imp_port = 8, 2566 .duplex_reg = B53_DUPLEX_STAT_GE, 2567 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2568 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2569 }, 2570 { 2571 .chip_id = BCM53115_DEVICE_ID, 2572 .dev_name = "BCM53115", 2573 .vlans = 4096, 2574 .enabled_ports = 0x11f, 2575 .arl_bins = 4, 2576 .arl_buckets = 1024, 2577 .vta_regs = B53_VTA_REGS, 2578 .imp_port = 8, 2579 .duplex_reg = B53_DUPLEX_STAT_GE, 2580 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2581 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2582 }, 2583 { 2584 .chip_id = BCM53125_DEVICE_ID, 2585 .dev_name = "BCM53125", 2586 .vlans = 4096, 2587 .enabled_ports = 0x1ff, 2588 .arl_bins = 4, 2589 .arl_buckets = 1024, 2590 .imp_port = 8, 2591 .vta_regs = B53_VTA_REGS, 2592 .duplex_reg = B53_DUPLEX_STAT_GE, 2593 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2594 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2595 }, 2596 { 2597 .chip_id = BCM53128_DEVICE_ID, 2598 .dev_name = "BCM53128", 2599 .vlans = 4096, 2600 .enabled_ports = 0x1ff, 2601 .arl_bins = 4, 2602 .arl_buckets = 1024, 2603 .imp_port = 8, 2604 .vta_regs = B53_VTA_REGS, 2605 .duplex_reg = B53_DUPLEX_STAT_GE, 2606 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2607 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2608 }, 2609 { 2610 .chip_id = BCM63XX_DEVICE_ID, 2611 .dev_name = "BCM63xx", 2612 .vlans = 4096, 2613 .enabled_ports = 0, /* pdata must provide them */ 2614 .arl_bins = 4, 2615 .arl_buckets = 1024, 2616 .imp_port = 8, 2617 .vta_regs = B53_VTA_REGS_63XX, 2618 .duplex_reg = B53_DUPLEX_STAT_63XX, 2619 .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, 2620 .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX, 2621 }, 2622 { 2623 .chip_id = BCM63268_DEVICE_ID, 2624 .dev_name = "BCM63268", 2625 .vlans = 4096, 2626 .enabled_ports = 0, /* pdata must provide them */ 2627 .arl_bins = 4, 2628 .arl_buckets = 1024, 2629 .imp_port = 8, 2630 .vta_regs = B53_VTA_REGS_63XX, 2631 .duplex_reg = B53_DUPLEX_STAT_63XX, 2632 .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, 2633 .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX, 2634 }, 2635 { 2636 .chip_id = BCM53010_DEVICE_ID, 2637 .dev_name = "BCM53010", 2638 .vlans = 4096, 2639 .enabled_ports = 0x1bf, 2640 .arl_bins = 4, 2641 .arl_buckets = 1024, 2642 .imp_port = 8, 2643 .vta_regs = B53_VTA_REGS, 2644 .duplex_reg = B53_DUPLEX_STAT_GE, 2645 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2646 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2647 }, 2648 { 2649 .chip_id = BCM53011_DEVICE_ID, 2650 .dev_name = "BCM53011", 2651 .vlans = 4096, 2652 .enabled_ports = 0x1bf, 2653 .arl_bins = 4, 2654 .arl_buckets = 1024, 2655 .imp_port = 8, 2656 .vta_regs = B53_VTA_REGS, 2657 .duplex_reg = B53_DUPLEX_STAT_GE, 2658 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2659 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2660 }, 2661 { 2662 .chip_id = BCM53012_DEVICE_ID, 2663 .dev_name = "BCM53012", 2664 .vlans = 4096, 2665 .enabled_ports = 0x1bf, 2666 .arl_bins = 4, 2667 .arl_buckets = 1024, 2668 .imp_port = 8, 2669 .vta_regs = B53_VTA_REGS, 2670 .duplex_reg = B53_DUPLEX_STAT_GE, 2671 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2672 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2673 }, 2674 { 2675 .chip_id = BCM53018_DEVICE_ID, 2676 .dev_name = "BCM53018", 2677 .vlans = 4096, 2678 .enabled_ports = 0x1bf, 2679 .arl_bins = 4, 2680 .arl_buckets = 1024, 2681 .imp_port = 8, 2682 .vta_regs = B53_VTA_REGS, 2683 .duplex_reg = B53_DUPLEX_STAT_GE, 2684 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2685 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2686 }, 2687 { 2688 .chip_id = BCM53019_DEVICE_ID, 2689 .dev_name = "BCM53019", 2690 .vlans = 4096, 2691 .enabled_ports = 0x1bf, 2692 .arl_bins = 4, 2693 .arl_buckets = 1024, 2694 .imp_port = 8, 2695 .vta_regs = B53_VTA_REGS, 2696 .duplex_reg = B53_DUPLEX_STAT_GE, 2697 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2698 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2699 }, 2700 { 2701 .chip_id = BCM58XX_DEVICE_ID, 2702 .dev_name = "BCM585xx/586xx/88312", 2703 .vlans = 4096, 2704 .enabled_ports = 0x1ff, 2705 .arl_bins = 4, 2706 .arl_buckets = 1024, 2707 .imp_port = 8, 2708 .vta_regs = B53_VTA_REGS, 2709 .duplex_reg = B53_DUPLEX_STAT_GE, 2710 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2711 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2712 }, 2713 { 2714 .chip_id = BCM583XX_DEVICE_ID, 2715 .dev_name = "BCM583xx/11360", 2716 .vlans = 4096, 2717 .enabled_ports = 0x103, 2718 .arl_bins = 4, 2719 .arl_buckets = 1024, 2720 .imp_port = 8, 2721 .vta_regs = B53_VTA_REGS, 2722 .duplex_reg = B53_DUPLEX_STAT_GE, 2723 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2724 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2725 }, 2726 /* Starfighter 2 */ 2727 { 2728 .chip_id = BCM4908_DEVICE_ID, 2729 .dev_name = "BCM4908", 2730 .vlans = 4096, 2731 .enabled_ports = 0x1bf, 2732 .arl_bins = 4, 2733 .arl_buckets = 256, 2734 .imp_port = 8, 2735 .vta_regs = B53_VTA_REGS, 2736 .duplex_reg = B53_DUPLEX_STAT_GE, 2737 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2738 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2739 }, 2740 { 2741 .chip_id = BCM7445_DEVICE_ID, 2742 .dev_name = "BCM7445", 2743 .vlans = 4096, 2744 .enabled_ports = 0x1ff, 2745 .arl_bins = 4, 2746 .arl_buckets = 1024, 2747 .imp_port = 8, 2748 .vta_regs = B53_VTA_REGS, 2749 .duplex_reg = B53_DUPLEX_STAT_GE, 2750 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2751 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2752 }, 2753 { 2754 .chip_id = BCM7278_DEVICE_ID, 2755 .dev_name = "BCM7278", 2756 .vlans = 4096, 2757 .enabled_ports = 0x1ff, 2758 .arl_bins = 4, 2759 .arl_buckets = 256, 2760 .imp_port = 8, 2761 .vta_regs = B53_VTA_REGS, 2762 .duplex_reg = B53_DUPLEX_STAT_GE, 2763 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2764 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2765 }, 2766 { 2767 .chip_id = BCM53134_DEVICE_ID, 2768 .dev_name = "BCM53134", 2769 .vlans = 4096, 2770 .enabled_ports = 0x12f, 2771 .imp_port = 8, 2772 .cpu_port = B53_CPU_PORT, 2773 .vta_regs = B53_VTA_REGS, 2774 .arl_bins = 4, 2775 .arl_buckets = 1024, 2776 .duplex_reg = B53_DUPLEX_STAT_GE, 2777 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2778 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2779 }, 2780 }; 2781 2782 static int b53_switch_init(struct b53_device *dev) 2783 { 2784 unsigned int i; 2785 int ret; 2786 2787 for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) { 2788 const struct b53_chip_data *chip = &b53_switch_chips[i]; 2789 2790 if (chip->chip_id == dev->chip_id) { 2791 if (!dev->enabled_ports) 2792 dev->enabled_ports = chip->enabled_ports; 2793 dev->name = chip->dev_name; 2794 dev->duplex_reg = chip->duplex_reg; 2795 dev->vta_regs[0] = chip->vta_regs[0]; 2796 dev->vta_regs[1] = chip->vta_regs[1]; 2797 dev->vta_regs[2] = chip->vta_regs[2]; 2798 dev->jumbo_pm_reg = chip->jumbo_pm_reg; 2799 dev->imp_port = chip->imp_port; 2800 dev->num_vlans = chip->vlans; 2801 dev->num_arl_bins = chip->arl_bins; 2802 dev->num_arl_buckets = chip->arl_buckets; 2803 break; 2804 } 2805 } 2806 2807 /* check which BCM5325x version we have */ 2808 if (is5325(dev)) { 2809 u8 vc4; 2810 2811 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4); 2812 2813 /* check reserved bits */ 2814 switch (vc4 & 3) { 2815 case 1: 2816 /* BCM5325E */ 2817 break; 2818 case 3: 2819 /* BCM5325F - do not use port 4 */ 2820 dev->enabled_ports &= ~BIT(4); 2821 break; 2822 default: 2823 /* On the BCM47XX SoCs this is the supported internal switch.*/ 2824 #ifndef CONFIG_BCM47XX 2825 /* BCM5325M */ 2826 return -EINVAL; 2827 #else 2828 break; 2829 #endif 2830 } 2831 } 2832 2833 dev->num_ports = fls(dev->enabled_ports); 2834 2835 dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS); 2836 2837 /* Include non standard CPU port built-in PHYs to be probed */ 2838 if (is539x(dev) || is531x5(dev)) { 2839 for (i = 0; i < dev->num_ports; i++) { 2840 if (!(dev->ds->phys_mii_mask & BIT(i)) && 2841 !b53_possible_cpu_port(dev->ds, i)) 2842 dev->ds->phys_mii_mask |= BIT(i); 2843 } 2844 } 2845 2846 dev->ports = devm_kcalloc(dev->dev, 2847 dev->num_ports, sizeof(struct b53_port), 2848 GFP_KERNEL); 2849 if (!dev->ports) 2850 return -ENOMEM; 2851 2852 dev->vlans = devm_kcalloc(dev->dev, 2853 dev->num_vlans, sizeof(struct b53_vlan), 2854 GFP_KERNEL); 2855 if (!dev->vlans) 2856 return -ENOMEM; 2857 2858 dev->reset_gpio = b53_switch_get_reset_gpio(dev); 2859 if (dev->reset_gpio >= 0) { 2860 ret = devm_gpio_request_one(dev->dev, dev->reset_gpio, 2861 GPIOF_OUT_INIT_HIGH, "robo_reset"); 2862 if (ret) 2863 return ret; 2864 } 2865 2866 return 0; 2867 } 2868 2869 struct b53_device *b53_switch_alloc(struct device *base, 2870 const struct b53_io_ops *ops, 2871 void *priv) 2872 { 2873 struct dsa_switch *ds; 2874 struct b53_device *dev; 2875 2876 ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL); 2877 if (!ds) 2878 return NULL; 2879 2880 ds->dev = base; 2881 2882 dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL); 2883 if (!dev) 2884 return NULL; 2885 2886 ds->priv = dev; 2887 dev->dev = base; 2888 2889 dev->ds = ds; 2890 dev->priv = priv; 2891 dev->ops = ops; 2892 ds->ops = &b53_switch_ops; 2893 ds->phylink_mac_ops = &b53_phylink_mac_ops; 2894 dev->vlan_enabled = true; 2895 dev->vlan_filtering = false; 2896 /* Let DSA handle the case were multiple bridges span the same switch 2897 * device and different VLAN awareness settings are requested, which 2898 * would be breaking filtering semantics for any of the other bridge 2899 * devices. (not hardware supported) 2900 */ 2901 ds->vlan_filtering_is_global = true; 2902 2903 mutex_init(&dev->reg_mutex); 2904 mutex_init(&dev->stats_mutex); 2905 mutex_init(&dev->arl_mutex); 2906 2907 return dev; 2908 } 2909 EXPORT_SYMBOL(b53_switch_alloc); 2910 2911 int b53_switch_detect(struct b53_device *dev) 2912 { 2913 u32 id32; 2914 u16 tmp; 2915 u8 id8; 2916 int ret; 2917 2918 ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8); 2919 if (ret) 2920 return ret; 2921 2922 switch (id8) { 2923 case 0: 2924 /* BCM5325 and BCM5365 do not have this register so reads 2925 * return 0. But the read operation did succeed, so assume this 2926 * is one of them. 2927 * 2928 * Next check if we can write to the 5325's VTA register; for 2929 * 5365 it is read only. 2930 */ 2931 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf); 2932 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp); 2933 2934 if (tmp == 0xf) 2935 dev->chip_id = BCM5325_DEVICE_ID; 2936 else 2937 dev->chip_id = BCM5365_DEVICE_ID; 2938 break; 2939 case BCM5389_DEVICE_ID: 2940 case BCM5395_DEVICE_ID: 2941 case BCM5397_DEVICE_ID: 2942 case BCM5398_DEVICE_ID: 2943 dev->chip_id = id8; 2944 break; 2945 default: 2946 ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32); 2947 if (ret) 2948 return ret; 2949 2950 switch (id32) { 2951 case BCM53101_DEVICE_ID: 2952 case BCM53115_DEVICE_ID: 2953 case BCM53125_DEVICE_ID: 2954 case BCM53128_DEVICE_ID: 2955 case BCM53010_DEVICE_ID: 2956 case BCM53011_DEVICE_ID: 2957 case BCM53012_DEVICE_ID: 2958 case BCM53018_DEVICE_ID: 2959 case BCM53019_DEVICE_ID: 2960 case BCM53134_DEVICE_ID: 2961 dev->chip_id = id32; 2962 break; 2963 default: 2964 dev_err(dev->dev, 2965 "unsupported switch detected (BCM53%02x/BCM%x)\n", 2966 id8, id32); 2967 return -ENODEV; 2968 } 2969 } 2970 2971 if (dev->chip_id == BCM5325_DEVICE_ID) 2972 return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25, 2973 &dev->core_rev); 2974 else 2975 return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID, 2976 &dev->core_rev); 2977 } 2978 EXPORT_SYMBOL(b53_switch_detect); 2979 2980 int b53_switch_register(struct b53_device *dev) 2981 { 2982 int ret; 2983 2984 if (dev->pdata) { 2985 dev->chip_id = dev->pdata->chip_id; 2986 dev->enabled_ports = dev->pdata->enabled_ports; 2987 } 2988 2989 if (!dev->chip_id && b53_switch_detect(dev)) 2990 return -EINVAL; 2991 2992 ret = b53_switch_init(dev); 2993 if (ret) 2994 return ret; 2995 2996 dev_info(dev->dev, "found switch: %s, rev %i\n", 2997 dev->name, dev->core_rev); 2998 2999 return dsa_register_switch(dev->ds); 3000 } 3001 EXPORT_SYMBOL(b53_switch_register); 3002 3003 MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>"); 3004 MODULE_DESCRIPTION("B53 switch library"); 3005 MODULE_LICENSE("Dual BSD/GPL"); 3006