// SPDX-License-Identifier: GPL-2.0
/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/io.h>
#include <linux/of.h>

#include "niu.h"

/* This driver wants to store a link to a "next page" within the
 * page struct itself by overloading the content of the "mapping"
 * member. This is not expected by the page API, but does currently
 * work. However, the randstruct plugin gets very bothered by this
 * case because "mapping" (struct address_space) is randomized, so
 * casts to/from it trigger warnings. Hide this by way of a union,
 * to create a typed alias of "mapping", since that's how it is
 * actually being used here.
 */
union niu_page {
	struct page page;
	struct {
		unsigned long __flags;	/* unused alias of "flags" */
		struct list_head __lru;	/* unused alias of "lru" */
		struct page *next;	/* alias of "mapping" */
	};
};
#define niu_next_page(p)	container_of(p, union niu_page, page)->next

#define DRV_MODULE_NAME		"niu"
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"Apr 22, 2010"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
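/* Fall back to split 32-bit MMIO accessors on platforms that do not
 * provide a native readq/writeq.  The two halves are accessed low word
 * first and are not atomic with respect to each other.
 */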
#ifndef readq
static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}
#endif

static const struct pci_device_id niu_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);

#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int niu_debug;
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)

static int serdes_init_10g_serdes(struct niu *np);

static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_mac(reg));
	return err;
}

#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_ipp(reg));
	return err;
}
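/* As with the _mac variant above, the wrapper macro exists so that
 * BUILD_BUG_ON() can reject nonsensical constant LIMIT/DELAY arguments
 * at compile time before the out-of-line helper is called.
 */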
#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64(reg));
	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}

static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}

static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i;

	for (i = 0; i <= LDN_MAX; i++) {
		int err;

		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return 0;
}

static int niu_enable_interrupts(struct niu *np, int on)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];
		int err;

		err = niu_enable_ldn_in_ldg(np, lp, on);
		if (err)
			return err;
	}
	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);

	return 0;
}

static u32 phy_encode(u32 type, int port)
{
	return type << (port * 2);
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}

static int mdio_wait(struct niu *np)
{
	int limit = 1000;
	u64 val;

	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

		udelay(10);
	}

	return -ENODEV;
}

static int mdio_read(struct niu *np, int port, int dev, int reg)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);
}
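/* Clause 45 MDIO transactions are two-step: an address frame latches
 * the register number in the PHY, then a separate read or write frame
 * moves the data.  mdio_wait() polls the MIF turnaround bit to detect
 * completion of each frame.
 */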
static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);
}

static int mii_write(struct niu *np, int port, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}
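/* The ESR2 PLL configuration registers are 32 bits wide but are
 * accessed over MDIO as separate 16-bit low/high halves; these helpers
 * hide the split.
 */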
static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel),
				 val >> 16);
	return err;
}

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_RX_CFG_H(channel),
				 val >> 16);
	return err;
}

/* Mode is always 10G fiber. */
static int serdes_init_niu_10g_fiber(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg;
	unsigned long i;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize all 4 lanes of the SERDES. */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	return 0;
}

static int serdes_init_niu_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 pll_cfg, pll_sts;
	int max_retry = 100;
	u64 sig, mask, val;
	u32 tx_cfg, rx_cfg;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
		  PLL_TX_CFG_RATE_HALF);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_RATE_HALF);

	if (np->port == 0)
		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 1G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES. */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}
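/* Bring up the NIU 10G SERDES.  If the 10G ready/detect bits never
 * assert, fall back to re-initializing the SERDES at 1G and switch the
 * MAC over to the PCS transceiver.
 */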
static int serdes_init_niu_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
	int max_retry = 100;
	u64 sig, mask, val;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 10G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES. */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	/* check if serdes is ready */

	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
			np->port, (int)(sig & mask), (int)val);

		/* 10G failed, try initializing at 1G */
		err = serdes_init_niu_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
				   np->port);
			return -ENODEV;
		}
	}
	return 0;
}

static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));
		if (err >= 0)
			*val |= ((err & 0xffff) << 16);
		err = 0;
	}
	return err;
}

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_GLUE_CTRL0_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_reset(struct niu *np, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_RESET_CTRL_H);
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_RXTX_CTRL_H(chan), (val >> 16));
	return err;
}

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_GLUE_CTRL0_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
	return err;
}
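/* Cycle the ESR RX/TX reset controls one half-register at a time, then
 * confirm the reset state reads back as zero; the reset bits presumably
 * self-clear once the lanes come out of reset.
 */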
static int esr_reset(struct niu *np)
{
	u32 reset;
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	if (err)
		return err;
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	if (err)
		return err;
	udelay(200);

	err = esr_read_reset(np, &reset);
	if (err)
		return err;
	if (reset != 0) {
		netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
			   np->port, reset);
		return -ENODEV;
	}

	return 0;
}

static int serdes_init_10g(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;

	switch (np->port) {
	case 0:
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		break;
	case 1:
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

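	/* Per-lane tuning: enable signal-detect stretching and set the RX
	 * mux, then program the glue logic's sample rate, threshold count,
	 * and bit-lock timeout before checking the lane-status bits.
	 */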
	/* Initialize all 4 lanes of the SERDES. */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	err = esr_reset(np);
	if (err)
		return err;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
			return 0;
		}
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
	return 0;
}

static int serdes_init_1g(struct niu *np)
{
	u64 val;

	val = nr64(ENET_SERDES_1_PLL_CFG);
	val &= ~ENET_SERDES_PLL_FBDIV2;
	switch (np->port) {
	case 0:
		val |= ENET_SERDES_PLL_HRATE0;
		break;
	case 1:
		val |= ENET_SERDES_PLL_HRATE1;
		break;
	case 2:
		val |= ENET_SERDES_PLL_HRATE2;
		break;
	case 3:
		val |= ENET_SERDES_PLL_HRATE3;
		break;
	default:
		return -EINVAL;
	}
	nw64(ENET_SERDES_1_PLL_CFG, val);

	return 0;
}
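/* Full 1G SERDES bring-up: assert the per-port reset, program the PLL,
 * control, and test registers while reset is held, then release reset
 * and give the link a generous two seconds to settle before tuning the
 * lanes.
 */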
static int serdes_init_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;
	u64 reset_val, val_rd;

	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
		ENET_SERDES_PLL_FBDIV0;
	switch (np->port) {
	case 0:
		reset_val = ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		reset_val = ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ENET_SERDES_RESET, reset_val);
	mdelay(20);
	val_rd = nr64(ENET_SERDES_RESET);
	val_rd &= ~reset_val;
	nw64(pll_cfg, val);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);
	nw64(ENET_SERDES_RESET, val_rd);
	mdelay(2000);

	/* Initialize all 4 lanes of the SERDES. */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}

static int link_status_1g_serdes(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int link_up;
	u64 val;
	u16 current_speed;
	unsigned long flags;
	u8 current_duplex;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	val = nr64_pcs(PCS_MII_STAT);

	if (val & PCS_MII_STAT_LINK_STATUS) {
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}

	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return 0;
}
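/* 10G SERDES link state: XPCS status supplies the link bit, while bit
 * 24 of XMAC_INTER2 appears to report a fault condition that overrides
 * it.  Ports whose SERDES fell back to 1G (NIU_FLAGS_10G clear) are
 * handed to the 1G variant instead.
 */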
static int link_status_10g_serdes(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	struct niu_link_config *lp = &np->link_config;
	int link_up = 0;
	int link_ok = 1;
	u64 val, val2;
	u16 current_speed;
	u8 current_duplex;

	if (!(np->flags & NIU_FLAGS_10G))
		return link_status_1g_serdes(np, link_up_p);

	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	spin_lock_irqsave(&np->lock, flags);

	val = nr64_xpcs(XPCS_STATUS(0));
	val2 = nr64_mac(XMAC_INTER2);
	if (val2 & 0x01000000)
		link_ok = 0;

	if ((val & 0x1000ULL) && link_ok) {
		link_up = 1;
		current_speed = SPEED_10000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);
	*link_up_p = link_up;
	return 0;
}

static int link_status_mii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int err;
	int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
	int supported, advertising, active_speed, active_duplex;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (unlikely(err < 0))
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (unlikely(err < 0))
		return err;
	bmsr = err;

	err = mii_read(np, np->phy_addr, MII_ADVERTISE);
	if (unlikely(err < 0))
		return err;
	advert = err;

	err = mii_read(np, np->phy_addr, MII_LPA);
	if (unlikely(err < 0))
		return err;
	lpa = err;

	if (likely(bmsr & BMSR_ESTATEN)) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (unlikely(err < 0))
			return err;
		estatus = err;

		err = mii_read(np, np->phy_addr, MII_CTRL1000);
		if (unlikely(err < 0))
			return err;
		ctrl1000 = err;

		err = mii_read(np, np->phy_addr, MII_STAT1000);
		if (unlikely(err < 0))
			return err;
		stat1000 = err;
	} else
		estatus = ctrl1000 = stat1000 = 0;

	supported = 0;
	if (bmsr & BMSR_ANEGCAPABLE)
		supported |= SUPPORTED_Autoneg;
	if (bmsr & BMSR_10HALF)
		supported |= SUPPORTED_10baseT_Half;
	if (bmsr & BMSR_10FULL)
		supported |= SUPPORTED_10baseT_Full;
	if (bmsr & BMSR_100HALF)
		supported |= SUPPORTED_100baseT_Half;
	if (bmsr & BMSR_100FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (estatus & ESTATUS_1000_THALF)
		supported |= SUPPORTED_1000baseT_Half;
	if (estatus & ESTATUS_1000_TFULL)
		supported |= SUPPORTED_1000baseT_Full;
	lp->supported = supported;

	advertising = mii_adv_to_ethtool_adv_t(advert);
	advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);

	if (bmcr & BMCR_ANENABLE) {
		int neg, neg1000;

		lp->active_autoneg = 1;
		advertising |= ADVERTISED_Autoneg;

		neg = advert & lpa;
		neg1000 = (ctrl1000 << 2) & stat1000;

		if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
			active_speed = SPEED_1000;
		else if (neg & LPA_100)
			active_speed = SPEED_100;
		else if (neg & (LPA_10HALF | LPA_10FULL))
			active_speed = SPEED_10;
		else
			active_speed = SPEED_INVALID;

		if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
			active_duplex = DUPLEX_FULL;
		else if (active_speed != SPEED_INVALID)
			active_duplex = DUPLEX_HALF;
		else
			active_duplex = DUPLEX_INVALID;
	} else {
		lp->active_autoneg = 0;

		if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
			active_speed = SPEED_1000;
		else if (bmcr & BMCR_SPEED100)
			active_speed = SPEED_100;
		else
			active_speed = SPEED_10;

		if (bmcr & BMCR_FULLDPLX)
			active_duplex = DUPLEX_FULL;
		else
			active_duplex = DUPLEX_HALF;
	}

	lp->active_advertising = advertising;
	lp->active_speed = active_speed;
	lp->active_duplex = active_duplex;
	*link_up_p = !!(bmsr & BMSR_LSTATUS);

	return 0;
}
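/* RGMII link state comes straight from BMSR; when the link bit is set
 * the port is assumed to be running at 1000/full.
 */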
static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed, bmsr;
	unsigned long flags;
	u8 current_duplex;
	int err, link_up;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		goto out;

	bmsr = err;
	if (bmsr & BMSR_LSTATUS) {
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	err = 0;

out:
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return err;
}

static int link_status_1g(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&np->lock, flags);

	err = link_status_mii(np, link_up_p);
	lp->supported |= SUPPORTED_TP;
	lp->active_advertising |= ADVERTISED_TP;

	spin_unlock_irqrestore(&np->lock, flags);
	return err;
}

static int bcm8704_reset(struct niu *np)
{
	int err, limit;

	err = mdio_read(np, np->phy_addr,
			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
	if (err < 0 || err == 0xffff)
		return err;
	err |= BMCR_RESET;
	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		err = mdio_read(np, np->phy_addr,
				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
			   np->port, (err & 0xffff));
		return -ENODEV;
	}
	return 0;
}

/* When written, certain PHY registers need to be read back twice
 * in order for the bits to settle properly.
 */
static int bcm8704_user_dev3_readback(struct niu *np, int reg)
{
	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	return 0;
}

static int bcm8706_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err |= USER_ODIG_CTRL_RESV2;
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

static int bcm8704_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
			 (USER_CONTROL_OPTXRST_LVL |
			  USER_CONTROL_OPBIASFLT_LVL |
			  USER_CONTROL_OBTMPFLT_LVL |
			  USER_CONTROL_OPPRFLT_LVL |
			  USER_CONTROL_OPTXFLT_LVL |
			  USER_CONTROL_OPRXLOS_LVL |
			  USER_CONTROL_OPRXFLT_LVL |
			  USER_CONTROL_OPTXON_LVL |
			  (0x3f << USER_CONTROL_RES1_SHIFT)));
	if (err)
		return err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
			 (USER_PMD_TX_CTL_XFP_CLKEN |
			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
			  USER_PMD_TX_CTL_TSCK_LPWREN));
	if (err)
		return err;

	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
	if (err)
		return err;
	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

static int mrvl88x2011_act_led(struct niu *np, int val)
{
	int err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_8_TO_11_CTL);
	if (err < 0)
		return err;

	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT, MRVL88X2011_LED_CTL_MASK);
	err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT, val);

	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			  MRVL88X2011_LED_8_TO_11_CTL, err);
}

static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
{
	int err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_BLINK_CTL);
	if (err >= 0) {
		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
		err |= (rate << 4);

		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
				 MRVL88X2011_LED_BLINK_CTL, err);
	}

	return err;
}
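/* Marvell 88X2011 bring-up: program the LED blink rate and activity
 * LED, enable the XFP reference clock, select MAC loopback if
 * requested, and finally enable the PMD transmitter.
 */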
static int xcvr_init_10g_mrvl88x2011(struct niu *np)
{
	int err;

	/* Set LED functions */
	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
	if (err)
		return err;

	/* led activity */
	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_GENERAL_CTL);
	if (err < 0)
		return err;

	err |= MRVL88X2011_ENA_XFPREFCLK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			 MRVL88X2011_GENERAL_CTL, err);
	if (err < 0)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_CTL_1);
	if (err < 0)
		return err;

	if (np->link_config.loopback_mode == LOOPBACK_MAC)
		err |= MRVL88X2011_LOOPBACK;
	else
		err &= ~MRVL88X2011_LOOPBACK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			 MRVL88X2011_PMA_PMD_CTL_1, err);
	if (err < 0)
		return err;

	/* Enable PMD */
	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
}

static int xcvr_diag_bcm870x(struct niu *np)
{
	u16 analog_stat0, tx_alarm_status;
	int err = 0;

#if 1
	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			MII_STAT1000);
	if (err < 0)
		return err;
	pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
	if (err < 0)
		return err;
	pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			MII_NWAYTEST);
	if (err < 0)
		return err;
	pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
#endif

	/* XXX dig this out it might not be so useful XXX */
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	analog_stat0 = err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	tx_alarm_status = err;

	if (analog_stat0 != 0x03fc) {
		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
			pr_info("Port %u cable not connected or bad cable\n",
				np->port);
		} else if (analog_stat0 == 0x639c) {
			pr_info("Port %u optical module is bad or missing\n",
				np->port);
		}
	}

	return 0;
}

static int xcvr_10g_set_lb_bcm870x(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			MII_BMCR);
	if (err < 0)
		return err;

	err &= ~BMCR_LOOPBACK;

	if (lp->loopback_mode == LOOPBACK_MAC)
		err |= BMCR_LOOPBACK;

	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	return 0;
}
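/* BCM8706 bring-up: skipped entirely while a hotplug PHY is absent.
 * Forces the XMAC link LED on and switches the MIF to indirect mode
 * before resetting and configuring the PHY.
 */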
static int xcvr_init_10g_bcm8706(struct niu *np)
{
	int err = 0;
	u64 val;

	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
		return err;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = bcm8706_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g_bcm8704(struct niu *np)
{
	int err;

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = bcm8704_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g(struct niu *np)
{
	int phy_id, err;
	u64 val;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	phy_id = phy_decode(np->parent->port_phy, np->port);
	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

	/* handle different phy types */
	switch (phy_id & NIU_PHY_ID_MASK) {
	case NIU_PHY_ID_MRVL88X2011:
		err = xcvr_init_10g_mrvl88x2011(np);
		break;

	default: /* bcom 8704 */
		err = xcvr_init_10g_bcm8704(np);
		break;
	}

	return err;
}

static int mii_reset(struct niu *np)
{
	int limit, err;

	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		udelay(500);
		err = mii_read(np, np->phy_addr, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
			   np->port, err);
		return -ENODEV;
	}

	return 0;
}

static int xcvr_init_1g_rgmii(struct niu *np)
{
	int err;
	u64 val;
	u16 bmcr, bmsr, estat;

	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (bmsr & BMSR_ESTATEN) {
		u16 ctrl1000 = 0;

		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |= ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
		if (err)
			return err;
	}

	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = mii_read(np, np->phy_addr, MII_BMCR);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;

	return 0;
}
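/* Common 1G copper PHY setup: soft-reset the PHY, translate the
 * requested link_config into MII/CTRL1000 advertisements (or a forced
 * BMCR setting), and handle MAC- and PHY-level loopback modes.
 */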
static int mii_init_common(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 bmcr, bmsr, adv, estat;
	int err;

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		bmcr |= BMCR_LOOPBACK;
		if (lp->active_speed == SPEED_1000)
			bmcr |= BMCR_SPEED1000;
		if (lp->active_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 aux;

		aux = (BCM5464R_AUX_CTL_EXT_LB |
		       BCM5464R_AUX_CTL_WRITE_1);
		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
		if (err)
			return err;
	}

	if (lp->autoneg) {
		u16 ctrl1000;

		adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
		if ((bmsr & BMSR_10HALF) &&
		    (lp->advertising & ADVERTISED_10baseT_Half))
			adv |= ADVERTISE_10HALF;
		if ((bmsr & BMSR_10FULL) &&
		    (lp->advertising & ADVERTISED_10baseT_Full))
			adv |= ADVERTISE_10FULL;
		if ((bmsr & BMSR_100HALF) &&
		    (lp->advertising & ADVERTISED_100baseT_Half))
			adv |= ADVERTISE_100HALF;
		if ((bmsr & BMSR_100FULL) &&
		    (lp->advertising & ADVERTISED_100baseT_Full))
			adv |= ADVERTISE_100FULL;
		err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
		if (err)
			return err;

		if (likely(bmsr & BMSR_ESTATEN)) {
			ctrl1000 = 0;
			if ((estat & ESTATUS_1000_THALF) &&
			    (lp->advertising & ADVERTISED_1000baseT_Half))
				ctrl1000 |= ADVERTISE_1000HALF;
			if ((estat & ESTATUS_1000_TFULL) &&
			    (lp->advertising & ADVERTISED_1000baseT_Full))
				ctrl1000 |= ADVERTISE_1000FULL;
			err = mii_write(np, np->phy_addr,
					MII_CTRL1000, ctrl1000);
			if (err)
				return err;
		}

		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		/* !lp->autoneg */
		int fulldpx;

		if (lp->duplex == DUPLEX_FULL) {
			bmcr |= BMCR_FULLDPLX;
			fulldpx = 1;
		} else if (lp->duplex == DUPLEX_HALF)
			fulldpx = 0;
		else
			return -EINVAL;

		if (lp->speed == SPEED_1000) {
			/* if X-full requested while not supported, or
			   X-half requested while not supported... */
			if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
			    (!fulldpx && !(estat & ESTATUS_1000_THALF)))
				return -EINVAL;
			bmcr |= BMCR_SPEED1000;
		} else if (lp->speed == SPEED_100) {
			if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
			    (!fulldpx && !(bmsr & BMSR_100HALF)))
				return -EINVAL;
			bmcr |= BMCR_SPEED100;
		} else if (lp->speed == SPEED_10) {
			if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
			    (!fulldpx && !(bmsr & BMSR_10HALF)))
				return -EINVAL;
		} else
			return -EINVAL;
	}

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

#if 0
	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
		np->port, bmcr, bmsr);
#endif

	return 0;
}

static int xcvr_init_1g(struct niu *np)
{
	u64 val;

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	return mii_init_common(np);
}

static int niu_xcvr_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->xcvr_init)
		err = ops->xcvr_init(np);

	return err;
}

static int niu_serdes_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->serdes_init)
		err = ops->serdes_init(np);

	return err;
}

static void niu_init_xif(struct niu *);
static void niu_handle_led(struct niu *, int status);

static int niu_link_status_common(struct niu *np, int link_up)
{
	struct niu_link_config *lp = &np->link_config;
	struct net_device *dev = np->dev;
	unsigned long flags;

	if (!netif_carrier_ok(dev) && link_up) {
		netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
			   lp->active_speed == SPEED_10000 ? "10Gb/sec" :
			   lp->active_speed == SPEED_1000 ? "1Gb/sec" :
			   lp->active_speed == SPEED_100 ? "100Mbit/sec" :
			   "10Mbit/sec",
			   lp->active_duplex == DUPLEX_FULL ? "full" : "half");

		spin_lock_irqsave(&np->lock, flags);
		niu_init_xif(np);
		niu_handle_led(np, 1);
		spin_unlock_irqrestore(&np->lock, flags);

		netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev) && !link_up) {
		netif_warn(np, link, dev, "Link is down\n");
		spin_lock_irqsave(&np->lock, flags);
		niu_handle_led(np, 0);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_carrier_off(dev);
	}

	return 0;
}
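/* Marvell 88X2011 link state: the link is reported up only when the
 * PMA/PMD and PCS status registers both indicate OK and all four XGXS
 * lanes are aligned; the activity LED is switched to track the result.
 */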
static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
{
	int err, link_up, pma_status, pcs_status;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_10G_PMD_STATUS_2);
	if (err < 0)
		goto out;

	/* Check PMA/PMD Register: 1.0001.2 == 1 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check PMC Register : 3.0001.2 == 1: read twice */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check XGXS Register : 4.0018.[0-3,12] */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
			MRVL88X2011_10G_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
		    0x800))
		link_up = (pma_status && pcs_status) ? 1 : 0;

	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;
out:
	mrvl88x2011_act_led(np, (link_up ?
				 MRVL88X2011_LED_CTL_PCS_ACT :
				 MRVL88X2011_LED_CTL_OFF));

	*link_up_p = link_up;
	return err;
}

static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
{
	int err, link_up;
	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0 || err == 0xffff)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;

	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;
	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_PATTEST |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		np->link_config.active_speed = SPEED_INVALID;
		np->link_config.active_duplex = DUPLEX_INVALID;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}
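/* Non-hotplug BCM8704 variant of the same check: signal detect, PCS
 * block lock, then XGXS lane status.  Unlike the 8706 path it does not
 * treat an all-ones (0xffff) MDIO read as a missing PHY.
 */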
static int link_status_10g_bcom(struct niu *np, int *link_up_p)
{
	int err, link_up;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;
	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}

static int link_status_10g(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = -EINVAL;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		int phy_id;

		phy_id = phy_decode(np->parent->port_phy, np->port);
		phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

		/* handle different phy types */
		switch (phy_id & NIU_PHY_ID_MASK) {
		case NIU_PHY_ID_MRVL88X2011:
			err = link_status_10g_mrvl(np, link_up_p);
			break;

		default: /* bcom 8704 */
			err = link_status_10g_bcom(np, link_up_p);
			break;
		}
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}

static int niu_10g_phy_present(struct niu *np)
{
	u64 sig, mask, val;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return 0;
	}

	if ((sig & mask) != val)
		return 0;
	return 1;
}
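/* Hotplug link poll: detect PHY insertion/removal via the SERDES
 * signal bits, re-run xcvr_init on insertion (debouncing a failed
 * init), and treat an MDIO read of 0xffff as a back-to-back XAUI
 * connection with no PHY at all.
 */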
static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = 0;
	int phy_present;
	int phy_present_prev;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
			1 : 0;
		phy_present = niu_10g_phy_present(np);
		if (phy_present != phy_present_prev) {
			/* state change */
			if (phy_present) {
				/* A NEM was just plugged in */
				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				if (np->phy_ops->xcvr_init)
					err = np->phy_ops->xcvr_init(np);
				if (err) {
					err = mdio_read(np, np->phy_addr,
						BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
					if (err == 0xffff) {
						/* No mdio, back-to-back XAUI */
						goto out;
					}
					/* debounce */
					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				}
			} else {
				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				*link_up_p = 0;
				netif_warn(np, link, np->dev,
					   "Hotplug PHY Removed\n");
			}
		}
out:
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
			err = link_status_10g_bcm8706(np, link_up_p);
			if (err == 0xffff) {
				/* No mdio, back-to-back XAUI: it is C10NEM */
				*link_up_p = 1;
				np->link_config.active_speed = SPEED_10000;
				np->link_config.active_duplex = DUPLEX_FULL;
			}
		}
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}

static int niu_link_status(struct niu *np, int *link_up_p)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->link_status)
		err = ops->link_status(np, link_up_p);

	return err;
}

static void niu_timer(struct timer_list *t)
{
	struct niu *np = from_timer(np, t, timer);
	unsigned long off;
	int err, link_up;

	err = niu_link_status(np, &link_up);
	if (!err)
		niu_link_status_common(np, link_up);

	if (netif_carrier_ok(np->dev))
		off = 5 * HZ;
	else
		off = 1 * HZ;
	np->timer.expires = jiffies + off;

	add_timer(&np->timer);
}
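/* Each supported platform/PHY combination is described by an ops
 * vector (serdes_init, xcvr_init, link_status) and, further below, a
 * template that pairs the ops with the base MDIO address of the PHY.
 */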
2297 .link_status = link_status_1g, 2298 }; 2299 2300 static const struct niu_phy_ops phy_ops_1g_copper = { 2301 .xcvr_init = xcvr_init_1g, 2302 .link_status = link_status_1g, 2303 }; 2304 2305 struct niu_phy_template { 2306 const struct niu_phy_ops *ops; 2307 u32 phy_addr_base; 2308 }; 2309 2310 static const struct niu_phy_template phy_template_niu_10g_fiber = { 2311 .ops = &phy_ops_10g_fiber_niu, 2312 .phy_addr_base = 16, 2313 }; 2314 2315 static const struct niu_phy_template phy_template_niu_10g_serdes = { 2316 .ops = &phy_ops_10g_serdes_niu, 2317 .phy_addr_base = 0, 2318 }; 2319 2320 static const struct niu_phy_template phy_template_niu_1g_serdes = { 2321 .ops = &phy_ops_1g_serdes_niu, 2322 .phy_addr_base = 0, 2323 }; 2324 2325 static const struct niu_phy_template phy_template_10g_fiber = { 2326 .ops = &phy_ops_10g_fiber, 2327 .phy_addr_base = 8, 2328 }; 2329 2330 static const struct niu_phy_template phy_template_10g_fiber_hotplug = { 2331 .ops = &phy_ops_10g_fiber_hotplug, 2332 .phy_addr_base = 8, 2333 }; 2334 2335 static const struct niu_phy_template phy_template_niu_10g_hotplug = { 2336 .ops = &phy_ops_niu_10g_hotplug, 2337 .phy_addr_base = 8, 2338 }; 2339 2340 static const struct niu_phy_template phy_template_10g_copper = { 2341 .ops = &phy_ops_10g_copper, 2342 .phy_addr_base = 10, 2343 }; 2344 2345 static const struct niu_phy_template phy_template_1g_fiber = { 2346 .ops = &phy_ops_1g_fiber, 2347 .phy_addr_base = 0, 2348 }; 2349 2350 static const struct niu_phy_template phy_template_1g_copper = { 2351 .ops = &phy_ops_1g_copper, 2352 .phy_addr_base = 0, 2353 }; 2354 2355 static const struct niu_phy_template phy_template_1g_rgmii = { 2356 .ops = &phy_ops_1g_rgmii, 2357 .phy_addr_base = 0, 2358 }; 2359 2360 static const struct niu_phy_template phy_template_10g_serdes = { 2361 .ops = &phy_ops_10g_serdes, 2362 .phy_addr_base = 0, 2363 }; 2364 2365 static int niu_atca_port_num[4] = { 2366 0, 0, 11, 10 2367 }; 2368 2369 static int serdes_init_10g_serdes(struct niu *np) 2370 { 2371 struct niu_link_config *lp = &np->link_config; 2372 unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i; 2373 u64 ctrl_val, test_cfg_val, sig, mask, val; 2374 2375 switch (np->port) { 2376 case 0: 2377 ctrl_reg = ENET_SERDES_0_CTRL_CFG; 2378 test_cfg_reg = ENET_SERDES_0_TEST_CFG; 2379 pll_cfg = ENET_SERDES_0_PLL_CFG; 2380 break; 2381 case 1: 2382 ctrl_reg = ENET_SERDES_1_CTRL_CFG; 2383 test_cfg_reg = ENET_SERDES_1_TEST_CFG; 2384 pll_cfg = ENET_SERDES_1_PLL_CFG; 2385 break; 2386 2387 default: 2388 return -EINVAL; 2389 } 2390 ctrl_val = (ENET_SERDES_CTRL_SDET_0 | 2391 ENET_SERDES_CTRL_SDET_1 | 2392 ENET_SERDES_CTRL_SDET_2 | 2393 ENET_SERDES_CTRL_SDET_3 | 2394 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | 2395 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | 2396 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | 2397 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | 2398 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | 2399 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | 2400 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | 2401 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); 2402 test_cfg_val = 0; 2403 2404 if (lp->loopback_mode == LOOPBACK_PHY) { 2405 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << 2406 ENET_SERDES_TEST_MD_0_SHIFT) | 2407 (ENET_TEST_MD_PAD_LOOPBACK << 2408 ENET_SERDES_TEST_MD_1_SHIFT) | 2409 (ENET_TEST_MD_PAD_LOOPBACK << 2410 ENET_SERDES_TEST_MD_2_SHIFT) | 2411 (ENET_TEST_MD_PAD_LOOPBACK << 2412 ENET_SERDES_TEST_MD_3_SHIFT)); 2413 } 2414 2415 esr_reset(np); 2416 nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2); 2417 nw64(ctrl_reg, ctrl_val); 2418 nw64(test_cfg_reg, 
test_cfg_val); 2419 2420 /* Initialize all 4 lanes of the SERDES. */ 2421 for (i = 0; i < 4; i++) { 2422 u32 rxtx_ctrl, glue0; 2423 int err; 2424 2425 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); 2426 if (err) 2427 return err; 2428 err = esr_read_glue0(np, i, &glue0); 2429 if (err) 2430 return err; 2431 2432 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); 2433 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | 2434 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); 2435 2436 glue0 &= ~(ESR_GLUE_CTRL0_SRATE | 2437 ESR_GLUE_CTRL0_THCNT | 2438 ESR_GLUE_CTRL0_BLTIME); 2439 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | 2440 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | 2441 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | 2442 (BLTIME_300_CYCLES << 2443 ESR_GLUE_CTRL0_BLTIME_SHIFT)); 2444 2445 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); 2446 if (err) 2447 return err; 2448 err = esr_write_glue0(np, i, glue0); 2449 if (err) 2450 return err; 2451 } 2452 2453 2454 sig = nr64(ESR_INT_SIGNALS); 2455 switch (np->port) { 2456 case 0: 2457 mask = ESR_INT_SIGNALS_P0_BITS; 2458 val = (ESR_INT_SRDY0_P0 | 2459 ESR_INT_DET0_P0 | 2460 ESR_INT_XSRDY_P0 | 2461 ESR_INT_XDP_P0_CH3 | 2462 ESR_INT_XDP_P0_CH2 | 2463 ESR_INT_XDP_P0_CH1 | 2464 ESR_INT_XDP_P0_CH0); 2465 break; 2466 2467 case 1: 2468 mask = ESR_INT_SIGNALS_P1_BITS; 2469 val = (ESR_INT_SRDY0_P1 | 2470 ESR_INT_DET0_P1 | 2471 ESR_INT_XSRDY_P1 | 2472 ESR_INT_XDP_P1_CH3 | 2473 ESR_INT_XDP_P1_CH2 | 2474 ESR_INT_XDP_P1_CH1 | 2475 ESR_INT_XDP_P1_CH0); 2476 break; 2477 2478 default: 2479 return -EINVAL; 2480 } 2481 2482 if ((sig & mask) != val) { 2483 int err; 2484 err = serdes_init_1g_serdes(np); 2485 if (!err) { 2486 np->flags &= ~NIU_FLAGS_10G; 2487 np->mac_xcvr = MAC_XCVR_PCS; 2488 } else { 2489 netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", 2490 np->port); 2491 return -ENODEV; 2492 } 2493 } 2494 2495 return 0; 2496 } 2497 2498 static int niu_determine_phy_disposition(struct niu *np) 2499 { 2500 struct niu_parent *parent = np->parent; 2501 u8 plat_type = parent->plat_type; 2502 const struct niu_phy_template *tp; 2503 u32 phy_addr_off = 0; 2504 2505 if (plat_type == PLAT_TYPE_NIU) { 2506 switch (np->flags & 2507 (NIU_FLAGS_10G | 2508 NIU_FLAGS_FIBER | 2509 NIU_FLAGS_XCVR_SERDES)) { 2510 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: 2511 /* 10G Serdes */ 2512 tp = &phy_template_niu_10g_serdes; 2513 break; 2514 case NIU_FLAGS_XCVR_SERDES: 2515 /* 1G Serdes */ 2516 tp = &phy_template_niu_1g_serdes; 2517 break; 2518 case NIU_FLAGS_10G | NIU_FLAGS_FIBER: 2519 /* 10G Fiber */ 2520 default: 2521 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { 2522 tp = &phy_template_niu_10g_hotplug; 2523 if (np->port == 0) 2524 phy_addr_off = 8; 2525 if (np->port == 1) 2526 phy_addr_off = 12; 2527 } else { 2528 tp = &phy_template_niu_10g_fiber; 2529 phy_addr_off += np->port; 2530 } 2531 break; 2532 } 2533 } else { 2534 switch (np->flags & 2535 (NIU_FLAGS_10G | 2536 NIU_FLAGS_FIBER | 2537 NIU_FLAGS_XCVR_SERDES)) { 2538 case 0: 2539 /* 1G copper */ 2540 tp = &phy_template_1g_copper; 2541 if (plat_type == PLAT_TYPE_VF_P0) 2542 phy_addr_off = 10; 2543 else if (plat_type == PLAT_TYPE_VF_P1) 2544 phy_addr_off = 26; 2545 2546 phy_addr_off += (np->port ^ 0x3); 2547 break; 2548 2549 case NIU_FLAGS_10G: 2550 /* 10G copper */ 2551 tp = &phy_template_10g_copper; 2552 break; 2553 2554 case NIU_FLAGS_FIBER: 2555 /* 1G fiber */ 2556 tp = &phy_template_1g_fiber; 2557 break; 2558 2559 case NIU_FLAGS_10G | NIU_FLAGS_FIBER: 2560 /* 10G fiber */ 2561 tp = &phy_template_10g_fiber; 2562 if (plat_type == PLAT_TYPE_VF_P0 || 2563 plat_type == PLAT_TYPE_VF_P1) 2564 
phy_addr_off = 8; 2565 phy_addr_off += np->port; 2566 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { 2567 tp = &phy_template_10g_fiber_hotplug; 2568 if (np->port == 0) 2569 phy_addr_off = 8; 2570 if (np->port == 1) 2571 phy_addr_off = 12; 2572 } 2573 break; 2574 2575 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: 2576 case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER: 2577 case NIU_FLAGS_XCVR_SERDES: 2578 switch(np->port) { 2579 case 0: 2580 case 1: 2581 tp = &phy_template_10g_serdes; 2582 break; 2583 case 2: 2584 case 3: 2585 tp = &phy_template_1g_rgmii; 2586 break; 2587 default: 2588 return -EINVAL; 2589 } 2590 phy_addr_off = niu_atca_port_num[np->port]; 2591 break; 2592 2593 default: 2594 return -EINVAL; 2595 } 2596 } 2597 2598 np->phy_ops = tp->ops; 2599 np->phy_addr = tp->phy_addr_base + phy_addr_off; 2600 2601 return 0; 2602 } 2603 2604 static int niu_init_link(struct niu *np) 2605 { 2606 struct niu_parent *parent = np->parent; 2607 int err, ignore; 2608 2609 if (parent->plat_type == PLAT_TYPE_NIU) { 2610 err = niu_xcvr_init(np); 2611 if (err) 2612 return err; 2613 msleep(200); 2614 } 2615 err = niu_serdes_init(np); 2616 if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY)) 2617 return err; 2618 msleep(200); 2619 err = niu_xcvr_init(np); 2620 if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY)) 2621 niu_link_status(np, &ignore); 2622 return 0; 2623 } 2624 2625 static void niu_set_primary_mac(struct niu *np, const unsigned char *addr) 2626 { 2627 u16 reg0 = addr[4] << 8 | addr[5]; 2628 u16 reg1 = addr[2] << 8 | addr[3]; 2629 u16 reg2 = addr[0] << 8 | addr[1]; 2630 2631 if (np->flags & NIU_FLAGS_XMAC) { 2632 nw64_mac(XMAC_ADDR0, reg0); 2633 nw64_mac(XMAC_ADDR1, reg1); 2634 nw64_mac(XMAC_ADDR2, reg2); 2635 } else { 2636 nw64_mac(BMAC_ADDR0, reg0); 2637 nw64_mac(BMAC_ADDR1, reg1); 2638 nw64_mac(BMAC_ADDR2, reg2); 2639 } 2640 } 2641 2642 static int niu_num_alt_addr(struct niu *np) 2643 { 2644 if (np->flags & NIU_FLAGS_XMAC) 2645 return XMAC_NUM_ALT_ADDR; 2646 else 2647 return BMAC_NUM_ALT_ADDR; 2648 } 2649 2650 static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr) 2651 { 2652 u16 reg0 = addr[4] << 8 | addr[5]; 2653 u16 reg1 = addr[2] << 8 | addr[3]; 2654 u16 reg2 = addr[0] << 8 | addr[1]; 2655 2656 if (index >= niu_num_alt_addr(np)) 2657 return -EINVAL; 2658 2659 if (np->flags & NIU_FLAGS_XMAC) { 2660 nw64_mac(XMAC_ALT_ADDR0(index), reg0); 2661 nw64_mac(XMAC_ALT_ADDR1(index), reg1); 2662 nw64_mac(XMAC_ALT_ADDR2(index), reg2); 2663 } else { 2664 nw64_mac(BMAC_ALT_ADDR0(index), reg0); 2665 nw64_mac(BMAC_ALT_ADDR1(index), reg1); 2666 nw64_mac(BMAC_ALT_ADDR2(index), reg2); 2667 } 2668 2669 return 0; 2670 } 2671 2672 static int niu_enable_alt_mac(struct niu *np, int index, int on) 2673 { 2674 unsigned long reg; 2675 u64 val, mask; 2676 2677 if (index >= niu_num_alt_addr(np)) 2678 return -EINVAL; 2679 2680 if (np->flags & NIU_FLAGS_XMAC) { 2681 reg = XMAC_ADDR_CMPEN; 2682 mask = 1 << index; 2683 } else { 2684 reg = BMAC_ADDR_CMPEN; 2685 mask = 1 << (index + 1); 2686 } 2687 2688 val = nr64_mac(reg); 2689 if (on) 2690 val |= mask; 2691 else 2692 val &= ~mask; 2693 nw64_mac(reg, val); 2694 2695 return 0; 2696 } 2697 2698 static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg, 2699 int num, int mac_pref) 2700 { 2701 u64 val = nr64_mac(reg); 2702 val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR); 2703 val |= num; 2704 if (mac_pref) 2705 val |= HOST_INFO_MPR; 2706 nw64_mac(reg, val); 2707 } 2708 2709 static int __set_rdc_table_num(struct niu *np, 2710 int xmac_index, int bmac_index, 2711 int 
rdc_table_num, int mac_pref) 2712 { 2713 unsigned long reg; 2714 2715 if (rdc_table_num & ~HOST_INFO_MACRDCTBLN) 2716 return -EINVAL; 2717 if (np->flags & NIU_FLAGS_XMAC) 2718 reg = XMAC_HOST_INFO(xmac_index); 2719 else 2720 reg = BMAC_HOST_INFO(bmac_index); 2721 __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref); 2722 return 0; 2723 } 2724 2725 static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num, 2726 int mac_pref) 2727 { 2728 return __set_rdc_table_num(np, 17, 0, table_num, mac_pref); 2729 } 2730 2731 static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num, 2732 int mac_pref) 2733 { 2734 return __set_rdc_table_num(np, 16, 8, table_num, mac_pref); 2735 } 2736 2737 static int niu_set_alt_mac_rdc_table(struct niu *np, int idx, 2738 int table_num, int mac_pref) 2739 { 2740 if (idx >= niu_num_alt_addr(np)) 2741 return -EINVAL; 2742 return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref); 2743 } 2744 2745 static u64 vlan_entry_set_parity(u64 reg_val) 2746 { 2747 u64 port01_mask; 2748 u64 port23_mask; 2749 2750 port01_mask = 0x00ff; 2751 port23_mask = 0xff00; 2752 2753 if (hweight64(reg_val & port01_mask) & 1) 2754 reg_val |= ENET_VLAN_TBL_PARITY0; 2755 else 2756 reg_val &= ~ENET_VLAN_TBL_PARITY0; 2757 2758 if (hweight64(reg_val & port23_mask) & 1) 2759 reg_val |= ENET_VLAN_TBL_PARITY1; 2760 else 2761 reg_val &= ~ENET_VLAN_TBL_PARITY1; 2762 2763 return reg_val; 2764 } 2765 2766 static void vlan_tbl_write(struct niu *np, unsigned long index, 2767 int port, int vpr, int rdc_table) 2768 { 2769 u64 reg_val = nr64(ENET_VLAN_TBL(index)); 2770 2771 reg_val &= ~((ENET_VLAN_TBL_VPR | 2772 ENET_VLAN_TBL_VLANRDCTBLN) << 2773 ENET_VLAN_TBL_SHIFT(port)); 2774 if (vpr) 2775 reg_val |= (ENET_VLAN_TBL_VPR << 2776 ENET_VLAN_TBL_SHIFT(port)); 2777 reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port)); 2778 2779 reg_val = vlan_entry_set_parity(reg_val); 2780 2781 nw64(ENET_VLAN_TBL(index), reg_val); 2782 } 2783 2784 static void vlan_tbl_clear(struct niu *np) 2785 { 2786 int i; 2787 2788 for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) 2789 nw64(ENET_VLAN_TBL(i), 0); 2790 } 2791 2792 static int tcam_wait_bit(struct niu *np, u64 bit) 2793 { 2794 int limit = 1000; 2795 2796 while (--limit > 0) { 2797 if (nr64(TCAM_CTL) & bit) 2798 break; 2799 udelay(1); 2800 } 2801 if (limit <= 0) 2802 return -ENODEV; 2803 2804 return 0; 2805 } 2806 2807 static int tcam_flush(struct niu *np, int index) 2808 { 2809 nw64(TCAM_KEY_0, 0x00); 2810 nw64(TCAM_KEY_MASK_0, 0xff); 2811 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); 2812 2813 return tcam_wait_bit(np, TCAM_CTL_STAT); 2814 } 2815 2816 #if 0 2817 static int tcam_read(struct niu *np, int index, 2818 u64 *key, u64 *mask) 2819 { 2820 int err; 2821 2822 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index)); 2823 err = tcam_wait_bit(np, TCAM_CTL_STAT); 2824 if (!err) { 2825 key[0] = nr64(TCAM_KEY_0); 2826 key[1] = nr64(TCAM_KEY_1); 2827 key[2] = nr64(TCAM_KEY_2); 2828 key[3] = nr64(TCAM_KEY_3); 2829 mask[0] = nr64(TCAM_KEY_MASK_0); 2830 mask[1] = nr64(TCAM_KEY_MASK_1); 2831 mask[2] = nr64(TCAM_KEY_MASK_2); 2832 mask[3] = nr64(TCAM_KEY_MASK_3); 2833 } 2834 return err; 2835 } 2836 #endif 2837 2838 static int tcam_write(struct niu *np, int index, 2839 u64 *key, u64 *mask) 2840 { 2841 nw64(TCAM_KEY_0, key[0]); 2842 nw64(TCAM_KEY_1, key[1]); 2843 nw64(TCAM_KEY_2, key[2]); 2844 nw64(TCAM_KEY_3, key[3]); 2845 nw64(TCAM_KEY_MASK_0, mask[0]); 2846 nw64(TCAM_KEY_MASK_1, mask[1]); 2847 nw64(TCAM_KEY_MASK_2, mask[2]); 2848 nw64(TCAM_KEY_MASK_3, 
mask[3]); 2849 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); 2850 2851 return tcam_wait_bit(np, TCAM_CTL_STAT); 2852 } 2853 2854 #if 0 2855 static int tcam_assoc_read(struct niu *np, int index, u64 *data) 2856 { 2857 int err; 2858 2859 nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index)); 2860 err = tcam_wait_bit(np, TCAM_CTL_STAT); 2861 if (!err) 2862 *data = nr64(TCAM_KEY_1); 2863 2864 return err; 2865 } 2866 #endif 2867 2868 static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data) 2869 { 2870 nw64(TCAM_KEY_1, assoc_data); 2871 nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index)); 2872 2873 return tcam_wait_bit(np, TCAM_CTL_STAT); 2874 } 2875 2876 static void tcam_enable(struct niu *np, int on) 2877 { 2878 u64 val = nr64(FFLP_CFG_1); 2879 2880 if (on) 2881 val &= ~FFLP_CFG_1_TCAM_DIS; 2882 else 2883 val |= FFLP_CFG_1_TCAM_DIS; 2884 nw64(FFLP_CFG_1, val); 2885 } 2886 2887 static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio) 2888 { 2889 u64 val = nr64(FFLP_CFG_1); 2890 2891 val &= ~(FFLP_CFG_1_FFLPINITDONE | 2892 FFLP_CFG_1_CAMLAT | 2893 FFLP_CFG_1_CAMRATIO); 2894 val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT); 2895 val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT); 2896 nw64(FFLP_CFG_1, val); 2897 2898 val = nr64(FFLP_CFG_1); 2899 val |= FFLP_CFG_1_FFLPINITDONE; 2900 nw64(FFLP_CFG_1, val); 2901 } 2902 2903 static int tcam_user_eth_class_enable(struct niu *np, unsigned long class, 2904 int on) 2905 { 2906 unsigned long reg; 2907 u64 val; 2908 2909 if (class < CLASS_CODE_ETHERTYPE1 || 2910 class > CLASS_CODE_ETHERTYPE2) 2911 return -EINVAL; 2912 2913 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); 2914 val = nr64(reg); 2915 if (on) 2916 val |= L2_CLS_VLD; 2917 else 2918 val &= ~L2_CLS_VLD; 2919 nw64(reg, val); 2920 2921 return 0; 2922 } 2923 2924 #if 0 2925 static int tcam_user_eth_class_set(struct niu *np, unsigned long class, 2926 u64 ether_type) 2927 { 2928 unsigned long reg; 2929 u64 val; 2930 2931 if (class < CLASS_CODE_ETHERTYPE1 || 2932 class > CLASS_CODE_ETHERTYPE2 || 2933 (ether_type & ~(u64)0xffff) != 0) 2934 return -EINVAL; 2935 2936 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); 2937 val = nr64(reg); 2938 val &= ~L2_CLS_ETYPE; 2939 val |= (ether_type << L2_CLS_ETYPE_SHIFT); 2940 nw64(reg, val); 2941 2942 return 0; 2943 } 2944 #endif 2945 2946 static int tcam_user_ip_class_enable(struct niu *np, unsigned long class, 2947 int on) 2948 { 2949 unsigned long reg; 2950 u64 val; 2951 2952 if (class < CLASS_CODE_USER_PROG1 || 2953 class > CLASS_CODE_USER_PROG4) 2954 return -EINVAL; 2955 2956 reg = L3_CLS(class - CLASS_CODE_USER_PROG1); 2957 val = nr64(reg); 2958 if (on) 2959 val |= L3_CLS_VALID; 2960 else 2961 val &= ~L3_CLS_VALID; 2962 nw64(reg, val); 2963 2964 return 0; 2965 } 2966 2967 static int tcam_user_ip_class_set(struct niu *np, unsigned long class, 2968 int ipv6, u64 protocol_id, 2969 u64 tos_mask, u64 tos_val) 2970 { 2971 unsigned long reg; 2972 u64 val; 2973 2974 if (class < CLASS_CODE_USER_PROG1 || 2975 class > CLASS_CODE_USER_PROG4 || 2976 (protocol_id & ~(u64)0xff) != 0 || 2977 (tos_mask & ~(u64)0xff) != 0 || 2978 (tos_val & ~(u64)0xff) != 0) 2979 return -EINVAL; 2980 2981 reg = L3_CLS(class - CLASS_CODE_USER_PROG1); 2982 val = nr64(reg); 2983 val &= ~(L3_CLS_IPVER | L3_CLS_PID | 2984 L3_CLS_TOSMASK | L3_CLS_TOS); 2985 if (ipv6) 2986 val |= L3_CLS_IPVER; 2987 val |= (protocol_id << L3_CLS_PID_SHIFT); 2988 val |= (tos_mask << L3_CLS_TOSMASK_SHIFT); 2989 val |= (tos_val << L3_CLS_TOS_SHIFT); 2990 nw64(reg, val); 2991 2992 return 0; 2993 } 2994 2995 static 
int tcam_early_init(struct niu *np) 2996 { 2997 unsigned long i; 2998 int err; 2999 3000 tcam_enable(np, 0); 3001 tcam_set_lat_and_ratio(np, 3002 DEFAULT_TCAM_LATENCY, 3003 DEFAULT_TCAM_ACCESS_RATIO); 3004 for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) { 3005 err = tcam_user_eth_class_enable(np, i, 0); 3006 if (err) 3007 return err; 3008 } 3009 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) { 3010 err = tcam_user_ip_class_enable(np, i, 0); 3011 if (err) 3012 return err; 3013 } 3014 3015 return 0; 3016 } 3017 3018 static int tcam_flush_all(struct niu *np) 3019 { 3020 unsigned long i; 3021 3022 for (i = 0; i < np->parent->tcam_num_entries; i++) { 3023 int err = tcam_flush(np, i); 3024 if (err) 3025 return err; 3026 } 3027 return 0; 3028 } 3029 3030 static u64 hash_addr_regval(unsigned long index, unsigned long num_entries) 3031 { 3032 return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0); 3033 } 3034 3035 #if 0 3036 static int hash_read(struct niu *np, unsigned long partition, 3037 unsigned long index, unsigned long num_entries, 3038 u64 *data) 3039 { 3040 u64 val = hash_addr_regval(index, num_entries); 3041 unsigned long i; 3042 3043 if (partition >= FCRAM_NUM_PARTITIONS || 3044 index + num_entries > FCRAM_SIZE) 3045 return -EINVAL; 3046 3047 nw64(HASH_TBL_ADDR(partition), val); 3048 for (i = 0; i < num_entries; i++) 3049 data[i] = nr64(HASH_TBL_DATA(partition)); 3050 3051 return 0; 3052 } 3053 #endif 3054 3055 static int hash_write(struct niu *np, unsigned long partition, 3056 unsigned long index, unsigned long num_entries, 3057 u64 *data) 3058 { 3059 u64 val = hash_addr_regval(index, num_entries); 3060 unsigned long i; 3061 3062 if (partition >= FCRAM_NUM_PARTITIONS || 3063 index + (num_entries * 8) > FCRAM_SIZE) 3064 return -EINVAL; 3065 3066 nw64(HASH_TBL_ADDR(partition), val); 3067 for (i = 0; i < num_entries; i++) 3068 nw64(HASH_TBL_DATA(partition), data[i]); 3069 3070 return 0; 3071 } 3072 3073 static void fflp_reset(struct niu *np) 3074 { 3075 u64 val; 3076 3077 nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST); 3078 udelay(10); 3079 nw64(FFLP_CFG_1, 0); 3080 3081 val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE; 3082 nw64(FFLP_CFG_1, val); 3083 } 3084 3085 static void fflp_set_timings(struct niu *np) 3086 { 3087 u64 val = nr64(FFLP_CFG_1); 3088 3089 val &= ~FFLP_CFG_1_FFLPINITDONE; 3090 val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT); 3091 nw64(FFLP_CFG_1, val); 3092 3093 val = nr64(FFLP_CFG_1); 3094 val |= FFLP_CFG_1_FFLPINITDONE; 3095 nw64(FFLP_CFG_1, val); 3096 3097 val = nr64(FCRAM_REF_TMR); 3098 val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN); 3099 val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT); 3100 val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT); 3101 nw64(FCRAM_REF_TMR, val); 3102 } 3103 3104 static int fflp_set_partition(struct niu *np, u64 partition, 3105 u64 mask, u64 base, int enable) 3106 { 3107 unsigned long reg; 3108 u64 val; 3109 3110 if (partition >= FCRAM_NUM_PARTITIONS || 3111 (mask & ~(u64)0x1f) != 0 || 3112 (base & ~(u64)0x1f) != 0) 3113 return -EINVAL; 3114 3115 reg = FLW_PRT_SEL(partition); 3116 3117 val = nr64(reg); 3118 val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE); 3119 val |= (mask << FLW_PRT_SEL_MASK_SHIFT); 3120 val |= (base << FLW_PRT_SEL_BASE_SHIFT); 3121 if (enable) 3122 val |= FLW_PRT_SEL_EXT; 3123 nw64(reg, val); 3124 3125 return 0; 3126 } 3127 3128 static int fflp_disable_all_partitions(struct niu *np) 3129 { 3130 unsigned long i; 3131 
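	/* Descriptive note (added): walk every FCRAM partition and program
	 * its FLW_PRT_SEL register with a zero mask/base and the EXT
	 * (enable) bit clear, via fflp_set_partition() above.
	 */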
	for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
		int err = fflp_set_partition(np, i, 0, 0, 0);
		if (err)
			return err;
	}
	return 0;
}

static void fflp_llcsnap_enable(struct niu *np, int on)
{
	u64 val = nr64(FFLP_CFG_1);

	if (on)
		val |= FFLP_CFG_1_LLCSNAP;
	else
		val &= ~FFLP_CFG_1_LLCSNAP;
	nw64(FFLP_CFG_1, val);
}

static void fflp_errors_enable(struct niu *np, int on)
{
	u64 val = nr64(FFLP_CFG_1);

	if (on)
		val &= ~FFLP_CFG_1_ERRORDIS;
	else
		val |= FFLP_CFG_1_ERRORDIS;
	nw64(FFLP_CFG_1, val);
}

static int fflp_hash_clear(struct niu *np)
{
	struct fcram_hash_ipv4 ent;
	unsigned long i;

	/* IPV4 hash entry with valid bit clear, rest is don't care. */
	memset(&ent, 0, sizeof(ent));
	ent.header = HASH_HEADER_EXT;

	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
		if (err)
			return err;
	}
	return 0;
}

static int fflp_early_init(struct niu *np)
{
	struct niu_parent *parent;
	unsigned long flags;
	int err;

	niu_lock_parent(np, flags);

	parent = np->parent;
	err = 0;
	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			fflp_reset(np);
			fflp_set_timings(np);
			err = fflp_disable_all_partitions(np);
			if (err) {
				netif_printk(np, probe, KERN_DEBUG, np->dev,
					     "fflp_disable_all_partitions failed, err=%d\n",
					     err);
				goto out;
			}
		}

		err = tcam_early_init(np);
		if (err) {
			netif_printk(np, probe, KERN_DEBUG, np->dev,
				     "tcam_early_init failed, err=%d\n", err);
			goto out;
		}
		fflp_llcsnap_enable(np, 1);
		fflp_errors_enable(np, 0);
		nw64(H1POLY, 0);
		nw64(H2POLY, 0);

		err = tcam_flush_all(np);
		if (err) {
			netif_printk(np, probe, KERN_DEBUG, np->dev,
				     "tcam_flush_all failed, err=%d\n", err);
			goto out;
		}
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			err = fflp_hash_clear(np);
			if (err) {
				netif_printk(np, probe, KERN_DEBUG, np->dev,
					     "fflp_hash_clear failed, err=%d\n",
					     err);
				goto out;
			}
		}

		vlan_tbl_clear(np);

		parent->flags |= PARENT_FLGS_CLS_HWINIT;
	}
out:
	niu_unlock_parent(np, flags);
	return err;
}

static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
{
	if (class_code < CLASS_CODE_USER_PROG1 ||
	    class_code > CLASS_CODE_SCTP_IPV6)
		return -EINVAL;

	nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
	return 0;
}

static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
{
	if (class_code < CLASS_CODE_USER_PROG1 ||
	    class_code > CLASS_CODE_SCTP_IPV6)
		return -EINVAL;

	nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
	return 0;
}

/* Entries for the ports are interleaved in the TCAM */
static u16 tcam_get_index(struct niu *np, u16 idx)
{
	/* One entry reserved for IP fragment rule */
	if (idx >= (np->clas.tcam_sz - 1))
		idx = 0;
	return np->clas.tcam_top + ((idx+1) * np->parent->num_ports);
}

static u16 tcam_get_size(struct niu *np)
{
	/* One entry reserved for IP fragment rule */
	return np->clas.tcam_sz - 1;
}

static u16
tcam_get_valid_entry_cnt(struct niu *np) 3274 { 3275 /* One entry reserved for IP fragment rule */ 3276 return np->clas.tcam_valid_entries - 1; 3277 } 3278 3279 static void niu_rx_skb_append(struct sk_buff *skb, struct page *page, 3280 u32 offset, u32 size, u32 truesize) 3281 { 3282 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size); 3283 3284 skb->len += size; 3285 skb->data_len += size; 3286 skb->truesize += truesize; 3287 } 3288 3289 static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a) 3290 { 3291 a >>= PAGE_SHIFT; 3292 a ^= (a >> ilog2(MAX_RBR_RING_SIZE)); 3293 3294 return a & (MAX_RBR_RING_SIZE - 1); 3295 } 3296 3297 static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr, 3298 struct page ***link) 3299 { 3300 unsigned int h = niu_hash_rxaddr(rp, addr); 3301 struct page *p, **pp; 3302 3303 addr &= PAGE_MASK; 3304 pp = &rp->rxhash[h]; 3305 for (; (p = *pp) != NULL; pp = &niu_next_page(p)) { 3306 if (p->private == addr) { 3307 *link = pp; 3308 goto found; 3309 } 3310 } 3311 BUG(); 3312 3313 found: 3314 return p; 3315 } 3316 3317 static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base) 3318 { 3319 unsigned int h = niu_hash_rxaddr(rp, base); 3320 3321 page->private = base; 3322 niu_next_page(page) = rp->rxhash[h]; 3323 rp->rxhash[h] = page; 3324 } 3325 3326 static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, 3327 gfp_t mask, int start_index) 3328 { 3329 struct page *page; 3330 u64 addr; 3331 int i; 3332 3333 page = alloc_page(mask); 3334 if (!page) 3335 return -ENOMEM; 3336 3337 addr = np->ops->map_page(np->device, page, 0, 3338 PAGE_SIZE, DMA_FROM_DEVICE); 3339 if (!addr) { 3340 __free_page(page); 3341 return -ENOMEM; 3342 } 3343 3344 niu_hash_page(rp, page, addr); 3345 if (rp->rbr_blocks_per_page > 1) 3346 page_ref_add(page, rp->rbr_blocks_per_page - 1); 3347 3348 for (i = 0; i < rp->rbr_blocks_per_page; i++) { 3349 __le32 *rbr = &rp->rbr[start_index + i]; 3350 3351 *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT); 3352 addr += rp->rbr_block_size; 3353 } 3354 3355 return 0; 3356 } 3357 3358 static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) 3359 { 3360 int index = rp->rbr_index; 3361 3362 rp->rbr_pending++; 3363 if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) { 3364 int err = niu_rbr_add_page(np, rp, mask, index); 3365 3366 if (unlikely(err)) { 3367 rp->rbr_pending--; 3368 return; 3369 } 3370 3371 rp->rbr_index += rp->rbr_blocks_per_page; 3372 BUG_ON(rp->rbr_index > rp->rbr_table_size); 3373 if (rp->rbr_index == rp->rbr_table_size) 3374 rp->rbr_index = 0; 3375 3376 if (rp->rbr_pending >= rp->rbr_kick_thresh) { 3377 nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending); 3378 rp->rbr_pending = 0; 3379 } 3380 } 3381 } 3382 3383 static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp) 3384 { 3385 unsigned int index = rp->rcr_index; 3386 int num_rcr = 0; 3387 3388 rp->rx_dropped++; 3389 while (1) { 3390 struct page *page, **link; 3391 u64 addr, val; 3392 u32 rcr_size; 3393 3394 num_rcr++; 3395 3396 val = le64_to_cpup(&rp->rcr[index]); 3397 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << 3398 RCR_ENTRY_PKT_BUF_ADDR_SHIFT; 3399 page = niu_find_rxpage(rp, addr, &link); 3400 3401 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> 3402 RCR_ENTRY_PKTBUFSZ_SHIFT]; 3403 if ((page->private + PAGE_SIZE) - rcr_size == addr) { 3404 *link = niu_next_page(page); 3405 np->ops->unmap_page(np->device, page->private, 3406 PAGE_SIZE, DMA_FROM_DEVICE); 3407 page->private = 0; 3408 
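			/* Descriptive note (added): drop the stale
			 * hash-chain link before the page is freed and
			 * its RBR slot is queued for refill.
			 */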
niu_next_page(page) = NULL; 3409 __free_page(page); 3410 rp->rbr_refill_pending++; 3411 } 3412 3413 index = NEXT_RCR(rp, index); 3414 if (!(val & RCR_ENTRY_MULTI)) 3415 break; 3416 3417 } 3418 rp->rcr_index = index; 3419 3420 return num_rcr; 3421 } 3422 3423 static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, 3424 struct rx_ring_info *rp) 3425 { 3426 unsigned int index = rp->rcr_index; 3427 struct rx_pkt_hdr1 *rh; 3428 struct sk_buff *skb; 3429 int len, num_rcr; 3430 3431 skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE); 3432 if (unlikely(!skb)) 3433 return niu_rx_pkt_ignore(np, rp); 3434 3435 num_rcr = 0; 3436 while (1) { 3437 struct page *page, **link; 3438 u32 rcr_size, append_size; 3439 u64 addr, val, off; 3440 3441 num_rcr++; 3442 3443 val = le64_to_cpup(&rp->rcr[index]); 3444 3445 len = (val & RCR_ENTRY_L2_LEN) >> 3446 RCR_ENTRY_L2_LEN_SHIFT; 3447 append_size = len + ETH_HLEN + ETH_FCS_LEN; 3448 3449 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << 3450 RCR_ENTRY_PKT_BUF_ADDR_SHIFT; 3451 page = niu_find_rxpage(rp, addr, &link); 3452 3453 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> 3454 RCR_ENTRY_PKTBUFSZ_SHIFT]; 3455 3456 off = addr & ~PAGE_MASK; 3457 if (num_rcr == 1) { 3458 int ptype; 3459 3460 ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT); 3461 if ((ptype == RCR_PKT_TYPE_TCP || 3462 ptype == RCR_PKT_TYPE_UDP) && 3463 !(val & (RCR_ENTRY_NOPORT | 3464 RCR_ENTRY_ERROR))) 3465 skb->ip_summed = CHECKSUM_UNNECESSARY; 3466 else 3467 skb_checksum_none_assert(skb); 3468 } else if (!(val & RCR_ENTRY_MULTI)) 3469 append_size = append_size - skb->len; 3470 3471 niu_rx_skb_append(skb, page, off, append_size, rcr_size); 3472 if ((page->private + rp->rbr_block_size) - rcr_size == addr) { 3473 *link = niu_next_page(page); 3474 np->ops->unmap_page(np->device, page->private, 3475 PAGE_SIZE, DMA_FROM_DEVICE); 3476 page->private = 0; 3477 niu_next_page(page) = NULL; 3478 rp->rbr_refill_pending++; 3479 } else 3480 get_page(page); 3481 3482 index = NEXT_RCR(rp, index); 3483 if (!(val & RCR_ENTRY_MULTI)) 3484 break; 3485 3486 } 3487 rp->rcr_index = index; 3488 3489 len += sizeof(*rh); 3490 len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN); 3491 __pskb_pull_tail(skb, len); 3492 3493 rh = (struct rx_pkt_hdr1 *) skb->data; 3494 if (np->dev->features & NETIF_F_RXHASH) 3495 skb_set_hash(skb, 3496 ((u32)rh->hashval2_0 << 24 | 3497 (u32)rh->hashval2_1 << 16 | 3498 (u32)rh->hashval1_1 << 8 | 3499 (u32)rh->hashval1_2 << 0), 3500 PKT_HASH_TYPE_L3); 3501 skb_pull(skb, sizeof(*rh)); 3502 3503 rp->rx_packets++; 3504 rp->rx_bytes += skb->len; 3505 3506 skb->protocol = eth_type_trans(skb, np->dev); 3507 skb_record_rx_queue(skb, rp->rx_channel); 3508 napi_gro_receive(napi, skb); 3509 3510 return num_rcr; 3511 } 3512 3513 static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) 3514 { 3515 int blocks_per_page = rp->rbr_blocks_per_page; 3516 int err, index = rp->rbr_index; 3517 3518 err = 0; 3519 while (index < (rp->rbr_table_size - blocks_per_page)) { 3520 err = niu_rbr_add_page(np, rp, mask, index); 3521 if (unlikely(err)) 3522 break; 3523 3524 index += blocks_per_page; 3525 } 3526 3527 rp->rbr_index = index; 3528 return err; 3529 } 3530 3531 static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp) 3532 { 3533 int i; 3534 3535 for (i = 0; i < MAX_RBR_RING_SIZE; i++) { 3536 struct page *page; 3537 3538 page = rp->rxhash[i]; 3539 while (page) { 3540 struct page *next = niu_next_page(page); 3541 u64 base = page->private; 3542 3543 np->ops->unmap_page(np->device, base, 
					    PAGE_SIZE, DMA_FROM_DEVICE);
			page->private = 0;
			niu_next_page(page) = NULL;

			__free_page(page);

			page = next;
		}
	}

	for (i = 0; i < rp->rbr_table_size; i++)
		rp->rbr[i] = cpu_to_le32(0);
	rp->rbr_index = 0;
}

static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
{
	struct tx_buff_info *tb = &rp->tx_buffs[idx];
	struct sk_buff *skb = tb->skb;
	struct tx_pkt_hdr *tp;
	u64 tx_flags;
	int i, len;

	tp = (struct tx_pkt_hdr *) skb->data;
	tx_flags = le64_to_cpup(&tp->flags);

	rp->tx_packets++;
	rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
			 ((tx_flags & TXHDR_PAD) / 2));

	len = skb_headlen(skb);
	np->ops->unmap_single(np->device, tb->mapping,
			      len, DMA_TO_DEVICE);

	if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
		rp->mark_pending--;

	tb->skb = NULL;
	do {
		idx = NEXT_TX(rp, idx);
		len -= MAX_TX_DESC_LEN;
	} while (len > 0);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		tb = &rp->tx_buffs[idx];
		BUG_ON(tb->skb != NULL);
		np->ops->unmap_page(np->device, tb->mapping,
				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
				    DMA_TO_DEVICE);
		idx = NEXT_TX(rp, idx);
	}

	dev_kfree_skb(skb);

	return idx;
}

#define NIU_TX_WAKEUP_THRESH(rp)		((rp)->pending / 4)

static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
{
	struct netdev_queue *txq;
	u16 pkt_cnt, tmp;
	int cons, index;
	u64 cs;

	index = (rp - np->tx_rings);
	txq = netdev_get_tx_queue(np->dev, index);

	cs = rp->tx_cs;
	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
		goto out;

	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);

	rp->last_pkt_cnt = tmp;

	cons = rp->cons;

	netif_printk(np, tx_done, KERN_DEBUG, np->dev,
		     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);

	while (pkt_cnt--)
		cons = release_tx_packet(np, rp, cons);

	rp->cons = cons;
	smp_mb();

out:
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
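/* Illustrative sketch (added, not part of the driver): niu_tx_work()
 * above relies on wrap-around subtraction to turn the free-running
 * hardware packet counter in TX_CS into a "newly completed" delta.
 * The hypothetical helper below shows the same arithmetic in
 * isolation, assuming the counter width is given by the TX_CS_PKT_CNT
 * field mask.
 */
#if 0
static u16 niu_tx_pkt_cnt_delta(u16 hw_cnt, u16 last_cnt)
{
	/* modulo-field-width subtraction, as in niu_tx_work() */
	return (hw_cnt - last_cnt) & (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
}
#endif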
static inline void niu_sync_rx_discard_stats(struct niu *np,
					     struct rx_ring_info *rp,
					     const int limit)
{
	/* This elaborate scheme is needed for reading the RX discard
	 * counters, as they are only 16-bit and can overflow quickly,
	 * and because the overflow indication bit is not usable as
	 * the counter value does not wrap, but remains at max value
	 * 0xFFFF.
	 *
	 * In theory and in practice counters can be lost in between
	 * reading nr64() and clearing the counter nw64(). For this
	 * reason, the number of counter clearings nw64() is
	 * limited/reduced through the limit parameter.
	 */
	int rx_channel = rp->rx_channel;
	u32 misc, wred;

	/* RXMISC (Receive Miscellaneous Discard Count) covers the
	 * following discard events: IPP (Input Port Process),
	 * FFLP/TCAM, full RCR (Receive Completion Ring), and RBR
	 * (Receive Block Ring) prefetch buffer empty.
	 */
	misc = nr64(RXMISC(rx_channel));
	if (unlikely((misc & RXMISC_COUNT) > limit)) {
		nw64(RXMISC(rx_channel), 0);
		rp->rx_errors += misc & RXMISC_COUNT;

		if (unlikely(misc & RXMISC_OFLOW))
			dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
				rx_channel);

		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
			     "rx-%d: MISC drop=%u over=%u\n",
			     rx_channel, misc, misc-limit);
	}

	/* WRED (Weighted Random Early Discard) by hardware */
	wred = nr64(RED_DIS_CNT(rx_channel));
	if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
		nw64(RED_DIS_CNT(rx_channel), 0);
		rp->rx_dropped += wred & RED_DIS_CNT_COUNT;

		if (unlikely(wred & RED_DIS_CNT_OFLOW))
			dev_err(np->device, "rx-%d: Counter overflow WRED discard\n",
				rx_channel);

		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
			     "rx-%d: WRED drop=%u over=%u\n",
			     rx_channel, wred, wred-limit);
	}
}

static int niu_rx_work(struct napi_struct *napi, struct niu *np,
		       struct rx_ring_info *rp, int budget)
{
	int qlen, rcr_done = 0, work_done = 0;
	struct rxdma_mailbox *mbox = rp->mbox;
	u64 stat;

#if 1
	stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
	qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
#else
	stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
	qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
#endif
	mbox->rx_dma_ctl_stat = 0;
	mbox->rcrstat_a = 0;

	netif_printk(np, rx_status, KERN_DEBUG, np->dev,
		     "%s(chan[%d]), stat[%llx] qlen=%d\n",
		     __func__, rp->rx_channel, (unsigned long long)stat, qlen);

	rcr_done = work_done = 0;
	qlen = min(qlen, budget);
	while (work_done < qlen) {
		rcr_done += niu_process_rx_pkt(napi, np, rp);
		work_done++;
	}

	if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
		unsigned int i;

		for (i = 0; i < rp->rbr_refill_pending; i++)
			niu_rbr_refill(np, rp, GFP_ATOMIC);
		rp->rbr_refill_pending = 0;
	}

	stat = (RX_DMA_CTL_STAT_MEX |
		((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
		((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));

	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);

	/* Only sync discard stats when qlen indicates potential for drops */
	if (qlen > 10)
		niu_sync_rx_discard_stats(np, rp, 0x7FFF);

	return work_done;
}

static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
{
	u64 v0 = lp->v0;
	u32 tx_vec = (v0 >> 32);
	u32 rx_vec = (v0 & 0xffffffff);
	int i, work_done = 0;

	netif_printk(np, intr, KERN_DEBUG, np->dev,
		     "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];
		if (tx_vec & (1 << rp->tx_channel))
			niu_tx_work(np, rp);
		nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
	}

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		if (rx_vec & (1 << rp->rx_channel)) {
			int this_work_done;

			this_work_done = niu_rx_work(&lp->napi, np, rp,
						     budget);

			budget -= this_work_done;
			work_done += this_work_done;
		}
		nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
	}

	return work_done;
}

static int niu_poll(struct napi_struct *napi, int budget)
{
	struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
	struct niu *np = lp->np;
	int work_done;

	work_done = niu_poll_core(np, lp, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		niu_ldg_rearm(np, lp, 1);
	}
	return work_done;
}
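/* Illustrative sketch (added, not part of the driver): the layout of
 * the logical-device-group status word consumed by niu_poll_core()
 * above.  TX DMA channels report in the upper 32 bits of v0 and RX
 * DMA channels in the lower 32 bits, one bit per channel.  The helper
 * name is hypothetical.
 */
#if 0
static void niu_ldg_decode_v0(u64 v0, u32 *tx_vec, u32 *rx_vec)
{
	*tx_vec = v0 >> 32;		/* bit N <=> TX channel N */
	*rx_vec = v0 & 0xffffffff;	/* bit N <=> RX channel N */
}
#endif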
static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
				  u64 stat)
{
	netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);

	if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
		pr_cont("RBR_TMOUT ");
	if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
		pr_cont("RSP_CNT ");
	if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
		pr_cont("BYTE_EN_BUS ");
	if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
		pr_cont("RSP_DAT ");
	if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
		pr_cont("RCR_ACK ");
	if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
		pr_cont("RCR_SHA_PAR ");
	if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
		pr_cont("RBR_PRE_PAR ");
	if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
		pr_cont("CONFIG ");
	if (stat & RX_DMA_CTL_STAT_RCRINCON)
		pr_cont("RCRINCON ");
	if (stat & RX_DMA_CTL_STAT_RCRFULL)
		pr_cont("RCRFULL ");
	if (stat & RX_DMA_CTL_STAT_RBRFULL)
		pr_cont("RBRFULL ");
	if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
		pr_cont("RBRLOGPAGE ");
	if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
		pr_cont("CFIGLOGPAGE ");
	if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
		pr_cont("DC_FIFO ");

	pr_cont(")\n");
}

static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
{
	u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
	int err = 0;

	if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
		    RX_DMA_CTL_STAT_PORT_FATAL))
		err = -EINVAL;

	if (err) {
		netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
			   rp->rx_channel,
			   (unsigned long long) stat);

		niu_log_rxchan_errors(np, rp, stat);
	}

	nw64(RX_DMA_CTL_STAT(rp->rx_channel),
	     stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);

	return err;
}

static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
				  u64 cs)
{
	netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);

	if (cs & TX_CS_MBOX_ERR)
		pr_cont("MBOX ");
	if (cs & TX_CS_PKT_SIZE_ERR)
		pr_cont("PKT_SIZE ");
	if (cs & TX_CS_TX_RING_OFLOW)
		pr_cont("TX_RING_OFLOW ");
	if (cs & TX_CS_PREF_BUF_PAR_ERR)
		pr_cont("PREF_BUF_PAR ");
	if (cs & TX_CS_NACK_PREF)
		pr_cont("NACK_PREF ");
	if (cs & TX_CS_NACK_PKT_RD)
		pr_cont("NACK_PKT_RD ");
	if (cs & TX_CS_CONF_PART_ERR)
		pr_cont("CONF_PART ");
	if (cs & TX_CS_PKT_PRT_ERR)
		pr_cont("PKT_PTR ");

	pr_cont(")\n");
}

static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
{
	u64 cs, logh, logl;

	cs = nr64(TX_CS(rp->tx_channel));
	logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
	logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));

	netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
		   rp->tx_channel,
		   (unsigned long long)cs,
		   (unsigned long long)logh,
		   (unsigned long long)logl);

	niu_log_txchan_errors(np, rp, cs);

	return -ENODEV;
}

static int niu_mif_interrupt(struct niu *np)
{
	u64 mif_status = nr64(MIF_STATUS);
	int phy_mdint = 0;

	if (np->flags & NIU_FLAGS_XMAC) {
		u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);

		if (xrxmac_stat &
XRXMAC_STATUS_PHY_MDINT) 3910 phy_mdint = 1; 3911 } 3912 3913 netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n", 3914 (unsigned long long)mif_status, phy_mdint); 3915 3916 return -ENODEV; 3917 } 3918 3919 static void niu_xmac_interrupt(struct niu *np) 3920 { 3921 struct niu_xmac_stats *mp = &np->mac_stats.xmac; 3922 u64 val; 3923 3924 val = nr64_mac(XTXMAC_STATUS); 3925 if (val & XTXMAC_STATUS_FRAME_CNT_EXP) 3926 mp->tx_frames += TXMAC_FRM_CNT_COUNT; 3927 if (val & XTXMAC_STATUS_BYTE_CNT_EXP) 3928 mp->tx_bytes += TXMAC_BYTE_CNT_COUNT; 3929 if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR) 3930 mp->tx_fifo_errors++; 3931 if (val & XTXMAC_STATUS_TXMAC_OFLOW) 3932 mp->tx_overflow_errors++; 3933 if (val & XTXMAC_STATUS_MAX_PSIZE_ERR) 3934 mp->tx_max_pkt_size_errors++; 3935 if (val & XTXMAC_STATUS_TXMAC_UFLOW) 3936 mp->tx_underflow_errors++; 3937 3938 val = nr64_mac(XRXMAC_STATUS); 3939 if (val & XRXMAC_STATUS_LCL_FLT_STATUS) 3940 mp->rx_local_faults++; 3941 if (val & XRXMAC_STATUS_RFLT_DET) 3942 mp->rx_remote_faults++; 3943 if (val & XRXMAC_STATUS_LFLT_CNT_EXP) 3944 mp->rx_link_faults += LINK_FAULT_CNT_COUNT; 3945 if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP) 3946 mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT; 3947 if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP) 3948 mp->rx_frags += RXMAC_FRAG_CNT_COUNT; 3949 if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP) 3950 mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; 3951 if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) 3952 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; 3953 if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP) 3954 mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; 3955 if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP) 3956 mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT; 3957 if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP) 3958 mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT; 3959 if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP) 3960 mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT; 3961 if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP) 3962 mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT; 3963 if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP) 3964 mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT; 3965 if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP) 3966 mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT; 3967 if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP) 3968 mp->rx_octets += RXMAC_BT_CNT_COUNT; 3969 if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP) 3970 mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT; 3971 if (val & XRXMAC_STATUS_LENERR_CNT_EXP) 3972 mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT; 3973 if (val & XRXMAC_STATUS_CRCERR_CNT_EXP) 3974 mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT; 3975 if (val & XRXMAC_STATUS_RXUFLOW) 3976 mp->rx_underflows++; 3977 if (val & XRXMAC_STATUS_RXOFLOW) 3978 mp->rx_overflows++; 3979 3980 val = nr64_mac(XMAC_FC_STAT); 3981 if (val & XMAC_FC_STAT_TX_MAC_NPAUSE) 3982 mp->pause_off_state++; 3983 if (val & XMAC_FC_STAT_TX_MAC_PAUSE) 3984 mp->pause_on_state++; 3985 if (val & XMAC_FC_STAT_RX_MAC_RPAUSE) 3986 mp->pause_received++; 3987 } 3988 3989 static void niu_bmac_interrupt(struct niu *np) 3990 { 3991 struct niu_bmac_stats *mp = &np->mac_stats.bmac; 3992 u64 val; 3993 3994 val = nr64_mac(BTXMAC_STATUS); 3995 if (val & BTXMAC_STATUS_UNDERRUN) 3996 mp->tx_underflow_errors++; 3997 if (val & BTXMAC_STATUS_MAX_PKT_ERR) 3998 mp->tx_max_pkt_size_errors++; 3999 if (val & BTXMAC_STATUS_BYTE_CNT_EXP) 4000 mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT; 4001 if (val & BTXMAC_STATUS_FRAME_CNT_EXP) 4002 mp->tx_frames += BTXMAC_FRM_CNT_COUNT; 4003 4004 val = nr64_mac(BRXMAC_STATUS); 4005 if (val & BRXMAC_STATUS_OVERFLOW) 4006 mp->rx_overflows++; 4007 if (val & 
BRXMAC_STATUS_FRAME_CNT_EXP) 4008 mp->rx_frames += BRXMAC_FRAME_CNT_COUNT; 4009 if (val & BRXMAC_STATUS_ALIGN_ERR_EXP) 4010 mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; 4011 if (val & BRXMAC_STATUS_CRC_ERR_EXP) 4012 mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; 4013 if (val & BRXMAC_STATUS_LEN_ERR_EXP) 4014 mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT; 4015 4016 val = nr64_mac(BMAC_CTRL_STATUS); 4017 if (val & BMAC_CTRL_STATUS_NOPAUSE) 4018 mp->pause_off_state++; 4019 if (val & BMAC_CTRL_STATUS_PAUSE) 4020 mp->pause_on_state++; 4021 if (val & BMAC_CTRL_STATUS_PAUSE_RECV) 4022 mp->pause_received++; 4023 } 4024 4025 static int niu_mac_interrupt(struct niu *np) 4026 { 4027 if (np->flags & NIU_FLAGS_XMAC) 4028 niu_xmac_interrupt(np); 4029 else 4030 niu_bmac_interrupt(np); 4031 4032 return 0; 4033 } 4034 4035 static void niu_log_device_error(struct niu *np, u64 stat) 4036 { 4037 netdev_err(np->dev, "Core device errors ( "); 4038 4039 if (stat & SYS_ERR_MASK_META2) 4040 pr_cont("META2 "); 4041 if (stat & SYS_ERR_MASK_META1) 4042 pr_cont("META1 "); 4043 if (stat & SYS_ERR_MASK_PEU) 4044 pr_cont("PEU "); 4045 if (stat & SYS_ERR_MASK_TXC) 4046 pr_cont("TXC "); 4047 if (stat & SYS_ERR_MASK_RDMC) 4048 pr_cont("RDMC "); 4049 if (stat & SYS_ERR_MASK_TDMC) 4050 pr_cont("TDMC "); 4051 if (stat & SYS_ERR_MASK_ZCP) 4052 pr_cont("ZCP "); 4053 if (stat & SYS_ERR_MASK_FFLP) 4054 pr_cont("FFLP "); 4055 if (stat & SYS_ERR_MASK_IPP) 4056 pr_cont("IPP "); 4057 if (stat & SYS_ERR_MASK_MAC) 4058 pr_cont("MAC "); 4059 if (stat & SYS_ERR_MASK_SMX) 4060 pr_cont("SMX "); 4061 4062 pr_cont(")\n"); 4063 } 4064 4065 static int niu_device_error(struct niu *np) 4066 { 4067 u64 stat = nr64(SYS_ERR_STAT); 4068 4069 netdev_err(np->dev, "Core device error, stat[%llx]\n", 4070 (unsigned long long)stat); 4071 4072 niu_log_device_error(np, stat); 4073 4074 return -ENODEV; 4075 } 4076 4077 static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp, 4078 u64 v0, u64 v1, u64 v2) 4079 { 4080 4081 int i, err = 0; 4082 4083 lp->v0 = v0; 4084 lp->v1 = v1; 4085 lp->v2 = v2; 4086 4087 if (v1 & 0x00000000ffffffffULL) { 4088 u32 rx_vec = (v1 & 0xffffffff); 4089 4090 for (i = 0; i < np->num_rx_rings; i++) { 4091 struct rx_ring_info *rp = &np->rx_rings[i]; 4092 4093 if (rx_vec & (1 << rp->rx_channel)) { 4094 int r = niu_rx_error(np, rp); 4095 if (r) { 4096 err = r; 4097 } else { 4098 if (!v0) 4099 nw64(RX_DMA_CTL_STAT(rp->rx_channel), 4100 RX_DMA_CTL_STAT_MEX); 4101 } 4102 } 4103 } 4104 } 4105 if (v1 & 0x7fffffff00000000ULL) { 4106 u32 tx_vec = (v1 >> 32) & 0x7fffffff; 4107 4108 for (i = 0; i < np->num_tx_rings; i++) { 4109 struct tx_ring_info *rp = &np->tx_rings[i]; 4110 4111 if (tx_vec & (1 << rp->tx_channel)) { 4112 int r = niu_tx_error(np, rp); 4113 if (r) 4114 err = r; 4115 } 4116 } 4117 } 4118 if ((v0 | v1) & 0x8000000000000000ULL) { 4119 int r = niu_mif_interrupt(np); 4120 if (r) 4121 err = r; 4122 } 4123 if (v2) { 4124 if (v2 & 0x01ef) { 4125 int r = niu_mac_interrupt(np); 4126 if (r) 4127 err = r; 4128 } 4129 if (v2 & 0x0210) { 4130 int r = niu_device_error(np); 4131 if (r) 4132 err = r; 4133 } 4134 } 4135 4136 if (err) 4137 niu_enable_interrupts(np, 0); 4138 4139 return err; 4140 } 4141 4142 static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp, 4143 int ldn) 4144 { 4145 struct rxdma_mailbox *mbox = rp->mbox; 4146 u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); 4147 4148 stat_write = (RX_DMA_CTL_STAT_RCRTHRES | 4149 RX_DMA_CTL_STAT_RCRTO); 4150 nw64(RX_DMA_CTL_STAT(rp->rx_channel), 
stat_write); 4151 4152 netif_printk(np, intr, KERN_DEBUG, np->dev, 4153 "%s() stat[%llx]\n", __func__, (unsigned long long)stat); 4154 } 4155 4156 static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp, 4157 int ldn) 4158 { 4159 rp->tx_cs = nr64(TX_CS(rp->tx_channel)); 4160 4161 netif_printk(np, intr, KERN_DEBUG, np->dev, 4162 "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs); 4163 } 4164 4165 static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) 4166 { 4167 struct niu_parent *parent = np->parent; 4168 u32 rx_vec, tx_vec; 4169 int i; 4170 4171 tx_vec = (v0 >> 32); 4172 rx_vec = (v0 & 0xffffffff); 4173 4174 for (i = 0; i < np->num_rx_rings; i++) { 4175 struct rx_ring_info *rp = &np->rx_rings[i]; 4176 int ldn = LDN_RXDMA(rp->rx_channel); 4177 4178 if (parent->ldg_map[ldn] != ldg) 4179 continue; 4180 4181 nw64(LD_IM0(ldn), LD_IM0_MASK); 4182 if (rx_vec & (1 << rp->rx_channel)) 4183 niu_rxchan_intr(np, rp, ldn); 4184 } 4185 4186 for (i = 0; i < np->num_tx_rings; i++) { 4187 struct tx_ring_info *rp = &np->tx_rings[i]; 4188 int ldn = LDN_TXDMA(rp->tx_channel); 4189 4190 if (parent->ldg_map[ldn] != ldg) 4191 continue; 4192 4193 nw64(LD_IM0(ldn), LD_IM0_MASK); 4194 if (tx_vec & (1 << rp->tx_channel)) 4195 niu_txchan_intr(np, rp, ldn); 4196 } 4197 } 4198 4199 static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, 4200 u64 v0, u64 v1, u64 v2) 4201 { 4202 if (likely(napi_schedule_prep(&lp->napi))) { 4203 lp->v0 = v0; 4204 lp->v1 = v1; 4205 lp->v2 = v2; 4206 __niu_fastpath_interrupt(np, lp->ldg_num, v0); 4207 __napi_schedule(&lp->napi); 4208 } 4209 } 4210 4211 static irqreturn_t niu_interrupt(int irq, void *dev_id) 4212 { 4213 struct niu_ldg *lp = dev_id; 4214 struct niu *np = lp->np; 4215 int ldg = lp->ldg_num; 4216 unsigned long flags; 4217 u64 v0, v1, v2; 4218 4219 if (netif_msg_intr(np)) 4220 printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)", 4221 __func__, lp, ldg); 4222 4223 spin_lock_irqsave(&np->lock, flags); 4224 4225 v0 = nr64(LDSV0(ldg)); 4226 v1 = nr64(LDSV1(ldg)); 4227 v2 = nr64(LDSV2(ldg)); 4228 4229 if (netif_msg_intr(np)) 4230 pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n", 4231 (unsigned long long) v0, 4232 (unsigned long long) v1, 4233 (unsigned long long) v2); 4234 4235 if (unlikely(!v0 && !v1 && !v2)) { 4236 spin_unlock_irqrestore(&np->lock, flags); 4237 return IRQ_NONE; 4238 } 4239 4240 if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) { 4241 int err = niu_slowpath_interrupt(np, lp, v0, v1, v2); 4242 if (err) 4243 goto out; 4244 } 4245 if (likely(v0 & ~((u64)1 << LDN_MIF))) 4246 niu_schedule_napi(np, lp, v0, v1, v2); 4247 else 4248 niu_ldg_rearm(np, lp, 1); 4249 out: 4250 spin_unlock_irqrestore(&np->lock, flags); 4251 4252 return IRQ_HANDLED; 4253 } 4254 4255 static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp) 4256 { 4257 if (rp->mbox) { 4258 np->ops->free_coherent(np->device, 4259 sizeof(struct rxdma_mailbox), 4260 rp->mbox, rp->mbox_dma); 4261 rp->mbox = NULL; 4262 } 4263 if (rp->rcr) { 4264 np->ops->free_coherent(np->device, 4265 MAX_RCR_RING_SIZE * sizeof(__le64), 4266 rp->rcr, rp->rcr_dma); 4267 rp->rcr = NULL; 4268 rp->rcr_table_size = 0; 4269 rp->rcr_index = 0; 4270 } 4271 if (rp->rbr) { 4272 niu_rbr_free(np, rp); 4273 4274 np->ops->free_coherent(np->device, 4275 MAX_RBR_RING_SIZE * sizeof(__le32), 4276 rp->rbr, rp->rbr_dma); 4277 rp->rbr = NULL; 4278 rp->rbr_table_size = 0; 4279 rp->rbr_index = 0; 4280 } 4281 kfree(rp->rxhash); 4282 rp->rxhash = NULL; 4283 } 4284 4285 static void 
niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp) 4286 { 4287 if (rp->mbox) { 4288 np->ops->free_coherent(np->device, 4289 sizeof(struct txdma_mailbox), 4290 rp->mbox, rp->mbox_dma); 4291 rp->mbox = NULL; 4292 } 4293 if (rp->descr) { 4294 int i; 4295 4296 for (i = 0; i < MAX_TX_RING_SIZE; i++) { 4297 if (rp->tx_buffs[i].skb) 4298 (void) release_tx_packet(np, rp, i); 4299 } 4300 4301 np->ops->free_coherent(np->device, 4302 MAX_TX_RING_SIZE * sizeof(__le64), 4303 rp->descr, rp->descr_dma); 4304 rp->descr = NULL; 4305 rp->pending = 0; 4306 rp->prod = 0; 4307 rp->cons = 0; 4308 rp->wrap_bit = 0; 4309 } 4310 } 4311 4312 static void niu_free_channels(struct niu *np) 4313 { 4314 int i; 4315 4316 if (np->rx_rings) { 4317 for (i = 0; i < np->num_rx_rings; i++) { 4318 struct rx_ring_info *rp = &np->rx_rings[i]; 4319 4320 niu_free_rx_ring_info(np, rp); 4321 } 4322 kfree(np->rx_rings); 4323 np->rx_rings = NULL; 4324 np->num_rx_rings = 0; 4325 } 4326 4327 if (np->tx_rings) { 4328 for (i = 0; i < np->num_tx_rings; i++) { 4329 struct tx_ring_info *rp = &np->tx_rings[i]; 4330 4331 niu_free_tx_ring_info(np, rp); 4332 } 4333 kfree(np->tx_rings); 4334 np->tx_rings = NULL; 4335 np->num_tx_rings = 0; 4336 } 4337 } 4338 4339 static int niu_alloc_rx_ring_info(struct niu *np, 4340 struct rx_ring_info *rp) 4341 { 4342 BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64); 4343 4344 rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *), 4345 GFP_KERNEL); 4346 if (!rp->rxhash) 4347 return -ENOMEM; 4348 4349 rp->mbox = np->ops->alloc_coherent(np->device, 4350 sizeof(struct rxdma_mailbox), 4351 &rp->mbox_dma, GFP_KERNEL); 4352 if (!rp->mbox) 4353 return -ENOMEM; 4354 if ((unsigned long)rp->mbox & (64UL - 1)) { 4355 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n", 4356 rp->mbox); 4357 return -EINVAL; 4358 } 4359 4360 rp->rcr = np->ops->alloc_coherent(np->device, 4361 MAX_RCR_RING_SIZE * sizeof(__le64), 4362 &rp->rcr_dma, GFP_KERNEL); 4363 if (!rp->rcr) 4364 return -ENOMEM; 4365 if ((unsigned long)rp->rcr & (64UL - 1)) { 4366 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n", 4367 rp->rcr); 4368 return -EINVAL; 4369 } 4370 rp->rcr_table_size = MAX_RCR_RING_SIZE; 4371 rp->rcr_index = 0; 4372 4373 rp->rbr = np->ops->alloc_coherent(np->device, 4374 MAX_RBR_RING_SIZE * sizeof(__le32), 4375 &rp->rbr_dma, GFP_KERNEL); 4376 if (!rp->rbr) 4377 return -ENOMEM; 4378 if ((unsigned long)rp->rbr & (64UL - 1)) { 4379 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n", 4380 rp->rbr); 4381 return -EINVAL; 4382 } 4383 rp->rbr_table_size = MAX_RBR_RING_SIZE; 4384 rp->rbr_index = 0; 4385 rp->rbr_pending = 0; 4386 4387 return 0; 4388 } 4389 4390 static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp) 4391 { 4392 int mtu = np->dev->mtu; 4393 4394 /* These values are recommended by the HW designers for fair 4395 * utilization of DRR amongst the rings. 
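	 * Effectively max_burst = min(MTU + 32, 4096) bytes, per the
	 * clamp applied just below.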
4396 */ 4397 rp->max_burst = mtu + 32; 4398 if (rp->max_burst > 4096) 4399 rp->max_burst = 4096; 4400 } 4401 4402 static int niu_alloc_tx_ring_info(struct niu *np, 4403 struct tx_ring_info *rp) 4404 { 4405 BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64); 4406 4407 rp->mbox = np->ops->alloc_coherent(np->device, 4408 sizeof(struct txdma_mailbox), 4409 &rp->mbox_dma, GFP_KERNEL); 4410 if (!rp->mbox) 4411 return -ENOMEM; 4412 if ((unsigned long)rp->mbox & (64UL - 1)) { 4413 netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n", 4414 rp->mbox); 4415 return -EINVAL; 4416 } 4417 4418 rp->descr = np->ops->alloc_coherent(np->device, 4419 MAX_TX_RING_SIZE * sizeof(__le64), 4420 &rp->descr_dma, GFP_KERNEL); 4421 if (!rp->descr) 4422 return -ENOMEM; 4423 if ((unsigned long)rp->descr & (64UL - 1)) { 4424 netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n", 4425 rp->descr); 4426 return -EINVAL; 4427 } 4428 4429 rp->pending = MAX_TX_RING_SIZE; 4430 rp->prod = 0; 4431 rp->cons = 0; 4432 rp->wrap_bit = 0; 4433 4434 /* XXX make these configurable... XXX */ 4435 rp->mark_freq = rp->pending / 4; 4436 4437 niu_set_max_burst(np, rp); 4438 4439 return 0; 4440 } 4441 4442 static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp) 4443 { 4444 u16 bss; 4445 4446 bss = min(PAGE_SHIFT, 15); 4447 4448 rp->rbr_block_size = 1 << bss; 4449 rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss); 4450 4451 rp->rbr_sizes[0] = 256; 4452 rp->rbr_sizes[1] = 1024; 4453 if (np->dev->mtu > ETH_DATA_LEN) { 4454 switch (PAGE_SIZE) { 4455 case 4 * 1024: 4456 rp->rbr_sizes[2] = 4096; 4457 break; 4458 4459 default: 4460 rp->rbr_sizes[2] = 8192; 4461 break; 4462 } 4463 } else { 4464 rp->rbr_sizes[2] = 2048; 4465 } 4466 rp->rbr_sizes[3] = rp->rbr_block_size; 4467 } 4468 4469 static int niu_alloc_channels(struct niu *np) 4470 { 4471 struct niu_parent *parent = np->parent; 4472 int first_rx_channel, first_tx_channel; 4473 int num_rx_rings, num_tx_rings; 4474 struct rx_ring_info *rx_rings; 4475 struct tx_ring_info *tx_rings; 4476 int i, port, err; 4477 4478 port = np->port; 4479 first_rx_channel = first_tx_channel = 0; 4480 for (i = 0; i < port; i++) { 4481 first_rx_channel += parent->rxchan_per_port[i]; 4482 first_tx_channel += parent->txchan_per_port[i]; 4483 } 4484 4485 num_rx_rings = parent->rxchan_per_port[port]; 4486 num_tx_rings = parent->txchan_per_port[port]; 4487 4488 rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info), 4489 GFP_KERNEL); 4490 err = -ENOMEM; 4491 if (!rx_rings) 4492 goto out_err; 4493 4494 np->num_rx_rings = num_rx_rings; 4495 smp_wmb(); 4496 np->rx_rings = rx_rings; 4497 4498 netif_set_real_num_rx_queues(np->dev, num_rx_rings); 4499 4500 for (i = 0; i < np->num_rx_rings; i++) { 4501 struct rx_ring_info *rp = &np->rx_rings[i]; 4502 4503 rp->np = np; 4504 rp->rx_channel = first_rx_channel + i; 4505 4506 err = niu_alloc_rx_ring_info(np, rp); 4507 if (err) 4508 goto out_err; 4509 4510 niu_size_rbr(np, rp); 4511 4512 /* XXX better defaults, configurable, etc... 
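		 * (the RCR window/threshold/timeout defaults just below
		 * are hard-coded at ring-allocation time)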
static int niu_alloc_channels(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	int first_rx_channel, first_tx_channel;
	int num_rx_rings, num_tx_rings;
	struct rx_ring_info *rx_rings;
	struct tx_ring_info *tx_rings;
	int i, port, err;

	port = np->port;
	first_rx_channel = first_tx_channel = 0;
	for (i = 0; i < port; i++) {
		first_rx_channel += parent->rxchan_per_port[i];
		first_tx_channel += parent->txchan_per_port[i];
	}

	num_rx_rings = parent->rxchan_per_port[port];
	num_tx_rings = parent->txchan_per_port[port];

	rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
			   GFP_KERNEL);
	err = -ENOMEM;
	if (!rx_rings)
		goto out_err;

	np->num_rx_rings = num_rx_rings;
	smp_wmb();
	np->rx_rings = rx_rings;

	netif_set_real_num_rx_queues(np->dev, num_rx_rings);

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		rp->np = np;
		rp->rx_channel = first_rx_channel + i;

		err = niu_alloc_rx_ring_info(np, rp);
		if (err)
			goto out_err;

		niu_size_rbr(np, rp);

		/* XXX better defaults, configurable, etc... XXX */
		rp->nonsyn_window = 64;
		rp->nonsyn_threshold = rp->rcr_table_size - 64;
		rp->syn_window = 64;
		rp->syn_threshold = rp->rcr_table_size - 64;
		rp->rcr_pkt_threshold = 16;
		rp->rcr_timeout = 8;
		rp->rbr_kick_thresh = RBR_REFILL_MIN;
		if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
			rp->rbr_kick_thresh = rp->rbr_blocks_per_page;

		err = niu_rbr_fill(np, rp, GFP_KERNEL);
		if (err)
			goto out_err;
	}

	tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
			   GFP_KERNEL);
	err = -ENOMEM;
	if (!tx_rings)
		goto out_err;

	np->num_tx_rings = num_tx_rings;
	smp_wmb();
	np->tx_rings = tx_rings;

	netif_set_real_num_tx_queues(np->dev, num_tx_rings);

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		rp->np = np;
		rp->tx_channel = first_tx_channel + i;

		err = niu_alloc_tx_ring_info(np, rp);
		if (err)
			goto out_err;
	}

	return 0;

out_err:
	niu_free_channels(np);
	return err;
}

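/* Example (illustrative): the global DMA channels are carved up
 * contiguously per port.  With rxchan_per_port[] = { 8, 8 } on a
 * two-port board, port 0 owns RX channels 0-7 and port 1 starts at
 * first_rx_channel = 8, owning channels 8-15.  The same accumulation
 * is done independently for the TX channels.
 */
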
static int niu_tx_cs_sng_poll(struct niu *np, int channel)
{
	int limit = 1000;

	while (--limit > 0) {
		u64 val = nr64(TX_CS(channel));
		if (val & TX_CS_SNG_STATE)
			return 0;
	}
	return -ENODEV;
}

static int niu_tx_channel_stop(struct niu *np, int channel)
{
	u64 val = nr64(TX_CS(channel));

	val |= TX_CS_STOP_N_GO;
	nw64(TX_CS(channel), val);

	return niu_tx_cs_sng_poll(np, channel);
}

static int niu_tx_cs_reset_poll(struct niu *np, int channel)
{
	int limit = 1000;

	while (--limit > 0) {
		u64 val = nr64(TX_CS(channel));
		if (!(val & TX_CS_RST))
			return 0;
	}
	return -ENODEV;
}

static int niu_tx_channel_reset(struct niu *np, int channel)
{
	u64 val = nr64(TX_CS(channel));
	int err;

	val |= TX_CS_RST;
	nw64(TX_CS(channel), val);

	err = niu_tx_cs_reset_poll(np, channel);
	if (!err)
		nw64(TX_RING_KICK(channel), 0);

	return err;
}

static int niu_tx_channel_lpage_init(struct niu *np, int channel)
{
	u64 val;

	nw64(TX_LOG_MASK1(channel), 0);
	nw64(TX_LOG_VAL1(channel), 0);
	nw64(TX_LOG_MASK2(channel), 0);
	nw64(TX_LOG_VAL2(channel), 0);
	nw64(TX_LOG_PAGE_RELO1(channel), 0);
	nw64(TX_LOG_PAGE_RELO2(channel), 0);
	nw64(TX_LOG_PAGE_HDL(channel), 0);

	val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
	val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
	nw64(TX_LOG_PAGE_VLD(channel), val);

	/* XXX TXDMA 32bit mode? XXX */

	return 0;
}

static void niu_txc_enable_port(struct niu *np, int on)
{
	unsigned long flags;
	u64 val, mask;

	niu_lock_parent(np, flags);
	val = nr64(TXC_CONTROL);
	mask = (u64)1 << np->port;
	if (on) {
		val |= TXC_CONTROL_ENABLE | mask;
	} else {
		val &= ~mask;
		if ((val & ~TXC_CONTROL_ENABLE) == 0)
			val &= ~TXC_CONTROL_ENABLE;
	}
	nw64(TXC_CONTROL, val);
	niu_unlock_parent(np, flags);
}

static void niu_txc_set_imask(struct niu *np, u64 imask)
{
	unsigned long flags;
	u64 val;

	niu_lock_parent(np, flags);
	val = nr64(TXC_INT_MASK);
	val &= ~TXC_INT_MASK_VAL(np->port);
	val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
	/* Install the updated per-port interrupt mask; without this
	 * write-back the computed value is simply discarded.
	 */
	nw64(TXC_INT_MASK, val);
	niu_unlock_parent(np, flags);
}

static void niu_txc_port_dma_enable(struct niu *np, int on)
{
	u64 val = 0;

	if (on) {
		int i;

		for (i = 0; i < np->num_tx_rings; i++)
			val |= (1 << np->tx_rings[i].tx_channel);
	}
	nw64(TXC_PORT_DMA(np->port), val);
}

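/* Example (illustrative): TXC_PORT_DMA takes one bit per TX DMA channel
 * bound to the port.  If this port owns channels 4-7, the loop above
 * builds val = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7) = 0xf0;
 * passing on == 0 writes 0 and disables them all in one shot.
 */
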
static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	int err, channel = rp->tx_channel;
	u64 val, ring_len;

	err = niu_tx_channel_stop(np, channel);
	if (err)
		return err;

	err = niu_tx_channel_reset(np, channel);
	if (err)
		return err;

	err = niu_tx_channel_lpage_init(np, channel);
	if (err)
		return err;

	nw64(TXC_DMA_MAX(channel), rp->max_burst);
	nw64(TX_ENT_MSK(channel), 0);

	if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
			      TX_RNG_CFIG_STADDR)) {
		netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
			   channel, (unsigned long long)rp->descr_dma);
		return -EINVAL;
	}

	/* The length field in TX_RNG_CFIG is measured in 64-byte
	 * blocks.  rp->pending is the number of TX descriptors in
	 * our ring, 8 bytes each, so eight descriptors fit in one
	 * block; dividing the descriptor count by 8 yields the value
	 * the chip wants.
	 */
	ring_len = (rp->pending / 8);

	val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
	       rp->descr_dma);
	nw64(TX_RNG_CFIG(channel), val);

	if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
	    ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
		netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
			   channel, (unsigned long long)rp->mbox_dma);
		return -EINVAL;
	}
	nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
	nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);

	nw64(TX_CS(channel), 0);

	rp->last_pkt_cnt = 0;

	return 0;
}

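/* Worked example (illustrative): assuming a 256-entry TX ring, it
 * occupies 256 * 8 = 2048 bytes, i.e. ring_len = 256 / 8 = 32
 * sixty-four-byte blocks, which is the value shifted into the
 * TX_RNG_CFIG length field above.
 */
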
static void niu_init_rdc_groups(struct niu *np)
{
	struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
	int i, first_table_num = tp->first_table_num;

	for (i = 0; i < tp->num_tables; i++) {
		struct rdc_table *tbl = &tp->tables[i];
		int this_table = first_table_num + i;
		int slot;

		for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
			nw64(RDC_TBL(this_table, slot),
			     tbl->rxdma_channel[slot]);
	}

	nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
}

static void niu_init_drr_weight(struct niu *np)
{
	int type = phy_decode(np->parent->port_phy, np->port);
	u64 val;

	switch (type) {
	case PORT_TYPE_10G:
		val = PT_DRR_WEIGHT_DEFAULT_10G;
		break;

	case PORT_TYPE_1G:
	default:
		val = PT_DRR_WEIGHT_DEFAULT_1G;
		break;
	}
	nw64(PT_DRR_WT(np->port), val);
}

static int niu_init_hostinfo(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int i, err, num_alt = niu_num_alt_addr(np);
	int first_rdc_table = tp->first_table_num;

	err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	if (err)
		return err;

	err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
	if (err)
		return err;

	for (i = 0; i < num_alt; i++) {
		err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
		if (err)
			return err;
	}

	return 0;
}

static int niu_rx_channel_reset(struct niu *np, int channel)
{
	return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
				      RXDMA_CFIG1_RST, 1000, 10,
				      "RXDMA_CFIG1");
}

static int niu_rx_channel_lpage_init(struct niu *np, int channel)
{
	u64 val;

	nw64(RX_LOG_MASK1(channel), 0);
	nw64(RX_LOG_VAL1(channel), 0);
	nw64(RX_LOG_MASK2(channel), 0);
	nw64(RX_LOG_VAL2(channel), 0);
	nw64(RX_LOG_PAGE_RELO1(channel), 0);
	nw64(RX_LOG_PAGE_RELO2(channel), 0);
	nw64(RX_LOG_PAGE_HDL(channel), 0);

	val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
	val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
	nw64(RX_LOG_PAGE_VLD(channel), val);

	return 0;
}

static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
{
	u64 val;

	val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
	       ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
	       ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
	       ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
	nw64(RDC_RED_PARA(rp->rx_channel), val);
}

static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
{
	u64 val = 0;

	*ret = 0;
	switch (rp->rbr_block_size) {
	case 4 * 1024:
		val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
		break;
	case 8 * 1024:
		val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
		break;
	case 16 * 1024:
		val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
		break;
	case 32 * 1024:
		val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
		break;
	default:
		return -EINVAL;
	}
	val |= RBR_CFIG_B_VLD2;
	switch (rp->rbr_sizes[2]) {
	case 2 * 1024:
		val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
		break;
	case 4 * 1024:
		val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
		break;
	case 8 * 1024:
		val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
		break;
	case 16 * 1024:
		val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
		break;

	default:
		return -EINVAL;
	}
	val |= RBR_CFIG_B_VLD1;
	switch (rp->rbr_sizes[1]) {
	case 1 * 1024:
		val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
		break;
	case 2 * 1024:
		val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
		break;
	case 4 * 1024:
		val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
		break;
	case 8 * 1024:
		val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
		break;

	default:
		return -EINVAL;
	}
	val |= RBR_CFIG_B_VLD0;
	switch (rp->rbr_sizes[0]) {
	case 256:
		val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
		break;
	case 512:
		val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
		break;
	case 1 * 1024:
		val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
		break;
	case 2 * 1024:
		val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
		break;

	default:
		return -EINVAL;
	}

	*ret = val;
	return 0;
}

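/* Example (illustrative): for the standard-MTU, 4 KiB-page defaults set
 * up by niu_size_rbr() -- block size 4096 and buffer sizes
 * { 256, 1024, 2048, 4096 } -- the value built above is the OR of
 * RBR_BLKSIZE_4K, RBR_BUFSZ2_2K, RBR_BUFSZ1_1K and RBR_BUFSZ0_256 in
 * their respective fields, with all three VLD bits set (the fourth
 * buffer size is implicitly the block size itself).
 */
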
static int niu_enable_rx_channel(struct niu *np, int channel, int on)
{
	u64 val = nr64(RXDMA_CFIG1(channel));
	int limit;

	if (on)
		val |= RXDMA_CFIG1_EN;
	else
		val &= ~RXDMA_CFIG1_EN;
	nw64(RXDMA_CFIG1(channel), val);

	limit = 1000;
	while (--limit > 0) {
		if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
			break;
		udelay(10);
	}
	if (limit <= 0)
		return -ENODEV;
	return 0;
}

static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	int err, channel = rp->rx_channel;
	u64 val;

	err = niu_rx_channel_reset(np, channel);
	if (err)
		return err;

	err = niu_rx_channel_lpage_init(np, channel);
	if (err)
		return err;

	niu_rx_channel_wred_init(np, rp);

	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
	nw64(RX_DMA_CTL_STAT(channel),
	     (RX_DMA_CTL_STAT_MEX |
	      RX_DMA_CTL_STAT_RCRTHRES |
	      RX_DMA_CTL_STAT_RCRTO |
	      RX_DMA_CTL_STAT_RBR_EMPTY));
	nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
	nw64(RXDMA_CFIG2(channel),
	     ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
	      RXDMA_CFIG2_FULL_HDR));
	nw64(RBR_CFIG_A(channel),
	     ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
	     (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
	err = niu_compute_rbr_cfig_b(rp, &val);
	if (err)
		return err;
	nw64(RBR_CFIG_B(channel), val);
	nw64(RCRCFIG_A(channel),
	     ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
	     (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
	nw64(RCRCFIG_B(channel),
	     ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
	     RCRCFIG_B_ENTOUT |
	     ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));

	err = niu_enable_rx_channel(np, channel, 1);
	if (err)
		return err;

	nw64(RBR_KICK(channel), rp->rbr_index);

	val = nr64(RX_DMA_CTL_STAT(channel));
	val |= RX_DMA_CTL_STAT_RBR_EMPTY;
	nw64(RX_DMA_CTL_STAT(channel), val);

	return 0;
}

static int niu_init_rx_channels(struct niu *np)
{
	unsigned long flags;
	u64 seed = jiffies_64;
	int err, i;

	niu_lock_parent(np, flags);
	nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
	nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
	niu_unlock_parent(np, flags);

	/* XXX RXDMA 32bit mode? XXX */

	niu_init_rdc_groups(np);
	niu_init_drr_weight(np);

	err = niu_init_hostinfo(np);
	if (err)
		return err;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		err = niu_init_one_rx_channel(np, rp);
		if (err)
			return err;
	}

	return 0;
}

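/* Note on interrupt coalescing (illustrative): niu_init_one_rx_channel()
 * above programs RCRCFIG_B with the defaults chosen in
 * niu_alloc_channels(), rcr_pkt_threshold = 16 and rcr_timeout = 8, so
 * the channel is expected to signal RCRTHRES once 16 completions are
 * queued in the RCR, or RCRTO when the ENTOUT-qualified timeout expires
 * first with fewer completions pending.
 */
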
static int niu_set_ip_frag_rule(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_classifier *cp = &np->clas;
	struct niu_tcam_entry *tp;
	int index, err;

	index = cp->tcam_top;
	tp = &parent->tcam[index];

	/* Note that the noport bit is the same in both ipv4 and
	 * ipv6 format TCAM entries.
	 */
	memset(tp, 0, sizeof(*tp));
	tp->key[1] = TCAM_V4KEY1_NOPORT;
	tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
	tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
			  ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
	err = tcam_write(np, index, tp->key, tp->key_mask);
	if (err)
		return err;
	err = tcam_assoc_write(np, index, tp->assoc_data);
	if (err)
		return err;
	tp->valid = 1;
	cp->tcam_valid_entries++;

	return 0;
}

static int niu_init_classifier_hw(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_classifier *cp = &np->clas;
	int i, err;

	nw64(H1POLY, cp->h1_init);
	nw64(H2POLY, cp->h2_init);

	err = niu_init_hostinfo(np);
	if (err)
		return err;

	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
		struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];

		vlan_tbl_write(np, i, np->port,
			       vp->vlan_pref, vp->rdc_num);
	}

	for (i = 0; i < cp->num_alt_mac_mappings; i++) {
		struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];

		err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
						ap->rdc_num, ap->mac_pref);
		if (err)
			return err;
	}

	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
		int index = i - CLASS_CODE_USER_PROG1;

		err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
		if (err)
			return err;
		err = niu_set_flow_key(np, i, parent->flow_key[index]);
		if (err)
			return err;
	}

	err = niu_set_ip_frag_rule(np);
	if (err)
		return err;

	tcam_enable(np, 1);

	return 0;
}

static int niu_zcp_write(struct niu *np, int index, u64 *data)
{
	nw64(ZCP_RAM_DATA0, data[0]);
	nw64(ZCP_RAM_DATA1, data[1]);
	nw64(ZCP_RAM_DATA2, data[2]);
	nw64(ZCP_RAM_DATA3, data[3]);
	nw64(ZCP_RAM_DATA4, data[4]);
	nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
	nw64(ZCP_RAM_ACC,
	     (ZCP_RAM_ACC_WRITE |
	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));

	return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
				   1000, 100);
}

static int niu_zcp_read(struct niu *np, int index, u64 *data)
{
	int err;

	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
				  1000, 100);
	if (err) {
		netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
			   (unsigned long long)nr64(ZCP_RAM_ACC));
		return err;
	}

	nw64(ZCP_RAM_ACC,
	     (ZCP_RAM_ACC_READ |
	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));

	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
				  1000, 100);
	if (err) {
		netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
			   (unsigned long long)nr64(ZCP_RAM_ACC));
		return err;
	}

	data[0] = nr64(ZCP_RAM_DATA0);
	data[1] = nr64(ZCP_RAM_DATA1);
	data[2] = nr64(ZCP_RAM_DATA2);
	data[3] = nr64(ZCP_RAM_DATA3);
	data[4] = nr64(ZCP_RAM_DATA4);

	return 0;
}

static void niu_zcp_cfifo_reset(struct niu *np)
{
	u64 val = nr64(RESET_CFIFO);

	val |= RESET_CFIFO_RST(np->port);
	nw64(RESET_CFIFO, val);
	udelay(10);

	val &= ~RESET_CFIFO_RST(np->port);
	nw64(RESET_CFIFO, val);
}

static int niu_init_zcp(struct niu *np)
{
	u64 data[5], rbuf[5];
	int i, max, err;

	if (np->parent->plat_type != PLAT_TYPE_NIU) {
		if (np->port == 0 || np->port == 1)
			max = ATLAS_P0_P1_CFIFO_ENTRIES;
		else
			max = ATLAS_P2_P3_CFIFO_ENTRIES;
	} else
		max = NIU_CFIFO_ENTRIES;

	data[0] = 0;
	data[1] = 0;
	data[2] = 0;
	data[3] = 0;
	data[4] = 0;

	for (i = 0; i < max; i++) {
		err = niu_zcp_write(np, i, data);
		if (err)
			return err;
		err = niu_zcp_read(np, i, rbuf);
		if (err)
			return err;
	}

	niu_zcp_cfifo_reset(np);
	nw64(CFIFO_ECC(np->port), 0);
	nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
	(void) nr64(ZCP_INT_STAT);
	nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);

	return 0;
}

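/* The write-then-read sweep over every CFIFO entry above (and the
 * analogous DFIFO sweep in niu_init_ipp() below) zeroes the on-chip
 * RAM so that its ECC/parity state starts out consistent; presumably
 * the read-back also flushes any stale error indications before the
 * interrupt status is cleared and the mask programmed.
 */
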
static void niu_ipp_write(struct niu *np, int index, u64 *data)
{
	u64 val = nr64_ipp(IPP_CFIG);

	nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
	nw64_ipp(IPP_DFIFO_WR_PTR, index);
	nw64_ipp(IPP_DFIFO_WR0, data[0]);
	nw64_ipp(IPP_DFIFO_WR1, data[1]);
	nw64_ipp(IPP_DFIFO_WR2, data[2]);
	nw64_ipp(IPP_DFIFO_WR3, data[3]);
	nw64_ipp(IPP_DFIFO_WR4, data[4]);
	nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
}

static void niu_ipp_read(struct niu *np, int index, u64 *data)
{
	nw64_ipp(IPP_DFIFO_RD_PTR, index);
	data[0] = nr64_ipp(IPP_DFIFO_RD0);
	data[1] = nr64_ipp(IPP_DFIFO_RD1);
	data[2] = nr64_ipp(IPP_DFIFO_RD2);
	data[3] = nr64_ipp(IPP_DFIFO_RD3);
	data[4] = nr64_ipp(IPP_DFIFO_RD4);
}

static int niu_ipp_reset(struct niu *np)
{
	return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
					  1000, 100, "IPP_CFIG");
}

static int niu_init_ipp(struct niu *np)
{
	u64 data[5], rbuf[5], val;
	int i, max, err;

	if (np->parent->plat_type != PLAT_TYPE_NIU) {
		if (np->port == 0 || np->port == 1)
			max = ATLAS_P0_P1_DFIFO_ENTRIES;
		else
			max = ATLAS_P2_P3_DFIFO_ENTRIES;
	} else
		max = NIU_DFIFO_ENTRIES;

	data[0] = 0;
	data[1] = 0;
	data[2] = 0;
	data[3] = 0;
	data[4] = 0;

	for (i = 0; i < max; i++) {
		niu_ipp_write(np, i, data);
		niu_ipp_read(np, i, rbuf);
	}

	(void) nr64_ipp(IPP_INT_STAT);
	(void) nr64_ipp(IPP_INT_STAT);

	err = niu_ipp_reset(np);
	if (err)
		return err;

	(void) nr64_ipp(IPP_PKT_DIS);
	(void) nr64_ipp(IPP_BAD_CS_CNT);
	(void) nr64_ipp(IPP_ECC);

	(void) nr64_ipp(IPP_INT_STAT);

	nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);

	val = nr64_ipp(IPP_CFIG);
	val &= ~IPP_CFIG_IP_MAX_PKT;
	val |= (IPP_CFIG_IPP_ENABLE |
		IPP_CFIG_DFIFO_ECC_EN |
		IPP_CFIG_DROP_BAD_CRC |
		IPP_CFIG_CKSUM_EN |
		(0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
	nw64_ipp(IPP_CFIG, val);

	return 0;
}

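/* Note (illustrative): 0x1ffff written into the IP_MAX_PKT field above
 * is the field's all-ones value, i.e. a 131071-byte ceiling, which is
 * effectively "no software-imposed limit" and comfortably above the
 * 9216-byte jumbo maximum configured for the MACs in niu_init_tx_mac()
 * below.
 */
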
static void niu_handle_led(struct niu *np, int status)
{
	u64 val;
	val = nr64_mac(XMAC_CONFIG);

	if ((np->flags & NIU_FLAGS_10G) != 0 &&
	    (np->flags & NIU_FLAGS_FIBER) != 0) {
		if (status) {
			val |= XMAC_CONFIG_LED_POLARITY;
			val &= ~XMAC_CONFIG_FORCE_LED_ON;
		} else {
			val |= XMAC_CONFIG_FORCE_LED_ON;
			val &= ~XMAC_CONFIG_LED_POLARITY;
		}
	}

	nw64_mac(XMAC_CONFIG, val);
}

static void niu_init_xif_xmac(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u64 val;

	if (np->flags & NIU_FLAGS_XCVR_SERDES) {
		val = nr64(MIF_CONFIG);
		val |= MIF_CONFIG_ATCA_GE;
		nw64(MIF_CONFIG, val);
	}

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;

	val |= XMAC_CONFIG_TX_OUTPUT_EN;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
		val |= XMAC_CONFIG_LOOPBACK;
	} else {
		val &= ~XMAC_CONFIG_LOOPBACK;
	}

	if (np->flags & NIU_FLAGS_10G) {
		val &= ~XMAC_CONFIG_LFS_DISABLE;
	} else {
		val |= XMAC_CONFIG_LFS_DISABLE;
		if (!(np->flags & NIU_FLAGS_FIBER) &&
		    !(np->flags & NIU_FLAGS_XCVR_SERDES))
			val |= XMAC_CONFIG_1G_PCS_BYPASS;
		else
			val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
	}

	val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;

	if (lp->active_speed == SPEED_100)
		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
	else
		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;

	nw64_mac(XMAC_CONFIG, val);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_MODE_MASK;
	if (np->flags & NIU_FLAGS_10G) {
		val |= XMAC_CONFIG_MODE_XGMII;
	} else {
		if (lp->active_speed == SPEED_1000)
			val |= XMAC_CONFIG_MODE_GMII;
		else
			val |= XMAC_CONFIG_MODE_MII;
	}

	nw64_mac(XMAC_CONFIG, val);
}

static void niu_init_xif_bmac(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u64 val;

	val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;

	if (lp->loopback_mode == LOOPBACK_MAC)
		val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
	else
		val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;

	if (lp->active_speed == SPEED_1000)
		val |= BMAC_XIF_CONFIG_GMII_MODE;
	else
		val &= ~BMAC_XIF_CONFIG_GMII_MODE;

	val &= ~(BMAC_XIF_CONFIG_LINK_LED |
		 BMAC_XIF_CONFIG_LED_POLARITY);

	if (!(np->flags & NIU_FLAGS_10G) &&
	    !(np->flags & NIU_FLAGS_FIBER) &&
	    lp->active_speed == SPEED_100)
		val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
	else
		val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;

	nw64_mac(BMAC_XIF_CONFIG, val);
}

static void niu_init_xif(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_init_xif_xmac(np);
	else
		niu_init_xif_bmac(np);
}

static void niu_pcs_mii_reset(struct niu *np)
{
	int limit = 1000;
	u64 val = nr64_pcs(PCS_MII_CTL);
	val |= PCS_MII_CTL_RST;
	nw64_pcs(PCS_MII_CTL, val);
	while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
		udelay(100);
		val = nr64_pcs(PCS_MII_CTL);
	}
}

static void niu_xpcs_reset(struct niu *np)
{
	int limit = 1000;
	u64 val = nr64_xpcs(XPCS_CONTROL1);
	val |= XPCS_CONTROL1_RESET;
	nw64_xpcs(XPCS_CONTROL1, val);
	while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
		udelay(100);
		val = nr64_xpcs(XPCS_CONTROL1);
	}
}

static int niu_init_pcs(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u64 val;

	switch (np->flags & (NIU_FLAGS_10G |
			     NIU_FLAGS_FIBER |
			     NIU_FLAGS_XCVR_SERDES)) {
	case NIU_FLAGS_FIBER:
		/* 1G fiber */
		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
		nw64_pcs(PCS_DPATH_MODE, 0);
		niu_pcs_mii_reset(np);
		break;

	case NIU_FLAGS_10G:
	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
		/* 10G SERDES */
		if (!(np->flags & NIU_FLAGS_XMAC))
			return -EINVAL;

		/* 10G copper or fiber */
		val = nr64_mac(XMAC_CONFIG);
		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
		nw64_mac(XMAC_CONFIG, val);

		niu_xpcs_reset(np);

		val = nr64_xpcs(XPCS_CONTROL1);
		if (lp->loopback_mode == LOOPBACK_PHY)
			val |= XPCS_CONTROL1_LOOPBACK;
		else
			val &= ~XPCS_CONTROL1_LOOPBACK;
		nw64_xpcs(XPCS_CONTROL1, val);

		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
		break;

	case NIU_FLAGS_XCVR_SERDES:
		/* 1G SERDES */
		niu_pcs_mii_reset(np);
		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
		nw64_pcs(PCS_DPATH_MODE, 0);
		break;

	case 0:
		/* 1G copper */
	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
		/* 1G RGMII FIBER */
		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
		niu_pcs_mii_reset(np);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

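/* Summary of the flag combinations accepted by niu_init_pcs() above:
 *
 *	FIBER only			-> 1G fiber PCS
 *	10G, 10G|FIBER, 10G|XCVR_SERDES	-> 10G XPCS (XMAC required)
 *	XCVR_SERDES only		-> 1G SERDES PCS
 *	none of the three		-> 1G copper, MII datapath
 *	XCVR_SERDES|FIBER		-> 1G RGMII fiber, MII datapath
 *
 * Anything else (e.g. all three flags at once) is rejected with -EINVAL.
 */
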
static int niu_reset_tx_xmac(struct niu *np)
{
	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
					  (XTXMAC_SW_RST_REG_RS |
					   XTXMAC_SW_RST_SOFT_RST),
					  1000, 100, "XTXMAC_SW_RST");
}

static int niu_reset_tx_bmac(struct niu *np)
{
	int limit;

	nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
	limit = 1000;
	while (--limit >= 0) {
		if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
			break;
		udelay(100);
	}
	if (limit < 0) {
		dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
			np->port,
			(unsigned long long) nr64_mac(BTXMAC_SW_RST));
		return -ENODEV;
	}

	return 0;
}

static int niu_reset_tx_mac(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return niu_reset_tx_xmac(np);
	else
		return niu_reset_tx_bmac(np);
}

static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
{
	u64 val;

	val = nr64_mac(XMAC_MIN);
	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
		 XMAC_MIN_RX_MIN_PKT_SIZE);
	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
	nw64_mac(XMAC_MIN, val);

	nw64_mac(XMAC_MAX, max);

	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);

	val = nr64_mac(XMAC_IPG);
	if (np->flags & NIU_FLAGS_10G) {
		val &= ~XMAC_IPG_IPG_XGMII;
		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
	} else {
		val &= ~XMAC_IPG_IPG_MII_GMII;
		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
	}
	nw64_mac(XMAC_IPG, val);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
		 XMAC_CONFIG_STRETCH_MODE |
		 XMAC_CONFIG_VAR_MIN_IPG_EN |
		 XMAC_CONFIG_TX_ENABLE);
	nw64_mac(XMAC_CONFIG, val);

	nw64_mac(TXMAC_FRM_CNT, 0);
	nw64_mac(TXMAC_BYTE_CNT, 0);
}

static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
{
	u64 val;

	nw64_mac(BMAC_MIN_FRAME, min);
	nw64_mac(BMAC_MAX_FRAME, max);

	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
	nw64_mac(BMAC_PREAMBLE_SIZE, 7);

	val = nr64_mac(BTXMAC_CONFIG);
	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
		 BTXMAC_CONFIG_ENABLE);
	nw64_mac(BTXMAC_CONFIG, val);
}

static void niu_init_tx_mac(struct niu *np)
{
	u64 min, max;

	min = 64;
	if (np->dev->mtu > ETH_DATA_LEN)
		max = 9216;
	else
		max = 1522;

	/* The XMAC_MIN register only accepts values for TX min which
	 * have the low 3 bits cleared.
	 */
	BUG_ON(min & 0x7);

	if (np->flags & NIU_FLAGS_XMAC)
		niu_init_tx_xmac(np, min, max);
	else
		niu_init_tx_bmac(np, min, max);
}

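/* Example (illustrative): for the default 1500-byte MTU the MAC accepts
 * frames of 64..1522 bytes (1500 payload + 14 header + 4 VLAN tag +
 * 4 FCS); with a jumbo MTU the ceiling becomes 9216.  The min of 64 is
 * already a multiple of 8, so the BUG_ON() above never fires as the
 * code stands.
 */
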
static int niu_reset_rx_xmac(struct niu *np)
{
	int limit;

	nw64_mac(XRXMAC_SW_RST,
		 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
	limit = 1000;
	while (--limit >= 0) {
		if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
						 XRXMAC_SW_RST_SOFT_RST)))
			break;
		udelay(100);
	}
	if (limit < 0) {
		dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
			np->port,
			(unsigned long long) nr64_mac(XRXMAC_SW_RST));
		return -ENODEV;
	}

	return 0;
}

static int niu_reset_rx_bmac(struct niu *np)
{
	int limit;

	nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
	limit = 1000;
	while (--limit >= 0) {
		if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
			break;
		udelay(100);
	}
	if (limit < 0) {
		dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
			np->port,
			(unsigned long long) nr64_mac(BRXMAC_SW_RST));
		return -ENODEV;
	}

	return 0;
}

static int niu_reset_rx_mac(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return niu_reset_rx_xmac(np);
	else
		return niu_reset_rx_bmac(np);
}

static void niu_init_rx_xmac(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int first_rdc_table = tp->first_table_num;
	unsigned long i;
	u64 val;

	nw64_mac(XMAC_ADD_FILT0, 0);
	nw64_mac(XMAC_ADD_FILT1, 0);
	nw64_mac(XMAC_ADD_FILT2, 0);
	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
	for (i = 0; i < MAC_NUM_HASH; i++)
		nw64_mac(XMAC_HASH_TBL(i), 0);
	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
		 XMAC_CONFIG_PROMISCUOUS |
		 XMAC_CONFIG_PROMISC_GROUP |
		 XMAC_CONFIG_ERR_CHK_DIS |
		 XMAC_CONFIG_RX_CRC_CHK_DIS |
		 XMAC_CONFIG_RESERVED_MULTICAST |
		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
		 XMAC_CONFIG_ADDR_FILTER_EN |
		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
		 XMAC_CONFIG_STRIP_CRC |
		 XMAC_CONFIG_PASS_FLOW_CTRL |
		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
	val |= (XMAC_CONFIG_HASH_FILTER_EN);
	nw64_mac(XMAC_CONFIG, val);

	nw64_mac(RXMAC_BT_CNT, 0);
	nw64_mac(RXMAC_BC_FRM_CNT, 0);
	nw64_mac(RXMAC_MC_FRM_CNT, 0);
	nw64_mac(RXMAC_FRAG_CNT, 0);
	nw64_mac(RXMAC_HIST_CNT1, 0);
	nw64_mac(RXMAC_HIST_CNT2, 0);
	nw64_mac(RXMAC_HIST_CNT3, 0);
	nw64_mac(RXMAC_HIST_CNT4, 0);
	nw64_mac(RXMAC_HIST_CNT5, 0);
	nw64_mac(RXMAC_HIST_CNT6, 0);
	nw64_mac(RXMAC_HIST_CNT7, 0);
	nw64_mac(RXMAC_MPSZER_CNT, 0);
	nw64_mac(RXMAC_CRC_ER_CNT, 0);
	nw64_mac(RXMAC_CD_VIO_CNT, 0);
	nw64_mac(LINK_FAULT_CNT, 0);
}

static void niu_init_rx_bmac(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int first_rdc_table = tp->first_table_num;
	unsigned long i;
	u64 val;

	nw64_mac(BMAC_ADD_FILT0, 0);
	nw64_mac(BMAC_ADD_FILT1, 0);
	nw64_mac(BMAC_ADD_FILT2, 0);
	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
	for (i = 0; i < MAC_NUM_HASH; i++)
		nw64_mac(BMAC_HASH_TBL(i), 0);
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);

	val = nr64_mac(BRXMAC_CONFIG);
	val &= ~(BRXMAC_CONFIG_ENABLE |
		 BRXMAC_CONFIG_STRIP_PAD |
		 BRXMAC_CONFIG_STRIP_FCS |
		 BRXMAC_CONFIG_PROMISC |
		 BRXMAC_CONFIG_PROMISC_GRP |
		 BRXMAC_CONFIG_ADDR_FILT_EN |
		 BRXMAC_CONFIG_DISCARD_DIS);
	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
	nw64_mac(BRXMAC_CONFIG, val);

	val = nr64_mac(BMAC_ADDR_CMPEN);
	val |= BMAC_ADDR_CMPEN_EN0;
	nw64_mac(BMAC_ADDR_CMPEN, val);
}

static void niu_init_rx_mac(struct niu *np)
{
	niu_set_primary_mac(np, np->dev->dev_addr);

	if (np->flags & NIU_FLAGS_XMAC)
		niu_init_rx_xmac(np);
	else
		niu_init_rx_bmac(np);
}

static void niu_enable_tx_xmac(struct niu *np, int on)
{
	u64 val = nr64_mac(XMAC_CONFIG);

	if (on)
		val |= XMAC_CONFIG_TX_ENABLE;
	else
		val &= ~XMAC_CONFIG_TX_ENABLE;
	nw64_mac(XMAC_CONFIG, val);
}

static void niu_enable_tx_bmac(struct niu *np, int on)
{
	u64 val = nr64_mac(BTXMAC_CONFIG);

	if (on)
		val |= BTXMAC_CONFIG_ENABLE;
	else
		val &= ~BTXMAC_CONFIG_ENABLE;
	nw64_mac(BTXMAC_CONFIG, val);
}

static void niu_enable_tx_mac(struct niu *np, int on)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_enable_tx_xmac(np, on);
	else
		niu_enable_tx_bmac(np, on);
}

static void niu_enable_rx_xmac(struct niu *np, int on)
{
	u64 val = nr64_mac(XMAC_CONFIG);

	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
		 XMAC_CONFIG_PROMISCUOUS);

	if (np->flags & NIU_FLAGS_MCAST)
		val |= XMAC_CONFIG_HASH_FILTER_EN;
	if (np->flags & NIU_FLAGS_PROMISC)
		val |= XMAC_CONFIG_PROMISCUOUS;

	if (on)
		val |= XMAC_CONFIG_RX_MAC_ENABLE;
	else
		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
	nw64_mac(XMAC_CONFIG, val);
}

static void niu_enable_rx_bmac(struct niu *np, int on)
{
	u64 val = nr64_mac(BRXMAC_CONFIG);

	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
		 BRXMAC_CONFIG_PROMISC);

	if (np->flags & NIU_FLAGS_MCAST)
		val |= BRXMAC_CONFIG_HASH_FILT_EN;
	if (np->flags & NIU_FLAGS_PROMISC)
		val |= BRXMAC_CONFIG_PROMISC;

	if (on)
		val |= BRXMAC_CONFIG_ENABLE;
	else
		val &= ~BRXMAC_CONFIG_ENABLE;
	nw64_mac(BRXMAC_CONFIG, val);
}

static void niu_enable_rx_mac(struct niu *np, int on)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_enable_rx_xmac(np, on);
	else
		niu_enable_rx_bmac(np, on);
}

static int niu_init_mac(struct niu *np)
{
	int err;

	niu_init_xif(np);
	err = niu_init_pcs(np);
	if (err)
		return err;

	err = niu_reset_tx_mac(np);
	if (err)
		return err;
	niu_init_tx_mac(np);
	err = niu_reset_rx_mac(np);
	if (err)
		return err;
	niu_init_rx_mac(np);

	/* This looks hokey, but the RX MAC reset we just did will
	 * undo some of the state we set up in niu_init_tx_mac(), so
	 * we have to call it again.  In particular, the RX MAC reset
	 * will set the XMAC_MAX register back to its default value.
	 */
	niu_init_tx_mac(np);
	niu_enable_tx_mac(np, 1);

	niu_enable_rx_mac(np, 1);

	return 0;
}

static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_stop(np, rp->tx_channel);
}

static void niu_stop_tx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		niu_stop_one_tx_channel(np, rp);
	}
}

static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_reset(np, rp->tx_channel);
}

static void niu_reset_tx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		niu_reset_one_tx_channel(np, rp);
	}
}

static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
}

static void niu_stop_rx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_stop_one_rx_channel(np, rp);
	}
}

static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	int channel = rp->rx_channel;

	(void) niu_rx_channel_reset(np, channel);
	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
	nw64(RX_DMA_CTL_STAT(channel), 0);
	(void) niu_enable_rx_channel(np, channel, 0);
}

static void niu_reset_rx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_reset_one_rx_channel(np, rp);
	}
}

static void niu_disable_ipp(struct niu *np)
{
	u64 rd, wr, val;
	int limit;

	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	limit = 100;
	while (--limit >= 0 && (rd != wr)) {
		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	}
	if (limit < 0 &&
	    (rd != 0 && wr != 1)) {
		netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
			   (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
			   (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
	}

	val = nr64_ipp(IPP_CFIG);
	val &= ~(IPP_CFIG_IPP_ENABLE |
		 IPP_CFIG_DFIFO_ECC_EN |
		 IPP_CFIG_DROP_BAD_CRC |
		 IPP_CFIG_CKSUM_EN);
	nw64_ipp(IPP_CFIG, val);

	(void) niu_ipp_reset(np);
}

static int niu_init_hw(struct niu *np)
{
	int i, err;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
	niu_txc_enable_port(np, 1);
	niu_txc_port_dma_enable(np, 1);
	niu_txc_set_imask(np, 0);

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		err = niu_init_one_tx_channel(np, rp);
		if (err)
			return err;
	}

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
	err = niu_init_rx_channels(np);
	if (err)
		goto out_uninit_tx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
	err = niu_init_classifier_hw(np);
	if (err)
		goto out_uninit_rx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
	err = niu_init_zcp(np);
	if (err)
		goto out_uninit_rx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
	err = niu_init_ipp(np);
	if (err)
		goto out_uninit_rx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
	err = niu_init_mac(np);
	if (err)
		goto out_uninit_ipp;

	return 0;

out_uninit_ipp:
	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
	niu_disable_ipp(np);

out_uninit_rx_channels:
	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
	niu_stop_rx_channels(np);
	niu_reset_rx_channels(np);

out_uninit_tx_channels:
	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
	niu_stop_tx_channels(np);
	niu_reset_tx_channels(np);

	return err;
}

static void niu_stop_hw(struct niu *np)
{
	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
	niu_enable_interrupts(np, 0);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
	niu_enable_rx_mac(np, 0);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
	niu_disable_ipp(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
	niu_stop_tx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
	niu_stop_rx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
	niu_reset_tx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
	niu_reset_rx_channels(np);
}

static void niu_set_irq_name(struct niu *np)
{
	int port = np->port;
	int i, j = 1;

	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);

	if (port == 0) {
		sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
		sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
		j = 3;
	}

	for (i = 0; i < np->num_ldg - j; i++) {
		if (i < np->num_rx_rings)
			sprintf(np->irq_name[i+j], "%s-rx-%d",
				np->dev->name, i);
		else if (i < np->num_tx_rings + np->num_rx_rings)
			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
				i - np->num_rx_rings);
	}
}

static int niu_request_irq(struct niu *np)
{
	int i, j, err;

	niu_set_irq_name(np);

	err = 0;
	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
				  np->irq_name[i], lp);
		if (err)
			goto out_free_irqs;

	}

	return 0;

out_free_irqs:
	for (j = 0; j < i; j++) {
		struct niu_ldg *lp = &np->ldg[j];

		free_irq(lp->irq, lp);
	}
	return err;
}

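/* Example (illustrative): on port 0 of a device named "eth0" with two
 * RX and two TX rings, the names handed to request_irq() come out as
 * "eth0:MAC", "eth0:MIF", "eth0:SYSERR", then "eth0-rx-0", "eth0-rx-1",
 * "eth0-tx-0" and "eth0-tx-1" for the per-ring logical device groups.
 */
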
static void niu_free_irq(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		free_irq(lp->irq, lp);
	}
}

static void niu_enable_napi(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++)
		napi_enable_locked(&np->ldg[i].napi);
}

static void niu_disable_napi(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++)
		napi_disable(&np->ldg[i].napi);
}

static int niu_open(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);

	err = niu_alloc_channels(np);
	if (err)
		goto out_err;

	err = niu_enable_interrupts(np, 0);
	if (err)
		goto out_free_channels;

	err = niu_request_irq(np);
	if (err)
		goto out_free_channels;

	netdev_lock(dev);
	niu_enable_napi(np);
	netdev_unlock(dev);

	spin_lock_irq(&np->lock);

	err = niu_init_hw(np);
	if (!err) {
		timer_setup(&np->timer, niu_timer, 0);
		np->timer.expires = jiffies + HZ;

		err = niu_enable_interrupts(np, 1);
		if (err)
			niu_stop_hw(np);
	}

	spin_unlock_irq(&np->lock);

	if (err) {
		niu_disable_napi(np);
		goto out_free_irq;
	}

	netif_tx_start_all_queues(dev);

	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
		netif_carrier_on(dev);

	add_timer(&np->timer);

	return 0;

out_free_irq:
	niu_free_irq(np);

out_free_channels:
	niu_free_channels(np);

out_err:
	return err;
}

static void niu_full_shutdown(struct niu *np, struct net_device *dev)
{
	cancel_work_sync(&np->reset_task);

	niu_disable_napi(np);
	netif_tx_stop_all_queues(dev);

	timer_delete_sync(&np->timer);

	spin_lock_irq(&np->lock);

	niu_stop_hw(np);

	spin_unlock_irq(&np->lock);
}

static int niu_close(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	niu_full_shutdown(np, dev);

	niu_free_irq(np);

	niu_free_channels(np);

	niu_handle_led(np, 0);

	return 0;
}

static void niu_sync_xmac_stats(struct niu *np)
{
	struct niu_xmac_stats *mp = &np->mac_stats.xmac;

	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);

	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
}

static void niu_sync_bmac_stats(struct niu *np)
{
	struct niu_bmac_stats *mp = &np->mac_stats.bmac;

	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);

	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
	/* XXX This re-reads the alignment-error counter, so
	 * rx_crc_errors mirrors rx_align_errors rather than tracking a
	 * dedicated CRC counter. XXX
	 */
	mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
}

static void niu_sync_mac_stats(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_sync_xmac_stats(np);
	else
		niu_sync_bmac_stats(np);
}

static void niu_get_rx_stats(struct niu *np,
			     struct rtnl_link_stats64 *stats)
{
	u64 pkts, dropped, errors, bytes;
	struct rx_ring_info *rx_rings;
	int i;

	pkts = dropped = errors = bytes = 0;

	rx_rings = READ_ONCE(np->rx_rings);
	if (!rx_rings)
		goto no_rings;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &rx_rings[i];

		niu_sync_rx_discard_stats(np, rp, 0);

		pkts += rp->rx_packets;
		bytes += rp->rx_bytes;
		dropped += rp->rx_dropped;
		errors += rp->rx_errors;
	}

no_rings:
	stats->rx_packets = pkts;
	stats->rx_bytes = bytes;
	stats->rx_dropped = dropped;
	stats->rx_errors = errors;
}

static void niu_get_tx_stats(struct niu *np,
			     struct rtnl_link_stats64 *stats)
{
	u64 pkts, errors, bytes;
	struct tx_ring_info *tx_rings;
	int i;

	pkts = errors = bytes = 0;

	tx_rings = READ_ONCE(np->tx_rings);
	if (!tx_rings)
		goto no_rings;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &tx_rings[i];

		pkts += rp->tx_packets;
		bytes += rp->tx_bytes;
		errors += rp->tx_errors;
	}

no_rings:
	stats->tx_packets = pkts;
	stats->tx_bytes = bytes;
	stats->tx_errors = errors;
}

static void niu_get_stats(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct niu *np = netdev_priv(dev);

	if (netif_running(dev)) {
		niu_get_rx_stats(np, stats);
		niu_get_tx_stats(np, stats);
	}
}

static void niu_load_hash_xmac(struct niu *np, u16 *hash)
{
	int i;

	for (i = 0; i < 16; i++)
		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
}

static void niu_load_hash_bmac(struct niu *np, u16 *hash)
{
	int i;

	for (i = 0; i < 16; i++)
		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
}

static void niu_load_hash(struct niu *np, u16 *hash)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_load_hash_xmac(np, hash);
	else
		niu_load_hash_bmac(np, hash);
}

static void niu_set_rx_mode(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int i, alt_cnt, err;
	struct netdev_hw_addr *ha;
	unsigned long flags;
	u16 hash[16] = { 0, };

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);

	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
	if (dev->flags & IFF_PROMISC)
		np->flags |= NIU_FLAGS_PROMISC;
	if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
		np->flags |= NIU_FLAGS_MCAST;

	alt_cnt = netdev_uc_count(dev);
	if (alt_cnt > niu_num_alt_addr(np)) {
		alt_cnt = 0;
		np->flags |= NIU_FLAGS_PROMISC;
	}

	if (alt_cnt) {
		int index = 0;

		netdev_for_each_uc_addr(ha, dev) {
			err = niu_set_alt_mac(np, index, ha->addr);
			if (err)
				netdev_warn(dev, "Error %d adding alt mac %d\n",
					    err, index);
			err = niu_enable_alt_mac(np, index, 1);
			if (err)
				netdev_warn(dev, "Error %d enabling alt mac %d\n",
					    err, index);

			index++;
		}
	} else {
		int alt_start;
		if (np->flags & NIU_FLAGS_XMAC)
			alt_start = 0;
		else
			alt_start = 1;
		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
			err = niu_enable_alt_mac(np, i, 0);
			if (err)
				netdev_warn(dev, "Error %d disabling alt mac %d\n",
					    err, i);
		}
	}
	if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < 16; i++)
			hash[i] = 0xffff;
	} else if (!netdev_mc_empty(dev)) {
		netdev_for_each_mc_addr(ha, dev) {
			u32 crc = ether_crc_le(ETH_ALEN, ha->addr);

			crc >>= 24;
			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
		}
	}

	if (np->flags & NIU_FLAGS_MCAST)
		niu_load_hash(np, hash);

	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);
}

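/* Example (illustrative): the multicast filter above keys on the top
 * eight bits of the little-endian CRC-32 of the address.  If
 * ether_crc_le() yields 0xAB000000, then crc >>= 24 gives 0xAB; the
 * high nibble (0xA = 10) picks hash[10] and the low nibble (0xB = 11)
 * sets bit (15 - 11) = 4, so hash[10] |= 0x0010 before the table is
 * loaded into the MAC.
 */
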
static int niu_set_mac_addr(struct net_device *dev, void *p)
{
	struct niu *np = netdev_priv(dev);
	struct sockaddr *addr = p;
	unsigned long flags;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(dev, addr->sa_data);

	if (!netif_running(dev))
		return 0;

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);
	niu_set_primary_mac(np, dev->dev_addr);
	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}

static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

static void niu_netif_stop(struct niu *np)
{
	netif_trans_update(np->dev);	/* prevent tx timeout */

	niu_disable_napi(np);

	netif_tx_disable(np->dev);
}

static void niu_netif_start(struct niu *np)
{
	/* NOTE: unconditionally waking all TX queues is only
	 * appropriate so long as all callers are assured to have
	 * free tx slots (such as after niu_init_hw).
	 */
	netif_tx_wake_all_queues(np->dev);

	niu_enable_napi(np);

	niu_enable_interrupts(np, 1);
}

static void niu_reset_buffers(struct niu *np)
{
	int i, j, k, err;

	if (np->rx_rings) {
		for (i = 0; i < np->num_rx_rings; i++) {
			struct rx_ring_info *rp = &np->rx_rings[i];

			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
				struct page *page;

				page = rp->rxhash[j];
				while (page) {
					struct page *next = niu_next_page(page);
					u64 base = page->private;
					base = base >> RBR_DESCR_ADDR_SHIFT;
					rp->rbr[k++] = cpu_to_le32(base);
					page = next;
				}
			}
			for (; k < MAX_RBR_RING_SIZE; k++) {
				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
				if (unlikely(err))
					break;
			}

			rp->rbr_index = rp->rbr_table_size - 1;
			rp->rcr_index = 0;
			rp->rbr_pending = 0;
			rp->rbr_refill_pending = 0;
		}
	}
	if (np->tx_rings) {
		for (i = 0; i < np->num_tx_rings; i++) {
			struct tx_ring_info *rp = &np->tx_rings[i];

			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
				if (rp->tx_buffs[j].skb)
					(void) release_tx_packet(np, rp, j);
			}

			rp->pending = MAX_TX_RING_SIZE;
			rp->prod = 0;
			rp->cons = 0;
			rp->wrap_bit = 0;
		}
	}
}

static void niu_reset_task(struct work_struct *work)
{
	struct niu *np = container_of(work, struct niu, reset_task);
	unsigned long flags;
	int err;

	spin_lock_irqsave(&np->lock, flags);
	if (!netif_running(np->dev)) {
		spin_unlock_irqrestore(&np->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&np->lock, flags);

	timer_delete_sync(&np->timer);

	niu_netif_stop(np);

	spin_lock_irqsave(&np->lock, flags);

	niu_stop_hw(np);

	spin_unlock_irqrestore(&np->lock, flags);

	niu_reset_buffers(np);

	netdev_lock(np->dev);
	spin_lock_irqsave(&np->lock, flags);

	err = niu_init_hw(np);
	if (!err) {
		np->timer.expires = jiffies + HZ;
		add_timer(&np->timer);
		niu_netif_start(np);
	}

	spin_unlock_irqrestore(&np->lock, flags);
	netdev_unlock(np->dev);
}

static void niu_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct niu *np = netdev_priv(dev);

	dev_err(np->device, "%s: Transmit timed out, resetting\n",
		dev->name);

	schedule_work(&np->reset_task);
}

static void niu_set_txd(struct tx_ring_info *rp, int index,
			u64 mapping, u64 len, u64 mark,
			u64 n_frags)
{
	__le64 *desc = &rp->descr[index];

	*desc = cpu_to_le64(mark |
			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
			    (len << TX_DESC_TR_LEN_SHIFT) |
			    (mapping & TX_DESC_SAD));
}

static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
				u64 pad_bytes, u64 len)
{
	u16 eth_proto, eth_proto_inner;
	u64 csum_bits, l3off, ihl, ret;
	u8 ip_proto;
	int ipv6;

	eth_proto = be16_to_cpu(ehdr->h_proto);
	eth_proto_inner = eth_proto;
	if (eth_proto == ETH_P_8021Q) {
		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
		__be16 val = vp->h_vlan_encapsulated_proto;

		eth_proto_inner = be16_to_cpu(val);
	}

	ipv6 = ihl = 0;
	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		ip_proto = ip_hdr(skb)->protocol;
		ihl = ip_hdr(skb)->ihl;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		ip_proto = ipv6_hdr(skb)->nexthdr;
		ihl = (40 >> 2);
		ipv6 = 1;
		break;
	default:
		ip_proto = ihl = 0;
		break;
	}

	csum_bits = TXHDR_CSUM_NONE;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u64 start, stuff;

		csum_bits = (ip_proto == IPPROTO_TCP ?
			     TXHDR_CSUM_TCP :
			     (ip_proto == IPPROTO_UDP ?
			      TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));

		start = skb_checksum_start_offset(skb) -
			(pad_bytes + sizeof(struct tx_pkt_hdr));
		stuff = start + skb->csum_offset;

		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
	}

	l3off = skb_network_offset(skb) -
		(pad_bytes + sizeof(struct tx_pkt_hdr));

	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
	       (len << TXHDR_LEN_SHIFT) |
	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
	       (ihl << TXHDR_IHL_SHIFT) |
	       ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) |
	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
	       (ipv6 ? TXHDR_IP_VER : 0) |
	       csum_bits);

	return ret;
}

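/* Note (illustrative): the L3/L4 offsets packed above are measured in
 * 16-bit words relative to the start of the frame as the hardware sees
 * it, i.e. after the alignment pad and the struct tx_pkt_hdr that
 * niu_start_xmit() below pushes in front of the Ethernet header.  That
 * is why each byte offset is first rebased by (pad_bytes +
 * sizeof(struct tx_pkt_hdr)) and then divided by 2.
 */
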
skb_frag_off(frag), len, 6719 DMA_TO_DEVICE); 6720 6721 rp->tx_buffs[prod].skb = NULL; 6722 rp->tx_buffs[prod].mapping = mapping; 6723 6724 niu_set_txd(rp, prod, mapping, len, 0, 0); 6725 6726 prod = NEXT_TX(rp, prod); 6727 } 6728 6729 if (prod < rp->prod) 6730 rp->wrap_bit ^= TX_RING_KICK_WRAP; 6731 rp->prod = prod; 6732 6733 nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3)); 6734 6735 if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) { 6736 netif_tx_stop_queue(txq); 6737 if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)) 6738 netif_tx_wake_queue(txq); 6739 } 6740 6741 out: 6742 return NETDEV_TX_OK; 6743 6744 out_drop: 6745 rp->tx_errors++; 6746 kfree_skb(skb); 6747 goto out; 6748 } 6749 6750 static int niu_change_mtu(struct net_device *dev, int new_mtu) 6751 { 6752 struct niu *np = netdev_priv(dev); 6753 int err, orig_jumbo, new_jumbo; 6754 6755 orig_jumbo = (dev->mtu > ETH_DATA_LEN); 6756 new_jumbo = (new_mtu > ETH_DATA_LEN); 6757 6758 WRITE_ONCE(dev->mtu, new_mtu); 6759 6760 if (!netif_running(dev) || 6761 (orig_jumbo == new_jumbo)) 6762 return 0; 6763 6764 niu_full_shutdown(np, dev); 6765 6766 niu_free_channels(np); 6767 6768 netdev_lock(dev); 6769 niu_enable_napi(np); 6770 netdev_unlock(dev); 6771 6772 err = niu_alloc_channels(np); 6773 if (err) 6774 return err; 6775 6776 spin_lock_irq(&np->lock); 6777 6778 err = niu_init_hw(np); 6779 if (!err) { 6780 timer_setup(&np->timer, niu_timer, 0); 6781 np->timer.expires = jiffies + HZ; 6782 6783 err = niu_enable_interrupts(np, 1); 6784 if (err) 6785 niu_stop_hw(np); 6786 } 6787 6788 spin_unlock_irq(&np->lock); 6789 6790 if (!err) { 6791 netif_tx_start_all_queues(dev); 6792 if (np->link_config.loopback_mode != LOOPBACK_DISABLED) 6793 netif_carrier_on(dev); 6794 6795 add_timer(&np->timer); 6796 } 6797 6798 return err; 6799 } 6800 6801 static void niu_get_drvinfo(struct net_device *dev, 6802 struct ethtool_drvinfo *info) 6803 { 6804 struct niu *np = netdev_priv(dev); 6805 struct niu_vpd *vpd = &np->vpd; 6806 6807 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 6808 strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); 6809 snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d", 6810 vpd->fcode_major, vpd->fcode_minor); 6811 if (np->parent->plat_type != PLAT_TYPE_NIU) 6812 strscpy(info->bus_info, pci_name(np->pdev), 6813 sizeof(info->bus_info)); 6814 } 6815 6816 static int niu_get_link_ksettings(struct net_device *dev, 6817 struct ethtool_link_ksettings *cmd) 6818 { 6819 struct niu *np = netdev_priv(dev); 6820 struct niu_link_config *lp; 6821 6822 lp = &np->link_config; 6823 6824 memset(cmd, 0, sizeof(*cmd)); 6825 cmd->base.phy_address = np->phy_addr; 6826 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 6827 lp->supported); 6828 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 6829 lp->active_advertising); 6830 cmd->base.autoneg = lp->active_autoneg; 6831 cmd->base.speed = lp->active_speed; 6832 cmd->base.duplex = lp->active_duplex; 6833 cmd->base.port = (np->flags & NIU_FLAGS_FIBER) ? 
PORT_FIBRE : PORT_TP; 6834 6835 return 0; 6836 } 6837 6838 static int niu_set_link_ksettings(struct net_device *dev, 6839 const struct ethtool_link_ksettings *cmd) 6840 { 6841 struct niu *np = netdev_priv(dev); 6842 struct niu_link_config *lp = &np->link_config; 6843 6844 ethtool_convert_link_mode_to_legacy_u32(&lp->advertising, 6845 cmd->link_modes.advertising); 6846 lp->speed = cmd->base.speed; 6847 lp->duplex = cmd->base.duplex; 6848 lp->autoneg = cmd->base.autoneg; 6849 return niu_init_link(np); 6850 } 6851 6852 static u32 niu_get_msglevel(struct net_device *dev) 6853 { 6854 struct niu *np = netdev_priv(dev); 6855 return np->msg_enable; 6856 } 6857 6858 static void niu_set_msglevel(struct net_device *dev, u32 value) 6859 { 6860 struct niu *np = netdev_priv(dev); 6861 np->msg_enable = value; 6862 } 6863 6864 static int niu_nway_reset(struct net_device *dev) 6865 { 6866 struct niu *np = netdev_priv(dev); 6867 6868 if (np->link_config.autoneg) 6869 return niu_init_link(np); 6870 6871 return 0; 6872 } 6873 6874 static int niu_get_eeprom_len(struct net_device *dev) 6875 { 6876 struct niu *np = netdev_priv(dev); 6877 6878 return np->eeprom_len; 6879 } 6880 6881 static int niu_get_eeprom(struct net_device *dev, 6882 struct ethtool_eeprom *eeprom, u8 *data) 6883 { 6884 struct niu *np = netdev_priv(dev); 6885 u32 offset, len, val; 6886 6887 offset = eeprom->offset; 6888 len = eeprom->len; 6889 6890 if (offset + len < offset) 6891 return -EINVAL; 6892 if (offset >= np->eeprom_len) 6893 return -EINVAL; 6894 if (offset + len > np->eeprom_len) 6895 len = eeprom->len = np->eeprom_len - offset; 6896 6897 if (offset & 3) { 6898 u32 b_offset, b_count; 6899 6900 b_offset = offset & 3; 6901 b_count = 4 - b_offset; 6902 if (b_count > len) 6903 b_count = len; 6904 6905 val = nr64(ESPC_NCR((offset - b_offset) / 4)); 6906 memcpy(data, ((char *)&val) + b_offset, b_count); 6907 data += b_count; 6908 len -= b_count; 6909 offset += b_count; 6910 } 6911 while (len >= 4) { 6912 val = nr64(ESPC_NCR(offset / 4)); 6913 memcpy(data, &val, 4); 6914 data += 4; 6915 len -= 4; 6916 offset += 4; 6917 } 6918 if (len) { 6919 val = nr64(ESPC_NCR(offset / 4)); 6920 memcpy(data, &val, len); 6921 } 6922 return 0; 6923 } 6924 6925 static void niu_ethflow_to_l3proto(int flow_type, u8 *pid) 6926 { 6927 switch (flow_type) { 6928 case TCP_V4_FLOW: 6929 case TCP_V6_FLOW: 6930 *pid = IPPROTO_TCP; 6931 break; 6932 case UDP_V4_FLOW: 6933 case UDP_V6_FLOW: 6934 *pid = IPPROTO_UDP; 6935 break; 6936 case SCTP_V4_FLOW: 6937 case SCTP_V6_FLOW: 6938 *pid = IPPROTO_SCTP; 6939 break; 6940 case AH_V4_FLOW: 6941 case AH_V6_FLOW: 6942 *pid = IPPROTO_AH; 6943 break; 6944 case ESP_V4_FLOW: 6945 case ESP_V6_FLOW: 6946 *pid = IPPROTO_ESP; 6947 break; 6948 default: 6949 *pid = 0; 6950 break; 6951 } 6952 } 6953 6954 static int niu_class_to_ethflow(u64 class, int *flow_type) 6955 { 6956 switch (class) { 6957 case CLASS_CODE_TCP_IPV4: 6958 *flow_type = TCP_V4_FLOW; 6959 break; 6960 case CLASS_CODE_UDP_IPV4: 6961 *flow_type = UDP_V4_FLOW; 6962 break; 6963 case CLASS_CODE_AH_ESP_IPV4: 6964 *flow_type = AH_V4_FLOW; 6965 break; 6966 case CLASS_CODE_SCTP_IPV4: 6967 *flow_type = SCTP_V4_FLOW; 6968 break; 6969 case CLASS_CODE_TCP_IPV6: 6970 *flow_type = TCP_V6_FLOW; 6971 break; 6972 case CLASS_CODE_UDP_IPV6: 6973 *flow_type = UDP_V6_FLOW; 6974 break; 6975 case CLASS_CODE_AH_ESP_IPV6: 6976 *flow_type = AH_V6_FLOW; 6977 break; 6978 case CLASS_CODE_SCTP_IPV6: 6979 *flow_type = SCTP_V6_FLOW; 6980 break; 6981 case CLASS_CODE_USER_PROG1: 6982 case 
CLASS_CODE_USER_PROG2: 6983 case CLASS_CODE_USER_PROG3: 6984 case CLASS_CODE_USER_PROG4: 6985 *flow_type = IP_USER_FLOW; 6986 break; 6987 default: 6988 return -EINVAL; 6989 } 6990 6991 return 0; 6992 } 6993 6994 static int niu_ethflow_to_class(int flow_type, u64 *class) 6995 { 6996 switch (flow_type) { 6997 case TCP_V4_FLOW: 6998 *class = CLASS_CODE_TCP_IPV4; 6999 break; 7000 case UDP_V4_FLOW: 7001 *class = CLASS_CODE_UDP_IPV4; 7002 break; 7003 case AH_ESP_V4_FLOW: 7004 case AH_V4_FLOW: 7005 case ESP_V4_FLOW: 7006 *class = CLASS_CODE_AH_ESP_IPV4; 7007 break; 7008 case SCTP_V4_FLOW: 7009 *class = CLASS_CODE_SCTP_IPV4; 7010 break; 7011 case TCP_V6_FLOW: 7012 *class = CLASS_CODE_TCP_IPV6; 7013 break; 7014 case UDP_V6_FLOW: 7015 *class = CLASS_CODE_UDP_IPV6; 7016 break; 7017 case AH_ESP_V6_FLOW: 7018 case AH_V6_FLOW: 7019 case ESP_V6_FLOW: 7020 *class = CLASS_CODE_AH_ESP_IPV6; 7021 break; 7022 case SCTP_V6_FLOW: 7023 *class = CLASS_CODE_SCTP_IPV6; 7024 break; 7025 default: 7026 return 0; 7027 } 7028 7029 return 1; 7030 } 7031 7032 static u64 niu_flowkey_to_ethflow(u64 flow_key) 7033 { 7034 u64 ethflow = 0; 7035 7036 if (flow_key & FLOW_KEY_L2DA) 7037 ethflow |= RXH_L2DA; 7038 if (flow_key & FLOW_KEY_VLAN) 7039 ethflow |= RXH_VLAN; 7040 if (flow_key & FLOW_KEY_IPSA) 7041 ethflow |= RXH_IP_SRC; 7042 if (flow_key & FLOW_KEY_IPDA) 7043 ethflow |= RXH_IP_DST; 7044 if (flow_key & FLOW_KEY_PROTO) 7045 ethflow |= RXH_L3_PROTO; 7046 if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT)) 7047 ethflow |= RXH_L4_B_0_1; 7048 if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT)) 7049 ethflow |= RXH_L4_B_2_3; 7050 7051 return ethflow; 7052 7053 } 7054 7055 static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key) 7056 { 7057 u64 key = 0; 7058 7059 if (ethflow & RXH_L2DA) 7060 key |= FLOW_KEY_L2DA; 7061 if (ethflow & RXH_VLAN) 7062 key |= FLOW_KEY_VLAN; 7063 if (ethflow & RXH_IP_SRC) 7064 key |= FLOW_KEY_IPSA; 7065 if (ethflow & RXH_IP_DST) 7066 key |= FLOW_KEY_IPDA; 7067 if (ethflow & RXH_L3_PROTO) 7068 key |= FLOW_KEY_PROTO; 7069 if (ethflow & RXH_L4_B_0_1) 7070 key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT); 7071 if (ethflow & RXH_L4_B_2_3) 7072 key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT); 7073 7074 *flow_key = key; 7075 7076 return 1; 7077 7078 } 7079 7080 static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) 7081 { 7082 u64 class; 7083 7084 nfc->data = 0; 7085 7086 if (!niu_ethflow_to_class(nfc->flow_type, &class)) 7087 return -EINVAL; 7088 7089 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & 7090 TCAM_KEY_DISC) 7091 nfc->data = RXH_DISCARD; 7092 else 7093 nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class - 7094 CLASS_CODE_USER_PROG1]); 7095 return 0; 7096 } 7097 7098 static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp, 7099 struct ethtool_rx_flow_spec *fsp) 7100 { 7101 u32 tmp; 7102 u16 prt; 7103 7104 tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT; 7105 fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp); 7106 7107 tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT; 7108 fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp); 7109 7110 tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT; 7111 fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp); 7112 7113 tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT; 7114 fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp); 7115 7116 fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >> 7117 TCAM_V4KEY2_TOS_SHIFT; 7118 
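	/* tp->key[] holds the values to match and tp->key_mask[] the
	 * corresponding care bits, so each field is decoded twice: once
	 * into the h_u (value) half of the flow spec and once into the
	 * m_u (mask) half.
	 */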
fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >> 7119 TCAM_V4KEY2_TOS_SHIFT; 7120 7121 switch (fsp->flow_type) { 7122 case TCP_V4_FLOW: 7123 case UDP_V4_FLOW: 7124 case SCTP_V4_FLOW: 7125 prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> 7126 TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; 7127 fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt); 7128 7129 prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> 7130 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; 7131 fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt); 7132 7133 prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> 7134 TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; 7135 fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt); 7136 7137 prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> 7138 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; 7139 fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt); 7140 break; 7141 case AH_V4_FLOW: 7142 case ESP_V4_FLOW: 7143 tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> 7144 TCAM_V4KEY2_PORT_SPI_SHIFT; 7145 fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp); 7146 7147 tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> 7148 TCAM_V4KEY2_PORT_SPI_SHIFT; 7149 fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp); 7150 break; 7151 case IP_USER_FLOW: 7152 tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> 7153 TCAM_V4KEY2_PORT_SPI_SHIFT; 7154 fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp); 7155 7156 tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> 7157 TCAM_V4KEY2_PORT_SPI_SHIFT; 7158 fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp); 7159 7160 fsp->h_u.usr_ip4_spec.proto = 7161 (tp->key[2] & TCAM_V4KEY2_PROTO) >> 7162 TCAM_V4KEY2_PROTO_SHIFT; 7163 fsp->m_u.usr_ip4_spec.proto = 7164 (tp->key_mask[2] & TCAM_V4KEY2_PROTO) >> 7165 TCAM_V4KEY2_PROTO_SHIFT; 7166 7167 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; 7168 break; 7169 default: 7170 break; 7171 } 7172 } 7173 7174 static int niu_get_ethtool_tcam_entry(struct niu *np, 7175 struct ethtool_rxnfc *nfc) 7176 { 7177 struct niu_parent *parent = np->parent; 7178 struct niu_tcam_entry *tp; 7179 struct ethtool_rx_flow_spec *fsp = &nfc->fs; 7180 u16 idx; 7181 u64 class; 7182 int ret = 0; 7183 7184 idx = tcam_get_index(np, (u16)nfc->fs.location); 7185 7186 tp = &parent->tcam[idx]; 7187 if (!tp->valid) { 7188 netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n", 7189 parent->index, (u16)nfc->fs.location, idx); 7190 return -EINVAL; 7191 } 7192 7193 /* fill the flow spec entry */ 7194 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> 7195 TCAM_V4KEY0_CLASS_CODE_SHIFT; 7196 ret = niu_class_to_ethflow(class, &fsp->flow_type); 7197 if (ret < 0) { 7198 netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", 7199 parent->index); 7200 goto out; 7201 } 7202 7203 if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) { 7204 u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >> 7205 TCAM_V4KEY2_PROTO_SHIFT; 7206 if (proto == IPPROTO_ESP) { 7207 if (fsp->flow_type == AH_V4_FLOW) 7208 fsp->flow_type = ESP_V4_FLOW; 7209 else 7210 fsp->flow_type = ESP_V6_FLOW; 7211 } 7212 } 7213 7214 switch (fsp->flow_type) { 7215 case TCP_V4_FLOW: 7216 case UDP_V4_FLOW: 7217 case SCTP_V4_FLOW: 7218 case AH_V4_FLOW: 7219 case ESP_V4_FLOW: 7220 niu_get_ip4fs_from_tcam_key(tp, fsp); 7221 break; 7222 case TCP_V6_FLOW: 7223 case UDP_V6_FLOW: 7224 case SCTP_V6_FLOW: 7225 case AH_V6_FLOW: 7226 case ESP_V6_FLOW: 7227 /* Not yet implemented */ 7228 ret = -EINVAL; 7229 break; 7230 case IP_USER_FLOW: 7231 niu_get_ip4fs_from_tcam_key(tp, fsp); 7232 break; 7233 default: 7234 ret = -EINVAL; 7235 break; 7236 } 7237 7238 if (ret < 0) 7239 goto out; 7240 7241 if 
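	/* The association data either discards the matched flow outright
	 * (TCAM_ASSOCDATA_DISC) or carries an RDC offset that steers it
	 * to a receive ring; RX_CLS_FLOW_DISC is the ethtool encoding of
	 * the discard case.
	 */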
(tp->assoc_data & TCAM_ASSOCDATA_DISC) 7242 fsp->ring_cookie = RX_CLS_FLOW_DISC; 7243 else 7244 fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >> 7245 TCAM_ASSOCDATA_OFFSET_SHIFT; 7246 7247 /* put the tcam size here */ 7248 nfc->data = tcam_get_size(np); 7249 out: 7250 return ret; 7251 } 7252 7253 static int niu_get_ethtool_tcam_all(struct niu *np, 7254 struct ethtool_rxnfc *nfc, 7255 u32 *rule_locs) 7256 { 7257 struct niu_parent *parent = np->parent; 7258 struct niu_tcam_entry *tp; 7259 int i, idx, cnt; 7260 unsigned long flags; 7261 int ret = 0; 7262 7263 /* put the tcam size here */ 7264 nfc->data = tcam_get_size(np); 7265 7266 niu_lock_parent(np, flags); 7267 for (cnt = 0, i = 0; i < nfc->data; i++) { 7268 idx = tcam_get_index(np, i); 7269 tp = &parent->tcam[idx]; 7270 if (!tp->valid) 7271 continue; 7272 if (cnt == nfc->rule_cnt) { 7273 ret = -EMSGSIZE; 7274 break; 7275 } 7276 rule_locs[cnt] = i; 7277 cnt++; 7278 } 7279 niu_unlock_parent(np, flags); 7280 7281 nfc->rule_cnt = cnt; 7282 7283 return ret; 7284 } 7285 7286 static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 7287 u32 *rule_locs) 7288 { 7289 struct niu *np = netdev_priv(dev); 7290 int ret = 0; 7291 7292 switch (cmd->cmd) { 7293 case ETHTOOL_GRXFH: 7294 ret = niu_get_hash_opts(np, cmd); 7295 break; 7296 case ETHTOOL_GRXRINGS: 7297 cmd->data = np->num_rx_rings; 7298 break; 7299 case ETHTOOL_GRXCLSRLCNT: 7300 cmd->rule_cnt = tcam_get_valid_entry_cnt(np); 7301 break; 7302 case ETHTOOL_GRXCLSRULE: 7303 ret = niu_get_ethtool_tcam_entry(np, cmd); 7304 break; 7305 case ETHTOOL_GRXCLSRLALL: 7306 ret = niu_get_ethtool_tcam_all(np, cmd, rule_locs); 7307 break; 7308 default: 7309 ret = -EINVAL; 7310 break; 7311 } 7312 7313 return ret; 7314 } 7315 7316 static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) 7317 { 7318 u64 class; 7319 u64 flow_key = 0; 7320 unsigned long flags; 7321 7322 if (!niu_ethflow_to_class(nfc->flow_type, &class)) 7323 return -EINVAL; 7324 7325 if (class < CLASS_CODE_USER_PROG1 || 7326 class > CLASS_CODE_SCTP_IPV6) 7327 return -EINVAL; 7328 7329 if (nfc->data & RXH_DISCARD) { 7330 niu_lock_parent(np, flags); 7331 flow_key = np->parent->tcam_key[class - 7332 CLASS_CODE_USER_PROG1]; 7333 flow_key |= TCAM_KEY_DISC; 7334 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key); 7335 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key; 7336 niu_unlock_parent(np, flags); 7337 return 0; 7338 } else { 7339 /* Discard was set before, but is not set now */ 7340 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & 7341 TCAM_KEY_DISC) { 7342 niu_lock_parent(np, flags); 7343 flow_key = np->parent->tcam_key[class - 7344 CLASS_CODE_USER_PROG1]; 7345 flow_key &= ~TCAM_KEY_DISC; 7346 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), 7347 flow_key); 7348 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = 7349 flow_key; 7350 niu_unlock_parent(np, flags); 7351 } 7352 } 7353 7354 if (!niu_ethflow_to_flowkey(nfc->data, &flow_key)) 7355 return -EINVAL; 7356 7357 niu_lock_parent(np, flags); 7358 nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key); 7359 np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key; 7360 niu_unlock_parent(np, flags); 7361 7362 return 0; 7363 } 7364 7365 static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp, 7366 struct niu_tcam_entry *tp, 7367 int l2_rdc_tab, u64 class) 7368 { 7369 u8 pid = 0; 7370 u32 sip, dip, sipm, dipm, spi, spim; 7371 u16 sport, dport, spm, dpm; 7372 7373 sip = 
be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src); 7374 sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src); 7375 dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst); 7376 dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst); 7377 7378 tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT; 7379 tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE; 7380 tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT; 7381 tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM; 7382 7383 tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT; 7384 tp->key[3] |= dip; 7385 7386 tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT; 7387 tp->key_mask[3] |= dipm; 7388 7389 tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos << 7390 TCAM_V4KEY2_TOS_SHIFT); 7391 tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos << 7392 TCAM_V4KEY2_TOS_SHIFT); 7393 switch (fsp->flow_type) { 7394 case TCP_V4_FLOW: 7395 case UDP_V4_FLOW: 7396 case SCTP_V4_FLOW: 7397 sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc); 7398 spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc); 7399 dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst); 7400 dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst); 7401 7402 tp->key[2] |= (((u64)sport << 16) | dport); 7403 tp->key_mask[2] |= (((u64)spm << 16) | dpm); 7404 niu_ethflow_to_l3proto(fsp->flow_type, &pid); 7405 break; 7406 case AH_V4_FLOW: 7407 case ESP_V4_FLOW: 7408 spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi); 7409 spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi); 7410 7411 tp->key[2] |= spi; 7412 tp->key_mask[2] |= spim; 7413 niu_ethflow_to_l3proto(fsp->flow_type, &pid); 7414 break; 7415 case IP_USER_FLOW: 7416 spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes); 7417 spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes); 7418 7419 tp->key[2] |= spi; 7420 tp->key_mask[2] |= spim; 7421 pid = fsp->h_u.usr_ip4_spec.proto; 7422 break; 7423 default: 7424 break; 7425 } 7426 7427 tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT); 7428 if (pid) { 7429 tp->key_mask[2] |= TCAM_V4KEY2_PROTO; 7430 } 7431 } 7432 7433 static int niu_add_ethtool_tcam_entry(struct niu *np, 7434 struct ethtool_rxnfc *nfc) 7435 { 7436 struct niu_parent *parent = np->parent; 7437 struct niu_tcam_entry *tp; 7438 struct ethtool_rx_flow_spec *fsp = &nfc->fs; 7439 struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port]; 7440 int l2_rdc_table = rdc_table->first_table_num; 7441 u16 idx; 7442 u64 class; 7443 unsigned long flags; 7444 int err, ret; 7445 7446 ret = 0; 7447 7448 idx = nfc->fs.location; 7449 if (idx >= tcam_get_size(np)) 7450 return -EINVAL; 7451 7452 if (fsp->flow_type == IP_USER_FLOW) { 7453 int i; 7454 int add_usr_cls = 0; 7455 struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec; 7456 struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec; 7457 7458 if (uspec->ip_ver != ETH_RX_NFC_IP4) 7459 return -EINVAL; 7460 7461 niu_lock_parent(np, flags); 7462 7463 for (i = 0; i < NIU_L3_PROG_CLS; i++) { 7464 if (parent->l3_cls[i]) { 7465 if (uspec->proto == parent->l3_cls_pid[i]) { 7466 class = parent->l3_cls[i]; 7467 parent->l3_cls_refcnt[i]++; 7468 add_usr_cls = 1; 7469 break; 7470 } 7471 } else { 7472 /* Program new user IP class */ 7473 switch (i) { 7474 case 0: 7475 class = CLASS_CODE_USER_PROG1; 7476 break; 7477 case 1: 7478 class = CLASS_CODE_USER_PROG2; 7479 break; 7480 case 2: 7481 class = CLASS_CODE_USER_PROG3; 7482 break; 7483 case 3: 7484 class = CLASS_CODE_USER_PROG4; 7485 break; 7486 default: 7487 class = CLASS_CODE_UNRECOG; 7488 break; 7489 } 7490 ret = tcam_user_ip_class_set(np, class, 0, 7491 uspec->proto, 7492 uspec->tos, 7493 umask->tos); 7494 if (ret) 7495 goto 
out; 7496 7497 ret = tcam_user_ip_class_enable(np, class, 1); 7498 if (ret) 7499 goto out; 7500 parent->l3_cls[i] = class; 7501 parent->l3_cls_pid[i] = uspec->proto; 7502 parent->l3_cls_refcnt[i]++; 7503 add_usr_cls = 1; 7504 break; 7505 } 7506 } 7507 if (!add_usr_cls) { 7508 netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n", 7509 parent->index, __func__, uspec->proto); 7510 ret = -EINVAL; 7511 goto out; 7512 } 7513 niu_unlock_parent(np, flags); 7514 } else { 7515 if (!niu_ethflow_to_class(fsp->flow_type, &class)) { 7516 return -EINVAL; 7517 } 7518 } 7519 7520 niu_lock_parent(np, flags); 7521 7522 idx = tcam_get_index(np, idx); 7523 tp = &parent->tcam[idx]; 7524 7525 memset(tp, 0, sizeof(*tp)); 7526 7527 /* fill in the tcam key and mask */ 7528 switch (fsp->flow_type) { 7529 case TCP_V4_FLOW: 7530 case UDP_V4_FLOW: 7531 case SCTP_V4_FLOW: 7532 case AH_V4_FLOW: 7533 case ESP_V4_FLOW: 7534 niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class); 7535 break; 7536 case TCP_V6_FLOW: 7537 case UDP_V6_FLOW: 7538 case SCTP_V6_FLOW: 7539 case AH_V6_FLOW: 7540 case ESP_V6_FLOW: 7541 /* Not yet implemented */ 7542 netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n", 7543 parent->index, __func__, fsp->flow_type); 7544 ret = -EINVAL; 7545 goto out; 7546 case IP_USER_FLOW: 7547 niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class); 7548 break; 7549 default: 7550 netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n", 7551 parent->index, __func__, fsp->flow_type); 7552 ret = -EINVAL; 7553 goto out; 7554 } 7555 7556 /* fill in the assoc data */ 7557 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { 7558 tp->assoc_data = TCAM_ASSOCDATA_DISC; 7559 } else { 7560 if (fsp->ring_cookie >= np->num_rx_rings) { 7561 netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n", 7562 parent->index, __func__, 7563 (long long)fsp->ring_cookie); 7564 ret = -EINVAL; 7565 goto out; 7566 } 7567 tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | 7568 (fsp->ring_cookie << 7569 TCAM_ASSOCDATA_OFFSET_SHIFT)); 7570 } 7571 7572 err = tcam_write(np, idx, tp->key, tp->key_mask); 7573 if (err) { 7574 ret = -EINVAL; 7575 goto out; 7576 } 7577 err = tcam_assoc_write(np, idx, tp->assoc_data); 7578 if (err) { 7579 ret = -EINVAL; 7580 goto out; 7581 } 7582 7583 /* validate the entry */ 7584 tp->valid = 1; 7585 np->clas.tcam_valid_entries++; 7586 out: 7587 niu_unlock_parent(np, flags); 7588 7589 return ret; 7590 } 7591 7592 static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc) 7593 { 7594 struct niu_parent *parent = np->parent; 7595 struct niu_tcam_entry *tp; 7596 u16 idx; 7597 unsigned long flags; 7598 u64 class; 7599 int ret = 0; 7600 7601 if (loc >= tcam_get_size(np)) 7602 return -EINVAL; 7603 7604 niu_lock_parent(np, flags); 7605 7606 idx = tcam_get_index(np, loc); 7607 tp = &parent->tcam[idx]; 7608 7609 /* if the entry is of a user defined class, then update*/ 7610 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> 7611 TCAM_V4KEY0_CLASS_CODE_SHIFT; 7612 7613 if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) { 7614 int i; 7615 for (i = 0; i < NIU_L3_PROG_CLS; i++) { 7616 if (parent->l3_cls[i] == class) { 7617 parent->l3_cls_refcnt[i]--; 7618 if (!parent->l3_cls_refcnt[i]) { 7619 /* disable class */ 7620 ret = tcam_user_ip_class_enable(np, 7621 class, 7622 0); 7623 if (ret) 7624 goto out; 7625 parent->l3_cls[i] = 0; 7626 parent->l3_cls_pid[i] = 0; 7627 } 7628 break; 7629 } 7630 } 7631 if (i == NIU_L3_PROG_CLS) { 7632 netdev_info(np->dev, "niu%d: In 
%s(): Usr class 0x%llx not found\n", 7633 parent->index, __func__, 7634 (unsigned long long)class); 7635 ret = -EINVAL; 7636 goto out; 7637 } 7638 } 7639 7640 ret = tcam_flush(np, idx); 7641 if (ret) 7642 goto out; 7643 7644 /* invalidate the entry */ 7645 tp->valid = 0; 7646 np->clas.tcam_valid_entries--; 7647 out: 7648 niu_unlock_parent(np, flags); 7649 7650 return ret; 7651 } 7652 7653 static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) 7654 { 7655 struct niu *np = netdev_priv(dev); 7656 int ret = 0; 7657 7658 switch (cmd->cmd) { 7659 case ETHTOOL_SRXFH: 7660 ret = niu_set_hash_opts(np, cmd); 7661 break; 7662 case ETHTOOL_SRXCLSRLINS: 7663 ret = niu_add_ethtool_tcam_entry(np, cmd); 7664 break; 7665 case ETHTOOL_SRXCLSRLDEL: 7666 ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location); 7667 break; 7668 default: 7669 ret = -EINVAL; 7670 break; 7671 } 7672 7673 return ret; 7674 } 7675 7676 static const struct { 7677 const char string[ETH_GSTRING_LEN]; 7678 } niu_xmac_stat_keys[] = { 7679 { "tx_frames" }, 7680 { "tx_bytes" }, 7681 { "tx_fifo_errors" }, 7682 { "tx_overflow_errors" }, 7683 { "tx_max_pkt_size_errors" }, 7684 { "tx_underflow_errors" }, 7685 { "rx_local_faults" }, 7686 { "rx_remote_faults" }, 7687 { "rx_link_faults" }, 7688 { "rx_align_errors" }, 7689 { "rx_frags" }, 7690 { "rx_mcasts" }, 7691 { "rx_bcasts" }, 7692 { "rx_hist_cnt1" }, 7693 { "rx_hist_cnt2" }, 7694 { "rx_hist_cnt3" }, 7695 { "rx_hist_cnt4" }, 7696 { "rx_hist_cnt5" }, 7697 { "rx_hist_cnt6" }, 7698 { "rx_hist_cnt7" }, 7699 { "rx_octets" }, 7700 { "rx_code_violations" }, 7701 { "rx_len_errors" }, 7702 { "rx_crc_errors" }, 7703 { "rx_underflows" }, 7704 { "rx_overflows" }, 7705 { "pause_off_state" }, 7706 { "pause_on_state" }, 7707 { "pause_received" }, 7708 }; 7709 7710 #define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys) 7711 7712 static const struct { 7713 const char string[ETH_GSTRING_LEN]; 7714 } niu_bmac_stat_keys[] = { 7715 { "tx_underflow_errors" }, 7716 { "tx_max_pkt_size_errors" }, 7717 { "tx_bytes" }, 7718 { "tx_frames" }, 7719 { "rx_overflows" }, 7720 { "rx_frames" }, 7721 { "rx_align_errors" }, 7722 { "rx_crc_errors" }, 7723 { "rx_len_errors" }, 7724 { "pause_off_state" }, 7725 { "pause_on_state" }, 7726 { "pause_received" }, 7727 }; 7728 7729 #define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys) 7730 7731 static const struct { 7732 const char string[ETH_GSTRING_LEN]; 7733 } niu_rxchan_stat_keys[] = { 7734 { "rx_channel" }, 7735 { "rx_packets" }, 7736 { "rx_bytes" }, 7737 { "rx_dropped" }, 7738 { "rx_errors" }, 7739 }; 7740 7741 #define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys) 7742 7743 static const struct { 7744 const char string[ETH_GSTRING_LEN]; 7745 } niu_txchan_stat_keys[] = { 7746 { "tx_channel" }, 7747 { "tx_packets" }, 7748 { "tx_bytes" }, 7749 { "tx_errors" }, 7750 }; 7751 7752 #define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys) 7753 7754 static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data) 7755 { 7756 struct niu *np = netdev_priv(dev); 7757 int i; 7758 7759 if (stringset != ETH_SS_STATS) 7760 return; 7761 7762 if (np->flags & NIU_FLAGS_XMAC) { 7763 memcpy(data, niu_xmac_stat_keys, 7764 sizeof(niu_xmac_stat_keys)); 7765 data += sizeof(niu_xmac_stat_keys); 7766 } else { 7767 memcpy(data, niu_bmac_stat_keys, 7768 sizeof(niu_bmac_stat_keys)); 7769 data += sizeof(niu_bmac_stat_keys); 7770 } 7771 for (i = 0; i < np->num_rx_rings; i++) { 7772 memcpy(data, niu_rxchan_stat_keys, 7773 sizeof(niu_rxchan_stat_keys)); 7774 data += 
sizeof(niu_rxchan_stat_keys); 7775 } 7776 for (i = 0; i < np->num_tx_rings; i++) { 7777 memcpy(data, niu_txchan_stat_keys, 7778 sizeof(niu_txchan_stat_keys)); 7779 data += sizeof(niu_txchan_stat_keys); 7780 } 7781 } 7782 7783 static int niu_get_sset_count(struct net_device *dev, int stringset) 7784 { 7785 struct niu *np = netdev_priv(dev); 7786 7787 if (stringset != ETH_SS_STATS) 7788 return -EINVAL; 7789 7790 return (np->flags & NIU_FLAGS_XMAC ? 7791 NUM_XMAC_STAT_KEYS : 7792 NUM_BMAC_STAT_KEYS) + 7793 (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) + 7794 (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS); 7795 } 7796 7797 static void niu_get_ethtool_stats(struct net_device *dev, 7798 struct ethtool_stats *stats, u64 *data) 7799 { 7800 struct niu *np = netdev_priv(dev); 7801 int i; 7802 7803 niu_sync_mac_stats(np); 7804 if (np->flags & NIU_FLAGS_XMAC) { 7805 memcpy(data, &np->mac_stats.xmac, 7806 sizeof(struct niu_xmac_stats)); 7807 data += (sizeof(struct niu_xmac_stats) / sizeof(u64)); 7808 } else { 7809 memcpy(data, &np->mac_stats.bmac, 7810 sizeof(struct niu_bmac_stats)); 7811 data += (sizeof(struct niu_bmac_stats) / sizeof(u64)); 7812 } 7813 for (i = 0; i < np->num_rx_rings; i++) { 7814 struct rx_ring_info *rp = &np->rx_rings[i]; 7815 7816 niu_sync_rx_discard_stats(np, rp, 0); 7817 7818 data[0] = rp->rx_channel; 7819 data[1] = rp->rx_packets; 7820 data[2] = rp->rx_bytes; 7821 data[3] = rp->rx_dropped; 7822 data[4] = rp->rx_errors; 7823 data += 5; 7824 } 7825 for (i = 0; i < np->num_tx_rings; i++) { 7826 struct tx_ring_info *rp = &np->tx_rings[i]; 7827 7828 data[0] = rp->tx_channel; 7829 data[1] = rp->tx_packets; 7830 data[2] = rp->tx_bytes; 7831 data[3] = rp->tx_errors; 7832 data += 4; 7833 } 7834 } 7835 7836 static u64 niu_led_state_save(struct niu *np) 7837 { 7838 if (np->flags & NIU_FLAGS_XMAC) 7839 return nr64_mac(XMAC_CONFIG); 7840 else 7841 return nr64_mac(BMAC_XIF_CONFIG); 7842 } 7843 7844 static void niu_led_state_restore(struct niu *np, u64 val) 7845 { 7846 if (np->flags & NIU_FLAGS_XMAC) 7847 nw64_mac(XMAC_CONFIG, val); 7848 else 7849 nw64_mac(BMAC_XIF_CONFIG, val); 7850 } 7851 7852 static void niu_force_led(struct niu *np, int on) 7853 { 7854 u64 val, reg, bit; 7855 7856 if (np->flags & NIU_FLAGS_XMAC) { 7857 reg = XMAC_CONFIG; 7858 bit = XMAC_CONFIG_FORCE_LED_ON; 7859 } else { 7860 reg = BMAC_XIF_CONFIG; 7861 bit = BMAC_XIF_CONFIG_LINK_LED; 7862 } 7863 7864 val = nr64_mac(reg); 7865 if (on) 7866 val |= bit; 7867 else 7868 val &= ~bit; 7869 nw64_mac(reg, val); 7870 } 7871 7872 static int niu_set_phys_id(struct net_device *dev, 7873 enum ethtool_phys_id_state state) 7874 7875 { 7876 struct niu *np = netdev_priv(dev); 7877 7878 if (!netif_running(dev)) 7879 return -EAGAIN; 7880 7881 switch (state) { 7882 case ETHTOOL_ID_ACTIVE: 7883 np->orig_led_state = niu_led_state_save(np); 7884 return 1; /* cycle on/off once per second */ 7885 7886 case ETHTOOL_ID_ON: 7887 niu_force_led(np, 1); 7888 break; 7889 7890 case ETHTOOL_ID_OFF: 7891 niu_force_led(np, 0); 7892 break; 7893 7894 case ETHTOOL_ID_INACTIVE: 7895 niu_led_state_restore(np, np->orig_led_state); 7896 } 7897 7898 return 0; 7899 } 7900 7901 static const struct ethtool_ops niu_ethtool_ops = { 7902 .get_drvinfo = niu_get_drvinfo, 7903 .get_link = ethtool_op_get_link, 7904 .get_msglevel = niu_get_msglevel, 7905 .set_msglevel = niu_set_msglevel, 7906 .nway_reset = niu_nway_reset, 7907 .get_eeprom_len = niu_get_eeprom_len, 7908 .get_eeprom = niu_get_eeprom, 7909 .get_strings = niu_get_strings, 7910 .get_sset_count = niu_get_sset_count, 7911 
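	/* The string table emitted by niu_get_strings, the count returned
	 * by niu_get_sset_count and the u64 stream written by
	 * niu_get_ethtool_stats must stay in sync: MAC statistics first,
	 * then five u64s per RX channel and four per TX channel.
	 */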
.get_ethtool_stats = niu_get_ethtool_stats, 7912 .set_phys_id = niu_set_phys_id, 7913 .get_rxnfc = niu_get_nfc, 7914 .set_rxnfc = niu_set_nfc, 7915 .get_link_ksettings = niu_get_link_ksettings, 7916 .set_link_ksettings = niu_set_link_ksettings, 7917 }; 7918 7919 static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, 7920 int ldg, int ldn) 7921 { 7922 if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) 7923 return -EINVAL; 7924 if (ldn < 0 || ldn > LDN_MAX) 7925 return -EINVAL; 7926 7927 parent->ldg_map[ldn] = ldg; 7928 7929 if (np->parent->plat_type == PLAT_TYPE_NIU) { 7930 /* On N2 NIU, the ldn-->ldg assignments are setup and fixed by 7931 * the firmware, and we're not supposed to change them. 7932 * Validate the mapping, because if it's wrong we probably 7933 * won't get any interrupts and that's painful to debug. 7934 */ 7935 if (nr64(LDG_NUM(ldn)) != ldg) { 7936 dev_err(np->device, "Port %u, mismatched LDG assignment for ldn %d, should be %d is %llu\n", 7937 np->port, ldn, ldg, 7938 (unsigned long long) nr64(LDG_NUM(ldn))); 7939 return -EINVAL; 7940 } 7941 } else 7942 nw64(LDG_NUM(ldn), ldg); 7943 7944 return 0; 7945 } 7946 7947 static int niu_set_ldg_timer_res(struct niu *np, int res) 7948 { 7949 if (res < 0 || res > LDG_TIMER_RES_VAL) 7950 return -EINVAL; 7951 7952 7953 nw64(LDG_TIMER_RES, res); 7954 7955 return 0; 7956 } 7957 7958 static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector) 7959 { 7960 if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) || 7961 (func < 0 || func > 3) || 7962 (vector < 0 || vector > 0x1f)) 7963 return -EINVAL; 7964 7965 nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector); 7966 7967 return 0; 7968 } 7969 7970 static int niu_pci_eeprom_read(struct niu *np, u32 addr) 7971 { 7972 u64 frame, frame_base = (ESPC_PIO_STAT_READ_START | 7973 (addr << ESPC_PIO_STAT_ADDR_SHIFT)); 7974 int limit; 7975 7976 if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT)) 7977 return -EINVAL; 7978 7979 frame = frame_base; 7980 nw64(ESPC_PIO_STAT, frame); 7981 limit = 64; 7982 do { 7983 udelay(5); 7984 frame = nr64(ESPC_PIO_STAT); 7985 if (frame & ESPC_PIO_STAT_READ_END) 7986 break; 7987 } while (limit--); 7988 if (!(frame & ESPC_PIO_STAT_READ_END)) { 7989 dev_err(np->device, "EEPROM read timeout frame[%llx]\n", 7990 (unsigned long long) frame); 7991 return -ENODEV; 7992 } 7993 7994 frame = frame_base; 7995 nw64(ESPC_PIO_STAT, frame); 7996 limit = 64; 7997 do { 7998 udelay(5); 7999 frame = nr64(ESPC_PIO_STAT); 8000 if (frame & ESPC_PIO_STAT_READ_END) 8001 break; 8002 } while (limit--); 8003 if (!(frame & ESPC_PIO_STAT_READ_END)) { 8004 dev_err(np->device, "EEPROM read timeout frame[%llx]\n", 8005 (unsigned long long) frame); 8006 return -ENODEV; 8007 } 8008 8009 frame = nr64(ESPC_PIO_STAT); 8010 return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT; 8011 } 8012 8013 static int niu_pci_eeprom_read16(struct niu *np, u32 off) 8014 { 8015 int err = niu_pci_eeprom_read(np, off); 8016 u16 val; 8017 8018 if (err < 0) 8019 return err; 8020 val = (err << 8); 8021 err = niu_pci_eeprom_read(np, off + 1); 8022 if (err < 0) 8023 return err; 8024 val |= (err & 0xff); 8025 8026 return val; 8027 } 8028 8029 static int niu_pci_eeprom_read16_swp(struct niu *np, u32 off) 8030 { 8031 int err = niu_pci_eeprom_read(np, off); 8032 u16 val; 8033 8034 if (err < 0) 8035 return err; 8036 8037 val = (err & 0xff); 8038 err = niu_pci_eeprom_read(np, off + 1); 8039 if (err < 0) 8040 return err; 8041 8042 val |= (err & 0xff) << 8; 8043 8044 return val; 8045 } 8046 8047 static 
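/* Note the byte order of the two 16-bit EEPROM helpers above:
 * niu_pci_eeprom_read16() treats the first byte read as the high byte
 * (big-endian assembly), while niu_pci_eeprom_read16_swp() treats it
 * as the low byte; the VPD and expansion-ROM length fields fetched
 * below use the swapped form.
 */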
int niu_pci_vpd_get_propname(struct niu *np, u32 off, char *namebuf, 8048 int namebuf_len) 8049 { 8050 int i; 8051 8052 for (i = 0; i < namebuf_len; i++) { 8053 int err = niu_pci_eeprom_read(np, off + i); 8054 if (err < 0) 8055 return err; 8056 *namebuf++ = err; 8057 if (!err) 8058 break; 8059 } 8060 if (i >= namebuf_len) 8061 return -EINVAL; 8062 8063 return i + 1; 8064 } 8065 8066 static void niu_vpd_parse_version(struct niu *np) 8067 { 8068 struct niu_vpd *vpd = &np->vpd; 8069 int len = strlen(vpd->version) + 1; 8070 const char *s = vpd->version; 8071 int i; 8072 8073 for (i = 0; i < len - 5; i++) { 8074 if (!strncmp(s + i, "FCode ", 6)) 8075 break; 8076 } 8077 if (i >= len - 5) 8078 return; 8079 8080 s += i + 5; 8081 sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor); 8082 8083 netif_printk(np, probe, KERN_DEBUG, np->dev, 8084 "VPD_SCAN: FCODE major(%d) minor(%d)\n", 8085 vpd->fcode_major, vpd->fcode_minor); 8086 if (vpd->fcode_major > NIU_VPD_MIN_MAJOR || 8087 (vpd->fcode_major == NIU_VPD_MIN_MAJOR && 8088 vpd->fcode_minor >= NIU_VPD_MIN_MINOR)) 8089 np->flags |= NIU_FLAGS_VPD_VALID; 8090 } 8091 8092 /* ESPC_PIO_EN_ENABLE must be set */ 8093 static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end) 8094 { 8095 unsigned int found_mask = 0; 8096 #define FOUND_MASK_MODEL 0x00000001 8097 #define FOUND_MASK_BMODEL 0x00000002 8098 #define FOUND_MASK_VERS 0x00000004 8099 #define FOUND_MASK_MAC 0x00000008 8100 #define FOUND_MASK_NMAC 0x00000010 8101 #define FOUND_MASK_PHY 0x00000020 8102 #define FOUND_MASK_ALL 0x0000003f 8103 8104 netif_printk(np, probe, KERN_DEBUG, np->dev, 8105 "VPD_SCAN: start[%x] end[%x]\n", start, end); 8106 while (start < end) { 8107 int len, err, prop_len; 8108 char namebuf[64]; 8109 u8 *prop_buf; 8110 int max_len; 8111 8112 if (found_mask == FOUND_MASK_ALL) { 8113 niu_vpd_parse_version(np); 8114 return 1; 8115 } 8116 8117 err = niu_pci_eeprom_read(np, start + 2); 8118 if (err < 0) 8119 return err; 8120 len = err; 8121 start += 3; 8122 8123 prop_len = niu_pci_eeprom_read(np, start + 4); 8124 if (prop_len < 0) 8125 return prop_len; 8126 err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64); 8127 if (err < 0) 8128 return err; 8129 8130 prop_buf = NULL; 8131 max_len = 0; 8132 if (!strcmp(namebuf, "model")) { 8133 prop_buf = np->vpd.model; 8134 max_len = NIU_VPD_MODEL_MAX; 8135 found_mask |= FOUND_MASK_MODEL; 8136 } else if (!strcmp(namebuf, "board-model")) { 8137 prop_buf = np->vpd.board_model; 8138 max_len = NIU_VPD_BD_MODEL_MAX; 8139 found_mask |= FOUND_MASK_BMODEL; 8140 } else if (!strcmp(namebuf, "version")) { 8141 prop_buf = np->vpd.version; 8142 max_len = NIU_VPD_VERSION_MAX; 8143 found_mask |= FOUND_MASK_VERS; 8144 } else if (!strcmp(namebuf, "local-mac-address")) { 8145 prop_buf = np->vpd.local_mac; 8146 max_len = ETH_ALEN; 8147 found_mask |= FOUND_MASK_MAC; 8148 } else if (!strcmp(namebuf, "num-mac-addresses")) { 8149 prop_buf = &np->vpd.mac_num; 8150 max_len = 1; 8151 found_mask |= FOUND_MASK_NMAC; 8152 } else if (!strcmp(namebuf, "phy-type")) { 8153 prop_buf = np->vpd.phy_type; 8154 max_len = NIU_VPD_PHY_TYPE_MAX; 8155 found_mask |= FOUND_MASK_PHY; 8156 } 8157 8158 if (max_len && prop_len > max_len) { 8159 dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len); 8160 return -EINVAL; 8161 } 8162 8163 if (prop_buf) { 8164 u32 off = start + 5 + err; 8165 int i; 8166 8167 netif_printk(np, probe, KERN_DEBUG, np->dev, 8168 "VPD_SCAN: Reading in property [%s] len[%d]\n", 8169 namebuf, prop_len); 8170 for (i = 0; i < 
prop_len; i++) { 8171 err = niu_pci_eeprom_read(np, off + i); 8172 if (err < 0) 8173 return err; 8174 *prop_buf++ = err; 8175 } 8176 } 8177 8178 start += len; 8179 } 8180 8181 return 0; 8182 } 8183 8184 /* ESPC_PIO_EN_ENABLE must be set */ 8185 static int niu_pci_vpd_fetch(struct niu *np, u32 start) 8186 { 8187 u32 offset; 8188 int err; 8189 8190 err = niu_pci_eeprom_read16_swp(np, start + 1); 8191 if (err < 0) 8192 return err; 8193 8194 offset = err + 3; 8195 8196 while (start + offset < ESPC_EEPROM_SIZE) { 8197 u32 here = start + offset; 8198 u32 end; 8199 8200 err = niu_pci_eeprom_read(np, here); 8201 if (err < 0) 8202 return err; 8203 if (err != 0x90) 8204 return -EINVAL; 8205 8206 err = niu_pci_eeprom_read16_swp(np, here + 1); 8207 if (err < 0) 8208 return err; 8209 8210 here = start + offset + 3; 8211 end = start + offset + err; 8212 8213 offset += err; 8214 8215 err = niu_pci_vpd_scan_props(np, here, end); 8216 if (err < 0) 8217 return err; 8218 /* ret == 1 is not an error */ 8219 if (err == 1) 8220 return 0; 8221 } 8222 return 0; 8223 } 8224 8225 /* ESPC_PIO_EN_ENABLE must be set */ 8226 static u32 niu_pci_vpd_offset(struct niu *np) 8227 { 8228 u32 start = 0, end = ESPC_EEPROM_SIZE, ret; 8229 int err; 8230 8231 while (start < end) { 8232 ret = start; 8233 8234 /* ROM header signature? */ 8235 err = niu_pci_eeprom_read16(np, start + 0); 8236 if (err != 0x55aa) 8237 return 0; 8238 8239 /* Apply offset to PCI data structure. */ 8240 err = niu_pci_eeprom_read16(np, start + 23); 8241 if (err < 0) 8242 return 0; 8243 start += err; 8244 8245 /* Check for "PCIR" signature. */ 8246 err = niu_pci_eeprom_read16(np, start + 0); 8247 if (err != 0x5043) 8248 return 0; 8249 err = niu_pci_eeprom_read16(np, start + 2); 8250 if (err != 0x4952) 8251 return 0; 8252 8253 /* Check for OBP image type. 
*/ 8254 err = niu_pci_eeprom_read(np, start + 20); 8255 if (err < 0) 8256 return 0; 8257 if (err != 0x01) { 8258 err = niu_pci_eeprom_read(np, ret + 2); 8259 if (err < 0) 8260 return 0; 8261 8262 start = ret + (err * 512); 8263 continue; 8264 } 8265 8266 err = niu_pci_eeprom_read16_swp(np, start + 8); 8267 if (err < 0) 8268 return err; 8269 ret += err; 8270 8271 err = niu_pci_eeprom_read(np, ret + 0); 8272 if (err != 0x82) 8273 return 0; 8274 8275 return ret; 8276 } 8277 8278 return 0; 8279 } 8280 8281 static int niu_phy_type_prop_decode(struct niu *np, const char *phy_prop) 8282 { 8283 if (!strcmp(phy_prop, "mif")) { 8284 /* 1G copper, MII */ 8285 np->flags &= ~(NIU_FLAGS_FIBER | 8286 NIU_FLAGS_10G); 8287 np->mac_xcvr = MAC_XCVR_MII; 8288 } else if (!strcmp(phy_prop, "xgf")) { 8289 /* 10G fiber, XPCS */ 8290 np->flags |= (NIU_FLAGS_10G | 8291 NIU_FLAGS_FIBER); 8292 np->mac_xcvr = MAC_XCVR_XPCS; 8293 } else if (!strcmp(phy_prop, "pcs")) { 8294 /* 1G fiber, PCS */ 8295 np->flags &= ~NIU_FLAGS_10G; 8296 np->flags |= NIU_FLAGS_FIBER; 8297 np->mac_xcvr = MAC_XCVR_PCS; 8298 } else if (!strcmp(phy_prop, "xgc")) { 8299 /* 10G copper, XPCS */ 8300 np->flags |= NIU_FLAGS_10G; 8301 np->flags &= ~NIU_FLAGS_FIBER; 8302 np->mac_xcvr = MAC_XCVR_XPCS; 8303 } else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) { 8304 /* 10G Serdes or 1G Serdes, default to 10G */ 8305 np->flags |= NIU_FLAGS_10G; 8306 np->flags &= ~NIU_FLAGS_FIBER; 8307 np->flags |= NIU_FLAGS_XCVR_SERDES; 8308 np->mac_xcvr = MAC_XCVR_XPCS; 8309 } else { 8310 return -EINVAL; 8311 } 8312 return 0; 8313 } 8314 8315 static int niu_pci_vpd_get_nports(struct niu *np) 8316 { 8317 int ports = 0; 8318 8319 if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) || 8320 (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) || 8321 (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) || 8322 (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) || 8323 (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) { 8324 ports = 4; 8325 } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) || 8326 (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) || 8327 (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) || 8328 (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) { 8329 ports = 2; 8330 } 8331 8332 return ports; 8333 } 8334 8335 static void niu_pci_vpd_validate(struct niu *np) 8336 { 8337 struct net_device *dev = np->dev; 8338 struct niu_vpd *vpd = &np->vpd; 8339 u8 addr[ETH_ALEN]; 8340 u8 val8; 8341 8342 if (!is_valid_ether_addr(&vpd->local_mac[0])) { 8343 dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n"); 8344 8345 np->flags &= ~NIU_FLAGS_VPD_VALID; 8346 return; 8347 } 8348 8349 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || 8350 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { 8351 np->flags |= NIU_FLAGS_10G; 8352 np->flags &= ~NIU_FLAGS_FIBER; 8353 np->flags |= NIU_FLAGS_XCVR_SERDES; 8354 np->mac_xcvr = MAC_XCVR_PCS; 8355 if (np->port > 1) { 8356 np->flags |= NIU_FLAGS_FIBER; 8357 np->flags &= ~NIU_FLAGS_10G; 8358 } 8359 if (np->flags & NIU_FLAGS_10G) 8360 np->mac_xcvr = MAC_XCVR_XPCS; 8361 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { 8362 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | 8363 NIU_FLAGS_HOTPLUG_PHY); 8364 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { 8365 dev_err(np->device, "Illegal phy string [%s]\n", 8366 np->vpd.phy_type); 8367 dev_err(np->device, "Falling back to SPROM\n"); 8368 np->flags &= ~NIU_FLAGS_VPD_VALID; 8369 return; 8370 } 8371 8372 ether_addr_copy(addr, vpd->local_mac); 8373 8374 val8 = addr[5]; 8375 addr[5] += np->port; 8376 
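	/* Derive the per-port MAC address: add the port number to the
	 * low octet and, if it wrapped around (addr[5] ends up below the
	 * saved val8), propagate a carry into addr[4]. For example, a
	 * base address ending in fe on port 3 yields 01 with a carry.
	 */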
if (addr[5] < val8) 8377 addr[4]++; 8378 8379 eth_hw_addr_set(dev, addr); 8380 } 8381 8382 static int niu_pci_probe_sprom(struct niu *np) 8383 { 8384 struct net_device *dev = np->dev; 8385 u8 addr[ETH_ALEN]; 8386 int len, i; 8387 u64 val, sum; 8388 u8 val8; 8389 8390 val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ); 8391 val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT; 8392 len = val / 4; 8393 8394 np->eeprom_len = len; 8395 8396 netif_printk(np, probe, KERN_DEBUG, np->dev, 8397 "SPROM: Image size %llu\n", (unsigned long long)val); 8398 8399 sum = 0; 8400 for (i = 0; i < len; i++) { 8401 val = nr64(ESPC_NCR(i)); 8402 sum += (val >> 0) & 0xff; 8403 sum += (val >> 8) & 0xff; 8404 sum += (val >> 16) & 0xff; 8405 sum += (val >> 24) & 0xff; 8406 } 8407 netif_printk(np, probe, KERN_DEBUG, np->dev, 8408 "SPROM: Checksum %x\n", (int)(sum & 0xff)); 8409 if ((sum & 0xff) != 0xab) { 8410 dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff)); 8411 return -EINVAL; 8412 } 8413 8414 val = nr64(ESPC_PHY_TYPE); 8415 switch (np->port) { 8416 case 0: 8417 val8 = (val & ESPC_PHY_TYPE_PORT0) >> 8418 ESPC_PHY_TYPE_PORT0_SHIFT; 8419 break; 8420 case 1: 8421 val8 = (val & ESPC_PHY_TYPE_PORT1) >> 8422 ESPC_PHY_TYPE_PORT1_SHIFT; 8423 break; 8424 case 2: 8425 val8 = (val & ESPC_PHY_TYPE_PORT2) >> 8426 ESPC_PHY_TYPE_PORT2_SHIFT; 8427 break; 8428 case 3: 8429 val8 = (val & ESPC_PHY_TYPE_PORT3) >> 8430 ESPC_PHY_TYPE_PORT3_SHIFT; 8431 break; 8432 default: 8433 dev_err(np->device, "Bogus port number %u\n", 8434 np->port); 8435 return -EINVAL; 8436 } 8437 netif_printk(np, probe, KERN_DEBUG, np->dev, 8438 "SPROM: PHY type %x\n", val8); 8439 8440 switch (val8) { 8441 case ESPC_PHY_TYPE_1G_COPPER: 8442 /* 1G copper, MII */ 8443 np->flags &= ~(NIU_FLAGS_FIBER | 8444 NIU_FLAGS_10G); 8445 np->mac_xcvr = MAC_XCVR_MII; 8446 break; 8447 8448 case ESPC_PHY_TYPE_1G_FIBER: 8449 /* 1G fiber, PCS */ 8450 np->flags &= ~NIU_FLAGS_10G; 8451 np->flags |= NIU_FLAGS_FIBER; 8452 np->mac_xcvr = MAC_XCVR_PCS; 8453 break; 8454 8455 case ESPC_PHY_TYPE_10G_COPPER: 8456 /* 10G copper, XPCS */ 8457 np->flags |= NIU_FLAGS_10G; 8458 np->flags &= ~NIU_FLAGS_FIBER; 8459 np->mac_xcvr = MAC_XCVR_XPCS; 8460 break; 8461 8462 case ESPC_PHY_TYPE_10G_FIBER: 8463 /* 10G fiber, XPCS */ 8464 np->flags |= (NIU_FLAGS_10G | 8465 NIU_FLAGS_FIBER); 8466 np->mac_xcvr = MAC_XCVR_XPCS; 8467 break; 8468 8469 default: 8470 dev_err(np->device, "Bogus SPROM phy type %u\n", val8); 8471 return -EINVAL; 8472 } 8473 8474 val = nr64(ESPC_MAC_ADDR0); 8475 netif_printk(np, probe, KERN_DEBUG, np->dev, 8476 "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val); 8477 addr[0] = (val >> 0) & 0xff; 8478 addr[1] = (val >> 8) & 0xff; 8479 addr[2] = (val >> 16) & 0xff; 8480 addr[3] = (val >> 24) & 0xff; 8481 8482 val = nr64(ESPC_MAC_ADDR1); 8483 netif_printk(np, probe, KERN_DEBUG, np->dev, 8484 "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val); 8485 addr[4] = (val >> 0) & 0xff; 8486 addr[5] = (val >> 8) & 0xff; 8487 8488 if (!is_valid_ether_addr(addr)) { 8489 dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n", 8490 addr); 8491 return -EINVAL; 8492 } 8493 8494 val8 = addr[5]; 8495 addr[5] += np->port; 8496 if (addr[5] < val8) 8497 addr[4]++; 8498 8499 eth_hw_addr_set(dev, addr); 8500 8501 val = nr64(ESPC_MOD_STR_LEN); 8502 netif_printk(np, probe, KERN_DEBUG, np->dev, 8503 "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val); 8504 if (val >= 8 * 4) 8505 return -EINVAL; 8506 8507 for (i = 0; i < val; i += 4) { 8508 u64 tmp = nr64(ESPC_NCR(5 + (i / 4))); 8509 
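		/* Each ESPC_NCR word packs four characters of the model
		 * string with the first character in the most significant
		 * byte, so the unpacking below reverses the bytes into
		 * string order.
		 */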
8510 np->vpd.model[i + 3] = (tmp >> 0) & 0xff; 8511 np->vpd.model[i + 2] = (tmp >> 8) & 0xff; 8512 np->vpd.model[i + 1] = (tmp >> 16) & 0xff; 8513 np->vpd.model[i + 0] = (tmp >> 24) & 0xff; 8514 } 8515 np->vpd.model[val] = '\0'; 8516 8517 val = nr64(ESPC_BD_MOD_STR_LEN); 8518 netif_printk(np, probe, KERN_DEBUG, np->dev, 8519 "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val); 8520 if (val >= 4 * 4) 8521 return -EINVAL; 8522 8523 for (i = 0; i < val; i += 4) { 8524 u64 tmp = nr64(ESPC_NCR(14 + (i / 4))); 8525 8526 np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff; 8527 np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff; 8528 np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff; 8529 np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff; 8530 } 8531 np->vpd.board_model[val] = '\0'; 8532 8533 np->vpd.mac_num = 8534 nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; 8535 netif_printk(np, probe, KERN_DEBUG, np->dev, 8536 "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num); 8537 8538 return 0; 8539 } 8540 8541 static int niu_get_and_validate_port(struct niu *np) 8542 { 8543 struct niu_parent *parent = np->parent; 8544 8545 if (np->port <= 1) 8546 np->flags |= NIU_FLAGS_XMAC; 8547 8548 if (!parent->num_ports) { 8549 if (parent->plat_type == PLAT_TYPE_NIU) { 8550 parent->num_ports = 2; 8551 } else { 8552 parent->num_ports = niu_pci_vpd_get_nports(np); 8553 if (!parent->num_ports) { 8554 /* Fall back to SPROM as last resort. 8555 * This will fail on most cards. 8556 */ 8557 parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & 8558 ESPC_NUM_PORTS_MACS_VAL; 8559 8560 /* All of the current probing methods fail on 8561 * Maramba on-board parts. 8562 */ 8563 if (!parent->num_ports) 8564 parent->num_ports = 4; 8565 } 8566 } 8567 } 8568 8569 if (np->port >= parent->num_ports) 8570 return -ENODEV; 8571 8572 return 0; 8573 } 8574 8575 static int phy_record(struct niu_parent *parent, struct phy_probe_info *p, 8576 int dev_id_1, int dev_id_2, u8 phy_port, int type) 8577 { 8578 u32 id = (dev_id_1 << 16) | dev_id_2; 8579 u8 idx; 8580 8581 if (dev_id_1 < 0 || dev_id_2 < 0) 8582 return 0; 8583 if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) { 8584 /* Because of the NIU_PHY_ID_MASK being applied, the 8704 8585 * test covers the 8706 as well. 8586 */ 8587 if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) && 8588 ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011)) 8589 return 0; 8590 } else { 8591 if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R) 8592 return 0; 8593 } 8594 8595 pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n", 8596 parent->index, id, 8597 type == PHY_TYPE_PMA_PMD ? "PMA/PMD" : 8598 type == PHY_TYPE_PCS ? 
"PCS" : "MII", 8599 phy_port); 8600 8601 if (p->cur[type] >= NIU_MAX_PORTS) { 8602 pr_err("Too many PHY ports\n"); 8603 return -EINVAL; 8604 } 8605 idx = p->cur[type]; 8606 p->phy_id[type][idx] = id; 8607 p->phy_port[type][idx] = phy_port; 8608 p->cur[type] = idx + 1; 8609 return 0; 8610 } 8611 8612 static int port_has_10g(struct phy_probe_info *p, int port) 8613 { 8614 int i; 8615 8616 for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) { 8617 if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port) 8618 return 1; 8619 } 8620 for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) { 8621 if (p->phy_port[PHY_TYPE_PCS][i] == port) 8622 return 1; 8623 } 8624 8625 return 0; 8626 } 8627 8628 static int count_10g_ports(struct phy_probe_info *p, int *lowest) 8629 { 8630 int port, cnt; 8631 8632 cnt = 0; 8633 *lowest = 32; 8634 for (port = 8; port < 32; port++) { 8635 if (port_has_10g(p, port)) { 8636 if (!cnt) 8637 *lowest = port; 8638 cnt++; 8639 } 8640 } 8641 8642 return cnt; 8643 } 8644 8645 static int count_1g_ports(struct phy_probe_info *p, int *lowest) 8646 { 8647 *lowest = 32; 8648 if (p->cur[PHY_TYPE_MII]) 8649 *lowest = p->phy_port[PHY_TYPE_MII][0]; 8650 8651 return p->cur[PHY_TYPE_MII]; 8652 } 8653 8654 static void niu_n2_divide_channels(struct niu_parent *parent) 8655 { 8656 int num_ports = parent->num_ports; 8657 int i; 8658 8659 for (i = 0; i < num_ports; i++) { 8660 parent->rxchan_per_port[i] = (16 / num_ports); 8661 parent->txchan_per_port[i] = (16 / num_ports); 8662 8663 pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n", 8664 parent->index, i, 8665 parent->rxchan_per_port[i], 8666 parent->txchan_per_port[i]); 8667 } 8668 } 8669 8670 static void niu_divide_channels(struct niu_parent *parent, 8671 int num_10g, int num_1g) 8672 { 8673 int num_ports = parent->num_ports; 8674 int rx_chans_per_10g, rx_chans_per_1g; 8675 int tx_chans_per_10g, tx_chans_per_1g; 8676 int i, tot_rx, tot_tx; 8677 8678 if (!num_10g || !num_1g) { 8679 rx_chans_per_10g = rx_chans_per_1g = 8680 (NIU_NUM_RXCHAN / num_ports); 8681 tx_chans_per_10g = tx_chans_per_1g = 8682 (NIU_NUM_TXCHAN / num_ports); 8683 } else { 8684 rx_chans_per_1g = NIU_NUM_RXCHAN / 8; 8685 rx_chans_per_10g = (NIU_NUM_RXCHAN - 8686 (rx_chans_per_1g * num_1g)) / 8687 num_10g; 8688 8689 tx_chans_per_1g = NIU_NUM_TXCHAN / 6; 8690 tx_chans_per_10g = (NIU_NUM_TXCHAN - 8691 (tx_chans_per_1g * num_1g)) / 8692 num_10g; 8693 } 8694 8695 tot_rx = tot_tx = 0; 8696 for (i = 0; i < num_ports; i++) { 8697 int type = phy_decode(parent->port_phy, i); 8698 8699 if (type == PORT_TYPE_10G) { 8700 parent->rxchan_per_port[i] = rx_chans_per_10g; 8701 parent->txchan_per_port[i] = tx_chans_per_10g; 8702 } else { 8703 parent->rxchan_per_port[i] = rx_chans_per_1g; 8704 parent->txchan_per_port[i] = tx_chans_per_1g; 8705 } 8706 pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n", 8707 parent->index, i, 8708 parent->rxchan_per_port[i], 8709 parent->txchan_per_port[i]); 8710 tot_rx += parent->rxchan_per_port[i]; 8711 tot_tx += parent->txchan_per_port[i]; 8712 } 8713 8714 if (tot_rx > NIU_NUM_RXCHAN) { 8715 pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n", 8716 parent->index, tot_rx); 8717 for (i = 0; i < num_ports; i++) 8718 parent->rxchan_per_port[i] = 1; 8719 } 8720 if (tot_tx > NIU_NUM_TXCHAN) { 8721 pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n", 8722 parent->index, tot_tx); 8723 for (i = 0; i < num_ports; i++) 8724 parent->txchan_per_port[i] = 1; 8725 } 8726 if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) { 8727 pr_warn("niu%d: Driver 
bug, wasted channels, RX[%d] TX[%d]\n", 8728 parent->index, tot_rx, tot_tx); 8729 } 8730 } 8731 8732 static void niu_divide_rdc_groups(struct niu_parent *parent, 8733 int num_10g, int num_1g) 8734 { 8735 int i, num_ports = parent->num_ports; 8736 int rdc_group, rdc_groups_per_port; 8737 int rdc_channel_base; 8738 8739 rdc_group = 0; 8740 rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports; 8741 8742 rdc_channel_base = 0; 8743 8744 for (i = 0; i < num_ports; i++) { 8745 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i]; 8746 int grp, num_channels = parent->rxchan_per_port[i]; 8747 int this_channel_offset; 8748 8749 tp->first_table_num = rdc_group; 8750 tp->num_tables = rdc_groups_per_port; 8751 this_channel_offset = 0; 8752 for (grp = 0; grp < tp->num_tables; grp++) { 8753 struct rdc_table *rt = &tp->tables[grp]; 8754 int slot; 8755 8756 pr_info("niu%d: Port %d RDC tbl(%d) [ ", 8757 parent->index, i, tp->first_table_num + grp); 8758 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) { 8759 rt->rxdma_channel[slot] = 8760 rdc_channel_base + this_channel_offset; 8761 8762 pr_cont("%d ", rt->rxdma_channel[slot]); 8763 8764 if (++this_channel_offset == num_channels) 8765 this_channel_offset = 0; 8766 } 8767 pr_cont("]\n"); 8768 } 8769 8770 parent->rdc_default[i] = rdc_channel_base; 8771 8772 rdc_channel_base += num_channels; 8773 rdc_group += rdc_groups_per_port; 8774 } 8775 } 8776 8777 static int fill_phy_probe_info(struct niu *np, struct niu_parent *parent, 8778 struct phy_probe_info *info) 8779 { 8780 unsigned long flags; 8781 int port, err; 8782 8783 memset(info, 0, sizeof(*info)); 8784 8785 /* Port 0 to 7 are reserved for onboard Serdes, probe the rest. */ 8786 niu_lock_parent(np, flags); 8787 err = 0; 8788 for (port = 8; port < 32; port++) { 8789 int dev_id_1, dev_id_2; 8790 8791 dev_id_1 = mdio_read(np, port, 8792 NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1); 8793 dev_id_2 = mdio_read(np, port, 8794 NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2); 8795 err = phy_record(parent, info, dev_id_1, dev_id_2, port, 8796 PHY_TYPE_PMA_PMD); 8797 if (err) 8798 break; 8799 dev_id_1 = mdio_read(np, port, 8800 NIU_PCS_DEV_ADDR, MII_PHYSID1); 8801 dev_id_2 = mdio_read(np, port, 8802 NIU_PCS_DEV_ADDR, MII_PHYSID2); 8803 err = phy_record(parent, info, dev_id_1, dev_id_2, port, 8804 PHY_TYPE_PCS); 8805 if (err) 8806 break; 8807 dev_id_1 = mii_read(np, port, MII_PHYSID1); 8808 dev_id_2 = mii_read(np, port, MII_PHYSID2); 8809 err = phy_record(parent, info, dev_id_1, dev_id_2, port, 8810 PHY_TYPE_MII); 8811 if (err) 8812 break; 8813 } 8814 niu_unlock_parent(np, flags); 8815 8816 return err; 8817 } 8818 8819 static int walk_phys(struct niu *np, struct niu_parent *parent) 8820 { 8821 struct phy_probe_info *info = &parent->phy_probe_info; 8822 int lowest_10g, lowest_1g; 8823 int num_10g, num_1g; 8824 u32 val; 8825 int err; 8826 8827 num_10g = num_1g = 0; 8828 8829 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || 8830 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { 8831 num_10g = 0; 8832 num_1g = 2; 8833 parent->plat_type = PLAT_TYPE_ATCA_CP3220; 8834 parent->num_ports = 4; 8835 val = (phy_encode(PORT_TYPE_1G, 0) | 8836 phy_encode(PORT_TYPE_1G, 1) | 8837 phy_encode(PORT_TYPE_1G, 2) | 8838 phy_encode(PORT_TYPE_1G, 3)); 8839 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { 8840 num_10g = 2; 8841 num_1g = 0; 8842 parent->num_ports = 2; 8843 val = (phy_encode(PORT_TYPE_10G, 0) | 8844 phy_encode(PORT_TYPE_10G, 1)); 8845 } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) && 8846 (parent->plat_type == PLAT_TYPE_NIU)) { 8847 /* this is 
the Monza case */ 8848 if (np->flags & NIU_FLAGS_10G) { 8849 val = (phy_encode(PORT_TYPE_10G, 0) | 8850 phy_encode(PORT_TYPE_10G, 1)); 8851 } else { 8852 val = (phy_encode(PORT_TYPE_1G, 0) | 8853 phy_encode(PORT_TYPE_1G, 1)); 8854 } 8855 } else { 8856 err = fill_phy_probe_info(np, parent, info); 8857 if (err) 8858 return err; 8859 8860 num_10g = count_10g_ports(info, &lowest_10g); 8861 num_1g = count_1g_ports(info, &lowest_1g); 8862 8863 switch ((num_10g << 4) | num_1g) { 8864 case 0x24: 8865 if (lowest_1g == 10) 8866 parent->plat_type = PLAT_TYPE_VF_P0; 8867 else if (lowest_1g == 26) 8868 parent->plat_type = PLAT_TYPE_VF_P1; 8869 else 8870 goto unknown_vg_1g_port; 8871 8872 fallthrough; 8873 case 0x22: 8874 val = (phy_encode(PORT_TYPE_10G, 0) | 8875 phy_encode(PORT_TYPE_10G, 1) | 8876 phy_encode(PORT_TYPE_1G, 2) | 8877 phy_encode(PORT_TYPE_1G, 3)); 8878 break; 8879 8880 case 0x20: 8881 val = (phy_encode(PORT_TYPE_10G, 0) | 8882 phy_encode(PORT_TYPE_10G, 1)); 8883 break; 8884 8885 case 0x10: 8886 val = phy_encode(PORT_TYPE_10G, np->port); 8887 break; 8888 8889 case 0x14: 8890 if (lowest_1g == 10) 8891 parent->plat_type = PLAT_TYPE_VF_P0; 8892 else if (lowest_1g == 26) 8893 parent->plat_type = PLAT_TYPE_VF_P1; 8894 else 8895 goto unknown_vg_1g_port; 8896 8897 fallthrough; 8898 case 0x13: 8899 if ((lowest_10g & 0x7) == 0) 8900 val = (phy_encode(PORT_TYPE_10G, 0) | 8901 phy_encode(PORT_TYPE_1G, 1) | 8902 phy_encode(PORT_TYPE_1G, 2) | 8903 phy_encode(PORT_TYPE_1G, 3)); 8904 else 8905 val = (phy_encode(PORT_TYPE_1G, 0) | 8906 phy_encode(PORT_TYPE_10G, 1) | 8907 phy_encode(PORT_TYPE_1G, 2) | 8908 phy_encode(PORT_TYPE_1G, 3)); 8909 break; 8910 8911 case 0x04: 8912 if (lowest_1g == 10) 8913 parent->plat_type = PLAT_TYPE_VF_P0; 8914 else if (lowest_1g == 26) 8915 parent->plat_type = PLAT_TYPE_VF_P1; 8916 else 8917 goto unknown_vg_1g_port; 8918 8919 val = (phy_encode(PORT_TYPE_1G, 0) | 8920 phy_encode(PORT_TYPE_1G, 1) | 8921 phy_encode(PORT_TYPE_1G, 2) | 8922 phy_encode(PORT_TYPE_1G, 3)); 8923 break; 8924 8925 default: 8926 pr_err("Unsupported port config 10G[%d] 1G[%d]\n", 8927 num_10g, num_1g); 8928 return -EINVAL; 8929 } 8930 } 8931 8932 parent->port_phy = val; 8933 8934 if (parent->plat_type == PLAT_TYPE_NIU) 8935 niu_n2_divide_channels(parent); 8936 else 8937 niu_divide_channels(parent, num_10g, num_1g); 8938 8939 niu_divide_rdc_groups(parent, num_10g, num_1g); 8940 8941 return 0; 8942 8943 unknown_vg_1g_port: 8944 pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g); 8945 return -EINVAL; 8946 } 8947 8948 static int niu_probe_ports(struct niu *np) 8949 { 8950 struct niu_parent *parent = np->parent; 8951 int err, i; 8952 8953 if (parent->port_phy == PORT_PHY_UNKNOWN) { 8954 err = walk_phys(np, parent); 8955 if (err) 8956 return err; 8957 8958 niu_set_ldg_timer_res(np, 2); 8959 for (i = 0; i <= LDN_MAX; i++) 8960 niu_ldn_irq_enable(np, i, 0); 8961 } 8962 8963 if (parent->port_phy == PORT_PHY_INVALID) 8964 return -EINVAL; 8965 8966 return 0; 8967 } 8968 8969 static int niu_classifier_swstate_init(struct niu *np) 8970 { 8971 struct niu_classifier *cp = &np->clas; 8972 8973 cp->tcam_top = (u16) np->port; 8974 cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports; 8975 cp->h1_init = 0xffffffff; 8976 cp->h2_init = 0xffff; 8977 8978 return fflp_early_init(np); 8979 } 8980 8981 static void niu_link_config_init(struct niu *np) 8982 { 8983 struct niu_link_config *lp = &np->link_config; 8984 8985 lp->advertising = (ADVERTISED_10baseT_Half | 8986 ADVERTISED_10baseT_Full | 8987 
ADVERTISED_100baseT_Half | 8988 ADVERTISED_100baseT_Full | 8989 ADVERTISED_1000baseT_Half | 8990 ADVERTISED_1000baseT_Full | 8991 ADVERTISED_10000baseT_Full | 8992 ADVERTISED_Autoneg); 8993 lp->speed = lp->active_speed = SPEED_INVALID; 8994 lp->duplex = DUPLEX_FULL; 8995 lp->active_duplex = DUPLEX_INVALID; 8996 lp->autoneg = 1; 8997 #if 0 8998 lp->loopback_mode = LOOPBACK_MAC; 8999 lp->active_speed = SPEED_10000; 9000 lp->active_duplex = DUPLEX_FULL; 9001 #else 9002 lp->loopback_mode = LOOPBACK_DISABLED; 9003 #endif 9004 } 9005 9006 static int niu_init_mac_ipp_pcs_base(struct niu *np) 9007 { 9008 switch (np->port) { 9009 case 0: 9010 np->mac_regs = np->regs + XMAC_PORT0_OFF; 9011 np->ipp_off = 0x00000; 9012 np->pcs_off = 0x04000; 9013 np->xpcs_off = 0x02000; 9014 break; 9015 9016 case 1: 9017 np->mac_regs = np->regs + XMAC_PORT1_OFF; 9018 np->ipp_off = 0x08000; 9019 np->pcs_off = 0x0a000; 9020 np->xpcs_off = 0x08000; 9021 break; 9022 9023 case 2: 9024 np->mac_regs = np->regs + BMAC_PORT2_OFF; 9025 np->ipp_off = 0x04000; 9026 np->pcs_off = 0x0e000; 9027 np->xpcs_off = ~0UL; 9028 break; 9029 9030 case 3: 9031 np->mac_regs = np->regs + BMAC_PORT3_OFF; 9032 np->ipp_off = 0x0c000; 9033 np->pcs_off = 0x12000; 9034 np->xpcs_off = ~0UL; 9035 break; 9036 9037 default: 9038 dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port); 9039 return -EINVAL; 9040 } 9041 9042 return 0; 9043 } 9044 9045 static void niu_try_msix(struct niu *np, u8 *ldg_num_map) 9046 { 9047 struct msix_entry msi_vec[NIU_NUM_LDG]; 9048 struct niu_parent *parent = np->parent; 9049 struct pci_dev *pdev = np->pdev; 9050 int i, num_irqs; 9051 u8 first_ldg; 9052 9053 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; 9054 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++) 9055 ldg_num_map[i] = first_ldg + i; 9056 9057 num_irqs = (parent->rxchan_per_port[np->port] + 9058 parent->txchan_per_port[np->port] + 9059 (np->port == 0 ? 
3 : 1)); 9060 BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports)); 9061 9062 for (i = 0; i < num_irqs; i++) { 9063 msi_vec[i].vector = 0; 9064 msi_vec[i].entry = i; 9065 } 9066 9067 pdev->dev_flags |= PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST; 9068 9069 num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs); 9070 if (num_irqs < 0) { 9071 np->flags &= ~NIU_FLAGS_MSIX; 9072 return; 9073 } 9074 9075 np->flags |= NIU_FLAGS_MSIX; 9076 for (i = 0; i < num_irqs; i++) 9077 np->ldg[i].irq = msi_vec[i].vector; 9078 np->num_ldg = num_irqs; 9079 } 9080 9081 static int niu_n2_irq_init(struct niu *np, u8 *ldg_num_map) 9082 { 9083 #ifdef CONFIG_SPARC64 9084 struct platform_device *op = np->op; 9085 const u32 *int_prop; 9086 int i; 9087 9088 int_prop = of_get_property(op->dev.of_node, "interrupts", NULL); 9089 if (!int_prop) 9090 return -ENODEV; 9091 9092 for (i = 0; i < op->archdata.num_irqs; i++) { 9093 ldg_num_map[i] = int_prop[i]; 9094 np->ldg[i].irq = op->archdata.irqs[i]; 9095 } 9096 9097 np->num_ldg = op->archdata.num_irqs; 9098 9099 return 0; 9100 #else 9101 return -EINVAL; 9102 #endif 9103 } 9104 9105 static int niu_ldg_init(struct niu *np) 9106 { 9107 struct niu_parent *parent = np->parent; 9108 u8 ldg_num_map[NIU_NUM_LDG]; 9109 int first_chan, num_chan; 9110 int i, err, ldg_rotor; 9111 u8 port; 9112 9113 np->num_ldg = 1; 9114 np->ldg[0].irq = np->dev->irq; 9115 if (parent->plat_type == PLAT_TYPE_NIU) { 9116 err = niu_n2_irq_init(np, ldg_num_map); 9117 if (err) 9118 return err; 9119 } else 9120 niu_try_msix(np, ldg_num_map); 9121 9122 port = np->port; 9123 for (i = 0; i < np->num_ldg; i++) { 9124 struct niu_ldg *lp = &np->ldg[i]; 9125 9126 netif_napi_add(np->dev, &lp->napi, niu_poll); 9127 9128 lp->np = np; 9129 lp->ldg_num = ldg_num_map[i]; 9130 lp->timer = 2; /* XXX */ 9131 9132 /* On N2 NIU the firmware has set up the SID mappings so they go 9133 * to the correct values that will route the LDG to the proper 9134 * interrupt in the NCU interrupt table. 9135 */ 9136 if (np->parent->plat_type != PLAT_TYPE_NIU) { 9137 err = niu_set_ldg_sid(np, lp->ldg_num, port, i); 9138 if (err) 9139 return err; 9140 } 9141 } 9142 9143 /* We adopt the LDG assignment ordering used by the N2 NIU 9144 * 'interrupt' properties because that simplifies a lot of 9145 * things.
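 * The ldg_rotor walk below hands these sources out round-robin across the np->num_ldg groups; when there are fewer LDGs than sources, some of them share a group (with only two LDGs on port 0, for instance, MAC and SYSERR would land on the first group and MIF on the second, an illustrative split rather than a layout mandated by the hardware).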
This ordering is: 9146 * 9147 * MAC 9148 * MIF (if port zero) 9149 * SYSERR (if port zero) 9150 * RX channels 9151 * TX channels 9152 */ 9153 9154 ldg_rotor = 0; 9155 9156 err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], 9157 LDN_MAC(port)); 9158 if (err) 9159 return err; 9160 9161 ldg_rotor++; 9162 if (ldg_rotor == np->num_ldg) 9163 ldg_rotor = 0; 9164 9165 if (port == 0) { 9166 err = niu_ldg_assign_ldn(np, parent, 9167 ldg_num_map[ldg_rotor], 9168 LDN_MIF); 9169 if (err) 9170 return err; 9171 9172 ldg_rotor++; 9173 if (ldg_rotor == np->num_ldg) 9174 ldg_rotor = 0; 9175 9176 err = niu_ldg_assign_ldn(np, parent, 9177 ldg_num_map[ldg_rotor], 9178 LDN_DEVICE_ERROR); 9179 if (err) 9180 return err; 9181 9182 ldg_rotor++; 9183 if (ldg_rotor == np->num_ldg) 9184 ldg_rotor = 0; 9185 9186 } 9187 9188 first_chan = 0; 9189 for (i = 0; i < port; i++) 9190 first_chan += parent->rxchan_per_port[i]; 9191 num_chan = parent->rxchan_per_port[port]; 9192 9193 for (i = first_chan; i < (first_chan + num_chan); i++) { 9194 err = niu_ldg_assign_ldn(np, parent, 9195 ldg_num_map[ldg_rotor], 9196 LDN_RXDMA(i)); 9197 if (err) 9198 return err; 9199 ldg_rotor++; 9200 if (ldg_rotor == np->num_ldg) 9201 ldg_rotor = 0; 9202 } 9203 9204 first_chan = 0; 9205 for (i = 0; i < port; i++) 9206 first_chan += parent->txchan_per_port[i]; 9207 num_chan = parent->txchan_per_port[port]; 9208 for (i = first_chan; i < (first_chan + num_chan); i++) { 9209 err = niu_ldg_assign_ldn(np, parent, 9210 ldg_num_map[ldg_rotor], 9211 LDN_TXDMA(i)); 9212 if (err) 9213 return err; 9214 ldg_rotor++; 9215 if (ldg_rotor == np->num_ldg) 9216 ldg_rotor = 0; 9217 } 9218 9219 return 0; 9220 } 9221 9222 static void niu_ldg_free(struct niu *np) 9223 { 9224 if (np->flags & NIU_FLAGS_MSIX) 9225 pci_disable_msix(np->pdev); 9226 } 9227 9228 static int niu_get_of_props(struct niu *np) 9229 { 9230 #ifdef CONFIG_SPARC64 9231 struct net_device *dev = np->dev; 9232 struct device_node *dp; 9233 const char *phy_type; 9234 const u8 *mac_addr; 9235 const char *model; 9236 int prop_len; 9237 9238 if (np->parent->plat_type == PLAT_TYPE_NIU) 9239 dp = np->op->dev.of_node; 9240 else 9241 dp = pci_device_to_OF_node(np->pdev); 9242 9243 phy_type = of_get_property(dp, "phy-type", NULL); 9244 if (!phy_type) { 9245 netdev_err(dev, "%pOF: OF node lacks phy-type property\n", dp); 9246 return -EINVAL; 9247 } 9248 9249 if (!strcmp(phy_type, "none")) 9250 return -ENODEV; 9251 9252 strcpy(np->vpd.phy_type, phy_type); 9253 9254 if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { 9255 netdev_err(dev, "%pOF: Illegal phy string [%s]\n", 9256 dp, np->vpd.phy_type); 9257 return -EINVAL; 9258 } 9259 9260 mac_addr = of_get_property(dp, "local-mac-address", &prop_len); 9261 if (!mac_addr) { 9262 netdev_err(dev, "%pOF: OF node lacks local-mac-address property\n", 9263 dp); 9264 return -EINVAL; 9265 } 9266 if (prop_len != dev->addr_len) { 9267 netdev_err(dev, "%pOF: OF MAC address prop len (%d) is wrong\n", 9268 dp, prop_len); 9269 } 9270 eth_hw_addr_set(dev, mac_addr); 9271 if (!is_valid_ether_addr(&dev->dev_addr[0])) { 9272 netdev_err(dev, "%pOF: OF MAC address is invalid\n", dp); 9273 netdev_err(dev, "%pOF: [ %pM ]\n", dp, dev->dev_addr); 9274 return -EINVAL; 9275 } 9276 9277 model = of_get_property(dp, "model", NULL); 9278 9279 if (model) 9280 strcpy(np->vpd.model, model); 9281 9282 if (of_property_read_bool(dp, "hot-swappable-phy")) { 9283 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | 9284 NIU_FLAGS_HOTPLUG_PHY); 9285 } 9286 9287 return 0; 9288 #else 9289 return -EINVAL; 
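/* Without OF support there are no firmware properties to read; returning -EINVAL here (rather than the -ENODEV the OF path uses for phy-type "none") lets niu_get_invariants() fall back to the VPD/EEPROM probe path instead of aborting. */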
9290 #endif 9291 } 9292 9293 static int niu_get_invariants(struct niu *np) 9294 { 9295 int err, have_props; 9296 u32 offset; 9297 9298 err = niu_get_of_props(np); 9299 if (err == -ENODEV) 9300 return err; 9301 9302 have_props = !err; 9303 9304 err = niu_init_mac_ipp_pcs_base(np); 9305 if (err) 9306 return err; 9307 9308 if (have_props) { 9309 err = niu_get_and_validate_port(np); 9310 if (err) 9311 return err; 9312 9313 } else { 9314 if (np->parent->plat_type == PLAT_TYPE_NIU) 9315 return -EINVAL; 9316 9317 nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE); 9318 offset = niu_pci_vpd_offset(np); 9319 netif_printk(np, probe, KERN_DEBUG, np->dev, 9320 "%s() VPD offset [%08x]\n", __func__, offset); 9321 if (offset) { 9322 err = niu_pci_vpd_fetch(np, offset); 9323 if (err < 0) 9324 return err; 9325 } 9326 nw64(ESPC_PIO_EN, 0); 9327 9328 if (np->flags & NIU_FLAGS_VPD_VALID) { 9329 niu_pci_vpd_validate(np); 9330 err = niu_get_and_validate_port(np); 9331 if (err) 9332 return err; 9333 } 9334 9335 if (!(np->flags & NIU_FLAGS_VPD_VALID)) { 9336 err = niu_get_and_validate_port(np); 9337 if (err) 9338 return err; 9339 err = niu_pci_probe_sprom(np); 9340 if (err) 9341 return err; 9342 } 9343 } 9344 9345 err = niu_probe_ports(np); 9346 if (err) 9347 return err; 9348 9349 niu_ldg_init(np); 9350 9351 niu_classifier_swstate_init(np); 9352 niu_link_config_init(np); 9353 9354 err = niu_determine_phy_disposition(np); 9355 if (!err) 9356 err = niu_init_link(np); 9357 9358 return err; 9359 } 9360 9361 static LIST_HEAD(niu_parent_list); 9362 static DEFINE_MUTEX(niu_parent_lock); 9363 static int niu_parent_index; 9364 9365 static ssize_t show_port_phy(struct device *dev, 9366 struct device_attribute *attr, char *buf) 9367 { 9368 struct platform_device *plat_dev = to_platform_device(dev); 9369 struct niu_parent *p = dev_get_platdata(&plat_dev->dev); 9370 u32 port_phy = p->port_phy; 9371 char *orig_buf = buf; 9372 int i; 9373 9374 if (port_phy == PORT_PHY_UNKNOWN || 9375 port_phy == PORT_PHY_INVALID) 9376 return 0; 9377 9378 for (i = 0; i < p->num_ports; i++) { 9379 const char *type_str; 9380 int type; 9381 9382 type = phy_decode(port_phy, i); 9383 if (type == PORT_TYPE_10G) 9384 type_str = "10G"; 9385 else 9386 type_str = "1G"; 9387 buf += sprintf(buf, 9388 (i == 0) ? "%s" : " %s", 9389 type_str); 9390 } 9391 buf += sprintf(buf, "\n"); 9392 return buf - orig_buf; 9393 } 9394 9395 static ssize_t show_plat_type(struct device *dev, 9396 struct device_attribute *attr, char *buf) 9397 { 9398 struct platform_device *plat_dev = to_platform_device(dev); 9399 struct niu_parent *p = dev_get_platdata(&plat_dev->dev); 9400 const char *type_str; 9401 9402 switch (p->plat_type) { 9403 case PLAT_TYPE_ATLAS: 9404 type_str = "atlas"; 9405 break; 9406 case PLAT_TYPE_NIU: 9407 type_str = "niu"; 9408 break; 9409 case PLAT_TYPE_VF_P0: 9410 type_str = "vf_p0"; 9411 break; 9412 case PLAT_TYPE_VF_P1: 9413 type_str = "vf_p1"; 9414 break; 9415 default: 9416 type_str = "unknown"; 9417 break; 9418 } 9419 9420 return sprintf(buf, "%s\n", type_str); 9421 } 9422 9423 static ssize_t __show_chan_per_port(struct device *dev, 9424 struct device_attribute *attr, char *buf, 9425 int rx) 9426 { 9427 struct platform_device *plat_dev = to_platform_device(dev); 9428 struct niu_parent *p = dev_get_platdata(&plat_dev->dev); 9429 char *orig_buf = buf; 9430 u8 *arr; 9431 int i; 9432 9433 arr = (rx ? p->rxchan_per_port : p->txchan_per_port); 9434 9435 for (i = 0; i < p->num_ports; i++) { 9436 buf += sprintf(buf, 9437 (i == 0) ? 
"%d" : " %d", 9438 arr[i]); 9439 } 9440 buf += sprintf(buf, "\n"); 9441 9442 return buf - orig_buf; 9443 } 9444 9445 static ssize_t show_rxchan_per_port(struct device *dev, 9446 struct device_attribute *attr, char *buf) 9447 { 9448 return __show_chan_per_port(dev, attr, buf, 1); 9449 } 9450 9451 static ssize_t show_txchan_per_port(struct device *dev, 9452 struct device_attribute *attr, char *buf) 9453 { 9454 return __show_chan_per_port(dev, attr, buf, 1); 9455 } 9456 9457 static ssize_t show_num_ports(struct device *dev, 9458 struct device_attribute *attr, char *buf) 9459 { 9460 struct platform_device *plat_dev = to_platform_device(dev); 9461 struct niu_parent *p = dev_get_platdata(&plat_dev->dev); 9462 9463 return sprintf(buf, "%d\n", p->num_ports); 9464 } 9465 9466 static struct device_attribute niu_parent_attributes[] = { 9467 __ATTR(port_phy, 0444, show_port_phy, NULL), 9468 __ATTR(plat_type, 0444, show_plat_type, NULL), 9469 __ATTR(rxchan_per_port, 0444, show_rxchan_per_port, NULL), 9470 __ATTR(txchan_per_port, 0444, show_txchan_per_port, NULL), 9471 __ATTR(num_ports, 0444, show_num_ports, NULL), 9472 {} 9473 }; 9474 9475 static struct niu_parent *niu_new_parent(struct niu *np, 9476 union niu_parent_id *id, u8 ptype) 9477 { 9478 struct platform_device *plat_dev; 9479 struct niu_parent *p; 9480 int i; 9481 9482 plat_dev = platform_device_register_simple("niu-board", niu_parent_index, 9483 NULL, 0); 9484 if (IS_ERR(plat_dev)) 9485 return NULL; 9486 9487 for (i = 0; niu_parent_attributes[i].attr.name; i++) { 9488 int err = device_create_file(&plat_dev->dev, 9489 &niu_parent_attributes[i]); 9490 if (err) 9491 goto fail_unregister; 9492 } 9493 9494 p = kzalloc(sizeof(*p), GFP_KERNEL); 9495 if (!p) 9496 goto fail_unregister; 9497 9498 p->index = niu_parent_index++; 9499 9500 plat_dev->dev.platform_data = p; 9501 p->plat_dev = plat_dev; 9502 9503 memcpy(&p->id, id, sizeof(*id)); 9504 p->plat_type = ptype; 9505 INIT_LIST_HEAD(&p->list); 9506 atomic_set(&p->refcnt, 0); 9507 list_add(&p->list, &niu_parent_list); 9508 spin_lock_init(&p->lock); 9509 9510 p->rxdma_clock_divider = 7500; 9511 9512 p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES; 9513 if (p->plat_type == PLAT_TYPE_NIU) 9514 p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES; 9515 9516 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { 9517 int index = i - CLASS_CODE_USER_PROG1; 9518 9519 p->tcam_key[index] = TCAM_KEY_TSEL; 9520 p->flow_key[index] = (FLOW_KEY_IPSA | 9521 FLOW_KEY_IPDA | 9522 FLOW_KEY_PROTO | 9523 (FLOW_KEY_L4_BYTE12 << 9524 FLOW_KEY_L4_0_SHIFT) | 9525 (FLOW_KEY_L4_BYTE12 << 9526 FLOW_KEY_L4_1_SHIFT)); 9527 } 9528 9529 for (i = 0; i < LDN_MAX + 1; i++) 9530 p->ldg_map[i] = LDG_INVALID; 9531 9532 return p; 9533 9534 fail_unregister: 9535 platform_device_unregister(plat_dev); 9536 return NULL; 9537 } 9538 9539 static struct niu_parent *niu_get_parent(struct niu *np, 9540 union niu_parent_id *id, u8 ptype) 9541 { 9542 struct niu_parent *p, *tmp; 9543 int port = np->port; 9544 9545 mutex_lock(&niu_parent_lock); 9546 p = NULL; 9547 list_for_each_entry(tmp, &niu_parent_list, list) { 9548 if (!memcmp(id, &tmp->id, sizeof(*id))) { 9549 p = tmp; 9550 break; 9551 } 9552 } 9553 if (!p) 9554 p = niu_new_parent(np, id, ptype); 9555 9556 if (p) { 9557 char port_name[8]; 9558 int err; 9559 9560 sprintf(port_name, "port%d", port); 9561 err = sysfs_create_link(&p->plat_dev->dev.kobj, 9562 &np->device->kobj, 9563 port_name); 9564 if (!err) { 9565 p->ports[port] = np; 9566 atomic_inc(&p->refcnt); 9567 } 9568 } 9569 
mutex_unlock(&niu_parent_lock); 9570 9571 return p; 9572 } 9573 9574 static void niu_put_parent(struct niu *np) 9575 { 9576 struct niu_parent *p = np->parent; 9577 u8 port = np->port; 9578 char port_name[8]; 9579 9580 BUG_ON(!p || p->ports[port] != np); 9581 9582 netif_printk(np, probe, KERN_DEBUG, np->dev, 9583 "%s() port[%u]\n", __func__, port); 9584 9585 sprintf(port_name, "port%d", port); 9586 9587 mutex_lock(&niu_parent_lock); 9588 9589 sysfs_remove_link(&p->plat_dev->dev.kobj, port_name); 9590 9591 p->ports[port] = NULL; 9592 np->parent = NULL; 9593 9594 if (atomic_dec_and_test(&p->refcnt)) { 9595 list_del(&p->list); 9596 platform_device_unregister(p->plat_dev); 9597 } 9598 9599 mutex_unlock(&niu_parent_lock); 9600 } 9601 9602 static void *niu_pci_alloc_coherent(struct device *dev, size_t size, 9603 u64 *handle, gfp_t flag) 9604 { 9605 dma_addr_t dh; 9606 void *ret; 9607 9608 ret = dma_alloc_coherent(dev, size, &dh, flag); 9609 if (ret) 9610 *handle = dh; 9611 return ret; 9612 } 9613 9614 static void niu_pci_free_coherent(struct device *dev, size_t size, 9615 void *cpu_addr, u64 handle) 9616 { 9617 dma_free_coherent(dev, size, cpu_addr, handle); 9618 } 9619 9620 static u64 niu_pci_map_page(struct device *dev, struct page *page, 9621 unsigned long offset, size_t size, 9622 enum dma_data_direction direction) 9623 { 9624 return dma_map_page(dev, page, offset, size, direction); 9625 } 9626 9627 static void niu_pci_unmap_page(struct device *dev, u64 dma_address, 9628 size_t size, enum dma_data_direction direction) 9629 { 9630 dma_unmap_page(dev, dma_address, size, direction); 9631 } 9632 9633 static u64 niu_pci_map_single(struct device *dev, void *cpu_addr, 9634 size_t size, 9635 enum dma_data_direction direction) 9636 { 9637 return dma_map_single(dev, cpu_addr, size, direction); 9638 } 9639 9640 static void niu_pci_unmap_single(struct device *dev, u64 dma_address, 9641 size_t size, 9642 enum dma_data_direction direction) 9643 { 9644 dma_unmap_single(dev, dma_address, size, direction); 9645 } 9646 9647 static const struct niu_ops niu_pci_ops = { 9648 .alloc_coherent = niu_pci_alloc_coherent, 9649 .free_coherent = niu_pci_free_coherent, 9650 .map_page = niu_pci_map_page, 9651 .unmap_page = niu_pci_unmap_page, 9652 .map_single = niu_pci_map_single, 9653 .unmap_single = niu_pci_unmap_single, 9654 }; 9655 9656 static void niu_driver_version(void) 9657 { 9658 static int niu_version_printed; 9659 9660 if (niu_version_printed++ == 0) 9661 pr_info("%s", version); 9662 } 9663 9664 static struct net_device *niu_alloc_and_init(struct device *gen_dev, 9665 struct pci_dev *pdev, 9666 struct platform_device *op, 9667 const struct niu_ops *ops, u8 port) 9668 { 9669 struct net_device *dev; 9670 struct niu *np; 9671 9672 dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN); 9673 if (!dev) 9674 return NULL; 9675 9676 SET_NETDEV_DEV(dev, gen_dev); 9677 9678 np = netdev_priv(dev); 9679 np->dev = dev; 9680 np->pdev = pdev; 9681 np->op = op; 9682 np->device = gen_dev; 9683 np->ops = ops; 9684 9685 np->msg_enable = niu_debug; 9686 9687 spin_lock_init(&np->lock); 9688 INIT_WORK(&np->reset_task, niu_reset_task); 9689 9690 np->port = port; 9691 9692 return dev; 9693 } 9694 9695 static const struct net_device_ops niu_netdev_ops = { 9696 .ndo_open = niu_open, 9697 .ndo_stop = niu_close, 9698 .ndo_start_xmit = niu_start_xmit, 9699 .ndo_get_stats64 = niu_get_stats, 9700 .ndo_set_rx_mode = niu_set_rx_mode, 9701 .ndo_validate_addr = eth_validate_addr, 9702 .ndo_set_mac_address = niu_set_mac_addr, 9703 
.ndo_eth_ioctl = niu_ioctl, 9704 .ndo_tx_timeout = niu_tx_timeout, 9705 .ndo_change_mtu = niu_change_mtu, 9706 }; 9707 9708 static void niu_assign_netdev_ops(struct net_device *dev) 9709 { 9710 dev->netdev_ops = &niu_netdev_ops; 9711 dev->ethtool_ops = &niu_ethtool_ops; 9712 dev->watchdog_timeo = NIU_TX_TIMEOUT; 9713 } 9714 9715 static void niu_device_announce(struct niu *np) 9716 { 9717 struct net_device *dev = np->dev; 9718 9719 pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr); 9720 9721 if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) { 9722 pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", 9723 dev->name, 9724 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), 9725 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), 9726 (np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"), 9727 (np->mac_xcvr == MAC_XCVR_MII ? "MII" : 9728 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), 9729 np->vpd.phy_type); 9730 } else { 9731 pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", 9732 dev->name, 9733 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), 9734 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), 9735 (np->flags & NIU_FLAGS_FIBER ? "FIBER" : 9736 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" : 9737 "COPPER")), 9738 (np->mac_xcvr == MAC_XCVR_MII ? "MII" : 9739 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), 9740 np->vpd.phy_type); 9741 } 9742 } 9743 9744 static void niu_set_basic_features(struct net_device *dev) 9745 { 9746 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH; 9747 dev->features |= dev->hw_features | NETIF_F_RXCSUM; 9748 } 9749 9750 static int niu_pci_init_one(struct pci_dev *pdev, 9751 const struct pci_device_id *ent) 9752 { 9753 union niu_parent_id parent_id; 9754 struct net_device *dev; 9755 struct niu *np; 9756 int err; 9757 9758 niu_driver_version(); 9759 9760 err = pci_enable_device(pdev); 9761 if (err) { 9762 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 9763 return err; 9764 } 9765 9766 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || 9767 !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 9768 dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n"); 9769 err = -ENODEV; 9770 goto err_out_disable_pdev; 9771 } 9772 9773 err = pci_request_regions(pdev, DRV_MODULE_NAME); 9774 if (err) { 9775 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 9776 goto err_out_disable_pdev; 9777 } 9778 9779 if (!pci_is_pcie(pdev)) { 9780 dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n"); 9781 err = -ENODEV; 9782 goto err_out_free_res; 9783 } 9784 9785 dev = niu_alloc_and_init(&pdev->dev, pdev, NULL, 9786 &niu_pci_ops, PCI_FUNC(pdev->devfn)); 9787 if (!dev) { 9788 err = -ENOMEM; 9789 goto err_out_free_res; 9790 } 9791 np = netdev_priv(dev); 9792 9793 memset(&parent_id, 0, sizeof(parent_id)); 9794 parent_id.pci.domain = pci_domain_nr(pdev->bus); 9795 parent_id.pci.bus = pdev->bus->number; 9796 parent_id.pci.device = PCI_SLOT(pdev->devfn); 9797 9798 np->parent = niu_get_parent(np, &parent_id, 9799 PLAT_TYPE_ATLAS); 9800 if (!np->parent) { 9801 err = -ENOMEM; 9802 goto err_out_free_dev; 9803 } 9804 9805 pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, 9806 PCI_EXP_DEVCTL_NOSNOOP_EN, 9807 PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | 9808 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE | 9809 PCI_EXP_DEVCTL_RELAX_EN); 9810 9811 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); 9812 if (!err) 9813 dev->features |= NETIF_F_HIGHDMA; 9814 if (err) { 9815 err = 
dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 9816 if (err) { 9817 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); 9818 goto err_out_release_parent; 9819 } 9820 } 9821 9822 niu_set_basic_features(dev); 9823 9824 dev->priv_flags |= IFF_UNICAST_FLT; 9825 9826 np->regs = pci_ioremap_bar(pdev, 0); 9827 if (!np->regs) { 9828 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 9829 err = -ENOMEM; 9830 goto err_out_release_parent; 9831 } 9832 9833 pci_set_master(pdev); 9834 pci_save_state(pdev); 9835 9836 dev->irq = pdev->irq; 9837 9838 /* MTU range: 68 - 9216 */ 9839 dev->min_mtu = ETH_MIN_MTU; 9840 dev->max_mtu = NIU_MAX_MTU; 9841 9842 niu_assign_netdev_ops(dev); 9843 9844 err = niu_get_invariants(np); 9845 if (err) { 9846 if (err != -ENODEV) 9847 dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n"); 9848 goto err_out_iounmap; 9849 } 9850 9851 err = register_netdev(dev); 9852 if (err) { 9853 dev_err(&pdev->dev, "Cannot register net device, aborting\n"); 9854 goto err_out_iounmap; 9855 } 9856 9857 pci_set_drvdata(pdev, dev); 9858 9859 niu_device_announce(np); 9860 9861 return 0; 9862 9863 err_out_iounmap: 9864 if (np->regs) { 9865 iounmap(np->regs); 9866 np->regs = NULL; 9867 } 9868 9869 err_out_release_parent: 9870 niu_put_parent(np); 9871 9872 err_out_free_dev: 9873 free_netdev(dev); 9874 9875 err_out_free_res: 9876 pci_release_regions(pdev); 9877 9878 err_out_disable_pdev: 9879 pci_disable_device(pdev); 9880 9881 return err; 9882 } 9883 9884 static void niu_pci_remove_one(struct pci_dev *pdev) 9885 { 9886 struct net_device *dev = pci_get_drvdata(pdev); 9887 9888 if (dev) { 9889 struct niu *np = netdev_priv(dev); 9890 9891 unregister_netdev(dev); 9892 if (np->regs) { 9893 iounmap(np->regs); 9894 np->regs = NULL; 9895 } 9896 9897 niu_ldg_free(np); 9898 9899 niu_put_parent(np); 9900 9901 free_netdev(dev); 9902 pci_release_regions(pdev); 9903 pci_disable_device(pdev); 9904 } 9905 } 9906 9907 static int __maybe_unused niu_suspend(struct device *dev_d) 9908 { 9909 struct net_device *dev = dev_get_drvdata(dev_d); 9910 struct niu *np = netdev_priv(dev); 9911 unsigned long flags; 9912 9913 if (!netif_running(dev)) 9914 return 0; 9915 9916 flush_work(&np->reset_task); 9917 niu_netif_stop(np); 9918 9919 timer_delete_sync(&np->timer); 9920 9921 spin_lock_irqsave(&np->lock, flags); 9922 niu_enable_interrupts(np, 0); 9923 spin_unlock_irqrestore(&np->lock, flags); 9924 9925 netif_device_detach(dev); 9926 9927 spin_lock_irqsave(&np->lock, flags); 9928 niu_stop_hw(np); 9929 spin_unlock_irqrestore(&np->lock, flags); 9930 9931 return 0; 9932 } 9933 9934 static int __maybe_unused niu_resume(struct device *dev_d) 9935 { 9936 struct net_device *dev = dev_get_drvdata(dev_d); 9937 struct niu *np = netdev_priv(dev); 9938 unsigned long flags; 9939 int err; 9940 9941 if (!netif_running(dev)) 9942 return 0; 9943 9944 netif_device_attach(dev); 9945 9946 spin_lock_irqsave(&np->lock, flags); 9947 9948 netdev_lock(dev); 9949 err = niu_init_hw(np); 9950 if (!err) { 9951 np->timer.expires = jiffies + HZ; 9952 add_timer(&np->timer); 9953 niu_netif_start(np); 9954 } 9955 9956 spin_unlock_irqrestore(&np->lock, flags); 9957 netdev_unlock(dev); 9958 9959 return err; 9960 } 9961 9962 static SIMPLE_DEV_PM_OPS(niu_pm_ops, niu_suspend, niu_resume); 9963 9964 static struct pci_driver niu_pci_driver = { 9965 .name = DRV_MODULE_NAME, 9966 .id_table = niu_pci_tbl, 9967 .probe = niu_pci_init_one, 9968 .remove = niu_pci_remove_one, 9969 .driver.pm = &niu_pm_ops, 9970 }; 9971 9972 #ifdef 
CONFIG_SPARC64 9973 static void *niu_phys_alloc_coherent(struct device *dev, size_t size, 9974 u64 *dma_addr, gfp_t flag) 9975 { 9976 unsigned long order = get_order(size); 9977 unsigned long page = __get_free_pages(flag, order); 9978 9979 if (page == 0UL) 9980 return NULL; 9981 memset((char *)page, 0, PAGE_SIZE << order); 9982 *dma_addr = __pa(page); 9983 9984 return (void *) page; 9985 } 9986 9987 static void niu_phys_free_coherent(struct device *dev, size_t size, 9988 void *cpu_addr, u64 handle) 9989 { 9990 unsigned long order = get_order(size); 9991 9992 free_pages((unsigned long) cpu_addr, order); 9993 } 9994 9995 static u64 niu_phys_map_page(struct device *dev, struct page *page, 9996 unsigned long offset, size_t size, 9997 enum dma_data_direction direction) 9998 { 9999 return page_to_phys(page) + offset; 10000 } 10001 10002 static void niu_phys_unmap_page(struct device *dev, u64 dma_address, 10003 size_t size, enum dma_data_direction direction) 10004 { 10005 /* Nothing to do. */ 10006 } 10007 10008 static u64 niu_phys_map_single(struct device *dev, void *cpu_addr, 10009 size_t size, 10010 enum dma_data_direction direction) 10011 { 10012 return __pa(cpu_addr); 10013 } 10014 10015 static void niu_phys_unmap_single(struct device *dev, u64 dma_address, 10016 size_t size, 10017 enum dma_data_direction direction) 10018 { 10019 /* Nothing to do. */ 10020 } 10021 10022 static const struct niu_ops niu_phys_ops = { 10023 .alloc_coherent = niu_phys_alloc_coherent, 10024 .free_coherent = niu_phys_free_coherent, 10025 .map_page = niu_phys_map_page, 10026 .unmap_page = niu_phys_unmap_page, 10027 .map_single = niu_phys_map_single, 10028 .unmap_single = niu_phys_unmap_single, 10029 }; 10030 10031 static int niu_of_probe(struct platform_device *op) 10032 { 10033 union niu_parent_id parent_id; 10034 struct net_device *dev; 10035 struct niu *np; 10036 const u32 *reg; 10037 int err; 10038 10039 niu_driver_version(); 10040 10041 reg = of_get_property(op->dev.of_node, "reg", NULL); 10042 if (!reg) { 10043 dev_err(&op->dev, "%pOF: No 'reg' property, aborting\n", 10044 op->dev.of_node); 10045 return -ENODEV; 10046 } 10047 10048 dev = niu_alloc_and_init(&op->dev, NULL, op, 10049 &niu_phys_ops, reg[0] & 0x1); 10050 if (!dev) { 10051 err = -ENOMEM; 10052 goto err_out; 10053 } 10054 np = netdev_priv(dev); 10055 10056 memset(&parent_id, 0, sizeof(parent_id)); 10057 parent_id.of = of_get_parent(op->dev.of_node); 10058 10059 np->parent = niu_get_parent(np, &parent_id, 10060 PLAT_TYPE_NIU); 10061 if (!np->parent) { 10062 err = -ENOMEM; 10063 goto err_out_free_dev; 10064 } 10065 10066 niu_set_basic_features(dev); 10067 10068 np->regs = of_ioremap(&op->resource[1], 0, 10069 resource_size(&op->resource[1]), 10070 "niu regs"); 10071 if (!np->regs) { 10072 dev_err(&op->dev, "Cannot map device registers, aborting\n"); 10073 err = -ENOMEM; 10074 goto err_out_release_parent; 10075 } 10076 10077 np->vir_regs_1 = of_ioremap(&op->resource[2], 0, 10078 resource_size(&op->resource[2]), 10079 "niu vregs-1"); 10080 if (!np->vir_regs_1) { 10081 dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n"); 10082 err = -ENOMEM; 10083 goto err_out_iounmap; 10084 } 10085 10086 np->vir_regs_2 = of_ioremap(&op->resource[3], 0, 10087 resource_size(&op->resource[3]), 10088 "niu vregs-2"); 10089 if (!np->vir_regs_2) { 10090 dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n"); 10091 err = -ENOMEM; 10092 goto err_out_iounmap; 10093 } 10094 10095 niu_assign_netdev_ops(dev); 10096 10097 err = niu_get_invariants(np); 10098 
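/* Invariant discovery is shared with the PCI probe path; on this OF path it relies on the properties (phy-type, model, local-mac-address) read by niu_get_of_props(). */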
if (err) { 10099 if (err != -ENODEV) 10100 dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n"); 10101 goto err_out_iounmap; 10102 } 10103 10104 err = register_netdev(dev); 10105 if (err) { 10106 dev_err(&op->dev, "Cannot register net device, aborting\n"); 10107 goto err_out_iounmap; 10108 } 10109 10110 platform_set_drvdata(op, dev); 10111 10112 niu_device_announce(np); 10113 10114 return 0; 10115 10116 err_out_iounmap: 10117 if (np->vir_regs_1) { 10118 of_iounmap(&op->resource[2], np->vir_regs_1, 10119 resource_size(&op->resource[2])); 10120 np->vir_regs_1 = NULL; 10121 } 10122 10123 if (np->vir_regs_2) { 10124 of_iounmap(&op->resource[3], np->vir_regs_2, 10125 resource_size(&op->resource[3])); 10126 np->vir_regs_2 = NULL; 10127 } 10128 10129 if (np->regs) { 10130 of_iounmap(&op->resource[1], np->regs, 10131 resource_size(&op->resource[1])); 10132 np->regs = NULL; 10133 } 10134 10135 err_out_release_parent: 10136 niu_put_parent(np); 10137 10138 err_out_free_dev: 10139 free_netdev(dev); 10140 10141 err_out: 10142 return err; 10143 } 10144 10145 static void niu_of_remove(struct platform_device *op) 10146 { 10147 struct net_device *dev = platform_get_drvdata(op); 10148 10149 if (dev) { 10150 struct niu *np = netdev_priv(dev); 10151 10152 unregister_netdev(dev); 10153 10154 if (np->vir_regs_1) { 10155 of_iounmap(&op->resource[2], np->vir_regs_1, 10156 resource_size(&op->resource[2])); 10157 np->vir_regs_1 = NULL; 10158 } 10159 10160 if (np->vir_regs_2) { 10161 of_iounmap(&op->resource[3], np->vir_regs_2, 10162 resource_size(&op->resource[3])); 10163 np->vir_regs_2 = NULL; 10164 } 10165 10166 if (np->regs) { 10167 of_iounmap(&op->resource[1], np->regs, 10168 resource_size(&op->resource[1])); 10169 np->regs = NULL; 10170 } 10171 10172 niu_ldg_free(np); 10173 10174 niu_put_parent(np); 10175 10176 free_netdev(dev); 10177 } 10178 } 10179 10180 static const struct of_device_id niu_match[] = { 10181 { 10182 .name = "network", 10183 .compatible = "SUNW,niusl", 10184 }, 10185 {}, 10186 }; 10187 MODULE_DEVICE_TABLE(of, niu_match); 10188 10189 static struct platform_driver niu_of_driver = { 10190 .driver = { 10191 .name = "niu", 10192 .of_match_table = niu_match, 10193 }, 10194 .probe = niu_of_probe, 10195 .remove = niu_of_remove, 10196 }; 10197 10198 #endif /* CONFIG_SPARC64 */ 10199 10200 static int __init niu_init(void) 10201 { 10202 int err = 0; 10203 10204 BUILD_BUG_ON(PAGE_SIZE < 4 * 1024); 10205 10206 BUILD_BUG_ON(offsetof(struct page, mapping) != 10207 offsetof(union niu_page, next)); 10208 10209 niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT); 10210 10211 #ifdef CONFIG_SPARC64 10212 err = platform_driver_register(&niu_of_driver); 10213 #endif 10214 10215 if (!err) { 10216 err = pci_register_driver(&niu_pci_driver); 10217 #ifdef CONFIG_SPARC64 10218 if (err) 10219 platform_driver_unregister(&niu_of_driver); 10220 #endif 10221 } 10222 10223 return err; 10224 } 10225 10226 static void __exit niu_exit(void) 10227 { 10228 pci_unregister_driver(&niu_pci_driver); 10229 #ifdef CONFIG_SPARC64 10230 platform_driver_unregister(&niu_of_driver); 10231 #endif 10232 } 10233 10234 module_init(niu_init); 10235 module_exit(niu_exit); 10236
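/* Illustrative view of the parent sysfs interface created by
 * niu_new_parent(); the device index and values below are examples,
 * not guaranteed output:
 *
 *   # cat /sys/devices/platform/niu-board.0/port_phy
 *   10G 10G 1G 1G
 *   # cat /sys/devices/platform/niu-board.0/rxchan_per_port
 *   4 4 4 4
 *
 * Each attribute in niu_parent_attributes[] becomes one read-only
 * file, and every probed port adds a "portN" symlink pointing back
 * at its niu device.
 */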