Lines Matching "hw-flow-ctrl"

1 // SPDX-License-Identifier: GPL-2.0-only
8 * of the original driver such as link fail-over and link management because
28 #include <linux/dma-mapping.h>
60 MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
68 static int debug = -1; /* defaults above */
76 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4300) }, /* SK-9xx */
78 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4320) }, /* SK-98xx V2.0 */
79 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* D-Link DGE-530T (rev.B) */
80 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4c00) }, /* D-Link DGE-530T */
81 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302) }, /* D-Link DGE-530T Rev C1 */
84 { PCI_DEVICE(PCI_VENDOR_ID_CNET, 0x434E) }, /* CNet PowerG-2000 */
95 static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
96 static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
99 static void yukon_init(struct skge_hw *hw, int port);
100 static void genesis_mac_init(struct skge_hw *hw, int port);
113 static inline bool is_genesis(const struct skge_hw *hw)
116 return hw->chip_id == CHIP_ID_GENESIS;
136 const void __iomem *io = skge->hw->regs;
138 regs->version = 1;
139 memset(p, 0, regs->len);
142 if (regs->len > B3_RI_WTO_R1) {
144 regs->len - B3_RI_WTO_R1);
149 static u32 wol_supported(const struct skge_hw *hw)
151 if (is_genesis(hw))
154 if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
162 struct skge_hw *hw = skge->hw;
163 int port = skge->port;
164 u16 ctrl;
166 skge_write16(hw, B0_CTST, CS_RST_CLR);
167 skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
170 skge_write8(hw, B0_POWER_CTRL,
173 /* WA code for COMA mode -- clear PHY reset */
174 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
175 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
176 u32 reg = skge_read32(hw, B2_GP_IO);
179 skge_write32(hw, B2_GP_IO, reg);
182 skge_write32(hw, SK_REG(port, GPHY_CTRL),
187 skge_write32(hw, SK_REG(port, GPHY_CTRL),
192 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
194 /* Force to 10/100; skge_reset will re-enable on resume */
195 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
199 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0);
200 gm_phy_write(hw, port, PHY_MARV_CTRL,
205 /* Set GMAC to no flow control and auto update for speed/duplex */
206 gma_write16(hw, port, GM_GP_CTRL,
211 memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
212 skge->netdev->dev_addr, ETH_ALEN);
215 skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
216 ctrl = 0;
217 if (skge->wol & WAKE_PHY)
218 ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
220 ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
222 if (skge->wol & WAKE_MAGIC)
223 ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
225 ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;
227 ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
228 skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
231 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
238 wol->supported = wol_supported(skge->hw);
239 wol->wolopts = skge->wol;
245 struct skge_hw *hw = skge->hw;
247 if ((wol->wolopts & ~wol_supported(hw)) ||
248 !device_can_wakeup(&hw->pdev->dev))
249 return -EOPNOTSUPP;
251 skge->wol = wol->wolopts;
253 device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
261 static u32 skge_supported_modes(const struct skge_hw *hw)
265 if (hw->copper) {
275 if (is_genesis(hw))
281 else if (hw->chip_id == CHIP_ID_YUKON)
296 struct skge_hw *hw = skge->hw;
299 supported = skge_supported_modes(hw);
301 if (hw->copper) {
302 cmd->base.port = PORT_TP;
303 cmd->base.phy_address = hw->phy_addr;
305 cmd->base.port = PORT_FIBRE;
307 advertising = skge->advertising;
308 cmd->base.autoneg = skge->autoneg;
309 cmd->base.speed = skge->speed;
310 cmd->base.duplex = skge->duplex;
312 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
314 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
324 const struct skge_hw *hw = skge->hw;
325 u32 supported = skge_supported_modes(hw);
330 cmd->link_modes.advertising);
332 if (cmd->base.autoneg == AUTONEG_ENABLE) {
334 skge->duplex = -1;
335 skge->speed = -1;
338 u32 speed = cmd->base.speed;
342 if (cmd->base.duplex == DUPLEX_FULL)
344 else if (cmd->base.duplex == DUPLEX_HALF)
347 return -EINVAL;
350 if (cmd->base.duplex == DUPLEX_FULL)
352 else if (cmd->base.duplex == DUPLEX_HALF)
355 return -EINVAL;
359 if (cmd->base.duplex == DUPLEX_FULL)
361 else if (cmd->base.duplex == DUPLEX_HALF)
364 return -EINVAL;
367 return -EINVAL;
371 return -EINVAL;
373 skge->speed = speed;
374 skge->duplex = cmd->base.duplex;
377 skge->autoneg = cmd->base.autoneg;
378 skge->advertising = advertising;
397 strscpy(info->driver, DRV_NAME, sizeof(info->driver));
398 strscpy(info->version, DRV_VERSION, sizeof(info->version));
399 strscpy(info->bus_info, pci_name(skge->hw->pdev),
400 sizeof(info->bus_info));
440 return -EOPNOTSUPP;
449 if (is_genesis(skge->hw))
464 if (is_genesis(skge->hw))
469 dev->stats.tx_bytes = data[0];
470 dev->stats.rx_bytes = data[1];
471 dev->stats.tx_packets = data[2] + data[4] + data[6];
472 dev->stats.rx_packets = data[3] + data[5] + data[7];
473 dev->stats.multicast = data[3] + data[5];
474 dev->stats.collisions = data[10];
475 dev->stats.tx_aborted_errors = data[12];
477 return &dev->stats;
500 p->rx_max_pending = MAX_RX_RING_SIZE;
501 p->tx_max_pending = MAX_TX_RING_SIZE;
503 p->rx_pending = skge->rx_ring.count;
504 p->tx_pending = skge->tx_ring.count;
515 if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
516 p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE)
517 return -EINVAL;
519 skge->rx_ring.count = p->rx_pending;
520 skge->tx_ring.count = p->tx_pending;
535 return skge->msg_enable;
541 skge->msg_enable = value;
548 if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev))
549 return -EINVAL;
560 ecmd->rx_pause = ((skge->flow_control == FLOW_MODE_SYMMETRIC) ||
561 (skge->flow_control == FLOW_MODE_SYM_OR_REM));
562 ecmd->tx_pause = (ecmd->rx_pause ||
563 (skge->flow_control == FLOW_MODE_LOC_SEND));
565 ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause;
577 if (ecmd->autoneg != old.autoneg)
578 skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC;
580 if (ecmd->rx_pause && ecmd->tx_pause)
581 skge->flow_control = FLOW_MODE_SYMMETRIC;
582 else if (ecmd->rx_pause && !ecmd->tx_pause)
583 skge->flow_control = FLOW_MODE_SYM_OR_REM;
584 else if (!ecmd->rx_pause && ecmd->tx_pause)
585 skge->flow_control = FLOW_MODE_LOC_SEND;
587 skge->flow_control = FLOW_MODE_NONE;
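
The four branches above form a small truth table; restated for reference (same logic as the code, nothing new):

    /*
     *   rx_pause  tx_pause   resulting flow_control
     *   --------  --------   ----------------------
     *      on        on      FLOW_MODE_SYMMETRIC
     *      on        off     FLOW_MODE_SYM_OR_REM
     *      off       on      FLOW_MODE_LOC_SEND
     *      off       off     FLOW_MODE_NONE
     */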
603 static inline u32 hwkhz(const struct skge_hw *hw)
605 return is_genesis(hw) ? 53125 : 78125;
609 static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks)
611 return (ticks * 1000) / hwkhz(hw);
615 static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
617 return hwkhz(hw) * usec / 1000;
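
These helpers convert between microseconds and the chip's interrupt-moderation clock ticks. A standalone sketch of the same arithmetic, useful as a sanity check (not driver code; the kHz constants are the ones returned by hwkhz() above):

    #include <stdio.h>

    /* Same integer arithmetic as skge_usecs2clk()/skge_clk2usec(). */
    static unsigned int usecs2clk(unsigned int khz, unsigned int usec)
    {
            return khz * usec / 1000;
    }

    static unsigned int clk2usec(unsigned int khz, unsigned int ticks)
    {
            return ticks * 1000 / khz;
    }

    int main(void)
    {
            unsigned int khz = 78125;   /* Yukon; Genesis would be 53125 */
            unsigned int ticks = usecs2clk(khz, 50);

            /* 50 usec -> 3906 ticks -> 49 usec (integer division rounds down) */
            printf("50 usec -> %u ticks -> %u usec\n",
                   ticks, clk2usec(khz, ticks));
            return 0;
    }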
626 struct skge_hw *hw = skge->hw;
627 int port = skge->port;
629 ecmd->rx_coalesce_usecs = 0;
630 ecmd->tx_coalesce_usecs = 0;
632 if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) {
633 u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI));
634 u32 msk = skge_read32(hw, B2_IRQM_MSK);
637 ecmd->rx_coalesce_usecs = delay;
639 ecmd->tx_coalesce_usecs = delay;
652 struct skge_hw *hw = skge->hw;
653 int port = skge->port;
654 u32 msk = skge_read32(hw, B2_IRQM_MSK);
657 if (ecmd->rx_coalesce_usecs == 0)
659 else if (ecmd->rx_coalesce_usecs < 25 ||
660 ecmd->rx_coalesce_usecs > 33333)
661 return -EINVAL;
664 delay = ecmd->rx_coalesce_usecs;
667 if (ecmd->tx_coalesce_usecs == 0)
669 else if (ecmd->tx_coalesce_usecs < 25 ||
670 ecmd->tx_coalesce_usecs > 33333)
671 return -EINVAL;
674 delay = min(delay, ecmd->tx_coalesce_usecs);
677 skge_write32(hw, B2_IRQM_MSK, msk);
679 skge_write32(hw, B2_IRQM_CTRL, TIM_STOP);
681 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay));
682 skge_write32(hw, B2_IRQM_CTRL, TIM_START);
690 struct skge_hw *hw = skge->hw;
691 int port = skge->port;
693 spin_lock_bh(&hw->phy_lock);
694 if (is_genesis(hw)) {
697 if (hw->phy_type == SK_PHY_BCOM)
698 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF);
700 skge_write32(hw, SK_REG(port, TX_LED_VAL), 0);
701 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF);
703 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
704 skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
705 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);
709 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
710 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);
712 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
713 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
718 skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
719 skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
720 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
722 if (hw->phy_type == SK_PHY_BCOM)
723 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON);
725 skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON);
726 skge_write32(hw, SK_REG(port, TX_LED_VAL), 100);
727 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
734 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
735 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
743 gm_phy_write(hw, port, PHY_MARV_LED_CTRL,
749 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
751 (skge->speed == SPEED_100 ?
755 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
756 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
764 spin_unlock_bh(&hw->phy_lock);
798 pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2);
831 struct pci_dev *pdev = skge->hw->pdev;
833 int length = eeprom->len;
834 u16 offset = eeprom->offset;
837 return -EINVAL;
839 eeprom->magic = SKGE_EEPROM_MAGIC;
846 length -= n;
857 struct pci_dev *pdev = skge->hw->pdev;
859 int length = eeprom->len;
860 u16 offset = eeprom->offset;
863 return -EINVAL;
865 if (eeprom->magic != SKGE_EEPROM_MAGIC)
866 return -EINVAL;
878 length -= n;
915 * One-to-one association of board descriptors with ring elements
923 ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL);
924 if (!ring->start)
925 return -ENOMEM;
927 for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
928 e->desc = d;
929 if (i == ring->count - 1) {
930 e->next = ring->start;
931 d->next_offset = base;
933 e->next = e + 1;
934 d->next_offset = base + (i+1) * sizeof(*d);
937 ring->to_use = ring->to_clean = ring->start;
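
The loop above links each software element to its neighbour and, in parallel, points each hardware descriptor's next_offset at the bus address of the next descriptor, with the last entry wrapping back to base; the hardware can then follow the ring without CPU involvement. A self-contained sketch of just that wrap logic (hypothetical types, not the driver's):

    #include <stdio.h>

    #define RING_COUNT 4

    struct desc { unsigned int next_offset; };

    int main(void)
    {
            struct desc d[RING_COUNT];
            unsigned int base = 0x1000;   /* pretend bus address of d[0] */

            for (unsigned int i = 0; i < RING_COUNT; i++)
                    d[i].next_offset = (i == RING_COUNT - 1)
                            ? base   /* last entry wraps to the start */
                            : base + (i + 1) * (unsigned int)sizeof(struct desc);

            for (unsigned int i = 0; i < RING_COUNT; i++)
                    printf("desc %u -> 0x%x\n", i, d[i].next_offset);
            return 0;
    }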
946 struct skge_rx_desc *rd = e->desc;
949 map = dma_map_single(&skge->hw->pdev->dev, skb->data, bufsize,
952 if (dma_mapping_error(&skge->hw->pdev->dev, map))
953 return -1;
955 rd->dma_lo = lower_32_bits(map);
956 rd->dma_hi = upper_32_bits(map);
957 e->skb = skb;
958 rd->csum1_start = ETH_HLEN;
959 rd->csum2_start = ETH_HLEN;
960 rd->csum1 = 0;
961 rd->csum2 = 0;
965 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
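
Note the ordering here: the control word packs the flag bits and the buffer length into one 32-bit value, and BMU_OWN is the bit that hands the descriptor to the hardware, which is why it is stored only after the DMA address fields are filled in. A toy illustration of the packing (bit positions invented; the real values live in skge.h):

    /* Toy flag packing: flags in the high bits, byte count in the low bits. */
    #define OWN      (1u << 31)   /* hardware owns the descriptor */
    #define STF      (1u << 30)   /* start of frame */
    #define IRQ_EOF  (1u << 29)   /* interrupt at end of frame */

    static unsigned int make_control(unsigned int flags, unsigned int size)
    {
            return flags | (size & 0xffffu);
    }
    /* make_control(OWN | STF | IRQ_EOF, bufsize) mirrors the store above. */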
977 struct skge_rx_desc *rd = e->desc;
979 rd->csum2 = 0;
980 rd->csum2_start = ETH_HLEN;
984 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
991 struct skge_hw *hw = skge->hw;
992 struct skge_ring *ring = &skge->rx_ring;
995 e = ring->start;
997 struct skge_rx_desc *rd = e->desc;
998 rd->control = 0;
999 if (e->skb) {
1000 dma_unmap_single(&hw->pdev->dev,
1004 dev_kfree_skb(e->skb);
1005 e->skb = NULL;
1007 } while ((e = e->next) != ring->start);
1017 struct skge_ring *ring = &skge->rx_ring;
1020 e = ring->start;
1024 skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN,
1027 return -ENOMEM;
1030 if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) {
1032 return -EIO;
1034 } while ((e = e->next) != ring->start);
1036 ring->to_clean = ring->start;
1059 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
1062 netif_carrier_on(skge->netdev);
1063 netif_wake_queue(skge->netdev);
1065 netif_info(skge, link, skge->netdev,
1066 "Link is up at %d Mbps, %s duplex, flow control %s\n",
1067 skge->speed,
1068 skge->duplex == DUPLEX_FULL ? "full" : "half",
1069 skge_pause(skge->flow_status));
1074 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_REG_OFF);
1075 netif_carrier_off(skge->netdev);
1076 netif_stop_queue(skge->netdev);
1078 netif_info(skge, link, skge->netdev, "Link is down\n");
1081 static void xm_link_down(struct skge_hw *hw, int port)
1083 struct net_device *dev = hw->dev[port];
1086 xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
1092 static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
1096 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
1097 *val = xm_read16(hw, port, XM_PHY_DATA);
1099 if (hw->phy_type == SK_PHY_XMAC)
1103 if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
1108 return -ETIMEDOUT;
1110 *val = xm_read16(hw, port, XM_PHY_DATA);
1115 static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
1118 if (__xm_phy_read(hw, port, reg, &v))
1119 pr_warn("%s: phy read timed out\n", hw->dev[port]->name);
1123 static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
1127 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
1129 if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
1133 return -EIO;
1136 xm_write16(hw, port, XM_PHY_DATA, val);
1138 if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
1142 return -ETIMEDOUT;
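
Both PHY accessors follow the same busy-wait shape: start the operation, poll a ready/busy flag a bounded number of times, and give up with -ETIMEDOUT (or -EIO) if it never settles. A runnable standalone sketch of that pattern (register, bit value, and retry count are illustrative):

    #include <errno.h>
    #include <stdio.h>

    #define PHY_BUSY 0x0008u

    static unsigned int mmu_cmd = PHY_BUSY;  /* fake register: busy at first */
    static int polls;

    static unsigned int read_mmu_cmd(void)
    {
            if (++polls >= 3)                /* pretend hw finishes after 3 reads */
                    mmu_cmd &= ~PHY_BUSY;
            return mmu_cmd;
    }

    static int wait_phy_ready(void)
    {
            for (int i = 0; i < 25; i++)     /* bounded retries, as above */
                    if (!(read_mmu_cmd() & PHY_BUSY))
                            return 0;
            return -ETIMEDOUT;
    }

    int main(void)
    {
            printf("wait_phy_ready() = %d after %d polls\n",
                   wait_phy_ready(), polls);
            return 0;
    }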
1145 static void genesis_init(struct skge_hw *hw)
1148 skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100);
1149 skge_write8(hw, B2_BSC_CTRL, BSC_START);
1152 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
1155 skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53);
1156 skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53);
1157 skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53);
1158 skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53);
1160 skge_write8(hw, B3_MA_RCINI_RX1, 0);
1161 skge_write8(hw, B3_MA_RCINI_RX2, 0);
1162 skge_write8(hw, B3_MA_RCINI_TX1, 0);
1163 skge_write8(hw, B3_MA_RCINI_TX2, 0);
1166 skge_write16(hw, B3_PA_CTRL, PA_RST_CLR);
1167 skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
1168 skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
1169 skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
1170 skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
1173 static void genesis_reset(struct skge_hw *hw, int port)
1178 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
1181 xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
1182 xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
1183 xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */
1184 xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */
1185 xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */
1188 if (hw->phy_type == SK_PHY_BCOM)
1189 xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);
1191 xm_outhash(hw, port, XM_HSM, zero);
1194 reg = xm_read32(hw, port, XM_MODE);
1195 xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF);
1196 xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF);
1217 static void bcom_check_link(struct skge_hw *hw, int port)
1219 struct net_device *dev = hw->dev[port];
1224 xm_phy_read(hw, port, PHY_BCOM_STAT);
1225 status = xm_phy_read(hw, port, PHY_BCOM_STAT);
1228 xm_link_down(hw, port);
1232 if (skge->autoneg == AUTONEG_ENABLE) {
1238 lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
1244 aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);
1249 skge->duplex = DUPLEX_FULL;
1252 skge->duplex = DUPLEX_HALF;
1259 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
1262 skge->flow_status = FLOW_STAT_SYMMETRIC;
1265 skge->flow_status = FLOW_STAT_REM_SEND;
1268 skge->flow_status = FLOW_STAT_LOC_SEND;
1271 skge->flow_status = FLOW_STAT_NONE;
1273 skge->speed = SPEED_1000;
1285 struct skge_hw *hw = skge->hw;
1286 int port = skge->port;
1305 id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);
1308 r = xm_read16(hw, port, XM_MMU_CMD);
1310 xm_write16(hw, port, XM_MMU_CMD, r);
1319 xm_phy_write(hw, port,
1329 xm_phy_write(hw, port,
1338 r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
1340 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r);
1343 xm_read16(hw, port, XM_ISRC);
1348 if (skge->autoneg == AUTONEG_ENABLE) {
1351 * 1000Base-T Link Acquisition Failure in Slave Mode
1352 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
1355 if (skge->advertising & ADVERTISED_1000baseT_Half)
1357 if (skge->advertising & ADVERTISED_1000baseT_Full)
1359 xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv);
1363 if (skge->duplex == DUPLEX_FULL)
1366 xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE);
1370 xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV,
1371 phy_pause_map[skge->flow_control] | PHY_AN_CSMA);
1374 if (hw->dev[port]->mtu > ETH_DATA_LEN) {
1375 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
1382 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
1383 xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);
1386 xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
1391 struct skge_hw *hw = skge->hw;
1392 int port = skge->port;
1393 u16 ctrl = 0;
1395 if (skge->autoneg == AUTONEG_ENABLE) {
1396 if (skge->advertising & ADVERTISED_1000baseT_Half)
1397 ctrl |= PHY_X_AN_HD;
1398 if (skge->advertising & ADVERTISED_1000baseT_Full)
1399 ctrl |= PHY_X_AN_FD;
1401 ctrl |= fiber_pause_map[skge->flow_control];
1403 xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl);
1405 /* Restart Auto-negotiation */
1406 ctrl = PHY_CT_ANE | PHY_CT_RE_CFG;
1409 if (skge->duplex == DUPLEX_FULL)
1410 ctrl |= PHY_CT_DUP_MD;
1412 * Do NOT enable Auto-negotiation here. This would hold
1417 xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl);
1420 mod_timer(&skge->link_timer, jiffies + LINK_HZ);
1426 struct skge_hw *hw = skge->hw;
1427 int port = skge->port;
1431 xm_phy_read(hw, port, PHY_XMAC_STAT);
1432 status = xm_phy_read(hw, port, PHY_XMAC_STAT);
1435 xm_link_down(hw, port);
1439 if (skge->autoneg == AUTONEG_ENABLE) {
1445 lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
1451 res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI);
1456 skge->duplex = DUPLEX_FULL;
1459 skge->duplex = DUPLEX_HALF;
1466 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
1467 if ((skge->flow_control == FLOW_MODE_SYMMETRIC ||
1468 skge->flow_control == FLOW_MODE_SYM_OR_REM) &&
1470 skge->flow_status = FLOW_STAT_SYMMETRIC;
1471 else if (skge->flow_control == FLOW_MODE_SYM_OR_REM &&
1474 skge->flow_status = FLOW_STAT_REM_SEND;
1475 else if (skge->flow_control == FLOW_MODE_LOC_SEND &&
1478 skge->flow_status = FLOW_STAT_LOC_SEND;
1480 skge->flow_status = FLOW_STAT_NONE;
1482 skge->speed = SPEED_1000;
1499 struct net_device *dev = skge->netdev;
1500 struct skge_hw *hw = skge->hw;
1501 int port = skge->port;
1508 spin_lock_irqsave(&hw->phy_lock, flags);
1515 if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS)
1519 /* Re-enable interrupt to detect link down */
1521 u16 msk = xm_read16(hw, port, XM_IMSK);
1523 xm_write16(hw, port, XM_IMSK, msk);
1524 xm_read16(hw, port, XM_ISRC);
1527 mod_timer(&skge->link_timer,
1530 spin_unlock_irqrestore(&hw->phy_lock, flags);
1533 static void genesis_mac_init(struct skge_hw *hw, int port)
1535 struct net_device *dev = hw->dev[port];
1537 int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
1543 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
1545 if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
1554 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
1561 if (hw->phy_type != SK_PHY_XMAC) {
1563 r = skge_read32(hw, B2_GP_IO);
1569 skge_write32(hw, B2_GP_IO, r);
1572 xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
1576 switch (hw->phy_type) {
1582 bcom_check_link(hw, port);
1586 xm_outaddr(hw, port, XM_SA, dev->dev_addr);
1590 xm_outaddr(hw, port, XM_EXM(i), zero);
1593 xm_write16(hw, port, XM_STAT_CMD,
1596 xm_write16(hw, port, XM_STAT_CMD,
1600 xm_write16(hw, port, XM_RX_HI_WM, 1450);
1607 if (skge->duplex == DUPLEX_HALF) {
1615 xm_write16(hw, port, XM_RX_CMD, r);
1618 xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD);
1621 if (hw->ports > 1 && jumbo)
1622 xm_write16(hw, port, XM_TX_THR, 1020);
1624 xm_write16(hw, port, XM_TX_THR, 512);
1640 xm_write32(hw, port, XM_MODE, XM_DEF_MODE);
1645 * - Enable all bits excepting 'Octets Rx OK Low CntOv'
1648 xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);
1652 * - Enable all bits excepting 'Octets Tx OK Low CntOv'
1655 xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK);
1658 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
1661 skge_write8(hw, B3_MA_TOINI_RX1, 72);
1662 skge_write8(hw, B3_MA_TOINI_RX2, 72);
1663 skge_write8(hw, B3_MA_TOINI_TX1, 72);
1664 skge_write8(hw, B3_MA_TOINI_TX2, 72);
1666 skge_write8(hw, B3_MA_RCINI_RX1, 0);
1667 skge_write8(hw, B3_MA_RCINI_RX2, 0);
1668 skge_write8(hw, B3_MA_RCINI_TX1, 0);
1669 skge_write8(hw, B3_MA_RCINI_TX2, 0);
1672 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
1673 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
1674 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);
1677 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
1678 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
1679 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);
1683 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH);
1686 skge_write16(hw, B3_PA_CTRL,
1693 struct skge_hw *hw = skge->hw;
1694 int port = skge->port;
1699 cmd = xm_read16(hw, port, XM_MMU_CMD);
1701 xm_write16(hw, port, XM_MMU_CMD, cmd);
1703 genesis_reset(hw, port);
1706 skge_write16(hw, B3_PA_CTRL,
1710 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
1712 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);
1713 if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST))
1715 } while (--retries > 0);
1718 if (hw->phy_type != SK_PHY_XMAC) {
1719 u32 reg = skge_read32(hw, B2_GP_IO);
1727 skge_write32(hw, B2_GP_IO, reg);
1728 skge_read32(hw, B2_GP_IO);
1731 xm_write16(hw, port, XM_MMU_CMD,
1732 xm_read16(hw, port, XM_MMU_CMD)
1735 xm_read16(hw, port, XM_MMU_CMD);
1741 struct skge_hw *hw = skge->hw;
1742 int port = skge->port;
1746 xm_write16(hw, port,
1750 while (xm_read16(hw, port, XM_STAT_CMD)
1758 data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32
1759 | xm_read32(hw, port, XM_TXO_OK_LO);
1760 data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32
1761 | xm_read32(hw, port, XM_RXO_OK_LO);
1764 data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset);
1767 static void genesis_mac_intr(struct skge_hw *hw, int port)
1769 struct net_device *dev = hw->dev[port];
1771 u16 status = xm_read16(hw, port, XM_ISRC);
1773 netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
1776 if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) {
1777 xm_link_down(hw, port);
1778 mod_timer(&skge->link_timer, jiffies + 1);
1782 xm_write32(hw, port, XM_MODE, XM_MD_FTF);
1783 ++dev->stats.tx_fifo_errors;
1789 struct skge_hw *hw = skge->hw;
1790 int port = skge->port;
1794 cmd = xm_read16(hw, port, XM_MMU_CMD);
1800 if (skge->flow_status == FLOW_STAT_NONE ||
1801 skge->flow_status == FLOW_STAT_LOC_SEND)
1808 xm_write16(hw, port, XM_MMU_CMD, cmd);
1810 mode = xm_read32(hw, port, XM_MODE);
1811 if (skge->flow_status == FLOW_STAT_SYMMETRIC ||
1812 skge->flow_status == FLOW_STAT_LOC_SEND) {
1819 * Send a zero pause time frame to re-start transmission.
1824 xm_write16(hw, port, XM_MAC_PTIME, 0xffff);
1827 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
1836 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
1839 xm_write32(hw, port, XM_MODE, mode);
1842 msk = xm_read16(hw, port, XM_IMSK);
1844 xm_write16(hw, port, XM_IMSK, msk);
1846 xm_read16(hw, port, XM_ISRC);
1849 cmd = xm_read16(hw, port, XM_MMU_CMD);
1850 if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL)
1857 if (hw->phy_type == SK_PHY_BCOM) {
1858 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
1859 xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
1861 xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
1865 xm_write16(hw, port, XM_MMU_CMD,
1873 struct skge_hw *hw = skge->hw;
1874 int port = skge->port;
1877 isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
1878 netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
1883 hw->dev[port]->name);
1889 u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL);
1890 xm_phy_write(hw, port, PHY_BCOM_CTRL,
1891 ctrl | PHY_CT_LOOP);
1892 xm_phy_write(hw, port, PHY_BCOM_CTRL,
1893 ctrl & ~PHY_CT_LOOP);
1897 bcom_check_link(hw, port);
1901 static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
1905 gma_write16(hw, port, GM_SMI_DATA, val);
1906 gma_write16(hw, port, GM_SMI_CTRL,
1907 GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
1911 if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
1915 pr_warn("%s: phy write timeout\n", hw->dev[port]->name);
1916 return -EIO;
1919 static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
1923 gma_write16(hw, port, GM_SMI_CTRL,
1924 GM_SMI_CT_PHY_AD(hw->phy_addr)
1929 if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
1933 return -ETIMEDOUT;
1935 *val = gma_read16(hw, port, GM_SMI_DATA);
1939 static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
1942 if (__gm_phy_read(hw, port, reg, &v))
1943 pr_warn("%s: phy read timeout\n", hw->dev[port]->name);
1948 static void yukon_init(struct skge_hw *hw, int port)
1950 struct skge_port *skge = netdev_priv(hw->dev[port]);
1951 u16 ctrl, ct1000, adv;
1953 if (skge->autoneg == AUTONEG_ENABLE) {
1954 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
1962 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
1965 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
1966 if (skge->autoneg == AUTONEG_DISABLE)
1967 ctrl &= ~PHY_CT_ANE;
1969 ctrl |= PHY_CT_RESET;
1970 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
1972 ctrl = 0;
1976 if (skge->autoneg == AUTONEG_ENABLE) {
1977 if (hw->copper) {
1978 if (skge->advertising & ADVERTISED_1000baseT_Full)
1980 if (skge->advertising & ADVERTISED_1000baseT_Half)
1982 if (skge->advertising & ADVERTISED_100baseT_Full)
1984 if (skge->advertising & ADVERTISED_100baseT_Half)
1986 if (skge->advertising & ADVERTISED_10baseT_Full)
1988 if (skge->advertising & ADVERTISED_10baseT_Half)
1991 /* Set Flow-control capabilities */
1992 adv |= phy_pause_map[skge->flow_control];
1994 if (skge->advertising & ADVERTISED_1000baseT_Full)
1996 if (skge->advertising & ADVERTISED_1000baseT_Half)
1999 adv |= fiber_pause_map[skge->flow_control];
2002 /* Restart Auto-negotiation */
2003 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
2008 if (skge->duplex == DUPLEX_FULL)
2009 ctrl |= PHY_CT_DUP_MD;
2011 switch (skge->speed) {
2013 ctrl |= PHY_CT_SP1000;
2016 ctrl |= PHY_CT_SP100;
2020 ctrl |= PHY_CT_RESET;
2023 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
2025 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
2026 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
2029 if (skge->autoneg == AUTONEG_ENABLE)
2030 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK);
2032 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
2035 static void yukon_reset(struct skge_hw *hw, int port)
2037 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0); /* disable PHY IRQs */
2038 gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
2039 gma_write16(hw, port, GM_MC_ADDR_H2, 0);
2040 gma_write16(hw, port, GM_MC_ADDR_H3, 0);
2041 gma_write16(hw, port, GM_MC_ADDR_H4, 0);
2043 gma_write16(hw, port, GM_RX_CTRL,
2044 gma_read16(hw, port, GM_RX_CTRL)
2048 /* Apparently, early versions of Yukon-Lite had wrong chip_id? */
2049 static int is_yukon_lite_a0(struct skge_hw *hw)
2054 if (hw->chip_id != CHIP_ID_YUKON)
2057 reg = skge_read32(hw, B2_FAR);
2058 skge_write8(hw, B2_FAR + 3, 0xff);
2059 ret = (skge_read8(hw, B2_FAR + 3) != 0);
2060 skge_write32(hw, B2_FAR, reg);
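
is_yukon_lite_a0() tells the revisions apart by probing whether a register byte is writable: save the register, write 0xff into one byte, read it back (non-zero means the write stuck), then restore the saved value. The same save/probe/restore idiom in a standalone sketch (here reg_byte is plain memory, so the sketch always reports writable; real hardware may not):

    #include <stdio.h>

    static unsigned char reg_byte;   /* stands in for byte 3 of B2_FAR */

    static unsigned char rd(void) { return reg_byte; }
    static void wr(unsigned char v) { reg_byte = v; }

    static int probe_writable(void)
    {
            unsigned char saved = rd();
            int writable;

            wr(0xff);
            writable = (rd() != 0);  /* reads back 0 where the byte is read-only */
            wr(saved);               /* always restore the original value */
            return writable;
    }

    int main(void)
    {
            printf("writable: %d\n", probe_writable());
            return 0;
    }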
2064 static void yukon_mac_init(struct skge_hw *hw, int port)
2066 struct skge_port *skge = netdev_priv(hw->dev[port]);
2069 const u8 *addr = hw->dev[port]->dev_addr;
2071 /* WA code for COMA mode -- set PHY reset */
2072 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
2073 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
2074 reg = skge_read32(hw, B2_GP_IO);
2076 skge_write32(hw, B2_GP_IO, reg);
2080 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
2081 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
2083 /* WA code for COMA mode -- clear PHY reset */
2084 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
2085 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
2086 reg = skge_read32(hw, B2_GP_IO);
2089 skge_write32(hw, B2_GP_IO, reg);
2095 reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
2098 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
2099 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
2100 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
2102 if (skge->autoneg == AUTONEG_DISABLE) {
2104 gma_write16(hw, port, GM_GP_CTRL,
2105 gma_read16(hw, port, GM_GP_CTRL) | reg);
2107 switch (skge->speed) {
2121 if (skge->duplex == DUPLEX_FULL)
2126 switch (skge->flow_control) {
2128 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
2132 /* disable Rx flow-control */
2137 /* enable Tx & Rx flow-control */
2141 gma_write16(hw, port, GM_GP_CTRL, reg);
2142 skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
2144 yukon_init(hw, port);
2147 reg = gma_read16(hw, port, GM_PHY_ADDR);
2148 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
2151 gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
2152 gma_write16(hw, port, GM_PHY_ADDR, reg);
2155 gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
2158 gma_write16(hw, port, GM_RX_CTRL,
2161 /* transmit flow control */
2162 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
2165 gma_write16(hw, port, GM_TX_PARAM,
2175 if (hw->dev[port]->mtu > ETH_DATA_LEN)
2178 gma_write16(hw, port, GM_SERIAL_MODE, reg);
2181 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
2183 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
2186 gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
2187 gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
2188 gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
2193 skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
2196 /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
2197 if (is_yukon_lite_a0(hw))
2200 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
2201 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
2205 * in order to flush pause packets in Rx FIFO on Yukon-1
2207 skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);
2210 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
2211 skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
2215 static void yukon_suspend(struct skge_hw *hw, int port)
2217 u16 ctrl;
2219 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
2220 ctrl |= PHY_M_PC_POL_R_DIS;
2221 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
2223 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
2224 ctrl |= PHY_CT_RESET;
2225 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
2228 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
2229 ctrl |= PHY_CT_PDOWN;
2230 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
2235 struct skge_hw *hw = skge->hw;
2236 int port = skge->port;
2238 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
2239 yukon_reset(hw, port);
2241 gma_write16(hw, port, GM_GP_CTRL,
2242 gma_read16(hw, port, GM_GP_CTRL)
2244 gma_read16(hw, port, GM_GP_CTRL);
2246 yukon_suspend(hw, port);
2249 skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
2250 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
2255 struct skge_hw *hw = skge->hw;
2256 int port = skge->port;
2259 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
2260 | gma_read32(hw, port, GM_TXO_OK_LO);
2261 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
2262 | gma_read32(hw, port, GM_RXO_OK_LO);
2265 data[i] = gma_read32(hw, port,
2269 static void yukon_mac_intr(struct skge_hw *hw, int port)
2271 struct net_device *dev = hw->dev[port];
2273 u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
2275 netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
2279 ++dev->stats.rx_fifo_errors;
2280 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
2284 ++dev->stats.tx_fifo_errors;
2285 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
2290 static u16 yukon_speed(const struct skge_hw *hw, u16 aux)
2304 struct skge_hw *hw = skge->hw;
2305 int port = skge->port;
2309 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
2311 reg = gma_read16(hw, port, GM_GP_CTRL);
2312 if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
2317 gma_write16(hw, port, GM_GP_CTRL, reg);
2319 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
2325 struct skge_hw *hw = skge->hw;
2326 int port = skge->port;
2327 u16 ctrl;
2329 ctrl = gma_read16(hw, port, GM_GP_CTRL);
2330 ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
2331 gma_write16(hw, port, GM_GP_CTRL, ctrl);
2333 if (skge->flow_status == FLOW_STAT_REM_SEND) {
2334 ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
2335 ctrl |= PHY_M_AN_ASP;
2337 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl);
2342 yukon_init(hw, port);
2347 struct skge_hw *hw = skge->hw;
2348 int port = skge->port;
2352 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
2353 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
2355 netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
2359 if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
2365 if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
2375 skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
2377 skge->speed = yukon_speed(hw, phystat);
2379 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
2382 skge->flow_status = FLOW_STAT_SYMMETRIC;
2385 skge->flow_status = FLOW_STAT_REM_SEND;
2388 skge->flow_status = FLOW_STAT_LOC_SEND;
2391 skge->flow_status = FLOW_STAT_NONE;
2394 if (skge->flow_status == FLOW_STAT_NONE ||
2395 (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
2396 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
2398 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
2404 skge->speed = yukon_speed(hw, phystat);
2407 skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
2416 pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason);
2423 struct skge_hw *hw = skge->hw;
2424 int port = skge->port;
2425 struct net_device *dev = hw->dev[port];
2427 netif_stop_queue(skge->netdev);
2428 netif_carrier_off(skge->netdev);
2430 spin_lock_bh(&hw->phy_lock);
2431 if (is_genesis(hw)) {
2432 genesis_reset(hw, port);
2433 genesis_mac_init(hw, port);
2435 yukon_reset(hw, port);
2436 yukon_init(hw, port);
2438 spin_unlock_bh(&hw->phy_lock);
2448 struct skge_hw *hw = skge->hw;
2449 int err = -EOPNOTSUPP;
2452 return -ENODEV; /* Phy still in reset */
2456 data->phy_id = hw->phy_addr;
2461 spin_lock_bh(&hw->phy_lock);
2463 if (is_genesis(hw))
2464 err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
2466 err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
2467 spin_unlock_bh(&hw->phy_lock);
2468 data->val_out = val;
2473 spin_lock_bh(&hw->phy_lock);
2474 if (is_genesis(hw))
2475 err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
2476 data->val_in);
2478 err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
2479 data->val_in);
2480 spin_unlock_bh(&hw->phy_lock);
2486 static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
2492 end = start + len - 1;
2494 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
2495 skge_write32(hw, RB_ADDR(q, RB_START), start);
2496 skge_write32(hw, RB_ADDR(q, RB_WP), start);
2497 skge_write32(hw, RB_ADDR(q, RB_RP), start);
2498 skge_write32(hw, RB_ADDR(q, RB_END), end);
2502 skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
2504 skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
2510 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
2513 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
2520 struct skge_hw *hw = skge->hw;
2522 u64 base = skge->dma + (e->desc - skge->mem);
2525 if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
2528 skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
2529 skge_write32(hw, Q_ADDR(q, Q_F), watermark);
2530 skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
2531 skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
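
The queue's descriptor base is a 64-bit bus address, but the CSR block exposes it as two 32-bit registers, hence the high/low pair of writes above. The split in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t base = 0x00000001fee1f00dULL;  /* made-up bus address */
            uint32_t hi = (uint32_t)(base >> 32);   /* what goes in Q_DA_H */
            uint32_t lo = (uint32_t)base;           /* what goes in Q_DA_L */

            printf("Q_DA_H=0x%08x Q_DA_L=0x%08x\n",
                   (unsigned int)hi, (unsigned int)lo);
            return 0;
    }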
2537 struct skge_hw *hw = skge->hw;
2538 int port = skge->port;
2543 if (!is_valid_ether_addr(dev->dev_addr))
2544 return -EINVAL;
2546 netif_info(skge, ifup, skge->netdev, "enabling interface\n");
2548 if (dev->mtu > RX_BUF_SIZE)
2549 skge->rx_buf_size = dev->mtu + ETH_HLEN;
2551 skge->rx_buf_size = RX_BUF_SIZE;
2554 rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
2555 tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
2556 skge->mem_size = tx_size + rx_size;
2557 skge->mem = dma_alloc_coherent(&hw->pdev->dev, skge->mem_size,
2558 &skge->dma, GFP_KERNEL);
2559 if (!skge->mem)
2560 return -ENOMEM;
2562 BUG_ON(skge->dma & 7);
2564 if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) {
2565 dev_err(&hw->pdev->dev, "dma_alloc_coherent region crosses 4G boundary\n");
2566 err = -EINVAL;
2570 err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
2578 err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
2579 skge->dma + rx_size);
2583 if (hw->ports == 1) {
2584 err = request_irq(hw->pdev->irq, skge_intr, IRQF_SHARED,
2585 dev->name, hw);
2588 hw->pdev->irq, err);
2595 spin_lock_bh(&hw->phy_lock);
2596 if (is_genesis(hw))
2597 genesis_mac_init(hw, port);
2599 yukon_mac_init(hw, port);
2600 spin_unlock_bh(&hw->phy_lock);
2602 /* Configure RAMbuffers - equally between ports and tx/rx */
2603 chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2);
2604 ram_addr = hw->ram_offset + 2 * chunk * port;
2606 skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
2607 skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);
2609 BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);
2610 skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
2611 skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);
2615 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
2618 spin_lock_irq(&hw->hw_lock);
2619 hw->intr_mask |= portmask[port];
2620 skge_write32(hw, B0_IMSK, hw->intr_mask);
2621 skge_read32(hw, B0_IMSK);
2622 spin_unlock_irq(&hw->hw_lock);
2624 napi_enable(&skge->napi);
2631 kfree(skge->tx_ring.start);
2634 kfree(skge->rx_ring.start);
2636 dma_free_coherent(&hw->pdev->dev, skge->mem_size, skge->mem,
2637 skge->dma);
2638 skge->mem = NULL;
2644 static void skge_rx_stop(struct skge_hw *hw, int port)
2646 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP);
2647 skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
2649 skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);
2655 struct skge_hw *hw = skge->hw;
2656 int port = skge->port;
2658 if (!skge->mem)
2661 netif_info(skge, ifdown, skge->netdev, "disabling interface\n");
2665 if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)
2666 del_timer_sync(&skge->link_timer);
2668 napi_disable(&skge->napi);
2671 spin_lock_irq(&hw->hw_lock);
2672 hw->intr_mask &= ~portmask[port];
2673 skge_write32(hw, B0_IMSK, (hw->ports == 1) ? 0 : hw->intr_mask);
2674 skge_read32(hw, B0_IMSK);
2675 spin_unlock_irq(&hw->hw_lock);
2677 if (hw->ports == 1)
2678 free_irq(hw->pdev->irq, hw);
2680 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_REG_OFF);
2681 if (is_genesis(hw))
2687 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
2688 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
2693 skge_write8(hw, SK_REG(port, TXA_CTRL),
2697 skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
2698 skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
2701 skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
2702 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
2705 skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET);
2707 skge_rx_stop(hw, port);
2709 if (is_genesis(hw)) {
2710 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
2711 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
2713 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
2714 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
2725 kfree(skge->rx_ring.start);
2726 kfree(skge->tx_ring.start);
2727 dma_free_coherent(&hw->pdev->dev, skge->mem_size, skge->mem,
2728 skge->dma);
2729 skge->mem = NULL;
2736 return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
2737 + (ring->to_clean - ring->to_use) - 1;
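
skge_avail() is the standard circular-ring occupancy formula: the producer (to_use) chases the consumer (to_clean), one slot is deliberately kept unused so a full ring and an empty ring are distinguishable, and count is added only when the producer has wrapped past the consumer. A standalone check of the arithmetic on plain indices:

    #include <stdio.h>

    /* Same formula as skge_avail(), with indices instead of element pointers. */
    static int avail(int count, int to_clean, int to_use)
    {
            return ((to_clean > to_use) ? 0 : count) + (to_clean - to_use) - 1;
    }

    int main(void)
    {
            printf("%d\n", avail(512, 0, 0));     /* empty ring: 511 usable */
            printf("%d\n", avail(512, 0, 10));    /* producer 10 ahead: 501 */
            printf("%d\n", avail(512, 100, 90));  /* producer wrapped: 9 */
            return 0;
    }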

static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	struct skge_element *e;
	struct skge_tx_desc *td;
	int i;
	u32 control, len;
	dma_addr_t map;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1))
		return NETDEV_TX_BUSY;

	e = skge->tx_ring.to_use;
	td = e->desc;
	BUG_ON(td->control & BMU_OWN);
	e->skb = skb;
	len = skb_headlen(skb);
	map = dma_map_single(&hw->pdev->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&hw->pdev->dev, map))
		goto mapping_error;

	dma_unmap_addr_set(e, mapaddr, map);
	dma_unmap_len_set(e, maplen, len);

	td->dma_lo = lower_32_bits(map);
	td->dma_hi = upper_32_bits(map);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const int offset = skb_checksum_start_offset(skb);

		/* This seems backwards, but it is what the sk98lin
		 * does.  Looks like hardware is wrong?
		 */
		if (ipip_hdr(skb)->protocol == IPPROTO_UDP &&
		    hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
			control = BMU_TCP_CHECK;
		else
			control = BMU_UDP_CHECK;

		td->csum_offs = 0;
		td->csum_start = offset;
		td->csum_write = offset + skb->csum_offset;
	} else
		control = BMU_CHECK;

	if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */
		control |= BMU_EOF | BMU_IRQ_EOF;
	else {
		struct skge_tx_desc *tf = td;

		control |= BMU_STFWD;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
					       skb_frag_size(frag), DMA_TO_DEVICE);
			if (dma_mapping_error(&hw->pdev->dev, map))
				goto mapping_unwind;

			e = e->next;
			e->skb = skb;
			tf = e->desc;
			BUG_ON(tf->control & BMU_OWN);

			tf->dma_lo = lower_32_bits(map);
			tf->dma_hi = upper_32_bits(map);
			dma_unmap_addr_set(e, mapaddr, map);
			dma_unmap_len_set(e, maplen, skb_frag_size(frag));

			tf->control = BMU_OWN | BMU_SW | control | skb_frag_size(frag);
		}
		tf->control |= BMU_EOF | BMU_IRQ_EOF;
	}
	/* Make sure all the descriptors written */
	wmb();
	td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
	wmb();

	netdev_sent_queue(dev, skb->len);

	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);

	netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
		     "tx queued, slot %td, len %d\n",
		     e - skge->tx_ring.start, skb->len);

	skge->tx_ring.to_use = e->next;
	smp_wmb();

	if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
		netdev_dbg(dev, "transmit queue full\n");
		netif_stop_queue(dev);
	}

	return NETDEV_TX_OK;

mapping_unwind:
	e = skge->tx_ring.to_use;
	dma_unmap_single(&hw->pdev->dev, dma_unmap_addr(e, mapaddr),
			 dma_unmap_len(e, maplen), DMA_TO_DEVICE);
	while (i-- > 0) {
		e = e->next;
		dma_unmap_page(&hw->pdev->dev, dma_unmap_addr(e, mapaddr),
			       dma_unmap_len(e, maplen), DMA_TO_DEVICE);
	}

mapping_error:
	if (net_ratelimit())
		dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
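
/*
 * Editor's note (not from the driver source): the descriptor's address
 * fields are two 32-bit words, so the 64-bit bus address returned by the
 * DMA API is split with lower_32_bits()/upper_32_bits().  Open-coded
 * equivalent of what those helpers do:
 */
#if 0	/* illustration only */
static void example_set_dma_addr(struct skge_tx_desc *td, u64 map)
{
	td->dma_lo = (u32)map;		/* bits 31..0 of the bus address */
	td->dma_hi = (u32)(map >> 32);	/* bits 63..32; zero under a 32-bit mask */
}
#endif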

static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e,
				 u32 control)
{
	/* skb header vs. fragment */
	if (control & BMU_STF)
		dma_unmap_single(&pdev->dev, dma_unmap_addr(e, mapaddr),
				 dma_unmap_len(e, maplen), DMA_TO_DEVICE);
	else
		dma_unmap_page(&pdev->dev, dma_unmap_addr(e, mapaddr),
			       dma_unmap_len(e, maplen), DMA_TO_DEVICE);
}

/* Free all buffers in transmit ring */
static void skge_tx_clean(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_element *e;

	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
		struct skge_tx_desc *td = e->desc;

		skge_tx_unmap(skge->hw->pdev, e, td->control);

		if (td->control & BMU_EOF)
			dev_kfree_skb(e->skb);
		td->control = 0;
	}

	netdev_reset_queue(dev);
	skge->tx_ring.to_clean = e;
}

static void skge_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct skge_port *skge = netdev_priv(dev);

	netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n");

	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
	skge_tx_clean(dev);
	netif_wake_queue(dev);
}

static int skge_change_mtu(struct net_device *dev, int new_mtu)
{
	int err;

	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		return 0;
	}

	skge_down(dev);

	dev->mtu = new_mtu;

	err = skge_up(dev);
	if (err)
		dev_close(dev);

	return err;
}

static void genesis_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	struct netdev_hw_addr *ha;
	u32 mode;
	u8 filter[8];

	mode = xm_read32(hw, port, XM_MODE);
	mode |= XM_MD_ENA_HASH;	/* Enable hashing */
	if (dev->flags & IFF_PROMISC)
		mode |= XM_MD_ENA_PROM;	/* Enable promiscuous */
	else
		mode &= ~XM_MD_ENA_PROM;

	if (dev->flags & IFF_ALLMULTI)
		memset(filter, 0xff, sizeof(filter));
	else {
		memset(filter, 0, sizeof(filter));

		if (skge->flow_status == FLOW_STAT_REM_SEND ||
		    skge->flow_status == FLOW_STAT_SYMMETRIC)
			genesis_add_filter(filter, pause_mc_addr);

		netdev_for_each_mc_addr(ha, dev)
			genesis_add_filter(filter, ha->addr);
	}

	xm_write32(hw, port, XM_MODE, mode);
	xm_outhash(hw, port, XM_HSM, filter);
}
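
/*
 * Editor's note (not from this excerpt): genesis_add_filter(), defined
 * earlier in the full source, hashes each address into the 64-bit
 * filter[] bitmap consulted by the XMAC.  The sketch below shows the
 * general technique, assuming a CRC over the 6-byte MAC address has
 * already been computed and is reduced to a 6-bit bucket index:
 */
#if 0	/* illustration only */
static void example_add_filter(u8 filter[8], u32 crc_of_addr)
{
	u32 bit = crc_of_addr & 0x3f;	/* 64 hash buckets -> 6 bits */

	filter[bit / 8] |= 1 << (bit % 8);
}
#endif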

static void yukon_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	struct netdev_hw_addr *ha;
	int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
			skge->flow_status == FLOW_STAT_SYMMETRIC);
	u16 reg;
	u8 filter[8];

	memset(filter, 0, sizeof(filter));

	reg = gma_read16(hw, port, GM_RX_CTRL);
	reg |= GM_RXCR_UCF_ENA;

	if (dev->flags & IFF_PROMISC)	/* promiscuous */
		reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else if (dev->flags & IFF_ALLMULTI)	/* all multicast */
		memset(filter, 0xff, sizeof(filter));
	else if (netdev_mc_empty(dev) && !rx_pause)	/* no multicast */
		reg &= ~GM_RXCR_MCF_ENA;
	else {
		reg |= GM_RXCR_MCF_ENA;

		if (rx_pause)
			yukon_add_filter(filter, pause_mc_addr);

		netdev_for_each_mc_addr(ha, dev)
			yukon_add_filter(filter, ha->addr);
	}

	gma_write16(hw, port, GM_MC_ADDR_H1,
		    (u16)filter[0] | ((u16)filter[1] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H2,
		    (u16)filter[2] | ((u16)filter[3] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H3,
		    (u16)filter[4] | ((u16)filter[5] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H4,
		    (u16)filter[6] | ((u16)filter[7] << 8));

	gma_write16(hw, port, GM_RX_CTRL, reg);
}
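
/*
 * Editor's note (not from the driver source): the four GM_MC_ADDR_H1..H4
 * writes above feed the 8-byte hash bitmap to the GMAC as little-endian
 * 16-bit halves.  Equivalent packing for one register slot 0..3:
 */
#if 0	/* illustration only */
static u16 example_pack_filter_half(const u8 filter[8], int slot)
{
	return (u16)filter[slot * 2] | ((u16)filter[slot * 2 + 1] << 8);
}
#endif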

static inline u16 phy_length(const struct skge_hw *hw, u32 status)
{
	if (is_genesis(hw))
		return status >> XMR_FS_LEN_SHIFT;
	else
		return status >> GMR_FS_LEN_SHIFT;
}

static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
{
	if (is_genesis(hw))
		return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;
	else
		return (status & GMR_FS_ANY_ERR) ||
			(status & GMR_FS_RX_OK) == 0;
}

static void skge_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	if (is_genesis(skge->hw))
		genesis_set_multicast(dev);
	else
		yukon_set_multicast(dev);
}

/* Get receive buffer from descriptor.
 * Handles copy of small buffers and reallocation failures
 */
static struct sk_buff *skge_rx_get(struct net_device *dev,
				   struct skge_element *e,
				   u32 control, u32 status, u16 csum)
{
	struct skge_port *skge = netdev_priv(dev);
	struct sk_buff *skb;
	u16 len = control & BMU_BBC;

	netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev,
		     "rx slot %td status 0x%x len %d\n",
		     e - skge->rx_ring.start, status, len);

	if (len > skge->rx_buf_size)
		goto error;

	if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
		goto error;

	if (bad_phy_status(skge->hw, status))
		goto error;

	if (phy_length(skge->hw, status) != len)
		goto error;

	if (len < RX_COPY_THRESHOLD) {
		skb = netdev_alloc_skb_ip_align(dev, len);
		if (!skb)
			goto resubmit;

		dma_sync_single_for_cpu(&skge->hw->pdev->dev,
					dma_unmap_addr(e, mapaddr),
					dma_unmap_len(e, maplen),
					DMA_FROM_DEVICE);
		skb_copy_from_linear_data(e->skb, skb->data, len);
		dma_sync_single_for_device(&skge->hw->pdev->dev,
					   dma_unmap_addr(e, mapaddr),
					   dma_unmap_len(e, maplen),
					   DMA_FROM_DEVICE);
		skge_rx_reuse(e, skge->rx_buf_size);
	} else {
		struct skge_element ee;
		struct sk_buff *nskb;

		nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
		if (!nskb)
			goto resubmit;

		ee = *e;

		skb = ee.skb;
		prefetch(skb->data);

		if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
			dev_kfree_skb(nskb);
			*e = ee;
			goto resubmit;
		}

		dma_unmap_single(&skge->hw->pdev->dev,
				 dma_unmap_addr(&ee, mapaddr),
				 dma_unmap_len(&ee, maplen),
				 DMA_FROM_DEVICE);
	}

	skb_put(skb, len);

	if (dev->features & NETIF_F_RXCSUM) {
		skb->csum = le16_to_cpu(csum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}

	skb->protocol = eth_type_trans(skb, dev);

	return skb;

error:
	netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev,
		     "rx err, slot %td control 0x%x status 0x%x\n",
		     e - skge->rx_ring.start, control, status);

	if (is_genesis(skge->hw)) {
		if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
			dev->stats.rx_length_errors++;
		if (status & XMR_FS_FRA_ERR)
			dev->stats.rx_frame_errors++;
		if (status & XMR_FS_FCS_ERR)
			dev->stats.rx_crc_errors++;
	} else {
		if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
			dev->stats.rx_length_errors++;
		if (status & GMR_FS_FRAGMENT)
			dev->stats.rx_frame_errors++;
		if (status & GMR_FS_CRC_ERR)
			dev->stats.rx_crc_errors++;
	}

resubmit:
	skge_rx_reuse(e, skge->rx_buf_size);
	return NULL;
}
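
/*
 * Editor's note (not from the driver source): the path above is the
 * classic "copy-break" optimisation.  Frames below RX_COPY_THRESHOLD are
 * memcpy'd into a fresh small skb so the large mapped buffer stays on the
 * ring; larger frames hand the mapped buffer up the stack and a new one
 * is mapped in its place.  Either allocation failing falls back to
 * resubmitting the old buffer: the frame is dropped but the ring stays
 * populated.  The trade-off in a sketch:
 */
#if 0	/* illustration only */
static bool example_copy_break(unsigned int len)
{
	/* true:  copy into a small skb, recycle the DMA buffer in place
	 * false: unmap and pass the buffer up, map a replacement
	 */
	return len < RX_COPY_THRESHOLD;
}
#endif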

/* Free all buffers in Tx ring which are no longer owned by device */
static void skge_tx_done(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_ring *ring = &skge->tx_ring;
	struct skge_element *e;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);

	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
		u32 control = ((const struct skge_tx_desc *) e->desc)->control;

		if (control & BMU_OWN)
			break;

		skge_tx_unmap(skge->hw->pdev, e, control);

		if (control & BMU_EOF) {
			netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
				     "tx done slot %td\n",
				     e - skge->tx_ring.start);

			pkts_compl++;
			bytes_compl += e->skb->len;

			dev_consume_skb_any(e->skb);
		}
	}
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
	skge->tx_ring.to_clean = e;

	/* Can run lockless until we need to synchronize to restart queue. */
	smp_mb();

	if (unlikely(netif_queue_stopped(dev) &&
		     skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
		netif_tx_lock(dev);
		if (unlikely(netif_queue_stopped(dev) &&
			     skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
			netif_wake_queue(dev);
		}
		netif_tx_unlock(dev);
	}
}

static int skge_poll(struct napi_struct *napi, int budget)
{
	struct skge_port *skge = container_of(napi, struct skge_port, napi);
	struct net_device *dev = skge->netdev;
	struct skge_hw *hw = skge->hw;
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_element *e;
	int work_done = 0;

	skge_tx_done(dev);

	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);

	for (e = ring->to_clean; prefetch(e->next), work_done < budget; e = e->next) {
		struct skge_rx_desc *rd = e->desc;
		struct sk_buff *skb;
		u32 control;

		rmb();
		control = rd->control;
		if (control & BMU_OWN)
			break;

		skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
		if (likely(skb)) {
			napi_gro_receive(napi, skb);
			++work_done;
		}
	}
	ring->to_clean = e;

	/* restart receiver */
	wmb();
	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&hw->hw_lock, flags);
		hw->intr_mask |= napimask[skge->port];
		skge_write32(hw, B0_IMSK, hw->intr_mask);
		skge_read32(hw, B0_IMSK);
		spin_unlock_irqrestore(&hw->hw_lock, flags);
	}

	return work_done;
}
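
/*
 * Editor's note (not from the driver source): this follows the standard
 * NAPI contract.  The poll routine consumes at most `budget` frames; only
 * when it finishes early may it complete NAPI and unmask the port's
 * interrupt (the final skge_read32() flushes the posted write).  If the
 * budget was exhausted, the core schedules another poll and the IRQ stays
 * masked.  Skeleton of the pattern, with hypothetical helpers:
 */
#if 0	/* illustration only */
static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = example_rx_work(napi, budget);	/* hypothetical */

	if (work_done < budget && napi_complete_done(napi, work_done))
		example_unmask_irq(napi);		/* hypothetical */

	return work_done;
}
#endif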

static void skge_mac_parity(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];

	++dev->stats.tx_heartbeat_errors;

	if (is_genesis(hw))
		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
			     MFF_CLR_PERR);
	else
		/* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T),
			    (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
			    ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
}

static void skge_mac_intr(struct skge_hw *hw, int port)
{
	if (is_genesis(hw))
		genesis_mac_intr(hw, port);
	else
		yukon_mac_intr(hw, port);
}

/* Handle device specific logic */
static void skge_error_irq(struct skge_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);

	if (is_genesis(hw)) {
		/* clear xmac errors */
		if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
			skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT);
		if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
			skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT);
	} else {
		/* Timestamp (unused) overflow */
		if (hwstatus & IS_IRQ_TIST_OV)
			skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
	}

	if (hwstatus & IS_RAM_RD_PAR) {
		dev_err(&pdev->dev, "Ram read data parity error\n");
		skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
	}

	if (hwstatus & IS_RAM_WR_PAR) {
		dev_err(&pdev->dev, "Ram write data parity error\n");
		skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
	}

	if (hwstatus & IS_M1_PAR_ERR)
		skge_mac_parity(hw, 0);

	if (hwstatus & IS_M2_PAR_ERR)
		skge_mac_parity(hw, 1);

	if (hwstatus & IS_R1_PAR_ERR) {
		dev_err(&pdev->dev, "%s: receive queue parity error\n",
			hw->dev[0]->name);
		skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
	}

	if (hwstatus & IS_R2_PAR_ERR) {
		dev_err(&pdev->dev, "%s: receive queue parity error\n",
			hw->dev[1]->name);
		skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
	}

	if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
		u16 pci_status, pci_cmd;

		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		pci_read_config_word(pdev, PCI_STATUS, &pci_status);

		dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n",
			pci_cmd, pci_status);

		/* Write the error bits back to clear them. */
		pci_status &= PCI_STATUS_ERROR_BITS;
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config_word(pdev, PCI_COMMAND,
				      pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		pci_write_config_word(pdev, PCI_STATUS, pci_status);
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

		/* if error still set then just ignore it */
		hwstatus = skge_read32(hw, B0_HWE_ISRC);
		if (hwstatus & IS_IRQ_STAT) {
			dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring them)\n");
			hw->intr_mask &= ~IS_HW_ERR;
		}
	}
}
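
/*
 * Editor's note (not from the driver source): PCI_STATUS error bits are
 * RW1C ("write 1 to clear"), which is why the masked status value read
 * above is written straight back.  The idiom in isolation:
 */
#if 0	/* illustration only */
static void example_clear_pci_errors(struct pci_dev *pdev)
{
	u16 status;

	pci_read_config_word(pdev, PCI_STATUS, &status);
	/* writing 1 clears a set bit; writing 0 leaves it untouched */
	pci_write_config_word(pdev, PCI_STATUS,
			      status & PCI_STATUS_ERROR_BITS);
}
#endif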

/*
 * Interrupt from PHY are handled in tasklet (softirq)
 * because accessing phy registers requires spin wait which might
 * cause excess interrupt latency.
 */
static void skge_extirq(struct tasklet_struct *t)
{
	struct skge_hw *hw = from_tasklet(hw, t, phy_task);
	int port;

	for (port = 0; port < hw->ports; port++) {
		struct net_device *dev = hw->dev[port];

		if (netif_running(dev)) {
			struct skge_port *skge = netdev_priv(dev);

			spin_lock(&hw->phy_lock);
			if (!is_genesis(hw))
				yukon_phy_intr(skge);
			else if (hw->phy_type == SK_PHY_BCOM)
				bcom_phy_intr(skge);
			spin_unlock(&hw->phy_lock);
		}
	}

	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask |= IS_EXT_REG;
	skge_write32(hw, B0_IMSK, hw->intr_mask);
	skge_read32(hw, B0_IMSK);
	spin_unlock_irq(&hw->hw_lock);
}

static irqreturn_t skge_intr(int irq, void *dev_id)
{
	struct skge_hw *hw = dev_id;
	u32 status;
	int handled = 0;

	spin_lock(&hw->hw_lock);
	/* Reading this register masks IRQ */
	status = skge_read32(hw, B0_SP_ISRC);
	if (status == 0 || status == ~0)
		goto out;

	handled = 1;
	status &= hw->intr_mask;
	if (status & IS_EXT_REG) {
		hw->intr_mask &= ~IS_EXT_REG;
		tasklet_schedule(&hw->phy_task);
	}

	if (status & (IS_XA1_F|IS_R1_F)) {
		struct skge_port *skge = netdev_priv(hw->dev[0]);
		hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
		napi_schedule(&skge->napi);
	}

	if (status & IS_PA_TO_TX1)
		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);

	if (status & IS_PA_TO_RX1) {
		++hw->dev[0]->stats.rx_over_errors;
		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
	}

	if (status & IS_MAC1)
		skge_mac_intr(hw, 0);

	if (hw->dev[1]) {
		struct skge_port *skge = netdev_priv(hw->dev[1]);

		if (status & (IS_XA2_F|IS_R2_F)) {
			hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
			napi_schedule(&skge->napi);
		}

		if (status & IS_PA_TO_RX2) {
			++hw->dev[1]->stats.rx_over_errors;
			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
		}

		if (status & IS_PA_TO_TX2)
			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);

		if (status & IS_MAC2)
			skge_mac_intr(hw, 1);
	}

	if (status & IS_HW_ERR)
		skge_error_irq(hw);
out:
	skge_write32(hw, B0_IMSK, hw->intr_mask);
	skge_read32(hw, B0_IMSK);
	spin_unlock(&hw->hw_lock);

	return IRQ_RETVAL(handled);
}
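
/*
 * Editor's note (not from the driver source): the handler is registered
 * with IRQF_SHARED, so it must recognise interrupts that are not its own.
 * B0_SP_ISRC reads 0 when the chip has nothing pending (and ~0 when the
 * device has vanished); in both cases handled stays 0 and IRQ_RETVAL()
 * reports IRQ_NONE so the kernel can try the other devices on the line:
 */
#if 0	/* illustration only */
static irqreturn_t example_shared_isr(int irq, void *dev_id)
{
	u32 status = example_read_irq_source(dev_id);	/* hypothetical */

	if (status == 0 || status == ~0U)
		return IRQ_NONE;	/* not ours (or device gone) */

	/* ... service and acknowledge the sources ... */
	return IRQ_HANDLED;
}
#endif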

#ifdef CONFIG_NET_POLL_CONTROLLER
static void skge_netpoll(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	disable_irq(dev->irq);
	skge_intr(dev->irq, skge->hw);
	enable_irq(dev->irq);
}
#endif

static int skge_set_mac_address(struct net_device *dev, void *p)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	unsigned port = skge->port;
	const struct sockaddr *addr = p;
	u16 ctrl;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(dev, addr->sa_data);

	if (!netif_running(dev)) {
		memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
		memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
	} else {
		/* disable Rx */
		spin_lock_bh(&hw->phy_lock);
		ctrl = gma_read16(hw, port, GM_GP_CTRL);
		gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA);

		memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
		memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);

		if (is_genesis(hw))
			xm_outaddr(hw, port, XM_SA, dev->dev_addr);
		else {
			gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
			gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
		}

		gma_write16(hw, port, GM_GP_CTRL, ctrl);
		spin_unlock_bh(&hw->phy_lock);
	}

	return 0;
}

static const struct {
	u8 id;
	const char *name;
} skge_chips[] = {
	{ CHIP_ID_GENESIS,	"Genesis" },
	{ CHIP_ID_YUKON,	"Yukon" },
	{ CHIP_ID_YUKON_LITE,	"Yukon-Lite"},
	{ CHIP_ID_YUKON_LP,	"Yukon-LP"},
};

static const char *skge_board_name(const struct skge_hw *hw)
{
	int i;
	static char buf[16];

	for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
		if (skge_chips[i].id == hw->chip_id)
			return skge_chips[i].name;

	snprintf(buf, sizeof(buf), "chipid 0x%x", hw->chip_id);
	return buf;
}
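
/*
 * Editor's note (not from the driver source): the unknown-chip fallback
 * returns a static buffer, so skge_board_name() is not reentrant.  That is
 * harmless here because it is only called from probe, which the PCI core
 * serializes for a given device.
 */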

static int skge_reset(struct skge_hw *hw)
{
	u32 reg;
	u16 ctst, pci_status;
	u8 t8, mac_cfg, pmd_type;
	int i;

	ctst = skge_read16(hw, B0_CTST);

	/* do a SW reset */
	skge_write8(hw, B0_CTST, CS_RST_SET);
	skge_write8(hw, B0_CTST, CS_RST_CLR);

	/* clear PCI errors, if any */
	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	skge_write8(hw, B2_TST_CTRL2, 0);

	pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
	pci_write_config_word(hw->pdev, PCI_STATUS,
			      pci_status | PCI_STATUS_ERROR_BITS);
	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	skge_write8(hw, B0_CTST, CS_MRST_CLR);

	/* restore CLK_RUN bits (for Yukon-Lite) */
	skge_write16(hw, B0_CTST,
		     ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));

	hw->chip_id = skge_read8(hw, B2_CHIP_ID);
	hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
	pmd_type = skge_read8(hw, B2_PMD_TYP);
	hw->copper = (pmd_type == 'T' || pmd_type == '1');

	switch (hw->chip_id) {
	case CHIP_ID_GENESIS:
#ifdef CONFIG_SKGE_GENESIS
		switch (hw->phy_type) {
		case SK_PHY_XMAC:
			hw->phy_addr = PHY_ADDR_XMAC;
			break;
		case SK_PHY_BCOM:
			hw->phy_addr = PHY_ADDR_BCOM;
			break;
		default:
			dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n",
				hw->phy_type);
			return -EOPNOTSUPP;
		}
		break;
#else
		dev_err(&hw->pdev->dev, "Genesis chip detected but not configured\n");
		return -EOPNOTSUPP;
#endif

	case CHIP_ID_YUKON:
	case CHIP_ID_YUKON_LITE:
	case CHIP_ID_YUKON_LP:
		if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
			hw->copper = 1;

		hw->phy_addr = PHY_ADDR_MARV;
		break;

	default:
		dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
			hw->chip_id);
		return -EOPNOTSUPP;
	}

	mac_cfg = skge_read8(hw, B2_MAC_CFG);
	hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
	hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;

	/* read the adapters RAM size */
	t8 = skge_read8(hw, B2_E_0);
	if (is_genesis(hw)) {
		if (t8 == 3) {
			/* special case: 4 x 64k x 36, offset = 0x80000 */
			hw->ram_size = 0x100000;
			hw->ram_offset = 0x80000;
		} else
			hw->ram_size = t8 * 512;
	} else if (t8 == 0)
		hw->ram_size = 0x20000;
	else
		hw->ram_size = t8 * 4096;

	hw->intr_mask = IS_HW_ERR;

	/* Use PHY IRQ for all but fiber based Genesis board */
	if (!(is_genesis(hw) && hw->phy_type == SK_PHY_XMAC))
		hw->intr_mask |= IS_EXT_REG;

	if (is_genesis(hw))
		genesis_init(hw);
	else {
		/* switch power to VCC (WA for VAUX problem) */
		skge_write8(hw, B0_POWER_CTRL,
			    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);

		/* avoid boards with stuck Hardware error bits */
		if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
		    (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
			dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n");
			hw->intr_mask &= ~IS_HW_ERR;
		}

		/* Clear PHY COMA */
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg);
		reg &= ~PCI_PHY_COMA;
		pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg);
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

		for (i = 0; i < hw->ports; i++) {
			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
		}
	}

	/* turn off hardware timer (unused) */
	skge_write8(hw, B2_TI_CTRL, TIM_STOP);
	skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
	skge_write8(hw, B0_LED, LED_STAT_ON);

	/* enable the Tx Arbiter */
	for (i = 0; i < hw->ports; i++)
		skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);

	/* Initialize ram interface */
	skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);

	skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);

	skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);

	/* Set interrupt moderation for Transmit only
	 * Receive interrupts avoided by NAPI
	 */
	skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
	skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
	skge_write32(hw, B2_IRQM_CTRL, TIM_START);

	/* Leave irq disabled until first port is brought up. */
	skge_write32(hw, B0_IMSK, 0);

	for (i = 0; i < hw->ports; i++) {
		if (is_genesis(hw))
			genesis_reset(hw, i);
		else
			yukon_reset(hw, i);
	}

	return 0;
}
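
/*
 * Editor's note (not from the driver source): the RAM-size decode above in
 * brief.  A Genesis reporting t8 == 3 is the special 4 x 64k x 36 layout,
 * 1 MiB total with the usable window at offset 0x80000; other Genesis
 * boards scale t8 by 512.  A Yukon reporting 0 means the built-in 0x20000
 * buffer, otherwise t8 scales by 4096.  The same decode in isolation
 * (ram_offset handling omitted):
 */
#if 0	/* illustration only */
static u32 example_ram_size(bool genesis, u8 t8)
{
	if (genesis)
		return (t8 == 3) ? 0x100000 : t8 * 512;

	return (t8 == 0) ? 0x20000 : t8 * 4096;
}
#endif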

#ifdef CONFIG_SKGE_DEBUG

static struct dentry *skge_debug;

static int skge_debug_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	const struct skge_port *skge = netdev_priv(dev);
	const struct skge_hw *hw = skge->hw;
	const struct skge_element *e;

	if (!netif_running(dev))
		return -ENETDOWN;

	seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC),
		   skge_read32(hw, B0_IMSK));

	seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring));
	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
		const struct skge_tx_desc *t = e->desc;
		seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n",
			   t->control, t->dma_hi, t->dma_lo, t->status,
			   t->csum_offs, t->csum_write, t->csum_start);
	}

	seq_printf(seq, "\nRx Ring:\n");
	for (e = skge->rx_ring.to_clean; ; e = e->next) {
		const struct skge_rx_desc *r = e->desc;

		if (r->control & BMU_OWN)
			break;

		seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n",
			   r->control, r->dma_hi, r->dma_lo, r->status,
			   r->timestamp, r->csum1, r->csum1_start);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(skge_debug);

/*
 * Use network device events to create/remove/rename
 * debugfs file entries
 */
static int skge_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct skge_port *skge;

	if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug)
		goto done;

	skge = netdev_priv(dev);
	switch (event) {
	case NETDEV_CHANGENAME:
		if (skge->debugfs)
			skge->debugfs = debugfs_rename(skge_debug,
						       skge->debugfs,
						       skge_debug, dev->name);
		break;

	case NETDEV_GOINGDOWN:
		debugfs_remove(skge->debugfs);
		skge->debugfs = NULL;
		break;

	case NETDEV_UP:
		skge->debugfs = debugfs_create_file(dev->name, 0444, skge_debug,
						    dev, &skge_debug_fops);
		break;
	}

done:
	return NOTIFY_DONE;
}

/* (notifier registration and debugfs init/cleanup helpers omitted here) */
#endif /* CONFIG_SKGE_DEBUG */

/* Initialize network device */
static struct net_device *skge_devinit(struct skge_hw *hw, int port,
				       int highmem)
{
	struct skge_port *skge;
	struct net_device *dev = alloc_etherdev(sizeof(*skge));
	u8 addr[ETH_ALEN];

	if (!dev)
		return NULL;

	SET_NETDEV_DEV(dev, &hw->pdev->dev);
	dev->netdev_ops = &skge_netdev_ops;
	dev->ethtool_ops = &skge_ethtool_ops;
	dev->watchdog_timeo = TX_WATCHDOG;
	dev->irq = hw->pdev->irq;

	/* MTU range: 60 - 9000 */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = ETH_JUMBO_MTU;

	if (highmem)
		dev->features |= NETIF_F_HIGHDMA;

	skge = netdev_priv(dev);
	netif_napi_add(dev, &skge->napi, skge_poll);
	skge->netdev = dev;
	skge->hw = hw;
	skge->msg_enable = netif_msg_init(debug, default_msg);

	skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
	skge->rx_ring.count = DEFAULT_RX_RING_SIZE;

	/* Auto speed and flow control */
	skge->autoneg = AUTONEG_ENABLE;
	skge->flow_control = FLOW_MODE_SYM_OR_REM;
	skge->duplex = -1;
	skge->speed = -1;
	skge->advertising = skge_supported_modes(hw);

	if (device_can_wakeup(&hw->pdev->dev)) {
		skge->wol = wol_supported(hw) & WAKE_MAGIC;
		device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
	}

	hw->dev[port] = dev;

	skge->port = port;

	/* Only used for Genesis XMAC */
	if (is_genesis(hw))
		timer_setup(&skge->link_timer, xm_link_timer, 0);
	else {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
				   NETIF_F_RXCSUM;
		dev->features |= dev->hw_features;
	}

	/* read the mac address */
	memcpy_fromio(addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
	eth_hw_addr_set(dev, addr);

	return dev;
}

static void skge_show_addr(struct net_device *dev)
{
	const struct skge_port *skge = netdev_priv(dev);

	netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
}

static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev, *dev1;
	struct skge_hw *hw;
	int err, using_dac = 0;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	if (!only_32bit_dma && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		using_dac = 1;
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	} else if (!(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
		using_dac = 0;
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto err_out_free_regions;
	}

#ifdef __BIG_ENDIAN
	/* byte swap descriptors in hardware */
	{
		u32 reg;

		pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
		reg |= PCI_REV_DESC;
		pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
	}
#endif

	err = -ENOMEM;
	/* space for skge@pci:0000:04:00.0 */
	hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
		     + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
	if (!hw)
		goto err_out_free_regions;

	sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));

	hw->pdev = pdev;
	spin_lock_init(&hw->hw_lock);
	spin_lock_init(&hw->phy_lock);
	tasklet_setup(&hw->phy_task, skge_extirq);

	hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
	if (!hw->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		goto err_out_free_hw;
	}

	err = skge_reset(hw);
	if (err)
		goto err_out_iounmap;

	pr_info("%s addr 0x%llx irq %d chip %s rev %d\n",
		DRV_VERSION,
		(unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
		skge_board_name(hw), hw->chip_rev);

	dev = skge_devinit(hw, 0, using_dac);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_led_off;
	}

	/* Some motherboards are broken and has zero in ROM. */
	if (!is_valid_ether_addr(dev->dev_addr))
		dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n");

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "cannot register net device\n");
		goto err_out_free_netdev;
	}

	skge_show_addr(dev);

	if (hw->ports > 1) {
		dev1 = skge_devinit(hw, 1, using_dac);
		if (!dev1) {
			err = -ENOMEM;
			goto err_out_unregister;
		}

		err = register_netdev(dev1);
		if (err) {
			dev_err(&pdev->dev, "cannot register second net device\n");
			goto err_out_free_dev1;
		}

		skge_show_addr(dev1);
	}

	err = request_irq(pdev->irq, skge_intr, IRQF_SHARED,
			  hw->irq_name, hw);
	if (err) {
		dev_err(&pdev->dev, "cannot assign irq %d\n",
			pdev->irq);
		goto err_out_unregister_dev1;
	}

	pci_set_drvdata(pdev, hw);

	return 0;

err_out_unregister_dev1:
	unregister_netdev(dev1);
err_out_free_dev1:
	free_netdev(dev1);
err_out_unregister:
	unregister_netdev(dev);
err_out_free_netdev:
	free_netdev(dev);
err_out_led_off:
	skge_write16(hw, B0_LED, LED_STAT_OFF);
err_out_iounmap:
	iounmap(hw->regs);
err_out_free_hw:
	kfree(hw);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out:
	return err;
}
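
/*
 * Editor's note (not from the driver source): probe's DMA policy is "try a
 * 64-bit mask, fall back to 32-bit", with only_32bit_dma forcing the
 * fallback on boards whose bridges corrupt 64-bit transfers (see the DMI
 * quirk table near the end of the file).  The same decision in isolation:
 */
#if 0	/* illustration only */
static int example_pick_dma_mask(struct pci_dev *pdev, bool force_32bit,
				 int *using_dac)
{
	if (!force_32bit && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = 1;
		return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	}

	*using_dac = 0;
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;

	return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}
#endif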

static void skge_remove(struct pci_dev *pdev)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	struct net_device *dev0, *dev1;

	if (!hw)
		return;

	dev1 = hw->dev[1];
	if (dev1)
		unregister_netdev(dev1);
	dev0 = hw->dev[0];
	unregister_netdev(dev0);

	tasklet_kill(&hw->phy_task);

	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask = 0;

	if (hw->ports > 1) {
		skge_write32(hw, B0_IMSK, 0);
		skge_read32(hw, B0_IMSK);
	}
	spin_unlock_irq(&hw->hw_lock);

	skge_write16(hw, B0_LED, LED_STAT_OFF);
	skge_write8(hw, B0_CTST, CS_RST_SET);

	if (hw->ports > 1)
		free_irq(pdev->irq, hw);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	if (dev1)
		free_netdev(dev1);
	free_netdev(dev0);

	iounmap(hw->regs);
	kfree(hw);
}

#ifdef CONFIG_PM_SLEEP
static int skge_suspend(struct device *dev)
{
	struct skge_hw *hw = dev_get_drvdata(dev);
	int i;

	if (!hw)
		return 0;

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		struct skge_port *skge = netdev_priv(dev);

		if (netif_running(dev))
			skge_down(dev);

		if (skge->wol)
			skge_wol_init(skge);
	}

	skge_write32(hw, B0_IMSK, 0);

	return 0;
}

static int skge_resume(struct device *dev)
{
	struct skge_hw *hw = dev_get_drvdata(dev);
	int i, err;

	if (!hw)
		return 0;

	err = skge_reset(hw);
	if (err)
		goto out;

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];

		if (netif_running(dev)) {
			err = skge_up(dev);

			if (err) {
				netdev_err(dev, "could not up: %d\n", err);
				dev_close(dev);
				goto out;
			}
		}
	}
out:
	return err;
}

static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume);
#define SKGE_PM_OPS (&skge_pm_ops)

#else
#define SKGE_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */

static void skge_shutdown(struct pci_dev *pdev)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	int i;

	if (!hw)
		return;

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		struct skge_port *skge = netdev_priv(dev);

		if (skge->wol)
			skge_wol_init(skge);
	}

	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
	pci_set_power_state(pdev, PCI_D3hot);
}

static const struct dmi_system_id skge_32bit_dma_boards[] = {
	{
		.ident = "FUJITSU SIEMENS A8NE-FM",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
			DMI_MATCH(DMI_BOARD_NAME, "A8NE-FM")
		},
	},
	{}
};
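
/*
 * Editor's note (not from this excerpt): in the full driver this DMI table
 * is consulted from the module init path (via dmi_check_system()) to set
 * only_32bit_dma before any device is probed, so skge_probe() above never
 * attempts the 64-bit DMA mask on the affected board.  Sketch:
 */
#if 0	/* illustration only */
static int __init example_check_dma_quirk(void)
{
	if (dmi_check_system(skge_32bit_dma_boards))
		only_32bit_dma = 1;

	return 0;
}
#endif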