Lines Matching +full:need +full:- +full:phy +full:- +full:for +full:- +full:wake
1 // SPDX-License-Identifier: GPL-2.0-only
3 * drivers/net/ethernet/micrel/ksz884x.c - Micrel KSZ8841/2 PCI Ethernet driver
5 * Copyright (c) 2009-2010 Micrel, Inc.
270 #define PHY_CTRL_INTERVAL (KS884X_P2MBCR_P - KS884X_P1MBCR_P)
314 /* PHY Identifier Registers */
320 /* Auto-Negotiation Advertisement Register */
340 /* Auto-Negotiation Link Partner Ability Register */
361 #define PHY_SPECIAL_INTERVAL (KS884X_P2VCT_P - KS884X_P1VCT_P)
550 (KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1))
655 #define STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
656 #define STATIC_MAC_TABLE_FWD_PORTS 00-00070000-00000000
657 #define STATIC_MAC_TABLE_VALID 00-00080000-00000000
658 #define STATIC_MAC_TABLE_OVERRIDE 00-00100000-00000000
659 #define STATIC_MAC_TABLE_USE_FID 00-00200000-00000000
660 #define STATIC_MAC_TABLE_FID 00-03C00000-00000000
674 #define VLAN_TABLE_VID 00-00000000-00000FFF
675 #define VLAN_TABLE_FID 00-00000000-0000F000
676 #define VLAN_TABLE_MEMBERSHIP 00-00000000-00070000
677 #define VLAN_TABLE_VALID 00-00000000-00080000
689 #define DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
690 #define DYNAMIC_MAC_TABLE_FID 00-000F0000-00000000
691 #define DYNAMIC_MAC_TABLE_SRC_PORT 00-00300000-00000000
692 #define DYNAMIC_MAC_TABLE_TIMESTAMP 00-00C00000-00000000
693 #define DYNAMIC_MAC_TABLE_ENTRIES 03-FF000000-00000000
694 #define DYNAMIC_MAC_TABLE_MAC_EMPTY 04-00000000-00000000
695 #define DYNAMIC_MAC_TABLE_RESERVED 78-00000000-00000000
696 #define DYNAMIC_MAC_TABLE_NOT_READY 80-00000000-00000000
717 #define MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
718 #define MIB_COUNTER_VALID 00-00000000-40000000
719 #define MIB_COUNTER_OVERFLOW 00-00000000-80000000
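The three MIB_COUNTER fields above all live in the low 32-bit data word, so a
single read of KS884X_ACC_DATA_0_OFFSET sees them. A hedged sketch of how
port_r_mib_cnt() below consumes this layout (the hex literals mirror the
documented fields):

	u32 data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);

	if (data & 0x40000000) {		/* MIB_COUNTER_VALID */
		if (data & 0x80000000)		/* MIB_COUNTER_OVERFLOW */
			*cnt += 0x3FFFFFFF + 1;	/* counter wrapped once */
		*cnt += data & 0x3FFFFFFF;	/* MIB_COUNTER_VALUE */
	}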
738 #define MAC_ADDR_ORDER(i) (ETH_ALEN - 1 - (i))
754 #define HW_TO_DEV_PORT(port) (port - 1)
933 * struct ksz_hw_desc - Hardware descriptor data structure
947 * struct ksz_sw_desc - Software descriptor data structure
959 * struct ksz_dma_buf - OS dependent DMA buffer data structure
971 * struct ksz_desc - Descriptor structure
973 * @sw: Cached memory to hold hardware descriptor values for
984 #define DMA_BUFFER(desc) ((struct ksz_dma_buf *)(&(desc)->dma_buf))
987 * struct ksz_desc_info - Descriptor information data structure
994 * @avail: Number of descriptors available for use.
995 * @last: Index for last descriptor released to hardware.
996 * @next: Index for next descriptor available for use.
997 * @mask: Mask for index wrapping.
1027 * struct ksz_mac_table - Static MAC table data structure
1049 * struct ksz_vlan_table - VLAN table data structure
1113 * struct ksz_port_mib - Port MIB data structure
1118 * @counter: 64-bit MIB counter value.
1142 * struct ksz_port_cfg - Port configuration data structure
1160 * struct ksz_switch - KSZ8842 switch data structure
1164 * @diffserv: DiffServ priority settings. Possible values from the 6-bit ToS
1166 * @p_802_1p: 802.1P priority settings. Possible values from the 3-bit 802.1p
1168 * @br_addr: Bridge address. Used for STP.
1169 * @other_addr: Other MAC address. Used for multiple network device mode.
1171 * @member: Current port membership. Used for STP.
1191 * struct ksz_port_info - Port information data structure
1195 * @advertised: Advertised auto-negotiation setting. Used to determine link.
1196 * @partner: Auto-negotiation partner setting. Used to determine link.
1224 * struct ksz_hw - KSZ884X hardware data structure
1230 * @dst_ports: Destination ports in switch for transmission.
1231 * @id: Hardware ID. Used for display only.
1241 * @tx_int_cnt: Transmit interrupt count. Used for TX optimization.
1242 * @tx_int_mask: Transmit interrupt mask. Used for TX optimization.
1243 * @tx_size: Transmit data size. Used for TX optimization.
1316 * struct ksz_port - Virtual port data structure
1317 * @duplex: Duplex mode setting. 1 for half duplex, 2 for full
1318 * duplex, and 0 for auto, which normally results in full
1320 * @speed: Speed setting. 10 for 10 Mbit, 100 for 100 Mbit, and
1321 * 0 for auto, which normally results in 100 Mbit.
1322 * @force_link: Force link setting. 0 for auto-negotiation, and 1 for
1324 * @flow_ctrl: Flow control setting. PHY_NO_FLOW_CTRL for no flow
1325 * control, and PHY_FLOW_CTRL for flow control.
1326 * PHY_TX_ONLY and PHY_RX_ONLY are not supported for 100
1327 * Mbit PHY.
1351 * struct ksz_timer_info - Timer information data structure
1354 * @max: Number of times to run timer; -1 for infinity.
1365 * struct ksz_shared_mem - OS dependent shared memory data structure
1381 * struct ksz_counter_info - OS dependent counter information data structure
1393 * struct dev_info - Network device information data structure
1397 * @desc_pool: Physical memory used for descriptor pool.
1401 * @last_skb: Socket buffer allocated for descriptor rx fragments.
1402 * @skb_index: Buffer index for receiving fragments.
1403 * @skb_len: Buffer length for receiving fragments.
1406 * @counter: Used for MIB reading.
1412 * @wol_enable: Wake-on-LAN enable set by ethtool.
1413 * @wol_support: Wake-on-LAN support used by ethtool.
1414 * @pme_wait: Used for KSZ8841 power management.
1448 * struct dev_priv - Network device private data structure
1452 * @proc_sem: Semaphore for proc access.
1494 writel(interrupt, hw->io + KS884X_INTERRUPTS_STATUS); in hw_ack_intr()
1499 hw->intr_blocked = hw->intr_mask; in hw_dis_intr()
1500 writel(0, hw->io + KS884X_INTERRUPTS_ENABLE); in hw_dis_intr()
1501 hw->intr_set = readl(hw->io + KS884X_INTERRUPTS_ENABLE); in hw_dis_intr()
1506 hw->intr_set = interrupt; in hw_set_intr()
1507 writel(interrupt, hw->io + KS884X_INTERRUPTS_ENABLE); in hw_set_intr()
1512 hw->intr_blocked = 0; in hw_ena_intr()
1513 hw_set_intr(hw, hw->intr_mask); in hw_ena_intr()
1518 hw->intr_mask &= ~(bit); in hw_dis_intr_bit()
1525 read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE); in hw_turn_off_intr()
1526 hw->intr_set = read_intr & ~interrupt; in hw_turn_off_intr()
1527 writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE); in hw_turn_off_intr()
1532 * hw_turn_on_intr - turn on specified interrupts
1541 hw->intr_mask |= bit; in hw_turn_on_intr()
1543 if (!hw->intr_blocked) in hw_turn_on_intr()
1544 hw_set_intr(hw, hw->intr_mask); in hw_turn_on_intr()
1551 read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE); in hw_ena_intr_bit()
1552 hw->intr_set = read_intr | interrupt; in hw_ena_intr_bit()
1553 writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE); in hw_ena_intr_bit()
1558 *status = readl(hw->io + KS884X_INTERRUPTS_STATUS); in hw_read_intr()
1559 *status = *status & hw->intr_set; in hw_read_intr()
1569 * hw_block_intr - block hardware interrupts
1581 if (!hw->intr_blocked) { in hw_block_intr()
1583 interrupt = hw->intr_blocked; in hw_block_intr()
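A hedged usage sketch of the block/restore pattern these helpers support;
hw_restore_intr() is assumed from the unfiltered driver:

	uint blocked = hw_block_intr(hw);  /* previously blocked mask, or 0 */

	/* ... access hardware without interrupt interference ... */

	hw_restore_intr(hw, blocked);      /* assumed no-op when blocked == 0 */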
1595 desc->phw->ctrl.data = cpu_to_le32(status.data); in reset_desc()
1600 desc->sw.ctrl.tx.hw_owned = 1; in release_desc()
1601 if (desc->sw.buf_size != desc->sw.buf.data) { in release_desc()
1602 desc->sw.buf_size = desc->sw.buf.data; in release_desc()
1603 desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data); in release_desc()
1605 desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data); in release_desc()
1610 *desc = &info->ring[info->last]; in get_rx_pkt()
1611 info->last++; in get_rx_pkt()
1612 info->last &= info->mask; in get_rx_pkt()
1613 info->avail--; in get_rx_pkt()
1614 (*desc)->sw.buf.data &= ~KS_DESC_RX_MASK; in get_rx_pkt()
1619 desc->phw->addr = cpu_to_le32(addr); in set_rx_buf()
1624 desc->sw.buf.rx.buf_size = len; in set_rx_len()
1630 *desc = &info->ring[info->next]; in get_tx_pkt()
1631 info->next++; in get_tx_pkt()
1632 info->next &= info->mask; in get_tx_pkt()
1633 info->avail--; in get_tx_pkt()
1634 (*desc)->sw.buf.data &= ~KS_DESC_TX_MASK; in get_tx_pkt()
1639 desc->phw->addr = cpu_to_le32(addr); in set_tx_buf()
1644 desc->sw.buf.tx.buf_size = len; in set_tx_len()
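Because the ring sizes are powers of two (ksz_check_desc_num() below sets
mask = alloc - 1), the index wrap-around in these helpers is a mask rather
than a modulo; a minimal illustration:

	/* alloc must be a power of two for this to equal (next + 1) % alloc */
	next = (next + 1) & (alloc - 1);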
1654 readw(hw->io + reg); \
1658 * sw_r_table - read 4 bytes of data from switch table
1676 writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); in sw_r_table()
1678 *data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET); in sw_r_table()
1684 * sw_w_table_64 - write 8 bytes of data to the switch table
1704 writel(data_hi, hw->io + KS884X_ACC_DATA_4_OFFSET); in sw_w_table_64()
1705 writel(data_lo, hw->io + KS884X_ACC_DATA_0_OFFSET); in sw_w_table_64()
1707 writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); in sw_w_table_64()
1714 * sw_w_sta_mac_table - write to the static MAC table
1752 * sw_r_vlan_table - read from the VLAN table
1762 * Return 0 if the entry is valid; otherwise -1.
1777 return -1; in sw_r_vlan_table()
1781 * port_r_mib_cnt - read MIB counter
1802 writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); in port_r_mib_cnt()
1805 for (timeout = 100; timeout > 0; timeout--) { in port_r_mib_cnt()
1806 data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET); in port_r_mib_cnt()
1820 * port_r_mib_pkt - read dropped packet counts
1844 writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); in port_r_mib_pkt()
1846 data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET); in port_r_mib_pkt()
1856 data -= cur; in port_r_mib_pkt()
1861 index -= KS_MIB_PACKET_DROPPED_TX - in port_r_mib_pkt()
1867 * port_r_cnt - read MIB counters periodically
1875 * Return non-zero when not all counters are read.
1879 struct ksz_port_mib *mib = &hw->port_mib[port]; in port_r_cnt()
1881 if (mib->mib_start < PORT_COUNTER_NUM) in port_r_cnt()
1882 while (mib->cnt_ptr < PORT_COUNTER_NUM) { in port_r_cnt()
1883 port_r_mib_cnt(hw, port, mib->cnt_ptr, in port_r_cnt()
1884 &mib->counter[mib->cnt_ptr]); in port_r_cnt()
1885 ++mib->cnt_ptr; in port_r_cnt()
1887 if (hw->mib_cnt > PORT_COUNTER_NUM) in port_r_cnt()
1888 port_r_mib_pkt(hw, port, mib->dropped, in port_r_cnt()
1889 &mib->counter[PORT_COUNTER_NUM]); in port_r_cnt()
1890 mib->cnt_ptr = 0; in port_r_cnt()
1895 * port_init_cnt - initialize MIB counter values
1904 struct ksz_port_mib *mib = &hw->port_mib[port]; in port_init_cnt()
1906 mib->cnt_ptr = 0; in port_init_cnt()
1907 if (mib->mib_start < PORT_COUNTER_NUM) in port_init_cnt()
1909 port_r_mib_cnt(hw, port, mib->cnt_ptr, in port_init_cnt()
1910 &mib->counter[mib->cnt_ptr]); in port_init_cnt()
1911 ++mib->cnt_ptr; in port_init_cnt()
1912 } while (mib->cnt_ptr < PORT_COUNTER_NUM); in port_init_cnt()
1913 if (hw->mib_cnt > PORT_COUNTER_NUM) in port_init_cnt()
1914 port_r_mib_pkt(hw, port, mib->dropped, in port_init_cnt()
1915 &mib->counter[PORT_COUNTER_NUM]); in port_init_cnt()
1916 memset((void *) mib->counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM); in port_init_cnt()
1917 mib->cnt_ptr = 0; in port_init_cnt()
1925 * port_chk - check port register bits
1943 data = readw(hw->io + addr); in port_chk()
1948 * port_cfg - set port register bits
1965 data = readw(hw->io + addr); in port_cfg()
1970 writew(data, hw->io + addr); in port_cfg()
1974 * port_chk_shift - check port bit
1990 data = readw(hw->io + addr); in port_chk_shift()
1996 * port_cfg_shift - set port bit
2011 data = readw(hw->io + addr); in port_cfg_shift()
2017 writew(data, hw->io + addr); in port_cfg_shift()
2021 * port_r8 - read byte from port register
2035 *data = readb(hw->io + addr); in port_r8()
2039 * port_r16 - read word from port register.
2053 *data = readw(hw->io + addr); in port_r16()
2057 * port_w16 - write word to port register.
2071 writew(data, hw->io + addr); in port_w16()
2075 * sw_chk - check switch register bits
2089 data = readw(hw->io + addr); in sw_chk()
2094 * sw_cfg - set switch register bits
2106 data = readw(hw->io + addr); in sw_cfg()
2111 writew(data, hw->io + addr); in sw_cfg()
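A hedged usage example of these read-modify-write helpers; SWITCH_HUGE_PACKET
is assumed from the unfiltered driver (hw_cfg_huge_frame() below pokes the
same register by hand):

	/* Set (1) or clear (0) one bit field of a 16-bit switch register. */
	sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, SWITCH_HUGE_PACKET, 1);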
2135 * sw_cfg_broad_storm - configure broadcast storm threshold
2149 data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET); in sw_cfg_broad_storm()
2152 writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET); in sw_cfg_broad_storm()
2156 * sw_get_broad_storm - get broadcast storm threshold
2167 data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET); in sw_get_broad_storm()
2176 * sw_dis_broad_storm - disable broadcast storm
2188 * sw_ena_broad_storm - enable broadcast storm
2196 sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per); in sw_ena_broad_storm()
2201 * sw_init_broad_storm - initialize broadcast storm
2210 hw->ksz_switch->broad_per = 1; in sw_init_broad_storm()
2211 sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per); in sw_init_broad_storm()
2212 for (port = 0; port < TOTAL_PORT_NUM; port++) in sw_init_broad_storm()
2218 * hw_cfg_broad_storm - configure broadcast storm
2232 hw->ksz_switch->broad_per = percent; in hw_cfg_broad_storm()
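A hedged sketch of the threshold arithmetic inside sw_cfg_broad_storm();
BROADCAST_STORM_VALUE, the 100% frame budget per polling interval, is
assumed from the unfiltered driver:

	/* Scale the per-interval broadcast frame budget by the percentage. */
	u32 value = (u32) BROADCAST_STORM_VALUE * percent / 100;

With the 1% default set by sw_init_broad_storm() above, roughly 1% of the
line-rate broadcast budget is admitted per interval.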
2236 * sw_dis_prio_rate - disable switch priority rate
2248 writel(0, hw->io + addr); in sw_dis_prio_rate()
2252 * sw_init_prio_rate - initialize switch priority rate
2261 struct ksz_switch *sw = hw->ksz_switch; in sw_init_prio_rate()
2263 for (port = 0; port < TOTAL_PORT_NUM; port++) { in sw_init_prio_rate()
2264 for (prio = 0; prio < PRIO_QUEUES; prio++) { in sw_init_prio_rate()
2265 sw->port_cfg[port].rx_rate[prio] = in sw_init_prio_rate()
2266 sw->port_cfg[port].tx_rate[prio] = 0; in sw_init_prio_rate()
2319 if (!(hw->overrides & FAST_AGING)) { in sw_flush_dyn_mac_table()
2405 for (port = 0; port < TOTAL_PORT_NUM; port++) { in sw_init_mirror()
2486 * sw_dis_diffserv - disable switch DiffServ priority
2498 * sw_dis_802_1p - disable switch 802.1p priority
2510 * sw_cfg_replace_null_vid - configure null VID replacement
2521 * sw_cfg_replace_vid - enable switch 802.1p priority re-mapping
2526 * This routine enables the 802.1p priority re-mapping function of the switch.
2537 * sw_cfg_port_based - configure switch port-based priority
2551 hw->ksz_switch->port_cfg[port].port_prio = prio; in sw_cfg_port_based()
2560 * sw_dis_multi_queue - disable transmit multiple queues
2573 * sw_init_prio - initialize switch priority
2582 struct ksz_switch *sw = hw->ksz_switch; in sw_init_prio()
2588 sw->p_802_1p[0] = 0; in sw_init_prio()
2589 sw->p_802_1p[1] = 0; in sw_init_prio()
2590 sw->p_802_1p[2] = 1; in sw_init_prio()
2591 sw->p_802_1p[3] = 1; in sw_init_prio()
2592 sw->p_802_1p[4] = 2; in sw_init_prio()
2593 sw->p_802_1p[5] = 2; in sw_init_prio()
2594 sw->p_802_1p[6] = 3; in sw_init_prio()
2595 sw->p_802_1p[7] = 3; in sw_init_prio()
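For reference, the eight assignments above amount to this tag-to-queue map,
two 802.1p tags per priority queue (illustration only):

	static const u8 p_802_1p_map[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };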
2601 for (tos = 0; tos < DIFFSERV_ENTRIES; tos++) in sw_init_prio()
2602 sw->diffserv[tos] = 0; in sw_init_prio()
2605 for (port = 0; port < TOTAL_PORT_NUM; port++) { in sw_init_prio()
2611 sw->port_cfg[port].port_prio = 0; in sw_init_prio()
2612 sw_cfg_port_based(hw, port, sw->port_cfg[port].port_prio); in sw_init_prio()
2618 * port_get_def_vid - get port default VID.
2631 *vid = readw(hw->io + addr); in port_get_def_vid()
2635 * sw_init_vlan - initialize switch VLAN
2644 struct ksz_switch *sw = hw->ksz_switch; in sw_init_vlan()
2647 for (entry = 0; entry < VLAN_TABLE_ENTRIES; entry++) { in sw_init_vlan()
2649 &sw->vlan_table[entry].vid, in sw_init_vlan()
2650 &sw->vlan_table[entry].fid, in sw_init_vlan()
2651 &sw->vlan_table[entry].member); in sw_init_vlan()
2654 for (port = 0; port < TOTAL_PORT_NUM; port++) { in sw_init_vlan()
2655 port_get_def_vid(hw, port, &sw->port_cfg[port].vid); in sw_init_vlan()
2656 sw->port_cfg[port].member = PORT_MASK; in sw_init_vlan()
2661 * sw_cfg_port_base_vlan - configure port-based VLAN membership
2664 * @member: The port-based VLAN membership.
2666 * This routine configures the port-based VLAN membership of the port.
2676 data = readb(hw->io + addr); in sw_cfg_port_base_vlan()
2679 writeb(data, hw->io + addr); in sw_cfg_port_base_vlan()
2681 hw->ksz_switch->port_cfg[port].member = member; in sw_cfg_port_base_vlan()
2685 * sw_get_addr - get the switch MAC address.
2695 for (i = 0; i < 6; i += 2) { in sw_get_addr()
2696 mac_addr[i] = readb(hw->io + KS8842_MAC_ADDR_0_OFFSET + i); in sw_get_addr()
2697 mac_addr[1 + i] = readb(hw->io + KS8842_MAC_ADDR_1_OFFSET + i); in sw_get_addr()
2702 * sw_set_addr - configure switch MAC address
2712 for (i = 0; i < 6; i += 2) { in sw_set_addr()
2713 writeb(mac_addr[i], hw->io + KS8842_MAC_ADDR_0_OFFSET + i); in sw_set_addr()
2714 writeb(mac_addr[1 + i], hw->io + KS8842_MAC_ADDR_1_OFFSET + i); in sw_set_addr()
2719 * sw_set_global_ctrl - set switch global control
2729 data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET); in sw_set_global_ctrl()
2731 writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET); in sw_set_global_ctrl()
2733 data = readw(hw->io + KS8842_SWITCH_CTRL_1_OFFSET); in sw_set_global_ctrl()
2742 if (hw->overrides & FAST_AGING) in sw_set_global_ctrl()
2746 writew(data, hw->io + KS8842_SWITCH_CTRL_1_OFFSET); in sw_set_global_ctrl()
2748 data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET); in sw_set_global_ctrl()
2752 writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET); in sw_set_global_ctrl()
2765 * port_set_stp_state - configure port spanning tree state
2784 * No need to turn on transmit because of port direct mode. in port_set_stp_state()
2802 * Need to set up static MAC table with override to keep receiving BPDU in port_set_stp_state()
2814 hw->ksz_switch->port_cfg[port].stp_state = state; in port_set_stp_state()
2823 * sw_clr_sta_mac_table - clear static MAC table
2833 for (i = 0; i < STATIC_MAC_TABLE_ENTRIES; i++) { in sw_clr_sta_mac_table()
2834 entry = &hw->ksz_switch->mac_table[i]; in sw_clr_sta_mac_table()
2836 entry->mac_addr, entry->ports, in sw_clr_sta_mac_table()
2837 entry->override, 0, in sw_clr_sta_mac_table()
2838 entry->use_fid, entry->fid); in sw_clr_sta_mac_table()
2843 * sw_init_stp - initialize switch spanning tree support
2852 entry = &hw->ksz_switch->mac_table[STP_ENTRY]; in sw_init_stp()
2853 entry->mac_addr[0] = 0x01; in sw_init_stp()
2854 entry->mac_addr[1] = 0x80; in sw_init_stp()
2855 entry->mac_addr[2] = 0xC2; in sw_init_stp()
2856 entry->mac_addr[3] = 0x00; in sw_init_stp()
2857 entry->mac_addr[4] = 0x00; in sw_init_stp()
2858 entry->mac_addr[5] = 0x00; in sw_init_stp()
2859 entry->ports = HOST_MASK; in sw_init_stp()
2860 entry->override = 1; in sw_init_stp()
2861 entry->valid = 1; in sw_init_stp()
2863 entry->mac_addr, entry->ports, in sw_init_stp()
2864 entry->override, entry->valid, in sw_init_stp()
2865 entry->use_fid, entry->fid); in sw_init_stp()
2869 * sw_block_addr - block certain packets from the host port
2879 for (i = BROADCAST_ENTRY; i <= IPV6_ADDR_ENTRY; i++) { in sw_block_addr()
2880 entry = &hw->ksz_switch->mac_table[i]; in sw_block_addr()
2881 entry->valid = 0; in sw_block_addr()
2883 entry->mac_addr, entry->ports, in sw_block_addr()
2884 entry->override, entry->valid, in sw_block_addr()
2885 entry->use_fid, entry->fid); in sw_block_addr()
2898 static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data) in hw_r_phy_ctrl() argument
2900 *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET); in hw_r_phy_ctrl()
2903 static inline void hw_w_phy_ctrl(struct ksz_hw *hw, int phy, u16 data) in hw_w_phy_ctrl() argument
2905 writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET); in hw_w_phy_ctrl()
2908 static inline void hw_r_phy_link_stat(struct ksz_hw *hw, int phy, u16 *data) in hw_r_phy_link_stat() argument
2910 *data = readw(hw->io + phy + KS884X_PHY_STATUS_OFFSET); in hw_r_phy_link_stat()
2913 static inline void hw_r_phy_auto_neg(struct ksz_hw *hw, int phy, u16 *data) in hw_r_phy_auto_neg() argument
2915 *data = readw(hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET); in hw_r_phy_auto_neg()
2918 static inline void hw_w_phy_auto_neg(struct ksz_hw *hw, int phy, u16 data) in hw_w_phy_auto_neg() argument
2920 writew(data, hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET); in hw_w_phy_auto_neg()
2923 static inline void hw_r_phy_rem_cap(struct ksz_hw *hw, int phy, u16 *data) in hw_r_phy_rem_cap() argument
2925 *data = readw(hw->io + phy + KS884X_PHY_REMOTE_CAP_OFFSET); in hw_r_phy_rem_cap()
2928 static inline void hw_r_phy_crossover(struct ksz_hw *hw, int phy, u16 *data) in hw_r_phy_crossover() argument
2930 *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET); in hw_r_phy_crossover()
2933 static inline void hw_w_phy_crossover(struct ksz_hw *hw, int phy, u16 data) in hw_w_phy_crossover() argument
2935 writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET); in hw_w_phy_crossover()
2938 static inline void hw_r_phy_polarity(struct ksz_hw *hw, int phy, u16 *data) in hw_r_phy_polarity() argument
2940 *data = readw(hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET); in hw_r_phy_polarity()
2943 static inline void hw_w_phy_polarity(struct ksz_hw *hw, int phy, u16 data) in hw_w_phy_polarity() argument
2945 writew(data, hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET); in hw_w_phy_polarity()
2948 static inline void hw_r_phy_link_md(struct ksz_hw *hw, int phy, u16 *data) in hw_r_phy_link_md() argument
2950 *data = readw(hw->io + phy + KS884X_PHY_LINK_MD_OFFSET); in hw_r_phy_link_md()
2953 static inline void hw_w_phy_link_md(struct ksz_hw *hw, int phy, u16 data) in hw_w_phy_link_md() argument
2955 writew(data, hw->io + phy + KS884X_PHY_LINK_MD_OFFSET); in hw_w_phy_link_md()
2959 * hw_r_phy - read data from PHY register
2962 * @reg: PHY register to read.
2965 * This routine reads data from the PHY register.
2969 int phy; in hw_r_phy() local
2971 phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg; in hw_r_phy()
2972 *val = readw(hw->io + phy); in hw_r_phy()
2976 * hw_w_phy - write data to PHY register
2979 * @reg: PHY register to write.
2982 * This routine writes data to the PHY register.
2986 int phy; in hw_w_phy() local
2988 phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg; in hw_w_phy()
2989 writew(val, hw->io + phy); in hw_w_phy()
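A hedged usage example; the MII register number appears to be scaled to a
16-bit word offset by callers (an assumption based on the register spacing):

	u16 anar;

	/* Read MII register 4 (auto-negotiation advertisement) of port 1. */
	hw_r_phy(hw, 0, 4 << 1, &anar);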
3012 data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET); in drop_gpio()
3014 writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET); in drop_gpio()
3021 data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET); in raise_gpio()
3023 writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET); in raise_gpio()
3030 data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET); in state_gpio()
3047 for (i = 15; i >= 0; i--) { in spi_r()
3063 for (i = 15; i >= 0; i--) { in spi_w()
3079 for (i = 1; i >= 0; i--) { in spi_reg()
3086 for (i = 5; i >= 0; i--) { in spi_reg()
3105 * eeprom_read - read from AT93C46 EEPROM
3128 * eeprom_write - write to AT93C46 EEPROM
3158 } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout); in eeprom_write()
3175 } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout); in eeprom_write()
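A hedged usage sketch; the word-oriented signatures u16 eeprom_read(struct
ksz_hw *hw, u8 reg) and void eeprom_write(struct ksz_hw *hw, u8 reg, u16
data) are assumed from the unfiltered driver:

	u16 word = eeprom_read(hw, 1);	/* read the 16-bit word at address 1 */

	eeprom_write(hw, 1, word);	/* write it back; polls EEPROM_DATA_IN */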
3193 switch (port->flow_ctrl) { in advertised_flow_ctrl()
3211 rx_cfg = hw->rx_cfg; in set_flow_ctrl()
3212 tx_cfg = hw->tx_cfg; in set_flow_ctrl()
3214 hw->rx_cfg |= DMA_RX_FLOW_ENABLE; in set_flow_ctrl()
3216 hw->rx_cfg &= ~DMA_RX_FLOW_ENABLE; in set_flow_ctrl()
3218 hw->tx_cfg |= DMA_TX_FLOW_ENABLE; in set_flow_ctrl()
3220 hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE; in set_flow_ctrl()
3221 if (hw->enabled) { in set_flow_ctrl()
3222 if (rx_cfg != hw->rx_cfg) in set_flow_ctrl()
3223 writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL); in set_flow_ctrl()
3224 if (tx_cfg != hw->tx_cfg) in set_flow_ctrl()
3225 writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL); in set_flow_ctrl()
3235 if (hw->overrides & PAUSE_FLOW_CTRL) in determine_flow_ctrl()
3239 if (port->force_link) in determine_flow_ctrl()
3253 if (!hw->ksz_switch) in determine_flow_ctrl()
3260 if ((hw->features & HALF_DUPLEX_SIGNAL_BUG) && in port_cfg_change()
3261 !(hw->overrides & PAUSE_FLOW_CTRL)) { in port_cfg_change()
3262 u32 cfg = hw->tx_cfg; in port_cfg_change()
3265 if (1 == info->duplex) in port_cfg_change()
3266 hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE; in port_cfg_change()
3267 if (hw->enabled && cfg != hw->tx_cfg) in port_cfg_change()
3268 writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL); in port_cfg_change()
3273 * port_get_link_speed - get current link status
3276 * This routine reads PHY registers to determine the current link status of the
3284 struct ksz_hw *hw = port->hw; in port_get_link_speed()
3295 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) { in port_get_link_speed()
3296 info = &hw->port_info[p]; in port_get_link_speed()
3309 if (local == info->advertised && remote == info->partner) in port_get_link_speed()
3312 info->advertised = local; in port_get_link_speed()
3313 info->partner = remote; in port_get_link_speed()
3320 info->tx_rate = 10 * TX_RATE_UNIT; in port_get_link_speed()
3322 info->tx_rate = 100 * TX_RATE_UNIT; in port_get_link_speed()
3324 info->duplex = 1; in port_get_link_speed()
3326 info->duplex = 2; in port_get_link_speed()
3328 if (media_connected != info->state) { in port_get_link_speed()
3334 if (hw->ksz_switch) { in port_get_link_speed()
3336 (1 == info->duplex)); in port_get_link_speed()
3341 info->state = media_connected; in port_get_link_speed()
3343 if (media_disconnected != info->state) { in port_get_link_speed()
3347 hw->port_mib[p].link_down = 1; in port_get_link_speed()
3349 info->state = media_disconnected; in port_get_link_speed()
3351 hw->port_mib[p].state = (u8) info->state; in port_get_link_speed()
3354 if (linked && media_disconnected == port->linked->state) in port_get_link_speed()
3355 port->linked = linked; in port_get_link_speed()
3363 * port_set_link_speed - set port speed
3370 struct ksz_hw *hw = port->hw; in port_set_link_speed()
3377 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) { in port_set_link_speed()
3392 if (port->speed || port->duplex) { in port_set_link_speed()
3393 if (10 == port->speed) in port_set_link_speed()
3396 else if (100 == port->speed) in port_set_link_speed()
3399 if (1 == port->duplex) in port_set_link_speed()
3402 else if (2 == port->duplex) in port_set_link_speed()
3414 * port_force_link_speed - force port speed
3421 struct ksz_hw *hw = port->hw; in port_force_link_speed()
3424 int phy; in port_force_link_speed() local
3427 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) { in port_force_link_speed()
3428 phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL; in port_force_link_speed()
3429 hw_r_phy_ctrl(hw, phy, &data); in port_force_link_speed()
3433 if (10 == port->speed) in port_force_link_speed()
3435 else if (100 == port->speed) in port_force_link_speed()
3437 if (1 == port->duplex) in port_force_link_speed()
3439 else if (2 == port->duplex) in port_force_link_speed()
3441 hw_w_phy_ctrl(hw, phy, data); in port_force_link_speed()
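Usage sketch, following the speed/duplex conventions documented at struct
ksz_port above:

	port->speed = 100;		/* 100 Mbit */
	port->duplex = 2;		/* full duplex */
	port_force_link_speed(port);	/* writes the PHY control registers */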
3447 struct ksz_hw *hw = port->hw; in port_set_power_saving()
3451 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) in port_set_power_saving()
3461 * hw_chk_wol_pme_status - check PMEN pin
3471 struct pci_dev *pdev = hw_priv->pdev; in hw_chk_wol_pme_status()
3474 if (!pdev->pm_cap) in hw_chk_wol_pme_status()
3476 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data); in hw_chk_wol_pme_status()
3481 * hw_clr_wol_pme_status - clear PMEN pin
3489 struct pci_dev *pdev = hw_priv->pdev; in hw_clr_wol_pme_status()
3492 if (!pdev->pm_cap) in hw_clr_wol_pme_status()
3496 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data); in hw_clr_wol_pme_status()
3498 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data); in hw_clr_wol_pme_status()
3502 * hw_cfg_wol_pme - enable or disable Wake-on-LAN
3506 * This routine is used to enable or disable Wake-on-LAN.
3511 struct pci_dev *pdev = hw_priv->pdev; in hw_cfg_wol_pme()
3514 if (!pdev->pm_cap) in hw_cfg_wol_pme()
3516 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data); in hw_cfg_wol_pme()
3522 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data); in hw_cfg_wol_pme()
3526 * hw_cfg_wol - configure Wake-on-LAN features
3531 * This routine is used to enable or disable certain Wake-on-LAN features.
3537 data = readw(hw->io + KS8841_WOL_CTRL_OFFSET); in hw_cfg_wol()
3542 writew(data, hw->io + KS8841_WOL_CTRL_OFFSET); in hw_cfg_wol()
3546 * hw_set_wol_frame - program Wake-on-LAN pattern
3554 * This routine is used to program a Wake-on-LAN pattern.
3573 writel(0, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i); in hw_set_wol_frame()
3574 writel(0, hw->io + KS8841_WOL_FRAME_BYTE2_OFFSET + i); in hw_set_wol_frame()
3583 --bits; in hw_set_wol_frame()
3586 writeb(val, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i in hw_set_wol_frame()
3596 bits = mask[len - 1]; in hw_set_wol_frame()
3599 writeb(bits, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + len - in hw_set_wol_frame()
3603 writel(crc, hw->io + KS8841_WOL_FRAME_CRC_OFFSET + i); in hw_set_wol_frame()
3607 * hw_add_wol_arp - add ARP pattern
3611 * This routine is used to add an ARP pattern for waking up the host.
3631 * hw_add_wol_bcast - add broadcast pattern
3634 * This routine is used to add a broadcast pattern for waking up the host.
3645 * hw_add_wol_mcast - add multicast pattern
3648 * This routine is used to add a multicast pattern for waking up the host.
3652 * multicast hash table, so not all multicast packets can wake up the host.
3659 memcpy(&pattern[3], &hw->override_addr[3], 3); in hw_add_wol_mcast()
3664 * hw_add_wol_ucast - add unicast pattern
3676 hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr); in hw_add_wol_ucast()
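Each bit of the mask enables matching of one pattern byte, so the one-byte
mask used here covers exactly the six bytes of the MAC address; a hedged
sketch of the call's ingredients:

	static const u8 mask[] = { 0x3F };  /* bits 0-5: match all 6 MAC bytes */

	hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr);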
3680 * hw_enable_wol - enable Wake-on-LAN
3682 * @wol_enable: The Wake-on-LAN settings.
3685 * This routine is used to enable Wake-on-LAN depending on driver settings.
3700 * hw_init - check that the driver is correct for the hardware
3703 * This function checks that the hardware is correct for this driver and
3704 * sets the hardware up for proper initialization.
3715 writew(BUS_SPEED_125_MHZ, hw->io + KS884X_BUS_CTRL_OFFSET); in hw_init()
3718 data = readw(hw->io + KS884X_CHIP_ID_OFFSET); in hw_init()
3731 hw->features |= SMALL_PACKET_TX_BUG; in hw_init()
3733 hw->features |= HALF_DUPLEX_SIGNAL_BUG; in hw_init()
3739 * hw_reset - reset the hardware
3746 writew(GLOBAL_SOFTWARE_RESET, hw->io + KS884X_GLOBAL_CTRL_OFFSET); in hw_reset()
3748 /* Wait for device to reset. */ in hw_reset()
3752 writew(0, hw->io + KS884X_GLOBAL_CTRL_OFFSET); in hw_reset()
3756 * hw_setup - setup the hardware
3759 * This routine sets up the hardware for proper operation.
3767 data = readw(hw->io + KS8842_SWITCH_CTRL_5_OFFSET); in hw_setup()
3770 writew(data, hw->io + KS8842_SWITCH_CTRL_5_OFFSET); in hw_setup()
3774 hw->tx_cfg = (DMA_TX_PAD_ENABLE | DMA_TX_CRC_ENABLE | in hw_setup()
3778 hw->rx_cfg = (DMA_RX_BROADCAST | DMA_RX_UNICAST | in hw_setup()
3780 hw->rx_cfg |= KS884X_DMA_RX_MULTICAST; in hw_setup()
3783 hw->rx_cfg |= (DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP); in hw_setup()
3785 if (hw->all_multi) in hw_setup()
3786 hw->rx_cfg |= DMA_RX_ALL_MULTICAST; in hw_setup()
3787 if (hw->promiscuous) in hw_setup()
3788 hw->rx_cfg |= DMA_RX_PROMISCUOUS; in hw_setup()
3792 * hw_setup_intr - setup interrupt mask
3795 * This routine sets up the interrupt mask for proper operation.
3799 hw->intr_mask = KS884X_INT_MASK | KS884X_INT_RX_OVERRUN; in hw_setup_intr()
3806 int alloc = info->alloc; in ksz_check_desc_num()
3823 info->alloc = alloc; in ksz_check_desc_num()
3825 info->mask = info->alloc - 1; in ksz_check_desc_num()
3831 u32 phys = desc_info->ring_phys; in hw_init_desc()
3832 struct ksz_hw_desc *desc = desc_info->ring_virt; in hw_init_desc()
3833 struct ksz_desc *cur = desc_info->ring; in hw_init_desc()
3836 for (i = 0; i < desc_info->alloc; i++) { in hw_init_desc()
3837 cur->phw = desc++; in hw_init_desc()
3838 phys += desc_info->size; in hw_init_desc()
3840 previous->phw->next = cpu_to_le32(phys); in hw_init_desc()
3842 previous->phw->next = cpu_to_le32(desc_info->ring_phys); in hw_init_desc()
3843 previous->sw.buf.rx.end_of_ring = 1; in hw_init_desc()
3844 previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data); in hw_init_desc()
3846 desc_info->avail = desc_info->alloc; in hw_init_desc()
3847 desc_info->last = desc_info->next = 0; in hw_init_desc()
3849 desc_info->cur = desc_info->ring; in hw_init_desc()
3853 * hw_set_desc_base - set descriptor base addresses
3863 writel(tx_addr, hw->io + KS_DMA_TX_ADDR); in hw_set_desc_base()
3864 writel(rx_addr, hw->io + KS_DMA_RX_ADDR); in hw_set_desc_base()
3869 info->cur = info->ring; in hw_reset_pkts()
3870 info->avail = info->alloc; in hw_reset_pkts()
3871 info->last = info->next = 0; in hw_reset_pkts()
3876 writel(DMA_START, hw->io + KS_DMA_RX_START); in hw_resume_rx()
3880 * hw_start_rx - start receiving
3887 writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL); in hw_start_rx()
3890 hw->intr_mask |= KS884X_INT_RX_STOPPED; in hw_start_rx()
3892 writel(DMA_START, hw->io + KS_DMA_RX_START); in hw_start_rx()
3894 hw->rx_stop++; in hw_start_rx()
3897 if (0 == hw->rx_stop) in hw_start_rx()
3898 hw->rx_stop = 2; in hw_start_rx()
3902 * hw_stop_rx - stop receiving
3909 hw->rx_stop = 0; in hw_stop_rx()
3911 writel((hw->rx_cfg & ~DMA_RX_ENABLE), hw->io + KS_DMA_RX_CTRL); in hw_stop_rx()
3915 * hw_start_tx - start transmitting
3922 writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL); in hw_start_tx()
3926 * hw_stop_tx - stop transmitting
3933 writel((hw->tx_cfg & ~DMA_TX_ENABLE), hw->io + KS_DMA_TX_CTRL); in hw_stop_tx()
3937 * hw_disable - disable hardware
3946 hw->enabled = 0; in hw_disable()
3950 * hw_enable - enable hardware
3959 hw->enabled = 1; in hw_enable()
3963 * hw_alloc_pkt - allocate enough descriptors for transmission
3968 * This function allocates descriptors for transmission.
3970 * Return 0 if not successful; 1 for buffer copy; or number of descriptors.
3975 if (hw->tx_desc_info.avail <= 1) in hw_alloc_pkt()
3978 /* Allocate a descriptor for transmission and mark it current. */ in hw_alloc_pkt()
3979 get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur); in hw_alloc_pkt()
3980 hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1; in hw_alloc_pkt()
3983 ++hw->tx_int_cnt; in hw_alloc_pkt()
3984 hw->tx_size += length; in hw_alloc_pkt()
3987 if (hw->tx_size >= MAX_TX_HELD_SIZE) in hw_alloc_pkt()
3988 hw->tx_int_cnt = hw->tx_int_mask + 1; in hw_alloc_pkt()
3990 if (physical > hw->tx_desc_info.avail) in hw_alloc_pkt()
3993 return hw->tx_desc_info.avail; in hw_alloc_pkt()
3997 * hw_send_pkt - mark packet for transmission
4000 * This routine marks the packet for transmission in the PCI version.
4004 struct ksz_desc *cur = hw->tx_desc_info.cur; in hw_send_pkt()
4006 cur->sw.buf.tx.last_seg = 1; in hw_send_pkt()
4009 if (hw->tx_int_cnt > hw->tx_int_mask) { in hw_send_pkt()
4010 cur->sw.buf.tx.intr = 1; in hw_send_pkt()
4011 hw->tx_int_cnt = 0; in hw_send_pkt()
4012 hw->tx_size = 0; in hw_send_pkt()
4016 cur->sw.buf.tx.dest_port = hw->dst_ports; in hw_send_pkt()
4020 writel(0, hw->io + KS_DMA_TX_START); in hw_send_pkt()
4032 * hw_set_addr - set MAC address
4042 for (i = 0; i < ETH_ALEN; i++) in hw_set_addr()
4043 writeb(hw->override_addr[MAC_ADDR_ORDER(i)], in hw_set_addr()
4044 hw->io + KS884X_ADDR_0_OFFSET + i); in hw_set_addr()
4046 sw_set_addr(hw, hw->override_addr); in hw_set_addr()
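Per MAC_ADDR_ORDER() above, the address is written byte-reversed; the first
loop iteration, unrolled for illustration:

	/* i = 0 stores the last address byte at register offset 0. */
	writeb(hw->override_addr[5], hw->io + KS884X_ADDR_0_OFFSET + 0);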
4050 * hw_read_addr - read MAC address
4059 for (i = 0; i < ETH_ALEN; i++) in hw_read_addr()
4060 hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io + in hw_read_addr()
4063 if (!hw->mac_override) { in hw_read_addr()
4064 memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN); in hw_read_addr()
4065 if (empty_addr(hw->override_addr)) { in hw_read_addr()
4066 memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN); in hw_read_addr()
4067 memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS, in hw_read_addr()
4069 hw->override_addr[5] += hw->id; in hw_read_addr()
4082 for (i = 0; i < 2; i++) { in hw_ena_add_addr()
4088 for (i = 2; i < 6; i++) { in hw_ena_add_addr()
4094 writel(mac_addr_lo, hw->io + index + KS_ADD_ADDR_0_LO); in hw_ena_add_addr()
4095 writel(mac_addr_hi, hw->io + index + KS_ADD_ADDR_0_HI); in hw_ena_add_addr()
4102 for (i = 0; i < ADDITIONAL_ENTRIES; i++) { in hw_set_add_addr()
4103 if (empty_addr(hw->address[i])) in hw_set_add_addr()
4104 writel(0, hw->io + ADD_ADDR_INCR * i + in hw_set_add_addr()
4107 hw_ena_add_addr(hw, i, hw->address[i]); in hw_set_add_addr()
4116 if (ether_addr_equal(hw->override_addr, mac_addr)) in hw_add_addr()
4118 for (i = 0; i < hw->addr_list_size; i++) { in hw_add_addr()
4119 if (ether_addr_equal(hw->address[i], mac_addr)) in hw_add_addr()
4121 if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i])) in hw_add_addr()
4125 memcpy(hw->address[j], mac_addr, ETH_ALEN); in hw_add_addr()
4126 hw_ena_add_addr(hw, j, hw->address[j]); in hw_add_addr()
4129 return -1; in hw_add_addr()
4136 for (i = 0; i < hw->addr_list_size; i++) { in hw_del_addr()
4137 if (ether_addr_equal(hw->address[i], mac_addr)) { in hw_del_addr()
4138 eth_zero_addr(hw->address[i]); in hw_del_addr()
4139 writel(0, hw->io + ADD_ADDR_INCR * i + in hw_del_addr()
4144 return -1; in hw_del_addr()
4148 * hw_clr_multicast - clear multicast addresses
4157 for (i = 0; i < HW_MULTICAST_SIZE; i++) { in hw_clr_multicast()
4158 hw->multi_bits[i] = 0; in hw_clr_multicast()
4160 writeb(0, hw->io + KS884X_MULTICAST_0_OFFSET + i); in hw_clr_multicast()
4165 * hw_set_grp_addr - set multicast addresses
4168 * This routine programs multicast addresses for the hardware to accept those
4178 memset(hw->multi_bits, 0, sizeof(u8) * HW_MULTICAST_SIZE); in hw_set_grp_addr()
4180 for (i = 0; i < hw->multi_list_size; i++) { in hw_set_grp_addr()
4181 position = (ether_crc(6, hw->multi_list[i]) >> 26) & 0x3f; in hw_set_grp_addr()
4184 hw->multi_bits[index] |= (u8) value; in hw_set_grp_addr()
4187 for (i = 0; i < HW_MULTICAST_SIZE; i++) in hw_set_grp_addr()
4188 writeb(hw->multi_bits[i], hw->io + KS884X_MULTICAST_0_OFFSET + in hw_set_grp_addr()
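The hash position is the top 6 bits of the Ethernet CRC, i.e. one bit in a
64-bit table stored as 8 bytes; the index math filtered out of this listing
presumably reads:

	index = position >> 3;		/* which of the 8 hash registers */
	value = 1 << (position & 7);	/* which bit within that register */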
4193 * hw_set_multicast - enable or disable all multicast receiving
4201 /* Stop receiving for reconfiguration. */ in hw_set_multicast()
4205 hw->rx_cfg |= DMA_RX_ALL_MULTICAST; in hw_set_multicast()
4207 hw->rx_cfg &= ~DMA_RX_ALL_MULTICAST; in hw_set_multicast()
4209 if (hw->enabled) in hw_set_multicast()
4214 * hw_set_promiscuous - enable or disable promiscuous receiving
4222 /* Stop receiving for reconfiguration. */ in hw_set_promiscuous()
4226 hw->rx_cfg |= DMA_RX_PROMISCUOUS; in hw_set_promiscuous()
4228 hw->rx_cfg &= ~DMA_RX_PROMISCUOUS; in hw_set_promiscuous()
4230 if (hw->enabled) in hw_set_promiscuous()
4235 * sw_enable - enable the switch
4245 for (port = 0; port < SWITCH_PORT_NUM; port++) { in sw_enable()
4246 if (hw->dev_count > 1) { in sw_enable()
4247 /* Set port-based VLAN membership with the host port. */ in sw_enable()
4256 if (hw->dev_count > 1) in sw_enable()
4263 writew(enable, hw->io + KS884X_CHIP_ID_OFFSET); in sw_enable()
4267 * sw_setup - setup the switch
4270 * This routine sets up the hardware switch engine for default operation.
4281 for (port = 0; port < SWITCH_PORT_NUM; port++) in sw_setup()
4292 if (hw->features & STP_SUPPORT) in sw_setup()
4296 hw->overrides |= PAUSE_FLOW_CTRL; in sw_setup()
4301 * ksz_start_timer - start kernel timer
4309 info->cnt = 0; in ksz_start_timer()
4310 info->timer.expires = jiffies + time; in ksz_start_timer()
4311 add_timer(&info->timer); in ksz_start_timer()
4314 info->max = -1; in ksz_start_timer()
4318 * ksz_stop_timer - stop kernel timer
4325 if (info->max) { in ksz_stop_timer()
4326 info->max = 0; in ksz_stop_timer()
4327 del_timer_sync(&info->timer); in ksz_stop_timer()
4334 info->max = 0; in ksz_init_timer()
4335 info->period = period; in ksz_init_timer()
4336 timer_setup(&info->timer, function, 0); in ksz_init_timer()
4341 ++info->cnt; in ksz_update_timer()
4342 if (info->max > 0) { in ksz_update_timer()
4343 if (info->cnt < info->max) { in ksz_update_timer()
4344 info->timer.expires = jiffies + info->period; in ksz_update_timer()
4345 add_timer(&info->timer); in ksz_update_timer()
4347 info->max = 0; in ksz_update_timer()
4348 } else if (info->max < 0) { in ksz_update_timer()
4349 info->timer.expires = jiffies + info->period; in ksz_update_timer()
4350 add_timer(&info->timer); in ksz_update_timer()
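A hedged usage sketch of the timer helpers; the dev_monitor callback and the
500 ms period are assumed from the unfiltered driver:

	ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000, dev_monitor);
	ksz_start_timer(&priv->monitor_timer_info,
		priv->monitor_timer_info.period);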
4355 * ksz_alloc_soft_desc - allocate software descriptors
4357 * @transmit: Indication that descriptors are for transmit.
4359 * This local function allocates software descriptors for manipulation in
4366 desc_info->ring = kcalloc(desc_info->alloc, sizeof(struct ksz_desc), in ksz_alloc_soft_desc()
4368 if (!desc_info->ring) in ksz_alloc_soft_desc()
4375 * ksz_alloc_desc - allocate hardware descriptors
4378 * This local function allocates hardware descriptors for receiving and
4385 struct ksz_hw *hw = &adapter->hw; in ksz_alloc_desc()
4388 /* Allocate memory for RX & TX descriptors. */ in ksz_alloc_desc()
4389 adapter->desc_pool.alloc_size = in ksz_alloc_desc()
4390 hw->rx_desc_info.size * hw->rx_desc_info.alloc + in ksz_alloc_desc()
4391 hw->tx_desc_info.size * hw->tx_desc_info.alloc + in ksz_alloc_desc()
4394 adapter->desc_pool.alloc_virt = in ksz_alloc_desc()
4395 dma_alloc_coherent(&adapter->pdev->dev, in ksz_alloc_desc()
4396 adapter->desc_pool.alloc_size, in ksz_alloc_desc()
4397 &adapter->desc_pool.dma_addr, GFP_KERNEL); in ksz_alloc_desc()
4398 if (adapter->desc_pool.alloc_virt == NULL) { in ksz_alloc_desc()
4399 adapter->desc_pool.alloc_size = 0; in ksz_alloc_desc()
4404 offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ? in ksz_alloc_desc()
4405 (DESC_ALIGNMENT - in ksz_alloc_desc()
4406 ((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT)) : 0); in ksz_alloc_desc()
4407 adapter->desc_pool.virt = adapter->desc_pool.alloc_virt + offset; in ksz_alloc_desc()
4408 adapter->desc_pool.phys = adapter->desc_pool.dma_addr + offset; in ksz_alloc_desc()
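The offset arithmetic above is equivalent to rounding the pointer up with
the kernel helper (DESC_ALIGNMENT is a power of two):

	/* Same result as alloc_virt + offset computed above. */
	adapter->desc_pool.virt =
		PTR_ALIGN(adapter->desc_pool.alloc_virt, DESC_ALIGNMENT);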
4411 hw->rx_desc_info.ring_virt = (struct ksz_hw_desc *) in ksz_alloc_desc()
4412 adapter->desc_pool.virt; in ksz_alloc_desc()
4413 hw->rx_desc_info.ring_phys = adapter->desc_pool.phys; in ksz_alloc_desc()
4414 offset = hw->rx_desc_info.alloc * hw->rx_desc_info.size; in ksz_alloc_desc()
4415 hw->tx_desc_info.ring_virt = (struct ksz_hw_desc *) in ksz_alloc_desc()
4416 (adapter->desc_pool.virt + offset); in ksz_alloc_desc()
4417 hw->tx_desc_info.ring_phys = adapter->desc_pool.phys + offset; in ksz_alloc_desc()
4419 if (ksz_alloc_soft_desc(&hw->rx_desc_info, 0)) in ksz_alloc_desc()
4421 if (ksz_alloc_soft_desc(&hw->tx_desc_info, 1)) in ksz_alloc_desc()
4428 * free_dma_buf - release DMA buffer resources
4438 dma_unmap_single(&adapter->pdev->dev, dma_buf->dma, dma_buf->len, in free_dma_buf()
4440 dev_kfree_skb(dma_buf->skb); in free_dma_buf()
4441 dma_buf->skb = NULL; in free_dma_buf()
4442 dma_buf->dma = 0; in free_dma_buf()
4446 * ksz_init_rx_buffers - initialize receive descriptors
4449 * This routine initializes DMA buffers for receiving.
4456 struct ksz_hw *hw = &adapter->hw; in ksz_init_rx_buffers()
4457 struct ksz_desc_info *info = &hw->rx_desc_info; in ksz_init_rx_buffers()
4459 for (i = 0; i < hw->rx_desc_info.alloc; i++) { in ksz_init_rx_buffers()
4463 if (dma_buf->skb && dma_buf->len != adapter->mtu) in ksz_init_rx_buffers()
4465 dma_buf->len = adapter->mtu; in ksz_init_rx_buffers()
4466 if (!dma_buf->skb) in ksz_init_rx_buffers()
4467 dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC); in ksz_init_rx_buffers()
4468 if (dma_buf->skb && !dma_buf->dma) in ksz_init_rx_buffers()
4469 dma_buf->dma = dma_map_single(&adapter->pdev->dev, in ksz_init_rx_buffers()
4470 skb_tail_pointer(dma_buf->skb), in ksz_init_rx_buffers()
4471 dma_buf->len, in ksz_init_rx_buffers()
4475 set_rx_buf(desc, dma_buf->dma); in ksz_init_rx_buffers()
4476 set_rx_len(desc, dma_buf->len); in ksz_init_rx_buffers()
4482 * ksz_alloc_mem - allocate memory for hardware descriptors
4485 * This function allocates memory for use by hardware descriptors for receiving
4492 struct ksz_hw *hw = &adapter->hw; in ksz_alloc_mem()
4495 hw->rx_desc_info.alloc = NUM_OF_RX_DESC; in ksz_alloc_mem()
4496 hw->tx_desc_info.alloc = NUM_OF_TX_DESC; in ksz_alloc_mem()
4499 hw->tx_int_cnt = 0; in ksz_alloc_mem()
4500 hw->tx_int_mask = NUM_OF_TX_DESC / 4; in ksz_alloc_mem()
4501 if (hw->tx_int_mask > 8) in ksz_alloc_mem()
4502 hw->tx_int_mask = 8; in ksz_alloc_mem()
4503 while (hw->tx_int_mask) { in ksz_alloc_mem()
4504 hw->tx_int_cnt++; in ksz_alloc_mem()
4505 hw->tx_int_mask >>= 1; in ksz_alloc_mem()
4507 if (hw->tx_int_cnt) { in ksz_alloc_mem()
4508 hw->tx_int_mask = (1 << (hw->tx_int_cnt - 1)) - 1; in ksz_alloc_mem()
4509 hw->tx_int_cnt = 0; in ksz_alloc_mem()
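The two loops above round the mask down to a power of two; an equivalent
one-liner with the <linux/log2.h> helper, for illustration:

	hw->tx_int_mask = rounddown_pow_of_two(min(NUM_OF_TX_DESC / 4, 8)) - 1;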
4513 hw->rx_desc_info.size = in ksz_alloc_mem()
4514 (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) / in ksz_alloc_mem()
4516 hw->tx_desc_info.size = in ksz_alloc_mem()
4517 (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) / in ksz_alloc_mem()
4519 if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc)) in ksz_alloc_mem()
4521 ksz_check_desc_num(&hw->rx_desc_info); in ksz_alloc_mem()
4522 ksz_check_desc_num(&hw->tx_desc_info); in ksz_alloc_mem()
4532 * ksz_free_desc - free software and hardware descriptors
4540 struct ksz_hw *hw = &adapter->hw; in ksz_free_desc()
4543 hw->rx_desc_info.ring_virt = NULL; in ksz_free_desc()
4544 hw->tx_desc_info.ring_virt = NULL; in ksz_free_desc()
4545 hw->rx_desc_info.ring_phys = 0; in ksz_free_desc()
4546 hw->tx_desc_info.ring_phys = 0; in ksz_free_desc()
4549 if (adapter->desc_pool.alloc_virt) in ksz_free_desc()
4550 dma_free_coherent(&adapter->pdev->dev, in ksz_free_desc()
4551 adapter->desc_pool.alloc_size, in ksz_free_desc()
4552 adapter->desc_pool.alloc_virt, in ksz_free_desc()
4553 adapter->desc_pool.dma_addr); in ksz_free_desc()
4556 adapter->desc_pool.alloc_size = 0; in ksz_free_desc()
4557 adapter->desc_pool.alloc_virt = NULL; in ksz_free_desc()
4559 kfree(hw->rx_desc_info.ring); in ksz_free_desc()
4560 hw->rx_desc_info.ring = NULL; in ksz_free_desc()
4561 kfree(hw->tx_desc_info.ring); in ksz_free_desc()
4562 hw->tx_desc_info.ring = NULL; in ksz_free_desc()
4566 * ksz_free_buffers - free buffers used in the descriptors
4578 struct ksz_desc *desc = desc_info->ring; in ksz_free_buffers()
4580 for (i = 0; i < desc_info->alloc; i++) { in ksz_free_buffers()
4582 if (dma_buf->skb) in ksz_free_buffers()
4589 * ksz_free_mem - free all resources used by descriptors
4597 ksz_free_buffers(adapter, &adapter->hw.tx_desc_info, DMA_TO_DEVICE); in ksz_free_mem()
4600 ksz_free_buffers(adapter, &adapter->hw.rx_desc_info, DMA_FROM_DEVICE); in ksz_free_mem()
4615 for (i = 0, port = first; i < cnt; i++, port++) { in get_mib_counters()
4616 port_mib = &hw->port_mib[port]; in get_mib_counters()
4617 for (mib = port_mib->mib_start; mib < hw->mib_cnt; mib++) in get_mib_counters()
4618 counter[mib] += port_mib->counter[mib]; in get_mib_counters()
4623 * send_packet - send packet
4634 struct dev_info *hw_priv = priv->adapter; in send_packet()
4635 struct ksz_hw *hw = &hw_priv->hw; in send_packet()
4636 struct ksz_desc_info *info = &hw->tx_desc_info; in send_packet()
4639 int last_frag = skb_shinfo(skb)->nr_frags; in send_packet()
4645 if (hw->dev_count > 1) in send_packet()
4646 hw->dst_ports = 1 << priv->port.first_port; in send_packet()
4649 len = skb->len; in send_packet()
4652 first = info->cur; in send_packet()
4660 dma_buf->len = skb_headlen(skb); in send_packet()
4662 dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data, in send_packet()
4663 dma_buf->len, DMA_TO_DEVICE); in send_packet()
4664 set_tx_buf(desc, dma_buf->dma); in send_packet()
4665 set_tx_len(desc, dma_buf->len); in send_packet()
4669 this_frag = &skb_shinfo(skb)->frags[frag]; in send_packet()
4675 ++hw->tx_int_cnt; in send_packet()
4678 dma_buf->len = skb_frag_size(this_frag); in send_packet()
4680 dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, in send_packet()
4682 dma_buf->len, in send_packet()
4684 set_tx_buf(desc, dma_buf->dma); in send_packet()
4685 set_tx_len(desc, dma_buf->len); in send_packet()
4696 info->cur = desc; in send_packet()
4701 dma_buf->len = len; in send_packet()
4703 dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data, in send_packet()
4704 dma_buf->len, DMA_TO_DEVICE); in send_packet()
4705 set_tx_buf(desc, dma_buf->dma); in send_packet()
4706 set_tx_len(desc, dma_buf->len); in send_packet()
4709 if (skb->ip_summed == CHECKSUM_PARTIAL) { in send_packet()
4710 (desc)->sw.buf.tx.csum_gen_tcp = 1; in send_packet()
4711 (desc)->sw.buf.tx.csum_gen_udp = 1; in send_packet()
4718 dma_buf->skb = skb; in send_packet()
4723 dev->stats.tx_packets++; in send_packet()
4724 dev->stats.tx_bytes += len; in send_packet()
4728 * transmit_cleanup - clean up transmit descriptors
4738 struct ksz_hw *hw = &hw_priv->hw; in transmit_cleanup()
4739 struct ksz_desc_info *info = &hw->tx_desc_info; in transmit_cleanup()
4744 spin_lock_irq(&hw_priv->hwlock); in transmit_cleanup()
4745 last = info->last; in transmit_cleanup()
4747 while (info->avail < info->alloc) { in transmit_cleanup()
4749 desc = &info->ring[last]; in transmit_cleanup()
4750 status.data = le32_to_cpu(desc->phw->ctrl.data); in transmit_cleanup()
4759 dma_unmap_single(&hw_priv->pdev->dev, dma_buf->dma, in transmit_cleanup()
4760 dma_buf->len, DMA_TO_DEVICE); in transmit_cleanup()
4763 if (dma_buf->skb) { in transmit_cleanup()
4764 dev = dma_buf->skb->dev; in transmit_cleanup()
4767 dev_kfree_skb_irq(dma_buf->skb); in transmit_cleanup()
4768 dma_buf->skb = NULL; in transmit_cleanup()
4773 last &= info->mask; in transmit_cleanup()
4774 info->avail++; in transmit_cleanup()
4776 info->last = last; in transmit_cleanup()
4777 spin_unlock_irq(&hw_priv->hwlock); in transmit_cleanup()
4785 * tx_done - transmit done processing
4793 struct ksz_hw *hw = &hw_priv->hw; in tx_done()
4798 for (port = 0; port < hw->dev_count; port++) { in tx_done()
4799 struct net_device *dev = hw->port_info[port].pdev; in tx_done()
4808 skb->dev = old->dev; in copy_old_skb()
4809 skb->protocol = old->protocol; in copy_old_skb()
4810 skb->ip_summed = old->ip_summed; in copy_old_skb()
4811 skb->csum = old->csum; in copy_old_skb()
4818 * netdev_tx - send out packet
4829 struct dev_info *hw_priv = priv->adapter; in netdev_tx()
4830 struct ksz_hw *hw = &hw_priv->hw; in netdev_tx()
4835 if (hw->features & SMALL_PACKET_TX_BUG) { in netdev_tx()
4838 if (skb->len <= 48) { in netdev_tx()
4839 if (skb_end_pointer(skb) - skb->data >= 50) { in netdev_tx()
4840 memset(&skb->data[skb->len], 0, 50 - skb->len); in netdev_tx()
4841 skb->len = 50; in netdev_tx()
4846 memcpy(skb->data, org_skb->data, org_skb->len); in netdev_tx()
4847 memset(&skb->data[org_skb->len], 0, in netdev_tx()
4848 50 - org_skb->len); in netdev_tx()
4849 skb->len = 50; in netdev_tx()
4855 spin_lock_irq(&hw_priv->hwlock); in netdev_tx()
4857 num = skb_shinfo(skb)->nr_frags + 1; in netdev_tx()
4858 left = hw_alloc_pkt(hw, skb->len, num); in netdev_tx()
4861 (CHECKSUM_PARTIAL == skb->ip_summed && in netdev_tx()
4862 skb->protocol == htons(ETH_P_IPV6))) { in netdev_tx()
4865 skb = netdev_alloc_skb(dev, org_skb->len); in netdev_tx()
4870 skb_copy_and_csum_dev(org_skb, skb->data); in netdev_tx()
4871 org_skb->ip_summed = CHECKSUM_NONE; in netdev_tx()
4872 skb->len = org_skb->len; in netdev_tx()
4884 spin_unlock_irq(&hw_priv->hwlock); in netdev_tx()
4890 * netdev_tx_timeout - transmit timeout processing
4904 struct dev_info *hw_priv = priv->adapter; in netdev_tx_timeout()
4905 struct ksz_hw *hw = &hw_priv->hw; in netdev_tx_timeout()
4908 if (hw->dev_count > 1) { in netdev_tx_timeout()
4913 if (time_before_eq(jiffies, last_reset + dev->watchdog_timeo)) in netdev_tx_timeout()
4923 hw_reset_pkts(&hw->rx_desc_info); in netdev_tx_timeout()
4924 hw_reset_pkts(&hw->tx_desc_info); in netdev_tx_timeout()
4930 hw->tx_desc_info.ring_phys, in netdev_tx_timeout()
4931 hw->rx_desc_info.ring_phys); in netdev_tx_timeout()
4933 if (hw->all_multi) in netdev_tx_timeout()
4934 hw_set_multicast(hw, hw->all_multi); in netdev_tx_timeout()
4935 else if (hw->multi_list_size) in netdev_tx_timeout()
4938 if (hw->dev_count > 1) { in netdev_tx_timeout()
4940 for (port = 0; port < SWITCH_PORT_NUM; port++) { in netdev_tx_timeout()
4946 port_dev = hw->port_info[port].pdev; in netdev_tx_timeout()
4966 protocol = skb->protocol; in csum_verified()
4970 protocol = iph->tot_len; in csum_verified()
4975 if (iph->protocol == IPPROTO_TCP) in csum_verified()
4976 skb->ip_summed = CHECKSUM_UNNECESSARY; in csum_verified()
4985 struct dev_info *hw_priv = priv->adapter; in rx_proc()
4989 /* Received length includes 4-byte CRC. */ in rx_proc()
4990 packet_len = status.rx.frame_len - 4; in rx_proc()
4993 dma_sync_single_for_cpu(&hw_priv->pdev->dev, dma_buf->dma, in rx_proc()
4997 /* skb->data != skb->head */ in rx_proc()
5000 dev->stats.rx_dropped++; in rx_proc()
5001 return -ENOMEM; in rx_proc()
5005 * Align socket buffer on a 4-byte boundary for better in rx_proc()
5010 skb_put_data(skb, dma_buf->skb->data, packet_len); in rx_proc()
5013 skb->protocol = eth_type_trans(skb, dev); in rx_proc()
5015 if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP)) in rx_proc()
5019 dev->stats.rx_packets++; in rx_proc()
5020 dev->stats.rx_bytes += packet_len; in rx_proc()
5022 /* Notify upper layer of the received packet. */ in rx_proc()
5032 struct ksz_hw *hw = &hw_priv->hw; in dev_rcv_packets()
5033 struct net_device *dev = hw->port_info[0].pdev; in dev_rcv_packets()
5034 struct ksz_desc_info *info = &hw->rx_desc_info; in dev_rcv_packets()
5035 int left = info->alloc; in dev_rcv_packets()
5039 next = info->next; in dev_rcv_packets()
5040 while (left--) { in dev_rcv_packets()
5042 desc = &info->ring[next]; in dev_rcv_packets()
5043 status.data = le32_to_cpu(desc->phw->ctrl.data); in dev_rcv_packets()
5057 next &= info->mask; in dev_rcv_packets()
5059 info->next = next; in dev_rcv_packets()
5068 struct ksz_hw *hw = &hw_priv->hw; in port_rcv_packets()
5069 struct net_device *dev = hw->port_info[0].pdev; in port_rcv_packets()
5070 struct ksz_desc_info *info = &hw->rx_desc_info; in port_rcv_packets()
5071 int left = info->alloc; in port_rcv_packets()
5075 next = info->next; in port_rcv_packets()
5076 while (left--) { in port_rcv_packets()
5078 desc = &info->ring[next]; in port_rcv_packets()
5079 status.data = le32_to_cpu(desc->phw->ctrl.data); in port_rcv_packets()
5083 if (hw->dev_count > 1) { in port_rcv_packets()
5087 dev = hw->port_info[p].pdev; in port_rcv_packets()
5102 next &= info->mask; in port_rcv_packets()
5104 info->next = next; in port_rcv_packets()
5113 struct ksz_hw *hw = &hw_priv->hw; in dev_rcv_special()
5114 struct net_device *dev = hw->port_info[0].pdev; in dev_rcv_special()
5115 struct ksz_desc_info *info = &hw->rx_desc_info; in dev_rcv_special()
5116 int left = info->alloc; in dev_rcv_special()
5120 next = info->next; in dev_rcv_special()
5121 while (left--) { in dev_rcv_special()
5123 desc = &info->ring[next]; in dev_rcv_special()
5124 status.data = le32_to_cpu(desc->phw->ctrl.data); in dev_rcv_special()
5128 if (hw->dev_count > 1) { in dev_rcv_special()
5132 dev = hw->port_info[p].pdev; in dev_rcv_special()
5142 * dropped, so no need to check the error bit. in dev_rcv_special()
5154 priv->port.counter[OID_COUNTER_RCV_ERROR]++; in dev_rcv_special()
5161 next &= info->mask; in dev_rcv_special()
5163 info->next = next; in dev_rcv_special()
5171 struct ksz_hw *hw = &hw_priv->hw; in rx_proc_task()
5173 if (!hw->enabled) in rx_proc_task()
5175 if (unlikely(!hw_priv->dev_rcv(hw_priv))) { in rx_proc_task()
5181 spin_lock_irq(&hw_priv->hwlock); in rx_proc_task()
5183 spin_unlock_irq(&hw_priv->hwlock); in rx_proc_task()
5186 tasklet_schedule(&hw_priv->rx_tasklet); in rx_proc_task()
5193 struct ksz_hw *hw = &hw_priv->hw; in tx_proc_task()
5200 spin_lock_irq(&hw_priv->hwlock); in tx_proc_task()
5202 spin_unlock_irq(&hw_priv->hwlock); in tx_proc_task()
5208 if (0 == hw->rx_stop) in handle_rx_stop()
5209 hw->intr_mask &= ~KS884X_INT_RX_STOPPED; in handle_rx_stop()
5210 else if (hw->rx_stop > 1) { in handle_rx_stop()
5211 if (hw->enabled && (hw->rx_cfg & DMA_RX_ENABLE)) { in handle_rx_stop()
5214 hw->intr_mask &= ~KS884X_INT_RX_STOPPED; in handle_rx_stop()
5215 hw->rx_stop = 0; in handle_rx_stop()
5219 hw->rx_stop++; in handle_rx_stop()
5223 * netdev_intr - interrupt handling
5236 struct dev_info *hw_priv = priv->adapter; in netdev_intr()
5237 struct ksz_hw *hw = &hw_priv->hw; in netdev_intr()
5239 spin_lock(&hw_priv->hwlock); in netdev_intr()
5245 spin_unlock(&hw_priv->hwlock); in netdev_intr()
5251 int_enable &= hw->intr_mask; in netdev_intr()
5255 tasklet_schedule(&hw_priv->tx_tasklet); in netdev_intr()
5260 tasklet_schedule(&hw_priv->rx_tasklet); in netdev_intr()
5264 dev->stats.rx_fifo_errors++; in netdev_intr()
5269 struct ksz_port *port = &priv->port; in netdev_intr()
5271 hw->features |= LINK_INT_WORKING; in netdev_intr()
5283 hw->intr_mask &= ~KS884X_INT_TX_STOPPED; in netdev_intr()
5285 data = readl(hw->io + KS_DMA_TX_CTRL); in netdev_intr()
5294 spin_unlock(&hw_priv->hwlock); in netdev_intr()
5309 struct dev_info *hw_priv = priv->adapter; in netdev_netpoll()
5311 hw_dis_intr(&hw_priv->hw); in netdev_netpoll()
5312 netdev_intr(dev->irq, dev); in netdev_netpoll()
5320 struct ksz_switch *sw = hw->ksz_switch; in bridge_change()
5323 if (!sw->member) { in bridge_change()
5327 for (port = 0; port < SWITCH_PORT_NUM; port++) { in bridge_change()
5328 if (STP_STATE_FORWARDING == sw->port_cfg[port].stp_state) in bridge_change()
5329 member = HOST_MASK | sw->member; in bridge_change()
5332 if (member != sw->port_cfg[port].member) in bridge_change()
5338 * netdev_close - close network device
5349 struct dev_info *hw_priv = priv->adapter; in netdev_close()
5350 struct ksz_port *port = &priv->port; in netdev_close()
5351 struct ksz_hw *hw = &hw_priv->hw; in netdev_close()
5356 ksz_stop_timer(&priv->monitor_timer_info); in netdev_close()
5358 /* Need to shut the port manually in multiple device interfaces mode. */ in netdev_close()
5359 if (hw->dev_count > 1) { in netdev_close()
5360 port_set_stp_state(hw, port->first_port, STP_STATE_DISABLED); in netdev_close()
5362 /* Port is closed. Need to change bridge setting. */ in netdev_close()
5363 if (hw->features & STP_SUPPORT) { in netdev_close()
5364 pi = 1 << port->first_port; in netdev_close()
5365 if (hw->ksz_switch->member & pi) { in netdev_close()
5366 hw->ksz_switch->member &= ~pi; in netdev_close()
5371 if (port->first_port > 0) in netdev_close()
5372 hw_del_addr(hw, dev->dev_addr); in netdev_close()
5373 if (!hw_priv->wol_enable) in netdev_close()
5376 if (priv->multicast) in netdev_close()
5377 --hw->all_multi; in netdev_close()
5378 if (priv->promiscuous) in netdev_close()
5379 --hw->promiscuous; in netdev_close()
5381 hw_priv->opened--; in netdev_close()
5382 if (!(hw_priv->opened)) { in netdev_close()
5383 ksz_stop_timer(&hw_priv->mib_timer_info); in netdev_close()
5384 flush_work(&hw_priv->mib_read); in netdev_close()
5390 /* Delay for receive task to stop scheduling itself. */ in netdev_close()
5393 tasklet_kill(&hw_priv->rx_tasklet); in netdev_close()
5394 tasklet_kill(&hw_priv->tx_tasklet); in netdev_close()
5395 free_irq(dev->irq, hw_priv->dev); in netdev_close()
5398 hw_reset_pkts(&hw->rx_desc_info); in netdev_close()
5399 hw_reset_pkts(&hw->tx_desc_info); in netdev_close()
5402 if (hw->features & STP_SUPPORT) in netdev_close()
5411 if (hw->ksz_switch) { in hw_cfg_huge_frame()
5414 data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET); in hw_cfg_huge_frame()
5415 if (hw->features & RX_HUGE_FRAME) in hw_cfg_huge_frame()
5419 writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET); in hw_cfg_huge_frame()
5421 if (hw->features & RX_HUGE_FRAME) { in hw_cfg_huge_frame()
5422 hw->rx_cfg |= DMA_RX_ERROR; in hw_cfg_huge_frame()
5423 hw_priv->dev_rcv = dev_rcv_special; in hw_cfg_huge_frame()
5425 hw->rx_cfg &= ~DMA_RX_ERROR; in hw_cfg_huge_frame()
5426 if (hw->dev_count > 1) in hw_cfg_huge_frame()
5427 hw_priv->dev_rcv = port_rcv_packets; in hw_cfg_huge_frame()
5429 hw_priv->dev_rcv = dev_rcv_packets; in hw_cfg_huge_frame()
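hw_cfg_huge_frame picks the receive routine once, at configuration time, and stores it in hw_priv->dev_rcv, so the interrupt path pays a single indirect call instead of re-testing the huge-frame and device-count conditions per packet. A minimal standalone sketch of the dispatch (all names are placeholders):

#include <stdio.h>
#include <stdbool.h>

struct adapter;
typedef int (*rcv_fn)(struct adapter *a);

static int rcv_normal(struct adapter *a)  { (void)a; puts("normal path");   return 0; }
static int rcv_special(struct adapter *a) { (void)a; puts("special path");  return 0; }
static int rcv_perport(struct adapter *a) { (void)a; puts("per-port path"); return 0; }

struct adapter {
        bool huge_frames;       /* like hw->features & RX_HUGE_FRAME */
        int dev_count;          /* like hw->dev_count */
        rcv_fn dev_rcv;         /* like hw_priv->dev_rcv */
};

/* Pick the receive routine once, as hw_cfg_huge_frame() does. */
static void cfg_rcv(struct adapter *a)
{
        if (a->huge_frames)
                a->dev_rcv = rcv_special;
        else if (a->dev_count > 1)
                a->dev_rcv = rcv_perport;
        else
                a->dev_rcv = rcv_normal;
}

int main(void)
{
        struct adapter a = { .huge_frames = false, .dev_count = 2 };
        cfg_rcv(&a);
        a.dev_rcv(&a);          /* hot path: one indirect call, no branches */
        return 0;
}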
5436 struct dev_info *hw_priv = priv->adapter; in prepare_hardware()
5437 struct ksz_hw *hw = &hw_priv->hw; in prepare_hardware()
5441 hw_priv->dev = dev; in prepare_hardware()
5442 rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev); in prepare_hardware()
5445 tasklet_setup(&hw_priv->rx_tasklet, rx_proc_task); in prepare_hardware()
5446 tasklet_setup(&hw_priv->tx_tasklet, tx_proc_task); in prepare_hardware()
5448 hw->promiscuous = 0; in prepare_hardware()
5449 hw->all_multi = 0; in prepare_hardware()
5450 hw->multi_list_size = 0; in prepare_hardware()
5455 hw->tx_desc_info.ring_phys, hw->rx_desc_info.ring_phys); in prepare_hardware()
5466 if (media_state == priv->media_state) in set_media_state()
5471 media_state == priv->media_state ? "on" : "off"); in set_media_state()
5475 * netdev_open - open network device
5486 struct dev_info *hw_priv = priv->adapter; in netdev_open()
5487 struct ksz_hw *hw = &hw_priv->hw; in netdev_open()
5488 struct ksz_port *port = &priv->port; in netdev_open()
5493 priv->multicast = 0; in netdev_open()
5494 priv->promiscuous = 0; in netdev_open()
5497 memset(&dev->stats, 0, sizeof(struct net_device_stats)); in netdev_open()
5498 memset((void *) port->counter, 0, in netdev_open()
5501 if (!(hw_priv->opened)) { in netdev_open()
5505 for (i = 0; i < hw->mib_port_cnt; i++) { in netdev_open()
5510 hw_priv->counter[i].time = next_jiffies; in netdev_open()
5511 hw->port_mib[i].state = media_disconnected; in netdev_open()
5514 if (hw->ksz_switch) in netdev_open()
5515 hw->port_mib[HOST_PORT].state = media_connected; in netdev_open()
5519 hw_clr_wol_pme_status(&hw_priv->hw); in netdev_open()
5524 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) { in netdev_open()
5529 hw->port_info[p].partner = 0xFF; in netdev_open()
5530 hw->port_info[p].state = media_disconnected; in netdev_open()
5533 /* Need to open the port in multiple device interfaces mode. */ in netdev_open()
5534 if (hw->dev_count > 1) { in netdev_open()
5535 port_set_stp_state(hw, port->first_port, STP_STATE_SIMPLE); in netdev_open()
5536 if (port->first_port > 0) in netdev_open()
5537 hw_add_addr(hw, dev->dev_addr); in netdev_open()
5541 if (port->force_link) in netdev_open()
5546 if (!(hw_priv->opened)) { in netdev_open()
5551 if (hw->mib_port_cnt) in netdev_open()
5552 ksz_start_timer(&hw_priv->mib_timer_info, in netdev_open()
5553 hw_priv->mib_timer_info.period); in netdev_open()
5556 hw_priv->opened++; in netdev_open()
5558 ksz_start_timer(&priv->monitor_timer_info, in netdev_open()
5559 priv->monitor_timer_info.period); in netdev_open()
5561 priv->media_state = port->linked->state; in netdev_open()
5580 * netdev_query_statistics - query network device statistics
5591 struct ksz_port *port = &priv->port; in netdev_query_statistics()
5592 struct ksz_hw *hw = &priv->adapter->hw; in netdev_query_statistics()
5597 dev->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR]; in netdev_query_statistics()
5598 dev->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR]; in netdev_query_statistics()
5601 dev->stats.multicast = 0; in netdev_query_statistics()
5602 dev->stats.collisions = 0; in netdev_query_statistics()
5603 dev->stats.rx_length_errors = 0; in netdev_query_statistics()
5604 dev->stats.rx_crc_errors = 0; in netdev_query_statistics()
5605 dev->stats.rx_frame_errors = 0; in netdev_query_statistics()
5606 dev->stats.tx_window_errors = 0; in netdev_query_statistics()
5608 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) { in netdev_query_statistics()
5609 mib = &hw->port_mib[p]; in netdev_query_statistics()
5611 dev->stats.multicast += (unsigned long) in netdev_query_statistics()
5612 mib->counter[MIB_COUNTER_RX_MULTICAST]; in netdev_query_statistics()
5614 dev->stats.collisions += (unsigned long) in netdev_query_statistics()
5615 mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION]; in netdev_query_statistics()
5617 dev->stats.rx_length_errors += (unsigned long)( in netdev_query_statistics()
5618 mib->counter[MIB_COUNTER_RX_UNDERSIZE] + in netdev_query_statistics()
5619 mib->counter[MIB_COUNTER_RX_FRAGMENT] + in netdev_query_statistics()
5620 mib->counter[MIB_COUNTER_RX_OVERSIZE] + in netdev_query_statistics()
5621 mib->counter[MIB_COUNTER_RX_JABBER]); in netdev_query_statistics()
5622 dev->stats.rx_crc_errors += (unsigned long) in netdev_query_statistics()
5623 mib->counter[MIB_COUNTER_RX_CRC_ERR]; in netdev_query_statistics()
5624 dev->stats.rx_frame_errors += (unsigned long)( in netdev_query_statistics()
5625 mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] + in netdev_query_statistics()
5626 mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]); in netdev_query_statistics()
5628 dev->stats.tx_window_errors += (unsigned long) in netdev_query_statistics()
5629 mib->counter[MIB_COUNTER_TX_LATE_COLLISION]; in netdev_query_statistics()
5632 return &dev->stats; in netdev_query_statistics()
5636 * netdev_set_mac_address - set network device MAC address
5647 struct dev_info *hw_priv = priv->adapter; in netdev_set_mac_address()
5648 struct ksz_hw *hw = &hw_priv->hw; in netdev_set_mac_address()
5652 if (priv->port.first_port > 0) in netdev_set_mac_address()
5653 hw_del_addr(hw, dev->dev_addr); in netdev_set_mac_address()
5655 hw->mac_override = 1; in netdev_set_mac_address()
5656 memcpy(hw->override_addr, mac->sa_data, ETH_ALEN); in netdev_set_mac_address()
5659 memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN); in netdev_set_mac_address()
5663 if (priv->port.first_port > 0) in netdev_set_mac_address()
5664 hw_add_addr(hw, dev->dev_addr); in netdev_set_mac_address()
5675 if (promiscuous != priv->promiscuous) { in dev_set_promiscuous()
5676 u8 prev_state = hw->promiscuous; in dev_set_promiscuous()
5679 ++hw->promiscuous; in dev_set_promiscuous()
5681 --hw->promiscuous; in dev_set_promiscuous()
5682 priv->promiscuous = promiscuous; in dev_set_promiscuous()
5685 if (hw->promiscuous <= 1 && prev_state <= 1) in dev_set_promiscuous()
5686 hw_set_promiscuous(hw, hw->promiscuous); in dev_set_promiscuous()
5692 if ((hw->features & STP_SUPPORT) && !promiscuous && in dev_set_promiscuous()
5694 struct ksz_switch *sw = hw->ksz_switch; in dev_set_promiscuous()
5695 int port = priv->port.first_port; in dev_set_promiscuous()
5699 if (sw->member & port) { in dev_set_promiscuous()
5700 sw->member &= ~port; in dev_set_promiscuous()
5710 if (multicast != priv->multicast) { in dev_set_multicast()
5711 u8 all_multi = hw->all_multi; in dev_set_multicast()
5714 ++hw->all_multi; in dev_set_multicast()
5716 --hw->all_multi; in dev_set_multicast()
5717 priv->multicast = multicast; in dev_set_multicast()
5720 if (hw->all_multi <= 1 && all_multi <= 1) in dev_set_multicast()
5721 hw_set_multicast(hw, hw->all_multi); in dev_set_multicast()
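Because up to two logical net devices share one MAC engine, promiscuous and all-multicast modes are reference-counted in the shared hw structure; the "<= 1" tests above ensure the hardware bit is rewritten only on the 0-to-1 and 1-to-0 transitions. A standalone model of that counting rule (hw_set_promisc() stands in for the register write):

#include <stdio.h>

static void hw_set_promisc(int on) { printf("hw promisc -> %d\n", on); }

struct shared_hw { unsigned char promiscuous; };  /* count of requesters */

/* Mirrors dev_set_promiscuous(): touch hardware only on 0<->1 edges. */
static void set_promisc(struct shared_hw *hw, int *dev_flag, int want)
{
        unsigned char prev = hw->promiscuous;

        if (want == *dev_flag)
                return;
        if (want)
                ++hw->promiscuous;
        else
                --hw->promiscuous;
        *dev_flag = want;
        /* Both old and new counts <= 1 means we crossed the 0<->1 edge. */
        if (hw->promiscuous <= 1 && prev <= 1)
                hw_set_promisc(hw->promiscuous);
}

int main(void)
{
        struct shared_hw hw = { 0 };
        int eth0 = 0, eth1 = 0;

        set_promisc(&hw, &eth0, 1);     /* 0 -> 1: programs hardware */
        set_promisc(&hw, &eth1, 1);     /* 1 -> 2: hardware untouched */
        set_promisc(&hw, &eth0, 0);     /* 2 -> 1: hardware untouched */
        set_promisc(&hw, &eth1, 0);     /* 1 -> 0: programs hardware */
        return 0;
}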
5735 struct dev_info *hw_priv = priv->adapter; in netdev_set_rx_mode()
5736 struct ksz_hw *hw = &hw_priv->hw; in netdev_set_rx_mode()
5738 int multicast = (dev->flags & IFF_ALLMULTI); in netdev_set_rx_mode()
5740 dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC)); in netdev_set_rx_mode()
5742 if (hw_priv->hw.dev_count > 1) in netdev_set_rx_mode()
5743 multicast |= (dev->flags & IFF_MULTICAST); in netdev_set_rx_mode()
5747 if (hw_priv->hw.dev_count > 1) in netdev_set_rx_mode()
5750 if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) { in netdev_set_rx_mode()
5755 if (MAX_MULTICAST_LIST != hw->multi_list_size) { in netdev_set_rx_mode()
5756 hw->multi_list_size = MAX_MULTICAST_LIST; in netdev_set_rx_mode()
5757 ++hw->all_multi; in netdev_set_rx_mode()
5758 hw_set_multicast(hw, hw->all_multi); in netdev_set_rx_mode()
5766 memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN); in netdev_set_rx_mode()
5768 hw->multi_list_size = (u8) i; in netdev_set_rx_mode()
5771 if (MAX_MULTICAST_LIST == hw->multi_list_size) { in netdev_set_rx_mode()
5772 --hw->all_multi; in netdev_set_rx_mode()
5773 hw_set_multicast(hw, hw->all_multi); in netdev_set_rx_mode()
5775 hw->multi_list_size = 0; in netdev_set_rx_mode()
5783 struct dev_info *hw_priv = priv->adapter; in netdev_change_mtu()
5784 struct ksz_hw *hw = &hw_priv->hw; in netdev_change_mtu()
5788 return -EBUSY; in netdev_change_mtu()
5791 if (hw->dev_count > 1) in netdev_change_mtu()
5792 if (dev != hw_priv->dev) in netdev_change_mtu()
5797 hw->features |= RX_HUGE_FRAME; in netdev_change_mtu()
5800 hw->features &= ~RX_HUGE_FRAME; in netdev_change_mtu()
5804 hw_priv->mtu = hw_mtu; in netdev_change_mtu()
5805 dev->mtu = new_mtu; in netdev_change_mtu()
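netdev_change_mtu converts the requested MTU into the receive buffer size kept in hw_priv->mtu: header and FCS overhead are added, the result selects either the regular or the huge-frame buffer, and the final size is rounded up to a 4-byte multiple. A sketch of the rounding arithmetic; the overhead constants here are illustrative, not the driver's:

#include <stdio.h>

/* Illustrative overhead: 14-byte Ethernet header + 4-byte FCS. */
#define HDR_AND_FCS (14 + 4)

int main(void)
{
        int new_mtu = 1500;
        int hw_mtu = new_mtu + HDR_AND_FCS;     /* 1518 */

        /* Round up to a multiple of 4, as in "(hw_mtu + 3) & ~3". */
        hw_mtu = (hw_mtu + 3) & ~3;
        printf("mtu %d -> hw buffer %d bytes\n", new_mtu, hw_mtu);  /* 1520 */
        return 0;
}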
5811 * netdev_ioctl - I/O control processing
5823 struct dev_info *hw_priv = priv->adapter; in netdev_ioctl()
5824 struct ksz_hw *hw = &hw_priv->hw; in netdev_ioctl()
5825 struct ksz_port *port = &priv->port; in netdev_ioctl()
5829 if (down_interruptible(&priv->proc_sem)) in netdev_ioctl()
5830 return -ERESTARTSYS; in netdev_ioctl()
5833 /* Get address of MII PHY in use. */ in netdev_ioctl()
5835 data->phy_id = priv->id; in netdev_ioctl()
5838 /* Read MII PHY register. */ in netdev_ioctl()
5840 if (data->phy_id != priv->id || data->reg_num >= 6) in netdev_ioctl()
5841 result = -EIO; in netdev_ioctl()
5843 hw_r_phy(hw, port->linked->port_id, data->reg_num, in netdev_ioctl()
5844 &data->val_out); in netdev_ioctl()
5847 /* Write MII PHY register. */ in netdev_ioctl()
5850 result = -EPERM; in netdev_ioctl()
5851 else if (data->phy_id != priv->id || data->reg_num >= 6) in netdev_ioctl()
5852 result = -EIO; in netdev_ioctl()
5854 hw_w_phy(hw, port->linked->port_id, data->reg_num, in netdev_ioctl()
5855 data->val_in); in netdev_ioctl()
5859 result = -EOPNOTSUPP; in netdev_ioctl()
5862 up(&priv->proc_sem); in netdev_ioctl()
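The ioctl cases above implement the classic MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG), with reads and writes limited to the first six PHY registers. From userspace they are reached through any AF_INET socket and a struct ifreq; a minimal hedged sketch (the interface name is a placeholder):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
        struct ifreq ifr;
        /* mii-tool idiom: the MII data lives in the ifreq union space. */
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* placeholder name */

        if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {      /* fills mii->phy_id */
                mii->reg_num = MII_BMSR;              /* basic status, reg 1 */
                if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
                        printf("PHY %u BMSR = 0x%04x\n",
                               mii->phy_id, mii->val_out);
        }
        close(fd);
        return 0;
}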
5872 * mdio_read - read PHY register
5874 * @phy_id: The PHY id.
5877 * This function returns the PHY register value.
5884 struct ksz_port *port = &priv->port; in mdio_read()
5885 struct ksz_hw *hw = port->hw; in mdio_read()
5888 hw_r_phy(hw, port->linked->port_id, reg_num << 1, &val_out); in mdio_read()
5893 * mdio_write - set PHY register
5895 * @phy_id: The PHY id.
5899 * This procedure sets the PHY register value.
5904 struct ksz_port *port = &priv->port; in mdio_write()
5905 struct ksz_hw *hw = port->hw; in mdio_write()
5909 for (i = 0, pi = port->first_port; i < port->port_cnt; i++, pi++) in mdio_write()
5930 * netdev_get_link_ksettings - get network device settings
5934 * This function queries the PHY and returns its state in the ethtool command.
5942 struct dev_info *hw_priv = priv->adapter; in netdev_get_link_ksettings()
5944 mutex_lock(&hw_priv->lock); in netdev_get_link_ksettings()
5945 mii_ethtool_get_link_ksettings(&priv->mii_if, cmd); in netdev_get_link_ksettings()
5947 mutex_unlock(&hw_priv->lock); in netdev_get_link_ksettings()
5949 /* Save advertised settings for workaround in next function. */ in netdev_get_link_ksettings()
5950 ethtool_convert_link_mode_to_legacy_u32(&priv->advertising, in netdev_get_link_ksettings()
5951 cmd->link_modes.advertising); in netdev_get_link_ksettings()
5957 * netdev_set_link_ksettings - set network device settings
5961 * This function sets the PHY according to the ethtool command.
5969 struct dev_info *hw_priv = priv->adapter; in netdev_set_link_ksettings()
5970 struct ksz_port *port = &priv->port; in netdev_set_link_ksettings()
5972 u32 speed = cmd->base.speed; in netdev_set_link_ksettings()
5977 cmd->link_modes.advertising); in netdev_set_link_ksettings()
5983 if (cmd->base.autoneg && priv->advertising == advertising) { in netdev_set_link_ksettings()
5993 if (0 == cmd->base.duplex) in netdev_set_link_ksettings()
5997 else if (1 == cmd->base.duplex) in netdev_set_link_ksettings()
6002 mutex_lock(&hw_priv->lock); in netdev_set_link_ksettings()
6003 if (cmd->base.autoneg && in netdev_set_link_ksettings()
6005 port->duplex = 0; in netdev_set_link_ksettings()
6006 port->speed = 0; in netdev_set_link_ksettings()
6007 port->force_link = 0; in netdev_set_link_ksettings()
6009 port->duplex = cmd->base.duplex + 1; in netdev_set_link_ksettings()
6011 port->speed = speed; in netdev_set_link_ksettings()
6012 if (cmd->base.autoneg) in netdev_set_link_ksettings()
6013 port->force_link = 0; in netdev_set_link_ksettings()
6015 port->force_link = 1; in netdev_set_link_ksettings()
6022 &priv->mii_if, in netdev_set_link_ksettings()
6024 mutex_unlock(&hw_priv->lock); in netdev_set_link_ksettings()
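In the port fields a value of 0 consistently means "decided by auto-negotiation": with autoneg on and an untouched advertisement, duplex, speed, and force_link are all cleared, while a forced setting stores ethtool's duplex shifted by one (1 = half, 2 = full) so 0 stays free as the auto marker. A small standalone demo of that encoding, simplified from the function above:

#include <stdio.h>

/* ethtool's encoding: DUPLEX_HALF == 0, DUPLEX_FULL == 1. */
enum { ETH_DUPLEX_HALF = 0, ETH_DUPLEX_FULL = 1 };

int main(void)
{
        int autoneg = 0, eth_duplex = ETH_DUPLEX_FULL, speed = 100;
        int port_duplex, port_speed, force_link;

        if (autoneg) {
                port_duplex = 0;        /* 0 = left to auto-negotiation */
                port_speed  = 0;
                force_link  = 0;
        } else {
                port_duplex = eth_duplex + 1;   /* 1 = half, 2 = full */
                port_speed  = speed;
                force_link  = 1;
        }
        printf("duplex=%d speed=%d force=%d\n",
               port_duplex, port_speed, force_link);
        return 0;
}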
6029 * netdev_nway_reset - restart auto-negotiation
6032 * This function restarts the PHY for auto-negotiation.
6039 struct dev_info *hw_priv = priv->adapter; in netdev_nway_reset()
6042 mutex_lock(&hw_priv->lock); in netdev_nway_reset()
6043 rc = mii_nway_restart(&priv->mii_if); in netdev_nway_reset()
6044 mutex_unlock(&hw_priv->lock); in netdev_nway_reset()
6049 * netdev_get_link - get network device link status
6052 * This function gets the link status from the PHY.
6054 * Return true if PHY is linked and false otherwise.
6061 rc = mii_link_ok(&priv->mii_if); in netdev_get_link()
6066 * netdev_get_drvinfo - get network driver information
6076 struct dev_info *hw_priv = priv->adapter; in netdev_get_drvinfo()
6078 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); in netdev_get_drvinfo()
6079 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); in netdev_get_drvinfo()
6080 strlcpy(info->bus_info, pci_name(hw_priv->pdev), in netdev_get_drvinfo()
6081 sizeof(info->bus_info)); in netdev_get_drvinfo()
6098 * netdev_get_regs_len - get length of register dump
6110 while (range->end > range->start) { in netdev_get_regs_len()
6111 regs_len += (range->end - range->start + 3) / 4 * 4; in netdev_get_regs_len()
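netdev_get_regs_len sizes the dump by walking a table of register ranges terminated by an empty entry, rounding each range up to whole 32-bit words on top of the fixed 0x40 bytes of PCI configuration space that netdev_get_regs copies first. A standalone sketch with illustrative ranges (not the chip's actual register map):

#include <stdio.h>
#include <stdint.h>

struct reg_range { unsigned start, end; };

static const struct reg_range ranges[] = {
        { 0x000, 0x080 },       /* 128 bytes */
        { 0x100, 0x10a },       /* 10 bytes -> rounds up to 12 */
        { 0, 0 },               /* terminator: end <= start */
};

int main(void)
{
        const struct reg_range *r = ranges;
        unsigned regs_len = 0x10 * sizeof(uint32_t);  /* 0x40 bytes of PCI config */

        while (r->end > r->start) {
                /* Round each range up to whole 32-bit words. */
                regs_len += (r->end - r->start + 3) / 4 * 4;
                r++;
        }
        printf("dump length = %u bytes\n", regs_len);  /* 64 + 128 + 12 = 204 */
        return 0;
}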
6118 * netdev_get_regs - get register dump
6129 struct dev_info *hw_priv = priv->adapter; in netdev_get_regs()
6130 struct ksz_hw *hw = &hw_priv->hw; in netdev_get_regs()
6135 mutex_lock(&hw_priv->lock); in netdev_get_regs()
6136 regs->version = 0; in netdev_get_regs()
6137 for (len = 0; len < 0x40; len += 4) { in netdev_get_regs()
6138 pci_read_config_dword(hw_priv->pdev, len, buf); in netdev_get_regs()
6141 while (range->end > range->start) { in netdev_get_regs()
6142 for (len = range->start; len < range->end; len += 4) { in netdev_get_regs()
6143 *buf = readl(hw->io + len); in netdev_get_regs()
6148 mutex_unlock(&hw_priv->lock); in netdev_get_regs()
6157 * netdev_get_wol - get Wake-on-LAN support
6159 * @wol: Ethtool Wake-on-LAN data structure.
6161 * This procedure returns Wake-on-LAN support.
6167 struct dev_info *hw_priv = priv->adapter; in netdev_get_wol()
6169 wol->supported = hw_priv->wol_support; in netdev_get_wol()
6170 wol->wolopts = hw_priv->wol_enable; in netdev_get_wol()
6171 memset(&wol->sopass, 0, sizeof(wol->sopass)); in netdev_get_wol()
6175 * netdev_set_wol - set Wake-on-LAN support
6177 * @wol: Ethtool Wake-on-LAN data structure.
6179 * This function sets Wake-on-LAN support.
6187 struct dev_info *hw_priv = priv->adapter; in netdev_set_wol()
6189 /* Need to find a way to retrieve the device IP address. */ in netdev_set_wol()
6192 if (wol->wolopts & ~hw_priv->wol_support) in netdev_set_wol()
6193 return -EINVAL; in netdev_set_wol()
6195 hw_priv->wol_enable = wol->wolopts; in netdev_set_wol()
6198 if (wol->wolopts) in netdev_set_wol()
6199 hw_priv->wol_enable |= WAKE_PHY; in netdev_set_wol()
6200 hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr); in netdev_set_wol()
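netdev_get_wol and netdev_set_wol surface through ethtool's SIOCETHTOOL ioctl. A minimal userspace query of the Wake-on-LAN capabilities; the interface name is a placeholder:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* placeholder name */
        ifr.ifr_data = (void *)&wol;

        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("supported 0x%x, enabled 0x%x (WAKE_MAGIC = 0x%x)\n",
                       wol.supported, wol.wolopts, WAKE_MAGIC);
        close(fd);
        return 0;
}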
6205 * netdev_get_msglevel - get debug message level
6216 return priv->msg_enable; in netdev_get_msglevel()
6220 * netdev_set_msglevel - set debug message level
6230 priv->msg_enable = value; in netdev_set_msglevel()
6234 * netdev_get_eeprom_len - get EEPROM length
6249 * netdev_get_eeprom - get EEPROM data
6262 struct dev_info *hw_priv = priv->adapter; in netdev_get_eeprom()
6267 len = (eeprom->offset + eeprom->len + 1) / 2; in netdev_get_eeprom()
6268 for (i = eeprom->offset / 2; i < len; i++) in netdev_get_eeprom()
6269 eeprom_data[i] = eeprom_read(&hw_priv->hw, i); in netdev_get_eeprom()
6270 eeprom->magic = EEPROM_MAGIC; in netdev_get_eeprom()
6271 memcpy(data, &eeprom_byte[eeprom->offset], eeprom->len); in netdev_get_eeprom()
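The EEPROM is addressed in 16-bit words while ethtool works in bytes, so the code above converts the byte window [offset, offset + len) into the word range that covers it: the first word is offset / 2 and one past the last is (offset + len + 1) / 2. A worked example of that arithmetic:

#include <stdio.h>

int main(void)
{
        int offset = 3, len = 4;            /* byte window [3, 7) */

        int first = offset / 2;             /* = 1 */
        /* One past the last 16-bit word touched by the byte window. */
        int last = (offset + len + 1) / 2;  /* = 4 */

        for (int i = first; i < last; i++)
                printf("read EEPROM word %d (bytes %d-%d)\n",
                       i, 2 * i, 2 * i + 1);
        return 0;
}

The write path (netdev_set_eeprom, below) reads the same word range first and only then merges in the caller's bytes, so bytes that share a boundary word with the edited window are preserved.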
6277 * netdev_set_eeprom - write EEPROM data
6290 struct dev_info *hw_priv = priv->adapter; in netdev_set_eeprom()
6296 if (eeprom->magic != EEPROM_MAGIC) in netdev_set_eeprom()
6297 return -EINVAL; in netdev_set_eeprom()
6299 len = (eeprom->offset + eeprom->len + 1) / 2; in netdev_set_eeprom()
6300 for (i = eeprom->offset / 2; i < len; i++) in netdev_set_eeprom()
6301 eeprom_data[i] = eeprom_read(&hw_priv->hw, i); in netdev_set_eeprom()
6303 memcpy(&eeprom_byte[eeprom->offset], data, eeprom->len); in netdev_set_eeprom()
6304 for (i = 0; i < EEPROM_SIZE; i++) in netdev_set_eeprom()
6307 eeprom_write(&hw_priv->hw, i, eeprom_data[i]); in netdev_set_eeprom()
6314 * netdev_get_pauseparam - get flow control parameters
6324 struct dev_info *hw_priv = priv->adapter; in netdev_get_pauseparam()
6325 struct ksz_hw *hw = &hw_priv->hw; in netdev_get_pauseparam()
6327 pause->autoneg = (hw->overrides & PAUSE_FLOW_CTRL) ? 0 : 1; in netdev_get_pauseparam()
6328 if (!hw->ksz_switch) { in netdev_get_pauseparam()
6329 pause->rx_pause = in netdev_get_pauseparam()
6330 (hw->rx_cfg & DMA_RX_FLOW_ENABLE) ? 1 : 0; in netdev_get_pauseparam()
6331 pause->tx_pause = in netdev_get_pauseparam()
6332 (hw->tx_cfg & DMA_TX_FLOW_ENABLE) ? 1 : 0; in netdev_get_pauseparam()
6334 pause->rx_pause = in netdev_get_pauseparam()
6337 pause->tx_pause = in netdev_get_pauseparam()
6344 * netdev_set_pauseparam - set flow control parameters
6357 struct dev_info *hw_priv = priv->adapter; in netdev_set_pauseparam()
6358 struct ksz_hw *hw = &hw_priv->hw; in netdev_set_pauseparam()
6359 struct ksz_port *port = &priv->port; in netdev_set_pauseparam()
6361 mutex_lock(&hw_priv->lock); in netdev_set_pauseparam()
6362 if (pause->autoneg) { in netdev_set_pauseparam()
6363 if (!pause->rx_pause && !pause->tx_pause) in netdev_set_pauseparam()
6364 port->flow_ctrl = PHY_NO_FLOW_CTRL; in netdev_set_pauseparam()
6366 port->flow_ctrl = PHY_FLOW_CTRL; in netdev_set_pauseparam()
6367 hw->overrides &= ~PAUSE_FLOW_CTRL; in netdev_set_pauseparam()
6368 port->force_link = 0; in netdev_set_pauseparam()
6369 if (hw->ksz_switch) { in netdev_set_pauseparam()
6377 hw->overrides |= PAUSE_FLOW_CTRL; in netdev_set_pauseparam()
6378 if (hw->ksz_switch) { in netdev_set_pauseparam()
6380 SWITCH_RX_FLOW_CTRL, pause->rx_pause); in netdev_set_pauseparam()
6382 SWITCH_TX_FLOW_CTRL, pause->tx_pause); in netdev_set_pauseparam()
6384 set_flow_ctrl(hw, pause->rx_pause, pause->tx_pause); in netdev_set_pauseparam()
6386 mutex_unlock(&hw_priv->lock); in netdev_set_pauseparam()
6392 * netdev_get_ringparam - get tx/rx ring parameters
6402 struct dev_info *hw_priv = priv->adapter; in netdev_get_ringparam()
6403 struct ksz_hw *hw = &hw_priv->hw; in netdev_get_ringparam()
6405 ring->tx_max_pending = (1 << 9); in netdev_get_ringparam()
6406 ring->tx_pending = hw->tx_desc_info.alloc; in netdev_get_ringparam()
6407 ring->rx_max_pending = (1 << 9); in netdev_get_ringparam()
6408 ring->rx_pending = hw->rx_desc_info.alloc; in netdev_get_ringparam()
6455 * netdev_get_strings - get statistics identity strings
6465 struct dev_info *hw_priv = priv->adapter; in netdev_get_strings()
6466 struct ksz_hw *hw = &hw_priv->hw; in netdev_get_strings()
6470 ETH_GSTRING_LEN * hw->mib_cnt); in netdev_get_strings()
6474 * netdev_get_sset_count - get statistics size
6485 struct dev_info *hw_priv = priv->adapter; in netdev_get_sset_count()
6486 struct ksz_hw *hw = &hw_priv->hw; in netdev_get_sset_count()
6490 return hw->mib_cnt; in netdev_get_sset_count()
6492 return -EOPNOTSUPP; in netdev_get_sset_count()
6497 * netdev_get_ethtool_stats - get network device statistics
6508 struct dev_info *hw_priv = priv->adapter; in netdev_get_ethtool_stats()
6509 struct ksz_hw *hw = &hw_priv->hw; in netdev_get_ethtool_stats()
6510 struct ksz_port *port = &priv->port; in netdev_get_ethtool_stats()
6511 int n_stats = stats->n_stats; in netdev_get_ethtool_stats()
6517 mutex_lock(&hw_priv->lock); in netdev_get_ethtool_stats()
6519 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) { in netdev_get_ethtool_stats()
6520 if (media_connected == hw->port_mib[p].state) { in netdev_get_ethtool_stats()
6521 hw_priv->counter[p].read = 1; in netdev_get_ethtool_stats()
6528 mutex_unlock(&hw_priv->lock); in netdev_get_ethtool_stats()
6531 schedule_work(&hw_priv->mib_read); in netdev_get_ethtool_stats()
6533 if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) { in netdev_get_ethtool_stats()
6536 hw_priv->counter[p].counter, in netdev_get_ethtool_stats()
6537 2 == hw_priv->counter[p].read, in netdev_get_ethtool_stats()
6540 for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) { in netdev_get_ethtool_stats()
6543 hw_priv->counter[p].counter, in netdev_get_ethtool_stats()
6544 2 == hw_priv->counter[p].read, in netdev_get_ethtool_stats()
6546 } else if (hw->port_mib[p].cnt_ptr) { in netdev_get_ethtool_stats()
6548 hw_priv->counter[p].counter, in netdev_get_ethtool_stats()
6549 2 == hw_priv->counter[p].read, in netdev_get_ethtool_stats()
6554 get_mib_counters(hw, port->first_port, port->mib_port_cnt, counter); in netdev_get_ethtool_stats()
6555 n = hw->mib_cnt; in netdev_get_ethtool_stats()
6558 n_stats -= n; in netdev_get_ethtool_stats()
6559 for (i = 0; i < n; i++) in netdev_get_ethtool_stats()
6564 * netdev_set_features - set receive checksum support
6576 struct dev_info *hw_priv = priv->adapter; in netdev_set_features()
6577 struct ksz_hw *hw = &hw_priv->hw; in netdev_set_features()
6579 mutex_lock(&hw_priv->lock); in netdev_set_features()
6583 hw->rx_cfg |= DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP; in netdev_set_features()
6585 hw->rx_cfg &= ~(DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP); in netdev_set_features()
6587 if (hw->enabled) in netdev_set_features()
6588 writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL); in netdev_set_features()
6590 mutex_unlock(&hw_priv->lock); in netdev_set_features()
6625 if (priv->media_state != port->linked->state) { in update_link()
6626 priv->media_state = port->linked->state; in update_link()
6636 struct ksz_hw *hw = &hw_priv->hw; in mib_read_work()
6641 for (i = 0; i < hw->mib_port_cnt; i++) { in mib_read_work()
6642 mib = &hw->port_mib[i]; in mib_read_work()
6645 if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) { in mib_read_work()
6647 /* Need to process receive interrupt. */ in mib_read_work()
6650 hw_priv->counter[i].read = 0; in mib_read_work()
6653 if (0 == mib->cnt_ptr) { in mib_read_work()
6654 hw_priv->counter[i].read = 2; in mib_read_work()
6656 &hw_priv->counter[i].counter); in mib_read_work()
6658 } else if (time_after_eq(jiffies, hw_priv->counter[i].time)) { in mib_read_work()
6660 if (media_connected == mib->state) in mib_read_work()
6661 hw_priv->counter[i].read = 1; in mib_read_work()
6662 next_jiffies += HZ * 1 * hw->mib_port_cnt; in mib_read_work()
6663 hw_priv->counter[i].time = next_jiffies; in mib_read_work()
6666 } else if (mib->link_down) { in mib_read_work()
6667 mib->link_down = 0; in mib_read_work()
6670 hw_priv->counter[i].read = 1; in mib_read_work()
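mib_read_work walks every port: a port whose counters are mid-read keeps priority, and otherwise a connected port whose deadline has passed gets a read request with its deadline pushed one full rotation (HZ * mib_port_cnt) into the future, matching the "next_jiffies += HZ * 1 * hw->mib_port_cnt" line above. A runnable toy model of that rotation; HZ, the port count, and the simplified time_after_eq() here are illustrative:

#include <stdio.h>

#define HZ 100                  /* illustrative tick rate */
#define PORTS 3

/* Simplified, wrap-safe version of the kernel's time_after_eq(). */
#define time_after_eq(a, b) ((long)((a) - (b)) >= 0)

int main(void)
{
        unsigned long jiffies, next = HZ;
        unsigned long deadline[PORTS];

        /* Stagger the per-port deadlines, one port per second. */
        for (int i = 0; i < PORTS; i++) {
                deadline[i] = next;
                next += HZ;
        }
        for (jiffies = 0; jiffies < 5 * HZ; jiffies += HZ / 2) {
                for (int i = 0; i < PORTS; i++) {
                        if (time_after_eq(jiffies, deadline[i])) {
                                printf("t=%lu: read MIB counters, port %d\n",
                                       jiffies, i);
                                /* Re-arm one full rotation later. */
                                deadline[i] += HZ * PORTS;
                        }
                }
        }
        return 0;
}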
6679 mib_read_work(&hw_priv->mib_read); in mib_monitor()
6681 /* This is used to verify Wake-on-LAN is working. */ in mib_monitor()
6682 if (hw_priv->pme_wait) { in mib_monitor()
6683 if (time_is_before_eq_jiffies(hw_priv->pme_wait)) { in mib_monitor()
6684 hw_clr_wol_pme_status(&hw_priv->hw); in mib_monitor()
6685 hw_priv->pme_wait = 0; in mib_monitor()
6687 } else if (hw_chk_wol_pme_status(&hw_priv->hw)) { in mib_monitor()
6690 hw_priv->pme_wait = jiffies + HZ * 2; in mib_monitor()
6693 ksz_update_timer(&hw_priv->mib_timer_info); in mib_monitor()
6697 * dev_monitor - periodic monitoring
6705 struct net_device *dev = priv->mii_if.dev; in dev_monitor()
6706 struct dev_info *hw_priv = priv->adapter; in dev_monitor()
6707 struct ksz_hw *hw = &hw_priv->hw; in dev_monitor()
6708 struct ksz_port *port = &priv->port; in dev_monitor()
6710 if (!(hw->features & LINK_INT_WORKING)) in dev_monitor()
6714 ksz_update_timer(&priv->monitor_timer_info); in dev_monitor()
6729 * This enables multiple network device mode for KSZ8842, which contains a
6730 * switch with two physical ports. Some users like to take control of the
6731 * ports for running Spanning Tree Protocol. The driver will create an
6732 * additional eth? device for the other port.
6742 * packets are forwarded inside the switch, and only packets
6743 * that need the host's attention are passed to it. This prevents the host
6744 * from wasting CPU time examining and forwarding every incoming packet.
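Both behaviors described above are chosen at module load time. A hedged sketch of the parameter plumbing; the names mirror the driver's multi_dev and stp parameters, but the permissions and description strings here are illustrative, and this is not a complete module:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int multi_dev;   /* one net device per switch port when set */
static int stp;         /* keep Spanning Tree support enabled */

module_param(multi_dev, int, 0);
MODULE_PARM_DESC(multi_dev, "Multiple network device interfaces");
module_param(stp, int, 0);
MODULE_PARM_DESC(stp, "STP support");

With parameters like these in place, the mode is selected at load time, e.g. "modprobe ksz884x multi_dev=1".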
6762 * netdev_init - initialize network device.
6774 ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000, in netdev_init()
6778 dev->watchdog_timeo = HZ / 2; in netdev_init()
6780 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM; in netdev_init()
6786 dev->hw_features |= NETIF_F_IPV6_CSUM; in netdev_init()
6788 dev->features |= dev->hw_features; in netdev_init()
6790 sema_init(&priv->proc_sem, 1); in netdev_init()
6792 priv->mii_if.phy_id_mask = 0x1; in netdev_init()
6793 priv->mii_if.reg_num_mask = 0x7; in netdev_init()
6794 priv->mii_if.dev = dev; in netdev_init()
6795 priv->mii_if.mdio_read = mdio_read; in netdev_init()
6796 priv->mii_if.mdio_write = mdio_write; in netdev_init()
6797 priv->mii_if.phy_id = priv->port.first_port + 1; in netdev_init()
6799 priv->msg_enable = netif_msg_init(msg_enable, in netdev_init()
6825 if (dev->watchdog_timeo) in netdev_free()
6864 hw_priv->hw.override_addr[j++] = (u8) num; in get_mac_addr()
6865 hw_priv->hw.override_addr[5] += in get_mac_addr()
6866 hw_priv->hw.id; in get_mac_addr()
6868 hw_priv->hw.ksz_switch->other_addr[j++] = in get_mac_addr()
6870 hw_priv->hw.ksz_switch->other_addr[5] += in get_mac_addr()
6871 hw_priv->hw.id; in get_mac_addr()
6879 hw_priv->hw.mac_override = 1; in get_mac_addr()
6889 struct ksz_switch *sw = hw->ksz_switch; in read_other_addr()
6891 for (i = 0; i < 3; i++) in read_other_addr()
6894 sw->other_addr[5] = (u8) data[0]; in read_other_addr()
6895 sw->other_addr[4] = (u8)(data[0] >> 8); in read_other_addr()
6896 sw->other_addr[3] = (u8) data[1]; in read_other_addr()
6897 sw->other_addr[2] = (u8)(data[1] >> 8); in read_other_addr()
6898 sw->other_addr[1] = (u8) data[2]; in read_other_addr()
6899 sw->other_addr[0] = (u8)(data[2] >> 8); in read_other_addr()
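read_other_addr rebuilds the second MAC address from three little-endian 16-bit EEPROM words, unpacking them in reverse so the low byte of word 0 becomes the last address byte. A standalone demonstration with illustrative word values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Example EEPROM contents (illustrative values only). */
        uint16_t data[3] = { 0x1122, 0x3344, 0x5566 };
        uint8_t mac[6];

        mac[5] = (uint8_t)data[0];          /* 0x22 */
        mac[4] = (uint8_t)(data[0] >> 8);   /* 0x11 */
        mac[3] = (uint8_t)data[1];
        mac[2] = (uint8_t)(data[1] >> 8);
        mac[1] = (uint8_t)data[2];
        mac[0] = (uint8_t)(data[2] >> 8);

        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        /* prints 55:66:33:44:11:22 */
        return 0;
}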
6930 result = -ENODEV; in pcidev_init()
6932 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) || in pcidev_init()
6933 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) in pcidev_init()
6945 result = -ENOMEM; in pcidev_init()
6951 hw_priv = &info->dev_info; in pcidev_init()
6952 hw_priv->pdev = pdev; in pcidev_init()
6954 hw = &hw_priv->hw; in pcidev_init()
6956 hw->io = ioremap(reg_base, reg_len); in pcidev_init()
6957 if (!hw->io) in pcidev_init()
6964 result = -ENODEV; in pcidev_init()
6970 dev_info(&hw_priv->pdev->dev, "%s\n", banner); in pcidev_init()
6971 dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq); in pcidev_init()
6974 hw->dev_count = 1; in pcidev_init()
6977 hw->addr_list_size = 0; in pcidev_init()
6978 hw->mib_cnt = PORT_COUNTER_NUM; in pcidev_init()
6979 hw->mib_port_cnt = 1; in pcidev_init()
6984 hw->overrides |= FAST_AGING; in pcidev_init()
6986 hw->mib_cnt = TOTAL_PORT_COUNTER_NUM; in pcidev_init()
6990 hw->dev_count = SWITCH_PORT_NUM; in pcidev_init()
6991 hw->addr_list_size = SWITCH_PORT_NUM - 1; in pcidev_init()
6995 if (1 == hw->dev_count) { in pcidev_init()
6999 hw->mib_port_cnt = TOTAL_PORT_NUM; in pcidev_init()
7000 hw->ksz_switch = kzalloc(sizeof(struct ksz_switch), GFP_KERNEL); in pcidev_init()
7001 if (!hw->ksz_switch) in pcidev_init()
7004 sw = hw->ksz_switch; in pcidev_init()
7006 for (i = 0; i < hw->mib_port_cnt; i++) in pcidev_init()
7007 hw->port_mib[i].mib_start = 0; in pcidev_init()
7009 hw->parent = hw_priv; in pcidev_init()
7012 hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3; in pcidev_init()
7017 hw_priv->hw.id = net_device_present; in pcidev_init()
7019 spin_lock_init(&hw_priv->hwlock); in pcidev_init()
7020 mutex_init(&hw_priv->lock); in pcidev_init()
7022 for (i = 0; i < TOTAL_PORT_NUM; i++) in pcidev_init()
7023 init_waitqueue_head(&hw_priv->counter[i].counter); in pcidev_init()
7032 if (hw->dev_count > 1) { in pcidev_init()
7033 memcpy(sw->other_addr, hw->override_addr, ETH_ALEN); in pcidev_init()
7040 if (hw->ksz_switch) in pcidev_init()
7043 hw_priv->wol_support = WOL_SUPPORT; in pcidev_init()
7044 hw_priv->wol_enable = 0; in pcidev_init()
7047 INIT_WORK(&hw_priv->mib_read, mib_read_work); in pcidev_init()
7050 ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000, in pcidev_init()
7053 for (i = 0; i < hw->dev_count; i++) { in pcidev_init()
7057 SET_NETDEV_DEV(dev, &pdev->dev); in pcidev_init()
7058 info->netdev[i] = dev; in pcidev_init()
7061 priv->adapter = hw_priv; in pcidev_init()
7062 priv->id = net_device_present++; in pcidev_init()
7064 port = &priv->port; in pcidev_init()
7065 port->port_cnt = port_count; in pcidev_init()
7066 port->mib_port_cnt = mib_port_count; in pcidev_init()
7067 port->first_port = i; in pcidev_init()
7068 port->flow_ctrl = PHY_FLOW_CTRL; in pcidev_init()
7070 port->hw = hw; in pcidev_init()
7071 port->linked = &hw->port_info[port->first_port]; in pcidev_init()
7073 for (cnt = 0, pi = i; cnt < port_count; cnt++, pi++) { in pcidev_init()
7074 hw->port_info[pi].port_id = pi; in pcidev_init()
7075 hw->port_info[pi].pdev = dev; in pcidev_init()
7076 hw->port_info[pi].state = media_disconnected; in pcidev_init()
7079 dev->mem_start = (unsigned long) hw->io; in pcidev_init()
7080 dev->mem_end = dev->mem_start + reg_len - 1; in pcidev_init()
7081 dev->irq = pdev->irq; in pcidev_init()
7083 memcpy(dev->dev_addr, hw_priv->hw.override_addr, in pcidev_init()
7086 memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN); in pcidev_init()
7087 if (ether_addr_equal(sw->other_addr, hw->override_addr)) in pcidev_init()
7088 dev->dev_addr[5] += port->first_port; in pcidev_init()
7091 dev->netdev_ops = &netdev_ops; in pcidev_init()
7092 dev->ethtool_ops = &netdev_ethtool_ops; in pcidev_init()
7094 /* MTU range: 60 - 1894 */ in pcidev_init()
7095 dev->min_mtu = ETH_ZLEN; in pcidev_init()
7096 dev->max_mtu = MAX_RX_BUF_SIZE - in pcidev_init()
7104 pci_dev_get(hw_priv->pdev); in pcidev_init()
7109 for (i = 0; i < hw->dev_count; i++) { in pcidev_init()
7110 if (info->netdev[i]) { in pcidev_init()
7111 netdev_free(info->netdev[i]); in pcidev_init()
7112 info->netdev[i] = NULL; in pcidev_init()
7118 kfree(hw->ksz_switch); in pcidev_init()
7121 iounmap(hw->io); in pcidev_init()
7136 struct dev_info *hw_priv = &info->dev_info; in pcidev_exit()
7140 for (i = 0; i < hw_priv->hw.dev_count; i++) { in pcidev_exit()
7141 if (info->netdev[i]) in pcidev_exit()
7142 netdev_free(info->netdev[i]); in pcidev_exit()
7144 if (hw_priv->hw.io) in pcidev_exit()
7145 iounmap(hw_priv->hw.io); in pcidev_exit()
7147 kfree(hw_priv->hw.ksz_switch); in pcidev_exit()
7148 pci_dev_put(hw_priv->pdev); in pcidev_exit()
7156 struct dev_info *hw_priv = &info->dev_info; in pcidev_resume()
7157 struct ksz_hw *hw = &hw_priv->hw; in pcidev_resume()
7161 if (hw_priv->wol_enable) in pcidev_resume()
7163 for (i = 0; i < hw->dev_count; i++) { in pcidev_resume()
7164 if (info->netdev[i]) { in pcidev_resume()
7165 struct net_device *dev = info->netdev[i]; in pcidev_resume()
7180 struct dev_info *hw_priv = &info->dev_info; in pcidev_suspend()
7181 struct ksz_hw *hw = &hw_priv->hw; in pcidev_suspend()
7183 /* Need to find a way to retrieve the device IP address. */ in pcidev_suspend()
7186 for (i = 0; i < hw->dev_count; i++) { in pcidev_suspend()
7187 if (info->netdev[i]) { in pcidev_suspend()
7188 struct net_device *dev = info->netdev[i]; in pcidev_suspend()
7196 if (hw_priv->wol_enable) { in pcidev_suspend()
7197 hw_enable_wol(hw, hw_priv->wol_enable, net_addr); in pcidev_suspend()