Lines Matching full:cp

110  * also, we need to make cp->lock finer-grained.
160 #define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
229 static void cas_set_link_modes(struct cas *cp);
231 static inline void cas_lock_tx(struct cas *cp) in cas_lock_tx() argument
236 spin_lock_nested(&cp->tx_lock[i], i); in cas_lock_tx()
247 #define cas_lock_all_save(cp, flags) \ argument
249 struct cas *xxxcp = (cp); \
254 static inline void cas_unlock_tx(struct cas *cp) in cas_unlock_tx() argument
259 spin_unlock(&cp->tx_lock[i - 1]); in cas_unlock_tx()
262 #define cas_unlock_all_restore(cp, flags) \ argument
264 struct cas *xxxcp = (cp); \
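
The matches above (source lines 231-264) are the per-ring TX locking helpers: cas_lock_tx() takes each tx_lock[i] with spin_lock_nested(), using the ring index as the lockdep subclass, cas_unlock_tx() releases them in reverse, and the *_all_save/*_all_restore macros wrap that in an IRQ-flags save/restore. A minimal, self-contained sketch of the same ordering discipline, with pthread mutexes and an assumed N_TX_RINGS of 4 standing in for the driver's spinlocks:

#include <pthread.h>

#define N_TX_RINGS 4    /* assumed ring count, for illustration only */

static pthread_mutex_t tx_lock[N_TX_RINGS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* acquire every ring lock in ascending order so two CPUs can never
 * deadlock by grabbing them in opposite orders */
static void lock_all_tx(void)
{
        int i;

        for (i = 0; i < N_TX_RINGS; i++)
                pthread_mutex_lock(&tx_lock[i]);
}

/* release in the reverse of the order they were taken */
static void unlock_all_tx(void)
{
        int i;

        for (i = N_TX_RINGS; i > 0; i--)
                pthread_mutex_unlock(&tx_lock[i - 1]);
}
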
269 static void cas_disable_irq(struct cas *cp, const int ring) in cas_disable_irq() argument
273 writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK); in cas_disable_irq()
278 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { in cas_disable_irq()
291 cp->regs + REG_PLUS_INTRN_MASK(ring)); in cas_disable_irq()
295 writel(INTRN_MASK_CLEAR_ALL, cp->regs + in cas_disable_irq()
302 static inline void cas_mask_intr(struct cas *cp) in cas_mask_intr() argument
307 cas_disable_irq(cp, i); in cas_mask_intr()
310 static void cas_enable_irq(struct cas *cp, const int ring) in cas_enable_irq() argument
313 writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK); in cas_enable_irq()
317 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { in cas_enable_irq()
329 writel(INTRN_MASK_RX_EN, cp->regs + in cas_enable_irq()
339 static inline void cas_unmask_intr(struct cas *cp) in cas_unmask_intr() argument
344 cas_enable_irq(cp, i); in cas_unmask_intr()
347 static inline void cas_entropy_gather(struct cas *cp) in cas_entropy_gather() argument
350 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0) in cas_entropy_gather()
353 batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV), in cas_entropy_gather()
354 readl(cp->regs + REG_ENTROPY_IV), in cas_entropy_gather()
359 static inline void cas_entropy_reset(struct cas *cp) in cas_entropy_reset() argument
362 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0) in cas_entropy_reset()
366 cp->regs + REG_BIM_LOCAL_DEV_EN); in cas_entropy_reset()
367 writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET); in cas_entropy_reset()
368 writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG); in cas_entropy_reset()
371 if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0) in cas_entropy_reset()
372 cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV; in cas_entropy_reset()
379 static u16 cas_phy_read(struct cas *cp, int reg) in cas_phy_read() argument
385 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr); in cas_phy_read()
388 writel(cmd, cp->regs + REG_MIF_FRAME); in cas_phy_read()
393 cmd = readl(cp->regs + REG_MIF_FRAME); in cas_phy_read()
400 static int cas_phy_write(struct cas *cp, int reg, u16 val) in cas_phy_write() argument
406 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr); in cas_phy_write()
410 writel(cmd, cp->regs + REG_MIF_FRAME); in cas_phy_write()
415 cmd = readl(cp->regs + REG_MIF_FRAME); in cas_phy_write()
422 static void cas_phy_powerup(struct cas *cp) in cas_phy_powerup() argument
424 u16 ctl = cas_phy_read(cp, MII_BMCR); in cas_phy_powerup()
429 cas_phy_write(cp, MII_BMCR, ctl); in cas_phy_powerup()
432 static void cas_phy_powerdown(struct cas *cp) in cas_phy_powerdown() argument
434 u16 ctl = cas_phy_read(cp, MII_BMCR); in cas_phy_powerdown()
439 cas_phy_write(cp, MII_BMCR, ctl); in cas_phy_powerdown()
442 /* cp->lock held. note: the last put_page will free the buffer */
443 static int cas_page_free(struct cas *cp, cas_page_t *page) in cas_page_free() argument
445 dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size, in cas_page_free()
447 __free_pages(page->buffer, cp->page_order); in cas_page_free()
463 static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags) in cas_page_alloc() argument
473 page->buffer = alloc_pages(flags, cp->page_order); in cas_page_alloc()
476 page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0, in cas_page_alloc()
477 cp->page_size, DMA_FROM_DEVICE); in cas_page_alloc()
486 static void cas_spare_init(struct cas *cp) in cas_spare_init() argument
488 spin_lock(&cp->rx_inuse_lock); in cas_spare_init()
489 INIT_LIST_HEAD(&cp->rx_inuse_list); in cas_spare_init()
490 spin_unlock(&cp->rx_inuse_lock); in cas_spare_init()
492 spin_lock(&cp->rx_spare_lock); in cas_spare_init()
493 INIT_LIST_HEAD(&cp->rx_spare_list); in cas_spare_init()
494 cp->rx_spares_needed = RX_SPARE_COUNT; in cas_spare_init()
495 spin_unlock(&cp->rx_spare_lock); in cas_spare_init()
499 static void cas_spare_free(struct cas *cp) in cas_spare_free() argument
505 spin_lock(&cp->rx_spare_lock); in cas_spare_free()
506 list_splice_init(&cp->rx_spare_list, &list); in cas_spare_free()
507 spin_unlock(&cp->rx_spare_lock); in cas_spare_free()
509 cas_page_free(cp, list_entry(elem, cas_page_t, list)); in cas_spare_free()
518 spin_lock(&cp->rx_inuse_lock); in cas_spare_free()
519 list_splice_init(&cp->rx_inuse_list, &list); in cas_spare_free()
520 spin_unlock(&cp->rx_inuse_lock); in cas_spare_free()
522 spin_lock(&cp->rx_spare_lock); in cas_spare_free()
523 list_splice_init(&cp->rx_inuse_list, &list); in cas_spare_free()
524 spin_unlock(&cp->rx_spare_lock); in cas_spare_free()
527 cas_page_free(cp, list_entry(elem, cas_page_t, list)); in cas_spare_free()
532 static void cas_spare_recover(struct cas *cp, const gfp_t flags) in cas_spare_recover() argument
543 spin_lock(&cp->rx_inuse_lock); in cas_spare_recover()
544 list_splice_init(&cp->rx_inuse_list, &list); in cas_spare_recover()
545 spin_unlock(&cp->rx_inuse_lock); in cas_spare_recover()
566 spin_lock(&cp->rx_spare_lock); in cas_spare_recover()
567 if (cp->rx_spares_needed > 0) { in cas_spare_recover()
568 list_add(elem, &cp->rx_spare_list); in cas_spare_recover()
569 cp->rx_spares_needed--; in cas_spare_recover()
570 spin_unlock(&cp->rx_spare_lock); in cas_spare_recover()
572 spin_unlock(&cp->rx_spare_lock); in cas_spare_recover()
573 cas_page_free(cp, page); in cas_spare_recover()
579 spin_lock(&cp->rx_inuse_lock); in cas_spare_recover()
580 list_splice(&list, &cp->rx_inuse_list); in cas_spare_recover()
581 spin_unlock(&cp->rx_inuse_lock); in cas_spare_recover()
584 spin_lock(&cp->rx_spare_lock); in cas_spare_recover()
585 needed = cp->rx_spares_needed; in cas_spare_recover()
586 spin_unlock(&cp->rx_spare_lock); in cas_spare_recover()
594 cas_page_t *spare = cas_page_alloc(cp, flags); in cas_spare_recover()
601 spin_lock(&cp->rx_spare_lock); in cas_spare_recover()
602 list_splice(&list, &cp->rx_spare_list); in cas_spare_recover()
603 cp->rx_spares_needed -= i; in cas_spare_recover()
604 spin_unlock(&cp->rx_spare_lock); in cas_spare_recover()
608 static cas_page_t *cas_page_dequeue(struct cas *cp) in cas_page_dequeue() argument
613 spin_lock(&cp->rx_spare_lock); in cas_page_dequeue()
614 if (list_empty(&cp->rx_spare_list)) { in cas_page_dequeue()
616 spin_unlock(&cp->rx_spare_lock); in cas_page_dequeue()
617 cas_spare_recover(cp, GFP_ATOMIC); in cas_page_dequeue()
618 spin_lock(&cp->rx_spare_lock); in cas_page_dequeue()
619 if (list_empty(&cp->rx_spare_list)) { in cas_page_dequeue()
620 netif_err(cp, rx_err, cp->dev, in cas_page_dequeue()
622 spin_unlock(&cp->rx_spare_lock); in cas_page_dequeue()
627 entry = cp->rx_spare_list.next; in cas_page_dequeue()
629 recover = ++cp->rx_spares_needed; in cas_page_dequeue()
630 spin_unlock(&cp->rx_spare_lock); in cas_page_dequeue()
635 atomic_inc(&cp->reset_task_pending); in cas_page_dequeue()
636 atomic_inc(&cp->reset_task_pending_spare); in cas_page_dequeue()
637 schedule_work(&cp->reset_task); in cas_page_dequeue()
639 atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE); in cas_page_dequeue()
640 schedule_work(&cp->reset_task); in cas_page_dequeue()
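
cas_page_dequeue() (source lines 608-640 above) pops a pre-allocated RX page off rx_spare_list under rx_spare_lock, bumps rx_spares_needed so the pool gets replenished later, and falls back to cas_spare_recover() or a scheduled reset when the list is empty. A rough, self-contained sketch of just the dequeue step, with invented names and without the recovery/reset machinery:

#include <pthread.h>
#include <stddef.h>

struct spare_page {
        struct spare_page *next;
        void *buf;
};

static pthread_mutex_t spare_lock = PTHREAD_MUTEX_INITIALIZER;
static struct spare_page *spare_list;
static int spares_needed;

static struct spare_page *spare_dequeue(void)
{
        struct spare_page *page;

        pthread_mutex_lock(&spare_lock);
        page = spare_list;
        if (page) {
                spare_list = page->next;
                spares_needed++;        /* one more to replenish later */
        }
        pthread_mutex_unlock(&spare_lock);

        return page;    /* NULL tells the caller to recover or allocate */
}
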
647 static void cas_mif_poll(struct cas *cp, const int enable) in cas_mif_poll() argument
651 cfg = readl(cp->regs + REG_MIF_CFG); in cas_mif_poll()
654 if (cp->phy_type & CAS_PHY_MII_MDIO1) in cas_mif_poll()
661 cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr); in cas_mif_poll()
664 cp->regs + REG_MIF_MASK); in cas_mif_poll()
665 writel(cfg, cp->regs + REG_MIF_CFG); in cas_mif_poll()
668 /* Must be invoked under cp->lock */
669 static void cas_begin_auto_negotiation(struct cas *cp, in cas_begin_auto_negotiation() argument
676 int oldstate = cp->lstate; in cas_begin_auto_negotiation()
682 lcntl = cp->link_cntl; in cas_begin_auto_negotiation()
684 cp->link_cntl = BMCR_ANENABLE; in cas_begin_auto_negotiation()
687 cp->link_cntl = 0; in cas_begin_auto_negotiation()
689 cp->link_cntl |= BMCR_SPEED100; in cas_begin_auto_negotiation()
691 cp->link_cntl |= CAS_BMCR_SPEED1000; in cas_begin_auto_negotiation()
693 cp->link_cntl |= BMCR_FULLDPLX; in cas_begin_auto_negotiation()
696 changed = (lcntl != cp->link_cntl); in cas_begin_auto_negotiation()
699 if (cp->lstate == link_up) { in cas_begin_auto_negotiation()
700 netdev_info(cp->dev, "PCS link down\n"); in cas_begin_auto_negotiation()
703 netdev_info(cp->dev, "link configuration changed\n"); in cas_begin_auto_negotiation()
706 cp->lstate = link_down; in cas_begin_auto_negotiation()
707 cp->link_transition = LINK_TRANSITION_LINK_DOWN; in cas_begin_auto_negotiation()
708 if (!cp->hw_running) in cas_begin_auto_negotiation()
717 netif_carrier_off(cp->dev); in cas_begin_auto_negotiation()
724 atomic_inc(&cp->reset_task_pending); in cas_begin_auto_negotiation()
725 atomic_inc(&cp->reset_task_pending_all); in cas_begin_auto_negotiation()
726 schedule_work(&cp->reset_task); in cas_begin_auto_negotiation()
727 cp->timer_ticks = 0; in cas_begin_auto_negotiation()
728 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); in cas_begin_auto_negotiation()
732 if (cp->phy_type & CAS_PHY_SERDES) { in cas_begin_auto_negotiation()
733 u32 val = readl(cp->regs + REG_PCS_MII_CTRL); in cas_begin_auto_negotiation()
735 if (cp->link_cntl & BMCR_ANENABLE) { in cas_begin_auto_negotiation()
737 cp->lstate = link_aneg; in cas_begin_auto_negotiation()
739 if (cp->link_cntl & BMCR_FULLDPLX) in cas_begin_auto_negotiation()
742 cp->lstate = link_force_ok; in cas_begin_auto_negotiation()
744 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; in cas_begin_auto_negotiation()
745 writel(val, cp->regs + REG_PCS_MII_CTRL); in cas_begin_auto_negotiation()
748 cas_mif_poll(cp, 0); in cas_begin_auto_negotiation()
749 ctl = cas_phy_read(cp, MII_BMCR); in cas_begin_auto_negotiation()
752 ctl |= cp->link_cntl; in cas_begin_auto_negotiation()
755 cp->lstate = link_aneg; in cas_begin_auto_negotiation()
757 cp->lstate = link_force_ok; in cas_begin_auto_negotiation()
759 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; in cas_begin_auto_negotiation()
760 cas_phy_write(cp, MII_BMCR, ctl); in cas_begin_auto_negotiation()
761 cas_mif_poll(cp, 1); in cas_begin_auto_negotiation()
764 cp->timer_ticks = 0; in cas_begin_auto_negotiation()
765 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); in cas_begin_auto_negotiation()
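
cas_begin_auto_negotiation() (lines 669-765) ends with the MII path: it pauses MIF polling, rewrites BMCR with either an autonegotiation restart or the forced speed/duplex carried in link_cntl, then re-arms the link timer. A small sketch of just that BMCR computation, using the standard MII bit values; the driver's extra CAS_BMCR_SPEED1000 gigabit bit is left out here:

#include <stdint.h>

/* standard MII BMCR bits (linux/mii.h values) */
#define BMCR_FULLDPLX   0x0100
#define BMCR_ANRESTART  0x0200
#define BMCR_ANENABLE   0x1000
#define BMCR_SPEED100   0x2000

static uint16_t next_bmcr(uint16_t ctl, uint16_t link_cntl)
{
        /* drop the old speed/duplex/autoneg selection */
        ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);
        /* fold in the requested mode (zero or more of the bits above) */
        ctl |= link_cntl;

        if (ctl & BMCR_ANENABLE)
                ctl |= BMCR_ANRESTART;  /* kick off a fresh negotiation */

        return ctl;
}
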
768 /* Must be invoked under cp->lock. */
769 static int cas_reset_mii_phy(struct cas *cp) in cas_reset_mii_phy() argument
774 cas_phy_write(cp, MII_BMCR, BMCR_RESET); in cas_reset_mii_phy()
777 val = cas_phy_read(cp, MII_BMCR); in cas_reset_mii_phy()
785 static void cas_saturn_firmware_init(struct cas *cp) in cas_saturn_firmware_init() argument
791 if (PHY_NS_DP83065 != cp->phy_id) in cas_saturn_firmware_init()
794 err = request_firmware(&fw, fw_name, &cp->pdev->dev); in cas_saturn_firmware_init()
805 cp->fw_load_addr= fw->data[1] << 8 | fw->data[0]; in cas_saturn_firmware_init()
806 cp->fw_size = fw->size - 2; in cas_saturn_firmware_init()
807 cp->fw_data = vmalloc(cp->fw_size); in cas_saturn_firmware_init()
808 if (!cp->fw_data) in cas_saturn_firmware_init()
810 memcpy(cp->fw_data, &fw->data[2], cp->fw_size); in cas_saturn_firmware_init()
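
cas_saturn_firmware_init() (lines 785-810) requests the DP83065 firmware blob and, per lines 805-810, treats its first two bytes as a little-endian load address with the rest as payload. The same parsing as a stand-alone helper; the struct and function names here are invented for the example:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct fw_image {
        uint16_t load_addr;     /* where the PHY wants the payload placed */
        size_t size;
        uint8_t *data;
};

static int fw_parse(const uint8_t *blob, size_t len, struct fw_image *img)
{
        if (len < 2)
                return -1;      /* too short to even hold the load address */

        img->load_addr = (uint16_t)(blob[1] << 8 | blob[0]);    /* little endian */
        img->size = len - 2;
        img->data = malloc(img->size);
        if (!img->data)
                return -1;

        memcpy(img->data, blob + 2, img->size);
        return 0;
}
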
815 static void cas_saturn_firmware_load(struct cas *cp) in cas_saturn_firmware_load() argument
819 if (!cp->fw_data) in cas_saturn_firmware_load()
822 cas_phy_powerdown(cp); in cas_saturn_firmware_load()
825 cas_phy_write(cp, DP83065_MII_MEM, 0x0); in cas_saturn_firmware_load()
828 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9); in cas_saturn_firmware_load()
829 cas_phy_write(cp, DP83065_MII_REGD, 0xbd); in cas_saturn_firmware_load()
830 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa); in cas_saturn_firmware_load()
831 cas_phy_write(cp, DP83065_MII_REGD, 0x82); in cas_saturn_firmware_load()
832 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb); in cas_saturn_firmware_load()
833 cas_phy_write(cp, DP83065_MII_REGD, 0x0); in cas_saturn_firmware_load()
834 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc); in cas_saturn_firmware_load()
835 cas_phy_write(cp, DP83065_MII_REGD, 0x39); in cas_saturn_firmware_load()
838 cas_phy_write(cp, DP83065_MII_MEM, 0x1); in cas_saturn_firmware_load()
839 cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr); in cas_saturn_firmware_load()
840 for (i = 0; i < cp->fw_size; i++) in cas_saturn_firmware_load()
841 cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]); in cas_saturn_firmware_load()
844 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8); in cas_saturn_firmware_load()
845 cas_phy_write(cp, DP83065_MII_REGD, 0x1); in cas_saturn_firmware_load()
850 static void cas_phy_init(struct cas *cp) in cas_phy_init() argument
855 if (CAS_PHY_MII(cp->phy_type)) { in cas_phy_init()
857 cp->regs + REG_PCS_DATAPATH_MODE); in cas_phy_init()
859 cas_mif_poll(cp, 0); in cas_phy_init()
860 cas_reset_mii_phy(cp); /* take out of isolate mode */ in cas_phy_init()
862 if (PHY_LUCENT_B0 == cp->phy_id) { in cas_phy_init()
864 cas_phy_write(cp, LUCENT_MII_REG, 0x8000); in cas_phy_init()
865 cas_phy_write(cp, MII_BMCR, 0x00f1); in cas_phy_init()
866 cas_phy_write(cp, LUCENT_MII_REG, 0x0); in cas_phy_init()
868 } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) { in cas_phy_init()
870 cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20); in cas_phy_init()
871 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012); in cas_phy_init()
872 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804); in cas_phy_init()
873 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013); in cas_phy_init()
874 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204); in cas_phy_init()
875 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006); in cas_phy_init()
876 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132); in cas_phy_init()
877 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006); in cas_phy_init()
878 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232); in cas_phy_init()
879 cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F); in cas_phy_init()
880 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20); in cas_phy_init()
882 } else if (PHY_BROADCOM_5411 == cp->phy_id) { in cas_phy_init()
883 val = cas_phy_read(cp, BROADCOM_MII_REG4); in cas_phy_init()
884 val = cas_phy_read(cp, BROADCOM_MII_REG4); in cas_phy_init()
887 cas_phy_write(cp, BROADCOM_MII_REG4, in cas_phy_init()
891 } else if (cp->cas_flags & CAS_FLAG_SATURN) { in cas_phy_init()
892 writel((cp->phy_type & CAS_PHY_MII_MDIO0) ? in cas_phy_init()
894 cp->regs + REG_SATURN_PCFG); in cas_phy_init()
900 if (PHY_NS_DP83065 == cp->phy_id) { in cas_phy_init()
901 cas_saturn_firmware_load(cp); in cas_phy_init()
903 cas_phy_powerup(cp); in cas_phy_init()
907 val = cas_phy_read(cp, MII_BMCR); in cas_phy_init()
909 cas_phy_write(cp, MII_BMCR, val); in cas_phy_init()
912 cas_phy_write(cp, MII_ADVERTISE, in cas_phy_init()
913 cas_phy_read(cp, MII_ADVERTISE) | in cas_phy_init()
919 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { in cas_phy_init()
923 val = cas_phy_read(cp, CAS_MII_1000_CTRL); in cas_phy_init()
926 cas_phy_write(cp, CAS_MII_1000_CTRL, val); in cas_phy_init()
935 cp->regs + REG_PCS_DATAPATH_MODE); in cas_phy_init()
938 if (cp->cas_flags & CAS_FLAG_SATURN) in cas_phy_init()
939 writel(0, cp->regs + REG_SATURN_PCFG); in cas_phy_init()
942 val = readl(cp->regs + REG_PCS_MII_CTRL); in cas_phy_init()
944 writel(val, cp->regs + REG_PCS_MII_CTRL); in cas_phy_init()
949 if ((readl(cp->regs + REG_PCS_MII_CTRL) & in cas_phy_init()
954 netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n", in cas_phy_init()
955 readl(cp->regs + REG_PCS_STATE_MACHINE)); in cas_phy_init()
960 writel(0x0, cp->regs + REG_PCS_CFG); in cas_phy_init()
963 val = readl(cp->regs + REG_PCS_MII_ADVERT); in cas_phy_init()
967 writel(val, cp->regs + REG_PCS_MII_ADVERT); in cas_phy_init()
970 writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG); in cas_phy_init()
974 cp->regs + REG_PCS_SERDES_CTRL); in cas_phy_init()
979 static int cas_pcs_link_check(struct cas *cp) in cas_pcs_link_check() argument
988 stat = readl(cp->regs + REG_PCS_MII_STATUS); in cas_pcs_link_check()
990 stat = readl(cp->regs + REG_PCS_MII_STATUS); in cas_pcs_link_check()
998 netif_info(cp, link, cp->dev, "PCS RemoteFault\n"); in cas_pcs_link_check()
1003 state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE); in cas_pcs_link_check()
1011 if (cp->lstate != link_up) { in cas_pcs_link_check()
1012 if (cp->opened) { in cas_pcs_link_check()
1013 cp->lstate = link_up; in cas_pcs_link_check()
1014 cp->link_transition = LINK_TRANSITION_LINK_UP; in cas_pcs_link_check()
1016 cas_set_link_modes(cp); in cas_pcs_link_check()
1017 netif_carrier_on(cp->dev); in cas_pcs_link_check()
1020 } else if (cp->lstate == link_up) { in cas_pcs_link_check()
1021 cp->lstate = link_down; in cas_pcs_link_check()
1023 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET && in cas_pcs_link_check()
1024 !cp->link_transition_jiffies_valid) { in cas_pcs_link_check()
1038 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET; in cas_pcs_link_check()
1039 cp->link_transition_jiffies = jiffies; in cas_pcs_link_check()
1040 cp->link_transition_jiffies_valid = 1; in cas_pcs_link_check()
1042 cp->link_transition = LINK_TRANSITION_ON_FAILURE; in cas_pcs_link_check()
1044 netif_carrier_off(cp->dev); in cas_pcs_link_check()
1045 if (cp->opened) in cas_pcs_link_check()
1046 netif_info(cp, link, cp->dev, "PCS link down\n"); in cas_pcs_link_check()
1056 if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) { in cas_pcs_link_check()
1058 stat = readl(cp->regs + REG_PCS_SERDES_STATE); in cas_pcs_link_check()
1062 } else if (cp->lstate == link_down) { in cas_pcs_link_check()
1064 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET && in cas_pcs_link_check()
1065 !cp->link_transition_jiffies_valid) { in cas_pcs_link_check()
1072 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET; in cas_pcs_link_check()
1073 cp->link_transition_jiffies = jiffies; in cas_pcs_link_check()
1074 cp->link_transition_jiffies_valid = 1; in cas_pcs_link_check()
1076 cp->link_transition = LINK_TRANSITION_STILL_FAILED; in cas_pcs_link_check()
1084 struct cas *cp, u32 status) in cas_pcs_interrupt() argument
1086 u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS); in cas_pcs_interrupt()
1090 return cas_pcs_link_check(cp); in cas_pcs_interrupt()
1094 struct cas *cp, u32 status) in cas_txmac_interrupt() argument
1096 u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS); in cas_txmac_interrupt()
1101 netif_printk(cp, intr, KERN_DEBUG, cp->dev, in cas_txmac_interrupt()
1111 spin_lock(&cp->stat_lock[0]); in cas_txmac_interrupt()
1114 cp->net_stats[0].tx_fifo_errors++; in cas_txmac_interrupt()
1119 cp->net_stats[0].tx_errors++; in cas_txmac_interrupt()
1126 cp->net_stats[0].collisions += 0x10000; in cas_txmac_interrupt()
1129 cp->net_stats[0].tx_aborted_errors += 0x10000; in cas_txmac_interrupt()
1130 cp->net_stats[0].collisions += 0x10000; in cas_txmac_interrupt()
1134 cp->net_stats[0].tx_aborted_errors += 0x10000; in cas_txmac_interrupt()
1135 cp->net_stats[0].collisions += 0x10000; in cas_txmac_interrupt()
1137 spin_unlock(&cp->stat_lock[0]); in cas_txmac_interrupt()
1145 static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware) in cas_load_firmware() argument
1153 writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR); in cas_load_firmware()
1157 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI); in cas_load_firmware()
1166 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID); in cas_load_firmware()
1172 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW); in cas_load_firmware()
1178 static void cas_init_rx_dma(struct cas *cp) in cas_init_rx_dma() argument
1180 u64 desc_dma = cp->block_dvma; in cas_init_rx_dma()
1189 (cp->cas_flags & CAS_FLAG_REG_PLUS)) /* do desc 2 */ in cas_init_rx_dma()
1191 writel(val, cp->regs + REG_RX_CFG); in cas_init_rx_dma()
1193 val = (unsigned long) cp->init_rxds[0] - in cas_init_rx_dma()
1194 (unsigned long) cp->init_block; in cas_init_rx_dma()
1195 writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI); in cas_init_rx_dma()
1196 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW); in cas_init_rx_dma()
1197 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); in cas_init_rx_dma()
1199 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { in cas_init_rx_dma()
1203 val = (unsigned long) cp->init_rxds[1] - in cas_init_rx_dma()
1204 (unsigned long) cp->init_block; in cas_init_rx_dma()
1205 writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI); in cas_init_rx_dma()
1206 writel((desc_dma + val) & 0xffffffff, cp->regs + in cas_init_rx_dma()
1208 writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs + in cas_init_rx_dma()
1213 val = (unsigned long) cp->init_rxcs[0] - in cas_init_rx_dma()
1214 (unsigned long) cp->init_block; in cas_init_rx_dma()
1215 writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI); in cas_init_rx_dma()
1216 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW); in cas_init_rx_dma()
1218 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { in cas_init_rx_dma()
1221 val = (unsigned long) cp->init_rxcs[i] - in cas_init_rx_dma()
1222 (unsigned long) cp->init_block; in cas_init_rx_dma()
1223 writel((desc_dma + val) >> 32, cp->regs + in cas_init_rx_dma()
1225 writel((desc_dma + val) & 0xffffffff, cp->regs + in cas_init_rx_dma()
1234 readl(cp->regs + REG_INTR_STATUS_ALIAS); in cas_init_rx_dma()
1235 writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR); in cas_init_rx_dma()
1239 cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM); in cas_init_rx_dma()
1241 cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM); in cas_init_rx_dma()
1242 writel(val, cp->regs + REG_RX_PAUSE_THRESH); in cas_init_rx_dma()
1246 writel(i, cp->regs + REG_RX_TABLE_ADDR); in cas_init_rx_dma()
1247 writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW); in cas_init_rx_dma()
1248 writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID); in cas_init_rx_dma()
1249 writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI); in cas_init_rx_dma()
1253 writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR); in cas_init_rx_dma()
1254 writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR); in cas_init_rx_dma()
1260 writel(val, cp->regs + REG_RX_BLANK); in cas_init_rx_dma()
1262 writel(0x0, cp->regs + REG_RX_BLANK); in cas_init_rx_dma()
1272 writel(val, cp->regs + REG_RX_AE_THRESH); in cas_init_rx_dma()
1273 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { in cas_init_rx_dma()
1275 writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH); in cas_init_rx_dma()
1281 writel(0x0, cp->regs + REG_RX_RED); in cas_init_rx_dma()
1285 if (cp->page_size == 0x1000) in cas_init_rx_dma()
1287 else if (cp->page_size == 0x2000) in cas_init_rx_dma()
1289 else if (cp->page_size == 0x4000) in cas_init_rx_dma()
1293 size = cp->dev->mtu + 64; in cas_init_rx_dma()
1294 if (size > cp->page_size) in cas_init_rx_dma()
1295 size = cp->page_size; in cas_init_rx_dma()
1306 cp->mtu_stride = 1 << (i + 10); in cas_init_rx_dma()
1309 val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10)); in cas_init_rx_dma()
1311 writel(val, cp->regs + REG_RX_PAGE_SIZE); in cas_init_rx_dma()
1320 writel(val, cp->regs + REG_HP_CFG); in cas_init_rx_dma()
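
Throughout cas_init_rx_dma() (lines 1178-1320) the 64-bit descriptor-block DMA address is programmed as two 32-bit register writes, e.g. lines 1195-1196 for the first descriptor ring. The generic form of that split:

#include <stdint.h>

static void write_dma_base(volatile uint32_t *hi_reg, volatile uint32_t *lo_reg,
                           uint64_t dma_addr)
{
        *hi_reg = (uint32_t)(dma_addr >> 32);           /* upper half first */
        *lo_reg = (uint32_t)(dma_addr & 0xffffffff);    /* then the lower half */
}
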
1333 static inline cas_page_t *cas_page_spare(struct cas *cp, const int index) in cas_page_spare() argument
1335 cas_page_t *page = cp->rx_pages[1][index]; in cas_page_spare()
1341 new = cas_page_dequeue(cp); in cas_page_spare()
1343 spin_lock(&cp->rx_inuse_lock); in cas_page_spare()
1344 list_add(&page->list, &cp->rx_inuse_list); in cas_page_spare()
1345 spin_unlock(&cp->rx_inuse_lock); in cas_page_spare()
1351 static cas_page_t *cas_page_swap(struct cas *cp, const int ring, in cas_page_swap() argument
1354 cas_page_t **page0 = cp->rx_pages[0]; in cas_page_swap()
1355 cas_page_t **page1 = cp->rx_pages[1]; in cas_page_swap()
1359 cas_page_t *new = cas_page_spare(cp, index); in cas_page_swap()
1369 static void cas_clean_rxds(struct cas *cp) in cas_clean_rxds() argument
1372 struct cas_rx_desc *rxd = cp->init_rxds[0]; in cas_clean_rxds()
1378 while ((skb = __skb_dequeue(&cp->rx_flows[i]))) { in cas_clean_rxds()
1386 cas_page_t *page = cas_page_swap(cp, 0, i); in cas_clean_rxds()
1392 cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4; in cas_clean_rxds()
1393 cp->rx_last[0] = 0; in cas_clean_rxds()
1394 cp->cas_flags &= ~CAS_FLAG_RXD_POST(0); in cas_clean_rxds()
1397 static void cas_clean_rxcs(struct cas *cp) in cas_clean_rxcs() argument
1402 memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS); in cas_clean_rxcs()
1403 memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS); in cas_clean_rxcs()
1405 struct cas_rx_comp *rxc = cp->init_rxcs[i]; in cas_clean_rxcs()
1419 static int cas_rxmac_reset(struct cas *cp)
1421 struct net_device *dev = cp->dev;
1426 writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1428 if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1438 writel(0, cp->regs + REG_RX_CFG);
1440 if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1452 writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1454 if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1464 cas_clean_rxds(cp);
1465 cas_clean_rxcs(cp);
1468 cas_init_rx_dma(cp);
1471 val = readl(cp->regs + REG_RX_CFG);
1472 writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1473 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1474 val = readl(cp->regs + REG_MAC_RX_CFG);
1475 writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1480 static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp, in cas_rxmac_interrupt() argument
1483 u32 stat = readl(cp->regs + REG_MAC_RX_STATUS); in cas_rxmac_interrupt()
1488 netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat); in cas_rxmac_interrupt()
1491 spin_lock(&cp->stat_lock[0]); in cas_rxmac_interrupt()
1493 cp->net_stats[0].rx_frame_errors += 0x10000; in cas_rxmac_interrupt()
1496 cp->net_stats[0].rx_crc_errors += 0x10000; in cas_rxmac_interrupt()
1499 cp->net_stats[0].rx_length_errors += 0x10000; in cas_rxmac_interrupt()
1502 cp->net_stats[0].rx_over_errors++; in cas_rxmac_interrupt()
1503 cp->net_stats[0].rx_fifo_errors++; in cas_rxmac_interrupt()
1509 spin_unlock(&cp->stat_lock[0]); in cas_rxmac_interrupt()
1513 static int cas_mac_interrupt(struct net_device *dev, struct cas *cp, in cas_mac_interrupt() argument
1516 u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS); in cas_mac_interrupt()
1521 netif_printk(cp, intr, KERN_DEBUG, cp->dev, in cas_mac_interrupt()
1529 cp->pause_entered++; in cas_mac_interrupt()
1532 cp->pause_last_time_recvd = (stat >> 16); in cas_mac_interrupt()
1538 /* Must be invoked under cp->lock. */
1539 static inline int cas_mdio_link_not_up(struct cas *cp) in cas_mdio_link_not_up() argument
1543 switch (cp->lstate) { in cas_mdio_link_not_up()
1545 netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n"); in cas_mdio_link_not_up()
1546 cas_phy_write(cp, MII_BMCR, cp->link_fcntl); in cas_mdio_link_not_up()
1547 cp->timer_ticks = 5; in cas_mdio_link_not_up()
1548 cp->lstate = link_force_ok; in cas_mdio_link_not_up()
1549 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; in cas_mdio_link_not_up()
1553 val = cas_phy_read(cp, MII_BMCR); in cas_mdio_link_not_up()
1560 val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? in cas_mdio_link_not_up()
1562 cas_phy_write(cp, MII_BMCR, val); in cas_mdio_link_not_up()
1563 cp->timer_ticks = 5; in cas_mdio_link_not_up()
1564 cp->lstate = link_force_try; in cas_mdio_link_not_up()
1565 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; in cas_mdio_link_not_up()
1570 val = cas_phy_read(cp, MII_BMCR); in cas_mdio_link_not_up()
1571 cp->timer_ticks = 5; in cas_mdio_link_not_up()
1575 cas_phy_write(cp, MII_BMCR, val); in cas_mdio_link_not_up()
1585 cas_phy_write(cp, MII_BMCR, val); in cas_mdio_link_not_up()
1596 /* must be invoked with cp->lock held */
1597 static int cas_mii_link_check(struct cas *cp, const u16 bmsr) in cas_mii_link_check() argument
1607 if ((cp->lstate == link_force_try) && in cas_mii_link_check()
1608 (cp->link_cntl & BMCR_ANENABLE)) { in cas_mii_link_check()
1609 cp->lstate = link_force_ret; in cas_mii_link_check()
1610 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; in cas_mii_link_check()
1611 cas_mif_poll(cp, 0); in cas_mii_link_check()
1612 cp->link_fcntl = cas_phy_read(cp, MII_BMCR); in cas_mii_link_check()
1613 cp->timer_ticks = 5; in cas_mii_link_check()
1614 if (cp->opened) in cas_mii_link_check()
1615 netif_info(cp, link, cp->dev, in cas_mii_link_check()
1617 cas_phy_write(cp, MII_BMCR, in cas_mii_link_check()
1618 cp->link_fcntl | BMCR_ANENABLE | in cas_mii_link_check()
1620 cas_mif_poll(cp, 1); in cas_mii_link_check()
1622 } else if (cp->lstate != link_up) { in cas_mii_link_check()
1623 cp->lstate = link_up; in cas_mii_link_check()
1624 cp->link_transition = LINK_TRANSITION_LINK_UP; in cas_mii_link_check()
1626 if (cp->opened) { in cas_mii_link_check()
1627 cas_set_link_modes(cp); in cas_mii_link_check()
1628 netif_carrier_on(cp->dev); in cas_mii_link_check()
1638 if (cp->lstate == link_up) { in cas_mii_link_check()
1639 cp->lstate = link_down; in cas_mii_link_check()
1640 cp->link_transition = LINK_TRANSITION_LINK_DOWN; in cas_mii_link_check()
1642 netif_carrier_off(cp->dev); in cas_mii_link_check()
1643 if (cp->opened) in cas_mii_link_check()
1644 netif_info(cp, link, cp->dev, "Link down\n"); in cas_mii_link_check()
1647 } else if (++cp->timer_ticks > 10) in cas_mii_link_check()
1648 cas_mdio_link_not_up(cp); in cas_mii_link_check()
1653 static int cas_mif_interrupt(struct net_device *dev, struct cas *cp, in cas_mif_interrupt() argument
1656 u32 stat = readl(cp->regs + REG_MIF_STATUS); in cas_mif_interrupt()
1664 return cas_mii_link_check(cp, bmsr); in cas_mif_interrupt()
1667 static int cas_pci_interrupt(struct net_device *dev, struct cas *cp, in cas_pci_interrupt() argument
1670 u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS); in cas_pci_interrupt()
1676 stat, readl(cp->regs + REG_BIM_DIAG)); in cas_pci_interrupt()
1680 ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0)) in cas_pci_interrupt()
1699 pci_errs = pci_status_get_and_clear_errors(cp->pdev); in cas_pci_interrupt()
1725 static int cas_abnormal_irq(struct net_device *dev, struct cas *cp, in cas_abnormal_irq() argument
1730 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, in cas_abnormal_irq()
1732 spin_lock(&cp->stat_lock[0]); in cas_abnormal_irq()
1733 cp->net_stats[0].rx_errors++; in cas_abnormal_irq()
1734 spin_unlock(&cp->stat_lock[0]); in cas_abnormal_irq()
1740 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, in cas_abnormal_irq()
1742 spin_lock(&cp->stat_lock[0]); in cas_abnormal_irq()
1743 cp->net_stats[0].rx_errors++; in cas_abnormal_irq()
1744 spin_unlock(&cp->stat_lock[0]); in cas_abnormal_irq()
1749 if (cas_pcs_interrupt(dev, cp, status)) in cas_abnormal_irq()
1754 if (cas_txmac_interrupt(dev, cp, status)) in cas_abnormal_irq()
1759 if (cas_rxmac_interrupt(dev, cp, status)) in cas_abnormal_irq()
1764 if (cas_mac_interrupt(dev, cp, status)) in cas_abnormal_irq()
1769 if (cas_mif_interrupt(dev, cp, status)) in cas_abnormal_irq()
1774 if (cas_pci_interrupt(dev, cp, status)) in cas_abnormal_irq()
1781 atomic_inc(&cp->reset_task_pending); in cas_abnormal_irq()
1782 atomic_inc(&cp->reset_task_pending_all); in cas_abnormal_irq()
1784 schedule_work(&cp->reset_task); in cas_abnormal_irq()
1786 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); in cas_abnormal_irq()
1788 schedule_work(&cp->reset_task); in cas_abnormal_irq()
1798 static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr, in cas_calc_tabort() argument
1803 if (CAS_TABORT(cp) == 1) in cas_calc_tabort()
1810 static inline void cas_tx_ringN(struct cas *cp, int ring, int limit) in cas_tx_ringN() argument
1814 struct net_device *dev = cp->dev; in cas_tx_ringN()
1817 spin_lock(&cp->tx_lock[ring]); in cas_tx_ringN()
1818 txds = cp->init_txds[ring]; in cas_tx_ringN()
1819 skbs = cp->tx_skbs[ring]; in cas_tx_ringN()
1820 entry = cp->tx_old[ring]; in cas_tx_ringN()
1837 + cp->tx_tiny_use[ring][entry].nbufs + 1; in cas_tx_ringN()
1841 netif_printk(cp, tx_done, KERN_DEBUG, cp->dev, in cas_tx_ringN()
1845 cp->tx_tiny_use[ring][entry].nbufs = 0; in cas_tx_ringN()
1853 dma_unmap_page(&cp->pdev->dev, daddr, dlen, in cas_tx_ringN()
1858 if (cp->tx_tiny_use[ring][entry].used) { in cas_tx_ringN()
1859 cp->tx_tiny_use[ring][entry].used = 0; in cas_tx_ringN()
1864 spin_lock(&cp->stat_lock[ring]); in cas_tx_ringN()
1865 cp->net_stats[ring].tx_packets++; in cas_tx_ringN()
1866 cp->net_stats[ring].tx_bytes += skb->len; in cas_tx_ringN()
1867 spin_unlock(&cp->stat_lock[ring]); in cas_tx_ringN()
1870 cp->tx_old[ring] = entry; in cas_tx_ringN()
1877 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))) in cas_tx_ringN()
1879 spin_unlock(&cp->tx_lock[ring]); in cas_tx_ringN()
1882 static void cas_tx(struct net_device *dev, struct cas *cp, in cas_tx() argument
1887 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb); in cas_tx()
1889 netif_printk(cp, intr, KERN_DEBUG, cp->dev, in cas_tx()
1900 limit = readl(cp->regs + REG_TX_COMPN(ring)); in cas_tx()
1902 if (cp->tx_old[ring] != limit) in cas_tx()
1903 cas_tx_ringN(cp, ring, limit); in cas_tx()
1908 static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, in cas_rx_process_pkt() argument
1929 skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size); in cas_rx_process_pkt()
1940 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; in cas_rx_process_pkt()
1946 i += cp->crc_size; in cas_rx_process_pkt()
1947 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off, in cas_rx_process_pkt()
1950 dma_sync_single_for_device(&cp->pdev->dev, in cas_rx_process_pkt()
1964 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; in cas_rx_process_pkt()
1967 hlen = min(cp->page_size - off, dlen); in cas_rx_process_pkt()
1969 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, in cas_rx_process_pkt()
1976 i += cp->crc_size; in cas_rx_process_pkt()
1977 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off, in cas_rx_process_pkt()
1985 dma_sync_single_for_device(&cp->pdev->dev, in cas_rx_process_pkt()
1990 RX_USED_ADD(page, cp->mtu_stride); in cas_rx_process_pkt()
2010 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; in cas_rx_process_pkt()
2011 dma_sync_single_for_cpu(&cp->pdev->dev, in cas_rx_process_pkt()
2013 hlen + cp->crc_size, in cas_rx_process_pkt()
2015 dma_sync_single_for_device(&cp->pdev->dev, in cas_rx_process_pkt()
2017 hlen + cp->crc_size, in cas_rx_process_pkt()
2027 RX_USED_ADD(page, hlen + cp->crc_size); in cas_rx_process_pkt()
2030 if (cp->crc_size) in cas_rx_process_pkt()
2039 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; in cas_rx_process_pkt()
2041 hlen = min(cp->page_size - off, dlen); in cas_rx_process_pkt()
2043 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, in cas_rx_process_pkt()
2050 i += cp->crc_size; in cas_rx_process_pkt()
2051 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off, in cas_rx_process_pkt()
2054 dma_sync_single_for_device(&cp->pdev->dev, in cas_rx_process_pkt()
2058 RX_USED_ADD(page, cp->mtu_stride); in cas_rx_process_pkt()
2066 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; in cas_rx_process_pkt()
2067 dma_sync_single_for_cpu(&cp->pdev->dev, in cas_rx_process_pkt()
2069 dlen + cp->crc_size, in cas_rx_process_pkt()
2071 memcpy(p, page_address(page->buffer), dlen + cp->crc_size); in cas_rx_process_pkt()
2072 dma_sync_single_for_device(&cp->pdev->dev, in cas_rx_process_pkt()
2074 dlen + cp->crc_size, in cas_rx_process_pkt()
2076 RX_USED_ADD(page, dlen + cp->crc_size); in cas_rx_process_pkt()
2079 if (cp->crc_size) in cas_rx_process_pkt()
2086 if (cp->crc_size) { in cas_rx_process_pkt()
2088 csum = csum_fold(csum_partial(crcaddr, cp->crc_size, in cas_rx_process_pkt()
2091 skb->protocol = eth_type_trans(skb, cp->dev); in cas_rx_process_pkt()
2115 static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words, in cas_rx_flow_pkt() argument
2119 struct sk_buff_head *flow = &cp->rx_flows[flowid]; in cas_rx_flow_pkt()
2136 static void cas_post_page(struct cas *cp, const int ring, const int index) in cas_post_page() argument
2141 entry = cp->rx_old[ring]; in cas_post_page()
2143 new = cas_page_swap(cp, ring, index); in cas_post_page()
2144 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr); in cas_post_page()
2145 cp->init_rxds[ring][entry].index = in cas_post_page()
2150 cp->rx_old[ring] = entry; in cas_post_page()
2156 writel(entry, cp->regs + REG_RX_KICK); in cas_post_page()
2158 (cp->cas_flags & CAS_FLAG_REG_PLUS)) in cas_post_page()
2159 writel(entry, cp->regs + REG_PLUS_RX_KICK1); in cas_post_page()
2164 static int cas_post_rxds_ringN(struct cas *cp, int ring, int num) in cas_post_rxds_ringN() argument
2168 cas_page_t **page = cp->rx_pages[ring]; in cas_post_rxds_ringN()
2170 entry = cp->rx_old[ring]; in cas_post_rxds_ringN()
2172 netif_printk(cp, intr, KERN_DEBUG, cp->dev, in cas_post_rxds_ringN()
2182 cas_page_t *new = cas_page_dequeue(cp); in cas_post_rxds_ringN()
2187 cp->cas_flags |= CAS_FLAG_RXD_POST(ring); in cas_post_rxds_ringN()
2188 if (!timer_pending(&cp->link_timer)) in cas_post_rxds_ringN()
2189 mod_timer(&cp->link_timer, jiffies + in cas_post_rxds_ringN()
2191 cp->rx_old[ring] = entry; in cas_post_rxds_ringN()
2192 cp->rx_last[ring] = num ? num - released : 0; in cas_post_rxds_ringN()
2195 spin_lock(&cp->rx_inuse_lock); in cas_post_rxds_ringN()
2196 list_add(&page[entry]->list, &cp->rx_inuse_list); in cas_post_rxds_ringN()
2197 spin_unlock(&cp->rx_inuse_lock); in cas_post_rxds_ringN()
2198 cp->init_rxds[ring][entry].buffer = in cas_post_rxds_ringN()
2211 cp->rx_old[ring] = entry; in cas_post_rxds_ringN()
2217 writel(cluster, cp->regs + REG_RX_KICK); in cas_post_rxds_ringN()
2219 (cp->cas_flags & CAS_FLAG_REG_PLUS)) in cas_post_rxds_ringN()
2220 writel(cluster, cp->regs + REG_PLUS_RX_KICK1); in cas_post_rxds_ringN()
2237 static int cas_rx_ringN(struct cas *cp, int ring, int budget) in cas_rx_ringN() argument
2239 struct cas_rx_comp *rxcs = cp->init_rxcs[ring]; in cas_rx_ringN()
2243 netif_printk(cp, intr, KERN_DEBUG, cp->dev, in cas_rx_ringN()
2246 readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]); in cas_rx_ringN()
2248 entry = cp->rx_new[ring]; in cas_rx_ringN()
2274 spin_lock(&cp->stat_lock[ring]); in cas_rx_ringN()
2275 cp->net_stats[ring].rx_errors++; in cas_rx_ringN()
2277 cp->net_stats[ring].rx_length_errors++; in cas_rx_ringN()
2279 cp->net_stats[ring].rx_crc_errors++; in cas_rx_ringN()
2280 spin_unlock(&cp->stat_lock[ring]); in cas_rx_ringN()
2284 spin_lock(&cp->stat_lock[ring]); in cas_rx_ringN()
2285 ++cp->net_stats[ring].rx_dropped; in cas_rx_ringN()
2286 spin_unlock(&cp->stat_lock[ring]); in cas_rx_ringN()
2290 len = cas_rx_process_pkt(cp, rxc, entry, words, &skb); in cas_rx_ringN()
2303 cas_rx_flow_pkt(cp, words, skb); in cas_rx_ringN()
2306 spin_lock(&cp->stat_lock[ring]); in cas_rx_ringN()
2307 cp->net_stats[ring].rx_packets++; in cas_rx_ringN()
2308 cp->net_stats[ring].rx_bytes += len; in cas_rx_ringN()
2309 spin_unlock(&cp->stat_lock[ring]); in cas_rx_ringN()
2319 cas_post_page(cp, dring, i); in cas_rx_ringN()
2326 cas_post_page(cp, dring, i); in cas_rx_ringN()
2333 cas_post_page(cp, dring, i); in cas_rx_ringN()
2344 cp->rx_new[ring] = entry; in cas_rx_ringN()
2347 netdev_info(cp->dev, "Memory squeeze, deferring packet\n"); in cas_rx_ringN()
2354 struct cas *cp, int ring) in cas_post_rxcs_ringN() argument
2356 struct cas_rx_comp *rxc = cp->init_rxcs[ring]; in cas_post_rxcs_ringN()
2359 last = cp->rx_cur[ring]; in cas_post_rxcs_ringN()
2360 entry = cp->rx_new[ring]; in cas_post_rxcs_ringN()
2361 netif_printk(cp, intr, KERN_DEBUG, dev, in cas_post_rxcs_ringN()
2363 ring, readl(cp->regs + REG_RX_COMP_HEAD), entry); in cas_post_rxcs_ringN()
2370 cp->rx_cur[ring] = last; in cas_post_rxcs_ringN()
2373 writel(last, cp->regs + REG_RX_COMP_TAIL); in cas_post_rxcs_ringN()
2374 else if (cp->cas_flags & CAS_FLAG_REG_PLUS) in cas_post_rxcs_ringN()
2375 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring)); in cas_post_rxcs_ringN()
2385 struct cas *cp, const u32 status, in cas_handle_irqN() argument
2389 cas_post_rxcs_ringN(dev, cp, ring); in cas_handle_irqN()
2395 struct cas *cp = netdev_priv(dev); in cas_interruptN() local
2397 int ring = (irq == cp->pci_irq_INTC) ? 2 : 3; in cas_interruptN()
2398 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); in cas_interruptN()
2404 spin_lock_irqsave(&cp->lock, flags); in cas_interruptN()
2407 cas_mask_intr(cp); in cas_interruptN()
2408 napi_schedule(&cp->napi); in cas_interruptN()
2410 cas_rx_ringN(cp, ring, 0); in cas_interruptN()
2416 cas_handle_irqN(dev, cp, status, ring); in cas_interruptN()
2417 spin_unlock_irqrestore(&cp->lock, flags); in cas_interruptN()
2424 static inline void cas_handle_irq1(struct cas *cp, const u32 status) in cas_handle_irq1() argument
2429 cas_post_rxds_ringN(cp, 1, 0); in cas_handle_irq1()
2430 spin_lock(&cp->stat_lock[1]); in cas_handle_irq1()
2431 cp->net_stats[1].rx_dropped++; in cas_handle_irq1()
2432 spin_unlock(&cp->stat_lock[1]); in cas_handle_irq1()
2436 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) - in cas_handle_irq1()
2440 cas_post_rxcs_ringN(cp, 1); in cas_handle_irq1()
2447 struct cas *cp = netdev_priv(dev); in cas_interrupt1() local
2449 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); in cas_interrupt1()
2455 spin_lock_irqsave(&cp->lock, flags); in cas_interrupt1()
2458 cas_mask_intr(cp); in cas_interrupt1()
2459 napi_schedule(&cp->napi); in cas_interrupt1()
2461 cas_rx_ringN(cp, 1, 0); in cas_interrupt1()
2466 cas_handle_irq1(cp, status); in cas_interrupt1()
2467 spin_unlock_irqrestore(&cp->lock, flags); in cas_interrupt1()
2473 struct cas *cp, const u32 status) in cas_handle_irq() argument
2477 cas_abnormal_irq(dev, cp, status); in cas_handle_irq()
2483 cas_post_rxds_ringN(cp, 0, 0); in cas_handle_irq()
2484 spin_lock(&cp->stat_lock[0]); in cas_handle_irq()
2485 cp->net_stats[0].rx_dropped++; in cas_handle_irq()
2486 spin_unlock(&cp->stat_lock[0]); in cas_handle_irq()
2488 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) - in cas_handle_irq()
2493 cas_post_rxcs_ringN(dev, cp, 0); in cas_handle_irq()
2499 struct cas *cp = netdev_priv(dev); in cas_interrupt() local
2501 u32 status = readl(cp->regs + REG_INTR_STATUS); in cas_interrupt()
2506 spin_lock_irqsave(&cp->lock, flags); in cas_interrupt()
2508 cas_tx(dev, cp, status); in cas_interrupt()
2514 cas_mask_intr(cp); in cas_interrupt()
2515 napi_schedule(&cp->napi); in cas_interrupt()
2517 cas_rx_ringN(cp, 0, 0); in cas_interrupt()
2523 cas_handle_irq(dev, cp, status); in cas_interrupt()
2524 spin_unlock_irqrestore(&cp->lock, flags); in cas_interrupt()
2532 struct cas *cp = container_of(napi, struct cas, napi); in cas_poll() local
2533 struct net_device *dev = cp->dev; in cas_poll()
2535 u32 status = readl(cp->regs + REG_INTR_STATUS); in cas_poll()
2538 spin_lock_irqsave(&cp->lock, flags); in cas_poll()
2539 cas_tx(dev, cp, status); in cas_poll()
2540 spin_unlock_irqrestore(&cp->lock, flags); in cas_poll()
2554 credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS); in cas_poll()
2564 spin_lock_irqsave(&cp->lock, flags); in cas_poll()
2566 cas_handle_irq(dev, cp, status); in cas_poll()
2570 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); in cas_poll()
2572 cas_handle_irq1(dev, cp, status); in cas_poll()
2578 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2)); in cas_poll()
2580 cas_handle_irqN(dev, cp, status, 2); in cas_poll()
2586 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3)); in cas_poll()
2588 cas_handle_irqN(dev, cp, status, 3); in cas_poll()
2591 spin_unlock_irqrestore(&cp->lock, flags); in cas_poll()
2594 cas_unmask_intr(cp); in cas_poll()
2603 struct cas *cp = netdev_priv(dev); in cas_netpoll() local
2605 cas_disable_irq(cp, 0); in cas_netpoll()
2606 cas_interrupt(cp->pdev->irq, dev); in cas_netpoll()
2607 cas_enable_irq(cp, 0); in cas_netpoll()
2629 struct cas *cp = netdev_priv(dev); in cas_tx_timeout() local
2632 if (!cp->hw_running) { in cas_tx_timeout()
2638 readl(cp->regs + REG_MIF_STATE_MACHINE)); in cas_tx_timeout()
2641 readl(cp->regs + REG_MAC_STATE_MACHINE)); in cas_tx_timeout()
2644 readl(cp->regs + REG_TX_CFG), in cas_tx_timeout()
2645 readl(cp->regs + REG_MAC_TX_STATUS), in cas_tx_timeout()
2646 readl(cp->regs + REG_MAC_TX_CFG), in cas_tx_timeout()
2647 readl(cp->regs + REG_TX_FIFO_PKT_CNT), in cas_tx_timeout()
2648 readl(cp->regs + REG_TX_FIFO_WRITE_PTR), in cas_tx_timeout()
2649 readl(cp->regs + REG_TX_FIFO_READ_PTR), in cas_tx_timeout()
2650 readl(cp->regs + REG_TX_SM_1), in cas_tx_timeout()
2651 readl(cp->regs + REG_TX_SM_2)); in cas_tx_timeout()
2654 readl(cp->regs + REG_RX_CFG), in cas_tx_timeout()
2655 readl(cp->regs + REG_MAC_RX_STATUS), in cas_tx_timeout()
2656 readl(cp->regs + REG_MAC_RX_CFG)); in cas_tx_timeout()
2659 readl(cp->regs + REG_HP_STATE_MACHINE), in cas_tx_timeout()
2660 readl(cp->regs + REG_HP_STATUS0), in cas_tx_timeout()
2661 readl(cp->regs + REG_HP_STATUS1), in cas_tx_timeout()
2662 readl(cp->regs + REG_HP_STATUS2)); in cas_tx_timeout()
2665 atomic_inc(&cp->reset_task_pending); in cas_tx_timeout()
2666 atomic_inc(&cp->reset_task_pending_all); in cas_tx_timeout()
2667 schedule_work(&cp->reset_task); in cas_tx_timeout()
2669 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); in cas_tx_timeout()
2670 schedule_work(&cp->reset_task); in cas_tx_timeout()
2683 static void cas_write_txd(struct cas *cp, int ring, int entry, in cas_write_txd() argument
2686 struct cas_tx_desc *txd = cp->init_txds[ring] + entry; in cas_write_txd()
2697 static inline void *tx_tiny_buf(struct cas *cp, const int ring, in tx_tiny_buf() argument
2700 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry; in tx_tiny_buf()
2703 static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring, in tx_tiny_map() argument
2706 cp->tx_tiny_use[ring][tentry].nbufs++; in tx_tiny_map()
2707 cp->tx_tiny_use[ring][entry].used = 1; in tx_tiny_map()
2708 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry; in tx_tiny_map()
2711 static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, in cas_xmit_tx_ringN() argument
2714 struct net_device *dev = cp->dev; in cas_xmit_tx_ringN()
2721 spin_lock_irqsave(&cp->tx_lock[ring], flags); in cas_xmit_tx_ringN()
2724 if (TX_BUFFS_AVAIL(cp, ring) <= in cas_xmit_tx_ringN()
2725 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) { in cas_xmit_tx_ringN()
2727 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); in cas_xmit_tx_ringN()
2742 entry = cp->tx_new[ring]; in cas_xmit_tx_ringN()
2743 cp->tx_skbs[ring][entry] = skb; in cas_xmit_tx_ringN()
2747 mapping = dma_map_page(&cp->pdev->dev, virt_to_page(skb->data), in cas_xmit_tx_ringN()
2751 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len); in cas_xmit_tx_ringN()
2754 cas_write_txd(cp, ring, entry, mapping, len - tabort, in cas_xmit_tx_ringN()
2759 tx_tiny_buf(cp, ring, entry), tabort); in cas_xmit_tx_ringN()
2760 mapping = tx_tiny_map(cp, ring, entry, tentry); in cas_xmit_tx_ringN()
2761 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, in cas_xmit_tx_ringN()
2764 cas_write_txd(cp, ring, entry, mapping, len, ctrl | in cas_xmit_tx_ringN()
2773 mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len, in cas_xmit_tx_ringN()
2776 tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len); in cas_xmit_tx_ringN()
2779 cas_write_txd(cp, ring, entry, mapping, len - tabort, in cas_xmit_tx_ringN()
2782 memcpy_from_page(tx_tiny_buf(cp, ring, entry), in cas_xmit_tx_ringN()
2786 mapping = tx_tiny_map(cp, ring, entry, tentry); in cas_xmit_tx_ringN()
2790 cas_write_txd(cp, ring, entry, mapping, len, ctrl, in cas_xmit_tx_ringN()
2795 cp->tx_new[ring] = entry; in cas_xmit_tx_ringN()
2796 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)) in cas_xmit_tx_ringN()
2799 netif_printk(cp, tx_queued, KERN_DEBUG, dev, in cas_xmit_tx_ringN()
2801 ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring)); in cas_xmit_tx_ringN()
2802 writel(entry, cp->regs + REG_TX_KICKN(ring)); in cas_xmit_tx_ringN()
2803 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); in cas_xmit_tx_ringN()
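
cas_xmit_tx_ringN() (lines 2711-2803) refuses the skb when TX_BUFFS_AVAIL() can no longer cover the worst case of CAS_TABORT() descriptors per fragment, and cas_tx_ringN() wakes the queue once space returns (line 1877). One common way such ring headroom is computed, assuming a power-of-two ring size; this is an illustration of the idiom, not the driver's TX_BUFFS_AVAIL macro:

#define TX_RING_SIZE 128        /* assumed power-of-two ring size */

/* free descriptors between the producer (new) and consumer (old) indices,
 * keeping one slot unused so a full ring is distinguishable from an empty one */
static unsigned int tx_buffs_avail(unsigned int old, unsigned int new)
{
        return (old - new - 1) & (TX_RING_SIZE - 1);
}

/* advance a producer or consumer index with wrap-around */
static unsigned int tx_ring_next(unsigned int entry)
{
        return (entry + 1) & (TX_RING_SIZE - 1);
}
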
2809 struct cas *cp = netdev_priv(dev); in cas_start_xmit() local
2816 if (skb_padto(skb, cp->min_frame_size)) in cas_start_xmit()
2822 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb)) in cas_start_xmit()
2827 static void cas_init_tx_dma(struct cas *cp) in cas_init_tx_dma() argument
2829 u64 desc_dma = cp->block_dvma; in cas_init_tx_dma()
2837 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI); in cas_init_tx_dma()
2838 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW); in cas_init_tx_dma()
2851 off = (unsigned long) cp->init_txds[i] - in cas_init_tx_dma()
2852 (unsigned long) cp->init_block; in cas_init_tx_dma()
2855 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i)); in cas_init_tx_dma()
2856 writel((desc_dma + off) & 0xffffffff, cp->regs + in cas_init_tx_dma()
2862 writel(val, cp->regs + REG_TX_CFG); in cas_init_tx_dma()
2868 writel(0x800, cp->regs + REG_TX_MAXBURST_0); in cas_init_tx_dma()
2869 writel(0x1600, cp->regs + REG_TX_MAXBURST_1); in cas_init_tx_dma()
2870 writel(0x2400, cp->regs + REG_TX_MAXBURST_2); in cas_init_tx_dma()
2871 writel(0x4800, cp->regs + REG_TX_MAXBURST_3); in cas_init_tx_dma()
2873 writel(0x800, cp->regs + REG_TX_MAXBURST_0); in cas_init_tx_dma()
2874 writel(0x800, cp->regs + REG_TX_MAXBURST_1); in cas_init_tx_dma()
2875 writel(0x800, cp->regs + REG_TX_MAXBURST_2); in cas_init_tx_dma()
2876 writel(0x800, cp->regs + REG_TX_MAXBURST_3); in cas_init_tx_dma()
2880 /* Must be invoked under cp->lock. */
2881 static inline void cas_init_dma(struct cas *cp) in cas_init_dma() argument
2883 cas_init_tx_dma(cp); in cas_init_dma()
2884 cas_init_rx_dma(cp); in cas_init_dma()
2887 static void cas_process_mc_list(struct cas *cp) in cas_process_mc_list() argument
2895 netdev_for_each_mc_addr(ha, cp->dev) { in cas_process_mc_list()
2901 cp->regs + REG_MAC_ADDRN(i*3 + 0)); in cas_process_mc_list()
2903 cp->regs + REG_MAC_ADDRN(i*3 + 1)); in cas_process_mc_list()
2905 cp->regs + REG_MAC_ADDRN(i*3 + 2)); in cas_process_mc_list()
2918 writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i)); in cas_process_mc_list()
2921 /* Must be invoked under cp->lock. */
2922 static u32 cas_setup_multicast(struct cas *cp) in cas_setup_multicast() argument
2927 if (cp->dev->flags & IFF_PROMISC) { in cas_setup_multicast()
2930 } else if (cp->dev->flags & IFF_ALLMULTI) { in cas_setup_multicast()
2932 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i)); in cas_setup_multicast()
2936 cas_process_mc_list(cp); in cas_setup_multicast()
2943 /* must be invoked under cp->stat_lock[N_TX_RINGS] */
2944 static void cas_clear_mac_err(struct cas *cp) in cas_clear_mac_err() argument
2946 writel(0, cp->regs + REG_MAC_COLL_NORMAL); in cas_clear_mac_err()
2947 writel(0, cp->regs + REG_MAC_COLL_FIRST); in cas_clear_mac_err()
2948 writel(0, cp->regs + REG_MAC_COLL_EXCESS); in cas_clear_mac_err()
2949 writel(0, cp->regs + REG_MAC_COLL_LATE); in cas_clear_mac_err()
2950 writel(0, cp->regs + REG_MAC_TIMER_DEFER); in cas_clear_mac_err()
2951 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK); in cas_clear_mac_err()
2952 writel(0, cp->regs + REG_MAC_RECV_FRAME); in cas_clear_mac_err()
2953 writel(0, cp->regs + REG_MAC_LEN_ERR); in cas_clear_mac_err()
2954 writel(0, cp->regs + REG_MAC_ALIGN_ERR); in cas_clear_mac_err()
2955 writel(0, cp->regs + REG_MAC_FCS_ERR); in cas_clear_mac_err()
2956 writel(0, cp->regs + REG_MAC_RX_CODE_ERR); in cas_clear_mac_err()
2960 static void cas_mac_reset(struct cas *cp) in cas_mac_reset() argument
2965 writel(0x1, cp->regs + REG_MAC_TX_RESET); in cas_mac_reset()
2966 writel(0x1, cp->regs + REG_MAC_RX_RESET); in cas_mac_reset()
2971 if (readl(cp->regs + REG_MAC_TX_RESET) == 0) in cas_mac_reset()
2979 if (readl(cp->regs + REG_MAC_RX_RESET) == 0) in cas_mac_reset()
2984 if (readl(cp->regs + REG_MAC_TX_RESET) | in cas_mac_reset()
2985 readl(cp->regs + REG_MAC_RX_RESET)) in cas_mac_reset()
2986 netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n", in cas_mac_reset()
2987 readl(cp->regs + REG_MAC_TX_RESET), in cas_mac_reset()
2988 readl(cp->regs + REG_MAC_RX_RESET), in cas_mac_reset()
2989 readl(cp->regs + REG_MAC_STATE_MACHINE)); in cas_mac_reset()
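
cas_mac_reset() (lines 2960-2989) writes the self-clearing TX and RX MAC reset bits and then polls each register a bounded number of times before reporting failure. The reset-and-poll idiom in isolation; readreg()/writereg(), the register, and the retry count below are placeholders, not the driver's accessors or timing:

#include <stdint.h>
#include <unistd.h>

#define POLL_TRIES 15   /* arbitrary bound for the sketch */

static volatile uint32_t fake_reset_reg;        /* stand-in for an MMIO register */

static void writereg(volatile uint32_t *reg, uint32_t val) { *reg = val; }
static uint32_t readreg(volatile uint32_t *reg)            { return *reg; }

static int reset_and_wait(void)
{
        int tries;

        writereg(&fake_reset_reg, 0x1);         /* start the reset */

        for (tries = 0; tries < POLL_TRIES; tries++) {
                if (readreg(&fake_reset_reg) == 0)
                        return 0;               /* bit self-cleared: done */
                usleep(10);                     /* short delay between polls */
        }

        return -1;                              /* hardware never acknowledged */
}
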
2993 /* Must be invoked under cp->lock. */
2994 static void cas_init_mac(struct cas *cp) in cas_init_mac() argument
2996 const unsigned char *e = &cp->dev->dev_addr[0]; in cas_init_mac()
2998 cas_mac_reset(cp); in cas_init_mac()
3001 writel(CAWR_RR_DIS, cp->regs + REG_CAWR); in cas_init_mac()
3007 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0) in cas_init_mac()
3008 writel(INF_BURST_EN, cp->regs + REG_INF_BURST); in cas_init_mac()
3011 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE); in cas_init_mac()
3013 writel(0x00, cp->regs + REG_MAC_IPG0); in cas_init_mac()
3014 writel(0x08, cp->regs + REG_MAC_IPG1); in cas_init_mac()
3015 writel(0x04, cp->regs + REG_MAC_IPG2); in cas_init_mac()
3018 writel(0x40, cp->regs + REG_MAC_SLOT_TIME); in cas_init_mac()
3021 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN); in cas_init_mac()
3030 cp->regs + REG_MAC_FRAMESIZE_MAX); in cas_init_mac()
3036 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size) in cas_init_mac()
3037 writel(0x41, cp->regs + REG_MAC_PA_SIZE); in cas_init_mac()
3039 writel(0x07, cp->regs + REG_MAC_PA_SIZE); in cas_init_mac()
3040 writel(0x04, cp->regs + REG_MAC_JAM_SIZE); in cas_init_mac()
3041 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT); in cas_init_mac()
3042 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE); in cas_init_mac()
3044 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED); in cas_init_mac()
3046 writel(0, cp->regs + REG_MAC_ADDR_FILTER0); in cas_init_mac()
3047 writel(0, cp->regs + REG_MAC_ADDR_FILTER1); in cas_init_mac()
3048 writel(0, cp->regs + REG_MAC_ADDR_FILTER2); in cas_init_mac()
3049 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK); in cas_init_mac()
3050 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK); in cas_init_mac()
3054 writel(0x0, cp->regs + REG_MAC_ADDRN(i)); in cas_init_mac()
3056 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0)); in cas_init_mac()
3057 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1)); in cas_init_mac()
3058 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2)); in cas_init_mac()
3060 writel(0x0001, cp->regs + REG_MAC_ADDRN(42)); in cas_init_mac()
3061 writel(0xc200, cp->regs + REG_MAC_ADDRN(43)); in cas_init_mac()
3062 writel(0x0180, cp->regs + REG_MAC_ADDRN(44)); in cas_init_mac()
3064 cp->mac_rx_cfg = cas_setup_multicast(cp); in cas_init_mac()
3066 spin_lock(&cp->stat_lock[N_TX_RINGS]); in cas_init_mac()
3067 cas_clear_mac_err(cp); in cas_init_mac()
3068 spin_unlock(&cp->stat_lock[N_TX_RINGS]); in cas_init_mac()
3074 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK); in cas_init_mac()
3075 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); in cas_init_mac()
3080 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK); in cas_init_mac()
3083 /* Must be invoked under cp->lock. */
3084 static void cas_init_pause_thresholds(struct cas *cp) in cas_init_pause_thresholds() argument
3089 if (cp->rx_fifo_size <= (2 * 1024)) { in cas_init_pause_thresholds()
3090 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size; in cas_init_pause_thresholds()
3092 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63; in cas_init_pause_thresholds()
3093 if (max_frame * 3 > cp->rx_fifo_size) { in cas_init_pause_thresholds()
3094 cp->rx_pause_off = 7104; in cas_init_pause_thresholds()
3095 cp->rx_pause_on = 960; in cas_init_pause_thresholds()
3097 int off = (cp->rx_fifo_size - (max_frame * 2)); in cas_init_pause_thresholds()
3099 cp->rx_pause_off = off; in cas_init_pause_thresholds()
3100 cp->rx_pause_on = on; in cas_init_pause_thresholds()
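
cas_init_pause_thresholds() (lines 3084-3100) sizes the RX flow-control thresholds from the FIFO size and MTU: a FIFO of 2 KB or less just uses its full size for both thresholds, otherwise the OFF threshold leaves room for two maximum-sized frames, with fixed fallback values when even that does not fit. Restated as a stand-alone function; the ON threshold sitting one max frame below OFF is an assumption, since that line is not among the matches:

#define ETH_HLEN 14     /* bytes of Ethernet header */

static void pause_thresholds(int rx_fifo_size, int mtu, int *off, int *on)
{
        if (rx_fifo_size <= 2 * 1024) {
                *off = *on = rx_fifo_size;
                return;
        }

        /* frame on the wire, rounded to a 64-byte boundary */
        int max_frame = (mtu + ETH_HLEN + 4 + 4 + 64) & ~63;

        if (max_frame * 3 > rx_fifo_size) {
                *off = 7104;    /* fixed fallback values from the listing */
                *on  = 960;
        } else {
                *off = rx_fifo_size - 2 * max_frame;
                *on  = *off - max_frame;        /* assumed relation, see above */
        }
}
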
3129 static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr, in cas_get_vpd_info() argument
3132 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START; in cas_get_vpd_info()
3148 cp->regs + REG_BIM_LOCAL_DEV_EN); in cas_get_vpd_info()
3253 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV; in cas_get_vpd_info()
3294 addr = of_get_property(cp->of_node, "local-mac-address", NULL); in cas_get_vpd_info()
3309 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN); in cas_get_vpd_info()
3314 static void cas_check_pci_invariants(struct cas *cp) in cas_check_pci_invariants() argument
3316 struct pci_dev *pdev = cp->pdev; in cas_check_pci_invariants()
3318 cp->cas_flags = 0; in cas_check_pci_invariants()
3322 cp->cas_flags |= CAS_FLAG_REG_PLUS; in cas_check_pci_invariants()
3324 cp->cas_flags |= CAS_FLAG_TARGET_ABORT; in cas_check_pci_invariants()
3330 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM; in cas_check_pci_invariants()
3333 cp->cas_flags |= CAS_FLAG_REG_PLUS; in cas_check_pci_invariants()
3340 cp->cas_flags |= CAS_FLAG_SATURN; in cas_check_pci_invariants()
3345 static int cas_check_invariants(struct cas *cp) in cas_check_invariants() argument
3347 struct pci_dev *pdev = cp->pdev; in cas_check_invariants()
3353 cp->page_order = 0; in cas_check_invariants()
3362 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT; in cas_check_invariants()
3368 cp->page_size = (PAGE_SIZE << cp->page_order); in cas_check_invariants()
3371 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64; in cas_check_invariants()
3372 cp->rx_fifo_size = RX_FIFO_SIZE; in cas_check_invariants()
3377 cp->phy_type = cas_get_vpd_info(cp, addr, PCI_SLOT(pdev->devfn)); in cas_check_invariants()
3378 eth_hw_addr_set(cp->dev, addr); in cas_check_invariants()
3379 if (cp->phy_type & CAS_PHY_SERDES) { in cas_check_invariants()
3380 cp->cas_flags |= CAS_FLAG_1000MB_CAP; in cas_check_invariants()
3385 cfg = readl(cp->regs + REG_MIF_CFG); in cas_check_invariants()
3387 cp->phy_type = CAS_PHY_MII_MDIO1; in cas_check_invariants()
3389 cp->phy_type = CAS_PHY_MII_MDIO0; in cas_check_invariants()
3392 cas_mif_poll(cp, 0); in cas_check_invariants()
3393 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); in cas_check_invariants()
3400 cp->phy_addr = i; in cas_check_invariants()
3401 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16; in cas_check_invariants()
3402 phy_id |= cas_phy_read(cp, MII_PHYSID2); in cas_check_invariants()
3404 cp->phy_id = phy_id; in cas_check_invariants()
3410 readl(cp->regs + REG_MIF_STATE_MACHINE)); in cas_check_invariants()
3415 cfg = cas_phy_read(cp, MII_BMSR); in cas_check_invariants()
3417 cas_phy_read(cp, CAS_MII_1000_EXTEND)) in cas_check_invariants()
3418 cp->cas_flags |= CAS_FLAG_1000MB_CAP; in cas_check_invariants()
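The probe loop in cas_check_invariants() reads MII_PHYSID1/MII_PHYSID2 at each candidate address to build a 32-bit PHY identifier, then uses the BMSR extended-status and 1000BASE-T bits to set CAS_FLAG_1000MB_CAP. A trivial sketch of the ID assembly, with hypothetical register readbacks:

        #include <stdio.h>

        int main(void)
        {
                unsigned int physid1 = 0x0141;  /* hypothetical MII_PHYSID1 readback */
                unsigned int physid2 = 0x0cc2;  /* hypothetical MII_PHYSID2 readback */
                unsigned int phy_id  = (physid1 << 16) | physid2;

                /* 0x00000000 or 0xffffffff would mean nothing answered at this address */
                printf("phy_id = 0x%08x\n", phy_id);
                return 0;
        }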
3422 /* Must be invoked under cp->lock. */
3423 static inline void cas_start_dma(struct cas *cp) in cas_start_dma() argument
3430 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN; in cas_start_dma()
3431 writel(val, cp->regs + REG_TX_CFG); in cas_start_dma()
3432 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN; in cas_start_dma()
3433 writel(val, cp->regs + REG_RX_CFG); in cas_start_dma()
3436 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN; in cas_start_dma()
3437 writel(val, cp->regs + REG_MAC_TX_CFG); in cas_start_dma()
3438 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN; in cas_start_dma()
3439 writel(val, cp->regs + REG_MAC_RX_CFG); in cas_start_dma()
3443 val = readl(cp->regs + REG_MAC_TX_CFG); in cas_start_dma()
3451 val = readl(cp->regs + REG_MAC_RX_CFG); in cas_start_dma()
3454 netdev_err(cp->dev, in cas_start_dma()
3456 readl(cp->regs + REG_MIF_STATE_MACHINE), in cas_start_dma()
3457 readl(cp->regs + REG_MAC_STATE_MACHINE)); in cas_start_dma()
3463 netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n", in cas_start_dma()
3465 readl(cp->regs + REG_MIF_STATE_MACHINE), in cas_start_dma()
3466 readl(cp->regs + REG_MAC_STATE_MACHINE)); in cas_start_dma()
3469 cas_unmask_intr(cp); /* enable interrupts */ in cas_start_dma()
3470 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); in cas_start_dma()
3471 writel(0, cp->regs + REG_RX_COMP_TAIL); in cas_start_dma()
3473 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { in cas_start_dma()
3476 cp->regs + REG_PLUS_RX_KICK1); in cas_start_dma()
3480 /* Must be invoked under cp->lock. */
3481 static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd, in cas_read_pcs_link_mode() argument
3484 u32 val = readl(cp->regs + REG_PCS_MII_LPA); in cas_read_pcs_link_mode()
3492 /* Must be invoked under cp->lock. */
3493 static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd, in cas_read_mii_link_mode() argument
3503 val = cas_phy_read(cp, MII_LPA); in cas_read_mii_link_mode()
3515 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { in cas_read_mii_link_mode()
3516 val = cas_phy_read(cp, CAS_MII_1000_STATUS); in cas_read_mii_link_mode()
3527 * Must be invoked under cp->lock.
3529 static void cas_set_link_modes(struct cas *cp) in cas_set_link_modes() argument
3538 if (CAS_PHY_MII(cp->phy_type)) { in cas_set_link_modes()
3539 cas_mif_poll(cp, 0); in cas_set_link_modes()
3540 val = cas_phy_read(cp, MII_BMCR); in cas_set_link_modes()
3542 cas_read_mii_link_mode(cp, &full_duplex, &speed, in cas_set_link_modes()
3551 speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? in cas_set_link_modes()
3554 cas_mif_poll(cp, 1); in cas_set_link_modes()
3557 val = readl(cp->regs + REG_PCS_MII_CTRL); in cas_set_link_modes()
3558 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause); in cas_set_link_modes()
3565 netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n", in cas_set_link_modes()
3569 if (CAS_PHY_MII(cp->phy_type)) { in cas_set_link_modes()
3578 writel(val, cp->regs + REG_MAC_XIF_CFG); in cas_set_link_modes()
3600 cp->regs + REG_MAC_TX_CFG); in cas_set_link_modes()
3602 val = readl(cp->regs + REG_MAC_RX_CFG); in cas_set_link_modes()
3605 cp->regs + REG_MAC_RX_CFG); in cas_set_link_modes()
3607 writel(0x200, cp->regs + REG_MAC_SLOT_TIME); in cas_set_link_modes()
3609 cp->crc_size = 4; in cas_set_link_modes()
3611 cp->min_frame_size = CAS_1000MB_MIN_FRAME; in cas_set_link_modes()
3614 writel(val, cp->regs + REG_MAC_TX_CFG); in cas_set_link_modes()
3619 val = readl(cp->regs + REG_MAC_RX_CFG); in cas_set_link_modes()
3622 cp->crc_size = 0; in cas_set_link_modes()
3623 cp->min_frame_size = CAS_MIN_MTU; in cas_set_link_modes()
3626 cp->crc_size = 4; in cas_set_link_modes()
3627 cp->min_frame_size = CAS_MIN_FRAME; in cas_set_link_modes()
3630 cp->regs + REG_MAC_RX_CFG); in cas_set_link_modes()
3631 writel(0x40, cp->regs + REG_MAC_SLOT_TIME); in cas_set_link_modes()
3634 if (netif_msg_link(cp)) { in cas_set_link_modes()
3636 netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n", in cas_set_link_modes()
3637 cp->rx_fifo_size, in cas_set_link_modes()
3638 cp->rx_pause_off, in cas_set_link_modes()
3639 cp->rx_pause_on); in cas_set_link_modes()
3641 netdev_info(cp->dev, "TX pause enabled\n"); in cas_set_link_modes()
3643 netdev_info(cp->dev, "Pause is disabled\n"); in cas_set_link_modes()
3647 val = readl(cp->regs + REG_MAC_CTRL_CFG); in cas_set_link_modes()
3655 writel(val, cp->regs + REG_MAC_CTRL_CFG); in cas_set_link_modes()
3656 cas_start_dma(cp); in cas_set_link_modes()
3659 /* Must be invoked under cp->lock. */
3660 static void cas_init_hw(struct cas *cp, int restart_link) in cas_init_hw() argument
3663 cas_phy_init(cp); in cas_init_hw()
3665 cas_init_pause_thresholds(cp); in cas_init_hw()
3666 cas_init_mac(cp); in cas_init_hw()
3667 cas_init_dma(cp); in cas_init_hw()
3671 cp->timer_ticks = 0; in cas_init_hw()
3672 cas_begin_auto_negotiation(cp, NULL); in cas_init_hw()
3673 } else if (cp->lstate == link_up) { in cas_init_hw()
3674 cas_set_link_modes(cp); in cas_init_hw()
3675 netif_carrier_on(cp->dev); in cas_init_hw()
3679 /* Must be invoked under cp->lock. On earlier cassini boards, in cas_hard_reset()
3683 static void cas_hard_reset(struct cas *cp) in cas_hard_reset() argument
3685 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN); in cas_hard_reset()
3687 pci_restore_state(cp->pdev); in cas_hard_reset()
3691 static void cas_global_reset(struct cas *cp, int blkflag) in cas_global_reset() argument
3696 if (blkflag && !CAS_PHY_MII(cp->phy_type)) { in cas_global_reset()
3704 cp->regs + REG_SW_RESET); in cas_global_reset()
3706 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET); in cas_global_reset()
3714 u32 val = readl(cp->regs + REG_SW_RESET); in cas_global_reset()
3719 netdev_err(cp->dev, "sw reset failed\n"); in cas_global_reset()
3724 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG); in cas_global_reset()
3732 PCI_ERR_BIM_DMA_READ), cp->regs + in cas_global_reset()
3738 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); in cas_global_reset()
3741 static void cas_reset(struct cas *cp, int blkflag) in cas_reset() argument
3745 cas_mask_intr(cp); in cas_reset()
3746 cas_global_reset(cp, blkflag); in cas_reset()
3747 cas_mac_reset(cp); in cas_reset()
3748 cas_entropy_reset(cp); in cas_reset()
3751 val = readl(cp->regs + REG_TX_CFG); in cas_reset()
3753 writel(val, cp->regs + REG_TX_CFG); in cas_reset()
3755 val = readl(cp->regs + REG_RX_CFG); in cas_reset()
3757 writel(val, cp->regs + REG_RX_CFG); in cas_reset()
3760 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) || in cas_reset()
3762 cas_load_firmware(cp, CAS_HP_FIRMWARE); in cas_reset()
3764 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE); in cas_reset()
3768 spin_lock(&cp->stat_lock[N_TX_RINGS]); in cas_reset()
3769 cas_clear_mac_err(cp); in cas_reset()
3770 spin_unlock(&cp->stat_lock[N_TX_RINGS]); in cas_reset()
3774 static void cas_shutdown(struct cas *cp) in cas_shutdown() argument
3779 cp->hw_running = 0; in cas_shutdown()
3781 del_timer_sync(&cp->link_timer); in cas_shutdown()
3785 while (atomic_read(&cp->reset_task_pending_mtu) || in cas_shutdown()
3786 atomic_read(&cp->reset_task_pending_spare) || in cas_shutdown()
3787 atomic_read(&cp->reset_task_pending_all)) in cas_shutdown()
3791 while (atomic_read(&cp->reset_task_pending)) in cas_shutdown()
3795 cas_lock_all_save(cp, flags); in cas_shutdown()
3796 cas_reset(cp, 0); in cas_shutdown()
3797 if (cp->cas_flags & CAS_FLAG_SATURN) in cas_shutdown()
3798 cas_phy_powerdown(cp); in cas_shutdown()
3799 cas_unlock_all_restore(cp, flags); in cas_shutdown()
3804 struct cas *cp = netdev_priv(dev); in cas_change_mtu() local
3812 atomic_inc(&cp->reset_task_pending); in cas_change_mtu()
3813 if ((cp->phy_type & CAS_PHY_SERDES)) { in cas_change_mtu()
3814 atomic_inc(&cp->reset_task_pending_all); in cas_change_mtu()
3816 atomic_inc(&cp->reset_task_pending_mtu); in cas_change_mtu()
3818 schedule_work(&cp->reset_task); in cas_change_mtu()
3820 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ? in cas_change_mtu()
3823 schedule_work(&cp->reset_task); in cas_change_mtu()
3826 flush_work(&cp->reset_task); in cas_change_mtu()
3830 static void cas_clean_txd(struct cas *cp, int ring) in cas_clean_txd() argument
3832 struct cas_tx_desc *txd = cp->init_txds[ring]; in cas_clean_txd()
3833 struct sk_buff *skb, **skbs = cp->tx_skbs[ring]; in cas_clean_txd()
3856 dma_unmap_page(&cp->pdev->dev, daddr, dlen, in cas_clean_txd()
3866 if (cp->tx_tiny_use[ring][ent].used) in cas_clean_txd()
3874 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring])); in cas_clean_txd()
3878 static inline void cas_free_rx_desc(struct cas *cp, int ring) in cas_free_rx_desc() argument
3880 cas_page_t **page = cp->rx_pages[ring]; in cas_free_rx_desc()
3886 cas_page_free(cp, page[i]); in cas_free_rx_desc()
3892 static void cas_free_rxds(struct cas *cp) in cas_free_rxds() argument
3897 cas_free_rx_desc(cp, i); in cas_free_rxds()
3900 /* Must be invoked under cp->lock. */
3901 static void cas_clean_rings(struct cas *cp) in cas_clean_rings() argument
3906 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS); in cas_clean_rings()
3907 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS); in cas_clean_rings()
3909 cas_clean_txd(cp, i); in cas_clean_rings()
3912 memset(cp->init_block, 0, sizeof(struct cas_init_block)); in cas_clean_rings()
3913 cas_clean_rxds(cp); in cas_clean_rings()
3914 cas_clean_rxcs(cp); in cas_clean_rings()
3918 static inline int cas_alloc_rx_desc(struct cas *cp, int ring) in cas_alloc_rx_desc() argument
3920 cas_page_t **page = cp->rx_pages[ring]; in cas_alloc_rx_desc()
3925 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL) in cas_alloc_rx_desc()
3931 static int cas_alloc_rxds(struct cas *cp) in cas_alloc_rxds() argument
3936 if (cas_alloc_rx_desc(cp, i) < 0) { in cas_alloc_rxds()
3937 cas_free_rxds(cp); in cas_alloc_rxds()
3946 struct cas *cp = container_of(work, struct cas, reset_task); in cas_reset_task() local
3948 int pending = atomic_read(&cp->reset_task_pending); in cas_reset_task()
3950 int pending_all = atomic_read(&cp->reset_task_pending_all); in cas_reset_task()
3951 int pending_spare = atomic_read(&cp->reset_task_pending_spare); in cas_reset_task()
3952 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu); in cas_reset_task()
3958 atomic_dec(&cp->reset_task_pending); in cas_reset_task()
3966 if (cp->hw_running) { in cas_reset_task()
3970 netif_device_detach(cp->dev); in cas_reset_task()
3971 cas_lock_all_save(cp, flags); in cas_reset_task()
3973 if (cp->opened) { in cas_reset_task()
3978 cas_spare_recover(cp, GFP_ATOMIC); in cas_reset_task()
3996 cas_reset(cp, !(pending_all > 0)); in cas_reset_task()
3997 if (cp->opened) in cas_reset_task()
3998 cas_clean_rings(cp); in cas_reset_task()
3999 cas_init_hw(cp, (pending_all > 0)); in cas_reset_task()
4001 cas_reset(cp, !(pending == CAS_RESET_ALL)); in cas_reset_task()
4002 if (cp->opened) in cas_reset_task()
4003 cas_clean_rings(cp); in cas_reset_task()
4004 cas_init_hw(cp, pending == CAS_RESET_ALL); in cas_reset_task()
4008 cas_unlock_all_restore(cp, flags); in cas_reset_task()
4009 netif_device_attach(cp->dev); in cas_reset_task()
4012 atomic_sub(pending_all, &cp->reset_task_pending_all); in cas_reset_task()
4013 atomic_sub(pending_spare, &cp->reset_task_pending_spare); in cas_reset_task()
4014 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu); in cas_reset_task()
4015 atomic_dec(&cp->reset_task_pending); in cas_reset_task()
4017 atomic_set(&cp->reset_task_pending, 0); in cas_reset_task()
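cas_change_mtu(), cas_shutdown() and cas_reset_task() above cooperate through pending counters: a requester increments the counter for its cause and schedules the work item, and the worker snapshots the counters, performs one reset, then subtracts only what it observed, so requests that race in mid-reset stay pending and trigger another pass. A userspace analogue of that accounting (not driver code), using C11 atomics and a hypothetical MTU-change cause:

        #include <stdatomic.h>
        #include <stdio.h>

        static atomic_int pending_mtu;          /* analogue of cp->reset_task_pending_mtu */

        static void request_mtu_change(void)
        {
                atomic_fetch_add(&pending_mtu, 1);
                /* the driver would now schedule_work(&cp->reset_task) */
        }

        static void reset_worker(void)
        {
                int seen = atomic_load(&pending_mtu);

                if (!seen)
                        return;
                /* ... one hardware reset covers all 'seen' requests ... */
                atomic_fetch_sub(&pending_mtu, seen);   /* racing requests stay pending */
        }

        int main(void)
        {
                request_mtu_change();
                request_mtu_change();
                reset_worker();
                printf("still pending: %d\n", atomic_load(&pending_mtu));
                return 0;
        }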
4023 struct cas *cp = from_timer(cp, t, link_timer); in cas_link_timer() local
4028 cp->link_transition_jiffies_valid && in cas_link_timer()
4029 time_is_before_jiffies(cp->link_transition_jiffies + in cas_link_timer()
4035 cp->link_transition_jiffies_valid = 0; in cas_link_timer()
4038 if (!cp->hw_running) in cas_link_timer()
4041 spin_lock_irqsave(&cp->lock, flags); in cas_link_timer()
4042 cas_lock_tx(cp); in cas_link_timer()
4043 cas_entropy_gather(cp); in cas_link_timer()
4049 if (atomic_read(&cp->reset_task_pending_all) || in cas_link_timer()
4050 atomic_read(&cp->reset_task_pending_spare) || in cas_link_timer()
4051 atomic_read(&cp->reset_task_pending_mtu)) in cas_link_timer()
4054 if (atomic_read(&cp->reset_task_pending)) in cas_link_timer()
4059 if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) { in cas_link_timer()
4068 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) { in cas_link_timer()
4072 cp->cas_flags &= ~rmask; in cas_link_timer()
4076 if (CAS_PHY_MII(cp->phy_type)) { in cas_link_timer()
4078 cas_mif_poll(cp, 0); in cas_link_timer()
4079 bmsr = cas_phy_read(cp, MII_BMSR); in cas_link_timer()
4085 bmsr = cas_phy_read(cp, MII_BMSR); in cas_link_timer()
4086 cas_mif_poll(cp, 1); in cas_link_timer()
4087 readl(cp->regs + REG_MIF_STATUS); /* avoid dups */ in cas_link_timer()
4088 reset = cas_mii_link_check(cp, bmsr); in cas_link_timer()
4090 reset = cas_pcs_link_check(cp); in cas_link_timer()
4097 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) { in cas_link_timer()
4098 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE); in cas_link_timer()
4104 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev, in cas_link_timer()
4110 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT); in cas_link_timer()
4111 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR); in cas_link_timer()
4112 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR); in cas_link_timer()
4114 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev, in cas_link_timer()
4121 cas_hard_reset(cp); in cas_link_timer()
4127 atomic_inc(&cp->reset_task_pending); in cas_link_timer()
4128 atomic_inc(&cp->reset_task_pending_all); in cas_link_timer()
4129 schedule_work(&cp->reset_task); in cas_link_timer()
4131 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); in cas_link_timer()
4133 schedule_work(&cp->reset_task); in cas_link_timer()
4138 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); in cas_link_timer()
4139 cas_unlock_tx(cp); in cas_link_timer()
4140 spin_unlock_irqrestore(&cp->lock, flags); in cas_link_timer()
4146 static void cas_tx_tiny_free(struct cas *cp) in cas_tx_tiny_free() argument
4148 struct pci_dev *pdev = cp->pdev; in cas_tx_tiny_free()
4152 if (!cp->tx_tiny_bufs[i]) in cas_tx_tiny_free()
4156 cp->tx_tiny_bufs[i], cp->tx_tiny_dvma[i]); in cas_tx_tiny_free()
4157 cp->tx_tiny_bufs[i] = NULL; in cas_tx_tiny_free()
4161 static int cas_tx_tiny_alloc(struct cas *cp) in cas_tx_tiny_alloc() argument
4163 struct pci_dev *pdev = cp->pdev; in cas_tx_tiny_alloc()
4167 cp->tx_tiny_bufs[i] = in cas_tx_tiny_alloc()
4169 &cp->tx_tiny_dvma[i], GFP_KERNEL); in cas_tx_tiny_alloc()
4170 if (!cp->tx_tiny_bufs[i]) { in cas_tx_tiny_alloc()
4171 cas_tx_tiny_free(cp); in cas_tx_tiny_alloc()
4181 struct cas *cp = netdev_priv(dev); in cas_open() local
4185 mutex_lock(&cp->pm_mutex); in cas_open()
4187 hw_was_up = cp->hw_running; in cas_open()
4190 * etc. state so it is safe to do this bit without cp->lock in cas_open()
4192 if (!cp->hw_running) { in cas_open()
4194 cas_lock_all_save(cp, flags); in cas_open()
4200 cas_reset(cp, 0); in cas_open()
4201 cp->hw_running = 1; in cas_open()
4202 cas_unlock_all_restore(cp, flags); in cas_open()
4206 if (cas_tx_tiny_alloc(cp) < 0) in cas_open()
4210 if (cas_alloc_rxds(cp) < 0) in cas_open()
4214 cas_spare_init(cp); in cas_open()
4215 cas_spare_recover(cp, GFP_KERNEL); in cas_open()
4222 if (request_irq(cp->pdev->irq, cas_interrupt, in cas_open()
4224 netdev_err(cp->dev, "failed to request irq !\n"); in cas_open()
4230 napi_enable(&cp->napi); in cas_open()
4233 cas_lock_all_save(cp, flags); in cas_open()
4234 cas_clean_rings(cp); in cas_open()
4235 cas_init_hw(cp, !hw_was_up); in cas_open()
4236 cp->opened = 1; in cas_open()
4237 cas_unlock_all_restore(cp, flags); in cas_open()
4240 mutex_unlock(&cp->pm_mutex); in cas_open()
4244 cas_spare_free(cp); in cas_open()
4245 cas_free_rxds(cp); in cas_open()
4247 cas_tx_tiny_free(cp); in cas_open()
4249 mutex_unlock(&cp->pm_mutex); in cas_open()
4256 struct cas *cp = netdev_priv(dev); in cas_close() local
4259 napi_disable(&cp->napi); in cas_close()
4262 mutex_lock(&cp->pm_mutex); in cas_close()
4267 cas_lock_all_save(cp, flags); in cas_close()
4268 cp->opened = 0; in cas_close()
4269 cas_reset(cp, 0); in cas_close()
4270 cas_phy_init(cp); in cas_close()
4271 cas_begin_auto_negotiation(cp, NULL); in cas_close()
4272 cas_clean_rings(cp); in cas_close()
4273 cas_unlock_all_restore(cp, flags); in cas_close()
4275 free_irq(cp->pdev->irq, (void *) dev); in cas_close()
4276 cas_spare_free(cp); in cas_close()
4277 cas_free_rxds(cp); in cas_close()
4278 cas_tx_tiny_free(cp); in cas_close()
4279 mutex_unlock(&cp->pm_mutex); in cas_close()
4330 static void cas_read_regs(struct cas *cp, u8 *ptr, int len) in cas_read_regs() argument
4336 spin_lock_irqsave(&cp->lock, flags); in cas_read_regs()
4341 hval = cas_phy_read(cp, in cas_read_regs()
4345 val = readl(cp->regs + ethtool_register_table[i].offsets); in cas_read_regs()
4349 spin_unlock_irqrestore(&cp->lock, flags); in cas_read_regs()
4354 struct cas *cp = netdev_priv(dev); in cas_get_stats() local
4355 struct net_device_stats *stats = cp->net_stats; in cas_get_stats()
4361 if (!cp->hw_running) in cas_get_stats()
4372 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags); in cas_get_stats()
4374 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff; in cas_get_stats()
4376 readl(cp->regs + REG_MAC_ALIGN_ERR) & 0xffff; in cas_get_stats()
4378 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff; in cas_get_stats()
4380 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) + in cas_get_stats()
4381 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff); in cas_get_stats()
4384 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff); in cas_get_stats()
4387 readl(cp->regs + REG_MAC_COLL_EXCESS); in cas_get_stats()
4388 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) + in cas_get_stats()
4389 readl(cp->regs + REG_MAC_COLL_LATE); in cas_get_stats()
4391 cas_clear_mac_err(cp); in cas_get_stats()
4394 spin_lock(&cp->stat_lock[0]); in cas_get_stats()
4401 spin_unlock(&cp->stat_lock[0]); in cas_get_stats()
4404 spin_lock(&cp->stat_lock[i]); in cas_get_stats()
4417 spin_unlock(&cp->stat_lock[i]); in cas_get_stats()
4419 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags); in cas_get_stats()
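cas_get_stats() above reads the 16-bit MAC error counters under the top-level stat lock and then clears them via cas_clear_mac_err(), so each read accounts only for errors since the previous harvest, before folding the per-ring software counters into the aggregate slot. A small sketch of that read-accumulate-clear pattern; the register helpers here are hypothetical stand-ins, not the driver's accessors:

        #include <stdint.h>
        #include <stdio.h>

        static unsigned long rx_crc_errors;     /* running software total */

        /* hypothetical stand-in: current saturating 16-bit hardware count */
        static uint16_t read_fcs_err_reg(void)
        {
                return 0x0012;
        }

        /* hypothetical stand-in: makes the hardware counter restart from zero */
        static void clear_fcs_err_reg(void)
        {
        }

        static void harvest_mac_errors(void)
        {
                rx_crc_errors += read_fcs_err_reg() & 0xffff;
                clear_fcs_err_reg();    /* the next harvest then only sees new errors */
        }

        int main(void)
        {
                harvest_mac_errors();
                printf("rx_crc_errors=%lu\n", rx_crc_errors);
                return 0;
        }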
4426 struct cas *cp = netdev_priv(dev); in cas_set_multicast() local
4431 if (!cp->hw_running) in cas_set_multicast()
4434 spin_lock_irqsave(&cp->lock, flags); in cas_set_multicast()
4435 rxcfg = readl(cp->regs + REG_MAC_RX_CFG); in cas_set_multicast()
4438 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); in cas_set_multicast()
4439 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) { in cas_set_multicast()
4448 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); in cas_set_multicast()
4449 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) { in cas_set_multicast()
4456 cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp); in cas_set_multicast()
4458 writel(rxcfg, cp->regs + REG_MAC_RX_CFG); in cas_set_multicast()
4459 spin_unlock_irqrestore(&cp->lock, flags); in cas_set_multicast()
4464 struct cas *cp = netdev_priv(dev); in cas_get_drvinfo() local
4467 strscpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); in cas_get_drvinfo()
4473 struct cas *cp = netdev_priv(dev); in cas_get_link_ksettings() local
4482 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { in cas_get_link_ksettings()
4488 spin_lock_irqsave(&cp->lock, flags); in cas_get_link_ksettings()
4490 linkstate = cp->lstate; in cas_get_link_ksettings()
4491 if (CAS_PHY_MII(cp->phy_type)) { in cas_get_link_ksettings()
4493 cmd->base.phy_address = cp->phy_addr; in cas_get_link_ksettings()
4507 if (cp->hw_running) { in cas_get_link_ksettings()
4508 cas_mif_poll(cp, 0); in cas_get_link_ksettings()
4509 bmcr = cas_phy_read(cp, MII_BMCR); in cas_get_link_ksettings()
4510 cas_read_mii_link_mode(cp, &full_duplex, in cas_get_link_ksettings()
4512 cas_mif_poll(cp, 1); in cas_get_link_ksettings()
4521 if (cp->hw_running) { in cas_get_link_ksettings()
4523 bmcr = readl(cp->regs + REG_PCS_MII_CTRL); in cas_get_link_ksettings()
4524 cas_read_pcs_link_mode(cp, &full_duplex, in cas_get_link_ksettings()
4528 spin_unlock_irqrestore(&cp->lock, flags); in cas_get_link_ksettings()
4558 if (cp->link_cntl & BMCR_ANENABLE) { in cas_get_link_ksettings()
4563 if (cp->link_cntl & BMCR_SPEED100) { in cas_get_link_ksettings()
4565 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) { in cas_get_link_ksettings()
4568 cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ? in cas_get_link_ksettings()
4584 struct cas *cp = netdev_priv(dev); in cas_set_link_ksettings() local
4602 spin_lock_irqsave(&cp->lock, flags); in cas_set_link_ksettings()
4603 cas_begin_auto_negotiation(cp, cmd); in cas_set_link_ksettings()
4604 spin_unlock_irqrestore(&cp->lock, flags); in cas_set_link_ksettings()
4610 struct cas *cp = netdev_priv(dev); in cas_nway_reset() local
4613 if ((cp->link_cntl & BMCR_ANENABLE) == 0) in cas_nway_reset()
4617 spin_lock_irqsave(&cp->lock, flags); in cas_nway_reset()
4618 cas_begin_auto_negotiation(cp, NULL); in cas_nway_reset()
4619 spin_unlock_irqrestore(&cp->lock, flags); in cas_nway_reset()
4626 struct cas *cp = netdev_priv(dev); in cas_get_link() local
4627 return cp->lstate == link_up; in cas_get_link()
4632 struct cas *cp = netdev_priv(dev); in cas_get_msglevel() local
4633 return cp->msg_enable; in cas_get_msglevel()
4638 struct cas *cp = netdev_priv(dev); in cas_set_msglevel() local
4639 cp->msg_enable = value; in cas_set_msglevel()
4644 struct cas *cp = netdev_priv(dev); in cas_get_regs_len() local
4645 return min_t(int, cp->casreg_len, CAS_MAX_REGS); in cas_get_regs_len()
4651 struct cas *cp = netdev_priv(dev); in cas_get_regs() local
4653 /* cas_read_regs handles locks (cp->lock). */ in cas_get_regs()
4654 cas_read_regs(cp, p, regs->len / sizeof(u32)); in cas_get_regs()
4676 struct cas *cp = netdev_priv(dev); in cas_get_ethtool_stats() local
4677 struct net_device_stats *stats = cas_get_stats(cp->dev); in cas_get_ethtool_stats()
4715 struct cas *cp = netdev_priv(dev); in cas_ioctl() local
4723 mutex_lock(&cp->pm_mutex); in cas_ioctl()
4726 data->phy_id = cp->phy_addr; in cas_ioctl()
4730 spin_lock_irqsave(&cp->lock, flags); in cas_ioctl()
4731 cas_mif_poll(cp, 0); in cas_ioctl()
4732 data->val_out = cas_phy_read(cp, data->reg_num & 0x1f); in cas_ioctl()
4733 cas_mif_poll(cp, 1); in cas_ioctl()
4734 spin_unlock_irqrestore(&cp->lock, flags); in cas_ioctl()
4739 spin_lock_irqsave(&cp->lock, flags); in cas_ioctl()
4740 cas_mif_poll(cp, 0); in cas_ioctl()
4741 rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in); in cas_ioctl()
4742 cas_mif_poll(cp, 1); in cas_ioctl()
4743 spin_unlock_irqrestore(&cp->lock, flags); in cas_ioctl()
4749 mutex_unlock(&cp->pm_mutex); in cas_ioctl()
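cas_ioctl() above services the standard MII ioctls: SIOCGMIIPHY reports cp->phy_addr, and SIOCGMIIREG/SIOCSMIIREG go through cas_phy_read()/cas_phy_write() with MIF polling paused around the access. A userspace sketch that would exercise that path; the interface name is a placeholder and the calls may need CAP_NET_ADMIN:

        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <sys/socket.h>
        #include <net/if.h>
        #include <linux/mii.h>
        #include <linux/sockios.h>

        int main(void)
        {
                struct ifreq ifr;
                struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
                int fd = socket(AF_INET, SOCK_DGRAM, 0);

                memset(&ifr, 0, sizeof(ifr));
                strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* placeholder name */

                if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0) {         /* fills mii->phy_id */
                        perror("SIOCGMIIPHY");
                        return 1;
                }
                mii->reg_num = MII_BMSR;                        /* basic status register */
                if (ioctl(fd, SIOCGMIIREG, &ifr) < 0) {
                        perror("SIOCGMIIREG");
                        return 1;
                }
                printf("PHY %u BMSR = 0x%04x\n", mii->phy_id, mii->val_out);
                close(fd);
                return 0;
        }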
4858 struct cas *cp; in cas_init_one() local
4879 dev = alloc_etherdev(sizeof(*cp)); in cas_init_one()
4939 cp = netdev_priv(dev); in cas_init_one()
4940 cp->pdev = pdev; in cas_init_one()
4943 cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0; in cas_init_one()
4945 cp->dev = dev; in cas_init_one()
4946 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : in cas_init_one()
4950 cp->of_node = pci_device_to_OF_node(pdev); in cas_init_one()
4953 cp->link_transition = LINK_TRANSITION_UNKNOWN; in cas_init_one()
4954 cp->link_transition_jiffies_valid = 0; in cas_init_one()
4956 spin_lock_init(&cp->lock); in cas_init_one()
4957 spin_lock_init(&cp->rx_inuse_lock); in cas_init_one()
4958 spin_lock_init(&cp->rx_spare_lock); in cas_init_one()
4960 spin_lock_init(&cp->stat_lock[i]); in cas_init_one()
4961 spin_lock_init(&cp->tx_lock[i]); in cas_init_one()
4963 spin_lock_init(&cp->stat_lock[N_TX_RINGS]); in cas_init_one()
4964 mutex_init(&cp->pm_mutex); in cas_init_one()
4966 timer_setup(&cp->link_timer, cas_link_timer, 0); in cas_init_one()
4972 atomic_set(&cp->reset_task_pending, 0); in cas_init_one()
4973 atomic_set(&cp->reset_task_pending_all, 0); in cas_init_one()
4974 atomic_set(&cp->reset_task_pending_spare, 0); in cas_init_one()
4975 atomic_set(&cp->reset_task_pending_mtu, 0); in cas_init_one()
4977 INIT_WORK(&cp->reset_task, cas_reset_task); in cas_init_one()
4981 cp->link_cntl = link_modes[link_mode]; in cas_init_one()
4983 cp->link_cntl = BMCR_ANENABLE; in cas_init_one()
4984 cp->lstate = link_down; in cas_init_one()
4985 cp->link_transition = LINK_TRANSITION_LINK_DOWN; in cas_init_one()
4986 netif_carrier_off(cp->dev); in cas_init_one()
4987 cp->timer_ticks = 0; in cas_init_one()
4990 cp->regs = pci_iomap(pdev, 0, casreg_len); in cas_init_one()
4991 if (!cp->regs) { in cas_init_one()
4995 cp->casreg_len = casreg_len; in cas_init_one()
4998 cas_check_pci_invariants(cp); in cas_init_one()
4999 cas_hard_reset(cp); in cas_init_one()
5000 cas_reset(cp, 0); in cas_init_one()
5001 if (cas_check_invariants(cp)) in cas_init_one()
5003 if (cp->cas_flags & CAS_FLAG_SATURN) in cas_init_one()
5004 cas_saturn_firmware_init(cp); in cas_init_one()
5006 cp->init_block = in cas_init_one()
5008 &cp->block_dvma, GFP_KERNEL); in cas_init_one()
5009 if (!cp->init_block) { in cas_init_one()
5015 cp->init_txds[i] = cp->init_block->txds[i]; in cas_init_one()
5018 cp->init_rxds[i] = cp->init_block->rxds[i]; in cas_init_one()
5021 cp->init_rxcs[i] = cp->init_block->rxcs[i]; in cas_init_one()
5024 skb_queue_head_init(&cp->rx_flows[i]); in cas_init_one()
5031 netif_napi_add(dev, &cp->napi, cas_poll); in cas_init_one()
5037 if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0) in cas_init_one()
5051 i = readl(cp->regs + REG_BIM_CFG); in cas_init_one()
5053 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "", in cas_init_one()
5056 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq, in cas_init_one()
5060 cp->hw_running = 1; in cas_init_one()
5061 cas_entropy_reset(cp); in cas_init_one()
5062 cas_phy_init(cp); in cas_init_one()
5063 cas_begin_auto_negotiation(cp, NULL); in cas_init_one()
5068 cp->init_block, cp->block_dvma); in cas_init_one()
5071 mutex_lock(&cp->pm_mutex); in cas_init_one()
5072 if (cp->hw_running) in cas_init_one()
5073 cas_shutdown(cp); in cas_init_one()
5074 mutex_unlock(&cp->pm_mutex); in cas_init_one()
5076 vfree(cp->fw_data); in cas_init_one()
5078 pci_iounmap(pdev, cp->regs); in cas_init_one()
5100 struct cas *cp; in cas_remove_one() local
5104 cp = netdev_priv(dev); in cas_remove_one()
5107 vfree(cp->fw_data); in cas_remove_one()
5109 mutex_lock(&cp->pm_mutex); in cas_remove_one()
5110 cancel_work_sync(&cp->reset_task); in cas_remove_one()
5111 if (cp->hw_running) in cas_remove_one()
5112 cas_shutdown(cp); in cas_remove_one()
5113 mutex_unlock(&cp->pm_mutex); in cas_remove_one()
5116 if (cp->orig_cacheline_size) { in cas_remove_one()
5121 cp->orig_cacheline_size); in cas_remove_one()
5125 cp->init_block, cp->block_dvma); in cas_remove_one()
5126 pci_iounmap(pdev, cp->regs); in cas_remove_one()
5135 struct cas *cp = netdev_priv(dev); in cas_suspend() local
5138 mutex_lock(&cp->pm_mutex); in cas_suspend()
5141 if (cp->opened) { in cas_suspend()
5144 cas_lock_all_save(cp, flags); in cas_suspend()
5151 cas_reset(cp, 0); in cas_suspend()
5152 cas_clean_rings(cp); in cas_suspend()
5153 cas_unlock_all_restore(cp, flags); in cas_suspend()
5156 if (cp->hw_running) in cas_suspend()
5157 cas_shutdown(cp); in cas_suspend()
5158 mutex_unlock(&cp->pm_mutex); in cas_suspend()
5166 struct cas *cp = netdev_priv(dev); in cas_resume() local
5170 mutex_lock(&cp->pm_mutex); in cas_resume()
5171 cas_hard_reset(cp); in cas_resume()
5172 if (cp->opened) { in cas_resume()
5174 cas_lock_all_save(cp, flags); in cas_resume()
5175 cas_reset(cp, 0); in cas_resume()
5176 cp->hw_running = 1; in cas_resume()
5177 cas_clean_rings(cp); in cas_resume()
5178 cas_init_hw(cp, 1); in cas_resume()
5179 cas_unlock_all_restore(cp, flags); in cas_resume()
5183 mutex_unlock(&cp->pm_mutex); in cas_resume()