Lines matching "eth", "-", "ck"
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
34 static int debug = -1;
138 id->device_id; id++) in e1000e_check_me()
139 if (device_id == id->device_id) in e1000e_check_me()
146 * __ew32_prepare - prepare to write to MAC CSR register on certain parts
161 while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) in __ew32_prepare()
167 if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in __ew32()
170 writel(val, hw->hw_addr + reg); in __ew32()
174 * e1000_regdump - register printout routine
184 switch (reginfo->ofs) { in e1000_regdump()
198 pr_info("%-15s %08x\n", in e1000_regdump()
199 reginfo->name, __er32(hw, reginfo->ofs)); in e1000_regdump()
203 snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); in e1000_regdump()
204 pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); in e1000_regdump()
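/*
 * Example of the paired dump above: for a table entry whose
 * reginfo->name is, say, "RDBAL", rname becomes "RDBAL[0-1]" and the
 * queue-0 and queue-1 copies of the register print side by side.
 */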
213 for (i = 0; i < adapter->rx_ps_pages; i++) { in e1000e_dump_ps_pages()
214 ps_page = &bi->ps_pages[i]; in e1000e_dump_ps_pages()
216 if (ps_page->page) { in e1000e_dump_ps_pages()
219 16, 1, page_address(ps_page->page), in e1000e_dump_ps_pages()
226 * e1000e_dump - Print registers, Tx-ring and Rx-ring
231 struct net_device *netdev = adapter->netdev; in e1000e_dump()
232 struct e1000_hw *hw = &adapter->hw; in e1000e_dump()
234 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000e_dump()
241 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000e_dump()
258 dev_info(&adapter->pdev->dev, "Net device Info\n"); in e1000e_dump()
260 pr_info("%-15s %016lX %016lX\n", netdev->name, in e1000e_dump()
261 netdev->state, dev_trans_start(netdev)); in e1000e_dump()
265 dev_info(&adapter->pdev->dev, "Register Dump\n"); in e1000e_dump()
268 reginfo->name; reginfo++) { in e1000e_dump()
276 dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); in e1000e_dump()
277 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); in e1000e_dump()
278 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; in e1000e_dump()
280 0, tx_ring->next_to_use, tx_ring->next_to_clean, in e1000e_dump()
281 (unsigned long long)buffer_info->dma, in e1000e_dump()
282 buffer_info->length, in e1000e_dump()
283 buffer_info->next_to_watch, in e1000e_dump()
284 (unsigned long long)buffer_info->time_stamp); in e1000e_dump()
290 dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); in e1000e_dump()
292 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended); in e1000e_dump()
 * Legacy, Extended Context (DTYP=0x0) and Extended Data (DTYP=0x1) layout diagrams elided */ in e1000e_dump()
319 …Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb… in e1000e_dump()
320 …Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb… in e1000e_dump()
321 …Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb… in e1000e_dump()
322 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { in e1000e_dump()
325 buffer_info = &tx_ring->buffer_info[i]; in e1000e_dump()
327 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) in e1000e_dump()
329 else if (i == tx_ring->next_to_use) in e1000e_dump()
331 else if (i == tx_ring->next_to_clean) in e1000e_dump()
336 (!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' : in e1000e_dump()
337 ((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')), in e1000e_dump()
339 (unsigned long long)le64_to_cpu(u0->a), in e1000e_dump()
340 (unsigned long long)le64_to_cpu(u0->b), in e1000e_dump()
341 (unsigned long long)buffer_info->dma, in e1000e_dump()
342 buffer_info->length, buffer_info->next_to_watch, in e1000e_dump()
343 (unsigned long long)buffer_info->time_stamp, in e1000e_dump()
344 buffer_info->skb, next_desc); in e1000e_dump()
346 if (netif_msg_pktdata(adapter) && buffer_info->skb) in e1000e_dump()
348 16, 1, buffer_info->skb->data, in e1000e_dump()
349 buffer_info->skb->len, true); in e1000e_dump()
354 dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); in e1000e_dump()
357 0, rx_ring->next_to_use, rx_ring->next_to_clean); in e1000e_dump()
363 dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); in e1000e_dump()
364 switch (adapter->rx_ps_pages) { in e1000e_dump()
370 /* [Extended] Packet Split Receive Descriptor (Read) format - layout diagram elided */ in e1000e_dump()
380 …0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt… in e1000e_dump()
381 /* [Extended] Receive Descriptor (Write-Back) Format in e1000e_dump()
384 * (layout diagram elided) */ in e1000e_dump()
392 …nfo("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ----------… in e1000e_dump()
393 for (i = 0; i < rx_ring->count; i++) { in e1000e_dump()
395 buffer_info = &rx_ring->buffer_info[i]; in e1000e_dump()
399 le32_to_cpu(rx_desc_ps->wb.middle.status_error); in e1000e_dump()
401 if (i == rx_ring->next_to_use) in e1000e_dump()
403 else if (i == rx_ring->next_to_clean) in e1000e_dump()
410 pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n", in e1000e_dump()
412 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
413 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
414 (unsigned long long)le64_to_cpu(u1->c), in e1000e_dump()
415 (unsigned long long)le64_to_cpu(u1->d), in e1000e_dump()
416 buffer_info->skb, next_desc); in e1000e_dump()
420 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
421 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
422 (unsigned long long)le64_to_cpu(u1->c), in e1000e_dump()
423 (unsigned long long)le64_to_cpu(u1->d), in e1000e_dump()
424 (unsigned long long)buffer_info->dma, in e1000e_dump()
425 buffer_info->skb, next_desc); in e1000e_dump()
437 /* Extended Receive Descriptor (Read) format - layout diagram elided */ in e1000e_dump()
443 …pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read… in e1000e_dump()
444 /* Extended Receive Descriptor (Write-Back) Format in e1000e_dump()
447 * (layout diagram elided; row 0 includes Rsvd, Reserved and MRQ RSS fields) */ in e1000e_dump()
457 …pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n"… in e1000e_dump()
459 for (i = 0; i < rx_ring->count; i++) { in e1000e_dump()
462 buffer_info = &rx_ring->buffer_info[i]; in e1000e_dump()
465 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000e_dump()
467 if (i == rx_ring->next_to_use) in e1000e_dump()
469 else if (i == rx_ring->next_to_clean) in e1000e_dump()
476 pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n", in e1000e_dump()
478 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
479 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
480 buffer_info->skb, next_desc); in e1000e_dump()
484 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
485 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
486 (unsigned long long)buffer_info->dma, in e1000e_dump()
487 buffer_info->skb, next_desc); in e1000e_dump()
490 buffer_info->skb) in e1000e_dump()
494 buffer_info->skb->data, in e1000e_dump()
495 adapter->rx_buffer_len, in e1000e_dump()
503 * e1000_desc_unused - calculate the number of unused descriptors
508 if (ring->next_to_clean > ring->next_to_use) in e1000_desc_unused()
509 return ring->next_to_clean - ring->next_to_use - 1; in e1000_desc_unused()
511 return ring->count + ring->next_to_clean - ring->next_to_use - 1; in e1000_desc_unused()
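/*
 * Worked example of the ring arithmetic above (hypothetical values):
 * count = 256, next_to_clean = 10, next_to_use = 250. The clean index
 * trails the use index across the wrap, so the unused count is
 * 256 + 10 - 250 - 1 = 15; the "- 1" keeps one slot permanently empty
 * so a completely full ring is distinguishable from an empty one.
 * A minimal standalone sketch of the same logic (hypothetical helper,
 * not part of the driver):
 *
 *	static unsigned int ring_unused(unsigned int count,
 *					unsigned int ntc, unsigned int ntu)
 *	{
 *		return (ntc > ntu ? ntc : ntc + count) - ntu - 1;
 *	}
 *
 * ring_unused(256, 10, 250) == 15, matching the branch above.
 */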
515 * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
535 spin_lock_irqsave(&adapter->systim_lock, flags); in e1000e_systim_to_hwtstamp()
536 ns = timecounter_cyc2time(&adapter->tc, systim); in e1000e_systim_to_hwtstamp()
537 spin_unlock_irqrestore(&adapter->systim_lock, flags); in e1000e_systim_to_hwtstamp()
540 hwtstamps->hwtstamp = ns_to_ktime(ns); in e1000e_systim_to_hwtstamp()
544 * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
556 struct e1000_hw *hw = &adapter->hw; in e1000e_rx_hwtstamp()
559 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) || in e1000e_rx_hwtstamp()
575 adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP; in e1000e_rx_hwtstamp()
579 * e1000_receive_skb - helper function to handle Rx indications
594 skb->protocol = eth_type_trans(skb, netdev); in e1000_receive_skb()
599 napi_gro_receive(&adapter->napi, skb); in e1000_receive_skb()
603 * e1000_rx_checksum - Receive Checksum Offload
617 if (!(adapter->netdev->features & NETIF_F_RXCSUM)) in e1000_rx_checksum()
627 adapter->hw_csum_err++; in e1000_rx_checksum()
636 skb->ip_summed = CHECKSUM_UNNECESSARY; in e1000_rx_checksum()
637 adapter->hw_csum_good++; in e1000_rx_checksum()
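/*
 * Net effect of the checks above: with RXCSUM disabled or a TCP/UDP
 * checksum error flagged by hardware (hw_csum_err++), the skb goes up
 * unverified and the stack checksums it in software; a good hardware
 * checksum marks the skb CHECKSUM_UNNECESSARY and bumps hw_csum_good.
 */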
642 struct e1000_adapter *adapter = rx_ring->adapter; in e1000e_update_rdt_wa()
643 struct e1000_hw *hw = &adapter->hw; in e1000e_update_rdt_wa()
646 writel(i, rx_ring->tail); in e1000e_update_rdt_wa()
648 if (unlikely(i != readl(rx_ring->tail))) { in e1000e_update_rdt_wa()
652 e_err("ME firmware caused invalid RDT - resetting\n"); in e1000e_update_rdt_wa()
653 schedule_work(&adapter->reset_task); in e1000e_update_rdt_wa()
659 struct e1000_adapter *adapter = tx_ring->adapter; in e1000e_update_tdt_wa()
660 struct e1000_hw *hw = &adapter->hw; in e1000e_update_tdt_wa()
663 writel(i, tx_ring->tail); in e1000e_update_tdt_wa()
665 if (unlikely(i != readl(tx_ring->tail))) { in e1000e_update_tdt_wa()
669 e_err("ME firmware caused invalid TDT - resetting\n"); in e1000e_update_tdt_wa()
670 schedule_work(&adapter->reset_task); in e1000e_update_tdt_wa()
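/*
 * Both ME workaround helpers above follow the same pattern: wait (via
 * __ew32_prepare()) for ME firmware to release the PCIm2PCI arbiter,
 * write the ring tail, then read it back - a mismatch means ME
 * clobbered the write, so a full reset is scheduled rather than
 * running with a stale RDT/TDT.
 */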
675 * e1000_alloc_rx_buffers - Replace used receive buffers
683 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_alloc_rx_buffers()
684 struct net_device *netdev = adapter->netdev; in e1000_alloc_rx_buffers()
685 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_rx_buffers()
690 unsigned int bufsz = adapter->rx_buffer_len; in e1000_alloc_rx_buffers()
692 i = rx_ring->next_to_use; in e1000_alloc_rx_buffers()
693 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers()
695 while (cleaned_count--) { in e1000_alloc_rx_buffers()
696 skb = buffer_info->skb; in e1000_alloc_rx_buffers()
705 adapter->alloc_rx_buff_failed++; in e1000_alloc_rx_buffers()
709 buffer_info->skb = skb; in e1000_alloc_rx_buffers()
711 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, in e1000_alloc_rx_buffers()
712 adapter->rx_buffer_len, in e1000_alloc_rx_buffers()
714 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { in e1000_alloc_rx_buffers()
715 dev_err(&pdev->dev, "Rx DMA map failed\n"); in e1000_alloc_rx_buffers()
716 adapter->rx_dma_failed++; in e1000_alloc_rx_buffers()
721 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); in e1000_alloc_rx_buffers()
723 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { in e1000_alloc_rx_buffers()
726 * applicable for weak-ordered memory model archs, in e1000_alloc_rx_buffers()
727 * such as IA-64). in e1000_alloc_rx_buffers()
730 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_alloc_rx_buffers()
733 writel(i, rx_ring->tail); in e1000_alloc_rx_buffers()
736 if (i == rx_ring->count) in e1000_alloc_rx_buffers()
738 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers()
741 rx_ring->next_to_use = i; in e1000_alloc_rx_buffers()
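/*
 * Cadence note for the refill loop above: assuming the driver's usual
 * E1000_RX_BUFFER_WRITE value of 16, the !(i & (E1000_RX_BUFFER_WRITE
 * - 1)) test fires at i = 0, 16, 32, ..., so the RDT tail is bumped
 * once per 16 descriptors - after a write barrier - instead of one
 * MMIO write per descriptor.
 */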
745 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
753 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_alloc_rx_buffers_ps()
754 struct net_device *netdev = adapter->netdev; in e1000_alloc_rx_buffers_ps()
755 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_rx_buffers_ps()
762 i = rx_ring->next_to_use; in e1000_alloc_rx_buffers_ps()
763 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers_ps()
765 while (cleaned_count--) { in e1000_alloc_rx_buffers_ps()
769 ps_page = &buffer_info->ps_pages[j]; in e1000_alloc_rx_buffers_ps()
770 if (j >= adapter->rx_ps_pages) { in e1000_alloc_rx_buffers_ps()
772 rx_desc->read.buffer_addr[j + 1] = in e1000_alloc_rx_buffers_ps()
776 if (!ps_page->page) { in e1000_alloc_rx_buffers_ps()
777 ps_page->page = alloc_page(gfp); in e1000_alloc_rx_buffers_ps()
778 if (!ps_page->page) { in e1000_alloc_rx_buffers_ps()
779 adapter->alloc_rx_buff_failed++; in e1000_alloc_rx_buffers_ps()
782 ps_page->dma = dma_map_page(&pdev->dev, in e1000_alloc_rx_buffers_ps()
783 ps_page->page, in e1000_alloc_rx_buffers_ps()
786 if (dma_mapping_error(&pdev->dev, in e1000_alloc_rx_buffers_ps()
787 ps_page->dma)) { in e1000_alloc_rx_buffers_ps()
788 dev_err(&adapter->pdev->dev, in e1000_alloc_rx_buffers_ps()
790 adapter->rx_dma_failed++; in e1000_alloc_rx_buffers_ps()
795 * didn't change because each write-back in e1000_alloc_rx_buffers_ps()
798 rx_desc->read.buffer_addr[j + 1] = in e1000_alloc_rx_buffers_ps()
799 cpu_to_le64(ps_page->dma); in e1000_alloc_rx_buffers_ps()
802 skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0, in e1000_alloc_rx_buffers_ps()
806 adapter->alloc_rx_buff_failed++; in e1000_alloc_rx_buffers_ps()
810 buffer_info->skb = skb; in e1000_alloc_rx_buffers_ps()
811 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, in e1000_alloc_rx_buffers_ps()
812 adapter->rx_ps_bsize0, in e1000_alloc_rx_buffers_ps()
814 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { in e1000_alloc_rx_buffers_ps()
815 dev_err(&pdev->dev, "Rx DMA map failed\n"); in e1000_alloc_rx_buffers_ps()
816 adapter->rx_dma_failed++; in e1000_alloc_rx_buffers_ps()
819 buffer_info->skb = NULL; in e1000_alloc_rx_buffers_ps()
823 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); in e1000_alloc_rx_buffers_ps()
825 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { in e1000_alloc_rx_buffers_ps()
828 * applicable for weak-ordered memory model archs, in e1000_alloc_rx_buffers_ps()
829 * such as IA-64). in e1000_alloc_rx_buffers_ps()
832 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_alloc_rx_buffers_ps()
835 writel(i << 1, rx_ring->tail); in e1000_alloc_rx_buffers_ps()
839 if (i == rx_ring->count) in e1000_alloc_rx_buffers_ps()
841 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers_ps()
845 rx_ring->next_to_use = i; in e1000_alloc_rx_buffers_ps()
849 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
858 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_alloc_jumbo_rx_buffers()
859 struct net_device *netdev = adapter->netdev; in e1000_alloc_jumbo_rx_buffers()
860 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_jumbo_rx_buffers()
865 unsigned int bufsz = 256 - 16; /* for skb_reserve */ in e1000_alloc_jumbo_rx_buffers()
867 i = rx_ring->next_to_use; in e1000_alloc_jumbo_rx_buffers()
868 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_jumbo_rx_buffers()
870 while (cleaned_count--) { in e1000_alloc_jumbo_rx_buffers()
871 skb = buffer_info->skb; in e1000_alloc_jumbo_rx_buffers()
880 adapter->alloc_rx_buff_failed++; in e1000_alloc_jumbo_rx_buffers()
884 buffer_info->skb = skb; in e1000_alloc_jumbo_rx_buffers()
887 if (!buffer_info->page) { in e1000_alloc_jumbo_rx_buffers()
888 buffer_info->page = alloc_page(gfp); in e1000_alloc_jumbo_rx_buffers()
889 if (unlikely(!buffer_info->page)) { in e1000_alloc_jumbo_rx_buffers()
890 adapter->alloc_rx_buff_failed++; in e1000_alloc_jumbo_rx_buffers()
895 if (!buffer_info->dma) { in e1000_alloc_jumbo_rx_buffers()
896 buffer_info->dma = dma_map_page(&pdev->dev, in e1000_alloc_jumbo_rx_buffers()
897 buffer_info->page, 0, in e1000_alloc_jumbo_rx_buffers()
900 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { in e1000_alloc_jumbo_rx_buffers()
901 adapter->alloc_rx_buff_failed++; in e1000_alloc_jumbo_rx_buffers()
907 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); in e1000_alloc_jumbo_rx_buffers()
909 if (unlikely(++i == rx_ring->count)) in e1000_alloc_jumbo_rx_buffers()
911 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_jumbo_rx_buffers()
914 if (likely(rx_ring->next_to_use != i)) { in e1000_alloc_jumbo_rx_buffers()
915 rx_ring->next_to_use = i; in e1000_alloc_jumbo_rx_buffers()
916 if (unlikely(i-- == 0)) in e1000_alloc_jumbo_rx_buffers()
917 i = (rx_ring->count - 1); in e1000_alloc_jumbo_rx_buffers()
921 * applicable for weak-ordered memory model archs, in e1000_alloc_jumbo_rx_buffers()
922 * such as IA-64). in e1000_alloc_jumbo_rx_buffers()
925 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_alloc_jumbo_rx_buffers()
928 writel(i, rx_ring->tail); in e1000_alloc_jumbo_rx_buffers()
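/*
 * Worked example of the tail update above (hypothetical values): if
 * next_to_use was 5 and the loop stopped with i = 8, next_to_use
 * becomes 8 while i steps back to 7 - the last descriptor actually
 * populated - before being written to the tail; had i wrapped to 0,
 * count - 1 would be written instead.
 */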
935 if (netdev->features & NETIF_F_RXHASH) in e1000_rx_hash()
940 * e1000_clean_rx_irq - Send received data up the network stack
951 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_rx_irq()
952 struct net_device *netdev = adapter->netdev; in e1000_clean_rx_irq()
953 struct pci_dev *pdev = adapter->pdev; in e1000_clean_rx_irq()
954 struct e1000_hw *hw = &adapter->hw; in e1000_clean_rx_irq()
963 i = rx_ring->next_to_clean; in e1000_clean_rx_irq()
965 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_rx_irq()
966 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq()
976 skb = buffer_info->skb; in e1000_clean_rx_irq()
977 buffer_info->skb = NULL; in e1000_clean_rx_irq()
979 prefetch(skb->data - NET_IP_ALIGN); in e1000_clean_rx_irq()
982 if (i == rx_ring->count) in e1000_clean_rx_irq()
987 next_buffer = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq()
991 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_irq()
992 adapter->rx_buffer_len, DMA_FROM_DEVICE); in e1000_clean_rx_irq()
993 buffer_info->dma = 0; in e1000_clean_rx_irq()
995 length = le16_to_cpu(rx_desc->wb.upper.length); in e1000_clean_rx_irq()
1004 adapter->flags2 |= FLAG2_IS_DISCARDING; in e1000_clean_rx_irq()
1006 if (adapter->flags2 & FLAG2_IS_DISCARDING) { in e1000_clean_rx_irq()
1010 buffer_info->skb = skb; in e1000_clean_rx_irq()
1012 adapter->flags2 &= ~FLAG2_IS_DISCARDING; in e1000_clean_rx_irq()
1017 !(netdev->features & NETIF_F_RXALL))) { in e1000_clean_rx_irq()
1019 buffer_info->skb = skb; in e1000_clean_rx_irq()
1024 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { in e1000_clean_rx_irq()
1029 if (netdev->features & NETIF_F_RXFCS) in e1000_clean_rx_irq()
1030 total_rx_bytes -= 4; in e1000_clean_rx_irq()
1032 length -= 4; in e1000_clean_rx_irq()
1044 napi_alloc_skb(&adapter->napi, length); in e1000_clean_rx_irq()
1047 -NET_IP_ALIGN, in e1000_clean_rx_irq()
1048 (skb->data - in e1000_clean_rx_irq()
1053 buffer_info->skb = skb; in e1000_clean_rx_irq()
1064 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); in e1000_clean_rx_irq()
1067 rx_desc->wb.upper.vlan); in e1000_clean_rx_irq()
1070 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); in e1000_clean_rx_irq()
1074 adapter->alloc_rx_buf(rx_ring, cleaned_count, in e1000_clean_rx_irq()
1083 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_rx_irq()
1085 rx_ring->next_to_clean = i; in e1000_clean_rx_irq()
1089 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); in e1000_clean_rx_irq()
1091 adapter->total_rx_bytes += total_rx_bytes; in e1000_clean_rx_irq()
1092 adapter->total_rx_packets += total_rx_packets; in e1000_clean_rx_irq()
1100 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_put_txbuf()
1102 if (buffer_info->dma) { in e1000_put_txbuf()
1103 if (buffer_info->mapped_as_page) in e1000_put_txbuf()
1104 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, in e1000_put_txbuf()
1105 buffer_info->length, DMA_TO_DEVICE); in e1000_put_txbuf()
1107 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, in e1000_put_txbuf()
1108 buffer_info->length, DMA_TO_DEVICE); in e1000_put_txbuf()
1109 buffer_info->dma = 0; in e1000_put_txbuf()
1111 if (buffer_info->skb) { in e1000_put_txbuf()
1113 dev_kfree_skb_any(buffer_info->skb); in e1000_put_txbuf()
1115 dev_consume_skb_any(buffer_info->skb); in e1000_put_txbuf()
1116 buffer_info->skb = NULL; in e1000_put_txbuf()
1118 buffer_info->time_stamp = 0; in e1000_put_txbuf()
1126 struct net_device *netdev = adapter->netdev; in e1000_print_hw_hang()
1127 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_print_hw_hang()
1128 unsigned int i = tx_ring->next_to_clean; in e1000_print_hw_hang()
1129 unsigned int eop = tx_ring->buffer_info[i].next_to_watch; in e1000_print_hw_hang()
1131 struct e1000_hw *hw = &adapter->hw; in e1000_print_hw_hang()
1135 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000_print_hw_hang()
1138 if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) { in e1000_print_hw_hang()
1139 /* May be blocked on write-back; flush and detect again in e1000_print_hw_hang()
1142 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000_print_hw_hang()
1148 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000_print_hw_hang()
1151 adapter->tx_hang_recheck = true; in e1000_print_hw_hang()
1154 adapter->tx_hang_recheck = false; in e1000_print_hw_hang()
1168 pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); in e1000_print_hw_hang()
1183 "PHY 1000BASE-T Status <%x>\n" in e1000_print_hw_hang()
1186 readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use, in e1000_print_hw_hang()
1187 tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp, in e1000_print_hw_hang()
1188 eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), in e1000_print_hw_hang()
1194 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) in e1000_print_hw_hang()
1199 * e1000e_tx_hwtstamp_work - check for Tx time stamp
1210 struct e1000_hw *hw = &adapter->hw; in e1000e_tx_hwtstamp_work()
1213 struct sk_buff *skb = adapter->tx_hwtstamp_skb; in e1000e_tx_hwtstamp_work()
1225 adapter->tx_hwtstamp_skb = NULL; in e1000e_tx_hwtstamp_work()
1230 } else if (time_after(jiffies, adapter->tx_hwtstamp_start in e1000e_tx_hwtstamp_work()
1231 + adapter->tx_timeout_factor * HZ)) { in e1000e_tx_hwtstamp_work()
1232 dev_kfree_skb_any(adapter->tx_hwtstamp_skb); in e1000e_tx_hwtstamp_work()
1233 adapter->tx_hwtstamp_skb = NULL; in e1000e_tx_hwtstamp_work()
1234 adapter->tx_hwtstamp_timeouts++; in e1000e_tx_hwtstamp_work()
1238 schedule_work(&adapter->tx_hwtstamp_work); in e1000e_tx_hwtstamp_work()
1243 * e1000_clean_tx_irq - Reclaim resources after transmit completes
1251 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_clean_tx_irq()
1252 struct net_device *netdev = adapter->netdev; in e1000_clean_tx_irq()
1253 struct e1000_hw *hw = &adapter->hw; in e1000_clean_tx_irq()
1261 i = tx_ring->next_to_clean; in e1000_clean_tx_irq()
1262 eop = tx_ring->buffer_info[i].next_to_watch; in e1000_clean_tx_irq()
1265 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && in e1000_clean_tx_irq()
1266 (count < tx_ring->count)) { in e1000_clean_tx_irq()
1272 buffer_info = &tx_ring->buffer_info[i]; in e1000_clean_tx_irq()
1276 total_tx_packets += buffer_info->segs; in e1000_clean_tx_irq()
1277 total_tx_bytes += buffer_info->bytecount; in e1000_clean_tx_irq()
1278 if (buffer_info->skb) { in e1000_clean_tx_irq()
1279 bytes_compl += buffer_info->skb->len; in e1000_clean_tx_irq()
1285 tx_desc->upper.data = 0; in e1000_clean_tx_irq()
1288 if (i == tx_ring->count) in e1000_clean_tx_irq()
1292 if (i == tx_ring->next_to_use) in e1000_clean_tx_irq()
1294 eop = tx_ring->buffer_info[i].next_to_watch; in e1000_clean_tx_irq()
1298 tx_ring->next_to_clean = i; in e1000_clean_tx_irq()
1311 !(test_bit(__E1000_DOWN, &adapter->state))) { in e1000_clean_tx_irq()
1313 ++adapter->restart_queue; in e1000_clean_tx_irq()
1317 if (adapter->detect_tx_hung) { in e1000_clean_tx_irq()
1321 adapter->detect_tx_hung = false; in e1000_clean_tx_irq()
1322 if (tx_ring->buffer_info[i].time_stamp && in e1000_clean_tx_irq()
1323 time_after(jiffies, tx_ring->buffer_info[i].time_stamp in e1000_clean_tx_irq()
1324 + (adapter->tx_timeout_factor * HZ)) && in e1000_clean_tx_irq()
1326 schedule_work(&adapter->print_hang_task); in e1000_clean_tx_irq()
1328 adapter->tx_hang_recheck = false; in e1000_clean_tx_irq()
1330 adapter->total_tx_bytes += total_tx_bytes; in e1000_clean_tx_irq()
1331 adapter->total_tx_packets += total_tx_packets; in e1000_clean_tx_irq()
1332 return count < tx_ring->count; in e1000_clean_tx_irq()
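/*
 * On the return value above: the loop stops either at a descriptor
 * whose DD (descriptor done) status bit is still clear or after a full
 * ring's worth of entries, so "count < tx_ring->count" is true when
 * every completed descriptor was reclaimed and false when Tx cleaning
 * should continue.
 */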
1336 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
1347 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_rx_irq_ps()
1348 struct e1000_hw *hw = &adapter->hw; in e1000_clean_rx_irq_ps()
1350 struct net_device *netdev = adapter->netdev; in e1000_clean_rx_irq_ps()
1351 struct pci_dev *pdev = adapter->pdev; in e1000_clean_rx_irq_ps()
1361 i = rx_ring->next_to_clean; in e1000_clean_rx_irq_ps()
1363 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); in e1000_clean_rx_irq_ps()
1364 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq_ps()
1370 skb = buffer_info->skb; in e1000_clean_rx_irq_ps()
1374 prefetch(skb->data - NET_IP_ALIGN); in e1000_clean_rx_irq_ps()
1377 if (i == rx_ring->count) in e1000_clean_rx_irq_ps()
1382 next_buffer = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq_ps()
1386 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_irq_ps()
1387 adapter->rx_ps_bsize0, DMA_FROM_DEVICE); in e1000_clean_rx_irq_ps()
1388 buffer_info->dma = 0; in e1000_clean_rx_irq_ps()
1392 adapter->flags2 |= FLAG2_IS_DISCARDING; in e1000_clean_rx_irq_ps()
1394 if (adapter->flags2 & FLAG2_IS_DISCARDING) { in e1000_clean_rx_irq_ps()
1398 adapter->flags2 &= ~FLAG2_IS_DISCARDING; in e1000_clean_rx_irq_ps()
1403 !(netdev->features & NETIF_F_RXALL))) { in e1000_clean_rx_irq_ps()
1408 length = le16_to_cpu(rx_desc->wb.middle.length0); in e1000_clean_rx_irq_ps()
1423 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); in e1000_clean_rx_irq_ps()
1431 ((length + l1) <= adapter->rx_ps_bsize0)) { in e1000_clean_rx_irq_ps()
1434 ps_page = &buffer_info->ps_pages[0]; in e1000_clean_rx_irq_ps()
1440 dma_sync_single_for_cpu(&pdev->dev, in e1000_clean_rx_irq_ps()
1441 ps_page->dma, in e1000_clean_rx_irq_ps()
1444 vaddr = kmap_atomic(ps_page->page); in e1000_clean_rx_irq_ps()
1447 dma_sync_single_for_device(&pdev->dev, in e1000_clean_rx_irq_ps()
1448 ps_page->dma, in e1000_clean_rx_irq_ps()
1453 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { in e1000_clean_rx_irq_ps()
1454 if (!(netdev->features & NETIF_F_RXFCS)) in e1000_clean_rx_irq_ps()
1455 l1 -= 4; in e1000_clean_rx_irq_ps()
1464 length = le16_to_cpu(rx_desc->wb.upper.length[j]); in e1000_clean_rx_irq_ps()
1468 ps_page = &buffer_info->ps_pages[j]; in e1000_clean_rx_irq_ps()
1469 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, in e1000_clean_rx_irq_ps()
1471 ps_page->dma = 0; in e1000_clean_rx_irq_ps()
1472 skb_fill_page_desc(skb, j, ps_page->page, 0, length); in e1000_clean_rx_irq_ps()
1473 ps_page->page = NULL; in e1000_clean_rx_irq_ps()
1474 skb->len += length; in e1000_clean_rx_irq_ps()
1475 skb->data_len += length; in e1000_clean_rx_irq_ps()
1476 skb->truesize += PAGE_SIZE; in e1000_clean_rx_irq_ps()
1482 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { in e1000_clean_rx_irq_ps()
1483 if (!(netdev->features & NETIF_F_RXFCS)) in e1000_clean_rx_irq_ps()
1484 pskb_trim(skb, skb->len - 4); in e1000_clean_rx_irq_ps()
1488 total_rx_bytes += skb->len; in e1000_clean_rx_irq_ps()
1493 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); in e1000_clean_rx_irq_ps()
1495 if (rx_desc->wb.upper.header_status & in e1000_clean_rx_irq_ps()
1497 adapter->rx_hdr_split++; in e1000_clean_rx_irq_ps()
1500 rx_desc->wb.middle.vlan); in e1000_clean_rx_irq_ps()
1503 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); in e1000_clean_rx_irq_ps()
1504 buffer_info->skb = NULL; in e1000_clean_rx_irq_ps()
1508 adapter->alloc_rx_buf(rx_ring, cleaned_count, in e1000_clean_rx_irq_ps()
1517 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); in e1000_clean_rx_irq_ps()
1519 rx_ring->next_to_clean = i; in e1000_clean_rx_irq_ps()
1523 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); in e1000_clean_rx_irq_ps()
1525 adapter->total_rx_bytes += total_rx_bytes; in e1000_clean_rx_irq_ps()
1526 adapter->total_rx_packets += total_rx_packets; in e1000_clean_rx_irq_ps()
1533 bi->page = NULL; in e1000_consume_page()
1534 skb->len += length; in e1000_consume_page()
1535 skb->data_len += length; in e1000_consume_page()
1536 skb->truesize += PAGE_SIZE; in e1000_consume_page()
1540 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
1551 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_jumbo_rx_irq()
1552 struct net_device *netdev = adapter->netdev; in e1000_clean_jumbo_rx_irq()
1553 struct pci_dev *pdev = adapter->pdev; in e1000_clean_jumbo_rx_irq()
1563 i = rx_ring->next_to_clean; in e1000_clean_jumbo_rx_irq()
1565 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_jumbo_rx_irq()
1566 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_jumbo_rx_irq()
1576 skb = buffer_info->skb; in e1000_clean_jumbo_rx_irq()
1577 buffer_info->skb = NULL; in e1000_clean_jumbo_rx_irq()
1580 if (i == rx_ring->count) in e1000_clean_jumbo_rx_irq()
1585 next_buffer = &rx_ring->buffer_info[i]; in e1000_clean_jumbo_rx_irq()
1589 dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE, in e1000_clean_jumbo_rx_irq()
1591 buffer_info->dma = 0; in e1000_clean_jumbo_rx_irq()
1593 length = le16_to_cpu(rx_desc->wb.upper.length); in e1000_clean_jumbo_rx_irq()
1598 !(netdev->features & NETIF_F_RXALL)))) { in e1000_clean_jumbo_rx_irq()
1600 buffer_info->skb = skb; in e1000_clean_jumbo_rx_irq()
1602 if (rx_ring->rx_skb_top) in e1000_clean_jumbo_rx_irq()
1603 dev_kfree_skb_irq(rx_ring->rx_skb_top); in e1000_clean_jumbo_rx_irq()
1604 rx_ring->rx_skb_top = NULL; in e1000_clean_jumbo_rx_irq()
1607 #define rxtop (rx_ring->rx_skb_top) in e1000_clean_jumbo_rx_irq()
1613 skb_fill_page_desc(rxtop, 0, buffer_info->page, in e1000_clean_jumbo_rx_irq()
1618 skb_fill_page_desc(rxtop, shinfo->nr_frags, in e1000_clean_jumbo_rx_irq()
1619 buffer_info->page, 0, in e1000_clean_jumbo_rx_irq()
1621 /* re-use the skb, only consumed the page */ in e1000_clean_jumbo_rx_irq()
1622 buffer_info->skb = skb; in e1000_clean_jumbo_rx_irq()
1630 skb_fill_page_desc(rxtop, shinfo->nr_frags, in e1000_clean_jumbo_rx_irq()
1631 buffer_info->page, 0, in e1000_clean_jumbo_rx_irq()
1633 /* re-use the current skb, we only consumed the in e1000_clean_jumbo_rx_irq()
1636 buffer_info->skb = skb; in e1000_clean_jumbo_rx_irq()
1647 vaddr = kmap_atomic(buffer_info->page); in e1000_clean_jumbo_rx_irq()
1651 /* re-use the page, so don't erase in e1000_clean_jumbo_rx_irq()
1652 * buffer_info->page in e1000_clean_jumbo_rx_irq()
1657 buffer_info->page, 0, in e1000_clean_jumbo_rx_irq()
1668 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); in e1000_clean_jumbo_rx_irq()
1671 total_rx_bytes += skb->len; in e1000_clean_jumbo_rx_irq()
1674 /* eth type trans needs skb->data to point to something */ in e1000_clean_jumbo_rx_irq()
1682 rx_desc->wb.upper.vlan); in e1000_clean_jumbo_rx_irq()
1685 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); in e1000_clean_jumbo_rx_irq()
1689 adapter->alloc_rx_buf(rx_ring, cleaned_count, in e1000_clean_jumbo_rx_irq()
1698 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_jumbo_rx_irq()
1700 rx_ring->next_to_clean = i; in e1000_clean_jumbo_rx_irq()
1704 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); in e1000_clean_jumbo_rx_irq()
1706 adapter->total_rx_bytes += total_rx_bytes; in e1000_clean_jumbo_rx_irq()
1707 adapter->total_rx_packets += total_rx_packets; in e1000_clean_jumbo_rx_irq()
1712 * e1000_clean_rx_ring - Free Rx Buffers per Queue
1717 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_rx_ring()
1720 struct pci_dev *pdev = adapter->pdev; in e1000_clean_rx_ring()
1724 for (i = 0; i < rx_ring->count; i++) { in e1000_clean_rx_ring()
1725 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_rx_ring()
1726 if (buffer_info->dma) { in e1000_clean_rx_ring()
1727 if (adapter->clean_rx == e1000_clean_rx_irq) in e1000_clean_rx_ring()
1728 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_ring()
1729 adapter->rx_buffer_len, in e1000_clean_rx_ring()
1731 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) in e1000_clean_rx_ring()
1732 dma_unmap_page(&pdev->dev, buffer_info->dma, in e1000_clean_rx_ring()
1734 else if (adapter->clean_rx == e1000_clean_rx_irq_ps) in e1000_clean_rx_ring()
1735 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_ring()
1736 adapter->rx_ps_bsize0, in e1000_clean_rx_ring()
1738 buffer_info->dma = 0; in e1000_clean_rx_ring()
1741 if (buffer_info->page) { in e1000_clean_rx_ring()
1742 put_page(buffer_info->page); in e1000_clean_rx_ring()
1743 buffer_info->page = NULL; in e1000_clean_rx_ring()
1746 if (buffer_info->skb) { in e1000_clean_rx_ring()
1747 dev_kfree_skb(buffer_info->skb); in e1000_clean_rx_ring()
1748 buffer_info->skb = NULL; in e1000_clean_rx_ring()
1752 ps_page = &buffer_info->ps_pages[j]; in e1000_clean_rx_ring()
1753 if (!ps_page->page) in e1000_clean_rx_ring()
1755 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, in e1000_clean_rx_ring()
1757 ps_page->dma = 0; in e1000_clean_rx_ring()
1758 put_page(ps_page->page); in e1000_clean_rx_ring()
1759 ps_page->page = NULL; in e1000_clean_rx_ring()
1764 if (rx_ring->rx_skb_top) { in e1000_clean_rx_ring()
1765 dev_kfree_skb(rx_ring->rx_skb_top); in e1000_clean_rx_ring()
1766 rx_ring->rx_skb_top = NULL; in e1000_clean_rx_ring()
1770 memset(rx_ring->desc, 0, rx_ring->size); in e1000_clean_rx_ring()
1772 rx_ring->next_to_clean = 0; in e1000_clean_rx_ring()
1773 rx_ring->next_to_use = 0; in e1000_clean_rx_ring()
1774 adapter->flags2 &= ~FLAG2_IS_DISCARDING; in e1000_clean_rx_ring()
1783 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000e_downshift_workaround()
1786 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); in e1000e_downshift_workaround()
1790 * e1000_intr_msi - Interrupt Handler
1798 struct e1000_hw *hw = &adapter->hw; in e1000_intr_msi()
1803 hw->mac.get_link_status = true; in e1000_intr_msi()
1804 /* ICH8 workaround: call gig speed drop workaround on cable in e1000_intr_msi()
1807 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && in e1000_intr_msi()
1809 schedule_work(&adapter->downshift_task); in e1000_intr_msi()
1811 /* 80003ES2LAN workaround: for packet buffer work-around on in e1000_intr_msi()
1816 adapter->flags & FLAG_RX_NEEDS_RESTART) { in e1000_intr_msi()
1821 adapter->flags |= FLAG_RESTART_NOW; in e1000_intr_msi()
1824 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr_msi()
1825 mod_timer(&adapter->watchdog_timer, jiffies + 1); in e1000_intr_msi()
1829 if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) { in e1000_intr_msi()
1832 adapter->corr_errors += in e1000_intr_msi()
1834 adapter->uncorr_errors += in e1000_intr_msi()
1839 schedule_work(&adapter->reset_task); in e1000_intr_msi()
1845 if (napi_schedule_prep(&adapter->napi)) { in e1000_intr_msi()
1846 adapter->total_tx_bytes = 0; in e1000_intr_msi()
1847 adapter->total_tx_packets = 0; in e1000_intr_msi()
1848 adapter->total_rx_bytes = 0; in e1000_intr_msi()
1849 adapter->total_rx_packets = 0; in e1000_intr_msi()
1850 __napi_schedule(&adapter->napi); in e1000_intr_msi()
1857 * e1000_intr - Interrupt Handler
1865 struct e1000_hw *hw = &adapter->hw; in e1000_intr()
1868 if (!icr || test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr()
1871 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is in e1000_intr()
1877 /* Interrupt Auto-Mask...upon reading ICR, in e1000_intr()
1883 hw->mac.get_link_status = true; in e1000_intr()
1884 /* ICH8 workaround: call gig speed drop workaround on cable in e1000_intr()
1887 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && in e1000_intr()
1889 schedule_work(&adapter->downshift_task); in e1000_intr()
1891 /* 80003ES2LAN workaround: in e1000_intr()
1892 * For packet buffer work-around on link down event; in e1000_intr()
1897 (adapter->flags & FLAG_RX_NEEDS_RESTART)) { in e1000_intr()
1901 adapter->flags |= FLAG_RESTART_NOW; in e1000_intr()
1904 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr()
1905 mod_timer(&adapter->watchdog_timer, jiffies + 1); in e1000_intr()
1909 if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) { in e1000_intr()
1912 adapter->corr_errors += in e1000_intr()
1914 adapter->uncorr_errors += in e1000_intr()
1919 schedule_work(&adapter->reset_task); in e1000_intr()
1925 if (napi_schedule_prep(&adapter->napi)) { in e1000_intr()
1926 adapter->total_tx_bytes = 0; in e1000_intr()
1927 adapter->total_tx_packets = 0; in e1000_intr()
1928 adapter->total_rx_bytes = 0; in e1000_intr()
1929 adapter->total_rx_packets = 0; in e1000_intr()
1930 __napi_schedule(&adapter->napi); in e1000_intr()
1940 struct e1000_hw *hw = &adapter->hw; in e1000_msix_other()
1943 if (icr & adapter->eiac_mask) in e1000_msix_other()
1944 ew32(ICS, (icr & adapter->eiac_mask)); in e1000_msix_other()
1947 hw->mac.get_link_status = true; in e1000_msix_other()
1949 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_msix_other()
1950 mod_timer(&adapter->watchdog_timer, jiffies + 1); in e1000_msix_other()
1953 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_msix_other()
1963 struct e1000_hw *hw = &adapter->hw; in e1000_intr_msix_tx()
1964 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_intr_msix_tx()
1966 adapter->total_tx_bytes = 0; in e1000_intr_msix_tx()
1967 adapter->total_tx_packets = 0; in e1000_intr_msix_tx()
1971 ew32(ICS, tx_ring->ims_val); in e1000_intr_msix_tx()
1973 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr_msix_tx()
1974 ew32(IMS, adapter->tx_ring->ims_val); in e1000_intr_msix_tx()
1983 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_intr_msix_rx()
1988 if (rx_ring->set_itr) { in e1000_intr_msix_rx()
1989 u32 itr = rx_ring->itr_val ? in e1000_intr_msix_rx()
1990 1000000000 / (rx_ring->itr_val * 256) : 0; in e1000_intr_msix_rx()
1992 writel(itr, rx_ring->itr_register); in e1000_intr_msix_rx()
1993 rx_ring->set_itr = 0; in e1000_intr_msix_rx()
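/*
 * Worked example of the conversion above: itr_val is a target in
 * interrupts/sec and, per this formula, the EITR register counts
 * 256 ns units, so itr_val = 20000 writes 1000000000 / (20000 * 256)
 * = 195 - roughly 50 us between interrupts; itr_val = 0 writes 0 and
 * turns throttling off.
 */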
1996 if (napi_schedule_prep(&adapter->napi)) { in e1000_intr_msix_rx()
1997 adapter->total_rx_bytes = 0; in e1000_intr_msix_rx()
1998 adapter->total_rx_packets = 0; in e1000_intr_msix_rx()
1999 __napi_schedule(&adapter->napi); in e1000_intr_msix_rx()
2005 * e1000_configure_msix - Configure MSI-X hardware
2009 * generate MSI-X interrupts.
2013 struct e1000_hw *hw = &adapter->hw; in e1000_configure_msix()
2014 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_configure_msix()
2015 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_configure_msix()
2019 adapter->eiac_mask = 0; in e1000_configure_msix()
2021 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ in e1000_configure_msix()
2022 if (hw->mac.type == e1000_82574) { in e1000_configure_msix()
2030 rx_ring->ims_val = E1000_IMS_RXQ0; in e1000_configure_msix()
2031 adapter->eiac_mask |= rx_ring->ims_val; in e1000_configure_msix()
2032 if (rx_ring->itr_val) in e1000_configure_msix()
2033 writel(1000000000 / (rx_ring->itr_val * 256), in e1000_configure_msix()
2034 rx_ring->itr_register); in e1000_configure_msix()
2036 writel(1, rx_ring->itr_register); in e1000_configure_msix()
2040 tx_ring->ims_val = E1000_IMS_TXQ0; in e1000_configure_msix()
2042 if (tx_ring->itr_val) in e1000_configure_msix()
2043 writel(1000000000 / (tx_ring->itr_val * 256), in e1000_configure_msix()
2044 tx_ring->itr_register); in e1000_configure_msix()
2046 writel(1, tx_ring->itr_register); in e1000_configure_msix()
2047 adapter->eiac_mask |= tx_ring->ims_val; in e1000_configure_msix()
2053 if (rx_ring->itr_val) in e1000_configure_msix()
2054 writel(1000000000 / (rx_ring->itr_val * 256), in e1000_configure_msix()
2055 hw->hw_addr + E1000_EITR_82574(vector)); in e1000_configure_msix()
2057 writel(1, hw->hw_addr + E1000_EITR_82574(vector)); in e1000_configure_msix()
2064 /* enable MSI-X PBA support */ in e1000_configure_msix()
2073 if (adapter->msix_entries) { in e1000e_reset_interrupt_capability()
2074 pci_disable_msix(adapter->pdev); in e1000e_reset_interrupt_capability()
2075 kfree(adapter->msix_entries); in e1000e_reset_interrupt_capability()
2076 adapter->msix_entries = NULL; in e1000e_reset_interrupt_capability()
2077 } else if (adapter->flags & FLAG_MSI_ENABLED) { in e1000e_reset_interrupt_capability()
2078 pci_disable_msi(adapter->pdev); in e1000e_reset_interrupt_capability()
2079 adapter->flags &= ~FLAG_MSI_ENABLED; in e1000e_reset_interrupt_capability()
2084 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
2095 switch (adapter->int_mode) { in e1000e_set_interrupt_capability()
2097 if (adapter->flags & FLAG_HAS_MSIX) { in e1000e_set_interrupt_capability()
2098 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ in e1000e_set_interrupt_capability()
2099 adapter->msix_entries = kcalloc(adapter->num_vectors, in e1000e_set_interrupt_capability()
2103 if (adapter->msix_entries) { in e1000e_set_interrupt_capability()
2106 for (i = 0; i < adapter->num_vectors; i++) in e1000e_set_interrupt_capability()
2107 adapter->msix_entries[i].entry = i; in e1000e_set_interrupt_capability()
2109 err = pci_enable_msix_range(a->pdev, in e1000e_set_interrupt_capability()
2110 a->msix_entries, in e1000e_set_interrupt_capability()
2111 a->num_vectors, in e1000e_set_interrupt_capability()
2112 a->num_vectors); in e1000e_set_interrupt_capability()
2116 /* MSI-X failed, so fall through and try MSI */ in e1000e_set_interrupt_capability()
2117 e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n"); in e1000e_set_interrupt_capability()
2120 adapter->int_mode = E1000E_INT_MODE_MSI; in e1000e_set_interrupt_capability()
2123 if (!pci_enable_msi(adapter->pdev)) { in e1000e_set_interrupt_capability()
2124 adapter->flags |= FLAG_MSI_ENABLED; in e1000e_set_interrupt_capability()
2126 adapter->int_mode = E1000E_INT_MODE_LEGACY; in e1000e_set_interrupt_capability()
2136 adapter->num_vectors = 1; in e1000e_set_interrupt_capability()
2140 * e1000_request_msix - Initialize MSI-X interrupts
2143 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
2148 struct net_device *netdev = adapter->netdev; in e1000_request_msix()
2151 if (strlen(netdev->name) < (IFNAMSIZ - 5)) in e1000_request_msix()
2152 snprintf(adapter->rx_ring->name, in e1000_request_msix()
2153 sizeof(adapter->rx_ring->name) - 1, in e1000_request_msix()
2154 "%.14s-rx-0", netdev->name); in e1000_request_msix()
2156 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); in e1000_request_msix()
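/*
 * Naming example for the if/else above: a hypothetical netdev "eth0"
 * leaves room for the 5-character suffix, so the Rx vector registers
 * as "eth0-rx-0"; only when the interface name is too long for the
 * suffix is the bare netdev name copied instead.
 */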
2157 err = request_irq(adapter->msix_entries[vector].vector, in e1000_request_msix()
2158 e1000_intr_msix_rx, 0, adapter->rx_ring->name, in e1000_request_msix()
2162 adapter->rx_ring->itr_register = adapter->hw.hw_addr + in e1000_request_msix()
2164 adapter->rx_ring->itr_val = adapter->itr; in e1000_request_msix()
2167 if (strlen(netdev->name) < (IFNAMSIZ - 5)) in e1000_request_msix()
2168 snprintf(adapter->tx_ring->name, in e1000_request_msix()
2169 sizeof(adapter->tx_ring->name) - 1, in e1000_request_msix()
2170 "%.14s-tx-0", netdev->name); in e1000_request_msix()
2172 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); in e1000_request_msix()
2173 err = request_irq(adapter->msix_entries[vector].vector, in e1000_request_msix()
2174 e1000_intr_msix_tx, 0, adapter->tx_ring->name, in e1000_request_msix()
2178 adapter->tx_ring->itr_register = adapter->hw.hw_addr + in e1000_request_msix()
2180 adapter->tx_ring->itr_val = adapter->itr; in e1000_request_msix()
2183 err = request_irq(adapter->msix_entries[vector].vector, in e1000_request_msix()
2184 e1000_msix_other, 0, netdev->name, netdev); in e1000_request_msix()
2194 * e1000_request_irq - initialize interrupts
2202 struct net_device *netdev = adapter->netdev; in e1000_request_irq()
2205 if (adapter->msix_entries) { in e1000_request_irq()
2211 adapter->int_mode = E1000E_INT_MODE_MSI; in e1000_request_irq()
2214 if (adapter->flags & FLAG_MSI_ENABLED) { in e1000_request_irq()
2215 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, in e1000_request_irq()
2216 netdev->name, netdev); in e1000_request_irq()
2222 adapter->int_mode = E1000E_INT_MODE_LEGACY; in e1000_request_irq()
2225 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, in e1000_request_irq()
2226 netdev->name, netdev); in e1000_request_irq()
2235 struct net_device *netdev = adapter->netdev; in e1000_free_irq()
2237 if (adapter->msix_entries) { in e1000_free_irq()
2240 free_irq(adapter->msix_entries[vector].vector, netdev); in e1000_free_irq()
2243 free_irq(adapter->msix_entries[vector].vector, netdev); in e1000_free_irq()
2247 free_irq(adapter->msix_entries[vector].vector, netdev); in e1000_free_irq()
2251 free_irq(adapter->pdev->irq, netdev); in e1000_free_irq()
2255 * e1000_irq_disable - Mask off interrupt generation on the NIC
2260 struct e1000_hw *hw = &adapter->hw; in e1000_irq_disable()
2263 if (adapter->msix_entries) in e1000_irq_disable()
2267 if (adapter->msix_entries) { in e1000_irq_disable()
2270 for (i = 0; i < adapter->num_vectors; i++) in e1000_irq_disable()
2271 synchronize_irq(adapter->msix_entries[i].vector); in e1000_irq_disable()
2273 synchronize_irq(adapter->pdev->irq); in e1000_irq_disable()
2278 * e1000_irq_enable - Enable default interrupt generation settings
2283 struct e1000_hw *hw = &adapter->hw; in e1000_irq_enable()
2285 if (adapter->msix_entries) { in e1000_irq_enable()
2286 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); in e1000_irq_enable()
2287 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | in e1000_irq_enable()
2289 } else if (hw->mac.type >= e1000_pch_lpt) { in e1000_irq_enable()
2298 * e1000e_get_hw_control - get control of the h/w from f/w
2308 struct e1000_hw *hw = &adapter->hw; in e1000e_get_hw_control()
2313 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { in e1000e_get_hw_control()
2316 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { in e1000e_get_hw_control()
2323 * e1000e_release_hw_control - release control of the h/w to f/w
2334 struct e1000_hw *hw = &adapter->hw; in e1000e_release_hw_control()
2339 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { in e1000e_release_hw_control()
2342 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { in e1000e_release_hw_control()
2349 * e1000_alloc_ring_dma - allocate memory for a ring structure
2356 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_ring_dma()
2358 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, in e1000_alloc_ring_dma()
2360 if (!ring->desc) in e1000_alloc_ring_dma()
2361 return -ENOMEM; in e1000_alloc_ring_dma()
2367 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
2374 struct e1000_adapter *adapter = tx_ring->adapter; in e1000e_setup_tx_resources()
2375 int err = -ENOMEM, size; in e1000e_setup_tx_resources()
2377 size = sizeof(struct e1000_buffer) * tx_ring->count; in e1000e_setup_tx_resources()
2378 tx_ring->buffer_info = vzalloc(size); in e1000e_setup_tx_resources()
2379 if (!tx_ring->buffer_info) in e1000e_setup_tx_resources()
2383 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); in e1000e_setup_tx_resources()
2384 tx_ring->size = ALIGN(tx_ring->size, 4096); in e1000e_setup_tx_resources()
2390 tx_ring->next_to_use = 0; in e1000e_setup_tx_resources()
2391 tx_ring->next_to_clean = 0; in e1000e_setup_tx_resources()
2395 vfree(tx_ring->buffer_info); in e1000e_setup_tx_resources()
2401 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
2408 struct e1000_adapter *adapter = rx_ring->adapter; in e1000e_setup_rx_resources()
2410 int i, size, desc_len, err = -ENOMEM; in e1000e_setup_rx_resources()
2412 size = sizeof(struct e1000_buffer) * rx_ring->count; in e1000e_setup_rx_resources()
2413 rx_ring->buffer_info = vzalloc(size); in e1000e_setup_rx_resources()
2414 if (!rx_ring->buffer_info) in e1000e_setup_rx_resources()
2417 for (i = 0; i < rx_ring->count; i++) { in e1000e_setup_rx_resources()
2418 buffer_info = &rx_ring->buffer_info[i]; in e1000e_setup_rx_resources()
2419 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS, in e1000e_setup_rx_resources()
2422 if (!buffer_info->ps_pages) in e1000e_setup_rx_resources()
2429 rx_ring->size = rx_ring->count * desc_len; in e1000e_setup_rx_resources()
2430 rx_ring->size = ALIGN(rx_ring->size, 4096); in e1000e_setup_rx_resources()
2436 rx_ring->next_to_clean = 0; in e1000e_setup_rx_resources()
2437 rx_ring->next_to_use = 0; in e1000e_setup_rx_resources()
2438 rx_ring->rx_skb_top = NULL; in e1000e_setup_rx_resources()
2443 for (i = 0; i < rx_ring->count; i++) { in e1000e_setup_rx_resources()
2444 buffer_info = &rx_ring->buffer_info[i]; in e1000e_setup_rx_resources()
2445 kfree(buffer_info->ps_pages); in e1000e_setup_rx_resources()
2448 vfree(rx_ring->buffer_info); in e1000e_setup_rx_resources()
2454 * e1000_clean_tx_ring - Free Tx Buffers
2459 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_clean_tx_ring()
2464 for (i = 0; i < tx_ring->count; i++) { in e1000_clean_tx_ring()
2465 buffer_info = &tx_ring->buffer_info[i]; in e1000_clean_tx_ring()
2469 netdev_reset_queue(adapter->netdev); in e1000_clean_tx_ring()
2470 size = sizeof(struct e1000_buffer) * tx_ring->count; in e1000_clean_tx_ring()
2471 memset(tx_ring->buffer_info, 0, size); in e1000_clean_tx_ring()
2473 memset(tx_ring->desc, 0, tx_ring->size); in e1000_clean_tx_ring()
2475 tx_ring->next_to_use = 0; in e1000_clean_tx_ring()
2476 tx_ring->next_to_clean = 0; in e1000_clean_tx_ring()
2480 * e1000e_free_tx_resources - Free Tx Resources per Queue
2487 struct e1000_adapter *adapter = tx_ring->adapter; in e1000e_free_tx_resources()
2488 struct pci_dev *pdev = adapter->pdev; in e1000e_free_tx_resources()
2492 vfree(tx_ring->buffer_info); in e1000e_free_tx_resources()
2493 tx_ring->buffer_info = NULL; in e1000e_free_tx_resources()
2495 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, in e1000e_free_tx_resources()
2496 tx_ring->dma); in e1000e_free_tx_resources()
2497 tx_ring->desc = NULL; in e1000e_free_tx_resources()
2501 * e1000e_free_rx_resources - Free Rx Resources
2508 struct e1000_adapter *adapter = rx_ring->adapter; in e1000e_free_rx_resources()
2509 struct pci_dev *pdev = adapter->pdev; in e1000e_free_rx_resources()
2514 for (i = 0; i < rx_ring->count; i++) in e1000e_free_rx_resources()
2515 kfree(rx_ring->buffer_info[i].ps_pages); in e1000e_free_rx_resources()
2517 vfree(rx_ring->buffer_info); in e1000e_free_rx_resources()
2518 rx_ring->buffer_info = NULL; in e1000e_free_rx_resources()
2520 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, in e1000e_free_rx_resources()
2521 rx_ring->dma); in e1000e_free_rx_resources()
2522 rx_ring->desc = NULL; in e1000e_free_rx_resources()
2526 * e1000_update_itr - update the dynamic ITR value based on statistics
2527 * @itr_setting: current adapter->itr
2586 u32 new_itr = adapter->itr; in e1000_set_itr()
2588 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ in e1000_set_itr()
2589 if (adapter->link_speed != SPEED_1000) { in e1000_set_itr()
2595 if (adapter->flags2 & FLAG2_DISABLE_AIM) { in e1000_set_itr()
2600 adapter->tx_itr = e1000_update_itr(adapter->tx_itr, in e1000_set_itr()
2601 adapter->total_tx_packets, in e1000_set_itr()
2602 adapter->total_tx_bytes); in e1000_set_itr()
2604 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) in e1000_set_itr()
2605 adapter->tx_itr = low_latency; in e1000_set_itr()
2607 adapter->rx_itr = e1000_update_itr(adapter->rx_itr, in e1000_set_itr()
2608 adapter->total_rx_packets, in e1000_set_itr()
2609 adapter->total_rx_bytes); in e1000_set_itr()
2611 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) in e1000_set_itr()
2612 adapter->rx_itr = low_latency; in e1000_set_itr()
2614 current_itr = max(adapter->rx_itr, adapter->tx_itr); in e1000_set_itr()
2632 if (new_itr != adapter->itr) { in e1000_set_itr()
2637 new_itr = new_itr > adapter->itr ? in e1000_set_itr()
2638 min(adapter->itr + (new_itr >> 2), new_itr) : new_itr; in e1000_set_itr()
2639 adapter->itr = new_itr; in e1000_set_itr()
2640 adapter->rx_ring->itr_val = new_itr; in e1000_set_itr()
2641 if (adapter->msix_entries) in e1000_set_itr()
2642 adapter->rx_ring->set_itr = 1; in e1000_set_itr()
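/*
 * Worked example of the damping above (hypothetical values): with the
 * current itr at 4000 and the lookup asking for 20000, the new value
 * is clamped to min(4000 + (20000 >> 2), 20000) = 9000 - increases
 * ramp up gradually while decreases take effect immediately.
 */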
2649 * e1000e_write_itr - write the ITR value to the appropriate registers
2653 * e1000e_write_itr determines if the adapter is in MSI-X mode
2659 struct e1000_hw *hw = &adapter->hw; in e1000e_write_itr()
2662 if (adapter->msix_entries) { in e1000e_write_itr()
2665 for (vector = 0; vector < adapter->num_vectors; vector++) in e1000e_write_itr()
2666 writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector)); in e1000e_write_itr()
2673 * e1000_alloc_queues - Allocate memory for all rings
2680 adapter->tx_ring = kzalloc(size, GFP_KERNEL); in e1000_alloc_queues()
2681 if (!adapter->tx_ring) in e1000_alloc_queues()
2683 adapter->tx_ring->count = adapter->tx_ring_count; in e1000_alloc_queues()
2684 adapter->tx_ring->adapter = adapter; in e1000_alloc_queues()
2686 adapter->rx_ring = kzalloc(size, GFP_KERNEL); in e1000_alloc_queues()
2687 if (!adapter->rx_ring) in e1000_alloc_queues()
2689 adapter->rx_ring->count = adapter->rx_ring_count; in e1000_alloc_queues()
2690 adapter->rx_ring->adapter = adapter; in e1000_alloc_queues()
2695 kfree(adapter->rx_ring); in e1000_alloc_queues()
2696 kfree(adapter->tx_ring); in e1000_alloc_queues()
2697 return -ENOMEM; in e1000_alloc_queues()
2701 * e1000e_poll - NAPI Rx polling callback
2709 struct e1000_hw *hw = &adapter->hw; in e1000e_poll()
2710 struct net_device *poll_dev = adapter->netdev; in e1000e_poll()
2715 if (!adapter->msix_entries || in e1000e_poll()
2716 (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) in e1000e_poll()
2717 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring); in e1000e_poll()
2719 adapter->clean_rx(adapter->rx_ring, &work_done, budget); in e1000e_poll()
2724 /* Exit the polling mode, but don't re-enable interrupts if stack might in e1000e_poll()
2725 * poll us due to busy-polling in e1000e_poll()
2728 if (adapter->itr_setting & 3) in e1000e_poll()
2730 if (!test_bit(__E1000_DOWN, &adapter->state)) { in e1000e_poll()
2731 if (adapter->msix_entries) in e1000e_poll()
2732 ew32(IMS, adapter->rx_ring->ims_val); in e1000e_poll()
2745 struct e1000_hw *hw = &adapter->hw; in e1000_vlan_rx_add_vid()
2749 if ((adapter->hw.mng_cookie.status & in e1000_vlan_rx_add_vid()
2751 (vid == adapter->mng_vlan_id)) in e1000_vlan_rx_add_vid()
2755 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000_vlan_rx_add_vid()
2759 hw->mac.ops.write_vfta(hw, index, vfta); in e1000_vlan_rx_add_vid()
2762 set_bit(vid, adapter->active_vlans); in e1000_vlan_rx_add_vid()
2771 struct e1000_hw *hw = &adapter->hw; in e1000_vlan_rx_kill_vid()
2774 if ((adapter->hw.mng_cookie.status & in e1000_vlan_rx_kill_vid()
2776 (vid == adapter->mng_vlan_id)) { in e1000_vlan_rx_kill_vid()
2783 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000_vlan_rx_kill_vid()
2787 hw->mac.ops.write_vfta(hw, index, vfta); in e1000_vlan_rx_kill_vid()
2790 clear_bit(vid, adapter->active_vlans); in e1000_vlan_rx_kill_vid()
2796 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
2801 struct net_device *netdev = adapter->netdev; in e1000e_vlan_filter_disable()
2802 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_filter_disable()
2805 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000e_vlan_filter_disable()
2811 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { in e1000e_vlan_filter_disable()
2813 adapter->mng_vlan_id); in e1000e_vlan_filter_disable()
2814 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; in e1000e_vlan_filter_disable()
2820 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
2825 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_filter_enable()
2828 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000e_vlan_filter_enable()
2838 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
2843 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_strip_disable()
2853 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
2858 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_strip_enable()
2869 struct net_device *netdev = adapter->netdev; in e1000_update_mng_vlan()
2870 u16 vid = adapter->hw.mng_cookie.vlan_id; in e1000_update_mng_vlan()
2871 u16 old_vid = adapter->mng_vlan_id; in e1000_update_mng_vlan()
2873 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { in e1000_update_mng_vlan()
2875 adapter->mng_vlan_id = vid; in e1000_update_mng_vlan()
2886 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); in e1000_restore_vlan()
2888 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) in e1000_restore_vlan()
2889 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); in e1000_restore_vlan()
2894 struct e1000_hw *hw = &adapter->hw; in e1000_init_manageability_pt()
2897 if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) in e1000_init_manageability_pt()
2909 switch (hw->mac.type) { in e1000_init_manageability_pt()
2915 /* Check if IPMI pass-through decision filter already exists; in e1000_init_manageability_pt()
2946 e_warn("Unable to create IPMI pass-through filter\n"); in e1000_init_manageability_pt()
2955 * e1000_configure_tx - Configure Transmit Unit after Reset
2962 struct e1000_hw *hw = &adapter->hw; in e1000_configure_tx()
2963 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_configure_tx()
2968 tdba = tx_ring->dma; in e1000_configure_tx()
2969 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); in e1000_configure_tx()
2975 tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); in e1000_configure_tx()
2976 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0); in e1000_configure_tx()
2978 writel(0, tx_ring->head); in e1000_configure_tx()
2979 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_configure_tx()
2982 writel(0, tx_ring->tail); in e1000_configure_tx()
2985 ew32(TIDV, adapter->tx_int_delay); in e1000_configure_tx()
2987 ew32(TADV, adapter->tx_abs_int_delay); in e1000_configure_tx()
2989 if (adapter->flags2 & FLAG2_DMA_BURST) { in e1000_configure_tx()
3015 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { in e1000_configure_tx()
3026 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { in e1000_configure_tx()
3036 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; in e1000_configure_tx()
3039 if (adapter->tx_int_delay) in e1000_configure_tx()
3040 adapter->txd_cmd |= E1000_TXD_CMD_IDE; in e1000_configure_tx()
3043 adapter->txd_cmd |= E1000_TXD_CMD_RS; in e1000_configure_tx()
3047 hw->mac.ops.config_collision_dist(hw); in e1000_configure_tx()
3050 if (hw->mac.type == e1000_pch_spt) { in e1000_configure_tx()
3069 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
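PAGE_USE_COUNT above is a ceiling division by the page size done with shift and mask instead of '/' and '%'. A userspace check, assuming 4 KiB pages for illustration:

	#include <stdio.h>

	#define PAGE_SHIFT 12			/* assumed 4 KiB pages */
	#define PAGE_SIZE  (1u << PAGE_SHIFT)
	#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
				   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))

	int main(void)
	{
		printf("%u\n", PAGE_USE_COUNT(4096u));	/* 1: exact fit */
		printf("%u\n", PAGE_USE_COUNT(4097u));	/* 2: one byte spills over */
		printf("%u\n", PAGE_USE_COUNT(9000u));	/* 3: jumbo-ish MTU */
		return 0;
	}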
3072 * e1000_setup_rctl - configure the receive control registers
3077 struct e1000_hw *hw = &adapter->hw; in e1000_setup_rctl()
3081 /* Workaround Si errata on PCHx - configure jumbo frame flow. in e1000_setup_rctl()
3085 if (hw->mac.type >= e1000_pch2lan) { in e1000_setup_rctl()
3088 if (adapter->netdev->mtu > ETH_DATA_LEN) in e1000_setup_rctl()
3102 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); in e1000_setup_rctl()
3108 if (adapter->netdev->mtu <= ETH_DATA_LEN) in e1000_setup_rctl()
3117 if (adapter->flags2 & FLAG2_CRC_STRIPPING) in e1000_setup_rctl()
3120 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ in e1000_setup_rctl()
3121 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { in e1000_setup_rctl()
3140 switch (adapter->rx_buffer_len) { in e1000_setup_rctl()
3162 /* 82571 and greater support packet-split where the protocol in e1000_setup_rctl()
3163 * header is placed in skb->data and the packet data is in e1000_setup_rctl()
3164 * placed in pages hanging off of skb_shinfo(skb)->nr_frags. in e1000_setup_rctl()
3165 * In the case of a non-split, skb->data is linearly filled, in e1000_setup_rctl()
3166 * followed by the page buffers. Therefore, skb->data is in e1000_setup_rctl()
3176 pages = PAGE_USE_COUNT(adapter->netdev->mtu); in e1000_setup_rctl()
3178 adapter->rx_ps_pages = pages; in e1000_setup_rctl()
3180 adapter->rx_ps_pages = 0; in e1000_setup_rctl()
3182 if (adapter->rx_ps_pages) { in e1000_setup_rctl()
3188 psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT; in e1000_setup_rctl()
3190 switch (adapter->rx_ps_pages) { in e1000_setup_rctl()
3206 if (adapter->netdev->features & NETIF_F_RXALL) { in e1000_setup_rctl()
3224 adapter->flags &= ~FLAG_RESTART_NOW; in e1000_setup_rctl()
3228 * e1000_configure_rx - Configure Receive Unit after Reset
3235 struct e1000_hw *hw = &adapter->hw; in e1000_configure_rx()
3236 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_configure_rx()
3240 if (adapter->rx_ps_pages) { in e1000_configure_rx()
3242 rdlen = rx_ring->count * in e1000_configure_rx()
3244 adapter->clean_rx = e1000_clean_rx_irq_ps; in e1000_configure_rx()
3245 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; in e1000_configure_rx()
3246 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { in e1000_configure_rx()
3247 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); in e1000_configure_rx()
3248 adapter->clean_rx = e1000_clean_jumbo_rx_irq; in e1000_configure_rx()
3249 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; in e1000_configure_rx()
3251 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); in e1000_configure_rx()
3252 adapter->clean_rx = e1000_clean_rx_irq; in e1000_configure_rx()
3253 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; in e1000_configure_rx()
3258 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) in e1000_configure_rx()
3263 if (adapter->flags2 & FLAG2_DMA_BURST) { in e1000_configure_rx()
3277 ew32(RDTR, adapter->rx_int_delay); in e1000_configure_rx()
3280 ew32(RADV, adapter->rx_abs_int_delay); in e1000_configure_rx()
3281 if ((adapter->itr_setting != 0) && (adapter->itr != 0)) in e1000_configure_rx()
3282 e1000e_write_itr(adapter, adapter->itr); in e1000_configure_rx()
3285 /* Auto-Mask interrupts upon ICR access */ in e1000_configure_rx()
3294 rdba = rx_ring->dma; in e1000_configure_rx()
3300 rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); in e1000_configure_rx()
3301 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); in e1000_configure_rx()
3303 writel(0, rx_ring->head); in e1000_configure_rx()
3304 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_configure_rx()
3307 writel(0, rx_ring->tail); in e1000_configure_rx()
3311 if (adapter->netdev->features & NETIF_F_RXCSUM) in e1000_configure_rx()
3317 /* With jumbo frames, excessive C-state transition latencies result in e1000_configure_rx()
3320 if (adapter->netdev->mtu > ETH_DATA_LEN) { in e1000_configure_rx()
3322 ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 - in e1000_configure_rx()
3323 adapter->max_frame_size) * 8 / 1000; in e1000_configure_rx()
3325 if (adapter->flags & FLAG_IS_ICH) { in e1000_configure_rx()
3331 dev_info(&adapter->pdev->dev, in e1000_configure_rx()
3332 "Some CPU C-states have been disabled in order to enable jumbo frames\n"); in e1000_configure_rx()
3333 cpu_latency_qos_update_request(&adapter->pm_qos_req, lat); in e1000_configure_rx()
3335 cpu_latency_qos_update_request(&adapter->pm_qos_req, in e1000_configure_rx()
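The latency bound above works out as follows: the free Rx FIFO space in bytes times 8 gives bits, and at 1 Gb/s one bit occupies one nanosecond, so dividing by 1000 yields the microseconds until the FIFO would overflow, the unit the CPU latency QoS request takes. A worked sketch with invented numbers:

	#include <stdio.h>

	/* Worked version of the C-state latency bound: with jumbo frames
	 * the CPU must wake before the Rx FIFO fills at line rate.
	 */
	int main(void)
	{
		unsigned int rxa_kb = 32;	/* hypothetical Rx FIFO allocation, KiB */
		unsigned int max_frame = 9018;	/* hypothetical jumbo frame, bytes */
		unsigned int lat_us = (rxa_kb * 1024 - max_frame) * 8 / 1000;

		printf("tolerable wakeup latency: %u us\n", lat_us);	/* 190 */
		return 0;
	}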
3344 * e1000e_write_mc_addr_list - write multicast addresses to MTA
3348 * Returns: -ENOMEM on failure
3355 struct e1000_hw *hw = &adapter->hw; in e1000e_write_mc_addr_list()
3362 hw->mac.ops.update_mc_addr_list(hw, NULL, 0); in e1000e_write_mc_addr_list()
3368 return -ENOMEM; in e1000e_write_mc_addr_list()
3373 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); in e1000e_write_mc_addr_list()
3375 hw->mac.ops.update_mc_addr_list(hw, mta_list, i); in e1000e_write_mc_addr_list()
3382 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
3386 * Returns: -ENOMEM on failure/insufficient address space
3393 struct e1000_hw *hw = &adapter->hw; in e1000e_write_uc_addr_list()
3397 rar_entries = hw->mac.ops.rar_get_count(hw); in e1000e_write_uc_addr_list()
3400 rar_entries--; in e1000e_write_uc_addr_list()
3403 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) in e1000e_write_uc_addr_list()
3404 rar_entries--; in e1000e_write_uc_addr_list()
3408 return -ENOMEM; in e1000e_write_uc_addr_list()
3421 ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); in e1000e_write_uc_addr_list()
3423 return -ENOMEM; in e1000e_write_uc_addr_list()
3429 for (; rar_entries > 0; rar_entries--) { in e1000e_write_uc_addr_list()
3439 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
3445 * promiscuous mode, and all-multi behavior.
3450 struct e1000_hw *hw = &adapter->hw; in e1000e_set_rx_mode()
3453 if (pm_runtime_suspended(netdev->dev.parent)) in e1000e_set_rx_mode()
3462 if (netdev->flags & IFF_PROMISC) { in e1000e_set_rx_mode()
3469 if (netdev->flags & IFF_ALLMULTI) { in e1000e_set_rx_mode()
3492 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) in e1000e_set_rx_mode()
3500 struct e1000_hw *hw = &adapter->hw; in e1000e_setup_rss_hash()
3531 * e1000e_get_base_timinca - get default SYSTIM time increment attributes
3540 struct e1000_hw *hw = &adapter->hw; in e1000e_get_base_timinca()
3546 if ((hw->mac.type >= e1000_pch_lpt) && in e1000e_get_base_timinca()
3557 switch (hw->mac.type) { in e1000e_get_base_timinca()
3563 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; in e1000e_get_base_timinca()
3571 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; in e1000e_get_base_timinca()
3577 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3585 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3596 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3602 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3611 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3614 return -EINVAL; in e1000e_get_base_timinca()
3624 * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
3642 struct e1000_hw *hw = &adapter->hw; in e1000e_config_hwtstamp()
3651 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) in e1000e_config_hwtstamp()
3652 return -EINVAL; in e1000e_config_hwtstamp()
3654 /* flags reserved for future extensions - must be zero */ in e1000e_config_hwtstamp()
3655 if (config->flags) in e1000e_config_hwtstamp()
3656 return -EINVAL; in e1000e_config_hwtstamp()
3658 switch (config->tx_type) { in e1000e_config_hwtstamp()
3665 return -ERANGE; in e1000e_config_hwtstamp()
3668 switch (config->rx_filter) { in e1000e_config_hwtstamp()
3720 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; in e1000e_config_hwtstamp()
3726 * Delay Request messages but not both so fall-through to in e1000e_config_hwtstamp()
3735 config->rx_filter = HWTSTAMP_FILTER_ALL; in e1000e_config_hwtstamp()
3738 return -ERANGE; in e1000e_config_hwtstamp()
3741 adapter->hwtstamp_config = *config; in e1000e_config_hwtstamp()
3751 return -EAGAIN; in e1000e_config_hwtstamp()
3764 return -EAGAIN; in e1000e_config_hwtstamp()
3791 * e1000_configure - configure the hardware for Rx and Tx
3796 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_configure()
3798 e1000e_set_rx_mode(adapter->netdev); in e1000_configure()
3805 if (adapter->netdev->features & NETIF_F_RXHASH) in e1000_configure()
3809 adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL); in e1000_configure()
3813 * e1000e_power_up_phy - restore link in case the phy was powered down
3822 if (adapter->hw.phy.ops.power_up) in e1000e_power_up_phy()
3823 adapter->hw.phy.ops.power_up(&adapter->hw); in e1000e_power_up_phy()
3825 adapter->hw.mac.ops.setup_link(&adapter->hw); in e1000e_power_up_phy()
3829 * e1000_power_down_phy - Power down the PHY
3837 if (adapter->hw.phy.ops.power_down) in e1000_power_down_phy()
3838 adapter->hw.phy.ops.power_down(&adapter->hw); in e1000_power_down_phy()
3842 * e1000_flush_tx_ring - remove all descriptors from the tx_ring
3852 struct e1000_hw *hw = &adapter->hw; in e1000_flush_tx_ring()
3853 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_flush_tx_ring()
3861 BUG_ON(tdt != tx_ring->next_to_use); in e1000_flush_tx_ring()
3862 tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use); in e1000_flush_tx_ring()
3863 tx_desc->buffer_addr = cpu_to_le64(tx_ring->dma); in e1000_flush_tx_ring()
3865 tx_desc->lower.data = cpu_to_le32(txd_lower | size); in e1000_flush_tx_ring()
3866 tx_desc->upper.data = 0; in e1000_flush_tx_ring()
3869 tx_ring->next_to_use++; in e1000_flush_tx_ring()
3870 if (tx_ring->next_to_use == tx_ring->count) in e1000_flush_tx_ring()
3871 tx_ring->next_to_use = 0; in e1000_flush_tx_ring()
3872 ew32(TDT(0), tx_ring->next_to_use); in e1000_flush_tx_ring()
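The increment-and-wrap above is the producer pattern used on every ring in this file: advance next_to_use modulo the ring size, then publish it through the tail register. A trivial stand-in (no DMA, printf for the tail write):

	#include <stdio.h>

	#define RING_COUNT 8	/* hypothetical ring size */

	int main(void)
	{
		unsigned int next_to_use = 6;

		for (int n = 0; n < 4; n++) {
			/* post one descriptor at next_to_use, then advance */
			next_to_use++;
			if (next_to_use == RING_COUNT)
				next_to_use = 0;
			printf("tail <- %u\n", next_to_use);	/* 7, 0, 1, 2 */
		}
		return 0;
	}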
3877 * e1000_flush_rx_ring - remove all descriptors from the rx_ring
3885 struct e1000_hw *hw = &adapter->hw; in e1000_flush_rx_ring()
3910 * e1000_flush_desc_rings - remove all descriptors from the descriptor rings
3925 struct e1000_hw *hw = &adapter->hw; in e1000_flush_desc_rings()
3933 pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, in e1000_flush_desc_rings()
3939 pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, in e1000_flush_desc_rings()
3946 * e1000e_systim_reset - reset the timesync registers after a hardware reset
3956 struct ptp_clock_info *info = &adapter->ptp_clock_info; in e1000e_systim_reset()
3957 struct e1000_hw *hw = &adapter->hw; in e1000e_systim_reset()
3962 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) in e1000e_systim_reset()
3965 if (info->adjfreq) { in e1000e_systim_reset()
3967 ret_val = info->adjfreq(info, adapter->ptp_delta); in e1000e_systim_reset()
3976 dev_warn(&adapter->pdev->dev, in e1000e_systim_reset()
3983 spin_lock_irqsave(&adapter->systim_lock, flags); in e1000e_systim_reset()
3984 timecounter_init(&adapter->tc, &adapter->cc, in e1000e_systim_reset()
3986 spin_unlock_irqrestore(&adapter->systim_lock, flags); in e1000e_systim_reset()
3989 e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); in e1000e_systim_reset()
3993 * e1000e_reset - bring the hardware into a known good state
3997 * require a configuration cycle of the hardware - those cannot be
4003 struct e1000_mac_info *mac = &adapter->hw.mac; in e1000e_reset()
4004 struct e1000_fc_info *fc = &adapter->hw.fc; in e1000e_reset()
4005 struct e1000_hw *hw = &adapter->hw; in e1000e_reset()
4007 u32 pba = adapter->pba; in e1000e_reset()
4013 if (adapter->max_frame_size > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) { in e1000e_reset()
4029 min_tx_space = (adapter->max_frame_size + in e1000e_reset()
4030 sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2; in e1000e_reset()
4034 min_rx_space = adapter->max_frame_size; in e1000e_reset()
4043 ((min_tx_space - tx_space) < pba)) { in e1000e_reset()
4044 pba -= min_tx_space - tx_space; in e1000e_reset()
4061 * - 90% of the Rx FIFO size, and in e1000e_reset()
4062 * - the full Rx FIFO size minus one full frame in e1000e_reset()
4064 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) in e1000e_reset()
4065 fc->pause_time = 0xFFFF; in e1000e_reset()
4067 fc->pause_time = E1000_FC_PAUSE_TIME; in e1000e_reset()
4068 fc->send_xon = true; in e1000e_reset()
4069 fc->current_mode = fc->requested_mode; in e1000e_reset()
4071 switch (hw->mac.type) { in e1000e_reset()
4074 if (adapter->netdev->mtu > ETH_DATA_LEN) { in e1000e_reset()
4077 fc->high_water = 0x2800; in e1000e_reset()
4078 fc->low_water = fc->high_water - 8; in e1000e_reset()
4084 ((pba << 10) - adapter->max_frame_size)); in e1000e_reset()
4086 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ in e1000e_reset()
4087 fc->low_water = fc->high_water - 8; in e1000e_reset()
4093 if (adapter->netdev->mtu > ETH_DATA_LEN) { in e1000e_reset()
4094 fc->high_water = 0x3500; in e1000e_reset()
4095 fc->low_water = 0x1500; in e1000e_reset()
4097 fc->high_water = 0x5000; in e1000e_reset()
4098 fc->low_water = 0x3000; in e1000e_reset()
4100 fc->refresh_time = 0x1000; in e1000e_reset()
4109 fc->refresh_time = 0xFFFF; in e1000e_reset()
4110 fc->pause_time = 0xFFFF; in e1000e_reset()
4112 if (adapter->netdev->mtu <= ETH_DATA_LEN) { in e1000e_reset()
4113 fc->high_water = 0x05C20; in e1000e_reset()
4114 fc->low_water = 0x05048; in e1000e_reset()
4120 fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH; in e1000e_reset()
4121 fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL; in e1000e_reset()
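In the default case the watermarks land at 90% and 80% of the Rx packet buffer, masked to the registers' 8-byte granularity (and, per the comment above, high water is also bounded by the FIFO size minus one full frame). A worked sketch; the ~7 mask stands in for the E1000_FCRTH_RTH/E1000_FCRTL_RTL fields and the PBA value is invented:

	#include <stdio.h>

	int main(void)
	{
		unsigned int pba_kb = 26;			/* hypothetical Rx PBA, KiB */
		unsigned int bytes = pba_kb << 10;		/* KiB -> bytes */
		unsigned int high = (bytes * 9 / 10) & ~7u;	/* send XOFF above this */
		unsigned int low  = (bytes * 8 / 10) & ~7u;	/* send XON below this */

		printf("fifo=%u high=%u low=%u\n", bytes, high, low);
		return 0;
	}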
4130 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96, in e1000e_reset()
4136 if (adapter->itr_setting & 0x3) { in e1000e_reset()
4137 if ((adapter->max_frame_size * 2) > (pba << 10)) { in e1000e_reset()
4138 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { in e1000e_reset()
4139 dev_info(&adapter->pdev->dev, in e1000e_reset()
4141 adapter->flags2 |= FLAG2_DISABLE_AIM; in e1000e_reset()
4144 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { in e1000e_reset()
4145 dev_info(&adapter->pdev->dev, in e1000e_reset()
4147 adapter->flags2 &= ~FLAG2_DISABLE_AIM; in e1000e_reset()
4148 adapter->itr = 20000; in e1000e_reset()
4149 e1000e_write_itr(adapter, adapter->itr); in e1000e_reset()
4153 if (hw->mac.type >= e1000_pch_spt) in e1000e_reset()
4156 mac->ops.reset_hw(hw); in e1000e_reset()
4161 if (adapter->flags & FLAG_HAS_AMT) in e1000e_reset()
4166 if (mac->ops.init_hw(hw)) in e1000e_reset()
4180 if (adapter->flags2 & FLAG2_HAS_EEE) { in e1000e_reset()
4184 switch (hw->phy.type) { in e1000e_reset()
4192 dev_err(&adapter->pdev->dev, in e1000e_reset()
4197 ret_val = hw->phy.ops.acquire(hw); in e1000e_reset()
4199 dev_err(&adapter->pdev->dev, in e1000e_reset()
4200 "EEE advertisement - unable to acquire PHY\n"); in e1000e_reset()
4205 hw->dev_spec.ich8lan.eee_disable ? in e1000e_reset()
4206 0 : adapter->eee_advert); in e1000e_reset()
4208 hw->phy.ops.release(hw); in e1000e_reset()
4211 if (!netif_running(adapter->netdev) && in e1000e_reset()
4212 !test_bit(__E1000_TESTING, &adapter->state)) in e1000e_reset()
4217 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && in e1000e_reset()
4218 !(adapter->flags & FLAG_SMART_POWER_DOWN)) { in e1000e_reset()
4228 if (hw->mac.type >= e1000_pch_spt && adapter->int_mode == 0) { in e1000e_reset()
4245 * e1000e_trigger_lsc - trigger an LSC interrupt
4252 struct e1000_hw *hw = &adapter->hw; in e1000e_trigger_lsc()
4254 if (adapter->msix_entries) in e1000e_trigger_lsc()
4265 clear_bit(__E1000_DOWN, &adapter->state); in e1000e_up()
4267 if (adapter->msix_entries) in e1000e_up()
4278 struct e1000_hw *hw = &adapter->hw; in e1000e_flush_descriptors()
4280 if (!(adapter->flags2 & FLAG2_DMA_BURST)) in e1000e_flush_descriptors()
4284 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000e_flush_descriptors()
4285 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); in e1000e_flush_descriptors()
4293 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000e_flush_descriptors()
4294 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); in e1000e_flush_descriptors()
4303 * e1000e_down - quiesce the device and optionally reset the hardware
4309 struct net_device *netdev = adapter->netdev; in e1000e_down()
4310 struct e1000_hw *hw = &adapter->hw; in e1000e_down()
4316 set_bit(__E1000_DOWN, &adapter->state); in e1000e_down()
4322 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) in e1000e_down()
4339 napi_synchronize(&adapter->napi); in e1000e_down()
4341 del_timer_sync(&adapter->watchdog_timer); in e1000e_down()
4342 del_timer_sync(&adapter->phy_info_timer); in e1000e_down()
4344 spin_lock(&adapter->stats64_lock); in e1000e_down()
4346 spin_unlock(&adapter->stats64_lock); in e1000e_down()
4350 adapter->link_speed = 0; in e1000e_down()
4351 adapter->link_duplex = 0; in e1000e_down()
4354 if ((hw->mac.type >= e1000_pch2lan) && in e1000e_down()
4355 (adapter->netdev->mtu > ETH_DATA_LEN) && in e1000e_down()
4359 if (!pci_channel_offline(adapter->pdev)) { in e1000e_down()
4362 else if (hw->mac.type >= e1000_pch_spt) in e1000e_down()
4365 e1000_clean_tx_ring(adapter->tx_ring); in e1000e_down()
4366 e1000_clean_rx_ring(adapter->rx_ring); in e1000e_down()
4372 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) in e1000e_reinit_locked()
4376 clear_bit(__E1000_RESETTING, &adapter->state); in e1000e_reinit_locked()
4380 * e1000e_sanitize_systim - sanitize raw cycle counter reads
4406 time_delta = systim_next - systim; in e1000e_sanitize_systim()
4421 * e1000e_read_systim - read SYSTIM register
4429 struct e1000_hw *hw = &adapter->hw; in e1000e_read_systim()
4436 * to fix that we test for overflow and if true, we re-read systime. in e1000e_read_systim()
4443 if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) { in e1000e_read_systim()
4458 if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW) in e1000e_read_systim()
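The overflow test above guards the split 64-bit read: SYSTIML and SYSTIMH are separate 32-bit registers, so a low-half wrap between the two reads would corrupt the combined value by roughly 2^32. A standalone model with a fake ticking counter; the increment-mask value below is an assumption of this sketch:

	#include <stdio.h>
	#include <stdint.h>

	static uint64_t systim = 0xffffffffULL - 2;	/* about to wrap */

	static uint32_t read_systiml(void)
	{
		uint32_t lo = (uint32_t)systim;

		systim += 4;	/* pretend the counter ticks between reads */
		return lo;
	}

	static uint32_t read_systimh(void)
	{
		return (uint32_t)(systim >> 32);
	}

	int main(void)
	{
		uint32_t incvalue_mask = 0x00ffffff;	/* assumed max ticks/read */
		uint32_t lo = read_systiml();

		if (lo >= 0xffffffffu - incvalue_mask)
			lo = read_systiml();	/* too close to wrap: re-read */

		uint64_t now = ((uint64_t)read_systimh() << 32) | lo;

		printf("systim = %llu\n", (unsigned long long)now);
		return 0;
	}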
4465 * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
4477 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
4486 struct net_device *netdev = adapter->netdev; in e1000_sw_init()
4488 adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; in e1000_sw_init()
4489 adapter->rx_ps_bsize0 = 128; in e1000_sw_init()
4490 adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; in e1000_sw_init()
4491 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; in e1000_sw_init()
4492 adapter->tx_ring_count = E1000_DEFAULT_TXD; in e1000_sw_init()
4493 adapter->rx_ring_count = E1000_DEFAULT_RXD; in e1000_sw_init()
4495 spin_lock_init(&adapter->stats64_lock); in e1000_sw_init()
4500 return -ENOMEM; in e1000_sw_init()
4503 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { in e1000_sw_init()
4504 adapter->cc.read = e1000e_cyclecounter_read; in e1000_sw_init()
4505 adapter->cc.mask = CYCLECOUNTER_MASK(64); in e1000_sw_init()
4506 adapter->cc.mult = 1; in e1000_sw_init()
4509 spin_lock_init(&adapter->systim_lock); in e1000_sw_init()
4510 INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work); in e1000_sw_init()
4516 set_bit(__E1000_DOWN, &adapter->state); in e1000_sw_init()
4521 * e1000_intr_msi_test - Interrupt Handler
4529 struct e1000_hw *hw = &adapter->hw; in e1000_intr_msi_test()
4534 adapter->flags &= ~FLAG_MSI_TEST_FAILED; in e1000_intr_msi_test()
4545 * e1000_test_msi_interrupt - Returns 0 for successful test
4552 struct net_device *netdev = adapter->netdev; in e1000_test_msi_interrupt()
4553 struct e1000_hw *hw = &adapter->hw; in e1000_test_msi_interrupt()
4567 adapter->flags |= FLAG_MSI_TEST_FAILED; in e1000_test_msi_interrupt()
4569 err = pci_enable_msi(adapter->pdev); in e1000_test_msi_interrupt()
4573 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, in e1000_test_msi_interrupt()
4574 netdev->name, netdev); in e1000_test_msi_interrupt()
4576 pci_disable_msi(adapter->pdev); in e1000_test_msi_interrupt()
4596 if (adapter->flags & FLAG_MSI_TEST_FAILED) { in e1000_test_msi_interrupt()
4597 adapter->int_mode = E1000E_INT_MODE_LEGACY; in e1000_test_msi_interrupt()
4603 free_irq(adapter->pdev->irq, netdev); in e1000_test_msi_interrupt()
4604 pci_disable_msi(adapter->pdev); in e1000_test_msi_interrupt()
4612 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
4622 if (!(adapter->flags & FLAG_MSI_ENABLED)) in e1000_test_msi()
4626 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); in e1000_test_msi()
4628 pci_write_config_word(adapter->pdev, PCI_COMMAND, in e1000_test_msi()
4633 /* re-enable SERR */ in e1000_test_msi()
4635 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); in e1000_test_msi()
4637 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); in e1000_test_msi()
4644 * e1000e_open - Called when a network interface is made active
4658 struct e1000_hw *hw = &adapter->hw; in e1000e_open()
4659 struct pci_dev *pdev = adapter->pdev; in e1000e_open()
4663 if (test_bit(__E1000_TESTING, &adapter->state)) in e1000e_open()
4664 return -EBUSY; in e1000e_open()
4666 pm_runtime_get_sync(&pdev->dev); in e1000e_open()
4672 err = e1000e_setup_tx_resources(adapter->tx_ring); in e1000e_open()
4677 err = e1000e_setup_rx_resources(adapter->rx_ring); in e1000e_open()
4684 if (adapter->flags & FLAG_HAS_AMT) { in e1000e_open()
4691 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; in e1000e_open()
4692 if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) in e1000e_open()
4696 cpu_latency_qos_add_request(&adapter->pm_qos_req, PM_QOS_DEFAULT_VALUE); in e1000e_open()
4713 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { in e1000e_open()
4722 clear_bit(__E1000_DOWN, &adapter->state); in e1000e_open()
4724 napi_enable(&adapter->napi); in e1000e_open()
4728 adapter->tx_hang_recheck = false; in e1000e_open()
4730 hw->mac.get_link_status = true; in e1000e_open()
4731 pm_runtime_put(&pdev->dev); in e1000e_open()
4738 cpu_latency_qos_remove_request(&adapter->pm_qos_req); in e1000e_open()
4741 e1000e_free_rx_resources(adapter->rx_ring); in e1000e_open()
4743 e1000e_free_tx_resources(adapter->tx_ring); in e1000e_open()
4746 pm_runtime_put_sync(&pdev->dev); in e1000e_open()
4752 * e1000e_close - Disables a network interface
4757 * The close entry point is called when an interface is de-activated
4765 struct pci_dev *pdev = adapter->pdev; in e1000e_close()
4768 while (test_bit(__E1000_RESETTING, &adapter->state) && count--) in e1000e_close()
4771 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); in e1000e_close()
4773 pm_runtime_get_sync(&pdev->dev); in e1000e_close()
4783 napi_disable(&adapter->napi); in e1000e_close()
4785 e1000e_free_tx_resources(adapter->tx_ring); in e1000e_close()
4786 e1000e_free_rx_resources(adapter->rx_ring); in e1000e_close()
4791 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) in e1000e_close()
4793 adapter->mng_vlan_id); in e1000e_close()
4798 if ((adapter->flags & FLAG_HAS_AMT) && in e1000e_close()
4799 !test_bit(__E1000_TESTING, &adapter->state)) in e1000e_close()
4802 cpu_latency_qos_remove_request(&adapter->pm_qos_req); in e1000e_close()
4804 pm_runtime_put_sync(&pdev->dev); in e1000e_close()
4810 * e1000_set_mac - Change the Ethernet Address of the NIC
4819 struct e1000_hw *hw = &adapter->hw; in e1000_set_mac()
4822 if (!is_valid_ether_addr(addr->sa_data)) in e1000_set_mac()
4823 return -EADDRNOTAVAIL; in e1000_set_mac()
4825 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); in e1000_set_mac()
4826 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); in e1000_set_mac()
4828 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); in e1000_set_mac()
4830 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { in e1000_set_mac()
4832 e1000e_set_laa_state_82571(&adapter->hw, 1); in e1000_set_mac()
4841 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, in e1000_set_mac()
4842 adapter->hw.mac.rar_entry_count - 1); in e1000_set_mac()
4849 * e1000e_update_phy_task - work thread to update phy
4861 struct e1000_hw *hw = &adapter->hw; in e1000e_update_phy_task()
4863 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000e_update_phy_task()
4869 if (hw->phy.type >= e1000_phy_82579) in e1000e_update_phy_task()
4874 * e1000_update_phy_info - timer call-back to update PHY info in e1000_update_phy_info()
4884 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000_update_phy_info()
4887 schedule_work(&adapter->update_phy_task); in e1000_update_phy_info()
4891 * e1000e_update_phy_stats - Update the PHY statistics counters
4894 * Read/clear the upper 16-bit PHY registers and read/accumulate lower
4898 struct e1000_hw *hw = &adapter->hw; in e1000e_update_phy_stats()
4902 ret_val = hw->phy.ops.acquire(hw); in e1000e_update_phy_stats()
4909 hw->phy.addr = 1; in e1000e_update_phy_stats()
4915 ret_val = hw->phy.ops.set_page(hw, in e1000e_update_phy_stats()
4922 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); in e1000e_update_phy_stats()
4923 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); in e1000e_update_phy_stats()
4925 adapter->stats.scc += phy_data; in e1000e_update_phy_stats()
4928 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); in e1000e_update_phy_stats()
4929 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); in e1000e_update_phy_stats()
4931 adapter->stats.ecol += phy_data; in e1000e_update_phy_stats()
4934 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); in e1000e_update_phy_stats()
4935 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); in e1000e_update_phy_stats()
4937 adapter->stats.mcc += phy_data; in e1000e_update_phy_stats()
4940 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); in e1000e_update_phy_stats()
4941 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); in e1000e_update_phy_stats()
4943 adapter->stats.latecol += phy_data; in e1000e_update_phy_stats()
4945 /* Collision Count - also used for adaptive IFS */ in e1000e_update_phy_stats()
4946 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); in e1000e_update_phy_stats()
4947 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); in e1000e_update_phy_stats()
4949 hw->mac.collision_delta = phy_data; in e1000e_update_phy_stats()
4952 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); in e1000e_update_phy_stats()
4953 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); in e1000e_update_phy_stats()
4955 adapter->stats.dc += phy_data; in e1000e_update_phy_stats()
4958 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); in e1000e_update_phy_stats()
4959 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); in e1000e_update_phy_stats()
4961 adapter->stats.tncrs += phy_data; in e1000e_update_phy_stats()
4964 hw->phy.ops.release(hw); in e1000e_update_phy_stats()
4968 * e1000e_update_stats - Update the board statistics counters
4973 struct net_device *netdev = adapter->netdev; in e1000e_update_stats()
4974 struct e1000_hw *hw = &adapter->hw; in e1000e_update_stats()
4975 struct pci_dev *pdev = adapter->pdev; in e1000e_update_stats()
4980 if (adapter->link_speed == 0) in e1000e_update_stats()
4985 adapter->stats.crcerrs += er32(CRCERRS); in e1000e_update_stats()
4986 adapter->stats.gprc += er32(GPRC); in e1000e_update_stats()
4987 adapter->stats.gorc += er32(GORCL); in e1000e_update_stats()
4989 adapter->stats.bprc += er32(BPRC); in e1000e_update_stats()
4990 adapter->stats.mprc += er32(MPRC); in e1000e_update_stats()
4991 adapter->stats.roc += er32(ROC); in e1000e_update_stats()
4993 adapter->stats.mpc += er32(MPC); in e1000e_update_stats()
4995 /* Half-duplex statistics */ in e1000e_update_stats()
4996 if (adapter->link_duplex == HALF_DUPLEX) { in e1000e_update_stats()
4997 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { in e1000e_update_stats()
5000 adapter->stats.scc += er32(SCC); in e1000e_update_stats()
5001 adapter->stats.ecol += er32(ECOL); in e1000e_update_stats()
5002 adapter->stats.mcc += er32(MCC); in e1000e_update_stats()
5003 adapter->stats.latecol += er32(LATECOL); in e1000e_update_stats()
5004 adapter->stats.dc += er32(DC); in e1000e_update_stats()
5006 hw->mac.collision_delta = er32(COLC); in e1000e_update_stats()
5008 if ((hw->mac.type != e1000_82574) && in e1000e_update_stats()
5009 (hw->mac.type != e1000_82583)) in e1000e_update_stats()
5010 adapter->stats.tncrs += er32(TNCRS); in e1000e_update_stats()
5012 adapter->stats.colc += hw->mac.collision_delta; in e1000e_update_stats()
5015 adapter->stats.xonrxc += er32(XONRXC); in e1000e_update_stats()
5016 adapter->stats.xontxc += er32(XONTXC); in e1000e_update_stats()
5017 adapter->stats.xoffrxc += er32(XOFFRXC); in e1000e_update_stats()
5018 adapter->stats.xofftxc += er32(XOFFTXC); in e1000e_update_stats()
5019 adapter->stats.gptc += er32(GPTC); in e1000e_update_stats()
5020 adapter->stats.gotc += er32(GOTCL); in e1000e_update_stats()
5022 adapter->stats.rnbc += er32(RNBC); in e1000e_update_stats()
5023 adapter->stats.ruc += er32(RUC); in e1000e_update_stats()
5025 adapter->stats.mptc += er32(MPTC); in e1000e_update_stats()
5026 adapter->stats.bptc += er32(BPTC); in e1000e_update_stats()
5030 hw->mac.tx_packet_delta = er32(TPT); in e1000e_update_stats()
5031 adapter->stats.tpt += hw->mac.tx_packet_delta; in e1000e_update_stats()
5033 adapter->stats.algnerrc += er32(ALGNERRC); in e1000e_update_stats()
5034 adapter->stats.rxerrc += er32(RXERRC); in e1000e_update_stats()
5035 adapter->stats.cexterr += er32(CEXTERR); in e1000e_update_stats()
5036 adapter->stats.tsctc += er32(TSCTC); in e1000e_update_stats()
5037 adapter->stats.tsctfc += er32(TSCTFC); in e1000e_update_stats()
5040 netdev->stats.multicast = adapter->stats.mprc; in e1000e_update_stats()
5041 netdev->stats.collisions = adapter->stats.colc; in e1000e_update_stats()
5048 netdev->stats.rx_errors = adapter->stats.rxerrc + in e1000e_update_stats()
5049 adapter->stats.crcerrs + adapter->stats.algnerrc + in e1000e_update_stats()
5050 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; in e1000e_update_stats()
5051 netdev->stats.rx_length_errors = adapter->stats.ruc + in e1000e_update_stats()
5052 adapter->stats.roc; in e1000e_update_stats()
5053 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; in e1000e_update_stats()
5054 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; in e1000e_update_stats()
5055 netdev->stats.rx_missed_errors = adapter->stats.mpc; in e1000e_update_stats()
5058 netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol; in e1000e_update_stats()
5059 netdev->stats.tx_aborted_errors = adapter->stats.ecol; in e1000e_update_stats()
5060 netdev->stats.tx_window_errors = adapter->stats.latecol; in e1000e_update_stats()
5061 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; in e1000e_update_stats()
5066 adapter->stats.mgptc += er32(MGTPTC); in e1000e_update_stats()
5067 adapter->stats.mgprc += er32(MGTPRC); in e1000e_update_stats()
5068 adapter->stats.mgpdc += er32(MGTPDC); in e1000e_update_stats()
5071 if (hw->mac.type >= e1000_pch_lpt) { in e1000e_update_stats()
5074 adapter->corr_errors += in e1000e_update_stats()
5076 adapter->uncorr_errors += in e1000e_update_stats()
5083 * e1000_phy_read_status - Update the PHY register status snapshot
5088 struct e1000_hw *hw = &adapter->hw; in e1000_phy_read_status()
5089 struct e1000_phy_regs *phy = &adapter->phy_regs; in e1000_phy_read_status()
5091 if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) && in e1000_phy_read_status()
5093 (adapter->hw.phy.media_type == e1000_media_type_copper)) { in e1000_phy_read_status()
5096 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); in e1000_phy_read_status()
5097 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); in e1000_phy_read_status()
5098 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); in e1000_phy_read_status()
5099 ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa); in e1000_phy_read_status()
5100 ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion); in e1000_phy_read_status()
5101 ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000); in e1000_phy_read_status()
5102 ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000); in e1000_phy_read_status()
5103 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); in e1000_phy_read_status()
5108 * Set values to typical power-on defaults in e1000_phy_read_status()
5110 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); in e1000_phy_read_status()
5111 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | in e1000_phy_read_status()
5114 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | in e1000_phy_read_status()
5116 phy->lpa = 0; in e1000_phy_read_status()
5117 phy->expansion = EXPANSION_ENABLENPAGE; in e1000_phy_read_status()
5118 phy->ctrl1000 = ADVERTISE_1000FULL; in e1000_phy_read_status()
5119 phy->stat1000 = 0; in e1000_phy_read_status()
5120 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); in e1000_phy_read_status()
5126 struct e1000_hw *hw = &adapter->hw; in e1000_print_link_info()
5130 netdev_info(adapter->netdev, in e1000_print_link_info()
5132 adapter->link_speed, in e1000_print_link_info()
5133 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", in e1000_print_link_info()
5141 struct e1000_hw *hw = &adapter->hw; in e1000e_has_link()
5150 switch (hw->phy.media_type) { in e1000e_has_link()
5152 if (hw->mac.get_link_status) { in e1000e_has_link()
5153 ret_val = hw->mac.ops.check_for_link(hw); in e1000e_has_link()
5154 link_active = !hw->mac.get_link_status; in e1000e_has_link()
5160 ret_val = hw->mac.ops.check_for_link(hw); in e1000e_has_link()
5164 ret_val = hw->mac.ops.check_for_link(hw); in e1000e_has_link()
5165 link_active = hw->mac.serdes_has_link; in e1000e_has_link()
5172 if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && in e1000e_has_link()
5184 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && in e1000e_enable_receives()
5185 (adapter->flags & FLAG_RESTART_NOW)) { in e1000e_enable_receives()
5186 struct e1000_hw *hw = &adapter->hw; in e1000e_enable_receives()
5190 adapter->flags &= ~FLAG_RESTART_NOW; in e1000e_enable_receives()
5196 struct e1000_hw *hw = &adapter->hw; in e1000e_check_82574_phy_workaround()
5202 adapter->phy_hang_count++; in e1000e_check_82574_phy_workaround()
5204 adapter->phy_hang_count = 0; in e1000e_check_82574_phy_workaround()
5206 if (adapter->phy_hang_count > 1) { in e1000e_check_82574_phy_workaround()
5207 adapter->phy_hang_count = 0; in e1000e_check_82574_phy_workaround()
5208 e_dbg("PHY appears hung - resetting\n"); in e1000e_check_82574_phy_workaround()
5209 schedule_work(&adapter->reset_task); in e1000e_check_82574_phy_workaround()
5214 * e1000_watchdog - Timer Call-back
5222 schedule_work(&adapter->watchdog_task); in e1000_watchdog()
5232 struct net_device *netdev = adapter->netdev; in e1000_watchdog_task()
5233 struct e1000_mac_info *mac = &adapter->hw.mac; in e1000_watchdog_task()
5234 struct e1000_phy_info *phy = &adapter->hw.phy; in e1000_watchdog_task()
5235 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_watchdog_task()
5237 struct e1000_hw *hw = &adapter->hw; in e1000_watchdog_task()
5240 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5246 pm_runtime_resume(netdev->dev.parent); in e1000_watchdog_task()
5253 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) in e1000_watchdog_task()
5261 pm_runtime_resume(netdev->dev.parent); in e1000_watchdog_task()
5275 e1000_phy_hw_reset(&adapter->hw); in e1000_watchdog_task()
5280 mac->ops.get_link_up_info(&adapter->hw, in e1000_watchdog_task()
5281 &adapter->link_speed, in e1000_watchdog_task()
5282 &adapter->link_duplex); in e1000_watchdog_task()
5287 if (phy->speed_downgraded) in e1000_watchdog_task()
5294 if ((hw->phy.type == e1000_phy_igp_3 || in e1000_watchdog_task()
5295 hw->phy.type == e1000_phy_bm) && in e1000_watchdog_task()
5296 hw->mac.autoneg && in e1000_watchdog_task()
5297 (adapter->link_speed == SPEED_10 || in e1000_watchdog_task()
5298 adapter->link_speed == SPEED_100) && in e1000_watchdog_task()
5299 (adapter->link_duplex == HALF_DUPLEX)) { in e1000_watchdog_task()
5309 adapter->tx_timeout_factor = 1; in e1000_watchdog_task()
5310 switch (adapter->link_speed) { in e1000_watchdog_task()
5313 adapter->tx_timeout_factor = 16; in e1000_watchdog_task()
5317 adapter->tx_timeout_factor = 10; in e1000_watchdog_task()
5321 /* workaround: re-program speed mode bit after in e1000_watchdog_task()
5322 * link-up event in e1000_watchdog_task()
5324 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && in e1000_watchdog_task()
5336 if (!(adapter->flags & FLAG_TSO_FORCE)) { in e1000_watchdog_task()
5337 switch (adapter->link_speed) { in e1000_watchdog_task()
5341 netdev->features &= ~NETIF_F_TSO; in e1000_watchdog_task()
5342 netdev->features &= ~NETIF_F_TSO6; in e1000_watchdog_task()
5345 netdev->features |= NETIF_F_TSO; in e1000_watchdog_task()
5346 netdev->features |= NETIF_F_TSO6; in e1000_watchdog_task()
5352 if (hw->mac.type == e1000_pch_spt) { in e1000_watchdog_task()
5353 netdev->features &= ~NETIF_F_TSO; in e1000_watchdog_task()
5354 netdev->features &= ~NETIF_F_TSO6; in e1000_watchdog_task()
5365 /* Perform any post-link-up configuration before in e1000_watchdog_task()
5368 if (phy->ops.cfg_on_link_up) in e1000_watchdog_task()
5369 phy->ops.cfg_on_link_up(hw); in e1000_watchdog_task()
5374 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5375 mod_timer(&adapter->phy_info_timer, in e1000_watchdog_task()
5380 adapter->link_speed = 0; in e1000_watchdog_task()
5381 adapter->link_duplex = 0; in e1000_watchdog_task()
5386 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5387 mod_timer(&adapter->phy_info_timer, in e1000_watchdog_task()
5390 /* 8000ES2LAN requires a Rx packet buffer work-around in e1000_watchdog_task()
5394 if (adapter->flags & FLAG_RX_NEEDS_RESTART) in e1000_watchdog_task()
5395 adapter->flags |= FLAG_RESTART_NOW; in e1000_watchdog_task()
5397 pm_schedule_suspend(netdev->dev.parent, in e1000_watchdog_task()
5403 spin_lock(&adapter->stats64_lock); in e1000_watchdog_task()
5406 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; in e1000_watchdog_task()
5407 adapter->tpt_old = adapter->stats.tpt; in e1000_watchdog_task()
5408 mac->collision_delta = adapter->stats.colc - adapter->colc_old; in e1000_watchdog_task()
5409 adapter->colc_old = adapter->stats.colc; in e1000_watchdog_task()
5411 adapter->gorc = adapter->stats.gorc - adapter->gorc_old; in e1000_watchdog_task()
5412 adapter->gorc_old = adapter->stats.gorc; in e1000_watchdog_task()
5413 adapter->gotc = adapter->stats.gotc - adapter->gotc_old; in e1000_watchdog_task()
5414 adapter->gotc_old = adapter->stats.gotc; in e1000_watchdog_task()
5415 spin_unlock(&adapter->stats64_lock); in e1000_watchdog_task()
5422 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) in e1000_watchdog_task()
5423 adapter->flags |= FLAG_RESTART_NOW; in e1000_watchdog_task()
5426 if (adapter->flags & FLAG_RESTART_NOW) { in e1000_watchdog_task()
5427 schedule_work(&adapter->reset_task); in e1000_watchdog_task()
5432 e1000e_update_adaptive(&adapter->hw); in e1000_watchdog_task()
5435 if (adapter->itr_setting == 4) { in e1000_watchdog_task()
5438 * everyone else is between 2000-8000. in e1000_watchdog_task()
5440 u32 goc = (adapter->gotc + adapter->gorc) / 10000; in e1000_watchdog_task()
5441 u32 dif = (adapter->gotc > adapter->gorc ? in e1000_watchdog_task()
5442 adapter->gotc - adapter->gorc : in e1000_watchdog_task()
5443 adapter->gorc - adapter->gotc) / 10000; in e1000_watchdog_task()
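The elided lines turn goc and dif into an interrupt rate; per the comment, symmetric Rx/Tx traffic pins the rate near 2000/s and lopsided traffic scales toward 8000/s. A userspace rendering under that rule, where the exact scaling constants are an assumption of this sketch and the byte counts are invented:

	#include <stdio.h>

	int main(void)
	{
		unsigned int gorc = 120000000;	/* hypothetical rx bytes this interval */
		unsigned int gotc = 5000000;	/* hypothetical tx bytes this interval */

		unsigned int goc = (gotc + gorc) / 10000;
		unsigned int dif = (gotc > gorc ? gotc - gorc : gorc - gotc) / 10000;
		/* no traffic: fall back to 8000/s; symmetric: near 2000/s */
		unsigned int itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

		printf("itr = %u interrupts/sec\n", itr);	/* 7520 here */
		return 0;
	}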
5450 if (adapter->msix_entries) in e1000_watchdog_task()
5451 ew32(ICS, adapter->rx_ring->ims_val); in e1000_watchdog_task()
5459 adapter->detect_tx_hung = true; in e1000_watchdog_task()
5465 hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0); in e1000_watchdog_task()
5467 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) in e1000_watchdog_task()
5471 if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { in e1000_watchdog_task()
5472 if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) && in e1000_watchdog_task()
5475 adapter->rx_hwtstamp_cleared++; in e1000_watchdog_task()
5477 adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP; in e1000_watchdog_task()
5482 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5483 mod_timer(&adapter->watchdog_timer, in e1000_watchdog_task()
5515 mss = skb_shinfo(skb)->gso_size; in e1000_tso()
5518 iph->tot_len = 0; in e1000_tso()
5519 iph->check = 0; in e1000_tso()
5520 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, in e1000_tso()
5523 ipcse = skb_transport_offset(skb) - 1; in e1000_tso()
5529 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; in e1000_tso()
5531 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; in e1000_tso()
5534 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); in e1000_tso()
5536 i = tx_ring->next_to_use; in e1000_tso()
5538 buffer_info = &tx_ring->buffer_info[i]; in e1000_tso()
5540 context_desc->lower_setup.ip_fields.ipcss = ipcss; in e1000_tso()
5541 context_desc->lower_setup.ip_fields.ipcso = ipcso; in e1000_tso()
5542 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); in e1000_tso()
5543 context_desc->upper_setup.tcp_fields.tucss = tucss; in e1000_tso()
5544 context_desc->upper_setup.tcp_fields.tucso = tucso; in e1000_tso()
5545 context_desc->upper_setup.tcp_fields.tucse = 0; in e1000_tso()
5546 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); in e1000_tso()
5547 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; in e1000_tso()
5548 context_desc->cmd_and_length = cpu_to_le32(cmd_length); in e1000_tso()
5550 buffer_info->time_stamp = jiffies; in e1000_tso()
5551 buffer_info->next_to_watch = i; in e1000_tso()
5554 if (i == tx_ring->count) in e1000_tso()
5556 tx_ring->next_to_use = i; in e1000_tso()
5564 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_tx_csum()
5571 if (skb->ip_summed != CHECKSUM_PARTIAL) in e1000_tx_csum()
5576 if (ip_hdr(skb)->protocol == IPPROTO_TCP) in e1000_tx_csum()
5581 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) in e1000_tx_csum()
5593 i = tx_ring->next_to_use; in e1000_tx_csum()
5594 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_csum()
5597 context_desc->lower_setup.ip_config = 0; in e1000_tx_csum()
5598 context_desc->upper_setup.tcp_fields.tucss = css; in e1000_tx_csum()
5599 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset; in e1000_tx_csum()
5600 context_desc->upper_setup.tcp_fields.tucse = 0; in e1000_tx_csum()
5601 context_desc->tcp_seg_setup.data = 0; in e1000_tx_csum()
5602 context_desc->cmd_and_length = cpu_to_le32(cmd_len); in e1000_tx_csum()
5604 buffer_info->time_stamp = jiffies; in e1000_tx_csum()
5605 buffer_info->next_to_watch = i; in e1000_tx_csum()
5608 if (i == tx_ring->count) in e1000_tx_csum()
5610 tx_ring->next_to_use = i; in e1000_tx_csum()
5619 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_tx_map()
5620 struct pci_dev *pdev = adapter->pdev; in e1000_tx_map()
5626 i = tx_ring->next_to_use; in e1000_tx_map()
5629 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_map()
5632 buffer_info->length = size; in e1000_tx_map()
5633 buffer_info->time_stamp = jiffies; in e1000_tx_map()
5634 buffer_info->next_to_watch = i; in e1000_tx_map()
5635 buffer_info->dma = dma_map_single(&pdev->dev, in e1000_tx_map()
5636 skb->data + offset, in e1000_tx_map()
5638 buffer_info->mapped_as_page = false; in e1000_tx_map()
5639 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) in e1000_tx_map()
5642 len -= size; in e1000_tx_map()
5648 if (i == tx_ring->count) in e1000_tx_map()
5654 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in e1000_tx_map()
5661 if (i == tx_ring->count) in e1000_tx_map()
5664 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_map()
5667 buffer_info->length = size; in e1000_tx_map()
5668 buffer_info->time_stamp = jiffies; in e1000_tx_map()
5669 buffer_info->next_to_watch = i; in e1000_tx_map()
5670 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, in e1000_tx_map()
5673 buffer_info->mapped_as_page = true; in e1000_tx_map()
5674 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) in e1000_tx_map()
5677 len -= size; in e1000_tx_map()
5683 segs = skb_shinfo(skb)->gso_segs ? : 1; in e1000_tx_map()
5685 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; in e1000_tx_map()
5687 tx_ring->buffer_info[i].skb = skb; in e1000_tx_map()
5688 tx_ring->buffer_info[i].segs = segs; in e1000_tx_map()
5689 tx_ring->buffer_info[i].bytecount = bytecount; in e1000_tx_map()
5690 tx_ring->buffer_info[first].next_to_watch = i; in e1000_tx_map()
5695 dev_err(&pdev->dev, "Tx DMA map failed\n"); in e1000_tx_map()
5696 buffer_info->dma = 0; in e1000_tx_map()
5698 count--; in e1000_tx_map()
5700 while (count--) { in e1000_tx_map()
5702 i += tx_ring->count; in e1000_tx_map()
5703 i--; in e1000_tx_map()
5704 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_map()
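The loop above is the dma_error unwind: walk backwards from the failing slot, wrapping past index 0, and release every buffer mapped so far. A standalone model with printf standing in for the unmap:

	#include <stdio.h>

	#define RING_COUNT 8	/* hypothetical ring size */

	static void put_txbuf(unsigned int i)
	{
		printf("unmap buffer %u\n", i);
	}

	int main(void)
	{
		unsigned int i = 1;	/* index where the mapping failed */
		int count = 3;		/* buffers successfully mapped before it */

		while (count--) {
			if (i == 0)
				i += RING_COUNT;
			i--;		/* step back, wrapping past the ring start */
			put_txbuf(i);	/* releases 0, 7, 6 in this example */
		}
		return 0;
	}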
5713 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_tx_queue()
5746 i = tx_ring->next_to_use; in e1000_tx_queue()
5749 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_queue()
5751 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); in e1000_tx_queue()
5752 tx_desc->lower.data = cpu_to_le32(txd_lower | in e1000_tx_queue()
5753 buffer_info->length); in e1000_tx_queue()
5754 tx_desc->upper.data = cpu_to_le32(txd_upper); in e1000_tx_queue()
5757 if (i == tx_ring->count) in e1000_tx_queue()
5759 } while (--count > 0); in e1000_tx_queue()
5761 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); in e1000_tx_queue()
5763 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ in e1000_tx_queue()
5765 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); in e1000_tx_queue()
5769 * applicable for weak-ordered memory model archs, in e1000_tx_queue()
5770 * such as IA-64). in e1000_tx_queue()
5774 tx_ring->next_to_use = i; in e1000_tx_queue()
5781 struct e1000_hw *hw = &adapter->hw; in e1000_transfer_dhcp_info()
5785 !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && in e1000_transfer_dhcp_info()
5786 (adapter->hw.mng_cookie.status & in e1000_transfer_dhcp_info()
5790 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) in e1000_transfer_dhcp_info()
5793 if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP)) in e1000_transfer_dhcp_info()
5797 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14); in e1000_transfer_dhcp_info()
5800 if (ip->protocol != IPPROTO_UDP) in e1000_transfer_dhcp_info()
5803 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); in e1000_transfer_dhcp_info()
5804 if (ntohs(udp->dest) != 67) in e1000_transfer_dhcp_info()
5807 offset = (u8 *)udp + 8 - skb->data; in e1000_transfer_dhcp_info()
5808 length = skb->len - offset; in e1000_transfer_dhcp_info()
5817 struct e1000_adapter *adapter = tx_ring->adapter; in __e1000_maybe_stop_tx()
5819 netif_stop_queue(adapter->netdev); in __e1000_maybe_stop_tx()
5830 return -EBUSY; in __e1000_maybe_stop_tx()
5833 netif_start_queue(adapter->netdev); in __e1000_maybe_stop_tx()
5834 ++adapter->restart_queue; in __e1000_maybe_stop_tx()
5840 BUG_ON(size > tx_ring->count); in e1000_maybe_stop_tx()
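The stop path above re-checks the free-descriptor count after a barrier because the Tx cleanup path may free descriptors between the first check and the queue stop; if it did, the queue restarts instead of stalling until the next interrupt. A single-threaded sketch of that shape (the smp_mb() is only noted in a comment):

	#include <stdio.h>

	static int desc_unused = 3;	/* hypothetical free descriptors */
	static int queue_stopped;

	static int maybe_stop_tx(int needed)
	{
		if (desc_unused >= needed)
			return 0;

		queue_stopped = 1;
		/* smp_mb() here in the driver: order the stop against the
		 * re-read so a concurrent cleaner cannot be missed. */
		if (desc_unused < needed)
			return -1;	/* genuinely full: -EBUSY */

		queue_stopped = 0;	/* cleaner freed space meanwhile */
		return 0;
	}

	int main(void)
	{
		printf("need 2 -> %d (stopped=%d)\n", maybe_stop_tx(2), queue_stopped);
		printf("need 8 -> %d (stopped=%d)\n", maybe_stop_tx(8), queue_stopped);
		return 0;
	}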
5851 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_xmit_frame()
5862 if (test_bit(__E1000_DOWN, &adapter->state)) { in e1000_xmit_frame()
5867 if (skb->len <= 0) { in e1000_xmit_frame()
5878 mss = skb_shinfo(skb)->gso_size; in e1000_xmit_frame()
5882 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data in e1000_xmit_frame()
5884 * frags into skb->data in e1000_xmit_frame()
5887 /* we do this workaround for ES2LAN, but it is unnecessary, in e1000_xmit_frame()
5890 if (skb->data_len && (hdr_len == len)) { in e1000_xmit_frame()
5893 pull_size = min_t(unsigned int, 4, skb->data_len); in e1000_xmit_frame()
5904 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) in e1000_xmit_frame()
5908 count += DIV_ROUND_UP(len, adapter->tx_fifo_limit); in e1000_xmit_frame()
5910 nr_frags = skb_shinfo(skb)->nr_frags; in e1000_xmit_frame()
5912 count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]), in e1000_xmit_frame()
5913 adapter->tx_fifo_limit); in e1000_xmit_frame()
5915 if (adapter->hw.mac.tx_pkt_filtering) in e1000_xmit_frame()
5930 first = tx_ring->next_to_use; in e1000_xmit_frame()
5950 if (unlikely(skb->no_fcs)) in e1000_xmit_frame()
5954 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, in e1000_xmit_frame()
5957 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in e1000_xmit_frame()
5958 (adapter->flags & FLAG_HAS_HW_TIMESTAMP)) { in e1000_xmit_frame()
5959 if (!adapter->tx_hwtstamp_skb) { in e1000_xmit_frame()
5960 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in e1000_xmit_frame()
5962 adapter->tx_hwtstamp_skb = skb_get(skb); in e1000_xmit_frame()
5963 adapter->tx_hwtstamp_start = jiffies; in e1000_xmit_frame()
5964 schedule_work(&adapter->tx_hwtstamp_work); in e1000_xmit_frame()
5966 adapter->tx_hwtstamp_skipped++; in e1000_xmit_frame()
5972 netdev_sent_queue(netdev, skb->len); in e1000_xmit_frame()
5978 adapter->tx_fifo_limit) + 2)); in e1000_xmit_frame()
5982 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_xmit_frame()
5984 tx_ring->next_to_use); in e1000_xmit_frame()
5986 writel(tx_ring->next_to_use, tx_ring->tail); in e1000_xmit_frame()
5990 tx_ring->buffer_info[first].time_stamp = 0; in e1000_xmit_frame()
5991 tx_ring->next_to_use = first; in e1000_xmit_frame()
5998 * e1000_tx_timeout - Respond to a Tx Hang
6007 adapter->tx_timeout_count++; in e1000_tx_timeout()
6008 schedule_work(&adapter->reset_task); in e1000_tx_timeout()
6017 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000_reset_task()
6020 if (!(adapter->flags & FLAG_RESTART_NOW)) { in e1000_reset_task()
6028 * e1000e_get_stats64 - Get System Network Statistics in e1000e_get_stats64()
6039 spin_lock(&adapter->stats64_lock); in e1000e_get_stats64()
6042 stats->rx_bytes = adapter->stats.gorc; in e1000e_get_stats64()
6043 stats->rx_packets = adapter->stats.gprc; in e1000e_get_stats64()
6044 stats->tx_bytes = adapter->stats.gotc; in e1000e_get_stats64()
6045 stats->tx_packets = adapter->stats.gptc; in e1000e_get_stats64()
6046 stats->multicast = adapter->stats.mprc; in e1000e_get_stats64()
6047 stats->collisions = adapter->stats.colc; in e1000e_get_stats64()
6054 stats->rx_errors = adapter->stats.rxerrc + in e1000e_get_stats64()
6055 adapter->stats.crcerrs + adapter->stats.algnerrc + in e1000e_get_stats64()
6056 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; in e1000e_get_stats64()
6057 stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc; in e1000e_get_stats64()
6058 stats->rx_crc_errors = adapter->stats.crcerrs; in e1000e_get_stats64()
6059 stats->rx_frame_errors = adapter->stats.algnerrc; in e1000e_get_stats64()
6060 stats->rx_missed_errors = adapter->stats.mpc; in e1000e_get_stats64()
6063 stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol; in e1000e_get_stats64()
6064 stats->tx_aborted_errors = adapter->stats.ecol; in e1000e_get_stats64()
6065 stats->tx_window_errors = adapter->stats.latecol; in e1000e_get_stats64()
6066 stats->tx_carrier_errors = adapter->stats.tncrs; in e1000e_get_stats64()
6070 spin_unlock(&adapter->stats64_lock); in e1000e_get_stats64()
6074 * e1000_change_mtu - Change the Maximum Transfer Unit
6087 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { in e1000_change_mtu()
6089 return -EINVAL; in e1000_change_mtu()
6093 if ((adapter->hw.mac.type >= e1000_pch2lan) && in e1000_change_mtu()
6094 !(adapter->flags2 & FLAG2_CRC_STRIPPING) && in e1000_change_mtu()
6097 return -EINVAL; in e1000_change_mtu()
6100 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) in e1000_change_mtu()
6102 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ in e1000_change_mtu()
6103 adapter->max_frame_size = max_frame; in e1000_change_mtu()
6105 netdev->mtu, new_mtu); in e1000_change_mtu()
6106 netdev->mtu = new_mtu; in e1000_change_mtu()
6108 pm_runtime_get_sync(netdev->dev.parent); in e1000_change_mtu()
6116 * i.e. RXBUFFER_2048 --> size-4096 slab in e1000_change_mtu()
6122 adapter->rx_buffer_len = 2048; in e1000_change_mtu()
6124 adapter->rx_buffer_len = 4096; in e1000_change_mtu()
6128 adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; in e1000_change_mtu()
6135 pm_runtime_put_sync(netdev->dev.parent); in e1000_change_mtu()
6137 clear_bit(__E1000_RESETTING, &adapter->state); in e1000_change_mtu()
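The buffer sizing above snaps receive buffers to slab-friendly sizes, 2048 or 4096 bytes depending on the new frame size, with a final clamp back to a standard VLAN frame when jumbo support isn't needed (still larger frames go through the jumbo page-chaining path). A sketch; 1522 stands in for VLAN_ETH_FRAME_LEN + ETH_FCS_LEN:

	#include <stdio.h>

	static unsigned int rx_buffer_len(unsigned int max_frame)
	{
		unsigned int len = (max_frame <= 2048) ? 2048 : 4096;

		if (max_frame <= 1522)	/* standard frame + VLAN tag + FCS */
			len = 1522;
		return len;
	}

	int main(void)
	{
		printf("%u\n", rx_buffer_len(1518));	/* -> 1522 */
		printf("%u\n", rx_buffer_len(2048));	/* -> 2048 */
		printf("%u\n", rx_buffer_len(9018));	/* -> 4096, pages beyond */
		return 0;
	}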
6148 if (adapter->hw.phy.media_type != e1000_media_type_copper) in e1000_mii_ioctl()
6149 return -EOPNOTSUPP; in e1000_mii_ioctl()
6153 data->phy_id = adapter->hw.phy.addr; in e1000_mii_ioctl()
6158 switch (data->reg_num & 0x1F) { in e1000_mii_ioctl()
6160 data->val_out = adapter->phy_regs.bmcr; in e1000_mii_ioctl()
6163 data->val_out = adapter->phy_regs.bmsr; in e1000_mii_ioctl()
6166 data->val_out = (adapter->hw.phy.id >> 16); in e1000_mii_ioctl()
6169 data->val_out = (adapter->hw.phy.id & 0xFFFF); in e1000_mii_ioctl()
6172 data->val_out = adapter->phy_regs.advertise; in e1000_mii_ioctl()
6175 data->val_out = adapter->phy_regs.lpa; in e1000_mii_ioctl()
6178 data->val_out = adapter->phy_regs.expansion; in e1000_mii_ioctl()
6181 data->val_out = adapter->phy_regs.ctrl1000; in e1000_mii_ioctl()
6184 data->val_out = adapter->phy_regs.stat1000; in e1000_mii_ioctl()
6187 data->val_out = adapter->phy_regs.estatus; in e1000_mii_ioctl()
6190 return -EIO; in e1000_mii_ioctl()
6195 return -EOPNOTSUPP; in e1000_mii_ioctl()
6201 * e1000e_hwtstamp_set - control hardware time stamping in e1000e_hwtstamp_set()
6222 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) in e1000e_hwtstamp_set()
6223 return -EFAULT; in e1000e_hwtstamp_set()
6247 return copy_to_user(ifr->ifr_data, &config, in e1000e_hwtstamp_set()
6248 sizeof(config)) ? -EFAULT : 0; in e1000e_hwtstamp_set()
6255 return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config, in e1000e_hwtstamp_get()
6256 sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0; in e1000e_hwtstamp_get()
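Both ioctl halves above follow the same user-copy contract: SET copies the request in, programs the hardware, then copies back what was actually applied; GET just returns the cached config. A kernel-style sketch of the SET half, with apply_hwtstamp_config() as a hypothetical stand-in for the driver's programming step:

	#include <linux/net_tstamp.h>
	#include <linux/netdevice.h>
	#include <linux/uaccess.h>

	/* Hypothetical: programs the NIC and may rewrite cfg to the closest
	 * filter the hardware supports. */
	static int apply_hwtstamp_config(struct net_device *netdev,
					 struct hwtstamp_config *cfg)
	{
		return 0; /* pretend everything is supported as requested */
	}

	static int sketch_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
	{
		struct hwtstamp_config config;
		int ret;

		if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
			return -EFAULT;

		ret = apply_hwtstamp_config(netdev, &config);
		if (ret)
			return ret;

		/* Report back the config actually in effect, which may be
		 * broader than what was requested (e.g. a wider Rx filter). */
		return copy_to_user(ifr->ifr_data, &config,
				    sizeof(config)) ? -EFAULT : 0;
	}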
6271 return -EOPNOTSUPP; in e1000_ioctl()
6277 struct e1000_hw *hw = &adapter->hw; in e1000_init_phy_wakeup()
6285 retval = hw->phy.ops.acquire(hw); in e1000_init_phy_wakeup()
6296 /* copy MAC MTA to PHY MTA - only needed for pchlan */ in e1000_init_phy_wakeup()
6297 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { in e1000_init_phy_wakeup()
6299 hw->phy.ops.write_reg_page(hw, BM_MTA(i), in e1000_init_phy_wakeup()
6301 hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1, in e1000_init_phy_wakeup()
6306 hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg); in e1000_init_phy_wakeup()
6323 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); in e1000_init_phy_wakeup()
6335 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc); in e1000_init_phy_wakeup()
6336 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc); in e1000_init_phy_wakeup()
6344 hw->phy.ops.release(hw); in e1000_init_phy_wakeup()
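The MTA copy loop above exists because the PHY-side wakeup registers are only 16 bits wide: each 32-bit MAC MTA entry becomes two consecutive BM_MTA half-word writes, low half first. A standalone sketch, where phy_write() and bm_mta_base are hypothetical stand-ins for write_reg_page() and BM_MTA(0):

	#include <stdint.h>
	#include <stdio.h>

	static void phy_write(uint32_t reg, uint16_t val)
	{
		printf("phy[0x%04x] <= 0x%04x\n", reg, val);
	}

	/* Split each 32-bit MAC multicast-table entry across two adjacent
	 * 16-bit PHY registers, mirroring the write_reg_page() pair above:
	 * BM_MTA(i) gets the low half, BM_MTA(i) + 1 the high half. */
	static void copy_mta_to_phy(const uint32_t *mac_mta, unsigned int count,
				    uint32_t bm_mta_base)
	{
		for (unsigned int i = 0; i < count; i++) {
			uint32_t mac_reg = mac_mta[i];

			phy_write(bm_mta_base + 2 * i,
				  (uint16_t)(mac_reg & 0xFFFF));
			phy_write(bm_mta_base + 2 * i + 1,
				  (uint16_t)((mac_reg >> 16) & 0xFFFF));
		}
	}

	int main(void)
	{
		const uint32_t mta[2] = { 0xDEADBEEF, 0x00000001 };

		copy_mta_to_phy(mta, 2, 0x80); /* 0x80: arbitrary example base */
		return 0;
	}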
6353 struct e1000_hw *hw = &adapter->hw; in e1000e_flush_lpic()
6356 pm_runtime_get_sync(netdev->dev.parent); in e1000e_flush_lpic()
6358 ret_val = hw->phy.ops.acquire(hw); in e1000e_flush_lpic()
6365 hw->phy.ops.release(hw); in e1000e_flush_lpic()
6368 pm_runtime_put_sync(netdev->dev.parent); in e1000e_flush_lpic()
6374 struct e1000_hw *hw = &adapter->hw; in e1000e_s0ix_entry_flow()
6405 * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1 in e1000e_s0ix_entry_flow()
6506 struct e1000_hw *hw = &adapter->hw; in e1000e_s0ix_exit_flow()
6607 while (test_bit(__E1000_RESETTING, &adapter->state) && count--) in e1000e_pm_freeze()
6610 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); in e1000e_pm_freeze()
6621 e1000e_disable_pcie_master(&adapter->hw); in e1000e_pm_freeze()
6630 struct e1000_hw *hw = &adapter->hw; in __e1000_shutdown()
6637 else if (device_may_wakeup(&pdev->dev)) in __e1000_shutdown()
6638 wufc = adapter->wol; in __e1000_shutdown()
6650 /* turn on all-multi mode if wake on multicast is enabled */ in __e1000_shutdown()
6659 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) in __e1000_shutdown()
6663 if (adapter->hw.phy.media_type == e1000_media_type_fiber || in __e1000_shutdown()
6664 adapter->hw.phy.media_type == in __e1000_shutdown()
6675 if (adapter->flags & FLAG_IS_ICH) in __e1000_shutdown()
6676 e1000_suspend_workarounds_ich8lan(&adapter->hw); in __e1000_shutdown()
6678 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { in __e1000_shutdown()
6695 if (adapter->hw.phy.type == e1000_phy_igp_3) { in __e1000_shutdown()
6696 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); in __e1000_shutdown()
6697 } else if (hw->mac.type >= e1000_pch_lpt) { in __e1000_shutdown()
6711 if ((hw->phy.type >= e1000_phy_i217) && in __e1000_shutdown()
6712 adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) { in __e1000_shutdown()
6715 retval = hw->phy.ops.acquire(hw); in __e1000_shutdown()
6720 if (adapter->eee_advert & in __e1000_shutdown()
6721 hw->dev_spec.ich8lan.eee_lp_ability & in __e1000_shutdown()
6724 if (adapter->eee_advert & in __e1000_shutdown()
6725 hw->dev_spec.ich8lan.eee_lp_ability & in __e1000_shutdown()
6733 hw->phy.ops.release(hw); in __e1000_shutdown()
6743 /* The pci-e switch on some quad port adapters will report a in __e1000_shutdown()
6746 * downstream port of the pci-e switch. in __e1000_shutdown()
6752 if (adapter->flags & FLAG_IS_QUAD_PORT) { in __e1000_shutdown()
6753 struct pci_dev *us_dev = pdev->bus->self; in __e1000_shutdown()
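In the shutdown path above, the eee_advert/eee_lp_ability tests AND the local EEE advertisement against the link partner's ability: EEE is only usable at a given speed when both ends advertise it. A sketch using the uapi <linux/mdio.h> bit definitions (the same MDIO_EEE_* bits the probe path below seeds eee_advert with):

	#include <stdint.h>
	#include <stdio.h>
	#include <linux/mdio.h> /* MDIO_EEE_100TX, MDIO_EEE_1000T (uapi header) */

	/* The negotiated EEE set is simply the AND of our advertisement and
	 * the link partner's ability - the same per-speed mask computed by
	 * the two adapter->eee_advert & eee_lp_ability tests above. */
	static uint16_t eee_negotiated(uint16_t advert, uint16_t lp_ability)
	{
		return advert & lp_ability;
	}

	int main(void)
	{
		uint16_t advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
		uint16_t lp = MDIO_EEE_100TX; /* partner only does 100BASE-TX EEE */
		uint16_t neg = eee_negotiated(advert, lp);

		printf("EEE at 100TX: %s\n", (neg & MDIO_EEE_100TX) ? "yes" : "no");
		printf("EEE at 1000T: %s\n", (neg & MDIO_EEE_1000T) ? "yes" : "no");
		return 0;
	}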
6773 * __e1000e_disable_aspm - Disable ASPM states
6775 * @state: bit-mask of ASPM states to disable
6782 struct pci_dev *parent = pdev->bus->self; in __e1000e_disable_aspm()
6812 dev_info(&pdev->dev, "Disabling ASPM %s %s\n", in __e1000e_disable_aspm()
6824 /* Double-check ASPM control. If not disabled by the above, the in __e1000e_disable_aspm()
6846 * e1000e_disable_aspm - Disable ASPM states.
6848 * @state: bit-mask of ASPM states to disable
6861 * @state: bit-mask of ASPM states to disable
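ASPM control lives in the PCIe Link Control register of both link partners, so disabling a state means clearing its enable bit on the device and on the upstream port; the "Double-check ASPM control" comment above reflects that firmware can leave either side enabled. A minimal kernel-style sketch, assuming state carries PCI_EXP_LNKCTL_ASPM_* bits:

	#include <linux/pci.h>

	static void sketch_disable_aspm(struct pci_dev *pdev, u16 state)
	{
		struct pci_dev *parent = pdev->bus->self;

		/* only the L0s/L1 enable bits are meaningful here */
		state &= PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1;

		/* clear on the endpoint... */
		pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state);
		/* ...and on the upstream (parent) port, if there is one */
		if (parent)
			pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, state);
	}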
6899 struct e1000_hw *hw = &adapter->hw; in __e1000_resume()
6902 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) in __e1000_resume()
6904 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) in __e1000_resume()
6911 if (hw->mac.type >= e1000_pch2lan) in __e1000_resume()
6912 e1000_resume_workarounds_pchlan(&adapter->hw); in __e1000_resume()
6917 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { in __e1000_resume()
6920 e1e_rphy(&adapter->hw, BM_WUS, &phy_data); in __e1000_resume()
6922 e_info("PHY Wakeup cause - %s\n", in __e1000_resume()
6930 e1e_wphy(&adapter->hw, BM_WUS, ~0); in __e1000_resume()
6935 e_info("MAC Wakeup cause - %s\n", in __e1000_resume()
6954 if (!(adapter->flags & FLAG_HAS_AMT)) in __e1000_resume()
6965 struct e1000_hw *hw = &adapter->hw; in e1000e_pm_suspend()
6977 if (hw->mac.type >= e1000_pch_cnp && in e1000e_pm_suspend()
6978 !e1000e_check_me(hw->adapter->pdev->device)) in e1000e_pm_suspend()
6989 struct e1000_hw *hw = &adapter->hw; in e1000e_pm_resume()
6993 if (hw->mac.type >= e1000_pch_cnp && in e1000e_pm_resume()
6994 !e1000e_check_me(hw->adapter->pdev->device)) in e1000e_pm_resume()
7010 eee_lp = adapter->hw.dev_spec.ich8lan.eee_lp_ability; in e1000e_pm_runtime_idle()
7013 adapter->hw.dev_spec.ich8lan.eee_lp_ability = eee_lp; in e1000e_pm_runtime_idle()
7017 return -EBUSY; in e1000e_pm_runtime_idle()
7031 if (netdev->flags & IFF_UP) in e1000e_pm_runtime_resume()
7043 if (netdev->flags & IFF_UP) { in e1000e_pm_runtime_suspend()
7046 while (test_bit(__E1000_RESETTING, &adapter->state) && count--) in e1000e_pm_runtime_suspend()
7049 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); in e1000e_pm_runtime_suspend()
7057 return -EBUSY; in e1000e_pm_runtime_suspend()
7067 e1000e_pm_freeze(&pdev->dev); in e1000_shutdown()
7079 if (adapter->msix_entries) { in e1000_intr_msix()
7083 msix_irq = adapter->msix_entries[vector].vector; in e1000_intr_msix()
7089 msix_irq = adapter->msix_entries[vector].vector; in e1000_intr_msix()
7095 msix_irq = adapter->msix_entries[vector].vector; in e1000_intr_msix()
7108 * Polling 'interrupt' - used by things like netconsole to send skbs
7109 * without having to re-enable interrupts. It's not called while
7116 switch (adapter->int_mode) { in e1000_netpoll()
7118 e1000_intr_msix(adapter->pdev->irq, netdev); in e1000_netpoll()
7121 if (disable_hardirq(adapter->pdev->irq)) in e1000_netpoll()
7122 e1000_intr_msi(adapter->pdev->irq, netdev); in e1000_netpoll()
7123 enable_irq(adapter->pdev->irq); in e1000_netpoll()
7126 if (disable_hardirq(adapter->pdev->irq)) in e1000_netpoll()
7127 e1000_intr(adapter->pdev->irq, netdev); in e1000_netpoll()
7128 enable_irq(adapter->pdev->irq); in e1000_netpoll()
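e1000_netpoll() invokes the appropriate handler directly, keyed on the interrupt mode the driver negotiated; for the legacy and MSI cases it must mask the hard IRQ first so the manual call cannot race a real interrupt. A sketch of that bracket, with my_intr() as a hypothetical handler:

	#include <linux/interrupt.h>

	static irqreturn_t my_intr(int irq, void *data)
	{
		return IRQ_HANDLED; /* hypothetical: poll the rings here */
	}

	static void sketch_netpoll_kick(unsigned int irq, void *data)
	{
		/* disable_hardirq() masks the line and waits for any
		 * in-flight hard IRQ handler; it returns false if a threaded
		 * handler is still active, in which case the manual call is
		 * skipped. */
		if (disable_hardirq(irq))
			my_intr(irq, data);
		enable_irq(irq);
	}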
7135 * e1000_io_error_detected - called when PCI error is detected
7145 e1000e_pm_freeze(&pdev->dev); in e1000_io_error_detected()
7157 * e1000_io_slot_reset - called after the pci bus has been reset.
7160  * Restart the card from scratch, as if from a cold boot. Implementation in e1000_io_slot_reset()
7161  * resembles the first half of the e1000e_pm_resume routine. in e1000_io_slot_reset()
7167 struct e1000_hw *hw = &adapter->hw; in e1000_io_slot_reset()
7172 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) in e1000_io_slot_reset()
7174 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) in e1000_io_slot_reset()
7181 dev_err(&pdev->dev, in e1000_io_slot_reset()
7182 "Cannot re-enable PCI device after reset.\n"); in e1000_io_slot_reset()
7185 pdev->state_saved = true; in e1000_io_slot_reset()
7201 * e1000_io_resume - called when traffic can start flowing again.
7206  * second half of the e1000e_pm_resume routine. in e1000_io_resume()
7215 e1000e_pm_thaw(&pdev->dev); in e1000_io_resume()
7221 if (!(adapter->flags & FLAG_HAS_AMT)) in e1000_io_resume()
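The three callbacks above are the driver's PCI error-recovery (AER) hooks. A sketch of the usual wiring, with stand-in bodies; a pci_driver points at the table through its .err_handler member:

	#include <linux/pci.h>

	static pci_ers_result_t sketch_error_detected(struct pci_dev *pdev,
						      pci_channel_state_t state)
	{
		/* quiesce the device; a permanent failure means give up,
		 * otherwise ask the core for a slot reset */
		return (state == pci_channel_io_perm_failure) ?
		       PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
	}

	static pci_ers_result_t sketch_slot_reset(struct pci_dev *pdev)
	{
		/* re-enable the device and restore config space here */
		return PCI_ERS_RESULT_RECOVERED;
	}

	static void sketch_resume(struct pci_dev *pdev)
	{
		/* bring the interface back up; traffic may flow again */
	}

	static const struct pci_error_handlers sketch_err_handler = {
		.error_detected	= sketch_error_detected,
		.slot_reset	= sketch_slot_reset,
		.resume		= sketch_resume,
	};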
7227 struct e1000_hw *hw = &adapter->hw; in e1000_print_device_info()
7228 struct net_device *netdev = adapter->netdev; in e1000_print_device_info()
7235 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : in e1000_print_device_info()
7238 netdev->dev_addr); in e1000_print_device_info()
7240 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000"); in e1000_print_device_info()
7246 hw->mac.type, hw->phy.type, pba_str); in e1000_print_device_info()
7251 struct e1000_hw *hw = &adapter->hw; in e1000_eeprom_checks()
7255 if (hw->mac.type != e1000_82573) in e1000_eeprom_checks()
7262 dev_warn(&adapter->pdev->dev, in e1000_eeprom_checks()
7271 struct e1000_hw *hw = &adapter->hw; in e1000_fix_features()
7274 if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN)) in e1000_fix_features()
7292 netdev_features_t changed = features ^ netdev->features; in e1000_set_features()
7295 adapter->flags |= FLAG_TSO_FORCE; in e1000_set_features()
7304 adapter->flags2 &= ~FLAG2_CRC_STRIPPING; in e1000_set_features()
7309 if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING) in e1000_set_features()
7310 adapter->flags2 |= FLAG2_CRC_STRIPPING; in e1000_set_features()
7312 adapter->flags2 &= ~FLAG2_CRC_STRIPPING; in e1000_set_features()
7316 netdev->features = features; in e1000_set_features()
7349 * e1000_probe - Device Initialization Routine
7364 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; in e1000_probe()
7374 if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S) in e1000_probe()
7376 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1) in e1000_probe()
7386 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in e1000_probe()
7390 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in e1000_probe()
7392 dev_err(&pdev->dev, in e1000_probe()
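Pulled out in isolation, the DMA-mask setup above is the standard probe idiom: prefer a 64-bit mask, fall back to 32-bit, and only then give up:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	static int sketch_set_dma_mask(struct pci_dev *pdev)
	{
		int err;

		/* 64-bit DMA lets descriptors and buffers live anywhere */
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (err) {
			/* fall back to 32-bit addressing before failing */
			err = dma_set_mask_and_coherent(&pdev->dev,
							DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev,
					"No usable DMA configuration, aborting\n");
				return err;
			}
		}
		return 0;
	}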
7413 err = -ENOMEM; in e1000_probe()
7418 SET_NETDEV_DEV(netdev, &pdev->dev); in e1000_probe()
7420 netdev->irq = pdev->irq; in e1000_probe()
7424 hw = &adapter->hw; in e1000_probe()
7425 adapter->netdev = netdev; in e1000_probe()
7426 adapter->pdev = pdev; in e1000_probe()
7427 adapter->ei = ei; in e1000_probe()
7428 adapter->pba = ei->pba; in e1000_probe()
7429 adapter->flags = ei->flags; in e1000_probe()
7430 adapter->flags2 = ei->flags2; in e1000_probe()
7431 adapter->hw.adapter = adapter; in e1000_probe()
7432 adapter->hw.mac.type = ei->mac; in e1000_probe()
7433 adapter->max_hw_frame_size = ei->max_hw_frame_size; in e1000_probe()
7434 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); in e1000_probe()
7439 err = -EIO; in e1000_probe()
7440 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); in e1000_probe()
7441 if (!adapter->hw.hw_addr) in e1000_probe()
7444 if ((adapter->flags & FLAG_HAS_FLASH) && in e1000_probe()
7446 (hw->mac.type < e1000_pch_spt)) { in e1000_probe()
7449 adapter->hw.flash_address = ioremap(flash_start, flash_len); in e1000_probe()
7450 if (!adapter->hw.flash_address) in e1000_probe()
7455 if (adapter->flags2 & FLAG2_HAS_EEE) in e1000_probe()
7456 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; in e1000_probe()
7459 netdev->netdev_ops = &e1000e_netdev_ops; in e1000_probe()
7461 netdev->watchdog_timeo = 5 * HZ; in e1000_probe()
7462 netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64); in e1000_probe()
7463 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); in e1000_probe()
7465 netdev->mem_start = mmio_start; in e1000_probe()
7466 netdev->mem_end = mmio_start + mmio_len; in e1000_probe()
7468 adapter->bd_number = cards_found++; in e1000_probe()
7477 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); in e1000_probe()
7478 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); in e1000_probe()
7479 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); in e1000_probe()
7481 err = ei->get_variants(adapter); in e1000_probe()
7485 if ((adapter->flags & FLAG_IS_ICH) && in e1000_probe()
7486 (adapter->flags & FLAG_READ_ONLY_NVM) && in e1000_probe()
7487 (hw->mac.type < e1000_pch_spt)) in e1000_probe()
7488 e1000e_write_protect_nvm_ich8lan(&adapter->hw); in e1000_probe()
7490 hw->mac.ops.get_bus_info(&adapter->hw); in e1000_probe()
7492 adapter->hw.phy.autoneg_wait_to_complete = 0; in e1000_probe()
7495 if (adapter->hw.phy.media_type == e1000_media_type_copper) { in e1000_probe()
7496 adapter->hw.phy.mdix = AUTO_ALL_MODES; in e1000_probe()
7497 adapter->hw.phy.disable_polarity_correction = 0; in e1000_probe()
7498 adapter->hw.phy.ms_type = e1000_ms_hw_default; in e1000_probe()
7501 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) in e1000_probe()
7502 dev_info(&pdev->dev, in e1000_probe()
7506 netdev->features = (NETIF_F_SG | in e1000_probe()
7515 /* Set user-changeable features (subset of all device features) */ in e1000_probe()
7516 netdev->hw_features = netdev->features; in e1000_probe()
7517 netdev->hw_features |= NETIF_F_RXFCS; in e1000_probe()
7518 netdev->priv_flags |= IFF_SUPP_NOFCS; in e1000_probe()
7519 netdev->hw_features |= NETIF_F_RXALL; in e1000_probe()
7521 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) in e1000_probe()
7522 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in e1000_probe()
7524 netdev->vlan_features |= (NETIF_F_SG | in e1000_probe()
7529 netdev->priv_flags |= IFF_UNICAST_FLT; in e1000_probe()
7532 netdev->features |= NETIF_F_HIGHDMA; in e1000_probe()
7533 netdev->vlan_features |= NETIF_F_HIGHDMA; in e1000_probe()
7536 /* MTU range: 68 - max_hw_frame_size */ in e1000_probe()
7537 netdev->min_mtu = ETH_MIN_MTU; in e1000_probe()
7538 netdev->max_mtu = adapter->max_hw_frame_size - in e1000_probe()
7541 if (e1000e_enable_mng_pass_thru(&adapter->hw)) in e1000_probe()
7542 adapter->flags |= FLAG_MNG_PT_ENABLED; in e1000_probe()
7547 adapter->hw.mac.ops.reset_hw(&adapter->hw); in e1000_probe()
7553 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) in e1000_probe()
7556 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); in e1000_probe()
7557 err = -EIO; in e1000_probe()
7565 if (e1000e_read_mac_addr(&adapter->hw)) in e1000_probe()
7566 dev_err(&pdev->dev, in e1000_probe()
7569 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); in e1000_probe()
7571 if (!is_valid_ether_addr(netdev->dev_addr)) { in e1000_probe()
7572 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", in e1000_probe()
7573 netdev->dev_addr); in e1000_probe()
7574 err = -EIO; in e1000_probe()
7578 timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0); in e1000_probe()
7579 timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0); in e1000_probe()
7581 INIT_WORK(&adapter->reset_task, e1000_reset_task); in e1000_probe()
7582 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); in e1000_probe()
7583 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); in e1000_probe()
7584 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); in e1000_probe()
7585 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); in e1000_probe()
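The timers and work items above follow a common split: timer callbacks run in atomic context, so they stay tiny and punt the heavy lifting to process context, mirroring how the watchdog timer above pairs with watchdog_task. A sketch of the pattern with a hypothetical private struct:

	#include <linux/timer.h>
	#include <linux/workqueue.h>

	struct sketch_priv {
		struct timer_list watchdog_timer;
		struct work_struct watchdog_task;
	};

	static void sketch_watchdog_task(struct work_struct *work)
	{
		/* heavy lifting happens here, in process context */
	}

	static void sketch_watchdog(struct timer_list *t)
	{
		struct sketch_priv *priv = from_timer(priv, t, watchdog_timer);

		/* defer out of timer (atomic) context */
		schedule_work(&priv->watchdog_task);
	}

	static void sketch_init(struct sketch_priv *priv)
	{
		timer_setup(&priv->watchdog_timer, sketch_watchdog, 0);
		INIT_WORK(&priv->watchdog_task, sketch_watchdog_task);
	}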
7588 adapter->hw.mac.autoneg = 1; in e1000_probe()
7589 adapter->fc_autoneg = true; in e1000_probe()
7590 adapter->hw.fc.requested_mode = e1000_fc_default; in e1000_probe()
7591 adapter->hw.fc.current_mode = e1000_fc_default; in e1000_probe()
7592 adapter->hw.phy.autoneg_advertised = 0x2f; in e1000_probe()
7594 /* Initial Wake on LAN setting - If APM wake is enabled in in e1000_probe()
7597 if (adapter->flags & FLAG_APME_IN_WUC) { in e1000_probe()
7601 if ((hw->mac.type > e1000_ich10lan) && in e1000_probe()
7603 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; in e1000_probe()
7604 } else if (adapter->flags & FLAG_APME_IN_CTRL3) { in e1000_probe()
7605 if (adapter->flags & FLAG_APME_CHECK_PORT_B && in e1000_probe()
7606 (adapter->hw.bus.func == 1)) in e1000_probe()
7607 ret_val = e1000_read_nvm(&adapter->hw, in e1000_probe()
7611 ret_val = e1000_read_nvm(&adapter->hw, in e1000_probe()
7620 adapter->eeprom_wol |= E1000_WUFC_MAG; in e1000_probe()
7626 if (!(adapter->flags & FLAG_HAS_WOL)) in e1000_probe()
7627 adapter->eeprom_wol = 0; in e1000_probe()
7630 adapter->wol = adapter->eeprom_wol; in e1000_probe()
7633 if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) || in e1000_probe()
7634 (hw->mac.ops.check_mng_mode(hw))) in e1000_probe()
7635 device_wakeup_enable(&pdev->dev); in e1000_probe()
7638 ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); in e1000_probe()
7642 adapter->eeprom_vers = 0; in e1000_probe()
7655 if (!(adapter->flags & FLAG_HAS_AMT)) in e1000_probe()
7658 strlcpy(netdev->name, "eth%d", sizeof(netdev->name)); in e1000_probe()
7668 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); in e1000_probe()
7670 if (pci_dev_run_wake(pdev) && hw->mac.type < e1000_pch_cnp) in e1000_probe()
7671 pm_runtime_put_noidle(&pdev->dev); in e1000_probe()
7676 if (!(adapter->flags & FLAG_HAS_AMT)) in e1000_probe()
7679 if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw)) in e1000_probe()
7680 e1000_phy_hw_reset(&adapter->hw); in e1000_probe()
7682 kfree(adapter->tx_ring); in e1000_probe()
7683 kfree(adapter->rx_ring); in e1000_probe()
7685 if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt)) in e1000_probe()
7686 iounmap(adapter->hw.flash_address); in e1000_probe()
7689 iounmap(adapter->hw.hw_addr); in e1000_probe()
7701 * e1000_remove - Device Removal Routine
7706 * Hot-Plug event, or because the driver is going to be removed from
7719 set_bit(__E1000_DOWN, &adapter->state); in e1000_remove()
7720 del_timer_sync(&adapter->watchdog_timer); in e1000_remove()
7721 del_timer_sync(&adapter->phy_info_timer); in e1000_remove()
7723 cancel_work_sync(&adapter->reset_task); in e1000_remove()
7724 cancel_work_sync(&adapter->watchdog_task); in e1000_remove()
7725 cancel_work_sync(&adapter->downshift_task); in e1000_remove()
7726 cancel_work_sync(&adapter->update_phy_task); in e1000_remove()
7727 cancel_work_sync(&adapter->print_hang_task); in e1000_remove()
7729 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { in e1000_remove()
7730 cancel_work_sync(&adapter->tx_hwtstamp_work); in e1000_remove()
7731 if (adapter->tx_hwtstamp_skb) { in e1000_remove()
7732 dev_consume_skb_any(adapter->tx_hwtstamp_skb); in e1000_remove()
7733 adapter->tx_hwtstamp_skb = NULL; in e1000_remove()
7740 pm_runtime_get_noresume(&pdev->dev); in e1000_remove()
7748 kfree(adapter->tx_ring); in e1000_remove()
7749 kfree(adapter->rx_ring); in e1000_remove()
7751 iounmap(adapter->hw.hw_addr); in e1000_remove()
7752 if ((adapter->hw.flash_address) && in e1000_remove()
7753 (adapter->hw.mac.type < e1000_pch_spt)) in e1000_remove()
7754 iounmap(adapter->hw.flash_address); in e1000_remove()
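The remove path above tears down in dependency order: mark the adapter down so callbacks stop re-arming themselves, synchronously stop timers and work items, and only then release the MMIO mappings they might still have been touching. A condensed sketch with hypothetical names:

	#include <linux/bitops.h>
	#include <linux/io.h>
	#include <linux/timer.h>
	#include <linux/workqueue.h>

	enum { __SKETCH_DOWN }; /* hypothetical state bit */

	struct sketch_dev {
		unsigned long state;
		struct timer_list watchdog_timer;
		struct work_struct watchdog_task;
		void __iomem *hw_addr;
	};

	static void sketch_remove(struct sketch_dev *dev)
	{
		/* callbacks check this bit and refuse to re-arm */
		set_bit(__SKETCH_DOWN, &dev->state);

		/* wait for any running timer handler, then queued work */
		del_timer_sync(&dev->watchdog_timer);
		cancel_work_sync(&dev->watchdog_task);

		/* safe now: nothing can touch the registers anymore */
		iounmap(dev->hw_addr);
	}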
7918 * e1000_init_module - Driver Registration Routine
7926 pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n"); in e1000_init_module()
7933 * e1000_exit_module - Driver Exit Cleanup Routine