Lines Matching +full:eee +full:- +full:broken +full:- +full:100 +full:tx
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2007 - 2018 Intel Corporation. */
57 "Copyright (c) 2007-2014 Intel Corporation.";
235 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
240 static int debug = -1;
268 /* TX Registers */
285 /* igb_regdump - register printout routine */
292 switch (reginfo->ofs) { in igb_regdump()
342 pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); in igb_regdump()
346 snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); in igb_regdump()
347 pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], in igb_regdump()
351 /* igb_dump - Print registers, Tx-rings and Rx-rings */
354 struct net_device *netdev = adapter->netdev; in igb_dump()
355 struct e1000_hw *hw = &adapter->hw; in igb_dump()
370 dev_info(&adapter->pdev->dev, "Net device Info\n"); in igb_dump()
372 pr_info("%-15s %016lX %016lX\n", netdev->name, in igb_dump()
373 netdev->state, dev_trans_start(netdev)); in igb_dump()
377 dev_info(&adapter->pdev->dev, "Register Dump\n"); in igb_dump()
380 reginfo->name; reginfo++) { in igb_dump()
384 /* Print TX Ring Summary */ in igb_dump()
388 dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); in igb_dump()
389 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); in igb_dump()
390 for (n = 0; n < adapter->num_tx_queues; n++) { in igb_dump()
392 tx_ring = adapter->tx_ring[n]; in igb_dump()
393 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; in igb_dump()
395 n, tx_ring->next_to_use, tx_ring->next_to_clean, in igb_dump()
398 buffer_info->next_to_watch, in igb_dump()
399 (u64)buffer_info->time_stamp); in igb_dump()
402 /* Print TX Rings */ in igb_dump()
406 dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); in igb_dump()
411 * +--------------------------------------------------------------+ in igb_dump()
413 * +--------------------------------------------------------------+ in igb_dump()
415 * +--------------------------------------------------------------+ in igb_dump()
419 for (n = 0; n < adapter->num_tx_queues; n++) { in igb_dump()
420 tx_ring = adapter->tx_ring[n]; in igb_dump()
421 pr_info("------------------------------------\n"); in igb_dump()
422 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); in igb_dump()
423 pr_info("------------------------------------\n"); in igb_dump()
424 …fo("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi-… in igb_dump()
426 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { in igb_dump()
430 buffer_info = &tx_ring->tx_buffer_info[i]; in igb_dump()
432 if (i == tx_ring->next_to_use && in igb_dump()
433 i == tx_ring->next_to_clean) in igb_dump()
435 else if (i == tx_ring->next_to_use) in igb_dump()
437 else if (i == tx_ring->next_to_clean) in igb_dump()
443 i, le64_to_cpu(u0->a), in igb_dump()
444 le64_to_cpu(u0->b), in igb_dump()
447 buffer_info->next_to_watch, in igb_dump()
448 (u64)buffer_info->time_stamp, in igb_dump()
449 buffer_info->skb, next_desc); in igb_dump()
451 if (netif_msg_pktdata(adapter) && buffer_info->skb) in igb_dump()
454 16, 1, buffer_info->skb->data, in igb_dump()
462 dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); in igb_dump()
464 for (n = 0; n < adapter->num_rx_queues; n++) { in igb_dump()
465 rx_ring = adapter->rx_ring[n]; in igb_dump()
467 n, rx_ring->next_to_use, rx_ring->next_to_clean); in igb_dump()
474 dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); in igb_dump()
478 * +-----------------------------------------------------+ in igb_dump()
480 * +----------------------------------------------+------+ in igb_dump()
482 * +-----------------------------------------------------+ in igb_dump()
485 * Advanced Receive Descriptor (Write-Back) Format in igb_dump()
488 * +------------------------------------------------------+ in igb_dump()
491 * +------------------------------------------------------+ in igb_dump()
493 * +------------------------------------------------------+ in igb_dump()
497 for (n = 0; n < adapter->num_rx_queues; n++) { in igb_dump()
498 rx_ring = adapter->rx_ring[n]; in igb_dump()
499 pr_info("------------------------------------\n"); in igb_dump()
500 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); in igb_dump()
501 pr_info("------------------------------------\n"); in igb_dump()
502 …pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Re… in igb_dump()
503 …nfo("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-… in igb_dump()
505 for (i = 0; i < rx_ring->count; i++) { in igb_dump()
508 buffer_info = &rx_ring->rx_buffer_info[i]; in igb_dump()
511 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in igb_dump()
513 if (i == rx_ring->next_to_use) in igb_dump()
515 else if (i == rx_ring->next_to_clean) in igb_dump()
522 pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n", in igb_dump()
524 le64_to_cpu(u0->a), in igb_dump()
525 le64_to_cpu(u0->b), in igb_dump()
530 le64_to_cpu(u0->a), in igb_dump()
531 le64_to_cpu(u0->b), in igb_dump()
532 (u64)buffer_info->dma, in igb_dump()
536 buffer_info->dma && buffer_info->page) { in igb_dump()
540 page_address(buffer_info->page) + in igb_dump()
541 buffer_info->page_offset, in igb_dump()
553 * igb_get_i2c_data - Reads the I2C SDA data bit
561 struct e1000_hw *hw = &adapter->hw; in igb_get_i2c_data()
568 * igb_set_i2c_data - Sets the I2C data bit
577 struct e1000_hw *hw = &adapter->hw; in igb_set_i2c_data()
593 * igb_set_i2c_clk - Sets the I2C SCL clock
602 struct e1000_hw *hw = &adapter->hw; in igb_set_i2c_clk()
617 * igb_get_i2c_clk - Gets the I2C SCL clock state
625 struct e1000_hw *hw = &adapter->hw; in igb_get_i2c_clk()
641 * igb_get_hw_dev - return device
648 struct igb_adapter *adapter = hw->back; in igb_get_hw_dev()
649 return adapter->netdev; in igb_get_hw_dev()
653 * igb_init_module - Driver Registration Routine
675 * igb_exit_module - Driver Exit Cleanup Routine
692 * igb_cache_ring_register - Descriptor ring to register mapping
695 * Once we know the feature-set enabled for the device, we'll cache
701 u32 rbase_offset = adapter->vfs_allocated_count; in igb_cache_ring_register()
703 switch (adapter->hw.mac.type) { in igb_cache_ring_register()
710 if (adapter->vfs_allocated_count) { in igb_cache_ring_register()
711 for (; i < adapter->rss_queues; i++) in igb_cache_ring_register()
712 adapter->rx_ring[i]->reg_idx = rbase_offset + in igb_cache_ring_register()
723 for (; i < adapter->num_rx_queues; i++) in igb_cache_ring_register()
724 adapter->rx_ring[i]->reg_idx = rbase_offset + i; in igb_cache_ring_register()
725 for (; j < adapter->num_tx_queues; j++) in igb_cache_ring_register()
726 adapter->tx_ring[j]->reg_idx = rbase_offset + j; in igb_cache_ring_register()
734 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); in igb_rd32()
744 struct net_device *netdev = igb->netdev; in igb_rd32()
745 hw->hw_addr = NULL; in igb_rd32()
747 WARN(pci_device_is_present(igb->pdev), in igb_rd32()
755 * igb_write_ivar - configure ivar for given MSI-X vector
763 * each containing a cause allocation for an Rx and Tx ring, and a
780 #define IGB_N0_QUEUE -1
783 struct igb_adapter *adapter = q_vector->adapter; in igb_assign_vector()
784 struct e1000_hw *hw = &adapter->hw; in igb_assign_vector()
789 if (q_vector->rx.ring) in igb_assign_vector()
790 rx_queue = q_vector->rx.ring->reg_idx; in igb_assign_vector()
791 if (q_vector->tx.ring) in igb_assign_vector()
792 tx_queue = q_vector->tx.ring->reg_idx; in igb_assign_vector()
794 switch (hw->mac.type) { in igb_assign_vector()
805 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0) in igb_assign_vector()
808 q_vector->eims_value = msixbm; in igb_assign_vector()
812 * with 8 rows. The ordering is column-major so we use the in igb_assign_vector()
824 q_vector->eims_value = BIT(msix_vector); in igb_assign_vector()
832 * however instead of ordering column-major we have things in igb_assign_vector()
833 * ordered row-major. So we traverse the table by using in igb_assign_vector()
845 q_vector->eims_value = BIT(msix_vector); in igb_assign_vector()
853 adapter->eims_enable_mask |= q_vector->eims_value; in igb_assign_vector()
856 q_vector->set_itr = 1; in igb_assign_vector()
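As a hedged illustration of the column-major vs. row-major wording in the comments above (a sketch only; the exact queue-to-IVAR bit math is assumed for illustration, not quoted from the driver), the two orderings split a queue number into a table row index and a bit offset within the 32-bit IVAR register roughly like this:

	/* Illustrative sketch: 8-row x 2-column IVAR table with 8-bit entries.
	 * Column-major: low 3 bits pick the row, bit 3 picks the column.
	 * Row-major: queue/2 picks the row, the parity bit picks the column.
	 */
	static void ivar_slot(unsigned int queue, int column_major,
			      unsigned int *index, unsigned int *bit_offset)
	{
		if (column_major) {
			*index      = queue & 0x7;
			*bit_offset = (queue & 0x8) << 1;	/* 0 or 16 */
		} else {
			*index      = queue >> 1;
			*bit_offset = (queue & 0x1) << 4;	/* 0 or 16 */
		}
	}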
860 * igb_configure_msix - Configure MSI-X hardware
864 * generate MSI-X interrupts.
870 struct e1000_hw *hw = &adapter->hw; in igb_configure_msix()
872 adapter->eims_enable_mask = 0; in igb_configure_msix()
875 switch (hw->mac.type) { in igb_configure_msix()
878 /* enable MSI-X PBA support */ in igb_configure_msix()
881 /* Auto-Mask interrupts upon ICR read. */ in igb_configure_msix()
889 adapter->eims_other = E1000_EIMS_OTHER; in igb_configure_msix()
899 /* Turn on MSI-X capability first, or our settings in igb_configure_msix()
907 adapter->eims_other = BIT(vector); in igb_configure_msix()
913 /* do nothing, since nothing else supports MSI-X */ in igb_configure_msix()
915 } /* switch (hw->mac.type) */ in igb_configure_msix()
917 adapter->eims_enable_mask |= adapter->eims_other; in igb_configure_msix()
919 for (i = 0; i < adapter->num_q_vectors; i++) in igb_configure_msix()
920 igb_assign_vector(adapter->q_vector[i], vector++); in igb_configure_msix()
926 * igb_request_msix - Initialize MSI-X interrupts
929 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
934 struct net_device *netdev = adapter->netdev; in igb_request_msix()
937 err = request_irq(adapter->msix_entries[vector].vector, in igb_request_msix()
938 igb_msix_other, 0, netdev->name, adapter); in igb_request_msix()
942 for (i = 0; i < adapter->num_q_vectors; i++) { in igb_request_msix()
943 struct igb_q_vector *q_vector = adapter->q_vector[i]; in igb_request_msix()
947 q_vector->itr_register = adapter->io_addr + E1000_EITR(vector); in igb_request_msix()
949 if (q_vector->rx.ring && q_vector->tx.ring) in igb_request_msix()
950 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, in igb_request_msix()
951 q_vector->rx.ring->queue_index); in igb_request_msix()
952 else if (q_vector->tx.ring) in igb_request_msix()
953 sprintf(q_vector->name, "%s-tx-%u", netdev->name, in igb_request_msix()
954 q_vector->tx.ring->queue_index); in igb_request_msix()
955 else if (q_vector->rx.ring) in igb_request_msix()
956 sprintf(q_vector->name, "%s-rx-%u", netdev->name, in igb_request_msix()
957 q_vector->rx.ring->queue_index); in igb_request_msix()
959 sprintf(q_vector->name, "%s-unused", netdev->name); in igb_request_msix()
961 err = request_irq(adapter->msix_entries[vector].vector, in igb_request_msix()
962 igb_msix_ring, 0, q_vector->name, in igb_request_msix()
973 free_irq(adapter->msix_entries[free_vector++].vector, adapter); in igb_request_msix()
975 vector--; in igb_request_msix()
977 free_irq(adapter->msix_entries[free_vector++].vector, in igb_request_msix()
978 adapter->q_vector[i]); in igb_request_msix()
985 * igb_free_q_vector - Free memory allocated for specific interrupt vector
993 struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; in igb_free_q_vector()
995 adapter->q_vector[v_idx] = NULL; in igb_free_q_vector()
1005 * igb_reset_q_vector - Reset config for interrupt vector
1014 struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; in igb_reset_q_vector()
1022 if (q_vector->tx.ring) in igb_reset_q_vector()
1023 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; in igb_reset_q_vector()
1025 if (q_vector->rx.ring) in igb_reset_q_vector()
1026 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; in igb_reset_q_vector()
1028 netif_napi_del(&q_vector->napi); in igb_reset_q_vector()
1034 int v_idx = adapter->num_q_vectors; in igb_reset_interrupt_capability()
1036 if (adapter->flags & IGB_FLAG_HAS_MSIX) in igb_reset_interrupt_capability()
1037 pci_disable_msix(adapter->pdev); in igb_reset_interrupt_capability()
1038 else if (adapter->flags & IGB_FLAG_HAS_MSI) in igb_reset_interrupt_capability()
1039 pci_disable_msi(adapter->pdev); in igb_reset_interrupt_capability()
1041 while (v_idx--) in igb_reset_interrupt_capability()
1046 * igb_free_q_vectors - Free memory allocated for interrupt vectors
1055 int v_idx = adapter->num_q_vectors; in igb_free_q_vectors()
1057 adapter->num_tx_queues = 0; in igb_free_q_vectors()
1058 adapter->num_rx_queues = 0; in igb_free_q_vectors()
1059 adapter->num_q_vectors = 0; in igb_free_q_vectors()
1061 while (v_idx--) { in igb_free_q_vectors()
1068 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
1071 * This function resets the device so that it has 0 Rx queues, Tx queues, and
1072 * MSI-X interrupts allocated.
1081 * igb_set_interrupt_capability - set MSI or MSI-X if supported
1095 adapter->flags |= IGB_FLAG_HAS_MSIX; in igb_set_interrupt_capability()
1098 adapter->num_rx_queues = adapter->rss_queues; in igb_set_interrupt_capability()
1099 if (adapter->vfs_allocated_count) in igb_set_interrupt_capability()
1100 adapter->num_tx_queues = 1; in igb_set_interrupt_capability()
1102 adapter->num_tx_queues = adapter->rss_queues; in igb_set_interrupt_capability()
1105 numvecs = adapter->num_rx_queues; in igb_set_interrupt_capability()
1107 /* if Tx handler is separate add 1 for every Tx queue */ in igb_set_interrupt_capability()
1108 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) in igb_set_interrupt_capability()
1109 numvecs += adapter->num_tx_queues; in igb_set_interrupt_capability()
1112 adapter->num_q_vectors = numvecs; in igb_set_interrupt_capability()
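A quick worked example of the vector accounting above (a sketch; the extra "other"/link vector is assumed from the way igb_configure_msix() and igb_request_msix() handle igb_msix_other, and the helper name is hypothetical):

	/* Sketch: MSI-X vector count for a given RSS queue count. */
	static unsigned int example_numvecs(unsigned int rss_queues, int queue_pairs)
	{
		unsigned int numvecs = rss_queues;	/* one q_vector per Rx queue */

		if (!queue_pairs)			/* separate Tx interrupt handlers */
			numvecs += rss_queues;		/* ... add one per Tx queue */

		return numvecs + 1;			/* + the "other"/link vector */
	}
	/* example_numvecs(4, 1) == 5, example_numvecs(4, 0) == 9 */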
1117 adapter->msix_entries[i].entry = i; in igb_set_interrupt_capability()
1119 err = pci_enable_msix_range(adapter->pdev, in igb_set_interrupt_capability()
1120 adapter->msix_entries, in igb_set_interrupt_capability()
1128 /* If we can't do MSI-X, try MSI */ in igb_set_interrupt_capability()
1130 adapter->flags &= ~IGB_FLAG_HAS_MSIX; in igb_set_interrupt_capability()
1132 /* disable SR-IOV for non MSI-X configurations */ in igb_set_interrupt_capability()
1133 if (adapter->vf_data) { in igb_set_interrupt_capability()
1134 struct e1000_hw *hw = &adapter->hw; in igb_set_interrupt_capability()
1136 pci_disable_sriov(adapter->pdev); in igb_set_interrupt_capability()
1139 kfree(adapter->vf_mac_list); in igb_set_interrupt_capability()
1140 adapter->vf_mac_list = NULL; in igb_set_interrupt_capability()
1141 kfree(adapter->vf_data); in igb_set_interrupt_capability()
1142 adapter->vf_data = NULL; in igb_set_interrupt_capability()
1145 msleep(100); in igb_set_interrupt_capability()
1146 dev_info(&adapter->pdev->dev, "IOV Disabled\n"); in igb_set_interrupt_capability()
1149 adapter->vfs_allocated_count = 0; in igb_set_interrupt_capability()
1150 adapter->rss_queues = 1; in igb_set_interrupt_capability()
1151 adapter->flags |= IGB_FLAG_QUEUE_PAIRS; in igb_set_interrupt_capability()
1152 adapter->num_rx_queues = 1; in igb_set_interrupt_capability()
1153 adapter->num_tx_queues = 1; in igb_set_interrupt_capability()
1154 adapter->num_q_vectors = 1; in igb_set_interrupt_capability()
1155 if (!pci_enable_msi(adapter->pdev)) in igb_set_interrupt_capability()
1156 adapter->flags |= IGB_FLAG_HAS_MSI; in igb_set_interrupt_capability()
1162 head->ring = ring; in igb_add_ring()
1163 head->count++; in igb_add_ring()
1167 * igb_alloc_q_vector - Allocate memory for a single interrupt vector
1171 * @txr_count: total number of Tx rings to allocate
1172 * @txr_idx: index of first Tx ring to allocate
1176 * We allocate one q_vector. If allocation fails we return -ENOMEM.
1188 /* igb only supports 1 Tx and/or 1 Rx queue per vector */ in igb_alloc_q_vector()
1190 return -ENOMEM; in igb_alloc_q_vector()
1196 q_vector = adapter->q_vector[v_idx]; in igb_alloc_q_vector()
1206 return -ENOMEM; in igb_alloc_q_vector()
1209 netif_napi_add(adapter->netdev, &q_vector->napi, in igb_alloc_q_vector()
1213 adapter->q_vector[v_idx] = q_vector; in igb_alloc_q_vector()
1214 q_vector->adapter = adapter; in igb_alloc_q_vector()
1217 q_vector->tx.work_limit = adapter->tx_work_limit; in igb_alloc_q_vector()
1220 q_vector->itr_register = adapter->io_addr + E1000_EITR(0); in igb_alloc_q_vector()
1221 q_vector->itr_val = IGB_START_ITR; in igb_alloc_q_vector()
1224 ring = q_vector->ring; in igb_alloc_q_vector()
1228 /* rx or rx/tx vector */ in igb_alloc_q_vector()
1229 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) in igb_alloc_q_vector()
1230 q_vector->itr_val = adapter->rx_itr_setting; in igb_alloc_q_vector()
1232 /* tx only vector */ in igb_alloc_q_vector()
1233 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) in igb_alloc_q_vector()
1234 q_vector->itr_val = adapter->tx_itr_setting; in igb_alloc_q_vector()
1239 ring->dev = &adapter->pdev->dev; in igb_alloc_q_vector()
1240 ring->netdev = adapter->netdev; in igb_alloc_q_vector()
1243 ring->q_vector = q_vector; in igb_alloc_q_vector()
1245 /* update q_vector Tx values */ in igb_alloc_q_vector()
1246 igb_add_ring(ring, &q_vector->tx); in igb_alloc_q_vector()
1249 if (adapter->hw.mac.type == e1000_82575) in igb_alloc_q_vector()
1250 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags); in igb_alloc_q_vector()
1252 /* apply Tx specific ring traits */ in igb_alloc_q_vector()
1253 ring->count = adapter->tx_ring_count; in igb_alloc_q_vector()
1254 ring->queue_index = txr_idx; in igb_alloc_q_vector()
1256 ring->cbs_enable = false; in igb_alloc_q_vector()
1257 ring->idleslope = 0; in igb_alloc_q_vector()
1258 ring->sendslope = 0; in igb_alloc_q_vector()
1259 ring->hicredit = 0; in igb_alloc_q_vector()
1260 ring->locredit = 0; in igb_alloc_q_vector()
1262 u64_stats_init(&ring->tx_syncp); in igb_alloc_q_vector()
1263 u64_stats_init(&ring->tx_syncp2); in igb_alloc_q_vector()
1266 adapter->tx_ring[txr_idx] = ring; in igb_alloc_q_vector()
1274 ring->dev = &adapter->pdev->dev; in igb_alloc_q_vector()
1275 ring->netdev = adapter->netdev; in igb_alloc_q_vector()
1278 ring->q_vector = q_vector; in igb_alloc_q_vector()
1281 igb_add_ring(ring, &q_vector->rx); in igb_alloc_q_vector()
1284 if (adapter->hw.mac.type >= e1000_82576) in igb_alloc_q_vector()
1285 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); in igb_alloc_q_vector()
1288 * have the tag byte-swapped. in igb_alloc_q_vector()
1290 if (adapter->hw.mac.type >= e1000_i350) in igb_alloc_q_vector()
1291 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); in igb_alloc_q_vector()
1294 ring->count = adapter->rx_ring_count; in igb_alloc_q_vector()
1295 ring->queue_index = rxr_idx; in igb_alloc_q_vector()
1297 u64_stats_init(&ring->rx_syncp); in igb_alloc_q_vector()
1300 adapter->rx_ring[rxr_idx] = ring; in igb_alloc_q_vector()
1308 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
1312 * return -ENOMEM.
1316 int q_vectors = adapter->num_q_vectors; in igb_alloc_q_vectors()
1317 int rxr_remaining = adapter->num_rx_queues; in igb_alloc_q_vectors()
1318 int txr_remaining = adapter->num_tx_queues; in igb_alloc_q_vectors()
1331 rxr_remaining--; in igb_alloc_q_vectors()
1337 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); in igb_alloc_q_vectors()
1338 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); in igb_alloc_q_vectors()
1347 rxr_remaining -= rqpv; in igb_alloc_q_vectors()
1348 txr_remaining -= tqpv; in igb_alloc_q_vectors()
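To make the DIV_ROUND_UP distribution concrete, a small sketch with assumed numbers (illustrative helper, not driver code): spreading 5 leftover rings over 3 remaining q_vectors yields 2, 2, 1, because the ratio is recomputed each iteration and earlier vectors absorb the rounding slack.

	/* Sketch: spread leftover rings across the remaining q_vectors. */
	static void distribute_rings(unsigned int rings, unsigned int vectors)
	{
		unsigned int v;

		for (v = vectors; v > 0; v--) {
			unsigned int per_vec = (rings + v - 1) / v;	/* DIV_ROUND_UP */

			rings -= per_vec;	/* this q_vector takes per_vec rings */
		}
	}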
1356 adapter->num_tx_queues = 0; in igb_alloc_q_vectors()
1357 adapter->num_rx_queues = 0; in igb_alloc_q_vectors()
1358 adapter->num_q_vectors = 0; in igb_alloc_q_vectors()
1360 while (v_idx--) in igb_alloc_q_vectors()
1363 return -ENOMEM; in igb_alloc_q_vectors()
1367 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1375 struct pci_dev *pdev = adapter->pdev; in igb_init_interrupt_scheme()
1382 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); in igb_init_interrupt_scheme()
1396 * igb_request_irq - initialize interrupts
1404 struct net_device *netdev = adapter->netdev; in igb_request_irq()
1405 struct pci_dev *pdev = adapter->pdev; in igb_request_irq()
1408 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_request_irq()
1426 igb_assign_vector(adapter->q_vector[0], 0); in igb_request_irq()
1428 if (adapter->flags & IGB_FLAG_HAS_MSI) { in igb_request_irq()
1429 err = request_irq(pdev->irq, igb_intr_msi, 0, in igb_request_irq()
1430 netdev->name, adapter); in igb_request_irq()
1436 adapter->flags &= ~IGB_FLAG_HAS_MSI; in igb_request_irq()
1439 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED, in igb_request_irq()
1440 netdev->name, adapter); in igb_request_irq()
1443 dev_err(&pdev->dev, "Error %d getting interrupt\n", in igb_request_irq()
1452 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_free_irq()
1455 free_irq(adapter->msix_entries[vector++].vector, adapter); in igb_free_irq()
1457 for (i = 0; i < adapter->num_q_vectors; i++) in igb_free_irq()
1458 free_irq(adapter->msix_entries[vector++].vector, in igb_free_irq()
1459 adapter->q_vector[i]); in igb_free_irq()
1461 free_irq(adapter->pdev->irq, adapter); in igb_free_irq()
1466 * igb_irq_disable - Mask off interrupt generation on the NIC
1471 struct e1000_hw *hw = &adapter->hw; in igb_irq_disable()
1477 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_irq_disable()
1480 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); in igb_irq_disable()
1481 wr32(E1000_EIMC, adapter->eims_enable_mask); in igb_irq_disable()
1483 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask); in igb_irq_disable()
1489 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_irq_disable()
1492 for (i = 0; i < adapter->num_q_vectors; i++) in igb_irq_disable()
1493 synchronize_irq(adapter->msix_entries[i].vector); in igb_irq_disable()
1495 synchronize_irq(adapter->pdev->irq); in igb_irq_disable()
1500 * igb_irq_enable - Enable default interrupt generation settings
1505 struct e1000_hw *hw = &adapter->hw; in igb_irq_enable()
1507 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_irq_enable()
1511 wr32(E1000_EIAC, regval | adapter->eims_enable_mask); in igb_irq_enable()
1513 wr32(E1000_EIAM, regval | adapter->eims_enable_mask); in igb_irq_enable()
1514 wr32(E1000_EIMS, adapter->eims_enable_mask); in igb_irq_enable()
1515 if (adapter->vfs_allocated_count) { in igb_irq_enable()
1530 struct e1000_hw *hw = &adapter->hw; in igb_update_mng_vlan()
1531 u16 pf_id = adapter->vfs_allocated_count; in igb_update_mng_vlan()
1532 u16 vid = adapter->hw.mng_cookie.vlan_id; in igb_update_mng_vlan()
1533 u16 old_vid = adapter->mng_vlan_id; in igb_update_mng_vlan()
1535 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { in igb_update_mng_vlan()
1538 adapter->mng_vlan_id = vid; in igb_update_mng_vlan()
1540 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; in igb_update_mng_vlan()
1545 !test_bit(old_vid, adapter->active_vlans)) { in igb_update_mng_vlan()
1552 * igb_release_hw_control - release control of the h/w to f/w
1561 struct e1000_hw *hw = &adapter->hw; in igb_release_hw_control()
1571 * igb_get_hw_control - get control of the h/w from f/w
1580 struct e1000_hw *hw = &adapter->hw; in igb_get_hw_control()
1591 struct net_device *netdev = adapter->netdev; in enable_fqtss()
1592 struct e1000_hw *hw = &adapter->hw; in enable_fqtss()
1594 WARN_ON(hw->mac.type != e1000_i210); in enable_fqtss()
1597 adapter->flags |= IGB_FLAG_FQTSS; in enable_fqtss()
1599 adapter->flags &= ~IGB_FLAG_FQTSS; in enable_fqtss()
1602 schedule_work(&adapter->reset_task); in enable_fqtss()
1607 return (adapter->flags & IGB_FLAG_FQTSS) ? true : false; in is_fqtss_enabled()
1615 WARN_ON(hw->mac.type != e1000_i210); in set_tx_desc_fetch_prio()
1632 WARN_ON(hw->mac.type != e1000_i210); in set_queue_mode()
1649 for (i = 0; i < adapter->num_tx_queues; i++) { in is_any_cbs_enabled()
1650 if (adapter->tx_ring[i]->cbs_enable) in is_any_cbs_enabled()
1661 for (i = 0; i < adapter->num_tx_queues; i++) { in is_any_txtime_enabled()
1662 if (adapter->tx_ring[i]->launchtime_enable) in is_any_txtime_enabled()
1670 * igb_config_tx_modes - Configure "Qav Tx mode" features on igb
1675 * Parameters are retrieved from the correct Tx ring, so
1681 struct igb_ring *ring = adapter->tx_ring[queue]; in igb_config_tx_modes()
1682 struct net_device *netdev = adapter->netdev; in igb_config_tx_modes()
1683 struct e1000_hw *hw = &adapter->hw; in igb_config_tx_modes()
1687 WARN_ON(hw->mac.type != e1000_i210); in igb_config_tx_modes()
1694 if (ring->cbs_enable || ring->launchtime_enable) { in igb_config_tx_modes()
1703 if (ring->cbs_enable || queue == 0) { in igb_config_tx_modes()
1713 if (queue == 0 && !ring->cbs_enable) { in igb_config_tx_modes()
1715 ring->idleslope = 1000000; in igb_config_tx_modes()
1716 ring->hicredit = ETH_FRAME_LEN; in igb_config_tx_modes()
1719 /* Always set data transfer arbitration to credit-based in igb_config_tx_modes()
1731 * For 100 Mbps link speed: in igb_config_tx_modes()
1740 * Note that 'link-speed' is in Mbps. in igb_config_tx_modes()
1742 * value = BW * 0x7735 * 2 * link-speed in igb_config_tx_modes()
1743 * -------------- (E3) in igb_config_tx_modes()
1752 * ----------------- (E4) in igb_config_tx_modes()
1753 * link-speed * 1000 in igb_config_tx_modes()
1759 * value = idleSlope * 0x7735 * 2 * link-speed in igb_config_tx_modes()
1760 * ----------------- -------------- (E5) in igb_config_tx_modes()
1761 * link-speed * 1000 1000 in igb_config_tx_modes()
1763 * 'link-speed' is present on both sides of the fraction so in igb_config_tx_modes()
1767 * ----------------- (E6) in igb_config_tx_modes()
1784 value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000); in igb_config_tx_modes()
1792 0x80000000 + ring->hicredit * 0x7735); in igb_config_tx_modes()
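A worked number for the simplified E6 formula above (illustrative only; a 200 Mbit/s reservation is assumed, with idleslope in kbit/s as taken here to be the unit of the CBS offload parameters): 200000 * 61034 / 1000000 rounds up to 12207, which is the credit value that ends up in the queue's credit configuration.

	/* Sketch of E6: value = DIV_ROUND_UP(idleSlope * 61034, 1000000). */
	static unsigned long long cbs_credit_value(unsigned long long idleslope_kbps)
	{
		return (idleslope_kbps * 61034ULL + 999999ULL) / 1000000ULL;
	}
	/* cbs_credit_value(200000) == 12207 for a 200 Mbit/s reservation */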
1815 if (ring->launchtime_enable) { in igb_config_tx_modes()
1819 * - LaunchTime will be enabled for all SR queues. in igb_config_tx_modes()
1820 * - A fixed offset can be added relative to the launch in igb_config_tx_modes()
1846 …netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredi… in igb_config_tx_modes()
1847 ring->cbs_enable ? "enabled" : "disabled", in igb_config_tx_modes()
1848 ring->launchtime_enable ? "enabled" : "disabled", in igb_config_tx_modes()
1850 ring->idleslope, ring->sendslope, in igb_config_tx_modes()
1851 ring->hicredit, ring->locredit); in igb_config_tx_modes()
1859 if (queue < 0 || queue > adapter->num_tx_queues) in igb_save_txtime_params()
1860 return -EINVAL; in igb_save_txtime_params()
1862 ring = adapter->tx_ring[queue]; in igb_save_txtime_params()
1863 ring->launchtime_enable = enable; in igb_save_txtime_params()
1874 if (queue < 0 || queue > adapter->num_tx_queues) in igb_save_cbs_params()
1875 return -EINVAL; in igb_save_cbs_params()
1877 ring = adapter->tx_ring[queue]; in igb_save_cbs_params()
1879 ring->cbs_enable = enable; in igb_save_cbs_params()
1880 ring->idleslope = idleslope; in igb_save_cbs_params()
1881 ring->sendslope = sendslope; in igb_save_cbs_params()
1882 ring->hicredit = hicredit; in igb_save_cbs_params()
1883 ring->locredit = locredit; in igb_save_cbs_params()
1889 * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable
1892 * Configure TQAVCTRL register switching the controller's Tx mode
1895 * Tx parameters are applied.
1899 struct net_device *netdev = adapter->netdev; in igb_setup_tx_mode()
1900 struct e1000_hw *hw = &adapter->hw; in igb_setup_tx_mode()
1904 if (hw->mac.type != e1000_i210) in igb_setup_tx_mode()
1919 /* Configure Tx and Rx packet buffers sizes as described in in igb_setup_tx_mode()
1942 * reason, we set MAX_TPKT_SIZE to (4kB - 1) / 64. in igb_setup_tx_mode()
1944 val = (4096 - 1) / 64; in igb_setup_tx_mode()
1952 max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ? in igb_setup_tx_mode()
1953 adapter->num_tx_queues : I210_SR_QUEUES_NUM; in igb_setup_tx_mode()
1977 * igb_configure - configure the hardware for RX and TX
1982 struct net_device *netdev = adapter->netdev; in igb_configure()
1999 igb_rx_fifo_flush_82575(&adapter->hw); in igb_configure()
2005 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_configure()
2006 struct igb_ring *ring = adapter->rx_ring[i]; in igb_configure()
2012 * igb_power_up_link - Power up the phy/serdes link
2017 igb_reset_phy(&adapter->hw); in igb_power_up_link()
2019 if (adapter->hw.phy.media_type == e1000_media_type_copper) in igb_power_up_link()
2020 igb_power_up_phy_copper(&adapter->hw); in igb_power_up_link()
2022 igb_power_up_serdes_link_82575(&adapter->hw); in igb_power_up_link()
2024 igb_setup_link(&adapter->hw); in igb_power_up_link()
2028 * igb_power_down_link - Power down the phy/serdes link
2033 if (adapter->hw.phy.media_type == e1000_media_type_copper) in igb_power_down_link()
2034 igb_power_down_phy_copper_82575(&adapter->hw); in igb_power_down_link()
2036 igb_shutdown_serdes_link_82575(&adapter->hw); in igb_power_down_link()
2045 struct e1000_hw *hw = &adapter->hw; in igb_check_swap_media()
2056 if ((hw->phy.media_type == e1000_media_type_copper) && in igb_check_swap_media()
2059 } else if ((hw->phy.media_type != e1000_media_type_copper) && in igb_check_swap_media()
2062 if (adapter->copper_tries < 4) { in igb_check_swap_media()
2063 adapter->copper_tries++; in igb_check_swap_media()
2068 adapter->copper_tries = 0; in igb_check_swap_media()
2081 switch (hw->phy.media_type) { in igb_check_swap_media()
2083 netdev_info(adapter->netdev, in igb_check_swap_media()
2087 adapter->flags |= IGB_FLAG_MEDIA_RESET; in igb_check_swap_media()
2088 adapter->copper_tries = 0; in igb_check_swap_media()
2092 netdev_info(adapter->netdev, in igb_check_swap_media()
2096 adapter->flags |= IGB_FLAG_MEDIA_RESET; in igb_check_swap_media()
2100 netdev_err(adapter->netdev, in igb_check_swap_media()
2108 * igb_up - Open the interface and prepare it to handle traffic
2113 struct e1000_hw *hw = &adapter->hw; in igb_up()
2119 clear_bit(__IGB_DOWN, &adapter->state); in igb_up()
2121 for (i = 0; i < adapter->num_q_vectors; i++) in igb_up()
2122 napi_enable(&(adapter->q_vector[i]->napi)); in igb_up()
2124 if (adapter->flags & IGB_FLAG_HAS_MSIX) in igb_up()
2127 igb_assign_vector(adapter->q_vector[0], 0); in igb_up()
2135 if (adapter->vfs_allocated_count) { in igb_up()
2142 netif_tx_start_all_queues(adapter->netdev); in igb_up()
2145 hw->mac.get_link_status = 1; in igb_up()
2146 schedule_work(&adapter->watchdog_task); in igb_up()
2148 if ((adapter->flags & IGB_FLAG_EEE) && in igb_up()
2149 (!hw->dev_spec._82575.eee_disable)) in igb_up()
2150 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; in igb_up()
2157 struct net_device *netdev = adapter->netdev; in igb_down()
2158 struct e1000_hw *hw = &adapter->hw; in igb_down()
2165 set_bit(__IGB_DOWN, &adapter->state); in igb_down()
2187 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; in igb_down()
2189 for (i = 0; i < adapter->num_q_vectors; i++) { in igb_down()
2190 if (adapter->q_vector[i]) { in igb_down()
2191 napi_synchronize(&adapter->q_vector[i]->napi); in igb_down()
2192 napi_disable(&adapter->q_vector[i]->napi); in igb_down()
2196 del_timer_sync(&adapter->watchdog_timer); in igb_down()
2197 del_timer_sync(&adapter->phy_info_timer); in igb_down()
2200 spin_lock(&adapter->stats64_lock); in igb_down()
2202 spin_unlock(&adapter->stats64_lock); in igb_down()
2204 adapter->link_speed = 0; in igb_down()
2205 adapter->link_duplex = 0; in igb_down()
2207 if (!pci_channel_offline(adapter->pdev)) in igb_down()
2211 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; in igb_down()
2224 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) in igb_reinit_locked()
2228 clear_bit(__IGB_RESETTING, &adapter->state); in igb_reinit_locked()
2231 /** igb_enable_mas - Media Autosense re-enable after swap
2237 struct e1000_hw *hw = &adapter->hw; in igb_enable_mas()
2241 if ((hw->phy.media_type == e1000_media_type_copper) && in igb_enable_mas()
2252 struct pci_dev *pdev = adapter->pdev; in igb_reset()
2253 struct e1000_hw *hw = &adapter->hw; in igb_reset()
2254 struct e1000_mac_info *mac = &hw->mac; in igb_reset()
2255 struct e1000_fc_info *fc = &hw->fc; in igb_reset()
2261 switch (mac->type) { in igb_reset()
2280 if (mac->type == e1000_82575) { in igb_reset()
2283 /* write Rx PBA so that hardware can report correct Tx PBA */ in igb_reset()
2286 /* To maintain wire speed transmits, the Tx FIFO should be in igb_reset()
2295 /* The Tx FIFO also stores 16 bytes of information about the Tx in igb_reset()
2300 min_tx_space = adapter->max_frame_size; in igb_reset()
2301 min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN; in igb_reset()
2304 /* upper 16 bits has Tx packet buffer allocation size in KB */ in igb_reset()
2305 needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16); in igb_reset()
2307 /* If current Tx allocation is less than the min Tx FIFO size, in igb_reset()
2308 * and the min Tx FIFO size is less than the current Rx FIFO in igb_reset()
2312 pba -= needed_tx_space; in igb_reset()
2314 /* if short on Rx space, Rx wins and must trump Tx in igb_reset()
2331 * - the full Rx FIFO size minus one full Tx plus one full Rx frame in igb_reset()
2333 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); in igb_reset()
2335 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ in igb_reset()
2336 fc->low_water = fc->high_water - 16; in igb_reset()
2337 fc->pause_time = 0xFFFF; in igb_reset()
2338 fc->send_xon = 1; in igb_reset()
2339 fc->current_mode = fc->requested_mode; in igb_reset()
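For a feel of the watermark math above, a sketch with assumed numbers (34 KB Rx packet buffer, 1522-byte max frame, 0x3F00 jumbo ceiling; all three are illustrative values, not taken from the listing):

	/* Illustrative only -- the constants are assumptions for the example. */
	static void hwm_example(void)
	{
		unsigned int pba = 34;					/* Rx buffer, KB */
		unsigned int hwm = (pba << 10) - (1522 + 0x3F00);	/* 17166 */
		unsigned int high_water = hwm & 0xFFFFFFF0;		/* 17152 */
		unsigned int low_water  = high_water - 16;		/* 17136 */

		(void)low_water;
	}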
2342 if (adapter->vfs_allocated_count) { in igb_reset()
2345 for (i = 0 ; i < adapter->vfs_allocated_count; i++) in igb_reset()
2346 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC; in igb_reset()
2357 hw->mac.ops.reset_hw(hw); in igb_reset()
2360 if (adapter->flags & IGB_FLAG_MEDIA_RESET) { in igb_reset()
2362 adapter->ei.get_invariants(hw); in igb_reset()
2363 adapter->flags &= ~IGB_FLAG_MEDIA_RESET; in igb_reset()
2365 if ((mac->type == e1000_82575 || mac->type == e1000_i350) && in igb_reset()
2366 (adapter->flags & IGB_FLAG_MAS_ENABLE)) { in igb_reset()
2369 if (hw->mac.ops.init_hw(hw)) in igb_reset()
2370 dev_err(&pdev->dev, "Hardware Error\n"); in igb_reset()
2374 __dev_uc_unsync(adapter->netdev, NULL); in igb_reset()
2382 if (!hw->mac.autoneg) in igb_reset()
2387 /* Re-initialize the thermal sensor on i350 devices. */ in igb_reset()
2388 if (!test_bit(__IGB_DOWN, &adapter->state)) { in igb_reset()
2389 if (mac->type == e1000_i350 && hw->bus.func == 0) { in igb_reset()
2390 /* If present, re-initialize the external thermal sensor in igb_reset()
2393 if (adapter->ets) in igb_reset()
2394 mac->ops.init_thermal_sensor_thresh(hw); in igb_reset()
2398 /* Re-establish EEE setting */ in igb_reset()
2399 if (hw->phy.media_type == e1000_media_type_copper) { in igb_reset()
2400 switch (mac->type) { in igb_reset()
2413 if (!netif_running(adapter->netdev)) in igb_reset()
2421 /* Re-enable PTP, where applicable. */ in igb_reset()
2422 if (adapter->ptp_flags & IGB_PTP_ENABLED) in igb_reset()
2431 /* Since there is no support for separate Rx/Tx vlan accel in igb_fix_features()
2432 * enable/disable make sure Tx flag is always in same state as Rx. in igb_fix_features()
2445 netdev_features_t changed = netdev->features ^ features; in igb_set_features()
2458 spin_lock(&adapter->nfc_lock); in igb_set_features()
2460 &adapter->nfc_filter_list, nfc_node) { in igb_set_features()
2462 hlist_del(&rule->nfc_node); in igb_set_features()
2465 spin_unlock(&adapter->nfc_lock); in igb_set_features()
2466 adapter->nfc_filter_count = 0; in igb_set_features()
2469 netdev->features = features; in igb_set_features()
2488 int vfn = adapter->vfs_allocated_count; in igb_ndo_fdb_add()
2491 return -ENOMEM; in igb_ndo_fdb_add()
2507 mac_hdr_len = skb_network_header(skb) - skb->data; in igb_features_check()
2516 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); in igb_features_check()
2527 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) in igb_features_check()
2549 struct e1000_hw *hw = &adapter->hw; in igb_offload_cbs()
2553 if (hw->mac.type != e1000_i210) in igb_offload_cbs()
2554 return -EOPNOTSUPP; in igb_offload_cbs()
2557 if (qopt->queue < 0 || qopt->queue > 1) in igb_offload_cbs()
2558 return -EINVAL; in igb_offload_cbs()
2560 err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable, in igb_offload_cbs()
2561 qopt->idleslope, qopt->sendslope, in igb_offload_cbs()
2562 qopt->hicredit, qopt->locredit); in igb_offload_cbs()
2566 igb_offload_apply(adapter, qopt->queue); in igb_offload_cbs()
2580 struct flow_dissector *dissector = rule->match.dissector; in igb_parse_cls_flower()
2581 struct netlink_ext_ack *extack = f->common.extack; in igb_parse_cls_flower()
2583 if (dissector->used_keys & in igb_parse_cls_flower()
2590 return -EOPNOTSUPP; in igb_parse_cls_flower()
2597 if (!is_zero_ether_addr(match.mask->dst)) { in igb_parse_cls_flower()
2598 if (!is_broadcast_ether_addr(match.mask->dst)) { in igb_parse_cls_flower()
2600 return -EINVAL; in igb_parse_cls_flower()
2603 input->filter.match_flags |= in igb_parse_cls_flower()
2605 ether_addr_copy(input->filter.dst_addr, match.key->dst); in igb_parse_cls_flower()
2608 if (!is_zero_ether_addr(match.mask->src)) { in igb_parse_cls_flower()
2609 if (!is_broadcast_ether_addr(match.mask->src)) { in igb_parse_cls_flower()
2611 return -EINVAL; in igb_parse_cls_flower()
2614 input->filter.match_flags |= in igb_parse_cls_flower()
2616 ether_addr_copy(input->filter.src_addr, match.key->src); in igb_parse_cls_flower()
2624 if (match.mask->n_proto) { in igb_parse_cls_flower()
2625 if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) { in igb_parse_cls_flower()
2627 return -EINVAL; in igb_parse_cls_flower()
2630 input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE; in igb_parse_cls_flower()
2631 input->filter.etype = match.key->n_proto; in igb_parse_cls_flower()
2639 if (match.mask->vlan_priority) { in igb_parse_cls_flower()
2640 if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) { in igb_parse_cls_flower()
2642 return -EINVAL; in igb_parse_cls_flower()
2645 input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; in igb_parse_cls_flower()
2646 input->filter.vlan_tci = match.key->vlan_priority; in igb_parse_cls_flower()
2650 input->action = traffic_class; in igb_parse_cls_flower()
2651 input->cookie = f->cookie; in igb_parse_cls_flower()
2659 struct netlink_ext_ack *extack = cls_flower->common.extack; in igb_configure_clsflower()
2663 tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); in igb_configure_clsflower()
2666 return -EINVAL; in igb_configure_clsflower()
2671 return -ENOMEM; in igb_configure_clsflower()
2677 spin_lock(&adapter->nfc_lock); in igb_configure_clsflower()
2679 hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) { in igb_configure_clsflower()
2680 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { in igb_configure_clsflower()
2681 err = -EEXIST; in igb_configure_clsflower()
2688 hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) { in igb_configure_clsflower()
2689 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { in igb_configure_clsflower()
2690 err = -EEXIST; in igb_configure_clsflower()
2703 hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list); in igb_configure_clsflower()
2705 spin_unlock(&adapter->nfc_lock); in igb_configure_clsflower()
2710 spin_unlock(&adapter->nfc_lock); in igb_configure_clsflower()
2724 spin_lock(&adapter->nfc_lock); in igb_delete_clsflower()
2726 hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node) in igb_delete_clsflower()
2727 if (filter->cookie == cls_flower->cookie) in igb_delete_clsflower()
2731 err = -ENOENT; in igb_delete_clsflower()
2739 hlist_del(&filter->nfc_node); in igb_delete_clsflower()
2743 spin_unlock(&adapter->nfc_lock); in igb_delete_clsflower()
2751 switch (cls_flower->command) { in igb_setup_tc_cls_flower()
2757 return -EOPNOTSUPP; in igb_setup_tc_cls_flower()
2759 return -EOPNOTSUPP; in igb_setup_tc_cls_flower()
2768 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) in igb_setup_tc_block_cb()
2769 return -EOPNOTSUPP; in igb_setup_tc_block_cb()
2776 return -EOPNOTSUPP; in igb_setup_tc_block_cb()
2783 struct e1000_hw *hw = &adapter->hw; in igb_offload_txtime()
2787 if (hw->mac.type != e1000_i210) in igb_offload_txtime()
2788 return -EOPNOTSUPP; in igb_offload_txtime()
2791 if (qopt->queue < 0 || qopt->queue > 1) in igb_offload_txtime()
2792 return -EINVAL; in igb_offload_txtime()
2794 err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable); in igb_offload_txtime()
2798 igb_offload_apply(adapter, qopt->queue); in igb_offload_txtime()
2823 return -EOPNOTSUPP; in igb_setup_tc()
2829 int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD; in igb_xdp_setup()
2831 struct bpf_prog *prog = bpf->prog, *old_prog; in igb_xdp_setup()
2836 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_xdp_setup()
2837 struct igb_ring *ring = adapter->rx_ring[i]; in igb_xdp_setup()
2840 NL_SET_ERR_MSG_MOD(bpf->extack, in igb_xdp_setup()
2844 return -EINVAL; in igb_xdp_setup()
2848 old_prog = xchg(&adapter->xdp_prog, prog); in igb_xdp_setup()
2855 for (i = 0; i < adapter->num_rx_queues; i++) in igb_xdp_setup()
2856 (void)xchg(&adapter->rx_ring[i]->xdp_prog, in igb_xdp_setup()
2857 adapter->xdp_prog); in igb_xdp_setup()
2875 switch (xdp->command) { in igb_xdp()
2879 return -EINVAL; in igb_xdp()
2889 writel(ring->next_to_use, ring->tail); in igb_xdp_ring_update_tail()
2896 if (r_idx >= adapter->num_tx_queues) in igb_xdp_tx_queue_mapping()
2897 r_idx = r_idx % adapter->num_tx_queues; in igb_xdp_tx_queue_mapping()
2899 return adapter->tx_ring[r_idx]; in igb_xdp_tx_queue_mapping()
2913 /* During program transitions it's possible adapter->xdp_prog is assigned in igb_xdp_xmit_back()
2916 tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL; in igb_xdp_xmit_back()
2923 nq->trans_start = jiffies; in igb_xdp_xmit_back()
2940 if (unlikely(test_bit(__IGB_DOWN, &adapter->state))) in igb_xdp_xmit()
2941 return -ENETDOWN; in igb_xdp_xmit()
2944 return -EINVAL; in igb_xdp_xmit()
2946 /* During program transitions it's possible adapter->xdp_prog is assigned in igb_xdp_xmit()
2949 tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL; in igb_xdp_xmit()
2951 return -ENXIO; in igb_xdp_xmit()
2957 nq->trans_start = jiffies; in igb_xdp_xmit()
2975 return n - drops; in igb_xdp_xmit()
3007 * igb_set_fw_version - Configure version string for ethtool
3012 struct e1000_hw *hw = &adapter->hw; in igb_set_fw_version()
3017 switch (hw->mac.type) { in igb_set_fw_version()
3021 snprintf(adapter->fw_version, in igb_set_fw_version()
3022 sizeof(adapter->fw_version), in igb_set_fw_version()
3023 "%2d.%2d-%d", in igb_set_fw_version()
3032 snprintf(adapter->fw_version, in igb_set_fw_version()
3033 sizeof(adapter->fw_version), in igb_set_fw_version()
3039 snprintf(adapter->fw_version, in igb_set_fw_version()
3040 sizeof(adapter->fw_version), in igb_set_fw_version()
3044 snprintf(adapter->fw_version, in igb_set_fw_version()
3045 sizeof(adapter->fw_version), in igb_set_fw_version()
3054 * igb_init_mas - init Media Autosense feature if enabled in the NVM
3060 struct e1000_hw *hw = &adapter->hw; in igb_init_mas()
3063 hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data); in igb_init_mas()
3064 switch (hw->bus.func) { in igb_init_mas()
3067 adapter->flags |= IGB_FLAG_MAS_ENABLE; in igb_init_mas()
3068 netdev_info(adapter->netdev, in igb_init_mas()
3070 hw->bus.func); in igb_init_mas()
3075 adapter->flags |= IGB_FLAG_MAS_ENABLE; in igb_init_mas()
3076 netdev_info(adapter->netdev, in igb_init_mas()
3078 hw->bus.func); in igb_init_mas()
3083 adapter->flags |= IGB_FLAG_MAS_ENABLE; in igb_init_mas()
3084 netdev_info(adapter->netdev, in igb_init_mas()
3086 hw->bus.func); in igb_init_mas()
3091 adapter->flags |= IGB_FLAG_MAS_ENABLE; in igb_init_mas()
3092 netdev_info(adapter->netdev, in igb_init_mas()
3094 hw->bus.func); in igb_init_mas()
3099 netdev_err(adapter->netdev, in igb_init_mas()
3106 * igb_init_i2c - Init I2C interface
3114 if (adapter->hw.mac.type != e1000_i350) in igb_init_i2c()
3121 adapter->i2c_adap.owner = THIS_MODULE; in igb_init_i2c()
3122 adapter->i2c_algo = igb_i2c_algo; in igb_init_i2c()
3123 adapter->i2c_algo.data = adapter; in igb_init_i2c()
3124 adapter->i2c_adap.algo_data = &adapter->i2c_algo; in igb_init_i2c()
3125 adapter->i2c_adap.dev.parent = &adapter->pdev->dev; in igb_init_i2c()
3126 strlcpy(adapter->i2c_adap.name, "igb BB", in igb_init_i2c()
3127 sizeof(adapter->i2c_adap.name)); in igb_init_i2c()
3128 status = i2c_bit_add_bus(&adapter->i2c_adap); in igb_init_i2c()
3133 * igb_probe - Device Initialization Routine
3151 const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; in igb_probe()
3155 /* Catch broken hardware that put the wrong VF device ID in in igb_probe()
3156 * the PCIe SR-IOV capability. in igb_probe()
3158 if (pdev->is_virtfn) { in igb_probe()
3160 pci_name(pdev), pdev->vendor, pdev->device); in igb_probe()
3161 return -EINVAL; in igb_probe()
3169 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in igb_probe()
3173 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in igb_probe()
3175 dev_err(&pdev->dev, in igb_probe()
3190 err = -ENOMEM; in igb_probe()
3196 SET_NETDEV_DEV(netdev, &pdev->dev); in igb_probe()
3200 adapter->netdev = netdev; in igb_probe()
3201 adapter->pdev = pdev; in igb_probe()
3202 hw = &adapter->hw; in igb_probe()
3203 hw->back = adapter; in igb_probe()
3204 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); in igb_probe()
3206 err = -EIO; in igb_probe()
3207 adapter->io_addr = pci_iomap(pdev, 0, 0); in igb_probe()
3208 if (!adapter->io_addr) in igb_probe()
3210 /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */ in igb_probe()
3211 hw->hw_addr = adapter->io_addr; in igb_probe()
3213 netdev->netdev_ops = &igb_netdev_ops; in igb_probe()
3215 netdev->watchdog_timeo = 5 * HZ; in igb_probe()
3217 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); in igb_probe()
3219 netdev->mem_start = pci_resource_start(pdev, 0); in igb_probe()
3220 netdev->mem_end = pci_resource_end(pdev, 0); in igb_probe()
3223 hw->vendor_id = pdev->vendor; in igb_probe()
3224 hw->device_id = pdev->device; in igb_probe()
3225 hw->revision_id = pdev->revision; in igb_probe()
3226 hw->subsystem_vendor_id = pdev->subsystem_vendor; in igb_probe()
3227 hw->subsystem_device_id = pdev->subsystem_device; in igb_probe()
3230 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); in igb_probe()
3231 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); in igb_probe()
3232 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); in igb_probe()
3233 /* Initialize skew-specific constants */ in igb_probe()
3234 err = ei->get_invariants(hw); in igb_probe()
3245 hw->phy.autoneg_wait_to_complete = false; in igb_probe()
3248 if (hw->phy.media_type == e1000_media_type_copper) { in igb_probe()
3249 hw->phy.mdix = AUTO_ALL_MODES; in igb_probe()
3250 hw->phy.disable_polarity_correction = false; in igb_probe()
3251 hw->phy.ms_type = e1000_ms_hw_default; in igb_probe()
3255 dev_info(&pdev->dev, in igb_probe()
3262 netdev->features |= NETIF_F_SG | in igb_probe()
3269 if (hw->mac.type >= e1000_82576) in igb_probe()
3270 netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4; in igb_probe()
3272 if (hw->mac.type >= e1000_i350) in igb_probe()
3273 netdev->features |= NETIF_F_HW_TC; in igb_probe()
3282 netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES; in igb_probe()
3283 netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES; in igb_probe()
3286 netdev->hw_features |= netdev->features | in igb_probe()
3291 if (hw->mac.type >= e1000_i350) in igb_probe()
3292 netdev->hw_features |= NETIF_F_NTUPLE; in igb_probe()
3295 netdev->features |= NETIF_F_HIGHDMA; in igb_probe()
3297 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; in igb_probe()
3298 netdev->mpls_features |= NETIF_F_HW_CSUM; in igb_probe()
3299 netdev->hw_enc_features |= netdev->vlan_features; in igb_probe()
3302 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | in igb_probe()
3306 netdev->priv_flags |= IFF_SUPP_NOFCS; in igb_probe()
3308 netdev->priv_flags |= IFF_UNICAST_FLT; in igb_probe()
3310 /* MTU range: 68 - 9216 */ in igb_probe()
3311 netdev->min_mtu = ETH_MIN_MTU; in igb_probe()
3312 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; in igb_probe()
3314 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); in igb_probe()
3319 hw->mac.ops.reset_hw(hw); in igb_probe()
3324 switch (hw->mac.type) { in igb_probe()
3328 if (hw->nvm.ops.validate(hw) < 0) { in igb_probe()
3329 dev_err(&pdev->dev, in igb_probe()
3331 err = -EIO; in igb_probe()
3337 if (hw->nvm.ops.validate(hw) < 0) { in igb_probe()
3338 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); in igb_probe()
3339 err = -EIO; in igb_probe()
3345 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { in igb_probe()
3347 if (hw->mac.ops.read_mac_addr(hw)) in igb_probe()
3348 dev_err(&pdev->dev, "NVM Read Error\n"); in igb_probe()
3351 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); in igb_probe()
3353 if (!is_valid_ether_addr(netdev->dev_addr)) { in igb_probe()
3354 dev_err(&pdev->dev, "Invalid MAC Address\n"); in igb_probe()
3355 err = -EIO; in igb_probe()
3361 /* get firmware version for ethtool -i */ in igb_probe()
3365 if (hw->mac.type == e1000_i210) { in igb_probe()
3370 timer_setup(&adapter->watchdog_timer, igb_watchdog, 0); in igb_probe()
3371 timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0); in igb_probe()
3373 INIT_WORK(&adapter->reset_task, igb_reset_task); in igb_probe()
3374 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); in igb_probe()
3376 /* Initialize link properties that are user-changeable */ in igb_probe()
3377 adapter->fc_autoneg = true; in igb_probe()
3378 hw->mac.autoneg = true; in igb_probe()
3379 hw->phy.autoneg_advertised = 0x2f; in igb_probe()
3381 hw->fc.requested_mode = e1000_fc_default; in igb_probe()
3382 hw->fc.current_mode = e1000_fc_default; in igb_probe()
3387 if (hw->bus.func == 0) in igb_probe()
3388 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3390 /* Check the NVM for wake support on non-port A ports */ in igb_probe()
3391 if (hw->mac.type >= e1000_82580) in igb_probe()
3392 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + in igb_probe()
3393 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, in igb_probe()
3395 else if (hw->bus.func == 1) in igb_probe()
3396 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); in igb_probe()
3399 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3405 switch (pdev->device) { in igb_probe()
3407 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3416 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3422 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3424 adapter->flags |= IGB_FLAG_QUAD_PORT_A; in igb_probe()
3431 if (!device_can_wakeup(&adapter->pdev->dev)) in igb_probe()
3432 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3436 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED) in igb_probe()
3437 adapter->wol |= E1000_WUFC_MAG; in igb_probe()
3440 if ((hw->mac.type == e1000_i350) && in igb_probe()
3441 (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) { in igb_probe()
3442 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3443 adapter->wol = 0; in igb_probe()
3449 if (((hw->mac.type == e1000_i350) || in igb_probe()
3450 (hw->mac.type == e1000_i354)) && in igb_probe()
3451 (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) { in igb_probe()
3452 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3453 adapter->wol = 0; in igb_probe()
3455 if (hw->mac.type == e1000_i350) { in igb_probe()
3456 if (((pdev->subsystem_device == 0x5001) || in igb_probe()
3457 (pdev->subsystem_device == 0x5002)) && in igb_probe()
3458 (hw->bus.func == 0)) { in igb_probe()
3459 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3460 adapter->wol = 0; in igb_probe()
3462 if (pdev->subsystem_device == 0x1F52) in igb_probe()
3463 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3466 device_set_wakeup_enable(&adapter->pdev->dev, in igb_probe()
3467 adapter->flags & IGB_FLAG_WOL_SUPPORTED); in igb_probe()
3475 dev_err(&pdev->dev, "failed to init i2c interface\n"); in igb_probe()
3484 strcpy(netdev->name, "eth%d"); in igb_probe()
3493 if (dca_add_requester(&pdev->dev) == 0) { in igb_probe()
3494 adapter->flags |= IGB_FLAG_DCA_ENABLED; in igb_probe()
3495 dev_info(&pdev->dev, "DCA enabled\n"); in igb_probe()
3502 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { in igb_probe()
3508 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); in igb_probe()
3510 adapter->ets = true; in igb_probe()
3512 adapter->ets = false; in igb_probe()
3514 dev_err(&pdev->dev, in igb_probe()
3517 adapter->ets = false; in igb_probe()
3521 adapter->ei = *ei; in igb_probe()
3522 if (hw->dev_spec._82575.mas_capable) in igb_probe()
3528 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); in igb_probe()
3530 if (hw->mac.type != e1000_i354) { in igb_probe()
3531 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", in igb_probe()
3532 netdev->name, in igb_probe()
3533 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : in igb_probe()
3534 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : in igb_probe()
3536 ((hw->bus.width == e1000_bus_width_pcie_x4) ? in igb_probe()
3538 (hw->bus.width == e1000_bus_width_pcie_x2) ? in igb_probe()
3540 (hw->bus.width == e1000_bus_width_pcie_x1) ? in igb_probe()
3541 "Width x1" : "unknown"), netdev->dev_addr); in igb_probe()
3544 if ((hw->mac.type == e1000_82576 && in igb_probe()
3546 (hw->mac.type >= e1000_i210 || in igb_probe()
3551 ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND; in igb_probe()
3556 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str); in igb_probe()
3557 dev_info(&pdev->dev, in igb_probe()
3558 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", in igb_probe()
3559 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" : in igb_probe()
3560 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", in igb_probe()
3561 adapter->num_rx_queues, adapter->num_tx_queues); in igb_probe()
3562 if (hw->phy.media_type == e1000_media_type_copper) { in igb_probe()
3563 switch (hw->mac.type) { in igb_probe()
3567 /* Enable EEE for internal copper PHY devices */ in igb_probe()
3570 (!hw->dev_spec._82575.eee_disable)) { in igb_probe()
3571 adapter->eee_advert = in igb_probe()
3573 adapter->flags |= IGB_FLAG_EEE; in igb_probe()
3581 (!hw->dev_spec._82575.eee_disable)) { in igb_probe()
3582 adapter->eee_advert = in igb_probe()
3584 adapter->flags |= IGB_FLAG_EEE; in igb_probe()
3593 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); in igb_probe()
3595 pm_runtime_put_noidle(&pdev->dev); in igb_probe()
3600 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap)); in igb_probe()
3605 if (hw->flash_address) in igb_probe()
3606 iounmap(hw->flash_address); in igb_probe()
3608 kfree(adapter->mac_table); in igb_probe()
3609 kfree(adapter->shadow_vfta); in igb_probe()
3614 pci_iounmap(pdev, adapter->io_addr); in igb_probe()
3630 struct e1000_hw *hw = &adapter->hw; in igb_disable_sriov()
3633 if (adapter->vf_data) { in igb_disable_sriov()
3636 dev_warn(&pdev->dev, in igb_disable_sriov()
3637 …"Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\… in igb_disable_sriov()
3638 return -EPERM; in igb_disable_sriov()
3644 kfree(adapter->vf_mac_list); in igb_disable_sriov()
3645 adapter->vf_mac_list = NULL; in igb_disable_sriov()
3646 kfree(adapter->vf_data); in igb_disable_sriov()
3647 adapter->vf_data = NULL; in igb_disable_sriov()
3648 adapter->vfs_allocated_count = 0; in igb_disable_sriov()
3651 msleep(100); in igb_disable_sriov()
3652 dev_info(&pdev->dev, "IOV Disabled\n"); in igb_disable_sriov()
3654 /* Re-enable DMA Coalescing flag since IOV is turned off */ in igb_disable_sriov()
3655 adapter->flags |= IGB_FLAG_DMAC; in igb_disable_sriov()
3670 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) { in igb_enable_sriov()
3671 err = -EPERM; in igb_enable_sriov()
3678 dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n", in igb_enable_sriov()
3680 adapter->vfs_allocated_count = old_vfs; in igb_enable_sriov()
3682 adapter->vfs_allocated_count = num_vfs; in igb_enable_sriov()
3684 adapter->vf_data = kcalloc(adapter->vfs_allocated_count, in igb_enable_sriov()
3687 /* if allocation failed then we do not support SR-IOV */ in igb_enable_sriov()
3688 if (!adapter->vf_data) { in igb_enable_sriov()
3689 adapter->vfs_allocated_count = 0; in igb_enable_sriov()
3690 err = -ENOMEM; in igb_enable_sriov()
3699 num_vf_mac_filters = adapter->hw.mac.rar_entry_count - in igb_enable_sriov()
3701 adapter->vfs_allocated_count); in igb_enable_sriov()
3703 adapter->vf_mac_list = kcalloc(num_vf_mac_filters, in igb_enable_sriov()
3707 mac_list = adapter->vf_mac_list; in igb_enable_sriov()
3708 INIT_LIST_HEAD(&adapter->vf_macs.l); in igb_enable_sriov()
3710 if (adapter->vf_mac_list) { in igb_enable_sriov()
3713 mac_list->vf = -1; in igb_enable_sriov()
3714 mac_list->free = true; in igb_enable_sriov()
3715 list_add(&mac_list->l, &adapter->vf_macs.l); in igb_enable_sriov()
3722 dev_err(&pdev->dev, in igb_enable_sriov()
3728 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); in igb_enable_sriov()
3732 dev_info(&pdev->dev, "%d VFs allocated\n", in igb_enable_sriov()
3733 adapter->vfs_allocated_count); in igb_enable_sriov()
3734 for (i = 0; i < adapter->vfs_allocated_count; i++) in igb_enable_sriov()
3738 adapter->flags &= ~IGB_FLAG_DMAC; in igb_enable_sriov()
3742 kfree(adapter->vf_mac_list); in igb_enable_sriov()
3743 adapter->vf_mac_list = NULL; in igb_enable_sriov()
3744 kfree(adapter->vf_data); in igb_enable_sriov()
3745 adapter->vf_data = NULL; in igb_enable_sriov()
3746 adapter->vfs_allocated_count = 0; in igb_enable_sriov()
3753 * igb_remove_i2c - Cleanup I2C interface
3759 i2c_del_adapter(&adapter->i2c_adap); in igb_remove_i2c()
3763 * igb_remove - Device Removal Routine
3768 * Hot-Plug event, or because the driver is going to be removed from
3775 struct e1000_hw *hw = &adapter->hw; in igb_remove()
3777 pm_runtime_get_noresume(&pdev->dev); in igb_remove()
3786 set_bit(__IGB_DOWN, &adapter->state); in igb_remove()
3787 del_timer_sync(&adapter->watchdog_timer); in igb_remove()
3788 del_timer_sync(&adapter->phy_info_timer); in igb_remove()
3790 cancel_work_sync(&adapter->reset_task); in igb_remove()
3791 cancel_work_sync(&adapter->watchdog_task); in igb_remove()
3794 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { in igb_remove()
3795 dev_info(&pdev->dev, "DCA disabled\n"); in igb_remove()
3796 dca_remove_requester(&pdev->dev); in igb_remove()
3797 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; in igb_remove()
3815 pci_iounmap(pdev, adapter->io_addr); in igb_remove()
3816 if (hw->flash_address) in igb_remove()
3817 iounmap(hw->flash_address); in igb_remove()
3820 kfree(adapter->mac_table); in igb_remove()
3821 kfree(adapter->shadow_vfta); in igb_remove()
3830 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
3835  more expensive time-wise to disable SR-IOV than it is to allocate and free	 in igb_probe_vfs()
3841 struct pci_dev *pdev = adapter->pdev; in igb_probe_vfs()
3842 struct e1000_hw *hw = &adapter->hw; in igb_probe_vfs()
3845 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) in igb_probe_vfs()
3863 struct e1000_hw *hw = &adapter->hw; in igb_get_max_rss_queues()
3867 switch (hw->mac.type) { in igb_get_max_rss_queues()
3876 /* I350 cannot do RSS and SR-IOV at the same time */ in igb_get_max_rss_queues()
3877 if (!!adapter->vfs_allocated_count) { in igb_get_max_rss_queues()
3883 if (!!adapter->vfs_allocated_count) { in igb_get_max_rss_queues()
3903 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); in igb_init_queue_configuration()
3911 struct e1000_hw *hw = &adapter->hw; in igb_set_flag_queue_pairs()
3914 switch (hw->mac.type) { in igb_set_flag_queue_pairs()
3928 if (adapter->rss_queues > (max_rss_queues / 2)) in igb_set_flag_queue_pairs()
3929 adapter->flags |= IGB_FLAG_QUEUE_PAIRS; in igb_set_flag_queue_pairs()
3931 adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS; in igb_set_flag_queue_pairs()
3937 * igb_sw_init - Initialize general software structures (struct igb_adapter)
3946 struct e1000_hw *hw = &adapter->hw; in igb_sw_init()
3947 struct net_device *netdev = adapter->netdev; in igb_sw_init()
3948 struct pci_dev *pdev = adapter->pdev; in igb_sw_init()
3950 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); in igb_sw_init()
3953 adapter->tx_ring_count = IGB_DEFAULT_TXD; in igb_sw_init()
3954 adapter->rx_ring_count = IGB_DEFAULT_RXD; in igb_sw_init()
3957 adapter->rx_itr_setting = IGB_DEFAULT_ITR; in igb_sw_init()
3958 adapter->tx_itr_setting = IGB_DEFAULT_ITR; in igb_sw_init()
3961 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; in igb_sw_init()
3963 adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD; in igb_sw_init()
3964 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; in igb_sw_init()
3966 spin_lock_init(&adapter->nfc_lock); in igb_sw_init()
3967 spin_lock_init(&adapter->stats64_lock); in igb_sw_init()
3969 switch (hw->mac.type) { in igb_sw_init()
3973 dev_warn(&pdev->dev, in igb_sw_init()
3975 max_vfs = adapter->vfs_allocated_count = 7; in igb_sw_init()
3977 adapter->vfs_allocated_count = max_vfs; in igb_sw_init()
3978 if (adapter->vfs_allocated_count) in igb_sw_init()
3979 dev_warn(&pdev->dev, in igb_sw_init()
3980 …"Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface… in igb_sw_init()
3987 /* Assume MSI-X interrupts, will be checked during IRQ allocation */ in igb_sw_init()
3988 adapter->flags |= IGB_FLAG_HAS_MSIX; in igb_sw_init()
3990 adapter->mac_table = kcalloc(hw->mac.rar_entry_count, in igb_sw_init()
3993 if (!adapter->mac_table) in igb_sw_init()
3994 return -ENOMEM; in igb_sw_init()
4001 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), in igb_sw_init()
4003 if (!adapter->shadow_vfta) in igb_sw_init()
4004 return -ENOMEM; in igb_sw_init()
4008 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); in igb_sw_init()
4009 return -ENOMEM; in igb_sw_init()
4015 if (hw->mac.type >= e1000_i350) in igb_sw_init()
4016 adapter->flags &= ~IGB_FLAG_DMAC; in igb_sw_init()
4018 set_bit(__IGB_DOWN, &adapter->state); in igb_sw_init()
4023 * igb_open - Called when a network interface is made active
4038 struct e1000_hw *hw = &adapter->hw; in __igb_open()
4039 struct pci_dev *pdev = adapter->pdev; in __igb_open()
4044 if (test_bit(__IGB_TESTING, &adapter->state)) { in __igb_open()
4046 return -EBUSY; in __igb_open()
4050 pm_runtime_get_sync(&pdev->dev); in __igb_open()
4078 err = netif_set_real_num_tx_queues(adapter->netdev, in __igb_open()
4079 adapter->num_tx_queues); in __igb_open()
4083 err = netif_set_real_num_rx_queues(adapter->netdev, in __igb_open()
4084 adapter->num_rx_queues); in __igb_open()
4089 clear_bit(__IGB_DOWN, &adapter->state); in __igb_open()
4091 for (i = 0; i < adapter->num_q_vectors; i++) in __igb_open()
4092 napi_enable(&(adapter->q_vector[i]->napi)); in __igb_open()
4101 if (adapter->vfs_allocated_count) { in __igb_open()
4111 pm_runtime_put(&pdev->dev); in __igb_open()
4114 hw->mac.get_link_status = 1; in __igb_open()
4115 schedule_work(&adapter->watchdog_task); in __igb_open()
4130 pm_runtime_put(&pdev->dev); in __igb_open()
4141 * igb_close - Disables a network interface
4147 * The close entry point is called when an interface is de-activated
4155 struct pci_dev *pdev = adapter->pdev; in __igb_close()
4157 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); in __igb_close()
4160 pm_runtime_get_sync(&pdev->dev); in __igb_close()
4169 pm_runtime_put_sync(&pdev->dev); in __igb_close()
4175 if (netif_device_present(netdev) || netdev->dismantle) in igb_close()
4181 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
4182 * @tx_ring: tx descriptor ring (for a specific queue) to setup
4188 struct device *dev = tx_ring->dev; in igb_setup_tx_resources()
4191 size = sizeof(struct igb_tx_buffer) * tx_ring->count; in igb_setup_tx_resources()
4193 tx_ring->tx_buffer_info = vmalloc(size); in igb_setup_tx_resources()
4194 if (!tx_ring->tx_buffer_info) in igb_setup_tx_resources()
4198 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); in igb_setup_tx_resources()
4199 tx_ring->size = ALIGN(tx_ring->size, 4096); in igb_setup_tx_resources()
4201 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in igb_setup_tx_resources()
4202 &tx_ring->dma, GFP_KERNEL); in igb_setup_tx_resources()
4203 if (!tx_ring->desc) in igb_setup_tx_resources()
4206 tx_ring->next_to_use = 0; in igb_setup_tx_resources()
4207 tx_ring->next_to_clean = 0; in igb_setup_tx_resources()
4212 vfree(tx_ring->tx_buffer_info); in igb_setup_tx_resources()
4213 tx_ring->tx_buffer_info = NULL; in igb_setup_tx_resources()
4214 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); in igb_setup_tx_resources()
4215 return -ENOMEM; in igb_setup_tx_resources()
4219 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
4227 struct pci_dev *pdev = adapter->pdev; in igb_setup_all_tx_resources()
4230 for (i = 0; i < adapter->num_tx_queues; i++) { in igb_setup_all_tx_resources()
4231 err = igb_setup_tx_resources(adapter->tx_ring[i]); in igb_setup_all_tx_resources()
4233 dev_err(&pdev->dev, in igb_setup_all_tx_resources()
4234 "Allocation for Tx Queue %u failed\n", i); in igb_setup_all_tx_resources()
4235 for (i--; i >= 0; i--) in igb_setup_all_tx_resources()
4236 igb_free_tx_resources(adapter->tx_ring[i]); in igb_setup_all_tx_resources()
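/* Illustrative sketch only, not part of igb_main.c: igb_setup_all_tx_resources()
 * above unwinds in reverse ("for (i--; i >= 0; i--)") so that only the queues
 * that really were allocated get freed when one allocation fails.  A minimal,
 * self-contained userspace version of the same pattern, with hypothetical
 * names (NQUEUES, setup_one, teardown_one):
 */
#include <stdio.h>

#define NQUEUES 4

/* stand-ins for the per-queue allocate/free helpers; queue 2 fails on purpose */
static int setup_one(int i)     { printf("setup %d\n", i); return i == 2 ? -1 : 0; }
static void teardown_one(int i) { printf("free %d\n", i); }

static int setup_all(void)
{
	int i, err = 0;

	for (i = 0; i < NQUEUES; i++) {
		err = setup_one(i);
		if (err) {
			/* unwind only queues 0..i-1 that were actually set up */
			for (i--; i >= 0; i--)
				teardown_one(i);
			break;
		}
	}
	return err;
}

int main(void) { return setup_all() ? 1 : 0; }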
4245 * igb_setup_tctl - configure the transmit control registers
4250 struct e1000_hw *hw = &adapter->hw; in igb_setup_tctl()
4271 * igb_configure_tx_ring - Configure transmit ring after Reset
4273 * @ring: tx ring to configure
4280 struct e1000_hw *hw = &adapter->hw; in igb_configure_tx_ring()
4282 u64 tdba = ring->dma; in igb_configure_tx_ring()
4283 int reg_idx = ring->reg_idx; in igb_configure_tx_ring()
4286 ring->count * sizeof(union e1000_adv_tx_desc)); in igb_configure_tx_ring()
4291 ring->tail = adapter->io_addr + E1000_TDT(reg_idx); in igb_configure_tx_ring()
4293 writel(0, ring->tail); in igb_configure_tx_ring()
4300 memset(ring->tx_buffer_info, 0, in igb_configure_tx_ring()
4301 sizeof(struct igb_tx_buffer) * ring->count); in igb_configure_tx_ring()
4308 * igb_configure_tx - Configure transmit Unit after Reset
4311 * Configure the Tx unit of the MAC after a reset.
4315 struct e1000_hw *hw = &adapter->hw; in igb_configure_tx()
4319 for (i = 0; i < adapter->num_tx_queues; i++) in igb_configure_tx()
4320 wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0); in igb_configure_tx()
4325 for (i = 0; i < adapter->num_tx_queues; i++) in igb_configure_tx()
4326 igb_configure_tx_ring(adapter, adapter->tx_ring[i]); in igb_configure_tx()
4330 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
4337 struct igb_adapter *adapter = netdev_priv(rx_ring->netdev); in igb_setup_rx_resources()
4338 struct device *dev = rx_ring->dev; in igb_setup_rx_resources()
4341 size = sizeof(struct igb_rx_buffer) * rx_ring->count; in igb_setup_rx_resources()
4343 rx_ring->rx_buffer_info = vmalloc(size); in igb_setup_rx_resources()
4344 if (!rx_ring->rx_buffer_info) in igb_setup_rx_resources()
4348 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); in igb_setup_rx_resources()
4349 rx_ring->size = ALIGN(rx_ring->size, 4096); in igb_setup_rx_resources()
4351 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in igb_setup_rx_resources()
4352 &rx_ring->dma, GFP_KERNEL); in igb_setup_rx_resources()
4353 if (!rx_ring->desc) in igb_setup_rx_resources()
4356 rx_ring->next_to_alloc = 0; in igb_setup_rx_resources()
4357 rx_ring->next_to_clean = 0; in igb_setup_rx_resources()
4358 rx_ring->next_to_use = 0; in igb_setup_rx_resources()
4360 rx_ring->xdp_prog = adapter->xdp_prog; in igb_setup_rx_resources()
4362 /* XDP RX-queue info */ in igb_setup_rx_resources()
4363 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, in igb_setup_rx_resources()
4364 rx_ring->queue_index) < 0) in igb_setup_rx_resources()
4370 vfree(rx_ring->rx_buffer_info); in igb_setup_rx_resources()
4371 rx_ring->rx_buffer_info = NULL; in igb_setup_rx_resources()
4373 return -ENOMEM; in igb_setup_rx_resources()
4377 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
4385 struct pci_dev *pdev = adapter->pdev; in igb_setup_all_rx_resources()
4388 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_setup_all_rx_resources()
4389 err = igb_setup_rx_resources(adapter->rx_ring[i]); in igb_setup_all_rx_resources()
4391 dev_err(&pdev->dev, in igb_setup_all_rx_resources()
4393 for (i--; i >= 0; i--) in igb_setup_all_rx_resources()
4394 igb_free_rx_resources(adapter->rx_ring[i]); in igb_setup_all_rx_resources()
4403 * igb_setup_mrqc - configure the multiple receive queue control registers
4408 struct e1000_hw *hw = &adapter->hw; in igb_setup_mrqc()
4417 num_rx_queues = adapter->rss_queues; in igb_setup_mrqc()
4419 switch (hw->mac.type) { in igb_setup_mrqc()
4421 /* 82576 supports 2 RSS queues for SR-IOV */ in igb_setup_mrqc()
4422 if (adapter->vfs_allocated_count) in igb_setup_mrqc()
4429 if (adapter->rss_indir_tbl_init != num_rx_queues) { in igb_setup_mrqc()
4431 adapter->rss_indir_tbl[j] = in igb_setup_mrqc()
4433 adapter->rss_indir_tbl_init = num_rx_queues; in igb_setup_mrqc()
4444 if (adapter->hw.mac.type >= e1000_82576) in igb_setup_mrqc()
4460 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) in igb_setup_mrqc()
4462 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) in igb_setup_mrqc()
4469 if (adapter->vfs_allocated_count) { in igb_setup_mrqc()
4470 if (hw->mac.type > e1000_82575) { in igb_setup_mrqc()
4476 vtctl |= adapter->vfs_allocated_count << in igb_setup_mrqc()
4480 if (adapter->rss_queues > 1) in igb_setup_mrqc()
4485 if (hw->mac.type != e1000_i211) in igb_setup_mrqc()
4494 * igb_setup_rctl - configure the receive control registers
4499 struct e1000_hw *hw = &adapter->hw; in igb_setup_rctl()
4508 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); in igb_setup_rctl()
4522 /* disable queue 0 to prevent tail write w/o re-config */ in igb_setup_rctl()
4525 /* Attention!!! For SR-IOV PF driver operations you must enable in igb_setup_rctl()
4527 * if an un-trusted VF does not provide descriptors to hardware. in igb_setup_rctl()
4529 if (adapter->vfs_allocated_count) { in igb_setup_rctl()
4535 if (adapter->netdev->features & NETIF_F_RXALL) { in igb_setup_rctl()
4556 struct e1000_hw *hw = &adapter->hw; in igb_set_vf_rlpml()
4573 struct e1000_hw *hw = &adapter->hw; in igb_set_vf_vlan_strip()
4576 if (hw->mac.type < e1000_82576) in igb_set_vf_vlan_strip()
4579 if (hw->mac.type == e1000_i350) in igb_set_vf_vlan_strip()
4595 struct e1000_hw *hw = &adapter->hw; in igb_set_vmolr()
4601 if (hw->mac.type < e1000_82576) in igb_set_vmolr()
4613 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) in igb_set_vmolr()
4618 if (vfn <= adapter->vfs_allocated_count) in igb_set_vmolr()
4625 * igb_setup_srrctl - configure the split and replication receive control
4632 struct e1000_hw *hw = &adapter->hw; in igb_setup_srrctl()
4633 int reg_idx = ring->reg_idx; in igb_setup_srrctl()
4642 if (hw->mac.type >= e1000_82580) in igb_setup_srrctl()
4647 if (adapter->vfs_allocated_count || in igb_setup_srrctl()
4648 (!(hw->fc.current_mode & e1000_fc_rx_pause) && in igb_setup_srrctl()
4649 adapter->num_rx_queues > 1)) in igb_setup_srrctl()
4656 * igb_configure_rx_ring - Configure a receive ring after Reset
4665 struct e1000_hw *hw = &adapter->hw; in igb_configure_rx_ring()
4667 u64 rdba = ring->dma; in igb_configure_rx_ring()
4668 int reg_idx = ring->reg_idx; in igb_configure_rx_ring()
4671 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); in igb_configure_rx_ring()
4672 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in igb_configure_rx_ring()
4683 ring->count * sizeof(union e1000_adv_rx_desc)); in igb_configure_rx_ring()
4686 ring->tail = adapter->io_addr + E1000_RDT(reg_idx); in igb_configure_rx_ring()
4688 writel(0, ring->tail); in igb_configure_rx_ring()
4701 memset(ring->rx_buffer_info, 0, in igb_configure_rx_ring()
4702 sizeof(struct igb_rx_buffer) * ring->count); in igb_configure_rx_ring()
4706 rx_desc->wb.upper.length = 0; in igb_configure_rx_ring()
4720 if (adapter->flags & IGB_FLAG_RX_LEGACY) in igb_set_rx_buffer_len()
4726 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) in igb_set_rx_buffer_len()
4734 * igb_configure_rx - Configure receive Unit after Reset
4749 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_configure_rx()
4750 struct igb_ring *rx_ring = adapter->rx_ring[i]; in igb_configure_rx()
4758 * igb_free_tx_resources - Free Tx Resources per Queue
4759 * @tx_ring: Tx descriptor ring for a specific queue
4767 vfree(tx_ring->tx_buffer_info); in igb_free_tx_resources()
4768 tx_ring->tx_buffer_info = NULL; in igb_free_tx_resources()
4771 if (!tx_ring->desc) in igb_free_tx_resources()
4774 dma_free_coherent(tx_ring->dev, tx_ring->size, in igb_free_tx_resources()
4775 tx_ring->desc, tx_ring->dma); in igb_free_tx_resources()
4777 tx_ring->desc = NULL; in igb_free_tx_resources()
4781 * igb_free_all_tx_resources - Free Tx Resources for All Queues
4790 for (i = 0; i < adapter->num_tx_queues; i++) in igb_free_all_tx_resources()
4791 if (adapter->tx_ring[i]) in igb_free_all_tx_resources()
4792 igb_free_tx_resources(adapter->tx_ring[i]); in igb_free_all_tx_resources()
4796 * igb_clean_tx_ring - Free Tx Buffers
4801 u16 i = tx_ring->next_to_clean; in igb_clean_tx_ring()
4802 struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; in igb_clean_tx_ring()
4804 while (i != tx_ring->next_to_use) { in igb_clean_tx_ring()
4807 /* Free all the Tx ring sk_buffs */ in igb_clean_tx_ring()
4808 dev_kfree_skb_any(tx_buffer->skb); in igb_clean_tx_ring()
4811 dma_unmap_single(tx_ring->dev, in igb_clean_tx_ring()
4817 eop_desc = tx_buffer->next_to_watch; in igb_clean_tx_ring()
4825 if (unlikely(i == tx_ring->count)) { in igb_clean_tx_ring()
4827 tx_buffer = tx_ring->tx_buffer_info; in igb_clean_tx_ring()
4833 dma_unmap_page(tx_ring->dev, in igb_clean_tx_ring()
4842 if (unlikely(i == tx_ring->count)) { in igb_clean_tx_ring()
4844 tx_buffer = tx_ring->tx_buffer_info; in igb_clean_tx_ring()
4852 tx_ring->next_to_use = 0; in igb_clean_tx_ring()
4853 tx_ring->next_to_clean = 0; in igb_clean_tx_ring()
4857 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
4864 for (i = 0; i < adapter->num_tx_queues; i++) in igb_clean_all_tx_rings()
4865 if (adapter->tx_ring[i]) in igb_clean_all_tx_rings()
4866 igb_clean_tx_ring(adapter->tx_ring[i]); in igb_clean_all_tx_rings()
4870 * igb_free_rx_resources - Free Rx Resources
4879 rx_ring->xdp_prog = NULL; in igb_free_rx_resources()
4880 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in igb_free_rx_resources()
4881 vfree(rx_ring->rx_buffer_info); in igb_free_rx_resources()
4882 rx_ring->rx_buffer_info = NULL; in igb_free_rx_resources()
4885 if (!rx_ring->desc) in igb_free_rx_resources()
4888 dma_free_coherent(rx_ring->dev, rx_ring->size, in igb_free_rx_resources()
4889 rx_ring->desc, rx_ring->dma); in igb_free_rx_resources()
4891 rx_ring->desc = NULL; in igb_free_rx_resources()
4895 * igb_free_all_rx_resources - Free Rx Resources for All Queues
4904 for (i = 0; i < adapter->num_rx_queues; i++) in igb_free_all_rx_resources()
4905 if (adapter->rx_ring[i]) in igb_free_all_rx_resources()
4906 igb_free_rx_resources(adapter->rx_ring[i]); in igb_free_all_rx_resources()
4910 * igb_clean_rx_ring - Free Rx Buffers per Queue
4915 u16 i = rx_ring->next_to_clean; in igb_clean_rx_ring()
4917 dev_kfree_skb(rx_ring->skb); in igb_clean_rx_ring()
4918 rx_ring->skb = NULL; in igb_clean_rx_ring()
4921 while (i != rx_ring->next_to_alloc) { in igb_clean_rx_ring()
4922 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; in igb_clean_rx_ring()
4927 dma_sync_single_range_for_cpu(rx_ring->dev, in igb_clean_rx_ring()
4928 buffer_info->dma, in igb_clean_rx_ring()
4929 buffer_info->page_offset, in igb_clean_rx_ring()
4934 dma_unmap_page_attrs(rx_ring->dev, in igb_clean_rx_ring()
4935 buffer_info->dma, in igb_clean_rx_ring()
4939 __page_frag_cache_drain(buffer_info->page, in igb_clean_rx_ring()
4940 buffer_info->pagecnt_bias); in igb_clean_rx_ring()
4943 if (i == rx_ring->count) in igb_clean_rx_ring()
4947 rx_ring->next_to_alloc = 0; in igb_clean_rx_ring()
4948 rx_ring->next_to_clean = 0; in igb_clean_rx_ring()
4949 rx_ring->next_to_use = 0; in igb_clean_rx_ring()
4953 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
4960 for (i = 0; i < adapter->num_rx_queues; i++) in igb_clean_all_rx_rings()
4961 if (adapter->rx_ring[i]) in igb_clean_all_rx_rings()
4962 igb_clean_rx_ring(adapter->rx_ring[i]); in igb_clean_all_rx_rings()
4966 * igb_set_mac - Change the Ethernet Address of the NIC
4975 struct e1000_hw *hw = &adapter->hw; in igb_set_mac()
4978 if (!is_valid_ether_addr(addr->sa_data)) in igb_set_mac()
4979 return -EADDRNOTAVAIL; in igb_set_mac()
4981 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); in igb_set_mac()
4982 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); in igb_set_mac()
4991 * igb_write_mc_addr_list - write multicast addresses to MTA
4995 * Returns: -ENOMEM on failure
5002 struct e1000_hw *hw = &adapter->hw; in igb_write_mc_addr_list()
5016 return -ENOMEM; in igb_write_mc_addr_list()
5021 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); in igb_write_mc_addr_list()
5031 struct e1000_hw *hw = &adapter->hw; in igb_vlan_promisc_enable()
5034 switch (hw->mac.type) { in igb_vlan_promisc_enable()
5039 if (adapter->netdev->features & NETIF_F_NTUPLE) in igb_vlan_promisc_enable()
5046 if (adapter->vfs_allocated_count) in igb_vlan_promisc_enable()
5054 if (adapter->flags & IGB_FLAG_VLAN_PROMISC) in igb_vlan_promisc_enable()
5057 if (!adapter->vfs_allocated_count) in igb_vlan_promisc_enable()
5061 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; in igb_vlan_promisc_enable()
5063 for (i = E1000_VLVF_ARRAY_SIZE; --i;) { in igb_vlan_promisc_enable()
5072 for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;) in igb_vlan_promisc_enable()
5073 hw->mac.ops.write_vfta(hw, i, ~0U); in igb_vlan_promisc_enable()
5076 adapter->flags |= IGB_FLAG_VLAN_PROMISC; in igb_vlan_promisc_enable()
5084 struct e1000_hw *hw = &adapter->hw; in igb_scrub_vfta()
5091 vid = adapter->mng_vlan_id; in igb_scrub_vfta()
5093 vfta[(vid - vid_start) / 32] |= BIT(vid % 32); in igb_scrub_vfta()
5095 if (!adapter->vfs_allocated_count) in igb_scrub_vfta()
5098 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; in igb_scrub_vfta()
5100 for (i = E1000_VLVF_ARRAY_SIZE; --i;) { in igb_scrub_vfta()
5112 vfta[(vid - vid_start) / 32] |= BIT(vid % 32); in igb_scrub_vfta()
5115 if (test_bit(vid, adapter->active_vlans)) in igb_scrub_vfta()
5127 for (i = VFTA_BLOCK_SIZE; i--;) { in igb_scrub_vfta()
5132 vfta[i] |= adapter->active_vlans[word] >> bits; in igb_scrub_vfta()
5134 hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]); in igb_scrub_vfta()
5143 if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC)) in igb_vlan_promisc_disable()
5147 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; in igb_vlan_promisc_disable()
5154 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
5160 * promiscuous mode, and all-multi behavior.
5165 struct e1000_hw *hw = &adapter->hw; in igb_set_rx_mode()
5166 unsigned int vfn = adapter->vfs_allocated_count; in igb_set_rx_mode()
5171 if (netdev->flags & IFF_PROMISC) { in igb_set_rx_mode()
5176 if (hw->mac.type == e1000_82576) in igb_set_rx_mode()
5179 if (netdev->flags & IFF_ALLMULTI) { in igb_set_rx_mode()
5210 if ((netdev->flags & IFF_PROMISC) || in igb_set_rx_mode()
5211 (netdev->features & NETIF_F_RXALL)) { in igb_set_rx_mode()
5225 if (!adapter->vfs_allocated_count) { in igb_set_rx_mode()
5226 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) in igb_set_rx_mode()
5232 /* In order to support SR-IOV and eventually VMDq it is necessary to set in igb_set_rx_mode()
5237 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350)) in igb_set_rx_mode()
5249 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) in igb_set_rx_mode()
5263 struct e1000_hw *hw = &adapter->hw; in igb_check_wvbr()
5266 switch (hw->mac.type) { in igb_check_wvbr()
5277 adapter->wvbr |= wvbr; in igb_check_wvbr()
5286 if (!adapter->wvbr) in igb_spoof_check()
5289 for (j = 0; j < adapter->vfs_allocated_count; j++) { in igb_spoof_check()
5290 if (adapter->wvbr & BIT(j) || in igb_spoof_check()
5291 adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) { in igb_spoof_check()
5292 dev_warn(&adapter->pdev->dev, in igb_spoof_check()
5294 adapter->wvbr &= in igb_spoof_check()
5307 igb_get_phy_info(&adapter->hw); in igb_update_phy_info()
5311 * igb_has_link - check shared code for link and determine up/down
5316 struct e1000_hw *hw = &adapter->hw; in igb_has_link()
5324 switch (hw->phy.media_type) { in igb_has_link()
5326 if (!hw->mac.get_link_status) in igb_has_link()
5330 hw->mac.ops.check_for_link(hw); in igb_has_link()
5331 link_active = !hw->mac.get_link_status; in igb_has_link()
5338 if (((hw->mac.type == e1000_i210) || in igb_has_link()
5339 (hw->mac.type == e1000_i211)) && in igb_has_link()
5340 (hw->phy.id == I210_I_PHY_ID)) { in igb_has_link()
5341 if (!netif_carrier_ok(adapter->netdev)) { in igb_has_link()
5342 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; in igb_has_link()
5343 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) { in igb_has_link()
5344 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE; in igb_has_link()
5345 adapter->link_check_timeout = jiffies; in igb_has_link()
5358 if (hw->mac.type == e1000_i350) { in igb_thermal_sensor_event()
5362 if ((hw->phy.media_type == e1000_media_type_copper) && in igb_thermal_sensor_event()
5371 * igb_check_lvmmc - check for malformed packets received
5377 struct e1000_hw *hw = &adapter->hw; in igb_check_lvmmc()
5383 netdev_warn(adapter->netdev, in igb_check_lvmmc()
5384 "malformed Tx packet detected and dropped, LVMMC:0x%08x\n", in igb_check_lvmmc()
5391 * igb_watchdog - Timer Call-back
5398 schedule_work(&adapter->watchdog_task); in igb_watchdog()
5406 struct e1000_hw *hw = &adapter->hw; in igb_watchdog_task()
5407 struct e1000_phy_info *phy = &hw->phy; in igb_watchdog_task()
5408 struct net_device *netdev = adapter->netdev; in igb_watchdog_task()
5416 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { in igb_watchdog_task()
5417 if (time_after(jiffies, (adapter->link_check_timeout + HZ))) in igb_watchdog_task()
5418 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; in igb_watchdog_task()
5424 if (adapter->flags & IGB_FLAG_MAS_ENABLE) { in igb_watchdog_task()
5425 if (hw->phy.media_type == e1000_media_type_copper) { in igb_watchdog_task()
5433 if (hw->dev_spec._82575.media_changed) { in igb_watchdog_task()
5434 hw->dev_spec._82575.media_changed = false; in igb_watchdog_task()
5435 adapter->flags |= IGB_FLAG_MEDIA_RESET; in igb_watchdog_task()
5439 pm_runtime_resume(netdev->dev.parent); in igb_watchdog_task()
5444 hw->mac.ops.get_speed_and_duplex(hw, in igb_watchdog_task()
5445 &adapter->link_speed, in igb_watchdog_task()
5446 &adapter->link_duplex); in igb_watchdog_task()
5452 netdev->name, in igb_watchdog_task()
5453 adapter->link_speed, in igb_watchdog_task()
5454 adapter->link_duplex == FULL_DUPLEX ? in igb_watchdog_task()
5457 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" : in igb_watchdog_task()
5459 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None"); in igb_watchdog_task()
5461 /* disable EEE if enabled */ in igb_watchdog_task()
5462 if ((adapter->flags & IGB_FLAG_EEE) && in igb_watchdog_task()
5463 (adapter->link_duplex == HALF_DUPLEX)) { in igb_watchdog_task()
5464 dev_info(&adapter->pdev->dev, in igb_watchdog_task()
5465 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n"); in igb_watchdog_task()
5466 adapter->hw.dev_spec._82575.eee_disable = true; in igb_watchdog_task()
5467 adapter->flags &= ~IGB_FLAG_EEE; in igb_watchdog_task()
5472 if (phy->speed_downgraded) in igb_watchdog_task()
5481 adapter->tx_timeout_factor = 1; in igb_watchdog_task()
5482 switch (adapter->link_speed) { in igb_watchdog_task()
5484 adapter->tx_timeout_factor = 14; in igb_watchdog_task()
5491 if (adapter->link_speed != SPEED_1000) in igb_watchdog_task()
5500 msleep(100); in igb_watchdog_task()
5501 retry_count--; in igb_watchdog_task()
5504 dev_err(&adapter->pdev->dev, "exceed max 2 second\n"); in igb_watchdog_task()
5507 dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n"); in igb_watchdog_task()
5516 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_watchdog_task()
5517 mod_timer(&adapter->phy_info_timer, in igb_watchdog_task()
5522 adapter->link_speed = 0; in igb_watchdog_task()
5523 adapter->link_duplex = 0; in igb_watchdog_task()
5533 netdev->name); in igb_watchdog_task()
5539 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_watchdog_task()
5540 mod_timer(&adapter->phy_info_timer, in igb_watchdog_task()
5544 if (adapter->flags & IGB_FLAG_MAS_ENABLE) { in igb_watchdog_task()
5546 if (adapter->flags & IGB_FLAG_MEDIA_RESET) { in igb_watchdog_task()
5547 schedule_work(&adapter->reset_task); in igb_watchdog_task()
5552 pm_schedule_suspend(netdev->dev.parent, in igb_watchdog_task()
5557 (adapter->flags & IGB_FLAG_MAS_ENABLE)) { in igb_watchdog_task()
5559 if (adapter->flags & IGB_FLAG_MEDIA_RESET) { in igb_watchdog_task()
5560 schedule_work(&adapter->reset_task); in igb_watchdog_task()
5567 spin_lock(&adapter->stats64_lock); in igb_watchdog_task()
5569 spin_unlock(&adapter->stats64_lock); in igb_watchdog_task()
5571 for (i = 0; i < adapter->num_tx_queues; i++) { in igb_watchdog_task()
5572 struct igb_ring *tx_ring = adapter->tx_ring[i]; in igb_watchdog_task()
5575 * but we've got queued Tx work that's never going in igb_watchdog_task()
5576 * to get done, so reset controller to flush Tx. in igb_watchdog_task()
5579 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { in igb_watchdog_task()
5580 adapter->tx_timeout_count++; in igb_watchdog_task()
5581 schedule_work(&adapter->reset_task); in igb_watchdog_task()
5588 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igb_watchdog_task()
5592 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_watchdog_task()
5595 for (i = 0; i < adapter->num_q_vectors; i++) in igb_watchdog_task()
5596 eics |= adapter->q_vector[i]->eims_value; in igb_watchdog_task()
5607 if ((adapter->hw.mac.type == e1000_i350) || in igb_watchdog_task()
5608 (adapter->hw.mac.type == e1000_i354)) in igb_watchdog_task()
5612 if (!test_bit(__IGB_DOWN, &adapter->state)) { in igb_watchdog_task()
5613 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) in igb_watchdog_task()
5614 mod_timer(&adapter->watchdog_timer, in igb_watchdog_task()
5617 mod_timer(&adapter->watchdog_timer, in igb_watchdog_task()
5630 * igb_update_ring_itr - update the dynamic ITR value based on packet size
5646 int new_val = q_vector->itr_val; in igb_update_ring_itr()
5648 struct igb_adapter *adapter = q_vector->adapter; in igb_update_ring_itr()
5651 /* For non-gigabit speeds, just fix the interrupt rate at 4000 in igb_update_ring_itr()
5652 * ints/sec - ITR timer value of 120 ticks. in igb_update_ring_itr()
5654 if (adapter->link_speed != SPEED_1000) { in igb_update_ring_itr()
5659 packets = q_vector->rx.total_packets; in igb_update_ring_itr()
5661 avg_wire_size = q_vector->rx.total_bytes / packets; in igb_update_ring_itr()
5663 packets = q_vector->tx.total_packets; in igb_update_ring_itr()
5666 q_vector->tx.total_bytes / packets); in igb_update_ring_itr()
5678 /* Give a little boost to mid-size frames */ in igb_update_ring_itr()
5686 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || in igb_update_ring_itr()
5687 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) in igb_update_ring_itr()
5691 if (new_val != q_vector->itr_val) { in igb_update_ring_itr()
5692 q_vector->itr_val = new_val; in igb_update_ring_itr()
5693 q_vector->set_itr = 1; in igb_update_ring_itr()
5696 q_vector->rx.total_bytes = 0; in igb_update_ring_itr()
5697 q_vector->rx.total_packets = 0; in igb_update_ring_itr()
5698 q_vector->tx.total_bytes = 0; in igb_update_ring_itr()
5699 q_vector->tx.total_packets = 0; in igb_update_ring_itr()
5703 * igb_update_itr - update the dynamic ITR value based on statistics
5715 * NOTE: These calculations are only valid when operating in a single-
5721 unsigned int packets = ring_container->total_packets; in igb_update_itr()
5722 unsigned int bytes = ring_container->total_bytes; in igb_update_itr()
5723 u8 itrval = ring_container->itr; in igb_update_itr()
5763 ring_container->total_bytes = 0; in igb_update_itr()
5764 ring_container->total_packets = 0; in igb_update_itr()
5767 ring_container->itr = itrval; in igb_update_itr()
5772 struct igb_adapter *adapter = q_vector->adapter; in igb_set_itr()
5773 u32 new_itr = q_vector->itr_val; in igb_set_itr()
5776 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ in igb_set_itr()
5777 if (adapter->link_speed != SPEED_1000) { in igb_set_itr()
5783 igb_update_itr(q_vector, &q_vector->tx); in igb_set_itr()
5784 igb_update_itr(q_vector, &q_vector->rx); in igb_set_itr()
5786 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); in igb_set_itr()
5790 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || in igb_set_itr()
5791 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) in igb_set_itr()
5810 if (new_itr != q_vector->itr_val) { in igb_set_itr()
5815 new_itr = new_itr > q_vector->itr_val ? in igb_set_itr()
5816 max((new_itr * q_vector->itr_val) / in igb_set_itr()
5817 (new_itr + (q_vector->itr_val >> 2)), in igb_set_itr()
5825 q_vector->itr_val = new_itr; in igb_set_itr()
5826 q_vector->set_itr = 1; in igb_set_itr()
5836 u16 i = tx_ring->next_to_use; in igb_tx_ctxtdesc()
5842 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in igb_tx_ctxtdesc()
5848 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) in igb_tx_ctxtdesc()
5849 mss_l4len_idx |= tx_ring->reg_idx << 4; in igb_tx_ctxtdesc()
5851 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); in igb_tx_ctxtdesc()
5852 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); in igb_tx_ctxtdesc()
5853 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); in igb_tx_ctxtdesc()
5855 /* We assume there is always a valid tx time available. Invalid times in igb_tx_ctxtdesc()
5858 if (tx_ring->launchtime_enable) { in igb_tx_ctxtdesc()
5859 ts = ktime_to_timespec64(first->skb->tstamp); in igb_tx_ctxtdesc()
5860 first->skb->tstamp = ktime_set(0, 0); in igb_tx_ctxtdesc()
5861 context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); in igb_tx_ctxtdesc()
5863 context_desc->seqnum_seed = 0; in igb_tx_ctxtdesc()
5872 struct sk_buff *skb = first->skb; in igb_tso()
5886 if (skb->ip_summed != CHECKSUM_PARTIAL) in igb_tso()
5900 type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ? in igb_tso()
5904 if (ip.v4->version == 4) { in igb_tso()
5906 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); in igb_tso()
5911 ip.v4->check = csum_fold(csum_partial(trans_start, in igb_tso()
5912 csum_start - trans_start, in igb_tso()
5916 ip.v4->tot_len = 0; in igb_tso()
5917 first->tx_flags |= IGB_TX_FLAGS_TSO | in igb_tso()
5921 ip.v6->payload_len = 0; in igb_tso()
5922 first->tx_flags |= IGB_TX_FLAGS_TSO | in igb_tso()
5927 l4_offset = l4.hdr - skb->data; in igb_tso()
5930 paylen = skb->len - l4_offset; in igb_tso()
5933 *hdr_len = (l4.tcp->doff * 4) + l4_offset; in igb_tso()
5934 csum_replace_by_diff(&l4.tcp->check, in igb_tso()
5939 csum_replace_by_diff(&l4.udp->check, in igb_tso()
5944 first->gso_segs = skb_shinfo(skb)->gso_segs; in igb_tso()
5945 first->bytecount += (first->gso_segs - 1) * *hdr_len; in igb_tso()
5948 mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT; in igb_tso()
5949 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; in igb_tso()
5952 vlan_macip_lens = l4.hdr - ip.hdr; in igb_tso()
5953 vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; in igb_tso()
5954 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; in igb_tso()
5973 struct sk_buff *skb = first->skb; in igb_tx_csum()
5977 if (skb->ip_summed != CHECKSUM_PARTIAL) { in igb_tx_csum()
5979 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) && in igb_tx_csum()
5980 !tx_ring->launchtime_enable) in igb_tx_csum()
5985 switch (skb->csum_offset) { in igb_tx_csum()
5993 if (((first->protocol == htons(ETH_P_IP)) && in igb_tx_csum()
5994 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || in igb_tx_csum()
5995 ((first->protocol == htons(ETH_P_IPV6)) && in igb_tx_csum()
6006 /* update TX checksum flag */ in igb_tx_csum()
6007 first->tx_flags |= IGB_TX_FLAGS_CSUM; in igb_tx_csum()
6008 vlan_macip_lens = skb_checksum_start_offset(skb) - in igb_tx_csum()
6012 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; in igb_tx_csum()
6042 cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS); in igb_tx_cmd_type()
6054 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) in igb_tx_olinfo_status()
6055 olinfo_status |= tx_ring->reg_idx << 4; in igb_tx_olinfo_status()
6067 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igb_tx_olinfo_status()
6072 struct net_device *netdev = tx_ring->netdev; in __igb_maybe_stop_tx()
6074 netif_stop_subqueue(netdev, tx_ring->queue_index); in __igb_maybe_stop_tx()
6086 return -EBUSY; in __igb_maybe_stop_tx()
6089 netif_wake_subqueue(netdev, tx_ring->queue_index); in __igb_maybe_stop_tx()
6091 u64_stats_update_begin(&tx_ring->tx_syncp2); in __igb_maybe_stop_tx()
6092 tx_ring->tx_stats.restart_queue2++; in __igb_maybe_stop_tx()
6093 u64_stats_update_end(&tx_ring->tx_syncp2); in __igb_maybe_stop_tx()
6109 struct sk_buff *skb = first->skb; in igb_tx_map()
6115 u32 tx_flags = first->tx_flags; in igb_tx_map()
6117 u16 i = tx_ring->next_to_use; in igb_tx_map()
6121 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); in igb_tx_map()
6124 data_len = skb->data_len; in igb_tx_map()
6126 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in igb_tx_map()
6130 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in igb_tx_map()
6131 if (dma_mapping_error(tx_ring->dev, dma)) in igb_tx_map()
6138 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igb_tx_map()
6141 tx_desc->read.cmd_type_len = in igb_tx_map()
6146 if (i == tx_ring->count) { in igb_tx_map()
6150 tx_desc->read.olinfo_status = 0; in igb_tx_map()
6153 size -= IGB_MAX_DATA_PER_TXD; in igb_tx_map()
6155 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igb_tx_map()
6161 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); in igb_tx_map()
6165 if (i == tx_ring->count) { in igb_tx_map()
6169 tx_desc->read.olinfo_status = 0; in igb_tx_map()
6172 data_len -= size; in igb_tx_map()
6174 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, in igb_tx_map()
6177 tx_buffer = &tx_ring->tx_buffer_info[i]; in igb_tx_map()
6182 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igb_tx_map()
6184 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in igb_tx_map()
6187 first->time_stamp = jiffies; in igb_tx_map()
6192 * are new descriptors to fetch. (Only applicable for weak-ordered in igb_tx_map()
6193 * memory model archs, such as IA-64). in igb_tx_map()
6201 first->next_to_watch = tx_desc; in igb_tx_map()
6204 if (i == tx_ring->count) in igb_tx_map()
6207 tx_ring->next_to_use = i; in igb_tx_map()
6213 writel(i, tx_ring->tail); in igb_tx_map()
6218 dev_err(tx_ring->dev, "TX DMA map failed\n"); in igb_tx_map()
6219 tx_buffer = &tx_ring->tx_buffer_info[i]; in igb_tx_map()
6224 dma_unmap_page(tx_ring->dev, in igb_tx_map()
6230 if (i-- == 0) in igb_tx_map()
6231 i += tx_ring->count; in igb_tx_map()
6232 tx_buffer = &tx_ring->tx_buffer_info[i]; in igb_tx_map()
6236 dma_unmap_single(tx_ring->dev, in igb_tx_map()
6242 dev_kfree_skb_any(tx_buffer->skb); in igb_tx_map()
6243 tx_buffer->skb = NULL; in igb_tx_map()
6245 tx_ring->next_to_use = i; in igb_tx_map()
6247 return -1; in igb_tx_map()
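/* Illustrative sketch only, not part of igb_main.c: igb_tx_map() above fills
 * every descriptor first and only then, after the write barrier, advances
 * next_to_use and writes the tail register, so the hardware never fetches a
 * half-written descriptor.  A minimal userspace analogue of that ordering
 * uses a release store for the publish step; all names here (struct desc,
 * ring, RING_SIZE, tail) are hypothetical.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8

struct desc { uint64_t addr; uint32_t len; };

static struct desc ring[RING_SIZE];
static _Atomic unsigned int tail;	/* producer index the consumer polls */

static void post_descriptor(uint64_t addr, uint32_t len)
{
	unsigned int i = atomic_load_explicit(&tail, memory_order_relaxed);

	/* 1) fill the descriptor while the consumer cannot see it yet */
	ring[i].addr = addr;
	ring[i].len  = len;

	if (++i == RING_SIZE)
		i = 0;

	/* 2) release store: the descriptor writes become visible no later
	 *    than the new tail value -- the analogue of wmb() before writel()
	 */
	atomic_store_explicit(&tail, i, memory_order_release);
}

int main(void)
{
	post_descriptor(0x1000, 64);
	printf("tail = %u\n", atomic_load(&tail));
	return 0;
}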
6260 len = xdpf->len; in igb_xmit_xdp_ring()
6265 dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE); in igb_xmit_xdp_ring()
6266 if (dma_mapping_error(tx_ring->dev, dma)) in igb_xmit_xdp_ring()
6270 tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in igb_xmit_xdp_ring()
6271 tx_buffer->bytecount = len; in igb_xmit_xdp_ring()
6272 tx_buffer->gso_segs = 1; in igb_xmit_xdp_ring()
6273 tx_buffer->protocol = 0; in igb_xmit_xdp_ring()
6275 i = tx_ring->next_to_use; in igb_xmit_xdp_ring()
6280 tx_buffer->type = IGB_TYPE_XDP; in igb_xmit_xdp_ring()
6281 tx_buffer->xdpf = xdpf; in igb_xmit_xdp_ring()
6283 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igb_xmit_xdp_ring()
6290 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igb_xmit_xdp_ring()
6294 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) in igb_xmit_xdp_ring()
6295 olinfo_status |= tx_ring->reg_idx << 4; in igb_xmit_xdp_ring()
6297 tx_desc->read.olinfo_status = olinfo_status; in igb_xmit_xdp_ring()
6299 netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount); in igb_xmit_xdp_ring()
6302 tx_buffer->time_stamp = jiffies; in igb_xmit_xdp_ring()
6309 if (i == tx_ring->count) in igb_xmit_xdp_ring()
6312 tx_buffer->next_to_watch = tx_desc; in igb_xmit_xdp_ring()
6313 tx_ring->next_to_use = i; in igb_xmit_xdp_ring()
6319 writel(i, tx_ring->tail); in igb_xmit_xdp_ring()
6341 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) in igb_xmit_frame_ring()
6343 &skb_shinfo(skb)->frags[f])); in igb_xmit_frame_ring()
6351 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in igb_xmit_frame_ring()
6352 first->type = IGB_TYPE_SKB; in igb_xmit_frame_ring()
6353 first->skb = skb; in igb_xmit_frame_ring()
6354 first->bytecount = skb->len; in igb_xmit_frame_ring()
6355 first->gso_segs = 1; in igb_xmit_frame_ring()
6357 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { in igb_xmit_frame_ring()
6358 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); in igb_xmit_frame_ring()
6360 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && in igb_xmit_frame_ring()
6362 &adapter->state)) { in igb_xmit_frame_ring()
6363 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in igb_xmit_frame_ring()
6366 adapter->ptp_tx_skb = skb_get(skb); in igb_xmit_frame_ring()
6367 adapter->ptp_tx_start = jiffies; in igb_xmit_frame_ring()
6368 if (adapter->hw.mac.type == e1000_82576) in igb_xmit_frame_ring()
6369 schedule_work(&adapter->ptp_tx_work); in igb_xmit_frame_ring()
6371 adapter->tx_hwtstamp_skipped++; in igb_xmit_frame_ring()
6381 first->tx_flags = tx_flags; in igb_xmit_frame_ring()
6382 first->protocol = protocol; in igb_xmit_frame_ring()
6396 dev_kfree_skb_any(first->skb); in igb_xmit_frame_ring()
6397 first->skb = NULL; in igb_xmit_frame_ring()
6400 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); in igb_xmit_frame_ring()
6402 dev_kfree_skb_any(adapter->ptp_tx_skb); in igb_xmit_frame_ring()
6403 adapter->ptp_tx_skb = NULL; in igb_xmit_frame_ring()
6404 if (adapter->hw.mac.type == e1000_82576) in igb_xmit_frame_ring()
6405 cancel_work_sync(&adapter->ptp_tx_work); in igb_xmit_frame_ring()
6406 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); in igb_xmit_frame_ring()
6415 unsigned int r_idx = skb->queue_mapping; in igb_tx_queue_mapping()
6417 if (r_idx >= adapter->num_tx_queues) in igb_tx_queue_mapping()
6418 r_idx = r_idx % adapter->num_tx_queues; in igb_tx_queue_mapping()
6420 return adapter->tx_ring[r_idx]; in igb_tx_queue_mapping()
6438 * igb_tx_timeout - Respond to a Tx Hang
6440 * @txqueue: number of the Tx queue that hung (unused)
6445 struct e1000_hw *hw = &adapter->hw; in igb_tx_timeout()
6448 adapter->tx_timeout_count++; in igb_tx_timeout()
6450 if (hw->mac.type >= e1000_82580) in igb_tx_timeout()
6451 hw->dev_spec._82575.global_device_reset = true; in igb_tx_timeout()
6453 schedule_work(&adapter->reset_task); in igb_tx_timeout()
6455 (adapter->eims_enable_mask & ~adapter->eims_other)); in igb_tx_timeout()
6465 if (test_bit(__IGB_DOWN, &adapter->state) || in igb_reset_task()
6466 test_bit(__IGB_RESETTING, &adapter->state)) { in igb_reset_task()
6472 netdev_err(adapter->netdev, "Reset adapter\n"); in igb_reset_task()
6478 * igb_get_stats64 - Get System Network Statistics
6487 spin_lock(&adapter->stats64_lock); in igb_get_stats64()
6489 memcpy(stats, &adapter->stats64, sizeof(*stats)); in igb_get_stats64()
6490 spin_unlock(&adapter->stats64_lock); in igb_get_stats64()
6494 * igb_change_mtu - Change the Maximum Transfer Unit
6505 if (adapter->xdp_prog) { in igb_change_mtu()
6508 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_change_mtu()
6509 struct igb_ring *ring = adapter->rx_ring[i]; in igb_change_mtu()
6512 netdev_warn(adapter->netdev, in igb_change_mtu()
6515 return -EINVAL; in igb_change_mtu()
6524 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) in igb_change_mtu()
6528 adapter->max_frame_size = max_frame; in igb_change_mtu()
6534 netdev->mtu, new_mtu); in igb_change_mtu()
6535 netdev->mtu = new_mtu; in igb_change_mtu()
6542 clear_bit(__IGB_RESETTING, &adapter->state); in igb_change_mtu()
6548 * igb_update_stats - Update the board statistics counters
6553 struct rtnl_link_stats64 *net_stats = &adapter->stats64; in igb_update_stats()
6554 struct e1000_hw *hw = &adapter->hw; in igb_update_stats()
6555 struct pci_dev *pdev = adapter->pdev; in igb_update_stats()
6565 if (adapter->link_speed == 0) in igb_update_stats()
6574 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_update_stats()
6575 struct igb_ring *ring = adapter->rx_ring[i]; in igb_update_stats()
6577 if (hw->mac.type >= e1000_i210) in igb_update_stats()
6581 ring->rx_stats.drops += rqdpc; in igb_update_stats()
6582 net_stats->rx_fifo_errors += rqdpc; in igb_update_stats()
6586 start = u64_stats_fetch_begin_irq(&ring->rx_syncp); in igb_update_stats()
6587 _bytes = ring->rx_stats.bytes; in igb_update_stats()
6588 _packets = ring->rx_stats.packets; in igb_update_stats()
6589 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); in igb_update_stats()
6594 net_stats->rx_bytes = bytes; in igb_update_stats()
6595 net_stats->rx_packets = packets; in igb_update_stats()
6599 for (i = 0; i < adapter->num_tx_queues; i++) { in igb_update_stats()
6600 struct igb_ring *ring = adapter->tx_ring[i]; in igb_update_stats()
6602 start = u64_stats_fetch_begin_irq(&ring->tx_syncp); in igb_update_stats()
6603 _bytes = ring->tx_stats.bytes; in igb_update_stats()
6604 _packets = ring->tx_stats.packets; in igb_update_stats()
6605 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); in igb_update_stats()
6609 net_stats->tx_bytes = bytes; in igb_update_stats()
6610 net_stats->tx_packets = packets; in igb_update_stats()
6614 adapter->stats.crcerrs += rd32(E1000_CRCERRS); in igb_update_stats()
6615 adapter->stats.gprc += rd32(E1000_GPRC); in igb_update_stats()
6616 adapter->stats.gorc += rd32(E1000_GORCL); in igb_update_stats()
6618 adapter->stats.bprc += rd32(E1000_BPRC); in igb_update_stats()
6619 adapter->stats.mprc += rd32(E1000_MPRC); in igb_update_stats()
6620 adapter->stats.roc += rd32(E1000_ROC); in igb_update_stats()
6622 adapter->stats.prc64 += rd32(E1000_PRC64); in igb_update_stats()
6623 adapter->stats.prc127 += rd32(E1000_PRC127); in igb_update_stats()
6624 adapter->stats.prc255 += rd32(E1000_PRC255); in igb_update_stats()
6625 adapter->stats.prc511 += rd32(E1000_PRC511); in igb_update_stats()
6626 adapter->stats.prc1023 += rd32(E1000_PRC1023); in igb_update_stats()
6627 adapter->stats.prc1522 += rd32(E1000_PRC1522); in igb_update_stats()
6628 adapter->stats.symerrs += rd32(E1000_SYMERRS); in igb_update_stats()
6629 adapter->stats.sec += rd32(E1000_SEC); in igb_update_stats()
6632 adapter->stats.mpc += mpc; in igb_update_stats()
6633 net_stats->rx_fifo_errors += mpc; in igb_update_stats()
6634 adapter->stats.scc += rd32(E1000_SCC); in igb_update_stats()
6635 adapter->stats.ecol += rd32(E1000_ECOL); in igb_update_stats()
6636 adapter->stats.mcc += rd32(E1000_MCC); in igb_update_stats()
6637 adapter->stats.latecol += rd32(E1000_LATECOL); in igb_update_stats()
6638 adapter->stats.dc += rd32(E1000_DC); in igb_update_stats()
6639 adapter->stats.rlec += rd32(E1000_RLEC); in igb_update_stats()
6640 adapter->stats.xonrxc += rd32(E1000_XONRXC); in igb_update_stats()
6641 adapter->stats.xontxc += rd32(E1000_XONTXC); in igb_update_stats()
6642 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC); in igb_update_stats()
6643 adapter->stats.xofftxc += rd32(E1000_XOFFTXC); in igb_update_stats()
6644 adapter->stats.fcruc += rd32(E1000_FCRUC); in igb_update_stats()
6645 adapter->stats.gptc += rd32(E1000_GPTC); in igb_update_stats()
6646 adapter->stats.gotc += rd32(E1000_GOTCL); in igb_update_stats()
6648 adapter->stats.rnbc += rd32(E1000_RNBC); in igb_update_stats()
6649 adapter->stats.ruc += rd32(E1000_RUC); in igb_update_stats()
6650 adapter->stats.rfc += rd32(E1000_RFC); in igb_update_stats()
6651 adapter->stats.rjc += rd32(E1000_RJC); in igb_update_stats()
6652 adapter->stats.tor += rd32(E1000_TORH); in igb_update_stats()
6653 adapter->stats.tot += rd32(E1000_TOTH); in igb_update_stats()
6654 adapter->stats.tpr += rd32(E1000_TPR); in igb_update_stats()
6656 adapter->stats.ptc64 += rd32(E1000_PTC64); in igb_update_stats()
6657 adapter->stats.ptc127 += rd32(E1000_PTC127); in igb_update_stats()
6658 adapter->stats.ptc255 += rd32(E1000_PTC255); in igb_update_stats()
6659 adapter->stats.ptc511 += rd32(E1000_PTC511); in igb_update_stats()
6660 adapter->stats.ptc1023 += rd32(E1000_PTC1023); in igb_update_stats()
6661 adapter->stats.ptc1522 += rd32(E1000_PTC1522); in igb_update_stats()
6663 adapter->stats.mptc += rd32(E1000_MPTC); in igb_update_stats()
6664 adapter->stats.bptc += rd32(E1000_BPTC); in igb_update_stats()
6666 adapter->stats.tpt += rd32(E1000_TPT); in igb_update_stats()
6667 adapter->stats.colc += rd32(E1000_COLC); in igb_update_stats()
6669 adapter->stats.algnerrc += rd32(E1000_ALGNERRC); in igb_update_stats()
6673 adapter->stats.rxerrc += rd32(E1000_RXERRC); in igb_update_stats()
6676 if ((hw->mac.type != e1000_i210) && in igb_update_stats()
6677 (hw->mac.type != e1000_i211)) in igb_update_stats()
6678 adapter->stats.tncrs += rd32(E1000_TNCRS); in igb_update_stats()
6681 adapter->stats.tsctc += rd32(E1000_TSCTC); in igb_update_stats()
6682 adapter->stats.tsctfc += rd32(E1000_TSCTFC); in igb_update_stats()
6684 adapter->stats.iac += rd32(E1000_IAC); in igb_update_stats()
6685 adapter->stats.icrxoc += rd32(E1000_ICRXOC); in igb_update_stats()
6686 adapter->stats.icrxptc += rd32(E1000_ICRXPTC); in igb_update_stats()
6687 adapter->stats.icrxatc += rd32(E1000_ICRXATC); in igb_update_stats()
6688 adapter->stats.ictxptc += rd32(E1000_ICTXPTC); in igb_update_stats()
6689 adapter->stats.ictxatc += rd32(E1000_ICTXATC); in igb_update_stats()
6690 adapter->stats.ictxqec += rd32(E1000_ICTXQEC); in igb_update_stats()
6691 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC); in igb_update_stats()
6692 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); in igb_update_stats()
6695 net_stats->multicast = adapter->stats.mprc; in igb_update_stats()
6696 net_stats->collisions = adapter->stats.colc; in igb_update_stats()
6703 net_stats->rx_errors = adapter->stats.rxerrc + in igb_update_stats()
6704 adapter->stats.crcerrs + adapter->stats.algnerrc + in igb_update_stats()
6705 adapter->stats.ruc + adapter->stats.roc + in igb_update_stats()
6706 adapter->stats.cexterr; in igb_update_stats()
6707 net_stats->rx_length_errors = adapter->stats.ruc + in igb_update_stats()
6708 adapter->stats.roc; in igb_update_stats()
6709 net_stats->rx_crc_errors = adapter->stats.crcerrs; in igb_update_stats()
6710 net_stats->rx_frame_errors = adapter->stats.algnerrc; in igb_update_stats()
6711 net_stats->rx_missed_errors = adapter->stats.mpc; in igb_update_stats()
6713 /* Tx Errors */ in igb_update_stats()
6714 net_stats->tx_errors = adapter->stats.ecol + in igb_update_stats()
6715 adapter->stats.latecol; in igb_update_stats()
6716 net_stats->tx_aborted_errors = adapter->stats.ecol; in igb_update_stats()
6717 net_stats->tx_window_errors = adapter->stats.latecol; in igb_update_stats()
6718 net_stats->tx_carrier_errors = adapter->stats.tncrs; in igb_update_stats()
6720 /* Tx Dropped needs to be maintained elsewhere */ in igb_update_stats()
6723 adapter->stats.mgptc += rd32(E1000_MGTPTC); in igb_update_stats()
6724 adapter->stats.mgprc += rd32(E1000_MGTPRC); in igb_update_stats()
6725 adapter->stats.mgpdc += rd32(E1000_MGTPDC); in igb_update_stats()
6730 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC); in igb_update_stats()
6731 adapter->stats.o2bspc += rd32(E1000_O2BSPC); in igb_update_stats()
6732 adapter->stats.b2ospc += rd32(E1000_B2OSPC); in igb_update_stats()
6733 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC); in igb_update_stats()
6739 struct e1000_hw *hw = &adapter->hw; in igb_tsync_interrupt()
6746 if (adapter->ptp_caps.pps) in igb_tsync_interrupt()
6747 ptp_clock_event(adapter->ptp_clock, &event); in igb_tsync_interrupt()
6753 schedule_work(&adapter->ptp_tx_work); in igb_tsync_interrupt()
6758 spin_lock(&adapter->tmreg_lock); in igb_tsync_interrupt()
6759 ts = timespec64_add(adapter->perout[0].start, in igb_tsync_interrupt()
6760 adapter->perout[0].period); in igb_tsync_interrupt()
6767 adapter->perout[0].start = ts; in igb_tsync_interrupt()
6768 spin_unlock(&adapter->tmreg_lock); in igb_tsync_interrupt()
6773 spin_lock(&adapter->tmreg_lock); in igb_tsync_interrupt()
6774 ts = timespec64_add(adapter->perout[1].start, in igb_tsync_interrupt()
6775 adapter->perout[1].period); in igb_tsync_interrupt()
6781 adapter->perout[1].start = ts; in igb_tsync_interrupt()
6782 spin_unlock(&adapter->tmreg_lock); in igb_tsync_interrupt()
6792 ptp_clock_event(adapter->ptp_clock, &event); in igb_tsync_interrupt()
6802 ptp_clock_event(adapter->ptp_clock, &event); in igb_tsync_interrupt()
6813 struct e1000_hw *hw = &adapter->hw; in igb_msix_other()
6818 schedule_work(&adapter->reset_task); in igb_msix_other()
6822 adapter->stats.doosync++; in igb_msix_other()
6835 hw->mac.get_link_status = 1; in igb_msix_other()
6837 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_msix_other()
6838 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igb_msix_other()
6844 wr32(E1000_EIMS, adapter->eims_other); in igb_msix_other()
6851 struct igb_adapter *adapter = q_vector->adapter; in igb_write_itr()
6852 u32 itr_val = q_vector->itr_val & 0x7FFC; in igb_write_itr()
6854 if (!q_vector->set_itr) in igb_write_itr()
6860 if (adapter->hw.mac.type == e1000_82575) in igb_write_itr()
6865 writel(itr_val, q_vector->itr_register); in igb_write_itr()
6866 q_vector->set_itr = 0; in igb_write_itr()
6876 napi_schedule(&q_vector->napi); in igb_msix_ring()
6886 struct e1000_hw *hw = &adapter->hw; in igb_update_tx_dca()
6887 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); in igb_update_tx_dca()
6889 if (hw->mac.type != e1000_82575) in igb_update_tx_dca()
6900 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); in igb_update_tx_dca()
6907 struct e1000_hw *hw = &adapter->hw; in igb_update_rx_dca()
6908 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu); in igb_update_rx_dca()
6910 if (hw->mac.type != e1000_82575) in igb_update_rx_dca()
6920 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); in igb_update_rx_dca()
6925 struct igb_adapter *adapter = q_vector->adapter; in igb_update_dca()
6928 if (q_vector->cpu == cpu) in igb_update_dca()
6931 if (q_vector->tx.ring) in igb_update_dca()
6932 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu); in igb_update_dca()
6934 if (q_vector->rx.ring) in igb_update_dca()
6935 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu); in igb_update_dca()
6937 q_vector->cpu = cpu; in igb_update_dca()
6944 struct e1000_hw *hw = &adapter->hw; in igb_setup_dca()
6947 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED)) in igb_setup_dca()
6953 for (i = 0; i < adapter->num_q_vectors; i++) { in igb_setup_dca()
6954 adapter->q_vector[i]->cpu = -1; in igb_setup_dca()
6955 igb_update_dca(adapter->q_vector[i]); in igb_setup_dca()
6963 struct pci_dev *pdev = adapter->pdev; in __igb_notify_dca()
6964 struct e1000_hw *hw = &adapter->hw; in __igb_notify_dca()
6970 if (adapter->flags & IGB_FLAG_DCA_ENABLED) in __igb_notify_dca()
6973 adapter->flags |= IGB_FLAG_DCA_ENABLED; in __igb_notify_dca()
6974 dev_info(&pdev->dev, "DCA enabled\n"); in __igb_notify_dca()
6980 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { in __igb_notify_dca()
6985 dev_info(&pdev->dev, "DCA disabled\n"); in __igb_notify_dca()
6986 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; in __igb_notify_dca()
7016 adapter->vf_data[vf].spoofchk_enabled = true; in igb_vf_configure()
7019 adapter->vf_data[vf].trusted = false; in igb_vf_configure()
7027 struct e1000_hw *hw = &adapter->hw; in igb_ping_all_vfs()
7031 for (i = 0 ; i < adapter->vfs_allocated_count; i++) { in igb_ping_all_vfs()
7033 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS) in igb_ping_all_vfs()
7041 struct e1000_hw *hw = &adapter->hw; in igb_set_vf_promisc()
7043 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_set_vf_promisc()
7045 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | in igb_set_vf_promisc()
7051 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; in igb_set_vf_promisc()
7058 if (vf_data->num_vf_mc_hashes > 30) { in igb_set_vf_promisc()
7060 } else if (vf_data->num_vf_mc_hashes) { in igb_set_vf_promisc()
7064 for (j = 0; j < vf_data->num_vf_mc_hashes; j++) in igb_set_vf_promisc()
7065 igb_mta_set(hw, vf_data->vf_mc_hashes[j]); in igb_set_vf_promisc()
7073 return -EINVAL; in igb_set_vf_promisc()
7083 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_set_vf_multicasts()
7090 vf_data->num_vf_mc_hashes = n; in igb_set_vf_multicasts()
7098 vf_data->vf_mc_hashes[i] = hash_list[i]; in igb_set_vf_multicasts()
7101 igb_set_rx_mode(adapter->netdev); in igb_set_vf_multicasts()
7108 struct e1000_hw *hw = &adapter->hw; in igb_restore_vf_multicasts()
7112 for (i = 0; i < adapter->vfs_allocated_count; i++) { in igb_restore_vf_multicasts()
7117 vf_data = &adapter->vf_data[i]; in igb_restore_vf_multicasts()
7119 if ((vf_data->num_vf_mc_hashes > 30) || in igb_restore_vf_multicasts()
7120 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) { in igb_restore_vf_multicasts()
7122 } else if (vf_data->num_vf_mc_hashes) { in igb_restore_vf_multicasts()
7124 for (j = 0; j < vf_data->num_vf_mc_hashes; j++) in igb_restore_vf_multicasts()
7125 igb_mta_set(hw, vf_data->vf_mc_hashes[j]); in igb_restore_vf_multicasts()
7133 struct e1000_hw *hw = &adapter->hw; in igb_clear_vf_vfta()
7142 adapter->vfs_allocated_count); in igb_clear_vf_vfta()
7145 for (i = E1000_VLVF_ARRAY_SIZE; i--;) { in igb_clear_vf_vfta()
7168 vfta = adapter->shadow_vfta[vid / 32]; in igb_clear_vf_vfta()
7170 hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask); in igb_clear_vf_vfta()
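The VLAN filter table (VFTA) written here is a bitmap spread over 32-bit registers: VLAN ID vid lives in register vid / 32 at bit vid % 32, and the shadow copy is XOR-ed with that bit mask to toggle the filter before being written back. A minimal, user-space worked example of the index/bit split (values are illustrative, not from the driver):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int vid = 100;
	unsigned int reg = vid / 32;          /* register index: 3     */
	uint32_t     bit = 1u << (vid % 32);  /* bit 4 in that register */
	uint32_t     shadow = 0;

	shadow ^= bit;   /* set the filter bit   */
	printf("vid %u -> VFTA[%u] bit %u, shadow=0x%08X\n",
	       vid, reg, vid % 32, shadow);
	shadow ^= bit;   /* toggle it back off   */
	printf("after clearing: shadow=0x%08X\n", shadow);
	return 0;
}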
7173 if (adapter->flags & IGB_FLAG_VLAN_PROMISC) in igb_clear_vf_vfta()
7193 for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) { in igb_find_vlvf_entry()
7204 struct e1000_hw *hw = &adapter->hw; in igb_update_pf_vlvf()
7215 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; in igb_update_pf_vlvf()
7221 if (adapter->flags & IGB_FLAG_VLAN_PROMISC) in igb_update_pf_vlvf()
7231 int pf_id = adapter->vfs_allocated_count; in igb_set_vf_vlan()
7232 struct e1000_hw *hw = &adapter->hw; in igb_set_vf_vlan()
7240 if (add && test_bit(vid, adapter->active_vlans)) { in igb_set_vf_vlan()
7255 if (test_bit(vid, adapter->active_vlans) || in igb_set_vf_vlan()
7256 (adapter->flags & IGB_FLAG_VLAN_PROMISC)) in igb_set_vf_vlan()
7264 struct e1000_hw *hw = &adapter->hw; in igb_set_vmvir()
7285 if (vlan != adapter->vf_data[vf].pf_vlan) in igb_enable_port_vlan()
7286 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, in igb_enable_port_vlan()
7289 adapter->vf_data[vf].pf_vlan = vlan; in igb_enable_port_vlan()
7290 adapter->vf_data[vf].pf_qos = qos; in igb_enable_port_vlan()
7292 dev_info(&adapter->pdev->dev, in igb_enable_port_vlan()
7294 if (test_bit(__IGB_DOWN, &adapter->state)) { in igb_enable_port_vlan()
7295 dev_warn(&adapter->pdev->dev, in igb_enable_port_vlan()
7297 dev_warn(&adapter->pdev->dev, in igb_enable_port_vlan()
7313 if (adapter->vf_data[vf].pf_vlan) in igb_disable_port_vlan()
7314 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, in igb_disable_port_vlan()
7317 adapter->vf_data[vf].pf_vlan = 0; in igb_disable_port_vlan()
7318 adapter->vf_data[vf].pf_qos = 0; in igb_disable_port_vlan()
7329 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) in igb_ndo_set_vf_vlan()
7330 return -EINVAL; in igb_ndo_set_vf_vlan()
7333 return -EPROTONOSUPPORT; in igb_ndo_set_vf_vlan()
7345 if (adapter->vf_data[vf].pf_vlan) in igb_set_vf_vlan_msg()
7346 return -1; in igb_set_vf_vlan_msg()
7360 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_vf_reset()
7362 /* clear flags - except flag that indicates PF has set the MAC */ in igb_vf_reset()
7363 vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC; in igb_vf_reset()
7364 vf_data->last_nack = jiffies; in igb_vf_reset()
7368 igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf); in igb_vf_reset()
7369 igb_set_vmvir(adapter, vf_data->pf_vlan | in igb_vf_reset()
7370 (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf); in igb_vf_reset()
7371 igb_set_vmolr(adapter, vf, !vf_data->pf_vlan); in igb_vf_reset()
7372 igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan)); in igb_vf_reset()
7375 adapter->vf_data[vf].num_vf_mc_hashes = 0; in igb_vf_reset()
7378 igb_set_rx_mode(adapter->netdev); in igb_vf_reset()
7383 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; in igb_vf_reset_event()
7386 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) in igb_vf_reset_event()
7395 struct e1000_hw *hw = &adapter->hw; in igb_vf_reset_msg()
7396 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; in igb_vf_reset_msg()
7412 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; in igb_vf_reset_msg()
7426 struct e1000_hw *hw = &adapter->hw; in igb_flush_mac_table()
7429 for (i = 0; i < hw->mac.rar_entry_count; i++) { in igb_flush_mac_table()
7430 adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE; in igb_flush_mac_table()
7431 eth_zero_addr(adapter->mac_table[i].addr); in igb_flush_mac_table()
7432 adapter->mac_table[i].queue = 0; in igb_flush_mac_table()
7439 struct e1000_hw *hw = &adapter->hw; in igb_available_rars()
7441 int rar_entries = hw->mac.rar_entry_count - in igb_available_rars()
7442 adapter->vfs_allocated_count; in igb_available_rars()
7447 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) in igb_available_rars()
7451 if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) && in igb_available_rars()
7452 (adapter->mac_table[i].queue != queue)) in igb_available_rars()
7464 struct igb_mac_addr *mac_table = &adapter->mac_table[0]; in igb_set_default_mac_filter()
7466 ether_addr_copy(mac_table->addr, adapter->hw.mac.addr); in igb_set_default_mac_filter()
7467 mac_table->queue = adapter->vfs_allocated_count; in igb_set_default_mac_filter()
7468 mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE; in igb_set_default_mac_filter()
7481 if (!(entry->state & IGB_MAC_STATE_IN_USE)) in igb_mac_entry_can_be_used()
7484 if ((entry->state & IGB_MAC_STATE_SRC_ADDR) != in igb_mac_entry_can_be_used()
7488 if (!ether_addr_equal(addr, entry->addr)) in igb_mac_entry_can_be_used()
7503 struct e1000_hw *hw = &adapter->hw; in igb_add_mac_filter_flags()
7504 int rar_entries = hw->mac.rar_entry_count - in igb_add_mac_filter_flags()
7505 adapter->vfs_allocated_count; in igb_add_mac_filter_flags()
7509 return -EINVAL; in igb_add_mac_filter_flags()
7516 if (!igb_mac_entry_can_be_used(&adapter->mac_table[i], in igb_add_mac_filter_flags()
7520 ether_addr_copy(adapter->mac_table[i].addr, addr); in igb_add_mac_filter_flags()
7521 adapter->mac_table[i].queue = queue; in igb_add_mac_filter_flags()
7522 adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags; in igb_add_mac_filter_flags()
7528 return -ENOSPC; in igb_add_mac_filter_flags()
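The RAR-backed MAC filter table is searched linearly for a usable slot (free, or in the full driver an entry that already matches via igb_mac_entry_can_be_used()); when no slot is found the add fails with -ENOSPC. Below is a minimal user-space sketch of that table walk, considering only free slots, with hypothetical IGB_MAC_STATE_*-style flags standing in for the adapter's mac_table:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the driver's state bits and errno value. */
#define MAC_STATE_IN_USE  0x1
#define MAC_STATE_DEFAULT 0x2
#define ERR_NOSPC         (-28)

struct mac_entry {
	uint8_t addr[6];
	uint8_t queue;
	uint8_t state;
};

/* Return the first index able to hold @addr for @queue, or ERR_NOSPC. */
static int mac_table_add(struct mac_entry *tbl, int n,
			 const uint8_t *addr, uint8_t queue)
{
	for (int i = 0; i < n; i++) {
		if (tbl[i].state & MAC_STATE_IN_USE)
			continue;               /* slot already taken */
		memcpy(tbl[i].addr, addr, 6);
		tbl[i].queue = queue;
		tbl[i].state |= MAC_STATE_IN_USE;
		return i;
	}
	return ERR_NOSPC;                       /* table is full */
}

int main(void)
{
	struct mac_entry tbl[4] = {
		{ .state = MAC_STATE_IN_USE | MAC_STATE_DEFAULT }, /* default MAC */
	};
	const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	printf("added at index %d\n", mac_table_add(tbl, 4, mac, 0));
	return 0;
}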
7547 struct e1000_hw *hw = &adapter->hw; in igb_del_mac_filter_flags()
7548 int rar_entries = hw->mac.rar_entry_count - in igb_del_mac_filter_flags()
7549 adapter->vfs_allocated_count; in igb_del_mac_filter_flags()
7553 return -EINVAL; in igb_del_mac_filter_flags()
7560 if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)) in igb_del_mac_filter_flags()
7562 if ((adapter->mac_table[i].state & flags) != flags) in igb_del_mac_filter_flags()
7564 if (adapter->mac_table[i].queue != queue) in igb_del_mac_filter_flags()
7566 if (!ether_addr_equal(adapter->mac_table[i].addr, addr)) in igb_del_mac_filter_flags()
7572 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) { in igb_del_mac_filter_flags()
7573 adapter->mac_table[i].state = in igb_del_mac_filter_flags()
7575 adapter->mac_table[i].queue = in igb_del_mac_filter_flags()
7576 adapter->vfs_allocated_count; in igb_del_mac_filter_flags()
7578 adapter->mac_table[i].state = 0; in igb_del_mac_filter_flags()
7579 adapter->mac_table[i].queue = 0; in igb_del_mac_filter_flags()
7580 eth_zero_addr(adapter->mac_table[i].addr); in igb_del_mac_filter_flags()
7587 return -ENOENT; in igb_del_mac_filter_flags()
7599 struct e1000_hw *hw = &adapter->hw; in igb_add_mac_steering_filter()
7604 if (hw->mac.type != e1000_i210) in igb_add_mac_steering_filter()
7605 return -EOPNOTSUPP; in igb_add_mac_steering_filter()
7623 ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count); in igb_uc_sync()
7632 igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count); in igb_uc_unsync()
7640 struct pci_dev *pdev = adapter->pdev; in igb_set_vf_mac_filter()
7641 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_set_vf_mac_filter()
7649 list_for_each(pos, &adapter->vf_macs.l) { in igb_set_vf_mac_filter()
7651 if (entry->vf == vf) { in igb_set_vf_mac_filter()
7652 entry->vf = -1; in igb_set_vf_mac_filter()
7653 entry->free = true; in igb_set_vf_mac_filter()
7654 igb_del_mac_filter(adapter, entry->vf_mac, vf); in igb_set_vf_mac_filter()
7659 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && in igb_set_vf_mac_filter()
7660 !vf_data->trusted) { in igb_set_vf_mac_filter()
7661 dev_warn(&pdev->dev, in igb_set_vf_mac_filter()
7664 return -EINVAL; in igb_set_vf_mac_filter()
7667 dev_warn(&pdev->dev, in igb_set_vf_mac_filter()
7670 return -EINVAL; in igb_set_vf_mac_filter()
7674 list_for_each(pos, &adapter->vf_macs.l) { in igb_set_vf_mac_filter()
7676 if (entry->free) in igb_set_vf_mac_filter()
7680 if (entry && entry->free) { in igb_set_vf_mac_filter()
7681 entry->free = false; in igb_set_vf_mac_filter()
7682 entry->vf = vf; in igb_set_vf_mac_filter()
7683 ether_addr_copy(entry->vf_mac, addr); in igb_set_vf_mac_filter()
7688 ret = -ENOSPC; in igb_set_vf_mac_filter()
7691 if (ret == -ENOSPC) in igb_set_vf_mac_filter()
7692 dev_warn(&pdev->dev, in igb_set_vf_mac_filter()
7697 ret = -EINVAL; in igb_set_vf_mac_filter()
7706 struct pci_dev *pdev = adapter->pdev; in igb_set_vf_mac_addr()
7707 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_set_vf_mac_addr()
7717 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && in igb_set_vf_mac_addr()
7718 !vf_data->trusted) { in igb_set_vf_mac_addr()
7719 dev_warn(&pdev->dev, in igb_set_vf_mac_addr()
7722 return -EINVAL; in igb_set_vf_mac_addr()
7726 dev_warn(&pdev->dev, in igb_set_vf_mac_addr()
7729 return -EINVAL; in igb_set_vf_mac_addr()
7742 struct e1000_hw *hw = &adapter->hw; in igb_rcv_ack_from_vf()
7743 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_rcv_ack_from_vf()
7747 if (!(vf_data->flags & IGB_VF_FLAG_CTS) && in igb_rcv_ack_from_vf()
7748 time_after(jiffies, vf_data->last_nack + (2 * HZ))) { in igb_rcv_ack_from_vf()
7750 vf_data->last_nack = jiffies; in igb_rcv_ack_from_vf()
7756 struct pci_dev *pdev = adapter->pdev; in igb_rcv_msg_from_vf()
7758 struct e1000_hw *hw = &adapter->hw; in igb_rcv_msg_from_vf()
7759 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_rcv_msg_from_vf()
7766 dev_err(&pdev->dev, "Error receiving message from VF\n"); in igb_rcv_msg_from_vf()
7767 vf_data->flags &= ~IGB_VF_FLAG_CTS; in igb_rcv_msg_from_vf()
7768 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) in igb_rcv_msg_from_vf()
7786 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { in igb_rcv_msg_from_vf()
7787 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) in igb_rcv_msg_from_vf()
7789 retval = -1; in igb_rcv_msg_from_vf()
7807 retval = -1; in igb_rcv_msg_from_vf()
7808 if (vf_data->pf_vlan) in igb_rcv_msg_from_vf()
7809 dev_warn(&pdev->dev, in igb_rcv_msg_from_vf()
7816 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); in igb_rcv_msg_from_vf()
7817 retval = -1; in igb_rcv_msg_from_vf()
7839 struct e1000_hw *hw = &adapter->hw; in igb_msg_task()
7842 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { in igb_msg_task()
7858 * igb_set_uta - Set unicast filter table address
7862 * The unicast table address (UTA) is a register array of 32-bit registers.
7870 struct e1000_hw *hw = &adapter->hw; in igb_set_uta()
7875 if (!adapter->vfs_allocated_count) in igb_set_uta()
7878 for (i = hw->mac.uta_reg_count; i--;) in igb_set_uta()
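The UTA update walks the whole register array with the compact countdown idiom for (i = n; i--;), which visits indices n-1 down to 0 and stops once i was zero. A tiny standalone demonstration of the iteration order:

#include <stdio.h>

int main(void)
{
	unsigned int n = 4;

	/* Prints 3 2 1 0: the test uses the old value, then decrements. */
	for (unsigned int i = n; i--;)
		printf("%u ", i);
	printf("\n");
	return 0;
}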
7883 * igb_intr_msi - Interrupt Handler
7890 struct igb_q_vector *q_vector = adapter->q_vector[0]; in igb_intr_msi()
7891 struct e1000_hw *hw = &adapter->hw; in igb_intr_msi()
7898 schedule_work(&adapter->reset_task); in igb_intr_msi()
7902 adapter->stats.doosync++; in igb_intr_msi()
7906 hw->mac.get_link_status = 1; in igb_intr_msi()
7907 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_intr_msi()
7908 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igb_intr_msi()
7914 napi_schedule(&q_vector->napi); in igb_intr_msi()
7920 * igb_intr - Legacy Interrupt Handler
7927 struct igb_q_vector *q_vector = adapter->q_vector[0]; in igb_intr()
7928 struct e1000_hw *hw = &adapter->hw; in igb_intr()
7929 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No in igb_intr()
7934 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is in igb_intr()
7943 schedule_work(&adapter->reset_task); in igb_intr()
7947 adapter->stats.doosync++; in igb_intr()
7951 hw->mac.get_link_status = 1; in igb_intr()
7953 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_intr()
7954 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igb_intr()
7960 napi_schedule(&q_vector->napi); in igb_intr()
7967 struct igb_adapter *adapter = q_vector->adapter; in igb_ring_irq_enable()
7968 struct e1000_hw *hw = &adapter->hw; in igb_ring_irq_enable()
7970 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || in igb_ring_irq_enable()
7971 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { in igb_ring_irq_enable()
7972 if ((adapter->num_q_vectors == 1) && !adapter->vf_data) in igb_ring_irq_enable()
7978 if (!test_bit(__IGB_DOWN, &adapter->state)) { in igb_ring_irq_enable()
7979 if (adapter->flags & IGB_FLAG_HAS_MSIX) in igb_ring_irq_enable()
7980 wr32(E1000_EIMS, q_vector->eims_value); in igb_ring_irq_enable()
7987 * igb_poll - NAPI Rx polling callback
8000 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) in igb_poll()
8003 if (q_vector->tx.ring) in igb_poll()
8006 if (q_vector->rx.ring) { in igb_poll()
8018 /* Exit the polling mode, but don't re-enable interrupts if stack might in igb_poll()
8019 * poll us due to busy-polling in igb_poll()
8024 return min(work_done, budget - 1); in igb_poll()
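Returning min(work_done, budget - 1) keeps igb_poll within the NAPI contract: a poll routine that has completed must report strictly less than its budget, while returning the full budget tells the core to keep polling without re-enabling interrupts; the "- 1" guards against accidentally reporting a full budget after completion. A small user-space sketch of that accounting, with a counter standing in for the Rx ring:

#include <stdio.h>

static int pending = 70;      /* packets waiting on a pretend Rx ring */

/* Clean up to @budget packets and report how many were handled. */
static int fake_poll(int budget)
{
	int work_done = pending < budget ? pending : budget;

	pending -= work_done;
	if (pending)
		return budget;                 /* more work: keep polling me */
	return work_done < budget ? work_done : budget - 1;
}

int main(void)
{
	int budget = 64, done;

	do {
		done = fake_poll(budget);
		printf("poll returned %d (pending now %d)\n", done, pending);
	} while (done == budget);              /* core re-polls while budget is used up */
	return 0;
}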
8028 * igb_clean_tx_irq - Reclaim resources after transmit completes
8036 struct igb_adapter *adapter = q_vector->adapter; in igb_clean_tx_irq()
8037 struct igb_ring *tx_ring = q_vector->tx.ring; in igb_clean_tx_irq()
8041 unsigned int budget = q_vector->tx.work_limit; in igb_clean_tx_irq()
8042 unsigned int i = tx_ring->next_to_clean; in igb_clean_tx_irq()
8044 if (test_bit(__IGB_DOWN, &adapter->state)) in igb_clean_tx_irq()
8047 tx_buffer = &tx_ring->tx_buffer_info[i]; in igb_clean_tx_irq()
8049 i -= tx_ring->count; in igb_clean_tx_irq()
8052 union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; in igb_clean_tx_irq()
8062 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) in igb_clean_tx_irq()
8066 tx_buffer->next_to_watch = NULL; in igb_clean_tx_irq()
8069 total_bytes += tx_buffer->bytecount; in igb_clean_tx_irq()
8070 total_packets += tx_buffer->gso_segs; in igb_clean_tx_irq()
8073 if (tx_buffer->type == IGB_TYPE_SKB) in igb_clean_tx_irq()
8074 napi_consume_skb(tx_buffer->skb, napi_budget); in igb_clean_tx_irq()
8076 xdp_return_frame(tx_buffer->xdpf); in igb_clean_tx_irq()
8079 dma_unmap_single(tx_ring->dev, in igb_clean_tx_irq()
8093 i -= tx_ring->count; in igb_clean_tx_irq()
8094 tx_buffer = tx_ring->tx_buffer_info; in igb_clean_tx_irq()
8100 dma_unmap_page(tx_ring->dev, in igb_clean_tx_irq()
8113 i -= tx_ring->count; in igb_clean_tx_irq()
8114 tx_buffer = tx_ring->tx_buffer_info; in igb_clean_tx_irq()
8118 /* issue prefetch for next Tx descriptor */ in igb_clean_tx_irq()
8122 budget--; in igb_clean_tx_irq()
8127 i += tx_ring->count; in igb_clean_tx_irq()
8128 tx_ring->next_to_clean = i; in igb_clean_tx_irq()
8129 u64_stats_update_begin(&tx_ring->tx_syncp); in igb_clean_tx_irq()
8130 tx_ring->tx_stats.bytes += total_bytes; in igb_clean_tx_irq()
8131 tx_ring->tx_stats.packets += total_packets; in igb_clean_tx_irq()
8132 u64_stats_update_end(&tx_ring->tx_syncp); in igb_clean_tx_irq()
8133 q_vector->tx.total_bytes += total_bytes; in igb_clean_tx_irq()
8134 q_vector->tx.total_packets += total_packets; in igb_clean_tx_irq()
8136 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { in igb_clean_tx_irq()
8137 struct e1000_hw *hw = &adapter->hw; in igb_clean_tx_irq()
8142 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igb_clean_tx_irq()
8143 if (tx_buffer->next_to_watch && in igb_clean_tx_irq()
8144 time_after(jiffies, tx_buffer->time_stamp + in igb_clean_tx_irq()
8145 (adapter->tx_timeout_factor * HZ)) && in igb_clean_tx_irq()
8148 /* detected Tx unit hang */ in igb_clean_tx_irq()
8149 dev_err(tx_ring->dev, in igb_clean_tx_irq()
8150 "Detected Tx Unit Hang\n" in igb_clean_tx_irq()
8151 " Tx Queue <%d>\n" in igb_clean_tx_irq()
8161 tx_ring->queue_index, in igb_clean_tx_irq()
8162 rd32(E1000_TDH(tx_ring->reg_idx)), in igb_clean_tx_irq()
8163 readl(tx_ring->tail), in igb_clean_tx_irq()
8164 tx_ring->next_to_use, in igb_clean_tx_irq()
8165 tx_ring->next_to_clean, in igb_clean_tx_irq()
8166 tx_buffer->time_stamp, in igb_clean_tx_irq()
8167 tx_buffer->next_to_watch, in igb_clean_tx_irq()
8169 tx_buffer->next_to_watch->wb.status); in igb_clean_tx_irq()
8170 netif_stop_subqueue(tx_ring->netdev, in igb_clean_tx_irq()
8171 tx_ring->queue_index); in igb_clean_tx_irq()
8180 netif_carrier_ok(tx_ring->netdev) && in igb_clean_tx_irq()
8186 if (__netif_subqueue_stopped(tx_ring->netdev, in igb_clean_tx_irq()
8187 tx_ring->queue_index) && in igb_clean_tx_irq()
8188 !(test_bit(__IGB_DOWN, &adapter->state))) { in igb_clean_tx_irq()
8189 netif_wake_subqueue(tx_ring->netdev, in igb_clean_tx_irq()
8190 tx_ring->queue_index); in igb_clean_tx_irq()
8192 u64_stats_update_begin(&tx_ring->tx_syncp); in igb_clean_tx_irq()
8193 tx_ring->tx_stats.restart_queue++; in igb_clean_tx_irq()
8194 u64_stats_update_end(&tx_ring->tx_syncp); in igb_clean_tx_irq()
8202 * igb_reuse_rx_page - page flip buffer and store it back on the ring
8212 u16 nta = rx_ring->next_to_alloc; in igb_reuse_rx_page()
8214 new_buff = &rx_ring->rx_buffer_info[nta]; in igb_reuse_rx_page()
8218 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in igb_reuse_rx_page()
8224 new_buff->dma = old_buff->dma; in igb_reuse_rx_page()
8225 new_buff->page = old_buff->page; in igb_reuse_rx_page()
8226 new_buff->page_offset = old_buff->page_offset; in igb_reuse_rx_page()
8227 new_buff->pagecnt_bias = old_buff->pagecnt_bias; in igb_reuse_rx_page()
8237 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; in igb_can_reuse_rx_page()
8238 struct page *page = rx_buffer->page; in igb_can_reuse_rx_page()
8240 /* avoid re-using remote pages */ in igb_can_reuse_rx_page()
8246 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) in igb_can_reuse_rx_page()
8250 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048) in igb_can_reuse_rx_page()
8252 if (rx_buffer->page_offset > IGB_LAST_OFFSET) in igb_can_reuse_rx_page()
8261 page_ref_add(page, USHRT_MAX - 1); in igb_can_reuse_rx_page()
8262 rx_buffer->pagecnt_bias = USHRT_MAX; in igb_can_reuse_rx_page()
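The pagecnt_bias scheme front-loads the page reference count: the driver adds USHRT_MAX - 1 references once, then hands a reference to the stack per packet by decrementing a local bias instead of touching the atomic refcount, and only refreshes the bias (as above) when it runs low. Reuse of the buffer is allowed while page_ref_count(page) - pagecnt_bias shows at most the just-delivered reference outstanding. The sketch below models that bookkeeping with a plain integer in place of the atomic page refcount; the names are illustrative, not the driver's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIAS_MAX 65535u   /* stand-in for USHRT_MAX */

struct rx_buf {
	unsigned int page_refcount;   /* stands in for page_ref_count(page) */
	uint16_t pagecnt_bias;
};

static void buf_init(struct rx_buf *b)
{
	b->page_refcount = 1;                     /* allocation reference   */
	b->page_refcount += BIAS_MAX - 1;         /* front-load references  */
	b->pagecnt_bias = BIAS_MAX;
}

/* Hand one reference to the stack: cheap local decrement, no atomic op. */
static void buf_give_to_stack(struct rx_buf *b)  { b->pagecnt_bias--; }

/* The stack eventually drops its reference on the real page. */
static void stack_frees_page(struct rx_buf *b)   { b->page_refcount--; }

static bool buf_can_reuse(const struct rx_buf *b)
{
	/* More than one outstanding reference means someone else still
	 * holds part of the page beyond the packet just delivered. */
	return (b->page_refcount - b->pagecnt_bias) <= 1;
}

int main(void)
{
	struct rx_buf b;

	buf_init(&b);
	buf_give_to_stack(&b);                    /* packet 1 delivered */
	printf("after packet 1: reusable=%d\n", buf_can_reuse(&b));
	buf_give_to_stack(&b);                    /* packet 2, packet 1 not freed yet */
	printf("after packet 2: reusable=%d\n", buf_can_reuse(&b));
	stack_frees_page(&b);                     /* stack releases packet 1 */
	printf("after free:     reusable=%d\n", buf_can_reuse(&b));
	return 0;
}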
8269 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
8275 * This function will add the data contained in rx_buffer->page to the skb.
8289 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, in igb_add_rx_frag()
8290 rx_buffer->page_offset, size, truesize); in igb_add_rx_frag()
8292 rx_buffer->page_offset ^= truesize; in igb_add_rx_frag()
8294 rx_buffer->page_offset += truesize; in igb_add_rx_frag()
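On systems with 4 KiB pages the receive buffer is split into two 2 KiB halves, and page_offset ^= truesize simply toggles between offset 0 and offset 2048, so the other half can be handed out while the stack still owns the first; larger page sizes fall back to the running += truesize path. A one-liner worked example of the XOR flip, assuming a truesize of 2048:

#include <stdio.h>

int main(void)
{
	unsigned int truesize = 2048, offset = 0;

	for (int i = 0; i < 4; i++) {
		printf("use half at offset %u\n", offset);
		offset ^= truesize;   /* 0 -> 2048 -> 0 -> 2048 ... */
	}
	return 0;
}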
8306 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - in igb_construct_skb()
8307 xdp->data_hard_start); in igb_construct_skb()
8309 unsigned int size = xdp->data_end - xdp->data; in igb_construct_skb()
8314 net_prefetch(xdp->data); in igb_construct_skb()
8317 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); in igb_construct_skb()
8322 igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb); in igb_construct_skb()
8323 xdp->data += IGB_TS_HDR_LEN; in igb_construct_skb()
8324 size -= IGB_TS_HDR_LEN; in igb_construct_skb()
8330 headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN); in igb_construct_skb()
8333 memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long))); in igb_construct_skb()
8336 size -= headlen; in igb_construct_skb()
8338 skb_add_rx_frag(skb, 0, rx_buffer->page, in igb_construct_skb()
8339 (xdp->data + headlen) - page_address(rx_buffer->page), in igb_construct_skb()
8342 rx_buffer->page_offset ^= truesize; in igb_construct_skb()
8344 rx_buffer->page_offset += truesize; in igb_construct_skb()
8347 rx_buffer->pagecnt_bias++; in igb_construct_skb()
8362 SKB_DATA_ALIGN(xdp->data_end - in igb_build_skb()
8363 xdp->data_hard_start); in igb_build_skb()
8365 unsigned int metasize = xdp->data - xdp->data_meta; in igb_build_skb()
8369 net_prefetch(xdp->data_meta); in igb_build_skb()
8372 skb = build_skb(xdp->data_hard_start, truesize); in igb_build_skb()
8377 skb_reserve(skb, xdp->data - xdp->data_hard_start); in igb_build_skb()
8378 __skb_put(skb, xdp->data_end - xdp->data); in igb_build_skb()
8385 igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb); in igb_build_skb()
8391 rx_buffer->page_offset ^= truesize; in igb_build_skb()
8393 rx_buffer->page_offset += truesize; in igb_build_skb()
8408 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in igb_run_xdp()
8413 prefetchw(xdp->data_hard_start); /* xdp_frame write */ in igb_run_xdp()
8423 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); in igb_run_xdp()
8433 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in igb_run_xdp()
8441 return ERR_PTR(-result); in igb_run_xdp()
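igb_run_xdp folds the XDP verdict into the returned skb pointer using the kernel's ERR_PTR encoding, and the caller (note the -PTR_ERR(skb) use further down in igb_clean_rx_irq) recovers it with PTR_ERR. The standalone sketch below re-implements the ERR_PTR/PTR_ERR/IS_ERR idiom in plain C to show how a small verdict code survives the round trip; the kernel's own helpers live in <linux/err.h>:

#include <stdbool.h>
#include <stdio.h>

#define MAX_ERRNO 4095UL

static inline void *err_ptr(long error)      { return (void *)error; }
static inline long  ptr_err(const void *ptr) { return (long)ptr; }
static inline bool  is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

enum { VERDICT_PASS = 1, VERDICT_CONSUMED = 2 };   /* illustrative codes */

int main(void)
{
	/* Encode a verdict as a small negative value hidden in the pointer... */
	void *skb = err_ptr(-VERDICT_CONSUMED);

	/* ...and decode it on the other side, as the Rx clean loop does. */
	if (is_err(skb))
		printf("xdp verdict = %ld\n", -ptr_err(skb));
	return 0;
}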
8450 truesize = igb_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ in igb_rx_frame_truesize()
8466 rx_buffer->page_offset ^= truesize; in igb_rx_buffer_flip()
8468 rx_buffer->page_offset += truesize; in igb_rx_buffer_flip()
8483 if (!(ring->netdev->features & NETIF_F_RXCSUM)) in igb_rx_checksum()
8494 if (!((skb->len == 60) && in igb_rx_checksum()
8495 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { in igb_rx_checksum()
8496 u64_stats_update_begin(&ring->rx_syncp); in igb_rx_checksum()
8497 ring->rx_stats.csum_err++; in igb_rx_checksum()
8498 u64_stats_update_end(&ring->rx_syncp); in igb_rx_checksum()
8506 skb->ip_summed = CHECKSUM_UNNECESSARY; in igb_rx_checksum()
8508 dev_dbg(ring->dev, "cksum success: bits %08X\n", in igb_rx_checksum()
8509 le32_to_cpu(rx_desc->wb.upper.status_error)); in igb_rx_checksum()
8516 if (ring->netdev->features & NETIF_F_RXHASH) in igb_rx_hash()
8518 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), in igb_rx_hash()
8523 * igb_is_non_eop - process handling of non-EOP buffers
8530 * that this is in fact a non-EOP buffer.
8535 u32 ntc = rx_ring->next_to_clean + 1; in igb_is_non_eop()
8538 ntc = (ntc < rx_ring->count) ? ntc : 0; in igb_is_non_eop()
8539 rx_ring->next_to_clean = ntc; in igb_is_non_eop()
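Ring indices advance without a modulo: the candidate index is bumped and wrapped back to zero with a single comparison, avoiding a division on every descriptor. A tiny demonstration with an arbitrary ring size of 4:

#include <stdio.h>

int main(void)
{
	unsigned int count = 4, ntc = 0;

	for (int i = 0; i < 6; i++) {
		printf("%u ", ntc);
		ntc++;
		ntc = (ntc < count) ? ntc : 0;   /* wrap without '%' */
	}
	printf("\n");                            /* prints: 0 1 2 3 0 1 */
	return 0;
}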
8550 * igb_cleanup_headers - Correct corrupted or empty headers
8573 struct net_device *netdev = rx_ring->netdev; in igb_cleanup_headers()
8574 if (!(netdev->features & NETIF_F_RXALL)) { in igb_cleanup_headers()
8588 * igb_process_skb_fields - Populate skb header fields from Rx descriptor
8601 struct net_device *dev = rx_ring->netdev; in igb_process_skb_fields()
8609 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); in igb_process_skb_fields()
8611 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && in igb_process_skb_fields()
8616 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) in igb_process_skb_fields()
8617 vid = be16_to_cpu(rx_desc->wb.upper.vlan); in igb_process_skb_fields()
8619 vid = le16_to_cpu(rx_desc->wb.upper.vlan); in igb_process_skb_fields()
8624 skb_record_rx_queue(skb, rx_ring->queue_index); in igb_process_skb_fields()
8626 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in igb_process_skb_fields()
8639 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in igb_get_rx_buffer()
8640 prefetchw(rx_buffer->page); in igb_get_rx_buffer()
8643 dma_sync_single_range_for_cpu(rx_ring->dev, in igb_get_rx_buffer()
8644 rx_buffer->dma, in igb_get_rx_buffer()
8645 rx_buffer->page_offset, in igb_get_rx_buffer()
8649 rx_buffer->pagecnt_bias--; in igb_get_rx_buffer()
8664 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in igb_put_rx_buffer()
8667 __page_frag_cache_drain(rx_buffer->page, in igb_put_rx_buffer()
8668 rx_buffer->pagecnt_bias); in igb_put_rx_buffer()
8672 rx_buffer->page = NULL; in igb_put_rx_buffer()
8677 struct igb_adapter *adapter = q_vector->adapter; in igb_clean_rx_irq()
8678 struct igb_ring *rx_ring = q_vector->rx.ring; in igb_clean_rx_irq()
8679 struct sk_buff *skb = rx_ring->skb; in igb_clean_rx_irq()
8685 xdp.rxq = &rx_ring->xdp_rxq; in igb_clean_rx_irq()
8703 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean); in igb_clean_rx_irq()
8704 size = le16_to_cpu(rx_desc->wb.upper.length); in igb_clean_rx_irq()
8718 xdp.data = page_address(rx_buffer->page) + in igb_clean_rx_irq()
8719 rx_buffer->page_offset; in igb_clean_rx_irq()
8721 xdp.data_hard_start = xdp.data - in igb_clean_rx_irq()
8732 unsigned int xdp_res = -PTR_ERR(skb); in igb_clean_rx_irq()
8738 rx_buffer->pagecnt_bias++; in igb_clean_rx_irq()
8752 rx_ring->rx_stats.alloc_failed++; in igb_clean_rx_irq()
8753 rx_buffer->pagecnt_bias++; in igb_clean_rx_irq()
8760 /* fetch next buffer in frame if non-eop */ in igb_clean_rx_irq()
8771 total_bytes += skb->len; in igb_clean_rx_irq()
8776 napi_gro_receive(&q_vector->napi, skb); in igb_clean_rx_irq()
8786 rx_ring->skb = skb; in igb_clean_rx_irq()
8797 u64_stats_update_begin(&rx_ring->rx_syncp); in igb_clean_rx_irq()
8798 rx_ring->rx_stats.packets += total_packets; in igb_clean_rx_irq()
8799 rx_ring->rx_stats.bytes += total_bytes; in igb_clean_rx_irq()
8800 u64_stats_update_end(&rx_ring->rx_syncp); in igb_clean_rx_irq()
8801 q_vector->rx.total_packets += total_packets; in igb_clean_rx_irq()
8802 q_vector->rx.total_bytes += total_bytes; in igb_clean_rx_irq()
8813 struct page *page = bi->page; in igb_alloc_mapped_page()
8823 rx_ring->rx_stats.alloc_failed++; in igb_alloc_mapped_page()
8828 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in igb_alloc_mapped_page()
8836 if (dma_mapping_error(rx_ring->dev, dma)) { in igb_alloc_mapped_page()
8839 rx_ring->rx_stats.alloc_failed++; in igb_alloc_mapped_page()
8843 bi->dma = dma; in igb_alloc_mapped_page()
8844 bi->page = page; in igb_alloc_mapped_page()
8845 bi->page_offset = igb_rx_offset(rx_ring); in igb_alloc_mapped_page()
8846 page_ref_add(page, USHRT_MAX - 1); in igb_alloc_mapped_page()
8847 bi->pagecnt_bias = USHRT_MAX; in igb_alloc_mapped_page()
8853 * igb_alloc_rx_buffers - Replace used receive buffers
8861 u16 i = rx_ring->next_to_use; in igb_alloc_rx_buffers()
8869 bi = &rx_ring->rx_buffer_info[i]; in igb_alloc_rx_buffers()
8870 i -= rx_ring->count; in igb_alloc_rx_buffers()
8879 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in igb_alloc_rx_buffers()
8880 bi->page_offset, bufsz, in igb_alloc_rx_buffers()
8884 * because each write-back erases this info. in igb_alloc_rx_buffers()
8886 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in igb_alloc_rx_buffers()
8893 bi = rx_ring->rx_buffer_info; in igb_alloc_rx_buffers()
8894 i -= rx_ring->count; in igb_alloc_rx_buffers()
8898 rx_desc->wb.upper.length = 0; in igb_alloc_rx_buffers()
8900 cleaned_count--; in igb_alloc_rx_buffers()
8903 i += rx_ring->count; in igb_alloc_rx_buffers()
8905 if (rx_ring->next_to_use != i) { in igb_alloc_rx_buffers()
8907 rx_ring->next_to_use = i; in igb_alloc_rx_buffers()
8910 rx_ring->next_to_alloc = i; in igb_alloc_rx_buffers()
8914 * applicable for weak-ordered memory model archs, in igb_alloc_rx_buffers()
8915 * such as IA-64). in igb_alloc_rx_buffers()
8918 writel(i, rx_ring->tail); in igb_alloc_rx_buffers()
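The tail write is a doorbell: hardware may start fetching the new descriptors as soon as it observes the updated index, so the descriptor stores must be globally visible first, which is what the write barrier referenced by the comment above enforces on weakly ordered architectures. A minimal sketch of the publish-then-doorbell pattern, using a GCC-style compiler barrier and a volatile store as hypothetical stand-ins for the kernel's barrier and writel() primitives:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins only: real code uses the kernel's DMA write barrier and writel(). */
#define barrier_wmb()        __asm__ __volatile__("" ::: "memory")
#define doorbell_write(v, p) (*(volatile uint32_t *)(p) = (v))

static uint64_t desc_ring[8];     /* pretend descriptor ring      */
static uint32_t tail_reg;         /* pretend device tail register */

static void publish_descriptor(unsigned int next_to_use)
{
	desc_ring[next_to_use] = 0xABCD;            /* 1. fill descriptor fields   */
	barrier_wmb();                              /* 2. make them visible first  */
	doorbell_write(next_to_use + 1, &tail_reg); /* 3. ring the doorbell        */
}

int main(void)
{
	publish_descriptor(0);
	printf("tail register now %u\n", tail_reg);
	return 0;
}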
8923 * igb_mii_ioctl - Handle MII register ioctl requests in igb_mii_ioctl()
8933 if (adapter->hw.phy.media_type != e1000_media_type_copper) in igb_mii_ioctl()
8934 return -EOPNOTSUPP; in igb_mii_ioctl()
8938 data->phy_id = adapter->hw.phy.addr; in igb_mii_ioctl()
8941 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, in igb_mii_ioctl()
8942 &data->val_out)) in igb_mii_ioctl()
8943 return -EIO; in igb_mii_ioctl()
8947 return -EOPNOTSUPP; in igb_mii_ioctl()
8953 * igb_ioctl - Handle device-specific ioctl requests in igb_ioctl()
8970 return -EOPNOTSUPP; in igb_ioctl()
8976 struct igb_adapter *adapter = hw->back; in igb_read_pci_cfg()
8978 pci_read_config_word(adapter->pdev, reg, value); in igb_read_pci_cfg()
8983 struct igb_adapter *adapter = hw->back; in igb_write_pci_cfg()
8985 pci_write_config_word(adapter->pdev, reg, *value); in igb_write_pci_cfg()
8990 struct igb_adapter *adapter = hw->back; in igb_read_pcie_cap_reg()
8992 if (pcie_capability_read_word(adapter->pdev, reg, value)) in igb_read_pcie_cap_reg()
8993 return -E1000_ERR_CONFIG; in igb_read_pcie_cap_reg()
9000 struct igb_adapter *adapter = hw->back; in igb_write_pcie_cap_reg()
9002 if (pcie_capability_write_word(adapter->pdev, reg, *value)) in igb_write_pcie_cap_reg()
9003 return -E1000_ERR_CONFIG; in igb_write_pcie_cap_reg()
9011 struct e1000_hw *hw = &adapter->hw; in igb_vlan_mode()
9032 igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable); in igb_vlan_mode()
9039 struct e1000_hw *hw = &adapter->hw; in igb_vlan_rx_add_vid()
9040 int pf_id = adapter->vfs_allocated_count; in igb_vlan_rx_add_vid()
9043 if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) in igb_vlan_rx_add_vid()
9046 set_bit(vid, adapter->active_vlans); in igb_vlan_rx_add_vid()
9055 int pf_id = adapter->vfs_allocated_count; in igb_vlan_rx_kill_vid()
9056 struct e1000_hw *hw = &adapter->hw; in igb_vlan_rx_kill_vid()
9059 if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) in igb_vlan_rx_kill_vid()
9062 clear_bit(vid, adapter->active_vlans); in igb_vlan_rx_kill_vid()
9071 igb_vlan_mode(adapter->netdev, adapter->netdev->features); in igb_restore_vlan()
9072 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); in igb_restore_vlan()
9074 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) in igb_restore_vlan()
9075 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); in igb_restore_vlan()
9080 struct pci_dev *pdev = adapter->pdev; in igb_set_spd_dplx()
9081 struct e1000_mac_info *mac = &adapter->hw.mac; in igb_set_spd_dplx()
9083 mac->autoneg = 0; in igb_set_spd_dplx()
9092 * and 100Mbps Full duplex for 100baseFx sfp in igb_set_spd_dplx()
9094 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { in igb_set_spd_dplx()
9107 mac->forced_speed_duplex = ADVERTISE_10_HALF; in igb_set_spd_dplx()
9110 mac->forced_speed_duplex = ADVERTISE_10_FULL; in igb_set_spd_dplx()
9113 mac->forced_speed_duplex = ADVERTISE_100_HALF; in igb_set_spd_dplx()
9116 mac->forced_speed_duplex = ADVERTISE_100_FULL; in igb_set_spd_dplx()
9119 mac->autoneg = 1; in igb_set_spd_dplx()
9120 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; in igb_set_spd_dplx()
9127 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ in igb_set_spd_dplx()
9128 adapter->hw.phy.mdix = AUTO_ALL_MODES; in igb_set_spd_dplx()
9133 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); in igb_set_spd_dplx()
9134 return -EINVAL; in igb_set_spd_dplx()
9142 struct e1000_hw *hw = &adapter->hw; in __igb_shutdown()
9144 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; in __igb_shutdown()
9166 /* turn on all-multi mode if wake on multicast is enabled */ in __igb_shutdown()
9187 wake = wufc || adapter->en_mng_pt; in __igb_shutdown()
9209 struct e1000_hw *hw = &adapter->hw; in igb_deliver_wake_packet()
9227 /* Ensure reads are 32-bit aligned */ in igb_deliver_wake_packet()
9230 memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl); in igb_deliver_wake_packet()
9232 skb->protocol = eth_type_trans(skb, netdev); in igb_deliver_wake_packet()
9246 struct e1000_hw *hw = &adapter->hw; in igb_resume()
9254 return -ENODEV; in igb_resume()
9257 dev_err(&pdev->dev, in igb_resume()
9267 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); in igb_resume()
9268 return -ENOMEM; in igb_resume()
9303 return -EBUSY; in igb_runtime_idle()
9333 struct pci_dev *pdev = adapter->pdev; in igb_sriov_reinit()
9348 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); in igb_sriov_reinit()
9349 return -ENOMEM; in igb_sriov_reinit()
9398 * igb_io_error_detected - called when PCI error is detected
9425 * igb_io_slot_reset - called after the pci bus has been reset.
9428 * Restart the card from scratch, as if from a cold-boot. Implementation
9429 * resembles the first-half of the igb_resume routine.
9435 struct e1000_hw *hw = &adapter->hw; in igb_io_slot_reset()
9439 dev_err(&pdev->dev, in igb_io_slot_reset()
9440 "Cannot re-enable PCI device after reset.\n"); in igb_io_slot_reset()
9451 * so we should re-assign it here. in igb_io_slot_reset()
9453 hw->hw_addr = adapter->io_addr; in igb_io_slot_reset()
9464 * igb_io_resume - called when traffic can start flowing again.
9469 * second-half of the igb_resume routine.
9478 dev_err(&pdev->dev, "igb_up failed after reset\n"); in igb_io_resume()
9492 * igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
9498 struct e1000_hw *hw = &adapter->hw; in igb_rar_set_index()
9500 u8 *addr = adapter->mac_table[index].addr; in igb_rar_set_index()
9511 if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) { in igb_rar_set_index()
9515 if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR) in igb_rar_set_index()
9518 switch (hw->mac.type) { in igb_rar_set_index()
9521 if (adapter->mac_table[index].state & in igb_rar_set_index()
9526 adapter->mac_table[index].queue; in igb_rar_set_index()
9530 adapter->mac_table[index].queue; in igb_rar_set_index()
9544 struct e1000_hw *hw = &adapter->hw; in igb_set_vf_mac()
9548 int rar_entry = hw->mac.rar_entry_count - (vf + 1); in igb_set_vf_mac()
9549 unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses; in igb_set_vf_mac()
9552 ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr); in igb_set_vf_mac()
9553 adapter->mac_table[rar_entry].queue = vf; in igb_set_vf_mac()
9554 adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE; in igb_set_vf_mac()
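VF MAC addresses are parked at the top of the RAR table, rar_entry = rar_entry_count - (vf + 1), so the PF's own filters can grow from the bottom without colliding. A quick worked example, assuming a hypothetical 24-entry table (the real count depends on the MAC type):

#include <stdio.h>

int main(void)
{
	int rar_entry_count = 24;   /* assumed; the real value is per-MAC */

	for (int vf = 0; vf < 3; vf++)
		printf("VF %d -> RAR entry %d\n", vf, rar_entry_count - (vf + 1));
	return 0;
}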
9564 if (vf >= adapter->vfs_allocated_count) in igb_ndo_set_vf_mac()
9565 return -EINVAL; in igb_ndo_set_vf_mac()
9570 * MAC after unbinding vfio-pci and reloading igbvf after shutting in igb_ndo_set_vf_mac()
9574 adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC; in igb_ndo_set_vf_mac()
9575 dev_info(&adapter->pdev->dev, in igb_ndo_set_vf_mac()
9579 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; in igb_ndo_set_vf_mac()
9580 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", in igb_ndo_set_vf_mac()
9582 dev_info(&adapter->pdev->dev, in igb_ndo_set_vf_mac()
9585 if (test_bit(__IGB_DOWN, &adapter->state)) { in igb_ndo_set_vf_mac()
9586 dev_warn(&adapter->pdev->dev, in igb_ndo_set_vf_mac()
9588 dev_warn(&adapter->pdev->dev, in igb_ndo_set_vf_mac()
9592 return -EINVAL; in igb_ndo_set_vf_mac()
9601 return 100; in igb_link_mbps()
9618 rf_dec = (link_speed - (rf_int * tx_rate)); in igb_set_vf_rate_limit()
9632 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported. in igb_set_vf_rate_limit()
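The 82576 per-VF rate limiter expresses the ratio link_speed / tx_rate as a fixed-point factor: an integer part rf_int and a fractional part rf_dec scaled before being packed into RTTBCNRC. The worked example below assumes the 14-bit fractional shift implied by the RF_INT/RF_DEC field layout; the shift value and the omitted register packing are assumptions here, not taken from the lines above:

#include <stdio.h>

#define RF_INT_SHIFT 14   /* assumed fractional width of the rate factor */

int main(void)
{
	unsigned int link_speed = 1000;   /* Mbps, full 82576 link  */
	unsigned int tx_rate    = 300;    /* Mbps cap for this VF   */

	unsigned int rf_int = link_speed / tx_rate;            /* 3    */
	unsigned int rf_dec = link_speed - rf_int * tx_rate;   /* 100  */

	rf_dec = (rf_dec * (1u << RF_INT_SHIFT)) / tx_rate;    /* 5461 */

	/* factor ~= 3 + 5461/16384 ~= 3.333, i.e. 1000/300 */
	printf("rf_int=%u rf_dec=%u factor=%.3f\n",
	       rf_int, rf_dec, rf_int + (double)rf_dec / (1u << RF_INT_SHIFT));
	return 0;
}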
9643 /* VF TX rate limit was not set or not supported */ in igb_check_vf_rate_limit()
9644 if ((adapter->vf_rate_link_speed == 0) || in igb_check_vf_rate_limit()
9645 (adapter->hw.mac.type != e1000_82576)) in igb_check_vf_rate_limit()
9648 actual_link_speed = igb_link_mbps(adapter->link_speed); in igb_check_vf_rate_limit()
9649 if (actual_link_speed != adapter->vf_rate_link_speed) { in igb_check_vf_rate_limit()
9651 adapter->vf_rate_link_speed = 0; in igb_check_vf_rate_limit()
9652 dev_info(&adapter->pdev->dev, in igb_check_vf_rate_limit()
9656 for (i = 0; i < adapter->vfs_allocated_count; i++) { in igb_check_vf_rate_limit()
9658 adapter->vf_data[i].tx_rate = 0; in igb_check_vf_rate_limit()
9660 igb_set_vf_rate_limit(&adapter->hw, i, in igb_check_vf_rate_limit()
9661 adapter->vf_data[i].tx_rate, in igb_check_vf_rate_limit()
9670 struct e1000_hw *hw = &adapter->hw; in igb_ndo_set_vf_bw()
9673 if (hw->mac.type != e1000_82576) in igb_ndo_set_vf_bw()
9674 return -EOPNOTSUPP; in igb_ndo_set_vf_bw()
9677 return -EINVAL; in igb_ndo_set_vf_bw()
9679 actual_link_speed = igb_link_mbps(adapter->link_speed); in igb_ndo_set_vf_bw()
9680 if ((vf >= adapter->vfs_allocated_count) || in igb_ndo_set_vf_bw()
9684 return -EINVAL; in igb_ndo_set_vf_bw()
9686 adapter->vf_rate_link_speed = actual_link_speed; in igb_ndo_set_vf_bw()
9687 adapter->vf_data[vf].tx_rate = (u16)max_tx_rate; in igb_ndo_set_vf_bw()
9697 struct e1000_hw *hw = &adapter->hw; in igb_ndo_set_vf_spoofchk()
9700 if (!adapter->vfs_allocated_count) in igb_ndo_set_vf_spoofchk()
9701 return -EOPNOTSUPP; in igb_ndo_set_vf_spoofchk()
9703 if (vf >= adapter->vfs_allocated_count) in igb_ndo_set_vf_spoofchk()
9704 return -EINVAL; in igb_ndo_set_vf_spoofchk()
9706 reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC; in igb_ndo_set_vf_spoofchk()
9716 adapter->vf_data[vf].spoofchk_enabled = setting; in igb_ndo_set_vf_spoofchk()
9724 if (vf >= adapter->vfs_allocated_count) in igb_ndo_set_vf_trust()
9725 return -EINVAL; in igb_ndo_set_vf_trust()
9726 if (adapter->vf_data[vf].trusted == setting) in igb_ndo_set_vf_trust()
9729 adapter->vf_data[vf].trusted = setting; in igb_ndo_set_vf_trust()
9731 dev_info(&adapter->pdev->dev, "VF %u is %strusted\n", in igb_ndo_set_vf_trust()
9740 if (vf >= adapter->vfs_allocated_count) in igb_ndo_get_vf_config()
9741 return -EINVAL; in igb_ndo_get_vf_config()
9742 ivi->vf = vf; in igb_ndo_get_vf_config()
9743 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); in igb_ndo_get_vf_config()
9744 ivi->max_tx_rate = adapter->vf_data[vf].tx_rate; in igb_ndo_get_vf_config()
9745 ivi->min_tx_rate = 0; in igb_ndo_get_vf_config()
9746 ivi->vlan = adapter->vf_data[vf].pf_vlan; in igb_ndo_get_vf_config()
9747 ivi->qos = adapter->vf_data[vf].pf_qos; in igb_ndo_get_vf_config()
9748 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled; in igb_ndo_get_vf_config()
9749 ivi->trusted = adapter->vf_data[vf].trusted; in igb_ndo_get_vf_config()
9755 struct e1000_hw *hw = &adapter->hw; in igb_vmm_control()
9758 switch (hw->mac.type) { in igb_vmm_control()
9783 if (adapter->vfs_allocated_count) { in igb_vmm_control()
9787 adapter->vfs_allocated_count); in igb_vmm_control()
9796 struct e1000_hw *hw = &adapter->hw; in igb_init_dmac()
9800 if (hw->mac.type > e1000_82580) { in igb_init_dmac()
9801 if (adapter->flags & IGB_FLAG_DMAC) { in igb_init_dmac()
9808 * than the Rx threshold. Set hwm to PBA - max frame in igb_init_dmac()
9809 * size in 16B units, capping it at PBA - 6KB. in igb_init_dmac()
9811 hwm = 64 * (pba - 6); in igb_init_dmac()
9818 /* Set the DMA Coalescing Rx threshold to PBA - 2 * max in igb_init_dmac()
9819 * frame size, capping it at PBA - 10KB. in igb_init_dmac()
9821 dmac_thr = pba - 10; in igb_init_dmac()
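Both thresholds are derived from the packet buffer allocation (PBA), which is in kilobytes: the flow-control high-water mark written here is (PBA - 6) KB converted to 16-byte units (hence the factor of 64), and the DMA-coalescing Rx threshold is (PBA - 10) kept in KB. A worked example with an assumed PBA of 34 KB (the actual value depends on the device and configuration):

#include <stdio.h>

int main(void)
{
	unsigned int pba = 34;                    /* KB; assumed example value */

	unsigned int hwm      = 64 * (pba - 6);   /* (34-6) KB = 28672 B = 1792 units of 16 B */
	unsigned int dmac_thr = pba - 10;         /* 24 KB */

	printf("hwm=%u (16-byte units), dmac_thr=%u KB\n", hwm, dmac_thr);
	return 0;
}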
9830 /* watchdog timer = +/-1000 usec in 32 usec intervals */ in igb_init_dmac()
9833 /* Disable BMC-to-OS Watchdog Enable */ in igb_init_dmac()
9834 if (hw->mac.type != e1000_i354) in igb_init_dmac()
9840 * coalescing (smart fifo) - UTRESH=0 in igb_init_dmac()
9848 /* free space in tx packet buffer to wake from in igb_init_dmac()
9851 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - in igb_init_dmac()
9852 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); in igb_init_dmac()
9860 } /* endif adapter->dmac is not disabled */ in igb_init_dmac()
9861 } else if (hw->mac.type == e1000_82580) { in igb_init_dmac()
9870 * igb_read_i2c_byte - Reads an 8-bit value over I2C
9883 struct i2c_client *this_client = adapter->i2c_client; in igb_read_i2c_byte()
9892 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) in igb_read_i2c_byte()
9896 hw->mac.ops.release_swfw_sync(hw, swfw_mask); in igb_read_i2c_byte()
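The I2C accessors bracket the SMBus transfer with the software/firmware semaphore so the SFP/PHY bus is not driven concurrently by firmware; the transfer itself goes through the kernel's i2c_smbus_* helpers. A generic acquire/operate/release sketch of that pattern, with hypothetical stand-in functions rather than the driver's hw->mac.ops callbacks:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the semaphore and the bus transfer. */
static bool acquire_shared_bus(void)        { return true; }
static void release_shared_bus(void)        { }
static int  bus_read_byte(unsigned int off) { (void)off; return 0x5A; }

static int read_byte_locked(unsigned int offset, unsigned char *out)
{
	int status;

	if (!acquire_shared_bus())
		return -1;                  /* could not get the semaphore */

	status = bus_read_byte(offset);
	release_shared_bus();               /* always drop the lock */

	if (status < 0)
		return -1;
	*out = (unsigned char)status;
	return 0;
}

int main(void)
{
	unsigned char val;

	if (!read_byte_locked(0x60, &val))
		printf("read 0x%02X\n", val);
	return 0;
}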
9907 * igb_write_i2c_byte - Writes an 8-bit value over I2C
9920 struct i2c_client *this_client = adapter->i2c_client; in igb_write_i2c_byte()
9927 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) in igb_write_i2c_byte()
9930 hw->mac.ops.release_swfw_sync(hw, swfw_mask); in igb_write_i2c_byte()
9941 struct net_device *netdev = adapter->netdev; in igb_reinit_queues()
9942 struct pci_dev *pdev = adapter->pdev; in igb_reinit_queues()
9951 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); in igb_reinit_queues()
9952 return -ENOMEM; in igb_reinit_queues()
9965 spin_lock(&adapter->nfc_lock); in igb_nfc_filter_exit()
9967 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) in igb_nfc_filter_exit()
9970 hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node) in igb_nfc_filter_exit()
9973 spin_unlock(&adapter->nfc_lock); in igb_nfc_filter_exit()
9980 spin_lock(&adapter->nfc_lock); in igb_nfc_filter_restore()
9982 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) in igb_nfc_filter_restore()
9985 spin_unlock(&adapter->nfc_lock); in igb_nfc_filter_restore()