Lines Matching +full:tcb +full:- +full:timer

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Linux network driver for QLogic BR-series Converged Network Adapter.
6 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
7 * Copyright (c) 2014-2015 QLogic Corporation
57 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
58 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
59 ((_bnad)->pcidev->irq))
63 (_res_info)->res_type = BNA_RES_T_MEM; \
64 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
65 (_res_info)->res_u.mem_info.num = (_num); \
66 (_res_info)->res_u.mem_info.len = (_size); \
78 for (i = 0; i < ccb->q_depth; i++) { in bnad_cq_cleanup()
79 cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i]; in bnad_cq_cleanup()
80 cmpl->valid = 0; in bnad_cq_cleanup()
98 nvecs = unmap->nvecs; in bnad_tx_buff_unmap()
100 skb = unmap->skb; in bnad_tx_buff_unmap()
101 unmap->skb = NULL; in bnad_tx_buff_unmap()
102 unmap->nvecs = 0; in bnad_tx_buff_unmap()
103 dma_unmap_single(&bnad->pcidev->dev, in bnad_tx_buff_unmap()
104 dma_unmap_addr(&unmap->vectors[0], dma_addr), in bnad_tx_buff_unmap()
106 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0); in bnad_tx_buff_unmap()
107 nvecs--; in bnad_tx_buff_unmap()
118 dma_unmap_page(&bnad->pcidev->dev, in bnad_tx_buff_unmap()
119 dma_unmap_addr(&unmap->vectors[vector], dma_addr), in bnad_tx_buff_unmap()
120 dma_unmap_len(&unmap->vectors[vector], dma_len), in bnad_tx_buff_unmap()
122 dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0); in bnad_tx_buff_unmap()
123 nvecs--; in bnad_tx_buff_unmap()
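
A minimal sketch (not part of the listing above) of the unmap split bnad_tx_buff_unmap() performs: vector 0 covers the linear skb data mapped with dma_map_single(), while the remaining vectors cover page fragments mapped with skb_frag_dma_map(), so they are released with dma_unmap_single() and dma_unmap_page() respectively. The my_tx_vec/my_tx_unmap names are hypothetical.

#include <linux/dma-mapping.h>

struct my_tx_vec {
	dma_addr_t dma;		/* bus address stored at map time */
	unsigned int len;	/* mapped length */
};

static void my_tx_unmap(struct device *dev, struct my_tx_vec *vec,
			unsigned int nvecs)
{
	unsigned int i;

	/* vector 0: linear skb data, undone with dma_unmap_single() */
	dma_unmap_single(dev, vec[0].dma, vec[0].len, DMA_TO_DEVICE);

	/* vectors 1..nvecs-1: page fragments, undone with dma_unmap_page() */
	for (i = 1; i < nvecs; i++)
		dma_unmap_page(dev, vec[i].dma, vec[i].len, DMA_TO_DEVICE);
}
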
137 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb) in bnad_txq_cleanup() argument
139 struct bnad_tx_unmap *unmap_q = tcb->unmap_q; in bnad_txq_cleanup()
143 for (i = 0; i < tcb->q_depth; i++) { in bnad_txq_cleanup()
147 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i); in bnad_txq_cleanup()
159 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb) in bnad_txcmpl_process() argument
163 struct bnad_tx_unmap *unmap_q = tcb->unmap_q; in bnad_txcmpl_process()
168 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) in bnad_txcmpl_process()
171 hw_cons = *(tcb->hw_consumer_index); in bnad_txcmpl_process()
173 cons = tcb->consumer_index; in bnad_txcmpl_process()
174 q_depth = tcb->q_depth; in bnad_txcmpl_process()
177 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth))); in bnad_txcmpl_process()
182 skb = unmap->skb; in bnad_txcmpl_process()
185 sent_bytes += skb->len; in bnad_txcmpl_process()
187 unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs); in bnad_txcmpl_process()
188 wis -= unmap_wis; in bnad_txcmpl_process()
195 tcb->consumer_index = hw_cons; in bnad_txcmpl_process()
197 tcb->txq->tx_packets += sent_packets; in bnad_txcmpl_process()
198 tcb->txq->tx_bytes += sent_bytes; in bnad_txcmpl_process()
204 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb) in bnad_tx_complete() argument
206 struct net_device *netdev = bnad->netdev; in bnad_tx_complete()
209 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) in bnad_tx_complete()
212 sent = bnad_txcmpl_process(bnad, tcb); in bnad_tx_complete()
216 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >= in bnad_tx_complete()
218 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) { in bnad_tx_complete()
225 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) in bnad_tx_complete()
226 bna_ib_ack(tcb->i_dbell, sent); in bnad_tx_complete()
229 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); in bnad_tx_complete()
238 struct bna_tcb *tcb = (struct bna_tcb *)data; in bnad_msix_tx() local
239 struct bnad *bnad = tcb->bnad; in bnad_msix_tx()
241 bnad_tx_complete(bnad, tcb); in bnad_msix_tx()
249 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; in bnad_rxq_alloc_uninit()
251 unmap_q->reuse_pi = -1; in bnad_rxq_alloc_uninit()
252 unmap_q->alloc_order = -1; in bnad_rxq_alloc_uninit()
253 unmap_q->map_size = 0; in bnad_rxq_alloc_uninit()
254 unmap_q->type = BNAD_RXBUF_NONE; in bnad_rxq_alloc_uninit()
257 /* Default is page-based allocation. Multi-buffer support - TBD */
261 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; in bnad_rxq_alloc_init()
266 order = get_order(rcb->rxq->buffer_size); in bnad_rxq_alloc_init()
268 unmap_q->type = BNAD_RXBUF_PAGE; in bnad_rxq_alloc_init()
270 if (bna_is_small_rxq(rcb->id)) { in bnad_rxq_alloc_init()
271 unmap_q->alloc_order = 0; in bnad_rxq_alloc_init()
272 unmap_q->map_size = rcb->rxq->buffer_size; in bnad_rxq_alloc_init()
274 if (rcb->rxq->multi_buffer) { in bnad_rxq_alloc_init()
275 unmap_q->alloc_order = 0; in bnad_rxq_alloc_init()
276 unmap_q->map_size = rcb->rxq->buffer_size; in bnad_rxq_alloc_init()
277 unmap_q->type = BNAD_RXBUF_MULTI_BUFF; in bnad_rxq_alloc_init()
279 unmap_q->alloc_order = order; in bnad_rxq_alloc_init()
280 unmap_q->map_size = in bnad_rxq_alloc_init()
281 (rcb->rxq->buffer_size > 2048) ? in bnad_rxq_alloc_init()
286 BUG_ON((PAGE_SIZE << order) % unmap_q->map_size); in bnad_rxq_alloc_init()
294 if (!unmap->page) in bnad_rxq_cleanup_page()
297 dma_unmap_page(&bnad->pcidev->dev, in bnad_rxq_cleanup_page()
298 dma_unmap_addr(&unmap->vector, dma_addr), in bnad_rxq_cleanup_page()
299 unmap->vector.len, DMA_FROM_DEVICE); in bnad_rxq_cleanup_page()
300 put_page(unmap->page); in bnad_rxq_cleanup_page()
301 unmap->page = NULL; in bnad_rxq_cleanup_page()
302 dma_unmap_addr_set(&unmap->vector, dma_addr, 0); in bnad_rxq_cleanup_page()
303 unmap->vector.len = 0; in bnad_rxq_cleanup_page()
309 if (!unmap->skb) in bnad_rxq_cleanup_skb()
312 dma_unmap_single(&bnad->pcidev->dev, in bnad_rxq_cleanup_skb()
313 dma_unmap_addr(&unmap->vector, dma_addr), in bnad_rxq_cleanup_skb()
314 unmap->vector.len, DMA_FROM_DEVICE); in bnad_rxq_cleanup_skb()
315 dev_kfree_skb_any(unmap->skb); in bnad_rxq_cleanup_skb()
316 unmap->skb = NULL; in bnad_rxq_cleanup_skb()
317 dma_unmap_addr_set(&unmap->vector, dma_addr, 0); in bnad_rxq_cleanup_skb()
318 unmap->vector.len = 0; in bnad_rxq_cleanup_skb()
324 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; in bnad_rxq_cleanup()
327 for (i = 0; i < rcb->q_depth; i++) { in bnad_rxq_cleanup()
328 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i]; in bnad_rxq_cleanup()
330 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) in bnad_rxq_cleanup()
342 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; in bnad_rxq_refill_page()
349 prod = rcb->producer_index; in bnad_rxq_refill_page()
350 q_depth = rcb->q_depth; in bnad_rxq_refill_page()
352 alloc_size = PAGE_SIZE << unmap_q->alloc_order; in bnad_rxq_refill_page()
355 while (nalloc--) { in bnad_rxq_refill_page()
356 unmap = &unmap_q->unmap[prod]; in bnad_rxq_refill_page()
358 if (unmap_q->reuse_pi < 0) { in bnad_rxq_refill_page()
360 unmap_q->alloc_order); in bnad_rxq_refill_page()
363 prev = &unmap_q->unmap[unmap_q->reuse_pi]; in bnad_rxq_refill_page()
364 page = prev->page; in bnad_rxq_refill_page()
365 page_offset = prev->page_offset + unmap_q->map_size; in bnad_rxq_refill_page()
371 rcb->rxq->rxbuf_alloc_failed++; in bnad_rxq_refill_page()
375 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset, in bnad_rxq_refill_page()
376 unmap_q->map_size, DMA_FROM_DEVICE); in bnad_rxq_refill_page()
377 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { in bnad_rxq_refill_page()
380 rcb->rxq->rxbuf_map_failed++; in bnad_rxq_refill_page()
384 unmap->page = page; in bnad_rxq_refill_page()
385 unmap->page_offset = page_offset; in bnad_rxq_refill_page()
386 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); in bnad_rxq_refill_page()
387 unmap->vector.len = unmap_q->map_size; in bnad_rxq_refill_page()
388 page_offset += unmap_q->map_size; in bnad_rxq_refill_page()
391 unmap_q->reuse_pi = prod; in bnad_rxq_refill_page()
393 unmap_q->reuse_pi = -1; in bnad_rxq_refill_page()
395 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod]; in bnad_rxq_refill_page()
396 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); in bnad_rxq_refill_page()
403 rcb->producer_index = prod; in bnad_rxq_refill_page()
405 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) in bnad_rxq_refill_page()
416 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; in bnad_rxq_refill_skb()
422 buff_sz = rcb->rxq->buffer_size; in bnad_rxq_refill_skb()
423 prod = rcb->producer_index; in bnad_rxq_refill_skb()
424 q_depth = rcb->q_depth; in bnad_rxq_refill_skb()
427 while (nalloc--) { in bnad_rxq_refill_skb()
428 unmap = &unmap_q->unmap[prod]; in bnad_rxq_refill_skb()
430 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz); in bnad_rxq_refill_skb()
434 rcb->rxq->rxbuf_alloc_failed++; in bnad_rxq_refill_skb()
438 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, in bnad_rxq_refill_skb()
440 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { in bnad_rxq_refill_skb()
443 rcb->rxq->rxbuf_map_failed++; in bnad_rxq_refill_skb()
447 unmap->skb = skb; in bnad_rxq_refill_skb()
448 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); in bnad_rxq_refill_skb()
449 unmap->vector.len = buff_sz; in bnad_rxq_refill_skb()
451 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod]; in bnad_rxq_refill_skb()
452 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); in bnad_rxq_refill_skb()
459 rcb->producer_index = prod; in bnad_rxq_refill_skb()
461 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) in bnad_rxq_refill_skb()
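
A hedged sketch of the per-buffer refill step bnad_rxq_refill_skb() repeats above: allocate an skb, DMA-map its data for device writes, and back out cleanly on a mapping failure so a bad address is never posted to the ring. my_rx_slot and my_refill_one are illustrative names, not driver symbols.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

struct my_rx_slot {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int len;
};

static int my_refill_one(struct net_device *netdev, struct device *dmadev,
			 struct my_rx_slot *slot, unsigned int buff_sz)
{
	struct sk_buff *skb;
	dma_addr_t dma;

	skb = netdev_alloc_skb_ip_align(netdev, buff_sz);
	if (!skb)
		return -ENOMEM;		/* accounted as rxbuf_alloc_failed above */

	dma = dma_map_single(dmadev, skb->data, buff_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(dmadev, dma)) {
		dev_kfree_skb_any(skb);	/* accounted as rxbuf_map_failed above */
		return -EIO;
	}

	slot->skb = skb;
	slot->dma = dma;
	slot->len = buff_sz;
	return 0;
}
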
471 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; in bnad_rxq_post()
474 to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth); in bnad_rxq_post()
478 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) in bnad_rxq_post()
506 unmap_q = rcb->unmap_q; in bnad_cq_drop_packet()
508 unmap = &unmap_q->unmap[ci]; in bnad_cq_drop_packet()
509 BNA_QE_INDX_INC(ci, rcb->q_depth); in bnad_cq_drop_packet()
511 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) in bnad_cq_drop_packet()
527 cq = ccb->sw_q; in bnad_cq_setup_skb_frags()
528 pi = ccb->producer_index; in bnad_cq_setup_skb_frags()
531 rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0]; in bnad_cq_setup_skb_frags()
532 unmap_q = rcb->unmap_q; in bnad_cq_setup_skb_frags()
533 bnad = rcb->bnad; in bnad_cq_setup_skb_frags()
534 ci = rcb->consumer_index; in bnad_cq_setup_skb_frags()
537 prefetch(page_address(unmap_q->unmap[ci].page) + in bnad_cq_setup_skb_frags()
538 unmap_q->unmap[ci].page_offset); in bnad_cq_setup_skb_frags()
540 while (nvecs--) { in bnad_cq_setup_skb_frags()
544 unmap = &unmap_q->unmap[ci]; in bnad_cq_setup_skb_frags()
545 BNA_QE_INDX_INC(ci, rcb->q_depth); in bnad_cq_setup_skb_frags()
547 dma_unmap_page(&bnad->pcidev->dev, in bnad_cq_setup_skb_frags()
548 dma_unmap_addr(&unmap->vector, dma_addr), in bnad_cq_setup_skb_frags()
549 unmap->vector.len, DMA_FROM_DEVICE); in bnad_cq_setup_skb_frags()
551 len = ntohs(cmpl->length); in bnad_cq_setup_skb_frags()
552 skb->truesize += unmap->vector.len; in bnad_cq_setup_skb_frags()
555 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, in bnad_cq_setup_skb_frags()
556 unmap->page, unmap->page_offset, len); in bnad_cq_setup_skb_frags()
558 unmap->page = NULL; in bnad_cq_setup_skb_frags()
559 unmap->vector.len = 0; in bnad_cq_setup_skb_frags()
561 BNA_QE_INDX_INC(pi, ccb->q_depth); in bnad_cq_setup_skb_frags()
565 skb->len += totlen; in bnad_cq_setup_skb_frags()
566 skb->data_len += totlen; in bnad_cq_setup_skb_frags()
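
For reference, a small sketch (hypothetical helper, not bnad.c code) of attaching one received page fragment to an skb the way the loop above does: the page is added with skb_fill_page_desc() and len, data_len and truesize are adjusted by hand.

#include <linux/skbuff.h>

static void my_add_rx_frag(struct sk_buff *skb, struct page *page,
			   unsigned int offset, unsigned int len,
			   unsigned int truesize)
{
	/* append the page as the next fragment descriptor */
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, len);

	/* keep the skb's byte accounting consistent with the new fragment */
	skb->len += len;
	skb->data_len += len;
	skb->truesize += truesize;
}
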
573 prefetch(skb->data); in bnad_cq_setup_skb()
575 dma_unmap_single(&bnad->pcidev->dev, in bnad_cq_setup_skb()
576 dma_unmap_addr(&unmap->vector, dma_addr), in bnad_cq_setup_skb()
577 unmap->vector.len, DMA_FROM_DEVICE); in bnad_cq_setup_skb()
580 skb->protocol = eth_type_trans(skb, bnad->netdev); in bnad_cq_setup_skb()
582 unmap->skb = NULL; in bnad_cq_setup_skb()
583 unmap->vector.len = 0; in bnad_cq_setup_skb()
594 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; in bnad_cq_process()
595 struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl; in bnad_cq_process()
600 prefetch(bnad->netdev); in bnad_cq_process()
602 cq = ccb->sw_q; in bnad_cq_process()
605 cmpl = &cq[ccb->producer_index]; in bnad_cq_process()
606 if (!cmpl->valid) in bnad_cq_process()
617 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length)); in bnad_cq_process()
619 if (bna_is_small_rxq(cmpl->rxq_id)) in bnad_cq_process()
620 rcb = ccb->rcb[1]; in bnad_cq_process()
622 rcb = ccb->rcb[0]; in bnad_cq_process()
624 unmap_q = rcb->unmap_q; in bnad_cq_process()
627 sop_ci = rcb->consumer_index; in bnad_cq_process()
629 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) { in bnad_cq_process()
630 unmap = &unmap_q->unmap[sop_ci]; in bnad_cq_process()
631 skb = unmap->skb; in bnad_cq_process()
633 skb = napi_get_frags(&rx_ctrl->napi); in bnad_cq_process()
639 flags = ntohl(cmpl->flags); in bnad_cq_process()
640 len = ntohs(cmpl->length); in bnad_cq_process()
645 * busy-wait doesn't help much, break here. in bnad_cq_process()
647 if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) && in bnad_cq_process()
649 pi = ccb->producer_index; in bnad_cq_process()
651 BNA_QE_INDX_INC(pi, ccb->q_depth); in bnad_cq_process()
654 if (!next_cmpl->valid) in bnad_cq_process()
667 len = ntohs(next_cmpl->length); in bnad_cq_process()
668 flags = ntohl(next_cmpl->flags); in bnad_cq_process()
674 if (!next_cmpl->valid) in bnad_cq_process()
684 rcb->rxq->rx_packets_with_error++; in bnad_cq_process()
689 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) in bnad_cq_process()
694 rcb->rxq->rx_packets++; in bnad_cq_process()
695 rcb->rxq->rx_bytes += totlen; in bnad_cq_process()
696 ccb->bytes_per_intr += totlen; in bnad_cq_process()
701 ((bnad->netdev->features & NETIF_F_RXCSUM) && in bnad_cq_process()
706 skb->ip_summed = CHECKSUM_UNNECESSARY; in bnad_cq_process()
711 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) in bnad_cq_process()
712 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag)); in bnad_cq_process()
714 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) in bnad_cq_process()
717 napi_gro_frags(&rx_ctrl->napi); in bnad_cq_process()
720 BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth); in bnad_cq_process()
722 cmpl = &cq[ccb->producer_index]; in bnad_cq_process()
723 cmpl->valid = 0; in bnad_cq_process()
724 BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth); in bnad_cq_process()
728 napi_gro_flush(&rx_ctrl->napi, false); in bnad_cq_process()
729 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) in bnad_cq_process()
730 bna_ib_ack_disable_irq(ccb->i_dbell, packets); in bnad_cq_process()
732 bnad_rxq_post(bnad, ccb->rcb[0]); in bnad_cq_process()
733 if (ccb->rcb[1]) in bnad_cq_process()
734 bnad_rxq_post(bnad, ccb->rcb[1]); in bnad_cq_process()
742 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); in bnad_netif_rx_schedule_poll()
743 struct napi_struct *napi = &rx_ctrl->napi; in bnad_netif_rx_schedule_poll()
747 rx_ctrl->rx_schedule++; in bnad_netif_rx_schedule_poll()
758 ((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++; in bnad_msix_rx()
759 bnad_netif_rx_schedule_poll(ccb->bnad, ccb); in bnad_msix_rx()
775 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_msix_mbox_handler()
776 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { in bnad_msix_mbox_handler()
777 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_msix_mbox_handler()
781 bna_intr_status_get(&bnad->bna, intr_status); in bnad_msix_mbox_handler()
783 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status)) in bnad_msix_mbox_handler()
784 bna_mbox_handler(&bnad->bna, intr_status); in bnad_msix_mbox_handler()
786 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_msix_mbox_handler()
800 struct bna_tcb *tcb = NULL; in bnad_isr() local
802 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_isr()
803 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { in bnad_isr()
804 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_isr()
808 bna_intr_status_get(&bnad->bna, intr_status); in bnad_isr()
811 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_isr()
815 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status)) in bnad_isr()
816 bna_mbox_handler(&bnad->bna, intr_status); in bnad_isr()
818 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_isr()
825 for (i = 0; i < bnad->num_tx; i++) { in bnad_isr()
826 for (j = 0; j < bnad->num_txq_per_tx; j++) { in bnad_isr()
827 tcb = bnad->tx_info[i].tcb[j]; in bnad_isr()
828 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) in bnad_isr()
829 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]); in bnad_isr()
833 for (i = 0; i < bnad->num_rx; i++) { in bnad_isr()
834 rx_info = &bnad->rx_info[i]; in bnad_isr()
835 if (!rx_info->rx) in bnad_isr()
837 for (j = 0; j < bnad->num_rxp_per_rx; j++) { in bnad_isr()
838 rx_ctrl = &rx_info->rx_ctrl[j]; in bnad_isr()
839 if (rx_ctrl->ccb) in bnad_isr()
841 rx_ctrl->ccb); in bnad_isr()
854 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); in bnad_enable_mbox_irq()
860 * Called with bnad->bna_lock held because of
861 * bnad->cfg_flags access.
866 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); in bnad_disable_mbox_irq()
874 struct net_device *netdev = bnad->netdev; in bnad_set_netdev_perm_addr()
876 ether_addr_copy(netdev->perm_addr, bnad->perm_addr); in bnad_set_netdev_perm_addr()
877 if (is_zero_ether_addr(netdev->dev_addr)) in bnad_set_netdev_perm_addr()
878 ether_addr_copy(netdev->dev_addr, bnad->perm_addr); in bnad_set_netdev_perm_addr()
899 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS; in bnad_cb_ioceth_ready()
900 complete(&bnad->bnad_completions.ioc_comp); in bnad_cb_ioceth_ready()
906 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL; in bnad_cb_ioceth_failed()
907 complete(&bnad->bnad_completions.ioc_comp); in bnad_cb_ioceth_failed()
913 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS; in bnad_cb_ioceth_disabled()
914 complete(&bnad->bnad_completions.ioc_comp); in bnad_cb_ioceth_disabled()
922 netif_carrier_off(bnad->netdev); in bnad_cb_enet_disabled()
923 complete(&bnad->bnad_completions.enet_comp); in bnad_cb_enet_disabled()
935 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) in bnad_cb_ethport_link_status()
937 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); in bnad_cb_ethport_link_status()
939 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) in bnad_cb_ethport_link_status()
941 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); in bnad_cb_ethport_link_status()
945 if (!netif_carrier_ok(bnad->netdev)) { in bnad_cb_ethport_link_status()
947 netdev_info(bnad->netdev, "link up\n"); in bnad_cb_ethport_link_status()
948 netif_carrier_on(bnad->netdev); in bnad_cb_ethport_link_status()
950 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) { in bnad_cb_ethport_link_status()
951 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx; in bnad_cb_ethport_link_status()
953 struct bna_tcb *tcb = in bnad_cb_ethport_link_status() local
954 bnad->tx_info[tx_id].tcb[tcb_id]; in bnad_cb_ethport_link_status()
956 if (!tcb) in bnad_cb_ethport_link_status()
959 txq_id = tcb->id; in bnad_cb_ethport_link_status()
962 &tcb->flags)) { in bnad_cb_ethport_link_status()
967 bnad->netdev, in bnad_cb_ethport_link_status()
973 bnad->netdev, in bnad_cb_ethport_link_status()
982 if (netif_carrier_ok(bnad->netdev)) { in bnad_cb_ethport_link_status()
983 netdev_info(bnad->netdev, "link down\n"); in bnad_cb_ethport_link_status()
984 netif_carrier_off(bnad->netdev); in bnad_cb_ethport_link_status()
995 complete(&bnad->bnad_completions.tx_comp); in bnad_cb_tx_disabled()
999 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb) in bnad_cb_tcb_setup() argument
1002 (struct bnad_tx_info *)tcb->txq->tx->priv; in bnad_cb_tcb_setup()
1004 tcb->priv = tcb; in bnad_cb_tcb_setup()
1005 tx_info->tcb[tcb->id] = tcb; in bnad_cb_tcb_setup()
1009 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb) in bnad_cb_tcb_destroy() argument
1012 (struct bnad_tx_info *)tcb->txq->tx->priv; in bnad_cb_tcb_destroy()
1014 tx_info->tcb[tcb->id] = NULL; in bnad_cb_tcb_destroy()
1015 tcb->priv = NULL; in bnad_cb_tcb_destroy()
1022 (struct bnad_rx_info *)ccb->cq->rx->priv; in bnad_cb_ccb_setup()
1024 rx_info->rx_ctrl[ccb->id].ccb = ccb; in bnad_cb_ccb_setup()
1025 ccb->ctrl = &rx_info->rx_ctrl[ccb->id]; in bnad_cb_ccb_setup()
1032 (struct bnad_rx_info *)ccb->cq->rx->priv; in bnad_cb_ccb_destroy()
1034 rx_info->rx_ctrl[ccb->id].ccb = NULL; in bnad_cb_ccb_destroy()
1041 (struct bnad_tx_info *)tx->priv; in bnad_cb_tx_stall()
1042 struct bna_tcb *tcb; in bnad_cb_tx_stall() local
1047 tcb = tx_info->tcb[i]; in bnad_cb_tx_stall()
1048 if (!tcb) in bnad_cb_tx_stall()
1050 txq_id = tcb->id; in bnad_cb_tx_stall()
1051 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); in bnad_cb_tx_stall()
1052 netif_stop_subqueue(bnad->netdev, txq_id); in bnad_cb_tx_stall()
1059 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv; in bnad_cb_tx_resume()
1060 struct bna_tcb *tcb; in bnad_cb_tx_resume() local
1065 tcb = tx_info->tcb[i]; in bnad_cb_tx_resume()
1066 if (!tcb) in bnad_cb_tx_resume()
1068 txq_id = tcb->id; in bnad_cb_tx_resume()
1070 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)); in bnad_cb_tx_resume()
1071 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); in bnad_cb_tx_resume()
1072 BUG_ON(*(tcb->hw_consumer_index) != 0); in bnad_cb_tx_resume()
1074 if (netif_carrier_ok(bnad->netdev)) { in bnad_cb_tx_resume()
1075 netif_wake_subqueue(bnad->netdev, txq_id); in bnad_cb_tx_resume()
1085 if (is_zero_ether_addr(bnad->perm_addr)) { in bnad_cb_tx_resume()
1086 bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr); in bnad_cb_tx_resume()
1100 struct bna_tcb *tcb; in bnad_tx_cleanup() local
1105 tcb = tx_info->tcb[i]; in bnad_tx_cleanup()
1106 if (!tcb) in bnad_tx_cleanup()
1109 bnad = tcb->bnad; in bnad_tx_cleanup()
1111 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { in bnad_tx_cleanup()
1116 bnad_txq_cleanup(bnad, tcb); in bnad_tx_cleanup()
1119 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); in bnad_tx_cleanup()
1123 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, in bnad_tx_cleanup()
1128 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_tx_cleanup()
1129 bna_tx_cleanup_complete(tx_info->tx); in bnad_tx_cleanup()
1130 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_tx_cleanup()
1136 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv; in bnad_cb_tx_cleanup()
1137 struct bna_tcb *tcb; in bnad_cb_tx_cleanup() local
1141 tcb = tx_info->tcb[i]; in bnad_cb_tx_cleanup()
1142 if (!tcb) in bnad_cb_tx_cleanup()
1146 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0); in bnad_cb_tx_cleanup()
1152 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; in bnad_cb_rx_stall()
1158 rx_ctrl = &rx_info->rx_ctrl[i]; in bnad_cb_rx_stall()
1159 ccb = rx_ctrl->ccb; in bnad_cb_rx_stall()
1163 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags); in bnad_cb_rx_stall()
1165 if (ccb->rcb[1]) in bnad_cb_rx_stall()
1166 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags); in bnad_cb_rx_stall()
1184 rx_ctrl = &rx_info->rx_ctrl[i]; in bnad_rx_cleanup()
1186 if (!rx_ctrl->ccb) in bnad_rx_cleanup()
1189 bnad = rx_ctrl->ccb->bnad; in bnad_rx_cleanup()
1195 napi_disable(&rx_ctrl->napi); in bnad_rx_cleanup()
1197 bnad_cq_cleanup(bnad, rx_ctrl->ccb); in bnad_rx_cleanup()
1198 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]); in bnad_rx_cleanup()
1199 if (rx_ctrl->ccb->rcb[1]) in bnad_rx_cleanup()
1200 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]); in bnad_rx_cleanup()
1203 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_rx_cleanup()
1204 bna_rx_cleanup_complete(rx_info->rx); in bnad_rx_cleanup()
1205 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_rx_cleanup()
1211 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; in bnad_cb_rx_cleanup()
1217 rx_ctrl = &rx_info->rx_ctrl[i]; in bnad_cb_rx_cleanup()
1218 ccb = rx_ctrl->ccb; in bnad_cb_rx_cleanup()
1222 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags); in bnad_cb_rx_cleanup()
1224 if (ccb->rcb[1]) in bnad_cb_rx_cleanup()
1225 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags); in bnad_cb_rx_cleanup()
1228 queue_work(bnad->work_q, &rx_info->rx_cleanup_work); in bnad_cb_rx_cleanup()
1234 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; in bnad_cb_rx_post()
1241 rx_ctrl = &rx_info->rx_ctrl[i]; in bnad_cb_rx_post()
1242 ccb = rx_ctrl->ccb; in bnad_cb_rx_post()
1246 napi_enable(&rx_ctrl->napi); in bnad_cb_rx_post()
1249 rcb = ccb->rcb[j]; in bnad_cb_rx_post()
1254 set_bit(BNAD_RXQ_STARTED, &rcb->flags); in bnad_cb_rx_post()
1255 set_bit(BNAD_RXQ_POST_OK, &rcb->flags); in bnad_cb_rx_post()
1266 complete(&bnad->bnad_completions.rx_comp); in bnad_cb_rx_disabled()
1272 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS; in bnad_cb_rx_mcast_add()
1273 complete(&bnad->bnad_completions.mcast_comp); in bnad_cb_rx_mcast_add()
1283 if (!netif_running(bnad->netdev) || in bnad_cb_stats_get()
1284 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) in bnad_cb_stats_get()
1287 mod_timer(&bnad->stats_timer, in bnad_cb_stats_get()
1294 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS; in bnad_cb_enet_mtu_set()
1295 complete(&bnad->bnad_completions.mtu_comp); in bnad_cb_enet_mtu_set()
1304 iocmd_comp->comp_status = (u32) status; in bnad_cb_completion()
1305 complete(&iocmd_comp->comp); in bnad_cb_completion()
1317 if (mem_info->mdl == NULL) in bnad_mem_free()
1320 for (i = 0; i < mem_info->num; i++) { in bnad_mem_free()
1321 if (mem_info->mdl[i].kva != NULL) { in bnad_mem_free()
1322 if (mem_info->mem_type == BNA_MEM_T_DMA) { in bnad_mem_free()
1323 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma), in bnad_mem_free()
1325 dma_free_coherent(&bnad->pcidev->dev, in bnad_mem_free()
1326 mem_info->mdl[i].len, in bnad_mem_free()
1327 mem_info->mdl[i].kva, dma_pa); in bnad_mem_free()
1329 kfree(mem_info->mdl[i].kva); in bnad_mem_free()
1332 kfree(mem_info->mdl); in bnad_mem_free()
1333 mem_info->mdl = NULL; in bnad_mem_free()
1343 if ((mem_info->num == 0) || (mem_info->len == 0)) { in bnad_mem_alloc()
1344 mem_info->mdl = NULL; in bnad_mem_alloc()
1348 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr), in bnad_mem_alloc()
1350 if (mem_info->mdl == NULL) in bnad_mem_alloc()
1351 return -ENOMEM; in bnad_mem_alloc()
1353 if (mem_info->mem_type == BNA_MEM_T_DMA) { in bnad_mem_alloc()
1354 for (i = 0; i < mem_info->num; i++) { in bnad_mem_alloc()
1355 mem_info->mdl[i].len = mem_info->len; in bnad_mem_alloc()
1356 mem_info->mdl[i].kva = in bnad_mem_alloc()
1357 dma_alloc_coherent(&bnad->pcidev->dev, in bnad_mem_alloc()
1358 mem_info->len, &dma_pa, in bnad_mem_alloc()
1360 if (mem_info->mdl[i].kva == NULL) in bnad_mem_alloc()
1364 &(mem_info->mdl[i].dma)); in bnad_mem_alloc()
1367 for (i = 0; i < mem_info->num; i++) { in bnad_mem_alloc()
1368 mem_info->mdl[i].len = mem_info->len; in bnad_mem_alloc()
1369 mem_info->mdl[i].kva = kzalloc(mem_info->len, in bnad_mem_alloc()
1371 if (mem_info->mdl[i].kva == NULL) in bnad_mem_alloc()
1380 return -ENOMEM; in bnad_mem_alloc()
1390 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_mbox_irq_free()
1392 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_mbox_irq_free()
1411 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_mbox_irq_alloc()
1412 if (bnad->cfg_flags & BNAD_CF_MSIX) { in bnad_mbox_irq_alloc()
1414 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; in bnad_mbox_irq_alloc()
1418 irq = bnad->pcidev->irq; in bnad_mbox_irq_alloc()
1422 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_mbox_irq_alloc()
1423 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME); in bnad_mbox_irq_alloc()
1429 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); in bnad_mbox_irq_alloc()
1434 bnad->mbox_irq_name, bnad); in bnad_mbox_irq_alloc()
1442 kfree(intr_info->idl); in bnad_txrx_irq_free()
1443 intr_info->idl = NULL; in bnad_txrx_irq_free()
1446 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1455 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_txrx_irq_alloc()
1456 cfg_flags = bnad->cfg_flags; in bnad_txrx_irq_alloc()
1457 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_txrx_irq_alloc()
1460 intr_info->intr_type = BNA_INTR_T_MSIX; in bnad_txrx_irq_alloc()
1461 intr_info->idl = kcalloc(intr_info->num, in bnad_txrx_irq_alloc()
1464 if (!intr_info->idl) in bnad_txrx_irq_alloc()
1465 return -ENOMEM; in bnad_txrx_irq_alloc()
1474 (bnad->num_tx * bnad->num_txq_per_tx) + in bnad_txrx_irq_alloc()
1482 for (i = 0; i < intr_info->num; i++) in bnad_txrx_irq_alloc()
1483 intr_info->idl[i].vector = vector_start + i; in bnad_txrx_irq_alloc()
1485 intr_info->intr_type = BNA_INTR_T_INTX; in bnad_txrx_irq_alloc()
1486 intr_info->num = 1; in bnad_txrx_irq_alloc()
1487 intr_info->idl = kcalloc(intr_info->num, in bnad_txrx_irq_alloc()
1490 if (!intr_info->idl) in bnad_txrx_irq_alloc()
1491 return -ENOMEM; in bnad_txrx_irq_alloc()
1495 intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK; in bnad_txrx_irq_alloc()
1499 intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK; in bnad_txrx_irq_alloc()
1517 if (tx_info->tcb[i] == NULL) in bnad_tx_msix_unregister()
1520 vector_num = tx_info->tcb[i]->intr_vector; in bnad_tx_msix_unregister()
1521 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]); in bnad_tx_msix_unregister()
1537 vector_num = tx_info->tcb[i]->intr_vector; in bnad_tx_msix_register()
1538 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name, in bnad_tx_msix_register()
1539 tx_id + tx_info->tcb[i]->id); in bnad_tx_msix_register()
1540 err = request_irq(bnad->msix_table[vector_num].vector, in bnad_tx_msix_register()
1542 tx_info->tcb[i]->name, in bnad_tx_msix_register()
1543 tx_info->tcb[i]); in bnad_tx_msix_register()
1552 bnad_tx_msix_unregister(bnad, tx_info, (i - 1)); in bnad_tx_msix_register()
1553 return -1; in bnad_tx_msix_register()
1567 if (rx_info->rx_ctrl[i].ccb == NULL) in bnad_rx_msix_unregister()
1570 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector; in bnad_rx_msix_unregister()
1571 free_irq(bnad->msix_table[vector_num].vector, in bnad_rx_msix_unregister()
1572 rx_info->rx_ctrl[i].ccb); in bnad_rx_msix_unregister()
1588 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector; in bnad_rx_msix_register()
1589 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d", in bnad_rx_msix_register()
1590 bnad->netdev->name, in bnad_rx_msix_register()
1591 rx_id + rx_info->rx_ctrl[i].ccb->id); in bnad_rx_msix_register()
1592 err = request_irq(bnad->msix_table[vector_num].vector, in bnad_rx_msix_register()
1594 rx_info->rx_ctrl[i].ccb->name, in bnad_rx_msix_register()
1595 rx_info->rx_ctrl[i].ccb); in bnad_rx_msix_register()
1604 bnad_rx_msix_unregister(bnad, rx_info, (i - 1)); in bnad_rx_msix_register()
1605 return -1; in bnad_rx_msix_register()
1685 /* Timer callbacks */
1686 /* a) IOC timer */
1693 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_ioc_timeout()
1694 bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc); in bnad_ioc_timeout()
1695 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_ioc_timeout()
1704 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_ioc_hb_check()
1705 bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc); in bnad_ioc_hb_check()
1706 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_ioc_hb_check()
1715 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_iocpf_timeout()
1716 bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc); in bnad_iocpf_timeout()
1717 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_iocpf_timeout()
1726 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_iocpf_sem_timeout()
1727 bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc); in bnad_iocpf_sem_timeout()
1728 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_iocpf_sem_timeout()
1732 * All timer routines use bnad->bna_lock to protect against
1741 /* b) Dynamic Interrupt Moderation Timer */
1751 if (!netif_carrier_ok(bnad->netdev)) in bnad_dim_timeout()
1754 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_dim_timeout()
1755 for (i = 0; i < bnad->num_rx; i++) { in bnad_dim_timeout()
1756 rx_info = &bnad->rx_info[i]; in bnad_dim_timeout()
1757 if (!rx_info->rx) in bnad_dim_timeout()
1759 for (j = 0; j < bnad->num_rxp_per_rx; j++) { in bnad_dim_timeout()
1760 rx_ctrl = &rx_info->rx_ctrl[j]; in bnad_dim_timeout()
1761 if (!rx_ctrl->ccb) in bnad_dim_timeout()
1763 bna_rx_dim_update(rx_ctrl->ccb); in bnad_dim_timeout()
1768 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) in bnad_dim_timeout()
1769 mod_timer(&bnad->dim_timer, in bnad_dim_timeout()
1771 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_dim_timeout()
1774 /* c) Statistics Timer */
1781 if (!netif_running(bnad->netdev) || in bnad_stats_timeout()
1782 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) in bnad_stats_timeout()
1785 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_stats_timeout()
1786 bna_hw_stats_get(&bnad->bna); in bnad_stats_timeout()
1787 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_stats_timeout()
1791 * Set up timer for DIM
1792 * Called with bnad->bna_lock held
1797 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && in bnad_dim_timer_start()
1798 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { in bnad_dim_timer_start()
1799 timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0); in bnad_dim_timer_start()
1800 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); in bnad_dim_timer_start()
1801 mod_timer(&bnad->dim_timer, in bnad_dim_timer_start()
1807 * Set up timer for statistics
1808 * Called with mutex_lock(&bnad->conf_mutex) held
1815 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_stats_timer_start()
1816 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) { in bnad_stats_timer_start()
1817 timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0); in bnad_stats_timer_start()
1818 mod_timer(&bnad->stats_timer, in bnad_stats_timer_start()
1821 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_stats_timer_start()
1825 * Stops the stats timer
1826 * Called with mutex_lock(&bnad->conf_mutex) held
1834 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_stats_timer_stop()
1835 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) in bnad_stats_timer_stop()
1837 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_stats_timer_stop()
1839 del_timer_sync(&bnad->stats_timer); in bnad_stats_timer_stop()
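
The DIM and statistics timers above follow the standard kernel timer lifecycle. A minimal sketch, assuming a hypothetical my_stats_ctx and a 200 ms period (the real intervals are driver-defined constants):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_stats_ctx {
	struct timer_list timer;
};

static void my_stats_timeout(struct timer_list *t)
{
	struct my_stats_ctx *ctx = from_timer(ctx, t, timer);

	/* collect statistics here, then re-arm for periodic operation */
	mod_timer(&ctx->timer, jiffies + msecs_to_jiffies(200));
}

static void my_stats_start(struct my_stats_ctx *ctx)
{
	timer_setup(&ctx->timer, my_stats_timeout, 0);
	mod_timer(&ctx->timer, jiffies + msecs_to_jiffies(200));
}

static void my_stats_stop(struct my_stats_ctx *ctx)
{
	del_timer_sync(&ctx->timer);	/* waits out a callback already running */
}
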
1851 ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]); in bnad_netdev_mc_list_get()
1861 struct bnad *bnad = rx_ctrl->bnad; in bnad_napi_poll_rx()
1864 rx_ctrl->rx_poll_ctr++; in bnad_napi_poll_rx()
1866 if (!netif_carrier_ok(bnad->netdev)) in bnad_napi_poll_rx()
1869 rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget); in bnad_napi_poll_rx()
1876 rx_ctrl->rx_complete++; in bnad_napi_poll_rx()
1878 if (rx_ctrl->ccb) in bnad_napi_poll_rx()
1879 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb); in bnad_napi_poll_rx()
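
bnad_napi_poll_rx() above follows the usual NAPI contract: consume at most "budget" completions, and only complete NAPI and re-enable the interrupt once the queue is drained. A self-contained sketch with stub helpers (my_process_cq and my_enable_irq stand in for bnad_cq_process() and the doorbell re-enable; they are not driver functions):

#include <linux/netdevice.h>

/* stub standing in for the completion-queue processing */
static int my_process_cq(struct napi_struct *napi, int budget)
{
	return 0;	/* pretend the queue was already empty */
}

/* stub standing in for re-enabling the device interrupt */
static void my_enable_irq(struct napi_struct *napi)
{
}

static int my_poll(struct napi_struct *napi, int budget)
{
	int rcvd = my_process_cq(napi, budget);

	if (rcvd >= budget)
		return budget;		/* more work pending: stay scheduled */

	if (napi_complete_done(napi, rcvd))
		my_enable_irq(napi);	/* safe to take interrupts again */

	return rcvd;
}
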
1892 for (i = 0; i < bnad->num_rxp_per_rx; i++) { in bnad_napi_add()
1893 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; in bnad_napi_add()
1894 netif_napi_add(bnad->netdev, &rx_ctrl->napi, in bnad_napi_add()
1905 for (i = 0; i < bnad->num_rxp_per_rx; i++) in bnad_napi_delete()
1906 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi); in bnad_napi_delete()
1913 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; in bnad_destroy_tx()
1914 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; in bnad_destroy_tx()
1917 if (!tx_info->tx) in bnad_destroy_tx()
1920 init_completion(&bnad->bnad_completions.tx_comp); in bnad_destroy_tx()
1921 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_destroy_tx()
1922 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled); in bnad_destroy_tx()
1923 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_destroy_tx()
1924 wait_for_completion(&bnad->bnad_completions.tx_comp); in bnad_destroy_tx()
1926 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX) in bnad_destroy_tx()
1928 bnad->num_txq_per_tx); in bnad_destroy_tx()
1930 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_destroy_tx()
1931 bna_tx_destroy(tx_info->tx); in bnad_destroy_tx()
1932 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_destroy_tx()
1934 tx_info->tx = NULL; in bnad_destroy_tx()
1935 tx_info->tx_id = 0; in bnad_destroy_tx()
1945 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; in bnad_setup_tx()
1946 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; in bnad_setup_tx()
1949 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id]; in bnad_setup_tx()
1961 tx_info->tx_id = tx_id; in bnad_setup_tx()
1964 tx_config->num_txq = bnad->num_txq_per_tx; in bnad_setup_tx()
1965 tx_config->txq_depth = bnad->txq_depth; in bnad_setup_tx()
1966 tx_config->tx_type = BNA_TX_T_REGULAR; in bnad_setup_tx()
1967 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo; in bnad_setup_tx()
1970 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_tx()
1971 bna_tx_res_req(bnad->num_txq_per_tx, in bnad_setup_tx()
1972 bnad->txq_depth, res_info); in bnad_setup_tx()
1973 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_tx()
1977 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) * in bnad_setup_tx()
1978 bnad->txq_depth)); in bnad_setup_tx()
1986 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_tx()
1987 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info, in bnad_setup_tx()
1989 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_tx()
1991 err = -ENOMEM; in bnad_setup_tx()
1994 tx_info->tx = tx; in bnad_setup_tx()
1996 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, in bnad_setup_tx()
2000 if (intr_info->intr_type == BNA_INTR_T_MSIX) { in bnad_setup_tx()
2002 tx_id, bnad->num_txq_per_tx); in bnad_setup_tx()
2007 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_tx()
2009 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_tx()
2014 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_tx()
2015 bna_tx_destroy(tx_info->tx); in bnad_setup_tx()
2016 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_tx()
2017 tx_info->tx = NULL; in bnad_setup_tx()
2018 tx_info->tx_id = 0; in bnad_setup_tx()
2030 rx_config->rx_type = BNA_RX_T_REGULAR; in bnad_init_rx_config()
2031 rx_config->num_paths = bnad->num_rxp_per_rx; in bnad_init_rx_config()
2032 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo; in bnad_init_rx_config()
2034 if (bnad->num_rxp_per_rx > 1) { in bnad_init_rx_config()
2035 rx_config->rss_status = BNA_STATUS_T_ENABLED; in bnad_init_rx_config()
2036 rx_config->rss_config.hash_type = in bnad_init_rx_config()
2041 rx_config->rss_config.hash_mask = in bnad_init_rx_config()
2042 bnad->num_rxp_per_rx - 1; in bnad_init_rx_config()
2043 netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key, in bnad_init_rx_config()
2044 sizeof(rx_config->rss_config.toeplitz_hash_key)); in bnad_init_rx_config()
2046 rx_config->rss_status = BNA_STATUS_T_DISABLED; in bnad_init_rx_config()
2047 memset(&rx_config->rss_config, 0, in bnad_init_rx_config()
2048 sizeof(rx_config->rss_config)); in bnad_init_rx_config()
2051 rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu); in bnad_init_rx_config()
2052 rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED; in bnad_init_rx_config()
2054 /* BNA_RXP_SINGLE - one data-buffer queue in bnad_init_rx_config()
2055 * BNA_RXP_SLR - one small-buffer and one large-buffer queues in bnad_init_rx_config()
2056 * BNA_RXP_HDS - one header-buffer and one data-buffer queues in bnad_init_rx_config()
2059 rx_config->rxp_type = BNA_RXP_SLR; in bnad_init_rx_config()
2062 rx_config->frame_size > 4096) { in bnad_init_rx_config()
2067 rx_config->q0_buf_size = 2048; in bnad_init_rx_config()
2069 rx_config->q0_num_vecs = 4; in bnad_init_rx_config()
2070 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs; in bnad_init_rx_config()
2071 rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED; in bnad_init_rx_config()
2073 rx_config->q0_buf_size = rx_config->frame_size; in bnad_init_rx_config()
2074 rx_config->q0_num_vecs = 1; in bnad_init_rx_config()
2075 rx_config->q0_depth = bnad->rxq_depth; in bnad_init_rx_config()
2079 if (rx_config->rxp_type == BNA_RXP_SLR) { in bnad_init_rx_config()
2080 rx_config->q1_depth = bnad->rxq_depth; in bnad_init_rx_config()
2081 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE; in bnad_init_rx_config()
2084 rx_config->vlan_strip_status = in bnad_init_rx_config()
2085 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ? in bnad_init_rx_config()
2092 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; in bnad_rx_ctrl_init()
2095 for (i = 0; i < bnad->num_rxp_per_rx; i++) in bnad_rx_ctrl_init()
2096 rx_info->rx_ctrl[i].bnad = bnad; in bnad_rx_ctrl_init()
2099 /* Called with mutex_lock(&bnad->conf_mutex) held */
2103 struct net_device *netdev = bnad->netdev; in bnad_reinit_rx()
2109 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { in bnad_reinit_rx()
2110 if (!bnad->rx_info[rx_id].rx) in bnad_reinit_rx()
2115 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_reinit_rx()
2116 bna_enet_mtu_set(&bnad->bna.enet, in bnad_reinit_rx()
2117 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL); in bnad_reinit_rx()
2118 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_reinit_rx()
2120 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { in bnad_reinit_rx()
2130 if (bnad->rx_info[0].rx && !err) { in bnad_reinit_rx()
2133 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_reinit_rx()
2134 bnad_mac_addr_set_locked(bnad, netdev->dev_addr); in bnad_reinit_rx()
2135 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_reinit_rx()
2146 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; in bnad_destroy_rx()
2147 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; in bnad_destroy_rx()
2148 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; in bnad_destroy_rx()
2152 if (!rx_info->rx) in bnad_destroy_rx()
2156 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_destroy_rx()
2157 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && in bnad_destroy_rx()
2158 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { in bnad_destroy_rx()
2159 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); in bnad_destroy_rx()
2162 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_destroy_rx()
2164 del_timer_sync(&bnad->dim_timer); in bnad_destroy_rx()
2167 init_completion(&bnad->bnad_completions.rx_comp); in bnad_destroy_rx()
2168 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_destroy_rx()
2169 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled); in bnad_destroy_rx()
2170 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_destroy_rx()
2171 wait_for_completion(&bnad->bnad_completions.rx_comp); in bnad_destroy_rx()
2173 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX) in bnad_destroy_rx()
2174 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths); in bnad_destroy_rx()
2178 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_destroy_rx()
2179 bna_rx_destroy(rx_info->rx); in bnad_destroy_rx()
2181 rx_info->rx = NULL; in bnad_destroy_rx()
2182 rx_info->rx_id = 0; in bnad_destroy_rx()
2183 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_destroy_rx()
2188 /* Called with mutex_lock(&bnad->conf_mutex) held */
2193 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; in bnad_setup_rx()
2194 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; in bnad_setup_rx()
2197 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; in bnad_setup_rx()
2210 rx_info->rx_id = rx_id; in bnad_setup_rx()
2216 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_rx()
2218 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_rx()
2222 rx_config->num_paths, in bnad_setup_rx()
2223 (rx_config->q0_depth * in bnad_setup_rx()
2227 if (rx_config->rxp_type != BNA_RXP_SINGLE) { in bnad_setup_rx()
2229 rx_config->num_paths, in bnad_setup_rx()
2230 (rx_config->q1_depth * in bnad_setup_rx()
2242 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_rx()
2243 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info, in bnad_setup_rx()
2246 err = -ENOMEM; in bnad_setup_rx()
2247 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_rx()
2250 rx_info->rx = rx; in bnad_setup_rx()
2251 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_rx()
2253 INIT_WORK(&rx_info->rx_cleanup_work, in bnad_setup_rx()
2263 if (intr_info->intr_type == BNA_INTR_T_MSIX) { in bnad_setup_rx()
2265 rx_config->num_paths); in bnad_setup_rx()
2270 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_rx()
2273 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) in bnad_setup_rx()
2274 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector); in bnad_setup_rx()
2279 /* Start the DIM timer */ in bnad_setup_rx()
2284 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_rx()
2293 /* Called with conf_lock & bnad->bna_lock held */
2299 tx_info = &bnad->tx_info[0]; in bnad_tx_coalescing_timeo_set()
2300 if (!tx_info->tx) in bnad_tx_coalescing_timeo_set()
2303 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo); in bnad_tx_coalescing_timeo_set()
2306 /* Called with conf_lock & bnad->bna_lock held */
2313 for (i = 0; i < bnad->num_rx; i++) { in bnad_rx_coalescing_timeo_set()
2314 rx_info = &bnad->rx_info[i]; in bnad_rx_coalescing_timeo_set()
2315 if (!rx_info->rx) in bnad_rx_coalescing_timeo_set()
2317 bna_rx_coalescing_timeo_set(rx_info->rx, in bnad_rx_coalescing_timeo_set()
2318 bnad->rx_coalescing_timeo); in bnad_rx_coalescing_timeo_set()
2323 * Called with bnad->bna_lock held
2331 return -EADDRNOTAVAIL; in bnad_mac_addr_set_locked()
2334 if (!bnad->rx_info[0].rx) in bnad_mac_addr_set_locked()
2337 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr); in bnad_mac_addr_set_locked()
2339 return -EADDRNOTAVAIL; in bnad_mac_addr_set_locked()
2348 struct bnad_rx_info *rx_info = &bnad->rx_info[0]; in bnad_enable_default_bcast()
2352 init_completion(&bnad->bnad_completions.mcast_comp); in bnad_enable_default_bcast()
2354 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_enable_default_bcast()
2355 ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr, in bnad_enable_default_bcast()
2357 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_enable_default_bcast()
2360 wait_for_completion(&bnad->bnad_completions.mcast_comp); in bnad_enable_default_bcast()
2362 return -ENODEV; in bnad_enable_default_bcast()
2364 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS) in bnad_enable_default_bcast()
2365 return -ENODEV; in bnad_enable_default_bcast()
2370 /* Called with mutex_lock(&bnad->conf_mutex) held */
2377 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) { in bnad_restore_vlans()
2378 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_restore_vlans()
2379 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid); in bnad_restore_vlans()
2380 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_restore_vlans()
2390 for (i = 0; i < bnad->num_rx; i++) { in bnad_netdev_qstats_fill()
2391 for (j = 0; j < bnad->num_rxp_per_rx; j++) { in bnad_netdev_qstats_fill()
2392 if (bnad->rx_info[i].rx_ctrl[j].ccb) { in bnad_netdev_qstats_fill()
2393 stats->rx_packets += bnad->rx_info[i]. in bnad_netdev_qstats_fill()
2394 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets; in bnad_netdev_qstats_fill()
2395 stats->rx_bytes += bnad->rx_info[i]. in bnad_netdev_qstats_fill()
2396 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes; in bnad_netdev_qstats_fill()
2397 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && in bnad_netdev_qstats_fill()
2398 bnad->rx_info[i].rx_ctrl[j].ccb-> in bnad_netdev_qstats_fill()
2399 rcb[1]->rxq) { in bnad_netdev_qstats_fill()
2400 stats->rx_packets += in bnad_netdev_qstats_fill()
2401 bnad->rx_info[i].rx_ctrl[j]. in bnad_netdev_qstats_fill()
2402 ccb->rcb[1]->rxq->rx_packets; in bnad_netdev_qstats_fill()
2403 stats->rx_bytes += in bnad_netdev_qstats_fill()
2404 bnad->rx_info[i].rx_ctrl[j]. in bnad_netdev_qstats_fill()
2405 ccb->rcb[1]->rxq->rx_bytes; in bnad_netdev_qstats_fill()
2410 for (i = 0; i < bnad->num_tx; i++) { in bnad_netdev_qstats_fill()
2411 for (j = 0; j < bnad->num_txq_per_tx; j++) { in bnad_netdev_qstats_fill()
2412 if (bnad->tx_info[i].tcb[j]) { in bnad_netdev_qstats_fill()
2413 stats->tx_packets += in bnad_netdev_qstats_fill()
2414 bnad->tx_info[i].tcb[j]->txq->tx_packets; in bnad_netdev_qstats_fill()
2415 stats->tx_bytes += in bnad_netdev_qstats_fill()
2416 bnad->tx_info[i].tcb[j]->txq->tx_bytes; in bnad_netdev_qstats_fill()
2432 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats; in bnad_netdev_hwstats_fill()
2433 stats->rx_errors = in bnad_netdev_hwstats_fill()
2434 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error + in bnad_netdev_hwstats_fill()
2435 mac_stats->rx_frame_length_error + mac_stats->rx_code_error + in bnad_netdev_hwstats_fill()
2436 mac_stats->rx_undersize; in bnad_netdev_hwstats_fill()
2437 stats->tx_errors = mac_stats->tx_fcs_error + in bnad_netdev_hwstats_fill()
2438 mac_stats->tx_undersize; in bnad_netdev_hwstats_fill()
2439 stats->rx_dropped = mac_stats->rx_drop; in bnad_netdev_hwstats_fill()
2440 stats->tx_dropped = mac_stats->tx_drop; in bnad_netdev_hwstats_fill()
2441 stats->multicast = mac_stats->rx_multicast; in bnad_netdev_hwstats_fill()
2442 stats->collisions = mac_stats->tx_total_collision; in bnad_netdev_hwstats_fill()
2444 stats->rx_length_errors = mac_stats->rx_frame_length_error; in bnad_netdev_hwstats_fill()
2448 stats->rx_crc_errors = mac_stats->rx_fcs_error; in bnad_netdev_hwstats_fill()
2449 stats->rx_frame_errors = mac_stats->rx_alignment_error; in bnad_netdev_hwstats_fill()
2451 bmap = bna_rx_rid_mask(&bnad->bna); in bnad_netdev_hwstats_fill()
2454 stats->rx_fifo_errors += in bnad_netdev_hwstats_fill()
2455 bnad->stats.bna_stats-> in bnad_netdev_hwstats_fill()
2469 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_mbox_irq_sync()
2470 if (bnad->cfg_flags & BNAD_CF_MSIX) in bnad_mbox_irq_sync()
2471 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; in bnad_mbox_irq_sync()
2473 irq = bnad->pcidev->irq; in bnad_mbox_irq_sync()
2474 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_mbox_irq_sync()
2492 * For TSO, the TCP checksum field is seeded with pseudo-header sum in bnad_tso_prepare()
2499 iph->tot_len = 0; in bnad_tso_prepare()
2500 iph->check = 0; in bnad_tso_prepare()
2502 tcp_hdr(skb)->check = in bnad_tso_prepare()
2503 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, in bnad_tso_prepare()
2516 * Called with bnad->bna_lock held, because of cfg_flags
2527 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) in bnad_q_num_init()
2530 bnad->num_rx = 1; in bnad_q_num_init()
2531 bnad->num_tx = 1; in bnad_q_num_init()
2532 bnad->num_rxp_per_rx = rxps; in bnad_q_num_init()
2533 bnad->num_txq_per_tx = BNAD_TXQ_NUM; in bnad_q_num_init()
2540 * Called with bnad->bna_lock held because of cfg_flags access
2545 bnad->num_txq_per_tx = 1; in bnad_q_num_adjust()
2546 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) + in bnad_q_num_adjust()
2548 (bnad->cfg_flags & BNAD_CF_MSIX)) { in bnad_q_num_adjust()
2549 bnad->num_rxp_per_rx = msix_vectors - in bnad_q_num_adjust()
2550 (bnad->num_tx * bnad->num_txq_per_tx) - in bnad_q_num_adjust()
2553 bnad->num_rxp_per_rx = 1; in bnad_q_num_adjust()
2563 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_ioceth_disable()
2564 init_completion(&bnad->bnad_completions.ioc_comp); in bnad_ioceth_disable()
2565 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP); in bnad_ioceth_disable()
2566 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_ioceth_disable()
2568 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, in bnad_ioceth_disable()
2571 err = bnad->bnad_completions.ioc_comp_status; in bnad_ioceth_disable()
2581 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_ioceth_enable()
2582 init_completion(&bnad->bnad_completions.ioc_comp); in bnad_ioceth_enable()
2583 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING; in bnad_ioceth_enable()
2584 bna_ioceth_enable(&bnad->bna.ioceth); in bnad_ioceth_enable()
2585 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_ioceth_enable()
2587 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, in bnad_ioceth_enable()
2590 err = bnad->bnad_completions.ioc_comp_status; in bnad_ioceth_enable()
2632 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_enable_msix()
2633 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { in bnad_enable_msix()
2634 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_enable_msix()
2637 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_enable_msix()
2639 if (bnad->msix_table) in bnad_enable_msix()
2642 bnad->msix_table = in bnad_enable_msix()
2643 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL); in bnad_enable_msix()
2645 if (!bnad->msix_table) in bnad_enable_msix()
2648 for (i = 0; i < bnad->msix_num; i++) in bnad_enable_msix()
2649 bnad->msix_table[i].entry = i; in bnad_enable_msix()
2651 ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table, in bnad_enable_msix()
2652 1, bnad->msix_num); in bnad_enable_msix()
2655 } else if (ret < bnad->msix_num) { in bnad_enable_msix()
2656 dev_warn(&bnad->pcidev->dev, in bnad_enable_msix()
2657 "%d MSI-X vectors allocated < %d requested\n", in bnad_enable_msix()
2658 ret, bnad->msix_num); in bnad_enable_msix()
2660 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_enable_msix()
2662 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2, in bnad_enable_msix()
2663 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2); in bnad_enable_msix()
2664 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_enable_msix()
2666 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP + in bnad_enable_msix()
2669 if (bnad->msix_num > ret) { in bnad_enable_msix()
2670 pci_disable_msix(bnad->pcidev); in bnad_enable_msix()
2675 pci_intx(bnad->pcidev, 0); in bnad_enable_msix()
2680 dev_warn(&bnad->pcidev->dev, in bnad_enable_msix()
2681 "MSI-X enable failed - operating in INTx mode\n"); in bnad_enable_msix()
2683 kfree(bnad->msix_table); in bnad_enable_msix()
2684 bnad->msix_table = NULL; in bnad_enable_msix()
2685 bnad->msix_num = 0; in bnad_enable_msix()
2686 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_enable_msix()
2687 bnad->cfg_flags &= ~BNAD_CF_MSIX; in bnad_enable_msix()
2689 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_enable_msix()
2698 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_disable_msix()
2699 cfg_flags = bnad->cfg_flags; in bnad_disable_msix()
2700 if (bnad->cfg_flags & BNAD_CF_MSIX) in bnad_disable_msix()
2701 bnad->cfg_flags &= ~BNAD_CF_MSIX; in bnad_disable_msix()
2702 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_disable_msix()
2705 pci_disable_msix(bnad->pcidev); in bnad_disable_msix()
2706 kfree(bnad->msix_table); in bnad_disable_msix()
2707 bnad->msix_table = NULL; in bnad_disable_msix()
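
A sketch of the MSI-X enable/fallback pattern that bnad_enable_msix()/bnad_disable_msix() implement above: build an msix_entry table, request a range of vectors, and let the caller drop back to INTx on failure. my_enable_msix is a hypothetical helper; the real driver additionally re-sizes its queue counts when fewer vectors are granted.

#include <linux/pci.h>
#include <linux/slab.h>

static int my_enable_msix(struct pci_dev *pdev, struct msix_entry **table,
			  int want)
{
	struct msix_entry *entries;
	int i, got;

	entries = kcalloc(want, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < want; i++)
		entries[i].entry = i;

	/* succeeds with anywhere between 1 and "want" vectors */
	got = pci_enable_msix_range(pdev, entries, 1, want);
	if (got < 0) {
		kfree(entries);		/* caller continues in INTx mode */
		return got;
	}

	*table = entries;
	return got;		/* number of vectors actually granted */
}
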
2720 mutex_lock(&bnad->conf_mutex); in bnad_open()
2736 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_open()
2737 bna_enet_mtu_set(&bnad->bna.enet, in bnad_open()
2738 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL); in bnad_open()
2739 bna_enet_pause_config(&bnad->bna.enet, &pause_config); in bnad_open()
2740 bna_enet_enable(&bnad->bna.enet); in bnad_open()
2741 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_open()
2750 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_open()
2751 bnad_mac_addr_set_locked(bnad, netdev->dev_addr); in bnad_open()
2752 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_open()
2754 /* Start the stats timer */ in bnad_open()
2757 mutex_unlock(&bnad->conf_mutex); in bnad_open()
2765 mutex_unlock(&bnad->conf_mutex); in bnad_open()
2775 mutex_lock(&bnad->conf_mutex); in bnad_stop()
2777 /* Stop the stats timer */ in bnad_stop()
2780 init_completion(&bnad->bnad_completions.enet_comp); in bnad_stop()
2782 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_stop()
2783 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP, in bnad_stop()
2785 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_stop()
2787 wait_for_completion(&bnad->bnad_completions.enet_comp); in bnad_stop()
2795 mutex_unlock(&bnad->conf_mutex); in bnad_stop()
2803 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb, in bnad_txq_wi_prepare() argument
2814 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) { in bnad_txq_wi_prepare()
2815 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT) in bnad_txq_wi_prepare()
2819 txqent->hdr.wi.vlan_tag = htons(vlan_tag); in bnad_txq_wi_prepare()
2822 gso_size = skb_shinfo(skb)->gso_size; in bnad_txq_wi_prepare()
2823 if (unlikely(gso_size > bnad->netdev->mtu)) { in bnad_txq_wi_prepare()
2825 return -EINVAL; in bnad_txq_wi_prepare()
2828 tcp_hdrlen(skb)) >= skb->len)) { in bnad_txq_wi_prepare()
2829 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND); in bnad_txq_wi_prepare()
2830 txqent->hdr.wi.lso_mss = 0; in bnad_txq_wi_prepare()
2833 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO); in bnad_txq_wi_prepare()
2834 txqent->hdr.wi.lso_mss = htons(gso_size); in bnad_txq_wi_prepare()
2839 return -EINVAL; in bnad_txq_wi_prepare()
2843 txqent->hdr.wi.l4_hdr_size_n_offset = in bnad_txq_wi_prepare()
2847 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND); in bnad_txq_wi_prepare()
2848 txqent->hdr.wi.lso_mss = 0; in bnad_txq_wi_prepare()
2850 if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) { in bnad_txq_wi_prepare()
2852 return -EINVAL; in bnad_txq_wi_prepare()
2855 if (skb->ip_summed == CHECKSUM_PARTIAL) { in bnad_txq_wi_prepare()
2860 proto = ip_hdr(skb)->protocol; in bnad_txq_wi_prepare()
2864 proto = ipv6_hdr(skb)->nexthdr; in bnad_txq_wi_prepare()
2869 txqent->hdr.wi.l4_hdr_size_n_offset = in bnad_txq_wi_prepare()
2879 return -EINVAL; in bnad_txq_wi_prepare()
2883 txqent->hdr.wi.l4_hdr_size_n_offset = in bnad_txq_wi_prepare()
2892 return -EINVAL; in bnad_txq_wi_prepare()
2897 return -EINVAL; in bnad_txq_wi_prepare()
2900 txqent->hdr.wi.l4_hdr_size_n_offset = 0; in bnad_txq_wi_prepare()
2903 txqent->hdr.wi.flags = htons(flags); in bnad_txq_wi_prepare()
2904 txqent->hdr.wi.frame_length = htonl(skb->len); in bnad_txq_wi_prepare()
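/*
 * A minimal sketch (not from the driver) of the classification that
 * bnad_txq_wi_prepare() performs above: a GSO frame becomes an LSO work item
 * unless it already fits in one segment, otherwise CHECKSUM_PARTIAL frames
 * get a per-protocol checksum-offload flag, and anything else is sent as-is.
 * The MY_WI_* flags and function name are invented; skb_is_gso(),
 * tcp_hdrlen(), skb_transport_offset() and skb_checksum_help() are real.
 */
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>

#define MY_WI_LSO	0x1
#define MY_WI_TCP_CSUM	0x2
#define MY_WI_UDP_CSUM	0x4

static int my_classify_tx_skb(struct sk_buff *skb, u16 *flags, u16 *mss)
{
	*flags = 0;
	*mss = 0;

	if (skb_is_gso(skb)) {
		u16 gso = skb_shinfo(skb)->gso_size;

		/* A "GSO" frame that fits in one segment is a plain send. */
		if (gso + skb_transport_offset(skb) + tcp_hdrlen(skb) >= skb->len)
			return 0;

		*flags |= MY_WI_LSO;
		*mss = gso;
		return 0;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 proto = 0;

		if (skb->protocol == htons(ETH_P_IP))
			proto = ip_hdr(skb)->protocol;
		else if (skb->protocol == htons(ETH_P_IPV6))
			proto = ipv6_hdr(skb)->nexthdr;

		if (proto == IPPROTO_TCP)
			*flags |= MY_WI_TCP_CSUM;
		else if (proto == IPPROTO_UDP)
			*flags |= MY_WI_UDP_CSUM;
		else
			return skb_checksum_help(skb); /* fall back to SW csum */
	}

	return 0;
}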
2918 struct bna_tcb *tcb = NULL; in bnad_start_xmit() local
2930 if (unlikely(skb->len <= ETH_HLEN)) { in bnad_start_xmit()
2946 tcb = bnad->tx_info[0].tcb[txq_id]; in bnad_start_xmit()
2952 if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { in bnad_start_xmit()
2958 q_depth = tcb->q_depth; in bnad_start_xmit()
2959 prod = tcb->producer_index; in bnad_start_xmit()
2960 unmap_q = tcb->unmap_q; in bnad_start_xmit()
2962 vectors = 1 + skb_shinfo(skb)->nr_frags; in bnad_start_xmit()
2972 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) { in bnad_start_xmit()
2973 if ((*tcb->hw_consumer_index != tcb->consumer_index) && in bnad_start_xmit()
2974 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { in bnad_start_xmit()
2976 sent = bnad_txcmpl_process(bnad, tcb); in bnad_start_xmit()
2977 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) in bnad_start_xmit()
2978 bna_ib_ack(tcb->i_dbell, sent); in bnad_start_xmit()
2980 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); in bnad_start_xmit()
2992 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) { in bnad_start_xmit()
3001 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod]; in bnad_start_xmit()
3005 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) { in bnad_start_xmit()
3009 txqent->hdr.wi.reserved = 0; in bnad_start_xmit()
3010 txqent->hdr.wi.num_vectors = vectors; in bnad_start_xmit()
3012 head_unmap->skb = skb; in bnad_start_xmit()
3013 head_unmap->nvecs = 0; in bnad_start_xmit()
3017 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, in bnad_start_xmit()
3019 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { in bnad_start_xmit()
3024 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr); in bnad_start_xmit()
3025 txqent->vector[0].length = htons(len); in bnad_start_xmit()
3026 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr); in bnad_start_xmit()
3027 head_unmap->nvecs++; in bnad_start_xmit()
3029 for (i = 0, vect_id = 0; i < vectors - 1; i++) { in bnad_start_xmit()
3030 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in bnad_start_xmit()
3034 /* Undo the changes starting at tcb->producer_index */ in bnad_start_xmit()
3036 tcb->producer_index); in bnad_start_xmit()
3048 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod]; in bnad_start_xmit()
3049 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION); in bnad_start_xmit()
3053 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, in bnad_start_xmit()
3055 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { in bnad_start_xmit()
3056 /* Undo the changes starting at tcb->producer_index */ in bnad_start_xmit()
3058 tcb->producer_index); in bnad_start_xmit()
3064 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size); in bnad_start_xmit()
3065 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); in bnad_start_xmit()
3066 txqent->vector[vect_id].length = htons(size); in bnad_start_xmit()
3067 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr, in bnad_start_xmit()
3069 head_unmap->nvecs++; in bnad_start_xmit()
3072 if (unlikely(len != skb->len)) { in bnad_start_xmit()
3073 /* Undo the changes starting at tcb->producer_index */ in bnad_start_xmit()
3074 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index); in bnad_start_xmit()
3081 tcb->producer_index = prod; in bnad_start_xmit()
3085 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) in bnad_start_xmit()
3090 bna_txq_prod_indx_doorbell(tcb); in bnad_start_xmit()
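/*
 * A minimal sketch (not from the driver) of the map-head-then-frags pattern
 * the xmit path above follows, including the unwind on a mapping failure.
 * my_map_tx_skb() and the dma[] bookkeeping are illustrative; dma_map_single(),
 * skb_frag_dma_map() and dma_mapping_error() are the real DMA API calls.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int my_map_tx_skb(struct device *dev, struct sk_buff *skb,
			 dma_addr_t *dma, unsigned int max_vecs)
{
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int i, mapped;

	if (nr_frags + 1 > max_vecs)
		return -EINVAL;

	/* Vector 0: the linear (header) part of the skb. */
	dma[0] = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma[0]))
		return -ENOMEM;
	mapped = 1;

	/* Vectors 1..n: one per page fragment. */
	for (i = 0; i < nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma[mapped] = skb_frag_dma_map(dev, frag, 0,
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma[mapped]))
			goto unwind;
		mapped++;
	}
	return mapped;

unwind:
	/* Undo what was mapped so far, mirroring the driver's unmap helper. */
	dma_unmap_single(dev, dma[0], skb_headlen(skb), DMA_TO_DEVICE);
	for (i = 1; i < mapped; i++)
		dma_unmap_page(dev, dma[i],
			       skb_frag_size(&skb_shinfo(skb)->frags[i - 1]),
			       DMA_TO_DEVICE);
	return -ENOMEM;
}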
3105 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_get_stats64()
3110 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_get_stats64()
3116 struct net_device *netdev = bnad->netdev; in bnad_set_rx_ucast_fltr()
3123 if (netdev_uc_empty(bnad->netdev)) { in bnad_set_rx_ucast_fltr()
3124 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL); in bnad_set_rx_ucast_fltr()
3128 if (uc_count > bna_attr(&bnad->bna)->num_ucmac) in bnad_set_rx_ucast_fltr()
3137 ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]); in bnad_set_rx_ucast_fltr()
3141 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list); in bnad_set_rx_ucast_fltr()
3151 bnad->cfg_flags |= BNAD_CF_DEFAULT; in bnad_set_rx_ucast_fltr()
3152 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL); in bnad_set_rx_ucast_fltr()
3158 struct net_device *netdev = bnad->netdev; in bnad_set_rx_mcast_fltr()
3163 if (netdev->flags & IFF_ALLMULTI) in bnad_set_rx_mcast_fltr()
3169 if (mc_count > bna_attr(&bnad->bna)->num_mcmac) in bnad_set_rx_mcast_fltr()
3181 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list); in bnad_set_rx_mcast_fltr()
3190 bnad->cfg_flags |= BNAD_CF_ALLMULTI; in bnad_set_rx_mcast_fltr()
3191 bna_rx_mcast_delall(bnad->rx_info[0].rx); in bnad_set_rx_mcast_fltr()
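/*
 * A minimal sketch (not from the driver) of how the unicast/multicast filter
 * lists above are built: walk the netdev address lists and flatten them into
 * the byte array that the hardware list-set call expects.  my_build_mac_list()
 * and its layout are illustrative; netdev_for_each_uc_addr() (and the _mc_
 * variant used for multicast) plus ether_addr_copy() are the real helpers.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>

static u8 *my_build_mac_list(struct net_device *netdev, int *count)
{
	struct netdev_hw_addr *ha;
	int entries = netdev_uc_count(netdev);
	u8 *mac_list;
	int entry = 0;

	*count = 0;
	if (!entries)
		return NULL;

	mac_list = kcalloc(entries, ETH_ALEN, GFP_ATOMIC);
	if (!mac_list)
		return NULL;

	/* Copy each unicast address back-to-back into the flat array. */
	netdev_for_each_uc_addr(ha, netdev)
		ether_addr_copy(&mac_list[entry++ * ETH_ALEN], ha->addr);

	*count = entry;
	return mac_list;
}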
3201 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_set_rx_mode()
3203 if (bnad->rx_info[0].rx == NULL) { in bnad_set_rx_mode()
3204 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_rx_mode()
3209 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT | in bnad_set_rx_mode()
3213 if (netdev->flags & IFF_PROMISC) { in bnad_set_rx_mode()
3215 bnad->cfg_flags |= BNAD_CF_PROMISC; in bnad_set_rx_mode()
3219 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) in bnad_set_rx_mode()
3224 if (bnad->cfg_flags & BNAD_CF_DEFAULT) in bnad_set_rx_mode()
3230 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask); in bnad_set_rx_mode()
3232 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_rx_mode()
 3236 * bna_lock is used to sync writes to netdev->addr
 3237 * conf_lock cannot be used since this call may be made
 3238 * in a non-blocking context.
3248 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_set_mac_address()
3250 err = bnad_mac_addr_set_locked(bnad, sa->sa_data); in bnad_set_mac_address()
3252 ether_addr_copy(netdev->dev_addr, sa->sa_data); in bnad_set_mac_address()
3254 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_mac_address()
3264 init_completion(&bnad->bnad_completions.mtu_comp); in bnad_mtu_set()
3266 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_mtu_set()
3267 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set); in bnad_mtu_set()
3268 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_mtu_set()
3270 wait_for_completion(&bnad->bnad_completions.mtu_comp); in bnad_mtu_set()
3272 return bnad->bnad_completions.mtu_comp_status; in bnad_mtu_set()
3282 mutex_lock(&bnad->conf_mutex); in bnad_change_mtu()
3284 mtu = netdev->mtu; in bnad_change_mtu()
3285 netdev->mtu = new_mtu; in bnad_change_mtu()
3290 /* check if multi-buffer needs to be enabled */ in bnad_change_mtu()
3292 netif_running(bnad->netdev)) { in bnad_change_mtu()
3301 err = -EBUSY; in bnad_change_mtu()
3303 mutex_unlock(&bnad->conf_mutex); in bnad_change_mtu()
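/*
 * A minimal sketch (not from the driver) of the ndo_change_mtu pattern used
 * above: record the new MTU under the configuration mutex and reprogram the
 * hardware only when the interface is running, reporting a hardware failure
 * as -EBUSY.  struct my_priv and my_hw_set_frame_size are hypothetical.
 */
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/if_ether.h>

struct my_priv {
	struct mutex conf_mutex;
};

static int my_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct my_priv *priv = netdev_priv(netdev);
	int err = 0;

	/* The core has already checked new_mtu against min_mtu/max_mtu. */
	mutex_lock(&priv->conf_mutex);

	netdev->mtu = new_mtu;

	if (netif_running(netdev)) {
		/*
		 * Reprogram the MAC for the new frame size here, e.g.
		 * err = my_hw_set_frame_size(priv, new_mtu + ETH_HLEN);
		 * (my_hw_set_frame_size is a hypothetical helper.)
		 */
		if (err)
			err = -EBUSY;
	}

	mutex_unlock(&priv->conf_mutex);
	return err;
}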
3313 if (!bnad->rx_info[0].rx) in bnad_vlan_rx_add_vid()
3316 mutex_lock(&bnad->conf_mutex); in bnad_vlan_rx_add_vid()
3318 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_vlan_rx_add_vid()
3319 bna_rx_vlan_add(bnad->rx_info[0].rx, vid); in bnad_vlan_rx_add_vid()
3320 set_bit(vid, bnad->active_vlans); in bnad_vlan_rx_add_vid()
3321 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_vlan_rx_add_vid()
3323 mutex_unlock(&bnad->conf_mutex); in bnad_vlan_rx_add_vid()
3334 if (!bnad->rx_info[0].rx) in bnad_vlan_rx_kill_vid()
3337 mutex_lock(&bnad->conf_mutex); in bnad_vlan_rx_kill_vid()
3339 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_vlan_rx_kill_vid()
3340 clear_bit(vid, bnad->active_vlans); in bnad_vlan_rx_kill_vid()
3341 bna_rx_vlan_del(bnad->rx_info[0].rx, vid); in bnad_vlan_rx_kill_vid()
3342 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_vlan_rx_kill_vid()
3344 mutex_unlock(&bnad->conf_mutex); in bnad_vlan_rx_kill_vid()
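/*
 * A minimal sketch (not from the driver) of the VLAN filter bookkeeping seen
 * above: a bitmap of active VLAN IDs is kept so the filter can be replayed
 * after a reset, and the add/kill handlers update both the bitmap and the
 * hardware.  my_hw_vlan_add()/my_hw_vlan_del() are hypothetical stand-ins
 * for the bna_rx_vlan_add()/bna_rx_vlan_del() calls shown above.
 */
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>

struct my_priv {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	spinlock_t lock;
};

static void my_hw_vlan_add(struct my_priv *priv, u16 vid) { /* program HW */ }
static void my_hw_vlan_del(struct my_priv *priv, u16 vid) { /* program HW */ }

static int my_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct my_priv *priv = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	my_hw_vlan_add(priv, vid);
	set_bit(vid, priv->active_vlans);
	spin_unlock_irqrestore(&priv->lock, flags);
	return 0;
}

static int my_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct my_priv *priv = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	clear_bit(vid, priv->active_vlans);
	my_hw_vlan_del(priv, vid);
	spin_unlock_irqrestore(&priv->lock, flags);
	return 0;
}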
3352 netdev_features_t changed = features ^ dev->features; in bnad_set_features()
3357 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_set_features()
3360 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx); in bnad_set_features()
3362 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx); in bnad_set_features()
3364 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_features()
3380 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { in bnad_netpoll()
3381 bna_intx_disable(&bnad->bna, curr_mask); in bnad_netpoll()
3382 bnad_isr(bnad->pcidev->irq, netdev); in bnad_netpoll()
3383 bna_intx_enable(&bnad->bna, curr_mask); in bnad_netpoll()
3391 for (i = 0; i < bnad->num_rx; i++) { in bnad_netpoll()
3392 rx_info = &bnad->rx_info[i]; in bnad_netpoll()
3393 if (!rx_info->rx) in bnad_netpoll()
3395 for (j = 0; j < bnad->num_rxp_per_rx; j++) { in bnad_netpoll()
3396 rx_ctrl = &rx_info->rx_ctrl[j]; in bnad_netpoll()
3397 if (rx_ctrl->ccb) in bnad_netpoll()
3399 rx_ctrl->ccb); in bnad_netpoll()
3426 struct net_device *netdev = bnad->netdev; in bnad_netdev_init()
3428 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | in bnad_netdev_init()
3433 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA | in bnad_netdev_init()
3437 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; in bnad_netdev_init()
3440 netdev->features |= NETIF_F_HIGHDMA; in bnad_netdev_init()
3442 netdev->mem_start = bnad->mmio_start; in bnad_netdev_init()
3443 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1; in bnad_netdev_init()
3445 /* MTU range: 46 - 9000 */ in bnad_netdev_init()
3446 netdev->min_mtu = ETH_ZLEN - ETH_HLEN; in bnad_netdev_init()
3447 netdev->max_mtu = BNAD_JUMBO_MTU; in bnad_netdev_init()
3449 netdev->netdev_ops = &bnad_netdev_ops; in bnad_netdev_init()
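/*
 * A minimal sketch (not from the driver) of the feature setup performed by
 * bnad_netdev_init() above.  The exact flag lists in the driver are elided
 * in the listing, so the set below is only representative of a checksum/TSO/
 * VLAN-offload capable NIC; NETIF_F_* flags, min_mtu/max_mtu and the
 * hw_features/vlan_features/features split are the real netdev interfaces.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void my_netdev_init(struct net_device *netdev)
{
	/* Offloads the user may toggle via ethtool. */
	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
			      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	/* Offloads usable on VLAN devices stacked on top of this one. */
	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
				NETIF_F_IP_CSUM | NETIF_F_TSO;

	/* Enable everything by default; VLAN filtering is always on. */
	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;

	/* MTU range: 46 - 9000, matching the comment above. */
	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
	netdev->max_mtu = 9000;
}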
3465 SET_NETDEV_DEV(netdev, &pdev->dev); in bnad_init()
3468 bnad->netdev = netdev; in bnad_init()
3469 bnad->pcidev = pdev; in bnad_init()
3470 bnad->mmio_start = pci_resource_start(pdev, 0); in bnad_init()
3471 bnad->mmio_len = pci_resource_len(pdev, 0); in bnad_init()
3472 bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len); in bnad_init()
3473 if (!bnad->bar0) { in bnad_init()
3474 dev_err(&pdev->dev, "ioremap for bar0 failed\n"); in bnad_init()
3475 return -ENOMEM; in bnad_init()
3477 dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0, in bnad_init()
3478 (unsigned long long) bnad->mmio_len); in bnad_init()
3480 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_init()
3482 bnad->cfg_flags = BNAD_CF_MSIX; in bnad_init()
3484 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED; in bnad_init()
3487 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_init()
3489 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) + in bnad_init()
3490 (bnad->num_rx * bnad->num_rxp_per_rx) + in bnad_init()
3493 bnad->txq_depth = BNAD_TXQ_DEPTH; in bnad_init()
3494 bnad->rxq_depth = BNAD_RXQ_DEPTH; in bnad_init()
3496 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO; in bnad_init()
3497 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO; in bnad_init()
3499 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id); in bnad_init()
3500 bnad->work_q = create_singlethread_workqueue(bnad->wq_name); in bnad_init()
3501 if (!bnad->work_q) { in bnad_init()
3502 iounmap(bnad->bar0); in bnad_init()
3503 return -ENOMEM; in bnad_init()
3517 if (bnad->work_q) { in bnad_uninit()
3518 flush_workqueue(bnad->work_q); in bnad_uninit()
3519 destroy_workqueue(bnad->work_q); in bnad_uninit()
3520 bnad->work_q = NULL; in bnad_uninit()
3523 if (bnad->bar0) in bnad_uninit()
3524 iounmap(bnad->bar0); in bnad_uninit()
3536 spin_lock_init(&bnad->bna_lock); in bnad_lock_init()
3537 mutex_init(&bnad->conf_mutex); in bnad_lock_init()
3543 mutex_destroy(&bnad->conf_mutex); in bnad_lock_uninit()
3559 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { in bnad_pci_init()
3562 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in bnad_pci_init()
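/*
 * A minimal sketch (not from the driver) of the PCI bring-up surrounding the
 * dma_set_mask_and_coherent() calls above: enable the device, claim its BARs,
 * prefer a 64-bit DMA mask and fall back to 32-bit.  Function, driver-name
 * and label names are illustrative; the PCI/DMA calls are the standard API.
 */
#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int my_pci_init(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "my_driver");
	if (err)
		goto disable_device;

	/* Prefer 64-bit DMA; fall back to a 32-bit mask if unsupported. */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto release_regions;
	}

	pci_set_master(pdev);
	return 0;

release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
	return err;
}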
3600 dev_err(&pdev->dev, "failed to load firmware image!\n"); in bnad_pci_probe()
3601 return -ENODEV; in bnad_pci_probe()
3607 * bnad = netdev->priv in bnad_pci_probe()
3611 err = -ENOMEM; in bnad_pci_probe()
3616 bnad->id = atomic_inc_return(&bna_id) - 1; in bnad_pci_probe()
3618 mutex_lock(&bnad->conf_mutex); in bnad_pci_probe()
3648 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3649 bna_res_req(&bnad->res_info[0]); in bnad_pci_probe()
3650 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3653 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX); in bnad_pci_probe()
3657 bna = &bnad->bna; in bnad_pci_probe()
3660 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn); in bnad_pci_probe()
3661 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn); in bnad_pci_probe()
3662 pcidev_info.device_id = bnad->pcidev->device; in bnad_pci_probe()
3663 pcidev_info.pci_bar_kva = bnad->bar0; in bnad_pci_probe()
3665 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3666 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]); in bnad_pci_probe()
3667 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3669 bnad->stats.bna_stats = &bna->stats; in bnad_pci_probe()
3677 timer_setup(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 0); in bnad_pci_probe()
3678 timer_setup(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 0); in bnad_pci_probe()
3679 timer_setup(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 0); in bnad_pci_probe()
3680 timer_setup(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout, in bnad_pci_probe()
3690 dev_err(&pdev->dev, "initialization failed err=%d\n", err); in bnad_pci_probe()
3694 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3697 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1, in bnad_pci_probe()
3698 bna_attr(bna)->num_rxp - 1); in bnad_pci_probe()
3701 err = -EIO; in bnad_pci_probe()
3703 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3707 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3708 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]); in bnad_pci_probe()
3709 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3711 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); in bnad_pci_probe()
3713 err = -EIO; in bnad_pci_probe()
3717 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3718 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]); in bnad_pci_probe()
3719 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3721 /* Get the burnt-in mac */ in bnad_pci_probe()
3722 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3723 bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr); in bnad_pci_probe()
3725 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3727 mutex_unlock(&bnad->conf_mutex); in bnad_pci_probe()
3732 dev_err(&pdev->dev, "registering net device failed\n"); in bnad_pci_probe()
3735 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags); in bnad_pci_probe()
3740 mutex_unlock(&bnad->conf_mutex); in bnad_pci_probe()
3744 mutex_lock(&bnad->conf_mutex); in bnad_pci_probe()
3745 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); in bnad_pci_probe()
3748 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); in bnad_pci_probe()
3749 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); in bnad_pci_probe()
3750 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); in bnad_pci_probe()
3751 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3753 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3757 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); in bnad_pci_probe()
3760 kfree(bnad->regdata); in bnad_pci_probe()
3766 mutex_unlock(&bnad->conf_mutex); in bnad_pci_probe()
3784 bna = &bnad->bna; in bnad_pci_remove()
3786 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags)) in bnad_pci_remove()
3789 mutex_lock(&bnad->conf_mutex); in bnad_pci_remove()
3791 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); in bnad_pci_remove()
3792 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); in bnad_pci_remove()
3793 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); in bnad_pci_remove()
3794 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_remove()
3796 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_remove()
3798 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); in bnad_pci_remove()
3799 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); in bnad_pci_remove()
3803 mutex_unlock(&bnad->conf_mutex); in bnad_pci_remove()
3806 kfree(bnad->regdata); in bnad_pci_remove()
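/*
 * A minimal sketch (not from the driver) of the timer lifecycle visible in
 * the probe/remove paths above: timer_setup() binds a callback, the callback
 * recovers its container with from_timer(), and del_timer_sync() in teardown
 * guarantees the handler is no longer running before the memory goes away.
 * my_ioc and my_ioc_timeout are illustrative names for the IOC timers above.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_ioc {
	struct timer_list ioc_timer;
	int state;
};

static void my_ioc_timeout(struct timer_list *t)
{
	struct my_ioc *ioc = from_timer(ioc, t, ioc_timer);

	/* Drive the state machine, then re-arm for the next tick. */
	ioc->state++;
	mod_timer(&ioc->ioc_timer, jiffies + HZ);
}

static void my_ioc_start(struct my_ioc *ioc)
{
	timer_setup(&ioc->ioc_timer, my_ioc_timeout, 0);
	mod_timer(&ioc->ioc_timer, jiffies + HZ);
}

static void my_ioc_stop(struct my_ioc *ioc)
{
	/* Must not return while my_ioc_timeout() can still be running. */
	del_timer_sync(&ioc->ioc_timer);
}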
3865 MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");