Lines matching "tx-queues-to-use"
2 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
10 * Redistribution and use in source and binary forms, with or
14 * - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
38 #include <linux/dma-mapping.h>
58 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
59 * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
77 * Period of the Tx buffer reclaim timer. This timer does not need to run
78 * frequently as Tx buffers are usually reclaimed by new Tx packets.
88 * Types of Tx queues in each queue set. Order here matters, do not change.
109 struct tx_sw_desc { /* SW state per Tx descriptor */
134 * Holds unmapping information for Tx packets that need deferred unmapping.
135 * This structure lives at skb->head and must be allocated by callers.
143 * Maps a number of flits to the number of Tx descriptors that can hold them.
146 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
148 * HW allows up to 4 descriptors to be combined into a WR.
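/*
 * Illustrative sketch only (not part of the listing above): the driver keeps
 * this mapping in a small lookup table, but the formula quoted in the comment
 * can be written directly as below, assuming the WR_FLITS constant used
 * elsewhere in this file.
 */
static inline unsigned int flits_to_desc_formula(unsigned int flits)
{
	return flits <= 2 ? 1 : 1 + (flits - 2) / (WR_FLITS - 1);
}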
183 * refill_rspq - replenish an SGE response queue
185 * @q: the response queue to replenish
186 * @credits: how many new responses to make available
189 * available to HW.
196 V_RSPQ(q->cntxt_id) | V_CREDITS(credits)); in refill_rspq()
200 * need_skb_unmap - does the platform need unmapping of sk_buffs?
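/*
 * Hedged sketch of the elided body: whether saved DMA addresses must be
 * unmapped is a compile-time property of the platform, keyed off
 * CONFIG_NEED_DMA_MAP_STATE (assumption; not copied from the listing).
 */
static inline int need_skb_unmap(void)
{
	return IS_ENABLED(CONFIG_NEED_DMA_MAP_STATE);
}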
215 * unmap_skb - unmap a packet main body and its page fragments
217 * @q: the Tx queue containing Tx descriptors for the packet
218 * @cidx: index of Tx descriptor
223 * to conserve space for metadata, the information necessary to unmap an
224 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
228 * the buffers held in the first Tx descriptor here, and we have enough
229 * information at this point to set the state for the next Tx descriptor.
231 * Note that it is possible to clean up the first descriptor of a packet
240 struct tx_sw_desc *d = &q->sdesc[cidx]; in unmap_skb()
241 int nfrags, frag_idx, curflit, j = d->addr_idx; in unmap_skb()
243 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit]; in unmap_skb()
244 frag_idx = d->fragidx; in unmap_skb()
247 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), in unmap_skb()
252 curflit = d->sflit + 1 + j; in unmap_skb()
253 nfrags = skb_shinfo(skb)->nr_frags; in unmap_skb()
256 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]), in unmap_skb()
257 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]), in unmap_skb()
268 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */ in unmap_skb()
269 d = cidx + 1 == q->size ? q->sdesc : d + 1; in unmap_skb()
270 d->fragidx = frag_idx; in unmap_skb()
271 d->addr_idx = j; in unmap_skb()
272 d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */ in unmap_skb()
277 * free_tx_desc - reclaims Tx descriptors and their buffers
279 * @q: the Tx queue to reclaim descriptors from
280 * @n: the number of descriptors to reclaim
282 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
283 * Tx buffers. Called with the Tx queue lock held.
289 struct pci_dev *pdev = adapter->pdev; in free_tx_desc()
290 unsigned int cidx = q->cidx; in free_tx_desc()
293 q->cntxt_id >= FW_TUNNEL_SGEEC_START; in free_tx_desc()
295 d = &q->sdesc[cidx]; in free_tx_desc()
296 while (n--) { in free_tx_desc()
297 if (d->skb) { /* an SGL is present */ in free_tx_desc()
299 unmap_skb(d->skb, q, cidx, pdev); in free_tx_desc()
300 if (d->eop) { in free_tx_desc()
301 dev_consume_skb_any(d->skb); in free_tx_desc()
302 d->skb = NULL; in free_tx_desc()
306 if (++cidx == q->size) { in free_tx_desc()
308 d = q->sdesc; in free_tx_desc()
311 q->cidx = cidx; in free_tx_desc()
315 * reclaim_completed_tx - reclaims completed Tx descriptors
317 * @q: the Tx queue to reclaim completed descriptors from
318 * @chunk: maximum number of descriptors to reclaim
320 * Reclaims Tx descriptors that the SGE has indicated it has processed,
321 * and frees the associated buffers if possible. Called with the Tx
328 unsigned int reclaim = q->processed - q->cleaned; in reclaim_completed_tx()
333 q->cleaned += reclaim; in reclaim_completed_tx()
334 q->in_use -= reclaim; in reclaim_completed_tx()
336 return q->processed - q->cleaned; in reclaim_completed_tx()
340 * should_restart_tx - are there enough resources to restart a Tx queue?
341 * @q: the Tx queue
343 * Checks if there are enough descriptors to restart a suspended Tx queue.
347 unsigned int r = q->processed - q->cleaned; in should_restart_tx()
349 return q->in_use - r < (q->size >> 1); in should_restart_tx()
355 if (q->use_pages && d->pg_chunk.page) { in clear_rx_desc()
356 (*d->pg_chunk.p_cnt)--; in clear_rx_desc()
357 if (!*d->pg_chunk.p_cnt) in clear_rx_desc()
359 d->pg_chunk.mapping, in clear_rx_desc()
360 q->alloc_size, PCI_DMA_FROMDEVICE); in clear_rx_desc()
362 put_page(d->pg_chunk.page); in clear_rx_desc()
363 d->pg_chunk.page = NULL; in clear_rx_desc()
366 q->buf_size, PCI_DMA_FROMDEVICE); in clear_rx_desc()
367 kfree_skb(d->skb); in clear_rx_desc()
368 d->skb = NULL; in clear_rx_desc()
373 * free_rx_bufs - free the Rx buffers on an SGE free list
375 * @q: the SGE free list to clean up
377 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
382 unsigned int cidx = q->cidx; in free_rx_bufs()
384 while (q->credits--) { in free_rx_bufs()
385 struct rx_sw_desc *d = &q->sdesc[cidx]; in free_rx_bufs()
389 if (++cidx == q->size) in free_rx_bufs()
393 if (q->pg_chunk.page) { in free_rx_bufs()
394 __free_pages(q->pg_chunk.page, q->order); in free_rx_bufs()
395 q->pg_chunk.page = NULL; in free_rx_bufs()
400 * add_one_rx_buf - add a packet buffer to a free-buffer list
403 * @d: the HW Rx descriptor to write
404 * @sd: the SW Rx descriptor to write
408 * Add a buffer of the given length to the supplied HW and SW Rx
419 return -ENOMEM; in add_one_rx_buf()
423 d->addr_lo = cpu_to_be32(mapping); in add_one_rx_buf()
424 d->addr_hi = cpu_to_be32((u64) mapping >> 32); in add_one_rx_buf()
426 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen)); in add_one_rx_buf()
427 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); in add_one_rx_buf()
434 d->addr_lo = cpu_to_be32(mapping); in add_one_rx_chunk()
435 d->addr_hi = cpu_to_be32((u64) mapping >> 32); in add_one_rx_chunk()
437 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen)); in add_one_rx_chunk()
438 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); in add_one_rx_chunk()
446 if (!q->pg_chunk.page) { in alloc_pg_chunk()
449 q->pg_chunk.page = alloc_pages(gfp, order); in alloc_pg_chunk()
450 if (unlikely(!q->pg_chunk.page)) in alloc_pg_chunk()
451 return -ENOMEM; in alloc_pg_chunk()
452 q->pg_chunk.va = page_address(q->pg_chunk.page); in alloc_pg_chunk()
453 q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) - in alloc_pg_chunk()
455 q->pg_chunk.offset = 0; in alloc_pg_chunk()
456 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, in alloc_pg_chunk()
457 0, q->alloc_size, PCI_DMA_FROMDEVICE); in alloc_pg_chunk()
458 if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) { in alloc_pg_chunk()
459 __free_pages(q->pg_chunk.page, order); in alloc_pg_chunk()
460 q->pg_chunk.page = NULL; in alloc_pg_chunk()
461 return -EIO; in alloc_pg_chunk()
463 q->pg_chunk.mapping = mapping; in alloc_pg_chunk()
465 sd->pg_chunk = q->pg_chunk; in alloc_pg_chunk()
467 prefetch(sd->pg_chunk.p_cnt); in alloc_pg_chunk()
469 q->pg_chunk.offset += q->buf_size; in alloc_pg_chunk()
470 if (q->pg_chunk.offset == (PAGE_SIZE << order)) in alloc_pg_chunk()
471 q->pg_chunk.page = NULL; in alloc_pg_chunk()
473 q->pg_chunk.va += q->buf_size; in alloc_pg_chunk()
474 get_page(q->pg_chunk.page); in alloc_pg_chunk()
477 if (sd->pg_chunk.offset == 0) in alloc_pg_chunk()
478 *sd->pg_chunk.p_cnt = 1; in alloc_pg_chunk()
480 *sd->pg_chunk.p_cnt += 1; in alloc_pg_chunk()
487 if (q->pend_cred >= q->credits / 4) { in ring_fl_db()
488 q->pend_cred = 0; in ring_fl_db()
490 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); in ring_fl_db()
495 * refill_fl - refill an SGE free-buffer list
497 * @q: the free-list to refill
498 * @n: the number of new buffers to allocate
501 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
507 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; in refill_fl()
508 struct rx_desc *d = &q->desc[q->pidx]; in refill_fl()
511 while (n--) { in refill_fl()
515 if (q->use_pages) { in refill_fl()
517 q->order))) { in refill_fl()
518 nomem: q->alloc_failed++; in refill_fl()
521 mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset; in refill_fl()
524 add_one_rx_chunk(mapping, d, q->gen); in refill_fl()
525 pci_dma_sync_single_for_device(adap->pdev, mapping, in refill_fl()
526 q->buf_size - SGE_PG_RSVD, in refill_fl()
531 struct sk_buff *skb = alloc_skb(q->buf_size, gfp); in refill_fl()
535 sd->skb = skb; in refill_fl()
536 buf_start = skb->data; in refill_fl()
537 err = add_one_rx_buf(buf_start, q->buf_size, d, sd, in refill_fl()
538 q->gen, adap->pdev); in refill_fl()
540 clear_rx_desc(adap->pdev, q, sd); in refill_fl()
547 if (++q->pidx == q->size) { in refill_fl()
548 q->pidx = 0; in refill_fl()
549 q->gen ^= 1; in refill_fl()
550 sd = q->sdesc; in refill_fl()
551 d = q->desc; in refill_fl()
556 q->credits += count; in refill_fl()
557 q->pend_cred += count; in refill_fl()
565 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits), in __refill_fl()
570 * recycle_rx_buf - recycle a receive buffer
573 * @idx: index of buffer to recycle
581 struct rx_desc *from = &q->desc[idx]; in recycle_rx_buf()
582 struct rx_desc *to = &q->desc[q->pidx]; in recycle_rx_buf() local
584 q->sdesc[q->pidx] = q->sdesc[idx]; in recycle_rx_buf()
585 to->addr_lo = from->addr_lo; /* already big endian */ in recycle_rx_buf()
586 to->addr_hi = from->addr_hi; /* likewise */ in recycle_rx_buf()
588 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen)); in recycle_rx_buf()
589 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen)); in recycle_rx_buf()
591 if (++q->pidx == q->size) { in recycle_rx_buf()
592 q->pidx = 0; in recycle_rx_buf()
593 q->gen ^= 1; in recycle_rx_buf()
596 q->credits++; in recycle_rx_buf()
597 q->pend_cred++; in recycle_rx_buf()
602 * alloc_ring - allocate resources for an SGE descriptor ring
610 * Allocates resources for an SGE descriptor ring, such as Tx queues,
611 * free buffer lists, or response queues. Each SGE ring requires
623 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); in alloc_ring()
631 dma_free_coherent(&pdev->dev, len, p, *phys); in alloc_ring()
640 * t3_reset_qset - reset a sge qset
649 if (q->adap && in t3_reset_qset()
650 !(q->adap->flags & NAPI_INIT)) { in t3_reset_qset()
655 q->adap = NULL; in t3_reset_qset()
656 memset(&q->rspq, 0, sizeof(q->rspq)); in t3_reset_qset()
657 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET); in t3_reset_qset()
658 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); in t3_reset_qset()
659 q->txq_stopped = 0; in t3_reset_qset()
660 q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */ in t3_reset_qset()
661 q->rx_reclaim_timer.function = NULL; in t3_reset_qset()
662 q->nomem = 0; in t3_reset_qset()
663 napi_free_frags(&q->napi); in t3_reset_qset()
668 * free_qset - free the resources of an SGE queue set
673 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
674 * queue set must be quiesced prior to calling this.
679 struct pci_dev *pdev = adapter->pdev; in t3_free_qset()
682 if (q->fl[i].desc) { in t3_free_qset()
683 spin_lock_irq(&adapter->sge.reg_lock); in t3_free_qset()
684 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id); in t3_free_qset()
685 spin_unlock_irq(&adapter->sge.reg_lock); in t3_free_qset()
686 free_rx_bufs(pdev, &q->fl[i]); in t3_free_qset()
687 kfree(q->fl[i].sdesc); in t3_free_qset()
688 dma_free_coherent(&pdev->dev, in t3_free_qset()
689 q->fl[i].size * in t3_free_qset()
690 sizeof(struct rx_desc), q->fl[i].desc, in t3_free_qset()
691 q->fl[i].phys_addr); in t3_free_qset()
695 if (q->txq[i].desc) { in t3_free_qset()
696 spin_lock_irq(&adapter->sge.reg_lock); in t3_free_qset()
697 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0); in t3_free_qset()
698 spin_unlock_irq(&adapter->sge.reg_lock); in t3_free_qset()
699 if (q->txq[i].sdesc) { in t3_free_qset()
700 free_tx_desc(adapter, &q->txq[i], in t3_free_qset()
701 q->txq[i].in_use); in t3_free_qset()
702 kfree(q->txq[i].sdesc); in t3_free_qset()
704 dma_free_coherent(&pdev->dev, in t3_free_qset()
705 q->txq[i].size * in t3_free_qset()
707 q->txq[i].desc, q->txq[i].phys_addr); in t3_free_qset()
708 __skb_queue_purge(&q->txq[i].sendq); in t3_free_qset()
711 if (q->rspq.desc) { in t3_free_qset()
712 spin_lock_irq(&adapter->sge.reg_lock); in t3_free_qset()
713 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id); in t3_free_qset()
714 spin_unlock_irq(&adapter->sge.reg_lock); in t3_free_qset()
715 dma_free_coherent(&pdev->dev, in t3_free_qset()
716 q->rspq.size * sizeof(struct rsp_desc), in t3_free_qset()
717 q->rspq.desc, q->rspq.phys_addr); in t3_free_qset()
724 * init_qset_cntxt - initialize an SGE queue set context info
728 * Initializes the TIDs and context ids for the queues of a queue set.
732 qs->rspq.cntxt_id = id; in init_qset_cntxt()
733 qs->fl[0].cntxt_id = 2 * id; in init_qset_cntxt()
734 qs->fl[1].cntxt_id = 2 * id + 1; in init_qset_cntxt()
735 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id; in init_qset_cntxt()
736 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id; in init_qset_cntxt()
737 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id; in init_qset_cntxt()
738 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id; in init_qset_cntxt()
739 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id; in init_qset_cntxt()
743 * sgl_len - calculates the size of an SGL of the given capacity
756 * flits_to_desc - returns the number of Tx descriptors for the given flits
759 * Calculates the number of Tx descriptors needed for the supplied number
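/*
 * Sketch of the SGL sizing referred to above (the bodies are elided from this
 * listing): each struct sg_ent packs two address/length pairs into 3 flits,
 * so n entries take 3 * (n / 2) flits plus 2 more for an odd trailing entry.
 */
static inline unsigned int sgl_len_sketch(unsigned int n)
{
	return (3 * n) / 2 + (n & 1);	/* equivalent to 3 * (n / 2) + 2 * (n & 1) */
}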
769 * get_packet - return the next ingress packet buffer from a free list
777 * original buffer, otherwise we use the original buffer itself. If a
780 * threshold and the packet is too big to copy, or (b) the packet should
787 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; in get_packet()
789 prefetch(sd->skb->data); in get_packet()
790 fl->credits--; in get_packet()
796 pci_dma_sync_single_for_cpu(adap->pdev, in get_packet()
799 memcpy(skb->data, sd->skb->data, len); in get_packet()
800 pci_dma_sync_single_for_device(adap->pdev, in get_packet()
806 recycle_rx_buf(adap, fl, fl->cidx); in get_packet()
810 if (unlikely(fl->credits < drop_thres) && in get_packet()
811 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1), in get_packet()
816 pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr), in get_packet()
817 fl->buf_size, PCI_DMA_FROMDEVICE); in get_packet()
818 skb = sd->skb; in get_packet()
825 * get_packet_pg - return the next ingress packet buffer from a free list
834 * otherwise we attach the original buffer as a page fragment to a fresh
837 * under the threshold and the packet is too big to copy, or (b) there's
840 * Note: this function is similar to @get_packet but deals with Rx buffers
848 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; in get_packet_pg()
852 newskb = skb = q->pg_skb; in get_packet_pg()
857 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, in get_packet_pg()
859 memcpy(newskb->data, sd->pg_chunk.va, len); in get_packet_pg()
860 pci_dma_sync_single_for_device(adap->pdev, dma_addr, in get_packet_pg()
866 fl->credits--; in get_packet_pg()
867 recycle_rx_buf(adap, fl, fl->cidx); in get_packet_pg()
868 q->rx_recycle_buf++; in get_packet_pg()
872 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres))) in get_packet_pg()
875 prefetch(sd->pg_chunk.p_cnt); in get_packet_pg()
886 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, in get_packet_pg()
888 (*sd->pg_chunk.p_cnt)--; in get_packet_pg()
889 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page) in get_packet_pg()
890 pci_unmap_page(adap->pdev, in get_packet_pg()
891 sd->pg_chunk.mapping, in get_packet_pg()
892 fl->alloc_size, in get_packet_pg()
896 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN); in get_packet_pg()
897 skb_fill_page_desc(newskb, 0, sd->pg_chunk.page, in get_packet_pg()
898 sd->pg_chunk.offset + SGE_RX_PULL_LEN, in get_packet_pg()
899 len - SGE_RX_PULL_LEN); in get_packet_pg()
900 newskb->len = len; in get_packet_pg()
901 newskb->data_len = len - SGE_RX_PULL_LEN; in get_packet_pg()
902 newskb->truesize += newskb->data_len; in get_packet_pg()
904 skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags, in get_packet_pg()
905 sd->pg_chunk.page, in get_packet_pg()
906 sd->pg_chunk.offset, len); in get_packet_pg()
907 newskb->len += len; in get_packet_pg()
908 newskb->data_len += len; in get_packet_pg()
909 newskb->truesize += len; in get_packet_pg()
912 fl->credits--; in get_packet_pg()
914 * We do not refill FLs here; we let the caller do it to overlap a in get_packet_pg()
921 * get_imm_packet - return the next ingress packet buffer from a response
932 skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE); in get_imm_packet()
938 * calc_tx_descs - calculate the number of Tx descriptors for a packet
941 * Returns the number of Tx descriptors needed for the given Ethernet
948 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt)) in calc_tx_descs()
951 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2; in calc_tx_descs()
952 if (skb_shinfo(skb)->gso_size) in calc_tx_descs()
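/*
 * Hedged reassembly of the calc_tx_descs() fragments above, with the elided
 * lines filled in from the logic the surrounding comments describe (immediate
 * data needs one descriptor, TSO adds one flit for the LSO header); not
 * guaranteed to match the driver verbatim.
 */
static unsigned int calc_tx_descs_sketch(const struct sk_buff *skb)
{
	unsigned int flits;

	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
		return 1;		/* fits as immediate data */

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
	if (skb_shinfo(skb)->gso_size)
		flits++;		/* extra flit for LSO information */
	return flits_to_desc(flits);
}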
957 /* map_skb - map a packet main body and its page fragments
960 * @addr: placeholder to save the mapped addresses
971 *addr = pci_map_single(pdev, skb->data, skb_headlen(skb), in map_skb()
979 end = &si->frags[si->nr_frags]; in map_skb()
981 for (fp = si->frags; fp < end; fp++) { in map_skb()
982 *addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp), in map_skb()
991 while (fp-- > si->frags) in map_skb()
992 dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp), in map_skb()
995 pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE); in map_skb()
997 return -ENOMEM; in map_skb()
1001 * write_sgl - populate a scatter/gather list for a packet
1003 * @sgp: the SGL to populate
1004 * @start: start address of skb main body data to include in the SGL
1005 * @len: length of skb main body data to include in the SGL
1009 * and returns the SGL size in 8-byte words. The caller must size the SGL
1019 sgp->len[0] = cpu_to_be32(len); in write_sgl()
1020 sgp->addr[j++] = cpu_to_be64(addr[k++]); in write_sgl()
1023 nfrags = skb_shinfo(skb)->nr_frags; in write_sgl()
1025 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in write_sgl()
1027 sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); in write_sgl()
1028 sgp->addr[j] = cpu_to_be64(addr[k++]); in write_sgl()
1034 sgp->len[j] = 0; in write_sgl()
1039 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
1041 * @q: the Tx queue
1043 * Ring the doorbell if a Tx queue is asleep. There is a natural race,
1044 * where the HW is going to sleep just after we checked, however,
1045 * then the interrupt handler will detect the outstanding TX packet
1053 clear_bit(TXQ_LAST_PKT_DB, &q->flags); in check_ring_tx_db()
1054 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) { in check_ring_tx_db()
1055 set_bit(TXQ_LAST_PKT_DB, &q->flags); in check_ring_tx_db()
1057 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in check_ring_tx_db()
1062 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in check_ring_tx_db()
1069 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen); in wr_gen2()
1074 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
1075 * @ndesc: number of Tx descriptors spanned by the SGL
1076 * @skb: the packet corresponding to the WR
1077 * @d: first Tx descriptor to be written
1079 * @q: the SGE Tx queue
1081 * @flits: number of flits to the start of the SGL in the first descriptor
1083 * @gen: the Tx descriptor generation
1088 * small enough to fit into one Tx descriptor it has already been written
1089 * and we just need to write the WR header. Otherwise we distribute the
1101 struct tx_sw_desc *sd = &q->sdesc[pidx]; in write_wr_hdr_sgl()
1103 sd->skb = skb; in write_wr_hdr_sgl()
1105 sd->fragidx = 0; in write_wr_hdr_sgl()
1106 sd->addr_idx = 0; in write_wr_hdr_sgl()
1107 sd->sflit = flits; in write_wr_hdr_sgl()
1111 sd->eop = 1; in write_wr_hdr_sgl()
1112 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) | in write_wr_hdr_sgl()
1115 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) | in write_wr_hdr_sgl()
1123 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) | in write_wr_hdr_sgl()
1127 unsigned int avail = WR_FLITS - flits; in write_wr_hdr_sgl()
1131 memcpy(&d->flit[flits], fp, avail * sizeof(*fp)); in write_wr_hdr_sgl()
1132 sgl_flits -= avail; in write_wr_hdr_sgl()
1133 ndesc--; in write_wr_hdr_sgl()
1139 sd->eop = 0; in write_wr_hdr_sgl()
1141 if (++pidx == q->size) { in write_wr_hdr_sgl()
1144 d = q->desc; in write_wr_hdr_sgl()
1145 sd = q->sdesc; in write_wr_hdr_sgl()
1148 sd->skb = skb; in write_wr_hdr_sgl()
1150 wrp->wr_hi = htonl(V_WR_DATATYPE(1) | in write_wr_hdr_sgl()
1152 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS, in write_wr_hdr_sgl()
1158 sd->eop = 1; in write_wr_hdr_sgl()
1159 wrp->wr_hi |= htonl(F_WR_EOP); in write_wr_hdr_sgl()
1161 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo; in write_wr_hdr_sgl()
1168 * write_tx_pkt_wr - write a TX_PKT work request
1170 * @skb: the packet to send
1172 * @pidx: index of the first Tx descriptor to write
1173 * @gen: the generation value to use
1174 * @q: the Tx queue
1176 * @compl: the value of the COMPL bit to use
1179 * Generate a TX_PKT work request to send the supplied packet.
1189 struct tx_desc *d = &q->desc[pidx]; in write_tx_pkt_wr()
1192 cpl->len = htonl(skb->len); in write_tx_pkt_wr()
1193 cntrl = V_TXPKT_INTF(pi->port_id); in write_tx_pkt_wr()
1198 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size); in write_tx_pkt_wr()
1203 d->flit[2] = 0; in write_tx_pkt_wr()
1205 hdr->cntrl = htonl(cntrl); in write_tx_pkt_wr()
1209 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) | in write_tx_pkt_wr()
1210 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff); in write_tx_pkt_wr()
1211 hdr->lso_info = htonl(tso_info); in write_tx_pkt_wr()
1216 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL); in write_tx_pkt_wr()
1217 cpl->cntrl = htonl(cntrl); in write_tx_pkt_wr()
1219 if (skb->len <= WR_LEN - sizeof(*cpl)) { in write_tx_pkt_wr()
1220 q->sdesc[pidx].skb = NULL; in write_tx_pkt_wr()
1221 if (!skb->data_len) in write_tx_pkt_wr()
1222 skb_copy_from_linear_data(skb, &d->flit[2], in write_tx_pkt_wr()
1223 skb->len); in write_tx_pkt_wr()
1225 skb_copy_bits(skb, 0, &d->flit[2], skb->len); in write_tx_pkt_wr()
1227 flits = (skb->len + 7) / 8 + 2; in write_tx_pkt_wr()
1228 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) | in write_tx_pkt_wr()
1232 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) | in write_tx_pkt_wr()
1233 V_WR_TID(q->token)); in write_tx_pkt_wr()
1242 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; in write_tx_pkt_wr()
1243 sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr); in write_tx_pkt_wr()
1247 htonl(V_WR_TID(q->token))); in write_tx_pkt_wr()
1254 set_bit(TXQ_ETH, &qs->txq_stopped); in t3_stop_tx_queue()
1255 q->stops++; in t3_stop_tx_queue()
1259 * eth_xmit - add a packet to the Ethernet Tx queue
1263 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
1270 struct adapter *adap = pi->adapter; in t3_eth_xmit()
1280 if (unlikely(skb->len < ETH_HLEN)) { in t3_eth_xmit()
1286 qs = &pi->qs[qidx]; in t3_eth_xmit()
1287 q = &qs->txq[TXQ_ETH]; in t3_eth_xmit()
1292 credits = q->size - q->in_use; in t3_eth_xmit()
1297 dev_err(&adap->pdev->dev, in t3_eth_xmit()
1298 "%s: Tx ring %u full while queue awake!\n", in t3_eth_xmit()
1299 dev->name, q->cntxt_id & 7); in t3_eth_xmit()
1304 if (skb->len > (WR_LEN - sizeof(struct cpl_tx_pkt))) { in t3_eth_xmit()
1305 if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) { in t3_eth_xmit()
1311 q->in_use += ndesc; in t3_eth_xmit()
1312 if (unlikely(credits - ndesc < q->stop_thres)) { in t3_eth_xmit()
1316 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) { in t3_eth_xmit()
1317 q->restarts++; in t3_eth_xmit()
1322 gen = q->gen; in t3_eth_xmit()
1323 q->unacked += ndesc; in t3_eth_xmit()
1324 compl = (q->unacked & 8) << (S_WR_COMPL - 3); in t3_eth_xmit()
1325 q->unacked &= 7; in t3_eth_xmit()
1326 pidx = q->pidx; in t3_eth_xmit()
1327 q->pidx += ndesc; in t3_eth_xmit()
1328 if (q->pidx >= q->size) { in t3_eth_xmit()
1329 q->pidx -= q->size; in t3_eth_xmit()
1330 q->gen ^= 1; in t3_eth_xmit()
1334 if (skb->ip_summed == CHECKSUM_PARTIAL) in t3_eth_xmit()
1335 qs->port_stats[SGE_PSTAT_TX_CSUM]++; in t3_eth_xmit()
1336 if (skb_shinfo(skb)->gso_size) in t3_eth_xmit()
1337 qs->port_stats[SGE_PSTAT_TSO]++; in t3_eth_xmit()
1339 qs->port_stats[SGE_PSTAT_VLANINS]++; in t3_eth_xmit()
1342 * We do not use Tx completion interrupts to free DMAd Tx packets. in t3_eth_xmit()
1343 * This is good for performance but means that we rely on new Tx in t3_eth_xmit()
1344 * packets arriving to run the destructors of completed packets, in t3_eth_xmit()
1345 * which open up space in their sockets' send queues. Sometimes in t3_eth_xmit()
1346 * we do not get such new packets causing Tx to stall. A single in t3_eth_xmit()
1349 * but it doesn't run often enough (nor do we want it to) to prevent in t3_eth_xmit()
1350 * lengthy stalls. A solution to this problem is to run the in t3_eth_xmit()
1352 * A downside is that we lie to socket memory accounting, but the amount in t3_eth_xmit()
1353 * of extra memory is reasonable (limited by the number of Tx in t3_eth_xmit()
1356 * acks to really free up the data the extra memory is even less. in t3_eth_xmit()
1359 * good thing. We also run them without holding our Tx queue lock, in t3_eth_xmit()
1363 * to make sure it doesn't complete and get freed prematurely. in t3_eth_xmit()
1374 * write_imm - write a packet into a Tx descriptor as immediate data
1375 * @d: the Tx descriptor to write
1377 * @len: the length of packet data to write as immediate data
1378 * @gen: the generation bit value to write
1380 * Writes a packet as immediate data into a Tx descriptor. The packet
1388 struct work_request_hdr *from = (struct work_request_hdr *)skb->data; in write_imm()
1389 struct work_request_hdr *to = (struct work_request_hdr *)d; in write_imm() local
1391 if (likely(!skb->data_len)) in write_imm()
1392 memcpy(&to[1], &from[1], len - sizeof(*from)); in write_imm()
1394 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from)); in write_imm()
1396 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP | in write_imm()
1399 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) | in write_imm()
1406 * check_desc_avail - check descriptor availability on a send queue
1410 * @ndesc: the number of Tx descriptors needed
1411 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1413 * Checks if the requested number of Tx descriptors is available on an
1416 * Must be called with the Tx queue locked.
1420 * needs to retry because there weren't enough descriptors at the
1427 if (unlikely(!skb_queue_empty(&q->sendq))) { in check_desc_avail()
1428 addq_exit:__skb_queue_tail(&q->sendq, skb); in check_desc_avail()
1431 if (unlikely(q->size - q->in_use < ndesc)) { in check_desc_avail()
1434 set_bit(qid, &qs->txq_stopped); in check_desc_avail()
1438 test_and_clear_bit(qid, &qs->txq_stopped)) in check_desc_avail()
1441 q->stops++; in check_desc_avail()
1448 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1449 * @q: the SGE control Tx queue
1451 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1452 * that send only immediate data (presently just the control queues) and
1453 * thus do not have any sk_buffs to release.
1457 unsigned int reclaim = q->processed - q->cleaned; in reclaim_completed_tx_imm()
1459 q->in_use -= reclaim; in reclaim_completed_tx_imm()
1460 q->cleaned += reclaim; in reclaim_completed_tx_imm()
1465 return skb->len <= WR_LEN; in immediate()
1469 * ctrl_xmit - send a packet through an SGE control Tx queue
1474 * Send a packet through an SGE control Tx queue. Packets sent through
1475 * a control queue must fit entirely as immediate data in a single Tx
1482 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data; in ctrl_xmit()
1490 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP); in ctrl_xmit()
1491 wrp->wr_lo = htonl(V_WR_TID(q->token)); in ctrl_xmit()
1493 spin_lock(&q->lock); in ctrl_xmit()
1499 spin_unlock(&q->lock); in ctrl_xmit()
1505 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); in ctrl_xmit()
1507 q->in_use++; in ctrl_xmit()
1508 if (++q->pidx >= q->size) { in ctrl_xmit()
1509 q->pidx = 0; in ctrl_xmit()
1510 q->gen ^= 1; in ctrl_xmit()
1512 spin_unlock(&q->lock); in ctrl_xmit()
1515 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in ctrl_xmit()
1520 * restart_ctrlq - restart a suspended control queue
1521 * @t: pointer to the tasklet associated with this handler
1523 * Resumes transmission on a suspended Tx control queue.
1529 struct sge_txq *q = &qs->txq[TXQ_CTRL]; in restart_ctrlq()
1531 spin_lock(&q->lock); in restart_ctrlq()
1534 while (q->in_use < q->size && in restart_ctrlq()
1535 (skb = __skb_dequeue(&q->sendq)) != NULL) { in restart_ctrlq()
1537 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); in restart_ctrlq()
1539 if (++q->pidx >= q->size) { in restart_ctrlq()
1540 q->pidx = 0; in restart_ctrlq()
1541 q->gen ^= 1; in restart_ctrlq()
1543 q->in_use++; in restart_ctrlq()
1546 if (!skb_queue_empty(&q->sendq)) { in restart_ctrlq()
1547 set_bit(TXQ_CTRL, &qs->txq_stopped); in restart_ctrlq()
1551 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) in restart_ctrlq()
1553 q->stops++; in restart_ctrlq()
1556 spin_unlock(&q->lock); in restart_ctrlq()
1558 t3_write_reg(qs->adap, A_SG_KDOORBELL, in restart_ctrlq()
1559 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in restart_ctrlq()
1569 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb); in t3_mgmt_tx()
1576 * deferred_unmap_destructor - unmap a packet when it is freed
1579 * This is the packet destructor used for Tx packets that need to remain
1580 * mapped until they are freed rather than until their Tx descriptors are
1590 dui = (struct deferred_unmap_info *)skb->head; in deferred_unmap_destructor()
1591 p = dui->addr; in deferred_unmap_destructor()
1593 if (skb_tail_pointer(skb) - skb_transport_header(skb)) in deferred_unmap_destructor()
1594 pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) - in deferred_unmap_destructor()
1598 for (i = 0; i < si->nr_frags; i++) in deferred_unmap_destructor()
1599 pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]), in deferred_unmap_destructor()
1609 dui = (struct deferred_unmap_info *)skb->head; in setup_deferred_unmapping()
1610 dui->pdev = pdev; in setup_deferred_unmapping()
1611 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) { in setup_deferred_unmapping()
1612 *p++ = be64_to_cpu(sgl->addr[0]); in setup_deferred_unmapping()
1613 *p++ = be64_to_cpu(sgl->addr[1]); in setup_deferred_unmapping()
1616 *p = be64_to_cpu(sgl->addr[0]); in setup_deferred_unmapping()
1620 * write_ofld_wr - write an offload work request
1622 * @skb: the packet to send
1623 * @q: the Tx queue
1624 * @pidx: index of the first Tx descriptor to write
1625 * @gen: the generation value to use
1629 * Write an offload work request to send the supplied packet. The packet
1640 struct tx_desc *d = &q->desc[pidx]; in write_ofld_wr()
1643 q->sdesc[pidx].skb = NULL; in write_ofld_wr()
1644 write_imm(d, skb, skb->len, gen); in write_ofld_wr()
1650 from = (struct work_request_hdr *)skb->data; in write_ofld_wr()
1651 memcpy(&d->flit[1], &from[1], in write_ofld_wr()
1652 skb_transport_offset(skb) - sizeof(*from)); in write_ofld_wr()
1655 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; in write_ofld_wr()
1657 skb_tail_pointer(skb) - skb_transport_header(skb), in write_ofld_wr()
1660 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); in write_ofld_wr()
1661 skb->destructor = deferred_unmap_destructor; in write_ofld_wr()
1665 gen, from->wr_hi, from->wr_lo); in write_ofld_wr()
1669 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1672 * Returns the number of Tx descriptors needed for the given offload
1679 if (skb->len <= WR_LEN) in calc_tx_descs_ofld()
1683 cnt = skb_shinfo(skb)->nr_frags; in calc_tx_descs_ofld()
1690 * ofld_xmit - send a packet through an offload queue
1692 * @q: the Tx offload queue
1703 spin_lock(&q->lock); in ofld_xmit()
1709 skb->priority = ndesc; /* save for restart */ in ofld_xmit()
1710 spin_unlock(&q->lock); in ofld_xmit()
1717 map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) { in ofld_xmit()
1718 spin_unlock(&q->lock); in ofld_xmit()
1722 gen = q->gen; in ofld_xmit()
1723 q->in_use += ndesc; in ofld_xmit()
1724 pidx = q->pidx; in ofld_xmit()
1725 q->pidx += ndesc; in ofld_xmit()
1726 if (q->pidx >= q->size) { in ofld_xmit()
1727 q->pidx -= q->size; in ofld_xmit()
1728 q->gen ^= 1; in ofld_xmit()
1730 spin_unlock(&q->lock); in ofld_xmit()
1732 write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); in ofld_xmit()
1738 * restart_offloadq - restart a suspended offload queue
1739 * @t: pointer to the tasklet associated with this handler
1741 * Resumes transmission on a suspended Tx offload queue.
1747 struct sge_txq *q = &qs->txq[TXQ_OFLD]; in restart_offloadq()
1748 const struct port_info *pi = netdev_priv(qs->netdev); in restart_offloadq()
1749 struct adapter *adap = pi->adapter; in restart_offloadq()
1752 spin_lock(&q->lock); in restart_offloadq()
1755 while ((skb = skb_peek(&q->sendq)) != NULL) { in restart_offloadq()
1757 unsigned int ndesc = skb->priority; in restart_offloadq()
1759 if (unlikely(q->size - q->in_use < ndesc)) { in restart_offloadq()
1760 set_bit(TXQ_OFLD, &qs->txq_stopped); in restart_offloadq()
1764 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) in restart_offloadq()
1766 q->stops++; in restart_offloadq()
1771 map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) in restart_offloadq()
1774 gen = q->gen; in restart_offloadq()
1775 q->in_use += ndesc; in restart_offloadq()
1776 pidx = q->pidx; in restart_offloadq()
1777 q->pidx += ndesc; in restart_offloadq()
1779 if (q->pidx >= q->size) { in restart_offloadq()
1780 q->pidx -= q->size; in restart_offloadq()
1781 q->gen ^= 1; in restart_offloadq()
1783 __skb_unlink(skb, &q->sendq); in restart_offloadq()
1784 spin_unlock(&q->lock); in restart_offloadq()
1787 (dma_addr_t *)skb->head); in restart_offloadq()
1788 spin_lock(&q->lock); in restart_offloadq()
1790 spin_unlock(&q->lock); in restart_offloadq()
1793 set_bit(TXQ_RUNNING, &q->flags); in restart_offloadq()
1794 set_bit(TXQ_LAST_PKT_DB, &q->flags); in restart_offloadq()
1799 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in restart_offloadq()
1803 * queue_set - return the queue set a packet should use
1806 * Maps a packet to the SGE queue set it should use. The desired queue
1807 * set is carried in bits 1-3 in the packet's priority.
1811 return skb->priority >> 1; in queue_set()
1815 * is_ctrl_pkt - return whether an offload packet is a control packet
1818 * Determines whether an offload packet should use an OFLD or a CTRL
1819 * Tx queue. This is indicated by bit 0 in the packet's priority.
1823 return skb->priority & 1; in is_ctrl_pkt()
1827 * t3_offload_tx - send an offload packet
1828 * @tdev: the offload device to send to
1831 * Sends an offload packet. We use the packet priority to select the
1832 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1833 * should be sent as regular or control, bits 1-3 select the queue set.
1838 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)]; in t3_offload_tx()
1841 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb); in t3_offload_tx()
1843 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb); in t3_offload_tx()
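/*
 * Hypothetical caller-side sketch (not from this file): encoding the queue
 * selection described above into skb->priority before calling t3_offload_tx().
 */
static inline void encode_offload_priority(struct sk_buff *skb,
					   unsigned int qset, bool ctrl)
{
	/* bit 0: CTRL vs OFLD queue, bits 1-3: queue set index */
	skb->priority = (qset << 1) | (ctrl ? 1 : 0);
}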
1847 * offload_enqueue - add an offload packet to an SGE offload receive queue
1851 * Add a new offload packet to an SGE response queue's offload packet
1853 * softirq to process the queue.
1857 int was_empty = skb_queue_empty(&q->rx_queue); in offload_enqueue()
1859 __skb_queue_tail(&q->rx_queue, skb); in offload_enqueue()
1864 napi_schedule(&qs->napi); in offload_enqueue()
1869 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1875 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1882 q->offload_bundles++; in deliver_partial_bundle()
1883 tdev->recv(tdev, skbs, n); in deliver_partial_bundle()
1888 * ofld_poll - NAPI handler for offload packets in interrupt mode
1893 * by the hard interrupt handler, i.e., when it's operating in non-polling
1895 * receive handler. Batches need to be of modest size as we do prefetches
1901 struct sge_rspq *q = &qs->rspq; in ofld_poll()
1902 struct adapter *adapter = qs->adap; in ofld_poll()
1910 spin_lock_irq(&q->lock); in ofld_poll()
1912 skb_queue_splice_init(&q->rx_queue, &queue); in ofld_poll()
1915 spin_unlock_irq(&q->lock); in ofld_poll()
1918 spin_unlock_irq(&q->lock); in ofld_poll()
1927 prefetch(skb->data); in ofld_poll()
1930 q->offload_bundles++; in ofld_poll()
1931 adapter->tdev.recv(&adapter->tdev, skbs, in ofld_poll()
1938 spin_lock_irq(&q->lock); in ofld_poll()
1939 skb_queue_splice(&queue, &q->rx_queue); in ofld_poll()
1940 spin_unlock_irq(&q->lock); in ofld_poll()
1942 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered); in ofld_poll()
1949 * rx_offload - process a received offload packet
1956 * Process an ingress offload packet and add it to the offload ingress
1967 if (rq->polling) { in rx_offload()
1970 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE); in rx_offload()
1972 rq->offload_bundles++; in rx_offload()
1981 * restart_tx - check whether to restart suspended Tx queues
1982 * @qs: the queue set to resume
1984 * Restarts suspended Tx queues of an SGE queue set if they have enough
1985 * free resources to resume operation.
1989 if (test_bit(TXQ_ETH, &qs->txq_stopped) && in restart_tx()
1990 should_restart_tx(&qs->txq[TXQ_ETH]) && in restart_tx()
1991 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) { in restart_tx()
1992 qs->txq[TXQ_ETH].restarts++; in restart_tx()
1993 if (netif_running(qs->netdev)) in restart_tx()
1994 netif_tx_wake_queue(qs->tx_q); in restart_tx()
1997 if (test_bit(TXQ_OFLD, &qs->txq_stopped) && in restart_tx()
1998 should_restart_tx(&qs->txq[TXQ_OFLD]) && in restart_tx()
1999 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) { in restart_tx()
2000 qs->txq[TXQ_OFLD].restarts++; in restart_tx()
2001 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk); in restart_tx()
2003 if (test_bit(TXQ_CTRL, &qs->txq_stopped) && in restart_tx()
2004 should_restart_tx(&qs->txq[TXQ_CTRL]) && in restart_tx()
2005 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) { in restart_tx()
2006 qs->txq[TXQ_CTRL].restarts++; in restart_tx()
2007 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk); in restart_tx()
2012 * cxgb3_arp_process - process an ARP request probing a private IP address
2017 * dedicated to iSCSI, generate an ARP reply if so.
2021 struct net_device *dev = skb->dev; in cxgb3_arp_process()
2033 if (arp->ar_op != htons(ARPOP_REQUEST)) in cxgb3_arp_process()
2038 arp_ptr += dev->addr_len; in cxgb3_arp_process()
2041 arp_ptr += dev->addr_len; in cxgb3_arp_process()
2044 if (tip != pi->iscsi_ipv4addr) in cxgb3_arp_process()
2048 pi->iscsic.mac_addr, sha); in cxgb3_arp_process()
2054 return skb->protocol == htons(ETH_P_ARP); in is_arp()
2065 if (pi->iscsic.recv) in cxgb3_process_iscsi_prov_pack()
2066 pi->iscsic.recv(pi, skb); in cxgb3_process_iscsi_prov_pack()
2071 * rx_eth - process an ingress ethernet packet
2078 * Process an ingress Ethernet packet and deliver it to the stack.
2085 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad); in rx_eth()
2090 skb->protocol = eth_type_trans(skb, adap->port[p->iff]); in rx_eth()
2091 pi = netdev_priv(skb->dev); in rx_eth()
2092 if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid && in rx_eth()
2093 p->csum == htons(0xffff) && !p->fragment) { in rx_eth()
2094 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; in rx_eth()
2095 skb->ip_summed = CHECKSUM_UNNECESSARY; in rx_eth()
2098 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]); in rx_eth()
2100 if (p->vlan_valid) { in rx_eth()
2101 qs->port_stats[SGE_PSTAT_VLANEX]++; in rx_eth()
2102 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan)); in rx_eth()
2104 if (rq->polling) { in rx_eth()
2106 napi_gro_receive(&qs->napi, skb); in rx_eth()
2108 if (unlikely(pi->iscsic.flags)) in rx_eth()
2122 * lro_add_page - add a page chunk to an LRO session
2125 * @fl: the free list containing the page chunk to add
2129 * Add a received packet contained in a page chunk to an existing LRO
2135 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; in lro_add_page()
2136 struct port_info *pi = netdev_priv(qs->netdev); in lro_add_page()
2143 if (!qs->nomem) { in lro_add_page()
2144 skb = napi_get_frags(&qs->napi); in lro_add_page()
2145 qs->nomem = !skb; in lro_add_page()
2148 fl->credits--; in lro_add_page()
2150 pci_dma_sync_single_for_cpu(adap->pdev, in lro_add_page()
2152 fl->buf_size - SGE_PG_RSVD, in lro_add_page()
2155 (*sd->pg_chunk.p_cnt)--; in lro_add_page()
2156 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page) in lro_add_page()
2157 pci_unmap_page(adap->pdev, in lro_add_page()
2158 sd->pg_chunk.mapping, in lro_add_page()
2159 fl->alloc_size, in lro_add_page()
2163 put_page(sd->pg_chunk.page); in lro_add_page()
2165 qs->nomem = 0; in lro_add_page()
2169 rx_frag = skb_shinfo(skb)->frags; in lro_add_page()
2170 nr_frags = skb_shinfo(skb)->nr_frags; in lro_add_page()
2174 cpl = qs->lro_va = sd->pg_chunk.va + 2; in lro_add_page()
2176 if ((qs->netdev->features & NETIF_F_RXCSUM) && in lro_add_page()
2177 cpl->csum_valid && cpl->csum == htons(0xffff)) { in lro_add_page()
2178 skb->ip_summed = CHECKSUM_UNNECESSARY; in lro_add_page()
2179 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; in lro_add_page()
2181 skb->ip_summed = CHECKSUM_NONE; in lro_add_page()
2183 cpl = qs->lro_va; in lro_add_page()
2185 len -= offset; in lro_add_page()
2188 __skb_frag_set_page(rx_frag, sd->pg_chunk.page); in lro_add_page()
2189 skb_frag_off_set(rx_frag, sd->pg_chunk.offset + offset); in lro_add_page()
2192 skb->len += len; in lro_add_page()
2193 skb->data_len += len; in lro_add_page()
2194 skb->truesize += len; in lro_add_page()
2195 skb_shinfo(skb)->nr_frags++; in lro_add_page()
2200 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]); in lro_add_page()
2202 if (cpl->vlan_valid) { in lro_add_page()
2203 qs->port_stats[SGE_PSTAT_VLANEX]++; in lro_add_page()
2204 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan)); in lro_add_page()
2206 napi_gro_frags(&qs->napi); in lro_add_page()
2210 * handle_rsp_cntrl_info - handles control information in a response
2211 * @qs: the queue set corresponding to the response
2215 * indications and completion credits for the queue set's Tx queues.
2224 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags); in handle_rsp_cntrl_info()
2229 qs->txq[TXQ_ETH].processed += credits; in handle_rsp_cntrl_info()
2233 qs->txq[TXQ_CTRL].processed += credits; in handle_rsp_cntrl_info()
2237 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags); in handle_rsp_cntrl_info()
2241 qs->txq[TXQ_OFLD].processed += credits; in handle_rsp_cntrl_info()
2245 * check_ring_db - check if we need to ring any doorbells
2247 * @qs: the queue set whose Tx queues are to be examined
2248 * @sleeping: indicates which Tx queue sent GTS
2250 * Checks if some of a queue set's Tx queues need to ring their doorbells
2251 * to resume transmission after idling while they still have unprocessed
2258 struct sge_txq *txq = &qs->txq[TXQ_ETH]; in check_ring_db()
2260 if (txq->cleaned + txq->in_use != txq->processed && in check_ring_db()
2261 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) { in check_ring_db()
2262 set_bit(TXQ_RUNNING, &txq->flags); in check_ring_db()
2264 V_EGRCNTX(txq->cntxt_id)); in check_ring_db()
2269 struct sge_txq *txq = &qs->txq[TXQ_OFLD]; in check_ring_db()
2271 if (txq->cleaned + txq->in_use != txq->processed && in check_ring_db()
2272 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) { in check_ring_db()
2273 set_bit(TXQ_RUNNING, &txq->flags); in check_ring_db()
2275 V_EGRCNTX(txq->cntxt_id)); in check_ring_db()
2281 * is_new_response - check if a response is newly written
2291 return (r->intr_gen & F_RSPD_GEN2) == q->gen; in is_new_response()
2296 q->pg_skb = NULL; in clear_rspq_bufstate()
2297 q->rx_recycle_buf = 0; in clear_rspq_bufstate()
2306 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2310 * process_responses - process responses from an SGE response queue
2312 * @qs: the queue set to which the response queue belongs
2315 * Process responses from an SGE response queue up to the supplied budget.
2317 * for the queues that belong to the response queue's queue set.
2321 * on this queue. If the system is under memory shortage use a fairly
2322 * long delay to help recovery.
2327 struct sge_rspq *q = &qs->rspq; in process_responses()
2328 struct rsp_desc *r = &q->desc[q->cidx]; in process_responses()
2334 q->next_holdoff = q->holdoff_tmr; in process_responses()
2338 int lro = !!(qs->netdev->features & NETIF_F_GRO); in process_responses()
2344 eth = r->rss_hdr.opcode == CPL_RX_PKT; in process_responses()
2346 rss_lo = r->rss_hdr.rss_hash_val; in process_responses()
2347 flags = ntohl(r->flags); in process_responses()
2355 skb->data[0] = CPL_ASYNC_NOTIF; in process_responses()
2357 q->async_notif++; in process_responses()
2362 q->next_holdoff = NOMEM_INTR_DELAY; in process_responses()
2363 q->nomem++; in process_responses()
2365 budget_left--; in process_responses()
2368 q->imm_data++; in process_responses()
2370 } else if ((len = ntohl(r->len_cq)) != 0) { in process_responses()
2375 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; in process_responses()
2376 if (fl->use_pages) { in process_responses()
2377 void *addr = fl->sdesc[fl->cidx].pg_chunk.va; in process_responses()
2392 q->pg_skb = skb; in process_responses()
2399 q->rx_drops++; in process_responses()
2400 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT)) in process_responses()
2403 if (++fl->cidx == fl->size) in process_responses()
2404 fl->cidx = 0; in process_responses()
2406 q->pure_rsps++; in process_responses()
2414 if (unlikely(++q->cidx == q->size)) { in process_responses()
2415 q->cidx = 0; in process_responses()
2416 q->gen ^= 1; in process_responses()
2417 r = q->desc; in process_responses()
2421 if (++q->credits >= (q->size / 4)) { in process_responses()
2422 refill_rspq(adap, q, q->credits); in process_responses()
2423 q->credits = 0; in process_responses()
2434 q->offload_pkts++; in process_responses()
2436 skb->csum = rss_hi; in process_responses()
2437 skb->priority = rss_lo; in process_responses()
2438 ngathered = rx_offload(&adap->tdev, q, skb, in process_responses()
2446 --budget_left; in process_responses()
2449 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); in process_responses()
2454 smp_mb(); /* commit Tx queue .processed updates */ in process_responses()
2455 if (unlikely(qs->txq_stopped != 0)) in process_responses()
2458 budget -= budget_left; in process_responses()
2464 __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID); in is_pure_response()
2466 return (n | r->len_cq) == 0; in is_pure_response()
2470 * napi_rx_handler - the NAPI handler for Rx processing
2479 struct adapter *adap = qs->adap; in napi_rx_handler()
2490 * causing the NAPI interrupt handler below to return in napi_rx_handler()
2491 * unhandled status to the OS. To protect against in napi_rx_handler()
2497 * The race cannot happen at all with MSI-X. in napi_rx_handler()
2499 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) | in napi_rx_handler()
2500 V_NEWTIMER(qs->rspq.next_holdoff) | in napi_rx_handler()
2501 V_NEWINDEX(qs->rspq.cidx)); in napi_rx_handler()
2511 return test_bit(NAPI_STATE_SCHED, &napi->state); in napi_is_scheduled()
2515 * process_pure_responses - process pure responses from a response queue
2518 * @r: the first pure response to process
2521 * non data-carrying) responses. Such responses are too lightweight to
2523 * the interrupt handler. The function is called with a pointer to a
2526 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2531 struct sge_rspq *q = &qs->rspq; in process_pure_responses()
2535 u32 flags = ntohl(r->flags); in process_pure_responses()
2538 if (unlikely(++q->cidx == q->size)) { in process_pure_responses()
2539 q->cidx = 0; in process_pure_responses()
2540 q->gen ^= 1; in process_pure_responses()
2541 r = q->desc; in process_pure_responses()
2550 q->pure_rsps++; in process_pure_responses()
2551 if (++q->credits >= (q->size / 4)) { in process_pure_responses()
2552 refill_rspq(adap, q, q->credits); in process_pure_responses()
2553 q->credits = 0; in process_pure_responses()
2563 smp_mb(); /* commit Tx queue .processed updates */ in process_pure_responses()
2564 if (unlikely(qs->txq_stopped != 0)) in process_pure_responses()
2571 * handle_responses - decide what to do with new responses in NAPI mode
2575 * This is used by the NAPI interrupt handlers to decide what to do with
2576 * new SGE responses. If there are no new responses it returns -1. If
2577 * there are new responses and they are pure (i.e., non-data carrying)
2588 struct rsp_desc *r = &q->desc[q->cidx]; in handle_responses()
2591 return -1; in handle_responses()
2594 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in handle_responses()
2595 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); in handle_responses()
2598 napi_schedule(&qs->napi); in handle_responses()
2603 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2609 struct adapter *adap = qs->adap; in t3_sge_intr_msix()
2610 struct sge_rspq *q = &qs->rspq; in t3_sge_intr_msix()
2612 spin_lock(&q->lock); in t3_sge_intr_msix()
2613 if (process_responses(adap, qs, -1) == 0) in t3_sge_intr_msix()
2614 q->unhandled_irqs++; in t3_sge_intr_msix()
2615 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in t3_sge_intr_msix()
2616 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); in t3_sge_intr_msix()
2617 spin_unlock(&q->lock); in t3_sge_intr_msix()
2622 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2628 struct sge_rspq *q = &qs->rspq; in t3_sge_intr_msix_napi()
2630 spin_lock(&q->lock); in t3_sge_intr_msix_napi()
2632 if (handle_responses(qs->adap, q) < 0) in t3_sge_intr_msix_napi()
2633 q->unhandled_irqs++; in t3_sge_intr_msix_napi()
2634 spin_unlock(&q->lock); in t3_sge_intr_msix_napi()
2639 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2640 * SGE response queues as well as error and other async events as they all use
2641 * the same MSI vector. We use one SGE response queue per port in this mode
2642 * and protect all response queues with queue 0's lock.
2648 struct sge_rspq *q = &adap->sge.qs[0].rspq; in t3_intr_msi()
2650 spin_lock(&q->lock); in t3_intr_msi()
2652 if (process_responses(adap, &adap->sge.qs[0], -1)) { in t3_intr_msi()
2653 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in t3_intr_msi()
2654 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); in t3_intr_msi()
2658 if (adap->params.nports == 2 && in t3_intr_msi()
2659 process_responses(adap, &adap->sge.qs[1], -1)) { in t3_intr_msi()
2660 struct sge_rspq *q1 = &adap->sge.qs[1].rspq; in t3_intr_msi()
2662 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) | in t3_intr_msi()
2663 V_NEWTIMER(q1->next_holdoff) | in t3_intr_msi()
2664 V_NEWINDEX(q1->cidx)); in t3_intr_msi()
2669 q->unhandled_irqs++; in t3_intr_msi()
2671 spin_unlock(&q->lock); in t3_intr_msi()
2677 struct sge_rspq *q = &qs->rspq; in rspq_check_napi()
2679 if (!napi_is_scheduled(&qs->napi) && in rspq_check_napi()
2680 is_new_response(&q->desc[q->cidx], q)) { in rspq_check_napi()
2681 napi_schedule(&qs->napi); in rspq_check_napi()
2688 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2689 * by NAPI polling). Handles data events from SGE response queues as well as
2690 * error and other async events as they all use the same MSI vector. We use
2692 * queues with queue 0's lock.
2698 struct sge_rspq *q = &adap->sge.qs[0].rspq; in t3_intr_msi_napi()
2700 spin_lock(&q->lock); in t3_intr_msi_napi()
2702 new_packets = rspq_check_napi(&adap->sge.qs[0]); in t3_intr_msi_napi()
2703 if (adap->params.nports == 2) in t3_intr_msi_napi()
2704 new_packets += rspq_check_napi(&adap->sge.qs[1]); in t3_intr_msi_napi()
2706 q->unhandled_irqs++; in t3_intr_msi_napi()
2708 spin_unlock(&q->lock); in t3_intr_msi_napi()
2720 work = process_responses(adap, rspq_to_qset(rq), -1); in process_responses_gts()
2721 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) | in process_responses_gts()
2722 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx)); in process_responses_gts()
2727 * The legacy INTx interrupt handler. This needs to handle data events from
2728 * SGE response queues as well as error and other async events as they all use
2729 * the same interrupt pin. We use one SGE response queue per port in this mode
2730 * and protect all response queues with queue 0's lock.
2736 struct sge_rspq *q0 = &adap->sge.qs[0].rspq; in t3_intr()
2737 struct sge_rspq *q1 = &adap->sge.qs[1].rspq; in t3_intr()
2739 spin_lock(&q0->lock); in t3_intr()
2741 w0 = is_new_response(&q0->desc[q0->cidx], q0); in t3_intr()
2742 w1 = adap->params.nports == 2 && in t3_intr()
2743 is_new_response(&q1->desc[q1->cidx], q1); in t3_intr()
2759 spin_unlock(&q0->lock); in t3_intr()
2764 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2765 * Handles data events from SGE response queues as well as error and other
2766 * async events as they all use the same interrupt pin. We use one SGE
2767 * response queue per port in this mode and protect all response queues with
2774 struct sge_rspq *q0 = &adap->sge.qs[0].rspq; in t3b_intr()
2782 spin_lock(&q0->lock); in t3b_intr()
2791 process_responses_gts(adap, &adap->sge.qs[1].rspq); in t3b_intr()
2793 spin_unlock(&q0->lock); in t3b_intr()
2798 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2799 * Handles data events from SGE response queues as well as error and other
2800 * async events as they all use the same interrupt pin. We use one SGE
2801 * response queue per port in this mode and protect all response queues with
2808 struct sge_qset *qs0 = &adap->sge.qs[0]; in t3b_intr_napi()
2809 struct sge_rspq *q0 = &qs0->rspq; in t3b_intr_napi()
2817 spin_lock(&q0->lock); in t3b_intr_napi()
2823 napi_schedule(&qs0->napi); in t3b_intr_napi()
2826 napi_schedule(&adap->sge.qs[1].napi); in t3b_intr_napi()
2828 spin_unlock(&q0->lock); in t3b_intr_napi()
2833 * t3_intr_handler - select the top-level interrupt handler
2835 * @polling: whether using NAPI to service response queues
2837 * Selects the top-level interrupt handler based on the type of interrupts
2838 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2839 * response queues.
2843 if (adap->flags & USING_MSIX) in t3_intr_handler()
2845 if (adap->flags & USING_MSI) in t3_intr_handler()
2847 if (adap->params.rev > 0) in t3_intr_handler()
2862 * t3_sge_err_intr_handler - SGE async event interrupt handler
2865 * Interrupt handler for SGE asynchronous (non-data) events.
2886 "packet delivered to disabled response queue " in t3_sge_err_intr_handler()
2891 queue_work(cxgb3_wq, &adapter->db_drop_task); in t3_sge_err_intr_handler()
2894 queue_work(cxgb3_wq, &adapter->db_full_task); in t3_sge_err_intr_handler()
2897 queue_work(cxgb3_wq, &adapter->db_empty_task); in t3_sge_err_intr_handler()
2905 * sge_timer_tx - perform periodic maintenance of an SGE qset
2906 * @t: a timer list containing the SGE queue set to maintain
2908 * Runs periodically from a timer to perform maintenance of an SGE queue
2911 * Cleans up any completed Tx descriptors that may still be pending.
2912 * Normal descriptor cleanup happens when new packets are added to a Tx
2914 * if the Tx queue has not seen any new packets in a while. We make a
2915 * best effort attempt to reclaim descriptors, in that we don't wait
2918 * up). Since control queues use immediate data exclusively we don't
2925 struct port_info *pi = netdev_priv(qs->netdev); in sge_timer_tx()
2926 struct adapter *adap = pi->adapter; in sge_timer_tx()
2930 if (__netif_tx_trylock(qs->tx_q)) { in sge_timer_tx()
2931 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH], in sge_timer_tx()
2933 __netif_tx_unlock(qs->tx_q); in sge_timer_tx()
2936 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) { in sge_timer_tx()
2937 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD], in sge_timer_tx()
2939 spin_unlock(&qs->txq[TXQ_OFLD].lock); in sge_timer_tx()
2945 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period); in sge_timer_tx()
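The reclaim strategy described in the comment, sketched in user space with a pthread mutex standing in for the Tx queue lock; fake_txq and try_reclaim are illustrative names only. Returning the count of still-pending descriptors is what lets the caller pick a shorter next_period when work remains.

#include <pthread.h>

/* Illustrative Tx queue: 'completed' counts descriptors the hardware has
 * finished with but software has not yet reclaimed. */
struct fake_txq {
	pthread_mutex_t lock;
	unsigned int completed;
};

/*
 * Best-effort reclaim: if the lock is contended, skip the queue entirely,
 * on the assumption that whoever holds it is queueing new packets and will
 * reclaim as part of that work.
 */
static unsigned int try_reclaim(struct fake_txq *q, unsigned int budget)
{
	if (pthread_mutex_trylock(&q->lock) == 0) {
		unsigned int n = q->completed < budget ? q->completed : budget;

		q->completed -= n;   /* stand-in for freeing skbs/descriptors */
		pthread_mutex_unlock(&q->lock);
	}
	return q->completed;     /* descriptors left over feed the next period */
}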
2949 * sge_timer_rx - perform periodic maintenance of an SGE qset
2950 * @t: the timer list containing the SGE queue set to maintain
2952 * a) Replenishes Rx queues that have run out due to memory shortage.
2953 * Normally new Rx buffers are added when existing ones are consumed but
2954 * when out of memory a queue can become empty. We try to add only a few
2955 * buffers here; the queue will be replenished fully as these new buffers
2956 * are used up if memory shortage has subsided.
2958 * b) Returns coalesced response queue credits in case a response queue is
2959 * starved.
2966 struct port_info *pi = netdev_priv(qs->netdev); in sge_timer_rx()
2967 struct adapter *adap = pi->adapter; in sge_timer_rx()
2970 lock = adap->params.rev > 0 ? in sge_timer_rx()
2971 &qs->rspq.lock : &adap->sge.qs[0].rspq.lock; in sge_timer_rx()
2976 if (napi_is_scheduled(&qs->napi)) in sge_timer_rx()
2979 if (adap->params.rev < 4) { in sge_timer_rx()
2982 if (status & (1 << qs->rspq.cntxt_id)) { in sge_timer_rx()
2983 qs->rspq.starved++; in sge_timer_rx()
2984 if (qs->rspq.credits) { in sge_timer_rx()
2985 qs->rspq.credits--; in sge_timer_rx()
2986 refill_rspq(adap, &qs->rspq, 1); in sge_timer_rx()
2987 qs->rspq.restarted++; in sge_timer_rx()
2989 1 << qs->rspq.cntxt_id); in sge_timer_rx()
2994 if (qs->fl[0].credits < qs->fl[0].size) in sge_timer_rx()
2995 __refill_fl(adap, &qs->fl[0]); in sge_timer_rx()
2996 if (qs->fl[1].credits < qs->fl[1].size) in sge_timer_rx()
2997 __refill_fl(adap, &qs->fl[1]); in sge_timer_rx()
3002 mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); in sge_timer_rx()
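A small sketch of the refill policy shown above: top a depleted free list up only a little at a time, letting normal traffic finish the job once allocations succeed again. struct fake_fl and refill_batch are invented names, not driver symbols; the starvation branch (handing one coalesced credit back to the hardware) is not modelled.

/* Illustrative free list: 'credits' buffers are currently posted to the
 * hardware out of a ring of 'size' entries. */
struct fake_fl {
	unsigned int credits;
	unsigned int size;
};

/*
 * Refill in modest batches: under memory pressure the timer only adds a
 * small number of buffers, so recovery is gradual rather than a burst of
 * large allocations.  Returns the number of buffers added.
 */
static unsigned int refill_batch(struct fake_fl *fl, unsigned int max_batch)
{
	unsigned int want = fl->size - fl->credits;
	unsigned int add = want < max_batch ? want : max_batch;

	fl->credits += add;   /* real code allocates page chunks or sk_buffs */
	return add;
}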
3006 * t3_update_qset_coalesce - update coalescing settings for a queue set
3015 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */ in t3_update_qset_coalesce()
3016 qs->rspq.polling = p->polling; in t3_update_qset_coalesce()
3017 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll; in t3_update_qset_coalesce()
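The holdoff arithmetic above, isolated: the programmed value is the microsecond setting scaled by 10 (so it appears to be stored in 100 ns units) and clamped to a minimum of 1 because 0 is not a valid setting. A one-function sketch with an invented name:

static unsigned int usecs_to_holdoff_ticks(unsigned int coalesce_usecs)
{
	unsigned int tmr = coalesce_usecs * 10;  /* same scale factor as above */

	return tmr ? tmr : 1;                    /* never program 0 */
}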
3021 * t3_sge_alloc_qset - initialize an SGE queue set
3027 * @ntxq: number of Tx queues for the queue set
3029 * @netdevq: net device TX queue associated with this queue set
3032 * comprises a response queue, two Rx free-buffer queues, and up to 3
3033 * Tx queues. The Tx queues are assigned roles in the order Ethernet
3034 * queue, offload queue, and control queue.
3041 int i, avail, ret = -ENOMEM; in t3_sge_alloc_qset()
3042 struct sge_qset *q = &adapter->sge.qs[id]; in t3_sge_alloc_qset()
3045 timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0); in t3_sge_alloc_qset()
3046 timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0); in t3_sge_alloc_qset()
3048 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size, in t3_sge_alloc_qset()
3051 &q->fl[0].phys_addr, &q->fl[0].sdesc); in t3_sge_alloc_qset()
3052 if (!q->fl[0].desc) in t3_sge_alloc_qset()
3055 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size, in t3_sge_alloc_qset()
3058 &q->fl[1].phys_addr, &q->fl[1].sdesc); in t3_sge_alloc_qset()
3059 if (!q->fl[1].desc) in t3_sge_alloc_qset()
3062 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size, in t3_sge_alloc_qset()
3064 &q->rspq.phys_addr, NULL); in t3_sge_alloc_qset()
3065 if (!q->rspq.desc) in t3_sge_alloc_qset()
3071 * need to keep track of any sk_buffs. in t3_sge_alloc_qset()
3075 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i], in t3_sge_alloc_qset()
3077 &q->txq[i].phys_addr, in t3_sge_alloc_qset()
3078 &q->txq[i].sdesc); in t3_sge_alloc_qset()
3079 if (!q->txq[i].desc) in t3_sge_alloc_qset()
3082 q->txq[i].gen = 1; in t3_sge_alloc_qset()
3083 q->txq[i].size = p->txq_size[i]; in t3_sge_alloc_qset()
3084 spin_lock_init(&q->txq[i].lock); in t3_sge_alloc_qset()
3085 skb_queue_head_init(&q->txq[i].sendq); in t3_sge_alloc_qset()
3088 tasklet_setup(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq); in t3_sge_alloc_qset()
3089 tasklet_setup(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq); in t3_sge_alloc_qset()
3091 q->fl[0].gen = q->fl[1].gen = 1; in t3_sge_alloc_qset()
3092 q->fl[0].size = p->fl_size; in t3_sge_alloc_qset()
3093 q->fl[1].size = p->jumbo_size; in t3_sge_alloc_qset()
3095 q->rspq.gen = 1; in t3_sge_alloc_qset()
3096 q->rspq.size = p->rspq_size; in t3_sge_alloc_qset()
3097 spin_lock_init(&q->rspq.lock); in t3_sge_alloc_qset()
3098 skb_queue_head_init(&q->rspq.rx_queue); in t3_sge_alloc_qset()
3100 q->txq[TXQ_ETH].stop_thres = nports * in t3_sge_alloc_qset()
3104 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE; in t3_sge_alloc_qset()
3106 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data); in t3_sge_alloc_qset()
3109 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE; in t3_sge_alloc_qset()
3111 q->fl[1].buf_size = is_offload(adapter) ? in t3_sge_alloc_qset()
3112 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : in t3_sge_alloc_qset()
3116 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0; in t3_sge_alloc_qset()
3117 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0; in t3_sge_alloc_qset()
3118 q->fl[0].order = FL0_PG_ORDER; in t3_sge_alloc_qset()
3119 q->fl[1].order = FL1_PG_ORDER; in t3_sge_alloc_qset()
3120 q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE; in t3_sge_alloc_qset()
3121 q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE; in t3_sge_alloc_qset()
3123 spin_lock_irq(&adapter->sge.reg_lock); in t3_sge_alloc_qset()
3126 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx, in t3_sge_alloc_qset()
3127 q->rspq.phys_addr, q->rspq.size, in t3_sge_alloc_qset()
3128 q->fl[0].buf_size - SGE_PG_RSVD, 1, 0); in t3_sge_alloc_qset()
3133 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0, in t3_sge_alloc_qset()
3134 q->fl[i].phys_addr, q->fl[i].size, in t3_sge_alloc_qset()
3135 q->fl[i].buf_size - SGE_PG_RSVD, in t3_sge_alloc_qset()
3136 p->cong_thres, 1, 0); in t3_sge_alloc_qset()
3141 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS, in t3_sge_alloc_qset()
3142 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr, in t3_sge_alloc_qset()
3143 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token, in t3_sge_alloc_qset()
3149 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id, in t3_sge_alloc_qset()
3151 q->txq[TXQ_OFLD].phys_addr, in t3_sge_alloc_qset()
3152 q->txq[TXQ_OFLD].size, 0, 1, 0); in t3_sge_alloc_qset()
3158 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0, in t3_sge_alloc_qset()
3160 q->txq[TXQ_CTRL].phys_addr, in t3_sge_alloc_qset()
3161 q->txq[TXQ_CTRL].size, in t3_sge_alloc_qset()
3162 q->txq[TXQ_CTRL].token, 1, 0); in t3_sge_alloc_qset()
3167 spin_unlock_irq(&adapter->sge.reg_lock); in t3_sge_alloc_qset()
3169 q->adap = adapter; in t3_sge_alloc_qset()
3170 q->netdev = dev; in t3_sge_alloc_qset()
3171 q->tx_q = netdevq; in t3_sge_alloc_qset()
3174 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, in t3_sge_alloc_qset()
3178 ret = -ENOMEM; in t3_sge_alloc_qset()
3181 if (avail < q->fl[0].size) in t3_sge_alloc_qset()
3185 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size, in t3_sge_alloc_qset()
3187 if (avail < q->fl[1].size) in t3_sge_alloc_qset()
3190 refill_rspq(adapter, &q->rspq, q->rspq.size - 1); in t3_sge_alloc_qset()
3192 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | in t3_sge_alloc_qset()
3193 V_NEWTIMER(q->rspq.holdoff_tmr)); in t3_sge_alloc_qset()
3198 spin_unlock_irq(&adapter->sge.reg_lock); in t3_sge_alloc_qset()
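Stripped of the details, the allocation path above follows a fixed order with a single unwind on failure. A compile-only sketch of that shape, using stub helpers (alloc_rings, program_hw_contexts, and so on) that merely stand in for alloc_ring(), the t3_sge_init_*cntxt() calls made under sge.reg_lock, refill_fl()/refill_rspq(), and the error-path cleanup:

/* Trivial stubs so the sketch compiles; they carry no real logic. */
static int  alloc_rings(void)            { return 0; }
static int  program_hw_contexts(void)    { return 0; }
static int  populate_free_lists(void)    { return 0; }
static void arm_response_queue(void)     { }
static void free_partial_resources(void) { }

/* Condensed shape of the queue-set setup: allocate every ring first,
 * program the hardware contexts, fill the free lists, then arm the
 * response queue; any failure unwinds whatever was already set up. */
static int fake_alloc_qset(void)
{
	int ret = alloc_rings();              /* FL0, FL1, rspq, 3 Tx rings */

	if (!ret)
		ret = program_hw_contexts();  /* done under the registers lock */
	if (!ret)
		ret = populate_free_lists();  /* fails if FL0 cannot be filled at all */
	if (ret) {
		free_partial_resources();
		return ret;
	}
	arm_response_queue();                 /* models the final GTS write above */
	return 0;
}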
3205 * t3_start_sge_timers - start SGE timer callbacks
3215 struct sge_qset *q = &adap->sge.qs[i]; in t3_start_sge_timers()
3217 if (q->tx_reclaim_timer.function) in t3_start_sge_timers()
3218 mod_timer(&q->tx_reclaim_timer, in t3_start_sge_timers()
3221 if (q->rx_reclaim_timer.function) in t3_start_sge_timers()
3222 mod_timer(&q->rx_reclaim_timer, in t3_start_sge_timers()
3228 * t3_stop_sge_timers - stop SGE timer callbacks
3238 struct sge_qset *q = &adap->sge.qs[i]; in t3_stop_sge_timers()
3240 if (q->tx_reclaim_timer.function) in t3_stop_sge_timers()
3241 del_timer_sync(&q->tx_reclaim_timer); in t3_stop_sge_timers()
3242 if (q->rx_reclaim_timer.function) in t3_stop_sge_timers()
3243 del_timer_sync(&q->rx_reclaim_timer); in t3_stop_sge_timers()
3248 * t3_free_sge_resources - free SGE resources
3258 t3_free_qset(adap, &adap->sge.qs[i]); in t3_free_sge_resources()
3262 * t3_sge_start - enable SGE
3274 * t3_sge_stop_dma - disable SGE DMA engine operation
3291 * t3_sge_stop - disable SGE operation completely
3304 struct sge_qset *qs = &adap->sge.qs[i]; in t3_sge_stop()
3306 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk); in t3_sge_stop()
3307 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk); in t3_sge_stop()
3312 * t3_sge_init - initialize SGE
3318 * top-level must request those individually. We also do not enable DMA
3319 * here; that should be done after the queues have been set up.
3323 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12); in t3_sge_init()
3327 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS | in t3_sge_init()
3328 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING; in t3_sge_init()
3332 if (adap->params.rev > 0) { in t3_sge_init()
3333 if (!(adap->flags & (USING_MSIX | USING_MSI))) in t3_sge_init()
3343 adap->params.rev < T3_REV_C ? 1000 : 500); in t3_sge_init()
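The ups value above encodes BAR2's size: the byte length is shifted down to 4KB pages and ffs() yields the position of the lowest set bit, so for a power-of-two BAR the stored V_USERSPACESIZE(ups - 1) is log2 of the page count. A tiny worked example with an assumed 64KB BAR:

#include <stdio.h>
#include <strings.h>   /* ffs() */

int main(void)
{
	unsigned long bar2_len = 64 * 1024;           /* assumed example size */
	unsigned int ups = ffs((int)(bar2_len >> 12)); /* 16 pages -> ffs = 5 */

	printf("ups = %u, field value = %u\n", ups, ups ? ups - 1 : 0);
	return 0;
}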
3352 * t3_sge_prep - one-time SGE initialization
3356 * Performs one-time initialization of SGE SW state. Includes determining
3357 * defaults for the assorted SGE parameters, which admins can change until
3358 * they are used to initialize the SGE.
3364 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) - in t3_sge_prep()
3368 struct qset_params *q = p->qset + i; in t3_sge_prep()
3370 q->polling = adap->params.rev > 0; in t3_sge_prep()
3371 q->coalesce_usecs = 5; in t3_sge_prep()
3372 q->rspq_size = 1024; in t3_sge_prep()
3373 q->fl_size = 1024; in t3_sge_prep()
3374 q->jumbo_size = 512; in t3_sge_prep()
3375 q->txq_size[TXQ_ETH] = 1024; in t3_sge_prep()
3376 q->txq_size[TXQ_OFLD] = 1024; in t3_sge_prep()
3377 q->txq_size[TXQ_CTRL] = 256; in t3_sge_prep()
3378 q->cong_thres = 0; in t3_sge_prep()
3381 spin_lock_init(&adap->sge.reg_lock); in t3_sge_prep()
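The max_pkt_size computation near the top of t3_sge_prep() (and the jumbo free-list buffer size earlier) is a simple budget: a 16KB buffer minus the CPL header that precedes the packet data and the skb_shared_info the stack keeps at the end of the buffer. A worked example with assumed, not real, structure sizes:

#include <stdio.h>

int main(void)
{
	unsigned int buf_size    = 16 * 1024;
	unsigned int cpl_hdr     = 16;   /* assumed sizeof(struct cpl_rx_data) */
	unsigned int shared_info = 320;  /* assumed aligned skb_shared_info size */

	printf("usable payload ~ %u bytes\n", buf_size - cpl_hdr - shared_info);
	return 0;
}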