2 * Copyright (c) 2012 - 2018 Intel Corporation. All rights reserved.
3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
16 * - Redistributions of source code must retain the above
20 * - Redistributions in binary form must reproduce the above
91 "Maximum number of multicast groups to support");
140 struct rvt_sge *sg_list = ss->sg_list; in qib_count_sge()
141 struct rvt_sge sge = ss->sge; in qib_count_sge()
142 u8 num_sge = ss->num_sge; in qib_count_sge()
148 if (((long) sge.vaddr & (sizeof(u32) - 1)) || in qib_count_sge()
149 (len != length && (len & (sizeof(u32) - 1)))) { in qib_count_sge()
155 sge.length -= len; in qib_count_sge()
156 sge.sge_length -= len; in qib_count_sge()
158 if (--num_sge) in qib_count_sge()
160 } else if (sge.length == 0 && sge.mr->lkey) { in qib_count_sge()
162 if (++sge.m >= sge.mr->mapsz) in qib_count_sge()
167 sge.mr->map[sge.m]->segs[sge.n].vaddr; in qib_count_sge()
169 sge.mr->map[sge.m]->segs[sge.n].length; in qib_count_sge()
171 length -= len; in qib_count_sge()
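/*
 * qib_count_sge() walks the SGE list without modifying the sge_state
 * and returns the number of DMA descriptors needed to send @length
 * bytes; it returns 0 as soon as any segment is not dword aligned, in
 * which case qib_verbs_send_dma() below falls back to copying the
 * packet into a bounce buffer.
 */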
181 struct rvt_sge *sge = &ss->sge; in qib_copy_from_sge()
186 memcpy(data, sge->vaddr, len); in qib_copy_from_sge()
187 sge->vaddr += len; in qib_copy_from_sge()
188 sge->length -= len; in qib_copy_from_sge()
189 sge->sge_length -= len; in qib_copy_from_sge()
190 if (sge->sge_length == 0) { in qib_copy_from_sge()
191 if (--ss->num_sge) in qib_copy_from_sge()
192 *sge = *ss->sg_list++; in qib_copy_from_sge()
193 } else if (sge->length == 0 && sge->mr->lkey) { in qib_copy_from_sge()
194 if (++sge->n >= RVT_SEGSZ) { in qib_copy_from_sge()
195 if (++sge->m >= sge->mr->mapsz) in qib_copy_from_sge()
197 sge->n = 0; in qib_copy_from_sge()
199 sge->vaddr = in qib_copy_from_sge()
200 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_copy_from_sge()
201 sge->length = in qib_copy_from_sge()
202 sge->mr->map[sge->m]->segs[sge->n].length; in qib_copy_from_sge()
205 length -= len; in qib_copy_from_sge()
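/*
 * qib_copy_from_sge() gathers @length bytes from the SGE list into a
 * flat destination buffer, advancing vaddr/length as each SGE is
 * consumed and reloading the next SGE (or the next segment of the MR
 * map when an lkey is in use).
 */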
210 * qib_qp_rcv - processing an incoming packet on a QP
225 struct qib_ibport *ibp = &rcd->ppd->ibport_data; in qib_qp_rcv()
227 spin_lock(&qp->r_lock); in qib_qp_rcv()
230 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { in qib_qp_rcv()
231 ibp->rvp.n_pkt_drops++; in qib_qp_rcv()
235 switch (qp->ibqp.qp_type) { in qib_qp_rcv()
258 spin_unlock(&qp->r_lock); in qib_qp_rcv()
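/*
 * qib_qp_rcv() holds qp->r_lock across receive processing, drops the
 * packet (counting it in rvp.n_pkt_drops) when the QP state does not
 * allow receives, and otherwise dispatches on qp->ibqp.qp_type to the
 * per-transport receive handlers.
 */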
262 * qib_ib_rcv - process an incoming packet
273 struct qib_pportdata *ppd = rcd->ppd; in qib_ib_rcv()
274 struct qib_ibport *ibp = &ppd->ibport_data; in qib_ib_rcv()
276 struct qib_devdata *dd = ppd->dd; in qib_ib_rcv()
277 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; in qib_ib_rcv()
290 lid = be16_to_cpu(hdr->lrh[1]); in qib_ib_rcv()
292 lid &= ~((1 << ppd->lmc) - 1); in qib_ib_rcv()
293 if (unlikely(lid != ppd->lid)) in qib_ib_rcv()
298 lnh = be16_to_cpu(hdr->lrh[0]) & 3; in qib_ib_rcv()
300 ohdr = &hdr->u.oth; in qib_ib_rcv()
304 ohdr = &hdr->u.l.oth; in qib_ib_rcv()
305 if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR) in qib_ib_rcv()
307 vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow); in qib_ib_rcv()
313 opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f; in qib_ib_rcv()
315 rcd->opstats->stats[opcode].n_bytes += tlen; in qib_ib_rcv()
316 rcd->opstats->stats[opcode].n_packets++; in qib_ib_rcv()
320 qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK; in qib_ib_rcv()
322 struct rvt_mcast *mcast; in qib_ib_rcv()
327 mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid, lid); in qib_ib_rcv()
328 if (mcast == NULL) in qib_ib_rcv()
330 this_cpu_inc(ibp->pmastats->n_multicast_rcv); in qib_ib_rcv()
332 list_for_each_entry_rcu(p, &mcast->qp_list, list) in qib_ib_rcv()
333 qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp); in qib_ib_rcv()
339 if (atomic_dec_return(&mcast->refcount) <= 1) in qib_ib_rcv()
340 wake_up(&mcast->wait); in qib_ib_rcv()
343 qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); in qib_ib_rcv()
348 this_cpu_inc(ibp->pmastats->n_unicast_rcv); in qib_ib_rcv()
355 ibp->rvp.n_pkt_drops++; in qib_ib_rcv()
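/*
 * qib_ib_rcv() is the per-context entry point for received packets:
 * after validating the LRH (and GRH, when present) it either looks up
 * the multicast group with rvt_mcast_find() and replays the packet to
 * every QP attached to it, or resolves the destination QP with
 * rvt_lookup_qpn() for unicast.  The reference taken by
 * rvt_mcast_find() is dropped afterwards, waking any detach waiter.
 */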
365 struct list_head *list = &dev->memwait; in mem_timer()
370 spin_lock_irqsave(&dev->rdi.pending_lock, flags); in mem_timer()
372 priv = list_entry(list->next, struct qib_qp_priv, iowait); in mem_timer()
373 qp = priv->owner; in mem_timer()
374 list_del_init(&priv->iowait); in mem_timer()
377 mod_timer(&dev->mem_timer, jiffies + 1); in mem_timer()
379 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags); in mem_timer()
382 spin_lock_irqsave(&qp->s_lock, flags); in mem_timer()
383 if (qp->s_flags & RVT_S_WAIT_KMEM) { in mem_timer()
384 qp->s_flags &= ~RVT_S_WAIT_KMEM; in mem_timer()
387 spin_unlock_irqrestore(&qp->s_lock, flags); in mem_timer()
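/*
 * mem_timer() fires while QPs are parked on dev->memwait for kernel
 * memory: it dequeues the first waiter under rdi.pending_lock,
 * re-arms itself if more QPs are still queued, then clears
 * RVT_S_WAIT_KMEM under the QP's s_lock so its send engine can be
 * rescheduled.
 */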
405 data <<= ((sizeof(u32) - n) * BITS_PER_BYTE); in clear_upper_bytes()
406 data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE); in clear_upper_bytes()
422 data >>= ((sizeof(u32) - n) * BITS_PER_BYTE); in clear_upper_bytes()
423 data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE); in clear_upper_bytes()
436 u32 len = rvt_get_sge_length(&ss->sge, length); in qib_copy_io()
440 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1); in qib_copy_io()
442 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr & in qib_copy_io()
443 ~(sizeof(u32) - 1)); in qib_copy_io()
447 y = sizeof(u32) - off; in qib_copy_io()
453 len = sizeof(u32) - extra; in qib_copy_io()
473 u32 *addr = (u32 *) ss->sge.vaddr; in qib_copy_io()
475 int ushift = 32 - shift; in qib_copy_io()
486 l -= sizeof(u32); in qib_copy_io()
496 len -= l + extra - sizeof(u32); in qib_copy_io()
526 qib_pio_copy(piobuf, ss->sge.vaddr, w - 1); in qib_copy_io()
527 piobuf += w - 1; in qib_copy_io()
528 last = ((u32 *) ss->sge.vaddr)[w - 1]; in qib_copy_io()
533 qib_pio_copy(piobuf, ss->sge.vaddr, w); in qib_copy_io()
536 extra = len & (sizeof(u32) - 1); in qib_copy_io()
538 u32 v = ((u32 *) ss->sge.vaddr)[w]; in qib_copy_io()
545 length -= len; in qib_copy_io()
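/*
 * qib_copy_io() streams the payload into a write-combining PIO buffer.
 * Whole dwords are copied with qib_pio_copy(); the unaligned and
 * partial-dword cases use the byte-masking helpers above so that only
 * full 32-bit words are written to the chip, and when flush_wc is set
 * the final dword is held back until after the flush.
 */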
562 struct qib_qp_priv *priv = qp->priv; in __get_txreq()
566 spin_lock_irqsave(&qp->s_lock, flags); in __get_txreq()
567 spin_lock(&dev->rdi.pending_lock); in __get_txreq()
569 if (!list_empty(&dev->txreq_free)) { in __get_txreq()
570 struct list_head *l = dev->txreq_free.next; in __get_txreq()
573 spin_unlock(&dev->rdi.pending_lock); in __get_txreq()
574 spin_unlock_irqrestore(&qp->s_lock, flags); in __get_txreq()
577 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK && in __get_txreq()
578 list_empty(&priv->iowait)) { in __get_txreq()
579 dev->n_txwait++; in __get_txreq()
580 qp->s_flags |= RVT_S_WAIT_TX; in __get_txreq()
581 list_add_tail(&priv->iowait, &dev->txwait); in __get_txreq()
583 qp->s_flags &= ~RVT_S_BUSY; in __get_txreq()
584 spin_unlock(&dev->rdi.pending_lock); in __get_txreq()
585 spin_unlock_irqrestore(&qp->s_lock, flags); in __get_txreq()
586 tx = ERR_PTR(-EBUSY); in __get_txreq()
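/*
 * __get_txreq() is the contended slow path of get_txreq(): it retakes
 * the locks, rechecks dev->txreq_free, and if the list is still empty
 * queues the QP on dev->txwait, sets RVT_S_WAIT_TX, clears RVT_S_BUSY
 * and returns ERR_PTR(-EBUSY) so the caller backs off until
 * qib_put_txreq() frees a request and wakes the QP.
 */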
597 spin_lock_irqsave(&dev->rdi.pending_lock, flags); in get_txreq()
599 if (likely(!list_empty(&dev->txreq_free))) { in get_txreq()
600 struct list_head *l = dev->txreq_free.next; in get_txreq()
603 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags); in get_txreq()
607 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags); in get_txreq()
620 qp = tx->qp; in qib_put_txreq()
621 dev = to_idev(qp->ibqp.device); in qib_put_txreq()
623 if (tx->mr) { in qib_put_txreq()
624 rvt_put_mr(tx->mr); in qib_put_txreq()
625 tx->mr = NULL; in qib_put_txreq()
627 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) { in qib_put_txreq()
628 tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF; in qib_put_txreq()
629 dma_unmap_single(&dd_from_dev(dev)->pcidev->dev, in qib_put_txreq()
630 tx->txreq.addr, tx->hdr_dwords << 2, in qib_put_txreq()
632 kfree(tx->align_buf); in qib_put_txreq()
635 spin_lock_irqsave(&dev->rdi.pending_lock, flags); in qib_put_txreq()
638 list_add(&tx->txreq.list, &dev->txreq_free); in qib_put_txreq()
640 if (!list_empty(&dev->txwait)) { in qib_put_txreq()
642 priv = list_entry(dev->txwait.next, struct qib_qp_priv, in qib_put_txreq()
644 qp = priv->owner; in qib_put_txreq()
645 list_del_init(&priv->iowait); in qib_put_txreq()
647 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags); in qib_put_txreq()
649 spin_lock_irqsave(&qp->s_lock, flags); in qib_put_txreq()
650 if (qp->s_flags & RVT_S_WAIT_TX) { in qib_put_txreq()
651 qp->s_flags &= ~RVT_S_WAIT_TX; in qib_put_txreq()
654 spin_unlock_irqrestore(&qp->s_lock, flags); in qib_put_txreq()
658 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags); in qib_put_txreq()
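/*
 * qib_put_txreq() drops any MR reference and DMA mapping held by the
 * request, returns it to dev->txreq_free, and if a QP is waiting on
 * dev->txwait removes the first waiter, clears RVT_S_WAIT_TX and
 * reschedules its send engine.
 */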
665 * This is called with ppd->sdma_lock held.
676 dev = &ppd->dd->verbs_dev; in qib_verbs_sdma_desc_avail()
677 spin_lock(&dev->rdi.pending_lock); in qib_verbs_sdma_desc_avail()
680 list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) { in qib_verbs_sdma_desc_avail()
681 qp = qpp->owner; in qib_verbs_sdma_desc_avail()
682 if (qp->port_num != ppd->port) in qib_verbs_sdma_desc_avail()
686 if (qpp->s_tx->txreq.sg_count > avail) in qib_verbs_sdma_desc_avail()
688 avail -= qpp->s_tx->txreq.sg_count; in qib_verbs_sdma_desc_avail()
689 list_del_init(&qpp->iowait); in qib_verbs_sdma_desc_avail()
694 spin_unlock(&dev->rdi.pending_lock); in qib_verbs_sdma_desc_avail()
698 spin_lock(&qp->s_lock); in qib_verbs_sdma_desc_avail()
699 if (qp->s_flags & RVT_S_WAIT_DMA_DESC) { in qib_verbs_sdma_desc_avail()
700 qp->s_flags &= ~RVT_S_WAIT_DMA_DESC; in qib_verbs_sdma_desc_avail()
703 spin_unlock(&qp->s_lock); in qib_verbs_sdma_desc_avail()
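/*
 * qib_verbs_sdma_desc_avail() runs with ppd->sdma_lock held after
 * descriptors are freed: it scans dev->dmawait for QPs on this port
 * whose queued request now fits within @avail descriptors, dequeues
 * them, and clears RVT_S_WAIT_DMA_DESC so they can make progress.
 */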
709 * This is called with ppd->sdma_lock held.
715 struct rvt_qp *qp = tx->qp; in sdma_complete()
716 struct qib_qp_priv *priv = qp->priv; in sdma_complete()
718 spin_lock(&qp->s_lock); in sdma_complete()
719 if (tx->wqe) in sdma_complete()
720 rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS); in sdma_complete()
721 else if (qp->ibqp.qp_type == IB_QPT_RC) { in sdma_complete()
724 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) in sdma_complete()
725 hdr = &tx->align_buf->hdr; in sdma_complete()
727 struct qib_ibdev *dev = to_idev(qp->ibqp.device); in sdma_complete()
729 hdr = &dev->pio_hdrs[tx->hdr_inx].hdr; in sdma_complete()
733 if (atomic_dec_and_test(&priv->s_dma_busy)) { in sdma_complete()
734 if (qp->state == IB_QPS_RESET) in sdma_complete()
735 wake_up(&priv->wait_dma); in sdma_complete()
736 else if (qp->s_flags & RVT_S_WAIT_DMA) { in sdma_complete()
737 qp->s_flags &= ~RVT_S_WAIT_DMA; in sdma_complete()
741 spin_unlock(&qp->s_lock); in sdma_complete()
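/*
 * sdma_complete() is the txreq callback run when the SDMA engine has
 * retired the descriptor list: it completes the WQE when one is
 * attached (otherwise the RC code gets the returned header), and once
 * the QP's last outstanding DMA drains it either wakes a reset waiter
 * or clears RVT_S_WAIT_DMA and reschedules the send engine.
 */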
748 struct qib_qp_priv *priv = qp->priv; in wait_kmem()
752 spin_lock_irqsave(&qp->s_lock, flags); in wait_kmem()
753 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { in wait_kmem()
754 spin_lock(&dev->rdi.pending_lock); in wait_kmem()
755 if (list_empty(&priv->iowait)) { in wait_kmem()
756 if (list_empty(&dev->memwait)) in wait_kmem()
757 mod_timer(&dev->mem_timer, jiffies + 1); in wait_kmem()
758 qp->s_flags |= RVT_S_WAIT_KMEM; in wait_kmem()
759 list_add_tail(&priv->iowait, &dev->memwait); in wait_kmem()
761 spin_unlock(&dev->rdi.pending_lock); in wait_kmem()
762 qp->s_flags &= ~RVT_S_BUSY; in wait_kmem()
763 ret = -EBUSY; in wait_kmem()
765 spin_unlock_irqrestore(&qp->s_lock, flags); in wait_kmem()
774 struct qib_qp_priv *priv = qp->priv; in qib_verbs_send_dma()
775 struct qib_ibdev *dev = to_idev(qp->ibqp.device); in qib_verbs_send_dma()
777 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); in qib_verbs_send_dma()
785 tx = priv->s_tx; in qib_verbs_send_dma()
787 priv->s_tx = NULL; in qib_verbs_send_dma()
789 ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx); in qib_verbs_send_dma()
797 control = dd->f_setpbc_control(ppd, plen, qp->s_srate, in qib_verbs_send_dma()
798 be16_to_cpu(hdr->lrh[0]) >> 12); in qib_verbs_send_dma()
799 tx->qp = qp; in qib_verbs_send_dma()
800 tx->wqe = qp->s_wqe; in qib_verbs_send_dma()
801 tx->mr = qp->s_rdma_mr; in qib_verbs_send_dma()
802 if (qp->s_rdma_mr) in qib_verbs_send_dma()
803 qp->s_rdma_mr = NULL; in qib_verbs_send_dma()
804 tx->txreq.callback = sdma_complete; in qib_verbs_send_dma()
805 if (dd->flags & QIB_HAS_SDMA_TIMEOUT) in qib_verbs_send_dma()
806 tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST; in qib_verbs_send_dma()
808 tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ; in qib_verbs_send_dma()
809 if (plen + 1 > dd->piosize2kmax_dwords) in qib_verbs_send_dma()
810 tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF; in qib_verbs_send_dma()
818 if (ndesc >= ppd->sdma_descq_cnt) in qib_verbs_send_dma()
823 phdr = &dev->pio_hdrs[tx->hdr_inx]; in qib_verbs_send_dma()
824 phdr->pbc[0] = cpu_to_le32(plen); in qib_verbs_send_dma()
825 phdr->pbc[1] = cpu_to_le32(control); in qib_verbs_send_dma()
826 memcpy(&phdr->hdr, hdr, hdrwords << 2); in qib_verbs_send_dma()
827 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC; in qib_verbs_send_dma()
828 tx->txreq.sg_count = ndesc; in qib_verbs_send_dma()
829 tx->txreq.addr = dev->pio_hdrs_phys + in qib_verbs_send_dma()
830 tx->hdr_inx * sizeof(struct qib_pio_header); in qib_verbs_send_dma()
831 tx->hdr_dwords = hdrwords + 2; /* add PBC length */ in qib_verbs_send_dma()
837 tx->hdr_dwords = plen + 1; in qib_verbs_send_dma()
838 phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC); in qib_verbs_send_dma()
841 phdr->pbc[0] = cpu_to_le32(plen); in qib_verbs_send_dma()
842 phdr->pbc[1] = cpu_to_le32(control); in qib_verbs_send_dma()
843 memcpy(&phdr->hdr, hdr, hdrwords << 2); in qib_verbs_send_dma()
844 qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len); in qib_verbs_send_dma()
846 tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr, in qib_verbs_send_dma()
847 tx->hdr_dwords << 2, DMA_TO_DEVICE); in qib_verbs_send_dma()
848 if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr)) in qib_verbs_send_dma()
850 tx->align_buf = phdr; in qib_verbs_send_dma()
851 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF; in qib_verbs_send_dma()
852 tx->txreq.sg_count = 1; in qib_verbs_send_dma()
862 ibp->rvp.n_unaligned++; in qib_verbs_send_dma()
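/*
 * qib_verbs_send_dma() builds a qib_verbs_txreq for the SDMA engine.
 * When the payload is dword aligned and fits in the descriptor queue,
 * the pre-mapped header slot in dev->pio_hdrs is used and the data is
 * DMA'd in place; otherwise header and payload are copied into a
 * kmalloc'd buffer (counted in rvp.n_unaligned), mapped with
 * dma_map_single() and later freed by qib_put_txreq().  tx->hdr_dwords
 * includes the two PBC dwords written into pbc[0]/pbc[1].
 */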
876 struct qib_qp_priv *priv = qp->priv; in no_bufs_available()
877 struct qib_ibdev *dev = to_idev(qp->ibqp.device); in no_bufs_available()
888 spin_lock_irqsave(&qp->s_lock, flags); in no_bufs_available()
889 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { in no_bufs_available()
890 spin_lock(&dev->rdi.pending_lock); in no_bufs_available()
891 if (list_empty(&priv->iowait)) { in no_bufs_available()
892 dev->n_piowait++; in no_bufs_available()
893 qp->s_flags |= RVT_S_WAIT_PIO; in no_bufs_available()
894 list_add_tail(&priv->iowait, &dev->piowait); in no_bufs_available()
896 dd->f_wantpiobuf_intr(dd, 1); in no_bufs_available()
898 spin_unlock(&dev->rdi.pending_lock); in no_bufs_available()
899 qp->s_flags &= ~RVT_S_BUSY; in no_bufs_available()
900 ret = -EBUSY; in no_bufs_available()
902 spin_unlock_irqrestore(&qp->s_lock, flags); in no_bufs_available()
910 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); in qib_verbs_send_pio()
911 struct qib_pportdata *ppd = dd->pport + qp->port_num - 1; in qib_verbs_send_pio()
921 control = dd->f_setpbc_control(ppd, plen, qp->s_srate, in qib_verbs_send_pio()
922 be16_to_cpu(ibhdr->lrh[0]) >> 12); in qib_verbs_send_pio()
924 piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn); in qib_verbs_send_pio()
937 flush_wc = dd->flags & QIB_PIO_FLUSH_WC; in qib_verbs_send_pio()
946 qib_pio_copy(piobuf, hdr, hdrwords - 1); in qib_verbs_send_pio()
948 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1); in qib_verbs_send_pio()
961 if (likely(ss->num_sge == 1 && len <= ss->sge.length && in qib_verbs_send_pio()
962 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { in qib_verbs_send_pio()
963 u32 *addr = (u32 *) ss->sge.vaddr; in qib_verbs_send_pio()
968 qib_pio_copy(piobuf, addr, dwords - 1); in qib_verbs_send_pio()
971 __raw_writel(addr[dwords - 1], piobuf + dwords - 1); in qib_verbs_send_pio()
980 if (dd->flags & QIB_USE_SPCL_TRIG) { in qib_verbs_send_pio()
981 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023; in qib_verbs_send_pio()
987 if (qp->s_rdma_mr) { in qib_verbs_send_pio()
988 rvt_put_mr(qp->s_rdma_mr); in qib_verbs_send_pio()
989 qp->s_rdma_mr = NULL; in qib_verbs_send_pio()
991 if (qp->s_wqe) { in qib_verbs_send_pio()
992 spin_lock_irqsave(&qp->s_lock, flags); in qib_verbs_send_pio()
993 rvt_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS); in qib_verbs_send_pio()
994 spin_unlock_irqrestore(&qp->s_lock, flags); in qib_verbs_send_pio()
995 } else if (qp->ibqp.qp_type == IB_QPT_RC) { in qib_verbs_send_pio()
996 spin_lock_irqsave(&qp->s_lock, flags); in qib_verbs_send_pio()
998 spin_unlock_irqrestore(&qp->s_lock, flags); in qib_verbs_send_pio()
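/*
 * qib_verbs_send_pio() copies the packet into a chip PIO send buffer
 * with programmed I/O.  On hardware needing write-combining flushes
 * (QIB_PIO_FLUSH_WC) the last header and data dwords are written only
 * after qib_flush_wc(); QIB_USE_SPCL_TRIG hardware additionally writes
 * a trigger word at a special buffer offset to start transmission.
 * Since the copy is synchronous, the WQE is completed immediately.
 */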
1004 * qib_verbs_send - send a packet
1007 * @hdrwords: the number of 32-bit words in the header
1012 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
1017 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); in qib_verbs_send()
1033 if (qp->ibqp.qp_type == IB_QPT_SMI || in qib_verbs_send()
1034 !(dd->flags & QIB_HAS_SEND_DMA)) in qib_verbs_send()
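/*
 * qib_verbs_send() picks the transmit path: SMI packets and chips
 * without send DMA go out via PIO, everything else through
 * qib_verbs_send_dma().  A non-zero return leaves RVT_S_BUSY clear so
 * the QP is rescheduled once the missing resource becomes available.
 */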
1049 struct qib_devdata *dd = ppd->dd; in qib_snapshot_counters()
1051 if (!(dd->flags & QIB_PRESENT)) { in qib_snapshot_counters()
1053 ret = -EINVAL; in qib_snapshot_counters()
1056 *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND); in qib_snapshot_counters()
1057 *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV); in qib_snapshot_counters()
1058 *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND); in qib_snapshot_counters()
1059 *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV); in qib_snapshot_counters()
1060 *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL); in qib_snapshot_counters()
1069 * qib_get_counters - get various chip counters
1080 if (!(ppd->dd->flags & QIB_PRESENT)) { in qib_get_counters()
1082 ret = -EINVAL; in qib_get_counters()
1085 cntrs->symbol_error_counter = in qib_get_counters()
1086 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR); in qib_get_counters()
1087 cntrs->link_error_recovery_counter = in qib_get_counters()
1088 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV); in qib_get_counters()
1094 cntrs->link_downed_counter = in qib_get_counters()
1095 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN); in qib_get_counters()
1096 cntrs->port_rcv_errors = in qib_get_counters()
1097 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) + in qib_get_counters()
1098 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) + in qib_get_counters()
1099 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) + in qib_get_counters()
1100 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) + in qib_get_counters()
1101 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) + in qib_get_counters()
1102 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) + in qib_get_counters()
1103 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) + in qib_get_counters()
1104 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) + in qib_get_counters()
1105 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT); in qib_get_counters()
1106 cntrs->port_rcv_errors += in qib_get_counters()
1107 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR); in qib_get_counters()
1108 cntrs->port_rcv_errors += in qib_get_counters()
1109 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR); in qib_get_counters()
1110 cntrs->port_rcv_remphys_errors = in qib_get_counters()
1111 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP); in qib_get_counters()
1112 cntrs->port_xmit_discards = in qib_get_counters()
1113 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL); in qib_get_counters()
1114 cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd, in qib_get_counters()
1116 cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd, in qib_get_counters()
1118 cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd, in qib_get_counters()
1120 cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd, in qib_get_counters()
1122 cntrs->local_link_integrity_errors = in qib_get_counters()
1123 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI); in qib_get_counters()
1124 cntrs->excessive_buffer_overrun_errors = in qib_get_counters()
1125 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL); in qib_get_counters()
1126 cntrs->vl15_dropped = in qib_get_counters()
1127 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP); in qib_get_counters()
1136 * qib_ib_piobufavail - callback when a PIO buffer is available
1145 struct qib_ibdev *dev = &dd->verbs_dev; in qib_ib_piobufavail()
1153 list = &dev->piowait; in qib_ib_piobufavail()
1162 spin_lock_irqsave(&dev->rdi.pending_lock, flags); in qib_ib_piobufavail()
1166 priv = list_entry(list->next, struct qib_qp_priv, iowait); in qib_ib_piobufavail()
1167 qp = priv->owner; in qib_ib_piobufavail()
1168 list_del_init(&priv->iowait); in qib_ib_piobufavail()
1172 dd->f_wantpiobuf_intr(dd, 0); in qib_ib_piobufavail()
1174 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags); in qib_ib_piobufavail()
1179 spin_lock_irqsave(&qp->s_lock, flags); in qib_ib_piobufavail()
1180 if (qp->s_flags & RVT_S_WAIT_PIO) { in qib_ib_piobufavail()
1181 qp->s_flags &= ~RVT_S_WAIT_PIO; in qib_ib_piobufavail()
1184 spin_unlock_irqrestore(&qp->s_lock, flags); in qib_ib_piobufavail()
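/*
 * qib_ib_piobufavail() is the callback invoked when PIO buffers free
 * up: it pulls a small batch of QPs off dev->piowait, turns the
 * buffer-available interrupt back off once the list is drained, and
 * clears RVT_S_WAIT_PIO on each QP so its send engine runs again.
 */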
1196 struct qib_pportdata *ppd = &dd->pport[port_num - 1]; in qib_query_port()
1198 u16 lid = ppd->lid; in qib_query_port()
1201 props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE); in qib_query_port()
1202 props->lmc = ppd->lmc; in qib_query_port()
1203 props->state = dd->f_iblink_state(ppd->lastibcstat); in qib_query_port()
1204 props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat); in qib_query_port()
1205 props->gid_tbl_len = QIB_GUIDS_PER_PORT; in qib_query_port()
1206 props->active_width = ppd->link_width_active; in qib_query_port()
1208 props->active_speed = ppd->link_speed_active; in qib_query_port()
1209 props->max_vl_num = qib_num_vls(ppd->vls_supported); in qib_query_port()
1211 props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096; in qib_query_port()
1212 switch (ppd->ibmtu) { in qib_query_port()
1231 props->active_mtu = mtu; in qib_query_port()
1246 ret = -EOPNOTSUPP; in qib_modify_device()
1251 memcpy(device->node_desc, device_modify->node_desc, in qib_modify_device()
1253 for (i = 0; i < dd->num_pports; i++) { in qib_modify_device()
1254 struct qib_ibport *ibp = &dd->pport[i].ibport_data; in qib_modify_device()
1262 cpu_to_be64(device_modify->sys_image_guid); in qib_modify_device()
1263 for (i = 0; i < dd->num_pports; i++) { in qib_modify_device()
1264 struct qib_ibport *ibp = &dd->pport[i].ibport_data; in qib_modify_device()
1280 struct qib_pportdata *ppd = &dd->pport[port_num - 1]; in qib_shut_down_port()
1294 *guid = ppd->guid; in qib_get_guid_be()
1296 *guid = ibp->guids[guid_index - 1]; in qib_get_guid_be()
1298 return -EINVAL; in qib_get_guid_be()
1306 return -EINVAL; in qib_check_ah()
1309 return -EINVAL; in qib_check_ah()
1315 return -EINVAL; in qib_check_ah()
1334 ah->vl = ibp->sl_to_vl[rdma_ah_get_sl(&ah->attr)]; in qib_notify_new_ah()
1335 ah->log_pmtu = ilog2(ppd->ibmtu); in qib_notify_new_ah()
1341 struct ib_ah *ah = ERR_PTR(-EINVAL); in qib_create_qp0_ah()
1345 u32 port_num = ppd->port; in qib_create_qp0_ah()
1348 attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num); in qib_create_qp0_ah()
1352 qp0 = rcu_dereference(ibp->rvp.qp[0]); in qib_create_qp0_ah()
1354 ah = rdma_create_ah(qp0->ibqp.pd, &attr, 0); in qib_create_qp0_ah()
1360 * qib_get_npkeys - return the size of the PKEY table for context 0
1365 return ARRAY_SIZE(dd->rcd[0]->pkeys); in qib_get_npkeys()
1375 struct qib_devdata *dd = ppd->dd; in qib_get_pkey()
1376 unsigned ctxt = ppd->hw_pidx; in qib_get_pkey()
1379 /* dd->rcd null if mini_init or some init failures */ in qib_get_pkey()
1380 if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys)) in qib_get_pkey()
1383 ret = dd->rcd[ctxt]->pkeys[index]; in qib_get_pkey()
1391 struct qib_ibport *ibp = &ppd->ibport_data; in init_ibport()
1393 spin_lock_init(&ibp->rvp.lock); in init_ibport()
1395 ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX; in init_ibport()
1396 ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE); in init_ibport()
1397 ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP | in init_ibport()
1402 if (ppd->dd->flags & QIB_HAS_LINK_LATENCY) in init_ibport()
1403 ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP; in init_ibport()
1404 ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA; in init_ibport()
1405 ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; in init_ibport()
1406 ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; in init_ibport()
1407 ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; in init_ibport()
1408 ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT; in init_ibport()
1412 ibp->z_symbol_error_counter = cntrs.symbol_error_counter; in init_ibport()
1413 ibp->z_link_error_recovery_counter = in init_ibport()
1415 ibp->z_link_downed_counter = cntrs.link_downed_counter; in init_ibport()
1416 ibp->z_port_rcv_errors = cntrs.port_rcv_errors; in init_ibport()
1417 ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors; in init_ibport()
1418 ibp->z_port_xmit_discards = cntrs.port_xmit_discards; in init_ibport()
1419 ibp->z_port_xmit_data = cntrs.port_xmit_data; in init_ibport()
1420 ibp->z_port_rcv_data = cntrs.port_rcv_data; in init_ibport()
1421 ibp->z_port_xmit_packets = cntrs.port_xmit_packets; in init_ibport()
1422 ibp->z_port_rcv_packets = cntrs.port_rcv_packets; in init_ibport()
1423 ibp->z_local_link_integrity_errors = in init_ibport()
1425 ibp->z_excessive_buffer_overrun_errors = in init_ibport()
1427 ibp->z_vl15_dropped = cntrs.vl15_dropped; in init_ibport()
1428 RCU_INIT_POINTER(ibp->rvp.qp[0], NULL); in init_ibport()
1429 RCU_INIT_POINTER(ibp->rvp.qp[1], NULL); in init_ibport()
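/*
 * init_ibport() snapshots the current hardware counters into the
 * per-port "z_" baselines so later PMA queries can be reported
 * relative to driver initialization, and starts with the QP0/QP1
 * pointers cleared.
 */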
1433 * qib_fill_device_attr - Fill in rvt dev info device attributes.
1438 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; in qib_fill_device_attr()
1440 memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props)); in qib_fill_device_attr()
1442 rdi->dparms.props.max_pd = ib_qib_max_pds; in qib_fill_device_attr()
1443 rdi->dparms.props.max_ah = ib_qib_max_ahs; in qib_fill_device_attr()
1444 rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | in qib_fill_device_attr()
1448 rdi->dparms.props.page_size_cap = PAGE_SIZE; in qib_fill_device_attr()
1449 rdi->dparms.props.vendor_id = in qib_fill_device_attr()
1451 rdi->dparms.props.vendor_part_id = dd->deviceid; in qib_fill_device_attr()
1452 rdi->dparms.props.hw_ver = dd->minrev; in qib_fill_device_attr()
1453 rdi->dparms.props.sys_image_guid = ib_qib_sys_image_guid; in qib_fill_device_attr()
1454 rdi->dparms.props.max_mr_size = ~0ULL; in qib_fill_device_attr()
1455 rdi->dparms.props.max_qp = ib_qib_max_qps; in qib_fill_device_attr()
1456 rdi->dparms.props.max_qp_wr = ib_qib_max_qp_wrs; in qib_fill_device_attr()
1457 rdi->dparms.props.max_send_sge = ib_qib_max_sges; in qib_fill_device_attr()
1458 rdi->dparms.props.max_recv_sge = ib_qib_max_sges; in qib_fill_device_attr()
1459 rdi->dparms.props.max_sge_rd = ib_qib_max_sges; in qib_fill_device_attr()
1460 rdi->dparms.props.max_cq = ib_qib_max_cqs; in qib_fill_device_attr()
1461 rdi->dparms.props.max_cqe = ib_qib_max_cqes; in qib_fill_device_attr()
1462 rdi->dparms.props.max_ah = ib_qib_max_ahs; in qib_fill_device_attr()
1463 rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC; in qib_fill_device_attr()
1464 rdi->dparms.props.max_qp_init_rd_atom = 255; in qib_fill_device_attr()
1465 rdi->dparms.props.max_srq = ib_qib_max_srqs; in qib_fill_device_attr()
1466 rdi->dparms.props.max_srq_wr = ib_qib_max_srq_wrs; in qib_fill_device_attr()
1467 rdi->dparms.props.max_srq_sge = ib_qib_max_srq_sges; in qib_fill_device_attr()
1468 rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB; in qib_fill_device_attr()
1469 rdi->dparms.props.max_pkeys = qib_get_npkeys(dd); in qib_fill_device_attr()
1470 rdi->dparms.props.max_mcast_grp = ib_qib_max_mcast_grps; in qib_fill_device_attr()
1471 rdi->dparms.props.max_mcast_qp_attach = ib_qib_max_mcast_qp_attached; in qib_fill_device_attr()
1472 rdi->dparms.props.max_total_mcast_qp_attach = in qib_fill_device_attr()
1473 rdi->dparms.props.max_mcast_qp_attach * in qib_fill_device_attr()
1474 rdi->dparms.props.max_mcast_grp; in qib_fill_device_attr()
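/*
 * The aggregate multicast attach limit advertised to the core is the
 * per-group limit times the number of groups; both come from module
 * parameters (ib_qib_max_mcast_grps is the "Maximum number of
 * multicast groups to support" parameter near the top of the file).
 */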
1476 dd->verbs_dev.rdi.post_parms = qib_post_parms; in qib_fill_device_attr()
1479 dd->verbs_dev.rdi.wc_opcode = ib_qib_wc_opcode; in qib_fill_device_attr()
1493 * qib_register_ib_device - register our device with the infiniband core
1499 struct qib_ibdev *dev = &dd->verbs_dev; in qib_register_ib_device()
1500 struct ib_device *ibdev = &dev->rdi.ibdev; in qib_register_ib_device()
1501 struct qib_pportdata *ppd = dd->pport; in qib_register_ib_device()
1505 for (i = 0; i < dd->num_pports; i++) in qib_register_ib_device()
1508 /* Only need to initialize non-zero fields. */ in qib_register_ib_device()
1509 timer_setup(&dev->mem_timer, mem_timer, 0); in qib_register_ib_device()
1511 INIT_LIST_HEAD(&dev->piowait); in qib_register_ib_device()
1512 INIT_LIST_HEAD(&dev->dmawait); in qib_register_ib_device()
1513 INIT_LIST_HEAD(&dev->txwait); in qib_register_ib_device()
1514 INIT_LIST_HEAD(&dev->memwait); in qib_register_ib_device()
1515 INIT_LIST_HEAD(&dev->txreq_free); in qib_register_ib_device()
1517 if (ppd->sdma_descq_cnt) { in qib_register_ib_device()
1518 dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev, in qib_register_ib_device()
1519 ppd->sdma_descq_cnt * in qib_register_ib_device()
1521 &dev->pio_hdrs_phys, in qib_register_ib_device()
1523 if (!dev->pio_hdrs) { in qib_register_ib_device()
1524 ret = -ENOMEM; in qib_register_ib_device()
1529 for (i = 0; i < ppd->sdma_descq_cnt; i++) { in qib_register_ib_device()
1534 ret = -ENOMEM; in qib_register_ib_device()
1537 tx->hdr_inx = i; in qib_register_ib_device()
1538 list_add(&tx->txreq.list, &dev->txreq_free); in qib_register_ib_device()
1547 ib_qib_sys_image_guid = ppd->guid; in qib_register_ib_device()
1549 ibdev->node_guid = ppd->guid; in qib_register_ib_device()
1550 ibdev->phys_port_cnt = dd->num_pports; in qib_register_ib_device()
1551 ibdev->dev.parent = &dd->pcidev->dev; in qib_register_ib_device()
1553 snprintf(ibdev->node_desc, sizeof(ibdev->node_desc), in qib_register_ib_device()
1554 "Intel Infiniband HCA %s", init_utsname()->nodename); in qib_register_ib_device()
1559 dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev; in qib_register_ib_device()
1560 dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah; in qib_register_ib_device()
1561 dd->verbs_dev.rdi.driver_f.setup_wqe = qib_check_send_wqe; in qib_register_ib_device()
1562 dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah; in qib_register_ib_device()
1563 dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn; in qib_register_ib_device()
1564 dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc; in qib_register_ib_device()
1565 dd->verbs_dev.rdi.driver_f.qp_priv_free = qib_qp_priv_free; in qib_register_ib_device()
1566 dd->verbs_dev.rdi.driver_f.free_all_qps = qib_free_all_qps; in qib_register_ib_device()
1567 dd->verbs_dev.rdi.driver_f.notify_qp_reset = qib_notify_qp_reset; in qib_register_ib_device()
1568 dd->verbs_dev.rdi.driver_f.do_send = qib_do_send; in qib_register_ib_device()
1569 dd->verbs_dev.rdi.driver_f.schedule_send = qib_schedule_send; in qib_register_ib_device()
1570 dd->verbs_dev.rdi.driver_f.quiesce_qp = qib_quiesce_qp; in qib_register_ib_device()
1571 dd->verbs_dev.rdi.driver_f.stop_send_queue = qib_stop_send_queue; in qib_register_ib_device()
1572 dd->verbs_dev.rdi.driver_f.flush_qp_waiters = qib_flush_qp_waiters; in qib_register_ib_device()
1573 dd->verbs_dev.rdi.driver_f.notify_error_qp = qib_notify_error_qp; in qib_register_ib_device()
1574 dd->verbs_dev.rdi.driver_f.notify_restart_rc = qib_restart_rc; in qib_register_ib_device()
1575 dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = qib_mtu_to_path_mtu; in qib_register_ib_device()
1576 dd->verbs_dev.rdi.driver_f.mtu_from_qp = qib_mtu_from_qp; in qib_register_ib_device()
1577 dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = qib_get_pmtu_from_attr; in qib_register_ib_device()
1578 dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _qib_schedule_send; in qib_register_ib_device()
1579 dd->verbs_dev.rdi.driver_f.query_port_state = qib_query_port; in qib_register_ib_device()
1580 dd->verbs_dev.rdi.driver_f.shut_down_port = qib_shut_down_port; in qib_register_ib_device()
1581 dd->verbs_dev.rdi.driver_f.cap_mask_chg = qib_cap_mask_chg; in qib_register_ib_device()
1582 dd->verbs_dev.rdi.driver_f.notify_create_mad_agent = in qib_register_ib_device()
1584 dd->verbs_dev.rdi.driver_f.notify_free_mad_agent = in qib_register_ib_device()
1587 dd->verbs_dev.rdi.dparms.max_rdma_atomic = QIB_MAX_RDMA_ATOMIC; in qib_register_ib_device()
1588 dd->verbs_dev.rdi.driver_f.get_guid_be = qib_get_guid_be; in qib_register_ib_device()
1589 dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size; in qib_register_ib_device()
1590 dd->verbs_dev.rdi.dparms.qp_table_size = ib_qib_qp_table_size; in qib_register_ib_device()
1591 dd->verbs_dev.rdi.dparms.qpn_start = 1; in qib_register_ib_device()
1592 dd->verbs_dev.rdi.dparms.qpn_res_start = QIB_KD_QP; in qib_register_ib_device()
1593 dd->verbs_dev.rdi.dparms.qpn_res_end = QIB_KD_QP; /* Reserve one QP */ in qib_register_ib_device()
1594 dd->verbs_dev.rdi.dparms.qpn_inc = 1; in qib_register_ib_device()
1595 dd->verbs_dev.rdi.dparms.qos_shift = 1; in qib_register_ib_device()
1596 dd->verbs_dev.rdi.dparms.psn_mask = QIB_PSN_MASK; in qib_register_ib_device()
1597 dd->verbs_dev.rdi.dparms.psn_shift = QIB_PSN_SHIFT; in qib_register_ib_device()
1598 dd->verbs_dev.rdi.dparms.psn_modify_mask = QIB_PSN_MASK; in qib_register_ib_device()
1599 dd->verbs_dev.rdi.dparms.nports = dd->num_pports; in qib_register_ib_device()
1600 dd->verbs_dev.rdi.dparms.npkeys = qib_get_npkeys(dd); in qib_register_ib_device()
1601 dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id; in qib_register_ib_device()
1602 dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB; in qib_register_ib_device()
1603 dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE; in qib_register_ib_device()
1604 dd->verbs_dev.rdi.dparms.sge_copy_mode = RVT_SGE_COPY_MEMCPY; in qib_register_ib_device()
1608 ppd = dd->pport; in qib_register_ib_device()
1609 for (i = 0; i < dd->num_pports; i++, ppd++) { in qib_register_ib_device()
1610 ctxt = ppd->hw_pidx; in qib_register_ib_device()
1611 rvt_init_port(&dd->verbs_dev.rdi, in qib_register_ib_device()
1612 &ppd->ibport_data.rvp, in qib_register_ib_device()
1614 dd->rcd[ctxt]->pkeys); in qib_register_ib_device()
1618 ret = rvt_register_device(&dd->verbs_dev.rdi); in qib_register_ib_device()
1625 while (!list_empty(&dev->txreq_free)) { in qib_register_ib_device()
1626 struct list_head *l = dev->txreq_free.next; in qib_register_ib_device()
1633 if (ppd->sdma_descq_cnt) in qib_register_ib_device()
1634 dma_free_coherent(&dd->pcidev->dev, in qib_register_ib_device()
1635 ppd->sdma_descq_cnt * in qib_register_ib_device()
1637 dev->pio_hdrs, dev->pio_hdrs_phys); in qib_register_ib_device()
1639 qib_dev_err(dd, "cannot register verbs: %d!\n", -ret); in qib_register_ib_device()
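/*
 * qib_register_ib_device() wires the qib driver into rdmavt: it
 * allocates the per-port SDMA header array and txreq pool, fills in
 * the rdi->driver_f callbacks and dparms, initializes each port's
 * rvt_ibport, and finally calls rvt_register_device().  On failure it
 * unwinds the txreq list and the coherent header allocation before
 * logging the error above.
 */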
1645 struct qib_ibdev *dev = &dd->verbs_dev; in qib_unregister_ib_device()
1647 rvt_unregister_device(&dd->verbs_dev.rdi); in qib_unregister_ib_device()
1649 if (!list_empty(&dev->piowait)) in qib_unregister_ib_device()
1651 if (!list_empty(&dev->dmawait)) in qib_unregister_ib_device()
1653 if (!list_empty(&dev->txwait)) in qib_unregister_ib_device()
1655 if (!list_empty(&dev->memwait)) in qib_unregister_ib_device()
1658 del_timer_sync(&dev->mem_timer); in qib_unregister_ib_device()
1659 while (!list_empty(&dev->txreq_free)) { in qib_unregister_ib_device()
1660 struct list_head *l = dev->txreq_free.next; in qib_unregister_ib_device()
1667 if (dd->pport->sdma_descq_cnt) in qib_unregister_ib_device()
1668 dma_free_coherent(&dd->pcidev->dev, in qib_unregister_ib_device()
1669 dd->pport->sdma_descq_cnt * in qib_unregister_ib_device()
1671 dev->pio_hdrs, dev->pio_hdrs_phys); in qib_unregister_ib_device()
1675 * _qib_schedule_send - schedule progress
1686 to_iport(qp->ibqp.device, qp->port_num); in _qib_schedule_send()
1688 struct qib_qp_priv *priv = qp->priv; in _qib_schedule_send()
1690 return queue_work(ppd->qib_wq, &priv->s_work); in _qib_schedule_send()
1694 * qib_schedule_send - schedule progress