Lines Matching +full:mcast +full:- +full:groups
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright(c) 2015 - 2020 Intel Corporation.
68 "Maximum number of multicast groups to support");
111 MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
135 * Length of header by opcode, 0 --> not supported
276 if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK)) in qp_ok()
278 if (((packet->opcode & RVT_OPCODE_QP_MASK) == in qp_ok()
279 packet->qp->allowed_ops) || in qp_ok()
280 (packet->opcode == IB_OPCODE_CNP)) in qp_ok()
281 return opcode_handler_tbl[packet->opcode]; in qp_ok()
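qp_ok() gates receive dispatch: the QP's state must allow receives, and the opcode's transport class (its top bits) must match the QP's allowed_ops, with CNP admitted unconditionally; the handler then comes from an opcode-indexed table where a NULL slot means not supported. A minimal userspace model of that mask-and-dispatch shape (OPCODE_QP_MASK, OP_CNP and the opcode values below are assumed stand-ins, not the driver's constants):

    #include <stdio.h>

    #define OPCODE_QP_MASK 0xE0          /* transport class in opcode bits [7:5] */
    #define OP_CNP         0x81          /* CNP opcode (assumed value) */

    typedef void (*pkt_handler)(unsigned opcode);

    static void handle_rc_send(unsigned op) { printf("RC handler, opcode 0x%02x\n", op); }

    static pkt_handler handler_tbl[256]; /* NULL --> opcode not supported */

    /* Admit the packet only if the QP may receive and the opcode class fits. */
    static pkt_handler qp_ok(unsigned opcode, unsigned allowed_ops, int recv_ok)
    {
        if (!recv_ok)
            return NULL;
        if ((opcode & OPCODE_QP_MASK) == allowed_ops || opcode == OP_CNP)
            return handler_tbl[opcode];
        return NULL;
    }

    int main(void)
    {
        handler_tbl[0x04] = handle_rc_send;      /* pretend 0x04 is an RC opcode */
        pkt_handler h = qp_ok(0x04, 0x00, 1);    /* RC class modeled as 0x00 */
        if (h)
            h(0x04);
        else
            puts("dropped");
        return 0;
    }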
291 * In order to drop non-IB traffic we set PbcInsertHrc to NONE (0x2). in hfi1_fault_tx()
319 if (packet->qp->ibqp.qp_type != IB_QPT_RC || in tid_qp_ok()
320 !(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK)) in tid_qp_ok()
329 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_kdeth_eager_rcv()
330 struct ib_header *hdr = packet->hdr; in hfi1_kdeth_eager_rcv()
331 u32 tlen = packet->tlen; in hfi1_kdeth_eager_rcv()
332 struct hfi1_pportdata *ppd = rcd->ppd; in hfi1_kdeth_eager_rcv()
333 struct hfi1_ibport *ibp = &ppd->ibport_data; in hfi1_kdeth_eager_rcv()
334 struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi; in hfi1_kdeth_eager_rcv()
345 lnh = be16_to_cpu(hdr->lrh[0]) & 3; in hfi1_kdeth_eager_rcv()
349 packet->ohdr = &hdr->u.oth; in hfi1_kdeth_eager_rcv()
350 trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf))); in hfi1_kdeth_eager_rcv()
352 opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24); in hfi1_kdeth_eager_rcv()
353 inc_opstats(tlen, &rcd->opstats->stats[opcode]); in hfi1_kdeth_eager_rcv()
356 qp_num = be32_to_cpu(packet->ohdr->u.tid_rdma.r_req.verbs_qp) & in hfi1_kdeth_eager_rcv()
360 packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); in hfi1_kdeth_eager_rcv()
361 if (!packet->qp) in hfi1_kdeth_eager_rcv()
363 spin_lock_irqsave(&packet->qp->r_lock, flags); in hfi1_kdeth_eager_rcv()
369 spin_unlock_irqrestore(&packet->qp->r_lock, flags); in hfi1_kdeth_eager_rcv()
374 spin_unlock_irqrestore(&packet->qp->r_lock, flags); in hfi1_kdeth_eager_rcv()
378 ibp->rvp.n_pkt_drops++; in hfi1_kdeth_eager_rcv()
383 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_kdeth_expected_rcv()
384 struct ib_header *hdr = packet->hdr; in hfi1_kdeth_expected_rcv()
385 u32 tlen = packet->tlen; in hfi1_kdeth_expected_rcv()
386 struct hfi1_pportdata *ppd = rcd->ppd; in hfi1_kdeth_expected_rcv()
387 struct hfi1_ibport *ibp = &ppd->ibport_data; in hfi1_kdeth_expected_rcv()
388 struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi; in hfi1_kdeth_expected_rcv()
399 lnh = be16_to_cpu(hdr->lrh[0]) & 3; in hfi1_kdeth_expected_rcv()
403 packet->ohdr = &hdr->u.oth; in hfi1_kdeth_expected_rcv()
404 trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf))); in hfi1_kdeth_expected_rcv()
406 opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24); in hfi1_kdeth_expected_rcv()
407 inc_opstats(tlen, &rcd->opstats->stats[opcode]); in hfi1_kdeth_expected_rcv()
410 qp_num = be32_to_cpu(packet->ohdr->u.tid_rdma.r_rsp.verbs_qp) & in hfi1_kdeth_expected_rcv()
414 packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); in hfi1_kdeth_expected_rcv()
415 if (!packet->qp) in hfi1_kdeth_expected_rcv()
417 spin_lock_irqsave(&packet->qp->r_lock, flags); in hfi1_kdeth_expected_rcv()
423 spin_unlock_irqrestore(&packet->qp->r_lock, flags); in hfi1_kdeth_expected_rcv()
428 spin_unlock_irqrestore(&packet->qp->r_lock, flags); in hfi1_kdeth_expected_rcv()
432 ibp->rvp.n_pkt_drops++; in hfi1_kdeth_expected_rcv()
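Both KDETH receive paths (eager and expected) recover the destination QP the same way: read the 24-bit verbs QP number out of the big-endian TID RDMA header word, mask it, look the QP up via rvt_lookup_qpn(), and do all per-QP work under qp->r_lock; a failed lookup lands on the drop path that bumps n_pkt_drops. A sketch of just the extract-and-mask step (0xFFFFFF is an assumed stand-in for rdmavt's QPN mask):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>   /* ntohl() for the be32 -> cpu conversion */

    #define QPN_MASK 0xFFFFFF        /* QP numbers are 24 bits wide (assumed mask) */

    /* Recover the verbs QP number from a big-endian header word. */
    static uint32_t qpn_from_hdr(uint32_t verbs_qp_be)
    {
        return ntohl(verbs_qp_be) & QPN_MASK;
    }

    int main(void)
    {
        uint32_t wire = htonl(0x01000123);   /* high byte is other header bits */
        printf("qp_num = 0x%06x\n", (unsigned)qpn_from_hdr(wire)); /* -> 0x000123 */
        return 0;
    }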
437 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_do_pkey_check()
438 struct hfi1_pportdata *ppd = rcd->ppd; in hfi1_do_pkey_check()
439 struct hfi1_16b_header *hdr = packet->hdr; in hfi1_do_pkey_check()
443 if (packet->etype != RHF_RCV_TYPE_BYPASS) in hfi1_do_pkey_check()
448 return ingress_pkey_check(ppd, pkey, packet->sc, in hfi1_do_pkey_check()
449 packet->qp->s_pkey_index, in hfi1_do_pkey_check()
450 packet->slid, true); in hfi1_do_pkey_check()
457 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_handle_packet()
458 struct hfi1_pportdata *ppd = rcd->ppd; in hfi1_handle_packet()
460 struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi; in hfi1_handle_packet()
464 inc_opstats(packet->tlen, &rcd->opstats->stats[packet->opcode]); in hfi1_handle_packet()
467 struct rvt_mcast *mcast; in hfi1_handle_packet() local
470 if (!packet->grh) in hfi1_handle_packet()
472 mcast = rvt_mcast_find(&ibp->rvp, in hfi1_handle_packet()
473 &packet->grh->dgid, in hfi1_handle_packet()
474 opa_get_lid(packet->dlid, 9B)); in hfi1_handle_packet()
475 if (!mcast) in hfi1_handle_packet()
478 list_for_each_entry_rcu(p, &mcast->qp_list, list) { in hfi1_handle_packet()
479 packet->qp = p->qp; in hfi1_handle_packet()
482 spin_lock_irqsave(&packet->qp->r_lock, flags); in hfi1_handle_packet()
487 ibp->rvp.n_pkt_drops++; in hfi1_handle_packet()
488 spin_unlock_irqrestore(&packet->qp->r_lock, flags); in hfi1_handle_packet()
495 if (atomic_dec_return(&mcast->refcount) <= 1) in hfi1_handle_packet()
496 wake_up(&mcast->wait); in hfi1_handle_packet()
499 if (packet->etype == RHF_RCV_TYPE_BYPASS && in hfi1_handle_packet()
500 hfi1_16B_get_l4(packet->hdr) == OPA_16B_L4_FM) in hfi1_handle_packet()
501 qp_num = hfi1_16B_get_dest_qpn(packet->mgmt); in hfi1_handle_packet()
503 qp_num = ib_bth_get_qpn(packet->ohdr); in hfi1_handle_packet()
506 packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); in hfi1_handle_packet()
507 if (!packet->qp) in hfi1_handle_packet()
513 spin_lock_irqsave(&packet->qp->r_lock, flags); in hfi1_handle_packet()
518 ibp->rvp.n_pkt_drops++; in hfi1_handle_packet()
519 spin_unlock_irqrestore(&packet->qp->r_lock, flags); in hfi1_handle_packet()
526 ibp->rvp.n_pkt_drops++; in hfi1_handle_packet()
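For a multicast DLID, hfi1_handle_packet() resolves the group from the GRH's DGID, delivers a copy to every QP on the group's list (each under its own r_lock), then drops the reference rvt_mcast_find() took, waking any thread blocked detaching the last QP. A compact userspace model of that fan-out-then-release shape (the list and refcount types are stand-ins for rdmavt's):

    #include <stdio.h>
    #include <stdatomic.h>

    struct qp { int qpn; struct qp *next; };

    struct mcast_group {
        struct qp *qp_list;          /* QPs attached to the group */
        atomic_int refcount;         /* held while a packet is in flight */
    };

    static void deliver(struct qp *qp) { printf("deliver to QP %d\n", qp->qpn); }

    static void mcast_fanout(struct mcast_group *g)
    {
        /* the lookup would have taken a reference; model that here */
        atomic_fetch_add(&g->refcount, 1);

        for (struct qp *p = g->qp_list; p; p = p->next)
            deliver(p);              /* real code takes p->r_lock per QP */

        /* drop the lookup reference; a waiter could now finish detaching */
        if (atomic_fetch_sub(&g->refcount, 1) - 1 <= 1)
            ; /* wake_up(&g->wait) in the kernel */
    }

    int main(void)
    {
        struct qp b = { 8, NULL }, a = { 7, &b };
        struct mcast_group g = { &a, 1 };
        mcast_fanout(&g);
        return 0;
    }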
530 * hfi1_ib_rcv - process an incoming packet
537 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_ib_rcv()
539 trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf))); in hfi1_ib_rcv()
540 hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid)); in hfi1_ib_rcv()
545 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_16B_rcv()
547 trace_input_ibhdr(rcd->dd, packet, false); in hfi1_16B_rcv()
548 hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid)); in hfi1_16B_rcv()
558 struct list_head *list = &dev->memwait; in mem_timer()
564 write_seqlock_irqsave(&dev->iowait_lock, flags); in mem_timer()
568 priv = qp->priv; in mem_timer()
569 list_del_init(&priv->s_iowait.list); in mem_timer()
570 priv->s_iowait.lock = NULL; in mem_timer()
573 mod_timer(&dev->mem_timer, jiffies + 1); in mem_timer()
575 write_sequnlock_irqrestore(&dev->iowait_lock, flags); in mem_timer()
591 struct rvt_qp *qp = tx->qp; in verbs_sdma_complete()
593 spin_lock(&qp->s_lock); in verbs_sdma_complete()
594 if (tx->wqe) { in verbs_sdma_complete()
595 rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS); in verbs_sdma_complete()
596 } else if (qp->ibqp.qp_type == IB_QPT_RC) { in verbs_sdma_complete()
599 hdr = &tx->phdr.hdr; in verbs_sdma_complete()
604 spin_unlock(&qp->s_lock); in verbs_sdma_complete()
611 struct hfi1_qp_priv *priv = qp->priv; in hfi1_wait_kmem()
612 struct ib_qp *ibqp = &qp->ibqp; in hfi1_wait_kmem()
613 struct ib_device *ibdev = ibqp->device; in hfi1_wait_kmem()
616 if (list_empty(&priv->s_iowait.list)) { in hfi1_wait_kmem()
617 if (list_empty(&dev->memwait)) in hfi1_wait_kmem()
618 mod_timer(&dev->mem_timer, jiffies + 1); in hfi1_wait_kmem()
619 qp->s_flags |= RVT_S_WAIT_KMEM; in hfi1_wait_kmem()
620 list_add_tail(&priv->s_iowait.list, &dev->memwait); in hfi1_wait_kmem()
621 priv->s_iowait.lock = &dev->iowait_lock; in hfi1_wait_kmem()
634 spin_lock_irqsave(&qp->s_lock, flags); in wait_kmem()
635 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { in wait_kmem()
636 write_seqlock(&dev->iowait_lock); in wait_kmem()
637 list_add_tail(&ps->s_txreq->txreq.list, in wait_kmem()
638 &ps->wait->tx_head); in wait_kmem()
640 write_sequnlock(&dev->iowait_lock); in wait_kmem()
641 hfi1_qp_unbusy(qp, ps->wait); in wait_kmem()
642 ret = -EBUSY; in wait_kmem()
644 spin_unlock_irqrestore(&qp->s_lock, flags); in wait_kmem()
659 struct rvt_sge_state *ss = tx->ss; in build_verbs_ulp_payload()
660 struct rvt_sge *sg_list = ss->sg_list; in build_verbs_ulp_payload()
661 struct rvt_sge sge = ss->sge; in build_verbs_ulp_payload()
662 u8 num_sge = ss->num_sge; in build_verbs_ulp_payload()
667 len = rvt_get_sge_length(&ss->sge, length); in build_verbs_ulp_payload()
670 sde->dd, in build_verbs_ulp_payload()
671 &tx->txreq, in build_verbs_ulp_payload()
672 ss->sge.vaddr, in build_verbs_ulp_payload()
677 length -= len; in build_verbs_ulp_payload()
682 ss->sge = sge; in build_verbs_ulp_payload()
683 ss->num_sge = num_sge; in build_verbs_ulp_payload()
684 ss->sg_list = sg_list; in build_verbs_ulp_payload()
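build_verbs_ulp_payload() snapshots the SGE state by value (sge, sg_list, num_sge), walks the payload adding one descriptor per contiguous segment, and restores the snapshot at the end so the caller can replay the send if the txreq has to be queued. A minimal model of that snapshot-walk-restore pattern (the types here are illustrative, not rvt's):

    #include <stdio.h>
    #include <stddef.h>

    struct seg { const char *vaddr; size_t len; };

    struct sge_state {
        struct seg *cur;     /* current segment */
        size_t offset;       /* bytes already consumed in *cur */
    };

    /* Consume up to want bytes from the current segment; advance the state. */
    static size_t get_seg(struct sge_state *ss, size_t want, const char **vaddr)
    {
        size_t avail = ss->cur->len - ss->offset;
        size_t take = want < avail ? want : avail;

        *vaddr = ss->cur->vaddr + ss->offset;
        ss->offset += take;
        if (ss->offset == ss->cur->len) {   /* segment exhausted: next one */
            ss->cur++;
            ss->offset = 0;
        }
        return take;
    }

    static void build_payload(struct sge_state *ss, size_t length)
    {
        struct sge_state saved = *ss;       /* snapshot, like the sge copy */

        while (length) {
            const char *va;
            size_t len = get_seg(ss, length, &va);
            printf("descriptor: %p + %zu bytes\n", (const void *)va, len);
            length -= len;
        }
        *ss = saved;                        /* restore for a possible replay */
    }

    int main(void)
    {
        struct seg segs[] = { { "aaaa", 4 }, { "bbbbbb", 6 } };
        struct sge_state ss = { segs, 0 };
        build_payload(&ss, 8);              /* spans both segments */
        return 0;
    }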
689 * update_tx_opstats - record stats by opcode
701 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); in update_tx_opstats()
702 struct hfi1_opcode_stats_perctx *s = get_cpu_ptr(dd->tx_opstats); in update_tx_opstats()
704 inc_opstats(plen * 4, &s->stats[ps->opcode]); in update_tx_opstats()
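update_tx_opstats() charges each sent packet to a per-CPU, per-opcode bucket so the hot path never bounces a shared cache line; plen is in dwords, hence the * 4 to get bytes. A thread-local model of the same idea (the bucket layout is assumed):

    #include <stdio.h>
    #include <stdint.h>

    struct opstat { uint64_t n_packets; uint64_t n_bytes; };

    /* one bucket per 8-bit opcode; thread-local stands in for per-CPU */
    static _Thread_local struct opstat tx_opstats[256];

    static void update_tx_opstats(uint8_t opcode, uint32_t plen_dwords)
    {
        struct opstat *s = &tx_opstats[opcode];

        s->n_packets++;
        s->n_bytes += (uint64_t)plen_dwords * 4;   /* dwords -> bytes */
    }

    int main(void)
    {
        update_tx_opstats(0x04, 64);               /* one 256-byte packet */
        printf("opcode 0x04: %llu pkts, %llu bytes\n",
               (unsigned long long)tx_opstats[0x04].n_packets,
               (unsigned long long)tx_opstats[0x04].n_bytes);
        return 0;
    }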
726 struct hfi1_sdma_header *phdr = &tx->phdr; in build_verbs_tx_desc()
727 u16 hdrbytes = (tx->hdr_dwords + sizeof(pbc) / 4) << 2; in build_verbs_tx_desc()
730 if (tx->phdr.hdr.hdr_type) { in build_verbs_tx_desc()
735 extra_bytes = hfi1_get_16b_padding(hdrbytes - 8, length) + in build_verbs_tx_desc()
738 if (!ahg_info->ahgcount) { in build_verbs_tx_desc()
740 &tx->txreq, in build_verbs_tx_desc()
741 ahg_info->tx_flags, in build_verbs_tx_desc()
744 ahg_info->ahgidx, in build_verbs_tx_desc()
751 phdr->pbc = cpu_to_le64(pbc); in build_verbs_tx_desc()
753 sde->dd, in build_verbs_tx_desc()
754 &tx->txreq, in build_verbs_tx_desc()
761 &tx->txreq, in build_verbs_tx_desc()
762 ahg_info->tx_flags, in build_verbs_tx_desc()
764 ahg_info->ahgidx, in build_verbs_tx_desc()
765 ahg_info->ahgcount, in build_verbs_tx_desc()
766 ahg_info->ahgdesc, in build_verbs_tx_desc()
772 /* add the ulp payload - if any. tx->ss can be NULL for acks */ in build_verbs_tx_desc()
773 if (tx->ss) { in build_verbs_tx_desc()
781 ret = sdma_txadd_daddr(sde->dd, &tx->txreq, sde->dd->sdma_pad_phys, in build_verbs_tx_desc()
800 struct hfi1_qp_priv *priv = qp->priv; in hfi1_verbs_send_dma()
801 struct hfi1_ahg_info *ahg_info = priv->s_ahg; in hfi1_verbs_send_dma()
802 u32 hdrwords = ps->s_txreq->hdr_dwords; in hfi1_verbs_send_dma()
803 u32 len = ps->s_txreq->s_cur_size; in hfi1_verbs_send_dma()
805 struct hfi1_ibdev *dev = ps->dev; in hfi1_verbs_send_dma()
806 struct hfi1_pportdata *ppd = ps->ppd; in hfi1_verbs_send_dma()
808 u8 sc5 = priv->s_sc; in hfi1_verbs_send_dma()
812 if (ps->s_txreq->phdr.hdr.hdr_type) { in hfi1_verbs_send_dma()
822 tx = ps->s_txreq; in hfi1_verbs_send_dma()
823 if (!sdma_txreq_built(&tx->txreq)) { in hfi1_verbs_send_dma()
825 u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5); in hfi1_verbs_send_dma()
829 if (ps->s_txreq->phdr.hdr.hdr_type) in hfi1_verbs_send_dma()
837 qp->srate_mbps, in hfi1_verbs_send_dma()
841 if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode))) in hfi1_verbs_send_dma()
842 pbc = hfi1_fault_tx(qp, ps->opcode, pbc); in hfi1_verbs_send_dma()
845 pbc = update_hcrc(ps->opcode, pbc); in hfi1_verbs_send_dma()
847 tx->wqe = qp->s_wqe; in hfi1_verbs_send_dma()
848 ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc); in hfi1_verbs_send_dma()
852 ret = sdma_send_txreq(tx->sde, ps->wait, &tx->txreq, ps->pkts_sent); in hfi1_verbs_send_dma()
854 if (ret == -ECOMM) in hfi1_verbs_send_dma()
860 trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device), in hfi1_verbs_send_dma()
861 &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5)); in hfi1_verbs_send_dma()
870 /* free txreq - bad state */ in hfi1_verbs_send_dma()
871 hfi1_put_txreq(ps->s_txreq); in hfi1_verbs_send_dma()
872 ps->s_txreq = NULL; in hfi1_verbs_send_dma()
886 struct hfi1_qp_priv *priv = qp->priv; in pio_wait()
887 struct hfi1_devdata *dd = sc->dd; in pio_wait()
897 spin_lock_irqsave(&qp->s_lock, flags); in pio_wait()
898 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { in pio_wait()
899 write_seqlock(&sc->waitlock); in pio_wait()
900 list_add_tail(&ps->s_txreq->txreq.list, in pio_wait()
901 &ps->wait->tx_head); in pio_wait()
902 if (list_empty(&priv->s_iowait.list)) { in pio_wait()
903 struct hfi1_ibdev *dev = &dd->verbs_dev; in pio_wait()
906 dev->n_piowait += !!(flag & RVT_S_WAIT_PIO); in pio_wait()
907 dev->n_piodrain += !!(flag & HFI1_S_WAIT_PIO_DRAIN); in pio_wait()
908 qp->s_flags |= flag; in pio_wait()
909 was_empty = list_empty(&sc->piowait); in pio_wait()
910 iowait_get_priority(&priv->s_iowait); in pio_wait()
911 iowait_queue(ps->pkts_sent, &priv->s_iowait, in pio_wait()
912 &sc->piowait); in pio_wait()
913 priv->s_iowait.lock = &sc->waitlock; in pio_wait()
920 write_sequnlock(&sc->waitlock); in pio_wait()
921 hfi1_qp_unbusy(qp, ps->wait); in pio_wait()
922 ret = -EBUSY; in pio_wait()
924 spin_unlock_irqrestore(&qp->s_lock, flags); in pio_wait()
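pio_wait() parks a sender that is out of PIO credits: under the send context's waitlock it queues the txreq, records the wait reason in qp->s_flags, and returns -EBUSY so the credit-return interrupt can requeue the QP later. A small model of that queue-flag-and-EBUSY shape (the flag value and list handling are placeholders, not the driver's iowait machinery):

    #include <stdio.h>
    #include <errno.h>

    #define S_WAIT_PIO 0x1           /* placeholder wait-reason flag */

    struct waiter { struct waiter *next; };

    struct send_ctx {
        struct waiter *piowait;      /* singly linked wait list (LIFO here) */
    };

    struct qp_state { unsigned s_flags; struct waiter w; };

    /* No credits: queue the QP and tell the caller to back off. */
    static int pio_wait(struct qp_state *qp, struct send_ctx *sc, unsigned flag)
    {
        qp->s_flags |= flag;             /* remember why we sleep */
        qp->w.next = sc->piowait;        /* kernel uses iowait_queue() */
        sc->piowait = &qp->w;
        return -EBUSY;                   /* credit-return path requeues us */
    }

    int main(void)
    {
        struct send_ctx sc = { NULL };
        struct qp_state qp = { 0, { NULL } };

        int ret = pio_wait(&qp, &sc, S_WAIT_PIO);
        printf("ret=%d, s_flags=0x%x, queued=%s\n",
               ret, qp.s_flags, sc.piowait ? "yes" : "no");
        return 0;
    }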
931 struct hfi1_qp_priv *priv = qp->priv; in verbs_pio_complete()
933 if (iowait_pio_dec(&priv->s_iowait)) in verbs_pio_complete()
934 iowait_drain_wakeup(&priv->s_iowait); in verbs_pio_complete()
940 struct hfi1_qp_priv *priv = qp->priv; in hfi1_verbs_send_pio()
941 u32 hdrwords = ps->s_txreq->hdr_dwords; in hfi1_verbs_send_pio()
942 struct rvt_sge_state *ss = ps->s_txreq->ss; in hfi1_verbs_send_pio()
943 u32 len = ps->s_txreq->s_cur_size; in hfi1_verbs_send_pio()
946 struct hfi1_pportdata *ppd = ps->ppd; in hfi1_verbs_send_pio()
957 if (ps->s_txreq->phdr.hdr.hdr_type) { in hfi1_verbs_send_pio()
962 hdr = (u32 *)&ps->s_txreq->phdr.hdr.opah; in hfi1_verbs_send_pio()
965 hdr = (u32 *)&ps->s_txreq->phdr.hdr.ibh; in hfi1_verbs_send_pio()
970 switch (qp->ibqp.qp_type) { in hfi1_verbs_send_pio()
980 sc5 = priv->s_sc; in hfi1_verbs_send_pio()
981 sc = ps->s_txreq->psc; in hfi1_verbs_send_pio()
984 u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5); in hfi1_verbs_send_pio()
987 if (ps->s_txreq->phdr.hdr.hdr_type) in hfi1_verbs_send_pio()
992 pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen); in hfi1_verbs_send_pio()
993 if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode))) in hfi1_verbs_send_pio()
994 pbc = hfi1_fault_tx(qp, ps->opcode, pbc); in hfi1_verbs_send_pio()
997 pbc = update_hcrc(ps->opcode, pbc); in hfi1_verbs_send_pio()
1000 iowait_pio_inc(&priv->s_iowait); in hfi1_verbs_send_pio()
1026 /* txreq not queued - free */ in hfi1_verbs_send_pio()
1034 pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords); in hfi1_verbs_send_pio()
1040 void *addr = ss->sge.vaddr; in hfi1_verbs_send_pio()
1041 u32 slen = rvt_get_sge_length(&ss->sge, len); in hfi1_verbs_send_pio()
1045 len -= slen; in hfi1_verbs_send_pio()
1050 seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma, in hfi1_verbs_send_pio()
1057 trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device), in hfi1_verbs_send_pio()
1058 &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5)); in hfi1_verbs_send_pio()
1061 spin_lock_irqsave(&qp->s_lock, flags); in hfi1_verbs_send_pio()
1062 if (qp->s_wqe) { in hfi1_verbs_send_pio()
1063 rvt_send_complete(qp, qp->s_wqe, wc_status); in hfi1_verbs_send_pio()
1064 } else if (qp->ibqp.qp_type == IB_QPT_RC) { in hfi1_verbs_send_pio()
1066 hfi1_rc_verbs_aborted(qp, &ps->s_txreq->phdr.hdr); in hfi1_verbs_send_pio()
1067 hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr); in hfi1_verbs_send_pio()
1069 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_verbs_send_pio()
1074 hfi1_put_txreq(ps->s_txreq); in hfi1_verbs_send_pio()
1079 * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent being an entry from the partition key table), return 0 otherwise.
1103 * egress_pkey_check - check P_KEY of a packet
1123 if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT)) in egress_pkey_check()
1136 * the most likely matching pkey has index qp->s_pkey_index in egress_pkey_check()
1139 egress_pkey_matches_entry(pkey, ppd->pkeys[s_pkey_index])) { in egress_pkey_check()
1144 if (egress_pkey_matches_entry(pkey, ppd->pkeys[i])) in egress_pkey_check()
1149 * For the user-context mechanism, the P_KEY check would only happen in egress_pkey_check()
1151 * need to increment the counter for the user-context mechanism. in egress_pkey_check()
1154 incr_cntr64(&ppd->port_xmit_constraint_errors); in egress_pkey_check()
1155 dd = ppd->dd; in egress_pkey_check()
1156 if (!(dd->err_info_xmit_constraint.status & in egress_pkey_check()
1158 dd->err_info_xmit_constraint.status |= in egress_pkey_check()
1160 dd->err_info_xmit_constraint.slid = slid; in egress_pkey_check()
1161 dd->err_info_xmit_constraint.pkey = pkey; in egress_pkey_check()
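Egress P_KEY enforcement tries the QP's cached s_pkey_index first, then scans the port's table; a miss bumps port_xmit_constraint_errors and latches the offending SLID/P_KEY for the error-info MAD. The match itself compares the low 15 bits and consults the membership bit (bit 15). A sketch of one common formulation of that predicate (the driver's exact limited-member handling may differ):

    #include <stdint.h>
    #include <stdio.h>

    #define PKEY_LOW_15_MASK 0x7FFF
    #define PKEY_MEMBER_MASK 0x8000   /* set --> full member, clear --> limited */

    static int pkey_matches_entry(uint16_t pkey, uint16_t ent)
    {
        if ((pkey & PKEY_LOW_15_MASK) != (ent & PKEY_LOW_15_MASK))
            return 0;
        /* a full-member key only matches a full-member table entry */
        if (pkey & PKEY_MEMBER_MASK)
            return !!(ent & PKEY_MEMBER_MASK);
        return 1;
    }

    int main(void)
    {
        printf("%d\n", pkey_matches_entry(0xFFFF, 0xFFFF)); /* 1: both full */
        printf("%d\n", pkey_matches_entry(0xFFFF, 0x7FFF)); /* 0: entry limited */
        printf("%d\n", pkey_matches_entry(0x7FFF, 0xFFFF)); /* 1: limited key ok */
        return 0;
    }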
1168 * get_send_routine - choose an egress routine
1176 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); in get_send_routine()
1177 struct hfi1_qp_priv *priv = qp->priv; in get_send_routine()
1178 struct verbs_txreq *tx = ps->s_txreq; in get_send_routine()
1180 if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA))) in get_send_routine()
1181 return dd->process_pio_send; in get_send_routine()
1182 switch (qp->ibqp.qp_type) { in get_send_routine()
1184 return dd->process_pio_send; in get_send_routine()
1190 priv->s_running_pkt_size = in get_send_routine()
1191 (tx->s_cur_size + priv->s_running_pkt_size) / 2; in get_send_routine()
1193 priv->s_running_pkt_size <= min(piothreshold, qp->pmtu) && in get_send_routine()
1194 (BIT(ps->opcode & OPMASK) & pio_opmask[ps->opcode >> 5]) && in get_send_routine()
1195 iowait_sdma_pending(&priv->s_iowait) == 0 && in get_send_routine()
1196 !sdma_txreq_built(&tx->txreq)) in get_send_routine()
1197 return dd->process_pio_send; in get_send_routine()
1202 return dd->process_dma_send; in get_send_routine()
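The PIO-versus-SDMA choice keys off a cheap running average: each send folds the current payload size into s_running_pkt_size as (cur + avg) / 2, an exponentially weighted average with weight 1/2, and PIO wins only while that average stays within min(piothreshold, qp->pmtu), the opcode is in the PIO mask, and no SDMA work is pending on the QP. The averaging step in isolation:

    #include <stdio.h>

    /* Fold the newest packet size into the running average (alpha = 1/2). */
    static unsigned running_avg(unsigned avg, unsigned cur_size)
    {
        return (cur_size + avg) / 2;
    }

    int main(void)
    {
        unsigned avg = 0;
        unsigned sizes[] = { 256, 256, 8192, 8192, 8192 };

        for (int i = 0; i < 5; i++) {
            avg = running_avg(avg, sizes[i]);
            printf("pkt %u -> avg %u\n", sizes[i], avg);
        }
        /* a burst of large packets quickly pushes avg past a PIO threshold */
        return 0;
    }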
1206 * hfi1_verbs_send - send a packet
1211 * Return non-zero and clear RVT_S_BUSY in qp->s_flags otherwise.
1215 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); in hfi1_verbs_send()
1216 struct hfi1_qp_priv *priv = qp->priv; in hfi1_verbs_send()
1225 if (ps->s_txreq->phdr.hdr.hdr_type) { in hfi1_verbs_send()
1226 struct hfi1_16b_header *hdr = &ps->s_txreq->phdr.hdr.opah; in hfi1_verbs_send()
1230 ohdr = &hdr->u.oth; in hfi1_verbs_send()
1232 ohdr = &hdr->u.l.oth; in hfi1_verbs_send()
1237 struct ib_header *hdr = &ps->s_txreq->phdr.hdr.ibh; in hfi1_verbs_send()
1241 ohdr = &hdr->u.l.oth; in hfi1_verbs_send()
1243 ohdr = &hdr->u.oth; in hfi1_verbs_send()
1249 ps->opcode = ib_bth_get_opcode(ohdr); in hfi1_verbs_send()
1251 ps->opcode = IB_OPCODE_UD_SEND_ONLY; in hfi1_verbs_send()
1254 ret = egress_pkey_check(dd->pport, slid, pkey, in hfi1_verbs_send()
1255 priv->s_sc, qp->s_pkey_index); in hfi1_verbs_send()
1265 if (sr == dd->process_pio_send) { in hfi1_verbs_send()
1270 spin_lock_irqsave(&qp->s_lock, flags); in hfi1_verbs_send()
1271 rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR); in hfi1_verbs_send()
1272 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_verbs_send()
1274 return -EINVAL; in hfi1_verbs_send()
1276 if (sr == dd->process_dma_send && iowait_pio_pending(&priv->s_iowait)) in hfi1_verbs_send()
1278 ps->s_txreq->psc, in hfi1_verbs_send()
1285 * hfi1_fill_device_attr - Fill in rvt dev info device attributes.
1290 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; in hfi1_fill_device_attr()
1291 u32 ver = dd->dc8051_ver; in hfi1_fill_device_attr()
1293 memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props)); in hfi1_fill_device_attr()
1295 rdi->dparms.props.fw_ver = ((u64)(dc8051_ver_maj(ver)) << 32) | in hfi1_fill_device_attr()
1299 rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | in hfi1_fill_device_attr()
1304 rdi->dparms.props.kernel_cap_flags = IBK_RDMA_NETDEV_OPA; in hfi1_fill_device_attr()
1305 rdi->dparms.props.page_size_cap = PAGE_SIZE; in hfi1_fill_device_attr()
1306 rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3; in hfi1_fill_device_attr()
1307 rdi->dparms.props.vendor_part_id = dd->pcidev->device; in hfi1_fill_device_attr()
1308 rdi->dparms.props.hw_ver = dd->minrev; in hfi1_fill_device_attr()
1309 rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid; in hfi1_fill_device_attr()
1310 rdi->dparms.props.max_mr_size = U64_MAX; in hfi1_fill_device_attr()
1311 rdi->dparms.props.max_fast_reg_page_list_len = UINT_MAX; in hfi1_fill_device_attr()
1312 rdi->dparms.props.max_qp = hfi1_max_qps; in hfi1_fill_device_attr()
1313 rdi->dparms.props.max_qp_wr = in hfi1_fill_device_attr()
1315 HFI1_QP_WQE_INVALID - 1 : hfi1_max_qp_wrs); in hfi1_fill_device_attr()
1316 rdi->dparms.props.max_send_sge = hfi1_max_sges; in hfi1_fill_device_attr()
1317 rdi->dparms.props.max_recv_sge = hfi1_max_sges; in hfi1_fill_device_attr()
1318 rdi->dparms.props.max_sge_rd = hfi1_max_sges; in hfi1_fill_device_attr()
1319 rdi->dparms.props.max_cq = hfi1_max_cqs; in hfi1_fill_device_attr()
1320 rdi->dparms.props.max_ah = hfi1_max_ahs; in hfi1_fill_device_attr()
1321 rdi->dparms.props.max_cqe = hfi1_max_cqes; in hfi1_fill_device_attr()
1322 rdi->dparms.props.max_pd = hfi1_max_pds; in hfi1_fill_device_attr()
1323 rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC; in hfi1_fill_device_attr()
1324 rdi->dparms.props.max_qp_init_rd_atom = 255; in hfi1_fill_device_attr()
1325 rdi->dparms.props.max_srq = hfi1_max_srqs; in hfi1_fill_device_attr()
1326 rdi->dparms.props.max_srq_wr = hfi1_max_srq_wrs; in hfi1_fill_device_attr()
1327 rdi->dparms.props.max_srq_sge = hfi1_max_srq_sges; in hfi1_fill_device_attr()
1328 rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB; in hfi1_fill_device_attr()
1329 rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd); in hfi1_fill_device_attr()
1330 rdi->dparms.props.max_mcast_grp = hfi1_max_mcast_grps; in hfi1_fill_device_attr()
1331 rdi->dparms.props.max_mcast_qp_attach = hfi1_max_mcast_qp_attached; in hfi1_fill_device_attr()
1332 rdi->dparms.props.max_total_mcast_qp_attach = in hfi1_fill_device_attr()
1333 rdi->dparms.props.max_mcast_qp_attach * in hfi1_fill_device_attr()
1334 rdi->dparms.props.max_mcast_grp; in hfi1_fill_device_attr()
1373 struct hfi1_pportdata *ppd = &dd->pport[port_num - 1]; in query_port()
1374 u32 lid = ppd->lid; in query_port()
1377 props->lid = lid ? lid : 0; in query_port()
1378 props->lmc = ppd->lmc; in query_port()
1380 props->state = driver_lstate(ppd); in query_port()
1381 props->phys_state = driver_pstate(ppd); in query_port()
1382 props->gid_tbl_len = HFI1_GUIDS_PER_PORT; in query_port()
1383 props->active_width = (u8)opa_width_to_ib(ppd->link_width_active); in query_port()
1385 props->active_speed = opa_speed_to_ib(ppd->link_speed_active); in query_port()
1386 props->max_vl_num = ppd->vls_supported; in query_port()
1396 props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ? in query_port()
1398 props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu : in query_port()
1399 mtu_to_enum(ppd->ibmtu, IB_MTU_4096); in query_port()
1400 props->phys_mtu = hfi1_max_mtu; in query_port()
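query_port() reports MTUs as IB enums, falling back to the port maximum when ppd->ibmtu is not a valid IB size. The byte-to-enum mapping is the standard IB one; a sketch of the default-if-bad fallback mtu_to_enum() takes here (enum values per the IB spec):

    #include <stdio.h>

    enum ib_mtu { IB_MTU_256 = 1, IB_MTU_512, IB_MTU_1024, IB_MTU_2048, IB_MTU_4096 };

    /* Map a byte MTU to the IB enum, or return def for non-IB sizes. */
    static enum ib_mtu mtu_to_enum(unsigned mtu, enum ib_mtu def)
    {
        switch (mtu) {
        case 256:  return IB_MTU_256;
        case 512:  return IB_MTU_512;
        case 1024: return IB_MTU_1024;
        case 2048: return IB_MTU_2048;
        case 4096: return IB_MTU_4096;
        default:   return def;        /* e.g. OPA's 8K/10K byte MTUs */
        }
    }

    int main(void)
    {
        printf("%d\n", mtu_to_enum(4096, IB_MTU_4096));  /* 5 */
        printf("%d\n", mtu_to_enum(10240, IB_MTU_4096)); /* falls back: 5 */
        return 0;
    }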
1415 ret = -EOPNOTSUPP; in modify_device()
1420 memcpy(device->node_desc, device_modify->node_desc, in modify_device()
1422 for (i = 0; i < dd->num_pports; i++) { in modify_device()
1423 struct hfi1_ibport *ibp = &dd->pport[i].ibport_data; in modify_device()
1431 cpu_to_be64(device_modify->sys_image_guid); in modify_device()
1432 for (i = 0; i < dd->num_pports; i++) { in modify_device()
1433 struct hfi1_ibport *ibp = &dd->pport[i].ibport_data; in modify_device()
1449 struct hfi1_pportdata *ppd = &dd->pport[port_num - 1]; in shut_down_port()
1462 return -EINVAL; in hfi1_get_guid_be()
1475 return ibp->sl_to_sc[rdma_ah_get_sl(ah)]; in ah_to_sc()
1488 return -EINVAL; in hfi1_check_ah()
1496 if (sl >= ARRAY_SIZE(ibp->sl_to_sc)) in hfi1_check_ah()
1497 return -EINVAL; in hfi1_check_ah()
1498 sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc)); in hfi1_check_ah()
1500 sc5 = ibp->sl_to_sc[sl]; in hfi1_check_ah()
1502 return -EINVAL; in hfi1_check_ah()
1514 struct rdma_ah_attr *attr = &ah->attr; in hfi1_notify_new_ah()
1523 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)]; in hfi1_notify_new_ah()
1527 ah->vl = sc_to_vlt(dd, sc5); in hfi1_notify_new_ah()
1528 if (ah->vl < num_vls || ah->vl == 15) in hfi1_notify_new_ah()
1529 ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu); in hfi1_notify_new_ah()
1533 * hfi1_get_npkeys - return the size of the PKEY table for context 0
1538 return ARRAY_SIZE(dd->pport[0].pkeys); in hfi1_get_npkeys()
1543 struct hfi1_ibport *ibp = &ppd->ibport_data; in init_ibport()
1544 size_t sz = ARRAY_SIZE(ibp->sl_to_sc); in init_ibport()
1548 ibp->sl_to_sc[i] = i; in init_ibport()
1549 ibp->sc_to_sl[i] = i; in init_ibport()
1553 INIT_LIST_HEAD(&ibp->rvp.trap_lists[i].list); in init_ibport()
1554 timer_setup(&ibp->rvp.trap_timer, hfi1_handle_trap_timer, 0); in init_ibport()
1556 spin_lock_init(&ibp->rvp.lock); in init_ibport()
1558 ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX; in init_ibport()
1559 ibp->rvp.sm_lid = 0; in init_ibport()
1564 ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP | in init_ibport()
1566 ibp->rvp.port_cap3_flags = OPA_CAP_MASK3_IsSharedSpaceSupported; in init_ibport()
1567 ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA; in init_ibport()
1568 ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; in init_ibport()
1569 ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; in init_ibport()
1570 ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; in init_ibport()
1571 ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT; in init_ibport()
1573 RCU_INIT_POINTER(ibp->rvp.qp[0], NULL); in init_ibport()
1574 RCU_INIT_POINTER(ibp->rvp.qp[1], NULL); in init_ibport()
1581 u32 ver = dd_from_dev(dev)->dc8051_ver; in hfi1_get_dev_fw_str()
1631 return -ENOMEM; in init_cntr_names()
1654 err = init_cntr_names(dd->cntrnames, dd->cntrnameslen, in hfi1_alloc_hw_device_stats()
1676 err = init_cntr_names(dd->portcntrnames, dd->portcntrnameslen, in hfi_alloc_hw_port_stats()
1694 sps_ints += get_all_cpu_total(dd->int_counter); in hfi1_sps_ints()
1722 memcpy(stats->value, values, count * sizeof(u64)); in get_hw_stats()
1744 * hfi1_register_ib_device - register our device with the infiniband core
1750 struct hfi1_ibdev *dev = &dd->verbs_dev; in hfi1_register_ib_device()
1751 struct ib_device *ibdev = &dev->rdi.ibdev; in hfi1_register_ib_device()
1752 struct hfi1_pportdata *ppd = dd->pport; in hfi1_register_ib_device()
1753 struct hfi1_ibport *ibp = &ppd->ibport_data; in hfi1_register_ib_device()
1757 for (i = 0; i < dd->num_pports; i++) in hfi1_register_ib_device()
1760 /* Only need to initialize non-zero fields. */ in hfi1_register_ib_device()
1762 timer_setup(&dev->mem_timer, mem_timer, 0); in hfi1_register_ib_device()
1764 seqlock_init(&dev->iowait_lock); in hfi1_register_ib_device()
1765 seqlock_init(&dev->txwait_lock); in hfi1_register_ib_device()
1766 INIT_LIST_HEAD(&dev->txwait); in hfi1_register_ib_device()
1767 INIT_LIST_HEAD(&dev->memwait); in hfi1_register_ib_device()
1773 /* Use first-port GUID as node guid */ in hfi1_register_ib_device()
1774 ibdev->node_guid = get_sguid(ibp, HFI1_PORT_GUID_INDEX); in hfi1_register_ib_device()
1782 ib_hfi1_sys_image_guid = ibdev->node_guid; in hfi1_register_ib_device()
1783 ibdev->phys_port_cnt = dd->num_pports; in hfi1_register_ib_device()
1784 ibdev->dev.parent = &dd->pcidev->dev; in hfi1_register_ib_device()
1788 strscpy(ibdev->node_desc, init_utsname()->nodename, in hfi1_register_ib_device()
1789 sizeof(ibdev->node_desc)); in hfi1_register_ib_device()
1794 dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev; in hfi1_register_ib_device()
1795 dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah; in hfi1_register_ib_device()
1796 dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah; in hfi1_register_ib_device()
1797 dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be; in hfi1_register_ib_device()
1798 dd->verbs_dev.rdi.driver_f.query_port_state = query_port; in hfi1_register_ib_device()
1799 dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port; in hfi1_register_ib_device()
1800 dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg; in hfi1_register_ib_device()
1807 dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size; in hfi1_register_ib_device()
1808 dd->verbs_dev.rdi.dparms.qpn_start = 0; in hfi1_register_ib_device()
1809 dd->verbs_dev.rdi.dparms.qpn_inc = 1; in hfi1_register_ib_device()
1810 dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift; in hfi1_register_ib_device()
1811 dd->verbs_dev.rdi.dparms.qpn_res_start = RVT_KDETH_QP_BASE; in hfi1_register_ib_device()
1812 dd->verbs_dev.rdi.dparms.qpn_res_end = RVT_AIP_QP_MAX; in hfi1_register_ib_device()
1813 dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC; in hfi1_register_ib_device()
1814 dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK; in hfi1_register_ib_device()
1815 dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT; in hfi1_register_ib_device()
1816 dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK; in hfi1_register_ib_device()
1817 dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA | in hfi1_register_ib_device()
1819 dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE; in hfi1_register_ib_device()
1821 dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc; in hfi1_register_ib_device()
1822 dd->verbs_dev.rdi.driver_f.qp_priv_init = hfi1_qp_priv_init; in hfi1_register_ib_device()
1823 dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free; in hfi1_register_ib_device()
1824 dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps; in hfi1_register_ib_device()
1825 dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset; in hfi1_register_ib_device()
1826 dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send_from_rvt; in hfi1_register_ib_device()
1827 dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send; in hfi1_register_ib_device()
1828 dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send; in hfi1_register_ib_device()
1829 dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr; in hfi1_register_ib_device()
1830 dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp; in hfi1_register_ib_device()
1831 dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters; in hfi1_register_ib_device()
1832 dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue; in hfi1_register_ib_device()
1833 dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp; in hfi1_register_ib_device()
1834 dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp; in hfi1_register_ib_device()
1835 dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp; in hfi1_register_ib_device()
1836 dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu; in hfi1_register_ib_device()
1837 dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp; in hfi1_register_ib_device()
1838 dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp; in hfi1_register_ib_device()
1839 dd->verbs_dev.rdi.driver_f.notify_restart_rc = hfi1_restart_rc; in hfi1_register_ib_device()
1840 dd->verbs_dev.rdi.driver_f.setup_wqe = hfi1_setup_wqe; in hfi1_register_ib_device()
1841 dd->verbs_dev.rdi.driver_f.comp_vect_cpu_lookup = in hfi1_register_ib_device()
1845 dd->verbs_dev.rdi.ibdev.num_comp_vectors = dd->comp_vect_possible_cpus; in hfi1_register_ib_device()
1846 dd->verbs_dev.rdi.dparms.node = dd->node; in hfi1_register_ib_device()
1849 dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */ in hfi1_register_ib_device()
1850 dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size; in hfi1_register_ib_device()
1851 dd->verbs_dev.rdi.dparms.nports = dd->num_pports; in hfi1_register_ib_device()
1852 dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd); in hfi1_register_ib_device()
1853 dd->verbs_dev.rdi.dparms.sge_copy_mode = sge_copy_mode; in hfi1_register_ib_device()
1854 dd->verbs_dev.rdi.dparms.wss_threshold = wss_threshold; in hfi1_register_ib_device()
1855 dd->verbs_dev.rdi.dparms.wss_clean_period = wss_clean_period; in hfi1_register_ib_device()
1856 dd->verbs_dev.rdi.dparms.reserved_operations = 1; in hfi1_register_ib_device()
1857 dd->verbs_dev.rdi.dparms.extra_rdma_atomic = HFI1_TID_RDMA_WRITE_CNT; in hfi1_register_ib_device()
1860 dd->verbs_dev.rdi.post_parms = hfi1_post_parms; in hfi1_register_ib_device()
1863 dd->verbs_dev.rdi.wc_opcode = ib_hfi1_wc_opcode; in hfi1_register_ib_device()
1865 ppd = dd->pport; in hfi1_register_ib_device()
1866 for (i = 0; i < dd->num_pports; i++, ppd++) in hfi1_register_ib_device()
1867 rvt_init_port(&dd->verbs_dev.rdi, in hfi1_register_ib_device()
1868 &ppd->ibport_data.rvp, in hfi1_register_ib_device()
1870 ppd->pkeys); in hfi1_register_ib_device()
1872 ret = rvt_register_device(&dd->verbs_dev.rdi); in hfi1_register_ib_device()
1883 rvt_unregister_device(&dd->verbs_dev.rdi); in hfi1_register_ib_device()
1886 dd_dev_err(dd, "cannot register verbs: %d!\n", -ret); in hfi1_register_ib_device()
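Registration is mostly filling rdmavt's driver_f function-pointer table and dparms before handing control to rvt_register_device(), with verbs_txreq_exit()/rvt_unregister_device() unwinding on failure. The general shape, as an ops-struct registration sketch (all names below are illustrative, not rdmavt's API):

    #include <stdio.h>

    struct driver_ops {
        int  (*check_ah)(int sl);
        void (*shut_down_port)(int port);
    };

    struct core_dev {
        struct driver_ops ops;
        int nports;
    };

    static int  my_check_ah(int sl)   { return sl < 32 ? 0 : -1; }
    static void my_shutdown(int port) { printf("port %d down\n", port); }

    /* The core only calls through the table the driver filled in. */
    static int register_device(struct core_dev *dev)
    {
        if (!dev->ops.check_ah || !dev->ops.shut_down_port)
            return -1;              /* incomplete ops table */
        printf("registered, %d port(s)\n", dev->nports);
        return 0;
    }

    int main(void)
    {
        struct core_dev dev = { { my_check_ah, my_shutdown }, 1 };
        if (register_device(&dev))
            return 1;
        dev.ops.shut_down_port(1);
        return 0;
    }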
1892 struct hfi1_ibdev *dev = &dd->verbs_dev; in hfi1_unregister_ib_device()
1896 rvt_unregister_device(&dd->verbs_dev.rdi); in hfi1_unregister_ib_device()
1898 if (!list_empty(&dev->txwait)) in hfi1_unregister_ib_device()
1900 if (!list_empty(&dev->memwait)) in hfi1_unregister_ib_device()
1903 del_timer_sync(&dev->mem_timer); in hfi1_unregister_ib_device()
1914 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd); in hfi1_cnp_rcv()
1916 struct ib_header *hdr = packet->hdr; in hfi1_cnp_rcv()
1917 struct rvt_qp *qp = packet->qp; in hfi1_cnp_rcv()
1922 switch (packet->qp->ibqp.qp_type) { in hfi1_cnp_rcv()
1924 rlid = rdma_ah_get_dlid(&qp->remote_ah_attr); in hfi1_cnp_rcv()
1925 rqpn = qp->remote_qpn; in hfi1_cnp_rcv()
1929 rlid = rdma_ah_get_dlid(&qp->remote_ah_attr); in hfi1_cnp_rcv()
1930 rqpn = qp->remote_qpn; in hfi1_cnp_rcv()
1939 ibp->rvp.n_pkt_drops++; in hfi1_cnp_rcv()
1943 sc5 = hfi1_9B_get_sc5(hdr, packet->rhf); in hfi1_cnp_rcv()
1944 sl = ibp->sc_to_sl[sc5]; in hfi1_cnp_rcv()
1945 lqpn = qp->ibqp.qp_num; in hfi1_cnp_rcv()