
Searched refs:rxq (Results 1 – 25 of 81) sorted by relevance

/src/sys/dev/sfxge/
sfxge_rx.c
153 sfxge_rx_qflush_done(struct sfxge_rxq *rxq) in sfxge_rx_qflush_done() argument
156 rxq->flush_state = SFXGE_FLUSH_DONE; in sfxge_rx_qflush_done()
160 sfxge_rx_qflush_failed(struct sfxge_rxq *rxq) in sfxge_rx_qflush_failed() argument
163 rxq->flush_state = SFXGE_FLUSH_FAILED; in sfxge_rx_qflush_failed()
171 struct sfxge_rxq *rxq = arg; in sfxge_rx_post_refill() local
177 sc = rxq->sc; in sfxge_rx_post_refill()
178 index = rxq->index; in sfxge_rx_post_refill()
180 magic = sfxge_sw_ev_rxq_magic(SFXGE_SW_EV_RX_QREFILL, rxq); in sfxge_rx_post_refill()
185 KASSERT(rxq->init_state == SFXGE_RXQ_STARTED, in sfxge_rx_post_refill()
191 sfxge_rx_schedule_refill(struct sfxge_rxq *rxq, boolean_t retrying) in sfxge_rx_schedule_refill() argument
[all …]
sfxge_ev.c
53 struct sfxge_rxq *rxq; in sfxge_ev_qcomplete() local
60 rxq = sc->rxq[index]; in sfxge_ev_qcomplete()
82 if (rxq->pending != rxq->completed) in sfxge_ev_qcomplete()
83 sfxge_rx_qcomplete(rxq, eop); in sfxge_ev_qcomplete()
89 struct sfxge_rxq *rxq; in sfxge_get_rxq_by_label() local
93 rxq = evq->sc->rxq[evq->index]; in sfxge_get_rxq_by_label()
95 KASSERT(rxq != NULL, ("rxq == NULL")); in sfxge_get_rxq_by_label()
96 KASSERT(evq->index == rxq->index, ("evq->index != rxq->index")); in sfxge_get_rxq_by_label()
98 return (rxq); in sfxge_get_rxq_by_label()
107 struct sfxge_rxq *rxq; in sfxge_ev_rx() local
[all …]
sfxge_rx.h
187 extern void sfxge_rx_qcomplete(struct sfxge_rxq *rxq, boolean_t eop);
188 extern void sfxge_rx_qrefill(struct sfxge_rxq *rxq);
189 extern void sfxge_rx_qflush_done(struct sfxge_rxq *rxq);
190 extern void sfxge_rx_qflush_failed(struct sfxge_rxq *rxq);
/src/sys/contrib/dev/iwlwifi/pcie/gen1_2/
rx.c
120 static int iwl_rxq_space(const struct iwl_rxq *rxq) in iwl_rxq_space() argument
123 WARN_ON(rxq->queue_size & (rxq->queue_size - 1)); in iwl_rxq_space()
131 return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1); in iwl_rxq_space()
168 struct iwl_rxq *rxq) in iwl_pcie_rxq_inc_wr_ptr() argument
172 lockdep_assert_held(&rxq->lock); in iwl_pcie_rxq_inc_wr_ptr()
188 rxq->need_update = true; in iwl_pcie_rxq_inc_wr_ptr()
193 rxq->write_actual = round_down(rxq->write, 8); in iwl_pcie_rxq_inc_wr_ptr()
195 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); in iwl_pcie_rxq_inc_wr_ptr()
197 iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual | in iwl_pcie_rxq_inc_wr_ptr()
198 HBUS_TARG_WRPTR_RX_Q(rxq->id)); in iwl_pcie_rxq_inc_wr_ptr()
[all …]
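
Aside: the expression at rx.c line 131 above is the standard free-space calculation for a power-of-two ring buffer in which one slot is deliberately kept unused, so that read == write can only mean "empty". A minimal, self-contained sketch of the same arithmetic in generic C (hypothetical names, not the iwlwifi structures):

    #include <assert.h>
    #include <stdio.h>

    /* Hypothetical ring for illustration only. */
    struct ring {
        unsigned int read;   /* index of next entry to consume */
        unsigned int write;  /* index of next entry to fill */
        unsigned int size;   /* must be a power of two */
    };

    static unsigned int ring_space(const struct ring *r)
    {
        /* Mirrors the WARN_ON(): the mask trick needs a power-of-two size. */
        assert((r->size & (r->size - 1)) == 0);
        return (r->read - r->write - 1) & (r->size - 1);
    }

    int main(void)
    {
        struct ring r = { .read = 0, .write = 0, .size = 256 };
        printf("%u\n", ring_space(&r));  /* 255: empty ring, one slot reserved */
        r.write = 255;
        printf("%u\n", ring_space(&r));  /* 0: ring is now full */
        return 0;
    }

With size 256 an empty ring reports 255 free slots; once write has advanced 255 entries past read, the unsigned subtraction wraps and the expression yields 0, i.e. full. The "- 1" is what reserves the sentinel slot.
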
/src/sys/dev/xen/netfront/
netfront.c
221 struct netfront_rxq *rxq; member
294 xn_get_rx_mbuf(struct netfront_rxq *rxq, RING_IDX ri) in xn_get_rx_mbuf() argument
300 m = rxq->mbufs[i]; in xn_get_rx_mbuf()
301 rxq->mbufs[i] = NULL; in xn_get_rx_mbuf()
306 xn_get_rx_ref(struct netfront_rxq *rxq, RING_IDX ri) in xn_get_rx_ref() argument
309 grant_ref_t ref = rxq->grant_ref[i]; in xn_get_rx_ref()
312 rxq->grant_ref[i] = GRANT_REF_INVALID; in xn_get_rx_ref()
479 XN_RX_LOCK(&np->rxq[i]); in netfront_suspend()
484 XN_RX_UNLOCK(&np->rxq[i]); in netfront_suspend()
504 XN_RX_LOCK(&info->rxq[i]); in netfront_resume()
[all …]
/src/sys/dev/ice/
ice_iflib_txrx.c
52 static int _ice_ift_rxd_available(struct ice_rx_queue *rxq, qidx_t pidx, qidx_t budget);
53 static int _ice_ift_rxd_pkt_get(struct ice_rx_queue *rxq, if_rxd_info_t ri);
54 static void _ice_ift_rxd_refill(struct ice_rx_queue *rxq, uint32_t pidx,
56 static void _ice_ift_rxd_flush(struct ice_softc *sc, struct ice_rx_queue *rxq,
323 _ice_ift_rxd_available(struct ice_rx_queue *rxq, qidx_t pidx, qidx_t budget) in _ice_ift_rxd_available() argument
329 nrxd = rxq->desc_count; in _ice_ift_rxd_available()
332 rxd = &rxq->rx_base[i]; in _ice_ift_rxd_available()
360 struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid]; in ice_ift_rxd_available() local
362 return _ice_ift_rxd_available(rxq, pidx, budget); in ice_ift_rxd_available()
377 struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[ri->iri_qsidx]; in ice_ift_rxd_pkt_get() local
[all …]
ice_common_txrx.h
272 ice_rx_checksum(struct ice_rx_queue *rxq, uint32_t *flags, uint32_t *data, in ice_rx_checksum() argument
326 rxq->stats.cso[ICE_CSO_STAT_RX_IP6_ERR]++; in ice_rx_checksum()
337 rxq->stats.cso[ICE_CSO_STAT_RX_IP4_ERR]++; in ice_rx_checksum()
341 rxq->stats.cso[ICE_CSO_STAT_RX_L3_ERR]++; in ice_rx_checksum()
352 rxq->stats.cso[ICE_CSO_STAT_RX_TCP_ERR]++; in ice_rx_checksum()
356 rxq->stats.cso[ICE_CSO_STAT_RX_UDP_ERR]++; in ice_rx_checksum()
360 rxq->stats.cso[ICE_CSO_STAT_RX_SCTP_ERR]++; in ice_rx_checksum()
368 rxq->stats.cso[ICE_CSO_STAT_RX_L4_ERR]++; in ice_rx_checksum()
ice_iov.c
219 struct ice_rx_queue *rxq; in ice_iov_add_vf() local
303 for (i = 0, rxq = vsi->rx_queues; i < vsi->num_rx_queues; i++, rxq++) { in ice_iov_add_vf()
304 rxq->me = i; in ice_iov_add_vf()
305 rxq->vsi = vsi; in ice_iov_add_vf()
976 struct ice_rx_queue *rxq; in ice_vc_cfg_vsi_qs_msg() local
1003 rxq = &vsi->rx_queues[i]; in ice_vc_cfg_vsi_qs_msg()
1005 rxq->desc_count = 0; in ice_vc_cfg_vsi_qs_msg()
1006 rxq->rx_paddr = 0; in ice_vc_cfg_vsi_qs_msg()
1013 vqpi->rxq.vsi_id != vf->vsi->idx || in ice_vc_cfg_vsi_qs_msg()
1014 vqpi->txq.queue_id != vqpi->rxq.queue_id || in ice_vc_cfg_vsi_qs_msg()
[all …]
if_ice_iflib.c
1158 struct ice_rx_queue *rxq; in ice_if_rx_queues_alloc() local
1188 for (i = 0, rxq = vsi->rx_queues; i < nrxqsets; i++, rxq++) { in ice_if_rx_queues_alloc()
1189 rxq->me = i; in ice_if_rx_queues_alloc()
1190 rxq->vsi = vsi; in ice_if_rx_queues_alloc()
1193 rxq->desc_count = sc->scctx->isc_nrxd[0]; in ice_if_rx_queues_alloc()
1196 rxq->tail = QRX_TAIL(vsi->rx_qmap[i]); in ice_if_rx_queues_alloc()
1197 rxq->rx_base = (union ice_32b_rx_flex_desc *)vaddrs[i]; in ice_if_rx_queues_alloc()
1198 rxq->rx_paddr = paddrs[i]; in ice_if_rx_queues_alloc()
1200 ice_add_rxq_sysctls(rxq); in ice_if_rx_queues_alloc()
1279 struct ice_rx_queue __unused *rxq = (struct ice_rx_queue *)arg; in ice_msix_que() local
[all …]
/src/sys/dev/mana/
mana_en.c
415 mana_load_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq, in mana_load_rx_mbuf() argument
424 mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxq->datasize); in mana_load_rx_mbuf()
428 mbuf->m_pkthdr.len = mbuf->m_len = rxq->datasize; in mana_load_rx_mbuf()
429 mlen = rxq->datasize; in mana_load_rx_mbuf()
445 counter_u64_add(rxq->stats.dma_mapping_err, 1); in mana_load_rx_mbuf()
466 mana_unload_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq, in mana_unload_rx_mbuf() argument
1348 mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq) in mana_fence_rq() argument
1354 init_completion(&rxq->fence_event); in mana_fence_rq()
1358 req.wq_obj_handle = rxq->rxobj; in mana_fence_rq()
1364 rxq->rxq_idx, err); in mana_fence_rq()
[all …]
mana_sysctl.c
66 struct mana_rxq *rxq; in mana_sysctl_rx_stat_agg_u64() local
71 rxq = apc->rxqs[i]; in mana_sysctl_rx_stat_agg_u64()
72 stat += *((uint64_t *)((uint8_t *)rxq + offset)); in mana_sysctl_rx_stat_agg_u64()
80 rxq = apc->rxqs[i]; in mana_sysctl_rx_stat_agg_u64()
81 *((uint64_t *)((uint8_t *)rxq + offset)) = 0; in mana_sysctl_rx_stat_agg_u64()
91 struct mana_rxq *rxq; in mana_sysctl_rx_stat_u16() local
95 rxq = apc->rxqs[0]; in mana_sysctl_rx_stat_u16()
96 val = *((uint16_t *)((uint8_t *)rxq + offset)); in mana_sysctl_rx_stat_u16()
111 struct mana_rxq *rxq; in mana_sysctl_rx_stat_u32() local
115 rxq = apc->rxqs[0]; in mana_sysctl_rx_stat_u32()
[all …]
hw_channel.c
173 hwc->rxq->gdma_wq->id = val; in mana_hwc_init_event_handler()
201 hwc->rxq->msg_buf->gpa_mkey = val; in mana_hwc_init_event_handler()
223 struct hwc_wq *hwc_rxq = hwc->rxq; in mana_hwc_rx_event_handler()
646 struct hwc_wq *hwc_rxq = hwc->rxq; in mana_hwc_test_channel()
677 struct gdma_queue *rq = hwc->rxq->gdma_wq; in mana_hwc_establish_channel()
738 hwc->cq, &hwc->rxq); in mana_hwc_init_queues()
841 if (hwc->rxq) in mana_hwc_destroy_channel()
842 mana_hwc_destroy_wq(hwc, hwc->rxq); in mana_hwc_destroy_channel()
/src/sys/dev/netmap/
if_vtnet_netmap.h
164 struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr]; in vtnet_netmap_kring_refill() local
165 struct virtqueue *vq = rxq->vtnrx_vq; in vtnet_netmap_kring_refill()
171 for (nm_i = rxq->vtnrx_nm_refill; num > 0; in vtnet_netmap_kring_refill()
186 err = sglist_append(&sg, &rxq->vtnrx_shrhdr, sc->vtnet_hdr_size); in vtnet_netmap_kring_refill()
192 err = virtqueue_enqueue(vq, /*cookie=*/rxq, &sg, in vtnet_netmap_kring_refill()
200 rxq->vtnrx_nm_refill = nm_i; in vtnet_netmap_kring_refill()
212 vtnet_netmap_rxq_populate(struct vtnet_rxq *rxq) in vtnet_netmap_rxq_populate() argument
214 struct netmap_adapter *na = NA(rxq->vtnrx_sc->vtnet_ifp); in vtnet_netmap_rxq_populate()
220 slot = netmap_reset(na, NR_RX, rxq->vtnrx_id, 0); in vtnet_netmap_rxq_populate()
223 kring = na->rx_rings[rxq->vtnrx_id]; in vtnet_netmap_rxq_populate()
[all …]
/src/sys/dev/virtio/network/
if_vtnet.c
151 static void vtnet_rx_vq_process(struct vtnet_rxq *rxq, int tries);
834 struct vtnet_rxq *rxq; in vtnet_init_rxq() local
836 rxq = &sc->vtnet_rxqs[id]; in vtnet_init_rxq()
838 snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d", in vtnet_init_rxq()
840 mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF); in vtnet_init_rxq()
842 rxq->vtnrx_sc = sc; in vtnet_init_rxq()
843 rxq->vtnrx_id = id; in vtnet_init_rxq()
845 rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT); in vtnet_init_rxq()
846 if (rxq->vtnrx_sg == NULL) in vtnet_init_rxq()
851 if (tcp_lro_init_args(&rxq->vtnrx_lro, sc->vtnet_ifp, in vtnet_init_rxq()
[all …]
/src/sys/dev/qlnx/qlnxe/
qlnx_os.c
133 static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
134 static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
136 struct qlnx_rx_queue *rxq);
1463 if (fp->rxq->handle != NULL) { in qlnx_set_rx_coalesce()
1465 0, fp->rxq->handle); in qlnx_set_rx_coalesce()
3724 struct qlnx_rx_queue *rxq; in qlnx_rx_jumbo_chain() local
3727 rxq = fp->rxq; in qlnx_rx_jumbo_chain()
3731 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); in qlnx_rx_jumbo_chain()
3733 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; in qlnx_rx_jumbo_chain()
3739 rxq->sw_rx_cons = in qlnx_rx_jumbo_chain()
[all …]
/src/sys/dev/vmware/vmxnet3/
if_vmx.c
468 struct vmxnet3_rxqueue *rxq; in vmxnet3_msix_intr_assign() local
479 rxq = &sc->vmx_rxq[i]; in vmxnet3_msix_intr_assign()
480 error = iflib_irq_alloc_generic(ctx, &rxq->vxrxq_irq, i + 1, in vmxnet3_msix_intr_assign()
481 IFLIB_INTR_RXTX, vmxnet3_rxq_intr, rxq, i, irq_name); in vmxnet3_msix_intr_assign()
525 struct vmxnet3_rxqueue *rxq; in vmxnet3_free_irqs() local
531 rxq = &sc->vmx_rxq[i]; in vmxnet3_free_irqs()
532 iflib_irq_free(sc->vmx_ctx, &rxq->vxrxq_irq); in vmxnet3_free_irqs()
687 struct vmxnet3_rxqueue *rxq; in vmxnet3_set_interrupt_idx() local
709 rxq = &sc->vmx_rxq[i]; in vmxnet3_set_interrupt_idx()
710 rxs = rxq->vxrxq_rs; in vmxnet3_set_interrupt_idx()
[all …]
/src/sys/net/
iflib.c
143 static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
678 static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
747 static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, bool init);
821 iflib_rxq_t rxq = &ctx->ifc_rxqs[0]; in iflib_netmap_config() local
822 iflib_fl_t fl = &rxq->ifr_fl[0]; in iflib_netmap_config()
837 netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, bool init) in netmap_fl_refill() argument
844 if_ctx_t ctx = rxq->ifr_ctx; in netmap_fl_refill()
845 iflib_fl_t fl = &rxq->ifr_fl[0]; in netmap_fl_refill()
879 iru_init(&iru, rxq, 0 /* flid */); in netmap_fl_refill()
943 ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, in netmap_fl_refill()
[all …]
/src/sys/dev/iavf/
iavf_vc_iflib.c
102 vqpi->rxq.vsi_id = vqci->vsi_id; in iavf_configure_queues()
103 vqpi->rxq.queue_id = i; in iavf_configure_queues()
104 vqpi->rxq.ring_len = scctx->isc_nrxd[0]; in iavf_configure_queues()
105 vqpi->rxq.dma_ring_addr = rxr->rx_paddr; in iavf_configure_queues()
106 vqpi->rxq.max_pkt_size = scctx->isc_max_frame_size; in iavf_configure_queues()
107 vqpi->rxq.databuffer_size = rxr->mbuf_sz; in iavf_configure_queues()
108 vqpi->rxq.splithdr_enabled = 0; in iavf_configure_queues()
/src/sys/dev/ral/
rt2560.c
252 error = rt2560_alloc_rx_ring(sc, &sc->rxq, RT2560_RX_RING_COUNT); in rt2560_attach()
354 rt2560_free_rx_ring(sc, &sc->rxq); in rt2560_detach()
651 desc = &sc->rxq.desc[i]; in rt2560_alloc_rx_ring()
652 data = &sc->rxq.data[i]; in rt2560_alloc_rx_ring()
1099 hw = RAL_READ(sc, RT2560_SECCSR0) - sc->rxq.physaddr; in rt2560_decryption_intr()
1102 bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, in rt2560_decryption_intr()
1105 for (; sc->rxq.cur_decrypt != hw;) { in rt2560_decryption_intr()
1106 desc = &sc->rxq.desc[sc->rxq.cur_decrypt]; in rt2560_decryption_intr()
1107 data = &sc->rxq.data[sc->rxq.cur_decrypt]; in rt2560_decryption_intr()
1137 bus_dmamap_sync(sc->rxq.data_dmat, data->map, in rt2560_decryption_intr()
[all …]
rt2661.c
255 error = rt2661_alloc_rx_ring(sc, &sc->rxq, RT2661_RX_RING_COUNT); in rt2661_attach()
347 rt2661_free_rx_ring(sc, &sc->rxq); in rt2661_detach()
658 desc = &sc->rxq.desc[i]; in rt2661_alloc_rx_ring()
659 data = &sc->rxq.data[i]; in rt2661_alloc_rx_ring()
969 bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, in rt2661_rx_intr()
975 desc = &sc->rxq.desc[sc->rxq.cur]; in rt2661_rx_intr()
976 data = &sc->rxq.data[sc->rxq.cur]; in rt2661_rx_intr()
1011 bus_dmamap_sync(sc->rxq.data_dmat, data->map, in rt2661_rx_intr()
1013 bus_dmamap_unload(sc->rxq.data_dmat, data->map); in rt2661_rx_intr()
1015 error = bus_dmamap_load(sc->rxq.data_dmat, data->map, in rt2661_rx_intr()
[all …]
/src/sys/dev/ixl/
ixl_pf_iov.c
605 struct i40e_hmc_obj_rxq rxq; in ixl_vf_config_rx_queue() local
611 bzero(&rxq, sizeof(rxq)); in ixl_vf_config_rx_queue()
627 rxq.hsplit_0 = info->rx_split_pos & in ixl_vf_config_rx_queue()
632 rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; in ixl_vf_config_rx_queue()
634 rxq.dtype = 2; in ixl_vf_config_rx_queue()
641 rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS; in ixl_vf_config_rx_queue()
642 rxq.qlen = info->ring_len; in ixl_vf_config_rx_queue()
644 rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT; in ixl_vf_config_rx_queue()
646 rxq.dsize = 1; in ixl_vf_config_rx_queue()
647 rxq.crcstrip = 1; in ixl_vf_config_rx_queue()
[all …]
/src/sys/dev/nfe/
if_nfe.c
577 if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0) in nfe_attach()
760 nfe_free_rx_ring(sc, &sc->rxq); in nfe_detach()
1160 data = &sc->rxq.data[i]; in nfe_alloc_rx_ring()
1912 data = &sc->rxq.data[idx]; in nfe_discard_rxbuf()
1916 desc64 = &sc->rxq.desc64[idx]; in nfe_discard_rxbuf()
1923 desc32 = &sc->rxq.desc32[idx]; in nfe_discard_rxbuf()
1972 if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map, in nfe_newbuf()
1979 data = &sc->rxq.data[idx]; in nfe_newbuf()
1981 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, in nfe_newbuf()
1983 bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map); in nfe_newbuf()
[all …]
/src/sys/dev/cxgbe/
t4_sge.c
1153 struct sge_rxq *rxq; in t4_setup_vi_queues() local
1210 for_each_rxq(vi, i, rxq) { in t4_setup_vi_queues()
1211 rc = alloc_rxq(vi, rxq, i, intr_idx, maxp); in t4_setup_vi_queues()
1261 struct sge_rxq *rxq; in t4_teardown_vi_queues() local
1304 for_each_rxq(vi, i, rxq) { in t4_teardown_vi_queues()
1305 free_rxq(vi, rxq); in t4_teardown_vi_queues()
1417 MPASS(irq->rxq != NULL); in t4_vi_intr()
1418 t4_intr(irq->rxq); in t4_vi_intr()
1612 struct sge_rxq *rxq = iq_to_rxq(iq); in service_iq_fl() local
1623 struct lro_ctrl *lro = &rxq->lro; in service_iq_fl()
[all …]
/src/sys/dev/gve/
gve_sysctl.c
54 struct sysctl_oid_list *child, struct gve_rx_ring *rxq) in gve_setup_rxq_sysctl() argument
61 snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->com.id); in gve_setup_rxq_sysctl()
66 stats = &rxq->stats; in gve_setup_rxq_sysctl()
108 &rxq->cnt, 0, "Number of descriptors completed"); in gve_setup_rxq_sysctl()
111 &rxq->fill_cnt, rxq->fill_cnt, in gve_setup_rxq_sysctl()
/src/sys/contrib/dev/iwlwifi/pcie/
ctxt-info.c
216 rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma); in iwl_pcie_ctxt_info_init()
217 rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma); in iwl_pcie_ctxt_info_init()
218 rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma); in iwl_pcie_ctxt_info_init()
