
Searched refs:tx_head (Results 1 – 25 of 56) sorted by relevance


/linux/drivers/net/can/rockchip/
rockchip_canfd-tx.c
24 "%s: echo_skb[%u]=NULL tx_head=0x%08x tx_tail=0x%08x\n", in rkcanfd_tx_tail_is_eff()
26 priv->tx_head, priv->tx_tail); in rkcanfd_tx_tail_is_eff()
60 const unsigned int tx_head = rkcanfd_get_tx_head(priv); in rkcanfd_xmit_retry() local
61 const u32 reg_cmd = RKCANFD_REG_CMD_TX_REQ(tx_head); in rkcanfd_xmit_retry()
70 unsigned int tx_head, frame_len; in rkcanfd_start_xmit() local
84 "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, tx_pending=%d)\n", in rkcanfd_start_xmit()
85 priv->tx_head, priv->tx_tail, in rkcanfd_start_xmit()
117 tx_head = rkcanfd_get_tx_head(priv); in rkcanfd_start_xmit()
118 reg_cmd = RKCANFD_REG_CMD_TX_REQ(tx_head); in rkcanfd_start_xmit()
127 err = can_put_echo_skb(skb, ndev, tx_head, frame_le in rkcanfd_start_xmit()
[all...]
rockchip_canfd.h
466 unsigned int tx_head; member
515 return READ_ONCE(priv->tx_head) & (RKCANFD_TXFIFO_DEPTH - 1); in rkcanfd_get_tx_head()
527 return READ_ONCE(priv->tx_head) - READ_ONCE(priv->tx_tail); in rkcanfd_get_tx_pending()
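The rockchip_canfd.h hits show the pattern most drivers in this list share: tx_head and tx_tail are free-running counters, the FIFO slot is the counter masked with a power-of-two depth, and the pending count is the plain unsigned difference, which stays correct across wraparound. A minimal standalone sketch of that arithmetic follows; the depth and all names are illustrative, and the READ_ONCE() the driver uses because head and tail are read from both the xmit and completion paths is omitted.

/* Masked head/tail FIFO arithmetic in the style of rkcanfd_get_tx_head() and
 * rkcanfd_get_tx_pending(); depth and names are illustrative. */
#include <stdio.h>

#define TXFIFO_DEPTH 8U                 /* must be a power of two */

struct tx_fifo {
        unsigned int tx_head;           /* free-running, bumped by the xmit path */
        unsigned int tx_tail;           /* free-running, bumped on TX completion */
};

/* Slot of the next frame to submit: mask instead of modulo. */
static unsigned int get_tx_head(const struct tx_fifo *f)
{
        return f->tx_head & (TXFIFO_DEPTH - 1);
}

/* Frames queued but not yet completed; unsigned subtraction survives wrap. */
static unsigned int get_tx_pending(const struct tx_fifo *f)
{
        return f->tx_head - f->tx_tail;
}

int main(void)
{
        /* Counters close to UINT_MAX: slot and pending count stay sane. */
        struct tx_fifo f = { .tx_head = 0xfffffffeu, .tx_tail = 0xfffffffdu };

        printf("slot=%u pending=%u\n", get_tx_head(&f), get_tx_pending(&f));
        return 0;
}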
/linux/drivers/net/can/spi/mcp251xfd/
mcp251xfd-tx.c
23 u8 tx_head; in mcp251xfd_get_tx_obj_next() local
25 tx_head = mcp251xfd_get_tx_head(tx_ring); in mcp251xfd_get_tx_obj_next()
27 return &tx_ring->obj[tx_head]; in mcp251xfd_get_tx_obj_next()
141 u8 tx_head; in mcp251xfd_tx_failure_drop() local
145 tx_head = mcp251xfd_get_tx_head(tx_ring); in mcp251xfd_tx_failure_drop()
146 can_free_echo_skb(ndev, tx_head, &frame_len); in mcp251xfd_tx_failure_drop()
186 "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n", in mcp251xfd_tx_busy()
210 u8 tx_head; in mcp251xfd_start_xmit() local
224 tx_head = mcp251xfd_get_tx_head(tx_ring); in mcp251xfd_start_xmit()
230 err = can_put_echo_skb(skb, ndev, tx_head, frame_le in mcp251xfd_start_xmit()
[all...]
/linux/drivers/net/ethernet/moxa/
moxart_ether.c
131 priv->tx_head = 0; in moxart_mac_setup_desc_ring()
282 return CIRC_SPACE(priv->tx_head, priv->tx_tail, TX_DESC_NUM); in moxart_tx_queue_space()
288 unsigned int tx_head = priv->tx_head; in moxart_tx_finished() local
291 while (tx_tail != tx_head) { in moxart_tx_finished()
335 unsigned int tx_head; in moxart_mac_start_xmit() local
341 tx_head = priv->tx_head; in moxart_mac_start_xmit()
342 desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head); in moxart_mac_start_xmit()
356 priv->tx_mapping[tx_head] in moxart_mac_start_xmit()
[all...]
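moxart_ether.c does not open-code the free-space subtraction; moxart_tx_queue_space() uses CIRC_SPACE(), which always leaves one slot unused so a completely full ring is never confused with an empty one. A sketch with the macro bodies as defined in include/linux/circ_buf.h; the ring size and struct here are illustrative.

/* CIRC_SPACE() as used by moxart_tx_queue_space(); CIRC_CNT/CIRC_SPACE copied
 * from include/linux/circ_buf.h, the rest is illustrative. */
#include <stdio.h>

#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

#define TX_DESC_NUM 64                  /* must be a power of two */

struct ring {
        unsigned int tx_head;
        unsigned int tx_tail;
};

static unsigned int tx_queue_space(const struct ring *r)
{
        /* One slot is always sacrificed so head == tail means "empty". */
        return CIRC_SPACE(r->tx_head, r->tx_tail, TX_DESC_NUM);
}

int main(void)
{
        struct ring r = { .tx_head = 63, .tx_tail = 0 };

        printf("free slots: %u\n", tx_queue_space(&r));  /* 0: ring is full */
        return 0;
}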
/linux/drivers/infiniband/hw/hfi1/
iowait.h
39 * @tx_head: list of prebuilt packets
52 struct list_head tx_head; member
58 * @tx_head: overflow list of sdma_txreq's
68 * @count: total number of descriptors in tx_head'ed list
70 * @tx_count: number of tx entry's in tx_head'ed list
289 if (!list_empty(&wait->tx_head)) { in iowait_get_txhead()
291 &wait->tx_head, in iowait_get_txhead()
304 if (!list_empty(&w->tx_head)) { in iowait_get_desc()
305 tx = list_first_entry(&w->tx_head, struct sdma_txreq, in iowait_get_desc()
327 if (!list_empty(&w->tx_head)) { in iowait_update_priority()
[all...]
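In hfi1's iowait the name means something else entirely: tx_head is a struct list_head anchoring prebuilt sdma_txreq packets, and iowait_get_txhead() pulls the first entry off that list when it is non-empty. A self-contained sketch of the intrusive-list pattern; the mini list helpers and the txreq type are illustrative stand-ins for the kernel's.

/* tx_head as an overflow list, in the spirit of the iowait.h hits above.
 * The list helpers mirror the kernel's but are local to this sketch. */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
        n->prev->next = n->next; n->next->prev = n->prev;
}

struct txreq { int id; struct list_head list; };

/* Pop the first prebuilt request, or NULL when the list is empty. */
static struct txreq *get_txhead(struct list_head *tx_head)
{
        struct txreq *tx;

        if (list_empty(tx_head))
                return NULL;
        tx = container_of(tx_head->next, struct txreq, list);
        list_del(&tx->list);
        return tx;
}

int main(void)
{
        struct list_head tx_head = LIST_HEAD_INIT(tx_head);
        struct txreq a = { .id = 1 }, b = { .id = 2 };

        list_add_tail(&a.list, &tx_head);
        list_add_tail(&b.list, &tx_head);
        printf("first queued txreq: %d\n", get_txhead(&tx_head)->id);
        return 0;
}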
trace_tx.h
371 __field(u16, tx_head)
378 __entry->tx_head = sde->tx_head;
384 "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
391 __entry->tx_head,
407 __field(u16, tx_head)
414 __entry->tx_head = sde->tx_head;
419 "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
425 __entry->tx_head,
[all...]
/linux/drivers/net/ethernet/amd/
sun3lance.c
143 struct lance_tx_head tx_head[TX_RING_SIZE]; member
170 /* tx_head flags */
182 /* tx_head misc field */
394 MEM->init.tdra = dvma_vtob(MEM->tx_head); in lance_probe()
396 (dvma_vtob(MEM->tx_head) >> 16); in lance_probe()
400 (dvma_vtob(MEM->tx_head)))); in lance_probe()
461 MEM->tx_head[i].base = dvma_vtob(MEM->tx_data[i]); in lance_init_ring()
462 MEM->tx_head[i].flag = 0; in lance_init_ring()
463 MEM->tx_head[i].base_hi = in lance_init_ring()
465 MEM->tx_head[ in lance_init_ring()
[all...]
atarilance.c
158 struct lance_tx_head tx_head[TX_RING_SIZE]; member
270 /* tx_head flags */
282 /* tx_head misc field */
620 MEM->init.tx_ring.adr_lo = offsetof( struct lance_memory, tx_head ); in lance_probe1()
707 MEM->tx_head[i].base = offset; in lance_init_ring()
708 MEM->tx_head[i].flag = TMD1_OWN_HOST; in lance_init_ring()
709 MEM->tx_head[i].base_hi = 0; in lance_init_ring()
710 MEM->tx_head[i].length = 0; in lance_init_ring()
711 MEM->tx_head[i].misc = 0; in lance_init_ring()
758 i, MEM->tx_head[ in lance_tx_timeout()
[all...]
au1000_eth.c
703 aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2; in au1000_init()
704 aup->tx_tail = aup->tx_head; in au1000_init()
967 skb->data, aup->tx_head); in au1000_tx()
969 ptxd = aup->tx_dma_ring[aup->tx_head]; in au1000_tx()
986 pDB = aup->tx_db_inuse[aup->tx_head]; in au1000_tx()
1002 aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1); in au1000_tx()
au1000_eth.h
78 u32 tx_head; member
/linux/drivers/net/ethernet/i825xx/
ether1.c
502 priv(dev)->tx_head = NOP_ADDR + NOP_SIZE; in ether1_init_for_open()
619 if (priv(dev)->tx_head + size > TX_AREA_END) { in ether1_txalloc()
620 if (tail > priv(dev)->tx_head) in ether1_txalloc()
625 priv(dev)->tx_head = start + size; in ether1_txalloc()
627 if (priv(dev)->tx_head < tail && (priv(dev)->tx_head + size) > tail) in ether1_txalloc()
629 start = priv(dev)->tx_head; in ether1_txalloc()
630 priv(dev)->tx_head += size; in ether1_txalloc()
729 tmp = priv(dev)->tx_head; in ether1_sendpacket()
731 priv(dev)->tx_head in ether1_sendpacket()
[all...]
/linux/drivers/net/can/
ti_hecc.c
187 u32 tx_head; member
194 return priv->tx_head & HECC_TX_MB_MASK; in get_tx_head_mb()
204 return (priv->tx_head >> HECC_TX_PRIO_SHIFT) & MAX_TX_PRIO; in get_tx_head_prio()
360 priv->tx_head = HECC_TX_MASK; in ti_hecc_start()
482 "BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n", in ti_hecc_xmit()
483 priv->tx_head, priv->tx_tail); in ti_hecc_xmit()
509 --priv->tx_head; in ti_hecc_xmit()
511 (priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) { in ti_hecc_xmit()
739 while (priv->tx_tail - priv->tx_head > 0) { in ti_hecc_interrupt()
757 if ((priv->tx_head in ti_hecc_interrupt()
[all...]
bxcan.c
178 unsigned int tx_head; member
263 return priv->tx_head % BXCAN_TX_MB_NUM; in bxcan_get_tx_head()
273 return BXCAN_TX_MB_NUM - (priv->tx_head - priv->tx_tail); in bxcan_get_tx_free()
288 "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n", in bxcan_tx_busy()
289 priv->tx_head, priv->tx_tail, in bxcan_tx_busy()
290 priv->tx_head - priv->tx_tail); in bxcan_tx_busy()
442 while (priv->tx_head - priv->tx_tail > 0) { in bxcan_tx_isr()
691 priv->tx_head = 0; in bxcan_chip_start()
852 priv->tx_head++; in bxcan_start_xmit()
1003 priv->tx_head in bxcan_probe()
[all...]
at91_can.c
158 unsigned int tx_head; member
272 return (priv->tx_head & get_head_mb_mask(priv)) + get_mb_tx_first(priv); in get_tx_head_mb()
277 return (priv->tx_head >> get_head_prio_shift(priv)) & 0xf; in get_tx_head_prio()
359 priv->tx_head = priv->tx_tail = 0; in at91_setup_mailboxes()
463 * We use the priv->tx_head as counter for the next transmission
467 * priv->tx_head = (prio << get_next_prio_shift(priv)) |
514 * tx_head buffer prio and mailbox equals 0. in at91_start_xmit()
519 priv->tx_head++; in at91_start_xmit()
522 (priv->tx_head & get_head_mask(priv)) == 0) in at91_start_xmit()
646 * We iterate from priv->tx_tail to priv->tx_head an
[all...]
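The at91_can comments above describe a further variation: tx_head is still a free-running counter, but its low bits select the next TX mailbox while the bits above them supply the priority programmed into that mailbox. The shift, mask and mailbox base in this sketch are illustrative values, not the driver's.

/* Deriving mailbox and priority from one counter, in the style of
 * get_tx_head_mb()/get_tx_head_prio(); all constants are illustrative. */
#include <stdio.h>

#define MB_TX_FIRST      8U     /* first mailbox reserved for TX */
#define HEAD_MB_MASK     0x3U   /* low bits: mailbox offset */
#define HEAD_PRIO_SHIFT  2U     /* bits above: priority field */

static unsigned int tx_head_mb(unsigned int tx_head)
{
        return (tx_head & HEAD_MB_MASK) + MB_TX_FIRST;
}

static unsigned int tx_head_prio(unsigned int tx_head)
{
        return (tx_head >> HEAD_PRIO_SHIFT) & 0xf;
}

int main(void)
{
        unsigned int tx_head;

        /* Each increment walks the mailboxes; every wrap bumps the priority. */
        for (tx_head = 0; tx_head < 9; tx_head++)
                printf("head=%u -> mailbox %u, prio %u\n",
                       tx_head, tx_head_mb(tx_head), tx_head_prio(tx_head));
        return 0;
}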
/linux/drivers/net/ethernet/qualcomm/
qca_uart.c
43 unsigned char *tx_head; /* pointer to next XMIT byte */ member
136 written = serdev_device_write_buf(qca->serdev, qca->tx_head, in qcauart_transmit()
140 qca->tx_head += written; in qcauart_transmit()
225 qca->tx_head = qca->tx_buffer + written; in qcauart_netdev_xmit()
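qca_uart.c uses the name for yet another shape: tx_head is a byte pointer into a staged frame, advanced by however many bytes serdev_device_write_buf() actually accepted, so a partially written frame resumes from the right place on the next transmit event. Standalone sketch of that bookkeeping; the write_buf() stand-in and all sizes are illustrative.

/* Partial-write pointer advance in the spirit of qcauart_transmit(). */
#include <stdio.h>
#include <string.h>

struct uart_tx {
        unsigned char buf[64];
        unsigned char *tx_head;         /* next byte to transmit */
        size_t tx_left;                 /* bytes still queued */
};

/* Stand-in for the serial write: pretend at most 8 bytes are taken per call. */
static size_t write_buf(const unsigned char *p, size_t len)
{
        (void)p;
        return len < 8 ? len : 8;
}

static void transmit_some(struct uart_tx *t)
{
        size_t written = write_buf(t->tx_head, t->tx_left);

        t->tx_head += written;          /* resume here on the next TX-ready event */
        t->tx_left -= written;
}

int main(void)
{
        struct uart_tx t;

        memcpy(t.buf, "example frame payload", 21);
        t.tx_head = t.buf;
        t.tx_left = 21;

        while (t.tx_left)
                transmit_some(&t);
        printf("drained, head offset = %td\n", t.tx_head - t.buf);
        return 0;
}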
/linux/drivers/net/can/usb/etas_es58x/
es58x_core.c
279 u32 num_echo_skb = priv->tx_head - priv->tx_tail; in es58x_is_echo_skb_threshold_reached()
331 rcv_packet_idx, priv->tx_tail, priv->tx_head, in es58x_can_get_echo_skb_recovery()
332 priv->tx_head - priv->tx_tail, in es58x_can_get_echo_skb_recovery()
340 } else if ((s32)(rcv_packet_idx - priv->tx_head) >= 0) { in es58x_can_get_echo_skb_recovery()
352 if (priv->tx_tail == priv->tx_head) in es58x_can_get_echo_skb_recovery()
387 unsigned int num_echo_skb = priv->tx_head - priv->tx_tail; in es58x_can_get_echo_skb()
476 priv->tx_head = 0; in es58x_can_reset_echo_fifo()
505 priv->tx_head--; in es58x_flush_pending_tx_msg()
507 can_free_echo_skb(netdev, priv->tx_head & fifo_mask, in es58x_flush_pending_tx_msg()
543 priv->tx_head in es58x_tx_ack_msg()
[all...]
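es58x_core.c has to decide whether an acknowledged packet index has already reached tx_head even though both are 32-bit counters that wrap; the hit above does it by casting the unsigned difference to s32 and testing the sign. A standalone sketch of that wraparound-safe comparison (names are illustrative):

/* Wraparound-safe "has idx reached head?" test, as in the (s32) cast above. */
#include <stdio.h>
#include <stdint.h>

static int idx_reached_head(uint32_t idx, uint32_t head)
{
        /* The unsigned difference reinterpreted as signed gives the distance
         * between the counters, valid while they stay within 2^31 of each other. */
        return (int32_t)(idx - head) >= 0;
}

int main(void)
{
        /* Works across the 32-bit wrap: 0x00000002 is "after" 0xfffffffe. */
        printf("%d\n", idx_reached_head(0x00000002u, 0xfffffffeu));  /* 1 */
        printf("%d\n", idx_reached_head(0xfffffffeu, 0x00000002u));  /* 0 */
        return 0;
}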
/linux/drivers/net/ethernet/seeq/
ether3.c
335 priv(dev)->tx_head = 0; in ether3_init_for_open()
466 priv(dev)->tx_head, priv(dev)->tx_tail); in ether3_timeout()
474 priv(dev)->tx_head = priv(dev)->tx_tail = 0; in ether3_timeout()
502 next_ptr = (priv(dev)->tx_head + 1) & 15; in ether3_sendpacket()
511 ptr = 0x600 * priv(dev)->tx_head; in ether3_sendpacket()
512 priv(dev)->tx_head = next_ptr; in ether3_sendpacket()
534 next_ptr = (priv(dev)->tx_head + 1) & 15; in ether3_sendpacket()
/linux/drivers/net/ethernet/actions/
owl-emac.c
496 unsigned int tx_head; in owl_emac_setup_frame_xmit() local
515 tx_head = ring->head; in owl_emac_setup_frame_xmit()
516 desc = &ring->descs[tx_head]; in owl_emac_setup_frame_xmit()
530 ring->skbs[tx_head] = skb; in owl_emac_setup_frame_xmit()
531 ring->skbs_dma[tx_head] = dma_addr; in owl_emac_setup_frame_xmit()
572 unsigned int tx_head; in owl_emac_ndo_start_xmit() local
586 tx_head = ring->head; in owl_emac_ndo_start_xmit()
587 desc = &ring->descs[tx_head]; in owl_emac_ndo_start_xmit()
605 ring->skbs[tx_head] = skb; in owl_emac_ndo_start_xmit()
606 ring->skbs_dma[tx_head] in owl_emac_ndo_start_xmit()
[all...]
/linux/drivers/net/ethernet/dec/tulip/
de2104x.c
96 (((CP)->tx_tail <= (CP)->tx_head) ? \
97 (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
98 (CP)->tx_tail - (CP)->tx_head - 1)
292 unsigned tx_head; member
506 de->rx_tail, de->tx_head, de->tx_tail); in de_interrupt()
541 unsigned tx_head = de->tx_head; in de_tx() local
544 while (tx_tail != tx_head) { in de_tx()
622 entry = de->tx_head; in de_start_xmit()
644 de->tx_head in de_start_xmit()
[all...]
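de2104x.c (and 8139cp.c further down) formulate free-descriptor accounting differently: tx_head and tx_tail here are in-range ring indices rather than free-running counters, and the TX_BUFFS_AVAIL macro, visible in full above, again keeps one descriptor unused to tell full from empty. The macro body below is the one from the hit; the ring size and struct are illustrative.

/* TX_BUFFS_AVAIL() with in-range head/tail indices, as in de2104x.c. */
#include <stdio.h>

#define DE_TX_RING_SIZE 32

struct de_private {
        unsigned tx_head;               /* next descriptor to fill */
        unsigned tx_tail;               /* next descriptor to complete */
};

#define TX_BUFFS_AVAIL(CP)                                        \
        (((CP)->tx_tail <= (CP)->tx_head) ?                       \
          (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
          (CP)->tx_tail - (CP)->tx_head - 1)

int main(void)
{
        struct de_private de = { .tx_head = 30, .tx_tail = 3 };

        /* 27 descriptors in flight, 32 slots, one reserved: 4 remain free. */
        printf("free: %u\n", TX_BUFFS_AVAIL(&de));
        return 0;
}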
/linux/drivers/net/ethernet/silan/
sc92031.c
280 unsigned tx_head; member
410 while (priv->tx_head - priv->tx_tail > 0) { in _sc92031_tx_clear()
414 priv->tx_head = priv->tx_tail = 0; in _sc92031_tx_clear()
657 while (priv->tx_head - priv->tx_tail > 0) { in _sc92031_tx_tasklet()
953 BUG_ON(priv->tx_head - priv->tx_tail >= NUM_TX_DESC); in sc92031_start_xmit()
955 entry = priv->tx_head++ % NUM_TX_DESC; in sc92031_start_xmit()
979 if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC) in sc92031_start_xmit()
1010 priv->tx_head = priv->tx_tail = 0; in sc92031_open()
/linux/drivers/net/ethernet/realtek/
8139cp.c
116 (((CP)->tx_tail <= (CP)->tx_head) ? \
117 (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
118 (CP)->tx_tail - (CP)->tx_head - 1)
341 unsigned tx_head ____cacheline_aligned;
650 unsigned tx_head = cp->tx_head; in cp_tx() local
654 while (tx_tail != tx_head) { in cp_tx()
751 entry = cp->tx_head; in cp_start_xmit()
867 cp->tx_head = NEXT_TX(entry); in cp_start_xmit()
966 cp->tx_head in cp_stop_hw()
[all...]
/linux/drivers/infiniband/ulp/ipoib/
ipoib_ib.c
651 tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)]; in ipoib_send()
663 /* increase the tx_head after send success, but use it for queue state */ in ipoib_send()
678 rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), in ipoib_send()
691 rc = priv->tx_head; in ipoib_send()
692 ++priv->tx_head; in ipoib_send()
825 while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) { in ipoib_ib_dev_stop_default()
829 priv->tx_head - priv->tx_tail, in ipoib_ib_dev_stop_default()
836 while ((int)priv->tx_tail - (int)priv->tx_head < 0) { in ipoib_ib_dev_stop_default()
/linux/drivers/net/ethernet/calxeda/
xgmac.c
362 unsigned int tx_head; member
401 dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)
770 priv->tx_head = 0; in xgmac_dma_desc_rings_init()
868 while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) { in xgmac_tx_complete()
878 priv->tx_head, priv->tx_tail); in xgmac_tx_complete()
925 priv->tx_head = 0; in xgmac_tx_timeout_work()
1091 entry = priv->tx_head; in xgmac_xmit()
1136 priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ); in xgmac_xmit()
1138 /* Ensure tx_head update is visible to tx completion */ in xgmac_xmit()
1150 entry = priv->tx_head; in xgmac_xmit()
[all...]
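xgmac.c tracks its ring with dma_ring_space()/dma_ring_incr() and, as the comment in the hit above notes, makes the tx_head update visible to the TX-completion path before the queue state is re-checked. A standalone C11-atomics sketch of publishing a producer index with release/acquire ordering; the kernel driver uses its own barrier primitives, and everything here is illustrative.

/* Publish tx_head with a release store; the completion side pairs it with an
 * acquire load, so it never sees a head that outruns the descriptor writes. */
#include <stdatomic.h>
#include <stdio.h>

#define DMA_TX_RING_SZ 256

struct tx_ring {
        unsigned long desc[DMA_TX_RING_SZ];     /* stand-in for DMA descriptors */
        atomic_uint tx_head;                    /* producer index, published last */
        atomic_uint tx_tail;                    /* consumer index */
};

static void ring_publish(struct tx_ring *r, unsigned long desc_val)
{
        unsigned int head = atomic_load_explicit(&r->tx_head, memory_order_relaxed);

        r->desc[head % DMA_TX_RING_SZ] = desc_val;
        /* Release store: the descriptor write above is ordered before the new head. */
        atomic_store_explicit(&r->tx_head, head + 1, memory_order_release);
}

static int ring_complete_one(struct tx_ring *r, unsigned long *out)
{
        unsigned int head = atomic_load_explicit(&r->tx_head, memory_order_acquire);
        unsigned int tail = atomic_load_explicit(&r->tx_tail, memory_order_relaxed);

        if (head == tail)
                return 0;                       /* nothing pending */
        *out = r->desc[tail % DMA_TX_RING_SZ];
        atomic_store_explicit(&r->tx_tail, tail + 1, memory_order_relaxed);
        return 1;
}

int main(void)
{
        struct tx_ring r = { .tx_head = 0, .tx_tail = 0 };
        unsigned long v = 0;

        ring_publish(&r, 0xabcd);
        printf("completed=%d desc=0x%lx\n", ring_complete_one(&r, &v), v);
        return 0;
}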
/linux/drivers/net/can/mscan/
mscan.c
129 INIT_LIST_HEAD(&priv->tx_head); in mscan_start()
265 list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head); in mscan_start_xmit()
436 list_for_each_safe(pos, tmp, &priv->tx_head) { in mscan_isr()
452 if (list_empty(&priv->tx_head)) { in mscan_isr()
/linux/drivers/net/can/rcar/
rcar_can.c
97 u32 tx_head; member
379 if (priv->tx_head - priv->tx_tail <= unsent) in rcar_can_tx_done()
613 can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH, 0); in rcar_can_start_xmit()
614 priv->tx_head++; in rcar_can_start_xmit()
621 if (priv->tx_head - priv->tx_tail >= RCAR_CAN_FIFO_DEPTH) in rcar_can_start_xmit()
