Lines matching refs: tx_q (each entry is a source line number followed by the matching line)
394 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
397 if (tx_q->dirty_tx > tx_q->cur_tx)
398 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
400 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
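
The matches at source lines 394-400 are the free-slot computation for the circular TX ring: one slot is always kept unused so that dirty_tx == cur_tx can only mean "empty". A minimal stand-alone sketch of the same arithmetic (ring_avail() is a hypothetical helper, not a driver function):

#include <assert.h>

/* Hypothetical stand-alone model of the free-slot math above: how
 * many descriptors can still be queued between the consumer index
 * (dirty) and the producer index (cur) in a ring of ring_size
 * entries, keeping one slot empty as a full/empty disambiguator.
 */
static unsigned int ring_avail(unsigned int dirty, unsigned int cur,
			       unsigned int ring_size)
{
	if (dirty > cur)
		return dirty - cur - 1;
	return ring_size - cur + dirty - 1;
}

int main(void)
{
	assert(ring_avail(0, 0, 512) == 511);	/* empty ring */
	assert(ring_avail(10, 9, 512) == 0);	/* full ring  */
	assert(ring_avail(100, 200, 512) == 411);
	return 0;
}
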
430 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
432 if (tx_q->dirty_tx != tx_q->cur_tx)
1381 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1386 head_tx = (void *)tx_q->dma_etx;
1388 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1389 head_tx = (void *)tx_q->dma_entx;
1392 head_tx = (void *)tx_q->dma_tx;
1397 tx_q->dma_tx_phy, desc_size);
1478 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1487 p = &tx_q->dma_etx[i].basic;
1488 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1489 p = &tx_q->dma_entx[i].basic;
1491 p = &tx_q->dma_tx[i];
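
Source lines 1487-1491, like many later matches (2625-2629, 2762-2766, 4658-4662, 5002-5006), pick the basic descriptor out of whichever array backs the ring: extended descriptors, TBS enhanced descriptors, or plain descriptors. A simplified stand-alone sketch of that dispatch; the types here are stand-ins, and the first branch's condition (extended-descriptor mode) is not visible in the matches above, so it is an assumption:

#include <stdbool.h>

/* Simplified stand-ins for the driver's descriptor layouts. */
struct dma_desc     { unsigned int des0, des1, des2, des3; };
struct dma_ext_desc { struct dma_desc basic; struct dma_desc ext; };
struct dma_edesc    { unsigned int et0, et1; struct dma_desc basic; };

#define TBS_AVAIL 0x1	/* models STMMAC_TBS_AVAIL */

struct tx_ring {
	bool extend_desc;		/* assumed: priv->extend_desc */
	unsigned int tbs;		/* models tx_q->tbs flags     */
	struct dma_ext_desc *dma_etx;	/* used when extend_desc      */
	struct dma_edesc *dma_entx;	/* used when TBS is available  */
	struct dma_desc *dma_tx;	/* plain descriptors           */
};

/* Hypothetical helper: return the basic descriptor for slot i,
 * whichever array actually backs the ring.
 */
static struct dma_desc *pick_desc(struct tx_ring *r, unsigned int i)
{
	if (r->extend_desc)
		return &r->dma_etx[i].basic;
	if (r->tbs & TBS_AVAIL)
		return &r->dma_entx[i].basic;
	return &r->dma_tx[i];
}

int main(void)
{
	static struct dma_desc plain[4];
	struct tx_ring r = { .dma_tx = plain };	/* neither extended nor TBS */

	return pick_desc(&r, 2) == &plain[2] ? 0 : 1;
}
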
1603 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1605 if (tx_q->tx_skbuff_dma[i].buf &&
1606 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1607 if (tx_q->tx_skbuff_dma[i].map_as_page)
1609 tx_q->tx_skbuff_dma[i].buf,
1610 tx_q->tx_skbuff_dma[i].len,
1614 tx_q->tx_skbuff_dma[i].buf,
1615 tx_q->tx_skbuff_dma[i].len,
1619 if (tx_q->xdpf[i] &&
1620 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1621 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1622 xdp_return_frame(tx_q->xdpf[i]);
1623 tx_q->xdpf[i] = NULL;
1626 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1627 tx_q->xsk_frames_done++;
1629 if (tx_q->tx_skbuff[i] &&
1630 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1631 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1632 tx_q->tx_skbuff[i] = NULL;
1635 tx_q->tx_skbuff_dma[i].buf = 0;
1636 tx_q->tx_skbuff_dma[i].map_as_page = false;
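
The cluster at source lines 1605-1636 releases one ring slot by dispatching on the buffer type: mapped buffers are DMA-unmapped unless they belong to an XDP_TX frame, XDP frames are returned, skbs are freed, and XSK frames are only counted so the pool can be told about completions in one batch (xsk_frames_done, reported later via xsk_tx_completed() as at lines 1980-1982). A compressed stand-alone model of that dispatch; the types, fields and helper name are stand-ins, not the driver's:

#include <stdbool.h>
#include <assert.h>

/* Stand-ins for the driver's STMMAC_TXBUF_T_* buffer types. */
enum txbuf_type { TXBUF_SKB, TXBUF_XDP_TX, TXBUF_XDP_NDO, TXBUF_XSK_TX };

struct tx_slot {
	enum txbuf_type type;
	bool mapped;		/* models tx_skbuff_dma[i].buf != 0 */
	bool has_frame;		/* models tx_skbuff[i] / xdpf[i]    */
};

/* Hypothetical model of releasing one slot: DMA-unmap unless the
 * buffer belongs to an XDP_TX frame (the driver skips those here),
 * return or free the frame by type, and only count XSK completions
 * so they can be reported to the pool in one batch afterwards.
 */
static void release_tx_slot(struct tx_slot *s, unsigned int *xsk_done)
{
	if (s->mapped && s->type != TXBUF_XDP_TX)
		s->mapped = false;		/* dma_unmap_{page,single}() */

	if (s->has_frame) {
		if (s->type == TXBUF_XDP_TX || s->type == TXBUF_XDP_NDO)
			s->has_frame = false;	/* xdp_return_frame()   */
		else if (s->type == TXBUF_SKB)
			s->has_frame = false;	/* dev_kfree_skb_any()  */
	}

	if (s->type == TXBUF_XSK_TX)
		(*xsk_done)++;
}

int main(void)
{
	unsigned int done = 0;
	struct tx_slot s = { .type = TXBUF_XSK_TX };

	release_tx_slot(&s, &done);
	assert(done == 1);
	return 0;
}
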
1874 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1879 (u32)tx_q->dma_tx_phy);
1884 stmmac_mode_init(priv, tx_q->dma_etx,
1885 tx_q->dma_tx_phy,
1887 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1888 stmmac_mode_init(priv, tx_q->dma_tx,
1889 tx_q->dma_tx_phy,
1893 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1899 p = &((tx_q->dma_etx + i)->basic);
1900 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1901 p = &((tx_q->dma_entx + i)->basic);
1903 p = tx_q->dma_tx + i;
1907 tx_q->tx_skbuff_dma[i].buf = 0;
1908 tx_q->tx_skbuff_dma[i].map_as_page = false;
1909 tx_q->tx_skbuff_dma[i].len = 0;
1910 tx_q->tx_skbuff_dma[i].last_segment = false;
1911 tx_q->tx_skbuff[i] = NULL;
1972 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1975 tx_q->xsk_frames_done = 0;
1980 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1981 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1982 tx_q->xsk_frames_done = 0;
1983 tx_q->xsk_pool = NULL;
2060 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2069 addr = tx_q->dma_etx;
2070 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2072 addr = tx_q->dma_entx;
2075 addr = tx_q->dma_tx;
2080 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2082 kfree(tx_q->tx_skbuff_dma);
2083 kfree(tx_q->tx_skbuff);
2226 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2230 tx_q->queue_index = queue;
2231 tx_q->priv_data = priv;
2233 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2234 sizeof(*tx_q->tx_skbuff_dma),
2236 if (!tx_q->tx_skbuff_dma)
2239 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2242 if (!tx_q->tx_skbuff)
2247 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2255 &tx_q->dma_tx_phy, GFP_KERNEL);
2260 tx_q->dma_etx = addr;
2261 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2262 tx_q->dma_entx = addr;
2264 tx_q->dma_tx = addr;
2585 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2587 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2588 unsigned int entry = tx_q->cur_tx;
2625 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2626 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2627 tx_desc = &tx_q->dma_entx[entry].basic;
2629 tx_desc = tx_q->dma_tx + entry;
2635 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2641 tx_q->tx_skbuff_dma[entry].buf = 0;
2642 tx_q->xdpf[entry] = NULL;
2644 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2645 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2646 tx_q->tx_skbuff_dma[entry].last_segment = true;
2647 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2651 tx_q->tx_count_frames++;
2655 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2663 meta_req.tbs = tx_q->tbs;
2664 meta_req.edesc = &tx_q->dma_entx[entry];
2668 tx_q->tx_count_frames = 0;
2680 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2682 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2683 entry = tx_q->cur_tx;
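
Source line 2682 advances the producer index with STMMAC_GET_ENTRY(), and the same macro shows up again at 4205, 4233, 4384 and 4488. Assuming the macro is the usual power-of-two wrap (increment, then mask with size - 1), which is not visible in these matches, the advance can be modelled stand-alone:

#include <assert.h>

/* Assumed model of STMMAC_GET_ENTRY(x, size): advance a ring index
 * by one and wrap, relying on the ring size being a power of two.
 */
static unsigned int ring_next(unsigned int x, unsigned int size)
{
	return (x + 1) & (size - 1);
}

int main(void)
{
	assert(ring_next(0, 512) == 1);
	assert(ring_next(511, 512) == 0);	/* wrap-around */
	return 0;
}
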
2730 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2738 tx_q->xsk_frames_done = 0;
2740 entry = tx_q->dirty_tx;
2743 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2749 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2750 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2751 xdpf = tx_q->xdpf[entry];
2753 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2755 skb = tx_q->tx_skbuff[entry];
2762 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2763 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2764 p = &tx_q->dma_entx[entry].basic;
2766 p = tx_q->dma_tx + entry;
2792 } else if (tx_q->xsk_pool &&
2793 xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2799 xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2805 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2806 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2807 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2809 tx_q->tx_skbuff_dma[entry].buf,
2810 tx_q->tx_skbuff_dma[entry].len,
2814 tx_q->tx_skbuff_dma[entry].buf,
2815 tx_q->tx_skbuff_dma[entry].len,
2817 tx_q->tx_skbuff_dma[entry].buf = 0;
2818 tx_q->tx_skbuff_dma[entry].len = 0;
2819 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2822 stmmac_clean_desc3(priv, tx_q, p);
2824 tx_q->tx_skbuff_dma[entry].last_segment = false;
2825 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2828 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2830 tx_q->xdpf[entry] = NULL;
2834 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2836 tx_q->xdpf[entry] = NULL;
2839 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2840 tx_q->xsk_frames_done++;
2842 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2847 tx_q->tx_skbuff[entry] = NULL;
2855 tx_q->dirty_tx = entry;
2869 if (tx_q->xsk_pool) {
2872 if (tx_q->xsk_frames_done)
2873 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2875 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2876 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2895 if (tx_q->dirty_tx != tx_q->cur_tx)
2921 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2930 tx_q->dma_tx_phy, chan);
2989 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2996 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3122 struct stmmac_tx_queue *tx_q;
3168 tx_q = &priv->dma_conf.tx_queue[chan];
3171 tx_q->dma_tx_phy, chan);
3173 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3175 tx_q->tx_tail_addr, chan);
3183 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3191 ch = &priv->channel[tx_q->queue_index];
3192 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3199 hrtimer_start(&tx_q->txtimer,
3203 hrtimer_try_to_cancel(&tx_q->txtimer);
3214 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3215 struct stmmac_priv *priv = tx_q->priv_data;
3219 ch = &priv->channel[tx_q->queue_index];
3220 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3249 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3254 hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3597 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3600 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3619 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3620 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3993 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3997 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
4179 struct stmmac_tx_queue *tx_q)
4196 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4197 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4199 p = &tx_q->dma_tx[tx_q->cur_tx];
4205 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4223 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4233 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4235 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4237 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4238 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4240 desc = &tx_q->dma_tx[tx_q->cur_tx];
4258 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4263 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4274 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4275 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
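
Source lines 4258-4275 recompute the tail pointer after descriptors have been queued: the address written back to the DMA engine is the ring's base DMA address plus cur_tx descriptors, with the descriptor size depending on the layout in use (the branch at 4263 selects the TBS enhanced size). A stand-alone sketch of the arithmetic; the sizes are illustrative placeholders, not the real sizeof values:

#include <stdint.h>
#include <stdio.h>

/* Illustrative descriptor sizes; the real values would come from
 * sizeof() of the descriptor structures actually in use.
 */
#define DESC_SIZE_ENHANCED	32u
#define DESC_SIZE_NORMAL	16u

/* Hypothetical model of the tail-pointer value written to the DMA
 * engine after queueing descriptors up to index cur_tx.
 */
static uint64_t tx_tail_addr(uint64_t dma_tx_phy, unsigned int cur_tx,
			     unsigned int desc_size)
{
	return dma_tx_phy + (uint64_t)cur_tx * desc_size;
}

int main(void)
{
	uint64_t base = 0x80000000ull;

	printf("tail = 0x%llx\n",
	       (unsigned long long)tx_tail_addr(base, 5, DESC_SIZE_NORMAL));
	return 0;
}
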
4321 struct stmmac_tx_queue *tx_q;
4344 tx_q = &priv->dma_conf.tx_queue[queue];
4346 first_tx = tx_q->cur_tx;
4376 if (mss != tx_q->mss) {
4377 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4378 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4380 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4383 tx_q->mss = mss;
4384 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4386 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4396 first_entry = tx_q->cur_tx;
4397 WARN_ON(tx_q->tx_skbuff[first_entry]);
4399 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4400 desc = &tx_q->dma_entx[first_entry].basic;
4402 desc = &tx_q->dma_tx[first_entry];
4417 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4418 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4422 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4427 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4428 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4429 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4430 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4445 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4446 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4447 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4448 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4451 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4454 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4455 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4458 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4459 tx_q->tx_count_frames += tx_packets;
4467 else if ((tx_q->tx_count_frames %
4474 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4475 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4477 desc = &tx_q->dma_tx[tx_q->cur_tx];
4479 tx_q->tx_count_frames = 0;
4488 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4516 tx_q->tx_skbuff_dma[first_entry].last_segment,
4532 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4533 tx_q->cur_tx, first, nfrags);
4595 struct stmmac_tx_queue *tx_q;
4600 tx_q = &priv->dma_conf.tx_queue[queue];
4602 first_tx = tx_q->cur_tx;
4635 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4637 entry = tx_q->cur_tx;
4639 WARN_ON(tx_q->tx_skbuff[first_entry]);
4658 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4659 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4660 desc = &tx_q->dma_entx[entry].basic;
4662 desc = tx_q->dma_tx + entry;
4675 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4686 WARN_ON(tx_q->tx_skbuff[entry]);
4689 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4690 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4691 desc = &tx_q->dma_entx[entry].basic;
4693 desc = tx_q->dma_tx + entry;
4700 tx_q->tx_skbuff_dma[entry].buf = des;
4704 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4705 tx_q->tx_skbuff_dma[entry].len = len;
4706 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4707 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4715 tx_q->tx_skbuff[entry] = skb;
4716 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4724 tx_q->tx_count_frames += tx_packets;
4732 else if ((tx_q->tx_count_frames %
4740 desc = &tx_q->dma_etx[entry].basic;
4741 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4742 desc = &tx_q->dma_entx[entry].basic;
4744 desc = &tx_q->dma_tx[entry];
4746 tx_q->tx_count_frames = 0;
4756 tx_q->cur_tx = entry;
4761 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4795 tx_q->tx_skbuff_dma[first_entry].buf = des;
4796 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4797 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4801 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4802 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4817 if (tx_q->tbs & STMMAC_TBS_EN) {
4820 tbs_desc = &tx_q->dma_entx[first_entry];
4985 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4986 unsigned int entry = tx_q->cur_tx;
5002 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
5003 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
5004 tx_desc = &tx_q->dma_entx[entry].basic;
5006 tx_desc = tx_q->dma_tx + entry;
5014 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
5023 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
5026 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
5027 tx_q->tx_skbuff_dma[entry].map_as_page = false;
5028 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
5029 tx_q->tx_skbuff_dma[entry].last_segment = true;
5030 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
5032 tx_q->xdpf[entry] = xdpf;
5040 tx_q->tx_count_frames++;
5042 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
5048 tx_q->tx_count_frames = 0;
5058 tx_q->cur_tx = entry;
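
The same interrupt-coalescing pattern appears at source lines 2651-2668, 4458-4479, 4724-4746 and 5040-5048: a per-queue frame counter is incremented per packet, and the completion-interrupt bit is only requested (and the counter reset) every tx_coal_frames packets; otherwise the TX timer picks up the cleaning. A stand-alone sketch of that decision, with the timer interaction left out and stand-in names:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the per-queue coalescing counter: request a
 * completion interrupt only every coal_frames packets, so the clean
 * path runs in batches instead of once per packet.
 */
struct coal_state { unsigned int count; };

static bool want_irq(struct coal_state *st, unsigned int packets,
		     unsigned int coal_frames)
{
	st->count += packets;
	if (st->count % coal_frames == 0) {
		st->count = 0;
		return true;	/* driver sets the IC bit on this descriptor */
	}
	return false;
}

int main(void)
{
	struct coal_state st = { 0 };
	unsigned int i, irqs = 0;

	for (i = 0; i < 100; i++)
		irqs += want_irq(&st, 1, 25);
	printf("IRQ requested %u times for 100 packets\n", irqs);	/* 4 */
	return 0;
}
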
6168 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6170 int chan = tx_q->queue_index;
6174 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6391 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6397 sysfs_display_ring((void *)tx_q->dma_etx,
6398 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6399 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6401 sysfs_display_ring((void *)tx_q->dma_tx,
6402 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6886 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6908 tx_q->dma_tx_phy, tx_q->queue_index);
6910 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6911 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6913 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6915 tx_q->tx_tail_addr, tx_q->queue_index);
6964 struct stmmac_tx_queue *tx_q;
7024 tx_q = &priv->dma_conf.tx_queue[chan];
7027 tx_q->dma_tx_phy, chan);
7029 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
7031 tx_q->tx_tail_addr, chan);
7033 hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7069 struct stmmac_tx_queue *tx_q;
7084 tx_q = &priv->dma_conf.tx_queue[queue];
7087 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7896 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7898 tx_q->cur_tx = 0;
7899 tx_q->dirty_tx = 0;
7900 tx_q->mss = 0;