// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  Specialised functions for managing Chained mode

  Copyright(C) 2011 STMicroelectronics Ltd

  This file defines all the functions used to handle the normal/enhanced
  descriptors when the DMA is configured to work in chained or in ring
  mode.


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include "stmmac.h"
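/* Map the linear (non-paged) part of an oversized skb across as many chained
 * descriptors as needed, each carrying at most bmax bytes. Returns the index
 * of the last descriptor used, or -1 on a DMA mapping error.
 */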
static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
		     int csum)
{
	unsigned int nopaged_len = skb_headlen(skb);
	struct stmmac_priv *priv = tx_q->priv_data;
	unsigned int entry = tx_q->cur_tx;
	unsigned int bmax, buf_len, des2;
	unsigned int i = 1, len;
	struct dma_desc *desc;

	desc = tx_q->dma_tx + entry;
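	/* Maximum buffer size per descriptor: 8 KiB with enhanced
	 * descriptors, 2 KiB with normal ones.
	 */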
	if (priv->plat->enh_desc)
		bmax = BUF_SIZE_8KiB;
	else
		bmax = BUF_SIZE_2KiB;

	buf_len = min_t(unsigned int, nopaged_len, bmax);
	len = nopaged_len - buf_len;

	des2 = dma_map_single(priv->device, skb->data,
			      buf_len, DMA_TO_DEVICE);
	desc->des2 = cpu_to_le32(des2);
	if (dma_mapping_error(priv->device, des2))
		return -1;
	tx_q->tx_skbuff_dma[entry].buf = des2;
	tx_q->tx_skbuff_dma[entry].len = buf_len;
	/* do not close the descriptor and do not set own bit */
	stmmac_prepare_tx_desc(priv, desc, 1, buf_len, csum, STMMAC_CHAIN_MODE,
			       0, false, skb->len);
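	/* Spread the remaining head data over further descriptors; only the
	 * final one is closed and marked as the last segment.
	 */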
	while (len != 0) {
		tx_q->tx_skbuff[entry] = NULL;
		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
		desc = tx_q->dma_tx + entry;

		if (len > bmax) {
			des2 = dma_map_single(priv->device,
					      (skb->data + bmax * i),
					      bmax, DMA_TO_DEVICE);
			desc->des2 = cpu_to_le32(des2);
			if (dma_mapping_error(priv->device, des2))
				return -1;
			tx_q->tx_skbuff_dma[entry].buf = des2;
			tx_q->tx_skbuff_dma[entry].len = bmax;
			stmmac_prepare_tx_desc(priv, desc, 0, bmax, csum,
					       STMMAC_CHAIN_MODE, 1, false, skb->len);
			len -= bmax;
			i++;
		} else {
			des2 = dma_map_single(priv->device,
					      (skb->data + bmax * i), len,
					      DMA_TO_DEVICE);
			desc->des2 = cpu_to_le32(des2);
			if (dma_mapping_error(priv->device, des2))
				return -1;
			tx_q->tx_skbuff_dma[entry].buf = des2;
			tx_q->tx_skbuff_dma[entry].len = len;
			/* last descriptor can be set now */
			stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
					       STMMAC_CHAIN_MODE, 1, true, skb->len);
			len = 0;
		}
	}

	tx_q->cur_tx = entry;

	return entry;
}
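/* A frame is treated as jumbo when its length exceeds what a single
 * descriptor buffer can hold: 8 KiB with enhanced descriptors, 2 KiB with
 * normal ones.
 */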
static bool is_jumbo_frm(unsigned int len, bool enh_desc)
{
	bool ret = false;

	if ((enh_desc && (len > BUF_SIZE_8KiB)) ||
	    (!enh_desc && (len > BUF_SIZE_2KiB)))
		ret = true;

	return ret;
}
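/* Build the chain: des3 of each (basic or extended) descriptor is set to the
 * physical address of the next descriptor, and the last one points back to
 * the head.
 */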
static void init_dma_chain(void *des, dma_addr_t phy_addr,
			   unsigned int size, unsigned int extend_desc)
{
	/*
	 * In chained mode the des3 points to the next element in the ring.
	 * The latest element has to point to the head.
	 */
	int i;
	dma_addr_t dma_phy = phy_addr;

	if (extend_desc) {
		struct dma_extended_desc *p = (struct dma_extended_desc *)des;
		for (i = 0; i < (size - 1); i++) {
			dma_phy += sizeof(struct dma_extended_desc);
			p->basic.des3 = cpu_to_le32((unsigned int)dma_phy);
			p++;
		}
		p->basic.des3 = cpu_to_le32((unsigned int)phy_addr);

	} else {
		struct dma_desc *p = (struct dma_desc *)des;
		for (i = 0; i < (size - 1); i++) {
			dma_phy += sizeof(struct dma_desc);
			p->des3 = cpu_to_le32((unsigned int)dma_phy);
			p++;
		}
		p->des3 = cpu_to_le32((unsigned int)phy_addr);
	}
}

static void refill_desc3(struct stmmac_rx_queue *rx_q, struct dma_desc *p)
{
	struct stmmac_priv *priv = rx_q->priv_data;

	if (priv->hwts_rx_en && !priv->extend_desc)
		/* NOTE: Device will overwrite des3 with timestamp value if
		 * 1588-2002 time stamping is enabled, hence reinitialize it
		 * to keep explicit chaining in the descriptor.
		 */
		p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
				      (((rx_q->dirty_rx) + 1) %
				       priv->dma_conf.dma_rx_size) *
				      sizeof(struct dma_desc)));
}
static void clean_desc3(struct stmmac_tx_queue *tx_q, struct dma_desc *p)
{
	struct stmmac_priv *priv = tx_q->priv_data;
	unsigned int entry = tx_q->dirty_tx;

	if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
	    priv->hwts_tx_en)
		/* NOTE: Device will overwrite des3 with timestamp value if
		 * 1588-2002 time stamping is enabled, hence reinitialize it
		 * to keep explicit chaining in the descriptor.
		 */
		p->des3 = cpu_to_le32((unsigned int)(tx_q->dma_tx_phy +
				      ((tx_q->dirty_tx + 1) %
				       priv->dma_conf.dma_tx_size) *
				      sizeof(struct dma_desc)));
}
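/* Descriptor-handling callbacks used by the driver core when the descriptors
 * are configured in chain mode.
 */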
const struct stmmac_mode_ops chain_mode_ops = {
	.init = init_dma_chain,
	.is_jumbo_frm = is_jumbo_frm,
	.jumbo_frm = jumbo_frm,
	.refill_desc3 = refill_desc3,
	.clean_desc3 = clean_desc3,
};