// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024 Furong Xu <0x1207@gmail.com>
 * stmmac FPE (IEEE 802.1Qbu/802.3br Frame Preemption) handling
 */
#include "stmmac.h"
#include "stmmac_fpe.h"
#include "dwmac4.h"
#include "dwmac5.h"
#include "dwxgmac2.h"

#define GMAC5_MAC_FPE_CTRL_STS		0x00000234
#define XGMAC_MAC_FPE_CTRL_STS		0x00000280

#define GMAC5_MTL_FPE_CTRL_STS		0x00000c90
#define XGMAC_MTL_FPE_CTRL_STS		0x00001090
/* Preemption Classification */
#define FPE_MTL_PREEMPTION_CLASS	GENMASK(15, 8)
/* Additional Fragment Size of preempted frames */
#define FPE_MTL_ADD_FRAG_SZ		GENMASK(1, 0)

#define STMMAC_MAC_FPE_CTRL_STS_TRSP	BIT(19)
#define STMMAC_MAC_FPE_CTRL_STS_TVER	BIT(18)
#define STMMAC_MAC_FPE_CTRL_STS_RRSP	BIT(17)
#define STMMAC_MAC_FPE_CTRL_STS_RVER	BIT(16)
#define STMMAC_MAC_FPE_CTRL_STS_SRSP	BIT(2)
#define STMMAC_MAC_FPE_CTRL_STS_SVER	BIT(1)
#define STMMAC_MAC_FPE_CTRL_STS_EFPE	BIT(0)

struct stmmac_fpe_reg {
	const u32 mac_fpe_reg;		/* offset of MAC_FPE_CTRL_STS */
	const u32 mtl_fpe_reg;		/* offset of MTL_FPE_CTRL_STS */
	const u32 rxq_ctrl1_reg;	/* offset of MAC_RxQ_Ctrl1 */
	const u32 fprq_mask;		/* Frame Preemption Residue Queue */
	const u32 int_en_reg;		/* offset of MAC_Interrupt_Enable */
	const u32 int_en_bit;		/* Frame Preemption Interrupt Enable */
};

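/* FPE is usable only when the hardware advertises it (dma_cap.fpesel),
 * this core variant provides FPE register offsets, and the MAC ops
 * implement the preemptible class mapping callback.
 */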
bool stmmac_fpe_supported(struct stmmac_priv *priv)
{
	return priv->dma_cap.fpesel && priv->fpe_cfg.reg &&
		priv->hw->mac->fpe_map_preemption_class;
}

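/* Enabling preemption on TX also points the Frame Preemption Residue
 * Queue (FPRQ) at the last Rx queue in use; disabling it clears the
 * whole control register, including EFPE.
 */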
static void stmmac_fpe_configure_tx(struct ethtool_mmsv *mmsv, bool tx_enable)
{
	struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
	struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
	const struct stmmac_fpe_reg *reg = cfg->reg;
	u32 num_rxq = priv->plat->rx_queues_to_use;
	void __iomem *ioaddr = priv->ioaddr;
	u32 value;

	if (tx_enable) {
		cfg->fpe_csr = STMMAC_MAC_FPE_CTRL_STS_EFPE;
		value = readl(ioaddr + reg->rxq_ctrl1_reg);
		value &= ~reg->fprq_mask;
		/* Keep this SHIFT, FIELD_PREP() expects a constant mask :-/ */
		value |= (num_rxq - 1) << __ffs(reg->fprq_mask);
		writel(value, ioaddr + reg->rxq_ctrl1_reg);
	} else {
		cfg->fpe_csr = 0;
	}
	writel(cfg->fpe_csr, ioaddr + reg->mac_fpe_reg);
}

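/* The pMAC enable state is tracked through the FPE interrupt enable
 * bit. Before unmasking it, do a dummy read of MAC_FPE_CTRL_STS so that
 * status bits which latched while the interrupt was masked (the flags
 * are clear-on-read) do not fire a stale interrupt.
 */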
static void stmmac_fpe_configure_pmac(struct ethtool_mmsv *mmsv, bool pmac_enable)
{
	struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
	struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
	const struct stmmac_fpe_reg *reg = cfg->reg;
	void __iomem *ioaddr = priv->ioaddr;
	u32 value;

	value = readl(ioaddr + reg->int_en_reg);

	if (pmac_enable) {
		if (!(value & reg->int_en_bit)) {
			/* Dummy read to clear any pending masked interrupts */
			readl(ioaddr + reg->mac_fpe_reg);

			value |= reg->int_en_bit;
		}
	} else {
		value &= ~reg->int_en_bit;
	}

	writel(value, ioaddr + reg->int_en_reg);
}

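/* Request transmission of a verify or response mPacket by setting SVER
 * or SRSP on top of the cached control value, which preserves the
 * current EFPE state.
 */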
static void stmmac_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
				    enum ethtool_mpacket type)
{
	struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
	struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
	const struct stmmac_fpe_reg *reg = cfg->reg;
	void __iomem *ioaddr = priv->ioaddr;
	u32 value = cfg->fpe_csr;

	if (type == ETHTOOL_MPACKET_VERIFY)
		value |= STMMAC_MAC_FPE_CTRL_STS_SVER;
	else if (type == ETHTOOL_MPACKET_RESPONSE)
		value |= STMMAC_MAC_FPE_CTRL_STS_SRSP;

	writel(value, ioaddr + reg->mac_fpe_reg);
}

static const struct ethtool_mmsv_ops stmmac_mmsv_ops = {
	.configure_tx = stmmac_fpe_configure_tx,
	.configure_pmac = stmmac_fpe_configure_pmac,
	.send_mpacket = stmmac_fpe_send_mpacket,
};

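/* Translate hardware mPacket events into ethtool MAC Merge software
 * verification (MMSV) state machine events.
 */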
static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
{
	struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
	struct ethtool_mmsv *mmsv = &fpe_cfg->mmsv;

	if (status == FPE_EVENT_UNKNOWN)
		return;

	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER)
		ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET);

	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER)
		ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET);

	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
		ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET);
}

void stmmac_fpe_irq_status(struct stmmac_priv *priv)
{
	const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
	void __iomem *ioaddr = priv->ioaddr;
	struct net_device *dev = priv->dev;
	int status = FPE_EVENT_UNKNOWN;
	u32 value;

	/* Reads from the MAC_FPE_CTRL_STS register should only be performed
	 * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read"
	 */
	value = readl(ioaddr + reg->mac_fpe_reg);

	if (value & STMMAC_MAC_FPE_CTRL_STS_TRSP) {
		status |= FPE_EVENT_TRSP;
		netdev_dbg(dev, "FPE: Respond mPacket is transmitted\n");
	}

	if (value & STMMAC_MAC_FPE_CTRL_STS_TVER) {
		status |= FPE_EVENT_TVER;
		netdev_dbg(dev, "FPE: Verify mPacket is transmitted\n");
	}

	if (value & STMMAC_MAC_FPE_CTRL_STS_RRSP) {
		status |= FPE_EVENT_RRSP;
		netdev_dbg(dev, "FPE: Respond mPacket is received\n");
	}

	if (value & STMMAC_MAC_FPE_CTRL_STS_RVER) {
		status |= FPE_EVENT_RVER;
		netdev_dbg(dev, "FPE: Verify mPacket is received\n");
	}

	stmmac_fpe_event_status(priv, status);
}

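/* Bind the MMSV state machine to this netdev, and report at init time
 * when the hardware advertises FPE but this core lacks driver support.
 */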
void stmmac_fpe_init(struct stmmac_priv *priv)
{
	ethtool_mmsv_init(&priv->fpe_cfg.mmsv, priv->dev,
			  &stmmac_mmsv_ops);

	if ((!priv->fpe_cfg.reg || !priv->hw->mac->fpe_map_preemption_class) &&
	    priv->dma_cap.fpesel)
		dev_info(priv->device, "FPE is not supported by driver.\n");
}

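/* addFragSize as defined by IEEE 802.3br: the minimum size of a
 * non-final fragment is 64 * (1 + addFragSize) octets, so the two-bit
 * field encodes a minimum of 64, 128, 192 or 256 bytes.
 */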
int stmmac_fpe_get_add_frag_size(struct stmmac_priv *priv)
{
	const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
	void __iomem *ioaddr = priv->ioaddr;

	return FIELD_GET(FPE_MTL_ADD_FRAG_SZ, readl(ioaddr + reg->mtl_fpe_reg));
}

void stmmac_fpe_set_add_frag_size(struct stmmac_priv *priv, u32 add_frag_size)
{
	const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
	void __iomem *ioaddr = priv->ioaddr;
	u32 value;

	value = readl(ioaddr + reg->mtl_fpe_reg);
	writel(u32_replace_bits(value, add_frag_size, FPE_MTL_ADD_FRAG_SZ),
	       ioaddr + reg->mtl_fpe_reg);
}

#define ALG_ERR_MSG "TX algorithm SP is not suitable for one-to-many mapping"
#define WEIGHT_ERR_MSG "TXQ weight %u differs across other TXQs in TC: [%u]"

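/* DWMAC core 4+ cannot program the TC:TXQ mapping into hardware and the
 * TXQ:DMA channel mapping is fixed one-to-one, so a TC spanning several
 * TXQs is only accepted when the scheduler cannot distinguish the
 * queues: not strict priority, and equal weights within the TC.
 */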
int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
				    struct netlink_ext_ack *extack, u32 pclass)
{
	u32 val, offset, count, queue_weight, preemptible_txqs = 0;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int num_tc = netdev_get_num_tc(ndev);

	if (!pclass)
		goto update_mapping;

	/* DWMAC CORE4+ cannot program the TC:TXQ mapping to hardware.
	 *
	 * Synopsys Databook:
	 * "The number of Tx DMA channels is equal to the number of Tx queues,
	 * and is direct one-to-one mapping."
	 */
	for (u32 tc = 0; tc < num_tc; tc++) {
		count = ndev->tc_to_txq[tc].count;
		offset = ndev->tc_to_txq[tc].offset;

		if (pclass & BIT(tc))
			preemptible_txqs |= GENMASK(offset + count - 1, offset);

		/* This is 1:1 mapping, go to next TC */
		if (count == 1)
			continue;

		if (priv->plat->tx_sched_algorithm == MTL_TX_ALGORITHM_SP) {
			NL_SET_ERR_MSG_MOD(extack, ALG_ERR_MSG);
			return -EINVAL;
		}

		queue_weight = priv->plat->tx_queues_cfg[offset].weight;

		for (u32 i = 1; i < count; i++) {
			if (priv->plat->tx_queues_cfg[offset + i].weight !=
			    queue_weight) {
				NL_SET_ERR_MSG_FMT_MOD(extack, WEIGHT_ERR_MSG,
						       queue_weight, tc);
				return -EINVAL;
			}
		}
	}

update_mapping:
	val = readl(priv->ioaddr + GMAC5_MTL_FPE_CTRL_STS);
	writel(u32_replace_bits(val, preemptible_txqs, FPE_MTL_PREEMPTION_CLASS),
	       priv->ioaddr + GMAC5_MTL_FPE_CTRL_STS);

	return 0;
}

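/* Unlike DWMAC core 4/5, XGMAC can program the Queue:TC mapping in
 * hardware: mirror the stack's TC:TXQ map into MTL_TXQ_OPMODE and fall
 * back to the default one-to-one mapping when no TCs are configured.
 */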
int dwxgmac3_fpe_map_preemption_class(struct net_device *ndev,
				      struct netlink_ext_ack *extack, u32 pclass)
{
	u32 val, offset, count, preemptible_txqs = 0;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int num_tc = netdev_get_num_tc(ndev);

	if (!num_tc) {
		/* Restore default TC:Queue mapping */
		for (u32 i = 0; i < priv->plat->tx_queues_to_use; i++) {
			val = readl(priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(i));
			writel(u32_replace_bits(val, i, XGMAC_Q2TCMAP),
			       priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(i));
		}
	}

	/* Synopsys Databook:
	 * "All Queues within a traffic class are selected in a round robin
	 * fashion (when packets are available) when the traffic class is
	 * selected by the scheduler for packet transmission. This is true for
	 * any of the scheduling algorithms."
	 */
	for (u32 tc = 0; tc < num_tc; tc++) {
		count = ndev->tc_to_txq[tc].count;
		offset = ndev->tc_to_txq[tc].offset;

		if (pclass & BIT(tc))
			preemptible_txqs |= GENMASK(offset + count - 1, offset);

		for (u32 i = 0; i < count; i++) {
			val = readl(priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(offset + i));
			writel(u32_replace_bits(val, tc, XGMAC_Q2TCMAP),
			       priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(offset + i));
		}
	}

	val = readl(priv->ioaddr + XGMAC_MTL_FPE_CTRL_STS);
	writel(u32_replace_bits(val, preemptible_txqs, FPE_MTL_PREEMPTION_CLASS),
	       priv->ioaddr + XGMAC_MTL_FPE_CTRL_STS);

	return 0;
}

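/* Same FPE programming model on both cores; only the register offsets
 * and interrupt enable bits differ.
 */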
const struct stmmac_fpe_reg dwmac5_fpe_reg = {
	.mac_fpe_reg = GMAC5_MAC_FPE_CTRL_STS,
	.mtl_fpe_reg = GMAC5_MTL_FPE_CTRL_STS,
	.rxq_ctrl1_reg = GMAC_RXQ_CTRL1,
	.fprq_mask = GMAC_RXQCTRL_FPRQ,
	.int_en_reg = GMAC_INT_EN,
	.int_en_bit = GMAC_INT_FPE_EN,
};

const struct stmmac_fpe_reg dwxgmac3_fpe_reg = {
	.mac_fpe_reg = XGMAC_MAC_FPE_CTRL_STS,
	.mtl_fpe_reg = XGMAC_MTL_FPE_CTRL_STS,
	.rxq_ctrl1_reg = XGMAC_RXQ_CTRL1,
	.fprq_mask = XGMAC_FPRQ,
	.int_en_reg = XGMAC_INT_EN,
	.int_en_bit = XGMAC_FPEIE,
};