1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2024 Furong Xu <0x1207@gmail.com>
4 * stmmac FPE(802.3 Qbu) handling
5 */
6 #include "stmmac.h"
7 #include "stmmac_fpe.h"
8 #include "dwmac4.h"
9 #include "dwmac5.h"
10 #include "dwxgmac2.h"
11
/* MAC_FPE_CTRL_STS register offset, per core */
#define GMAC5_MAC_FPE_CTRL_STS		0x00000234
#define XGMAC_MAC_FPE_CTRL_STS		0x00000280

/* MTL_FPE_CTRL_STS register offset, per core */
#define GMAC5_MTL_FPE_CTRL_STS		0x00000c90
#define XGMAC_MTL_FPE_CTRL_STS		0x00001090
/* Preemption Classification */
#define FPE_MTL_PREEMPTION_CLASS	GENMASK(15, 8)
/* Additional Fragment Size of preempted frames */
#define FPE_MTL_ADD_FRAG_SZ		GENMASK(1, 0)

/* MAC_FPE_CTRL_STS bits; layout shared between GMAC5 and XGMAC cores.
 * The four high status bits are clear-on-read (see stmmac_fpe_irq_status()).
 */
#define STMMAC_MAC_FPE_CTRL_STS_TRSP	BIT(19)	/* Response mPacket transmitted */
#define STMMAC_MAC_FPE_CTRL_STS_TVER	BIT(18)	/* Verify mPacket transmitted */
#define STMMAC_MAC_FPE_CTRL_STS_RRSP	BIT(17)	/* Response mPacket received */
#define STMMAC_MAC_FPE_CTRL_STS_RVER	BIT(16)	/* Verify mPacket received */
#define STMMAC_MAC_FPE_CTRL_STS_SRSP	BIT(2)	/* Send Response mPacket */
#define STMMAC_MAC_FPE_CTRL_STS_SVER	BIT(1)	/* Send Verify mPacket */
#define STMMAC_MAC_FPE_CTRL_STS_EFPE	BIT(0)	/* Enable TX frame preemption */
29
/* Per-core FPE register layout. GMAC5 and XGMAC share the bit definitions
 * above but place the registers at different offsets; each core supplies
 * one instance of this table (dwmac5_fpe_reg / dwxgmac3_fpe_reg below).
 */
struct stmmac_fpe_reg {
	const u32 mac_fpe_reg;		/* offset of MAC_FPE_CTRL_STS */
	const u32 mtl_fpe_reg;		/* offset of MTL_FPE_CTRL_STS */
	const u32 rxq_ctrl1_reg;	/* offset of MAC_RxQ_Ctrl1 */
	const u32 fprq_mask;		/* Frame Preemption Residue Queue */
	const u32 int_en_reg;		/* offset of MAC_Interrupt_Enable */
	const u32 int_en_bit;		/* Frame Preemption Interrupt Enable */
};
38
stmmac_fpe_supported(struct stmmac_priv * priv)39 bool stmmac_fpe_supported(struct stmmac_priv *priv)
40 {
41 return priv->dma_cap.fpesel && priv->fpe_cfg.reg &&
42 priv->hw->mac->fpe_map_preemption_class;
43 }
44
/* ethtool_mmsv_ops::configure_tx callback.
 *
 * When enabling TX preemption, program the Frame Preemption Residue Queue
 * (FPRQ) to the highest RX queue index before writing EFPE; when disabling,
 * clear the whole control value. The value is cached in cfg->fpe_csr so
 * stmmac_fpe_send_mpacket() can OR mPacket trigger bits on top of it
 * without re-reading the (clear-on-read) register.
 */
static void stmmac_fpe_configure_tx(struct ethtool_mmsv *mmsv, bool tx_enable)
{
	struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
	struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
	const struct stmmac_fpe_reg *reg = cfg->reg;
	u32 num_rxq = priv->plat->rx_queues_to_use;
	void __iomem *ioaddr = priv->ioaddr;
	u32 value;

	if (tx_enable) {
		cfg->fpe_csr = STMMAC_MAC_FPE_CTRL_STS_EFPE;
		/* Route preempted-frame residue to the last RX queue in use */
		value = readl(ioaddr + reg->rxq_ctrl1_reg);
		value &= ~reg->fprq_mask;
		/* Keep this SHIFT, FIELD_PREP() expects a constant mask :-/ */
		value |= (num_rxq - 1) << __ffs(reg->fprq_mask);
		writel(value, ioaddr + reg->rxq_ctrl1_reg);
	} else {
		cfg->fpe_csr = 0;
	}
	writel(cfg->fpe_csr, ioaddr + reg->mac_fpe_reg);
}
66
/* ethtool_mmsv_ops::configure_pmac callback.
 *
 * Only the FPE interrupt enable bit is toggled here. The read-modify-write
 * of the shared interrupt-enable register is serialized with other users
 * via hw->irq_ctrl_lock. Before unmasking, MAC_FPE_CTRL_STS is read once
 * to discard any status flags (clear-on-read) that latched while the
 * interrupt was masked, so no stale event fires immediately.
 */
static void stmmac_fpe_configure_pmac(struct ethtool_mmsv *mmsv, bool pmac_enable)
{
	struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
	struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
	const struct stmmac_fpe_reg *reg = cfg->reg;
	void __iomem *ioaddr = priv->ioaddr;
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&priv->hw->irq_ctrl_lock, flags);
	value = readl(ioaddr + reg->int_en_reg);

	if (pmac_enable) {
		if (!(value & reg->int_en_bit)) {
			/* Dummy read to clear any pending masked interrupts */
			readl(ioaddr + reg->mac_fpe_reg);

			value |= reg->int_en_bit;
		}
	} else {
		value &= ~reg->int_en_bit;
	}

	writel(value, ioaddr + reg->int_en_reg);
	spin_unlock_irqrestore(&priv->hw->irq_ctrl_lock, flags);
}
93
stmmac_fpe_send_mpacket(struct ethtool_mmsv * mmsv,enum ethtool_mpacket type)94 static void stmmac_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
95 enum ethtool_mpacket type)
96 {
97 struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
98 struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
99 const struct stmmac_fpe_reg *reg = cfg->reg;
100 void __iomem *ioaddr = priv->ioaddr;
101 u32 value = cfg->fpe_csr;
102
103 if (type == ETHTOOL_MPACKET_VERIFY)
104 value |= STMMAC_MAC_FPE_CTRL_STS_SVER;
105 else if (type == ETHTOOL_MPACKET_RESPONSE)
106 value |= STMMAC_MAC_FPE_CTRL_STS_SRSP;
107
108 writel(value, ioaddr + reg->mac_fpe_reg);
109 }
110
/* Callbacks wiring the generic ethtool MAC Merge verification state
 * machine (ethtool_mmsv) to the stmmac hardware operations above.
 */
static const struct ethtool_mmsv_ops stmmac_mmsv_ops = {
	.configure_tx = stmmac_fpe_configure_tx,
	.configure_pmac = stmmac_fpe_configure_pmac,
	.send_mpacket = stmmac_fpe_send_mpacket,
};
116
/* Feed decoded FPE interrupt events into the MM verification state machine.
 *
 * Only received Verify/Response events and the transmitted Verify event
 * have an mmsv counterpart; FPE_EVENT_TRSP is logged by the caller but
 * not forwarded here.
 */
static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
{
	struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
	struct ethtool_mmsv *mmsv = &fpe_cfg->mmsv;

	if (status == FPE_EVENT_UNKNOWN)
		return;

	/* Link partner sent us a Verify mPacket */
	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER)
		ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET);

	/* Local device transmitted its Verify mPacket */
	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER)
		ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET);

	/* Link partner responded to our Verify mPacket */
	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
		ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET);
}
134
stmmac_fpe_irq_status(struct stmmac_priv * priv)135 void stmmac_fpe_irq_status(struct stmmac_priv *priv)
136 {
137 const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
138 void __iomem *ioaddr = priv->ioaddr;
139 struct net_device *dev = priv->dev;
140 int status = FPE_EVENT_UNKNOWN;
141 u32 value;
142
143 /* Reads from the MAC_FPE_CTRL_STS register should only be performed
144 * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read"
145 */
146 value = readl(ioaddr + reg->mac_fpe_reg);
147
148 if (value & STMMAC_MAC_FPE_CTRL_STS_TRSP) {
149 status |= FPE_EVENT_TRSP;
150 netdev_dbg(dev, "FPE: Respond mPacket is transmitted\n");
151 }
152
153 if (value & STMMAC_MAC_FPE_CTRL_STS_TVER) {
154 status |= FPE_EVENT_TVER;
155 netdev_dbg(dev, "FPE: Verify mPacket is transmitted\n");
156 }
157
158 if (value & STMMAC_MAC_FPE_CTRL_STS_RRSP) {
159 status |= FPE_EVENT_RRSP;
160 netdev_dbg(dev, "FPE: Respond mPacket is received\n");
161 }
162
163 if (value & STMMAC_MAC_FPE_CTRL_STS_RVER) {
164 status |= FPE_EVENT_RVER;
165 netdev_dbg(dev, "FPE: Verify mPacket is received\n");
166 }
167
168 stmmac_fpe_event_status(priv, status);
169 }
170
stmmac_fpe_init(struct stmmac_priv * priv)171 void stmmac_fpe_init(struct stmmac_priv *priv)
172 {
173 ethtool_mmsv_init(&priv->fpe_cfg.mmsv, priv->dev,
174 &stmmac_mmsv_ops);
175
176 if ((!priv->fpe_cfg.reg || !priv->hw->mac->fpe_map_preemption_class) &&
177 priv->dma_cap.fpesel)
178 dev_info(priv->device, "FPE is not supported by driver.\n");
179 }
180
stmmac_fpe_get_add_frag_size(struct stmmac_priv * priv)181 int stmmac_fpe_get_add_frag_size(struct stmmac_priv *priv)
182 {
183 const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
184 void __iomem *ioaddr = priv->ioaddr;
185
186 return FIELD_GET(FPE_MTL_ADD_FRAG_SZ, readl(ioaddr + reg->mtl_fpe_reg));
187 }
188
/* Program the additional-fragment-size field of MTL_FPE_CTRL_STS,
 * leaving all other bits of the register untouched.
 */
void stmmac_fpe_set_add_frag_size(struct stmmac_priv *priv, u32 add_frag_size)
{
	const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
	void __iomem *ioaddr = priv->ioaddr;
	u32 ctrl;

	ctrl = readl(ioaddr + reg->mtl_fpe_reg);
	ctrl = u32_replace_bits(ctrl, add_frag_size, FPE_MTL_ADD_FRAG_SZ);
	writel(ctrl, ioaddr + reg->mtl_fpe_reg);
}
199
#define ALG_ERR_MSG "TX algorithm SP is not suitable for one-to-many mapping"
#define WEIGHT_ERR_MSG "TXQ weight %u differs across other TXQs in TC: [%u]"

/**
 * dwmac5_fpe_map_preemption_class - program which TX queues are preemptible
 * @ndev: netdev whose TC-to-TXQ mapping is consulted
 * @extack: netlink extended ack for validation error reporting
 * @pclass: bitmask of traffic classes that should be preemptible
 *
 * Expands @pclass from traffic classes to the per-TXQ bitmask written into
 * MTL_FPE_CTRL_STS. A zero @pclass skips validation and clears the mapping.
 *
 * Return: 0 on success, -EINVAL when a one-to-many TC:TXQ mapping is
 * combined with strict-priority scheduling or mismatched queue weights.
 */
int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
				    struct netlink_ext_ack *extack, u32 pclass)
{
	u32 val, offset, count, queue_weight, preemptible_txqs = 0;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int num_tc = netdev_get_num_tc(ndev);

	/* pclass == 0: just clear the preemption-class field below */
	if (!pclass)
		goto update_mapping;

	/* DWMAC CORE4+ can not program TC:TXQ mapping to hardware.
	 *
	 * Synopsys Databook:
	 * "The number of Tx DMA channels is equal to the number of Tx queues,
	 * and is direct one-to-one mapping."
	 */
	for (u32 tc = 0; tc < num_tc; tc++) {
		count = ndev->tc_to_txq[tc].count;
		offset = ndev->tc_to_txq[tc].offset;

		/* Mark every TXQ belonging to a preemptible TC */
		if (pclass & BIT(tc))
			preemptible_txqs |= GENMASK(offset + count - 1, offset);

		/* This is 1:1 mapping, go to next TC */
		if (count == 1)
			continue;

		/* One-to-many TC:TXQ mapping cannot be expressed with
		 * strict-priority TXQ scheduling.
		 */
		if (priv->plat->tx_sched_algorithm == MTL_TX_ALGORITHM_SP) {
			NL_SET_ERR_MSG_MOD(extack, ALG_ERR_MSG);
			return -EINVAL;
		}

		/* All TXQs within one TC must carry the same weight */
		queue_weight = priv->plat->tx_queues_cfg[offset].weight;

		for (u32 i = 1; i < count; i++) {
			if (priv->plat->tx_queues_cfg[offset + i].weight !=
			    queue_weight) {
				NL_SET_ERR_MSG_FMT_MOD(extack, WEIGHT_ERR_MSG,
						       queue_weight, tc);
				return -EINVAL;
			}
		}
	}

update_mapping:
	val = readl(priv->ioaddr + GMAC5_MTL_FPE_CTRL_STS);
	writel(u32_replace_bits(val, preemptible_txqs, FPE_MTL_PREEMPTION_CLASS),
	       priv->ioaddr + GMAC5_MTL_FPE_CTRL_STS);

	return 0;
}
254
/**
 * dwxgmac3_fpe_map_preemption_class - program which TX queues are preemptible
 * @ndev: netdev whose TC-to-TXQ mapping is consulted
 * @extack: netlink extended ack (unused here; no validation failures)
 * @pclass: bitmask of traffic classes that should be preemptible
 *
 * Unlike GMAC5, XGMAC can program the Queue-to-TC mapping (Q2TCMAP) in
 * hardware, so each TXQ of a TC is bound to that TC and the preemptible
 * TXQ mask is derived from @pclass. With no TCs configured, the default
 * identity Queue:TC mapping is restored and the mask is cleared.
 *
 * Return: always 0.
 */
int dwxgmac3_fpe_map_preemption_class(struct net_device *ndev,
				      struct netlink_ext_ack *extack, u32 pclass)
{
	u32 val, offset, count, preemptible_txqs = 0;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int num_tc = netdev_get_num_tc(ndev);

	if (!num_tc) {
		/* Restore default TC:Queue mapping */
		for (u32 i = 0; i < priv->plat->tx_queues_to_use; i++) {
			val = readl(priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(i));
			writel(u32_replace_bits(val, i, XGMAC_Q2TCMAP),
			       priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(i));
		}
	}

	/* Synopsys Databook:
	 * "All Queues within a traffic class are selected in a round robin
	 * fashion (when packets are available) when the traffic class is
	 * selected by the scheduler for packet transmission. This is true for
	 * any of the scheduling algorithms."
	 */
	for (u32 tc = 0; tc < num_tc; tc++) {
		count = ndev->tc_to_txq[tc].count;
		offset = ndev->tc_to_txq[tc].offset;

		/* Mark every TXQ belonging to a preemptible TC */
		if (pclass & BIT(tc))
			preemptible_txqs |= GENMASK(offset + count - 1, offset);

		/* Bind each TXQ of this TC to the TC in hardware */
		for (u32 i = 0; i < count; i++) {
			val = readl(priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(offset + i));
			writel(u32_replace_bits(val, tc, XGMAC_Q2TCMAP),
			       priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(offset + i));
		}
	}

	val = readl(priv->ioaddr + XGMAC_MTL_FPE_CTRL_STS);
	writel(u32_replace_bits(val, preemptible_txqs, FPE_MTL_PREEMPTION_CLASS),
	       priv->ioaddr + XGMAC_MTL_FPE_CTRL_STS);

	return 0;
}
297
/* FPE register layout for DWMAC (GMAC5) cores */
const struct stmmac_fpe_reg dwmac5_fpe_reg = {
	.mac_fpe_reg = GMAC5_MAC_FPE_CTRL_STS,
	.mtl_fpe_reg = GMAC5_MTL_FPE_CTRL_STS,
	.rxq_ctrl1_reg = GMAC_RXQ_CTRL1,
	.fprq_mask = GMAC_RXQCTRL_FPRQ,
	.int_en_reg = GMAC_INT_EN,
	.int_en_bit = GMAC_INT_FPE_EN,
};
306
/* FPE register layout for DWXGMAC cores */
const struct stmmac_fpe_reg dwxgmac3_fpe_reg = {
	.mac_fpe_reg = XGMAC_MAC_FPE_CTRL_STS,
	.mtl_fpe_reg = XGMAC_MTL_FPE_CTRL_STS,
	.rxq_ctrl1_reg = XGMAC_RXQ_CTRL1,
	.fprq_mask = XGMAC_FPRQ,
	.int_en_reg = XGMAC_INT_EN,
	.int_en_bit = XGMAC_FPEIE,
};
315