xref: /linux/drivers/net/ethernet/ti/cpsw_priv.c (revision 91a4855d6c03e770e42f17c798a36a3c46e63de2)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Texas Instruments Ethernet Switch Driver
4  *
5  * Copyright (C) 2019 Texas Instruments
6  */
7 
8 #include <linux/bpf.h>
9 #include <linux/bpf_trace.h>
10 #include <linux/if_ether.h>
11 #include <linux/if_vlan.h>
12 #include <linux/kmemleak.h>
13 #include <linux/module.h>
14 #include <linux/netdevice.h>
15 #include <linux/net_tstamp.h>
16 #include <linux/of.h>
17 #include <linux/phy.h>
18 #include <linux/platform_device.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/skbuff.h>
21 #include <net/page_pool/helpers.h>
22 #include <net/pkt_cls.h>
23 #include <net/pkt_sched.h>
24 
25 #include "cpsw.h"
26 #include "cpts.h"
27 #include "cpsw_ale.h"
28 #include "cpsw_priv.h"
29 #include "cpsw_sl.h"
30 #include "davinci_cpdma.h"
31 
32 #define CPTS_N_ETX_TS 4
33 
/* Maps a cpsw_priv back to its slave port index.  Assigned by the
 * HW-specific front-end driver at probe time (not visible in this file).
 */
int (*cpsw_slave_index)(struct cpsw_common *cpsw, struct cpsw_priv *priv);
EXPORT_SYMBOL_GPL(cpsw_slave_index);
36 
/* Unmask TX/RX interrupts in the CPSW wrapper registers and enable
 * interrupt generation in the CPDMA controller.
 */
void cpsw_intr_enable(struct cpsw_common *cpsw)
{
	/* 0xFF unmasks all channel interrupt lines */
	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, true);
}
EXPORT_SYMBOL_GPL(cpsw_intr_enable);
45 
/* Mask TX/RX interrupts in the CPSW wrapper registers and disable
 * interrupt generation in the CPDMA controller.
 */
void cpsw_intr_disable(struct cpsw_common *cpsw)
{
	writel_relaxed(0, &cpsw->wr_regs->tx_en);
	writel_relaxed(0, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, false);
}
EXPORT_SYMBOL_GPL(cpsw_intr_disable);
54 
/* cpsw_tx_handler - CPDMA TX completion callback
 * @token: completed buffer handle; either an skb or an xdp_frame,
 *	   distinguished via cpsw_is_xdpf_handle()
 * @len: number of bytes transmitted
 * @status: CPDMA completion status (unused here)
 *
 * Releases the transmitted buffer, wakes the TX queue if it was stopped
 * waiting for free descriptors and updates the netdev statistics.
 */
void cpsw_tx_handler(void *token, int len, int status)
{
	struct cpsw_meta_xdp	*xmeta;
	struct xdp_frame	*xdpf;
	struct net_device	*ndev;
	struct netdev_queue	*txq;
	struct sk_buff		*skb;
	int			ch;

	if (cpsw_is_xdpf_handle(token)) {
		/* XDP path: ndev/channel metadata lives inside the frame */
		xdpf = cpsw_handle_to_xdpf(token);
		xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
		ndev = xmeta->ndev;
		ch = xmeta->ch;
		xdp_return_frame(xdpf);
	} else {
		/* skb path: let CPTS report the TX timestamp before freeing */
		skb = token;
		ndev = skb->dev;
		ch = skb_get_queue_mapping(skb);
		cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
		dev_kfree_skb_any(skb);
	}

	/* Check whether the queue is stopped due to stalled tx dma, if the
	 * queue is stopped then start the queue as we have free desc for tx
	 */
	txq = netdev_get_tx_queue(ndev, ch);
	if (unlikely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += len;
}
EXPORT_SYMBOL_GPL(cpsw_tx_handler);
89 
/* Hard IRQ handler for TX completions: mask further TX interrupts,
 * signal end-of-interrupt to CPDMA and defer processing to TX NAPI.
 */
irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->tx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

	if (cpsw->quirk_irq) {
		/* quirky HW: keep the IRQ line disabled until NAPI completes */
		disable_irq_nosync(cpsw->irqs_table[1]);
		cpsw->tx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_tx);
	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(cpsw_tx_interrupt);
106 
/* Hard IRQ handler for RX: mask further RX interrupts, signal
 * end-of-interrupt to CPDMA and defer processing to RX NAPI.
 */
irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->rx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);

	if (cpsw->quirk_irq) {
		/* quirky HW: keep the IRQ line disabled until NAPI completes */
		disable_irq_nosync(cpsw->irqs_table[0]);
		cpsw->rx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_rx);
	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(cpsw_rx_interrupt);
123 
/* Hard IRQ handler for the misc interrupt: mask it, ack via CPDMA EOI,
 * let CPTS handle its pending events, then re-enable by writing 0x10
 * back to the misc_en register.
 */
irqreturn_t cpsw_misc_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->misc_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_MISC);
	cpts_misc_interrupt(cpsw->cpts);
	writel(0x10, &cpsw->wr_regs->misc_en);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(cpsw_misc_interrupt);
136 
/* TX NAPI poll for the multi-queue case: walk the bitmap of TX channels
 * with pending completions and service each one within its pre-computed
 * budget share (set up by cpsw_split_res()), never exceeding the overall
 * NAPI @budget.  Re-enables TX interrupts once all work fits the budget.
 */
int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_tx);
	int			num_tx, cur_budget, ch;
	u32			ch_map;
	struct cpsw_vector	*txv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
	for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
		/* bit 7 of the left-shifted map selects this iteration's channel */
		if (!(ch_map & 0x80))
			continue;

		txv = &cpsw->txv[ch];
		/* clamp the channel budget to what remains of the NAPI budget */
		if (unlikely(txv->budget > budget - num_tx))
			cur_budget = budget - num_tx;
		else
			cur_budget = txv->budget;

		num_tx += cpdma_chan_process(txv->ch, cur_budget);
		if (num_tx >= budget)
			break;
	}

	if (num_tx < budget) {
		napi_complete(napi_tx);
		/* all done - unmask TX interrupts again */
		writel(0xff, &cpsw->wr_regs->tx_en);
	}

	return num_tx;
}
EXPORT_SYMBOL_GPL(cpsw_tx_mq_poll);
169 
/* TX NAPI poll for the single-channel case.  When the budget is not
 * exhausted, completes NAPI, unmasks TX interrupts and re-enables the
 * IRQ line if the quirk_irq hard handler had disabled it.
 */
int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	int num_tx;

	num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}
	}

	return num_tx;
}
EXPORT_SYMBOL_GPL(cpsw_tx_poll);
188 
/* RX NAPI poll for the multi-queue case: walk the bitmap of RX channels
 * with pending packets (LSB first) and service each one within its
 * pre-computed budget share, never exceeding the overall NAPI @budget.
 * Re-enables RX interrupts once all work fits the budget.
 */
int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_rx);
	int			num_rx, cur_budget, ch;
	u32			ch_map;
	struct cpsw_vector	*rxv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
	for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
		if (!(ch_map & 0x01))
			continue;

		rxv = &cpsw->rxv[ch];
		/* clamp the channel budget to what remains of the NAPI budget */
		if (unlikely(rxv->budget > budget - num_rx))
			cur_budget = budget - num_rx;
		else
			cur_budget = rxv->budget;

		num_rx += cpdma_chan_process(rxv->ch, cur_budget);
		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		/* all done - unmask RX interrupts again */
		writel(0xff, &cpsw->wr_regs->rx_en);
	}

	return num_rx;
}
EXPORT_SYMBOL_GPL(cpsw_rx_mq_poll);
221 
/* RX NAPI poll for the single-channel case.  When the budget is not
 * exhausted, completes NAPI, unmasks RX interrupts and re-enables the
 * IRQ line if the quirk_irq hard handler had disabled it.
 */
int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	int num_rx;

	num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	return num_rx;
}
EXPORT_SYMBOL_GPL(cpsw_rx_poll);
240 
/* cpsw_rx_vlan_encap - consume the RX VLAN encapsulation word
 * @skb: received packet with the 4-byte encap word at skb->data
 *
 * The switch prepends a word describing the packet's VLAN handling.
 * Pop that word, then for VLAN-tagged packets insert the hwaccel tag
 * into the skb (unless port 0 is configured to untag the vid) and
 * strip the in-band VLAN header from the payload.
 */
void cpsw_rx_vlan_encap(struct sk_buff *skb)
{
	struct cpsw_priv *priv = netdev_priv(skb->dev);
	u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
	struct cpsw_common *cpsw = priv->cpsw;
	u16 vtag, vid, prio, pkt_type;

	/* Remove VLAN header encapsulation word */
	skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);

	pkt_type = (rx_vlan_encap_hdr >>
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
	/* Ignore unknown & Priority-tagged packets*/
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
	    pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
		return;

	vid = (rx_vlan_encap_hdr >>
	       CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
	       VLAN_VID_MASK;
	/* Ignore vid 0 and pass packet as is */
	if (!vid)
		return;

	/* Untag P0 packets if set for vlan */
	if (!cpsw_ale_get_vlan_p0_untag(cpsw->ale, vid)) {
		prio = (rx_vlan_encap_hdr >>
			CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
			CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;

		vtag = (prio << VLAN_PRIO_SHIFT) | vid;
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
	}

	/* strip vlan tag for VLAN-tagged packet */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
		/* shift the MAC addresses up over the 4-byte VLAN header */
		memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, VLAN_HLEN);
	}
}
EXPORT_SYMBOL_GPL(cpsw_rx_vlan_encap);
283 
/* Program the slave port source-address registers from priv->mac_addr */
void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}
EXPORT_SYMBOL_GPL(cpsw_set_slave_mac);
290 
/* cpsw_soft_reset - soft-reset a sub-module and wait for completion
 * @module: module name, used only in the timeout warning
 * @reg: soft-reset register; bit 0 self-clears when the reset is done
 *
 * Busy-waits for up to one second (HZ jiffies) and WARNs if bit 0 is
 * still set afterwards.
 */
void cpsw_soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	writel_relaxed(1, reg);
	do {
		cpu_relax();
	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));

	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}
EXPORT_SYMBOL_GPL(cpsw_soft_reset);
303 
/* ndo_tx_timeout handler: recover from a stalled TX DMA by stopping and
 * restarting every TX channel with interrupts masked, then wake all TX
 * queues so transmission can resume.
 */
void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ch;

	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
	ndev->stats.tx_errors++;
	cpsw_intr_disable(cpsw);
	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
		cpdma_chan_stop(cpsw->txv[ch].ch);
		cpdma_chan_start(cpsw->txv[ch].ch);
	}

	cpsw_intr_enable(cpsw);
	netif_trans_update(ndev);
	netif_tx_wake_all_queues(ndev);
}
EXPORT_SYMBOL_GPL(cpsw_ndo_tx_timeout);
323 
324 static int cpsw_get_common_speed(struct cpsw_common *cpsw)
325 {
326 	int i, speed;
327 
328 	for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
329 		if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
330 			speed += cpsw->slaves[i].phy->speed;
331 
332 	return speed;
333 }
334 
335 int cpsw_need_resplit(struct cpsw_common *cpsw)
336 {
337 	int i, rlim_ch_num;
338 	int speed, ch_rate;
339 
340 	/* re-split resources only in case speed was changed */
341 	speed = cpsw_get_common_speed(cpsw);
342 	if (speed == cpsw->speed || !speed)
343 		return 0;
344 
345 	cpsw->speed = speed;
346 
347 	for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
348 		ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
349 		if (!ch_rate)
350 			break;
351 
352 		rlim_ch_num++;
353 	}
354 
355 	/* cases not dependent on speed */
356 	if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
357 		return 0;
358 
359 	return 1;
360 }
361 EXPORT_SYMBOL_GPL(cpsw_need_resplit);
362 
/* cpsw_split_res - distribute NAPI budget and CPDMA weight over channels
 * @cpsw: common cpsw structure
 *
 * Rate limited TX channels receive a budget and weight proportional to
 * their configured rate relative to the usable maximum; the remaining
 * budget is shared evenly between unlimited channels.  Budget lost to
 * integer rounding is handed to the biggest-rate channel.  The RX
 * budget is simply split evenly across all RX channels.
 */
void cpsw_split_res(struct cpsw_common *cpsw)
{
	u32 consumed_rate = 0, bigest_rate = 0;
	struct cpsw_vector *txv = cpsw->txv;
	int i, ch_weight, rlim_ch_num = 0;
	int budget, bigest_rate_ch = 0;
	u32 ch_rate, max_rate;
	int ch_budget = 0;

	/* count rate limited channels and their aggregate rate */
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (!ch_rate)
			continue;

		rlim_ch_num++;
		consumed_rate += ch_rate;
	}

	if (cpsw->tx_ch_num == rlim_ch_num) {
		max_rate = consumed_rate;
	} else if (!rlim_ch_num) {
		/* no shaped channels: plain even split of the NAPI budget */
		ch_budget = NAPI_POLL_WEIGHT / cpsw->tx_ch_num;
		bigest_rate = 0;
		max_rate = consumed_rate;
	} else {
		max_rate = cpsw->speed * 1000;

		/* if max_rate is less then expected due to reduced link speed,
		 * split proportionally according next potential max speed
		 */
		if (max_rate < consumed_rate)
			max_rate *= 10;

		if (max_rate < consumed_rate)
			max_rate *= 10;

		ch_budget = (consumed_rate * NAPI_POLL_WEIGHT) / max_rate;
		ch_budget = (NAPI_POLL_WEIGHT - ch_budget) /
			    (cpsw->tx_ch_num - rlim_ch_num);
		bigest_rate = (max_rate - consumed_rate) /
			      (cpsw->tx_ch_num - rlim_ch_num);
	}

	/* split tx weight/budget */
	budget = NAPI_POLL_WEIGHT;
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (ch_rate) {
			txv[i].budget = (ch_rate * NAPI_POLL_WEIGHT) / max_rate;
			if (!txv[i].budget)
				txv[i].budget++;
			if (ch_rate > bigest_rate) {
				bigest_rate_ch = i;
				bigest_rate = ch_rate;
			}

			/* CPDMA channel weight is expressed in percent */
			ch_weight = (ch_rate * 100) / max_rate;
			if (!ch_weight)
				ch_weight++;
			cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
		} else {
			txv[i].budget = ch_budget;
			if (!bigest_rate_ch)
				bigest_rate_ch = i;
			cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
		}

		budget -= txv[i].budget;
	}

	/* hand rounding leftovers to the biggest-rate channel */
	if (budget)
		txv[bigest_rate_ch].budget += budget;

	/* split rx budget */
	budget = NAPI_POLL_WEIGHT;
	ch_budget = budget / cpsw->rx_ch_num;
	for (i = 0; i < cpsw->rx_ch_num; i++) {
		cpsw->rxv[i].budget = ch_budget;
		budget -= ch_budget;
	}

	if (budget)
		cpsw->rxv[0].budget += budget;
}
EXPORT_SYMBOL_GPL(cpsw_split_res);
448 
/* cpsw_init_common - common probe-time initialization for cpsw drivers
 * @cpsw: common cpsw structure, with dev/regs/data already populated
 * @ss_regs: mapped subsystem register base
 * @ale_ageout: ALE address ageing timeout
 * @desc_mem_phys: physical address of descriptor memory (v2+ only)
 * @descs_pool_size: CPDMA descriptor pool size
 *
 * Resolves the version-specific register layout, initializes each slave
 * port, then creates the ALE, CPDMA and CPTS sub-modules.  Returns 0 on
 * success or a negative errno; if CPTS creation fails, the CPDMA
 * controller created just before it is destroyed again.
 */
int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
		     int ale_ageout, phys_addr_t desc_mem_phys,
		     int descs_pool_size)
{
	u32 slave_offset, sliver_offset, slave_size;
	struct cpsw_ale_params ale_params;
	struct cpsw_platform_data *data;
	struct cpdma_params dma_params;
	struct device *dev = cpsw->dev;
	struct device_node *cpts_node;
	void __iomem *cpts_regs;
	int ret = 0, i;

	data = &cpsw->data;
	cpsw->rx_ch_num = 1;
	cpsw->tx_ch_num = 1;

	cpsw->version = readl(&cpsw->regs->id_ver);

	memset(&dma_params, 0, sizeof(dma_params));
	memset(&ale_params, 0, sizeof(ale_params));

	/* register block offsets differ between IP versions */
	switch (cpsw->version) {
	case CPSW_VERSION_1:
		cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
		cpts_regs	     = ss_regs + CPSW1_CPTS_OFFSET;
		cpsw->hw_stats	     = ss_regs + CPSW1_HW_STATS;
		dma_params.dmaregs   = ss_regs + CPSW1_CPDMA_OFFSET;
		dma_params.txhdp     = ss_regs + CPSW1_STATERAM_OFFSET;
		ale_params.ale_regs  = ss_regs + CPSW1_ALE_OFFSET;
		slave_offset         = CPSW1_SLAVE_OFFSET;
		slave_size           = CPSW1_SLAVE_SIZE;
		sliver_offset        = CPSW1_SLIVER_OFFSET;
		dma_params.desc_mem_phys = 0;
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
		cpts_regs	     = ss_regs + CPSW2_CPTS_OFFSET;
		cpsw->hw_stats	     = ss_regs + CPSW2_HW_STATS;
		dma_params.dmaregs   = ss_regs + CPSW2_CPDMA_OFFSET;
		dma_params.txhdp     = ss_regs + CPSW2_STATERAM_OFFSET;
		ale_params.ale_regs  = ss_regs + CPSW2_ALE_OFFSET;
		slave_offset         = CPSW2_SLAVE_OFFSET;
		slave_size           = CPSW2_SLAVE_SIZE;
		sliver_offset        = CPSW2_SLIVER_OFFSET;
		dma_params.desc_mem_phys = desc_mem_phys;
		break;
	default:
		dev_err(dev, "unknown version 0x%08x\n", cpsw->version);
		return -ENODEV;
	}

	/* slave/sliver register windows are laid out consecutively */
	for (i = 0; i < cpsw->data.slaves; i++) {
		struct cpsw_slave *slave = &cpsw->slaves[i];
		void __iomem		*regs = cpsw->regs;

		slave->slave_num = i;
		slave->data	= &cpsw->data.slave_data[i];
		slave->regs	= regs + slave_offset;
		slave->port_vlan = slave->data->dual_emac_res_vlan;
		slave->mac_sl = cpsw_sl_get("cpsw", dev, regs + sliver_offset);
		if (IS_ERR(slave->mac_sl))
			return PTR_ERR(slave->mac_sl);

		slave_offset  += slave_size;
		sliver_offset += SLIVER_SIZE;
	}

	ale_params.dev			= dev;
	ale_params.ale_ageout		= ale_ageout;
	ale_params.ale_ports		= CPSW_ALE_PORTS_NUM;
	ale_params.dev_id		= "cpsw";
	ale_params.bus_freq		= cpsw->bus_freq_mhz * 1000000;

	cpsw->ale = cpsw_ale_create(&ale_params);
	if (IS_ERR(cpsw->ale)) {
		dev_err(dev, "error initializing ale engine\n");
		return PTR_ERR(cpsw->ale);
	}

	dma_params.dev		= dev;
	dma_params.rxthresh	= dma_params.dmaregs + CPDMA_RXTHRESH;
	dma_params.rxfree	= dma_params.dmaregs + CPDMA_RXFREE;
	dma_params.rxhdp	= dma_params.txhdp + CPDMA_RXHDP;
	dma_params.txcp		= dma_params.txhdp + CPDMA_TXCP;
	dma_params.rxcp		= dma_params.txhdp + CPDMA_RXCP;

	dma_params.num_chan		= data->channels;
	dma_params.has_soft_reset	= true;
	dma_params.min_packet_size	= CPSW_MIN_PACKET_SIZE;
	dma_params.desc_mem_size	= data->bd_ram_size;
	dma_params.desc_align		= 16;
	dma_params.has_ext_regs		= true;
	dma_params.desc_hw_addr         = dma_params.desc_mem_phys;
	dma_params.bus_freq_mhz		= cpsw->bus_freq_mhz;
	dma_params.descs_pool_size	= descs_pool_size;

	cpsw->dma = cpdma_ctlr_create(&dma_params);
	if (!cpsw->dma) {
		dev_err(dev, "error initializing dma\n");
		return -ENOMEM;
	}

	/* prefer a dedicated "cpts" child node; fall back to our own node */
	cpts_node = of_get_child_by_name(cpsw->dev->of_node, "cpts");
	if (!cpts_node)
		cpts_node = cpsw->dev->of_node;

	cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node,
				 CPTS_N_ETX_TS);
	if (IS_ERR(cpsw->cpts)) {
		/* unwind the CPDMA controller created above */
		ret = PTR_ERR(cpsw->cpts);
		cpdma_ctlr_destroy(cpsw->dma);
	}
	of_node_put(cpts_node);

	return ret;
}
EXPORT_SYMBOL_GPL(cpsw_init_common);
569 
570 #if IS_ENABLED(CONFIG_TI_CPTS)
571 
/* Program CPSW v1 hardware timestamping for this port: disable the TS
 * control register entirely when neither direction is enabled, else set
 * the sequence-id offset/ltype and the per-direction enable bits.
 */
static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	u32 ts_en, seq_id;

	if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
		slave_write(slave, 0, CPSW1_TS_CTL);
		return;
	}

	/* PTP sequence id lives at byte offset 30 of the event message */
	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;

	if (priv->tx_ts_enabled)
		ts_en |= CPSW_V1_TS_TX_EN;

	if (priv->rx_ts_enabled)
		ts_en |= CPSW_V1_TS_RX_EN;

	slave_write(slave, ts_en, CPSW1_TS_CTL);
	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
}
595 
/* Program CPSW v2/v3 hardware timestamping for this port via the
 * per-slave CONTROL register (bit layout differs between v2 and v3),
 * plus the shared sequence/message-type and ltype registers.
 */
static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 ctrl, mtype;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];

	/* read-modify-write: clear all TS bits, then set enabled directions */
	ctrl = slave_read(slave, CPSW2_CONTROL);
	switch (cpsw->version) {
	case CPSW_VERSION_2:
		ctrl &= ~CTRL_V2_ALL_TS_MASK;

		if (priv->tx_ts_enabled)
			ctrl |= CTRL_V2_TX_TS_BITS;

		if (priv->rx_ts_enabled)
			ctrl |= CTRL_V2_RX_TS_BITS;
		break;
	case CPSW_VERSION_3:
	default:
		ctrl &= ~CTRL_V3_ALL_TS_MASK;

		if (priv->tx_ts_enabled)
			ctrl |= CTRL_V3_TX_TS_BITS;

		if (priv->rx_ts_enabled)
			ctrl |= CTRL_V3_RX_TS_BITS;
		break;
	}

	/* PTP sequence id lives at byte offset 30 of the event message */
	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;

	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
	slave_write(slave, ctrl, CPSW2_CONTROL);
	writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
	writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}
634 
/* cpsw_hwtstamp_set - ndo hardware timestamping configuration
 * @dev: network device
 * @cfg: requested timestamping configuration; rx_filter may be
 *	 downgraded to what the hardware actually supports
 * @extack: netlink extended ack for error reporting
 *
 * Only PTPv2 event filtering is supported on RX: "all packets" and
 * PTPv1 filters are rejected with -ERANGE.  Returns 0 on success or a
 * negative errno.
 */
int cpsw_hwtstamp_set(struct net_device *dev,
		      struct kernel_hwtstamp_config *cfg,
		      struct netlink_ext_ack *extack)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct cpsw_common *cpsw = priv->cpsw;

	/* This will only execute if dev->see_all_hwtstamp_requests is set */
	if (cfg->source != HWTSTAMP_SOURCE_NETDEV) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Switch mode only supports MAC timestamping");
		return -EOPNOTSUPP;
	}

	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->rx_ts_enabled = 0;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		return -ERANGE;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		/* every PTPv2 variant is widened to "all PTPv2 events" */
		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
		cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	priv->tx_ts_enabled = cfg->tx_type == HWTSTAMP_TX_ON;

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		cpsw_hwtstamp_v1(priv);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
		cpsw_hwtstamp_v2(priv);
		break;
	default:
		WARN_ON(1);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cpsw_hwtstamp_set);
700 
/* cpsw_hwtstamp_get - report the current hardware timestamping config
 * @dev: network device
 * @cfg: filled with the active tx_type and rx_filter
 *
 * rx_ts_enabled stores the active hwtstamp_rx_filters value directly,
 * so it can be returned as-is.  Returns 0, or -EOPNOTSUPP on
 * unsupported IP versions.
 */
int cpsw_hwtstamp_get(struct net_device *dev,
		      struct kernel_hwtstamp_config *cfg)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(dev);
	struct cpsw_priv *priv = netdev_priv(dev);

	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	cfg->tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg->rx_filter = priv->rx_ts_enabled;

	return 0;
}
EXPORT_SYMBOL_GPL(cpsw_hwtstamp_get);
718 #else
719 int cpsw_hwtstamp_get(struct net_device *dev,
720 		      struct kernel_hwtstamp_config *cfg)
721 {
722 	return -EOPNOTSUPP;
723 }
724 EXPORT_SYMBOL_GPL(cpsw_hwtstamp_set);
725 
726 int cpsw_hwtstamp_set(struct net_device *dev,
727 		      struct kernel_hwtstamp_config *cfg,
728 		      struct netlink_ext_ack *extack)
729 {
730 	return -EOPNOTSUPP;
731 }
732 EXPORT_SYMBOL_GPL(cpsw_hwtstamp_get);
733 #endif /*CONFIG_TI_CPTS*/
734 
/* ndo_set_tx_maxrate: set a per-queue TX rate limit.  @rate is in
 * Mbit/s and is converted to kbit/s for CPDMA; 0 removes the limit.
 * On success the rate is mirrored into every slave's corresponding
 * queue and the NAPI budgets are re-split.
 */
int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 min_rate;
	u32 ch_rate;
	int i, ret;

	/* no-op if the requested rate is already configured */
	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
	if (ch_rate == rate)
		return 0;

	ch_rate = rate * 1000;
	min_rate = cpdma_chan_get_min_rate(cpsw->dma);
	if ((ch_rate < min_rate && ch_rate)) {
		dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
			min_rate);
		return -EINVAL;
	}

	if (rate > cpsw->speed) {
		/* NOTE(review): message text looks stale - the actual limit
		 * checked here is the current link speed, not a fixed 2Gbps
		 */
		dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
		return -EINVAL;
	}

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
	pm_runtime_put(cpsw->dev);

	if (ret)
		return ret;

	/* update rates for slaves tx queues */
	for (i = 0; i < cpsw->data.slaves; i++) {
		slave = &cpsw->slaves[i];
		if (!slave->ndev)
			continue;

		netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
	}

	cpsw_split_res(cpsw);
	return ret;
}
EXPORT_SYMBOL_GPL(cpsw_ndo_set_tx_maxrate);
784 
785 static int cpsw_tc_to_fifo(int tc, int num_tc)
786 {
787 	if (tc == num_tc - 1)
788 		return 0;
789 
790 	return CPSW_FIFO_SHAPERS_NUM - tc;
791 }
792 
793 bool cpsw_shp_is_off(struct cpsw_priv *priv)
794 {
795 	struct cpsw_common *cpsw = priv->cpsw;
796 	struct cpsw_slave *slave;
797 	u32 shift, mask, val;
798 
799 	val = readl_relaxed(&cpsw->regs->ptype);
800 
801 	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
802 	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
803 	mask = 7 << shift;
804 	val = val & mask;
805 
806 	return !val;
807 }
808 EXPORT_SYMBOL_GPL(cpsw_shp_is_off);
809 
/* Set or clear the shape-enable bit for @fifo on this slave port.
 * @fifo is 1-based; note the pre-decrement below mapping FIFO N to
 * bit N-1 within the port's field of the ptype register.
 */
static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = (1 << --fifo) << shift;
	val = on ? val | mask : val & ~mask;

	writel_relaxed(val, &cpsw->regs->ptype);
}
825 
/* cpsw_set_fifo_bw - program the send-percentage for one shaped FIFO
 * @priv: port private data
 * @fifo: FIFO number (1..CPSW_FIFO_SHAPERS_NUM)
 * @bw: bandwidth in kbit/s; 0 disables shaping on this FIFO
 *
 * Shapers must be enabled contiguously from the highest FIFO downwards,
 * and the combined percentage of all shaped FIFOs must stay below 100%.
 * Returns 0 on success or -EINVAL when the request cannot fit.
 */
static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 val = 0, send_pct, shift;
	struct cpsw_slave *slave;
	int pct = 0, i;

	if (bw > priv->shp_cfg_speed * 1000)
		goto err;

	/* shaping has to stay enabled for highest fifos linearly
	 * and fifo bw no more then interface can allow
	 */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	send_pct = slave_read(slave, SEND_PERCENT);
	for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
		if (!bw) {
			/* disabling: just warn if lower FIFOs remain shaped */
			if (i >= fifo || !priv->fifo_bw[i])
				continue;

			dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
			continue;
		}

		if (!priv->fifo_bw[i] && i > fifo) {
			dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
			return -EINVAL;
		}

		/* each FIFO owns one byte of the SEND_PERCENT register */
		shift = (i - 1) * 8;
		if (i == fifo) {
			send_pct &= ~(CPSW_PCT_MASK << shift);
			val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
			if (!val)
				val = 1;

			send_pct |= val << shift;
			pct += val;
			continue;
		}

		/* accumulate percentages already claimed by other FIFOs */
		if (priv->fifo_bw[i])
			pct += (send_pct >> shift) & CPSW_PCT_MASK;
	}

	if (pct >= 100)
		goto err;

	slave_write(slave, send_pct, SEND_PERCENT);
	priv->fifo_bw[fifo] = bw;

	dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
		 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));

	return 0;
err:
	dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
	return -EINVAL;
}
885 
/* cpsw_set_fifo_rlimit - (de)configure rate limiting for one FIFO
 * @priv: port private data
 * @fifo: FIFO number (1..CPSW_FIFO_SHAPERS_NUM)
 * @bw: bandwidth in kbit/s; 0 disables rate limiting on this FIFO
 *
 * Programs the send percentage via cpsw_set_fifo_bw(), then updates the
 * port's TX_IN_CTL register (version-dependent offset) and the shape
 * enable bit.  Returns 0 on success or a negative errno.
 */
static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 tx_in_ctl_rg, val;
	int ret;

	ret = cpsw_set_fifo_bw(priv, fifo, bw);
	if (ret)
		return ret;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
		       CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;

	/* when disabling, drop the shape-enable bit before touching ctl */
	if (!bw)
		cpsw_fifo_shp_on(priv, fifo, bw);

	val = slave_read(slave, tx_in_ctl_rg);
	if (cpsw_shp_is_off(priv)) {
		/* disable FIFOs rate limited queues */
		val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);

		/* set type of FIFO queues to normal priority mode */
		val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);

		/* set type of FIFO queues to be rate limited */
		if (bw)
			val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
		else
			priv->shp_cfg_speed = 0;
	}

	/* toggle a FIFO rate limited queue */
	if (bw)
		val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	else
		val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	slave_write(slave, val, tx_in_ctl_rg);

	/* FIFO transmit shape enable */
	cpsw_fifo_shp_on(priv, fifo, bw);
	return 0;
}
930 
/* Defaults:
 * class A - prio 3
 * class B - prio 2
 * shaping for class A should be set first
 */
/* cpsw_set_cbs - offload a CBS (802.1Qav) qdisc onto a port FIFO shaper.
 * Maps the queue's traffic class to a FIFO, uses the qdisc idleslope as
 * the FIFO bandwidth, and requires a known link speed.
 */
static int cpsw_set_cbs(struct net_device *ndev,
			struct tc_cbs_qopt_offload *qopt)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int prev_speed = 0;
	int tc, ret, fifo;
	u32 bw = 0;

	tc = netdev_txq_to_tc(priv->ndev, qopt->queue);

	/* enable channels in backward order, as highest FIFOs must be rate
	 * limited first and for compliance with CPDMA rate limited channels
	 * that also used in bacward order. FIFO0 cannot be rate limited.
	 */
	fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
	if (!fifo) {
		dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
		return -EINVAL;
	}

	/* do nothing, it's disabled anyway */
	if (!qopt->enable && !priv->fifo_bw[fifo])
		return 0;

	/* shapers can be set if link speed is known */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	if (slave->phy && slave->phy->link) {
		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed)
			prev_speed = priv->shp_cfg_speed;

		priv->shp_cfg_speed = slave->phy->speed;
	}

	if (!priv->shp_cfg_speed) {
		dev_err(priv->dev, "Link speed is not known");
		/* NOTE(review): -1 is -EPERM to userspace; -EINVAL would be
		 * clearer, but changing it alters the visible errno
		 */
		return -1;
	}

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	bw = qopt->enable ? qopt->idleslope : 0;
	ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
	if (ret) {
		/* restore the previously configured shaper speed */
		priv->shp_cfg_speed = prev_speed;
		prev_speed = 0;
	}

	if (bw && prev_speed)
		dev_warn(priv->dev,
			 "Speed was changed, CBS shaper speeds are changed!");

	pm_runtime_put_sync(cpsw->dev);
	return ret;
}
995 
/* cpsw_set_mqprio - offload an mqprio qdisc (DCB mode only)
 * Builds the priority-to-FIFO map from the qopt priority/tc mapping,
 * programs the netdev tc/queue layout, and writes the map into the
 * port's version-dependent TX_PRI_MAP register.  When hw offload is
 * being turned off, the default priority map is restored.
 */
static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int fifo, num_tc, count, offset;
	struct cpsw_slave *slave;
	u32 tx_prio_map = 0;
	int i, tc, ret;

	num_tc = mqprio->qopt.num_tc;
	if (num_tc > CPSW_TC_NUM)
		return -EINVAL;

	if (mqprio->mode != TC_MQPRIO_MODE_DCB)
		return -EINVAL;

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	if (num_tc) {
		/* one 4-bit FIFO nibble per priority (8 priorities) */
		for (i = 0; i < 8; i++) {
			tc = mqprio->qopt.prio_tc_map[i];
			fifo = cpsw_tc_to_fifo(tc, num_tc);
			tx_prio_map |= fifo << (4 * i);
		}

		netdev_set_num_tc(ndev, num_tc);
		for (i = 0; i < num_tc; i++) {
			count = mqprio->qopt.count[i];
			offset = mqprio->qopt.offset[i];
			netdev_set_tc_queue(ndev, i, count, offset);
		}
	}

	if (!mqprio->qopt.hw) {
		/* restore default configuration */
		netdev_reset_tc(ndev);
		tx_prio_map = TX_PRIORITY_MAPPING;
	}

	priv->mqprio_hw = mqprio->qopt.hw;

	offset = cpsw->version == CPSW_VERSION_1 ?
		 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	slave_write(slave, tx_prio_map, offset);

	pm_runtime_put_sync(cpsw->dev);

	return 0;
}
1050 
1051 static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f);
1052 
1053 int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
1054 		      void *type_data)
1055 {
1056 	switch (type) {
1057 	case TC_SETUP_QDISC_CBS:
1058 		return cpsw_set_cbs(ndev, type_data);
1059 
1060 	case TC_SETUP_QDISC_MQPRIO:
1061 		return cpsw_set_mqprio(ndev, type_data);
1062 
1063 	case TC_SETUP_BLOCK:
1064 		return cpsw_qos_setup_tc_block(ndev, type_data);
1065 
1066 	default:
1067 		return -EOPNOTSUPP;
1068 	}
1069 }
1070 EXPORT_SYMBOL_GPL(cpsw_ndo_setup_tc);
1071 
/* Re-apply configured CBS FIFO shapers after resume, highest FIFO
 * first (shapers must stay enabled contiguously from the top down -
 * see cpsw_set_fifo_bw()).
 */
void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	int fifo, bw;

	for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
		bw = priv->fifo_bw[fifo];
		if (!bw)
			continue;

		cpsw_set_fifo_rlimit(priv, fifo, bw);
	}
}
EXPORT_SYMBOL_GPL(cpsw_cbs_resume);
1085 
/* Re-program the port's TX priority-to-FIFO map after resume, rebuilt
 * from the netdev's current prio/tc mapping.  No-op unless mqprio
 * hardware offload was active.
 */
void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 tx_prio_map = 0;
	int i, tc, fifo;
	u32 tx_prio_rg;

	if (!priv->mqprio_hw)
		return;

	/* one 4-bit FIFO nibble per priority (8 priorities) */
	for (i = 0; i < 8; i++) {
		tc = netdev_get_prio_tc_map(priv->ndev, i);
		fifo = CPSW_FIFO_SHAPERS_NUM - tc;
		tx_prio_map |= fifo << (4 * i);
	}

	tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
		     CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave_write(slave, tx_prio_map, tx_prio_rg);
}
EXPORT_SYMBOL_GPL(cpsw_mqprio_resume);
1108 
1109 int cpsw_fill_rx_channels(struct cpsw_priv *priv)
1110 {
1111 	struct cpsw_common *cpsw = priv->cpsw;
1112 	struct cpsw_meta_xdp *xmeta;
1113 	struct page_pool *pool;
1114 	struct page *page;
1115 	int ch_buf_num;
1116 	int ch, i, ret;
1117 	dma_addr_t dma;
1118 
1119 	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
1120 		pool = cpsw->page_pool[ch];
1121 		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
1122 		for (i = 0; i < ch_buf_num; i++) {
1123 			page = page_pool_dev_alloc_pages(pool);
1124 			if (!page) {
1125 				cpsw_err(priv, ifup, "allocate rx page err\n");
1126 				return -ENOMEM;
1127 			}
1128 
1129 			xmeta = page_address(page) + CPSW_XMETA_OFFSET;
1130 			xmeta->ndev = priv->ndev;
1131 			xmeta->ch = ch;
1132 
1133 			dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM_NA;
1134 			ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
1135 							    page, dma,
1136 							    cpsw->rx_packet_max,
1137 							    0);
1138 			if (ret < 0) {
1139 				cpsw_err(priv, ifup,
1140 					 "cannot submit page to channel %d rx, error %d\n",
1141 					 ch, ret);
1142 				page_pool_recycle_direct(pool, page);
1143 				return ret;
1144 			}
1145 		}
1146 
1147 		cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
1148 			  ch, ch_buf_num);
1149 	}
1150 
1151 	return 0;
1152 }
1153 EXPORT_SYMBOL_GPL(cpsw_fill_rx_channels);
1154 
1155 static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
1156 					       int size)
1157 {
1158 	struct page_pool_params pp_params = {};
1159 	struct page_pool *pool;
1160 
1161 	pp_params.order = 0;
1162 	pp_params.flags = PP_FLAG_DMA_MAP;
1163 	pp_params.pool_size = size;
1164 	pp_params.nid = NUMA_NO_NODE;
1165 	pp_params.dma_dir = DMA_BIDIRECTIONAL;
1166 	pp_params.dev = cpsw->dev;
1167 
1168 	pool = page_pool_create(&pp_params);
1169 	if (IS_ERR(pool))
1170 		dev_err(cpsw->dev, "cannot create rx page pool\n");
1171 
1172 	return pool;
1173 }
1174 
1175 static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch)
1176 {
1177 	struct page_pool *pool;
1178 	int ret = 0, pool_size;
1179 
1180 	pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
1181 	pool = cpsw_create_page_pool(cpsw, pool_size);
1182 	if (IS_ERR(pool))
1183 		ret = PTR_ERR(pool);
1184 	else
1185 		cpsw->page_pool[ch] = pool;
1186 
1187 	return ret;
1188 }
1189 
1190 static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch)
1191 {
1192 	struct cpsw_common *cpsw = priv->cpsw;
1193 	struct xdp_rxq_info *rxq;
1194 	struct page_pool *pool;
1195 	int ret;
1196 
1197 	pool = cpsw->page_pool[ch];
1198 	rxq = &priv->xdp_rxq[ch];
1199 
1200 	ret = xdp_rxq_info_reg(rxq, priv->ndev, ch, 0);
1201 	if (ret)
1202 		return ret;
1203 
1204 	ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
1205 	if (ret)
1206 		xdp_rxq_info_unreg(rxq);
1207 
1208 	return ret;
1209 }
1210 
1211 static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch)
1212 {
1213 	struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];
1214 
1215 	if (!xdp_rxq_info_is_reg(rxq))
1216 		return;
1217 
1218 	xdp_rxq_info_unreg(rxq);
1219 }
1220 
1221 void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw)
1222 {
1223 	struct net_device *ndev;
1224 	int i, ch;
1225 
1226 	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
1227 		for (i = 0; i < cpsw->data.slaves; i++) {
1228 			ndev = cpsw->slaves[i].ndev;
1229 			if (!ndev)
1230 				continue;
1231 
1232 			cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch);
1233 		}
1234 
1235 		page_pool_destroy(cpsw->page_pool[ch]);
1236 		cpsw->page_pool[ch] = NULL;
1237 	}
1238 }
1239 EXPORT_SYMBOL_GPL(cpsw_destroy_xdp_rxqs);
1240 
1241 int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
1242 {
1243 	struct net_device *ndev;
1244 	int i, ch, ret;
1245 
1246 	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
1247 		ret = cpsw_create_rx_pool(cpsw, ch);
1248 		if (ret)
1249 			goto err_cleanup;
1250 
1251 		/* using same page pool is allowed as no running rx handlers
1252 		 * simultaneously for both ndevs
1253 		 */
1254 		for (i = 0; i < cpsw->data.slaves; i++) {
1255 			ndev = cpsw->slaves[i].ndev;
1256 			if (!ndev)
1257 				continue;
1258 
1259 			ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch);
1260 			if (ret)
1261 				goto err_cleanup;
1262 		}
1263 	}
1264 
1265 	return 0;
1266 
1267 err_cleanup:
1268 	cpsw_destroy_xdp_rxqs(cpsw);
1269 
1270 	return ret;
1271 }
1272 EXPORT_SYMBOL_GPL(cpsw_create_xdp_rxqs);
1273 
1274 static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
1275 {
1276 	struct bpf_prog *prog = bpf->prog;
1277 
1278 	if (!priv->xdpi.prog && !prog)
1279 		return 0;
1280 
1281 	WRITE_ONCE(priv->xdp_prog, prog);
1282 
1283 	xdp_attachment_setup(&priv->xdpi, bpf);
1284 
1285 	return 0;
1286 }
1287 
1288 int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
1289 {
1290 	struct cpsw_priv *priv = netdev_priv(ndev);
1291 
1292 	switch (bpf->command) {
1293 	case XDP_SETUP_PROG:
1294 		return cpsw_xdp_prog_setup(priv, bpf);
1295 
1296 	default:
1297 		return -EINVAL;
1298 	}
1299 }
1300 EXPORT_SYMBOL_GPL(cpsw_ndo_bpf);
1301 
1302 int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
1303 		      struct page *page, int port)
1304 {
1305 	struct cpsw_common *cpsw = priv->cpsw;
1306 	struct cpsw_meta_xdp *xmeta;
1307 	struct cpdma_chan *txch;
1308 	dma_addr_t dma;
1309 	int ret;
1310 
1311 	xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
1312 	xmeta->ndev = priv->ndev;
1313 	xmeta->ch = 0;
1314 	txch = cpsw->txv[0].ch;
1315 
1316 	if (page) {
1317 		dma = page_pool_get_dma_addr(page);
1318 		dma += xdpf->headroom + sizeof(struct xdp_frame);
1319 		ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
1320 					       dma, xdpf->len, port);
1321 	} else {
1322 		if (sizeof(*xmeta) > xdpf->headroom)
1323 			return -EINVAL;
1324 
1325 		ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
1326 					xdpf->data, xdpf->len, port);
1327 	}
1328 
1329 	if (ret)
1330 		priv->ndev->stats.tx_dropped++;
1331 
1332 	return ret;
1333 }
1334 EXPORT_SYMBOL_GPL(cpsw_xdp_tx_frame);
1335 
/* Run the attached XDP program on a received buffer.
 *
 * Returns CPSW_XDP_PASS when the stack should keep processing the
 * packet (no program attached or the program returned XDP_PASS), or
 * CPSW_XDP_CONSUMED when the buffer was taken over here (TX, redirect
 * or drop). @len is written back because the program may have resized
 * the packet. On the drop paths the page is recycled into the
 * channel's page pool; on TX/redirect, ownership moves to the TX path.
 */
int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
		 struct page *page, int port, int *len)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct net_device *ndev = priv->ndev;
	int ret = CPSW_XDP_CONSUMED;
	struct xdp_frame *xdpf;
	struct bpf_prog *prog;
	u32 act;

	/* pairs with WRITE_ONCE() in cpsw_xdp_prog_setup() */
	prog = READ_ONCE(priv->xdp_prog);
	if (!prog)
		return CPSW_XDP_PASS;

	act = bpf_prog_run_xdp(prog, xdp);
	/* XDP prog might have changed packet data and boundaries */
	*len = xdp->data_end - xdp->data;

	switch (act) {
	case XDP_PASS:
		ret = CPSW_XDP_PASS;
		goto out;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto drop;

		/* on submit failure the frame was not accepted by the
		 * hardware queue, so return it here
		 */
		if (cpsw_xdp_tx_frame(priv, xdpf, page, port))
			xdp_return_frame_rx_napi(xdpf);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(ndev, xdp, prog))
			goto drop;

		/*  Have to flush here, per packet, instead of doing it in bulk
		 *  at the end of the napi handler. The RX devices on this
		 *  particular hardware is sharing a common queue, so the
		 *  incoming device might change per packet.
		 */
		xdp_do_flush();
		break;
	default:
		bpf_warn_invalid_xdp_action(ndev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		fallthrough;	/* handle aborts by dropping packet */
	case XDP_DROP:
		/* dropped packets are still accounted as received */
		ndev->stats.rx_bytes += *len;
		ndev->stats.rx_packets++;
		goto drop;
	}

	/* TX/redirect: count the packet as received before consuming it */
	ndev->stats.rx_bytes += *len;
	ndev->stats.rx_packets++;
out:
	return ret;
drop:
	page_pool_recycle_direct(cpsw->page_pool[ch], page);
	return ret;
}
EXPORT_SYMBOL_GPL(cpsw_run_xdp);
1398 
/* Install an ALE packet-per-second rate limiter for a tc-flower police
 * rule. Only two destination-MAC matches are offloadable:
 *  - broadcast dst (ff:ff:ff:ff:ff:ff key and mask) -> BC rate limit,
 *  - multicast bit only (01:00:00:00:00:00 key and mask) -> MC limit.
 * The flower cookie and the rate are remembered in priv so the rule can
 * be deleted and restored on resume. Returns -EOPNOTSUPP with an extack
 * message for anything the hardware cannot match.
 */
static int cpsw_qos_clsflower_add_policer(struct cpsw_priv *priv,
					  struct netlink_ext_ack *extack,
					  struct flow_cls_offload *cls,
					  u64 rate_pkt_ps)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	/* key/mask that selects only the multicast (I/G) address bit */
	static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct flow_match_eth_addrs match;
	u32 port_id;
	int ret;

	/* only basic/control/eth-addrs dissector keys are supported */
	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_has_control_flags(rule, extack))
		return -EOPNOTSUPP;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
		return -EOPNOTSUPP;
	}

	flow_rule_match_eth_addrs(rule, &match);

	if (!is_zero_ether_addr(match.mask->src)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Matching on source MAC not supported");
		return -EOPNOTSUPP;
	}

	/* ALE port numbers are 1-based (0 is the host port) */
	port_id = cpsw_slave_index(priv->cpsw, priv) + 1;

	if (is_broadcast_ether_addr(match.key->dst) &&
	    is_broadcast_ether_addr(match.mask->dst)) {
		ret = cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, rate_pkt_ps);
		if (ret)
			return ret;

		priv->ale_bc_ratelimit.cookie = cls->cookie;
		priv->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
	} else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
		   ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
		ret = cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, rate_pkt_ps);
		if (ret)
			return ret;

		priv->ale_mc_ratelimit.cookie = cls->cookie;
		priv->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
		return -EOPNOTSUPP;
	}

	return 0;
}
1461 
1462 static int cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
1463 					       const struct flow_action_entry *act,
1464 					       struct netlink_ext_ack *extack)
1465 {
1466 	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
1467 		NL_SET_ERR_MSG_MOD(extack,
1468 				   "Offload not supported when exceed action is not drop");
1469 		return -EOPNOTSUPP;
1470 	}
1471 
1472 	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
1473 	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
1474 		NL_SET_ERR_MSG_MOD(extack,
1475 				   "Offload not supported when conform action is not pipe or ok");
1476 		return -EOPNOTSUPP;
1477 	}
1478 
1479 	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
1480 	    !flow_action_is_last_entry(action, act)) {
1481 		NL_SET_ERR_MSG_MOD(extack,
1482 				   "Offload not supported when conform action is ok, but action is not last");
1483 		return -EOPNOTSUPP;
1484 	}
1485 
1486 	if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
1487 	    act->police.avrate || act->police.overhead) {
1488 		NL_SET_ERR_MSG_MOD(extack,
1489 				   "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
1490 		return -EOPNOTSUPP;
1491 	}
1492 
1493 	return 0;
1494 }
1495 
1496 static int cpsw_qos_configure_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
1497 {
1498 	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
1499 	struct netlink_ext_ack *extack = cls->common.extack;
1500 	const struct flow_action_entry *act;
1501 	int i, ret;
1502 
1503 	flow_action_for_each(i, act, &rule->action) {
1504 		switch (act->id) {
1505 		case FLOW_ACTION_POLICE:
1506 			ret = cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
1507 			if (ret)
1508 				return ret;
1509 
1510 			return cpsw_qos_clsflower_add_policer(priv, extack, cls,
1511 							      act->police.rate_pkt_ps);
1512 		default:
1513 			NL_SET_ERR_MSG_MOD(extack, "Action not supported");
1514 			return -EOPNOTSUPP;
1515 		}
1516 	}
1517 	return -EOPNOTSUPP;
1518 }
1519 
1520 static int cpsw_qos_delete_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
1521 {
1522 	u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
1523 
1524 	if (cls->cookie == priv->ale_bc_ratelimit.cookie) {
1525 		priv->ale_bc_ratelimit.cookie = 0;
1526 		priv->ale_bc_ratelimit.rate_packet_ps = 0;
1527 		cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, 0);
1528 	}
1529 
1530 	if (cls->cookie == priv->ale_mc_ratelimit.cookie) {
1531 		priv->ale_mc_ratelimit.cookie = 0;
1532 		priv->ale_mc_ratelimit.rate_packet_ps = 0;
1533 		cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, 0);
1534 	}
1535 
1536 	return 0;
1537 }
1538 
1539 static int cpsw_qos_setup_tc_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls_flower)
1540 {
1541 	switch (cls_flower->command) {
1542 	case FLOW_CLS_REPLACE:
1543 		return cpsw_qos_configure_clsflower(priv, cls_flower);
1544 	case FLOW_CLS_DESTROY:
1545 		return cpsw_qos_delete_clsflower(priv, cls_flower);
1546 	default:
1547 		return -EOPNOTSUPP;
1548 	}
1549 }
1550 
1551 static int cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
1552 {
1553 	struct cpsw_priv *priv = cb_priv;
1554 	int ret;
1555 
1556 	if (!tc_cls_can_offload_and_chain0(priv->ndev, type_data))
1557 		return -EOPNOTSUPP;
1558 
1559 	ret = pm_runtime_get_sync(priv->dev);
1560 	if (ret < 0) {
1561 		pm_runtime_put_noidle(priv->dev);
1562 		return ret;
1563 	}
1564 
1565 	switch (type) {
1566 	case TC_SETUP_CLSFLOWER:
1567 		ret = cpsw_qos_setup_tc_clsflower(priv, type_data);
1568 		break;
1569 	default:
1570 		ret = -EOPNOTSUPP;
1571 	}
1572 
1573 	pm_runtime_put(priv->dev);
1574 	return ret;
1575 }
1576 
1577 static LIST_HEAD(cpsw_qos_block_cb_list);
1578 
1579 static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
1580 {
1581 	struct cpsw_priv *priv = netdev_priv(ndev);
1582 
1583 	return flow_block_cb_setup_simple(f, &cpsw_qos_block_cb_list,
1584 					  cpsw_qos_setup_tc_block_cb,
1585 					  priv, priv, true);
1586 }
1587 
1588 void cpsw_qos_clsflower_resume(struct cpsw_priv *priv)
1589 {
1590 	u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
1591 
1592 	if (priv->ale_bc_ratelimit.cookie)
1593 		cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id,
1594 					 priv->ale_bc_ratelimit.rate_packet_ps);
1595 
1596 	if (priv->ale_mc_ratelimit.cookie)
1597 		cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id,
1598 					 priv->ale_mc_ratelimit.rate_packet_ps);
1599 }
1600 EXPORT_SYMBOL_GPL(cpsw_qos_clsflower_resume);
1601 
1602 MODULE_LICENSE("GPL");
1603 MODULE_DESCRIPTION("TI CPSW Ethernet Switch Driver");
1604