/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
 * Copyright 2020 NXP
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/io.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/icmp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#include <linux/sort.h>
#include <linux/phy_fixed.h>
#include <soc/fsl/bman.h>
#include <soc/fsl/qman.h>
#include "fman.h"
#include "fman_port.h"
#include "mac.h"
#include "dpaa_eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files
 * using trace events only need to #include <trace/events/sched.h>
 */
#define CREATE_TRACE_POINTS
#include "dpaa_eth_trace.h"

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)");

static u16 tx_timeout = 1000;
module_param(tx_timeout, ushort, 0444);
MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");

#define FM_FD_STAT_RX_ERRORS \
	(FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
	 FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
	 FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \
	 FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
	 FM_FD_ERR_PRS_HDR_ERR)

#define FM_FD_STAT_TX_ERRORS \
	(FM_FD_ERR_UNSUPPORTED_FORMAT | \
	 FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)

#define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
			  NETIF_MSG_LINK | NETIF_MSG_IFUP | \
			  NETIF_MSG_IFDOWN | NETIF_MSG_HW)

#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
/* Ingress congestion threshold on FMan ports
 * The size in bytes of the ingress tail-drop threshold on FMan ports.
 * Traffic piling up above this value will be rejected by QMan and discarded
 * by FMan.
 */

/* Size in bytes of the FQ taildrop threshold */
#define DPAA_FQ_TD 0x200000

#define DPAA_CS_THRESHOLD_1G 0x06000000
/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
 * The size in bytes of the egress Congestion State notification threshold on
 * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
 * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
 * and the larger the frame size, the more acute the problem.
 * So we have to find a balance between these factors:
 * - avoiding the device staying congested for a prolonged time (risking
 *   the netdev watchdog to fire - see also the tx_timeout module param);
 * - affecting performance of protocols such as TCP, which otherwise
 *   behave well under the congestion notification mechanism;
 * - preventing the Tx cores from tightly-looping (as if the congestion
 *   threshold was too low to be effective);
 * - running out of memory if the CS threshold is set too high.
 */

#define DPAA_CS_THRESHOLD_10G 0x10000000
/* The size in bytes of the egress Congestion State notification threshold on
 * 10G ports, range 0x1000 .. 0x10000000
 */

/* Largest value that the FQD's OAL field can hold */
#define FSL_QMAN_MAX_OAL 127

/* Default alignment for start of data in an Rx FD */
#ifdef CONFIG_DPAA_ERRATUM_A050385
/* aligning data start to 64 avoids DMA transaction splits, unless the buffer
 * is crossing a 4k page boundary
 */
#define DPAA_FD_DATA_ALIGNMENT (fman_has_errata_a050385() ? 64 : 16)
/* aligning to 256 avoids DMA transaction splits caused by 4k page boundary
 * crossings; also, all SG fragments except the last must have a size multiple
 * of 256 to avoid DMA transaction splits
 */
#define DPAA_A050385_ALIGN 256
#define DPAA_FD_RX_DATA_ALIGNMENT (fman_has_errata_a050385() ? \
				   DPAA_A050385_ALIGN : 16)
#else
#define DPAA_FD_DATA_ALIGNMENT 16
#define DPAA_FD_RX_DATA_ALIGNMENT DPAA_FD_DATA_ALIGNMENT
#endif

/* The DPAA requires 256 bytes reserved and mapped for the SGT */
#define DPAA_SGT_SIZE 256

/* Values for the L3R field of the FM Parse Results
 */
/* L3 Type field: First IP Present IPv4 */
#define FM_L3_PARSE_RESULT_IPV4 0x8000
/* L3 Type field: First IP Present IPv6 */
#define FM_L3_PARSE_RESULT_IPV6 0x4000
/* Values for the L4R field of the FM Parse Results */
/* L4 Type field: UDP */
#define FM_L4_PARSE_RESULT_UDP 0x40
/* L4 Type field: TCP */
#define FM_L4_PARSE_RESULT_TCP 0x20

/* FD status field indicating whether the FM Parser has attempted to validate
 * the L4 csum of the frame.
 * Note that having this bit set doesn't necessarily imply that the checksum
 * is valid. One would have to check the parse results to find that out.
 */
#define FM_FD_STAT_L4CV 0x00000004

#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */

#define FSL_DPAA_BPID_INV 0xff
#define FSL_DPAA_ETH_MAX_BUF_COUNT 128
#define FSL_DPAA_ETH_REFILL_THRESHOLD 80

#define DPAA_TX_PRIV_DATA_SIZE 16
#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
#define DPAA_TIME_STAMP_SIZE 8
#define DPAA_HASH_RESULTS_SIZE 8
#define DPAA_HWA_SIZE (DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE \
		       + DPAA_HASH_RESULTS_SIZE)
#define DPAA_RX_PRIV_DATA_DEFAULT_SIZE (DPAA_TX_PRIV_DATA_SIZE + \
					dpaa_rx_extra_headroom)
#ifdef CONFIG_DPAA_ERRATUM_A050385
#define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE)
#define DPAA_RX_PRIV_DATA_SIZE (fman_has_errata_a050385() ? \
				DPAA_RX_PRIV_DATA_A050385_SIZE : \
				DPAA_RX_PRIV_DATA_DEFAULT_SIZE)
#else
#define DPAA_RX_PRIV_DATA_SIZE DPAA_RX_PRIV_DATA_DEFAULT_SIZE
#endif

#define DPAA_ETH_PCD_RXQ_NUM 128

#define DPAA_ENQUEUE_RETRIES 100000

enum port_type {RX, TX};

struct fm_port_fqs {
	struct dpaa_fq *tx_defq;
	struct dpaa_fq *tx_errq;
	struct dpaa_fq *rx_defq;
	struct dpaa_fq *rx_errq;
	struct dpaa_fq *rx_pcdq;
};

/* All the dpa bps in use at any moment */
static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];

#define DPAA_BP_RAW_SIZE 4096

#ifdef CONFIG_DPAA_ERRATUM_A050385
#define dpaa_bp_size(raw_size) (SKB_WITH_OVERHEAD(raw_size) & \
				~(DPAA_A050385_ALIGN - 1))
#else
#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
#endif

static int dpaa_max_frm;

static int dpaa_rx_extra_headroom;

#define dpaa_get_max_mtu()	\
	(dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))

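/* Set up the netdev-level state shared by all DPAA interfaces: netdev ops,
 * feature flags, MTU limits and the (possibly random) MAC address, then
 * register the net device.
 */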
static int dpaa_netdev_init(struct net_device *net_dev,
			    const struct net_device_ops *dpaa_ops,
			    u16 tx_timeout)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	struct dpaa_percpu_priv *percpu_priv;
	const u8 *mac_addr;
	int i, err;

	/* Although we access another CPU's private data here
	 * we do it at initialization so it is safe
	 */
	for_each_possible_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		percpu_priv->net_dev = net_dev;
	}

	net_dev->netdev_ops = dpaa_ops;
	mac_addr = priv->mac_dev->addr;

	net_dev->mem_start = priv->mac_dev->res->start;
	net_dev->mem_end = priv->mac_dev->res->end;

	net_dev->min_mtu = ETH_MIN_MTU;
	net_dev->max_mtu = dpaa_get_max_mtu();

	net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				 NETIF_F_LLTX | NETIF_F_RXHASH);

	net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
	/* The kernel enables GSO automatically if we declare NETIF_F_SG.
	 * For conformity, we'll still declare GSO explicitly.
	 */
	net_dev->features |= NETIF_F_GSO;
	net_dev->features |= NETIF_F_RXCSUM;

	net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	/* we do not want shared skbs on TX */
	net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;

	net_dev->features |= net_dev->hw_features;
	net_dev->vlan_features = net_dev->features;

	if (is_valid_ether_addr(mac_addr)) {
		memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
	} else {
		eth_hw_addr_random(net_dev);
		err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac,
			(enet_addr_t *)net_dev->dev_addr);
		if (err) {
			dev_err(dev, "Failed to set random MAC address\n");
			return -EINVAL;
		}
		dev_info(dev, "Using random MAC address: %pM\n",
			 net_dev->dev_addr);
	}

	net_dev->ethtool_ops = &dpaa_ethtool_ops;

	net_dev->needed_headroom = priv->tx_headroom;
	net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);

	/* start without the RUNNING flag, phylib controls it later */
	netif_carrier_off(net_dev);

	err = register_netdev(net_dev);
	if (err < 0) {
		dev_err(dev, "register_netdev() = %d\n", err);
		return err;
	}

	return 0;
}

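/* .ndo_stop callback: stop the Tx queues, give in-flight frames time to
 * drain, then shut down the MAC and disable both FMan ports.
 */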
static int dpaa_stop(struct net_device *net_dev)
{
	struct mac_device *mac_dev;
	struct dpaa_priv *priv;
	int i, err, error;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

	netif_tx_stop_all_queues(net_dev);
	/* Allow the Fman (Tx) port to process in-flight frames before we
	 * try switching it off.
	 */
	msleep(200);

	err = mac_dev->stop(mac_dev);
	if (err < 0)
		netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
			  err);

	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
		error = fman_port_disable(mac_dev->port[i]);
		if (error)
			err = error;
	}

	if (net_dev->phydev)
		phy_disconnect(net_dev->phydev);
	net_dev->phydev = NULL;

	msleep(200);

	return err;
}

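/* .ndo_tx_timeout callback: log how long the queue has been stuck and
 * account the event as a Tx error.
 */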
static void dpaa_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
{
	struct dpaa_percpu_priv *percpu_priv;
	const struct dpaa_priv *priv;

	priv = netdev_priv(net_dev);
	percpu_priv = this_cpu_ptr(priv->percpu_priv);

	netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
		   jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));

	percpu_priv->stats.tx_errors++;
}

/* Calculates the statistics for the given device by adding the statistics
 * collected by each CPU.
 */
static void dpaa_get_stats64(struct net_device *net_dev,
			     struct rtnl_link_stats64 *s)
{
	int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct dpaa_percpu_priv *percpu_priv;
	u64 *netstats = (u64 *)s;
	u64 *cpustats;
	int i, j;

	for_each_possible_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

		cpustats = (u64 *)&percpu_priv->stats;

		/* add stats from all CPUs */
		for (j = 0; j < numstats; j++)
			netstats[j] += cpustats[j];
	}
}

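/* .ndo_setup_tc callback: map the mqprio traffic classes onto the DPAA
 * Tx queues, DPAA_TC_TXQ_NUM queues per class.
 */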
static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
			 void *type_data)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct tc_mqprio_qopt *mqprio = type_data;
	u8 num_tc;
	int i;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_tc = mqprio->num_tc;

	if (num_tc == priv->num_tc)
		return 0;

	if (!num_tc) {
		netdev_reset_tc(net_dev);
		goto out;
	}

	if (num_tc > DPAA_TC_NUM) {
		netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
			   DPAA_TC_NUM);
		return -EINVAL;
	}

	netdev_set_num_tc(net_dev, num_tc);

	for (i = 0; i < num_tc; i++)
		netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
				    i * DPAA_TC_TXQ_NUM);

out:
	priv->num_tc = num_tc ? : 1;
	netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
	return 0;
}

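/* Retrieve the mac_device that the MAC driver attached to this platform
 * device through its platform_data.
 */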
static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
{
	struct dpaa_eth_data *eth_data;
	struct device *dpaa_dev;
	struct mac_device *mac_dev;

	dpaa_dev = &pdev->dev;
	eth_data = dpaa_dev->platform_data;
	if (!eth_data) {
		dev_err(dpaa_dev, "eth_data missing\n");
		return ERR_PTR(-ENODEV);
	}
	mac_dev = eth_data->mac_dev;
	if (!mac_dev) {
		dev_err(dpaa_dev, "mac_dev missing\n");
		return ERR_PTR(-EINVAL);
	}

	return mac_dev;
}

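/* .ndo_set_mac_address callback: propagate the new address to the FMan
 * MAC, reverting to the old one if the hardware update fails.
 */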
static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
{
	const struct dpaa_priv *priv;
	struct mac_device *mac_dev;
	struct sockaddr old_addr;
	int err;

	priv = netdev_priv(net_dev);

	memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN);

	err = eth_mac_addr(net_dev, addr);
	if (err < 0) {
		netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
		return err;
	}

	mac_dev = priv->mac_dev;

	err = mac_dev->change_addr(mac_dev->fman_mac,
				   (enet_addr_t *)net_dev->dev_addr);
	if (err < 0) {
		netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
			  err);
		/* reverting to previous address */
		eth_mac_addr(net_dev, &old_addr);

		return err;
	}

	return 0;
}

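/* .ndo_set_rx_mode callback: sync the promiscuous, allmulti and multicast
 * list settings into the FMan MAC.
 */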
static void dpaa_set_rx_mode(struct net_device *net_dev)
{
	const struct dpaa_priv *priv;
	int err;

	priv = netdev_priv(net_dev);

	if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
		priv->mac_dev->promisc = !priv->mac_dev->promisc;
		err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
						 priv->mac_dev->promisc);
		if (err < 0)
			netif_err(priv, drv, net_dev,
				  "mac_dev->set_promisc() = %d\n",
				  err);
	}

	if (!!(net_dev->flags & IFF_ALLMULTI) != priv->mac_dev->allmulti) {
		priv->mac_dev->allmulti = !priv->mac_dev->allmulti;
		err = priv->mac_dev->set_allmulti(priv->mac_dev->fman_mac,
						  priv->mac_dev->allmulti);
		if (err < 0)
			netif_err(priv, drv, net_dev,
				  "mac_dev->set_allmulti() = %d\n",
				  err);
	}

	err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
	if (err < 0)
		netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
			  err);
}

static struct dpaa_bp *dpaa_bpid2pool(int bpid)
{
	if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS))
		return NULL;

	return dpaa_bp_array[bpid];
}

/* checks if this bpool is already allocated */
static bool dpaa_bpid2pool_use(int bpid)
{
	if (dpaa_bpid2pool(bpid)) {
		refcount_inc(&dpaa_bp_array[bpid]->refs);
		return true;
	}

	return false;
}

/* called only once per bpid by dpaa_bp_alloc_pool() */
static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
{
	dpaa_bp_array[bpid] = dpaa_bp;
	refcount_set(&dpaa_bp->refs, 1);
}

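/* Allocate a Bman pool for this buffer pool (or reuse the existing one,
 * if the bpid is already in use), seed it if a seed callback was
 * provided, and record the bpid-to-dpaa_bp mapping.
 */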
static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
{
	int err;

	if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) {
		pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n",
		       __func__);
		return -EINVAL;
	}

	/* If the pool is already specified, we only create one per bpid */
	if (dpaa_bp->bpid != FSL_DPAA_BPID_INV &&
	    dpaa_bpid2pool_use(dpaa_bp->bpid))
		return 0;

	if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) {
		dpaa_bp->pool = bman_new_pool();
		if (!dpaa_bp->pool) {
			pr_err("%s: bman_new_pool() failed\n",
			       __func__);
			return -ENODEV;
		}

		dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
	}

	if (dpaa_bp->seed_cb) {
		err = dpaa_bp->seed_cb(dpaa_bp);
		if (err)
			goto pool_seed_failed;
	}

	dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp);

	return 0;

pool_seed_failed:
	pr_err("%s: pool seeding failed\n", __func__);
	bman_free_pool(dpaa_bp->pool);

	return err;
}

/* remove and free all the buffers from the given buffer pool */
static void dpaa_bp_drain(struct dpaa_bp *bp)
{
	u8 num = 8;
	int ret;

	do {
		struct bm_buffer bmb[8];
		int i;

		ret = bman_acquire(bp->pool, bmb, num);
		if (ret < 0) {
			if (num == 8) {
				/* we have less than 8 buffers left;
				 * drain them one by one
				 */
				num = 1;
				ret = 1;
				continue;
			} else {
				/* Pool is fully drained */
				break;
			}
		}

		if (bp->free_buf_cb)
			for (i = 0; i < num; i++)
				bp->free_buf_cb(bp, &bmb[i]);
	} while (ret > 0);
}

static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
{
	struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid);

	/* the mapping between bpid and dpaa_bp is done very late in the
	 * allocation procedure; if something failed before the mapping, the bp
	 * was not configured, therefore we don't need the below instructions
	 */
	if (!bp)
		return;

	if (!refcount_dec_and_test(&bp->refs))
		return;

	if (bp->free_buf_cb)
		dpaa_bp_drain(bp);

	dpaa_bp_array[bp->bpid] = NULL;
	bman_free_pool(bp->pool);
}

static void dpaa_bps_free(struct dpaa_priv *priv)
{
	dpaa_bp_free(priv->dpaa_bp);
}

/* Use multiple WQs for FQ assignment:
 * - Tx Confirmation queues go to WQ1.
 * - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
 *   to be scheduled, in case there are many more FQs in WQ6).
 * - Rx Default goes to WQ6.
 * - Tx queues go to different WQs depending on their priority. Equal
 *   chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
 *   WQ0 (highest priority).
 * This ensures that Tx-confirmed buffers are timely released. In particular,
 * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
 * are greatly outnumbered by other FQs in the system, while
 * dequeue scheduling is round-robin.
 */
static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
{
	switch (fq->fq_type) {
	case FQ_TYPE_TX_CONFIRM:
	case FQ_TYPE_TX_CONF_MQ:
		fq->wq = 1;
		break;
	case FQ_TYPE_RX_ERROR:
	case FQ_TYPE_TX_ERROR:
		fq->wq = 5;
		break;
	case FQ_TYPE_RX_DEFAULT:
	case FQ_TYPE_RX_PCD:
		fq->wq = 6;
		break;
	case FQ_TYPE_TX:
		switch (idx / DPAA_TC_TXQ_NUM) {
		case 0:
			/* Low priority (best effort) */
			fq->wq = 6;
			break;
		case 1:
			/* Medium priority */
			fq->wq = 2;
			break;
		case 2:
			/* High priority */
			fq->wq = 1;
			break;
		case 3:
			/* Very high priority */
			fq->wq = 0;
			break;
		default:
			WARN(1, "Too many TX FQs: more than %d!\n",
			     DPAA_ETH_TXQ_NUM);
		}
		break;
	default:
		WARN(1, "Invalid FQ type %d for FQID %d!\n",
		     fq->fq_type, fq->fqid);
	}
}

static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
				     u32 start, u32 count,
				     struct list_head *list,
				     enum dpaa_fq_type fq_type)
{
	struct dpaa_fq *dpaa_fq;
	int i;

	dpaa_fq = devm_kcalloc(dev, count, sizeof(*dpaa_fq),
			       GFP_KERNEL);
	if (!dpaa_fq)
		return NULL;

	for (i = 0; i < count; i++) {
		dpaa_fq[i].fq_type = fq_type;
		dpaa_fq[i].fqid = start ? start + i : 0;
		list_add_tail(&dpaa_fq[i].list, list);
	}

	for (i = 0; i < count; i++)
		dpaa_assign_wq(dpaa_fq + i, i);

	return dpaa_fq;
}

static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
			      struct fm_port_fqs *port_fqs)
{
	struct dpaa_fq *dpaa_fq;
	u32 fq_base, fq_base_aligned, i;

	dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
	if (!dpaa_fq)
		goto fq_alloc_failed;

	port_fqs->rx_errq = &dpaa_fq[0];

	dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
	if (!dpaa_fq)
		goto fq_alloc_failed;

	port_fqs->rx_defq = &dpaa_fq[0];

	/* the PCD FQIDs range needs to be aligned for correct operation */
	if (qman_alloc_fqid_range(&fq_base, 2 * DPAA_ETH_PCD_RXQ_NUM))
		goto fq_alloc_failed;

	fq_base_aligned = ALIGN(fq_base, DPAA_ETH_PCD_RXQ_NUM);

	for (i = fq_base; i < fq_base_aligned; i++)
		qman_release_fqid(i);

	for (i = fq_base_aligned + DPAA_ETH_PCD_RXQ_NUM;
	     i < (fq_base + 2 * DPAA_ETH_PCD_RXQ_NUM); i++)
		qman_release_fqid(i);

	dpaa_fq = dpaa_fq_alloc(dev, fq_base_aligned, DPAA_ETH_PCD_RXQ_NUM,
				list, FQ_TYPE_RX_PCD);
	if (!dpaa_fq)
		goto fq_alloc_failed;

	port_fqs->rx_pcdq = &dpaa_fq[0];

	if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
		goto fq_alloc_failed;

	dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
	if (!dpaa_fq)
		goto fq_alloc_failed;

	port_fqs->tx_errq = &dpaa_fq[0];

	dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
	if (!dpaa_fq)
		goto fq_alloc_failed;

	port_fqs->tx_defq = &dpaa_fq[0];

	if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
		goto fq_alloc_failed;

	return 0;

fq_alloc_failed:
	dev_err(dev, "dpaa_fq_alloc() failed\n");
	return -ENOMEM;
}

static u32 rx_pool_channel;
static DEFINE_SPINLOCK(rx_pool_channel_init);

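/* Lazily allocate one QMan pool channel, shared by the Rx FQs of all
 * DPAA interfaces.
 */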
static int dpaa_get_channel(void)
{
	spin_lock(&rx_pool_channel_init);
	if (!rx_pool_channel) {
		u32 pool;
		int ret;

		ret = qman_alloc_pool(&pool);

		if (!ret)
			rx_pool_channel = pool;
	}
	spin_unlock(&rx_pool_channel_init);
	if (!rx_pool_channel)
		return -ENOMEM;
	return rx_pool_channel;
}

static void dpaa_release_channel(void)
{
	qman_release_pool(rx_pool_channel);
}

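/* Add the pool channel to the static dequeue command register of the
 * QMan portal affine to each online CPU, so any of them can service it.
 */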
static void dpaa_eth_add_channel(u16 channel, struct device *dev)
{
	u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
	const cpumask_t *cpus = qman_affine_cpus();
	struct qman_portal *portal;
	int cpu;

	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
		portal = qman_get_affine_portal(cpu);
		qman_p_static_dequeue_add(portal, pool);
		qman_start_using_portal(portal, dev);
	}
}

/* Congestion group state change notification callback.
 * Stops the device's egress queues while they are congested and
 * wakes them upon exiting congested state.
 * Also updates some CGR-related stats.
 */
static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
			   int congested)
{
	struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr,
		struct dpaa_priv, cgr_data.cgr);

	if (congested) {
		priv->cgr_data.congestion_start_jiffies = jiffies;
		netif_tx_stop_all_queues(priv->net_dev);
		priv->cgr_data.cgr_congested_count++;
	} else {
		priv->cgr_data.congested_jiffies +=
			(jiffies - priv->cgr_data.congestion_start_jiffies);
		netif_tx_wake_all_queues(priv->net_dev);
	}
}

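/* Allocate and initialize the egress Congestion Group Record: enable
 * congestion state change notifications and CS taildrop, with a CS
 * threshold chosen according to the MAC's maximum speed.
 */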
static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
{
	struct qm_mcc_initcgr initcgr;
	u32 cs_th;
	int err;

	err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
	if (err < 0) {
		if (netif_msg_drv(priv))
			pr_err("%s: Error %d allocating CGR ID\n",
			       __func__, err);
		goto out_error;
	}
	priv->cgr_data.cgr.cb = dpaa_eth_cgscn;

	/* Enable Congestion State Change Notifications and CS taildrop */
	memset(&initcgr, 0, sizeof(initcgr));
	initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
	initcgr.cgr.cscn_en = QM_CGR_EN;

	/* Set different thresholds based on the MAC speed.
	 * This may turn suboptimal if the MAC is reconfigured at a speed
	 * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
	 * In such cases, we ought to reconfigure the threshold, too.
	 */
	if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
		cs_th = DPAA_CS_THRESHOLD_10G;
	else
		cs_th = DPAA_CS_THRESHOLD_1G;
	qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);

	initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
	initcgr.cgr.cstd_en = QM_CGR_EN;

	err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
			      &initcgr);
	if (err < 0) {
		if (netif_msg_drv(priv))
			pr_err("%s: Error %d creating CGR with ID %d\n",
			       __func__, err, priv->cgr_data.cgr.cgrid);
		qman_release_cgrid(priv->cgr_data.cgr.cgrid);
		goto out_error;
	}
	if (netif_msg_drv(priv))
		pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
			 priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
			 priv->cgr_data.cgr.chan);

out_error:
	return err;
}

static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
				      struct dpaa_fq *fq,
				      const struct qman_fq *template)
{
	fq->fq_base = *template;
	fq->net_dev = priv->net_dev;

	fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
	fq->channel = priv->channel;
}

static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
				     struct dpaa_fq *fq,
				     struct fman_port *port,
				     const struct qman_fq *template)
{
	fq->fq_base = *template;
	fq->net_dev = priv->net_dev;

	if (port) {
		fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
		fq->channel = (u16)fman_port_get_qman_channel_id(port);
	} else {
		fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
	}
}

static void dpaa_fq_setup(struct dpaa_priv *priv,
			  const struct dpaa_fq_cbs *fq_cbs,
			  struct fman_port *tx_port)
{
	int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
	const cpumask_t *affine_cpus = qman_affine_cpus();
	u16 channels[NR_CPUS];
	struct dpaa_fq *fq;

	for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
		channels[num_portals++] = qman_affine_channel(cpu);

	if (num_portals == 0)
		dev_err(priv->net_dev->dev.parent,
			"No Qman software (affine) channels found\n");

	/* Initialize each FQ in the list */
	list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
		switch (fq->fq_type) {
		case FQ_TYPE_RX_DEFAULT:
			dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
			break;
		case FQ_TYPE_RX_ERROR:
			dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
			break;
		case FQ_TYPE_RX_PCD:
			if (!num_portals)
				continue;
			dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
			fq->channel = channels[portal_cnt++ % num_portals];
			break;
		case FQ_TYPE_TX:
			dpaa_setup_egress(priv, fq, tx_port,
					  &fq_cbs->egress_ern);
			/* If we have more Tx queues than the number of cores,
			 * just ignore the extra ones.
			 */
			if (egress_cnt < DPAA_ETH_TXQ_NUM)
				priv->egress_fqs[egress_cnt++] = &fq->fq_base;
			break;
		case FQ_TYPE_TX_CONF_MQ:
			priv->conf_fqs[conf_cnt++] = &fq->fq_base;
			fallthrough;
		case FQ_TYPE_TX_CONFIRM:
			dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
			break;
		case FQ_TYPE_TX_ERROR:
			dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
			break;
		default:
			dev_warn(priv->net_dev->dev.parent,
				 "Unknown FQ type detected!\n");
			break;
		}
	}

	/* Make sure all CPUs receive a corresponding Tx queue. */
	while (egress_cnt < DPAA_ETH_TXQ_NUM) {
		list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
			if (fq->fq_type != FQ_TYPE_TX)
				continue;
			priv->egress_fqs[egress_cnt++] = &fq->fq_base;
			if (egress_cnt == DPAA_ETH_TXQ_NUM)
				break;
		}
	}
}

static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
				   struct qman_fq *tx_fq)
{
	int i;

	for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
		if (priv->egress_fqs[i] == tx_fq)
			return i;

	return -EINVAL;
}

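/* Create the frame queue in QMan and, unless it is a reserved
 * (NO_MODIFY) queue, initialize and schedule its FQD: work queue
 * placement, congestion group membership, taildrop and stashing.
 */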
static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
{
	const struct dpaa_priv *priv;
	struct qman_fq *confq = NULL;
	struct qm_mcc_initfq initfq;
	struct device *dev;
	struct qman_fq *fq;
	int queue_id;
	int err;

	priv = netdev_priv(dpaa_fq->net_dev);
	dev = dpaa_fq->net_dev->dev.parent;

	if (dpaa_fq->fqid == 0)
		dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;

	dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);

	err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
	if (err) {
		dev_err(dev, "qman_create_fq() failed\n");
		return err;
	}
	fq = &dpaa_fq->fq_base;

	if (dpaa_fq->init) {
		memset(&initfq, 0, sizeof(initfq));

		initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
		/* Note: we may get to keep an empty FQ in cache */
		initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);

		/* Try to reduce the number of portal interrupts for
		 * Tx Confirmation FQs.
		 */
		if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
			initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK);

		/* FQ placement */
		initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);

		qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);

		/* Put all egress queues in a congestion group of their own.
		 * Sensu stricto, the Tx confirmation queues are Rx FQs,
		 * rather than Tx - but they nonetheless account for the
		 * memory footprint on behalf of egress traffic. We therefore
		 * place them in the netdev's CGR, along with the Tx FQs.
		 */
		if (dpaa_fq->fq_type == FQ_TYPE_TX ||
		    dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
		    dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
			initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
			initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
			initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
			/* Set a fixed overhead accounting, in an attempt to
			 * reduce the impact of fixed-size skb shells and the
			 * driver's needed headroom on system memory. This is
			 * especially the case when the egress traffic is
			 * composed of small datagrams.
			 * Unfortunately, QMan's OAL value is capped to an
			 * insufficient value, but even that is better than
			 * no overhead accounting at all.
			 */
			initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
			qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
			qm_fqd_set_oal(&initfq.fqd,
				       min(sizeof(struct sk_buff) +
					   priv->tx_headroom,
					   (size_t)FSL_QMAN_MAX_OAL));
		}

		if (td_enable) {
			initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
			qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
			initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
		}

		if (dpaa_fq->fq_type == FQ_TYPE_TX) {
			queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
			if (queue_id >= 0)
				confq = priv->conf_fqs[queue_id];
			if (confq) {
				initfq.we_mask |=
					cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
			/* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
			 *	     A2V=1 (contextA A2 field is valid)
			 *	     A0V=1 (contextA A0 field is valid)
			 *	     B0V=1 (contextB field is valid)
			 * ContextA A2: EBD=1 (deallocate buffers inside FMan)
			 * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
			 */
				qm_fqd_context_a_set64(&initfq.fqd,
						       0x1e00000080000000ULL);
			}
		}

		/* Put all the ingress queues in our "ingress CGR". */
		if (priv->use_ingress_cgr &&
		    (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
		     dpaa_fq->fq_type == FQ_TYPE_RX_ERROR ||
		     dpaa_fq->fq_type == FQ_TYPE_RX_PCD)) {
			initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
			initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
			initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
			/* Set a fixed overhead accounting, just like for the
			 * egress CGR.
			 */
			initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
			qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
			qm_fqd_set_oal(&initfq.fqd,
				       min(sizeof(struct sk_buff) +
					   priv->tx_headroom,
					   (size_t)FSL_QMAN_MAX_OAL));
		}

		/* Initialization common to all ingress queues */
		if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
			initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
			initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
						QM_FQCTRL_CTXASTASHING);
			initfq.fqd.context_a.stashing.exclusive =
				QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
				QM_STASHING_EXCL_ANNOTATION;
			qm_fqd_set_stashing(&initfq.fqd, 1, 2,
					    DIV_ROUND_UP(sizeof(struct qman_fq),
							 64));
		}

		err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
		if (err < 0) {
			dev_err(dev, "qman_init_fq(%u) = %d\n",
				qman_fq_fqid(fq), err);
			qman_destroy_fq(fq);
			return err;
		}
	}

	dpaa_fq->fqid = qman_fq_fqid(fq);

	return 0;
}

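/* Retire, take out-of-service and destroy a single frame queue, removing
 * it from the driver's FQ list.
 */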
static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
{
	const struct dpaa_priv *priv;
	struct dpaa_fq *dpaa_fq;
	int err, error;

	err = 0;

	dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
	priv = netdev_priv(dpaa_fq->net_dev);

	if (dpaa_fq->init) {
		err = qman_retire_fq(fq, NULL);
		if (err < 0 && netif_msg_drv(priv))
			dev_err(dev, "qman_retire_fq(%u) = %d\n",
				qman_fq_fqid(fq), err);

		error = qman_oos_fq(fq);
		if (error < 0 && netif_msg_drv(priv)) {
			dev_err(dev, "qman_oos_fq(%u) = %d\n",
				qman_fq_fqid(fq), error);
			if (err >= 0)
				err = error;
		}
	}

	qman_destroy_fq(fq);
	list_del(&dpaa_fq->list);

	return err;
}

static int dpaa_fq_free(struct device *dev, struct list_head *list)
{
	struct dpaa_fq *dpaa_fq, *tmp;
	int err, error;

	err = 0;
	list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
		error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
		if (error < 0 && err >= 0)
			err = error;
	}

	return err;
}

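/* Configure and initialize a Tx FMan port: buffer prefix layout and the
 * default and error FQIDs.
 */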
static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
				 struct dpaa_fq *defq,
				 struct dpaa_buffer_layout *buf_layout)
{
	struct fman_buffer_prefix_content buf_prefix_content;
	struct fman_port_params params;
	int err;

	memset(&params, 0, sizeof(params));
	memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));

	buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
	buf_prefix_content.pass_prs_result = true;
	buf_prefix_content.pass_hash_result = true;
	buf_prefix_content.pass_time_stamp = true;
	buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;

	params.specific_params.non_rx_params.err_fqid = errq->fqid;
	params.specific_params.non_rx_params.dflt_fqid = defq->fqid;

	err = fman_port_config(port, &params);
	if (err) {
		pr_err("%s: fman_port_config failed\n", __func__);
		return err;
	}

	err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
	if (err) {
		pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
		       __func__);
		return err;
	}

	err = fman_port_init(port);
	if (err)
		pr_err("%s: fm_port_init failed\n", __func__);

	return err;
}

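/* Configure and initialize an Rx FMan port: buffer prefix layout, the
 * default, error and (optional) PCD FQIDs, and the backing buffer pool.
 */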
static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
				 struct dpaa_fq *errq,
				 struct dpaa_fq *defq, struct dpaa_fq *pcdq,
				 struct dpaa_buffer_layout *buf_layout)
{
	struct fman_buffer_prefix_content buf_prefix_content;
	struct fman_port_rx_params *rx_p;
	struct fman_port_params params;
	int err;

	memset(&params, 0, sizeof(params));
	memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));

	buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
	buf_prefix_content.pass_prs_result = true;
	buf_prefix_content.pass_hash_result = true;
	buf_prefix_content.pass_time_stamp = true;
	buf_prefix_content.data_align = DPAA_FD_RX_DATA_ALIGNMENT;

	rx_p = &params.specific_params.rx_params;
	rx_p->err_fqid = errq->fqid;
	rx_p->dflt_fqid = defq->fqid;
	if (pcdq) {
		rx_p->pcd_base_fqid = pcdq->fqid;
		rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
	}

	rx_p->ext_buf_pools.num_of_pools_used = 1;
	rx_p->ext_buf_pools.ext_buf_pool[0].id = bp->bpid;
	rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size;

	err = fman_port_config(port, &params);
	if (err) {
		pr_err("%s: fman_port_config failed\n", __func__);
		return err;
	}

	err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
	if (err) {
		pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
		       __func__);
		return err;
	}

	err = fman_port_init(port);
	if (err)
		pr_err("%s: fm_port_init failed\n", __func__);

	return err;
}

static int dpaa_eth_init_ports(struct mac_device *mac_dev,
			       struct dpaa_bp *bp,
			       struct fm_port_fqs *port_fqs,
			       struct dpaa_buffer_layout *buf_layout,
			       struct device *dev)
{
	struct fman_port *rxport = mac_dev->port[RX];
	struct fman_port *txport = mac_dev->port[TX];
	int err;

	err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
				    port_fqs->tx_defq, &buf_layout[TX]);
	if (err)
		return err;

	err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq,
				    port_fqs->rx_defq, port_fqs->rx_pcdq,
				    &buf_layout[RX]);

	return err;
}

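/* Release buffers to Bman; on (unexpected) failure, free them ourselves
 * rather than leak them.
 */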
static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
			     struct bm_buffer *bmb, int cnt)
{
	int err;

	err = bman_release(dpaa_bp->pool, bmb, cnt);
	/* Should never occur, address anyway to avoid leaking the buffers */
	if (WARN_ON(err) && dpaa_bp->free_buf_cb)
		while (cnt-- > 0)
			dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);

	return cnt;
}

static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
{
	struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX];
	struct dpaa_bp *dpaa_bp;
	int i = 0, j;

	memset(bmb, 0, sizeof(bmb));

	do {
		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
		if (!dpaa_bp)
			return;

		j = 0;
		do {
			WARN_ON(qm_sg_entry_is_ext(&sgt[i]));

			bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i]));

			j++; i++;
		} while (j < ARRAY_SIZE(bmb) &&
			 !qm_sg_entry_is_final(&sgt[i - 1]) &&
			 sgt[i - 1].bpid == sgt[i].bpid);

		dpaa_bman_release(dpaa_bp, bmb, j);
	} while (!qm_sg_entry_is_final(&sgt[i - 1]));
}

static void dpaa_fd_release(const struct net_device *net_dev,
			    const struct qm_fd *fd)
{
	struct qm_sg_entry *sgt;
	struct dpaa_bp *dpaa_bp;
	struct bm_buffer bmb;
	dma_addr_t addr;
	void *vaddr;

	bmb.data = 0;
	bm_buffer_set64(&bmb, qm_fd_addr(fd));

	dpaa_bp = dpaa_bpid2pool(fd->bpid);
	if (!dpaa_bp)
		return;

	if (qm_fd_get_format(fd) == qm_fd_sg) {
		vaddr = phys_to_virt(qm_fd_addr(fd));
		sgt = vaddr + qm_fd_get_offset(fd);

		dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
			       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);

		dpaa_release_sgt_members(sgt);

		addr = dma_map_page(dpaa_bp->priv->rx_dma_dev,
				    virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
			netdev_err(net_dev, "DMA mapping failed\n");
			return;
		}
		bm_buffer_set64(&bmb, addr);
	}

	dpaa_bman_release(dpaa_bp, &bmb, 1);
}

static void count_ern(struct dpaa_percpu_priv *percpu_priv,
		      const union qm_mr_entry *msg)
{
	switch (msg->ern.rc & QM_MR_RC_MASK) {
	case QM_MR_RC_CGR_TAILDROP:
		percpu_priv->ern_cnt.cg_tdrop++;
		break;
	case QM_MR_RC_WRED:
		percpu_priv->ern_cnt.wred++;
		break;
	case QM_MR_RC_ERROR:
		percpu_priv->ern_cnt.err_cond++;
		break;
	case QM_MR_RC_ORPWINDOW_EARLY:
		percpu_priv->ern_cnt.early_window++;
		break;
	case QM_MR_RC_ORPWINDOW_LATE:
		percpu_priv->ern_cnt.late_window++;
		break;
	case QM_MR_RC_FQ_TAILDROP:
		percpu_priv->ern_cnt.fq_tdrop++;
		break;
	case QM_MR_RC_ORPWINDOW_RETIRED:
		percpu_priv->ern_cnt.fq_retired++;
		break;
	case QM_MR_RC_ORP_ZERO:
		percpu_priv->ern_cnt.orp_zero++;
		break;
	}
}

/* Turn on HW checksum computation for this outgoing frame.
 * If the current protocol is not something we support in this regard
 * (or if the stack has already computed the SW checksum), we do nothing.
 *
 * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
 * otherwise.
 *
 * Note that this function may modify the fd->cmd field and the skb data buffer
 * (the Parse Results area).
 */
static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
			       struct sk_buff *skb,
			       struct qm_fd *fd,
			       void *parse_results)
{
	struct fman_prs_result *parse_result;
	u16 ethertype = ntohs(skb->protocol);
	struct ipv6hdr *ipv6h = NULL;
	struct iphdr *iph;
	int retval = 0;
	u8 l4_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* Note: L3 csum seems to be already computed in sw, but we can't choose
	 * L4 alone from the FM configuration anyway.
	 */

	/* Fill in some fields of the Parse Results array, so the FMan
	 * can find them as if they came from the FMan Parser.
	 */
	parse_result = (struct fman_prs_result *)parse_results;

	/* If we're dealing with VLAN, get the real Ethernet type */
	if (ethertype == ETH_P_8021Q) {
		/* We can't always assume the MAC header is set correctly
		 * by the stack, so reset to beginning of skb->data
		 */
		skb_reset_mac_header(skb);
		ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
	}

	/* Fill in the relevant L3 parse result fields
	 * and read the L4 protocol type
	 */
	switch (ethertype) {
	case ETH_P_IP:
		parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
		iph = ip_hdr(skb);
		WARN_ON(!iph);
		l4_proto = iph->protocol;
		break;
	case ETH_P_IPV6:
		parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
		ipv6h = ipv6_hdr(skb);
		WARN_ON(!ipv6h);
		l4_proto = ipv6h->nexthdr;
		break;
	default:
		/* We shouldn't even be here */
		if (net_ratelimit())
			netif_alert(priv, tx_err, priv->net_dev,
				    "Can't compute HW csum for L3 proto 0x%x\n",
				    ntohs(skb->protocol));
		retval = -EIO;
		goto return_error;
	}

	/* Fill in the relevant L4 parse result fields */
	switch (l4_proto) {
	case IPPROTO_UDP:
		parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
		break;
	case IPPROTO_TCP:
		parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
		break;
	default:
		if (net_ratelimit())
			netif_alert(priv, tx_err, priv->net_dev,
				    "Can't compute HW csum for L4 proto 0x%x\n",
				    l4_proto);
		retval = -EIO;
		goto return_error;
	}

	/* At index 0 is IPOffset_1 as defined in the Parse Results */
	parse_result->ip_off[0] = (u8)skb_network_offset(skb);
	parse_result->l4_off = (u8)skb_transport_offset(skb);

	/* Enable L3 (and L4, if TCP or UDP) HW checksum. */
	fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);

	/* On P1023 and similar platforms fd->cmd interpretation could
	 * be disabled by setting CONTEXT_A bit ICMD; currently this bit
	 * is not set so we do not need to check; in the future, if/when
	 * using context_a we need to check this bit
	 */

return_error:
	return retval;
}

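/* Allocate and DMA-map eight pages and release them into the buffer
 * pool. Returns the number of buffers actually added.
 */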
static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
{
	struct net_device *net_dev = dpaa_bp->priv->net_dev;
	struct bm_buffer bmb[8];
	dma_addr_t addr;
	struct page *p;
	u8 i;

	for (i = 0; i < 8; i++) {
		p = dev_alloc_pages(0);
		if (unlikely(!p)) {
			netdev_err(net_dev, "dev_alloc_pages() failed\n");
			goto release_previous_buffs;
		}

		addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0,
				    DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
					       addr))) {
			netdev_err(net_dev, "DMA map failed\n");
			goto release_previous_buffs;
		}

		bmb[i].data = 0;
		bm_buffer_set64(&bmb[i], addr);
	}

release_bufs:
	return dpaa_bman_release(dpaa_bp, bmb, i);

release_previous_buffs:
	WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n");

	bm_buffer_set64(&bmb[i], 0);
	/* Avoid releasing a completely null buffer; bman_release() requires
	 * at least one buffer.
	 */
	if (likely(i))
		goto release_bufs;

	return 0;
}

static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
{
	int i;

	/* Give each CPU an allotment of "config_count" buffers */
	for_each_possible_cpu(i) {
		int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i);
		int j;

		/* Although we access another CPU's counters here
		 * we do it at boot time so it is safe
		 */
		for (j = 0; j < dpaa_bp->config_count; j += 8)
			*count_ptr += dpaa_bp_add_8_bufs(dpaa_bp);
	}
	return 0;
}

/* Add buffers/(pages) for Rx processing whenever bpool count falls below
 * REFILL_THRESHOLD.
 */
static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr)
{
	int count = *countptr;
	int new_bufs;

	if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
		do {
			new_bufs = dpaa_bp_add_8_bufs(dpaa_bp);
			if (unlikely(!new_bufs)) {
				/* Avoid looping forever if we've temporarily
				 * run out of memory. We'll try again at the
				 * next NAPI cycle.
				 */
				break;
			}
			count += new_bufs;
		} while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);

		*countptr = count;
		if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
			return -ENOMEM;
	}

	return 0;
}

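/* Refill the current CPU's allotment of the interface's buffer pool. */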
static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
{
	struct dpaa_bp *dpaa_bp;
	int *countptr;
	int res;

	dpaa_bp = priv->dpaa_bp;
	if (!dpaa_bp)
		return -EINVAL;
	countptr = this_cpu_ptr(dpaa_bp->percpu_count);
	res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
	if (res)
		return res;

	return 0;
}

/* Cleanup function for outgoing frame descriptors that were built on Tx path,
 * either contiguous frames or scatter/gather ones.
 * Skb freeing is not handled here.
 *
 * This function may be called on error paths in the Tx function, so guard
 * against cases when not all fd relevant fields were filled in. To avoid
 * reading the invalid transmission timestamp for the error paths set ts to
 * false.
 *
 * Return the skb backpointer, since for S/G frames the buffer containing it
 * gets freed here.
 */
static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
					  const struct qm_fd *fd, bool ts)
{
	const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
	struct device *dev = priv->net_dev->dev.parent;
	struct skb_shared_hwtstamps shhwtstamps;
	dma_addr_t addr = qm_fd_addr(fd);
	void *vaddr = phys_to_virt(addr);
	const struct qm_sg_entry *sgt;
	struct sk_buff *skb;
	u64 ns;
	int i;

	if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
		dma_unmap_page(priv->tx_dma_dev, addr,
			       qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
			       dma_dir);

		/* The sgt buffer has been allocated with netdev_alloc_frag(),
		 * it's from lowmem.
		 */
		sgt = vaddr + qm_fd_get_offset(fd);

		/* sgt[0] is from lowmem, was dma_map_single()-ed */
		dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
				 qm_sg_entry_get_len(&sgt[0]), dma_dir);

		/* remaining pages were mapped with skb_frag_dma_map() */
		for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) &&
		     !qm_sg_entry_is_final(&sgt[i - 1]); i++) {
			WARN_ON(qm_sg_entry_is_ext(&sgt[i]));

			dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
				       qm_sg_entry_get_len(&sgt[i]), dma_dir);
		}
	} else {
		dma_unmap_single(priv->tx_dma_dev, addr,
				 priv->tx_headroom + qm_fd_get_length(fd),
				 dma_dir);
	}

	skb = *(struct sk_buff **)vaddr;

	/* DMA unmapping is required before accessing the HW provided info */
	if (ts && priv->tx_tstamp &&
	    skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));

		if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr,
					  &ns)) {
			shhwtstamps.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &shhwtstamps);
		} else {
			dev_warn(dev, "fman_port_get_tstamp failed!\n");
		}
	}

	if (qm_fd_get_format(fd) == qm_fd_sg)
		/* Free the page that we allocated on Tx for the SGT */
		free_pages((unsigned long)vaddr, 0);

	return skb;
}

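/* Decide the skb->ip_summed value for a received frame, based on whether
 * the FMan parser validated the L4 checksum.
 */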
static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
{
	/* The parser has run and performed L4 checksum validation.
	 * We know there were no parser errors (and implicitly no
	 * L4 csum error), otherwise we wouldn't be here.
	 */
	if ((priv->net_dev->features & NETIF_F_RXCSUM) &&
	    (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV))
		return CHECKSUM_UNNECESSARY;

	/* We're here because either the parser didn't run or the L4 checksum
	 * was not verified. This may include the case of a UDP frame with
	 * checksum zero or an L4 proto other than TCP/UDP
	 */
	return CHECKSUM_NONE;
}

#define PTR_IS_ALIGNED(x, a) (IS_ALIGNED((unsigned long)(x), (a)))

/* Build a linear skb around the received buffer.
 * We are guaranteed there is enough room at the end of the data buffer to
 * accommodate the shared info area of the skb.
 */
static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
					const struct qm_fd *fd)
{
	ssize_t fd_off = qm_fd_get_offset(fd);
	dma_addr_t addr = qm_fd_addr(fd);
	struct dpaa_bp *dpaa_bp;
	struct sk_buff *skb;
	void *vaddr;

	vaddr = phys_to_virt(addr);
	WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));

	dpaa_bp = dpaa_bpid2pool(fd->bpid);
	if (!dpaa_bp)
		goto free_buffer;

	skb = build_skb(vaddr, dpaa_bp->size +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (WARN_ONCE(!skb, "Build skb failure on Rx\n"))
		goto free_buffer;
	WARN_ON(fd_off != priv->rx_headroom);
	skb_reserve(skb, fd_off);
	skb_put(skb, qm_fd_get_length(fd));

	skb->ip_summed = rx_csum_offload(priv, fd);

	return skb;

free_buffer:
	free_pages((unsigned long)vaddr, 0);
	return NULL;
}

/* Build an skb with the data of the first S/G entry in the linear portion and
 * the rest of the frame as skb fragments.
 *
 * The page fragment holding the S/G Table is recycled here.
 */
static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
				    const struct qm_fd *fd)
{
	ssize_t fd_off = qm_fd_get_offset(fd);
	dma_addr_t addr = qm_fd_addr(fd);
	const struct qm_sg_entry *sgt;
	struct page *page, *head_page;
	struct dpaa_bp *dpaa_bp;
	void *vaddr, *sg_vaddr;
	int frag_off, frag_len;
	struct sk_buff *skb;
	dma_addr_t sg_addr;
	int page_offset;
	unsigned int sz;
	int *count_ptr;
	int i, j;

	vaddr = phys_to_virt(addr);
	WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));

	/* Iterate through the SGT entries and add data buffers to the skb */
	sgt = vaddr + fd_off;
	skb = NULL;
	for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
		/* Extension bit is not supported */
		WARN_ON(qm_sg_entry_is_ext(&sgt[i]));

		sg_addr = qm_sg_addr(&sgt[i]);
		sg_vaddr = phys_to_virt(sg_addr);
		WARN_ON(!PTR_IS_ALIGNED(sg_vaddr, SMP_CACHE_BYTES));

		dma_unmap_page(priv->rx_dma_dev, sg_addr,
			       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);

		/* We may use multiple Rx pools */
		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
		if (!dpaa_bp)
			goto free_buffers;

		if (!skb) {
			sz = dpaa_bp->size +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			skb = build_skb(sg_vaddr, sz);
			if (WARN_ON(!skb))
				goto free_buffers;

			skb->ip_summed = rx_csum_offload(priv, fd);

			/* Make sure forwarded skbs will have enough space
			 * on Tx, if extra headers are added.
			 */
			WARN_ON(fd_off != priv->rx_headroom);
			skb_reserve(skb, fd_off);
			skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
		} else {
			/* Not the first S/G entry; all data from buffer will
			 * be added in an skb fragment; fragment index is offset
			 * by one since first S/G entry was incorporated in the
			 * linear part of the skb.
			 *
			 * Caution: 'page' may be a tail page.
			 */
			page = virt_to_page(sg_vaddr);
			head_page = virt_to_head_page(sg_vaddr);

			/* Compute offset in (possibly tail) page */
			page_offset = ((unsigned long)sg_vaddr &
					(PAGE_SIZE - 1)) +
				(page_address(page) - page_address(head_page));
			/* page_offset only refers to the beginning of sgt[i];
			 * but the buffer itself may have an internal offset.
			 */
			frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
			frag_len = qm_sg_entry_get_len(&sgt[i]);
1826 /* skb_add_rx_frag() does no checking on the page; if
1827 * we pass it a tail page, we'll end up with
1828 * bad page accounting and eventually with segafults.
1829 */
1830 skb_add_rx_frag(skb, i - 1, head_page, frag_off,
1831 frag_len, dpaa_bp->size);
1832 }
1833
1834 /* Update the pool count for the current {cpu x bpool} */
1835 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1836 (*count_ptr)--;
1837
1838 if (qm_sg_entry_is_final(&sgt[i]))
1839 break;
1840 }
1841 WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
1842
1843 /* free the SG table buffer */
1844 free_pages((unsigned long)vaddr, 0);
1845
1846 return skb;
1847
1848 free_buffers:
1849 /* free all the SG entries */
1850 for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) {
1851 sg_addr = qm_sg_addr(&sgt[j]);
1852 sg_vaddr = phys_to_virt(sg_addr);
1853 /* all pages 0..i were unmaped */
1854 if (j > i)
1855 dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]),
1856 DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
1857 free_pages((unsigned long)sg_vaddr, 0);
1858 /* counters 0..i-1 were decremented */
1859 if (j >= i) {
1860 dpaa_bp = dpaa_bpid2pool(sgt[j].bpid);
1861 if (dpaa_bp) {
1862 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1863 (*count_ptr)--;
1864 }
1865 }
1866
1867 if (qm_sg_entry_is_final(&sgt[j]))
1868 break;
1869 }
1870 /* free the SGT fragment */
1871 free_pages((unsigned long)vaddr, 0);
1872
1873 return NULL;
1874 }

static int skb_to_contig_fd(struct dpaa_priv *priv,
			    struct sk_buff *skb, struct qm_fd *fd,
			    int *offset)
{
	struct net_device *net_dev = priv->net_dev;
	enum dma_data_direction dma_dir;
	unsigned char *buff_start;
	struct sk_buff **skbh;
	dma_addr_t addr;
	int err;

	/* We are guaranteed to have at least tx_headroom bytes
	 * available, so just use that for offset.
	 */
	fd->bpid = FSL_DPAA_BPID_INV;
	buff_start = skb->data - priv->tx_headroom;
	dma_dir = DMA_TO_DEVICE;

	skbh = (struct sk_buff **)buff_start;
	*skbh = skb;

	/* Enable L3/L4 hardware checksum computation.
	 *
	 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
	 * need to write into the skb.
	 */
	err = dpaa_enable_tx_csum(priv, skb, fd,
				  buff_start + DPAA_TX_PRIV_DATA_SIZE);
	if (unlikely(err < 0)) {
		if (net_ratelimit())
			netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
				  err);
		return err;
	}

	/* Fill in the rest of the FD fields */
	qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
	fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);

	/* Map the entire buffer size that may be seen by FMan, but no more */
	addr = dma_map_single(priv->tx_dma_dev, buff_start,
			      priv->tx_headroom + skb->len, dma_dir);
	if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
		if (net_ratelimit())
			netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
		return -EINVAL;
	}
	qm_fd_addr_set64(fd, addr);

	return 0;
}

static int skb_to_sg_fd(struct dpaa_priv *priv,
			struct sk_buff *skb, struct qm_fd *fd)
{
	const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
	const int nr_frags = skb_shinfo(skb)->nr_frags;
	struct net_device *net_dev = priv->net_dev;
	struct qm_sg_entry *sgt;
	struct sk_buff **skbh;
	void *buff_start;
	skb_frag_t *frag;
	dma_addr_t addr;
	size_t frag_len;
	struct page *p;
	int i, j, err;

	/* get a page to store the SGTable */
	p = dev_alloc_pages(0);
	if (unlikely(!p)) {
		netdev_err(net_dev, "dev_alloc_pages() failed\n");
		return -ENOMEM;
	}
	buff_start = page_address(p);

	/* Enable L3/L4 hardware checksum computation.
	 *
	 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
	 * need to write into the skb.
	 */
	err = dpaa_enable_tx_csum(priv, skb, fd,
				  buff_start + DPAA_TX_PRIV_DATA_SIZE);
	if (unlikely(err < 0)) {
		if (net_ratelimit())
			netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
				  err);
		goto csum_failed;
	}

	/* SGT[0] is used by the linear part */
	sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom);
	frag_len = skb_headlen(skb);
	qm_sg_entry_set_len(&sgt[0], frag_len);
	sgt[0].bpid = FSL_DPAA_BPID_INV;
	sgt[0].offset = 0;
	addr = dma_map_single(priv->tx_dma_dev, skb->data,
			      skb_headlen(skb), dma_dir);
	if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
		netdev_err(priv->net_dev, "DMA mapping failed\n");
		err = -EINVAL;
		goto sg0_map_failed;
	}
	qm_sg_entry_set64(&sgt[0], addr);

	/* populate the rest of SGT entries */
	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		frag_len = skb_frag_size(frag);
		WARN_ON(!skb_frag_page(frag));
		addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0,
					frag_len, dma_dir);
		if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
			netdev_err(priv->net_dev, "DMA mapping failed\n");
			err = -EINVAL;
			goto sg_map_failed;
		}

		qm_sg_entry_set_len(&sgt[i + 1], frag_len);
		sgt[i + 1].bpid = FSL_DPAA_BPID_INV;
		sgt[i + 1].offset = 0;

		/* keep the offset in the address */
		qm_sg_entry_set64(&sgt[i + 1], addr);
	}

	/* Set the final bit in the last used entry of the SGT */
	qm_sg_entry_set_f(&sgt[nr_frags], frag_len);

	/* set fd offset to priv->tx_headroom */
	qm_fd_set_sg(fd, priv->tx_headroom, skb->len);

	/* DMA map the SGT page */
	skbh = (struct sk_buff **)buff_start;
	*skbh = skb;

	addr = dma_map_page(priv->tx_dma_dev, p, 0,
			    priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
	if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
		netdev_err(priv->net_dev, "DMA mapping failed\n");
		err = -EINVAL;
		goto sgt_map_failed;
	}

	fd->bpid = FSL_DPAA_BPID_INV;
	fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
	qm_fd_addr_set64(fd, addr);

	return 0;

sgt_map_failed:
sg_map_failed:
	for (j = 0; j < i; j++)
		dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]),
			       qm_sg_entry_get_len(&sgt[j]), dma_dir);
sg0_map_failed:
csum_failed:
	free_pages((unsigned long)buff_start, 0);

	return err;
}
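
/* For reference, a sketch of the Tx buffer layout that skb_to_sg_fd() builds
 * in the freshly allocated page (derived from the code above; field widths
 * are illustrative, not to scale):
 *
 *	page_address(p)
 *	+----------------------+---------------------------------------+
 *	| tx_headroom          | SGT                                   |
 *	| [0]: skb backpointer | sgt[0]    -> skb linear data          |
 *	| ...: parse results   | sgt[1..n] -> skb frags 0..n-1         |
 *	|      (csum offload)  | sgt[n] carries the Final bit          |
 *	+----------------------+---------------------------------------+
 *
 * The FD then points at this page with offset tx_headroom and format S/G,
 * so FMan reads the SGT while the confirmation path can recover the skb
 * from the backpointer stored at the start of the buffer.
 */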

static inline int dpaa_xmit(struct dpaa_priv *priv,
			    struct rtnl_link_stats64 *percpu_stats,
			    int queue,
			    struct qm_fd *fd)
{
	struct qman_fq *egress_fq;
	int err, i;

	egress_fq = priv->egress_fqs[queue];
	/* For buffers not taken from a buffer pool, have FMan confirm the
	 * frame back to us on the queue's Tx confirmation FQ, whose id is
	 * carried in the FD command field.
	 */
	if (fd->bpid == FSL_DPAA_BPID_INV)
		fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));

	/* Trace this Tx fd */
	trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);

	/* Retry enqueues that fail with -EBUSY (e.g. a temporarily full
	 * enqueue ring) a bounded number of times before giving up.
	 */
	for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) {
		err = qman_enqueue(egress_fq, fd);
		if (err != -EBUSY)
			break;
	}

	if (unlikely(err < 0)) {
		percpu_stats->tx_fifo_errors++;
		return err;
	}

	percpu_stats->tx_packets++;
	percpu_stats->tx_bytes += qm_fd_get_length(fd);

	return 0;
}

#ifdef CONFIG_DPAA_ERRATUM_A050385
static int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct sk_buff *new_skb, *skb = *s;
	unsigned char *start, i;

	/* check linear buffer alignment */
	if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN))
		goto workaround;

	/* linear buffers just need to have an aligned start */
	if (!skb_is_nonlinear(skb))
		return 0;

	/* linear data size for nonlinear skbs needs to be aligned */
	if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN))
		goto workaround;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		/* all fragments need to have aligned start addresses */
		if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN))
			goto workaround;

		/* all but last fragment need to have aligned sizes */
		if (!IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN) &&
		    (i < skb_shinfo(skb)->nr_frags - 1))
			goto workaround;
	}

	return 0;

workaround:
	/* copy all the skb content into a new linear buffer */
	new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 +
				   priv->tx_headroom);
	if (!new_skb)
		return -ENOMEM;

	/* NET_SKB_PAD bytes already reserved, adding up to tx_headroom */
	skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD);

	/* Workaround for DPAA_A050385 requires data start to be aligned */
	start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN);
	if (start - new_skb->data)
		skb_reserve(new_skb, start - new_skb->data);

	skb_put(new_skb, skb->len);
	skb_copy_bits(skb, 0, new_skb->data, skb->len);
	skb_copy_header(new_skb, skb);
	new_skb->dev = skb->dev;

	/* Copy relevant timestamp info from the old skb to the new */
	if (priv->tx_tstamp) {
		skb_shinfo(new_skb)->tx_flags = skb_shinfo(skb)->tx_flags;
		skb_shinfo(new_skb)->hwtstamps = skb_shinfo(skb)->hwtstamps;
		skb_shinfo(new_skb)->tskey = skb_shinfo(skb)->tskey;
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
	}

	/* We move the headroom when we align it so we have to reset the
	 * network and transport header offsets relative to the new data
	 * pointer. The checksum offload relies on these offsets.
	 */
	skb_set_network_header(new_skb, skb_network_offset(skb));
	skb_set_transport_header(new_skb, skb_transport_offset(skb));

	dev_kfree_skb(skb);
	*s = new_skb;

	return 0;
}
#endif
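
/* A quick summary of the checks above (illustrative; DPAA_A050385_ALIGN is
 * the required alignment):
 *
 *   - linear skb:    only skb->data must be aligned
 *   - nonlinear skb: skb->data, skb_headlen(), every fragment offset, and
 *                    every fragment size except the last must be aligned
 *
 * Any violation falls back to copying the whole frame into one freshly
 * allocated, aligned linear buffer, at the cost of an extra copy per packet.
 */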

static netdev_tx_t
dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
	const int queue_mapping = skb_get_queue_mapping(skb);
	bool nonlinear = skb_is_nonlinear(skb);
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa_percpu_priv *percpu_priv;
	struct netdev_queue *txq;
	struct dpaa_priv *priv;
	struct qm_fd fd;
	int offset = 0;
	int err = 0;

	priv = netdev_priv(net_dev);
	percpu_priv = this_cpu_ptr(priv->percpu_priv);
	percpu_stats = &percpu_priv->stats;

	qm_fd_clear_fd(&fd);

	if (!nonlinear) {
		/* We're going to store the skb backpointer at the beginning
		 * of the data buffer, so we need a privately owned skb.
		 *
		 * We've made sure skb is not shared in dev->priv_flags;
		 * we need to verify the skb head is not cloned.
		 */
		if (skb_cow_head(skb, priv->tx_headroom))
			goto enomem;

		WARN_ON(skb_is_nonlinear(skb));
	}

	/* MAX_SKB_FRAGS is equal to or larger than our DPAA_SGT_MAX_ENTRIES;
	 * make sure we don't feed FMan with more fragments than it supports.
	 */
	if (unlikely(nonlinear &&
		     (skb_shinfo(skb)->nr_frags >= DPAA_SGT_MAX_ENTRIES))) {
		/* If the egress skb contains more fragments than we support
		 * we have no choice but to linearize it ourselves.
		 */
		if (__skb_linearize(skb))
			goto enomem;

		nonlinear = skb_is_nonlinear(skb);
	}

#ifdef CONFIG_DPAA_ERRATUM_A050385
	if (unlikely(fman_has_errata_a050385())) {
		if (dpaa_a050385_wa(net_dev, &skb))
			goto enomem;
		nonlinear = skb_is_nonlinear(skb);
	}
#endif

	if (nonlinear) {
		/* Just create a S/G fd based on the skb */
		err = skb_to_sg_fd(priv, skb, &fd);
		percpu_priv->tx_frag_skbuffs++;
	} else {
		/* Create a contig FD from this skb */
		err = skb_to_contig_fd(priv, skb, &fd, &offset);
	}
	if (unlikely(err < 0))
		goto skb_to_fd_failed;

	txq = netdev_get_tx_queue(net_dev, queue_mapping);

	/* LLTX requires us to do our own update of trans_start */
	txq->trans_start = jiffies;

	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	}

	if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
		return NETDEV_TX_OK;

	dpaa_cleanup_tx_fd(priv, &fd, false);
skb_to_fd_failed:
enomem:
	percpu_stats->tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void dpaa_rx_error(struct net_device *net_dev,
			  const struct dpaa_priv *priv,
			  struct dpaa_percpu_priv *percpu_priv,
			  const struct qm_fd *fd,
			  u32 fqid)
{
	if (net_ratelimit())
		netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
			  be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS);

	percpu_priv->stats.rx_errors++;

	if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA)
		percpu_priv->rx_errors.dme++;
	if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL)
		percpu_priv->rx_errors.fpe++;
	if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE)
		percpu_priv->rx_errors.fse++;
	if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR)
		percpu_priv->rx_errors.phe++;

	dpaa_fd_release(net_dev, fd);
}

static void dpaa_tx_error(struct net_device *net_dev,
			  const struct dpaa_priv *priv,
			  struct dpaa_percpu_priv *percpu_priv,
			  const struct qm_fd *fd,
			  u32 fqid)
{
	struct sk_buff *skb;

	if (net_ratelimit())
		netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
			   be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS);

	percpu_priv->stats.tx_errors++;

	skb = dpaa_cleanup_tx_fd(priv, fd, false);
	dev_kfree_skb(skb);
}

static int dpaa_eth_poll(struct napi_struct *napi, int budget)
{
	struct dpaa_napi_portal *np =
			container_of(napi, struct dpaa_napi_portal, napi);

	int cleaned = qman_p_poll_dqrr(np->p, budget);

	if (cleaned < budget) {
		napi_complete_done(napi, cleaned);
		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
	} else if (np->down) {
		/* The interface is going down, so NAPI won't be rescheduled;
		 * re-arm the portal interrupt even though the budget was
		 * exhausted, so the IRQ source is not left disabled.
		 */
		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
	}

	return cleaned;
}

static void dpaa_tx_conf(struct net_device *net_dev,
			 const struct dpaa_priv *priv,
			 struct dpaa_percpu_priv *percpu_priv,
			 const struct qm_fd *fd,
			 u32 fqid)
{
	struct sk_buff *skb;

	if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) {
		if (net_ratelimit())
			netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
				   be32_to_cpu(fd->status) &
				   FM_FD_STAT_TX_ERRORS);

		percpu_priv->stats.tx_errors++;
	}

	percpu_priv->tx_confirm++;

	skb = dpaa_cleanup_tx_fd(priv, fd, true);

	consume_skb(skb);
}

static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
					 struct qman_portal *portal)
{
	if (unlikely(in_irq() || !in_serving_softirq())) {
		/* Disable QMan IRQ and invoke NAPI */
		qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);

		percpu_priv->np.p = portal;
		napi_schedule(&percpu_priv->np.napi);
		percpu_priv->in_interrupt++;
		return 1;
	}
	return 0;
}

static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
					      struct qman_fq *fq,
					      const struct qm_dqrr_entry *dq)
{
	struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
	struct dpaa_percpu_priv *percpu_priv;
	struct net_device *net_dev;
	struct dpaa_bp *dpaa_bp;
	struct dpaa_priv *priv;

	net_dev = dpaa_fq->net_dev;
	priv = netdev_priv(net_dev);
	dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
	if (!dpaa_bp)
		return qman_cb_dqrr_consume;

	percpu_priv = this_cpu_ptr(priv->percpu_priv);

	if (dpaa_eth_napi_schedule(percpu_priv, portal))
		return qman_cb_dqrr_stop;

	dpaa_eth_refill_bpools(priv);
	dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);

	return qman_cb_dqrr_consume;
}

static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
						struct qman_fq *fq,
						const struct qm_dqrr_entry *dq)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa_percpu_priv *percpu_priv;
	const struct qm_fd *fd = &dq->fd;
	dma_addr_t addr = qm_fd_addr(fd);
	enum qm_fd_format fd_format;
	struct net_device *net_dev;
	u32 fd_status, hash_offset;
	struct dpaa_bp *dpaa_bp;
	struct dpaa_priv *priv;
	unsigned int skb_len;
	struct sk_buff *skb;
	int *count_ptr;
	void *vaddr;
	u64 ns;

	fd_status = be32_to_cpu(fd->status);
	fd_format = qm_fd_get_format(fd);
	net_dev = ((struct dpaa_fq *)fq)->net_dev;
	priv = netdev_priv(net_dev);
	dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
	if (!dpaa_bp)
		return qman_cb_dqrr_consume;

	/* Trace the Rx fd */
	trace_dpaa_rx_fd(net_dev, fq, &dq->fd);

	percpu_priv = this_cpu_ptr(priv->percpu_priv);
	percpu_stats = &percpu_priv->stats;

	if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
		return qman_cb_dqrr_stop;

	/* Make sure we didn't run out of buffers */
	if (unlikely(dpaa_eth_refill_bpools(priv))) {
		/* Unable to refill the buffer pool due to insufficient
		 * system memory. Just release the frame back into the pool,
		 * otherwise we'll soon end up with an empty buffer pool.
		 */
		dpaa_fd_release(net_dev, &dq->fd);
		return qman_cb_dqrr_consume;
	}

	if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS)) {
		if (net_ratelimit())
			netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
				   fd_status & FM_FD_STAT_RX_ERRORS);

		percpu_stats->rx_errors++;
		dpaa_fd_release(net_dev, fd);
		return qman_cb_dqrr_consume;
	}

	dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
		       DMA_FROM_DEVICE);

	/* prefetch the first 64 bytes of the frame or the SGT start */
	vaddr = phys_to_virt(addr);
	prefetch(vaddr + qm_fd_get_offset(fd));

	/* The only FD types that we may receive are contig and S/G */
	WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));

	/* Account for either the contig buffer or the SGT buffer (depending on
	 * which case we were in) having been removed from the pool.
	 */
	count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
	(*count_ptr)--;

	if (likely(fd_format == qm_fd_contig))
		skb = contig_fd_to_skb(priv, fd);
	else
		skb = sg_fd_to_skb(priv, fd);
	if (!skb)
		return qman_cb_dqrr_consume;

	if (priv->rx_tstamp) {
		shhwtstamps = skb_hwtstamps(skb);
		memset(shhwtstamps, 0, sizeof(*shhwtstamps));

		if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns))
			shhwtstamps->hwtstamp = ns_to_ktime(ns);
		else
			dev_warn(net_dev->dev.parent, "fman_port_get_tstamp failed!\n");
	}

	skb->protocol = eth_type_trans(skb, net_dev);

	if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
	    !fman_port_get_hash_result_offset(priv->mac_dev->port[RX],
					      &hash_offset)) {
		enum pkt_hash_types type;

		/* if L4 exists, it was used in the hash generation */
		type = be32_to_cpu(fd->status) & FM_FD_STAT_L4CV ?
			PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
		skb_set_hash(skb, be32_to_cpu(*(u32 *)(vaddr + hash_offset)),
			     type);
	}

	skb_len = skb->len;

	if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
		percpu_stats->rx_dropped++;
		return qman_cb_dqrr_consume;
	}

	percpu_stats->rx_packets++;
	percpu_stats->rx_bytes += skb_len;

	return qman_cb_dqrr_consume;
}

static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
						struct qman_fq *fq,
						const struct qm_dqrr_entry *dq)
{
	struct dpaa_percpu_priv *percpu_priv;
	struct net_device *net_dev;
	struct dpaa_priv *priv;

	net_dev = ((struct dpaa_fq *)fq)->net_dev;
	priv = netdev_priv(net_dev);

	percpu_priv = this_cpu_ptr(priv->percpu_priv);

	if (dpaa_eth_napi_schedule(percpu_priv, portal))
		return qman_cb_dqrr_stop;

	dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);

	return qman_cb_dqrr_consume;
}

static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
					       struct qman_fq *fq,
					       const struct qm_dqrr_entry *dq)
{
	struct dpaa_percpu_priv *percpu_priv;
	struct net_device *net_dev;
	struct dpaa_priv *priv;

	net_dev = ((struct dpaa_fq *)fq)->net_dev;
	priv = netdev_priv(net_dev);

	/* Trace the fd */
	trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd);

	percpu_priv = this_cpu_ptr(priv->percpu_priv);

	if (dpaa_eth_napi_schedule(percpu_priv, portal))
		return qman_cb_dqrr_stop;

	dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);

	return qman_cb_dqrr_consume;
}

static void egress_ern(struct qman_portal *portal,
		       struct qman_fq *fq,
		       const union qm_mr_entry *msg)
{
	const struct qm_fd *fd = &msg->ern.fd;
	struct dpaa_percpu_priv *percpu_priv;
	const struct dpaa_priv *priv;
	struct net_device *net_dev;
	struct sk_buff *skb;

	net_dev = ((struct dpaa_fq *)fq)->net_dev;
	priv = netdev_priv(net_dev);
	percpu_priv = this_cpu_ptr(priv->percpu_priv);

	percpu_priv->stats.tx_dropped++;
	percpu_priv->stats.tx_fifo_errors++;
	count_ern(percpu_priv, msg);

	skb = dpaa_cleanup_tx_fd(priv, fd, false);
	dev_kfree_skb_any(skb);
}

static const struct dpaa_fq_cbs dpaa_fq_cbs = {
	.rx_defq = { .cb = { .dqrr = rx_default_dqrr } },
	.tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } },
	.rx_errq = { .cb = { .dqrr = rx_error_dqrr } },
	.tx_errq = { .cb = { .dqrr = conf_error_dqrr } },
	.egress_ern = { .cb = { .ern = egress_ern } }
};

static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
{
	struct dpaa_percpu_priv *percpu_priv;
	int i;

	for_each_online_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

		percpu_priv->np.down = 0;
		napi_enable(&percpu_priv->np.napi);
	}
}

static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
{
	struct dpaa_percpu_priv *percpu_priv;
	int i;

	for_each_online_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

		percpu_priv->np.down = 1;
		napi_disable(&percpu_priv->np.napi);
	}
}

static void dpaa_adjust_link(struct net_device *net_dev)
{
	struct mac_device *mac_dev;
	struct dpaa_priv *priv;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;
	mac_dev->adjust_link(mac_dev);
}

/* The Aquantia PHYs are capable of performing rate adaptation */
#define PHY_VEND_AQUANTIA	0x03a1b400

static int dpaa_phy_init(struct net_device *net_dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct mac_device *mac_dev;
	struct phy_device *phy_dev;
	struct dpaa_priv *priv;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

	phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
				 &dpaa_adjust_link, 0,
				 mac_dev->phy_if);
	if (!phy_dev) {
		netif_err(priv, ifup, net_dev, "init_phy() failed\n");
		return -ENODEV;
	}

	/* Unless the PHY is capable of rate adaptation */
	if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
	    ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
		/* remove any features not supported by the controller */
		ethtool_convert_legacy_u32_to_link_mode(mask,
							mac_dev->if_support);
		linkmode_and(phy_dev->supported, phy_dev->supported, mask);
	}

	phy_support_asym_pause(phy_dev);

	mac_dev->phy_dev = phy_dev;
	net_dev->phydev = phy_dev;

	return 0;
}

static int dpaa_open(struct net_device *net_dev)
{
	struct mac_device *mac_dev;
	struct dpaa_priv *priv;
	int err, i;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;
	dpaa_eth_napi_enable(priv);

	err = dpaa_phy_init(net_dev);
	if (err)
		goto phy_init_failed;

	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
		err = fman_port_enable(mac_dev->port[i]);
		if (err)
			goto mac_start_failed;
	}

	err = priv->mac_dev->start(mac_dev);
	if (err < 0) {
		netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
		goto mac_start_failed;
	}

	netif_tx_start_all_queues(net_dev);

	return 0;

mac_start_failed:
	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
		fman_port_disable(mac_dev->port[i]);

phy_init_failed:
	dpaa_eth_napi_disable(priv);

	return err;
}

static int dpaa_eth_stop(struct net_device *net_dev)
{
	struct dpaa_priv *priv;
	int err;

	err = dpaa_stop(net_dev);

	priv = netdev_priv(net_dev);
	dpaa_eth_napi_disable(priv);

	return err;
}

static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct dpaa_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;

	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		/* Rx and Tx timestamping cannot be disabled separately in
		 * hardware; only clear the driver-level flag here.
		 */
		priv->tx_tstamp = false;
		break;
	case HWTSTAMP_TX_ON:
		priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
		priv->tx_tstamp = true;
		break;
	default:
		return -ERANGE;
	}

	if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
		/* Rx and Tx timestamping cannot be disabled separately in
		 * hardware; only clear the driver-level flag here.
		 */
		priv->rx_tstamp = false;
	} else {
		priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
		priv->rx_tstamp = true;
		/* TS is set for all frame types, not only those requested */
		config.rx_filter = HWTSTAMP_FILTER_ALL;
	}

	return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
			-EFAULT : 0;
}
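
/* For illustration only: a minimal userspace sketch of how this handler is
 * typically reached, via the standard SIOCSHWTSTAMP ioctl from
 * <linux/net_tstamp.h> (hypothetical "eth0" interface name; error handling
 * omitted):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *	int sock = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock, SIOCSHWTSTAMP, &ifr);
 *
 * Note that on return cfg.rx_filter may have been widened to
 * HWTSTAMP_FILTER_ALL, as done above for any filter other than NONE.
 */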

static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
{
	int ret = -EINVAL;

	if (cmd == SIOCGMIIREG) {
		if (net_dev->phydev)
			return phy_mii_ioctl(net_dev->phydev, rq, cmd);
	}

	if (cmd == SIOCSHWTSTAMP)
		return dpaa_ts_ioctl(net_dev, rq, cmd);

	return ret;
}

static const struct net_device_ops dpaa_ops = {
	.ndo_open = dpaa_open,
	.ndo_start_xmit = dpaa_start_xmit,
	.ndo_stop = dpaa_eth_stop,
	.ndo_tx_timeout = dpaa_tx_timeout,
	.ndo_get_stats64 = dpaa_get_stats64,
	.ndo_change_carrier = fixed_phy_change_carrier,
	.ndo_set_mac_address = dpaa_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = dpaa_set_rx_mode,
	.ndo_do_ioctl = dpaa_ioctl,
	.ndo_setup_tc = dpaa_setup_tc,
};

static int dpaa_napi_add(struct net_device *net_dev)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct dpaa_percpu_priv *percpu_priv;
	int cpu;

	for_each_possible_cpu(cpu) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);

		netif_napi_add(net_dev, &percpu_priv->np.napi,
			       dpaa_eth_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}

static void dpaa_napi_del(struct net_device *net_dev)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct dpaa_percpu_priv *percpu_priv;
	int cpu;

	for_each_possible_cpu(cpu) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);

		netif_napi_del(&percpu_priv->np.napi);
	}
}

static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
				   struct bm_buffer *bmb)
{
	dma_addr_t addr = bm_buf_addr(bmb);

	dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
		       DMA_FROM_DEVICE);

	skb_free_frag(phys_to_virt(addr));
}

/* Alloc the dpaa_bp struct and configure default values */
static struct dpaa_bp *dpaa_bp_alloc(struct device *dev)
{
	struct dpaa_bp *dpaa_bp;

	dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL);
	if (!dpaa_bp)
		return ERR_PTR(-ENOMEM);

	dpaa_bp->bpid = FSL_DPAA_BPID_INV;
	dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count);
	if (!dpaa_bp->percpu_count)
		return ERR_PTR(-ENOMEM);

	dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;

	dpaa_bp->seed_cb = dpaa_bp_seed;
	dpaa_bp->free_buf_cb = dpaa_bp_free_pf;

	return dpaa_bp;
}

/* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.
 * We won't be sending congestion notifications to FMan; for now, we just use
 * this CGR to generate enqueue rejections to FMan in order to drop the frames
 * before they reach our ingress queues and eat up memory.
 */
static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
{
	struct qm_mcc_initcgr initcgr;
	u32 cs_th;
	int err;

	err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
	if (err < 0) {
		if (netif_msg_drv(priv))
			pr_err("Error %d allocating CGR ID\n", err);
		goto out_error;
	}

	/* Enable CS TD, but disable Congestion State Change Notifications. */
	memset(&initcgr, 0, sizeof(initcgr));
	initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
	initcgr.cgr.cscn_en = QM_CGR_EN;
	cs_th = DPAA_INGRESS_CS_THRESHOLD;
	qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);

	initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
	initcgr.cgr.cstd_en = QM_CGR_EN;

	/* This CGR will be associated with the SWP affined to the current CPU.
	 * However, we'll place all our ingress FQs in it.
	 */
	err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
			      &initcgr);
	if (err < 0) {
		if (netif_msg_drv(priv))
			pr_err("Error %d creating ingress CGR with ID %d\n",
			       err, priv->ingress_cgr.cgrid);
		qman_release_cgrid(priv->ingress_cgr.cgrid);
		goto out_error;
	}
	if (netif_msg_drv(priv))
		pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
			 priv->ingress_cgr.cgrid, priv->mac_dev->addr);

	priv->use_ingress_cgr = true;

out_error:
	return err;
}

static u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl,
			     enum port_type port)
{
	u16 headroom;

	/* The frame headroom must accommodate:
	 * - the driver private data area
	 * - parse results, hash results, timestamp if selected
	 * If either hash results or timestamps are selected, both will
	 * be copied to/from the frame headroom, as TS is located between PR
	 * and HR in the IC and the IC copy size has a granularity of 16 bytes
	 * (see the description of the FMBM_RICP and FMBM_TICP registers in
	 * the DPAARM).
	 *
	 * Also make sure the headroom is a multiple of data_align bytes.
	 */
	headroom = (u16)(bl[port].priv_data_size + DPAA_HWA_SIZE);

	if (port == RX)
		return ALIGN(headroom, DPAA_FD_RX_DATA_ALIGNMENT);
	else
		return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
}
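
/* A worked example with hypothetical values, to make the arithmetic concrete:
 * with bl[RX].priv_data_size = 16, DPAA_HWA_SIZE = 64 and
 * DPAA_FD_RX_DATA_ALIGNMENT = 64, the raw headroom is 16 + 64 = 80 bytes,
 * which ALIGN() rounds up to 128. The actual constants are defined in the
 * driver headers and may differ.
 */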

static int dpaa_eth_probe(struct platform_device *pdev)
{
	struct net_device *net_dev = NULL;
	struct dpaa_bp *dpaa_bp = NULL;
	struct dpaa_fq *dpaa_fq, *tmp;
	struct dpaa_priv *priv = NULL;
	struct fm_port_fqs port_fqs;
	struct mac_device *mac_dev;
	int err = 0, channel;
	struct device *dev;

	dev = &pdev->dev;

	err = bman_is_probed();
	if (!err)
		return -EPROBE_DEFER;
	if (err < 0) {
		dev_err(dev, "failing probe due to bman probe error\n");
		return -ENODEV;
	}
	err = qman_is_probed();
	if (!err)
		return -EPROBE_DEFER;
	if (err < 0) {
		dev_err(dev, "failing probe due to qman probe error\n");
		return -ENODEV;
	}
	err = bman_portals_probed();
	if (!err)
		return -EPROBE_DEFER;
	if (err < 0) {
		dev_err(dev,
			"failing probe due to bman portals probe error\n");
		return -ENODEV;
	}
	err = qman_portals_probed();
	if (!err)
		return -EPROBE_DEFER;
	if (err < 0) {
		dev_err(dev,
			"failing probe due to qman portals probe error\n");
		return -ENODEV;
	}

	/* Allocate this early, so we can store relevant information in
	 * the private area
	 */
	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
	if (!net_dev) {
		dev_err(dev, "alloc_etherdev_mq() failed\n");
		return -ENOMEM;
	}

	/* Do this here, so we can be verbose early */
	SET_NETDEV_DEV(net_dev, dev->parent);
	dev_set_drvdata(dev, net_dev);

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;

	priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);

	mac_dev = dpaa_mac_dev_get(pdev);
	if (IS_ERR(mac_dev)) {
		netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
		err = PTR_ERR(mac_dev);
		goto free_netdev;
	}

	/* Devices used for DMA mapping */
	priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]);
	priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]);
	err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40));
	if (!err)
		err = dma_coerce_mask_and_coherent(priv->tx_dma_dev,
						   DMA_BIT_MASK(40));
	if (err) {
		netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
		goto free_netdev;
	}

	/* If fsl_fm_max_frm is set to a higher value than the common Ethernet
	 * default of 1500, we choose conservatively and let the user
	 * explicitly set a higher MTU via ifconfig. Otherwise, the user may
	 * end up with different MTUs in the same LAN.
	 * If on the other hand fsl_fm_max_frm has been chosen below 1500,
	 * start with the maximum allowed.
	 */
	net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN);

	netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
		   net_dev->mtu);

	priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */
	priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */

	/* bp init */
	dpaa_bp = dpaa_bp_alloc(dev);
	if (IS_ERR(dpaa_bp)) {
		err = PTR_ERR(dpaa_bp);
		goto free_dpaa_bps;
	}
	/* the raw size of the buffers used for reception */
	dpaa_bp->raw_size = DPAA_BP_RAW_SIZE;
	/* avoid runtime computations by keeping the usable size here */
	dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size);
	dpaa_bp->priv = priv;

	err = dpaa_bp_alloc_pool(dpaa_bp);
	if (err < 0)
		goto free_dpaa_bps;
	priv->dpaa_bp = dpaa_bp;

	INIT_LIST_HEAD(&priv->dpaa_fq_list);

	memset(&port_fqs, 0, sizeof(port_fqs));

	err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs);
	if (err < 0) {
		dev_err(dev, "dpaa_alloc_all_fqs() failed\n");
		goto free_dpaa_bps;
	}

	priv->mac_dev = mac_dev;

	channel = dpaa_get_channel();
	if (channel < 0) {
		dev_err(dev, "dpaa_get_channel() failed\n");
		err = channel;
		goto free_dpaa_bps;
	}

	priv->channel = (u16)channel;

	/* Walk the CPUs with affine portals
	 * and add this pool channel to each's dequeue mask.
	 */
	dpaa_eth_add_channel(priv->channel, &pdev->dev);

	dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);

	/* Create a congestion group for this netdev, with
	 * dynamically-allocated CGR ID.
	 * Must be executed after probing the MAC, but before
	 * assigning the egress FQs to the CGRs.
	 */
	err = dpaa_eth_cgr_init(priv);
	if (err < 0) {
		dev_err(dev, "Error initializing CGR\n");
		goto free_dpaa_bps;
	}

	err = dpaa_ingress_cgr_init(priv);
	if (err < 0) {
		dev_err(dev, "Error initializing ingress CGR\n");
		goto delete_egress_cgr;
	}

	/* Add the FQs to the interface, and make them active */
	list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) {
		err = dpaa_fq_init(dpaa_fq, false);
		if (err < 0)
			goto free_dpaa_fqs;
	}

	priv->tx_headroom = dpaa_get_headroom(priv->buf_layout, TX);
	priv->rx_headroom = dpaa_get_headroom(priv->buf_layout, RX);

	/* All real interfaces need their ports initialized */
	err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
				  &priv->buf_layout[0], dev);
	if (err)
		goto free_dpaa_fqs;

	/* Rx traffic distribution based on keygen hashing defaults to on */
	priv->keygen_in_use = true;

	priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
	if (!priv->percpu_priv) {
		dev_err(dev, "devm_alloc_percpu() failed\n");
		err = -ENOMEM;
		goto free_dpaa_fqs;
	}

	priv->num_tc = 1;
	netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);

	/* Initialize NAPI */
	err = dpaa_napi_add(net_dev);
	if (err < 0)
		goto delete_dpaa_napi;

	err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout);
	if (err < 0)
		goto delete_dpaa_napi;

	dpaa_eth_sysfs_init(&net_dev->dev);

	netif_info(priv, probe, net_dev, "Probed interface %s\n",
		   net_dev->name);

	return 0;

delete_dpaa_napi:
	dpaa_napi_del(net_dev);
free_dpaa_fqs:
	dpaa_fq_free(dev, &priv->dpaa_fq_list);
	qman_delete_cgr_safe(&priv->ingress_cgr);
	qman_release_cgrid(priv->ingress_cgr.cgrid);
delete_egress_cgr:
	qman_delete_cgr_safe(&priv->cgr_data.cgr);
	qman_release_cgrid(priv->cgr_data.cgr.cgrid);
free_dpaa_bps:
	dpaa_bps_free(priv);
free_netdev:
	dev_set_drvdata(dev, NULL);
	free_netdev(net_dev);

	return err;
}

static int dpaa_remove(struct platform_device *pdev)
{
	struct net_device *net_dev;
	struct dpaa_priv *priv;
	struct device *dev;
	int err;

	dev = &pdev->dev;
	net_dev = dev_get_drvdata(dev);

	priv = netdev_priv(net_dev);

	dpaa_eth_sysfs_remove(dev);

	dev_set_drvdata(dev, NULL);
	unregister_netdev(net_dev);

	err = dpaa_fq_free(dev, &priv->dpaa_fq_list);

	qman_delete_cgr_safe(&priv->ingress_cgr);
	qman_release_cgrid(priv->ingress_cgr.cgrid);
	qman_delete_cgr_safe(&priv->cgr_data.cgr);
	qman_release_cgrid(priv->cgr_data.cgr.cgrid);

	dpaa_napi_del(net_dev);

	dpaa_bps_free(priv);

	free_netdev(net_dev);

	return err;
}

static const struct platform_device_id dpaa_devtype[] = {
	{
		.name = "dpaa-ethernet",
		.driver_data = 0,
	}, {
	}
};
MODULE_DEVICE_TABLE(platform, dpaa_devtype);

static struct platform_driver dpaa_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = dpaa_devtype,
	.probe = dpaa_eth_probe,
	.remove = dpaa_remove
};

static int __init dpaa_load(void)
{
	int err;

	pr_debug("FSL DPAA Ethernet driver\n");

	/* initialize dpaa_eth mirror values */
	dpaa_rx_extra_headroom = fman_get_rx_extra_headroom();
	dpaa_max_frm = fman_get_max_frm();

	err = platform_driver_register(&dpaa_driver);
	if (err < 0)
		pr_err("Error, platform_driver_register() = %d\n", err);

	return err;
}
module_init(dpaa_load);

static void __exit dpaa_unload(void)
{
	platform_driver_unregister(&dpaa_driver);

	/* Only one channel is used and needs to be released after all
	 * interfaces are removed
	 */
	dpaa_release_channel();
}
module_exit(dpaa_unload);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL DPAA Ethernet driver");