1 // SPDX-License-Identifier: GPL-2.0
2
3 /* Texas Instruments ICSSG SR1.0 Ethernet Driver
4 *
5 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
6 * Copyright (c) Siemens AG, 2024
7 *
8 */
9
10 #include <linux/etherdevice.h>
11 #include <linux/genalloc.h>
12 #include <linux/kernel.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/module.h>
15 #include <linux/of.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_net.h>
18 #include <linux/platform_device.h>
19 #include <linux/property.h>
20 #include <linux/phy.h>
21 #include <linux/remoteproc/pruss.h>
22 #include <linux/pruss_driver.h>
23
24 #include "icssg_prueth.h"
25 #include "icssg_mii_rt.h"
26 #include "../k3-cppi-desc-pool.h"
27
28 #define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG SR1.0 Ethernet driver"
29
30 /* SR1: Set buffer sizes for the pools. There are 8 internal queues
31 * implemented in firmware, but only 4 tx channels/threads in the Egress
32 * direction to firmware. Need a high priority queue for management
33 * messages since they shouldn't be blocked even during high traffic
34 * situation. So use Q0-Q2 as data queues and Q3 as management queue
35 * in the max case. However for ease of configuration, use the max
36 * data queue + 1 for management message if we are not using max
37 * case.
38 *
39 * Allocate 4 MTU buffers per data queue. Firmware requires
40 * pool sizes to be set for internal queues. Set the upper 5 queue
41 * pool size to min size of 128 bytes since there are only 3 tx
42 * data channels and management queue requires only minimum buffer.
43 * i.e lower queues are used by driver and highest priority queue
44 * from that is used for management message.
45 */
46
47 static int emac_egress_buf_pool_size[] = {
48 PRUETH_EMAC_BUF_POOL_SIZE_SR1, PRUETH_EMAC_BUF_POOL_SIZE_SR1,
49 PRUETH_EMAC_BUF_POOL_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1,
50 PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1,
51 PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1
52 };
53
icssg_config_sr1(struct prueth * prueth,struct prueth_emac * emac,int slice)54 static void icssg_config_sr1(struct prueth *prueth, struct prueth_emac *emac,
55 int slice)
56 {
57 struct icssg_sr1_config config;
58 void __iomem *va;
59 int i, index;
60
61 memset(&config, 0, sizeof(config));
62 config.addr_lo = cpu_to_le32(lower_32_bits(prueth->msmcram.pa));
63 config.addr_hi = cpu_to_le32(upper_32_bits(prueth->msmcram.pa));
64 config.rx_flow_id = cpu_to_le32(emac->rx_flow_id_base); /* flow id for host port */
65 config.rx_mgr_flow_id = cpu_to_le32(emac->rx_mgm_flow_id_base); /* for mgm ch */
66 config.rand_seed = cpu_to_le32(get_random_u32());
67
68 for (i = PRUETH_EMAC_BUF_POOL_START_SR1; i < PRUETH_NUM_BUF_POOLS_SR1; i++) {
69 index = i - PRUETH_EMAC_BUF_POOL_START_SR1;
70 config.tx_buf_sz[i] = cpu_to_le32(emac_egress_buf_pool_size[index]);
71 }
72
73 va = prueth->shram.va + slice * ICSSG_CONFIG_OFFSET_SLICE1;
74 memcpy_toio(va, &config, sizeof(config));
75
76 emac->speed = SPEED_1000;
77 emac->duplex = DUPLEX_FULL;
78 }
79
/* Send one control command to the firmware over the highest priority
 * TX channel and wait up to 100 ms for the acknowledgment delivered via
 * the management RX response flow (which completes emac->cmd_complete).
 * Serialized by emac->cmd_lock: only one command in flight at a time.
 *
 * Return: 0 on success, -ETIMEDOUT if the firmware did not respond, or
 * a negative error code on mapping/allocation/push failure.
 */
static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd)
{
	struct cppi5_host_desc_t *first_desc;
	u32 pkt_len = sizeof(emac->cmd_data);
	__le32 *data = emac->cmd_data;
	dma_addr_t desc_dma, buf_dma;
	struct prueth_tx_chn *tx_chn;
	struct prueth_swdata *swdata;
	int ret = 0;
	u32 *epib;

	netdev_dbg(emac->ndev, "Sending cmd %x\n", cmd);

	/* only one command at a time allowed to firmware */
	mutex_lock(&emac->cmd_lock);
	data[0] = cpu_to_le32(cmd);

	/* highest priority channel for management messages */
	tx_chn = &emac->tx_chns[emac->tx_ch_num - 1];

	/* Map the linear buffer */
	buf_dma = dma_map_single(tx_chn->dma_dev, data, pkt_len, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
		netdev_err(emac->ndev, "cmd %x: failed to map cmd buffer\n", cmd);
		ret = -EINVAL;
		goto err_unlock;
	}

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		netdev_err(emac->ndev, "cmd %x: failed to allocate descriptor\n", cmd);
		dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
		ret = -ENOMEM;
		goto err_unlock;
	}

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	cppi5_hdesc_set_pkttype(first_desc, PRUETH_PKT_TYPE_CMD);
	epib = first_desc->epib;
	epib[0] = 0;
	epib[1] = 0;

	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	swdata->type = PRUETH_SWDATA_CMD;
	swdata->data.cmd = le32_to_cpu(data[0]);

	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);

	/* send command */
	reinit_completion(&emac->cmd_complete);
	ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	if (ret) {
		netdev_err(emac->ndev, "cmd %x: push failed: %d\n", cmd, ret);
		goto free_desc;
	}

	/* Previously a timeout returned 0 (indistinguishable from an
	 * error-free path that returns 0) while success returned the
	 * remaining jiffies. Normalize: 0 on success, -ETIMEDOUT on
	 * timeout. Visible callers ignore the return value, so this is
	 * backward compatible.
	 */
	if (!wait_for_completion_timeout(&emac->cmd_complete,
					 msecs_to_jiffies(100))) {
		netdev_err(emac->ndev, "cmd %x: completion timeout\n", cmd);
		ret = -ETIMEDOUT;
	}

	mutex_unlock(&emac->cmd_lock);

	return ret;

free_desc:
	prueth_xmit_free(tx_chn, first_desc);
err_unlock:
	mutex_unlock(&emac->cmd_lock);

	return ret;
}
152
icssg_config_set_speed_sr1(struct prueth_emac * emac)153 static void icssg_config_set_speed_sr1(struct prueth_emac *emac)
154 {
155 u32 cmd = ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1, val;
156 struct prueth *prueth = emac->prueth;
157 int slice = prueth_emac_slice(emac);
158
159 val = icssg_rgmii_get_speed(prueth->miig_rt, slice);
160 /* firmware expects speed settings in bit 2-1 */
161 val <<= 1;
162 cmd |= val;
163
164 val = icssg_rgmii_get_fullduplex(prueth->miig_rt, slice);
165 /* firmware expects full duplex settings in bit 3 */
166 val <<= 3;
167 cmd |= val;
168
169 emac_send_command_sr1(emac, cmd);
170 }
171
172 /* called back by PHY layer if there is change in link state of hw port*/
emac_adjust_link_sr1(struct net_device * ndev)173 static void emac_adjust_link_sr1(struct net_device *ndev)
174 {
175 struct prueth_emac *emac = netdev_priv(ndev);
176 struct phy_device *phydev = ndev->phydev;
177 struct prueth *prueth = emac->prueth;
178 bool new_state = false;
179 unsigned long flags;
180
181 if (phydev->link) {
182 /* check the mode of operation - full/half duplex */
183 if (phydev->duplex != emac->duplex) {
184 new_state = true;
185 emac->duplex = phydev->duplex;
186 }
187 if (phydev->speed != emac->speed) {
188 new_state = true;
189 emac->speed = phydev->speed;
190 }
191 if (!emac->link) {
192 new_state = true;
193 emac->link = 1;
194 }
195 } else if (emac->link) {
196 new_state = true;
197 emac->link = 0;
198
199 /* f/w should support 100 & 1000 */
200 emac->speed = SPEED_1000;
201
202 /* half duplex may not be supported by f/w */
203 emac->duplex = DUPLEX_FULL;
204 }
205
206 if (new_state) {
207 phy_print_status(phydev);
208
209 /* update RGMII and MII configuration based on PHY negotiated
210 * values
211 */
212 if (emac->link) {
213 /* Set the RGMII cfg for gig en and full duplex */
214 icssg_update_rgmii_cfg(prueth->miig_rt, emac);
215
216 /* update the Tx IPG based on 100M/1G speed */
217 spin_lock_irqsave(&emac->lock, flags);
218 icssg_config_ipg(emac);
219 spin_unlock_irqrestore(&emac->lock, flags);
220 icssg_config_set_speed_sr1(emac);
221 }
222 }
223
224 if (emac->link) {
225 /* reactivate the transmit queue */
226 netif_tx_wake_all_queues(ndev);
227 } else {
228 netif_tx_stop_all_queues(ndev);
229 prueth_cleanup_tx_ts(emac);
230 }
231 }
232
emac_phy_connect(struct prueth_emac * emac)233 static int emac_phy_connect(struct prueth_emac *emac)
234 {
235 struct prueth *prueth = emac->prueth;
236 struct net_device *ndev = emac->ndev;
237 /* connect PHY */
238 ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
239 &emac_adjust_link_sr1, 0,
240 emac->phy_if);
241 if (!ndev->phydev) {
242 dev_err(prueth->dev, "couldn't connect to phy %s\n",
243 emac->phy_node->full_name);
244 return -ENODEV;
245 }
246
247 if (!emac->half_duplex) {
248 dev_dbg(prueth->dev, "half duplex mode is not supported\n");
249 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
250 }
251
252 /* Remove 100Mbits half-duplex due to RGMII misreporting connection
253 * as full duplex */
254 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
255
256 /* remove unsupported modes */
257 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
258 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
259 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
260
261 if (emac->phy_if == PHY_INTERFACE_MODE_MII)
262 phy_set_max_speed(ndev->phydev, SPEED_100);
263
264 return 0;
265 }
266
267 /* get one packet from requested flow_id
268 *
269 * Returns skb pointer if packet found else NULL
270 * Caller must free the returned skb.
271 */
prueth_process_rx_mgm(struct prueth_emac * emac,u32 flow_id)272 static struct page *prueth_process_rx_mgm(struct prueth_emac *emac,
273 u32 flow_id)
274 {
275 struct prueth_rx_chn *rx_chn = &emac->rx_mgm_chn;
276 struct net_device *ndev = emac->ndev;
277 struct cppi5_host_desc_t *desc_rx;
278 struct page *page, *new_page;
279 struct prueth_swdata *swdata;
280 dma_addr_t desc_dma, buf_dma;
281 u32 buf_dma_len;
282 int ret;
283
284 ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
285 if (ret) {
286 if (ret != -ENODATA)
287 netdev_err(ndev, "rx mgm pop: failed: %d\n", ret);
288 return NULL;
289 }
290
291 if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown */
292 return NULL;
293
294 desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
295
296 /* Fix FW bug about incorrect PSDATA size */
297 if (cppi5_hdesc_get_psdata_size(desc_rx) != PRUETH_NAV_PS_DATA_SIZE) {
298 cppi5_hdesc_update_psdata_size(desc_rx,
299 PRUETH_NAV_PS_DATA_SIZE);
300 }
301
302 swdata = cppi5_hdesc_get_swdata(desc_rx);
303 page = swdata->data.page;
304 cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
305
306 dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
307 k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
308
309 new_page = page_pool_dev_alloc_pages(rx_chn->pg_pool);
310 /* if allocation fails we drop the packet but push the
311 * descriptor back to the ring with old skb to prevent a stall
312 */
313 if (!new_page) {
314 netdev_err(ndev,
315 "page alloc failed, dropped mgm pkt from flow %d\n",
316 flow_id);
317 new_page = page;
318 page = NULL; /* return NULL */
319 }
320
321 /* queue another DMA */
322 ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,
323 PRUETH_MAX_PKT_SIZE);
324 if (WARN_ON(ret < 0))
325 page_pool_recycle_direct(rx_chn->pg_pool, new_page);
326
327 return page;
328 }
329
prueth_tx_ts_sr1(struct prueth_emac * emac,struct emac_tx_ts_response_sr1 * tsr)330 static void prueth_tx_ts_sr1(struct prueth_emac *emac,
331 struct emac_tx_ts_response_sr1 *tsr)
332 {
333 struct skb_shared_hwtstamps ssh;
334 u32 hi_ts, lo_ts, cookie;
335 struct sk_buff *skb;
336 u64 ns;
337
338 hi_ts = le32_to_cpu(tsr->hi_ts);
339 lo_ts = le32_to_cpu(tsr->lo_ts);
340
341 ns = (u64)hi_ts << 32 | lo_ts;
342
343 cookie = le32_to_cpu(tsr->cookie);
344 if (cookie >= PRUETH_MAX_TX_TS_REQUESTS) {
345 netdev_dbg(emac->ndev, "Invalid TX TS cookie 0x%x\n",
346 cookie);
347 return;
348 }
349
350 skb = emac->tx_ts_skb[cookie];
351 emac->tx_ts_skb[cookie] = NULL; /* free slot */
352
353 memset(&ssh, 0, sizeof(ssh));
354 ssh.hwtstamp = ns_to_ktime(ns);
355
356 skb_tstamp_tx(skb, &ssh);
357 dev_consume_skb_any(skb);
358 }
359
prueth_rx_mgm_ts_thread_sr1(int irq,void * dev_id)360 static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
361 {
362 struct prueth_emac *emac = dev_id;
363 struct page *page;
364
365 page = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1);
366 if (!page)
367 return IRQ_NONE;
368
369 prueth_tx_ts_sr1(emac, (void *)page_address(page));
370 page_pool_recycle_direct(pp_page_to_nmdesc(page)->pp, page);
371
372 return IRQ_HANDLED;
373 }
374
prueth_rx_mgm_rsp_thread(int irq,void * dev_id)375 static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
376 {
377 struct prueth_emac *emac = dev_id;
378 struct page *page;
379 u32 rsp;
380
381 page = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1);
382 if (!page)
383 return IRQ_NONE;
384
385 /* Process command response */
386 rsp = le32_to_cpu(*(__le32 *)page_address(page)) & 0xffff0000;
387 if (rsp == ICSSG_SHUTDOWN_CMD_SR1) {
388 netdev_dbg(emac->ndev, "f/w Shutdown cmd resp %x\n", rsp);
389 complete(&emac->cmd_complete);
390 } else if (rsp == ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1) {
391 netdev_dbg(emac->ndev, "f/w Speed/Duplex cmd rsp %x\n", rsp);
392 complete(&emac->cmd_complete);
393 }
394
395 page_pool_recycle_direct(pp_page_to_nmdesc(page)->pp, page);
396
397 return IRQ_HANDLED;
398 }
399
/* SR1.0 firmware image names for the PRU and RTU cores, indexed by
 * ICSS slice (used by prueth_emac_start()).
 */
static struct icssg_firmwares icssg_sr1_emac_firmwares[] = {
	{
		.pru = "ti-pruss/am65x-pru0-prueth-fw.elf",
		.rtu = "ti-pruss/am65x-rtu0-prueth-fw.elf",
	},
	{
		.pru = "ti-pruss/am65x-pru1-prueth-fw.elf",
		.rtu = "ti-pruss/am65x-rtu1-prueth-fw.elf",
	}
};
410
prueth_emac_start(struct prueth * prueth,struct prueth_emac * emac)411 static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
412 {
413 struct icssg_firmwares *firmwares;
414 struct device *dev = prueth->dev;
415 int slice, ret;
416
417 firmwares = icssg_sr1_emac_firmwares;
418
419 slice = prueth_emac_slice(emac);
420 if (slice < 0) {
421 netdev_err(emac->ndev, "invalid port\n");
422 return -EINVAL;
423 }
424
425 icssg_config_sr1(prueth, emac, slice);
426
427 ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
428 ret = rproc_boot(prueth->pru[slice]);
429 if (ret) {
430 dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
431 return -EINVAL;
432 }
433
434 ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
435 ret = rproc_boot(prueth->rtu[slice]);
436 if (ret) {
437 dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
438 goto halt_pru;
439 }
440
441 return 0;
442
443 halt_pru:
444 rproc_shutdown(prueth->pru[slice]);
445
446 return ret;
447 }
448
prueth_emac_stop(struct prueth_emac * emac)449 static void prueth_emac_stop(struct prueth_emac *emac)
450 {
451 struct prueth *prueth = emac->prueth;
452 int slice;
453
454 switch (emac->port_id) {
455 case PRUETH_PORT_MII0:
456 slice = ICSS_SLICE0;
457 break;
458 case PRUETH_PORT_MII1:
459 slice = ICSS_SLICE1;
460 break;
461 default:
462 netdev_err(emac->ndev, "invalid port\n");
463 return;
464 }
465
466 if (!emac->is_sr1)
467 rproc_shutdown(prueth->txpru[slice]);
468 rproc_shutdown(prueth->rtu[slice]);
469 rproc_shutdown(prueth->pru[slice]);
470 }
471
/**
 * emac_ndo_open - EMAC device open
 * @ndev: network adapter device
 *
 * Called when system wants to start the interface.
 *
 * Sets up classifier and MAC address, TX channels, the data and
 * management RX channels, their IRQs, boots the PRU/RTU firmware and
 * finally enables NAPI and the PHY. Failures unwind in reverse order
 * through the goto ladder at the bottom.
 *
 * Return: 0 for a successful open, or appropriate error code
 */
static int emac_ndo_open(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	/* last TX channel is reserved for management commands */
	int num_data_chn = emac->tx_ch_num - 1;
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	struct device *dev = prueth->dev;
	int max_rx_flows, rx_flow;
	int ret, i;

	/* clear SMEM and MSMC settings for all slices; only done by the
	 * first port to come up
	 */
	if (!prueth->emacs_initialized) {
		memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
		memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
	}

	/* set h/w MAC as user might have re-configured */
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);

	/* default classifier configuration, allmulti off
	 * (cf. emac_ndo_set_rx_mode_sr1())
	 */
	icssg_class_default(prueth->miig_rt, slice, 0, true);

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
	if (ret) {
		dev_err(dev, "cannot set real number of tx queues\n");
		return ret;
	}

	init_completion(&emac->cmd_complete);
	ret = prueth_init_tx_chns(emac);
	if (ret) {
		dev_err(dev, "failed to init tx channel: %d\n", ret);
		return ret;
	}

	max_rx_flows = PRUETH_MAX_RX_FLOWS_SR1;
	ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
				  max_rx_flows, PRUETH_MAX_RX_DESC);
	if (ret) {
		dev_err(dev, "failed to init rx channel: %d\n", ret);
		goto cleanup_tx;
	}

	/* separate RX channel for firmware responses and timestamps */
	ret = prueth_init_rx_chns(emac, &emac->rx_mgm_chn, "rxmgm",
				  PRUETH_MAX_RX_MGM_FLOWS_SR1,
				  PRUETH_MAX_RX_MGM_DESC_SR1);
	if (ret) {
		dev_err(dev, "failed to init rx mgmt channel: %d\n",
			ret);
		goto cleanup_rx;
	}

	ret = prueth_ndev_add_tx_napi(emac);
	if (ret)
		goto cleanup_rx_mgm;

	/* we use only the highest priority flow for now i.e. @irq[3] */
	rx_flow = PRUETH_RX_FLOW_DATA_SR1;
	ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
			  IRQF_TRIGGER_HIGH, dev_name(dev), emac);
	if (ret) {
		dev_err(dev, "unable to request RX IRQ\n");
		goto cleanup_napi;
	}

	/* threaded handlers for management flows */
	ret = request_threaded_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1],
				   NULL, prueth_rx_mgm_rsp_thread,
				   IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
				   dev_name(dev), emac);
	if (ret) {
		dev_err(dev, "unable to request RX Management RSP IRQ\n");
		goto free_rx_irq;
	}

	ret = request_threaded_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1],
				   NULL, prueth_rx_mgm_ts_thread_sr1,
				   IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
				   dev_name(dev), emac);
	if (ret) {
		dev_err(dev, "unable to request RX Management TS IRQ\n");
		goto free_rx_mgm_rsp_irq;
	}

	/* reset and start PRU firmware */
	ret = prueth_emac_start(prueth, emac);
	if (ret)
		goto free_rx_mgmt_ts_irq;

	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);

	/* Prepare RX: fill the rings with mapped buffers */
	ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
	if (ret)
		goto stop;

	/* management messages are small; 64-byte buffers suffice */
	ret = prueth_prepare_rx_chan(emac, &emac->rx_mgm_chn, 64);
	if (ret)
		goto reset_rx_chn;

	ret = k3_udma_glue_enable_rx_chn(emac->rx_mgm_chn.rx_chn);
	if (ret)
		goto reset_rx_chn;

	ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
	if (ret)
		goto reset_rx_mgm_chn;

	for (i = 0; i < emac->tx_ch_num; i++) {
		ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
		if (ret)
			goto reset_tx_chan;
	}

	/* Enable NAPI in Tx and Rx direction */
	for (i = 0; i < emac->tx_ch_num; i++)
		napi_enable(&emac->tx_chns[i].napi_tx);
	napi_enable(&emac->napi_rx);

	/* start PHY */
	phy_start(ndev->phydev);

	prueth->emacs_initialized++;

	queue_work(system_long_wq, &emac->stats_work.work);

	return 0;

reset_tx_chan:
	/* Since interface is not yet up, there wouldn't be
	 * any SKB for completion. So set false to free_skb
	 */
	prueth_reset_tx_chan(emac, i, false);
reset_rx_mgm_chn:
	prueth_reset_rx_chan(&emac->rx_mgm_chn,
			     PRUETH_MAX_RX_MGM_FLOWS_SR1, true);
reset_rx_chn:
	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
stop:
	prueth_emac_stop(emac);
free_rx_mgmt_ts_irq:
	free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1],
		 emac);
free_rx_mgm_rsp_irq:
	free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1],
		 emac);
free_rx_irq:
	free_irq(emac->rx_chns.irq[rx_flow], emac);
cleanup_napi:
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
cleanup_rx_mgm:
	prueth_cleanup_rx_chns(emac, &emac->rx_mgm_chn,
			       PRUETH_MAX_RX_MGM_FLOWS_SR1);
cleanup_rx:
	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
cleanup_tx:
	prueth_cleanup_tx_chns(emac);

	return ret;
}
641
/**
 * emac_ndo_stop - EMAC device stop
 * @ndev: network adapter device
 *
 * Called when system wants to stop or down the interface.
 *
 * Tears down in roughly the reverse order of emac_ndo_open(): stop the
 * stack and PHY, tell the firmware to shut down, tear down TX then RX
 * DMA channels, disable NAPI, halt the PRUs and release IRQs/channels.
 *
 * Return: Always 0 (Success)
 */
static int emac_ndo_stop(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int rx_flow = PRUETH_RX_FLOW_DATA_SR1;
	struct prueth *prueth = emac->prueth;
	int max_rx_flows;
	int ret, i;

	/* inform the upper layers. */
	netif_tx_stop_all_queues(ndev);

	/* block packets from wire */
	if (ndev->phydev)
		phy_stop(ndev->phydev);

	icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));

	/* best-effort: return value ignored as we shut down anyway */
	emac_send_command_sr1(emac, ICSSG_SHUTDOWN_CMD_SR1);

	atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
	/* ensure new tdown_cnt value is visible */
	smp_mb__after_atomic();
	/* tear down and disable UDMA channels */
	reinit_completion(&emac->tdown_complete);
	for (i = 0; i < emac->tx_ch_num; i++)
		k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);

	ret = wait_for_completion_timeout(&emac->tdown_complete,
					  msecs_to_jiffies(1000));
	if (!ret)
		netdev_err(ndev, "tx teardown timeout\n");

	/* free_skb = true: pending TX SKBs must be released now */
	prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
	for (i = 0; i < emac->tx_ch_num; i++)
		napi_disable(&emac->tx_chns[i].napi_tx);

	max_rx_flows = PRUETH_MAX_RX_FLOWS_SR1;
	k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);

	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
	/* Teardown RX MGM channel */
	k3_udma_glue_tdown_rx_chn(emac->rx_mgm_chn.rx_chn, true);
	prueth_reset_rx_chan(&emac->rx_mgm_chn,
			     PRUETH_MAX_RX_MGM_FLOWS_SR1, true);

	napi_disable(&emac->napi_rx);

	/* Destroying the queued work in ndo_stop() */
	cancel_delayed_work_sync(&emac->stats_work);

	/* stop PRUs */
	prueth_emac_stop(emac);

	free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1], emac);
	free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1], emac);
	free_irq(emac->rx_chns.irq[rx_flow], emac);
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
	prueth_cleanup_tx_chns(emac);

	prueth_cleanup_rx_chns(emac, &emac->rx_mgm_chn, PRUETH_MAX_RX_MGM_FLOWS_SR1);
	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);

	prueth->emacs_initialized--;

	return 0;
}
716
emac_ndo_set_rx_mode_sr1(struct net_device * ndev)717 static void emac_ndo_set_rx_mode_sr1(struct net_device *ndev)
718 {
719 struct prueth_emac *emac = netdev_priv(ndev);
720 bool allmulti = ndev->flags & IFF_ALLMULTI;
721 bool promisc = ndev->flags & IFF_PROMISC;
722 struct prueth *prueth = emac->prueth;
723 int slice = prueth_emac_slice(emac);
724
725 if (promisc) {
726 icssg_class_promiscuous_sr1(prueth->miig_rt, slice);
727 return;
728 }
729
730 if (allmulti) {
731 icssg_class_default(prueth->miig_rt, slice, 1, true);
732 return;
733 }
734
735 icssg_class_default(prueth->miig_rt, slice, 0, true);
736 if (!netdev_mc_empty(ndev)) {
737 /* program multicast address list into Classifier */
738 icssg_class_add_mcast_sr1(prueth->miig_rt, slice, ndev);
739 }
740 }
741
/* netdev callbacks for SR1.0 ports: open/stop/rx_mode are SR1-specific,
 * the remaining ops are shared with the common ICSSG code.
 */
static const struct net_device_ops emac_netdev_ops = {
	.ndo_open = emac_ndo_open,
	.ndo_stop = emac_ndo_stop,
	.ndo_start_xmit = icssg_ndo_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = icssg_ndo_tx_timeout,
	.ndo_set_rx_mode = emac_ndo_set_rx_mode_sr1,
	.ndo_eth_ioctl = phy_do_ioctl,
	.ndo_get_stats64 = icssg_ndo_get_stats64,
	.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
	.ndo_hwtstamp_get = icssg_ndo_get_ts_config,
	.ndo_hwtstamp_set = icssg_ndo_set_ts_config,
};
756
/* Allocate and initialize the net_device and prueth_emac for one port
 * described by @eth_node: DRAM region, PHY lookup (phy-handle or
 * fixed-link), phy-mode validation/adjustment, MAC address, and netdev
 * ops/NAPI setup. On success the emac is stored in prueth->emac[mac].
 */
static int prueth_netdev_init(struct prueth *prueth,
			      struct device_node *eth_node)
{
	struct prueth_emac *emac;
	struct net_device *ndev;
	enum prueth_port port;
	enum prueth_mac mac;
	/* Only enable one TX channel due to timeouts when
	 * using multiple channels
	 */
	int num_tx_chn = 1;
	int ret;

	port = prueth_node_port(eth_node);
	if (port == PRUETH_PORT_INVALID)
		return -EINVAL;

	mac = prueth_node_mac(eth_node);
	if (mac == PRUETH_MAC_INVALID)
		return -EINVAL;

	ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
	if (!ndev)
		return -ENOMEM;

	emac = netdev_priv(ndev);
	emac->is_sr1 = 1;
	emac->prueth = prueth;
	emac->ndev = ndev;
	emac->port_id = port;

	INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);

	/* per-port data RAM inside the PRUSS */
	ret = pruss_request_mem_region(prueth->pruss,
				       port == PRUETH_PORT_MII0 ?
				       PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
				       &emac->dram);
	if (ret) {
		dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
		ret = -ENOMEM;
		goto free_ndev;
	}

	/* SR1.0 uses a dedicated high priority channel
	 * to send commands to the firmware
	 */
	emac->tx_ch_num = 2;

	SET_NETDEV_DEV(ndev, prueth->dev);
	spin_lock_init(&emac->lock);
	mutex_init(&emac->cmd_lock);

	emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
	if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
		dev_err(prueth->dev, "couldn't find phy-handle\n");
		ret = -ENODEV;
		goto free;
	} else if (of_phy_is_fixed_link(eth_node)) {
		ret = of_phy_register_fixed_link(eth_node);
		if (ret) {
			dev_err_probe(prueth->dev, ret, "failed to register fixed-link phy\n");
			goto free;
		}

		emac->phy_node = eth_node;
	}

	/* NOTE(review): a fixed-link registered above is not
	 * deregistered on the error paths below - confirm whether
	 * of_phy_deregister_fixed_link() is needed before 'goto free'.
	 */
	ret = of_get_phy_mode(eth_node, &emac->phy_if);
	if (ret) {
		dev_err(prueth->dev, "could not get phy-mode property\n");
		goto free;
	}

	if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
	    !phy_interface_mode_is_rgmii(emac->phy_if)) {
		dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
		ret = -EINVAL;
		goto free;
	}

	/* AM65 SR2.0 has TX Internal delay always enabled by hardware
	 * and it is not possible to disable TX Internal delay. The below
	 * switch case block describes how we handle different phy modes
	 * based on hardware restriction.
	 */
	switch (emac->phy_if) {
	case PHY_INTERFACE_MODE_RGMII_ID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
		ret = -EINVAL;
		goto free;
	default:
		break;
	}

	/* get mac address from DT and set private and netdev addr;
	 * fall back to a random address if DT has none/invalid
	 */
	ret = of_get_ethdev_address(eth_node, ndev);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		eth_hw_addr_random(ndev);
		dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
			 port, ndev->dev_addr);
	}
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	ndev->dev.of_node = eth_node;
	ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
	ndev->max_mtu = PRUETH_MAX_MTU;
	ndev->netdev_ops = &emac_netdev_ops;
	ndev->ethtool_ops = &icssg_ethtool_ops;
	ndev->hw_features = NETIF_F_SG;
	ndev->features = ndev->hw_features;

	netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
	prueth->emac[mac] = emac;

	return 0;

free:
	pruss_release_mem_region(prueth->pruss, &emac->dram);
free_ndev:
	emac->ndev = NULL;
	prueth->emac[mac] = NULL;
	free_netdev(ndev);

	return ret;
}
888
prueth_probe(struct platform_device * pdev)889 static int prueth_probe(struct platform_device *pdev)
890 {
891 struct device_node *eth_node, *eth_ports_node;
892 struct device_node *eth0_node = NULL;
893 struct device_node *eth1_node = NULL;
894 struct device *dev = &pdev->dev;
895 struct device_node *np;
896 struct prueth *prueth;
897 struct pruss *pruss;
898 u32 msmc_ram_size;
899 int i, ret;
900
901 np = dev->of_node;
902
903 prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
904 if (!prueth)
905 return -ENOMEM;
906
907 dev_set_drvdata(dev, prueth);
908 prueth->pdev = pdev;
909 prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);
910
911 prueth->dev = dev;
912 eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
913 if (!eth_ports_node)
914 return -ENOENT;
915
916 for_each_child_of_node(eth_ports_node, eth_node) {
917 u32 reg;
918
919 if (strcmp(eth_node->name, "port"))
920 continue;
921 ret = of_property_read_u32(eth_node, "reg", ®);
922 if (ret < 0) {
923 dev_err(dev, "%pOF error reading port_id %d\n",
924 eth_node, ret);
925 }
926
927 of_node_get(eth_node);
928
929 if (reg == 0) {
930 eth0_node = eth_node;
931 if (!of_device_is_available(eth0_node)) {
932 of_node_put(eth0_node);
933 eth0_node = NULL;
934 }
935 } else if (reg == 1) {
936 eth1_node = eth_node;
937 if (!of_device_is_available(eth1_node)) {
938 of_node_put(eth1_node);
939 eth1_node = NULL;
940 }
941 } else {
942 dev_err(dev, "port reg should be 0 or 1\n");
943 }
944 }
945
946 of_node_put(eth_ports_node);
947
948 /* At least one node must be present and available else we fail */
949 if (!eth0_node && !eth1_node) {
950 dev_err(dev, "neither port0 nor port1 node available\n");
951 return -ENODEV;
952 }
953
954 if (eth0_node == eth1_node) {
955 dev_err(dev, "port0 and port1 can't have same reg\n");
956 of_node_put(eth0_node);
957 return -ENODEV;
958 }
959
960 prueth->eth_node[PRUETH_MAC0] = eth0_node;
961 prueth->eth_node[PRUETH_MAC1] = eth1_node;
962
963 prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
964 if (IS_ERR(prueth->miig_rt)) {
965 dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
966 return -ENODEV;
967 }
968
969 prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
970 if (IS_ERR(prueth->mii_rt)) {
971 dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
972 return -ENODEV;
973 }
974
975 if (eth0_node) {
976 ret = prueth_get_cores(prueth, ICSS_SLICE0, true);
977 if (ret)
978 goto put_cores;
979 }
980
981 if (eth1_node) {
982 ret = prueth_get_cores(prueth, ICSS_SLICE1, true);
983 if (ret)
984 goto put_cores;
985 }
986
987 pruss = pruss_get(eth0_node ?
988 prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
989 if (IS_ERR(pruss)) {
990 ret = PTR_ERR(pruss);
991 dev_err(dev, "unable to get pruss handle\n");
992 goto put_cores;
993 }
994
995 prueth->pruss = pruss;
996
997 ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
998 &prueth->shram);
999 if (ret) {
1000 dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
1001 goto put_pruss;
1002 }
1003
1004 prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
1005 if (!prueth->sram_pool) {
1006 dev_err(dev, "unable to get SRAM pool\n");
1007 ret = -ENODEV;
1008
1009 goto put_mem;
1010 }
1011
1012 msmc_ram_size = MSMC_RAM_SIZE_SR1;
1013
1014 prueth->msmcram.va = (void __iomem *)gen_pool_alloc(prueth->sram_pool,
1015 msmc_ram_size);
1016
1017 if (!prueth->msmcram.va) {
1018 ret = -ENOMEM;
1019 dev_err(dev, "unable to allocate MSMC resource\n");
1020 goto put_mem;
1021 }
1022 prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
1023 (unsigned long)prueth->msmcram.va);
1024 prueth->msmcram.size = msmc_ram_size;
1025 memset_io(prueth->msmcram.va, 0, msmc_ram_size);
1026
1027 prueth->iep0 = icss_iep_get_idx(np, 0);
1028 if (IS_ERR(prueth->iep0)) {
1029 ret = dev_err_probe(dev, PTR_ERR(prueth->iep0),
1030 "iep0 get failed\n");
1031 goto free_pool;
1032 }
1033
1034 prueth->iep1 = icss_iep_get_idx(np, 1);
1035 if (IS_ERR(prueth->iep1)) {
1036 ret = dev_err_probe(dev, PTR_ERR(prueth->iep1),
1037 "iep1 get failed\n");
1038 goto put_iep0;
1039 }
1040
1041 ret = icss_iep_init(prueth->iep0, NULL, NULL, 0);
1042 if (ret) {
1043 dev_err_probe(dev, ret, "failed to init iep0\n");
1044 goto put_iep;
1045 }
1046
1047 ret = icss_iep_init(prueth->iep1, NULL, NULL, 0);
1048 if (ret) {
1049 dev_err_probe(dev, ret, "failed to init iep1\n");
1050 goto exit_iep0;
1051 }
1052
1053 if (eth0_node) {
1054 ret = prueth_netdev_init(prueth, eth0_node);
1055 if (ret) {
1056 dev_err_probe(dev, ret, "netdev init %s failed\n",
1057 eth0_node->name);
1058 goto exit_iep;
1059 }
1060
1061 prueth->emac[PRUETH_MAC0]->half_duplex =
1062 of_property_read_bool(eth0_node, "ti,half-duplex-capable");
1063
1064 prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
1065 }
1066
1067 if (eth1_node) {
1068 ret = prueth_netdev_init(prueth, eth1_node);
1069 if (ret) {
1070 dev_err_probe(dev, ret, "netdev init %s failed\n",
1071 eth1_node->name);
1072 goto netdev_exit;
1073 }
1074
1075 prueth->emac[PRUETH_MAC1]->half_duplex =
1076 of_property_read_bool(eth1_node, "ti,half-duplex-capable");
1077
1078 prueth->emac[PRUETH_MAC1]->iep = prueth->iep1;
1079 }
1080
1081 /* register the network devices */
1082 if (eth0_node) {
1083 ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
1084 if (ret) {
1085 dev_err(dev, "can't register netdev for port MII0\n");
1086 goto netdev_exit;
1087 }
1088
1089 prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
1090 emac_phy_connect(prueth->emac[PRUETH_MAC0]);
1091 phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
1092 }
1093
1094 if (eth1_node) {
1095 ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
1096 if (ret) {
1097 dev_err(dev, "can't register netdev for port MII1\n");
1098 goto netdev_unregister;
1099 }
1100
1101 prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
1102 emac_phy_connect(prueth->emac[PRUETH_MAC1]);
1103 phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
1104 }
1105
1106 dev_info(dev, "TI PRU SR1.0 ethernet driver initialized: %s EMAC mode\n",
1107 (!eth0_node || !eth1_node) ? "single" : "dual");
1108
1109 if (eth1_node)
1110 of_node_put(eth1_node);
1111 if (eth0_node)
1112 of_node_put(eth0_node);
1113
1114 return 0;
1115
1116 netdev_unregister:
1117 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1118 if (!prueth->registered_netdevs[i])
1119 continue;
1120
1121 if (prueth->emac[i]->ndev->phydev) {
1122 phy_disconnect(prueth->emac[i]->ndev->phydev);
1123 prueth->emac[i]->ndev->phydev = NULL;
1124 }
1125 unregister_netdev(prueth->registered_netdevs[i]);
1126 }
1127
1128 netdev_exit:
1129 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1130 eth_node = prueth->eth_node[i];
1131 if (!eth_node)
1132 continue;
1133
1134 prueth_netdev_exit(prueth, eth_node);
1135 }
1136
1137 exit_iep:
1138 icss_iep_exit(prueth->iep1);
1139 exit_iep0:
1140 icss_iep_exit(prueth->iep0);
1141
1142 put_iep:
1143 icss_iep_put(prueth->iep1);
1144
1145 put_iep0:
1146 icss_iep_put(prueth->iep0);
1147 prueth->iep0 = NULL;
1148 prueth->iep1 = NULL;
1149
1150 free_pool:
1151 gen_pool_free(prueth->sram_pool,
1152 (unsigned long)prueth->msmcram.va, msmc_ram_size);
1153
1154 put_mem:
1155 pruss_release_mem_region(prueth->pruss, &prueth->shram);
1156
1157 put_pruss:
1158 pruss_put(prueth->pruss);
1159
1160 put_cores:
1161 if (eth1_node) {
1162 prueth_put_cores(prueth, ICSS_SLICE1);
1163 of_node_put(eth1_node);
1164 }
1165
1166 if (eth0_node) {
1167 prueth_put_cores(prueth, ICSS_SLICE0);
1168 of_node_put(eth0_node);
1169 }
1170
1171 return ret;
1172 }
1173
prueth_remove(struct platform_device * pdev)1174 static void prueth_remove(struct platform_device *pdev)
1175 {
1176 struct prueth *prueth = platform_get_drvdata(pdev);
1177 struct device_node *eth_node;
1178 int i;
1179
1180 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1181 if (!prueth->registered_netdevs[i])
1182 continue;
1183 phy_stop(prueth->emac[i]->ndev->phydev);
1184 phy_disconnect(prueth->emac[i]->ndev->phydev);
1185 prueth->emac[i]->ndev->phydev = NULL;
1186 unregister_netdev(prueth->registered_netdevs[i]);
1187 }
1188
1189 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1190 eth_node = prueth->eth_node[i];
1191 if (!eth_node)
1192 continue;
1193
1194 prueth_netdev_exit(prueth, eth_node);
1195 }
1196
1197 icss_iep_exit(prueth->iep1);
1198 icss_iep_exit(prueth->iep0);
1199
1200 icss_iep_put(prueth->iep1);
1201 icss_iep_put(prueth->iep0);
1202
1203 gen_pool_free(prueth->sram_pool,
1204 (unsigned long)prueth->msmcram.va,
1205 MSMC_RAM_SIZE_SR1);
1206
1207 pruss_release_mem_region(prueth->pruss, &prueth->shram);
1208
1209 pruss_put(prueth->pruss);
1210
1211 if (prueth->eth_node[PRUETH_MAC1])
1212 prueth_put_cores(prueth, ICSS_SLICE1);
1213
1214 if (prueth->eth_node[PRUETH_MAC0])
1215 prueth_put_cores(prueth, ICSS_SLICE0);
1216 }
1217
/* SoC-specific platform data for the AM654 SR1.0 ICSSG: selects the
 * K3 ring accelerator free-descriptor-queue ring mode used by the DMA setup.
 */
static const struct prueth_pdata am654_sr1_icssg_pdata = {
	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
};
1221
/* Device tree match table; .data carries the SR1.0-specific pdata above.
 * Exported via MODULE_DEVICE_TABLE so udev/modprobe can autoload the module.
 */
static const struct of_device_id prueth_dt_match[] = {
	{ .compatible = "ti,am654-sr1-icssg-prueth", .data = &am654_sr1_icssg_pdata },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, prueth_dt_match);
1227
/* Platform driver registration. prueth_probe() and prueth_dev_pm_ops are
 * defined elsewhere in this file (outside this excerpt).
 */
static struct platform_driver prueth_driver = {
	.probe = prueth_probe,
	.remove = prueth_remove,
	.driver = {
		.name = "icssg-prueth-sr1",
		.of_match_table = prueth_dt_match,
		.pm = &prueth_dev_pm_ops,
	},
};
/* Boilerplate module init/exit generated from the platform_driver */
module_platform_driver(prueth_driver);
1238
1239 MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
1240 MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
1241 MODULE_AUTHOR("Diogo Ivo <diogo.ivo@siemens.com>");
1242 MODULE_DESCRIPTION(PRUETH_MODULE_DESCRIPTION);
1243 MODULE_LICENSE("GPL");
1244