1 // SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
2 /*
3 * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
4 * Copyright (c) 2014, Synopsys, Inc.
5 * All rights reserved
6 */
7
8 #include <linux/module.h>
9 #include <linux/device.h>
10 #include <linux/platform_device.h>
11 #include <linux/spinlock.h>
12 #include <linux/netdevice.h>
13 #include <linux/etherdevice.h>
14 #include <linux/io.h>
15 #include <linux/of.h>
16 #include <linux/of_net.h>
17 #include <linux/of_platform.h>
18 #include <linux/clk.h>
19 #include <linux/property.h>
20 #include <linux/acpi.h>
21 #include <linux/mdio.h>
22
23 #include "xgbe.h"
24 #include "xgbe-common.h"
25
26 #ifdef CONFIG_ACPI
xgbe_acpi_support(struct xgbe_prv_data * pdata)27 static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
28 {
29 struct device *dev = pdata->dev;
30 u32 property;
31 int ret;
32
33 /* Obtain the system clock setting */
34 ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
35 if (ret) {
36 dev_err(dev, "unable to obtain %s property\n",
37 XGBE_ACPI_DMA_FREQ);
38 return ret;
39 }
40 pdata->sysclk_rate = property;
41
42 /* Obtain the PTP clock setting */
43 ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
44 if (ret) {
45 dev_err(dev, "unable to obtain %s property\n",
46 XGBE_ACPI_PTP_FREQ);
47 return ret;
48 }
49 pdata->ptpclk_rate = property;
50
51 return 0;
52 }
53 #else /* CONFIG_ACPI */
/* Stub used when ACPI support is not compiled in; an ACPI-described
 * device cannot be probed, so always fail with -EINVAL.
 */
static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
	return -EINVAL;
}
58 #endif /* CONFIG_ACPI */
59
60 #ifdef CONFIG_OF
xgbe_of_support(struct xgbe_prv_data * pdata)61 static int xgbe_of_support(struct xgbe_prv_data *pdata)
62 {
63 struct device *dev = pdata->dev;
64
65 /* Obtain the system clock setting */
66 pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
67 if (IS_ERR(pdata->sysclk)) {
68 dev_err(dev, "dma devm_clk_get failed\n");
69 return PTR_ERR(pdata->sysclk);
70 }
71 pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
72
73 /* Obtain the PTP clock setting */
74 pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
75 if (IS_ERR(pdata->ptpclk)) {
76 dev_err(dev, "ptp devm_clk_get failed\n");
77 return PTR_ERR(pdata->ptpclk);
78 }
79 pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
80
81 return 0;
82 }
83
xgbe_of_get_phy_pdev(struct xgbe_prv_data * pdata)84 static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
85 {
86 struct device *dev = pdata->dev;
87 struct device_node *phy_node;
88 struct platform_device *phy_pdev;
89
90 phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
91 if (phy_node) {
92 /* Old style device tree:
93 * The XGBE and PHY resources are separate
94 */
95 phy_pdev = of_find_device_by_node(phy_node);
96 of_node_put(phy_node);
97 } else {
98 /* New style device tree:
99 * The XGBE and PHY resources are grouped together with
100 * the PHY resources listed last
101 */
102 get_device(dev);
103 phy_pdev = pdata->platdev;
104 }
105
106 return phy_pdev;
107 }
108 #else /* CONFIG_OF */
/* Stub used when device-tree support is not compiled in; a DT-described
 * device cannot be probed, so always fail with -EINVAL.
 */
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
	return -EINVAL;
}
113
/* Stub used when device-tree support is not compiled in; there is no OF
 * PHY platform device to find, so return NULL (treated as an error by
 * the probe path).
 */
static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
{
	return NULL;
}
118 #endif /* CONFIG_OF */
119
xgbe_resource_count(struct platform_device * pdev,unsigned int type)120 static unsigned int xgbe_resource_count(struct platform_device *pdev,
121 unsigned int type)
122 {
123 unsigned int count;
124 int i;
125
126 for (i = 0, count = 0; i < pdev->num_resources; i++) {
127 struct resource *res = &pdev->resource[i];
128
129 if (type == resource_type(res))
130 count++;
131 }
132
133 return count;
134 }
135
xgbe_get_phy_pdev(struct xgbe_prv_data * pdata)136 static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
137 {
138 struct platform_device *phy_pdev;
139
140 if (pdata->use_acpi) {
141 get_device(pdata->dev);
142 phy_pdev = pdata->platdev;
143 } else {
144 phy_pdev = xgbe_of_get_phy_pdev(pdata);
145 }
146
147 return phy_pdev;
148 }
149
/* Probe an XGBE platform device: allocate the private data, locate the
 * PHY platform device, map the MMIO regions, read the ACPI/DT properties
 * and clocks, collect the interrupts and finally register the netdev.
 * Returns 0 on success or a negative errno, unwinding via goto labels.
 */
static int xgbe_platform_probe(struct platform_device *pdev)
{
	struct xgbe_prv_data *pdata;
	struct device *dev = &pdev->dev;
	struct platform_device *phy_pdev;
	const char *phy_mode;
	unsigned int phy_memnum, phy_irqnum;
	unsigned int dma_irqnum, dma_irqend;
	enum dev_dma_attr attr;
	int ret;

	pdata = xgbe_alloc_pdata(dev);
	if (IS_ERR(pdata)) {
		ret = PTR_ERR(pdata);
		goto err_alloc;
	}

	pdata->platdev = pdev;
	pdata->adev = ACPI_COMPANION(dev);
	platform_set_drvdata(pdev, pdata);

	/* Check if we should use ACPI or DT */
	pdata->use_acpi = dev->of_node ? 0 : 1;

	/* Get the version data */
	pdata->vdata = (struct xgbe_version_data *)device_get_match_data(dev);

	/* Takes a reference on phy_pdev; dropped on the error paths below
	 * and in xgbe_platform_remove()
	 */
	phy_pdev = xgbe_get_phy_pdev(pdata);
	if (!phy_pdev) {
		dev_err(dev, "unable to obtain phy device\n");
		ret = -EINVAL;
		goto err_phydev;
	}
	pdata->phy_platdev = phy_pdev;
	pdata->phy_dev = &phy_pdev->dev;

	if (pdev == phy_pdev) {
		/* New style device tree or ACPI:
		 * The XGBE and PHY resources are grouped together with
		 * the PHY resources listed last.  The last three MEM
		 * resources are the PHY's (rxtx, sir0, sir1, mapped
		 * below) and the last IRQ is the auto-negotiation one.
		 */
		phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
		phy_irqnum = platform_irq_count(pdev) - 1;
		dma_irqnum = 1;		/* IRQ 0 is the device interrupt */
		dma_irqend = phy_irqnum; /* DMA IRQs stop before the AN IRQ */
	} else {
		/* Old style device tree:
		 * The XGBE and PHY resources are separate, so the PHY
		 * device's resources start at index 0 and every XGBE IRQ
		 * past index 0 is a per-channel DMA interrupt
		 */
		phy_memnum = 0;
		phy_irqnum = 0;
		dma_irqnum = 1;
		dma_irqend = platform_irq_count(pdev);
	}

	/* Obtain the mmio areas for the device */
	pdata->xgmac_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pdata->xgmac_regs)) {
		dev_err(dev, "xgmac ioremap failed\n");
		ret = PTR_ERR(pdata->xgmac_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);

	pdata->xpcs_regs = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(pdata->xpcs_regs)) {
		dev_err(dev, "xpcs ioremap failed\n");
		ret = PTR_ERR(pdata->xpcs_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);

	/* The three PHY regions (rxtx, sir0, sir1) come from phy_pdev,
	 * starting at phy_memnum as computed above
	 */
	pdata->rxtx_regs = devm_platform_ioremap_resource(phy_pdev,
							  phy_memnum++);
	if (IS_ERR(pdata->rxtx_regs)) {
		dev_err(dev, "rxtx ioremap failed\n");
		ret = PTR_ERR(pdata->rxtx_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);

	pdata->sir0_regs = devm_platform_ioremap_resource(phy_pdev,
							  phy_memnum++);
	if (IS_ERR(pdata->sir0_regs)) {
		dev_err(dev, "sir0 ioremap failed\n");
		ret = PTR_ERR(pdata->sir0_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);

	pdata->sir1_regs = devm_platform_ioremap_resource(phy_pdev,
							  phy_memnum++);
	if (IS_ERR(pdata->sir1_regs)) {
		dev_err(dev, "sir1 ioremap failed\n");
		ret = PTR_ERR(pdata->sir1_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);

	/* Retrieve the MAC address */
	ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
					    pdata->mac_addr,
					    sizeof(pdata->mac_addr));
	if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
		dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}

	/* Retrieve the PHY mode - it must be "xgmii" */
	ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
					  &phy_mode);
	if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
		dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}
	pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;

	/* Check for per channel interrupt support */
	if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY)) {
		pdata->per_channel_irq = 1;
		pdata->channel_irq_mode = XGBE_IRQ_MODE_EDGE;
	}

	/* Obtain device settings unique to ACPI/OF */
	if (pdata->use_acpi)
		ret = xgbe_acpi_support(pdata);
	else
		ret = xgbe_of_support(pdata);
	if (ret)
		goto err_io;

	/* Set the DMA coherency values */
	attr = device_get_dma_attr(dev);
	if (attr == DEV_DMA_NOT_SUPPORTED) {
		dev_err(dev, "DMA is not supported");
		ret = -ENODEV;
		goto err_io;
	}
	pdata->coherent = (attr == DEV_DMA_COHERENT);
	if (pdata->coherent) {
		pdata->arcr = XGBE_DMA_OS_ARCR;
		pdata->awcr = XGBE_DMA_OS_AWCR;
	} else {
		pdata->arcr = XGBE_DMA_SYS_ARCR;
		pdata->awcr = XGBE_DMA_SYS_AWCR;
	}

	/* Set the maximum fifo amounts */
	pdata->tx_max_fifo_size = pdata->vdata->tx_max_fifo_size;
	pdata->rx_max_fifo_size = pdata->vdata->rx_max_fifo_size;

	/* Set the hardware channel and queue counts */
	xgbe_set_counts(pdata);

	/* Always have XGMAC and XPCS (auto-negotiation) interrupts */
	pdata->irq_count = 2;

	/* Get the device interrupt */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto err_io;
	pdata->dev_irq = ret;

	/* Get the per channel DMA interrupts */
	if (pdata->per_channel_irq) {
		unsigned int i, max = ARRAY_SIZE(pdata->channel_irq);

		for (i = 0; (i < max) && (dma_irqnum < dma_irqend); i++) {
			ret = platform_get_irq(pdata->platdev, dma_irqnum++);
			if (ret < 0)
				goto err_io;

			pdata->channel_irq[i] = ret;
		}

		pdata->channel_irq_count = max;

		pdata->irq_count += max;
	}

	/* Get the auto-negotiation interrupt */
	ret = platform_get_irq(phy_pdev, phy_irqnum++);
	if (ret < 0)
		goto err_io;
	pdata->an_irq = ret;

	/* Configure the netdev resource */
	ret = xgbe_config_netdev(pdata);
	if (ret)
		goto err_io;

	netdev_notice(pdata->netdev, "net device enabled\n");

	return 0;

err_io:
	/* Drop the reference taken in xgbe_get_phy_pdev() */
	platform_device_put(phy_pdev);

err_phydev:
	xgbe_free_pdata(pdata);

err_alloc:
	dev_notice(dev, "net device not enabled\n");

	return ret;
}
365
/* Remove an XGBE platform device: unregister the netdev, drop the PHY
 * platform device reference taken at probe time and free the private
 * data (the reverse order of xgbe_platform_probe()).
 */
static void xgbe_platform_remove(struct platform_device *pdev)
{
	struct xgbe_prv_data *pdata = platform_get_drvdata(pdev);

	xgbe_deconfig_netdev(pdata);

	platform_device_put(pdata->phy_platdev);

	xgbe_free_pdata(pdata);
}
376
377 #ifdef CONFIG_PM_SLEEP
/* System suspend handler: power down the interface if it is running,
 * then place the PCS in low-power mode via the MDIO control register.
 * The pre-suspend control value is cached in pdata->lpm_ctrl so resume
 * can restore it.
 */
static int xgbe_platform_suspend(struct device *dev)
{
	struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
	struct net_device *netdev = pdata->netdev;
	int ret = 0;

	DBGPR("-->xgbe_suspend\n");

	if (netif_running(netdev))
		ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);

	/* Set the PCS low-power bit, remembering the original setting */
	pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
	pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);

	DBGPR("<--xgbe_suspend\n");

	return ret;
}
397
/* System resume handler: take the PCS out of low-power mode (clearing
 * the bit saved by suspend) and, if the interface was running, power it
 * back up and schedule a restart to pick up any link changes.
 */
static int xgbe_platform_resume(struct device *dev)
{
	struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
	struct net_device *netdev = pdata->netdev;
	int ret = 0;

	DBGPR("-->xgbe_resume\n");

	/* Clear the PCS low-power bit set during suspend */
	pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);

	if (netif_running(netdev)) {
		ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);

		/* Schedule a restart in case the link or phy state changed
		 * while we were powered down.
		 */
		schedule_work(&pdata->restart_work);
	}

	DBGPR("<--xgbe_resume\n");

	return ret;
}
422 #endif /* CONFIG_PM_SLEEP */
423
/* Version data for v1 hardware (Seattle): v1 XPCS access, 80KB max
 * Tx/Rx FIFOs and the Tx timestamp workaround enabled.
 */
static const struct xgbe_version_data xgbe_v1 = {
	.init_function_ptrs_phy_impl	= xgbe_init_function_ptrs_phy_v1,
	.xpcs_access			= XGBE_XPCS_ACCESS_V1,
	.tx_max_fifo_size		= 81920,
	.rx_max_fifo_size		= 81920,
	.tx_tstamp_workaround		= 1,
};
431
/* ACPI IDs handled by this driver, each mapped to its version data */
static const struct acpi_device_id xgbe_acpi_match[] = {
	{ .id = "AMDI8001",
	  .driver_data = (kernel_ulong_t)&xgbe_v1 },
	{},
};

MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
439
/* Device-tree compatibles handled by this driver, each mapped to its
 * version data
 */
static const struct of_device_id xgbe_of_match[] = {
	{ .compatible = "amd,xgbe-seattle-v1a",
	  .data = &xgbe_v1 },
	{},
};

MODULE_DEVICE_TABLE(of, xgbe_of_match);
447
/* Suspend/resume ops; expands to empty ops when CONFIG_PM_SLEEP is off,
 * matching the #ifdef around the handlers above
 */
static SIMPLE_DEV_PM_OPS(xgbe_platform_pm_ops,
			 xgbe_platform_suspend, xgbe_platform_resume);
450
/* Platform driver definition tying together the ACPI/OF match tables,
 * PM ops and probe/remove callbacks
 */
static struct platform_driver xgbe_driver = {
	.driver = {
		.name = XGBE_DRV_NAME,
		.acpi_match_table = xgbe_acpi_match,
		.of_match_table = xgbe_of_match,
		.pm = &xgbe_platform_pm_ops,
	},
	.probe = xgbe_platform_probe,
	.remove = xgbe_platform_remove,
};
461
/* Register the XGBE platform driver; called from the module init path */
int xgbe_platform_init(void)
{
	return platform_driver_register(&xgbe_driver);
}
466
/* Unregister the XGBE platform driver; called from the module exit path */
void xgbe_platform_exit(void)
{
	platform_driver_unregister(&xgbe_driver);
}
471