1 /*
2 * i.MX Fast Ethernet Controller emulation.
3 *
4 * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
5 *
6 * Based on Coldfire Fast Ethernet Controller emulation.
7 *
8 * Copyright (c) 2007 CodeSourcery.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, see <http://www.gnu.org/licenses/>.
22 */
23
24 #include "qemu/osdep.h"
25 #include "hw/irq.h"
26 #include "hw/net/imx_fec.h"
27 #include "hw/qdev-properties.h"
28 #include "migration/vmstate.h"
29 #include "system/dma.h"
30 #include "qemu/log.h"
31 #include "qemu/module.h"
32 #include "net/checksum.h"
33 #include "net/eth.h"
34 #include "trace.h"
35
36 #include <zlib.h> /* for crc32 */
37
/*
 * Upper bound on buffer descriptors walked in one TX pass, so a guest
 * that builds a looping descriptor ring cannot hang QEMU.
 */
#define IMX_MAX_DESC 1024
39
/*
 * Fallback register name used for tracing when no symbolic name is
 * known: formats the raw index into a static scratch buffer (the
 * returned pointer is only valid until the next call).
 */
static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
{
    static char buf[20];

    snprintf(buf, sizeof(buf), "index %d", index);
    return buf;
}
46
/* Symbolic names for the registers that only exist on the FEC variant. */
static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
{
    if (index == ENET_FRBR) {
        return "FRBR";
    }
    if (index == ENET_FRSR) {
        return "FRSR";
    }
    if (index == ENET_MIIGSK_CFGR) {
        return "MIIGSK_CFGR";
    }
    if (index == ENET_MIIGSK_ENR) {
        return "MIIGSK_ENR";
    }
    return imx_default_reg_name(s, index);
}
62
/*
 * Symbolic names for registers that only exist on the ENET variant
 * (FIFO thresholds and the IEEE 1588 timer block). Registers shared
 * with the FEC are resolved in imx_eth_reg_name() before we get here.
 */
static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
        return "RSFL";
    case ENET_RSEM:
        return "RSEM";
    case ENET_RAEM:
        return "RAEM";
    case ENET_RAFL:
        return "RAFL";
    case ENET_TSEM:
        return "TSEM";
    case ENET_TAEM:
        return "TAEM";
    case ENET_TAFL:
        return "TAFL";
    case ENET_TIPG:
        return "TIPG";
    case ENET_FTRL:
        return "FTRL";
    case ENET_TACC:
        return "TACC";
    case ENET_RACC:
        return "RACC";
    case ENET_ATCR:
        return "ATCR";
    case ENET_ATVR:
        return "ATVR";
    case ENET_ATOFF:
        return "ATOFF";
    case ENET_ATPER:
        return "ATPER";
    case ENET_ATCOR:
        return "ATCOR";
    case ENET_ATINC:
        return "ATINC";
    case ENET_ATSTMP:
        return "ATSTMP";
    case ENET_TGSR:
        return "TGSR";
    case ENET_TCSR0:
        return "TCSR0";
    case ENET_TCCR0:
        return "TCCR0";
    case ENET_TCSR1:
        return "TCSR1";
    case ENET_TCCR1:
        return "TCCR1";
    case ENET_TCSR2:
        return "TCSR2";
    case ENET_TCCR2:
        return "TCCR2";
    case ENET_TCSR3:
        return "TCSR3";
    case ENET_TCCR3:
        return "TCCR3";
    default:
        return imx_default_reg_name(s, index);
    }
}
124
/*
 * Symbolic register name for tracing. Registers common to FEC and ENET
 * are named here; anything else is delegated to the variant-specific
 * namer selected by s->is_fec.
 */
static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_EIR:
        return "EIR";
    case ENET_EIMR:
        return "EIMR";
    case ENET_RDAR:
        return "RDAR";
    case ENET_TDAR:
        return "TDAR";
    case ENET_ECR:
        return "ECR";
    case ENET_MMFR:
        return "MMFR";
    case ENET_MSCR:
        return "MSCR";
    case ENET_MIBC:
        return "MIBC";
    case ENET_RCR:
        return "RCR";
    case ENET_TCR:
        return "TCR";
    case ENET_PALR:
        return "PALR";
    case ENET_PAUR:
        return "PAUR";
    case ENET_OPD:
        return "OPD";
    case ENET_IAUR:
        return "IAUR";
    case ENET_IALR:
        return "IALR";
    case ENET_GAUR:
        return "GAUR";
    case ENET_GALR:
        return "GALR";
    case ENET_TFWR:
        return "TFWR";
    case ENET_RDSR:
        return "RDSR";
    case ENET_TDSR:
        return "TDSR";
    case ENET_MRBR:
        return "MRBR";
    default:
        if (s->is_fec) {
            return imx_fec_reg_name(s, index);
        } else {
            return imx_enet_reg_name(s, index);
        }
    }
}
178
179 /*
180 * Versions of this device with more than one TX descriptor save the
181 * 2nd and 3rd descriptors in a subsection, to maintain migration
182 * compatibility with previous versions of the device that only
183 * supported a single descriptor.
184 */
imx_eth_is_multi_tx_ring(void * opaque)185 static bool imx_eth_is_multi_tx_ring(void *opaque)
186 {
187 IMXFECState *s = IMX_FEC(opaque);
188
189 return s->tx_ring_num > 1;
190 }
191
/*
 * Migration subsection holding the TX descriptor pointers for rings 1
 * and 2. Only sent when the device actually has multiple TX rings
 * (see imx_eth_is_multi_tx_ring), preserving compatibility with older
 * single-ring streams.
 */
static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = imx_eth_is_multi_tx_ring,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()
    }
};
203
/*
 * Main migration description: the whole register file, the RX ring
 * pointer, and TX ring 0; rings 1/2 ride in the optional subsection.
 */
static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .version_id = 3,
    .minimum_version_id = 3,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_imx_eth_txdescs,
        NULL
    },
};
219
/* Forward declaration: imx_phy_update_irq() below re-evaluates the IRQ lines. */
static void imx_eth_update(IMXFECState *s);

/*
 * The MII phy could raise a GPIO to the processor which in turn
 * could be handled as an interrupt by the OS.
 * For now we don't handle any GPIO/interrupt line, so the OS will
 * have to poll for the PHY status.
 */
static void imx_phy_update_irq(void *opaque, int n, int level)
{
    /* A PHY event simply re-computes the MAC's interrupt outputs. */
    imx_eth_update(opaque);
}
232
/* Propagate the backend's link state into the emulated LAN9118 PHY. */
static void imx_eth_set_link(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    lan9118_phy_update_link(&s->mii, nc->link_down);
}
238
/*
 * Read a PHY register over the emulated MDIO bus.
 *
 * The 10-bit MMFR address is split as phy-address * 32 + register.
 * Accesses either hit our own PHY (phy_num) or, if configured, the
 * PHY of a consumer device sharing this MDIO bus; anything else reads
 * as 0xffff (bus idle / no PHY present).
 */
static uint32_t imx_phy_read(IMXFECState *s, int reg)
{
    uint32_t phy_idx = reg / 32;

    if (!s->phy_connected) {
        return 0xffff;
    }

    if (phy_idx != s->phy_num) {
        IMXFECState *consumer = s->phy_consumer;

        if (!consumer || phy_idx != consumer->phy_num) {
            trace_imx_phy_read_num(phy_idx, s->phy_num);
            return 0xffff;
        }
        /* Redirect to the device whose PHY shares our MDIO bus. */
        s = consumer;
    }

    return lan9118_phy_read(&s->mii, reg % 32);
}
260
/*
 * Write a PHY register over the emulated MDIO bus. Address decoding
 * mirrors imx_phy_read(); writes to absent PHYs are dropped.
 */
static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
{
    uint32_t phy_idx = reg / 32;

    if (!s->phy_connected) {
        return;
    }

    if (phy_idx != s->phy_num) {
        IMXFECState *consumer = s->phy_consumer;

        if (!consumer || phy_idx != consumer->phy_num) {
            trace_imx_phy_write_num(phy_idx, s->phy_num);
            return;
        }
        /* Redirect to the device whose PHY shares our MDIO bus. */
        s = consumer;
    }

    lan9118_phy_write(&s->mii, reg % 32, val);
}
282
/* DMA-read one FEC-format buffer descriptor from guest memory and trace it. */
static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
                    MEMTXATTRS_UNSPECIFIED);

    trace_imx_fec_read_bd(addr, bd->flags, bd->length, bd->data);
}
290
/* DMA-write one FEC-format buffer descriptor back to guest memory. */
static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
                     MEMTXATTRS_UNSPECIFIED);
}
296
/* DMA-read one extended (ENET) buffer descriptor from guest memory and trace it. */
static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
                    MEMTXATTRS_UNSPECIFIED);

    trace_imx_enet_read_bd(addr, bd->flags, bd->length, bd->data,
                           bd->option, bd->status);
}
305
/* DMA-write one extended (ENET) buffer descriptor back to guest memory. */
static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
                     MEMTXATTRS_UNSPECIFIED);
}
311
/* Recompute both interrupt outputs from EIR & EIMR. */
static void imx_eth_update(IMXFECState *s)
{
    /*
     * Previous versions of qemu had the ENET_INT_MAC and ENET_INT_TS_TIMER
     * interrupts swapped. This worked with older versions of Linux (4.14
     * and older) since Linux associated both interrupt lines with Ethernet
     * MAC interrupts. Specifically,
     * - Linux 4.15 and later have separate interrupt handlers for the MAC and
     *   timer interrupts. Those versions of Linux fail with versions of QEMU
     *   with swapped interrupt assignments.
     * - In linux 4.14, both interrupt lines were registered with the Ethernet
     *   MAC interrupt handler. As a result, all versions of qemu happen to
     *   work, though that is accidental.
     * - In Linux 4.9 and older, the timer interrupt was registered directly
     *   with the Ethernet MAC interrupt handler. The MAC interrupt was
     *   redirected to a GPIO interrupt to work around erratum ERR006687.
     *   This was implemented using the SOC's IOMUX block. In qemu, this GPIO
     *   interrupt never fired since IOMUX is currently not supported in qemu.
     *   Linux instead received MAC interrupts on the timer interrupt.
     *   As a result, qemu versions with the swapped interrupt assignment work,
     *   albeit accidentally, but qemu versions with the correct interrupt
     *   assignment fail.
     *
     * To ensure that all versions of Linux work, generate ENET_INT_MAC
     * interrupts on both interrupt lines. This should be changed if and when
     * qemu supports IOMUX.
     */
    uint32_t active = s->regs[ENET_EIR] & s->regs[ENET_EIMR];

    qemu_set_irq(s->irq[1],
                 (active & (ENET_INT_MAC | ENET_INT_TS_TIMER)) != 0);
    qemu_set_irq(s->irq[0], (active & ENET_INT_MAC) != 0);
}
352
/*
 * Walk the (single) FEC TX descriptor ring and transmit every frame
 * the guest has handed over (ENET_BD_R set). Frames spanning several
 * descriptors are accumulated in s->frame until a descriptor with
 * ENET_BD_L (last) is seen. The walk is capped at IMX_MAX_DESC so a
 * malformed ring cannot loop forever.
 */
static void imx_fec_do_tx(IMXFECState *s)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        IMXFECBufDesc bd;
        int len;

        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {

            /* Run out of descriptors to transmit. */
            trace_imx_eth_tx_bd_busy();

            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            /* Truncate oversized frames and flag "babbling transmit". */
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len,
                        MEMTXATTRS_UNSPECIFIED);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        }
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        /* Hand the descriptor back to software. */
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit: restart from the ring base in TDSR. */
            addr = s->regs[ENET_TDSR];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[0] = addr;

    imx_eth_update(s);
}
403
/*
 * Walk one ENET TX descriptor ring (selected by the TDAR/TDAR1/TDAR2
 * register index that was written) and transmit queued frames.
 * Compared to the FEC path this adds per-ring interrupt bits, optional
 * IP/TCP/UDP checksum insertion (PINS/IINS descriptor options), and
 * the BDU "descriptor updated" status bit.
 */
static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
{
    int frame_size = 0, descnt = 0;

    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;
    size_t ring;

    /* Map the written register to ring number, IRQ bits and ring base. */
    switch (index) {
    case ENET_TDAR:
        ring = 0;
        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        tdsr = ENET_TDSR;
        break;
    case ENET_TDAR1:
        ring = 1;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        tdsr = ENET_TDSR1;
        break;
    case ENET_TDAR2:
        ring = 2;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        tdsr = ENET_TDSR2;
        break;
    default:
        /* Callers only pass the three TDAR indices; anything else is a QEMU bug. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",
                      __func__, index);
        abort();
        break;
    }

    addr = s->tx_descriptor[ring];

    /* Bounded walk so a looping ring cannot hang QEMU. */
    while (descnt++ < IMX_MAX_DESC) {
        IMXENETBufDesc bd;
        int len;

        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */

            trace_imx_eth_tx_bd_busy();

            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            /* Truncate oversized frames and flag "babbling transmit". */
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len,
                        MEMTXATTRS_UNSPECIFIED);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            int csum = 0;

            /* Checksum-offload options requested by the guest driver. */
            if (bd.option & ENET_BD_PINS) {
                csum |= (CSUM_TCP | CSUM_UDP);
            }
            if (bd.option & ENET_BD_IINS) {
                csum |= CSUM_IP;
            }
            if (csum) {
                net_checksum_calculate(s->frame, frame_size, csum);
            }

            /* Last buffer in frame. */

            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;

            frame_size = 0;
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            }
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
        }
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        }
        /* Hand the descriptor back to software. */
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit: restart from this ring's base register. */
            addr = s->regs[tdsr];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[ring] = addr;

    imx_eth_update(s);
}
505
/*
 * Dispatch a TX kick to the descriptor-format-specific handler:
 * extended (ENET) descriptors when the 1588 feature is enabled on a
 * non-FEC device, legacy FEC descriptors otherwise.
 */
static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
{
    bool enet_descs = !s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588);

    if (enet_descs) {
        imx_enet_do_tx(s, index);
    } else {
        imx_fec_do_tx(s);
    }
}
514
/*
 * Re-evaluate RX availability: RDAR stays set only while the current
 * RX descriptor is empty (owned by hardware). Optionally flush any
 * packets the net layer queued while we could not receive.
 */
static void imx_eth_enable_rx(IMXFECState *s, bool flush)
{
    IMXFECBufDesc bd;
    bool ring_full;

    imx_fec_read_bd(&bd, s->rx_descriptor);
    ring_full = !(bd.flags & ENET_BD_E);

    s->regs[ENET_RDAR] = ring_full ? 0 : ENET_RDAR_RDAR;

    if (ring_full) {
        trace_imx_eth_rx_bd_full();
    } else if (flush) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}
529
/*
 * Device reset: clear the register file and restore power-on values,
 * then rebuild the station-address registers from the configured MAC.
 * The hard-coded constants are the hardware reset values (presumably
 * taken from the i.MX reference manuals — TODO confirm per register).
 */
static void imx_eth_reset(DeviceState *d)
{
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR] = 0xf0000000;
    s->regs[ENET_MIBC] = 0xc0000000;
    s->regs[ENET_RCR] = 0x05ee0001;
    s->regs[ENET_OPD] = 0x00010000;

    /* PALR/PAUR hold the 6-byte MAC plus the 0x8808 pause-frame type. */
    s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
                         | (s->conf.macaddr.a[1] << 16)
                         | (s->conf.macaddr.a[2] << 8)
                         | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
                         | (s->conf.macaddr.a[5] << 16)
                         | 0x8808;

    if (s->is_fec) {
        s->regs[ENET_FRBR] = 0x00000600;
        s->regs[ENET_FRSR] = 0x00000500;
        s->regs[ENET_MIIGSK_ENR] = 0x00000006;
    } else {
        s->regs[ENET_RAEM] = 0x00000004;
        s->regs[ENET_RAFL] = 0x00000004;
        s->regs[ENET_TAEM] = 0x00000004;
        s->regs[ENET_TAFL] = 0x00000008;
        s->regs[ENET_TIPG] = 0x0000000c;
        s->regs[ENET_FTRL] = 0x000007ff;
        s->regs[ENET_ATPER] = 0x3b9aca00;
    }

    /* Descriptor pointers restart from the ring bases (all zero here). */
    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));
}
566
/* Log a guest read of an unimplemented register and return zero. */
static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
{
    qemu_log_mask(LOG_GUEST_ERROR,
                  "[%s]%s: Bad register at offset 0x%" PRIx32 "\n",
                  TYPE_IMX_FEC, __func__, index * 4);
    return 0;
}
573
/* Read handler for registers that only exist on the FEC variant. */
static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
{
    if (index == ENET_FRBR || index == ENET_FRSR ||
        index == ENET_MIIGSK_CFGR || index == ENET_MIIGSK_ENR) {
        return s->regs[index];
    }
    return imx_default_read(s, index);
}
586
/*
 * Read handler for registers that only exist on the ENET variant
 * (FIFO thresholds and the 1588 timer block). All are plain
 * read-back of the stored value; unknown offsets are logged.
 */
static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
    case ENET_TIPG:
    case ENET_FTRL:
    case ENET_TACC:
    case ENET_RACC:
    case ENET_ATCR:
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
    case ENET_ATCOR:
    case ENET_ATINC:
    case ENET_ATSTMP:
    case ENET_TGSR:
    case ENET_TCSR0:
    case ENET_TCCR0:
    case ENET_TCSR1:
    case ENET_TCCR1:
    case ENET_TCSR2:
    case ENET_TCCR2:
    case ENET_TCSR3:
    case ENET_TCCR3:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}
622
/*
 * MMIO read entry point. Registers common to both variants are read
 * directly from the register file; everything else is delegated to
 * the FEC- or ENET-specific reader. All accesses are 32-bit (enforced
 * by imx_eth_ops), so the register index is simply offset / 4.
 */
static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
{
    uint32_t value = 0;
    IMXFECState *s = IMX_FEC(opaque);
    uint32_t index = offset >> 2;

    switch (index) {
    case ENET_EIR:
    case ENET_EIMR:
    case ENET_RDAR:
    case ENET_TDAR:
    case ENET_ECR:
    case ENET_MMFR:
    case ENET_MSCR:
    case ENET_MIBC:
    case ENET_RCR:
    case ENET_TCR:
    case ENET_PALR:
    case ENET_PAUR:
    case ENET_OPD:
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
    case ENET_TFWR:
    case ENET_RDSR:
    case ENET_TDSR:
    case ENET_MRBR:
        value = s->regs[index];
        break;
    default:
        if (s->is_fec) {
            value = imx_fec_read(s, index);
        } else {
            value = imx_enet_read(s, index);
        }
        break;
    }

    trace_imx_eth_read(index, imx_eth_reg_name(s, index), value);

    return value;
}
666
/* Log a guest write to an unimplemented register; the value is dropped. */
static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    qemu_log_mask(LOG_GUEST_ERROR,
                  "[%s]%s: Bad address at offset 0x%" PRIx32 "\n",
                  TYPE_IMX_FEC, __func__, index * 4);
}
672
/*
 * Write handler for registers that only exist on the FEC variant.
 * Masks restrict each register to its writable bits (values presumably
 * per the i.MX25 FEC register layout — TODO confirm against the RM).
 */
static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_FRBR:
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_FRSR:
        /* Low bits writable; bit 10 always reads back as set. */
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
        break;
    case ENET_MIIGSK_CFGR:
        s->regs[index] = value & 0x00000053;
        break;
    case ENET_MIIGSK_ENR:
        /* Writing the enable bit makes both enable and ready read back. */
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}
695
/*
 * Write handler for registers that only exist on the ENET variant.
 * Masks restrict each register to its writable bits; TGSR and the
 * TCSRn flag bit are write-1-to-clear (masks presumably per the
 * i.MX6 ENET register layout — TODO confirm against the RM).
 */
static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
        /* FIFO threshold registers share a 9-bit writable field. */
        s->regs[index] = value & 0x000001ff;
        break;
    case ENET_TIPG:
        s->regs[index] = value & 0x0000001f;
        break;
    case ENET_FTRL:
        s->regs[index] = value & 0x00003fff;
        break;
    case ENET_TACC:
        s->regs[index] = value & 0x00000019;
        break;
    case ENET_RACC:
        s->regs[index] = value & 0x000000C7;
        break;
    case ENET_ATCR:
        s->regs[index] = value & 0x00002a9d;
        break;
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
        s->regs[index] = value;
        break;
    case ENET_ATSTMP:
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_ATCOR:
        s->regs[index] = value & 0x7fffffff;
        break;
    case ENET_ATINC:
        s->regs[index] = value & 0x00007f7f;
        break;
    case ENET_TGSR:
        /* implement clear timer flag */
        s->regs[index] &= ~(value & 0x0000000f); /* all bits W1C */
        break;
    case ENET_TCSR0:
    case ENET_TCSR1:
    case ENET_TCSR2:
    case ENET_TCSR3:
        s->regs[index] &= ~(value & 0x00000080); /* W1C bits */
        s->regs[index] &= ~0x0000007d; /* writable fields */
        s->regs[index] |= (value & 0x0000007d);
        break;
    case ENET_TCCR0:
    case ENET_TCCR1:
    case ENET_TCCR2:
    case ENET_TCCR3:
        s->regs[index] = value;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}
762
/*
 * MMIO write entry point. Handles registers common to both variants
 * (with their side effects: TX/RX kicks, soft reset, MDIO transfers,
 * MAC address updates) and delegates the rest to the variant-specific
 * writer. Falls through to imx_eth_update() at the end so any EIR/EIMR
 * change is reflected on the IRQ lines immediately.
 */
static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    IMXFECState *s = IMX_FEC(opaque);
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2;

    trace_imx_eth_write(index, imx_eth_reg_name(s, index), value);

    switch (index) {
    case ENET_EIR:
        /* Interrupt events are write-1-to-clear. */
        s->regs[index] &= ~value;
        break;
    case ENET_EIMR:
        s->regs[index] = value;
        break;
    case ENET_RDAR:
        /* RX kick: only meaningful while the MAC is enabled. */
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                imx_eth_enable_rx(s, true);
            }
        } else {
            s->regs[index] = 0;
        }
        break;
    case ENET_TDAR1:
    case ENET_TDAR2:
        /* Extra TX rings are rejected on single-ring configurations. */
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        /* fall through */
    case ENET_TDAR:
        /*
         * TX kick: transmission is performed synchronously, so TDAR is
         * set for the duration of the walk and reads back as 0 after.
         */
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        }
        s->regs[index] = 0;
        break;
    case ENET_ECR:
        if (value & ENET_ECR_RESET) {
            /* Soft reset short-circuits the rest of the write. */
            return imx_eth_reset(DEVICE(s));
        }
        s->regs[index] = value;
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            /* MAC disabled: rewind all rings to their base registers. */
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
        }
        break;
    case ENET_MMFR:
        /* MDIO frame: bit 29 distinguishes read from write transfers. */
        s->regs[index] = value;
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
                                           imx_phy_read(s,
                                                        extract32(value,
                                                                  18, 10)));
        } else {
            /* This is a write operation */
            imx_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        }
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        break;
    case ENET_MSCR:
        s->regs[index] = value & 0xfe;
        break;
    case ENET_MIBC:
        /* TODO: Implement MIB. */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        break;
    case ENET_RCR:
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode. */
        break;
    case ENET_TCR:
        /* We transmit immediately, so raise GRA immediately. */
        s->regs[index] = value;
        if (value & 1) {
            s->regs[ENET_EIR] |= ENET_INT_GRA;
        }
        break;
    case ENET_PALR:
        /* Keep the cached MAC address in sync with the register. */
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case ENET_PAUR:
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case ENET_OPD:
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        break;
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
        /* TODO: implement MAC hash filtering. */
        break;
    case ENET_TFWR:
        if (s->is_fec) {
            s->regs[index] = value & 0x3;
        } else {
            s->regs[index] = value & 0x13f;
        }
        break;
    case ENET_RDSR:
        /* Ring base registers are aligned (4-byte FEC, 8-byte ENET). */
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->rx_descriptor = s->regs[index];
        break;
    case ENET_TDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->tx_descriptor[0] = s->regs[index];
        break;
    case ENET_TDSR1:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        break;
    case ENET_TDSR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        break;
    case ENET_MRBR:
        s->regs[index] = value & 0x00003ff0;
        break;
    default:
        if (s->is_fec) {
            imx_fec_write(s, index, value);
        } else {
            imx_enet_write(s, index, value);
        }
        return;
    }

    imx_eth_update(s);
}
933
/* We can accept a packet only while RDAR reports an available RX descriptor. */
static bool imx_eth_can_receive(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    return s->regs[ENET_RDAR] != 0;
}
940
/*
 * Deliver one incoming frame to the guest via the FEC RX descriptor
 * ring. The Ethernet FCS is computed here and appended to the data,
 * matching what real hardware DMA-writes to memory. The frame is
 * scattered across descriptors of at most MRBR bytes each; the last
 * descriptor gets ENET_BD_L plus any error flags. Returns the number
 * of bytes consumed from the backend.
 */
static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXFECBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;

    trace_imx_fec_receive(size);

    if (!s->regs[ENET_RDAR]) {
        /* can_receive() should have prevented this; guest raced us. */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    crc = cpu_to_be32(crc32(~0, buf, size));
    /* Increase size by 4, loop below reads the last 4 bytes from crc_ptr. */
    size += 4;
    crc_ptr = (uint8_t *) &crc;

    /* Huge frames are truncated. */
    if (size > ENET_MAX_FRAME_SIZE) {
        size = ENET_MAX_FRAME_SIZE;
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    /* The upper half of RCR holds the maximum frame length field. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        /* Each descriptor buffer holds at most MRBR bytes. */
        buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_fec_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            /*
             * Trim the data copy so the final CRC bytes come from
             * crc_ptr instead of buf (unsigned wrap gives buf_len - (4 - size)).
             */
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
                         MEMTXATTRS_UNSPECIFIED);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
            crc_ptr += 4 - size;
        }
        /* Hand the descriptor back to software. */
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;

            trace_imx_fec_receive_last(bd.flags);

            s->regs[ENET_EIR] |= ENET_INT_RXF;
        } else {
            s->regs[ENET_EIR] |= ENET_INT_RXB;
        }
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    /* Re-evaluate RDAR and raise any pending interrupts. */
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}
1035
/*
 * Deliver one incoming frame to the guest via the extended (ENET)
 * descriptor ring. Compared to the FEC path this adds the RACC
 * SHIFT16 feature (two zero bytes inserted before the frame so the IP
 * payload is 4-byte aligned), FTRL-based truncation, the BDU status
 * bit, and per-descriptor RX interrupt gating (ENET_BD_RX_INT).
 * Returns the number of bytes consumed from the backend.
 */
static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
                                size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXENETBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;
    bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;

    trace_imx_enet_receive(size);

    if (!s->regs[ENET_RDAR]) {
        /* can_receive() should have prevented this; guest raced us. */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    crc = cpu_to_be32(crc32(~0, buf, size));
    /* Increase size by 4, loop below reads the last 4 bytes from crc_ptr. */
    size += 4;
    crc_ptr = (uint8_t *) &crc;

    if (shift16) {
        /* Account for the two padding bytes inserted below. */
        size += 2;
    }

    /* Huge frames are truncated. */
    if (size > s->regs[ENET_FTRL]) {
        size = s->regs[ENET_FTRL];
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    /* The upper half of RCR holds the maximum frame length field. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        /* Each descriptor buffer holds at most MRBR bytes. */
        buf_len = MIN(size, s->regs[ENET_MRBR]);
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_enet_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            /*
             * Trim the data copy so the final CRC bytes come from
             * crc_ptr instead of buf (unsigned wrap gives buf_len - (4 - size)).
             */
            buf_len += size - 4;
        }
        buf_addr = bd.data;

        if (shift16) {
            /*
             * If SHIFT16 bit of ENETx_RACC register is set we need to
             * align the payload to 4-byte boundary.
             */
            const uint8_t zeros[2] = { 0 };

            dma_memory_write(&address_space_memory, buf_addr, zeros,
                             sizeof(zeros), MEMTXATTRS_UNSPECIFIED);

            buf_addr += sizeof(zeros);
            buf_len -= sizeof(zeros);

            /* We only do this once per Ethernet frame */
            shift16 = false;
        }

        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
                         MEMTXATTRS_UNSPECIFIED);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
            crc_ptr += 4 - size;
        }
        /* Hand the descriptor back to software. */
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;

            trace_imx_enet_receive_last(bd.flags);

            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXF;
            }
        } else {
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXB;
            }
        }
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    /* Re-evaluate RDAR and raise any pending interrupts. */
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}
1159
imx_eth_receive(NetClientState * nc,const uint8_t * buf,size_t len)1160 static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
1161 size_t len)
1162 {
1163 IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1164
1165 if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
1166 return imx_enet_receive(nc, buf, len);
1167 } else {
1168 return imx_fec_receive(nc, buf, len);
1169 }
1170 }
1171
/* MMIO dispatch table: registers are strictly 32-bit wide accesses. */
static const MemoryRegionOps imx_eth_ops = {
    .read = imx_eth_read,
    .write = imx_eth_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1179
/* Net-layer teardown callback: drop our back-reference to the NIC. */
static void imx_eth_cleanup(NetClientState *nc)
{
    IMX_FEC(qemu_get_nic_opaque(nc))->nic = NULL;
}
1186
/* Callbacks registered with the QEMU net layer for this NIC. */
static NetClientInfo imx_eth_net_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = imx_eth_can_receive,
    .receive = imx_eth_receive,
    .cleanup = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,
};
1195
1196
/*
 * Realize: map the MMIO region, export the two IRQ lines, create and
 * wire the embedded LAN9118 PHY (its GPIO feeds imx_phy_update_irq),
 * then create the NIC backend with the configured MAC address.
 */
static void imx_eth_realize(DeviceState *dev, Error **errp)
{
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);

    /* PHY must be wired before the NIC can report link changes into it. */
    qemu_init_irq(&s->mii_irq, imx_phy_update_irq, s, 0);
    object_initialize_child(OBJECT(s), "mii", &s->mii, TYPE_LAN9118_PHY);
    if (!sysbus_realize_and_unref(SYS_BUS_DEVICE(&s->mii), errp)) {
        return;
    }
    qdev_connect_gpio_out(DEVICE(&s->mii), 0, &s->mii_irq);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          dev->id, &dev->mem_reentrancy_guard, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
1223
/*
 * User-configurable properties: number of TX rings (1 on FEC-era SoCs),
 * this device's PHY address, whether a PHY is attached at all, and an
 * optional link to another instance whose PHY shares our MDIO bus.
 */
static const Property imx_eth_properties[] = {
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    DEFINE_PROP_UINT32("phy-num", IMXFECState, phy_num, 0),
    DEFINE_PROP_BOOL("phy-connected", IMXFECState, phy_connected, true),
    DEFINE_PROP_LINK("phy-consumer", IMXFECState, phy_consumer, TYPE_IMX_FEC,
                     IMXFECState *),
};
1232
/* Shared class initializer for both the FEC and ENET QOM types. */
static void imx_eth_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = imx_eth_realize;
    dc->desc = "i.MX FEC/ENET Ethernet Controller";
    dc->vmsd = &vmstate_imx_eth;
    device_class_set_legacy_reset(dc, imx_eth_reset);
    device_class_set_props(dc, imx_eth_properties);
}
1243
/* Instance init for TYPE_IMX_FEC: select the legacy FEC register layout. */
static void imx_fec_init(Object *obj)
{
    IMX_FEC(obj)->is_fec = true;
}
1250
/* Instance init for TYPE_IMX_ENET: select the extended ENET register layout. */
static void imx_enet_init(Object *obj)
{
    IMX_FEC(obj)->is_fec = false;
}
1257
/* QOM type for the base FEC device (also the parent of TYPE_IMX_ENET). */
static const TypeInfo imx_fec_info = {
    .name = TYPE_IMX_FEC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init = imx_eth_class_init,
};
1265
/* QOM type for the ENET variant; inherits everything but instance_init. */
static const TypeInfo imx_enet_info = {
    .name = TYPE_IMX_ENET,
    .parent = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,
};
1271
/* Register both QOM types with the type system at module init. */
static void imx_eth_register_types(void)
{
    type_register_static(&imx_fec_info);
    type_register_static(&imx_enet_info);
}

type_init(imx_eth_register_types)
1279