1 /* 2 * QEMU Sun Happy Meal Ethernet emulation 3 * 4 * Copyright (c) 2017 Mark Cave-Ayland 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 
 */

#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "hw/net/mii.h"
#include "net/net.h"
#include "qemu/module.h"
#include "net/checksum.h"
#include "net/eth.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#include "qom/object.h"

/* Total size of the device's BAR 0 register window */
#define HME_REG_SIZE                   0x8000

/* SEB (global/interrupt) register bank */
#define HME_SEB_REG_SIZE               0x2000

#define HME_SEBI_RESET                 0x0
#define HME_SEB_RESET_ETX              0x1
#define HME_SEB_RESET_ERX              0x2

#define HME_SEBI_STAT                  0x100
#define HME_SEBI_STAT_LINUXBUG         0x108
#define HME_SEB_STAT_RXTOHOST          0x10000
#define HME_SEB_STAT_NORXD             0x20000
#define HME_SEB_STAT_MIFIRQ            0x800000
#define HME_SEB_STAT_HOSTTOTX          0x1000000
#define HME_SEB_STAT_TXALL             0x2000000

#define HME_SEBI_IMASK                 0x104
#define HME_SEBI_IMASK_LINUXBUG        0x10c

/* ETX (transmit DMA) register bank */
#define HME_ETX_REG_SIZE               0x2000

#define HME_ETXI_PENDING               0x0

#define HME_ETXI_RING                  0x8
#define HME_ETXI_RING_ADDR             0xffffff00
#define HME_ETXI_RING_OFFSET           0xff

#define HME_ETXI_RSIZE                 0x2c

/* ERX (receive DMA) register bank */
#define HME_ERX_REG_SIZE               0x2000

#define HME_ERXI_CFG                   0x0
#define HME_ERX_CFG_RINGSIZE           0x600
#define HME_ERX_CFG_RINGSIZE_SHIFT     9
#define HME_ERX_CFG_BYTEOFFSET         0x38
#define HME_ERX_CFG_BYTEOFFSET_SHIFT   3
#define HME_ERX_CFG_CSUMSTART          0x7f0000
#define HME_ERX_CFG_CSUMSHIFT          16

#define HME_ERXI_RING                  0x4
#define HME_ERXI_RING_ADDR             0xffffff00
#define HME_ERXI_RING_OFFSET           0xff

/* MAC register bank */
#define HME_MAC_REG_SIZE               0x1000

#define HME_MACI_TXCFG                 0x20c
#define HME_MAC_TXCFG_ENABLE           0x1

#define HME_MACI_RXCFG                 0x30c
#define HME_MAC_RXCFG_ENABLE           0x1
#define HME_MAC_RXCFG_PMISC            0x40
#define HME_MAC_RXCFG_HENABLE          0x800

#define HME_MACI_MACADDR2              0x318
#define HME_MACI_MACADDR1              0x31c
#define HME_MACI_MACADDR0              0x320

#define HME_MACI_HASHTAB3              0x340
#define HME_MACI_HASHTAB2              0x344
#define HME_MACI_HASHTAB1              0x348
#define HME_MACI_HASHTAB0              0x34c

/* MIF (MII management interface) register bank */
#define HME_MIF_REG_SIZE               0x20

#define HME_MIFI_FO                    0xc
#define HME_MIF_FO_ST                  0xc0000000
#define HME_MIF_FO_ST_SHIFT            30
#define HME_MIF_FO_OPC                 0x30000000
#define HME_MIF_FO_OPC_SHIFT           28
#define HME_MIF_FO_PHYAD               0x0f800000
#define HME_MIF_FO_PHYAD_SHIFT         23
#define HME_MIF_FO_REGAD               0x007c0000
#define HME_MIF_FO_REGAD_SHIFT         18
#define HME_MIF_FO_TAMSB               0x20000
#define HME_MIF_FO_TALSB               0x10000
#define HME_MIF_FO_DATA                0xffff

#define HME_MIFI_CFG                   0x10
#define HME_MIF_CFG_MDI0               0x100
#define HME_MIF_CFG_MDI1               0x200

#define HME_MIFI_IMASK                 0x14

#define HME_MIFI_STAT                  0x18


/* Wired HME PHY addresses */
#define HME_PHYAD_INTERNAL    1
#define HME_PHYAD_EXTERNAL    0

/* MII frame field values used in the MIF frame/output register */
#define MII_COMMAND_START     0x1
#define MII_COMMAND_READ      0x2
#define MII_COMMAND_WRITE     0x1

#define TYPE_SUNHME "sunhme"
typedef struct SunHMEState SunHMEState;
#define SUNHME(obj) OBJECT_CHECK(SunHMEState, (obj), TYPE_SUNHME)

/* Maximum size of buffer */
#define HME_FIFO_SIZE         0x800

/* Size of TX/RX descriptor */
#define HME_DESC_SIZE         0x8

/*
 * Descriptor status bits. Note that HME_XD_OFL (RX overflow) and
 * HME_XD_SOP (TX start-of-packet) intentionally share the same bit:
 * one applies to RX descriptors, the other to TX descriptors.
 */
#define HME_XD_OWN            0x80000000
#define HME_XD_OFL            0x40000000
#define HME_XD_SOP            0x40000000
#define HME_XD_EOP            0x20000000
#define HME_XD_RXLENMSK       0x3fff0000
#define HME_XD_RXLENSHIFT     16
#define HME_XD_RXCKSUM        0xffff
#define HME_XD_TXLENMSK       0x00001fff
#define HME_XD_TXCKSUM        0x10000000
#define HME_XD_TXCSSTUFF      0xff00000
#define HME_XD_TXCSSTUFFSHIFT 20
#define HME_XD_TXCSSTART      0xfc000
#define HME_XD_TXCSSTARTSHIFT 14

#define HME_MII_REGS_SIZE     0x20

struct SunHMEState {
    /*< private >*/
    PCIDevice parent_obj;

    NICState *nic;
    NICConf conf;

    /* Container region for BAR 0, with one subregion per register bank */
    MemoryRegion hme;
    MemoryRegion sebreg;
    MemoryRegion etxreg;
    MemoryRegion erxreg;
    MemoryRegion macreg;
    MemoryRegion mifreg;

    /* Register banks, stored as 32-bit words indexed by (offset >> 2) */
    uint32_t sebregs[HME_SEB_REG_SIZE >> 2];
    uint32_t etxregs[HME_ETX_REG_SIZE >> 2];
    uint32_t erxregs[HME_ERX_REG_SIZE >> 2];
    uint32_t macregs[HME_MAC_REG_SIZE >> 2];
    uint32_t mifregs[HME_MIF_REG_SIZE >> 2];

    /* Internal PHY (MII) registers, accessed via the MIF frame register */
    uint16_t miiregs[HME_MII_REGS_SIZE];
};

static Property sunhme_properties[] = {
    DEFINE_NIC_PROPERTIES(SunHMEState, conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void sunhme_reset_tx(SunHMEState *s)
{
    /* Indicate TX reset complete */
    s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ETX;
}

static void sunhme_reset_rx(SunHMEState *s)
{
    /* Indicate RX reset complete */
    s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ERX;
}

/*
 * Recompute the PCI interrupt line level from the SEB status/mask pair,
 * folding the (separately masked) MIF status in as HME_SEB_STAT_MIFIRQ.
 */
static void sunhme_update_irq(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int level;

    /* MIF interrupt mask (16-bit) */
    uint32_t mifmask = ~(s->mifregs[HME_MIFI_IMASK >> 2]) & 0xffff;
    uint32_t mif = s->mifregs[HME_MIFI_STAT >> 2] & mifmask;

    /* Main SEB interrupt mask (include MIF status from above) */
    uint32_t sebmask = ~(s->sebregs[HME_SEBI_IMASK >> 2]) &
                       ~HME_SEB_STAT_MIFIRQ;
    uint32_t seb = s->sebregs[HME_SEBI_STAT >> 2] & sebmask;
    if (mif) {
        seb |= HME_SEB_STAT_MIFIRQ;
    }

    level = (seb ? 1 : 0);
    trace_sunhme_update_irq(mifmask, mif, sebmask, seb, level);

    pci_set_irq(d, level);
}

/* MMIO write handler for the SEB register bank */
static void sunhme_seb_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_seb_write(addr, val);

    /* Handle buggy Linux drivers before 4.13 which have
       the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
    switch (addr) {
    case HME_SEBI_STAT_LINUXBUG:
        addr = HME_SEBI_STAT;
        break;
    case HME_SEBI_IMASK_LINUXBUG:
        addr = HME_SEBI_IMASK;
        break;
    default:
        break;
    }

    switch (addr) {
    case HME_SEBI_RESET:
        /* TX/RX reset completes instantly; the reset bits autoclear */
        if (val & HME_SEB_RESET_ETX) {
            sunhme_reset_tx(s);
        }
        if (val & HME_SEB_RESET_ERX) {
            sunhme_reset_rx(s);
        }
        /* Store back the (cleared) reset register value, not the write */
        val = s->sebregs[HME_SEBI_RESET >> 2];
        break;
    }

    s->sebregs[addr >> 2] = val;
}

/* MMIO read handler for the SEB register bank */
static uint64_t sunhme_seb_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    /* Handle buggy Linux drivers before 4.13 which have
       the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
    switch (addr) {
    case HME_SEBI_STAT_LINUXBUG:
        addr = HME_SEBI_STAT;
        break;
    case HME_SEBI_IMASK_LINUXBUG:
        addr = HME_SEBI_IMASK;
        break;
    default:
        break;
    }

    val = s->sebregs[addr >> 2];

    switch (addr) {
    case HME_SEBI_STAT:
        /* Autoclear status (except MIF) */
        s->sebregs[HME_SEBI_STAT >> 2] &= HME_SEB_STAT_MIFIRQ;
        sunhme_update_irq(s);
        break;
    }

    trace_sunhme_seb_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_seb_ops = {
    .read = sunhme_seb_read,
    .write = sunhme_seb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_transmit(SunHMEState *s);

/* MMIO write handler for the ETX (TX DMA) register bank */
static void sunhme_etx_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_etx_write(addr, val);

    switch (addr) {
    case HME_ETXI_PENDING:
        /* Any non-zero write kicks the transmit ring walker */
        if (val) {
            sunhme_transmit(s);
        }
        break;
    }

    s->etxregs[addr >> 2] = val;
}

/* MMIO read handler for the ETX register bank (plain register file) */
static uint64_t sunhme_etx_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->etxregs[addr >> 2];

    trace_sunhme_etx_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_etx_ops = {
    .read = sunhme_etx_read,
    .write = sunhme_etx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

/* MMIO write handler for the ERX (RX DMA) register bank (plain storage) */
static void sunhme_erx_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_erx_write(addr, val);

    s->erxregs[addr >> 2] = val;
}

/* MMIO read handler for the ERX register bank */
static uint64_t sunhme_erx_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->erxregs[addr >> 2];

    trace_sunhme_erx_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_erx_ops = {
    .read = sunhme_erx_read,
    .write = sunhme_erx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

/* MMIO write handler for the MAC register bank */
static void sunhme_mac_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t oldval = s->macregs[addr >> 2];

    trace_sunhme_mac_write(addr, val);

    s->macregs[addr >> 2] = val;

    switch (addr) {
    case HME_MACI_RXCFG:
        /* RX enable 0 -> 1: drain anything the net layer queued meanwhile */
        if (!(oldval & HME_MAC_RXCFG_ENABLE) &&
             (val & HME_MAC_RXCFG_ENABLE)) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;
    }
}

/* MMIO read handler for the MAC register bank */
static uint64_t sunhme_mac_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->macregs[addr >> 2];

    trace_sunhme_mac_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_mac_ops = {
    .read = sunhme_mac_read,
    .write = sunhme_mac_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

/*
 * Write an internal PHY (MII) register, emulating the autoclearing
 * BMCR reset/autoneg-restart bits and reporting autonegotiation results.
 */
static void sunhme_mii_write(SunHMEState *s, uint8_t reg, uint16_t data)
{
    trace_sunhme_mii_write(reg, data);

    switch (reg) {
    case MII_BMCR:
        if (data & MII_BMCR_RESET) {
            /* Autoclear reset bit, enable auto negotiation */
            data &= ~MII_BMCR_RESET;
            data |= MII_BMCR_AUTOEN;
        }
        if (data & MII_BMCR_ANRESTART) {
            /* Autoclear auto negotiation restart */
            data &= ~MII_BMCR_ANRESTART;

            /* Indicate negotiation complete */
            s->miiregs[MII_BMSR] |= MII_BMSR_AN_COMP;

            if (!qemu_get_queue(s->nic)->link_down) {
                s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
                s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
            }
        }
        break;
    }

    s->miiregs[reg] = data;
}

/* Read an internal PHY (MII) register */
static uint16_t sunhme_mii_read(SunHMEState *s, uint8_t reg)
{
    uint16_t data = s->miiregs[reg];

    trace_sunhme_mii_read(reg, data);

    return data;
}

/*
 * MMIO write handler for the MIF register bank. Writes to HME_MIFI_FO
 * carry a complete MII management frame which is decoded and executed
 * immediately; only the internal PHY is implemented.
 */
static void sunhme_mif_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint8_t cmd, reg;
    uint16_t data;

    trace_sunhme_mif_write(addr, val);

    switch (addr) {
    case HME_MIFI_CFG:
        /* Mask the read-only bits */
        val &= ~(HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
        val |= s->mifregs[HME_MIFI_CFG >> 2] &
               (HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
        break;
    case HME_MIFI_FO:
        /* Detect start of MII command */
        if ((val & HME_MIF_FO_ST) >> HME_MIF_FO_ST_SHIFT
                != MII_COMMAND_START) {
            /* Not a valid frame: still signal turnaround so drivers
               polling TALSB don't spin forever */
            val |= HME_MIF_FO_TALSB;
            break;
        }

        /* Internal phy only */
        if ((val & HME_MIF_FO_PHYAD) >> HME_MIF_FO_PHYAD_SHIFT
                != HME_PHYAD_INTERNAL) {
            val |= HME_MIF_FO_TALSB;
            break;
        }

        cmd = (val & HME_MIF_FO_OPC) >> HME_MIF_FO_OPC_SHIFT;
        reg = (val & HME_MIF_FO_REGAD) >> HME_MIF_FO_REGAD_SHIFT;
        data = (val & HME_MIF_FO_DATA);

        switch (cmd) {
        case MII_COMMAND_WRITE:
            sunhme_mii_write(s, reg, data);
            break;

        case MII_COMMAND_READ:
            /* Place the PHY register value into the frame data field */
            val &= ~HME_MIF_FO_DATA;
            val |= sunhme_mii_read(s, reg);
            break;
        }

        /* Turnaround LSB set indicates frame completion */
        val |= HME_MIF_FO_TALSB;
        break;
    }

    s->mifregs[addr >> 2] = val;
}

/* MMIO read handler for the MIF register bank */
static uint64_t sunhme_mif_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->mifregs[addr >> 2];

    switch (addr) {
    case HME_MIFI_STAT:
        /* Autoclear MIF interrupt status */
        s->mifregs[HME_MIFI_STAT >> 2] = 0;
        sunhme_update_irq(s);
        break;
    }

    trace_sunhme_mif_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_mif_ops = {
    .read = sunhme_mif_read,
    .write = sunhme_mif_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

/* Hand a fully assembled frame to the QEMU net layer */
static void sunhme_transmit_frame(SunHMEState *s, uint8_t *buf, int size)
{
    qemu_send_packet(qemu_get_queue(s->nic), buf, size);
}

/* Number of TX descriptors configured in the ETX ring-size register */
static inline int sunhme_get_tx_ring_count(SunHMEState *s)
{
    return (s->etxregs[HME_ETXI_RSIZE >> 2] + 1) << 4;
}

/* Current TX descriptor index (low byte of the ring register) */
static inline int sunhme_get_tx_ring_nr(SunHMEState *s)
{
    return s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_OFFSET;
}

/* Store the TX descriptor index back into the ring register */
static inline void sunhme_set_tx_ring_nr(SunHMEState *s, int i)
{
    uint32_t ring = s->etxregs[HME_ETXI_RING >> 2] & ~HME_ETXI_RING_OFFSET;
    ring |= i & HME_ETXI_RING_OFFSET;

    s->etxregs[HME_ETXI_RING >> 2] = ring;
}

/*
 * Walk the TX descriptor ring. While descriptors are owned by the device
 * (HME_XD_OWN set), DMA each fragment into a local FIFO buffer,
 * accumulate/stuff the TX checksum when requested, and transmit the
 * frame once the EOP descriptor is reached. Each processed descriptor is
 * handed back to the driver and HOSTTOTX is raised; TXALL is raised when
 * the walk stops.
 */
static void sunhme_transmit(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t tb, addr;
    uint32_t intstatus, status, buffer, sum = 0;
    int cr, nr, len, xmit_pos, csum_offset = 0, csum_stuff_offset = 0;
    uint16_t csum = 0;
    uint8_t xmit_buffer[HME_FIFO_SIZE];

    tb = s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_ADDR;
    nr = sunhme_get_tx_ring_count(s);
    cr = sunhme_get_tx_ring_nr(s);

    /* Each 8-byte descriptor is a (status, buffer address) pair */
    pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    xmit_pos = 0;
    while (status & HME_XD_OWN) {
        trace_sunhme_tx_desc(buffer, status, cr, nr);

        /* Copy data into transmit buffer */
        addr = buffer;
        len = status & HME_XD_TXLENMSK;

        /* Clamp so a fragment cannot overrun the local FIFO buffer */
        if (xmit_pos + len > HME_FIFO_SIZE) {
            len = HME_FIFO_SIZE - xmit_pos;
        }

        pci_dma_read(d, addr, &xmit_buffer[xmit_pos], len);
        xmit_pos += len;

        /* Detect start of packet for TX checksum */
        if (status & HME_XD_SOP) {
            sum = 0;
            csum_offset = (status & HME_XD_TXCSSTART) >> HME_XD_TXCSSTARTSHIFT;
            csum_stuff_offset = (status & HME_XD_TXCSSTUFF) >>
                                HME_XD_TXCSSTUFFSHIFT;
        }

        if (status & HME_XD_TXCKSUM) {
            /* Only start calculation from csum_offset */
            if (xmit_pos - len <= csum_offset && xmit_pos > csum_offset) {
                /* csum_offset falls inside this fragment: sum from there */
                sum += net_checksum_add(xmit_pos - csum_offset,
                                        xmit_buffer + csum_offset);
                trace_sunhme_tx_xsum_add(csum_offset, xmit_pos - csum_offset);
            } else {
                /* Later fragment: sum the whole fragment */
                sum += net_checksum_add(len, xmit_buffer + xmit_pos - len);
                trace_sunhme_tx_xsum_add(xmit_pos - len, len);
            }
        }

        /* Detect end of packet for TX checksum */
        if (status & HME_XD_EOP) {
            /* Stuff the checksum if required */
            if (status & HME_XD_TXCKSUM) {
                csum = net_checksum_finish(sum);
                stw_be_p(xmit_buffer + csum_stuff_offset, csum);
                trace_sunhme_tx_xsum_stuff(csum, csum_stuff_offset);
            }

            /* Only actually send if the MAC transmitter is enabled */
            if (s->macregs[HME_MACI_TXCFG >> 2] & HME_MAC_TXCFG_ENABLE) {
                sunhme_transmit_frame(s, xmit_buffer, xmit_pos);
                trace_sunhme_tx_done(xmit_pos);
            }
        }

        /* Update status */
        status &= ~HME_XD_OWN;
        pci_dma_write(d, tb + cr * HME_DESC_SIZE, &status, 4);

        /* Move onto next descriptor */
        cr++;
        if (cr >= nr) {
            cr = 0;
        }
        sunhme_set_tx_ring_nr(s, cr);

        pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
        pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

        /* Indicate TX complete */
        intstatus = s->sebregs[HME_SEBI_STAT >> 2];
        intstatus |= HME_SEB_STAT_HOSTTOTX;
        s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

        /* Autoclear TX pending */
        s->etxregs[HME_ETXI_PENDING >> 2] = 0;

        sunhme_update_irq(s);
    }

    /* TX FIFO now clear */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_TXALL;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;
    sunhme_update_irq(s);
}

/* Net layer callback: packets can be delivered while MAC RX is enabled */
static bool sunhme_can_receive(NetClientState *nc)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);

    return !!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE);
}

/* Net layer callback: mirror link state into the PHY registers and
   raise a MIF interrupt so the guest notices the change */
static void sunhme_link_status_changed(NetClientState *nc)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);

    if (nc->link_down) {
        s->miiregs[MII_ANLPAR] &= ~MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] &= ~MII_BMSR_LINK_ST;
    } else {
        s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
    }

    /* Exact bits unknown */
    s->mifregs[HME_MIFI_STAT >> 2] = 0xffff;
    sunhme_update_irq(s);
}

/* Number of RX descriptors selected by the 2-bit ring-size field */
static inline int sunhme_get_rx_ring_count(SunHMEState *s)
{
    uint32_t rings = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_RINGSIZE)
                      >> HME_ERX_CFG_RINGSIZE_SHIFT;

    switch (rings) {
    case 0:
        return 32;
    case 1:
        return 64;
    case 2:
        return 128;
    case 3:
        return 256;
    }

    return 0;
}

/* Current RX descriptor index (low byte of the ring register) */
static inline int sunhme_get_rx_ring_nr(SunHMEState *s)
{
    return s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_OFFSET;
}

/* Store the RX descriptor index back into the ring register */
static inline void sunhme_set_rx_ring_nr(SunHMEState *s, int i)
{
    uint32_t ring = s->erxregs[HME_ERXI_RING >> 2] & ~HME_ERXI_RING_OFFSET;
    ring |= i & HME_ERXI_RING_OFFSET;

    s->erxregs[HME_ERXI_RING >> 2] = ring;
}

/* Minimum Ethernet frame length: shorter packets are zero-padded */
#define MIN_BUF_SIZE 60

/*
 * Net layer callback: deliver an incoming packet to the guest. Applies
 * the MAC address / broadcast / multicast-hash filters, DMAs the packet
 * into the current RX descriptor's buffer, writes back length and
 * checksum, advances the ring and raises RXTOHOST. Returns the number
 * of bytes consumed, or -1 when the packet is rejected or no descriptor
 * is available.
 */
static ssize_t sunhme_receive(NetClientState *nc, const uint8_t *buf,
                              size_t size)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t rb, addr;
    uint32_t intstatus, status, buffer, buffersize, sum;
    uint16_t csum;
    uint8_t buf1[60];
    int nr, cr, len, rxoffset, csum_offset;

    trace_sunhme_rx_incoming(size);

    /* Do nothing if MAC RX disabled */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE)) {
        return 0;
    }

    trace_sunhme_rx_filter_destmac(buf[0], buf[1], buf[2],
                                   buf[3], buf[4], buf[5]);

    /* Check destination MAC address */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_PMISC)) {
        /* Try and match local MAC address */
        if (((s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff00) >> 8) == buf[0] &&
             (s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff) == buf[1] &&
            ((s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff00) >> 8) == buf[2] &&
             (s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff) == buf[3] &&
            ((s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff00) >> 8) == buf[4] &&
             (s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff) == buf[5]) {
            /* Matched local MAC address */
            trace_sunhme_rx_filter_local_match();
        } else if (buf[0] == 0xff && buf[1] == 0xff && buf[2] == 0xff &&
                   buf[3] == 0xff && buf[4] == 0xff && buf[5] == 0xff) {
            /* Matched broadcast address */
            trace_sunhme_rx_filter_bcast_match();
        } else if (s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_HENABLE) {
            /* Didn't match local address, check hash filter */
            int mcast_idx = net_crc32_le(buf, ETH_ALEN) >> 26;
            /* HASHTAB3..0 sit at descending register offsets, so index
               down from HASHTAB0 by 16-bit groups of the hash value */
            if (!(s->macregs[(HME_MACI_HASHTAB0 >> 2) - (mcast_idx >> 4)] &
                    (1 << (mcast_idx & 0xf)))) {
                /* Didn't match hash filter */
                trace_sunhme_rx_filter_hash_nomatch();
                trace_sunhme_rx_filter_reject();
                return -1;
            } else {
                trace_sunhme_rx_filter_hash_match();
            }
        } else {
            /* Not for us */
            trace_sunhme_rx_filter_reject();
            return -1;
        }
    } else {
        trace_sunhme_rx_filter_promisc_match();
    }

    trace_sunhme_rx_filter_accept();

    /* If too small buffer, then expand it */
    if (size < MIN_BUF_SIZE) {
        memcpy(buf1, buf, size);
        memset(buf1 + size, 0, MIN_BUF_SIZE - size);
        buf = buf1;
        size = MIN_BUF_SIZE;
    }

    rb = s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_ADDR;
    nr = sunhme_get_rx_ring_count(s);
    cr = sunhme_get_rx_ring_nr(s);

    pci_dma_read(d, rb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, rb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    /* If we don't own the current descriptor then indicate overflow error */
    if (!(status & HME_XD_OWN)) {
        s->sebregs[HME_SEBI_STAT >> 2] |= HME_SEB_STAT_NORXD;
        sunhme_update_irq(s);
        trace_sunhme_rx_norxd();
        return -1;
    }

    /* Driver-configured offset of packet data within the RX buffer */
    rxoffset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_BYTEOFFSET) >>
                HME_ERX_CFG_BYTEOFFSET_SHIFT;

    addr = buffer + rxoffset;
    buffersize = (status & HME_XD_RXLENMSK) >> HME_XD_RXLENSHIFT;

    /* Detect receive overflow */
    len = size;
    if (size > buffersize) {
        status |= HME_XD_OFL;
        len = buffersize;
    }

    pci_dma_write(d, addr, buf, len);

    trace_sunhme_rx_desc(buffer, rxoffset, status, len, cr, nr);

    /* Calculate the receive checksum */
    /* CSUMSTART is in 16-bit words, hence the extra << 1 to get bytes */
    csum_offset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_CSUMSTART) >>
                  HME_ERX_CFG_CSUMSHIFT << 1;
    sum = 0;
    sum += net_checksum_add(len - csum_offset, (uint8_t *)buf + csum_offset);
    csum = net_checksum_finish(sum);

    trace_sunhme_rx_xsum_calc(csum);

    /* Update status */
    status &= ~HME_XD_OWN;
    status &= ~HME_XD_RXLENMSK;
    status |= len << HME_XD_RXLENSHIFT;
    status &= ~HME_XD_RXCKSUM;
    status |= csum;

    pci_dma_write(d, rb + cr * HME_DESC_SIZE, &status, 4);

    cr++;
    if (cr >= nr) {
        cr = 0;
    }

    sunhme_set_rx_ring_nr(s, cr);

    /* Indicate RX complete */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_RXTOHOST;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

    sunhme_update_irq(s);

    return len;
}

static NetClientInfo net_sunhme_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = sunhme_can_receive,
    .receive = sunhme_receive,
    .link_status_changed = sunhme_link_status_changed,
};

/* PCI realize: map the register banks into BAR 0 and create the NIC */
static void sunhme_realize(PCIDevice *pci_dev, Error **errp)
{
    SunHMEState *s = SUNHME(pci_dev);
    DeviceState *d = DEVICE(pci_dev);
    uint8_t *pci_conf;

    pci_conf = pci_dev->config;
    pci_conf[PCI_INTERRUPT_PIN] = 1;    /* interrupt pin A */

    memory_region_init(&s->hme, OBJECT(pci_dev), "sunhme", HME_REG_SIZE);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->hme);

    memory_region_init_io(&s->sebreg, OBJECT(pci_dev), &sunhme_seb_ops, s,
                          "sunhme.seb", HME_SEB_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0, &s->sebreg);

    memory_region_init_io(&s->etxreg, OBJECT(pci_dev), &sunhme_etx_ops, s,
                          "sunhme.etx", HME_ETX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x2000, &s->etxreg);

    memory_region_init_io(&s->erxreg, OBJECT(pci_dev), &sunhme_erx_ops, s,
                          "sunhme.erx", HME_ERX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x4000, &s->erxreg);

    memory_region_init_io(&s->macreg, OBJECT(pci_dev), &sunhme_mac_ops, s,
                          "sunhme.mac", HME_MAC_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x6000, &s->macreg);

    memory_region_init_io(&s->mifreg, OBJECT(pci_dev), &sunhme_mif_ops, s,
                          "sunhme.mif", HME_MIF_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x7000, &s->mifreg);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&net_sunhme_info, &s->conf,
                          object_get_typename(OBJECT(d)), d->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

/* Instance init: expose the standard bootindex property */
static void sunhme_instance_init(Object *obj)
{
    SunHMEState *s = SUNHME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(obj));
}

/* Device reset: set up the internal PHY and default interrupt masks */
static void sunhme_reset(DeviceState *ds)
{
    SunHMEState *s = SUNHME(ds);

    /* Configure internal transceiver */
    s->mifregs[HME_MIFI_CFG >> 2] |= HME_MIF_CFG_MDI0;

    /* Advertise auto, 100Mbps FD */
    s->miiregs[MII_ANAR] = MII_ANAR_TXFD;
    s->miiregs[MII_BMSR] = MII_BMSR_AUTONEG | MII_BMSR_100TX_FD |
                           MII_BMSR_AN_COMP;

    if (!qemu_get_queue(s->nic)->link_down) {
        s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
    }

    /* Set manufacturer */
    s->miiregs[MII_PHYID1] = DP83840_PHYID1;
    s->miiregs[MII_PHYID2] = DP83840_PHYID2;

    /* Configure default interrupt mask */
    s->mifregs[HME_MIFI_IMASK >> 2] = 0xffff;
    s->sebregs[HME_SEBI_IMASK >> 2] = 0xff7fffff;
}

static const VMStateDescription vmstate_hme = {
    .name = "sunhme",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, SunHMEState),
        VMSTATE_MACADDR(conf.macaddr, SunHMEState),
        VMSTATE_UINT32_ARRAY(sebregs, SunHMEState, (HME_SEB_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(etxregs, SunHMEState, (HME_ETX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(erxregs, SunHMEState, (HME_ERX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(macregs, SunHMEState, (HME_MAC_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(mifregs, SunHMEState, (HME_MIF_REG_SIZE >> 2)),
        VMSTATE_UINT16_ARRAY(miiregs, SunHMEState, HME_MII_REGS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};

static void sunhme_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = sunhme_realize;
    k->vendor_id = PCI_VENDOR_ID_SUN;
    k->device_id = PCI_DEVICE_ID_SUN_HME;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->vmsd = &vmstate_hme;
    dc->reset = sunhme_reset;
    device_class_set_props(dc, sunhme_properties);
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}

static const TypeInfo sunhme_info = {
    .name          = TYPE_SUNHME,
    .parent        = TYPE_PCI_DEVICE,
    .class_init    = sunhme_class_init,
    .instance_size = sizeof(SunHMEState),
    .instance_init = sunhme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    }
};

static void sunhme_register_types(void)
{
    type_register_static(&sunhme_info);
}

type_init(sunhme_register_types)