/*
 * QEMU Sun Happy Meal Ethernet emulation
 *
 * Copyright (c) 2017 Mark Cave-Ayland
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/pci/pci_device.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "hw/net/mii.h"
#include "net/net.h"
#include "qemu/module.h"
#include "net/checksum.h"
#include "net/eth.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#include "qom/object.h"

#define HME_REG_SIZE 0x8000

#define HME_SEB_REG_SIZE 0x2000

#define HME_SEBI_RESET 0x0
#define HME_SEB_RESET_ETX 0x1
#define HME_SEB_RESET_ERX 0x2

#define HME_SEBI_STAT 0x100
#define HME_SEBI_STAT_LINUXBUG 0x108
#define HME_SEB_STAT_RXTOHOST 0x10000
#define HME_SEB_STAT_NORXD 0x20000
#define HME_SEB_STAT_MIFIRQ 0x800000
#define HME_SEB_STAT_HOSTTOTX 0x1000000
#define HME_SEB_STAT_TXALL 0x2000000

#define HME_SEBI_IMASK 0x104
#define HME_SEBI_IMASK_LINUXBUG 0x10c
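
/*
 * The 0x8000 register BAR is split into per-block windows, mapped in
 * sunhme_realize() below: SEB at 0x0, ETX at 0x2000, ERX at 0x4000,
 * MAC at 0x6000 and MIF at 0x7000. The HME_*I_* offsets that follow are
 * relative to the start of their respective block.
 */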

#define HME_ETX_REG_SIZE 0x2000

#define HME_ETXI_PENDING 0x0

#define HME_ETXI_RING 0x8
#define HME_ETXI_RING_ADDR 0xffffff00
#define HME_ETXI_RING_OFFSET 0xff

#define HME_ETXI_RSIZE 0x2c

#define HME_ERX_REG_SIZE 0x2000

#define HME_ERXI_CFG 0x0
#define HME_ERX_CFG_RINGSIZE 0x600
#define HME_ERX_CFG_RINGSIZE_SHIFT 9
#define HME_ERX_CFG_BYTEOFFSET 0x38
#define HME_ERX_CFG_BYTEOFFSET_SHIFT 3
#define HME_ERX_CFG_CSUMSTART 0x7f0000
#define HME_ERX_CFG_CSUMSHIFT 16

#define HME_ERXI_RING 0x4
#define HME_ERXI_RING_ADDR 0xffffff00
#define HME_ERXI_RING_OFFSET 0xff

#define HME_MAC_REG_SIZE 0x1000

#define HME_MACI_TXCFG 0x20c
#define HME_MAC_TXCFG_ENABLE 0x1

#define HME_MACI_RXCFG 0x30c
#define HME_MAC_RXCFG_ENABLE 0x1
#define HME_MAC_RXCFG_PMISC 0x40
#define HME_MAC_RXCFG_HENABLE 0x800

#define HME_MACI_MACADDR2 0x318
#define HME_MACI_MACADDR1 0x31c
#define HME_MACI_MACADDR0 0x320

#define HME_MACI_HASHTAB3 0x340
#define HME_MACI_HASHTAB2 0x344
#define HME_MACI_HASHTAB1 0x348
#define HME_MACI_HASHTAB0 0x34c

#define HME_MIF_REG_SIZE 0x20

#define HME_MIFI_FO 0xc
#define HME_MIF_FO_ST 0xc0000000
#define HME_MIF_FO_ST_SHIFT 30
#define HME_MIF_FO_OPC 0x30000000
#define HME_MIF_FO_OPC_SHIFT 28
#define HME_MIF_FO_PHYAD 0x0f800000
#define HME_MIF_FO_PHYAD_SHIFT 23
#define HME_MIF_FO_REGAD 0x007c0000
#define HME_MIF_FO_REGAD_SHIFT 18
#define HME_MIF_FO_TAMSB 0x20000
#define HME_MIF_FO_TALSB 0x10000
#define HME_MIF_FO_DATA 0xffff

#define HME_MIFI_CFG 0x10
#define HME_MIF_CFG_MDI0 0x100
#define HME_MIF_CFG_MDI1 0x200

#define HME_MIFI_IMASK 0x14

#define HME_MIFI_STAT 0x18


/* Wired HME PHY addresses */
#define HME_PHYAD_INTERNAL 1
#define HME_PHYAD_EXTERNAL 0

#define MII_COMMAND_START 0x1
#define MII_COMMAND_READ 0x2
#define MII_COMMAND_WRITE 0x1

#define TYPE_SUNHME "sunhme"
OBJECT_DECLARE_SIMPLE_TYPE(SunHMEState, SUNHME)

/* Maximum size of buffer */
#define HME_FIFO_SIZE 0x800

/* Size of TX/RX descriptor */
#define HME_DESC_SIZE 0x8

#define HME_XD_OWN 0x80000000
#define HME_XD_OFL 0x40000000
#define HME_XD_SOP 0x40000000
#define HME_XD_EOP 0x20000000
#define HME_XD_RXLENMSK 0x3fff0000
#define HME_XD_RXLENSHIFT 16
#define HME_XD_RXCKSUM 0xffff
#define HME_XD_TXLENMSK 0x00001fff
#define HME_XD_TXCKSUM 0x10000000
#define HME_XD_TXCSSTUFF 0xff00000
#define HME_XD_TXCSSTUFFSHIFT 20
#define HME_XD_TXCSSTART 0xfc000
#define HME_XD_TXCSSTARTSHIFT 14

#define HME_MII_REGS_SIZE 0x20

struct SunHMEState {
    /*< private >*/
    PCIDevice parent_obj;

    NICState *nic;
    NICConf conf;

    MemoryRegion hme;
    MemoryRegion sebreg;
    MemoryRegion etxreg;
    MemoryRegion erxreg;
    MemoryRegion macreg;
    MemoryRegion mifreg;

    uint32_t sebregs[HME_SEB_REG_SIZE >> 2];
    uint32_t etxregs[HME_ETX_REG_SIZE >> 2];
    uint32_t erxregs[HME_ERX_REG_SIZE >> 2];
    uint32_t macregs[HME_MAC_REG_SIZE >> 2];
    uint32_t mifregs[HME_MIF_REG_SIZE >> 2];

    uint16_t miiregs[HME_MII_REGS_SIZE];
};

static const Property sunhme_properties[] = {
    DEFINE_NIC_PROPERTIES(SunHMEState, conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void sunhme_reset_tx(SunHMEState *s)
{
    /* Indicate TX reset complete */
    s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ETX;
}

static void sunhme_reset_rx(SunHMEState *s)
{
    /* Indicate RX reset complete */
    s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ERX;
}

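/*
 * Interrupt aggregation: MIF status bits left unmasked by the MIF IMASK are
 * folded into the SEB status word as HME_SEB_STAT_MIFIRQ, and any SEB status
 * bit left unmasked by the SEB IMASK asserts the PCI interrupt line.
 */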
static void sunhme_update_irq(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int level;

    /* MIF interrupt mask (16-bit) */
    uint32_t mifmask = ~(s->mifregs[HME_MIFI_IMASK >> 2]) & 0xffff;
    uint32_t mif = s->mifregs[HME_MIFI_STAT >> 2] & mifmask;

    /* Main SEB interrupt mask (include MIF status from above) */
    uint32_t sebmask = ~(s->sebregs[HME_SEBI_IMASK >> 2]) &
                       ~HME_SEB_STAT_MIFIRQ;
    uint32_t seb = s->sebregs[HME_SEBI_STAT >> 2] & sebmask;
    if (mif) {
        seb |= HME_SEB_STAT_MIFIRQ;
    }

    level = (seb ? 1 : 0);
    trace_sunhme_update_irq(mifmask, mif, sebmask, seb, level);

    pci_set_irq(d, level);
}

static void sunhme_seb_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_seb_write(addr, val);

    /* Handle buggy Linux drivers before 4.13 which have
       the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
    switch (addr) {
    case HME_SEBI_STAT_LINUXBUG:
        addr = HME_SEBI_STAT;
        break;
    case HME_SEBI_IMASK_LINUXBUG:
        addr = HME_SEBI_IMASK;
        break;
    default:
        break;
    }

    switch (addr) {
    case HME_SEBI_RESET:
        if (val & HME_SEB_RESET_ETX) {
            sunhme_reset_tx(s);
        }
        if (val & HME_SEB_RESET_ERX) {
            sunhme_reset_rx(s);
        }
        val = s->sebregs[HME_SEBI_RESET >> 2];
        break;
    }

    s->sebregs[addr >> 2] = val;
}

static uint64_t sunhme_seb_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    /* Handle buggy Linux drivers before 4.13 which have
       the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
    switch (addr) {
    case HME_SEBI_STAT_LINUXBUG:
        addr = HME_SEBI_STAT;
        break;
    case HME_SEBI_IMASK_LINUXBUG:
        addr = HME_SEBI_IMASK;
        break;
    default:
        break;
    }

    val = s->sebregs[addr >> 2];

    switch (addr) {
    case HME_SEBI_STAT:
        /* Autoclear status (except MIF) */
        s->sebregs[HME_SEBI_STAT >> 2] &= HME_SEB_STAT_MIFIRQ;
        sunhme_update_irq(s);
        break;
    }

    trace_sunhme_seb_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_seb_ops = {
    .read = sunhme_seb_read,
    .write = sunhme_seb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_transmit(SunHMEState *s);

static void sunhme_etx_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_etx_write(addr, val);

    switch (addr) {
    case HME_ETXI_PENDING:
        if (val) {
            sunhme_transmit(s);
        }
        break;
    }

    s->etxregs[addr >> 2] = val;
}

static uint64_t sunhme_etx_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->etxregs[addr >> 2];

    trace_sunhme_etx_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_etx_ops = {
    .read = sunhme_etx_read,
    .write = sunhme_etx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_erx_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_erx_write(addr, val);

    s->erxregs[addr >> 2] = val;
}

static uint64_t sunhme_erx_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->erxregs[addr >> 2];

    trace_sunhme_erx_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_erx_ops = {
    .read = sunhme_erx_read,
    .write = sunhme_erx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_mac_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t oldval = s->macregs[addr >> 2];

    trace_sunhme_mac_write(addr, val);

    s->macregs[addr >> 2] = val;

    switch (addr) {
    case HME_MACI_RXCFG:
        if (!(oldval & HME_MAC_RXCFG_ENABLE) &&
            (val & HME_MAC_RXCFG_ENABLE)) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;
    }
}

static uint64_t sunhme_mac_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->macregs[addr >> 2];

    trace_sunhme_mac_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_mac_ops = {
    .read = sunhme_mac_read,
    .write = sunhme_mac_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_mii_write(SunHMEState *s, uint8_t reg, uint16_t data)
{
    trace_sunhme_mii_write(reg, data);

    switch (reg) {
    case MII_BMCR:
        if (data & MII_BMCR_RESET) {
            /* Autoclear reset bit, enable auto negotiation */
            data &= ~MII_BMCR_RESET;
            data |= MII_BMCR_AUTOEN;
        }
        if (data & MII_BMCR_ANRESTART) {
            /* Autoclear auto negotiation restart */
            data &= ~MII_BMCR_ANRESTART;

            /* Indicate negotiation complete */
            s->miiregs[MII_BMSR] |= MII_BMSR_AN_COMP;

            if (!qemu_get_queue(s->nic)->link_down) {
                s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
                s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
            }
        }
        break;
    }

    s->miiregs[reg] = data;
}

static uint16_t sunhme_mii_read(SunHMEState *s, uint8_t reg)
{
    uint16_t data = s->miiregs[reg];

    trace_sunhme_mii_read(reg, data);

    return data;
}

static void sunhme_mif_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint8_t cmd, reg;
    uint16_t data;

    trace_sunhme_mif_write(addr, val);

    switch (addr) {
    case HME_MIFI_CFG:
        /* Mask the read-only bits */
        val &= ~(HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
        val |= s->mifregs[HME_MIFI_CFG >> 2] &
               (HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
        break;
    case HME_MIFI_FO:
        /* Detect start of MII command */
        if ((val & HME_MIF_FO_ST) >> HME_MIF_FO_ST_SHIFT
                != MII_COMMAND_START) {
            val |= HME_MIF_FO_TALSB;
            break;
        }

        /* Internal phy only */
        if ((val & HME_MIF_FO_PHYAD) >> HME_MIF_FO_PHYAD_SHIFT
                != HME_PHYAD_INTERNAL) {
            val |= HME_MIF_FO_TALSB;
            break;
        }

        cmd = (val & HME_MIF_FO_OPC) >> HME_MIF_FO_OPC_SHIFT;
        reg = (val & HME_MIF_FO_REGAD) >> HME_MIF_FO_REGAD_SHIFT;
        data = (val & HME_MIF_FO_DATA);

        switch (cmd) {
        case MII_COMMAND_WRITE:
            sunhme_mii_write(s, reg, data);
            break;

        case MII_COMMAND_READ:
            val &= ~HME_MIF_FO_DATA;
            val |= sunhme_mii_read(s, reg);
            break;
        }

        val |= HME_MIF_FO_TALSB;
        break;
    }

    s->mifregs[addr >> 2] = val;
}

static uint64_t sunhme_mif_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->mifregs[addr >> 2];

    switch (addr) {
    case HME_MIFI_STAT:
        /* Autoclear MIF interrupt status */
        s->mifregs[HME_MIFI_STAT >> 2] = 0;
        sunhme_update_irq(s);
        break;
    }

    trace_sunhme_mif_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_mif_ops = {
    .read = sunhme_mif_read,
    .write = sunhme_mif_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_transmit_frame(SunHMEState *s, uint8_t *buf, int size)
{
    qemu_send_packet(qemu_get_queue(s->nic), buf, size);
}

static inline int sunhme_get_tx_ring_count(SunHMEState *s)
{
    return (s->etxregs[HME_ETXI_RSIZE >> 2] + 1) << 4;
}

static inline int sunhme_get_tx_ring_nr(SunHMEState *s)
{
    return s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_OFFSET;
}

static inline void sunhme_set_tx_ring_nr(SunHMEState *s, int i)
{
    uint32_t ring = s->etxregs[HME_ETXI_RING >> 2] & ~HME_ETXI_RING_OFFSET;
    ring |= i & HME_ETXI_RING_OFFSET;

    s->etxregs[HME_ETXI_RING >> 2] = ring;
}

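/*
 * Walk the TX descriptor ring. Each 8-byte descriptor holds a status word
 * (OWN/SOP/EOP flags, fragment length and checksum control) followed by a
 * buffer address. Fragments are gathered into xmit_buffer until EOP; an
 * optional checksum is stuffed into the frame, the frame is sent if the MAC
 * transmitter is enabled, OWN is cleared and the HOSTTOTX/TXALL status bits
 * raise an interrupt.
 */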
static void sunhme_transmit(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t tb, addr;
    uint32_t intstatus, status, buffer, sum = 0;
    int cr, nr, len, xmit_pos, csum_offset = 0, csum_stuff_offset = 0;
    uint16_t csum = 0;
    uint8_t xmit_buffer[HME_FIFO_SIZE];

    tb = s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_ADDR;
    nr = sunhme_get_tx_ring_count(s);
    cr = sunhme_get_tx_ring_nr(s);

    pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    xmit_pos = 0;
    while (status & HME_XD_OWN) {
        trace_sunhme_tx_desc(buffer, status, cr, nr);

        /* Copy data into transmit buffer */
        addr = buffer;
        len = status & HME_XD_TXLENMSK;

        if (xmit_pos + len > HME_FIFO_SIZE) {
            len = HME_FIFO_SIZE - xmit_pos;
        }

        pci_dma_read(d, addr, &xmit_buffer[xmit_pos], len);
        xmit_pos += len;

        /* Detect start of packet for TX checksum */
        if (status & HME_XD_SOP) {
            sum = 0;
            csum_offset = (status & HME_XD_TXCSSTART) >> HME_XD_TXCSSTARTSHIFT;
            csum_stuff_offset = (status & HME_XD_TXCSSTUFF) >>
                                HME_XD_TXCSSTUFFSHIFT;
        }

        if (status & HME_XD_TXCKSUM) {
            /* Only start calculation from csum_offset */
            if (xmit_pos - len <= csum_offset && xmit_pos > csum_offset) {
                sum += net_checksum_add(xmit_pos - csum_offset,
                                        xmit_buffer + csum_offset);
                trace_sunhme_tx_xsum_add(csum_offset, xmit_pos - csum_offset);
            } else {
                sum += net_checksum_add(len, xmit_buffer + xmit_pos - len);
                trace_sunhme_tx_xsum_add(xmit_pos - len, len);
            }
        }

        /* Detect end of packet for TX checksum */
        if (status & HME_XD_EOP) {
            /* Stuff the checksum if required */
            if (status & HME_XD_TXCKSUM) {
                csum = net_checksum_finish(sum);
                stw_be_p(xmit_buffer + csum_stuff_offset, csum);
                trace_sunhme_tx_xsum_stuff(csum, csum_stuff_offset);
            }

            if (s->macregs[HME_MACI_TXCFG >> 2] & HME_MAC_TXCFG_ENABLE) {
                sunhme_transmit_frame(s, xmit_buffer, xmit_pos);
                trace_sunhme_tx_done(xmit_pos);
            }
        }

        /* Update status */
        status &= ~HME_XD_OWN;
        pci_dma_write(d, tb + cr * HME_DESC_SIZE, &status, 4);

        /* Move onto next descriptor */
        cr++;
        if (cr >= nr) {
            cr = 0;
        }
        sunhme_set_tx_ring_nr(s, cr);

        pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
        pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

        /* Indicate TX complete */
        intstatus = s->sebregs[HME_SEBI_STAT >> 2];
        intstatus |= HME_SEB_STAT_HOSTTOTX;
        s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

        /* Autoclear TX pending */
        s->etxregs[HME_ETXI_PENDING >> 2] = 0;

        sunhme_update_irq(s);
    }

    /* TX FIFO now clear */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_TXALL;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;
    sunhme_update_irq(s);
}

static bool sunhme_can_receive(NetClientState *nc)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);

    return !!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE);
}

static void sunhme_link_status_changed(NetClientState *nc)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);

    if (nc->link_down) {
        s->miiregs[MII_ANLPAR] &= ~MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] &= ~MII_BMSR_LINK_ST;
    } else {
        s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
    }

    /* Exact bits unknown */
    s->mifregs[HME_MIFI_STAT >> 2] = 0xffff;
    sunhme_update_irq(s);
}

static inline int sunhme_get_rx_ring_count(SunHMEState *s)
{
    uint32_t rings = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_RINGSIZE)
                     >> HME_ERX_CFG_RINGSIZE_SHIFT;

    switch (rings) {
    case 0:
        return 32;
    case 1:
        return 64;
    case 2:
        return 128;
    case 3:
        return 256;
    }

    return 0;
}

static inline int sunhme_get_rx_ring_nr(SunHMEState *s)
{
    return s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_OFFSET;
}

static inline void sunhme_set_rx_ring_nr(SunHMEState *s, int i)
{
    uint32_t ring = s->erxregs[HME_ERXI_RING >> 2] & ~HME_ERXI_RING_OFFSET;
    ring |= i & HME_ERXI_RING_OFFSET;

    s->erxregs[HME_ERXI_RING >> 2] = ring;
}

static ssize_t sunhme_receive(NetClientState *nc, const uint8_t *buf,
                              size_t size)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t rb, addr;
    uint32_t intstatus, status, buffer, buffersize, sum;
    uint16_t csum;
    int nr, cr, len, rxoffset, csum_offset;

    trace_sunhme_rx_incoming(size);

    /* Do nothing if MAC RX disabled */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE)) {
        return 0;
    }

    trace_sunhme_rx_filter_destmac(buf[0], buf[1], buf[2],
                                   buf[3], buf[4], buf[5]);

    /* Check destination MAC address */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_PMISC)) {
        /* Try and match local MAC address */
        if (((s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff00) >> 8) == buf[0] &&
             (s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff) == buf[1] &&
            ((s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff00) >> 8) == buf[2] &&
             (s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff) == buf[3] &&
            ((s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff00) >> 8) == buf[4] &&
             (s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff) == buf[5]) {
            /* Matched local MAC address */
            trace_sunhme_rx_filter_local_match();
        } else if (buf[0] == 0xff && buf[1] == 0xff && buf[2] == 0xff &&
                   buf[3] == 0xff && buf[4] == 0xff && buf[5] == 0xff) {
            /* Matched broadcast address */
            trace_sunhme_rx_filter_bcast_match();
        } else if (s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_HENABLE) {
            /* Didn't match local address, check hash filter */
            int mcast_idx = net_crc32_le(buf, ETH_ALEN) >> 26;
            if (!(s->macregs[(HME_MACI_HASHTAB0 >> 2) - (mcast_idx >> 4)] &
                  (1 << (mcast_idx & 0xf)))) {
                /* Didn't match hash filter */
                trace_sunhme_rx_filter_hash_nomatch();
                trace_sunhme_rx_filter_reject();
                return -1;
            } else {
                trace_sunhme_rx_filter_hash_match();
            }
        } else {
            /* Not for us */
            trace_sunhme_rx_filter_reject();
            return -1;
        }
    } else {
        trace_sunhme_rx_filter_promisc_match();
    }

    trace_sunhme_rx_filter_accept();

    rb = s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_ADDR;
    nr = sunhme_get_rx_ring_count(s);
    cr = sunhme_get_rx_ring_nr(s);

    pci_dma_read(d, rb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, rb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    /* If we don't own the current descriptor then indicate overflow error */
    if (!(status & HME_XD_OWN)) {
        s->sebregs[HME_SEBI_STAT >> 2] |= HME_SEB_STAT_NORXD;
        sunhme_update_irq(s);
        trace_sunhme_rx_norxd();
        return -1;
    }

    rxoffset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_BYTEOFFSET) >>
               HME_ERX_CFG_BYTEOFFSET_SHIFT;

    addr = buffer + rxoffset;
    buffersize = (status & HME_XD_RXLENMSK) >> HME_XD_RXLENSHIFT;

    /* Detect receive overflow */
    len = size;
    if (size > buffersize) {
        status |= HME_XD_OFL;
        len = buffersize;
    }

    pci_dma_write(d, addr, buf, len);

    trace_sunhme_rx_desc(buffer, rxoffset, status, len, cr, nr);

    /* Calculate the receive checksum */
    csum_offset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_CSUMSTART) >>
                  HME_ERX_CFG_CSUMSHIFT << 1;
    sum = 0;
    sum += net_checksum_add(len - csum_offset, (uint8_t *)buf + csum_offset);
    csum = net_checksum_finish(sum);

    trace_sunhme_rx_xsum_calc(csum);

    /* Update status */
    status &= ~HME_XD_OWN;
    status &= ~HME_XD_RXLENMSK;
    status |= len << HME_XD_RXLENSHIFT;
    status &= ~HME_XD_RXCKSUM;
    status |= csum;

    pci_dma_write(d, rb + cr * HME_DESC_SIZE, &status, 4);

    cr++;
    if (cr >= nr) {
        cr = 0;
    }

    sunhme_set_rx_ring_nr(s, cr);

    /* Indicate RX complete */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_RXTOHOST;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

    sunhme_update_irq(s);

    return len;
}

static NetClientInfo net_sunhme_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = sunhme_can_receive,
    .receive = sunhme_receive,
    .link_status_changed = sunhme_link_status_changed,
};

static void sunhme_realize(PCIDevice *pci_dev, Error **errp)
{
    SunHMEState *s = SUNHME(pci_dev);
    DeviceState *d = DEVICE(pci_dev);
    uint8_t *pci_conf;

    pci_conf = pci_dev->config;
    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */

    memory_region_init(&s->hme, OBJECT(pci_dev), "sunhme", HME_REG_SIZE);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->hme);

    memory_region_init_io(&s->sebreg, OBJECT(pci_dev), &sunhme_seb_ops, s,
                          "sunhme.seb", HME_SEB_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0, &s->sebreg);

    memory_region_init_io(&s->etxreg, OBJECT(pci_dev), &sunhme_etx_ops, s,
                          "sunhme.etx", HME_ETX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x2000, &s->etxreg);

    memory_region_init_io(&s->erxreg, OBJECT(pci_dev), &sunhme_erx_ops, s,
                          "sunhme.erx", HME_ERX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x4000, &s->erxreg);

    memory_region_init_io(&s->macreg, OBJECT(pci_dev), &sunhme_mac_ops, s,
                          "sunhme.mac", HME_MAC_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x6000, &s->macreg);

    memory_region_init_io(&s->mifreg, OBJECT(pci_dev), &sunhme_mif_ops, s,
                          "sunhme.mif", HME_MIF_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x7000, &s->mifreg);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&net_sunhme_info, &s->conf,
                          object_get_typename(OBJECT(d)), d->id,
                          &d->mem_reentrancy_guard, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static void sunhme_instance_init(Object *obj)
{
    SunHMEState *s = SUNHME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(obj));
}

static void sunhme_reset(DeviceState *ds)
{
    SunHMEState *s = SUNHME(ds);

    /* Configure internal transceiver */
    s->mifregs[HME_MIFI_CFG >> 2] |= HME_MIF_CFG_MDI0;

    /* Advertise auto, 100Mbps FD */
    s->miiregs[MII_ANAR] = MII_ANAR_TXFD;
    s->miiregs[MII_BMSR] = MII_BMSR_AUTONEG | MII_BMSR_100TX_FD |
                           MII_BMSR_AN_COMP;

    if (!qemu_get_queue(s->nic)->link_down) {
        s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
    }

    /* Set manufacturer */
    s->miiregs[MII_PHYID1] = DP83840_PHYID1;
    s->miiregs[MII_PHYID2] = DP83840_PHYID2;

    /* Configure default interrupt mask */
    s->mifregs[HME_MIFI_IMASK >> 2] = 0xffff;
    s->sebregs[HME_SEBI_IMASK >> 2] = 0xff7fffff;
}

static const VMStateDescription vmstate_hme = {
    .name = "sunhme",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (const VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, SunHMEState),
        VMSTATE_MACADDR(conf.macaddr, SunHMEState),
        VMSTATE_UINT32_ARRAY(sebregs, SunHMEState, (HME_SEB_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(etxregs, SunHMEState, (HME_ETX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(erxregs, SunHMEState, (HME_ERX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(macregs, SunHMEState, (HME_MAC_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(mifregs, SunHMEState, (HME_MIF_REG_SIZE >> 2)),
        VMSTATE_UINT16_ARRAY(miiregs, SunHMEState, HME_MII_REGS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};

static void sunhme_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = sunhme_realize;
    k->vendor_id = PCI_VENDOR_ID_SUN;
    k->device_id = PCI_DEVICE_ID_SUN_HME;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->vmsd = &vmstate_hme;
    device_class_set_legacy_reset(dc, sunhme_reset);
    device_class_set_props(dc, sunhme_properties);
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}

static const TypeInfo sunhme_info = {
    .name = TYPE_SUNHME,
    .parent = TYPE_PCI_DEVICE,
    .class_init = sunhme_class_init,
    .instance_size = sizeof(SunHMEState),
    .instance_init = sunhme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    }
};

static void sunhme_register_types(void)
{
    type_register_static(&sunhme_info);
}

type_init(sunhme_register_types)