/*
 * QEMU Sun Happy Meal Ethernet emulation
 *
 * Copyright (c) 2017 Mark Cave-Ayland
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/pci/pci_device.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "hw/net/mii.h"
#include "net/net.h"
#include "qemu/module.h"
#include "net/checksum.h"
#include "net/eth.h"
#include "system/system.h"
#include "trace.h"
#include "qom/object.h"

#define HME_REG_SIZE                   0x8000

#define HME_SEB_REG_SIZE               0x2000

#define HME_SEBI_RESET                 0x0
#define HME_SEB_RESET_ETX              0x1
#define HME_SEB_RESET_ERX              0x2

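/*
 * Note: the *_LINUXBUG offsets below are the incorrect SEB status/mask
 * offsets used by Linux guest drivers before 4.13; sunhme_seb_read() and
 * sunhme_seb_write() alias them onto the real registers.
 */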
#define HME_SEBI_STAT                  0x100
#define HME_SEBI_STAT_LINUXBUG         0x108
#define HME_SEB_STAT_RXTOHOST          0x10000
#define HME_SEB_STAT_NORXD             0x20000
#define HME_SEB_STAT_MIFIRQ            0x800000
#define HME_SEB_STAT_HOSTTOTX          0x1000000
#define HME_SEB_STAT_TXALL             0x2000000

#define HME_SEBI_IMASK                 0x104
#define HME_SEBI_IMASK_LINUXBUG        0x10c

#define HME_ETX_REG_SIZE               0x2000

#define HME_ETXI_PENDING               0x0

#define HME_ETXI_RING                  0x8
#define HME_ETXI_RING_ADDR             0xffffff00
#define HME_ETXI_RING_OFFSET           0xff

#define HME_ETXI_RSIZE                 0x2c

#define HME_ERX_REG_SIZE               0x2000

#define HME_ERXI_CFG                   0x0
#define HME_ERX_CFG_RINGSIZE           0x600
#define HME_ERX_CFG_RINGSIZE_SHIFT     9
#define HME_ERX_CFG_BYTEOFFSET         0x38
#define HME_ERX_CFG_BYTEOFFSET_SHIFT   3
#define HME_ERX_CFG_CSUMSTART          0x7f0000
#define HME_ERX_CFG_CSUMSHIFT          16

#define HME_ERXI_RING                  0x4
#define HME_ERXI_RING_ADDR             0xffffff00
#define HME_ERXI_RING_OFFSET           0xff

#define HME_MAC_REG_SIZE               0x1000

#define HME_MACI_TXCFG                 0x20c
#define HME_MAC_TXCFG_ENABLE           0x1

#define HME_MACI_RXCFG                 0x30c
#define HME_MAC_RXCFG_ENABLE           0x1
#define HME_MAC_RXCFG_PMISC            0x40
#define HME_MAC_RXCFG_HENABLE          0x800

#define HME_MACI_MACADDR2              0x318
#define HME_MACI_MACADDR1              0x31c
#define HME_MACI_MACADDR0              0x320

#define HME_MACI_HASHTAB3              0x340
#define HME_MACI_HASHTAB2              0x344
#define HME_MACI_HASHTAB1              0x348
#define HME_MACI_HASHTAB0              0x34c

#define HME_MIF_REG_SIZE               0x20

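/*
 * MIF Frame/Output register: the guest writes a complete MII management
 * frame here and the device executes it immediately. Field layout as
 * decoded below: ST[31:30] start, OPC[29:28] opcode, PHYAD[27:23] PHY
 * address, REGAD[22:18] register address, TA[17:16] turnaround and
 * DATA[15:0] data word.
 */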
#define HME_MIFI_FO                    0xc
#define HME_MIF_FO_ST                  0xc0000000
#define HME_MIF_FO_ST_SHIFT            30
#define HME_MIF_FO_OPC                 0x30000000
#define HME_MIF_FO_OPC_SHIFT           28
#define HME_MIF_FO_PHYAD               0x0f800000
#define HME_MIF_FO_PHYAD_SHIFT         23
#define HME_MIF_FO_REGAD               0x007c0000
#define HME_MIF_FO_REGAD_SHIFT         18
#define HME_MIF_FO_TAMSB               0x20000
#define HME_MIF_FO_TALSB               0x10000
#define HME_MIF_FO_DATA                0xffff

#define HME_MIFI_CFG                   0x10
#define HME_MIF_CFG_MDI0               0x100
#define HME_MIF_CFG_MDI1               0x200

#define HME_MIFI_IMASK                 0x14

#define HME_MIFI_STAT                  0x18


/* Wired HME PHY addresses */
#define HME_PHYAD_INTERNAL     1
#define HME_PHYAD_EXTERNAL     0

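/*
 * IEEE 802.3 clause 22 MII management frame values as seen in the MIF
 * Frame/Output register: start of frame is 0b01, the read opcode is 0b10
 * and the write opcode is 0b01 (hence MII_COMMAND_WRITE == MII_COMMAND_START).
 */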
#define MII_COMMAND_START      0x1
#define MII_COMMAND_READ       0x2
#define MII_COMMAND_WRITE      0x1

#define TYPE_SUNHME "sunhme"
OBJECT_DECLARE_SIMPLE_TYPE(SunHMEState, SUNHME)

/* Maximum size of buffer */
#define HME_FIFO_SIZE          0x800

/* Size of TX/RX descriptor */
#define HME_DESC_SIZE          0x8

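/*
 * TX/RX descriptor layout: each 8-byte descriptor consists of a 32-bit
 * status/flags word (HME_XD_*) followed by a 32-bit DMA buffer address.
 * The OWN bit is set by the guest to hand a descriptor to the device and
 * cleared by the device once the descriptor has been processed.
 */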
#define HME_XD_OWN             0x80000000
#define HME_XD_OFL             0x40000000
#define HME_XD_SOP             0x40000000
#define HME_XD_EOP             0x20000000
#define HME_XD_RXLENMSK        0x3fff0000
#define HME_XD_RXLENSHIFT      16
#define HME_XD_RXCKSUM         0xffff
#define HME_XD_TXLENMSK        0x00001fff
#define HME_XD_TXCKSUM         0x10000000
#define HME_XD_TXCSSTUFF       0xff00000
#define HME_XD_TXCSSTUFFSHIFT  20
#define HME_XD_TXCSSTART       0xfc000
#define HME_XD_TXCSSTARTSHIFT  14

#define HME_MII_REGS_SIZE      0x20

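/*
 * Device state: each register bank is stored as an array of 32-bit
 * registers and is indexed by (register offset >> 2) throughout the code.
 */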
struct SunHMEState {
    /*< private >*/
    PCIDevice parent_obj;

    NICState *nic;
    NICConf conf;

    MemoryRegion hme;
    MemoryRegion sebreg;
    MemoryRegion etxreg;
    MemoryRegion erxreg;
    MemoryRegion macreg;
    MemoryRegion mifreg;

    uint32_t sebregs[HME_SEB_REG_SIZE >> 2];
    uint32_t etxregs[HME_ETX_REG_SIZE >> 2];
    uint32_t erxregs[HME_ERX_REG_SIZE >> 2];
    uint32_t macregs[HME_MAC_REG_SIZE >> 2];
    uint32_t mifregs[HME_MIF_REG_SIZE >> 2];

    uint16_t miiregs[HME_MII_REGS_SIZE];
};

static const Property sunhme_properties[] = {
    DEFINE_NIC_PROPERTIES(SunHMEState, conf),
};

static void sunhme_reset_tx(SunHMEState *s)
{
    /* Indicate TX reset complete */
    s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ETX;
}

static void sunhme_reset_rx(SunHMEState *s)
{
    /* Indicate RX reset complete */
    s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ERX;
}

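/*
 * Recompute the PCI interrupt level: any unmasked MIF status bit is
 * reported via HME_SEB_STAT_MIFIRQ, and any unmasked SEB status bit
 * asserts the single PCI interrupt line.
 */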
static void sunhme_update_irq(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int level;

    /* MIF interrupt mask (16-bit) */
    uint32_t mifmask = ~(s->mifregs[HME_MIFI_IMASK >> 2]) & 0xffff;
    uint32_t mif = s->mifregs[HME_MIFI_STAT >> 2] & mifmask;

    /* Main SEB interrupt mask (include MIF status from above) */
    uint32_t sebmask = ~(s->sebregs[HME_SEBI_IMASK >> 2]) &
                       ~HME_SEB_STAT_MIFIRQ;
    uint32_t seb = s->sebregs[HME_SEBI_STAT >> 2] & sebmask;
    if (mif) {
        seb |= HME_SEB_STAT_MIFIRQ;
    }

    level = (seb ? 1 : 0);
    trace_sunhme_update_irq(mifmask, mif, sebmask, seb, level);

    pci_set_irq(d, level);
}

static void sunhme_seb_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_seb_write(addr, val);

    /* Handle buggy Linux drivers before 4.13 which have
       the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
    switch (addr) {
    case HME_SEBI_STAT_LINUXBUG:
        addr = HME_SEBI_STAT;
        break;
    case HME_SEBI_IMASK_LINUXBUG:
        addr = HME_SEBI_IMASK;
        break;
    default:
        break;
    }

    switch (addr) {
    case HME_SEBI_RESET:
        if (val & HME_SEB_RESET_ETX) {
            sunhme_reset_tx(s);
        }
        if (val & HME_SEB_RESET_ERX) {
            sunhme_reset_rx(s);
        }
        val = s->sebregs[HME_SEBI_RESET >> 2];
        break;
    }

    s->sebregs[addr >> 2] = val;
}

static uint64_t sunhme_seb_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    /* Handle buggy Linux drivers before 4.13 which have
       the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
    switch (addr) {
    case HME_SEBI_STAT_LINUXBUG:
        addr = HME_SEBI_STAT;
        break;
    case HME_SEBI_IMASK_LINUXBUG:
        addr = HME_SEBI_IMASK;
        break;
    default:
        break;
    }

    val = s->sebregs[addr >> 2];

    switch (addr) {
    case HME_SEBI_STAT:
        /* Autoclear status (except MIF) */
        s->sebregs[HME_SEBI_STAT >> 2] &= HME_SEB_STAT_MIFIRQ;
        sunhme_update_irq(s);
        break;
    }

    trace_sunhme_seb_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_seb_ops = {
    .read = sunhme_seb_read,
    .write = sunhme_seb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_transmit(SunHMEState *s);

static void sunhme_etx_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_etx_write(addr, val);

    switch (addr) {
    case HME_ETXI_PENDING:
        if (val) {
            sunhme_transmit(s);
        }
        break;
    }

    s->etxregs[addr >> 2] = val;
}

static uint64_t sunhme_etx_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->etxregs[addr >> 2];

    trace_sunhme_etx_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_etx_ops = {
    .read = sunhme_etx_read,
    .write = sunhme_etx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_erx_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_erx_write(addr, val);

    s->erxregs[addr >> 2] = val;
}

static uint64_t sunhme_erx_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->erxregs[addr >> 2];

    trace_sunhme_erx_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_erx_ops = {
    .read = sunhme_erx_read,
    .write = sunhme_erx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_mac_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t oldval = s->macregs[addr >> 2];

    trace_sunhme_mac_write(addr, val);

    s->macregs[addr >> 2] = val;

    switch (addr) {
    case HME_MACI_RXCFG:
        if (!(oldval & HME_MAC_RXCFG_ENABLE) &&
             (val & HME_MAC_RXCFG_ENABLE)) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;
    }
}

static uint64_t sunhme_mac_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->macregs[addr >> 2];

    trace_sunhme_mac_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_mac_ops = {
    .read = sunhme_mac_read,
    .write = sunhme_mac_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_mii_write(SunHMEState *s, uint8_t reg, uint16_t data)
{
    trace_sunhme_mii_write(reg, data);

    switch (reg) {
    case MII_BMCR:
        if (data & MII_BMCR_RESET) {
            /* Autoclear reset bit, enable auto negotiation */
            data &= ~MII_BMCR_RESET;
            data |= MII_BMCR_AUTOEN;
        }
        if (data & MII_BMCR_ANRESTART) {
            /* Autoclear auto negotiation restart */
            data &= ~MII_BMCR_ANRESTART;

            /* Indicate negotiation complete */
            s->miiregs[MII_BMSR] |= MII_BMSR_AN_COMP;

            if (!qemu_get_queue(s->nic)->link_down) {
                s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
                s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
            }
        }
        break;
    }

    s->miiregs[reg] = data;
}

static uint16_t sunhme_mii_read(SunHMEState *s, uint8_t reg)
{
    uint16_t data = s->miiregs[reg];

    trace_sunhme_mii_read(reg, data);

    return data;
}

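/*
 * MIF register writes: a write to HME_MIFI_FO is decoded as an MII
 * management frame and executed against the internal PHY straight away,
 * with HME_MIF_FO_TALSB set in the stored value to signal completion
 * (read data, if any, is returned in the DATA field).
 */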
static void sunhme_mif_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint8_t cmd, reg;
    uint16_t data;

    trace_sunhme_mif_write(addr, val);

    switch (addr) {
    case HME_MIFI_CFG:
        /* Mask the read-only bits */
        val &= ~(HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
        val |= s->mifregs[HME_MIFI_CFG >> 2] &
               (HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
        break;
    case HME_MIFI_FO:
        /* Detect start of MII command */
        if ((val & HME_MIF_FO_ST) >> HME_MIF_FO_ST_SHIFT
                != MII_COMMAND_START) {
            val |= HME_MIF_FO_TALSB;
            break;
        }

        /* Internal phy only */
        if ((val & HME_MIF_FO_PHYAD) >> HME_MIF_FO_PHYAD_SHIFT
                != HME_PHYAD_INTERNAL) {
            val |= HME_MIF_FO_TALSB;
            break;
        }

        cmd = (val & HME_MIF_FO_OPC) >> HME_MIF_FO_OPC_SHIFT;
        reg = (val & HME_MIF_FO_REGAD) >> HME_MIF_FO_REGAD_SHIFT;
        data = (val & HME_MIF_FO_DATA);

        switch (cmd) {
        case MII_COMMAND_WRITE:
            sunhme_mii_write(s, reg, data);
            break;

        case MII_COMMAND_READ:
            val &= ~HME_MIF_FO_DATA;
            val |= sunhme_mii_read(s, reg);
            break;
        }

        val |= HME_MIF_FO_TALSB;
        break;
    }

    s->mifregs[addr >> 2] = val;
}

static uint64_t sunhme_mif_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->mifregs[addr >> 2];

    switch (addr) {
    case HME_MIFI_STAT:
        /* Autoclear MIF interrupt status */
        s->mifregs[HME_MIFI_STAT >> 2] = 0;
        sunhme_update_irq(s);
        break;
    }

    trace_sunhme_mif_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_mif_ops = {
    .read = sunhme_mif_read,
    .write = sunhme_mif_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_transmit_frame(SunHMEState *s, uint8_t *buf, int size)
{
    qemu_send_packet(qemu_get_queue(s->nic), buf, size);
}

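/*
 * The ETX RSIZE register encodes the TX ring size in units of 16
 * descriptors, minus one: a value of 0 gives a 16-entry ring.
 */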
static inline int sunhme_get_tx_ring_count(SunHMEState *s)
{
    return (s->etxregs[HME_ETXI_RSIZE >> 2] + 1) << 4;
}

static inline int sunhme_get_tx_ring_nr(SunHMEState *s)
{
    return s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_OFFSET;
}

static inline void sunhme_set_tx_ring_nr(SunHMEState *s, int i)
{
    uint32_t ring = s->etxregs[HME_ETXI_RING >> 2] & ~HME_ETXI_RING_OFFSET;
    ring |= i & HME_ETXI_RING_OFFSET;

    s->etxregs[HME_ETXI_RING >> 2] = ring;
}

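/*
 * Walk the TX descriptor ring from the current position while descriptors
 * are owned by the device, gathering buffer fragments into a local FIFO,
 * optionally computing and stuffing the TX checksum between the SOP and
 * EOP descriptors, and handing each completed frame to the net layer.
 * Ownership of each processed descriptor is returned to the guest and the
 * HOSTTOTX/TXALL interrupts are raised as appropriate.
 */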
static void sunhme_transmit(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t tb, addr;
    uint32_t intstatus, status, buffer, sum = 0;
    int cr, nr, len, xmit_pos, csum_offset = 0, csum_stuff_offset = 0;
    uint16_t csum = 0;
    uint8_t xmit_buffer[HME_FIFO_SIZE];

    tb = s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_ADDR;
    nr = sunhme_get_tx_ring_count(s);
    cr = sunhme_get_tx_ring_nr(s);

    pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    xmit_pos = 0;
    while (status & HME_XD_OWN) {
        trace_sunhme_tx_desc(buffer, status, cr, nr);

        /* Copy data into transmit buffer */
        addr = buffer;
        len = status & HME_XD_TXLENMSK;

        if (xmit_pos + len > HME_FIFO_SIZE) {
            len = HME_FIFO_SIZE - xmit_pos;
        }

        pci_dma_read(d, addr, &xmit_buffer[xmit_pos], len);
        xmit_pos += len;

        /* Detect start of packet for TX checksum */
        if (status & HME_XD_SOP) {
            sum = 0;
            csum_offset = (status & HME_XD_TXCSSTART) >> HME_XD_TXCSSTARTSHIFT;
            csum_stuff_offset = (status & HME_XD_TXCSSTUFF) >>
                                HME_XD_TXCSSTUFFSHIFT;
        }

        if (status & HME_XD_TXCKSUM) {
            /* Only start calculation from csum_offset */
            if (xmit_pos - len <= csum_offset && xmit_pos > csum_offset) {
                sum += net_checksum_add(xmit_pos - csum_offset,
                                        xmit_buffer + csum_offset);
                trace_sunhme_tx_xsum_add(csum_offset, xmit_pos - csum_offset);
            } else {
                sum += net_checksum_add(len, xmit_buffer + xmit_pos - len);
                trace_sunhme_tx_xsum_add(xmit_pos - len, len);
            }
        }

        /* Detect end of packet for TX checksum */
        if (status & HME_XD_EOP) {
            /* Stuff the checksum if required */
            if (status & HME_XD_TXCKSUM) {
                csum = net_checksum_finish(sum);
                stw_be_p(xmit_buffer + csum_stuff_offset, csum);
                trace_sunhme_tx_xsum_stuff(csum, csum_stuff_offset);
            }

            if (s->macregs[HME_MACI_TXCFG >> 2] & HME_MAC_TXCFG_ENABLE) {
                sunhme_transmit_frame(s, xmit_buffer, xmit_pos);
                trace_sunhme_tx_done(xmit_pos);
            }
        }

        /* Update status */
        status &= ~HME_XD_OWN;
        pci_dma_write(d, tb + cr * HME_DESC_SIZE, &status, 4);

        /* Move onto next descriptor */
        cr++;
        if (cr >= nr) {
            cr = 0;
        }
        sunhme_set_tx_ring_nr(s, cr);

        pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
        pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

        /* Indicate TX complete */
        intstatus = s->sebregs[HME_SEBI_STAT >> 2];
        intstatus |= HME_SEB_STAT_HOSTTOTX;
        s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

        /* Autoclear TX pending */
        s->etxregs[HME_ETXI_PENDING >> 2] = 0;

        sunhme_update_irq(s);
    }

    /* TX FIFO now clear */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_TXALL;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;
    sunhme_update_irq(s);
}

static bool sunhme_can_receive(NetClientState *nc)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);

    return !!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE);
}

static void sunhme_link_status_changed(NetClientState *nc)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);

    if (nc->link_down) {
        s->miiregs[MII_ANLPAR] &= ~MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] &= ~MII_BMSR_LINK_ST;
    } else {
        s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
    }

    /* Exact bits unknown */
    s->mifregs[HME_MIFI_STAT >> 2] = 0xffff;
    sunhme_update_irq(s);
}

static inline int sunhme_get_rx_ring_count(SunHMEState *s)
{
    uint32_t rings = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_RINGSIZE)
                      >> HME_ERX_CFG_RINGSIZE_SHIFT;

    switch (rings) {
    case 0:
        return 32;
    case 1:
        return 64;
    case 2:
        return 128;
    case 3:
        return 256;
    }

    return 0;
}

static inline int sunhme_get_rx_ring_nr(SunHMEState *s)
{
    return s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_OFFSET;
}

static inline void sunhme_set_rx_ring_nr(SunHMEState *s, int i)
{
    uint32_t ring = s->erxregs[HME_ERXI_RING >> 2] & ~HME_ERXI_RING_OFFSET;
    ring |= i & HME_ERXI_RING_OFFSET;

    s->erxregs[HME_ERXI_RING >> 2] = ring;
}

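/*
 * Receive path: unless promiscuous mode is enabled, incoming frames are
 * filtered against the station MAC address, the broadcast address and
 * (if enabled) the multicast hash table, then DMAed into the buffer of
 * the current RX descriptor together with an inline checksum, after which
 * ownership is returned to the guest and the RXTOHOST interrupt is raised.
 */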
static ssize_t sunhme_receive(NetClientState *nc, const uint8_t *buf,
                              size_t size)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t rb, addr;
    uint32_t intstatus, status, buffer, buffersize, sum;
    uint16_t csum;
    int nr, cr, len, rxoffset, csum_offset;

    trace_sunhme_rx_incoming(size);

    /* Do nothing if MAC RX disabled */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE)) {
        return 0;
    }

    trace_sunhme_rx_filter_destmac(buf[0], buf[1], buf[2],
                                   buf[3], buf[4], buf[5]);

    /* Check destination MAC address */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_PMISC)) {
        /* Try and match local MAC address */
        if (((s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff00) >> 8) == buf[0] &&
             (s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff) == buf[1] &&
            ((s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff00) >> 8) == buf[2] &&
             (s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff) == buf[3] &&
            ((s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff00) >> 8) == buf[4] &&
             (s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff) == buf[5]) {
            /* Matched local MAC address */
            trace_sunhme_rx_filter_local_match();
        } else if (buf[0] == 0xff && buf[1] == 0xff && buf[2] == 0xff &&
                   buf[3] == 0xff && buf[4] == 0xff && buf[5] == 0xff) {
            /* Matched broadcast address */
            trace_sunhme_rx_filter_bcast_match();
        } else if (s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_HENABLE) {
            /* Didn't match local address, check hash filter */
            int mcast_idx = net_crc32_le(buf, ETH_ALEN) >> 26;
            if (!(s->macregs[(HME_MACI_HASHTAB0 >> 2) - (mcast_idx >> 4)] &
                    (1 << (mcast_idx & 0xf)))) {
                /* Didn't match hash filter */
                trace_sunhme_rx_filter_hash_nomatch();
                trace_sunhme_rx_filter_reject();
                return -1;
            } else {
                trace_sunhme_rx_filter_hash_match();
            }
        } else {
            /* Not for us */
            trace_sunhme_rx_filter_reject();
            return -1;
        }
    } else {
        trace_sunhme_rx_filter_promisc_match();
    }

    trace_sunhme_rx_filter_accept();

    rb = s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_ADDR;
    nr = sunhme_get_rx_ring_count(s);
    cr = sunhme_get_rx_ring_nr(s);

    pci_dma_read(d, rb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, rb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    /* If we don't own the current descriptor then indicate overflow error */
    if (!(status & HME_XD_OWN)) {
        s->sebregs[HME_SEBI_STAT >> 2] |= HME_SEB_STAT_NORXD;
        sunhme_update_irq(s);
        trace_sunhme_rx_norxd();
        return -1;
    }

    rxoffset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_BYTEOFFSET) >>
                HME_ERX_CFG_BYTEOFFSET_SHIFT;

    addr = buffer + rxoffset;
    buffersize = (status & HME_XD_RXLENMSK) >> HME_XD_RXLENSHIFT;

    /* Detect receive overflow */
    len = size;
    if (size > buffersize) {
        status |= HME_XD_OFL;
        len = buffersize;
    }

    pci_dma_write(d, addr, buf, len);

    trace_sunhme_rx_desc(buffer, rxoffset, status, len, cr, nr);

    /* Calculate the receive checksum */
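    /* The CSUMSTART field counts 16-bit half-words, hence the << 1 below
       to convert it into a byte offset into the frame */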
    csum_offset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_CSUMSTART) >>
                  HME_ERX_CFG_CSUMSHIFT << 1;
    sum = 0;
    sum += net_checksum_add(len - csum_offset, (uint8_t *)buf + csum_offset);
    csum = net_checksum_finish(sum);

    trace_sunhme_rx_xsum_calc(csum);

    /* Update status */
    status &= ~HME_XD_OWN;
    status &= ~HME_XD_RXLENMSK;
    status |= len << HME_XD_RXLENSHIFT;
    status &= ~HME_XD_RXCKSUM;
    status |= csum;

    pci_dma_write(d, rb + cr * HME_DESC_SIZE, &status, 4);

    cr++;
    if (cr >= nr) {
        cr = 0;
    }

    sunhme_set_rx_ring_nr(s, cr);

    /* Indicate RX complete */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_RXTOHOST;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

    sunhme_update_irq(s);

    return len;
}

static NetClientInfo net_sunhme_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = sunhme_can_receive,
    .receive = sunhme_receive,
    .link_status_changed = sunhme_link_status_changed,
};

static void sunhme_realize(PCIDevice *pci_dev, Error **errp)
{
    SunHMEState *s = SUNHME(pci_dev);
    DeviceState *d = DEVICE(pci_dev);
    uint8_t *pci_conf;

    pci_conf = pci_dev->config;
    pci_conf[PCI_INTERRUPT_PIN] = 1;    /* interrupt pin A */

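    /*
     * Register banks within the single 32KiB BAR:
     *   0x0000 SEB (global status/reset), 0x2000 ETX, 0x4000 ERX,
     *   0x6000 MAC, 0x7000 MIF
     */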
    memory_region_init(&s->hme, OBJECT(pci_dev), "sunhme", HME_REG_SIZE);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->hme);

    memory_region_init_io(&s->sebreg, OBJECT(pci_dev), &sunhme_seb_ops, s,
                          "sunhme.seb", HME_SEB_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0, &s->sebreg);

    memory_region_init_io(&s->etxreg, OBJECT(pci_dev), &sunhme_etx_ops, s,
                          "sunhme.etx", HME_ETX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x2000, &s->etxreg);

    memory_region_init_io(&s->erxreg, OBJECT(pci_dev), &sunhme_erx_ops, s,
                          "sunhme.erx", HME_ERX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x4000, &s->erxreg);

    memory_region_init_io(&s->macreg, OBJECT(pci_dev), &sunhme_mac_ops, s,
                          "sunhme.mac", HME_MAC_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x6000, &s->macreg);

    memory_region_init_io(&s->mifreg, OBJECT(pci_dev), &sunhme_mif_ops, s,
                          "sunhme.mif", HME_MIF_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x7000, &s->mifreg);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&net_sunhme_info, &s->conf,
                          object_get_typename(OBJECT(d)), d->id,
                          &d->mem_reentrancy_guard, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static void sunhme_instance_init(Object *obj)
{
    SunHMEState *s = SUNHME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(obj));
}

static void sunhme_reset(DeviceState *ds)
{
    SunHMEState *s = SUNHME(ds);

    /* Configure internal transceiver */
    s->mifregs[HME_MIFI_CFG >> 2] |= HME_MIF_CFG_MDI0;

    /* Advertise auto, 100Mbps FD */
    s->miiregs[MII_ANAR] = MII_ANAR_TXFD;
    s->miiregs[MII_BMSR] = MII_BMSR_AUTONEG | MII_BMSR_100TX_FD |
                           MII_BMSR_AN_COMP;

    if (!qemu_get_queue(s->nic)->link_down) {
        s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
    }

    /* Set manufacturer */
    s->miiregs[MII_PHYID1] = DP83840_PHYID1;
    s->miiregs[MII_PHYID2] = DP83840_PHYID2;

    /* Configure default interrupt mask */
    s->mifregs[HME_MIFI_IMASK >> 2] = 0xffff;
    s->sebregs[HME_SEBI_IMASK >> 2] = 0xff7fffff;
}

static const VMStateDescription vmstate_hme = {
    .name = "sunhme",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (const VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, SunHMEState),
        VMSTATE_MACADDR(conf.macaddr, SunHMEState),
        VMSTATE_UINT32_ARRAY(sebregs, SunHMEState, (HME_SEB_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(etxregs, SunHMEState, (HME_ETX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(erxregs, SunHMEState, (HME_ERX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(macregs, SunHMEState, (HME_MAC_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(mifregs, SunHMEState, (HME_MIF_REG_SIZE >> 2)),
        VMSTATE_UINT16_ARRAY(miiregs, SunHMEState, HME_MII_REGS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};

static void sunhme_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = sunhme_realize;
    k->vendor_id = PCI_VENDOR_ID_SUN;
    k->device_id = PCI_DEVICE_ID_SUN_HME;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->vmsd = &vmstate_hme;
    device_class_set_legacy_reset(dc, sunhme_reset);
    device_class_set_props(dc, sunhme_properties);
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}

static const TypeInfo sunhme_info = {
    .name          = TYPE_SUNHME,
    .parent        = TYPE_PCI_DEVICE,
    .class_init    = sunhme_class_init,
    .instance_size = sizeof(SunHMEState),
    .instance_init = sunhme_instance_init,
    .interfaces = (const InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    }
};

static void sunhme_register_types(void)
{
    type_register_static(&sunhme_info);
}

type_init(sunhme_register_types)