xref: /qemu/hw/net/e1000.c (revision 6a2acedb19221ddf5e6fd3fb3590ba636aa21007)
1 /*
2  * QEMU e1000 emulation
3  *
4  * Software developer's manual:
5  * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
6  *
7  * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
8  * Copyright (c) 2008 Qumranet
9  * Based on work done by:
10  * Copyright (c) 2007 Dan Aloni
11  * Copyright (c) 2004 Antony T Curtis
12  *
13  * This library is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2 of the License, or (at your option) any later version.
17  *
18  * This library is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
25  */
26 
27 
28 #include "hw/hw.h"
29 #include "hw/pci/pci.h"
30 #include "net/net.h"
31 #include "net/checksum.h"
32 #include "hw/loader.h"
33 #include "sysemu/sysemu.h"
34 #include "sysemu/dma.h"
35 #include "qemu/iov.h"
36 
37 #include "e1000_regs.h"
38 
/* Compile-time switch for the debug tracing below; comment out to compile
 * all DBGOUT() calls away. */
#define E1000_DEBUG

#ifdef E1000_DEBUG
/* One bit per trace category; see DBGBIT()/debugflags below. */
enum {
    DEBUG_GENERAL,	DEBUG_IO,	DEBUG_MMIO,	DEBUG_INTERRUPT,
    DEBUG_RX,		DEBUG_TX,	DEBUG_MDIC,	DEBUG_EEPROM,
    DEBUG_UNKNOWN,	DEBUG_TXSUM,	DEBUG_TXERR,	DEBUG_RXERR,
    DEBUG_RXFILTER,     DEBUG_PHY,      DEBUG_NOTYET,
};
#define DBGBIT(x)	(1<<DEBUG_##x)
/* Categories enabled by default; adjust at build time or in a debugger. */
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

/* Print to stderr when the category 'what' is enabled in debugflags. */
#define	DBGOUT(what, fmt, ...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
    } while (0)
#else
#define	DBGOUT(what, fmt, ...) do {} while (0)
#endif
58 
#define IOPORT_SIZE       0x40      /* size of the I/O port BAR */
#define PNPMMIO_SIZE      0x20000   /* size of the MMIO register BAR */
#define MIN_BUF_SIZE      60 /* Min. octets in an ethernet frame sans FCS */

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* this is the size past which hardware will drop packets when setting LPE=1 */
#define MAXIMUM_ETHERNET_LPE_SIZE 16384

/* Ethernet header (14) plus one VLAN tag (4). */
#define MAXIMUM_ETHERNET_HDR_LEN (14+4)
69 
70 /*
71  * HW models:
72  *  E1000_DEV_ID_82540EM works with Windows, Linux, and OS X <= 10.8
73  *  E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
74  *  E1000_DEV_ID_82545EM_COPPER works with Linux and OS X >= 10.6
75  *  Others never tested
76  */
77 
/* Complete device state for one emulated e1000 NIC. */
typedef struct E1000State_st {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/

    NICState *nic;              /* backend network client */
    NICConf conf;               /* MAC address etc. from -device options */
    MemoryRegion mmio;          /* register MMIO BAR */
    MemoryRegion io;            /* I/O port BAR */

    uint32_t mac_reg[0x8000];   /* MAC registers, indexed by byte offset >> 2 */
    uint16_t phy_reg[0x20];     /* MII PHY registers */
    uint16_t eeprom_data[64];   /* EEPROM contents, 16-bit words */

    uint32_t rxbuf_size;        /* per-descriptor RX buffer size (from RCTL) */
    uint32_t rxbuf_min_shift;   /* RDMTS-derived free-descriptor threshold */
    /* Transmit state carried across descriptors (context + data). */
    struct e1000_tx {
        unsigned char header[256];      /* saved TSO header, re-prepended per segment */
        unsigned char vlan_header[4];   /* VLAN tag to insert (ethertype + TCI) */
        /* Fields vlan and data must not be reordered or separated. */
        unsigned char vlan[4];
        unsigned char data[0x10000];
        uint16_t size;                  /* bytes accumulated in data[] */
        unsigned char sum_needed;       /* POPTS checksum-offload flags */
        unsigned char vlan_needed;      /* insert vlan_header before sending */
        uint8_t ipcss;                  /* IP checksum start/offset/end */
        uint8_t ipcso;
        uint16_t ipcse;
        uint8_t tucss;                  /* TCP/UDP checksum start/offset/end */
        uint8_t tucso;
        uint16_t tucse;
        uint8_t hdr_len;                /* TSO: header length */
        uint16_t mss;                   /* TSO: maximum segment size */
        uint32_t paylen;                /* TSO: total payload length */
        uint16_t tso_frames;            /* TSO: segments emitted so far */
        char tse;                       /* TSE bit from the context descriptor */
        int8_t ip;                      /* 1 = IPv4, 0 = IPv6 */
        int8_t tcp;                     /* 1 = TCP, 0 = UDP */
        char cptse;     // current packet tse bit
    } tx;

    /* Microwire EEPROM bit-banging state (EECD register). */
    struct {
        uint32_t val_in;	// shifted in from guest driver
        uint16_t bitnum_in;     /* bits shifted in so far */
        uint16_t bitnum_out;    /* next bit index to shift out */
        uint16_t reading;       /* nonzero once a READ opcode was decoded */
        uint32_t old_eecd;      /* previous EECD value, for edge detection */
    } eecd_state;

    QEMUTimer *autoneg_timer;  /* completes emulated link auto-negotiation */

    QEMUTimer *mit_timer;      /* Mitigation timer. */
    bool mit_timer_on;         /* Mitigation timer is running. */
    bool mit_irq_level;        /* Tracks interrupt pin level. */
    uint32_t mit_ide;          /* Tracks E1000_TXD_CMD_IDE bit. */

/* Compatibility flags for migration to/from qemu 1.3.0 and older */
#define E1000_FLAG_AUTONEG_BIT 0
#define E1000_FLAG_MIT_BIT 1
#define E1000_FLAG_AUTONEG (1 << E1000_FLAG_AUTONEG_BIT)
#define E1000_FLAG_MIT (1 << E1000_FLAG_MIT_BIT)
    uint32_t compat_flags;
} E1000State;
141 
/* Class data shared by all e1000 variants; phy_id2 distinguishes the
 * emulated device model (set per subtype, applied in e1000_reset()). */
typedef struct E1000BaseClass {
    PCIDeviceClass parent_class;
    uint16_t phy_id2;           /* value reported in the PHY_ID2 register */
} E1000BaseClass;

#define TYPE_E1000_BASE "e1000-base"

/* QOM cast helpers for the e1000 object hierarchy. */
#define E1000(obj) \
    OBJECT_CHECK(E1000State, (obj), TYPE_E1000_BASE)

#define E1000_DEVICE_CLASS(klass) \
     OBJECT_CLASS_CHECK(E1000BaseClass, (klass), TYPE_E1000_BASE)
#define E1000_DEVICE_GET_CLASS(obj) \
    OBJECT_GET_CLASS(E1000BaseClass, (obj), TYPE_E1000_BASE)
156 
/* Define a mac_reg[] index for each register: the E1000_* byte offset
 * from e1000_regs.h divided by 4 (registers are 32 bits wide). */
#define	defreg(x)	x = (E1000_##x>>2)
enum {
    defreg(CTRL),	defreg(EECD),	defreg(EERD),	defreg(GPRC),
    defreg(GPTC),	defreg(ICR),	defreg(ICS),	defreg(IMC),
    defreg(IMS),	defreg(LEDCTL),	defreg(MANC),	defreg(MDIC),
    defreg(MPC),	defreg(PBA),	defreg(RCTL),	defreg(RDBAH),
    defreg(RDBAL),	defreg(RDH),	defreg(RDLEN),	defreg(RDT),
    defreg(STATUS),	defreg(SWSM),	defreg(TCTL),	defreg(TDBAH),
    defreg(TDBAL),	defreg(TDH),	defreg(TDLEN),	defreg(TDT),
    defreg(TORH),	defreg(TORL),	defreg(TOTH),	defreg(TOTL),
    defreg(TPR),	defreg(TPT),	defreg(TXDCTL),	defreg(WUFC),
    defreg(RA),		defreg(MTA),	defreg(CRCERRS),defreg(VFTA),
    defreg(VET),        defreg(RDTR),   defreg(RADV),   defreg(TADV),
    defreg(ITR),
};
172 
173 static void
174 e1000_link_down(E1000State *s)
175 {
176     s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
177     s->phy_reg[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
178     s->phy_reg[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
179 }
180 
181 static void
182 e1000_link_up(E1000State *s)
183 {
184     s->mac_reg[STATUS] |= E1000_STATUS_LU;
185     s->phy_reg[PHY_STATUS] |= MII_SR_LINK_STATUS;
186 }
187 
/*
 * PHY_CTRL write handler.  When the guest restarts auto-negotiation we
 * emulate it by dropping the link and arming autoneg_timer, which brings
 * the link back up (and sets AUTONEG_COMPLETE) 500 ms later.
 */
static void
set_phy_ctrl(E1000State *s, int index, uint16_t val)
{
    /*
     * QEMU 1.3 does not support link auto-negotiation emulation, so if we
     * migrate during auto negotiation, after migration the link will be
     * down.
     */
    if (!(s->compat_flags & E1000_FLAG_AUTONEG)) {
        return;
    }
    /* Only a restart request with auto-negotiation enabled kicks off the
     * emulated negotiation cycle. */
    if ((val & MII_CR_AUTO_NEG_EN) && (val & MII_CR_RESTART_AUTO_NEG)) {
        e1000_link_down(s);
        DBGOUT(PHY, "Start link auto negotiation\n");
        timer_mod(s->autoneg_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }
}
205 
206 static void
207 e1000_autoneg_timer(void *opaque)
208 {
209     E1000State *s = opaque;
210     if (!qemu_get_queue(s->nic)->link_down) {
211         e1000_link_up(s);
212         s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
213         DBGOUT(PHY, "Auto negotiation is completed\n");
214     }
215 }
216 
/* Per-register PHY write hooks, invoked from set_mdic() before the value
 * is stored; only PHY_CTRL currently needs side effects. */
static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
    [PHY_CTRL] = set_phy_ctrl,
};

enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };

/* Per-register access rights for MDIO; registers absent from the table
 * are inaccessible and MDIO accesses to them report E1000_MDIC_ERROR. */
enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [PHY_STATUS] = PHY_R,	[M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_ID1] = PHY_R,		[M88E1000_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_CTRL] = PHY_RW,	[PHY_1000T_CTRL] = PHY_RW,
    [PHY_LP_ABILITY] = PHY_R,	[PHY_1000T_STATUS] = PHY_R,
    [PHY_AUTONEG_ADV] = PHY_RW,	[M88E1000_RX_ERR_CNTR] = PHY_R,
    [PHY_ID2] = PHY_R,		[M88E1000_PHY_SPEC_STATUS] = PHY_R
};
232 
/* PHY_ID2 documented in 8254x_GBe_SDM.pdf, pp. 250 */
/* Power-on values for the PHY registers, applied by e1000_reset(). */
static const uint16_t phy_reg_init[] = {
    [PHY_CTRL] = 0x1140,
    [PHY_STATUS] = 0x794d, /* link initially up with not completed autoneg */
    [PHY_ID1] = 0x141, /* [PHY_ID2] configured per DevId, from e1000_reset() */
    [PHY_1000T_CTRL] = 0x0e00,			[M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60,	[PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0,			[PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
};

/* Power-on values for the MAC registers (1000 Mb/s, full duplex, link up);
 * everything not listed resets to zero. */
static const uint32_t mac_reg_init[] = {
    [PBA] =     0x00100030,
    [LEDCTL] =  0x602,
    [CTRL] =    E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
                E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS] =  0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
                E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
                E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
                E1000_STATUS_LU,
    [MANC] =    E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
                E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
                E1000_MANC_RMCP_EN,
};
257 
258 /* Helper function, *curr == 0 means the value is not set */
259 static inline void
260 mit_update_delay(uint32_t *curr, uint32_t value)
261 {
262     if (value && (*curr == 0 || value < *curr)) {
263         *curr = value;
264     }
265 }
266 
/*
 * Store 'val' as the current interrupt causes (ICR/ICS) and raise or lower
 * the PCI interrupt line accordingly.  On a rising edge, interrupt
 * mitigation (if enabled via compat_flags) may postpone the assertion by
 * arming mit_timer instead of raising the line immediately.
 */
static void
set_interrupt_cause(E1000State *s, int index, uint32_t val)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t pending_ints;
    uint32_t mit_delay;

    s->mac_reg[ICR] = val;

    /*
     * Make sure ICR and ICS registers have the same value.
     * The spec says that the ICS register is write-only.  However in practice,
     * on real hardware ICS is readable, and for reads it has the same value as
     * ICR (except that ICS does not have the clear on read behaviour of ICR).
     *
     * The VxWorks PRO/1000 driver uses this behaviour.
     */
    s->mac_reg[ICS] = val;

    /* Only causes that are both pending and unmasked assert the line. */
    pending_ints = (s->mac_reg[IMS] & s->mac_reg[ICR]);
    if (!s->mit_irq_level && pending_ints) {
        /*
         * Here we detect a potential raising edge. We postpone raising the
         * interrupt line if we are inside the mitigation delay window
         * (s->mit_timer_on == 1).
         * We provide a partial implementation of interrupt mitigation,
         * emulating only RADV, TADV and ITR (lower 16 bits, 1024ns units for
         * RADV and TADV, 256ns units for ITR). RDTR is only used to enable
         * RADV; relative timers based on TIDV and RDTR are not implemented.
         */
        if (s->mit_timer_on) {
            return;
        }
        if (s->compat_flags & E1000_FLAG_MIT) {
            /* Compute the next mitigation delay according to pending
             * interrupts and the current values of RADV (provided
             * RDTR!=0), TADV and ITR.
             * Then rearm the timer.
             */
            mit_delay = 0;
            if (s->mit_ide &&
                    (pending_ints & (E1000_ICR_TXQE | E1000_ICR_TXDW))) {
                mit_update_delay(&mit_delay, s->mac_reg[TADV] * 4);
            }
            if (s->mac_reg[RDTR] && (pending_ints & E1000_ICS_RXT0)) {
                mit_update_delay(&mit_delay, s->mac_reg[RADV] * 4);
            }
            mit_update_delay(&mit_delay, s->mac_reg[ITR]);

            /* All delays above are in units of 256 ns. */
            if (mit_delay) {
                s->mit_timer_on = 1;
                timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                          mit_delay * 256);
            }
            s->mit_ide = 0;
        }
    }

    /* Drive the PCI INTx line to match the pending-and-unmasked state. */
    s->mit_irq_level = (pending_ints != 0);
    pci_set_irq(d, s->mit_irq_level);
}
328 
329 static void
330 e1000_mit_timer(void *opaque)
331 {
332     E1000State *s = opaque;
333 
334     s->mit_timer_on = 0;
335     /* Call set_interrupt_cause to update the irq level (if necessary). */
336     set_interrupt_cause(s, 0, s->mac_reg[ICR]);
337 }
338 
339 static void
340 set_ics(E1000State *s, int index, uint32_t val)
341 {
342     DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
343         s->mac_reg[IMS]);
344     set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
345 }
346 
347 static int
348 rxbufsize(uint32_t v)
349 {
350     v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
351          E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
352          E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
353     switch (v) {
354     case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
355         return 16384;
356     case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
357         return 8192;
358     case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
359         return 4096;
360     case E1000_RCTL_SZ_1024:
361         return 1024;
362     case E1000_RCTL_SZ_512:
363         return 512;
364     case E1000_RCTL_SZ_256:
365         return 256;
366     }
367     return 2048;
368 }
369 
/*
 * Device reset: stop all timers, restore PHY/MAC registers to their
 * power-on defaults, clear transmit/mitigation state, and rebuild the
 * receive-address registers from the configured MAC address.
 */
static void e1000_reset(void *opaque)
{
    E1000State *d = opaque;
    E1000BaseClass *edc = E1000_DEVICE_GET_CLASS(d);
    uint8_t *macaddr = d->conf.macaddr.a;
    int i;

    timer_del(d->autoneg_timer);
    timer_del(d->mit_timer);
    d->mit_timer_on = 0;
    d->mit_irq_level = 0;
    d->mit_ide = 0;
    /* Zero everything, then overlay the non-zero defaults. */
    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    /* PHY_ID2 identifies the exact device model (per-subtype class data). */
    d->phy_reg[PHY_ID2] = edc->phy_id2;
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);

    if (qemu_get_queue(d->nic)->link_down) {
        e1000_link_down(d);
    }

    /* Some guests expect pre-initialized RAH/RAL (AddrValid flag + MACaddr) */
    d->mac_reg[RA] = 0;
    d->mac_reg[RA + 1] = E1000_RAH_AV;
    for (i = 0; i < 4; i++) {
        d->mac_reg[RA] |= macaddr[i] << (8 * i);
        d->mac_reg[RA + 1] |= (i < 2) ? macaddr[i + 4] << (8 * i) : 0;
    }
    qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);
}
403 
404 static void
405 set_ctrl(E1000State *s, int index, uint32_t val)
406 {
407     /* RST is self clearing */
408     s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
409 }
410 
411 static void
412 set_rx_control(E1000State *s, int index, uint32_t val)
413 {
414     s->mac_reg[RCTL] = val;
415     s->rxbuf_size = rxbufsize(val);
416     s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
417     DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
418            s->mac_reg[RCTL]);
419     qemu_flush_queued_packets(qemu_get_queue(s->nic));
420 }
421 
/*
 * MDIC write handler: emulate an MDIO read or write to the (single) PHY.
 * The result/data and READY bit are reported back through MDIC, with
 * E1000_MDIC_ERROR set for bad PHY numbers or inaccessible registers.
 * Completion can optionally raise the MDAC interrupt.
 */
static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            /* Replace the data field with the PHY register contents. */
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else {
            /* Run the per-register side-effect hook (if any), then store. */
            if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
                phyreg_writeops[addr](s, index, data);
            }
            s->phy_reg[addr] = data;
        }
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        set_ics(s, 0, E1000_ICR_MDAC);
    }
}
455 
/*
 * EECD read handler: report EEPROM present/grant plus the last written
 * control bits, and drive the DO (data out) line with the current bit of
 * the EEPROM word being shifted out.  DO also reads high when no read
 * command is in progress.
 */
static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    /* bitnum_out selects word (high bits) and bit within it; the ^ 0xf
     * converts bit position to MSB-first shift order. */
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}
469 
/*
 * EECD write handler: emulate the guest bit-banging the Microwire EEPROM
 * interface.  Commands are shifted in on DI on rising SK edges; once a
 * 9-bit READ command (opcode + address) is complete, subsequent falling
 * edges advance bitnum_out so get_eecd() can shift the word out on DO.
 */
static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_CS & val))			// CS inactive; nothing to do
	return;
    if (E1000_EECD_CS & (val ^ oldval)) {	// CS rise edge; reset state
	s->eecd_state.val_in = 0;
	s->eecd_state.bitnum_in = 0;
	s->eecd_state.bitnum_out = 0;
	s->eecd_state.reading = 0;
    }
    if (!(E1000_EECD_SK & (val ^ oldval)))	// no clock edge
        return;
    if (!(E1000_EECD_SK & val)) {		// falling edge
        s->eecd_state.bitnum_out++;
        return;
    }
    /* Rising SK edge: shift the DI bit into the command/address register. */
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        /* Full command received: low 6 bits are the word address, the next
         * 3 bits the opcode.  Start output just before the word boundary. */
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}
503 
504 static uint32_t
505 flash_eerd_read(E1000State *s, int x)
506 {
507     unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;
508 
509     if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
510         return (s->mac_reg[EERD]);
511 
512     if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
513         return (E1000_EEPROM_RW_REG_DONE | r);
514 
515     return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
516            E1000_EEPROM_RW_REG_DONE | r);
517 }
518 
/*
 * Compute an Internet (ones'-complement) checksum over data[css..n) and
 * store it big-endian at data[sloc].  A non-zero 'cse' clips the checksum
 * coverage to end at that offset (inclusive).  The store is skipped when
 * sloc does not leave room for a 16-bit write inside the covered area.
 */
static void
putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
{
    uint32_t sum;

    if (cse && cse < n)
        n = cse + 1;
    if (sloc < n-1) {
        sum = net_checksum_add(n-css, data+css);
        stw_be_p(data + sloc, net_checksum_finish(sum));
    }
}
531 
532 static inline int
533 vlan_enabled(E1000State *s)
534 {
535     return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
536 }
537 
538 static inline int
539 vlan_rx_filter_enabled(E1000State *s)
540 {
541     return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
542 }
543 
544 static inline int
545 is_vlan_packet(E1000State *s, const uint8_t *buf)
546 {
547     return (be16_to_cpup((uint16_t *)(buf + 12)) ==
548                 le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
549 }
550 
551 static inline int
552 is_vlan_txd(uint32_t txd_lower)
553 {
554     return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
555 }
556 
557 /* FCS aka Ethernet CRC-32. We don't get it from backends and can't
558  * fill it in, just pad descriptor length by 4 bytes unless guest
559  * told us to strip it off the packet. */
560 static inline int
561 fcs_len(E1000State *s)
562 {
563     return (s->mac_reg[RCTL] & E1000_RCTL_SECRC) ? 0 : 4;
564 }
565 
566 static void
567 e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
568 {
569     NetClientState *nc = qemu_get_queue(s->nic);
570     if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
571         nc->info->receive(nc, buf, size);
572     } else {
573         qemu_send_packet(nc, buf, size);
574     }
575 }
576 
/*
 * Emit one frame (or one TSO segment) from the accumulated tx buffer:
 * patch up IP/TCP/UDP headers for segmentation offload, apply requested
 * checksum offloads, optionally insert the VLAN tag, send the frame, and
 * bump the transmit statistics counters.
 */
static void
xmit_seg(E1000State *s)
{
    uint16_t len, *sp;
    unsigned int frames = s->tx.tso_frames, css, sofar, n;
    struct e1000_tx *tp = &s->tx;

    if (tp->tse && tp->cptse) {
        /* TSO segment: fix up per-segment header fields. */
        css = tp->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (tp->ip) {		// IPv4
            /* Total length and per-segment incremented IP identification. */
            stw_be_p(tp->data+css+2, tp->size - css);
            stw_be_p(tp->data+css+4,
                          be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
        } else			// IPv6
            stw_be_p(tp->data+css+4, tp->size - css);
        css = tp->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
        if (tp->tcp) {
            /* Advance the TCP sequence number by the payload sent so far;
             * clear PSH and FIN on all but the final segment. */
            sofar = frames * tp->mss;
            stl_be_p(tp->data+css+4, ldl_be_p(tp->data+css+4)+sofar); /* seq */
            if (tp->paylen - sofar > tp->mss)
                tp->data[css + 13] &= ~9;		// PSH, FIN
        } else	// UDP
            stw_be_p(tp->data+css+4, len);
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            unsigned int phsum;
            // add pseudo-header length before checksum calculation
            sp = (uint16_t *)(tp->data + tp->tucso);
            phsum = be16_to_cpup(sp) + len;
            phsum = (phsum >> 16) + (phsum & 0xffff);
            stw_be_p(sp, phsum);
        }
        tp->tso_frames++;
    }

    /* Offloaded checksums: transport (TXSM) and/or IP header (IXSM). */
    if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
        putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
        putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
    if (tp->vlan_needed) {
        /* Shift the MAC addresses into tp->vlan (contiguous with data) and
         * splice the 4-byte VLAN header in after them. */
        memmove(tp->vlan, tp->data, 4);
        memmove(tp->data, tp->data + 4, 8);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        e1000_send_packet(s, tp->vlan, tp->size + 4);
    } else
        e1000_send_packet(s, tp->data, tp->size);
    /* Statistics: packet counters and the 64-bit total-octets counter. */
    s->mac_reg[TPT]++;
    s->mac_reg[GPTC]++;
    n = s->mac_reg[TOTL];
    if ((s->mac_reg[TOTL] += s->tx.size) < n)
        s->mac_reg[TOTH]++;
}
632 
/*
 * Process one transmit descriptor: a context descriptor updates the
 * offload state in s->tx; a data or legacy descriptor DMAs its buffer
 * into the accumulation buffer, emitting segments (TSO) or the complete
 * frame (on EOP) via xmit_seg().
 */
static void
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
    unsigned int msh = 0xfffff;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    s->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE);
    if (dtype == E1000_TXD_CMD_DEXT) {	// context descriptor
        /* Latch the checksum/TSO parameters for subsequent data descriptors. */
        op = le32_to_cpu(xp->cmd_and_length);
        tp->ipcss = xp->lower_setup.ip_fields.ipcss;
        tp->ipcso = xp->lower_setup.ip_fields.ipcso;
        tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
        tp->tucss = xp->upper_setup.tcp_fields.tucss;
        tp->tucso = xp->upper_setup.tcp_fields.tucso;
        tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
        tp->paylen = op & 0xfffff;
        tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
        tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
        tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
        tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
        tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
        tp->tso_frames = 0;
        if (tp->tucso == 0) {	// this is probably wrong
            DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
            tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
        }
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        // data descriptor
        if (tp->size == 0) {
            tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        }
        tp->cptse = ( txd_lower & E1000_TXD_CMD_TSE ) ? 1 : 0;
    } else {
        // legacy descriptor
        tp->cptse = 0;
    }

    /* Capture the VLAN tag to insert (ethertype from VET, TCI from the
     * descriptor's special field) when insertion was requested. */
    if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        tp->vlan_needed = 1;
        stw_be_p(tp->vlan_header,
                      le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
        stw_be_p(tp->vlan_header + 2,
                      le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->tse && tp->cptse) {
        /* TSO: fill the buffer up to header + MSS, emit a segment, then
         * restart the next segment with the saved header prepended. */
        msh = tp->hdr_len + tp->mss;
        do {
            bytes = split_size;
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;

            bytes = MIN(sizeof(tp->data) - tp->size, bytes);
            pci_dma_read(d, addr, tp->data + tp->size, bytes);
            sz = tp->size + bytes;
            /* Save the packet header the first time it is fully present. */
            if (sz >= tp->hdr_len && tp->size < tp->hdr_len) {
                memmove(tp->header, tp->data, tp->hdr_len);
            }
            tp->size = sz;
            addr += bytes;
            if (sz == msh) {
                xmit_seg(s);
                memmove(tp->data, tp->header, tp->hdr_len);
                tp->size = tp->hdr_len;
            }
        } while (split_size -= bytes);
    } else if (!tp->tse && tp->cptse) {
        // context descriptor TSE is not set, while data descriptor TSE is set
        DBGOUT(TXERR, "TCP segmentation error\n");
    } else {
        /* Plain frame: append this buffer, clamped to the accumulation
         * buffer capacity. */
        split_size = MIN(sizeof(tp->data) - tp->size, split_size);
        pci_dma_read(d, addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

    if (!(txd_lower & E1000_TXD_CMD_EOP))
        return;
    /* End of packet: send it unless this is a TSO frame shorter than its
     * own header, then reset the per-packet state. */
    if (!(tp->tse && tp->cptse && tp->size < tp->hdr_len)) {
        xmit_seg(s);
    }
    tp->tso_frames = 0;
    tp->sum_needed = 0;
    tp->vlan_needed = 0;
    tp->size = 0;
    tp->cptse = 0;
}
728 
/*
 * If the descriptor requested status reporting (RS/RPS), write the DD
 * (descriptor done) status back to guest memory via DMA and return the
 * TXDW interrupt cause; otherwise return 0.
 */
static uint32_t
txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
        return 0;
    /* Set DD, clear the error bits we never report. */
    txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
                ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
    dp->upper.data = cpu_to_le32(txd_upper);
    /* Write back only the status dword of the in-memory descriptor. */
    pci_dma_write(d, base + ((char *)&dp->upper - (char *)dp),
                  &dp->upper, sizeof(dp->upper));
    return E1000_ICR_TXDW;
}
744 
745 static uint64_t tx_desc_base(E1000State *s)
746 {
747     uint64_t bah = s->mac_reg[TDBAH];
748     uint64_t bal = s->mac_reg[TDBAL] & ~0xf;
749 
750     return (bah << 32) + bal;
751 }
752 
/*
 * Walk the TX descriptor ring from TDH to TDT, processing and writing
 * back each descriptor, then raise the accumulated interrupt causes.
 * Bails out with a diagnostic if bogus TDT/TDLEN values make TDH wrap
 * all the way around.
 */
static void
start_xmit(E1000State *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = tx_desc_base(s) +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        pci_dma_read(d, base, &desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(s, base, &desc);

        /* Advance TDH, wrapping at the end of the ring. */
        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    set_ics(s, 0, cause);
}
793 
/*
 * Receive filtering: decide whether an incoming frame should be accepted.
 * Order: VLAN filter (VFTA), unicast/multicast promiscuous modes,
 * broadcast, exact receive-address (RA) match, then the multicast hash
 * table (MTA).  Returns 1 to accept the frame, 0 to drop it.
 */
static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const int mta_shift[] = {4, 3, 2, 0};
    uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;

    if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
        /* VFTA is a 4096-bit table indexed by the 12-bit VLAN id. */
        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
        uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
                                     ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0)
            return 0;
    }

    if (rctl & E1000_RCTL_UPE)			// promiscuous
        return 1;

    if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE))	// promiscuous mcast
        return 1;

    if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
        return 1;

    /* Exact match against each valid (AV) receive-address register pair. */
    for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
        if (!(rp[1] & E1000_RAH_AV))
            continue;
        ra[0] = cpu_to_le32(rp[0]);
        ra[1] = cpu_to_le32(rp[1]);
        if (!memcmp(buf, (uint8_t *)ra, 6)) {
            DBGOUT(RXFILTER,
                   "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
                   (int)(rp - s->mac_reg - RA)/2,
                   buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
            return 1;
        }
    }
    DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);

    /* Multicast hash: RCTL.MO selects which 12 bits of the address index
     * the 4096-bit MTA table. */
    f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
    f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
    if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
        return 1;
    DBGOUT(RXFILTER,
           "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
           (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
           s->mac_reg[MTA + (f >> 5)]);

    return 0;
}
846 
/*
 * Backend link-status callback: propagate the host-side link state into
 * the emulated MAC/PHY, emulating auto-negotiation delay where the guest
 * enabled it, and raise a Link Status Change interrupt if anything
 * actually changed.
 */
static void
e1000_set_link_status(NetClientState *nc)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    uint32_t old_status = s->mac_reg[STATUS];

    if (nc->link_down) {
        e1000_link_down(s);
    } else {
        if (s->compat_flags & E1000_FLAG_AUTONEG &&
            s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN &&
            s->phy_reg[PHY_CTRL] & MII_CR_RESTART_AUTO_NEG &&
            !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
            /* emulate auto-negotiation if supported */
            timer_mod(s->autoneg_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
        } else {
            e1000_link_up(s);
        }
    }

    if (s->mac_reg[STATUS] != old_status)
        set_ics(s, 0, E1000_ICR_LSC);
}
871 
872 static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
873 {
874     int bufs;
875     /* Fast-path short packets */
876     if (total_size <= s->rxbuf_size) {
877         return s->mac_reg[RDH] != s->mac_reg[RDT];
878     }
879     if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
880         bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
881     } else if (s->mac_reg[RDH] > s->mac_reg[RDT]) {
882         bufs = s->mac_reg[RDLEN] /  sizeof(struct e1000_rx_desc) +
883             s->mac_reg[RDT] - s->mac_reg[RDH];
884     } else {
885         return false;
886     }
887     return total_size <= bufs * s->rxbuf_size;
888 }
889 
890 static int
891 e1000_can_receive(NetClientState *nc)
892 {
893     E1000State *s = qemu_get_nic_opaque(nc);
894 
895     return (s->mac_reg[STATUS] & E1000_STATUS_LU) &&
896         (s->mac_reg[RCTL] & E1000_RCTL_EN) && e1000_has_rxbufs(s, 1);
897 }
898 
899 static uint64_t rx_desc_base(E1000State *s)
900 {
901     uint64_t bah = s->mac_reg[RDBAH];
902     uint64_t bal = s->mac_reg[RDBAL] & ~0xf;
903 
904     return (bah << 32) + bal;
905 }
906 
/*
 * Receive one packet, presented as a scatter/gather iovec, into the
 * guest's RX descriptor ring via DMA.  Returns the (possibly padded)
 * packet size on success or when the packet is filtered/oversized,
 * and -1 when the link/receiver is down or no descriptors are free
 * (which also queues the packet for retry via can_receive).
 */
static ssize_t
e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    struct e1000_rx_desc desc;
    dma_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0;
    uint8_t min_buf[MIN_BUF_SIZE];
    struct iovec min_iov;
    uint8_t *filter_buf = iov->iov_base;      /* header bytes used for filtering */
    size_t size = iov_size(iov, iovcnt);
    size_t iov_ofs = 0;                       /* read offset within current iov element */
    size_t desc_offset;
    size_t desc_size;
    size_t total_size;

    /* Drop everything while the link is down. */
    if (!(s->mac_reg[STATUS] & E1000_STATUS_LU)) {
        return -1;
    }

    /* ...or while the receiver is disabled. */
    if (!(s->mac_reg[RCTL] & E1000_RCTL_EN)) {
        return -1;
    }

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(min_buf)) {
        /* Copy into a zero-padded local buffer and continue with a
         * single-element iovec pointing at it. */
        iov_to_buf(iov, iovcnt, 0, min_buf, size);
        memset(&min_buf[size], 0, sizeof(min_buf) - size);
        min_iov.iov_base = filter_buf = min_buf;
        min_iov.iov_len = size = sizeof(min_buf);
        iovcnt = 1;
        iov = &min_iov;
    } else if (iov->iov_len < MAXIMUM_ETHERNET_HDR_LEN) {
        /* This is very unlikely, but may happen. */
        iov_to_buf(iov, iovcnt, 0, min_buf, MAXIMUM_ETHERNET_HDR_LEN);
        filter_buf = min_buf;
    }

    /* Discard oversized packets if !LPE and !SBP. */
    if ((size > MAXIMUM_ETHERNET_LPE_SIZE ||
        (size > MAXIMUM_ETHERNET_VLAN_SIZE
        && !(s->mac_reg[RCTL] & E1000_RCTL_LPE)))
        && !(s->mac_reg[RCTL] & E1000_RCTL_SBP)) {
        return size;
    }

    /* Destination-address filtering; report "consumed" on mismatch. */
    if (!receive_filter(s, filter_buf, size)) {
        return size;
    }

    if (vlan_enabled(s) && is_vlan_packet(s, filter_buf)) {
        /* Strip the 4-byte 802.1Q tag: save the TCI for the descriptor's
         * special field, then shift the 12 bytes of MAC addresses up over
         * the tag (either in place or via the iovec). */
        vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(filter_buf
                                                                + 14)));
        iov_ofs = 4;
        if (filter_buf == iov->iov_base) {
            memmove(filter_buf + 4, filter_buf, 12);
        } else {
            iov_from_buf(iov, iovcnt, 4, filter_buf, 12);
            while (iov->iov_len <= iov_ofs) {
                iov_ofs -= iov->iov_len;
                iov++;
            }
        }
        vlan_status = E1000_RXD_STAT_VP;
        size -= 4;
    }

    rdh_start = s->mac_reg[RDH];
    desc_offset = 0;
    total_size = size + fcs_len(s);
    if (!e1000_has_rxbufs(s, total_size)) {
            /* Ring full: signal RX overrun and let the caller retry. */
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
    }
    /* Spread the packet across as many descriptors as needed, at most
     * rxbuf_size bytes per descriptor. */
    do {
        desc_size = total_size - desc_offset;
        if (desc_size > s->rxbuf_size) {
            desc_size = s->rxbuf_size;
        }
        base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
        pci_dma_read(d, base, &desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        if (desc.buffer_addr) {
            if (desc_offset < size) {
                size_t iov_copy;
                hwaddr ba = le64_to_cpu(desc.buffer_addr);
                size_t copy_size = size - desc_offset;
                if (copy_size > s->rxbuf_size) {
                    copy_size = s->rxbuf_size;
                }
                /* DMA this descriptor's share out of the iovec, element
                 * by element. */
                do {
                    iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);
                    pci_dma_write(d, ba, iov->iov_base + iov_ofs, iov_copy);
                    copy_size -= iov_copy;
                    ba += iov_copy;
                    iov_ofs += iov_copy;
                    if (iov_ofs == iov->iov_len) {
                        iov++;
                        iov_ofs = 0;
                    }
                } while (copy_size);
            }
            desc_offset += desc_size;
            desc.length = cpu_to_le16(desc_size);
            if (desc_offset >= total_size) {
                desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
            } else {
                /* Guest zeroing out status is not a hardware requirement.
                   Clear EOP in case guest didn't do it. */
                desc.status &= ~E1000_RXD_STAT_EOP;
            }
        } else { // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        }
        /* Write the updated descriptor (status/length) back to the guest. */
        pci_dma_write(d, base, &desc, sizeof(desc));

        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
        }
    } while (desc_offset < total_size);

    /* Update statistics: good packets, total packets, total octets. */
    s->mac_reg[GPRC]++;
    s->mac_reg[TPR]++;
    /* TOR - Total Octets Received:
     * This register includes bytes received in a packet from the <Destination
     * Address> field through the <CRC> field, inclusively.
     */
    n = s->mac_reg[TORL] + size + /* Always include FCS length. */ 4;
    if (n < s->mac_reg[TORL])
        s->mac_reg[TORH]++;
    s->mac_reg[TORL] = n;

    /* Raise RXT0, plus RXDMT0 when free descriptors fall below the
     * configured minimum threshold. */
    n = E1000_ICS_RXT0;
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);

    return size;
}
1061 
1062 static ssize_t
1063 e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size)
1064 {
1065     const struct iovec iov = {
1066         .iov_base = (uint8_t *)buf,
1067         .iov_len = size
1068     };
1069 
1070     return e1000_receive_iov(nc, &iov, 1);
1071 }
1072 
1073 static uint32_t
1074 mac_readreg(E1000State *s, int index)
1075 {
1076     return s->mac_reg[index];
1077 }
1078 
1079 static uint32_t
1080 mac_icr_read(E1000State *s, int index)
1081 {
1082     uint32_t ret = s->mac_reg[ICR];
1083 
1084     DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
1085     set_interrupt_cause(s, 0, 0);
1086     return ret;
1087 }
1088 
1089 static uint32_t
1090 mac_read_clr4(E1000State *s, int index)
1091 {
1092     uint32_t ret = s->mac_reg[index];
1093 
1094     s->mac_reg[index] = 0;
1095     return ret;
1096 }
1097 
1098 static uint32_t
1099 mac_read_clr8(E1000State *s, int index)
1100 {
1101     uint32_t ret = s->mac_reg[index];
1102 
1103     s->mac_reg[index] = 0;
1104     s->mac_reg[index-1] = 0;
1105     return ret;
1106 }
1107 
/* Plain register write; additionally refreshes the NIC info string when
 * the guest completes programming receive address 0. */
static void
mac_writereg(E1000State *s, int index, uint32_t val)
{
    uint32_t macaddr[2];

    s->mac_reg[index] = val;

    /* Writing RAH0 (the high dword of RA[0]) latches a full station
     * address, so rebuild the human-readable MAC string. */
    if (index == RA + 1) {
        macaddr[0] = cpu_to_le32(s->mac_reg[RA]);
        macaddr[1] = cpu_to_le32(s->mac_reg[RA + 1]);
        qemu_format_nic_info_str(qemu_get_queue(s->nic), (uint8_t *)macaddr);
    }
}
1121 
/* Write handler for RDT (RX descriptor tail, 16 bits).  Moving the tail
 * may free descriptors, so drain any packets queued while the ring was
 * full. */
static void
set_rdt(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
    if (e1000_has_rxbufs(s, 1)) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}
1130 
/* Write handler for registers that only implement their low 16 bits. */
static void
set_16bit(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
}
1136 
/* Write handler for TDLEN/RDLEN: descriptor ring length must be a
 * multiple of 128 bytes, max 20 bits. */
static void
set_dlen(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xfff80;
}
1142 
/* Write handler shared by TCTL and TDT: store the value, mask TDT to
 * 16 bits, and kick the transmit path. */
static void
set_tctl(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
    s->mac_reg[TDT] &= 0xffff;
    start_xmit(s);
}
1150 
/* Write handler for ICR: writing 1 bits clears the corresponding
 * interrupt causes. */
static void
set_icr(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_icr %x\n", val);
    set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
}
1157 
/* Write handler for IMC: mask (disable) the given interrupt causes,
 * then re-evaluate the interrupt line. */
static void
set_imc(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] &= ~val;
    set_ics(s, 0, 0);
}
1164 
/* Write handler for IMS: unmask (enable) the given interrupt causes,
 * then re-evaluate the interrupt line. */
static void
set_ims(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] |= val;
    set_ics(s, 0, 0);
}
1171 
#define getreg(x)	[x] = mac_readreg
/* Per-register read dispatch table, indexed by (MMIO offset >> 2).
 * NULL entries are reported as unknown reads. */
static uint32_t (*macreg_readops[])(E1000State *, int) = {
    getreg(PBA),	getreg(RCTL),	getreg(TDH),	getreg(TXDCTL),
    getreg(WUFC),	getreg(TDT),	getreg(CTRL),	getreg(LEDCTL),
    getreg(MANC),	getreg(MDIC),	getreg(SWSM),	getreg(STATUS),
    getreg(TORL),	getreg(TOTL),	getreg(IMS),	getreg(TCTL),
    getreg(RDH),	getreg(RDT),	getreg(VET),	getreg(ICS),
    getreg(TDBAL),	getreg(TDBAH),	getreg(RDBAH),	getreg(RDBAL),
    getreg(TDLEN),      getreg(RDLEN),  getreg(RDTR),   getreg(RADV),
    getreg(TADV),       getreg(ITR),

    /* Statistics registers clear on read; ICR has interrupt side effects. */
    [TOTH] = mac_read_clr8,	[TORH] = mac_read_clr8,	[GPRC] = mac_read_clr4,
    [GPTC] = mac_read_clr4,	[TPR] = mac_read_clr4,	[TPT] = mac_read_clr4,
    [ICR] = mac_icr_read,	[EECD] = get_eecd,	[EERD] = flash_eerd_read,
    [CRCERRS ... MPC] = &mac_readreg,
    [RA ... RA+31] = &mac_readreg,
    [MTA ... MTA+127] = &mac_readreg,
    [VFTA ... VFTA+127] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
1192 
#define putreg(x)	[x] = mac_writereg
/* Per-register write dispatch table, indexed by (MMIO offset >> 2).
 * NULL entries that are readable are treated as read-only registers. */
static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
    putreg(PBA),	putreg(EERD),	putreg(SWSM),	putreg(WUFC),
    putreg(TDBAL),	putreg(TDBAH),	putreg(TXDCTL),	putreg(RDBAH),
    putreg(RDBAL),	putreg(LEDCTL), putreg(VET),
    [TDLEN] = set_dlen,	[RDLEN] = set_dlen,	[TCTL] = set_tctl,
    [TDT] = set_tctl,	[MDIC] = set_mdic,	[ICS] = set_ics,
    [TDH] = set_16bit,	[RDH] = set_16bit,	[RDT] = set_rdt,
    [IMC] = set_imc,	[IMS] = set_ims,	[ICR] = set_icr,
    [EECD] = set_eecd,	[RCTL] = set_rx_control, [CTRL] = set_ctrl,
    [RDTR] = set_16bit, [RADV] = set_16bit,     [TADV] = set_16bit,
    [ITR] = set_16bit,
    [RA ... RA+31] = &mac_writereg,
    [MTA ... MTA+127] = &mac_writereg,
    [VFTA ... VFTA+127] = &mac_writereg,
};

enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
1211 
1212 static void
1213 e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val,
1214                  unsigned size)
1215 {
1216     E1000State *s = opaque;
1217     unsigned int index = (addr & 0x1ffff) >> 2;
1218 
1219     if (index < NWRITEOPS && macreg_writeops[index]) {
1220         macreg_writeops[index](s, index, val);
1221     } else if (index < NREADOPS && macreg_readops[index]) {
1222         DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n", index<<2, val);
1223     } else {
1224         DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
1225                index<<2, val);
1226     }
1227 }
1228 
1229 static uint64_t
1230 e1000_mmio_read(void *opaque, hwaddr addr, unsigned size)
1231 {
1232     E1000State *s = opaque;
1233     unsigned int index = (addr & 0x1ffff) >> 2;
1234 
1235     if (index < NREADOPS && macreg_readops[index])
1236     {
1237         return macreg_readops[index](s, index);
1238     }
1239     DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
1240     return 0;
1241 }
1242 
/* Register-space MMIO is always performed as aligned 32-bit accesses. */
static const MemoryRegionOps e1000_mmio_ops = {
    .read = e1000_mmio_read,
    .write = e1000_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
1252 
/* I/O-port BAR access is not implemented; reads return zero. */
static uint64_t e1000_io_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    E1000State *s = opaque;

    (void)s;
    return 0;
}
1261 
/* I/O-port BAR access is not implemented; writes are ignored. */
static void e1000_io_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    E1000State *s = opaque;

    (void)s;
}
1269 
/* Stub ops for the (unimplemented) I/O-port BAR. */
static const MemoryRegionOps e1000_io_ops = {
    .read = e1000_io_read,
    .write = e1000_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
1275 
/* Migration-field predicate: true only for version 1 of the stream. */
static bool is_version_1(void *opaque, int version_id)
{
    bool match = (version_id == 1);

    return match;
}
1280 
/* Prepare device state for migration: flush pending mitigation work and
 * fold an in-flight auto-negotiation into a stable, restorable state. */
static void e1000_pre_save(void *opaque)
{
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);

    /* If the mitigation timer is active, emulate a timeout now. */
    if (s->mit_timer_on) {
        e1000_mit_timer(s);
    }

    /*
     * If link is down and auto-negotiation is supported and ongoing,
     * complete auto-negotiation immediately. This allows us to look
     * at MII_SR_AUTONEG_COMPLETE to infer link status on load.
     */
    if (nc->link_down &&
        s->compat_flags & E1000_FLAG_AUTONEG &&
        s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN &&
        s->phy_reg[PHY_CTRL] & MII_CR_RESTART_AUTO_NEG) {
         s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
    }
}
1303 
/* Restore runtime state after migration: reset mitigation bookkeeping
 * (clearing migrated mitigation registers when the compat flag disables
 * the feature) and reconstruct the link state.  Always returns 0. */
static int e1000_post_load(void *opaque, int version_id)
{
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);

    if (!(s->compat_flags & E1000_FLAG_MIT)) {
        s->mac_reg[ITR] = s->mac_reg[RDTR] = s->mac_reg[RADV] =
            s->mac_reg[TADV] = 0;
        s->mit_irq_level = false;
    }
    s->mit_ide = 0;
    s->mit_timer_on = false;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in mac_reg[STATUS].
     * Alternatively, restart link negotiation if it was in progress. */
    nc->link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;

    if (s->compat_flags & E1000_FLAG_AUTONEG &&
        s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN &&
        s->phy_reg[PHY_CTRL] & MII_CR_RESTART_AUTO_NEG &&
        !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
        nc->link_down = false;
        timer_mod(s->autoneg_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }

    return 0;
}
1332 
1333 static bool e1000_mit_state_needed(void *opaque)
1334 {
1335     E1000State *s = opaque;
1336 
1337     return s->compat_flags & E1000_FLAG_MIT;
1338 }
1339 
/* Optional migration subsection carrying interrupt-mitigation state. */
static const VMStateDescription vmstate_e1000_mit_state = {
    .name = "e1000/mit_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mac_reg[RDTR], E1000State),
        VMSTATE_UINT32(mac_reg[RADV], E1000State),
        VMSTATE_UINT32(mac_reg[TADV], E1000State),
        VMSTATE_UINT32(mac_reg[ITR], E1000State),
        VMSTATE_BOOL(mit_irq_level, E1000State),
        VMSTATE_END_OF_LIST()
    }
};
1353 
/* Main migration descriptor.  Field order and types are part of the
 * migration wire format and must never be changed, only appended to
 * (with a version bump) or extended via subsections. */
static const VMStateDescription vmstate_e1000 = {
    .name = "e1000",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = e1000_pre_save,
    .post_load = e1000_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, E1000State),
        VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
        VMSTATE_UNUSED(4), /* Was mmio_base.  */
        VMSTATE_UINT32(rxbuf_size, E1000State),
        VMSTATE_UINT32(rxbuf_min_shift, E1000State),
        VMSTATE_UINT32(eecd_state.val_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
        VMSTATE_UINT16(eecd_state.reading, E1000State),
        VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
        VMSTATE_UINT8(tx.ipcss, E1000State),
        VMSTATE_UINT8(tx.ipcso, E1000State),
        VMSTATE_UINT16(tx.ipcse, E1000State),
        VMSTATE_UINT8(tx.tucss, E1000State),
        VMSTATE_UINT8(tx.tucso, E1000State),
        VMSTATE_UINT16(tx.tucse, E1000State),
        VMSTATE_UINT32(tx.paylen, E1000State),
        VMSTATE_UINT8(tx.hdr_len, E1000State),
        VMSTATE_UINT16(tx.mss, E1000State),
        VMSTATE_UINT16(tx.size, E1000State),
        VMSTATE_UINT16(tx.tso_frames, E1000State),
        VMSTATE_UINT8(tx.sum_needed, E1000State),
        VMSTATE_INT8(tx.ip, E1000State),
        VMSTATE_INT8(tx.tcp, E1000State),
        VMSTATE_BUFFER(tx.header, E1000State),
        VMSTATE_BUFFER(tx.data, E1000State),
        VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
        VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
        VMSTATE_UINT32(mac_reg[CTRL], E1000State),
        VMSTATE_UINT32(mac_reg[EECD], E1000State),
        VMSTATE_UINT32(mac_reg[EERD], E1000State),
        VMSTATE_UINT32(mac_reg[GPRC], E1000State),
        VMSTATE_UINT32(mac_reg[GPTC], E1000State),
        VMSTATE_UINT32(mac_reg[ICR], E1000State),
        VMSTATE_UINT32(mac_reg[ICS], E1000State),
        VMSTATE_UINT32(mac_reg[IMC], E1000State),
        VMSTATE_UINT32(mac_reg[IMS], E1000State),
        VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[MANC], E1000State),
        VMSTATE_UINT32(mac_reg[MDIC], E1000State),
        VMSTATE_UINT32(mac_reg[MPC], E1000State),
        VMSTATE_UINT32(mac_reg[PBA], E1000State),
        VMSTATE_UINT32(mac_reg[RCTL], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[RDH], E1000State),
        VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[RDT], E1000State),
        VMSTATE_UINT32(mac_reg[STATUS], E1000State),
        VMSTATE_UINT32(mac_reg[SWSM], E1000State),
        VMSTATE_UINT32(mac_reg[TCTL], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[TDH], E1000State),
        VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[TDT], E1000State),
        VMSTATE_UINT32(mac_reg[TORH], E1000State),
        VMSTATE_UINT32(mac_reg[TORL], E1000State),
        VMSTATE_UINT32(mac_reg[TOTH], E1000State),
        VMSTATE_UINT32(mac_reg[TOTL], E1000State),
        VMSTATE_UINT32(mac_reg[TPR], E1000State),
        VMSTATE_UINT32(mac_reg[TPT], E1000State),
        VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[WUFC], E1000State),
        VMSTATE_UINT32(mac_reg[VET], E1000State),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_e1000_mit_state,
            .needed = e1000_mit_state_needed,
        }, {
            /* empty */
        }
    }
};
1440 
1441 /*
1442  * EEPROM contents documented in Tables 5-2 and 5-3, pp. 98-102.
1443  * Note: A valid DevId will be inserted during pci_e1000_init().
1444  */
/* Default EEPROM image (64 16-bit words); words 0-2 (MAC address),
 * 11/13 (device id) and 63 (checksum) are filled in at init time. */
static const uint16_t e1000_eeprom_template[64] = {
    0x0000, 0x0000, 0x0000, 0x0000,      0xffff, 0x0000,      0x0000, 0x0000,
    0x3000, 0x1000, 0x6403, 0 /*DevId*/, 0x8086, 0 /*DevId*/, 0x8086, 0x3040,
    0x0008, 0x2000, 0x7e14, 0x0048,      0x1000, 0x00d8,      0x0000, 0x2700,
    0x6cc9, 0x3150, 0x0722, 0x040b,      0x0984, 0x0000,      0xc000, 0x0706,
    0x1008, 0x0000, 0x0f04, 0x7fff,      0x4d01, 0xffff,      0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
    0x0100, 0x4000, 0x121c, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0x0000,
};
1455 
1456 /* PCI interface */
1457 
/* Create the MMIO and I/O-port memory regions.  MMIO is coalesced for
 * performance except around registers with side effects that must be
 * handled synchronously. */
static void
e1000_mmio_setup(E1000State *d)
{
    int i;
    /* Registers excluded from coalescing; the PNPMMIO_SIZE sentinel
     * terminates the list. */
    const uint32_t excluded_regs[] = {
        E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
        E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
    };

    memory_region_init_io(&d->mmio, OBJECT(d), &e1000_mmio_ops, d,
                          "e1000-mmio", PNPMMIO_SIZE);
    /* Coalesce everything below the first excluded register, then each
     * gap between consecutive excluded registers. */
    memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
    for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
        memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
                                     excluded_regs[i+1] - excluded_regs[i] - 4);
    memory_region_init_io(&d->io, OBJECT(d), &e1000_io_ops, d, "e1000-io", IOPORT_SIZE);
}
1475 
/* NetClient cleanup callback: the NIC backend is going away, so drop
 * our reference to it. */
static void
e1000_cleanup(NetClientState *nc)
{
    E1000State *s = qemu_get_nic_opaque(nc);

    s->nic = NULL;
}
1483 
/* PCI device teardown: stop and free both timers, destroy the memory
 * regions, and unregister the NIC backend. */
static void
pci_e1000_uninit(PCIDevice *dev)
{
    E1000State *d = E1000(dev);

    timer_del(d->autoneg_timer);
    timer_free(d->autoneg_timer);
    timer_del(d->mit_timer);
    timer_free(d->mit_timer);
    memory_region_destroy(&d->mmio);
    memory_region_destroy(&d->io);
    qemu_del_nic(d->nic);
}
1497 
/* NetClient callbacks wiring the emulated NIC into QEMU's net layer. */
static NetClientInfo net_e1000_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = e1000_can_receive,
    .receive = e1000_receive,
    .receive_iov = e1000_receive_iov,
    .cleanup = e1000_cleanup,
    .link_status_changed = e1000_set_link_status,
};
1507 
/* PCI device init: set up config space, BARs, the EEPROM image (MAC
 * address, device id and checksum), the NIC backend and the timers.
 * Returns 0 on success. */
static int pci_e1000_init(PCIDevice *pci_dev)
{
    DeviceState *dev = DEVICE(pci_dev);
    E1000State *d = E1000(pci_dev);
    PCIDeviceClass *pdc = PCI_DEVICE_GET_CLASS(pci_dev);
    uint8_t *pci_conf;
    uint16_t checksum = 0;
    int i;
    uint8_t *macaddr;

    pci_conf = pci_dev->config;

    /* TODO: RST# value should be 0, PCI spec 6.2.4 */
    pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;

    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */

    e1000_mmio_setup(d);

    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);

    pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);

    /* Start from the template, then patch in the per-device words. */
    memmove(d->eeprom_data, e1000_eeprom_template,
        sizeof e1000_eeprom_template);
    qemu_macaddr_default_if_unset(&d->conf.macaddr);
    macaddr = d->conf.macaddr.a;
    /* MAC address occupies EEPROM words 0-2, little-endian per word. */
    for (i = 0; i < 3; i++)
        d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
    d->eeprom_data[11] = d->eeprom_data[13] = pdc->device_id;
    /* Word 63 makes the 16-bit sum of words 0-63 equal EEPROM_SUM. */
    for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
        checksum += d->eeprom_data[i];
    checksum = (uint16_t) EEPROM_SUM - checksum;
    d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;

    d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
                          object_get_typename(OBJECT(d)), dev->id, d);

    qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);

    add_boot_device_path(d->conf.bootindex, dev, "/ethernet-phy@0");

    d->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, e1000_autoneg_timer, d);
    d->mit_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000_mit_timer, d);

    return 0;
}
1555 
1556 static void qdev_e1000_reset(DeviceState *dev)
1557 {
1558     E1000State *d = E1000(dev);
1559     e1000_reset(d);
1560 }
1561 
/* User-visible qdev properties; the two bits double as compat knobs for
 * migration from older QEMU versions. */
static Property e1000_properties[] = {
    DEFINE_NIC_PROPERTIES(E1000State, conf),
    DEFINE_PROP_BIT("autonegotiation", E1000State,
                    compat_flags, E1000_FLAG_AUTONEG_BIT, true),
    DEFINE_PROP_BIT("mitigation", E1000State,
                    compat_flags, E1000_FLAG_MIT_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};
1570 
/* Per-variant identification data used to instantiate the concrete
 * e1000 device models. */
typedef struct E1000Info {
    const char *name;       /* QOM type name */
    uint16_t   device_id;   /* PCI device id */
    uint8_t    revision;    /* PCI revision id */
    uint16_t   phy_id2;     /* PHY identifier register 2 value */
} E1000Info;
1577 
/* Class init shared by all variants; 'data' is the E1000Info selecting
 * the device/revision/PHY ids for this concrete type. */
static void e1000_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    E1000BaseClass *e = E1000_DEVICE_CLASS(klass);
    const E1000Info *info = data;

    k->init = pci_e1000_init;
    k->exit = pci_e1000_uninit;
    k->romfile = "efi-e1000.rom";
    k->vendor_id = PCI_VENDOR_ID_INTEL;
    k->device_id = info->device_id;
    k->revision = info->revision;
    e->phy_id2 = info->phy_id2;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->desc = "Intel Gigabit Ethernet";
    dc->reset = qdev_e1000_reset;
    dc->vmsd = &vmstate_e1000;
    dc->props = e1000_properties;
}
1599 
/* Abstract base type; concrete variants are registered from
 * e1000_devices[]. */
static const TypeInfo e1000_base_info = {
    .name          = TYPE_E1000_BASE,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(E1000State),
    .class_size    = sizeof(E1000BaseClass),
    .abstract      = true,
};
1607 
/* The concrete device variants that get registered as QOM types. */
static const E1000Info e1000_devices[] = {
    {
        .name      = "e1000-82540em",
        .device_id = E1000_DEV_ID_82540EM,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
    {
        .name      = "e1000-82544gc",
        .device_id = E1000_DEV_ID_82544GC_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_82544x,
    },
    {
        .name      = "e1000-82545em",
        .device_id = E1000_DEV_ID_82545EM_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
};
1628 
/* "e1000" is an alias for the default 82540EM variant. */
static const TypeInfo e1000_default_info = {
    .name          = "e1000",
    .parent        = "e1000-82540em",
};
1633 
1634 static void e1000_register_types(void)
1635 {
1636     int i;
1637 
1638     type_register_static(&e1000_base_info);
1639     for (i = 0; i < ARRAY_SIZE(e1000_devices); i++) {
1640         const E1000Info *info = &e1000_devices[i];
1641         TypeInfo type_info = {};
1642 
1643         type_info.name = info->name;
1644         type_info.parent = TYPE_E1000_BASE;
1645         type_info.class_data = (void *)info;
1646         type_info.class_init = e1000_class_init;
1647 
1648         type_register(&type_info);
1649     }
1650     type_register_static(&e1000_default_info);
1651 }
1652 
1653 type_init(e1000_register_types)
1654