xref: /qemu/hw/net/e1000.c (revision 6883b5914029fa8ffc42a43d2a2188493c27fd58)
1 /*
2  * QEMU e1000 emulation
3  *
4  * Software developer's manual:
5  * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
6  *
7  * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
8  * Copyright (c) 2008 Qumranet
9  * Based on work done by:
10  * Copyright (c) 2007 Dan Aloni
11  * Copyright (c) 2004 Antony T Curtis
12  *
13  * This library is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2 of the License, or (at your option) any later version.
17  *
18  * This library is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
25  */
26 
27 
28 #include "hw/hw.h"
29 #include "hw/pci/pci.h"
30 #include "net/net.h"
31 #include "net/checksum.h"
32 #include "hw/loader.h"
33 #include "sysemu/sysemu.h"
34 #include "sysemu/dma.h"
35 #include "qemu/iov.h"
36 
37 #include "e1000_regs.h"
38 
39 #define E1000_DEBUG
40 
#ifdef E1000_DEBUG
/* Per-subsystem debug categories; each occupies one bit in debugflags. */
enum {
    DEBUG_GENERAL,	DEBUG_IO,	DEBUG_MMIO,	DEBUG_INTERRUPT,
    DEBUG_RX,		DEBUG_TX,	DEBUG_MDIC,	DEBUG_EEPROM,
    DEBUG_UNKNOWN,	DEBUG_TXSUM,	DEBUG_TXERR,	DEBUG_RXERR,
    DEBUG_RXFILTER,     DEBUG_PHY,      DEBUG_NOTYET,
};
#define DBGBIT(x)	(1<<DEBUG_##x)
/* Categories enabled by default: TX errors and general messages. */
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

/* Print to stderr only when category 'what' is enabled in debugflags. */
#define	DBGOUT(what, fmt, ...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
    } while (0)
#else
#define	DBGOUT(what, fmt, ...) do {} while (0)
#endif
58 
#define IOPORT_SIZE       0x40
#define PNPMMIO_SIZE      0x20000
#define MIN_BUF_SIZE      60 /* Min. octets in an ethernet frame sans FCS */

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* this is the size past which hardware will drop packets when setting LPE=1 */
#define MAXIMUM_ETHERNET_LPE_SIZE 16384

/* 14-byte Ethernet header plus a 4-byte 802.1Q VLAN tag */
#define MAXIMUM_ETHERNET_HDR_LEN (14+4)
69 
70 /*
71  * HW models:
72  *  E1000_DEV_ID_82540EM works with Windows, Linux, and OS X <= 10.8
73  *  E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
74  *  E1000_DEV_ID_82545EM_COPPER works with Linux and OS X >= 10.6
75  *  Others never tested
76  */
77 
/* Per-device state of the emulated Intel 8254x NIC. */
typedef struct E1000State_st {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/

    NICState *nic;
    NICConf conf;
    MemoryRegion mmio;          /* register MMIO BAR */
    MemoryRegion io;            /* register I/O-port BAR */

    uint32_t mac_reg[0x8000];   /* MAC registers, indexed by dword offset */
    uint16_t phy_reg[0x20];     /* MII PHY registers */
    uint16_t eeprom_data[64];   /* EEPROM contents, 16-bit words */

    uint32_t rxbuf_size;        /* RX buffer size decoded from RCTL */
    uint32_t rxbuf_min_shift;   /* derived from the RCTL RDMTS field */
    /* TX state accumulated across the descriptors of one packet. */
    struct e1000_tx {
        unsigned char header[256];    /* saved copy of the TSO header */
        unsigned char vlan_header[4]; /* tag inserted when vlan_needed */
        /* Fields vlan and data must not be reordered or separated. */
        unsigned char vlan[4];
        unsigned char data[0x10000];
        uint16_t size;                /* bytes accumulated in data[] */
        unsigned char sum_needed;     /* TXSM/IXSM checksum-offload bits */
        unsigned char vlan_needed;
        /* Checksum/TSO context from the last context descriptor: */
        uint8_t ipcss;
        uint8_t ipcso;
        uint16_t ipcse;
        uint8_t tucss;
        uint8_t tucso;
        uint16_t tucse;
        uint8_t hdr_len;              /* TSO header length */
        uint16_t mss;                 /* TSO maximum segment size */
        uint32_t paylen;
        uint16_t tso_frames;          /* segments emitted so far */
        char tse;                     /* TSE bit from context descriptor */
        int8_t ip;                    /* 1 = IPv4, 0 = IPv6 */
        int8_t tcp;                   /* 1 = TCP, 0 = UDP */
        char cptse;     // current packet tse bit
    } tx;

    /* Microwire EEPROM bit-bang state (see set_eecd()/get_eecd()). */
    struct {
        uint32_t val_in;	// shifted in from guest driver
        uint16_t bitnum_in;
        uint16_t bitnum_out;
        uint16_t reading;
        uint32_t old_eecd;
    } eecd_state;

    QEMUTimer *autoneg_timer;

    QEMUTimer *mit_timer;      /* Mitigation timer. */
    bool mit_timer_on;         /* Mitigation timer is running. */
    bool mit_irq_level;        /* Tracks interrupt pin level. */
    uint32_t mit_ide;          /* Tracks E1000_TXD_CMD_IDE bit. */

/* Compatibility flags for migration to/from qemu 1.3.0 and older */
#define E1000_FLAG_AUTONEG_BIT 0
#define E1000_FLAG_MIT_BIT 1
#define E1000_FLAG_AUTONEG (1 << E1000_FLAG_AUTONEG_BIT)
#define E1000_FLAG_MIT (1 << E1000_FLAG_MIT_BIT)
    uint32_t compat_flags;
} E1000State;
141 
/* Class data shared by all e1000 variants; phy_id2 holds the PHY ID2
 * value configured per device model (applied in e1000_reset()). */
typedef struct E1000BaseClass {
    PCIDeviceClass parent_class;
    uint16_t phy_id2;
} E1000BaseClass;

#define TYPE_E1000_BASE "e1000-base"

/* QOM cast helpers for the e1000 object hierarchy. */
#define E1000(obj) \
    OBJECT_CHECK(E1000State, (obj), TYPE_E1000_BASE)

#define E1000_DEVICE_CLASS(klass) \
     OBJECT_CLASS_CHECK(E1000BaseClass, (klass), TYPE_E1000_BASE)
#define E1000_DEVICE_GET_CLASS(obj) \
    OBJECT_GET_CLASS(E1000BaseClass, (obj), TYPE_E1000_BASE)
156 
/* Name each register's dword index into mac_reg[]: E1000_<x> is the
 * register's byte offset, so >>2 converts it to an array index. */
#define	defreg(x)	x = (E1000_##x>>2)
enum {
    defreg(CTRL),	defreg(EECD),	defreg(EERD),	defreg(GPRC),
    defreg(GPTC),	defreg(ICR),	defreg(ICS),	defreg(IMC),
    defreg(IMS),	defreg(LEDCTL),	defreg(MANC),	defreg(MDIC),
    defreg(MPC),	defreg(PBA),	defreg(RCTL),	defreg(RDBAH),
    defreg(RDBAL),	defreg(RDH),	defreg(RDLEN),	defreg(RDT),
    defreg(STATUS),	defreg(SWSM),	defreg(TCTL),	defreg(TDBAH),
    defreg(TDBAL),	defreg(TDH),	defreg(TDLEN),	defreg(TDT),
    defreg(TORH),	defreg(TORL),	defreg(TOTH),	defreg(TOTL),
    defreg(TPR),	defreg(TPT),	defreg(TXDCTL),	defreg(WUFC),
    defreg(RA),		defreg(MTA),	defreg(CRCERRS),defreg(VFTA),
    defreg(VET),        defreg(RDTR),   defreg(RADV),   defreg(TADV),
    defreg(ITR),
};
172 
173 static void
174 e1000_link_down(E1000State *s)
175 {
176     s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
177     s->phy_reg[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
178     s->phy_reg[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
179     s->phy_reg[PHY_LP_ABILITY] &= ~MII_LPAR_LPACK;
180 }
181 
182 static void
183 e1000_link_up(E1000State *s)
184 {
185     s->mac_reg[STATUS] |= E1000_STATUS_LU;
186     s->phy_reg[PHY_STATUS] |= MII_SR_LINK_STATUS;
187 }
188 
189 static void
190 set_phy_ctrl(E1000State *s, int index, uint16_t val)
191 {
192     /*
193      * QEMU 1.3 does not support link auto-negotiation emulation, so if we
194      * migrate during auto negotiation, after migration the link will be
195      * down.
196      */
197     if (!(s->compat_flags & E1000_FLAG_AUTONEG)) {
198         return;
199     }
200     if ((val & MII_CR_AUTO_NEG_EN) && (val & MII_CR_RESTART_AUTO_NEG)) {
201         e1000_link_down(s);
202         DBGOUT(PHY, "Start link auto negotiation\n");
203         timer_mod(s->autoneg_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
204     }
205 }
206 
207 static void
208 e1000_autoneg_timer(void *opaque)
209 {
210     E1000State *s = opaque;
211     if (!qemu_get_queue(s->nic)->link_down) {
212         e1000_link_up(s);
213         s->phy_reg[PHY_LP_ABILITY] |= MII_LPAR_LPACK;
214         s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
215         DBGOUT(PHY, "Auto negotiation is completed\n");
216     }
217 }
218 
/* Per-PHY-register write hooks, indexed by register address; NULL
 * entries have no side effects beyond the store in set_mdic(). */
static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
    [PHY_CTRL] = set_phy_ctrl,
};

enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };
224 
/* Guest access permissions for each of the 32 PHY registers; registers
 * not listed are inaccessible (MDIC reports an error). */
enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [PHY_STATUS] = PHY_R,	[M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_ID1] = PHY_R,		[M88E1000_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_CTRL] = PHY_RW,	[PHY_1000T_CTRL] = PHY_RW,
    [PHY_LP_ABILITY] = PHY_R,	[PHY_1000T_STATUS] = PHY_R,
    [PHY_AUTONEG_ADV] = PHY_RW,	[M88E1000_RX_ERR_CNTR] = PHY_R,
    [PHY_ID2] = PHY_R,		[M88E1000_PHY_SPEC_STATUS] = PHY_R,
    [PHY_AUTONEG_EXP] = PHY_R,
};
235 
/* PHY_ID2 documented in 8254x_GBe_SDM.pdf, pp. 250 */
/* Reset values for the PHY registers (copied in by e1000_reset()). */
static const uint16_t phy_reg_init[] = {
    [PHY_CTRL] = 0x1140,
    [PHY_STATUS] = 0x794d, /* link initially up with not completed autoneg */
    [PHY_ID1] = 0x141, /* [PHY_ID2] configured per DevId, from e1000_reset() */
    [PHY_1000T_CTRL] = 0x0e00,			[M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60,	[PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0,			[PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
};
246 
/* Reset values for the MAC registers (copied in by e1000_reset());
 * registers not listed reset to zero. */
static const uint32_t mac_reg_init[] = {
    [PBA] =     0x00100030,
    [LEDCTL] =  0x602,
    [CTRL] =    E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
                E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS] =  0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
                E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
                E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
                E1000_STATUS_LU,
    [MANC] =    E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
                E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
                E1000_MANC_RMCP_EN,
};
260 
/* Helper function: lower *curr to value, where *curr == 0 means the
 * delay has not been set yet and any nonzero value wins; value == 0 is
 * ignored. */
static inline void
mit_update_delay(uint32_t *curr, uint32_t value)
{
    if (!value) {
        return;
    }
    if (*curr == 0 || value < *curr) {
        *curr = value;
    }
}
269 
/*
 * Set ICR/ICS to the new cause set 'val' and raise or lower the PCI
 * interrupt line accordingly, deferring a rising edge while the
 * interrupt-mitigation timer window is open.
 */
static void
set_interrupt_cause(E1000State *s, int index, uint32_t val)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t pending_ints;
    uint32_t mit_delay;

    s->mac_reg[ICR] = val;

    /*
     * Make sure ICR and ICS registers have the same value.
     * The spec says that the ICS register is write-only.  However in practice,
     * on real hardware ICS is readable, and for reads it has the same value as
     * ICR (except that ICS does not have the clear on read behaviour of ICR).
     *
     * The VxWorks PRO/1000 driver uses this behaviour.
     */
    s->mac_reg[ICS] = val;

    /* Only causes that are also enabled in the interrupt mask matter. */
    pending_ints = (s->mac_reg[IMS] & s->mac_reg[ICR]);
    if (!s->mit_irq_level && pending_ints) {
        /*
         * Here we detect a potential raising edge. We postpone raising the
         * interrupt line if we are inside the mitigation delay window
         * (s->mit_timer_on == 1).
         * We provide a partial implementation of interrupt mitigation,
         * emulating only RADV, TADV and ITR (lower 16 bits, 1024ns units for
         * RADV and TADV, 256ns units for ITR). RDTR is only used to enable
         * RADV; relative timers based on TIDV and RDTR are not implemented.
         */
        if (s->mit_timer_on) {
            return;
        }
        if (s->compat_flags & E1000_FLAG_MIT) {
            /* Compute the next mitigation delay according to pending
             * interrupts and the current values of RADV (provided
             * RDTR!=0), TADV and ITR.
             * Then rearm the timer.
             */
            mit_delay = 0;
            if (s->mit_ide &&
                    (pending_ints & (E1000_ICR_TXQE | E1000_ICR_TXDW))) {
                /* *4 converts 1024ns register units into 256ns ticks. */
                mit_update_delay(&mit_delay, s->mac_reg[TADV] * 4);
            }
            if (s->mac_reg[RDTR] && (pending_ints & E1000_ICS_RXT0)) {
                mit_update_delay(&mit_delay, s->mac_reg[RADV] * 4);
            }
            mit_update_delay(&mit_delay, s->mac_reg[ITR]);

            if (mit_delay) {
                s->mit_timer_on = 1;
                timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                          mit_delay * 256);
            }
            s->mit_ide = 0;
        }
    }

    s->mit_irq_level = (pending_ints != 0);
    pci_set_irq(d, s->mit_irq_level);
}
331 
332 static void
333 e1000_mit_timer(void *opaque)
334 {
335     E1000State *s = opaque;
336 
337     s->mit_timer_on = 0;
338     /* Call set_interrupt_cause to update the irq level (if necessary). */
339     set_interrupt_cause(s, 0, s->mac_reg[ICR]);
340 }
341 
342 static void
343 set_ics(E1000State *s, int index, uint32_t val)
344 {
345     DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
346         s->mac_reg[IMS]);
347     set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
348 }
349 
350 static int
351 rxbufsize(uint32_t v)
352 {
353     v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
354          E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
355          E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
356     switch (v) {
357     case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
358         return 16384;
359     case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
360         return 8192;
361     case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
362         return 4096;
363     case E1000_RCTL_SZ_1024:
364         return 1024;
365     case E1000_RCTL_SZ_512:
366         return 512;
367     case E1000_RCTL_SZ_256:
368         return 256;
369     }
370     return 2048;
371 }
372 
/* Reset the device to power-on defaults (registered as the qdev reset
 * handler): stop timers, reload PHY/MAC registers from their init
 * tables, clear TX state, and pre-populate the receive address regs. */
static void e1000_reset(void *opaque)
{
    E1000State *d = opaque;
    E1000BaseClass *edc = E1000_DEVICE_GET_CLASS(d);
    uint8_t *macaddr = d->conf.macaddr.a;
    int i;

    /* Cancel pending autoneg and interrupt-mitigation timers. */
    timer_del(d->autoneg_timer);
    timer_del(d->mit_timer);
    d->mit_timer_on = 0;
    d->mit_irq_level = 0;
    d->mit_ide = 0;
    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    /* PHY ID2 differs per emulated device model. */
    d->phy_reg[PHY_ID2] = edc->phy_id2;
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);

    /* mac_reg_init sets STATUS.LU; undo it if the backend link is down. */
    if (qemu_get_queue(d->nic)->link_down) {
        e1000_link_down(d);
    }

    /* Some guests expect pre-initialized RAH/RAL (AddrValid flag + MACaddr) */
    d->mac_reg[RA] = 0;
    d->mac_reg[RA + 1] = E1000_RAH_AV;
    for (i = 0; i < 4; i++) {
        d->mac_reg[RA] |= macaddr[i] << (8 * i);
        d->mac_reg[RA + 1] |= (i < 2) ? macaddr[i + 4] << (8 * i) : 0;
    }
    qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);
}
406 
407 static void
408 set_ctrl(E1000State *s, int index, uint32_t val)
409 {
410     /* RST is self clearing */
411     s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
412 }
413 
414 static void
415 set_rx_control(E1000State *s, int index, uint32_t val)
416 {
417     s->mac_reg[RCTL] = val;
418     s->rxbuf_size = rxbufsize(val);
419     s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
420     DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
421            s->mac_reg[RCTL]);
422     qemu_flush_queued_packets(qemu_get_queue(s->nic));
423 }
424 
/*
 * Handle a guest write to MDIC: perform the requested MDIO read or
 * write of a PHY register, then latch the result plus the READY bit
 * (and ERROR on bad PHY number or inaccessible register) into MDIC.
 */
static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            /* XOR clears the data field; OR merges in the PHY value. */
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else {
            /* Run the per-register side-effect hook, if any, then store. */
            if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
                phyreg_writeops[addr](s, index, data);
            }
            s->phy_reg[addr] = data;
        }
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;

    /* Raise the MDIO-access-complete interrupt when requested. */
    if (val & E1000_MDIC_INT_EN) {
        set_ics(s, 0, E1000_ICR_MDAC);
    }
}
458 
/*
 * Read EECD (EEPROM control/data): reconstruct the guest-visible bits
 * from eecd_state and drive the DO (data out) line.  DO idles high;
 * during a Microwire read it carries the current output bit, streamed
 * MSB first from EEPROM word (bitnum_out >> 4).
 */
static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    /* (bitnum_out & 0xf) ^ 0xf selects bit 15..0 of the word, MSB first. */
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}
472 
/*
 * Handle a guest write to EECD: bit-banged Microwire serial EEPROM
 * interface.  With CS asserted, each rising SK edge shifts the DI bit
 * into a 9-bit command (3-bit opcode + 6-bit word address); if the
 * opcode is READ, falling SK edges then advance bitnum_out so that
 * get_eecd() streams the addressed word out on DO.
 */
static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_CS & val))			// CS inactive; nothing to do
	return;
    if (E1000_EECD_CS & (val ^ oldval)) {	// CS rise edge; reset state
	s->eecd_state.val_in = 0;
	s->eecd_state.bitnum_in = 0;
	s->eecd_state.bitnum_out = 0;
	s->eecd_state.reading = 0;
    }
    if (!(E1000_EECD_SK & (val ^ oldval)))	// no clock edge
        return;
    if (!(E1000_EECD_SK & val)) {		// falling edge
        s->eecd_state.bitnum_out++;
        return;
    }
    /* Rising clock edge: shift the DI bit into the command register. */
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        /* Full command received: seed bitnum_out so the next falling
         * edge positions the output at bit 15 of the addressed word. */
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}
506 
507 static uint32_t
508 flash_eerd_read(E1000State *s, int x)
509 {
510     unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;
511 
512     if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
513         return (s->mac_reg[EERD]);
514 
515     if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
516         return (E1000_EEPROM_RW_REG_DONE | r);
517 
518     return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
519            E1000_EEPROM_RW_REG_DONE | r);
520 }
521 
/* Compute the Internet checksum of bytes [css, n) of 'data' (n clipped
 * to cse + 1 when cse is nonzero) and store the 16-bit result in
 * network byte order at offset sloc. */
static void
putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
{
    if (cse && cse < n) {
        n = cse + 1;
    }
    if (sloc < n - 1) {
        uint32_t sum = net_checksum_add(n - css, data + css);
        stw_be_p(data + sloc, net_checksum_finish(sum));
    }
}
534 
535 static inline int
536 vlan_enabled(E1000State *s)
537 {
538     return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
539 }
540 
541 static inline int
542 vlan_rx_filter_enabled(E1000State *s)
543 {
544     return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
545 }
546 
547 static inline int
548 is_vlan_packet(E1000State *s, const uint8_t *buf)
549 {
550     return (be16_to_cpup((uint16_t *)(buf + 12)) ==
551                 le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
552 }
553 
554 static inline int
555 is_vlan_txd(uint32_t txd_lower)
556 {
557     return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
558 }
559 
560 /* FCS aka Ethernet CRC-32. We don't get it from backends and can't
561  * fill it in, just pad descriptor length by 4 bytes unless guest
562  * told us to strip it off the packet. */
563 static inline int
564 fcs_len(E1000State *s)
565 {
566     return (s->mac_reg[RCTL] & E1000_RCTL_SECRC) ? 0 : 4;
567 }
568 
569 static void
570 e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
571 {
572     NetClientState *nc = qemu_get_queue(s->nic);
573     if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
574         nc->info->receive(nc, buf, size);
575     } else {
576         qemu_send_packet(nc, buf, size);
577     }
578 }
579 
/*
 * Emit the frame currently accumulated in s->tx: for a TSO segment,
 * patch the per-segment header fields (IP total length / IPv6 payload
 * length, IP identification, TCP sequence number and flags, pseudo-
 * header checksum), apply requested checksum offloads, optionally
 * insert the VLAN tag, hand the frame to the backend and bump the TX
 * statistics counters.
 */
static void
xmit_seg(E1000State *s)
{
    uint16_t len, *sp;
    unsigned int frames = s->tx.tso_frames, css, sofar, n;
    struct e1000_tx *tp = &s->tx;

    if (tp->tse && tp->cptse) {
        css = tp->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (tp->ip) {		// IPv4
            /* Total length at +2; identification at +4, incremented by
             * the number of segments already sent. */
            stw_be_p(tp->data+css+2, tp->size - css);
            stw_be_p(tp->data+css+4,
                          be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
        } else			// IPv6
            stw_be_p(tp->data+css+4, tp->size - css);
        css = tp->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
        if (tp->tcp) {
            sofar = frames * tp->mss;
            stl_be_p(tp->data+css+4, ldl_be_p(tp->data+css+4)+sofar); /* seq */
            /* Not the last segment: clear PSH and FIN in the TCP flags. */
            if (tp->paylen - sofar > tp->mss)
                tp->data[css + 13] &= ~9;		// PSH, FIN
        } else	// UDP
            stw_be_p(tp->data+css+4, len);
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            unsigned int phsum;
            // add pseudo-header length before checksum calculation
            sp = (uint16_t *)(tp->data + tp->tucso);
            phsum = be16_to_cpup(sp) + len;
            phsum = (phsum >> 16) + (phsum & 0xffff);
            stw_be_p(sp, phsum);
        }
        tp->tso_frames++;
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
        putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
        putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
    if (tp->vlan_needed) {
        /* Insert the 4-byte tag after the 12-byte MAC addresses; vlan[]
         * directly precedes data[] so the sent frame stays contiguous. */
        memmove(tp->vlan, tp->data, 4);
        memmove(tp->data, tp->data + 4, 8);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        e1000_send_packet(s, tp->vlan, tp->size + 4);
    } else
        e1000_send_packet(s, tp->data, tp->size);
    s->mac_reg[TPT]++;
    s->mac_reg[GPTC]++;
    n = s->mac_reg[TOTL];
    /* TOTL/TOTH form a 64-bit octet counter; carry into TOTH on wrap. */
    if ((s->mac_reg[TOTL] += s->tx.size) < n)
        s->mac_reg[TOTH]++;
}
635 
/*
 * Process one TX descriptor.  A context descriptor updates the
 * checksum/TSO offload context in s->tx and returns.  Data and legacy
 * descriptors DMA their buffer into the accumulation buffer — for TSO,
 * emitting one segment via xmit_seg() every time header + mss bytes
 * are gathered — and transmit the final frame once EOP is seen.
 */
static void
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
    unsigned int msh = 0xfffff;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    /* Remember IDE for the interrupt-mitigation logic. */
    s->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE);
    if (dtype == E1000_TXD_CMD_DEXT) {	// context descriptor
        op = le32_to_cpu(xp->cmd_and_length);
        tp->ipcss = xp->lower_setup.ip_fields.ipcss;
        tp->ipcso = xp->lower_setup.ip_fields.ipcso;
        tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
        tp->tucss = xp->upper_setup.tcp_fields.tucss;
        tp->tucso = xp->upper_setup.tcp_fields.tucso;
        tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
        tp->paylen = op & 0xfffff;
        tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
        tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
        tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
        tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
        tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
        tp->tso_frames = 0;
        if (tp->tucso == 0) {	// this is probably wrong
            DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
            tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
        }
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        // data descriptor
        if (tp->size == 0) {
            tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        }
        tp->cptse = ( txd_lower & E1000_TXD_CMD_TSE ) ? 1 : 0;
    } else {
        // legacy descriptor
        tp->cptse = 0;
    }

    /* Prepare the tag to be inserted into the outgoing frame. */
    if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        tp->vlan_needed = 1;
        stw_be_p(tp->vlan_header,
                      le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
        stw_be_p(tp->vlan_header + 2,
                      le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->tse && tp->cptse) {
        /* TSO: one segment is hdr_len + mss bytes; refill data[] with
         * the saved header after each emitted segment. */
        msh = tp->hdr_len + tp->mss;
        do {
            bytes = split_size;
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;

            bytes = MIN(sizeof(tp->data) - tp->size, bytes);
            pci_dma_read(d, addr, tp->data + tp->size, bytes);
            sz = tp->size + bytes;
            if (sz >= tp->hdr_len && tp->size < tp->hdr_len) {
                /* Header just became complete; stash a copy for reuse. */
                memmove(tp->header, tp->data, tp->hdr_len);
            }
            tp->size = sz;
            addr += bytes;
            if (sz == msh) {
                xmit_seg(s);
                memmove(tp->data, tp->header, tp->hdr_len);
                tp->size = tp->hdr_len;
            }
        } while (split_size -= bytes);
    } else if (!tp->tse && tp->cptse) {
        // context descriptor TSE is not set, while data descriptor TSE is set
        DBGOUT(TXERR, "TCP segmentation error\n");
    } else {
        split_size = MIN(sizeof(tp->data) - tp->size, split_size);
        pci_dma_read(d, addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

    if (!(txd_lower & E1000_TXD_CMD_EOP))
        return;
    /* End of packet: send the remaining data unless it is a bare,
     * incomplete TSO header, then reset the per-packet state. */
    if (!(tp->tse && tp->cptse && tp->size < tp->hdr_len)) {
        xmit_seg(s);
    }
    tp->tso_frames = 0;
    tp->sum_needed = 0;
    tp->vlan_needed = 0;
    tp->size = 0;
    tp->cptse = 0;
}
731 
/*
 * Write completion status back for one TX descriptor when it requested
 * status reporting (RS/RPS): set the DD bit in the upper dword and DMA
 * only that dword back to guest memory at its offset within the
 * descriptor.  Returns the ICR cause to raise (TXDW) or 0.
 */
static uint32_t
txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
        return 0;
    /* Descriptor done; clear error bits this model never reports. */
    txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
                ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
    dp->upper.data = cpu_to_le32(txd_upper);
    pci_dma_write(d, base + ((char *)&dp->upper - (char *)dp),
                  &dp->upper, sizeof(dp->upper));
    return E1000_ICR_TXDW;
}
747 
748 static uint64_t tx_desc_base(E1000State *s)
749 {
750     uint64_t bah = s->mac_reg[TDBAH];
751     uint64_t bal = s->mac_reg[TDBAL] & ~0xf;
752 
753     return (bah << 32) + bal;
754 }
755 
/*
 * Drain the TX descriptor ring from TDH to TDT: DMA each descriptor
 * in, process it, write back completion status, and finally raise the
 * accumulated interrupt causes.  A full wraparound of TDH back to its
 * starting value aborts the loop (guards against bogus TDT/TDLEN).
 */
static void
start_xmit(E1000State *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = tx_desc_base(s) +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        pci_dma_read(d, base, &desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(s, base, &desc);

        /* Advance head, wrapping at the end of the ring. */
        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    set_ics(s, 0, cause);
}
796 
/*
 * Decide whether an incoming frame should be accepted: VLAN filter
 * (VFTA), promiscuous modes, broadcast, exact unicast match against
 * the 16 RAL/RAH pairs, and finally the multicast hash table (MTA).
 * Returns 1 to accept, 0 to drop.
 */
static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    /* Bit offset into the 48-bit address selected by the RCTL.MO field. */
    static const int mta_shift[] = {4, 3, 2, 0};
    uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;

    if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
        /* VFTA is a 4096-bit table indexed by the 12-bit VLAN id. */
        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
        uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
                                     ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0)
            return 0;
    }

    if (rctl & E1000_RCTL_UPE)			// promiscuous
        return 1;

    if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE))	// promiscuous mcast
        return 1;

    if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
        return 1;

    /* Exact unicast match against each valid RAL/RAH pair. */
    for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
        if (!(rp[1] & E1000_RAH_AV))
            continue;
        ra[0] = cpu_to_le32(rp[0]);
        ra[1] = cpu_to_le32(rp[1]);
        if (!memcmp(buf, (uint8_t *)ra, 6)) {
            DBGOUT(RXFILTER,
                   "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
                   (int)(rp - s->mac_reg - RA)/2,
                   buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
            return 1;
        }
    }
    DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);

    /* Multicast hash: 12 bits of the address index the 4096-bit MTA. */
    f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
    f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
    if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
        return 1;
    DBGOUT(RXFILTER,
           "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
           (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
           s->mac_reg[MTA + (f >> 5)]);

    return 0;
}
849 
/*
 * Backend link-status callback: propagate nc->link_down into the
 * emulated MAC/PHY.  When coming up with autoneg emulation enabled
 * (and requested but not yet complete in the PHY), finish the link-up
 * via the autoneg timer instead of immediately.  Raises a Link Status
 * Change interrupt if STATUS actually changed.
 */
static void
e1000_set_link_status(NetClientState *nc)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    uint32_t old_status = s->mac_reg[STATUS];

    if (nc->link_down) {
        e1000_link_down(s);
    } else {
        if (s->compat_flags & E1000_FLAG_AUTONEG &&
            s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN &&
            s->phy_reg[PHY_CTRL] & MII_CR_RESTART_AUTO_NEG &&
            !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
            /* emulate auto-negotiation if supported */
            timer_mod(s->autoneg_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
        } else {
            e1000_link_up(s);
        }
    }

    if (s->mac_reg[STATUS] != old_status)
        set_ics(s, 0, E1000_ICR_LSC);
}
874 
875 static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
876 {
877     int bufs;
878     /* Fast-path short packets */
879     if (total_size <= s->rxbuf_size) {
880         return s->mac_reg[RDH] != s->mac_reg[RDT];
881     }
882     if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
883         bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
884     } else if (s->mac_reg[RDH] > s->mac_reg[RDT]) {
885         bufs = s->mac_reg[RDLEN] /  sizeof(struct e1000_rx_desc) +
886             s->mac_reg[RDT] - s->mac_reg[RDH];
887     } else {
888         return false;
889     }
890     return total_size <= bufs * s->rxbuf_size;
891 }
892 
893 static int
894 e1000_can_receive(NetClientState *nc)
895 {
896     E1000State *s = qemu_get_nic_opaque(nc);
897 
898     return (s->mac_reg[STATUS] & E1000_STATUS_LU) &&
899         (s->mac_reg[RCTL] & E1000_RCTL_EN) && e1000_has_rxbufs(s, 1);
900 }
901 
902 static uint64_t rx_desc_base(E1000State *s)
903 {
904     uint64_t bah = s->mac_reg[RDBAH];
905     uint64_t bal = s->mac_reg[RDBAL] & ~0xf;
906 
907     return (bah << 32) + bal;
908 }
909 
/*
 * Receive one packet, presented as a scatter/gather list, and DMA it into
 * the guest's RX descriptor ring.
 *
 * Returns the packet size when the packet was consumed (including the
 * filtered-out and oversized-drop cases, which consume it silently), or
 * -1 when reception is currently impossible (link down, RX disabled, or
 * no descriptors free) so the net layer will queue and retry.
 */
static ssize_t
e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    struct e1000_rx_desc desc;
    dma_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0;
    uint8_t min_buf[MIN_BUF_SIZE];
    struct iovec min_iov;
    uint8_t *filter_buf = iov->iov_base;   /* headers used for filtering */
    size_t size = iov_size(iov, iovcnt);
    size_t iov_ofs = 0;                    /* read offset inside *iov */
    size_t desc_offset;
    size_t desc_size;
    size_t total_size;

    if (!(s->mac_reg[STATUS] & E1000_STATUS_LU)) {
        return -1;
    }

    if (!(s->mac_reg[RCTL] & E1000_RCTL_EN)) {
        return -1;
    }

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(min_buf)) {
        iov_to_buf(iov, iovcnt, 0, min_buf, size);
        memset(&min_buf[size], 0, sizeof(min_buf) - size);
        min_iov.iov_base = filter_buf = min_buf;
        min_iov.iov_len = size = sizeof(min_buf);
        iovcnt = 1;
        iov = &min_iov;
    } else if (iov->iov_len < MAXIMUM_ETHERNET_HDR_LEN) {
        /* This is very unlikely, but may happen. */
        /* First iov element is too short to hold the Ethernet header;
         * copy enough into a contiguous buffer for the filter checks. */
        iov_to_buf(iov, iovcnt, 0, min_buf, MAXIMUM_ETHERNET_HDR_LEN);
        filter_buf = min_buf;
    }

    /* Discard oversized packets if !LPE and !SBP. */
    if ((size > MAXIMUM_ETHERNET_LPE_SIZE ||
        (size > MAXIMUM_ETHERNET_VLAN_SIZE
        && !(s->mac_reg[RCTL] & E1000_RCTL_LPE)))
        && !(s->mac_reg[RCTL] & E1000_RCTL_SBP)) {
        return size;
    }

    /* Destination-address filtering (unicast/multicast/broadcast). */
    if (!receive_filter(s, filter_buf, size)) {
        return size;
    }

    /* VLAN stripping: save the tag for the descriptor's 'special' field
     * and remove the 4-byte 802.1Q header from the payload by shifting
     * the MAC addresses forward over it. */
    if (vlan_enabled(s) && is_vlan_packet(s, filter_buf)) {
        vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(filter_buf
                                                                + 14)));
        iov_ofs = 4;
        if (filter_buf == iov->iov_base) {
            memmove(filter_buf + 4, filter_buf, 12);
        } else {
            iov_from_buf(iov, iovcnt, 4, filter_buf, 12);
            while (iov->iov_len <= iov_ofs) {
                iov_ofs -= iov->iov_len;
                iov++;
            }
        }
        vlan_status = E1000_RXD_STAT_VP;
        size -= 4;
    }

    rdh_start = s->mac_reg[RDH];
    desc_offset = 0;
    total_size = size + fcs_len(s);   /* guest sees payload + FCS */
    if (!e1000_has_rxbufs(s, total_size)) {
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
    }
    /* Walk the descriptor ring, filling one rxbuf_size chunk per
     * descriptor until the whole packet (plus FCS) is accounted for. */
    do {
        desc_size = total_size - desc_offset;
        if (desc_size > s->rxbuf_size) {
            desc_size = s->rxbuf_size;
        }
        base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
        pci_dma_read(d, base, &desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        if (desc.buffer_addr) {
            if (desc_offset < size) {
                size_t iov_copy;
                hwaddr ba = le64_to_cpu(desc.buffer_addr);
                size_t copy_size = size - desc_offset;
                if (copy_size > s->rxbuf_size) {
                    copy_size = s->rxbuf_size;
                }
                /* Copy from the (possibly fragmented) iov into the
                 * guest buffer, advancing through iov elements. */
                do {
                    iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);
                    pci_dma_write(d, ba, iov->iov_base + iov_ofs, iov_copy);
                    copy_size -= iov_copy;
                    ba += iov_copy;
                    iov_ofs += iov_copy;
                    if (iov_ofs == iov->iov_len) {
                        iov++;
                        iov_ofs = 0;
                    }
                } while (copy_size);
            }
            desc_offset += desc_size;
            desc.length = cpu_to_le16(desc_size);
            if (desc_offset >= total_size) {
                desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
            } else {
                /* Guest zeroing out status is not a hardware requirement.
                   Clear EOP in case guest didn't do it. */
                desc.status &= ~E1000_RXD_STAT_EOP;
            }
        } else { // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        }
        /* Write the updated descriptor (status/length) back to the guest. */
        pci_dma_write(d, base, &desc, sizeof(desc));

        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
        }
    } while (desc_offset < total_size);

    /* Update statistics: good packets, total packets, total octets. */
    s->mac_reg[GPRC]++;
    s->mac_reg[TPR]++;
    /* TOR - Total Octets Received:
     * This register includes bytes received in a packet from the <Destination
     * Address> field through the <CRC> field, inclusively.
     */
    n = s->mac_reg[TORL] + size + /* Always include FCS length. */ 4;
    if (n < s->mac_reg[TORL])
        s->mac_reg[TORH]++;   /* carry into the high 32 bits */
    s->mac_reg[TORL] = n;

    /* Raise RXT0, and RXDMT0 as well if the number of free descriptors
     * has fallen below the threshold configured via rxbuf_min_shift. */
    n = E1000_ICS_RXT0;
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);

    return size;
}
1064 
1065 static ssize_t
1066 e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size)
1067 {
1068     const struct iovec iov = {
1069         .iov_base = (uint8_t *)buf,
1070         .iov_len = size
1071     };
1072 
1073     return e1000_receive_iov(nc, &iov, 1);
1074 }
1075 
1076 static uint32_t
1077 mac_readreg(E1000State *s, int index)
1078 {
1079     return s->mac_reg[index];
1080 }
1081 
1082 static uint32_t
1083 mac_icr_read(E1000State *s, int index)
1084 {
1085     uint32_t ret = s->mac_reg[ICR];
1086 
1087     DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
1088     set_interrupt_cause(s, 0, 0);
1089     return ret;
1090 }
1091 
1092 static uint32_t
1093 mac_read_clr4(E1000State *s, int index)
1094 {
1095     uint32_t ret = s->mac_reg[index];
1096 
1097     s->mac_reg[index] = 0;
1098     return ret;
1099 }
1100 
1101 static uint32_t
1102 mac_read_clr8(E1000State *s, int index)
1103 {
1104     uint32_t ret = s->mac_reg[index];
1105 
1106     s->mac_reg[index] = 0;
1107     s->mac_reg[index-1] = 0;
1108     return ret;
1109 }
1110 
1111 static void
1112 mac_writereg(E1000State *s, int index, uint32_t val)
1113 {
1114     uint32_t macaddr[2];
1115 
1116     s->mac_reg[index] = val;
1117 
1118     if (index == RA + 1) {
1119         macaddr[0] = cpu_to_le32(s->mac_reg[RA]);
1120         macaddr[1] = cpu_to_le32(s->mac_reg[RA + 1]);
1121         qemu_format_nic_info_str(qemu_get_queue(s->nic), (uint8_t *)macaddr);
1122     }
1123 }
1124 
/* Write handler for RDT (receive descriptor tail, 16 bits).  Moving the
 * tail may make receive buffers available again, so retry any packets
 * the net layer queued while the ring was full. */
static void
set_rdt(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
    if (e1000_has_rxbufs(s, 1)) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}
1133 
1134 static void
1135 set_16bit(E1000State *s, int index, uint32_t val)
1136 {
1137     s->mac_reg[index] = val & 0xffff;
1138 }
1139 
1140 static void
1141 set_dlen(E1000State *s, int index, uint32_t val)
1142 {
1143     s->mac_reg[index] = val & 0xfff80;
1144 }
1145 
/* Write handler shared by TCTL and TDT: store the value, clamp the
 * transmit tail to its 16-bit width, then kick the transmit path to
 * process any newly available descriptors. */
static void
set_tctl(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
    s->mac_reg[TDT] &= 0xffff;
    start_xmit(s);
}
1153 
/* Write handler for ICR: writing a 1 to a bit clears that pending
 * interrupt cause (write-1-to-clear), then the IRQ line is re-evaluated. */
static void
set_icr(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_icr %x\n", val);
    set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
}
1160 
/* Interrupt Mask Clear: disable the interrupt causes whose bits are set
 * in val, then re-evaluate the interrupt line. */
static void
set_imc(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] &= ~val;
    set_ics(s, 0, 0);
}
1167 
/* Interrupt Mask Set: enable the interrupt causes whose bits are set in
 * val, then re-evaluate the interrupt line (may fire immediately if a
 * newly unmasked cause is already pending). */
static void
set_ims(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] |= val;
    set_ics(s, 0, 0);
}
1174 
#define getreg(x)	[x] = mac_readreg
/* MMIO read dispatch table, indexed by register offset / 4.  Unlisted
 * entries are NULL and reads of them are reported as unknown.  Most
 * registers use the side-effect-free mac_readreg; 64-bit statistics
 * (TOTH/TORH) and 32-bit counters are read-to-clear, and ICR clears all
 * pending interrupt causes on read. */
static uint32_t (*macreg_readops[])(E1000State *, int) = {
    getreg(PBA),	getreg(RCTL),	getreg(TDH),	getreg(TXDCTL),
    getreg(WUFC),	getreg(TDT),	getreg(CTRL),	getreg(LEDCTL),
    getreg(MANC),	getreg(MDIC),	getreg(SWSM),	getreg(STATUS),
    getreg(TORL),	getreg(TOTL),	getreg(IMS),	getreg(TCTL),
    getreg(RDH),	getreg(RDT),	getreg(VET),	getreg(ICS),
    getreg(TDBAL),	getreg(TDBAH),	getreg(RDBAH),	getreg(RDBAL),
    getreg(TDLEN),      getreg(RDLEN),  getreg(RDTR),   getreg(RADV),
    getreg(TADV),       getreg(ITR),

    [TOTH] = mac_read_clr8,	[TORH] = mac_read_clr8,	[GPRC] = mac_read_clr4,
    [GPTC] = mac_read_clr4,	[TPR] = mac_read_clr4,	[TPT] = mac_read_clr4,
    [ICR] = mac_icr_read,	[EECD] = get_eecd,	[EERD] = flash_eerd_read,
    [CRCERRS ... MPC] = &mac_readreg,
    [RA ... RA+31] = &mac_readreg,
    [MTA ... MTA+127] = &mac_readreg,
    [VFTA ... VFTA+127] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
1195 
#define putreg(x)	[x] = mac_writereg
/* MMIO write dispatch table, indexed by register offset / 4.  Unlisted
 * entries are NULL: writes to them hit either the read-only path (if the
 * register has a read handler) or the unknown-register path.  Registers
 * with side effects (interrupt, descriptor ring, MDIC, EEPROM, RX/TX
 * control) get dedicated handlers; the rest use plain mac_writereg. */
static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
    putreg(PBA),	putreg(EERD),	putreg(SWSM),	putreg(WUFC),
    putreg(TDBAL),	putreg(TDBAH),	putreg(TXDCTL),	putreg(RDBAH),
    putreg(RDBAL),	putreg(LEDCTL), putreg(VET),
    [TDLEN] = set_dlen,	[RDLEN] = set_dlen,	[TCTL] = set_tctl,
    [TDT] = set_tctl,	[MDIC] = set_mdic,	[ICS] = set_ics,
    [TDH] = set_16bit,	[RDH] = set_16bit,	[RDT] = set_rdt,
    [IMC] = set_imc,	[IMS] = set_ims,	[ICR] = set_icr,
    [EECD] = set_eecd,	[RCTL] = set_rx_control, [CTRL] = set_ctrl,
    [RDTR] = set_16bit, [RADV] = set_16bit,     [TADV] = set_16bit,
    [ITR] = set_16bit,
    [RA ... RA+31] = &mac_writereg,
    [MTA ... MTA+127] = &mac_writereg,
    [VFTA ... VFTA+127] = &mac_writereg,
};

enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
1214 
1215 static void
1216 e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val,
1217                  unsigned size)
1218 {
1219     E1000State *s = opaque;
1220     unsigned int index = (addr & 0x1ffff) >> 2;
1221 
1222     if (index < NWRITEOPS && macreg_writeops[index]) {
1223         macreg_writeops[index](s, index, val);
1224     } else if (index < NREADOPS && macreg_readops[index]) {
1225         DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n", index<<2, val);
1226     } else {
1227         DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
1228                index<<2, val);
1229     }
1230 }
1231 
1232 static uint64_t
1233 e1000_mmio_read(void *opaque, hwaddr addr, unsigned size)
1234 {
1235     E1000State *s = opaque;
1236     unsigned int index = (addr & 0x1ffff) >> 2;
1237 
1238     if (index < NREADOPS && macreg_readops[index])
1239     {
1240         return macreg_readops[index](s, index);
1241     }
1242     DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
1243     return 0;
1244 }
1245 
/* MMIO region callbacks.  All accesses are performed as 32-bit
 * little-endian operations regardless of the guest access size
 * (the .impl block makes the memory core split/merge accesses). */
static const MemoryRegionOps e1000_mmio_ops = {
    .read = e1000_mmio_read,
    .write = e1000_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
1255 
/* Stub for the I/O-space BAR: the BAR is advertised but not implemented,
 * so all reads return zero. */
static uint64_t e1000_io_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    E1000State *s = opaque;

    (void)s;
    return 0;
}
1264 
/* Stub for the I/O-space BAR: writes are silently ignored. */
static void e1000_io_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    E1000State *s = opaque;

    (void)s;
}
1272 
/* Callbacks for the (stubbed-out) I/O-space BAR. */
static const MemoryRegionOps e1000_io_ops = {
    .read = e1000_io_read,
    .write = e1000_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
1278 
static bool is_version_1(void *opaque, int version_id)
{
    /* Field-existence test for VMSTATE_UNUSED_TEST: the padding field is
     * only present in version-1 migration streams. */
    if (version_id == 1) {
        return true;
    }
    return false;
}
1283 
/* Migration pre-save hook: fold transient timer state into registers so
 * the destination can reconstruct it from the migrated values alone. */
static void e1000_pre_save(void *opaque)
{
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);

    /* If the mitigation timer is active, emulate a timeout now. */
    if (s->mit_timer_on) {
        e1000_mit_timer(s);
    }

    /*
     * If link is down and auto-negotiation is supported and ongoing,
     * complete auto-negotiation immediately. This allows us to look
     * at MII_SR_AUTONEG_COMPLETE to infer link status on load.
     */
    if (nc->link_down &&
        s->compat_flags & E1000_FLAG_AUTONEG &&
        s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN &&
        s->phy_reg[PHY_CTRL] & MII_CR_RESTART_AUTO_NEG) {
         s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
    }
}
1306 
/* Migration post-load hook: reset state that is not migrated and derive
 * the link status from the migrated register contents. */
static int e1000_post_load(void *opaque, int version_id)
{
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);

    /* Without the mitigation compat flag the mitigation registers must
     * read as zero, regardless of what the source sent. */
    if (!(s->compat_flags & E1000_FLAG_MIT)) {
        s->mac_reg[ITR] = s->mac_reg[RDTR] = s->mac_reg[RADV] =
            s->mac_reg[TADV] = 0;
        s->mit_irq_level = false;
    }
    s->mit_ide = 0;
    s->mit_timer_on = false;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in mac_reg[STATUS].
     * Alternatively, restart link negotiation if it was in progress. */
    nc->link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;

    if (s->compat_flags & E1000_FLAG_AUTONEG &&
        s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN &&
        s->phy_reg[PHY_CTRL] & MII_CR_RESTART_AUTO_NEG &&
        !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
        nc->link_down = false;
        timer_mod(s->autoneg_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }

    return 0;
}
1335 
1336 static bool e1000_mit_state_needed(void *opaque)
1337 {
1338     E1000State *s = opaque;
1339 
1340     return s->compat_flags & E1000_FLAG_MIT;
1341 }
1342 
/* Optional migration subsection carrying interrupt-mitigation state;
 * sent only when e1000_mit_state_needed() returns true. */
static const VMStateDescription vmstate_e1000_mit_state = {
    .name = "e1000/mit_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mac_reg[RDTR], E1000State),
        VMSTATE_UINT32(mac_reg[RADV], E1000State),
        VMSTATE_UINT32(mac_reg[TADV], E1000State),
        VMSTATE_UINT32(mac_reg[ITR], E1000State),
        VMSTATE_BOOL(mit_irq_level, E1000State),
        VMSTATE_END_OF_LIST()
    }
};
1356 
/* Main migration description.  NOTE: the field order defines the wire
 * format — do not reorder or remove entries; new state goes into
 * subsections (see vmstate_e1000_mit_state). */
static const VMStateDescription vmstate_e1000 = {
    .name = "e1000",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = e1000_pre_save,
    .post_load = e1000_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, E1000State),
        VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
        VMSTATE_UNUSED(4), /* Was mmio_base.  */
        VMSTATE_UINT32(rxbuf_size, E1000State),
        VMSTATE_UINT32(rxbuf_min_shift, E1000State),
        /* EEPROM bit-bang engine state. */
        VMSTATE_UINT32(eecd_state.val_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
        VMSTATE_UINT16(eecd_state.reading, E1000State),
        VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
        /* In-progress transmit context (checksum offload / TSO). */
        VMSTATE_UINT8(tx.ipcss, E1000State),
        VMSTATE_UINT8(tx.ipcso, E1000State),
        VMSTATE_UINT16(tx.ipcse, E1000State),
        VMSTATE_UINT8(tx.tucss, E1000State),
        VMSTATE_UINT8(tx.tucso, E1000State),
        VMSTATE_UINT16(tx.tucse, E1000State),
        VMSTATE_UINT32(tx.paylen, E1000State),
        VMSTATE_UINT8(tx.hdr_len, E1000State),
        VMSTATE_UINT16(tx.mss, E1000State),
        VMSTATE_UINT16(tx.size, E1000State),
        VMSTATE_UINT16(tx.tso_frames, E1000State),
        VMSTATE_UINT8(tx.sum_needed, E1000State),
        VMSTATE_INT8(tx.ip, E1000State),
        VMSTATE_INT8(tx.tcp, E1000State),
        VMSTATE_BUFFER(tx.header, E1000State),
        VMSTATE_BUFFER(tx.data, E1000State),
        VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
        VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
        /* Individual MAC registers. */
        VMSTATE_UINT32(mac_reg[CTRL], E1000State),
        VMSTATE_UINT32(mac_reg[EECD], E1000State),
        VMSTATE_UINT32(mac_reg[EERD], E1000State),
        VMSTATE_UINT32(mac_reg[GPRC], E1000State),
        VMSTATE_UINT32(mac_reg[GPTC], E1000State),
        VMSTATE_UINT32(mac_reg[ICR], E1000State),
        VMSTATE_UINT32(mac_reg[ICS], E1000State),
        VMSTATE_UINT32(mac_reg[IMC], E1000State),
        VMSTATE_UINT32(mac_reg[IMS], E1000State),
        VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[MANC], E1000State),
        VMSTATE_UINT32(mac_reg[MDIC], E1000State),
        VMSTATE_UINT32(mac_reg[MPC], E1000State),
        VMSTATE_UINT32(mac_reg[PBA], E1000State),
        VMSTATE_UINT32(mac_reg[RCTL], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[RDH], E1000State),
        VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[RDT], E1000State),
        VMSTATE_UINT32(mac_reg[STATUS], E1000State),
        VMSTATE_UINT32(mac_reg[SWSM], E1000State),
        VMSTATE_UINT32(mac_reg[TCTL], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[TDH], E1000State),
        VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[TDT], E1000State),
        VMSTATE_UINT32(mac_reg[TORH], E1000State),
        VMSTATE_UINT32(mac_reg[TORL], E1000State),
        VMSTATE_UINT32(mac_reg[TOTH], E1000State),
        VMSTATE_UINT32(mac_reg[TOTL], E1000State),
        VMSTATE_UINT32(mac_reg[TPR], E1000State),
        VMSTATE_UINT32(mac_reg[TPT], E1000State),
        VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[WUFC], E1000State),
        VMSTATE_UINT32(mac_reg[VET], E1000State),
        /* Register arrays: receive addresses, multicast table, VLAN filter. */
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_e1000_mit_state,
            .needed = e1000_mit_state_needed,
        }, {
            /* empty */
        }
    }
};
1443 
1444 /*
1445  * EEPROM contents documented in Tables 5-2 and 5-3, pp. 98-102.
1446  * Note: A valid DevId will be inserted during pci_e1000_init().
1447  */
1448 static const uint16_t e1000_eeprom_template[64] = {
1449     0x0000, 0x0000, 0x0000, 0x0000,      0xffff, 0x0000,      0x0000, 0x0000,
1450     0x3000, 0x1000, 0x6403, 0 /*DevId*/, 0x8086, 0 /*DevId*/, 0x8086, 0x3040,
1451     0x0008, 0x2000, 0x7e14, 0x0048,      0x1000, 0x00d8,      0x0000, 0x2700,
1452     0x6cc9, 0x3150, 0x0722, 0x040b,      0x0984, 0x0000,      0xc000, 0x0706,
1453     0x1008, 0x0000, 0x0f04, 0x7fff,      0x4d01, 0xffff,      0xffff, 0xffff,
1454     0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
1455     0x0100, 0x4000, 0x121c, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
1456     0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0x0000,
1457 };
1458 
1459 /* PCI interface */
1460 
1461 static void
1462 e1000_mmio_setup(E1000State *d)
1463 {
1464     int i;
1465     const uint32_t excluded_regs[] = {
1466         E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
1467         E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
1468     };
1469 
1470     memory_region_init_io(&d->mmio, OBJECT(d), &e1000_mmio_ops, d,
1471                           "e1000-mmio", PNPMMIO_SIZE);
1472     memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
1473     for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
1474         memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
1475                                      excluded_regs[i+1] - excluded_regs[i] - 4);
1476     memory_region_init_io(&d->io, OBJECT(d), &e1000_io_ops, d, "e1000-io", IOPORT_SIZE);
1477 }
1478 
/* NetClientInfo cleanup hook: the peer is going away, so drop our
 * reference to the NIC state. */
static void
e1000_cleanup(NetClientState *nc)
{
    E1000State *s = qemu_get_nic_opaque(nc);

    s->nic = NULL;
}
1486 
/* PCI device teardown: stop and free both timers before destroying the
 * memory regions and detaching from the network layer. */
static void
pci_e1000_uninit(PCIDevice *dev)
{
    E1000State *d = E1000(dev);

    timer_del(d->autoneg_timer);
    timer_free(d->autoneg_timer);
    timer_del(d->mit_timer);
    timer_free(d->mit_timer);
    memory_region_destroy(&d->mmio);
    memory_region_destroy(&d->io);
    qemu_del_nic(d->nic);
}
1500 
/* Callbacks registered with the network layer for this NIC. */
static NetClientInfo net_e1000_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = e1000_can_receive,
    .receive = e1000_receive,
    .receive_iov = e1000_receive_iov,
    .cleanup = e1000_cleanup,
    .link_status_changed = e1000_set_link_status,
};
1510 
/* PCI realize: set up config space, BARs, EEPROM contents (including MAC
 * address and checksum), the NIC backend, and the emulation timers.
 * Returns 0 on success (this init callback cannot fail). */
static int pci_e1000_init(PCIDevice *pci_dev)
{
    DeviceState *dev = DEVICE(pci_dev);
    E1000State *d = E1000(pci_dev);
    PCIDeviceClass *pdc = PCI_DEVICE_GET_CLASS(pci_dev);
    uint8_t *pci_conf;
    uint16_t checksum = 0;
    int i;
    uint8_t *macaddr;

    pci_conf = pci_dev->config;

    /* TODO: RST# value should be 0, PCI spec 6.2.4 */
    pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;

    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */

    e1000_mmio_setup(d);

    /* BAR 0: memory-mapped registers; BAR 1: (stub) I/O space. */
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);

    pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);

    memmove(d->eeprom_data, e1000_eeprom_template,
        sizeof e1000_eeprom_template);
    qemu_macaddr_default_if_unset(&d->conf.macaddr);
    macaddr = d->conf.macaddr.a;
    /* EEPROM words 0-2 hold the MAC address, two bytes per word. */
    for (i = 0; i < 3; i++)
        d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
    /* Patch the per-variant device ID into the template's placeholders. */
    d->eeprom_data[11] = d->eeprom_data[13] = pdc->device_id;
    /* The checksum word makes the sum of all 64 words equal EEPROM_SUM. */
    for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
        checksum += d->eeprom_data[i];
    checksum = (uint16_t) EEPROM_SUM - checksum;
    d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;

    d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
                          object_get_typename(OBJECT(d)), dev->id, d);

    qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);

    add_boot_device_path(d->conf.bootindex, dev, "/ethernet-phy@0");

    d->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, e1000_autoneg_timer, d);
    d->mit_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000_mit_timer, d);

    return 0;
}
1558 
1559 static void qdev_e1000_reset(DeviceState *dev)
1560 {
1561     E1000State *d = E1000(dev);
1562     e1000_reset(d);
1563 }
1564 
/* User-configurable properties.  The two bit properties are compat
 * knobs (both default on) used by older machine types to disable
 * autonegotiation emulation and interrupt mitigation. */
static Property e1000_properties[] = {
    DEFINE_NIC_PROPERTIES(E1000State, conf),
    DEFINE_PROP_BIT("autonegotiation", E1000State,
                    compat_flags, E1000_FLAG_AUTONEG_BIT, true),
    DEFINE_PROP_BIT("mitigation", E1000State,
                    compat_flags, E1000_FLAG_MIT_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};
1573 
/* Per-variant parameters used as class_data when registering the
 * concrete e1000 device types (see e1000_devices below). */
typedef struct E1000Info {
    const char *name;       /* QOM type name */
    uint16_t   device_id;   /* PCI device ID */
    uint8_t    revision;    /* PCI revision ID */
    uint16_t   phy_id2;     /* PHY identifier register 2 value */
} E1000Info;
1580 
/* Class init shared by all e1000 variants; 'data' is the E1000Info
 * describing the concrete variant being registered. */
static void e1000_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    E1000BaseClass *e = E1000_DEVICE_CLASS(klass);
    const E1000Info *info = data;

    k->init = pci_e1000_init;
    k->exit = pci_e1000_uninit;
    k->romfile = "efi-e1000.rom";
    k->vendor_id = PCI_VENDOR_ID_INTEL;
    /* Variant-specific identification. */
    k->device_id = info->device_id;
    k->revision = info->revision;
    e->phy_id2 = info->phy_id2;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->desc = "Intel Gigabit Ethernet";
    dc->reset = qdev_e1000_reset;
    dc->vmsd = &vmstate_e1000;
    dc->props = e1000_properties;
}
1602 
/* Abstract base type; the concrete variants in e1000_devices derive
 * from it and supply the device/PHY identifiers. */
static const TypeInfo e1000_base_info = {
    .name          = TYPE_E1000_BASE,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(E1000State),
    .class_size    = sizeof(E1000BaseClass),
    .abstract      = true,
};
1610 
/* The concrete e1000 variants registered at type-init time. */
static const E1000Info e1000_devices[] = {
    {
        .name      = "e1000-82540em",
        .device_id = E1000_DEV_ID_82540EM,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
    {
        .name      = "e1000-82544gc",
        .device_id = E1000_DEV_ID_82544GC_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_82544x,
    },
    {
        .name      = "e1000-82545em",
        .device_id = E1000_DEV_ID_82545EM_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
};
1631 
/* Plain "e1000" is an alias for the 82540EM variant. */
static const TypeInfo e1000_default_info = {
    .name          = "e1000",
    .parent        = "e1000-82540em",
};
1636 
1637 static void e1000_register_types(void)
1638 {
1639     int i;
1640 
1641     type_register_static(&e1000_base_info);
1642     for (i = 0; i < ARRAY_SIZE(e1000_devices); i++) {
1643         const E1000Info *info = &e1000_devices[i];
1644         TypeInfo type_info = {};
1645 
1646         type_info.name = info->name;
1647         type_info.parent = TYPE_E1000_BASE;
1648         type_info.class_data = (void *)info;
1649         type_info.class_init = e1000_class_init;
1650 
1651         type_register(&type_info);
1652     }
1653     type_register_static(&e1000_default_info);
1654 }
1655 
1656 type_init(e1000_register_types)
1657