xref: /qemu/hw/net/e1000.c (revision 8597f2e19e68a70e4f45de7a5f29b4fdc047fcff)
1 /*
2  * QEMU e1000 emulation
3  *
4  * Software developer's manual:
5  * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
6  *
7  * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
8  * Copyright (c) 2008 Qumranet
9  * Based on work done by:
10  * Copyright (c) 2007 Dan Aloni
11  * Copyright (c) 2004 Antony T Curtis
12  *
13  * This library is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2 of the License, or (at your option) any later version.
17  *
18  * This library is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
25  */
26 
27 
28 #include "hw/hw.h"
29 #include "hw/pci/pci.h"
30 #include "net/net.h"
31 #include "net/checksum.h"
32 #include "hw/loader.h"
33 #include "sysemu/sysemu.h"
34 #include "sysemu/dma.h"
35 #include "qemu/iov.h"
36 
37 #include "e1000_regs.h"
38 
#define E1000_DEBUG

#ifdef E1000_DEBUG
/* One bit per debug category; OR the DBGBIT()s you want into debugflags. */
enum {
    DEBUG_GENERAL,	DEBUG_IO,	DEBUG_MMIO,	DEBUG_INTERRUPT,
    DEBUG_RX,		DEBUG_TX,	DEBUG_MDIC,	DEBUG_EEPROM,
    DEBUG_UNKNOWN,	DEBUG_TXSUM,	DEBUG_TXERR,	DEBUG_RXERR,
    DEBUG_RXFILTER,     DEBUG_PHY,      DEBUG_NOTYET,
};
#define DBGBIT(x)	(1<<DEBUG_##x)
/* Default: only report TX errors and general messages. */
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

#define	DBGOUT(what, fmt, ...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
    } while (0)
#else
#define	DBGOUT(what, fmt, ...) do {} while (0)
#endif

#define IOPORT_SIZE       0x40
#define PNPMMIO_SIZE      0x20000
#define MIN_BUF_SIZE      60 /* Min. octets in an ethernet frame sans FCS */

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* this is the size past which hardware will drop packets when setting LPE=1 */
#define MAXIMUM_ETHERNET_LPE_SIZE 16384

/* Ethernet header (14) plus optional 802.1Q VLAN tag (4). */
#define MAXIMUM_ETHERNET_HDR_LEN (14+4)
69 
70 /*
71  * HW models:
72  *  E1000_DEV_ID_82540EM works with Windows, Linux, and OS X <= 10.8
73  *  E1000_DEV_ID_82573L OK with windoze and Linux 2.6.22,
74  *	appears to perform better than 82540EM, but breaks with Linux 2.6.18
75  *  E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
76  *  E1000_DEV_ID_82545EM_COPPER works with Linux and OS X >= 10.6
77  *  Others never tested
78  */
79 
/*
 * Device state for the emulated Intel e1000-family NIC: the PCI device,
 * backend NIC hookup, MMIO/IO regions, register files (MAC, PHY, EEPROM),
 * and in-progress TX / EEPROM bit-bang / interrupt-mitigation state.
 */
typedef struct E1000State_st {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/

    NICState *nic;
    NICConf conf;
    MemoryRegion mmio;          /* memory-mapped register bank */
    MemoryRegion io;            /* legacy I/O-port register bank */

    uint32_t mac_reg[0x8000];   /* MAC registers, indexed by (offset >> 2) */
    uint16_t phy_reg[0x20];     /* PHY (MII) registers */
    uint16_t eeprom_data[64];   /* EEPROM contents as 16-bit words */

    uint32_t rxbuf_size;        /* RX buffer size derived from RCTL */
    uint32_t rxbuf_min_shift;
    /* Accumulator for the frame currently being transmitted. */
    struct e1000_tx {
        unsigned char header[256];      /* saved TSO header, re-prepended per segment */
        unsigned char vlan_header[4];   /* 802.1Q tag to insert on TX */
        /* Fields vlan and data must not be reordered or separated. */
        unsigned char vlan[4];
        unsigned char data[0x10000];
        uint16_t size;                  /* bytes accumulated in data[] */
        unsigned char sum_needed;       /* TXSM/IXSM checksum-offload flags */
        unsigned char vlan_needed;
        /* Checksum-offload context captured from the last context descriptor: */
        uint8_t ipcss;
        uint8_t ipcso;
        uint16_t ipcse;
        uint8_t tucss;
        uint8_t tucso;
        uint16_t tucse;
        uint8_t hdr_len;                /* TSO header length */
        uint16_t mss;                   /* TSO maximum segment size */
        uint32_t paylen;
        uint16_t tso_frames;            /* TSO segments emitted so far */
        char tse;                       /* TSE bit from the context descriptor */
        int8_t ip;                      /* nonzero = IPv4, zero = IPv6 */
        int8_t tcp;                     /* nonzero = TCP, zero = UDP */
        char cptse;     // current packet tse bit
    } tx;

    /* Microwire EEPROM bit-bang state driven via the EECD register. */
    struct {
        uint32_t val_in;	// shifted in from guest driver
        uint16_t bitnum_in;
        uint16_t bitnum_out;
        uint16_t reading;
        uint32_t old_eecd;
    } eecd_state;

    QEMUTimer *autoneg_timer;  /* fires to complete link auto-negotiation */

    QEMUTimer *mit_timer;      /* Mitigation timer. */
    bool mit_timer_on;         /* Mitigation timer is running. */
    bool mit_irq_level;        /* Tracks interrupt pin level. */
    uint32_t mit_ide;          /* Tracks E1000_TXD_CMD_IDE bit. */

/* Compatibility flags for migration to/from qemu 1.3.0 and older */
#define E1000_FLAG_AUTONEG_BIT 0
#define E1000_FLAG_MIT_BIT 1
#define E1000_FLAG_AUTONEG (1 << E1000_FLAG_AUTONEG_BIT)
#define E1000_FLAG_MIT (1 << E1000_FLAG_MIT_BIT)
    uint32_t compat_flags;
} E1000State;
143 
/* Per-model class data: distinguishes the emulated chip variants. */
typedef struct E1000BaseClass {
    PCIDeviceClass parent_class;
    uint16_t phy_id2;   /* PHY_ID2 register value; set per device model */
    bool is_8257xx;     /* 8257xx models need the INT_ASSERTED hack */
} E1000BaseClass;
149 
/* QOM type name and cast macros for the e1000 device hierarchy. */
#define TYPE_E1000_BASE "e1000-base"

#define E1000(obj) \
    OBJECT_CHECK(E1000State, (obj), TYPE_E1000_BASE)

#define E1000_DEVICE_CLASS(klass) \
     OBJECT_CLASS_CHECK(E1000BaseClass, (klass), TYPE_E1000_BASE)
#define E1000_DEVICE_GET_CLASS(obj) \
    OBJECT_GET_CLASS(E1000BaseClass, (obj), TYPE_E1000_BASE)
159 
/* Map each absolute register offset E1000_x to its mac_reg[] index
 * (registers are 32-bit, hence the >> 2). */
#define	defreg(x)	x = (E1000_##x>>2)
enum {
    defreg(CTRL),	defreg(EECD),	defreg(EERD),	defreg(GPRC),
    defreg(GPTC),	defreg(ICR),	defreg(ICS),	defreg(IMC),
    defreg(IMS),	defreg(LEDCTL),	defreg(MANC),	defreg(MDIC),
    defreg(MPC),	defreg(PBA),	defreg(RCTL),	defreg(RDBAH),
    defreg(RDBAL),	defreg(RDH),	defreg(RDLEN),	defreg(RDT),
    defreg(STATUS),	defreg(SWSM),	defreg(TCTL),	defreg(TDBAH),
    defreg(TDBAL),	defreg(TDH),	defreg(TDLEN),	defreg(TDT),
    defreg(TORH),	defreg(TORL),	defreg(TOTH),	defreg(TOTL),
    defreg(TPR),	defreg(TPT),	defreg(TXDCTL),	defreg(WUFC),
    defreg(RA),		defreg(MTA),	defreg(CRCERRS),defreg(VFTA),
    defreg(VET),        defreg(RDTR),   defreg(RADV),   defreg(TADV),
    defreg(ITR),
};
175 
176 static void
177 e1000_link_down(E1000State *s)
178 {
179     s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
180     s->phy_reg[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
181 }
182 
183 static void
184 e1000_link_up(E1000State *s)
185 {
186     s->mac_reg[STATUS] |= E1000_STATUS_LU;
187     s->phy_reg[PHY_STATUS] |= MII_SR_LINK_STATUS;
188 }
189 
190 static void
191 set_phy_ctrl(E1000State *s, int index, uint16_t val)
192 {
193     /*
194      * QEMU 1.3 does not support link auto-negotiation emulation, so if we
195      * migrate during auto negotiation, after migration the link will be
196      * down.
197      */
198     if (!(s->compat_flags & E1000_FLAG_AUTONEG)) {
199         return;
200     }
201     if ((val & MII_CR_AUTO_NEG_EN) && (val & MII_CR_RESTART_AUTO_NEG)) {
202         e1000_link_down(s);
203         s->phy_reg[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
204         DBGOUT(PHY, "Start link auto negotiation\n");
205         timer_mod(s->autoneg_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
206     }
207 }
208 
209 static void
210 e1000_autoneg_timer(void *opaque)
211 {
212     E1000State *s = opaque;
213     if (!qemu_get_queue(s->nic)->link_down) {
214         e1000_link_up(s);
215     }
216     s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
217     DBGOUT(PHY, "Auto negotiation is completed\n");
218 }
219 
/* Side-effect hooks invoked on PHY register writes (before storing the
 * value); only PHY_CTRL currently has one. */
static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
    [PHY_CTRL] = set_phy_ctrl,
};

enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };
225 
/* Per-register access capabilities for the emulated PHY; registers not
 * listed here are inaccessible and MDIC accesses to them report an error. */
enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [PHY_STATUS] = PHY_R,	[M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_ID1] = PHY_R,		[M88E1000_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_CTRL] = PHY_RW,	[PHY_1000T_CTRL] = PHY_RW,
    [PHY_LP_ABILITY] = PHY_R,	[PHY_1000T_STATUS] = PHY_R,
    [PHY_AUTONEG_ADV] = PHY_RW,	[M88E1000_RX_ERR_CNTR] = PHY_R,
    [PHY_ID2] = PHY_R,		[M88E1000_PHY_SPEC_STATUS] = PHY_R
};
235 
/* PHY_ID2 documented in 8254x_GBe_SDM.pdf, pp. 250 */
/* PHY register reset values, copied into phy_reg[] on device reset. */
static const uint16_t phy_reg_init[] = {
    [PHY_CTRL] = 0x1140,
    [PHY_STATUS] = 0x794d, /* link initially up with not completed autoneg */
    [PHY_ID1] = 0x141, /* [PHY_ID2] configured per DevId, from e1000_reset() */
    [PHY_1000T_CTRL] = 0x0e00,			[M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60,	[PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0,			[PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
};
246 
/* MAC register reset values, copied into mac_reg[] on device reset;
 * everything not listed resets to zero. */
static const uint32_t mac_reg_init[] = {
    [PBA] =     0x00100030,
    [LEDCTL] =  0x602,
    [CTRL] =    E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
                E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS] =  0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
                E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
                E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
                E1000_STATUS_LU,
    [MANC] =    E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
                E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
                E1000_MANC_RMCP_EN,
};
260 
/*
 * Shrink *curr toward value, treating 0 as "unset" for both: a zero
 * value is ignored, and a nonzero value replaces *curr when *curr is
 * unset or larger.  Used to select the shortest pending mitigation delay.
 */
static inline void
mit_update_delay(uint32_t *curr, uint32_t value)
{
    if (value == 0) {
        return;
    }
    if (*curr == 0 || value < *curr) {
        *curr = value;
    }
}
269 
/*
 * Update ICR/ICS with the new cause bits and drive the PCI interrupt
 * line accordingly, applying interrupt mitigation (ITR/RADV/TADV) on a
 * rising edge when enabled: instead of raising the line immediately the
 * shortest configured delay is armed on mit_timer, and the line is
 * raised when the timer fires (via e1000_mit_timer).
 */
static void
set_interrupt_cause(E1000State *s, int index, uint32_t val)
{
    PCIDevice *d = PCI_DEVICE(s);
    E1000BaseClass *edc = E1000_DEVICE_GET_CLASS(d);
    uint32_t pending_ints;
    uint32_t mit_delay;

    if (val && edc->is_8257xx) {
        /* hack only for 8257xx models */
        val |= E1000_ICR_INT_ASSERTED;
    }

    s->mac_reg[ICR] = val;

    /*
     * Make sure ICR and ICS registers have the same value.
     * The spec says that the ICS register is write-only.  However in practice,
     * on real hardware ICS is readable, and for reads it has the same value as
     * ICR (except that ICS does not have the clear on read behaviour of ICR).
     *
     * The VxWorks PRO/1000 driver uses this behaviour.
     */
    s->mac_reg[ICS] = val;

    pending_ints = (s->mac_reg[IMS] & s->mac_reg[ICR]);
    if (!s->mit_irq_level && pending_ints) {
        /*
         * Here we detect a potential raising edge. We postpone raising the
         * interrupt line if we are inside the mitigation delay window
         * (s->mit_timer_on == 1).
         * We provide a partial implementation of interrupt mitigation,
         * emulating only RADV, TADV and ITR (lower 16 bits, 1024ns units for
         * RADV and TADV, 256ns units for ITR). RDTR is only used to enable
         * RADV; relative timers based on TIDV and RDTR are not implemented.
         */
        if (s->mit_timer_on) {
            return;
        }
        if (s->compat_flags & E1000_FLAG_MIT) {
            /* Compute the next mitigation delay according to pending
             * interrupts and the current values of RADV (provided
             * RDTR!=0), TADV and ITR.
             * Then rearm the timer.
             */
            mit_delay = 0;
            /* TADV/RADV are in 1024 ns units, mit_delay in 256 ns units,
             * hence the * 4 below (4 * 256 = 1024). */
            if (s->mit_ide &&
                    (pending_ints & (E1000_ICR_TXQE | E1000_ICR_TXDW))) {
                mit_update_delay(&mit_delay, s->mac_reg[TADV] * 4);
            }
            if (s->mac_reg[RDTR] && (pending_ints & E1000_ICS_RXT0)) {
                mit_update_delay(&mit_delay, s->mac_reg[RADV] * 4);
            }
            mit_update_delay(&mit_delay, s->mac_reg[ITR]);

            if (mit_delay) {
                s->mit_timer_on = 1;
                timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                          mit_delay * 256);
            }
            s->mit_ide = 0;
        }
    }

    s->mit_irq_level = (pending_ints != 0);
    pci_set_irq(d, s->mit_irq_level);
}
337 
/*
 * Mitigation timer callback: the delay window is over, so re-evaluate
 * the interrupt line against the causes accumulated in ICR meanwhile.
 */
static void
e1000_mit_timer(void *opaque)
{
    E1000State *s = opaque;

    s->mit_timer_on = 0;
    /* Call set_interrupt_cause to update the irq level (if necessary). */
    set_interrupt_cause(s, 0, s->mac_reg[ICR]);
}
347 
348 static void
349 set_ics(E1000State *s, int index, uint32_t val)
350 {
351     DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
352         s->mac_reg[IMS]);
353     set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
354 }
355 
356 static int
357 rxbufsize(uint32_t v)
358 {
359     v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
360          E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
361          E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
362     switch (v) {
363     case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
364         return 16384;
365     case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
366         return 8192;
367     case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
368         return 4096;
369     case E1000_RCTL_SZ_1024:
370         return 1024;
371     case E1000_RCTL_SZ_512:
372         return 512;
373     case E1000_RCTL_SZ_256:
374         return 256;
375     }
376     return 2048;
377 }
378 
/*
 * Device reset: stop the autoneg and mitigation timers, restore PHY and
 * MAC registers to their defaults, clear in-flight TX state, reflect the
 * backend's current link state, and pre-load RAL/RAH with the configured
 * MAC address.
 */
static void e1000_reset(void *opaque)
{
    E1000State *d = opaque;
    E1000BaseClass *edc = E1000_DEVICE_GET_CLASS(d);
    uint8_t *macaddr = d->conf.macaddr.a;
    int i;

    timer_del(d->autoneg_timer);
    timer_del(d->mit_timer);
    d->mit_timer_on = 0;
    d->mit_irq_level = 0;
    d->mit_ide = 0;
    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    /* PHY_ID2 differs per emulated model; taken from the class data. */
    d->phy_reg[PHY_ID2] = edc->phy_id2;
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);

    if (qemu_get_queue(d->nic)->link_down) {
        e1000_link_down(d);
    }

    /* Some guests expect pre-initialized RAH/RAL (AddrValid flag + MACaddr) */
    d->mac_reg[RA] = 0;
    d->mac_reg[RA + 1] = E1000_RAH_AV;
    for (i = 0; i < 4; i++) {
        d->mac_reg[RA] |= macaddr[i] << (8 * i);
        d->mac_reg[RA + 1] |= (i < 2) ? macaddr[i + 4] << (8 * i) : 0;
    }
    qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);
}
412 
413 static void
414 set_ctrl(E1000State *s, int index, uint32_t val)
415 {
416     /* RST is self clearing */
417     s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
418 }
419 
420 static void
421 set_rx_control(E1000State *s, int index, uint32_t val)
422 {
423     s->mac_reg[RCTL] = val;
424     s->rxbuf_size = rxbufsize(val);
425     s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
426     DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
427            s->mac_reg[RCTL]);
428     qemu_flush_queued_packets(qemu_get_queue(s->nic));
429 }
430 
/*
 * MDIC write: emulate an MDIO read or write of a PHY register.  Only
 * PHY address 1 exists; any other address, or an access a register's
 * phy_regcap entry does not permit, reports E1000_MDIC_ERROR.  READY is
 * always set on completion and an MDI interrupt is raised if requested.
 */
static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            /* clear the data field (val ^ data), then merge in the PHY value */
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else {
            /* run the register's side-effect hook before storing the value */
            if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
                phyreg_writeops[addr](s, index, data);
            }
            s->phy_reg[addr] = data;
        }
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        set_ics(s, 0, E1000_ICR_MDAC);
    }
}
464 
/*
 * EECD read: report EEPROM present + grant plus the last written control
 * bits, with DO reflecting the EEPROM output.  When not in a microwire
 * read, DO idles high.
 */
static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    /* Words are streamed MSB first: bitnum_out >> 4 selects the 16-bit
     * word, (bitnum_out & 0xf) ^ 0xf the bit within it. */
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}
478 
/*
 * EECD write: bit-bang one step of the microwire EEPROM protocol.  The
 * guest toggles CS, SK (clock) and DI (data in); a CS rising edge resets
 * the shift state, rising SK edges shift DI in, and falling SK edges
 * advance the output bit counter consumed by get_eecd().  After 9 bits
 * (start bit + 2-bit opcode + 6-bit address) a read opcode positions
 * bitnum_out at the start of the addressed word.
 */
static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_CS & val))			// CS inactive; nothing to do
	return;
    if (E1000_EECD_CS & (val ^ oldval)) {	// CS rise edge; reset state
	s->eecd_state.val_in = 0;
	s->eecd_state.bitnum_in = 0;
	s->eecd_state.bitnum_out = 0;
	s->eecd_state.reading = 0;
    }
    if (!(E1000_EECD_SK & (val ^ oldval)))	// no clock edge
        return;
    if (!(E1000_EECD_SK & val)) {		// falling edge
        s->eecd_state.bitnum_out++;
        return;
    }
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        /* 16 * address - 1: the next falling edge lands on the word's MSB */
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}
512 
513 static uint32_t
514 flash_eerd_read(E1000State *s, int x)
515 {
516     unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;
517 
518     if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
519         return (s->mac_reg[EERD]);
520 
521     if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
522         return (E1000_EEPROM_RW_REG_DONE | r);
523 
524     return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
525            E1000_EEPROM_RW_REG_DONE | r);
526 }
527 
528 static void
529 putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
530 {
531     uint32_t sum;
532 
533     if (cse && cse < n)
534         n = cse + 1;
535     if (sloc < n-1) {
536         sum = net_checksum_add(n-css, data+css);
537         stw_be_p(data + sloc, net_checksum_finish(sum));
538     }
539 }
540 
541 static inline int
542 vlan_enabled(E1000State *s)
543 {
544     return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
545 }
546 
547 static inline int
548 vlan_rx_filter_enabled(E1000State *s)
549 {
550     return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
551 }
552 
/* True when the frame's EtherType field (offset 12, network order)
 * matches the VLAN EtherType configured in the VET register. */
static inline int
is_vlan_packet(E1000State *s, const uint8_t *buf)
{
    return (be16_to_cpup((uint16_t *)(buf + 12)) ==
                le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
}
559 
560 static inline int
561 is_vlan_txd(uint32_t txd_lower)
562 {
563     return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
564 }
565 
566 /* FCS aka Ethernet CRC-32. We don't get it from backends and can't
567  * fill it in, just pad descriptor length by 4 bytes unless guest
568  * told us to strip it off the packet. */
569 static inline int
570 fcs_len(E1000State *s)
571 {
572     return (s->mac_reg[RCTL] & E1000_RCTL_SECRC) ? 0 : 4;
573 }
574 
575 static void
576 e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
577 {
578     NetClientState *nc = qemu_get_queue(s->nic);
579     if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
580         nc->info->receive(nc, buf, size);
581     } else {
582         qemu_send_packet(nc, buf, size);
583     }
584 }
585 
/*
 * Finalize and transmit the frame currently assembled in s->tx.  For a
 * TSO segment this first patches the IP total/payload length, the IPv4
 * identification, and the TCP sequence number and PSH/FIN flags for the
 * current segment; then any requested TCP/UDP and IP checksum offloads
 * are applied, the 802.1Q tag is re-inserted if needed, the frame is
 * handed to the backend and the TX statistics counters are updated.
 */
static void
xmit_seg(E1000State *s)
{
    uint16_t len, *sp;
    unsigned int frames = s->tx.tso_frames, css, sofar, n;
    struct e1000_tx *tp = &s->tx;

    if (tp->tse && tp->cptse) {
        css = tp->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (tp->ip) {		// IPv4
            /* total length at +2, identification at +4 (bumped per frame) */
            stw_be_p(tp->data+css+2, tp->size - css);
            stw_be_p(tp->data+css+4,
                          be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
        } else			// IPv6
            /* IPv6 payload length lives at +4 */
            stw_be_p(tp->data+css+4, tp->size - css);
        css = tp->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
        if (tp->tcp) {
            /* advance the TCP sequence number by the payload already sent */
            sofar = frames * tp->mss;
            stl_be_p(tp->data+css+4, ldl_be_p(tp->data+css+4)+sofar); /* seq */
            if (tp->paylen - sofar > tp->mss)
                tp->data[css + 13] &= ~9;		// PSH, FIN
        } else	// UDP
            stw_be_p(tp->data+css+4, len);
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            unsigned int phsum;
            // add pseudo-header length before checksum calculation
            sp = (uint16_t *)(tp->data + tp->tucso);
            phsum = be16_to_cpup(sp) + len;
            phsum = (phsum >> 16) + (phsum & 0xffff);
            stw_be_p(sp, phsum);
        }
        tp->tso_frames++;
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
        putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
        putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
    if (tp->vlan_needed) {
        /* shift the MAC addresses down 4 bytes and splice in the tag;
         * relies on vlan[] immediately preceding data[] in the struct */
        memmove(tp->vlan, tp->data, 4);
        memmove(tp->data, tp->data + 4, 8);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        e1000_send_packet(s, tp->vlan, tp->size + 4);
    } else
        e1000_send_packet(s, tp->data, tp->size);
    s->mac_reg[TPT]++;
    s->mac_reg[GPTC]++;
    /* 64-bit total-octets counter split across TOTL/TOTH; carry on wrap */
    n = s->mac_reg[TOTL];
    if ((s->mac_reg[TOTL] += s->tx.size) < n)
        s->mac_reg[TOTH]++;
}
641 
/*
 * Consume one TX descriptor.  Context descriptors only capture the
 * checksum/TSO offload parameters into s->tx and return.  Data and
 * legacy descriptors DMA the guest buffer into s->tx.data -- segmenting
 * at hdr_len + mss boundaries when TSO is active -- and transmit the
 * accumulated frame when the EOP bit is seen.
 */
static void
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
    unsigned int msh = 0xfffff;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    s->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE);
    if (dtype == E1000_TXD_CMD_DEXT) {	// context descriptor
        op = le32_to_cpu(xp->cmd_and_length);
        tp->ipcss = xp->lower_setup.ip_fields.ipcss;
        tp->ipcso = xp->lower_setup.ip_fields.ipcso;
        tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
        tp->tucss = xp->upper_setup.tcp_fields.tucss;
        tp->tucso = xp->upper_setup.tcp_fields.tucso;
        tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
        tp->paylen = op & 0xfffff;
        tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
        tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
        tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
        tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
        tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
        tp->tso_frames = 0;
        if (tp->tucso == 0) {	// this is probably wrong
            DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
            tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
        }
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        // data descriptor
        if (tp->size == 0) {
            /* POPTS (checksum options) only on the first descriptor */
            tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        }
        tp->cptse = ( txd_lower & E1000_TXD_CMD_TSE ) ? 1 : 0;
    } else {
        // legacy descriptor
        tp->cptse = 0;
    }

    if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        /* build the 802.1Q tag: VET EtherType + VLAN id from 'special' */
        tp->vlan_needed = 1;
        stw_be_p(tp->vlan_header,
                      le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
        stw_be_p(tp->vlan_header + 2,
                      le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->tse && tp->cptse) {
        /* TSO: emit a full segment every hdr_len + mss accumulated bytes,
         * re-prepending the saved header for each subsequent segment */
        msh = tp->hdr_len + tp->mss;
        do {
            bytes = split_size;
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;

            bytes = MIN(sizeof(tp->data) - tp->size, bytes);
            pci_dma_read(d, addr, tp->data + tp->size, bytes);
            sz = tp->size + bytes;
            if (sz >= tp->hdr_len && tp->size < tp->hdr_len) {
                /* the header just completed; save it for later segments */
                memmove(tp->header, tp->data, tp->hdr_len);
            }
            tp->size = sz;
            addr += bytes;
            if (sz == msh) {
                xmit_seg(s);
                memmove(tp->data, tp->header, tp->hdr_len);
                tp->size = tp->hdr_len;
            }
        } while (split_size -= bytes);
    } else if (!tp->tse && tp->cptse) {
        // context descriptor TSE is not set, while data descriptor TSE is set
        DBGOUT(TXERR, "TCP segmentation error\n");
    } else {
        split_size = MIN(sizeof(tp->data) - tp->size, split_size);
        pci_dma_read(d, addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

    if (!(txd_lower & E1000_TXD_CMD_EOP))
        return;
    /* end of packet: transmit unless TSO left only a partial header */
    if (!(tp->tse && tp->cptse && tp->size < tp->hdr_len)) {
        xmit_seg(s);
    }
    tp->tso_frames = 0;
    tp->sum_needed = 0;
    tp->vlan_needed = 0;
    tp->size = 0;
    tp->cptse = 0;
}
737 
738 static uint32_t
739 txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp)
740 {
741     PCIDevice *d = PCI_DEVICE(s);
742     uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);
743 
744     if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
745         return 0;
746     txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
747                 ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
748     dp->upper.data = cpu_to_le32(txd_upper);
749     pci_dma_write(d, base + ((char *)&dp->upper - (char *)dp),
750                   &dp->upper, sizeof(dp->upper));
751     return E1000_ICR_TXDW;
752 }
753 
754 static uint64_t tx_desc_base(E1000State *s)
755 {
756     uint64_t bah = s->mac_reg[TDBAH];
757     uint64_t bal = s->mac_reg[TDBAL] & ~0xf;
758 
759     return (bah << 32) + bal;
760 }
761 
/*
 * Drain the TX ring: while TDH != TDT, fetch a descriptor, process it,
 * write back its status if requested, and advance TDH with wraparound at
 * TDLEN.  Finally raise the accumulated TX interrupt causes via set_ics.
 */
static void
start_xmit(E1000State *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = tx_desc_base(s) +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        pci_dma_read(d, base, &desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(s, base, &desc);

        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    set_ics(s, 0, cause);
}
802 
/*
 * Decide whether an incoming frame should be accepted.  Checks, in
 * order: the VLAN filter table (VFTA) when VLAN filtering is on, the
 * unicast/multicast promiscuous bits, broadcast accept, the exact
 * unicast receive addresses (RA array), and finally the 4096-bit
 * multicast hash table (MTA).  Returns 1 to accept, 0 to drop.
 */
static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    /* MO field selects which bits of the address feed the MTA hash */
    static const int mta_shift[] = {4, 3, 2, 0};
    uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;

    if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
        /* VLAN id at offset 14; VFTA is a 4096-bit map, 32 ids per word */
        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
        uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
                                     ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0)
            return 0;
    }

    if (rctl & E1000_RCTL_UPE)			// promiscuous
        return 1;

    if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE))	// promiscuous mcast
        return 1;

    if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
        return 1;

    /* Exact match against the 16 RAL/RAH address pairs marked valid. */
    for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
        if (!(rp[1] & E1000_RAH_AV))
            continue;
        ra[0] = cpu_to_le32(rp[0]);
        ra[1] = cpu_to_le32(rp[1]);
        if (!memcmp(buf, (uint8_t *)ra, 6)) {
            DBGOUT(RXFILTER,
                   "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
                   (int)(rp - s->mac_reg - RA)/2,
                   buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
            return 1;
        }
    }
    DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);

    /* Multicast hash: 12 address bits (selected by MO) index into MTA. */
    f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
    f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
    if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
        return 1;
    DBGOUT(RXFILTER,
           "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
           (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
           s->mac_reg[MTA + (f >> 5)]);

    return 0;
}
855 
856 static void
857 e1000_set_link_status(NetClientState *nc)
858 {
859     E1000State *s = qemu_get_nic_opaque(nc);
860     uint32_t old_status = s->mac_reg[STATUS];
861 
862     if (nc->link_down) {
863         e1000_link_down(s);
864     } else {
865         e1000_link_up(s);
866     }
867 
868     if (s->mac_reg[STATUS] != old_status)
869         set_ics(s, 0, E1000_ICR_LSC);
870 }
871 
872 static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
873 {
874     int bufs;
875     /* Fast-path short packets */
876     if (total_size <= s->rxbuf_size) {
877         return s->mac_reg[RDH] != s->mac_reg[RDT];
878     }
879     if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
880         bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
881     } else if (s->mac_reg[RDH] > s->mac_reg[RDT]) {
882         bufs = s->mac_reg[RDLEN] /  sizeof(struct e1000_rx_desc) +
883             s->mac_reg[RDT] - s->mac_reg[RDH];
884     } else {
885         return false;
886     }
887     return total_size <= bufs * s->rxbuf_size;
888 }
889 
890 static int
891 e1000_can_receive(NetClientState *nc)
892 {
893     E1000State *s = qemu_get_nic_opaque(nc);
894 
895     return (s->mac_reg[STATUS] & E1000_STATUS_LU) &&
896         (s->mac_reg[RCTL] & E1000_RCTL_EN) && e1000_has_rxbufs(s, 1);
897 }
898 
899 static uint64_t rx_desc_base(E1000State *s)
900 {
901     uint64_t bah = s->mac_reg[RDBAH];
902     uint64_t bal = s->mac_reg[RDBAL] & ~0xf;
903 
904     return (bah << 32) + bal;
905 }
906 
/*
 * Receive path.  Copy an incoming frame (scatter/gather iovec) into
 * guest memory through the RX descriptor ring, applying short-frame
 * padding, the receive filter, optional VLAN tag stripping, and
 * updating the statistics registers.
 *
 * Returns the number of bytes consumed, or -1 to make the net layer
 * queue the packet for later (link down, RX disabled, or no
 * descriptors available).
 */
static ssize_t
e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    struct e1000_rx_desc desc;
    dma_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0;
    uint8_t min_buf[MIN_BUF_SIZE];
    struct iovec min_iov;
    uint8_t *filter_buf = iov->iov_base;  /* contiguous view of the headers */
    size_t size = iov_size(iov, iovcnt);
    size_t iov_ofs = 0;
    size_t desc_offset;
    size_t desc_size;
    size_t total_size;

    if (!(s->mac_reg[STATUS] & E1000_STATUS_LU)) {
        return -1;
    }

    if (!(s->mac_reg[RCTL] & E1000_RCTL_EN)) {
        return -1;
    }

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(min_buf)) {
        iov_to_buf(iov, iovcnt, 0, min_buf, size);
        memset(&min_buf[size], 0, sizeof(min_buf) - size);
        min_iov.iov_base = filter_buf = min_buf;
        min_iov.iov_len = size = sizeof(min_buf);
        iovcnt = 1;
        iov = &min_iov;
    } else if (iov->iov_len < MAXIMUM_ETHERNET_HDR_LEN) {
        /* First iov element too short to hold the full Ethernet header;
         * linearize the header into min_buf for filtering.
         * This is very unlikely, but may happen. */
        iov_to_buf(iov, iovcnt, 0, min_buf, MAXIMUM_ETHERNET_HDR_LEN);
        filter_buf = min_buf;
    }

    /* Discard oversized packets if !LPE and !SBP. */
    if ((size > MAXIMUM_ETHERNET_LPE_SIZE ||
        (size > MAXIMUM_ETHERNET_VLAN_SIZE
        && !(s->mac_reg[RCTL] & E1000_RCTL_LPE)))
        && !(s->mac_reg[RCTL] & E1000_RCTL_SBP)) {
        return size;
    }

    /* Destination-address filtering; a filtered packet is reported as
     * consumed so the net layer does not retry it. */
    if (!receive_filter(s, filter_buf, size)) {
        return size;
    }

    /* Strip the 802.1Q tag: save the TCI for the descriptor's "special"
     * field and move the 12 bytes of MAC addresses up over the tag. */
    if (vlan_enabled(s) && is_vlan_packet(s, filter_buf)) {
        vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(filter_buf
                                                                + 14)));
        iov_ofs = 4;
        if (filter_buf == iov->iov_base) {
            memmove(filter_buf + 4, filter_buf, 12);
        } else {
            /* Headers were linearized into min_buf; write the shifted
             * addresses back into the original iovec. */
            iov_from_buf(iov, iovcnt, 4, filter_buf, 12);
            while (iov->iov_len <= iov_ofs) {
                iov_ofs -= iov->iov_len;
                iov++;
            }
        }
        vlan_status = E1000_RXD_STAT_VP;
        size -= 4;
    }

    rdh_start = s->mac_reg[RDH];
    desc_offset = 0;
    total_size = size + fcs_len(s);
    /* Not enough descriptors for the whole frame: signal receiver
     * overrun and let the net layer queue the packet. */
    if (!e1000_has_rxbufs(s, total_size)) {
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
    }
    /* Fill one descriptor per iteration, up to rxbuf_size bytes each. */
    do {
        desc_size = total_size - desc_offset;
        if (desc_size > s->rxbuf_size) {
            desc_size = s->rxbuf_size;
        }
        base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
        pci_dma_read(d, base, &desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        if (desc.buffer_addr) {
            if (desc_offset < size) {
                size_t iov_copy;
                hwaddr ba = le64_to_cpu(desc.buffer_addr);
                size_t copy_size = size - desc_offset;
                if (copy_size > s->rxbuf_size) {
                    copy_size = s->rxbuf_size;
                }
                /* DMA the payload, walking the iovec as we go. */
                do {
                    iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);
                    pci_dma_write(d, ba, iov->iov_base + iov_ofs, iov_copy);
                    copy_size -= iov_copy;
                    ba += iov_copy;
                    iov_ofs += iov_copy;
                    if (iov_ofs == iov->iov_len) {
                        iov++;
                        iov_ofs = 0;
                    }
                } while (copy_size);
            }
            desc_offset += desc_size;
            desc.length = cpu_to_le16(desc_size);
            if (desc_offset >= total_size) {
                desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
            } else {
                /* Guest zeroing out status is not a hardware requirement.
                   Clear EOP in case guest didn't do it. */
                desc.status &= ~E1000_RXD_STAT_EOP;
            }
        } else { // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        }
        pci_dma_write(d, base, &desc, sizeof(desc));

        /* Advance the head pointer, wrapping at the end of the ring. */
        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
        }
    } while (desc_offset < total_size);

    /* Update statistics: good packets, total packets, total octets. */
    s->mac_reg[GPRC]++;
    s->mac_reg[TPR]++;
    /* TOR - Total Octets Received:
     * This register includes bytes received in a packet from the <Destination
     * Address> field through the <CRC> field, inclusively.
     */
    n = s->mac_reg[TORL] + size + /* Always include FCS length. */ 4;
    if (n < s->mac_reg[TORL])
        s->mac_reg[TORH]++;
    s->mac_reg[TORL] = n;

    /* Raise RXT0, plus RXDMT0 when the remaining free descriptor space
     * has dropped to the RDLEN >> rxbuf_min_shift threshold. */
    n = E1000_ICS_RXT0;
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);

    return size;
}
1061 
1062 static ssize_t
1063 e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size)
1064 {
1065     const struct iovec iov = {
1066         .iov_base = (uint8_t *)buf,
1067         .iov_len = size
1068     };
1069 
1070     return e1000_receive_iov(nc, &iov, 1);
1071 }
1072 
/* Default MMIO read handler: return the register value unmodified. */
static uint32_t
mac_readreg(E1000State *s, int index)
{
    return s->mac_reg[index];
}
1078 
1079 static uint32_t
1080 mac_icr_read(E1000State *s, int index)
1081 {
1082     uint32_t ret = s->mac_reg[ICR];
1083 
1084     DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
1085     set_interrupt_cause(s, 0, 0);
1086     return ret;
1087 }
1088 
1089 static uint32_t
1090 mac_read_clr4(E1000State *s, int index)
1091 {
1092     uint32_t ret = s->mac_reg[index];
1093 
1094     s->mac_reg[index] = 0;
1095     return ret;
1096 }
1097 
1098 static uint32_t
1099 mac_read_clr8(E1000State *s, int index)
1100 {
1101     uint32_t ret = s->mac_reg[index];
1102 
1103     s->mac_reg[index] = 0;
1104     s->mac_reg[index-1] = 0;
1105     return ret;
1106 }
1107 
1108 static void
1109 mac_writereg(E1000State *s, int index, uint32_t val)
1110 {
1111     uint32_t macaddr[2];
1112 
1113     s->mac_reg[index] = val;
1114 
1115     if (index == RA + 1) {
1116         macaddr[0] = cpu_to_le32(s->mac_reg[RA]);
1117         macaddr[1] = cpu_to_le32(s->mac_reg[RA + 1]);
1118         qemu_format_nic_info_str(qemu_get_queue(s->nic), (uint8_t *)macaddr);
1119     }
1120 }
1121 
1122 static void
1123 set_rdt(E1000State *s, int index, uint32_t val)
1124 {
1125     s->mac_reg[index] = val & 0xffff;
1126     if (e1000_has_rxbufs(s, 1)) {
1127         qemu_flush_queued_packets(qemu_get_queue(s->nic));
1128     }
1129 }
1130 
/* Write handler for registers that only implement their low 16 bits. */
static void
set_16bit(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
}
1136 
/* Descriptor ring length (TDLEN/RDLEN) write: only bits 19:7 are
 * implemented, i.e. the length is a multiple of 128 bytes. */
static void
set_dlen(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xfff80;
}
1142 
/* Shared write handler for TCTL and TDT: store the value, keep TDT
 * within its 16-bit range, and kick the transmit path. */
static void
set_tctl(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
    s->mac_reg[TDT] &= 0xffff;
    start_xmit(s);
}
1150 
/* ICR write: bits written as 1 clear the corresponding pending causes;
 * the interrupt line is re-evaluated by set_interrupt_cause(). */
static void
set_icr(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_icr %x\n", val);
    set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
}
1157 
/* IMC write: mask (disable) the interrupt causes written as 1, then
 * re-evaluate the interrupt line. */
static void
set_imc(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] &= ~val;
    set_ics(s, 0, 0);
}
1164 
/* IMS write: unmask (enable) the interrupt causes written as 1, then
 * re-evaluate the line — already-pending causes may fire immediately. */
static void
set_ims(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] |= val;
    set_ics(s, 0, 0);
}
1171 
1172 #define getreg(x)	[x] = mac_readreg
1173 static uint32_t (*macreg_readops[])(E1000State *, int) = {
1174     getreg(PBA),	getreg(RCTL),	getreg(TDH),	getreg(TXDCTL),
1175     getreg(WUFC),	getreg(TDT),	getreg(CTRL),	getreg(LEDCTL),
1176     getreg(MANC),	getreg(MDIC),	getreg(SWSM),	getreg(STATUS),
1177     getreg(TORL),	getreg(TOTL),	getreg(IMS),	getreg(TCTL),
1178     getreg(RDH),	getreg(RDT),	getreg(VET),	getreg(ICS),
1179     getreg(TDBAL),	getreg(TDBAH),	getreg(RDBAH),	getreg(RDBAL),
1180     getreg(TDLEN),      getreg(RDLEN),  getreg(RDTR),   getreg(RADV),
1181     getreg(TADV),       getreg(ITR),
1182 
1183     [TOTH] = mac_read_clr8,	[TORH] = mac_read_clr8,	[GPRC] = mac_read_clr4,
1184     [GPTC] = mac_read_clr4,	[TPR] = mac_read_clr4,	[TPT] = mac_read_clr4,
1185     [ICR] = mac_icr_read,	[EECD] = get_eecd,	[EERD] = flash_eerd_read,
1186     [CRCERRS ... MPC] = &mac_readreg,
1187     [RA ... RA+31] = &mac_readreg,
1188     [MTA ... MTA+127] = &mac_readreg,
1189     [VFTA ... VFTA+127] = &mac_readreg,
1190 };
1191 enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
1192 
1193 #define putreg(x)	[x] = mac_writereg
1194 static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
1195     putreg(PBA),	putreg(EERD),	putreg(SWSM),	putreg(WUFC),
1196     putreg(TDBAL),	putreg(TDBAH),	putreg(TXDCTL),	putreg(RDBAH),
1197     putreg(RDBAL),	putreg(LEDCTL), putreg(VET),
1198     [TDLEN] = set_dlen,	[RDLEN] = set_dlen,	[TCTL] = set_tctl,
1199     [TDT] = set_tctl,	[MDIC] = set_mdic,	[ICS] = set_ics,
1200     [TDH] = set_16bit,	[RDH] = set_16bit,	[RDT] = set_rdt,
1201     [IMC] = set_imc,	[IMS] = set_ims,	[ICR] = set_icr,
1202     [EECD] = set_eecd,	[RCTL] = set_rx_control, [CTRL] = set_ctrl,
1203     [RDTR] = set_16bit, [RADV] = set_16bit,     [TADV] = set_16bit,
1204     [ITR] = set_16bit,
1205     [RA ... RA+31] = &mac_writereg,
1206     [MTA ... MTA+127] = &mac_writereg,
1207     [VFTA ... VFTA+127] = &mac_writereg,
1208 };
1209 
1210 enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
1211 
1212 static void
1213 e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val,
1214                  unsigned size)
1215 {
1216     E1000State *s = opaque;
1217     unsigned int index = (addr & 0x1ffff) >> 2;
1218 
1219     if (index < NWRITEOPS && macreg_writeops[index]) {
1220         macreg_writeops[index](s, index, val);
1221     } else if (index < NREADOPS && macreg_readops[index]) {
1222         DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n", index<<2, val);
1223     } else {
1224         DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
1225                index<<2, val);
1226     }
1227 }
1228 
1229 static uint64_t
1230 e1000_mmio_read(void *opaque, hwaddr addr, unsigned size)
1231 {
1232     E1000State *s = opaque;
1233     unsigned int index = (addr & 0x1ffff) >> 2;
1234 
1235     if (index < NREADOPS && macreg_readops[index])
1236     {
1237         return macreg_readops[index](s, index);
1238     }
1239     DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
1240     return 0;
1241 }
1242 
/* MMIO region ops: device registers are 32 bits wide, so all accesses
 * are implemented as 4-byte little-endian reads/writes. */
static const MemoryRegionOps e1000_mmio_ops = {
    .read = e1000_mmio_read,
    .write = e1000_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
1252 
1253 static uint64_t e1000_io_read(void *opaque, hwaddr addr,
1254                               unsigned size)
1255 {
1256     E1000State *s = opaque;
1257 
1258     (void)s;
1259     return 0;
1260 }
1261 
1262 static void e1000_io_write(void *opaque, hwaddr addr,
1263                            uint64_t val, unsigned size)
1264 {
1265     E1000State *s = opaque;
1266 
1267     (void)s;
1268 }
1269 
/* I/O BAR ops; both handlers are unimplemented stubs. */
static const MemoryRegionOps e1000_io_ops = {
    .read = e1000_io_read,
    .write = e1000_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
1275 
/* VMState field test: true only for version-1 streams; used to skip
 * fields that are no longer present in newer stream versions. */
static bool is_version_1(void *opaque, int version_id)
{
    return version_id == 1;
}
1280 
1281 static void e1000_pre_save(void *opaque)
1282 {
1283     E1000State *s = opaque;
1284     NetClientState *nc = qemu_get_queue(s->nic);
1285 
1286     /* If the mitigation timer is active, emulate a timeout now. */
1287     if (s->mit_timer_on) {
1288         e1000_mit_timer(s);
1289     }
1290 
1291     if (!(s->compat_flags & E1000_FLAG_AUTONEG)) {
1292         return;
1293     }
1294 
1295     /*
1296      * If link is down and auto-negotiation is ongoing, complete
1297      * auto-negotiation immediately.  This allows is to look at
1298      * MII_SR_AUTONEG_COMPLETE to infer link status on load.
1299      */
1300     if (nc->link_down &&
1301         s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN &&
1302         s->phy_reg[PHY_CTRL] & MII_CR_RESTART_AUTO_NEG) {
1303          s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
1304     }
1305 }
1306 
/*
 * Called after device state has been deserialized.  Re-derives the
 * runtime-only state (mitigation timer, link_down) that is not part of
 * the migration stream, and restarts an in-progress auto-negotiation.
 */
static int e1000_post_load(void *opaque, int version_id)
{
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);

    /* With the "mitigation" compat property off, force all mitigation
     * registers and state back to their disabled defaults. */
    if (!(s->compat_flags & E1000_FLAG_MIT)) {
        s->mac_reg[ITR] = s->mac_reg[RDTR] = s->mac_reg[RADV] =
            s->mac_reg[TADV] = 0;
        s->mit_irq_level = false;
    }
    s->mit_ide = 0;
    s->mit_timer_on = false;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in mac_reg[STATUS].
     * Alternatively, restart link negotiation if it was in progress. */
    nc->link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;

    if (!(s->compat_flags & E1000_FLAG_AUTONEG)) {
        return 0;
    }

    /* Auto-negotiation was still running at save time: bring the link
     * up and rearm the autoneg timer to finish it on this side. */
    if (s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN &&
        s->phy_reg[PHY_CTRL] & MII_CR_RESTART_AUTO_NEG &&
        !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
        nc->link_down = false;
        timer_mod(s->autoneg_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }

    return 0;
}
1338 
/* Subsection predicate: only migrate interrupt-mitigation state when
 * the "mitigation" compat property is enabled. */
static bool e1000_mit_state_needed(void *opaque)
{
    E1000State *s = opaque;

    return s->compat_flags & E1000_FLAG_MIT;
}
1345 
/* Optional migration subsection carrying the interrupt-mitigation
 * registers and IRQ level; sent only when e1000_mit_state_needed(). */
static const VMStateDescription vmstate_e1000_mit_state = {
    .name = "e1000/mit_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields    = (VMStateField[]) {
        VMSTATE_UINT32(mac_reg[RDTR], E1000State),
        VMSTATE_UINT32(mac_reg[RADV], E1000State),
        VMSTATE_UINT32(mac_reg[TADV], E1000State),
        VMSTATE_UINT32(mac_reg[ITR], E1000State),
        VMSTATE_BOOL(mit_irq_level, E1000State),
        VMSTATE_END_OF_LIST()
    }
};
1360 
/* Main migration descriptor: PCI state, EEPROM/PHY shadow registers,
 * in-flight TX context, and the MAC register file.  Field order is
 * ABI — do not reorder or remove entries. */
static const VMStateDescription vmstate_e1000 = {
    .name = "e1000",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = e1000_pre_save,
    .post_load = e1000_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_PCI_DEVICE(parent_obj, E1000State),
        VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
        VMSTATE_UNUSED(4), /* Was mmio_base.  */
        VMSTATE_UINT32(rxbuf_size, E1000State),
        VMSTATE_UINT32(rxbuf_min_shift, E1000State),
        /* EEPROM bit-bang interface state. */
        VMSTATE_UINT32(eecd_state.val_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
        VMSTATE_UINT16(eecd_state.reading, E1000State),
        VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
        /* In-progress transmit (checksum offload / TSO) context. */
        VMSTATE_UINT8(tx.ipcss, E1000State),
        VMSTATE_UINT8(tx.ipcso, E1000State),
        VMSTATE_UINT16(tx.ipcse, E1000State),
        VMSTATE_UINT8(tx.tucss, E1000State),
        VMSTATE_UINT8(tx.tucso, E1000State),
        VMSTATE_UINT16(tx.tucse, E1000State),
        VMSTATE_UINT32(tx.paylen, E1000State),
        VMSTATE_UINT8(tx.hdr_len, E1000State),
        VMSTATE_UINT16(tx.mss, E1000State),
        VMSTATE_UINT16(tx.size, E1000State),
        VMSTATE_UINT16(tx.tso_frames, E1000State),
        VMSTATE_UINT8(tx.sum_needed, E1000State),
        VMSTATE_INT8(tx.ip, E1000State),
        VMSTATE_INT8(tx.tcp, E1000State),
        VMSTATE_BUFFER(tx.header, E1000State),
        VMSTATE_BUFFER(tx.data, E1000State),
        VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
        VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
        /* MAC register file, one entry per migrated register. */
        VMSTATE_UINT32(mac_reg[CTRL], E1000State),
        VMSTATE_UINT32(mac_reg[EECD], E1000State),
        VMSTATE_UINT32(mac_reg[EERD], E1000State),
        VMSTATE_UINT32(mac_reg[GPRC], E1000State),
        VMSTATE_UINT32(mac_reg[GPTC], E1000State),
        VMSTATE_UINT32(mac_reg[ICR], E1000State),
        VMSTATE_UINT32(mac_reg[ICS], E1000State),
        VMSTATE_UINT32(mac_reg[IMC], E1000State),
        VMSTATE_UINT32(mac_reg[IMS], E1000State),
        VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[MANC], E1000State),
        VMSTATE_UINT32(mac_reg[MDIC], E1000State),
        VMSTATE_UINT32(mac_reg[MPC], E1000State),
        VMSTATE_UINT32(mac_reg[PBA], E1000State),
        VMSTATE_UINT32(mac_reg[RCTL], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[RDH], E1000State),
        VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[RDT], E1000State),
        VMSTATE_UINT32(mac_reg[STATUS], E1000State),
        VMSTATE_UINT32(mac_reg[SWSM], E1000State),
        VMSTATE_UINT32(mac_reg[TCTL], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[TDH], E1000State),
        VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[TDT], E1000State),
        VMSTATE_UINT32(mac_reg[TORH], E1000State),
        VMSTATE_UINT32(mac_reg[TORL], E1000State),
        VMSTATE_UINT32(mac_reg[TOTH], E1000State),
        VMSTATE_UINT32(mac_reg[TOTL], E1000State),
        VMSTATE_UINT32(mac_reg[TPR], E1000State),
        VMSTATE_UINT32(mac_reg[TPT], E1000State),
        VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[WUFC], E1000State),
        VMSTATE_UINT32(mac_reg[VET], E1000State),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_e1000_mit_state,
            .needed = e1000_mit_state_needed,
        }, {
            /* empty */
        }
    }
};
1448 
1449 /*
1450  * EEPROM contents documented in Tables 5-2 and 5-3, pp. 98-102.
1451  * Note: A valid DevId will be inserted during pci_e1000_init().
1452  */
1453 static const uint16_t e1000_eeprom_template[64] = {
1454     0x0000, 0x0000, 0x0000, 0x0000,      0xffff, 0x0000,      0x0000, 0x0000,
1455     0x3000, 0x1000, 0x6403, 0 /*DevId*/, 0x8086, 0 /*DevId*/, 0x8086, 0x3040,
1456     0x0008, 0x2000, 0x7e14, 0x0048,      0x1000, 0x00d8,      0x0000, 0x2700,
1457     0x6cc9, 0x3150, 0x0722, 0x040b,      0x0984, 0x0000,      0xc000, 0x0706,
1458     0x1008, 0x0000, 0x0f04, 0x7fff,      0x4d01, 0xffff,      0xffff, 0xffff,
1459     0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
1460     0x0100, 0x4000, 0x121c, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
1461     0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0x0000,
1462 };
1463 
1464 /* PCI interface */
1465 
/*
 * Create the MMIO and I/O port regions.  Most of the register space is
 * marked for MMIO write coalescing; the registers in excluded_regs
 * have immediate side effects and must not be coalesced.  The list is
 * sorted and terminated by the PNPMMIO_SIZE sentinel.
 */
static void
e1000_mmio_setup(E1000State *d)
{
    int i;
    const uint32_t excluded_regs[] = {
        E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
        E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
    };

    memory_region_init_io(&d->mmio, OBJECT(d), &e1000_mmio_ops, d,
                          "e1000-mmio", PNPMMIO_SIZE);
    /* Coalesce [0, first excluded register), then each 4-byte-aligned
     * gap between consecutive excluded registers. */
    memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
    for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
        memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
                                     excluded_regs[i+1] - excluded_regs[i] - 4);
    memory_region_init_io(&d->io, OBJECT(d), &e1000_io_ops, d, "e1000-io", IOPORT_SIZE);
}
1483 
/* Net-layer cleanup callback: the peer is going away; drop our NIC
 * pointer so no further net operations are attempted through it. */
static void
e1000_cleanup(NetClientState *nc)
{
    E1000State *s = qemu_get_nic_opaque(nc);

    s->nic = NULL;
}
1491 
/* PCI exit hook: tear down timers, memory regions and the NIC backend
 * created in pci_e1000_init(). */
static void
pci_e1000_uninit(PCIDevice *dev)
{
    E1000State *d = E1000(dev);

    /* Timers must be stopped before being freed. */
    timer_del(d->autoneg_timer);
    timer_free(d->autoneg_timer);
    timer_del(d->mit_timer);
    timer_free(d->mit_timer);
    memory_region_destroy(&d->mmio);
    memory_region_destroy(&d->io);
    qemu_del_nic(d->nic);
}
1505 
/* Callbacks the generic net layer invokes on this NIC. */
static NetClientInfo net_e1000_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = e1000_can_receive,
    .receive = e1000_receive,
    .receive_iov = e1000_receive_iov,
    .cleanup = e1000_cleanup,
    .link_status_changed = e1000_set_link_status,
};
1515 
/*
 * PCI init hook: set up config space, BARs, EEPROM contents, the NIC
 * backend and the device timers.  Returns 0 on success.
 */
static int pci_e1000_init(PCIDevice *pci_dev)
{
    DeviceState *dev = DEVICE(pci_dev);
    E1000State *d = E1000(pci_dev);
    PCIDeviceClass *pdc = PCI_DEVICE_GET_CLASS(pci_dev);
    uint8_t *pci_conf;
    uint16_t checksum = 0;
    int i;
    uint8_t *macaddr;

    pci_conf = pci_dev->config;

    /* TODO: RST# value should be 0, PCI spec 6.2.4 */
    pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;

    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */

    e1000_mmio_setup(d);

    /* BAR 0: register MMIO; BAR 1: legacy I/O port window. */
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);

    pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);

    memmove(d->eeprom_data, e1000_eeprom_template,
        sizeof e1000_eeprom_template);
    qemu_macaddr_default_if_unset(&d->conf.macaddr);
    macaddr = d->conf.macaddr.a;
    /* EEPROM words 0-2 hold the MAC address, low byte first. */
    for (i = 0; i < 3; i++)
        d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
    /* Patch in this model's PCI device ID (see e1000_eeprom_template). */
    d->eeprom_data[11] = d->eeprom_data[13] = pdc->device_id;
    /* Recompute the checksum word so the words sum to EEPROM_SUM. */
    for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
        checksum += d->eeprom_data[i];
    checksum = (uint16_t) EEPROM_SUM - checksum;
    d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;

    d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
                          object_get_typename(OBJECT(d)), dev->id, d);

    qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);

    add_boot_device_path(d->conf.bootindex, dev, "/ethernet-phy@0");

    d->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, e1000_autoneg_timer, d);
    d->mit_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000_mit_timer, d);

    return 0;
}
1563 
/* qdev reset hook: delegate to the common device reset helper. */
static void qdev_e1000_reset(DeviceState *dev)
{
    E1000State *d = E1000(dev);
    e1000_reset(d);
}
1569 
/* User-visible properties; the two compat bits gate migration-visible
 * behaviour (see e1000_pre_save/e1000_post_load). */
static Property e1000_properties[] = {
    DEFINE_NIC_PROPERTIES(E1000State, conf),
    DEFINE_PROP_BIT("autonegotiation", E1000State,
                    compat_flags, E1000_FLAG_AUTONEG_BIT, true),
    DEFINE_PROP_BIT("mitigation", E1000State,
                    compat_flags, E1000_FLAG_MIT_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};
1578 
/* Per-model parameters used by e1000_class_init() to specialize the
 * abstract TYPE_E1000_BASE class. */
typedef struct E1000Info {
    const char *name;       /* QOM type name */
    uint16_t   device_id;   /* PCI device ID (also patched into EEPROM) */
    uint8_t    revision;    /* PCI revision ID */
    uint16_t   phy_id2;     /* PHY ID2 value exposed by this model */
    bool       is_8257xx;   /* true for 8257x-family models (82573L) */
} E1000Info;
1586 
/* Class initializer for each concrete model: wires PCI callbacks and
 * copies the per-model E1000Info parameters into the class. */
static void e1000_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    E1000BaseClass *e = E1000_DEVICE_CLASS(klass);
    const E1000Info *info = data;

    k->init = pci_e1000_init;
    k->exit = pci_e1000_uninit;
    k->romfile = "efi-e1000.rom";
    k->vendor_id = PCI_VENDOR_ID_INTEL;
    k->device_id = info->device_id;
    k->revision = info->revision;
    e->phy_id2 = info->phy_id2;
    e->is_8257xx = info->is_8257xx;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->desc = "Intel Gigabit Ethernet";
    dc->reset = qdev_e1000_reset;
    dc->vmsd = &vmstate_e1000;
    dc->props = e1000_properties;
}
1609 
/* Abstract base type shared by all e1000 variants; concrete models are
 * registered from e1000_devices[] in e1000_register_types(). */
static const TypeInfo e1000_base_info = {
    .name          = TYPE_E1000_BASE,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(E1000State),
    .class_size    = sizeof(E1000BaseClass),
    .abstract      = true,
};
1617 
/* Supported device models; each entry becomes a QOM type at startup. */
static const E1000Info e1000_devices[] = {
    {
        .name      = "e1000-82540em",
        .device_id = E1000_DEV_ID_82540EM,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
    {
        .name      = "e1000-82544gc",
        .device_id = E1000_DEV_ID_82544GC_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_82544x,
    },
    {
        .name      = "e1000-82545em",
        .device_id = E1000_DEV_ID_82545EM_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
    {
        /* Only 8257x-family member; sets is_8257xx. */
        .name      = "e1000-82573l",
        .device_id = E1000_DEV_ID_82573L,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_82573x,
        .is_8257xx = true,
    },
};
1645 
/* Plain "e1000" is an alias of the 82540EM model, kept for
 * compatibility with existing command lines. */
static const TypeInfo e1000_default_info = {
    .name          = "e1000",
    .parent        = "e1000-82540em",
};
1650 
1651 static void e1000_register_types(void)
1652 {
1653     int i;
1654 
1655     type_register_static(&e1000_base_info);
1656     for (i = 0; i < ARRAY_SIZE(e1000_devices); i++) {
1657         const E1000Info *info = &e1000_devices[i];
1658         TypeInfo type_info = {};
1659 
1660         type_info.name = info->name;
1661         type_info.parent = TYPE_E1000_BASE;
1662         type_info.class_data = (void *)info;
1663         type_info.class_init = e1000_class_init;
1664 
1665         type_register(&type_info);
1666     }
1667     type_register_static(&e1000_default_info);
1668 }
1669 
1670 type_init(e1000_register_types)
1671