xref: /qemu/hw/net/e1000.c (revision 9e11773417d98fd2ec961568ec2875063b95569b)
1 /*
2  * QEMU e1000 emulation
3  *
4  * Software developer's manual:
5  * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
6  *
7  * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
8  * Copyright (c) 2008 Qumranet
9  * Based on work done by:
10  * Copyright (c) 2007 Dan Aloni
11  * Copyright (c) 2004 Antony T Curtis
12  *
13  * This library is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2 of the License, or (at your option) any later version.
17  *
18  * This library is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
25  */
26 
27 
28 #include "hw/hw.h"
29 #include "hw/pci/pci.h"
30 #include "net/net.h"
31 #include "net/checksum.h"
32 #include "hw/loader.h"
33 #include "sysemu/sysemu.h"
34 #include "sysemu/dma.h"
35 #include "qemu/iov.h"
36 #include "qemu/range.h"
37 
38 #include "e1000_regs.h"
39 
40 #define E1000_DEBUG
41 
42 #ifdef E1000_DEBUG
/* Debug message categories; enable a category by setting its DBGBIT in
 * debugflags below (TX errors and general messages are on by default). */
enum {
    DEBUG_GENERAL,      DEBUG_IO,       DEBUG_MMIO,     DEBUG_INTERRUPT,
    DEBUG_RX,           DEBUG_TX,       DEBUG_MDIC,     DEBUG_EEPROM,
    DEBUG_UNKNOWN,      DEBUG_TXSUM,    DEBUG_TXERR,    DEBUG_RXERR,
    DEBUG_RXFILTER,     DEBUG_PHY,      DEBUG_NOTYET,
};
#define DBGBIT(x)    (1<<DEBUG_##x)
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);
51 
52 #define DBGOUT(what, fmt, ...) do { \
53     if (debugflags & DBGBIT(what)) \
54         fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
55     } while (0)
56 #else
57 #define DBGOUT(what, fmt, ...) do {} while (0)
58 #endif
59 
60 #define IOPORT_SIZE       0x40
61 #define PNPMMIO_SIZE      0x20000
62 #define MIN_BUF_SIZE      60 /* Min. octets in an ethernet frame sans FCS */
63 
64 /* this is the size past which hardware will drop packets when setting LPE=0 */
65 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
66 /* this is the size past which hardware will drop packets when setting LPE=1 */
67 #define MAXIMUM_ETHERNET_LPE_SIZE 16384
68 
69 #define MAXIMUM_ETHERNET_HDR_LEN (14+4)
70 
71 /*
72  * HW models:
73  *  E1000_DEV_ID_82540EM works with Windows, Linux, and OS X <= 10.8
74  *  E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
75  *  E1000_DEV_ID_82545EM_COPPER works with Linux and OS X >= 10.6
76  *  Others never tested
77  */
78 
/* Per-device emulation state for the e1000 family. */
typedef struct E1000State_st {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/

    NICState *nic;
    NICConf conf;                 /* MAC address and backend configuration */
    MemoryRegion mmio;            /* register MMIO BAR */
    MemoryRegion io;              /* I/O port BAR */

    uint32_t mac_reg[0x8000];     /* MAC registers, indexed by offset >> 2 */
    uint16_t phy_reg[0x20];       /* MII PHY registers */
    uint16_t eeprom_data[64];     /* emulated EEPROM contents, 16-bit words */

    uint32_t rxbuf_size;          /* per-descriptor RX buffer size (RCTL) */
    uint32_t rxbuf_min_shift;     /* RDMTS-derived descriptor threshold */
    /* Transmit offload context, latched from context descriptors and used
     * while assembling/segmenting the current packet. */
    struct e1000_tx {
        unsigned char header[256];    /* saved headers for TSO segments */
        unsigned char vlan_header[4]; /* 802.1Q tag to insert on transmit */
        /* Fields vlan and data must not be reordered or separated. */
        unsigned char vlan[4];
        unsigned char data[0x10000];  /* frame assembly buffer */
        uint16_t size;                /* bytes currently in data[] */
        unsigned char sum_needed;     /* POPTS checksum-request bits */
        unsigned char vlan_needed;    /* insert vlan_header when sending */
        uint8_t ipcss;                /* IP checksum start offset */
        uint8_t ipcso;                /* IP checksum store offset */
        uint16_t ipcse;               /* IP checksum end (0 = to packet end) */
        uint8_t tucss;                /* TCP/UDP checksum start offset */
        uint8_t tucso;                /* TCP/UDP checksum store offset */
        uint16_t tucse;               /* TCP/UDP checksum end */
        uint8_t hdr_len;              /* TSO: header bytes per segment */
        uint16_t mss;                 /* TSO: maximum segment size */
        uint32_t paylen;              /* TSO: total payload length */
        uint16_t tso_frames;          /* segments sent for current packet */
        char tse;                     /* TSE from the context descriptor */
        int8_t ip;                    /* 1 = IPv4, 0 = IPv6 */
        int8_t tcp;                   /* 1 = TCP, 0 = UDP */
        char cptse;                   /* current packet tse bit (data desc) */
    } tx;

    /* Bit-banged microwire EEPROM access state (EECD register). */
    struct {
        uint32_t val_in;    /* shifted in from guest driver */
        uint16_t bitnum_in;     /* command bits clocked in since CS rose */
        uint16_t bitnum_out;    /* index of next data bit to shift out */
        uint16_t reading;       /* non-zero while a read streams data out */
        uint32_t old_eecd;      /* previous EECD value, for edge detection */
    } eecd_state;

    QEMUTimer *autoneg_timer;   /* delays link-up to emulate autoneg */

    QEMUTimer *mit_timer;      /* Mitigation timer. */
    bool mit_timer_on;         /* Mitigation timer is running. */
    bool mit_irq_level;        /* Tracks interrupt pin level. */
    uint32_t mit_ide;          /* Tracks E1000_TXD_CMD_IDE bit. */

/* Compatibility flags for migration to/from qemu 1.3.0 and older */
#define E1000_FLAG_AUTONEG_BIT 0
#define E1000_FLAG_MIT_BIT 1
#define E1000_FLAG_MAC_BIT 2
#define E1000_FLAG_AUTONEG (1 << E1000_FLAG_AUTONEG_BIT)
#define E1000_FLAG_MIT (1 << E1000_FLAG_MIT_BIT)
#define E1000_FLAG_MAC (1 << E1000_FLAG_MAC_BIT)
    uint32_t compat_flags;
} E1000State;
144 
/* Class-level data shared by all e1000 device variants; only the PHY ID2
 * value differs between the emulated models. */
typedef struct E1000BaseClass {
    PCIDeviceClass parent_class;
    uint16_t phy_id2;           /* PHY_ID2 value installed at reset */
} E1000BaseClass;
149 
150 #define TYPE_E1000_BASE "e1000-base"
151 
152 #define E1000(obj) \
153     OBJECT_CHECK(E1000State, (obj), TYPE_E1000_BASE)
154 
155 #define E1000_DEVICE_CLASS(klass) \
156      OBJECT_CLASS_CHECK(E1000BaseClass, (klass), TYPE_E1000_BASE)
157 #define E1000_DEVICE_GET_CLASS(obj) \
158     OBJECT_GET_CLASS(E1000BaseClass, (obj), TYPE_E1000_BASE)
159 
/* Map each register's MMIO byte offset (E1000_<name>) to its index in
 * mac_reg[], which is addressed in 32-bit words. */
#define defreg(x)    x = (E1000_##x>>2)
enum {
    defreg(CTRL),    defreg(EECD),    defreg(EERD),    defreg(GPRC),
    defreg(GPTC),    defreg(ICR),     defreg(ICS),     defreg(IMC),
    defreg(IMS),     defreg(LEDCTL),  defreg(MANC),    defreg(MDIC),
    defreg(MPC),     defreg(PBA),     defreg(RCTL),    defreg(RDBAH),
    defreg(RDBAL),   defreg(RDH),     defreg(RDLEN),   defreg(RDT),
    defreg(STATUS),  defreg(SWSM),    defreg(TCTL),    defreg(TDBAH),
    defreg(TDBAL),   defreg(TDH),     defreg(TDLEN),   defreg(TDT),
    defreg(TORH),    defreg(TORL),    defreg(TOTH),    defreg(TOTL),
    defreg(TPR),     defreg(TPT),     defreg(TXDCTL),  defreg(WUFC),
    defreg(RA),      defreg(MTA),     defreg(CRCERRS), defreg(VFTA),
    defreg(VET),     defreg(RDTR),    defreg(RADV),    defreg(TADV),
    defreg(ITR),
};
175 
176 static void
177 e1000_link_down(E1000State *s)
178 {
179     s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
180     s->phy_reg[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
181     s->phy_reg[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
182     s->phy_reg[PHY_LP_ABILITY] &= ~MII_LPAR_LPACK;
183 }
184 
185 static void
186 e1000_link_up(E1000State *s)
187 {
188     s->mac_reg[STATUS] |= E1000_STATUS_LU;
189     s->phy_reg[PHY_STATUS] |= MII_SR_LINK_STATUS;
190 
191     /* E1000_STATUS_LU is tested by e1000_can_receive() */
192     qemu_flush_queued_packets(qemu_get_queue(s->nic));
193 }
194 
195 static bool
196 have_autoneg(E1000State *s)
197 {
198     return (s->compat_flags & E1000_FLAG_AUTONEG) &&
199            (s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN);
200 }
201 
202 static void
203 set_phy_ctrl(E1000State *s, int index, uint16_t val)
204 {
205     /* bits 0-5 reserved; MII_CR_[RESTART_AUTO_NEG,RESET] are self clearing */
206     s->phy_reg[PHY_CTRL] = val & ~(0x3f |
207                                    MII_CR_RESET |
208                                    MII_CR_RESTART_AUTO_NEG);
209 
210     /*
211      * QEMU 1.3 does not support link auto-negotiation emulation, so if we
212      * migrate during auto negotiation, after migration the link will be
213      * down.
214      */
215     if (have_autoneg(s) && (val & MII_CR_RESTART_AUTO_NEG)) {
216         e1000_link_down(s);
217         DBGOUT(PHY, "Start link auto negotiation\n");
218         timer_mod(s->autoneg_timer,
219                   qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
220     }
221 }
222 
/* Per-register PHY write handlers; registers without an entry here are
 * stored directly into phy_reg[] by set_mdic(). */
static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
    [PHY_CTRL] = set_phy_ctrl,
};

enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };
228 
/* Guest access permissions per PHY register, consulted by set_mdic();
 * registers with no entry (0) are inaccessible in either direction. */
enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [PHY_STATUS]      = PHY_R,     [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_ID1]         = PHY_R,     [M88E1000_PHY_SPEC_CTRL]     = PHY_RW,
    [PHY_CTRL]        = PHY_RW,    [PHY_1000T_CTRL]             = PHY_RW,
    [PHY_LP_ABILITY]  = PHY_R,     [PHY_1000T_STATUS]           = PHY_R,
    [PHY_AUTONEG_ADV] = PHY_RW,    [M88E1000_RX_ERR_CNTR]       = PHY_R,
    [PHY_ID2]         = PHY_R,     [M88E1000_PHY_SPEC_STATUS]   = PHY_R,
    [PHY_AUTONEG_EXP] = PHY_R,
};
239 
/* Power-on defaults for the PHY registers, copied in by e1000_reset().
 * PHY_ID2 documented in 8254x_GBe_SDM.pdf, pp. 250 */
static const uint16_t phy_reg_init[] = {
    [PHY_CTRL]   = MII_CR_SPEED_SELECT_MSB |
                   MII_CR_FULL_DUPLEX |
                   MII_CR_AUTO_NEG_EN,

    [PHY_STATUS] = MII_SR_EXTENDED_CAPS |
                   MII_SR_LINK_STATUS |   /* link initially up */
                   MII_SR_AUTONEG_CAPS |
                   /* MII_SR_AUTONEG_COMPLETE: initially NOT completed */
                   MII_SR_PREAMBLE_SUPPRESS |
                   MII_SR_EXTENDED_STATUS |
                   MII_SR_10T_HD_CAPS |
                   MII_SR_10T_FD_CAPS |
                   MII_SR_100X_HD_CAPS |
                   MII_SR_100X_FD_CAPS,

    [PHY_ID1] = 0x141,
    /* [PHY_ID2] configured per DevId, from e1000_reset() */
    [PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0,
    [PHY_1000T_CTRL] = 0x0e00,
    [PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60,
};
267 
/* Power-on defaults for the MAC registers, copied in by e1000_reset();
 * registers not listed here reset to zero. */
static const uint32_t mac_reg_init[] = {
    [PBA]     = 0x00100030,
    [LEDCTL]  = 0x602,
    [CTRL]    = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
                E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS]  = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
                E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
                E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
                E1000_STATUS_LU,
    [MANC]    = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
                E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
                E1000_MANC_RMCP_EN,
};
281 
/* Keep the smallest non-zero mitigation delay seen so far;
 * *curr == 0 means no delay has been recorded yet. */
static inline void
mit_update_delay(uint32_t *curr, uint32_t value)
{
    if (value == 0) {
        return;                 /* zero means "not requested": ignore */
    }
    if (*curr == 0 || value < *curr) {
        *curr = value;
    }
}
290 
/*
 * Record a new interrupt-cause set in ICR/ICS and recompute the INTx
 * line level, applying interrupt mitigation (ITR/RADV/TADV) when a
 * raising edge is detected.
 */
static void
set_interrupt_cause(E1000State *s, int index, uint32_t val)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t pending_ints;
    uint32_t mit_delay;

    s->mac_reg[ICR] = val;

    /*
     * Make sure ICR and ICS registers have the same value.
     * The spec says that the ICS register is write-only.  However in practice,
     * on real hardware ICS is readable, and for reads it has the same value as
     * ICR (except that ICS does not have the clear on read behaviour of ICR).
     *
     * The VxWorks PRO/1000 driver uses this behaviour.
     */
    s->mac_reg[ICS] = val;

    /* only causes that are both pending and unmasked raise the line */
    pending_ints = (s->mac_reg[IMS] & s->mac_reg[ICR]);
    if (!s->mit_irq_level && pending_ints) {
        /*
         * Here we detect a potential raising edge. We postpone raising the
         * interrupt line if we are inside the mitigation delay window
         * (s->mit_timer_on == 1).
         * We provide a partial implementation of interrupt mitigation,
         * emulating only RADV, TADV and ITR (lower 16 bits, 1024ns units for
         * RADV and TADV, 256ns units for ITR). RDTR is only used to enable
         * RADV; relative timers based on TIDV and RDTR are not implemented.
         */
        if (s->mit_timer_on) {
            return;
        }
        if (s->compat_flags & E1000_FLAG_MIT) {
            /* Compute the next mitigation delay according to pending
             * interrupts and the current values of RADV (provided
             * RDTR!=0), TADV and ITR.
             * Then rearm the timer.
             */
            mit_delay = 0;
            /* TADV applies only if a descriptor requested delay (IDE) */
            if (s->mit_ide &&
                    (pending_ints & (E1000_ICR_TXQE | E1000_ICR_TXDW))) {
                mit_update_delay(&mit_delay, s->mac_reg[TADV] * 4);
            }
            if (s->mac_reg[RDTR] && (pending_ints & E1000_ICS_RXT0)) {
                mit_update_delay(&mit_delay, s->mac_reg[RADV] * 4);
            }
            mit_update_delay(&mit_delay, s->mac_reg[ITR]);

            if (mit_delay) {
                s->mit_timer_on = 1;
                /* mit_delay is kept in 256ns units */
                timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                          mit_delay * 256);
            }
            s->mit_ide = 0;
        }
    }

    s->mit_irq_level = (pending_ints != 0);
    pci_set_irq(d, s->mit_irq_level);
}
352 
353 static void
354 e1000_mit_timer(void *opaque)
355 {
356     E1000State *s = opaque;
357 
358     s->mit_timer_on = 0;
359     /* Call set_interrupt_cause to update the irq level (if necessary). */
360     set_interrupt_cause(s, 0, s->mac_reg[ICR]);
361 }
362 
/* Guest/internal write to ICS: OR the new cause bits into those already
 * pending in ICR, then re-evaluate the interrupt line. */
static void
set_ics(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
        s->mac_reg[IMS]);
    set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
}
370 
371 static void
372 e1000_autoneg_timer(void *opaque)
373 {
374     E1000State *s = opaque;
375     if (!qemu_get_queue(s->nic)->link_down) {
376         e1000_link_up(s);
377         s->phy_reg[PHY_LP_ABILITY] |= MII_LPAR_LPACK;
378         s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
379         DBGOUT(PHY, "Auto negotiation is completed\n");
380         set_ics(s, 0, E1000_ICS_LSC); /* signal link status change to guest */
381     }
382 }
383 
384 static int
385 rxbufsize(uint32_t v)
386 {
387     v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
388          E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
389          E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
390     switch (v) {
391     case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
392         return 16384;
393     case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
394         return 8192;
395     case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
396         return 4096;
397     case E1000_RCTL_SZ_1024:
398         return 1024;
399     case E1000_RCTL_SZ_512:
400         return 512;
401     case E1000_RCTL_SZ_256:
402         return 256;
403     }
404     return 2048;
405 }
406 
407 static void e1000_reset(void *opaque)
408 {
409     E1000State *d = opaque;
410     E1000BaseClass *edc = E1000_DEVICE_GET_CLASS(d);
411     uint8_t *macaddr = d->conf.macaddr.a;
412     int i;
413 
414     timer_del(d->autoneg_timer);
415     timer_del(d->mit_timer);
416     d->mit_timer_on = 0;
417     d->mit_irq_level = 0;
418     d->mit_ide = 0;
419     memset(d->phy_reg, 0, sizeof d->phy_reg);
420     memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
421     d->phy_reg[PHY_ID2] = edc->phy_id2;
422     memset(d->mac_reg, 0, sizeof d->mac_reg);
423     memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
424     d->rxbuf_min_shift = 1;
425     memset(&d->tx, 0, sizeof d->tx);
426 
427     if (qemu_get_queue(d->nic)->link_down) {
428         e1000_link_down(d);
429     }
430 
431     /* Some guests expect pre-initialized RAH/RAL (AddrValid flag + MACaddr) */
432     d->mac_reg[RA] = 0;
433     d->mac_reg[RA + 1] = E1000_RAH_AV;
434     for (i = 0; i < 4; i++) {
435         d->mac_reg[RA] |= macaddr[i] << (8 * i);
436         d->mac_reg[RA + 1] |= (i < 2) ? macaddr[i + 4] << (8 * i) : 0;
437     }
438     qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);
439 }
440 
441 static void
442 set_ctrl(E1000State *s, int index, uint32_t val)
443 {
444     /* RST is self clearing */
445     s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
446 }
447 
/* Guest write to RCTL: latch the value and derive the receive buffer
 * geometry, then retry delivery of any queued packets. */
static void
set_rx_control(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[RCTL] = val;
    s->rxbuf_size = rxbufsize(val);
    /* 2-bit RDMTS field selects the free-descriptor threshold as a
     * fraction of the ring size: shift of 1, 2 or 3 (1/2, 1/4, 1/8). */
    s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
    DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
           s->mac_reg[RCTL]);
    /* reception may just have been enabled: drain the backend queue */
    qemu_flush_queued_packets(qemu_get_queue(s->nic));
}
458 
/*
 * Guest write to the MDI Control register: execute the encoded PHY
 * register read or write (only PHY address 1 exists) and mark the
 * operation complete with E1000_MDIC_READY, optionally raising the
 * MDI-access-complete interrupt.
 */
static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            /* val ^ data clears the data field; merge in the PHY value */
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else {
            /* use the register-specific handler when one is registered */
            if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
                phyreg_writeops[addr](s, index, data);
            } else {
                s->phy_reg[addr] = data;
            }
        }
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        set_ics(s, 0, E1000_ICR_MDAC);
    }
}
493 
/*
 * Guest read of the EEPROM control register: report presence/grant plus
 * the latched control bits, and drive the DO (data-out) line.  While a
 * microwire read is in progress, DO carries bit (15 - bitnum_out % 16)
 * of EEPROM word (bitnum_out / 16), i.e. MSB first; when idle DO reads
 * as 1.
 */
static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    /* "^ 0xf" mirrors the bit index so the word is shifted out MSB first */
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}
507 
/*
 * Guest write to the EEPROM control register: emulate the bit-banged
 * microwire protocol.  A rising edge on CS resets the shifter; DI is
 * sampled on rising SK edges; after 9 command bits (3-bit opcode plus
 * 6-bit word address) a read command starts streaming data out through
 * get_eecd(), advancing one bit per falling SK edge.
 */
static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    /* latch only the guest-controlled lines for later edge detection */
    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_CS & val)) {            /* CS inactive; nothing to do */
        return;
    }
    if (E1000_EECD_CS & (val ^ oldval)) {    /* CS rise edge; reset state */
        s->eecd_state.val_in = 0;
        s->eecd_state.bitnum_in = 0;
        s->eecd_state.bitnum_out = 0;
        s->eecd_state.reading = 0;
    }
    if (!(E1000_EECD_SK & (val ^ oldval))) {    /* no clock edge */
        return;
    }
    if (!(E1000_EECD_SK & val)) {               /* falling edge */
        s->eecd_state.bitnum_out++;
        return;
    }
    /* rising clock edge: shift the DI bit into the command register */
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        /* full 9-bit command received: low 6 bits are the word address;
         * the -1 compensates for the falling-edge increment that occurs
         * before the first data bit is read back */
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}
543 
544 static uint32_t
545 flash_eerd_read(E1000State *s, int x)
546 {
547     unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;
548 
549     if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
550         return (s->mac_reg[EERD]);
551 
552     if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
553         return (E1000_EEPROM_RW_REG_DONE | r);
554 
555     return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
556            E1000_EEPROM_RW_REG_DONE | r);
557 }
558 
/*
 * Compute an Internet checksum over data[css..n) and store it big-endian
 * at data[sloc].  A non-zero cse limits the checksummed range to byte
 * cse inclusive; nothing is written when the store location would fall
 * at or beyond the end of the checksummed range.
 */
static void
putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
{
    uint32_t sum;

    if (cse && cse < n)
        n = cse + 1;
    if (sloc < n-1) {
        sum = net_checksum_add(n-css, data+css);
        stw_be_p(data + sloc, net_checksum_finish(sum));
    }
}
571 
572 static inline int
573 vlan_enabled(E1000State *s)
574 {
575     return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
576 }
577 
578 static inline int
579 vlan_rx_filter_enabled(E1000State *s)
580 {
581     return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
582 }
583 
584 static inline int
585 is_vlan_packet(E1000State *s, const uint8_t *buf)
586 {
587     return (be16_to_cpup((uint16_t *)(buf + 12)) ==
588                 le16_to_cpu(s->mac_reg[VET]));
589 }
590 
591 static inline int
592 is_vlan_txd(uint32_t txd_lower)
593 {
594     return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
595 }
596 
597 /* FCS aka Ethernet CRC-32. We don't get it from backends and can't
598  * fill it in, just pad descriptor length by 4 bytes unless guest
599  * told us to strip it off the packet. */
600 static inline int
601 fcs_len(E1000State *s)
602 {
603     return (s->mac_reg[RCTL] & E1000_RCTL_SECRC) ? 0 : 4;
604 }
605 
606 static void
607 e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
608 {
609     NetClientState *nc = qemu_get_queue(s->nic);
610     if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
611         nc->info->receive(nc, buf, size);
612     } else {
613         qemu_send_packet(nc, buf, size);
614     }
615 }
616 
/*
 * Finalize and transmit the frame currently assembled in s->tx.data.
 * For an active TSO packet this patches the per-segment IP total length
 * and identification (or IPv6 payload length), advances the TCP
 * sequence number, masks PSH/FIN on all but the last segment and folds
 * the segment length into the pseudo-header checksum.  Requested
 * IP/TCP/UDP checksums are then inserted, the VLAN tag is prepended if
 * needed, and transmit statistics are updated.
 */
static void
xmit_seg(E1000State *s)
{
    uint16_t len, *sp;
    unsigned int frames = s->tx.tso_frames, css, sofar, n;
    struct e1000_tx *tp = &s->tx;

    if (tp->tse && tp->cptse) {
        css = tp->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (tp->ip) {    /* IPv4 */
            /* total length, then IP id bumped once per segment */
            stw_be_p(tp->data+css+2, tp->size - css);
            stw_be_p(tp->data+css+4,
                     be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
        } else {         /* IPv6 */
            /* payload length field */
            stw_be_p(tp->data+css+4, tp->size - css);
        }
        css = tp->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
        if (tp->tcp) {
            /* advance the sequence number by the payload already sent */
            sofar = frames * tp->mss;
            stl_be_p(tp->data+css+4, ldl_be_p(tp->data+css+4)+sofar); /* seq */
            /* clear PSH and FIN on every segment but the last */
            if (tp->paylen - sofar > tp->mss)
                tp->data[css + 13] &= ~9;    /* PSH, FIN */
        } else    /* UDP */
            stw_be_p(tp->data+css+4, len);
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            unsigned int phsum;
            // add pseudo-header length before checksum calculation
            sp = (uint16_t *)(tp->data + tp->tucso);
            phsum = be16_to_cpup(sp) + len;
            phsum = (phsum >> 16) + (phsum & 0xffff);   /* fold the carry */
            stw_be_p(sp, phsum);
        }
        tp->tso_frames++;
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
        putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
        putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
    if (tp->vlan_needed) {
        /* vlan[] immediately precedes data[]; shifting the MAC addresses
         * back by 4 bytes opens room for the tag so the tagged frame can
         * be sent contiguously starting at vlan */
        memmove(tp->vlan, tp->data, 4);
        memmove(tp->data, tp->data + 4, 8);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        e1000_send_packet(s, tp->vlan, tp->size + 4);
    } else {
        e1000_send_packet(s, tp->data, tp->size);
    }

    /* statistics: packet counters and the 64-bit octet counter TOTH:TOTL */
    s->mac_reg[TPT]++;
    s->mac_reg[GPTC]++;
    n = s->mac_reg[TOTL];
    if ((s->mac_reg[TOTL] += s->tx.size) < n)
        s->mac_reg[TOTH]++;     /* carry into the high word */
}
675 
/*
 * Process one transmit descriptor.  A context descriptor only latches
 * checksum/TSO parameters into s->tx.  A data (or legacy) descriptor
 * DMAs its buffer into s->tx.data; while TSO is active the payload is
 * split into segments of hdr_len+mss bytes, each emitted via
 * xmit_seg() with the saved header re-prepended.  On EOP the final
 * frame is sent and the per-packet state is cleared.
 */
static void
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
    unsigned int msh = 0xfffff;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    s->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE);
    if (dtype == E1000_TXD_CMD_DEXT) {    /* context descriptor */
        /* latch offload parameters for the following data descriptors */
        op = le32_to_cpu(xp->cmd_and_length);
        tp->ipcss = xp->lower_setup.ip_fields.ipcss;
        tp->ipcso = xp->lower_setup.ip_fields.ipcso;
        tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
        tp->tucss = xp->upper_setup.tcp_fields.tucss;
        tp->tucso = xp->upper_setup.tcp_fields.tucso;
        tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
        tp->paylen = op & 0xfffff;
        tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
        tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
        tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
        tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
        tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
        tp->tso_frames = 0;
        if (tp->tucso == 0) {    /* this is probably wrong */
            DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
            tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
        }
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        // data descriptor
        if (tp->size == 0) {
            /* first descriptor of a packet carries the POPTS bits */
            tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        }
        tp->cptse = ( txd_lower & E1000_TXD_CMD_TSE ) ? 1 : 0;
    } else {
        // legacy descriptor
        tp->cptse = 0;
    }

    /* build the 802.1Q tag from VET and the descriptor's special field */
    if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        tp->vlan_needed = 1;
        stw_be_p(tp->vlan_header,
                      le16_to_cpu(s->mac_reg[VET]));
        stw_be_p(tp->vlan_header + 2,
                      le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->tse && tp->cptse) {
        /* msh is the maximum bytes a segment may hold: header + mss */
        msh = tp->hdr_len + tp->mss;
        do {
            bytes = split_size;
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;

            /* never overflow the assembly buffer */
            bytes = MIN(sizeof(tp->data) - tp->size, bytes);
            pci_dma_read(d, addr, tp->data + tp->size, bytes);
            sz = tp->size + bytes;
            /* save the header once it is fully present in data[] */
            if (sz >= tp->hdr_len && tp->size < tp->hdr_len) {
                memmove(tp->header, tp->data, tp->hdr_len);
            }
            tp->size = sz;
            addr += bytes;
            if (sz == msh) {
                /* segment full: send it and start the next one with a
                 * fresh copy of the saved header */
                xmit_seg(s);
                memmove(tp->data, tp->header, tp->hdr_len);
                tp->size = tp->hdr_len;
            }
            split_size -= bytes;
        } while (bytes && split_size);
    } else if (!tp->tse && tp->cptse) {
        // context descriptor TSE is not set, while data descriptor TSE is set
        DBGOUT(TXERR, "TCP segmentation error\n");
    } else {
        /* non-TSO: append the whole buffer, clamped to the assembly buffer */
        split_size = MIN(sizeof(tp->data) - tp->size, split_size);
        pci_dma_read(d, addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

    if (!(txd_lower & E1000_TXD_CMD_EOP))
        return;
    /* on EOP, send the remainder unless only the (already-sent) TSO
     * header is left in the buffer */
    if (!(tp->tse && tp->cptse && tp->size < tp->hdr_len)) {
        xmit_seg(s);
    }
    tp->tso_frames = 0;
    tp->sum_needed = 0;
    tp->vlan_needed = 0;
    tp->size = 0;
    tp->cptse = 0;
}
772 
/*
 * Write back transmit descriptor status: if the descriptor requested a
 * status report (RS/RPS), set Descriptor Done in its upper dword and
 * DMA just that dword back into the ring.  Returns the TXDW interrupt
 * cause to accumulate, or 0 when no write-back was requested.
 */
static uint32_t
txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
        return 0;
    /* set DD; clear error bits this emulation never reports */
    txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
                ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
    dp->upper.data = cpu_to_le32(txd_upper);
    /* write only the status dword, at its offset within the descriptor */
    pci_dma_write(d, base + ((char *)&dp->upper - (char *)dp),
                  &dp->upper, sizeof(dp->upper));
    return E1000_ICR_TXDW;
}
788 
789 static uint64_t tx_desc_base(E1000State *s)
790 {
791     uint64_t bah = s->mac_reg[TDBAH];
792     uint64_t bal = s->mac_reg[TDBAL] & ~0xf;
793 
794     return (bah << 32) + bal;
795 }
796 
/*
 * Drain the transmit ring: walk descriptors from TDH towards TDT,
 * processing and writing back each one, then raise the accumulated
 * interrupt causes.  Does nothing while transmit is disabled, and bails
 * out if TDH wraps all the way around (bogus guest TDT/TDLEN values).
 */
static void
start_xmit(E1000State *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        /* fetch the descriptor at the current head */
        base = tx_desc_base(s) +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        pci_dma_read(d, base, &desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(s, base, &desc);

        /* advance the head, wrapping at the end of the ring */
        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    set_ics(s, 0, cause);
}
837 
/*
 * Decide whether to accept a received frame.  Checks, in order: the
 * VLAN filter table (VFTA) when VLAN filtering is on, unicast and
 * multicast promiscuous modes, broadcast acceptance, exact unicast
 * match against the valid RAL/RAH address pairs, and finally the
 * 4096-bit multicast table array.  Returns 1 to accept, 0 to drop.
 */
static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const int mta_shift[] = {4, 3, 2, 0};
    uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;

    if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
        /* look up the VLAN id's bit in the 128-word filter table */
        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
        uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
                                     ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0)
            return 0;
    }

    if (rctl & E1000_RCTL_UPE)          // promiscuous
        return 1;

    if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE))    // promiscuous mcast
        return 1;

    if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
        return 1;

    /* exact unicast match against receive-address pairs with AV set */
    for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
        if (!(rp[1] & E1000_RAH_AV))
            continue;
        ra[0] = cpu_to_le32(rp[0]);
        ra[1] = cpu_to_le32(rp[1]);
        if (!memcmp(buf, (uint8_t *)ra, 6)) {
            DBGOUT(RXFILTER,
                   "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
                   (int)(rp - s->mac_reg - RA)/2,
                   buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
            return 1;
        }
    }
    DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);

    /* RCTL.MO selects which 12-bit window of the upper destination
     * address bytes indexes the multicast table array */
    f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
    f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
    if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
        return 1;
    DBGOUT(RXFILTER,
           "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
           (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
           s->mac_reg[MTA + (f >> 5)]);

    return 0;
}
890 
891 static void
892 e1000_set_link_status(NetClientState *nc)
893 {
894     E1000State *s = qemu_get_nic_opaque(nc);
895     uint32_t old_status = s->mac_reg[STATUS];
896 
897     if (nc->link_down) {
898         e1000_link_down(s);
899     } else {
900         if (have_autoneg(s) &&
901             !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
902             /* emulate auto-negotiation if supported */
903             timer_mod(s->autoneg_timer,
904                       qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
905         } else {
906             e1000_link_up(s);
907         }
908     }
909 
910     if (s->mac_reg[STATUS] != old_status)
911         set_ics(s, 0, E1000_ICR_LSC);
912 }
913 
914 static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
915 {
916     int bufs;
917     /* Fast-path short packets */
918     if (total_size <= s->rxbuf_size) {
919         return s->mac_reg[RDH] != s->mac_reg[RDT];
920     }
921     if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
922         bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
923     } else if (s->mac_reg[RDH] > s->mac_reg[RDT]) {
924         bufs = s->mac_reg[RDLEN] /  sizeof(struct e1000_rx_desc) +
925             s->mac_reg[RDT] - s->mac_reg[RDH];
926     } else {
927         return false;
928     }
929     return total_size <= bufs * s->rxbuf_size;
930 }
931 
932 static int
933 e1000_can_receive(NetClientState *nc)
934 {
935     E1000State *s = qemu_get_nic_opaque(nc);
936 
937     return (s->mac_reg[STATUS] & E1000_STATUS_LU) &&
938         (s->mac_reg[RCTL] & E1000_RCTL_EN) &&
939         (s->parent_obj.config[PCI_COMMAND] & PCI_COMMAND_MASTER) &&
940         e1000_has_rxbufs(s, 1);
941 }
942 
943 static uint64_t rx_desc_base(E1000State *s)
944 {
945     uint64_t bah = s->mac_reg[RDBAH];
946     uint64_t bal = s->mac_reg[RDBAL] & ~0xf;
947 
948     return (bah << 32) + bal;
949 }
950 
/*
 * Receive path: DMA an incoming frame (given as a scatter/gather
 * iovec) into the guest's receive descriptor ring.  Handles padding
 * to the minimum Ethernet frame length, oversize filtering, address
 * filtering, VLAN tag stripping, and receive statistics.
 * Returns the number of bytes consumed, or -1 to signal the net core
 * to queue the packet and retry later.
 */
static ssize_t
e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    struct e1000_rx_desc desc;
    dma_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0;
    uint8_t min_buf[MIN_BUF_SIZE];
    struct iovec min_iov;
    uint8_t *filter_buf = iov->iov_base;
    size_t size = iov_size(iov, iovcnt);
    size_t iov_ofs = 0;
    size_t desc_offset;
    size_t desc_size;
    size_t total_size;

    /* Drop everything while the link is down or rx is disabled. */
    if (!(s->mac_reg[STATUS] & E1000_STATUS_LU)) {
        return -1;
    }

    if (!(s->mac_reg[RCTL] & E1000_RCTL_EN)) {
        return -1;
    }

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(min_buf)) {
        iov_to_buf(iov, iovcnt, 0, min_buf, size);
        memset(&min_buf[size], 0, sizeof(min_buf) - size);
        min_iov.iov_base = filter_buf = min_buf;
        min_iov.iov_len = size = sizeof(min_buf);
        iovcnt = 1;
        iov = &min_iov;
    } else if (iov->iov_len < MAXIMUM_ETHERNET_HDR_LEN) {
        /* This is very unlikely, but may happen. */
        iov_to_buf(iov, iovcnt, 0, min_buf, MAXIMUM_ETHERNET_HDR_LEN);
        filter_buf = min_buf;
    }

    /* Discard oversized packets if !LPE and !SBP. */
    if ((size > MAXIMUM_ETHERNET_LPE_SIZE ||
        (size > MAXIMUM_ETHERNET_VLAN_SIZE
        && !(s->mac_reg[RCTL] & E1000_RCTL_LPE)))
        && !(s->mac_reg[RCTL] & E1000_RCTL_SBP)) {
        return size;
    }

    /* Packets rejected by the filters count as consumed. */
    if (!receive_filter(s, filter_buf, size)) {
        return size;
    }

    if (vlan_enabled(s) && is_vlan_packet(s, filter_buf)) {
        /* Strip the 4-byte 802.1Q tag: remember the TCI for the
         * descriptor's "special" field, then shift the 12-byte
         * MAC header forward over the tag. */
        vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(filter_buf
                                                                + 14)));
        iov_ofs = 4;
        if (filter_buf == iov->iov_base) {
            memmove(filter_buf + 4, filter_buf, 12);
        } else {
            iov_from_buf(iov, iovcnt, 4, filter_buf, 12);
            /* Skip iovec elements wholly consumed by the 4-byte offset. */
            while (iov->iov_len <= iov_ofs) {
                iov_ofs -= iov->iov_len;
                iov++;
            }
        }
        vlan_status = E1000_RXD_STAT_VP;
        size -= 4;
    }

    rdh_start = s->mac_reg[RDH];
    desc_offset = 0;
    /* total_size additionally covers the (optional) trailing FCS. */
    total_size = size + fcs_len(s);
    if (!e1000_has_rxbufs(s, total_size)) {
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
    }
    /* Walk the descriptor ring, filling one rx buffer per iteration. */
    do {
        desc_size = total_size - desc_offset;
        if (desc_size > s->rxbuf_size) {
            desc_size = s->rxbuf_size;
        }
        base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
        pci_dma_read(d, base, &desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        if (desc.buffer_addr) {
            if (desc_offset < size) {
                size_t iov_copy;
                hwaddr ba = le64_to_cpu(desc.buffer_addr);
                size_t copy_size = size - desc_offset;
                if (copy_size > s->rxbuf_size) {
                    copy_size = s->rxbuf_size;
                }
                /* Copy from the iovec into the guest buffer, advancing
                 * through iovec elements as each is exhausted. */
                do {
                    iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);
                    pci_dma_write(d, ba, iov->iov_base + iov_ofs, iov_copy);
                    copy_size -= iov_copy;
                    ba += iov_copy;
                    iov_ofs += iov_copy;
                    if (iov_ofs == iov->iov_len) {
                        iov++;
                        iov_ofs = 0;
                    }
                } while (copy_size);
            }
            desc_offset += desc_size;
            desc.length = cpu_to_le16(desc_size);
            if (desc_offset >= total_size) {
                desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
            } else {
                /* Guest zeroing out status is not a hardware requirement.
                   Clear EOP in case guest didn't do it. */
                desc.status &= ~E1000_RXD_STAT_EOP;
            }
        } else { // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        }
        /* Write the updated descriptor back to the guest. */
        pci_dma_write(d, base, &desc, sizeof(desc));

        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
        }
    } while (desc_offset < total_size);

    /* Update receive statistics counters. */
    s->mac_reg[GPRC]++;
    s->mac_reg[TPR]++;
    /* TOR - Total Octets Received:
     * This register includes bytes received in a packet from the <Destination
     * Address> field through the <CRC> field, inclusively.
     */
    n = s->mac_reg[TORL] + size + /* Always include FCS length. */ 4;
    if (n < s->mac_reg[TORL])
        s->mac_reg[TORH]++;
    s->mac_reg[TORL] = n;

    /* Raise RXT0, plus RXDMT0 when the ring drops below the
     * minimum-threshold watermark. */
    n = E1000_ICS_RXT0;
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);

    return size;
}
1105 
1106 static ssize_t
1107 e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size)
1108 {
1109     const struct iovec iov = {
1110         .iov_base = (uint8_t *)buf,
1111         .iov_len = size
1112     };
1113 
1114     return e1000_receive_iov(nc, &iov, 1);
1115 }
1116 
1117 static uint32_t
1118 mac_readreg(E1000State *s, int index)
1119 {
1120     return s->mac_reg[index];
1121 }
1122 
1123 static uint32_t
1124 mac_icr_read(E1000State *s, int index)
1125 {
1126     uint32_t ret = s->mac_reg[ICR];
1127 
1128     DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
1129     set_interrupt_cause(s, 0, 0);
1130     return ret;
1131 }
1132 
1133 static uint32_t
1134 mac_read_clr4(E1000State *s, int index)
1135 {
1136     uint32_t ret = s->mac_reg[index];
1137 
1138     s->mac_reg[index] = 0;
1139     return ret;
1140 }
1141 
1142 static uint32_t
1143 mac_read_clr8(E1000State *s, int index)
1144 {
1145     uint32_t ret = s->mac_reg[index];
1146 
1147     s->mac_reg[index] = 0;
1148     s->mac_reg[index-1] = 0;
1149     return ret;
1150 }
1151 
1152 static void
1153 mac_writereg(E1000State *s, int index, uint32_t val)
1154 {
1155     uint32_t macaddr[2];
1156 
1157     s->mac_reg[index] = val;
1158 
1159     if (index == RA + 1) {
1160         macaddr[0] = cpu_to_le32(s->mac_reg[RA]);
1161         macaddr[1] = cpu_to_le32(s->mac_reg[RA + 1]);
1162         qemu_format_nic_info_str(qemu_get_queue(s->nic), (uint8_t *)macaddr);
1163     }
1164 }
1165 
1166 static void
1167 set_rdt(E1000State *s, int index, uint32_t val)
1168 {
1169     s->mac_reg[index] = val & 0xffff;
1170     if (e1000_has_rxbufs(s, 1)) {
1171         qemu_flush_queued_packets(qemu_get_queue(s->nic));
1172     }
1173 }
1174 
1175 static void
1176 set_16bit(E1000State *s, int index, uint32_t val)
1177 {
1178     s->mac_reg[index] = val & 0xffff;
1179 }
1180 
1181 static void
1182 set_dlen(E1000State *s, int index, uint32_t val)
1183 {
1184     s->mac_reg[index] = val & 0xfff80;
1185 }
1186 
1187 static void
1188 set_tctl(E1000State *s, int index, uint32_t val)
1189 {
1190     s->mac_reg[index] = val;
1191     s->mac_reg[TDT] &= 0xffff;
1192     start_xmit(s);
1193 }
1194 
1195 static void
1196 set_icr(E1000State *s, int index, uint32_t val)
1197 {
1198     DBGOUT(INTERRUPT, "set_icr %x\n", val);
1199     set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
1200 }
1201 
1202 static void
1203 set_imc(E1000State *s, int index, uint32_t val)
1204 {
1205     s->mac_reg[IMS] &= ~val;
1206     set_ics(s, 0, 0);
1207 }
1208 
1209 static void
1210 set_ims(E1000State *s, int index, uint32_t val)
1211 {
1212     s->mac_reg[IMS] |= val;
1213     set_ics(s, 0, 0);
1214 }
1215 
#define getreg(x)    [x] = mac_readreg
/*
 * MMIO read dispatch table, indexed by (register offset >> 2).
 * NULL entries are treated as unknown registers and read as 0.
 */
static uint32_t (*macreg_readops[])(E1000State *, int) = {
    getreg(PBA),      getreg(RCTL),     getreg(TDH),      getreg(TXDCTL),
    getreg(WUFC),     getreg(TDT),      getreg(CTRL),     getreg(LEDCTL),
    getreg(MANC),     getreg(MDIC),     getreg(SWSM),     getreg(STATUS),
    getreg(TORL),     getreg(TOTL),     getreg(IMS),      getreg(TCTL),
    getreg(RDH),      getreg(RDT),      getreg(VET),      getreg(ICS),
    getreg(TDBAL),    getreg(TDBAH),    getreg(RDBAH),    getreg(RDBAL),
    getreg(TDLEN),    getreg(RDLEN),    getreg(RDTR),     getreg(RADV),
    getreg(TADV),     getreg(ITR),

    /* Registers whose reads have side effects (read-to-clear
     * statistics counters, ICR acknowledge, EEPROM access). */
    [TOTH]    = mac_read_clr8,      [TORH]    = mac_read_clr8,
    [GPRC]    = mac_read_clr4,      [GPTC]    = mac_read_clr4,
    [TPT]     = mac_read_clr4,      [TPR]     = mac_read_clr4,
    [ICR]     = mac_icr_read,       [EECD]    = get_eecd,
    [EERD]    = flash_eerd_read,

    /* Plain-read register ranges (statistics, address/VLAN tables). */
    [CRCERRS ... MPC]   = &mac_readreg,
    [RA ... RA+31]      = &mac_readreg,
    [MTA ... MTA+127]   = &mac_readreg,
    [VFTA ... VFTA+127] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
1239 
#define putreg(x)    [x] = mac_writereg
/*
 * MMIO write dispatch table, indexed by (register offset >> 2).
 * NULL entries are either read-only or unimplemented registers.
 */
static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
    putreg(PBA),      putreg(EERD),     putreg(SWSM),     putreg(WUFC),
    putreg(TDBAL),    putreg(TDBAH),    putreg(TXDCTL),   putreg(RDBAH),
    putreg(RDBAL),    putreg(LEDCTL),   putreg(VET),

    /* Registers whose writes trigger side effects (masking, ring
     * kicks, EEPROM/PHY access, packet flushing). */
    [TDLEN]  = set_dlen,   [RDLEN]  = set_dlen,       [TCTL] = set_tctl,
    [TDT]    = set_tctl,   [MDIC]   = set_mdic,       [ICS]  = set_ics,
    [TDH]    = set_16bit,  [RDH]    = set_16bit,      [RDT]  = set_rdt,
    [IMC]    = set_imc,    [IMS]    = set_ims,        [ICR]  = set_icr,
    [EECD]   = set_eecd,   [RCTL]   = set_rx_control, [CTRL] = set_ctrl,
    [RDTR]   = set_16bit,  [RADV]   = set_16bit,      [TADV] = set_16bit,
    [ITR]    = set_16bit,

    /* Plain-write register ranges (address/multicast/VLAN tables). */
    [RA ... RA+31]      = &mac_writereg,
    [MTA ... MTA+127]   = &mac_writereg,
    [VFTA ... VFTA+127] = &mac_writereg,
};

enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
1260 
1261 static void
1262 e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val,
1263                  unsigned size)
1264 {
1265     E1000State *s = opaque;
1266     unsigned int index = (addr & 0x1ffff) >> 2;
1267 
1268     if (index < NWRITEOPS && macreg_writeops[index]) {
1269         macreg_writeops[index](s, index, val);
1270     } else if (index < NREADOPS && macreg_readops[index]) {
1271         DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n", index<<2, val);
1272     } else {
1273         DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
1274                index<<2, val);
1275     }
1276 }
1277 
1278 static uint64_t
1279 e1000_mmio_read(void *opaque, hwaddr addr, unsigned size)
1280 {
1281     E1000State *s = opaque;
1282     unsigned int index = (addr & 0x1ffff) >> 2;
1283 
1284     if (index < NREADOPS && macreg_readops[index])
1285     {
1286         return macreg_readops[index](s, index);
1287     }
1288     DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
1289     return 0;
1290 }
1291 
/* Register space access ops: implementation handles 32-bit
 * little-endian accesses only. */
static const MemoryRegionOps e1000_mmio_ops = {
    .read = e1000_mmio_read,
    .write = e1000_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
1301 
1302 static uint64_t e1000_io_read(void *opaque, hwaddr addr,
1303                               unsigned size)
1304 {
1305     E1000State *s = opaque;
1306 
1307     (void)s;
1308     return 0;
1309 }
1310 
1311 static void e1000_io_write(void *opaque, hwaddr addr,
1312                            uint64_t val, unsigned size)
1313 {
1314     E1000State *s = opaque;
1315 
1316     (void)s;
1317 }
1318 
/* I/O BAR access ops: accepted but without effect (see stubs above). */
static const MemoryRegionOps e1000_io_ops = {
    .read = e1000_io_read,
    .write = e1000_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
1324 
/* Migration predicate: true only for version-1 streams (used below to
 * skip a field that only version 1 carried). */
static bool is_version_1(void *opaque, int version_id)
{
    return version_id == 1;
}
1329 
1330 static void e1000_pre_save(void *opaque)
1331 {
1332     E1000State *s = opaque;
1333     NetClientState *nc = qemu_get_queue(s->nic);
1334 
1335     /* If the mitigation timer is active, emulate a timeout now. */
1336     if (s->mit_timer_on) {
1337         e1000_mit_timer(s);
1338     }
1339 
1340     /*
1341      * If link is down and auto-negotiation is supported and ongoing,
1342      * complete auto-negotiation immediately. This allows us to look
1343      * at MII_SR_AUTONEG_COMPLETE to infer link status on load.
1344      */
1345     if (nc->link_down && have_autoneg(s)) {
1346         s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
1347     }
1348 }
1349 
1350 static int e1000_post_load(void *opaque, int version_id)
1351 {
1352     E1000State *s = opaque;
1353     NetClientState *nc = qemu_get_queue(s->nic);
1354 
1355     if (!(s->compat_flags & E1000_FLAG_MIT)) {
1356         s->mac_reg[ITR] = s->mac_reg[RDTR] = s->mac_reg[RADV] =
1357             s->mac_reg[TADV] = 0;
1358         s->mit_irq_level = false;
1359     }
1360     s->mit_ide = 0;
1361     s->mit_timer_on = false;
1362 
1363     /* nc.link_down can't be migrated, so infer link_down according
1364      * to link status bit in mac_reg[STATUS].
1365      * Alternatively, restart link negotiation if it was in progress. */
1366     nc->link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;
1367 
1368     if (have_autoneg(s) &&
1369         !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
1370         nc->link_down = false;
1371         timer_mod(s->autoneg_timer,
1372                   qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
1373     }
1374 
1375     return 0;
1376 }
1377 
1378 static bool e1000_mit_state_needed(void *opaque)
1379 {
1380     E1000State *s = opaque;
1381 
1382     return s->compat_flags & E1000_FLAG_MIT;
1383 }
1384 
1385 static bool e1000_full_mac_needed(void *opaque)
1386 {
1387     E1000State *s = opaque;
1388 
1389     return s->compat_flags & E1000_FLAG_MAC;
1390 }
1391 
/* Optional migration subsection: interrupt mitigation registers and
 * the level of the mitigated IRQ (sent only when mitigation is on). */
static const VMStateDescription vmstate_e1000_mit_state = {
    .name = "e1000/mit_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = e1000_mit_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mac_reg[RDTR], E1000State),
        VMSTATE_UINT32(mac_reg[RADV], E1000State),
        VMSTATE_UINT32(mac_reg[TADV], E1000State),
        VMSTATE_UINT32(mac_reg[ITR], E1000State),
        VMSTATE_BOOL(mit_irq_level, E1000State),
        VMSTATE_END_OF_LIST()
    }
};
1406 
/* Optional migration subsection: the entire 0x8000-entry mac_reg
 * array (sent only when the full-MAC compat flag is enabled). */
static const VMStateDescription vmstate_e1000_full_mac_state = {
    .name = "e1000/full_mac_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = e1000_full_mac_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(mac_reg, E1000State, 0x8000),
        VMSTATE_END_OF_LIST()
    }
};
1417 
/*
 * Top-level migration description: PCI state, EEPROM/PHY state, the
 * in-flight TX context, and the individually-listed MAC registers.
 * Mitigation and full-MAC register state travel in the optional
 * subsections declared above.  Field order and types are ABI: do not
 * reorder or retype entries.
 */
static const VMStateDescription vmstate_e1000 = {
    .name = "e1000",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = e1000_pre_save,
    .post_load = e1000_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, E1000State),
        VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
        VMSTATE_UNUSED(4), /* Was mmio_base.  */
        VMSTATE_UINT32(rxbuf_size, E1000State),
        VMSTATE_UINT32(rxbuf_min_shift, E1000State),
        VMSTATE_UINT32(eecd_state.val_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
        VMSTATE_UINT16(eecd_state.reading, E1000State),
        VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
        VMSTATE_UINT8(tx.ipcss, E1000State),
        VMSTATE_UINT8(tx.ipcso, E1000State),
        VMSTATE_UINT16(tx.ipcse, E1000State),
        VMSTATE_UINT8(tx.tucss, E1000State),
        VMSTATE_UINT8(tx.tucso, E1000State),
        VMSTATE_UINT16(tx.tucse, E1000State),
        VMSTATE_UINT32(tx.paylen, E1000State),
        VMSTATE_UINT8(tx.hdr_len, E1000State),
        VMSTATE_UINT16(tx.mss, E1000State),
        VMSTATE_UINT16(tx.size, E1000State),
        VMSTATE_UINT16(tx.tso_frames, E1000State),
        VMSTATE_UINT8(tx.sum_needed, E1000State),
        VMSTATE_INT8(tx.ip, E1000State),
        VMSTATE_INT8(tx.tcp, E1000State),
        VMSTATE_BUFFER(tx.header, E1000State),
        VMSTATE_BUFFER(tx.data, E1000State),
        VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
        VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
        VMSTATE_UINT32(mac_reg[CTRL], E1000State),
        VMSTATE_UINT32(mac_reg[EECD], E1000State),
        VMSTATE_UINT32(mac_reg[EERD], E1000State),
        VMSTATE_UINT32(mac_reg[GPRC], E1000State),
        VMSTATE_UINT32(mac_reg[GPTC], E1000State),
        VMSTATE_UINT32(mac_reg[ICR], E1000State),
        VMSTATE_UINT32(mac_reg[ICS], E1000State),
        VMSTATE_UINT32(mac_reg[IMC], E1000State),
        VMSTATE_UINT32(mac_reg[IMS], E1000State),
        VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[MANC], E1000State),
        VMSTATE_UINT32(mac_reg[MDIC], E1000State),
        VMSTATE_UINT32(mac_reg[MPC], E1000State),
        VMSTATE_UINT32(mac_reg[PBA], E1000State),
        VMSTATE_UINT32(mac_reg[RCTL], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[RDH], E1000State),
        VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[RDT], E1000State),
        VMSTATE_UINT32(mac_reg[STATUS], E1000State),
        VMSTATE_UINT32(mac_reg[SWSM], E1000State),
        VMSTATE_UINT32(mac_reg[TCTL], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[TDH], E1000State),
        VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[TDT], E1000State),
        VMSTATE_UINT32(mac_reg[TORH], E1000State),
        VMSTATE_UINT32(mac_reg[TORL], E1000State),
        VMSTATE_UINT32(mac_reg[TOTH], E1000State),
        VMSTATE_UINT32(mac_reg[TOTL], E1000State),
        VMSTATE_UINT32(mac_reg[TPR], E1000State),
        VMSTATE_UINT32(mac_reg[TPT], E1000State),
        VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[WUFC], E1000State),
        VMSTATE_UINT32(mac_reg[VET], E1000State),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_e1000_mit_state,
        &vmstate_e1000_full_mac_state,
        NULL
    }
};
1501 
1502 /*
1503  * EEPROM contents documented in Tables 5-2 and 5-3, pp. 98-102.
1504  * Note: A valid DevId will be inserted during pci_e1000_init().
1505  */
1506 static const uint16_t e1000_eeprom_template[64] = {
1507     0x0000, 0x0000, 0x0000, 0x0000,      0xffff, 0x0000,      0x0000, 0x0000,
1508     0x3000, 0x1000, 0x6403, 0 /*DevId*/, 0x8086, 0 /*DevId*/, 0x8086, 0x3040,
1509     0x0008, 0x2000, 0x7e14, 0x0048,      0x1000, 0x00d8,      0x0000, 0x2700,
1510     0x6cc9, 0x3150, 0x0722, 0x040b,      0x0984, 0x0000,      0xc000, 0x0706,
1511     0x1008, 0x0000, 0x0f04, 0x7fff,      0x4d01, 0xffff,      0xffff, 0xffff,
1512     0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
1513     0x0100, 0x4000, 0x121c, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
1514     0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0x0000,
1515 };
1516 
1517 /* PCI interface */
1518 
1519 static void
1520 e1000_mmio_setup(E1000State *d)
1521 {
1522     int i;
1523     const uint32_t excluded_regs[] = {
1524         E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
1525         E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
1526     };
1527 
1528     memory_region_init_io(&d->mmio, OBJECT(d), &e1000_mmio_ops, d,
1529                           "e1000-mmio", PNPMMIO_SIZE);
1530     memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
1531     for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
1532         memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
1533                                      excluded_regs[i+1] - excluded_regs[i] - 4);
1534     memory_region_init_io(&d->io, OBJECT(d), &e1000_io_ops, d, "e1000-io", IOPORT_SIZE);
1535 }
1536 
1537 static void
1538 pci_e1000_uninit(PCIDevice *dev)
1539 {
1540     E1000State *d = E1000(dev);
1541 
1542     timer_del(d->autoneg_timer);
1543     timer_free(d->autoneg_timer);
1544     timer_del(d->mit_timer);
1545     timer_free(d->mit_timer);
1546     qemu_del_nic(d->nic);
1547 }
1548 
/* Callbacks wiring this device into the QEMU network core. */
static NetClientInfo net_e1000_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = e1000_can_receive,
    .receive = e1000_receive,
    .receive_iov = e1000_receive_iov,
    .link_status_changed = e1000_set_link_status,
};
1557 
1558 static void e1000_write_config(PCIDevice *pci_dev, uint32_t address,
1559                                 uint32_t val, int len)
1560 {
1561     E1000State *s = E1000(pci_dev);
1562 
1563     pci_default_write_config(pci_dev, address, val, len);
1564 
1565     if (range_covers_byte(address, len, PCI_COMMAND) &&
1566         (pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
1567         qemu_flush_queued_packets(qemu_get_queue(s->nic));
1568     }
1569 }
1570 
1571 
1572 static void pci_e1000_realize(PCIDevice *pci_dev, Error **errp)
1573 {
1574     DeviceState *dev = DEVICE(pci_dev);
1575     E1000State *d = E1000(pci_dev);
1576     PCIDeviceClass *pdc = PCI_DEVICE_GET_CLASS(pci_dev);
1577     uint8_t *pci_conf;
1578     uint16_t checksum = 0;
1579     int i;
1580     uint8_t *macaddr;
1581 
1582     pci_dev->config_write = e1000_write_config;
1583 
1584     pci_conf = pci_dev->config;
1585 
1586     /* TODO: RST# value should be 0, PCI spec 6.2.4 */
1587     pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;
1588 
1589     pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
1590 
1591     e1000_mmio_setup(d);
1592 
1593     pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);
1594 
1595     pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);
1596 
1597     memmove(d->eeprom_data, e1000_eeprom_template,
1598         sizeof e1000_eeprom_template);
1599     qemu_macaddr_default_if_unset(&d->conf.macaddr);
1600     macaddr = d->conf.macaddr.a;
1601     for (i = 0; i < 3; i++)
1602         d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
1603     d->eeprom_data[11] = d->eeprom_data[13] = pdc->device_id;
1604     for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
1605         checksum += d->eeprom_data[i];
1606     checksum = (uint16_t) EEPROM_SUM - checksum;
1607     d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;
1608 
1609     d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
1610                           object_get_typename(OBJECT(d)), dev->id, d);
1611 
1612     qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);
1613 
1614     d->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, e1000_autoneg_timer, d);
1615     d->mit_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000_mit_timer, d);
1616 }
1617 
1618 static void qdev_e1000_reset(DeviceState *dev)
1619 {
1620     E1000State *d = E1000(dev);
1621     e1000_reset(d);
1622 }
1623 
/* User-configurable properties; the two flag bits let machine types
 * disable autonegotiation and interrupt mitigation emulation for
 * backward compatibility. */
static Property e1000_properties[] = {
    DEFINE_NIC_PROPERTIES(E1000State, conf),
    DEFINE_PROP_BIT("autonegotiation", E1000State,
                    compat_flags, E1000_FLAG_AUTONEG_BIT, true),
    DEFINE_PROP_BIT("mitigation", E1000State,
                    compat_flags, E1000_FLAG_MIT_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};
1632 
/* Per-model identification data used when registering each variant. */
typedef struct E1000Info {
    const char *name;       /* QOM type name */
    uint16_t   device_id;   /* PCI device id */
    uint8_t    revision;    /* PCI revision id */
    uint16_t   phy_id2;     /* value for PHY identifier register 2 */
} E1000Info;
1639 
1640 static void e1000_class_init(ObjectClass *klass, void *data)
1641 {
1642     DeviceClass *dc = DEVICE_CLASS(klass);
1643     PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1644     E1000BaseClass *e = E1000_DEVICE_CLASS(klass);
1645     const E1000Info *info = data;
1646 
1647     k->realize = pci_e1000_realize;
1648     k->exit = pci_e1000_uninit;
1649     k->romfile = "efi-e1000.rom";
1650     k->vendor_id = PCI_VENDOR_ID_INTEL;
1651     k->device_id = info->device_id;
1652     k->revision = info->revision;
1653     e->phy_id2 = info->phy_id2;
1654     k->class_id = PCI_CLASS_NETWORK_ETHERNET;
1655     set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
1656     dc->desc = "Intel Gigabit Ethernet";
1657     dc->reset = qdev_e1000_reset;
1658     dc->vmsd = &vmstate_e1000;
1659     dc->props = e1000_properties;
1660 }
1661 
1662 static void e1000_instance_init(Object *obj)
1663 {
1664     E1000State *n = E1000(obj);
1665     device_add_bootindex_property(obj, &n->conf.bootindex,
1666                                   "bootindex", "/ethernet-phy@0",
1667                                   DEVICE(n), NULL);
1668 }
1669 
/* Abstract QOM base type shared by all emulated e1000 variants. */
static const TypeInfo e1000_base_info = {
    .name          = TYPE_E1000_BASE,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(E1000State),
    .instance_init = e1000_instance_init,
    .class_size    = sizeof(E1000BaseClass),
    .abstract      = true,
};
1678 
/* Supported adapter models; each entry becomes a concrete QOM type
 * registered by e1000_register_types(). */
static const E1000Info e1000_devices[] = {
    {
        .name      = "e1000",
        .device_id = E1000_DEV_ID_82540EM,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
    {
        .name      = "e1000-82544gc",
        .device_id = E1000_DEV_ID_82544GC_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_82544x,
    },
    {
        .name      = "e1000-82545em",
        .device_id = E1000_DEV_ID_82545EM_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
};
1699 
1700 static void e1000_register_types(void)
1701 {
1702     int i;
1703 
1704     type_register_static(&e1000_base_info);
1705     for (i = 0; i < ARRAY_SIZE(e1000_devices); i++) {
1706         const E1000Info *info = &e1000_devices[i];
1707         TypeInfo type_info = {};
1708 
1709         type_info.name = info->name;
1710         type_info.parent = TYPE_E1000_BASE;
1711         type_info.class_data = (void *)info;
1712         type_info.class_init = e1000_class_init;
1713         type_info.instance_init = e1000_instance_init;
1714 
1715         type_register(&type_info);
1716     }
1717 }
1718 
1719 type_init(e1000_register_types)
1720