VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp@ 44540

Last change on this file since 44540 was 44540, checked in by vboxsync, 12 years ago

DevE1000.cpp: Some more cleanups.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 312.3 KB
Line 
1/* $Id: DevE1000.cpp 44540 2013-02-05 13:02:18Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2013 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#define LOG_GROUP LOG_GROUP_DEV_E1000
32#include <iprt/crc.h>
33#include <iprt/ctype.h>
34#include <iprt/net.h>
35#include <iprt/semaphore.h>
36#include <iprt/string.h>
37#include <iprt/time.h>
38#include <iprt/uuid.h>
39#include <VBox/vmm/pdmdev.h>
40#include <VBox/vmm/pdmnetifs.h>
41#include <VBox/vmm/pdmnetinline.h>
42#include <VBox/param.h>
43#include "VBoxDD.h"
44
45#include "DevEEPROM.h"
46#include "DevE1000Phy.h"
47
48
49/* Options *******************************************************************/
50/** @def E1K_INIT_RA0
51 * E1K_INIT_RA0 forces E1000 to set the first entry in Receive Address filter
52 * table to MAC address obtained from CFGM. Most guests read MAC address from
53 * EEPROM and write it to RA[0] explicitly, but Mac OS X seems to depend on it
54 * being already set (see @bugref{4657}).
55 */
56#define E1K_INIT_RA0
57/** @def E1K_LSC_ON_SLU
58 * E1K_LSC_ON_SLU causes E1000 to generate Link Status Change interrupt when
59 * the guest driver brings up the link via STATUS.LU bit. Again the only guest
60 * that requires it is Mac OS X (see @bugref{4657}).
61 */
62#define E1K_LSC_ON_SLU
63/** @def E1K_ITR_ENABLED
64 * E1K_ITR_ENABLED reduces the number of interrupts generated by E1000 if a
65 * guest driver requested it by writing non-zero value to the Interrupt
66 * Throttling Register (see section 13.4.18 in "8254x Family of Gigabit
67 * Ethernet Controllers Software Developer’s Manual").
68 */
69//#define E1K_ITR_ENABLED
70/** @def E1K_TX_DELAY
71 * E1K_TX_DELAY aims to improve guest-host transfer rate for TCP streams by
72 * preventing packets to be sent immediately. It allows to send several
73 * packets in a batch reducing the number of acknowledgments. Note that it
74 * effectively disables R0 TX path, forcing sending in R3.
75 */
76//#define E1K_TX_DELAY 150
77/** @def E1K_USE_TX_TIMERS
78 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
79 * guest driver set the delays via the Transmit Interrupt Delay Value (TIDV)
80 * register. Enabling it showed no positive effects on existing guests so it
81 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
82 * Ethernet Controllers Software Developer’s Manual" for more detailed
83 * explanation.
84 */
85//#define E1K_USE_TX_TIMERS
86/** @def E1K_NO_TAD
87 * E1K_NO_TAD disables one of two timers enabled by E1K_USE_TX_TIMERS, the
88 * Transmit Absolute Delay time. This timer sets the maximum time interval
89 * during which TX interrupts can be postponed (delayed). It has no effect
90 * if E1K_USE_TX_TIMERS is not defined.
91 */
92//#define E1K_NO_TAD
93/** @def E1K_REL_DEBUG
94 * E1K_REL_DEBUG enables debug logging of l1, l2, l3 in release build.
95 */
96//#define E1K_REL_DEBUG
97/** @def E1K_INT_STATS
98 * E1K_INT_STATS enables collection of internal statistics used for
99 * debugging of delayed interrupts, etc.
100 */
101//#define E1K_INT_STATS
102/** @def E1K_WITH_MSI
103 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
104 */
105//#define E1K_WITH_MSI
106/** @def E1K_WITH_TX_CS
107 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
108 */
109#define E1K_WITH_TX_CS
110/** @def E1K_WITH_TXD_CACHE
111 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
112 * single physical memory read (or two if it wraps around the end of TX
113 * descriptor ring). It is required for proper functioning of bandwidth
114 * resource control as it allows to compute exact sizes of packets prior
115 * to allocating their buffers (see @bugref{5582}).
116 */
117#define E1K_WITH_TXD_CACHE
118/** @def E1K_WITH_RXD_CACHE
119 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
120 * single physical memory read (or two if it wraps around the end of RX
121 * descriptor ring). Intel's packet driver for DOS needs this option in
122 * order to work properly (see @bugref{6217}).
123 */
124#define E1K_WITH_RXD_CACHE
125/* End of Options ************************************************************/
126
127#ifdef E1K_WITH_TXD_CACHE
128/**
129 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
130 * in the state structure. It limits the amount of descriptors loaded in one
131 * batch read. For example, Linux guest may use up to 20 descriptors per
132 * TSE packet. The largest TSE packet seen (Windows guest) was 45 descriptors.
133 */
134# define E1K_TXD_CACHE_SIZE 64u
135#endif /* E1K_WITH_TXD_CACHE */
136
137#ifdef E1K_WITH_RXD_CACHE
138/**
139 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
140 * in the state structure. It limits the amount of descriptors loaded in one
141 * batch read. For example, XP guest adds 15 RX descriptors at a time.
142 */
143# define E1K_RXD_CACHE_SIZE 16u
144#endif /* E1K_WITH_RXD_CACHE */
145
146
147/* Little helpers ************************************************************/
148#undef htons
149#undef ntohs
150#undef htonl
151#undef ntohl
152#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
153#define ntohs(x) htons(x)
154#define htonl(x) ASMByteSwapU32(x)
155#define ntohl(x) htonl(x)
156
157#ifndef DEBUG
158# ifdef E1K_REL_DEBUG
159# define DEBUG
160# define E1kLog(a) LogRel(a)
161# define E1kLog2(a) LogRel(a)
162# define E1kLog3(a) LogRel(a)
163# define E1kLogX(x, a) LogRel(a)
164//# define E1kLog3(a) do {} while (0)
165# else
166# define E1kLog(a) do {} while (0)
167# define E1kLog2(a) do {} while (0)
168# define E1kLog3(a) do {} while (0)
169# define E1kLogX(x, a) do {} while (0)
170# endif
171#else
172# define E1kLog(a) Log(a)
173# define E1kLog2(a) Log2(a)
174# define E1kLog3(a) Log3(a)
175# define E1kLogX(x, a) LogIt(LOG_INSTANCE, x, LOG_GROUP, a)
176//# define E1kLog(a) do {} while (0)
177//# define E1kLog2(a) do {} while (0)
178//# define E1kLog3(a) do {} while (0)
179#endif
180
181#if 0
182# define E1kLogRel(a) LogRel(a)
183#else
184# define E1kLogRel(a) do { } while (0)
185#endif
186
187//#undef DEBUG
188
189#define INSTANCE(pState) pState->szInstance
190#define STATE_TO_DEVINS(pState) (((E1KSTATE *)pState)->CTX_SUFF(pDevIns))
191#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
192
/** Saturating increment of a 32-bit counter register: sticks at UINT32_MAX
 * instead of wrapping. The argument is fully parenthesized so expressions
 * such as array elements expand correctly. */
#define E1K_INC_CNT32(cnt) \
do { \
    if ((cnt) < UINT32_MAX) \
        (cnt)++; \
} while (0)
198
/** Saturating addition of @a val to a 64-bit counter kept in a low/high pair
 * of 32-bit registers. On overflow the counter saturates at UINT64_MAX
 * instead of wrapping. All macro arguments are parenthesized at every use so
 * compound expressions expand safely. */
#define E1K_ADD_CNT64(cntLo, cntHi, val) \
do { \
    /* Compose the 64-bit value from the register pair (open-coded RT_MAKE_U64). */ \
    uint64_t u64Cnt = ((uint64_t)(cntHi) << 32) | (uint32_t)(cntLo); \
    uint64_t tmp = u64Cnt; \
    u64Cnt += (val); \
    if (tmp > u64Cnt) /* wrapped around => saturate */ \
        u64Cnt = UINT64_MAX; \
    (cntLo) = (uint32_t)u64Cnt; \
    (cntHi) = (uint32_t)(u64Cnt >> 32); \
} while (0)
209
210#ifdef E1K_INT_STATS
211# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
212#else /* E1K_INT_STATS */
213# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
214#endif /* E1K_INT_STATS */
215
216
217/*****************************************************************************/
218
/** Chip model selector, one of the E1K_CHIP_* indices into g_Chips. */
typedef uint32_t E1KCHIP;
#define E1K_CHIP_82540EM 0
#define E1K_CHIP_82543GC 1
#define E1K_CHIP_82545EM 2

/** Different E1000 chips. */
static const struct E1kChips
{
    uint16_t uPCIVendorId;          /**< PCI vendor ID (always Intel, 0x8086). */
    uint16_t uPCIDeviceId;          /**< PCI device ID identifying the chip model. */
    uint16_t uPCISubsystemVendorId; /**< PCI subsystem vendor ID. */
    uint16_t uPCISubsystemId;       /**< PCI subsystem ID. */
    const char *pcszName;           /**< Human-readable chip name. */
} g_Chips[] =
{
    /* Vendor Device SSVendor SubSys Name */
    { 0x8086,
    /* Temporary code, as MSI-aware driver dislike 0x100E. How to do that right? */
#ifdef E1K_WITH_MSI
      0x105E,
#else
      0x100E,
#endif
      0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
    { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
    { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
};
246
247
248/* The size of register area mapped to I/O space */
249#define E1K_IOPORT_SIZE 0x8
250/* The size of memory-mapped register area */
251#define E1K_MM_SIZE 0x20000
252
253#define E1K_MAX_TX_PKT_SIZE 16288
254#define E1K_MAX_RX_PKT_SIZE 16384
255
256/*****************************************************************************/
257
/** Gets the specified bit field from the register. */
#define GET_BITS(reg, bits) (((reg) & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
/** Gets the specified bit field from the given value. */
#define GET_BITS_V(val, reg, bits) (((val) & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
/** Shifts a field value into its position within the register.
 * @note bitval is parenthesized: without it, e.g. BITS(R, F, a|b) would
 *       mis-bind because << binds tighter than |. */
#define BITS(reg, bits, bitval) ((bitval) << reg##_##bits##_SHIFT)
/** Replaces the specified bit field of the register with bitval. */
#define SET_BITS(reg, bits, bitval) do { (reg) = ((reg) & ~reg##_##bits##_MASK) | ((bitval) << reg##_##bits##_SHIFT); } while (0)
/** Replaces the specified bit field of the given value with bitval. */
#define SET_BITS_V(val, reg, bits, bitval) do { (val) = ((val) & ~reg##_##bits##_MASK) | ((bitval) << reg##_##bits##_SHIFT); } while (0)
264
265#define CTRL_SLU UINT32_C(0x00000040)
266#define CTRL_MDIO UINT32_C(0x00100000)
267#define CTRL_MDC UINT32_C(0x00200000)
268#define CTRL_MDIO_DIR UINT32_C(0x01000000)
269#define CTRL_MDC_DIR UINT32_C(0x02000000)
270#define CTRL_RESET UINT32_C(0x04000000)
271#define CTRL_VME UINT32_C(0x40000000)
272
273#define STATUS_LU UINT32_C(0x00000002)
274#define STATUS_TXOFF UINT32_C(0x00000010)
275
276#define EECD_EE_WIRES UINT32_C(0x0F)
277#define EECD_EE_REQ UINT32_C(0x40)
278#define EECD_EE_GNT UINT32_C(0x80)
279
280#define EERD_START UINT32_C(0x00000001)
281#define EERD_DONE UINT32_C(0x00000010)
282#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
283#define EERD_DATA_SHIFT 16
284#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
285#define EERD_ADDR_SHIFT 8
286
287#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
288#define MDIC_DATA_SHIFT 0
289#define MDIC_REG_MASK UINT32_C(0x001F0000)
290#define MDIC_REG_SHIFT 16
291#define MDIC_PHY_MASK UINT32_C(0x03E00000)
292#define MDIC_PHY_SHIFT 21
293#define MDIC_OP_WRITE UINT32_C(0x04000000)
294#define MDIC_OP_READ UINT32_C(0x08000000)
295#define MDIC_READY UINT32_C(0x10000000)
296#define MDIC_INT_EN UINT32_C(0x20000000)
297#define MDIC_ERROR UINT32_C(0x40000000)
298
299#define TCTL_EN UINT32_C(0x00000002)
300#define TCTL_PSP UINT32_C(0x00000008)
301
302#define RCTL_EN UINT32_C(0x00000002)
303#define RCTL_UPE UINT32_C(0x00000008)
304#define RCTL_MPE UINT32_C(0x00000010)
305#define RCTL_LPE UINT32_C(0x00000020)
306#define RCTL_LBM_MASK UINT32_C(0x000000C0)
307#define RCTL_LBM_SHIFT 6
308#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
309#define RCTL_RDMTS_SHIFT 8
310#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
311#define RCTL_MO_MASK UINT32_C(0x00003000)
312#define RCTL_MO_SHIFT 12
313#define RCTL_BAM UINT32_C(0x00008000)
314#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
315#define RCTL_BSIZE_SHIFT 16
316#define RCTL_VFE UINT32_C(0x00040000)
317#define RCTL_CFIEN UINT32_C(0x00080000)
318#define RCTL_CFI UINT32_C(0x00100000)
319#define RCTL_BSEX UINT32_C(0x02000000)
320#define RCTL_SECRC UINT32_C(0x04000000)
321
322#define ICR_TXDW UINT32_C(0x00000001)
323#define ICR_TXQE UINT32_C(0x00000002)
324#define ICR_LSC UINT32_C(0x00000004)
325#define ICR_RXDMT0 UINT32_C(0x00000010)
326#define ICR_RXT0 UINT32_C(0x00000080)
327#define ICR_TXD_LOW UINT32_C(0x00008000)
328#define RDTR_FPD UINT32_C(0x80000000)
329
/** Accesses the Packet Buffer Allocation register through the PBAST bit-field
 * overlay below (pState must be in scope at the expansion site). */
#define PBA_st ((PBAST*)(pState->auRegs + PBA_IDX))
typedef struct
{
    unsigned rxa : 7;   /**< Receive packet buffer allocation. */
    unsigned rxa_r : 9; /**< Remaining/reserved bits of the RX allocation field — presumably read-only padding, confirm against spec. */
    unsigned txa : 16;  /**< Transmit packet buffer allocation. */
} PBAST;
AssertCompileSize(PBAST, 4);
338
339#define TXDCTL_WTHRESH_MASK 0x003F0000
340#define TXDCTL_WTHRESH_SHIFT 16
341#define TXDCTL_LWTHRESH_MASK 0xFE000000
342#define TXDCTL_LWTHRESH_SHIFT 25
343
344#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
345#define RXCSUM_PCSS_SHIFT 0
346
347/** @name Register access macros
348 * @{ */
349#define CTRL pState->auRegs[CTRL_IDX]
350#define STATUS pState->auRegs[STATUS_IDX]
351#define EECD pState->auRegs[EECD_IDX]
352#define EERD pState->auRegs[EERD_IDX]
353#define CTRL_EXT pState->auRegs[CTRL_EXT_IDX]
354#define FLA pState->auRegs[FLA_IDX]
355#define MDIC pState->auRegs[MDIC_IDX]
356#define FCAL pState->auRegs[FCAL_IDX]
357#define FCAH pState->auRegs[FCAH_IDX]
358#define FCT pState->auRegs[FCT_IDX]
359#define VET pState->auRegs[VET_IDX]
360#define ICR pState->auRegs[ICR_IDX]
361#define ITR pState->auRegs[ITR_IDX]
362#define ICS pState->auRegs[ICS_IDX]
363#define IMS pState->auRegs[IMS_IDX]
364#define IMC pState->auRegs[IMC_IDX]
365#define RCTL pState->auRegs[RCTL_IDX]
366#define FCTTV pState->auRegs[FCTTV_IDX]
367#define TXCW pState->auRegs[TXCW_IDX]
368#define RXCW pState->auRegs[RXCW_IDX]
369#define TCTL pState->auRegs[TCTL_IDX]
370#define TIPG pState->auRegs[TIPG_IDX]
371#define AIFS pState->auRegs[AIFS_IDX]
372#define LEDCTL pState->auRegs[LEDCTL_IDX]
373#define PBA pState->auRegs[PBA_IDX]
374#define FCRTL pState->auRegs[FCRTL_IDX]
375#define FCRTH pState->auRegs[FCRTH_IDX]
376#define RDFH pState->auRegs[RDFH_IDX]
377#define RDFT pState->auRegs[RDFT_IDX]
378#define RDFHS pState->auRegs[RDFHS_IDX]
379#define RDFTS pState->auRegs[RDFTS_IDX]
380#define RDFPC pState->auRegs[RDFPC_IDX]
381#define RDBAL pState->auRegs[RDBAL_IDX]
382#define RDBAH pState->auRegs[RDBAH_IDX]
383#define RDLEN pState->auRegs[RDLEN_IDX]
384#define RDH pState->auRegs[RDH_IDX]
385#define RDT pState->auRegs[RDT_IDX]
386#define RDTR pState->auRegs[RDTR_IDX]
387#define RXDCTL pState->auRegs[RXDCTL_IDX]
388#define RADV pState->auRegs[RADV_IDX]
389#define RSRPD pState->auRegs[RSRPD_IDX]
390#define TXDMAC pState->auRegs[TXDMAC_IDX]
391#define TDFH pState->auRegs[TDFH_IDX]
392#define TDFT pState->auRegs[TDFT_IDX]
393#define TDFHS pState->auRegs[TDFHS_IDX]
394#define TDFTS pState->auRegs[TDFTS_IDX]
395#define TDFPC pState->auRegs[TDFPC_IDX]
396#define TDBAL pState->auRegs[TDBAL_IDX]
397#define TDBAH pState->auRegs[TDBAH_IDX]
398#define TDLEN pState->auRegs[TDLEN_IDX]
399#define TDH pState->auRegs[TDH_IDX]
400#define TDT pState->auRegs[TDT_IDX]
401#define TIDV pState->auRegs[TIDV_IDX]
402#define TXDCTL pState->auRegs[TXDCTL_IDX]
403#define TADV pState->auRegs[TADV_IDX]
404#define TSPMT pState->auRegs[TSPMT_IDX]
405#define CRCERRS pState->auRegs[CRCERRS_IDX]
406#define ALGNERRC pState->auRegs[ALGNERRC_IDX]
407#define SYMERRS pState->auRegs[SYMERRS_IDX]
408#define RXERRC pState->auRegs[RXERRC_IDX]
409#define MPC pState->auRegs[MPC_IDX]
410#define SCC pState->auRegs[SCC_IDX]
411#define ECOL pState->auRegs[ECOL_IDX]
412#define MCC pState->auRegs[MCC_IDX]
413#define LATECOL pState->auRegs[LATECOL_IDX]
414#define COLC pState->auRegs[COLC_IDX]
415#define DC pState->auRegs[DC_IDX]
416#define TNCRS pState->auRegs[TNCRS_IDX]
417#define SEC pState->auRegs[SEC_IDX]
418#define CEXTERR pState->auRegs[CEXTERR_IDX]
419#define RLEC pState->auRegs[RLEC_IDX]
420#define XONRXC pState->auRegs[XONRXC_IDX]
421#define XONTXC pState->auRegs[XONTXC_IDX]
422#define XOFFRXC pState->auRegs[XOFFRXC_IDX]
423#define XOFFTXC pState->auRegs[XOFFTXC_IDX]
424#define FCRUC pState->auRegs[FCRUC_IDX]
425#define PRC64 pState->auRegs[PRC64_IDX]
426#define PRC127 pState->auRegs[PRC127_IDX]
427#define PRC255 pState->auRegs[PRC255_IDX]
428#define PRC511 pState->auRegs[PRC511_IDX]
429#define PRC1023 pState->auRegs[PRC1023_IDX]
430#define PRC1522 pState->auRegs[PRC1522_IDX]
431#define GPRC pState->auRegs[GPRC_IDX]
432#define BPRC pState->auRegs[BPRC_IDX]
433#define MPRC pState->auRegs[MPRC_IDX]
434#define GPTC pState->auRegs[GPTC_IDX]
435#define GORCL pState->auRegs[GORCL_IDX]
436#define GORCH pState->auRegs[GORCH_IDX]
437#define GOTCL pState->auRegs[GOTCL_IDX]
438#define GOTCH pState->auRegs[GOTCH_IDX]
439#define RNBC pState->auRegs[RNBC_IDX]
440#define RUC pState->auRegs[RUC_IDX]
441#define RFC pState->auRegs[RFC_IDX]
442#define ROC pState->auRegs[ROC_IDX]
443#define RJC pState->auRegs[RJC_IDX]
444#define MGTPRC pState->auRegs[MGTPRC_IDX]
445#define MGTPDC pState->auRegs[MGTPDC_IDX]
446#define MGTPTC pState->auRegs[MGTPTC_IDX]
447#define TORL pState->auRegs[TORL_IDX]
448#define TORH pState->auRegs[TORH_IDX]
449#define TOTL pState->auRegs[TOTL_IDX]
450#define TOTH pState->auRegs[TOTH_IDX]
451#define TPR pState->auRegs[TPR_IDX]
452#define TPT pState->auRegs[TPT_IDX]
453#define PTC64 pState->auRegs[PTC64_IDX]
454#define PTC127 pState->auRegs[PTC127_IDX]
455#define PTC255 pState->auRegs[PTC255_IDX]
456#define PTC511 pState->auRegs[PTC511_IDX]
457#define PTC1023 pState->auRegs[PTC1023_IDX]
458#define PTC1522 pState->auRegs[PTC1522_IDX]
459#define MPTC pState->auRegs[MPTC_IDX]
460#define BPTC pState->auRegs[BPTC_IDX]
461#define TSCTC pState->auRegs[TSCTC_IDX]
462#define TSCTFC pState->auRegs[TSCTFC_IDX]
463#define RXCSUM pState->auRegs[RXCSUM_IDX]
464#define WUC pState->auRegs[WUC_IDX]
465#define WUFC pState->auRegs[WUFC_IDX]
466#define WUS pState->auRegs[WUS_IDX]
467#define MANC pState->auRegs[MANC_IDX]
468#define IPAV pState->auRegs[IPAV_IDX]
469#define WUPL pState->auRegs[WUPL_IDX]
470/** @} */
471
472/**
473 * Indices of memory-mapped registers in register table.
474 */
475typedef enum
476{
477 CTRL_IDX,
478 STATUS_IDX,
479 EECD_IDX,
480 EERD_IDX,
481 CTRL_EXT_IDX,
482 FLA_IDX,
483 MDIC_IDX,
484 FCAL_IDX,
485 FCAH_IDX,
486 FCT_IDX,
487 VET_IDX,
488 ICR_IDX,
489 ITR_IDX,
490 ICS_IDX,
491 IMS_IDX,
492 IMC_IDX,
493 RCTL_IDX,
494 FCTTV_IDX,
495 TXCW_IDX,
496 RXCW_IDX,
497 TCTL_IDX,
498 TIPG_IDX,
499 AIFS_IDX,
500 LEDCTL_IDX,
501 PBA_IDX,
502 FCRTL_IDX,
503 FCRTH_IDX,
504 RDFH_IDX,
505 RDFT_IDX,
506 RDFHS_IDX,
507 RDFTS_IDX,
508 RDFPC_IDX,
509 RDBAL_IDX,
510 RDBAH_IDX,
511 RDLEN_IDX,
512 RDH_IDX,
513 RDT_IDX,
514 RDTR_IDX,
515 RXDCTL_IDX,
516 RADV_IDX,
517 RSRPD_IDX,
518 TXDMAC_IDX,
519 TDFH_IDX,
520 TDFT_IDX,
521 TDFHS_IDX,
522 TDFTS_IDX,
523 TDFPC_IDX,
524 TDBAL_IDX,
525 TDBAH_IDX,
526 TDLEN_IDX,
527 TDH_IDX,
528 TDT_IDX,
529 TIDV_IDX,
530 TXDCTL_IDX,
531 TADV_IDX,
532 TSPMT_IDX,
533 CRCERRS_IDX,
534 ALGNERRC_IDX,
535 SYMERRS_IDX,
536 RXERRC_IDX,
537 MPC_IDX,
538 SCC_IDX,
539 ECOL_IDX,
540 MCC_IDX,
541 LATECOL_IDX,
542 COLC_IDX,
543 DC_IDX,
544 TNCRS_IDX,
545 SEC_IDX,
546 CEXTERR_IDX,
547 RLEC_IDX,
548 XONRXC_IDX,
549 XONTXC_IDX,
550 XOFFRXC_IDX,
551 XOFFTXC_IDX,
552 FCRUC_IDX,
553 PRC64_IDX,
554 PRC127_IDX,
555 PRC255_IDX,
556 PRC511_IDX,
557 PRC1023_IDX,
558 PRC1522_IDX,
559 GPRC_IDX,
560 BPRC_IDX,
561 MPRC_IDX,
562 GPTC_IDX,
563 GORCL_IDX,
564 GORCH_IDX,
565 GOTCL_IDX,
566 GOTCH_IDX,
567 RNBC_IDX,
568 RUC_IDX,
569 RFC_IDX,
570 ROC_IDX,
571 RJC_IDX,
572 MGTPRC_IDX,
573 MGTPDC_IDX,
574 MGTPTC_IDX,
575 TORL_IDX,
576 TORH_IDX,
577 TOTL_IDX,
578 TOTH_IDX,
579 TPR_IDX,
580 TPT_IDX,
581 PTC64_IDX,
582 PTC127_IDX,
583 PTC255_IDX,
584 PTC511_IDX,
585 PTC1023_IDX,
586 PTC1522_IDX,
587 MPTC_IDX,
588 BPTC_IDX,
589 TSCTC_IDX,
590 TSCTFC_IDX,
591 RXCSUM_IDX,
592 WUC_IDX,
593 WUFC_IDX,
594 WUS_IDX,
595 MANC_IDX,
596 IPAV_IDX,
597 WUPL_IDX,
598 MTA_IDX,
599 RA_IDX,
600 VFTA_IDX,
601 IP4AT_IDX,
602 IP6AT_IDX,
603 WUPM_IDX,
604 FFLT_IDX,
605 FFMT_IDX,
606 FFVT_IDX,
607 PBM_IDX,
608 RA_82542_IDX,
609 MTA_82542_IDX,
610 VFTA_82542_IDX,
611 E1K_NUM_OF_REGS
612} E1kRegIndex;
613
614#define E1K_NUM_OF_32BIT_REGS MTA_IDX
615
616
617/**
618 * Define E1000-specific EEPROM layout.
619 */
620struct E1kEEPROM
621{
622 public:
623 EEPROM93C46 eeprom;
624
625#ifdef IN_RING3
626 /**
627 * Initialize EEPROM content.
628 *
629 * @param macAddr MAC address of E1000.
630 */
631 void init(RTMAC &macAddr)
632 {
633 eeprom.init();
634 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
635 eeprom.m_au16Data[0x04] = 0xFFFF;
636 /*
637 * bit 3 - full support for power management
638 * bit 10 - full duplex
639 */
640 eeprom.m_au16Data[0x0A] = 0x4408;
641 eeprom.m_au16Data[0x0B] = 0x001E;
642 eeprom.m_au16Data[0x0C] = 0x8086;
643 eeprom.m_au16Data[0x0D] = 0x100E;
644 eeprom.m_au16Data[0x0E] = 0x8086;
645 eeprom.m_au16Data[0x0F] = 0x3040;
646 eeprom.m_au16Data[0x21] = 0x7061;
647 eeprom.m_au16Data[0x22] = 0x280C;
648 eeprom.m_au16Data[0x23] = 0x00C8;
649 eeprom.m_au16Data[0x24] = 0x00C8;
650 eeprom.m_au16Data[0x2F] = 0x0602;
651 updateChecksum();
652 };
653
654 /**
655 * Compute the checksum as required by E1000 and store it
656 * in the last word.
657 */
658 void updateChecksum()
659 {
660 uint16_t u16Checksum = 0;
661
662 for (int i = 0; i < eeprom.SIZE-1; i++)
663 u16Checksum += eeprom.m_au16Data[i];
664 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
665 };
666
667 /**
668 * First 6 bytes of EEPROM contain MAC address.
669 *
670 * @returns MAC address of E1000.
671 */
672 void getMac(PRTMAC pMac)
673 {
674 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
675 };
676
677 uint32_t read()
678 {
679 return eeprom.read();
680 }
681
682 void write(uint32_t u32Wires)
683 {
684 eeprom.write(u32Wires);
685 }
686
687 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
688 {
689 return eeprom.readWord(u32Addr, pu16Value);
690 }
691
692 int load(PSSMHANDLE pSSM)
693 {
694 return eeprom.load(pSSM);
695 }
696
697 void save(PSSMHANDLE pSSM)
698 {
699 eeprom.save(pSSM);
700 }
701#endif /* IN_RING3 */
702};
703
704
705#define E1K_SPEC_VLAN(s) (s & 0xFFF)
706#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
707#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
708
/**
 * Status/errors/special write-back portion of the legacy receive descriptor
 * (the low 32 bits the device fills in when storing a packet).
 */
struct E1kRxDStatus
{
    /** @name Descriptor Status field (3.2.3.1)
     * @{ */
    unsigned fDD : 1;                             /**< Descriptor Done. */
    unsigned fEOP : 1;                            /**< End of packet. */
    unsigned fIXSM : 1;                           /**< Ignore checksum indication. */
    unsigned fVP : 1;                             /**< VLAN, matches VET. */
    unsigned : 1;                                 /**< Reserved. */
    unsigned fTCPCS : 1;                          /**< TCP Checksum calculated on the packet. */
    unsigned fIPCS : 1;                           /**< IP Checksum calculated on the packet. */
    unsigned fPIF : 1;                            /**< Passed in-exact filter */
    /** @} */
    /** @name Descriptor Errors field (3.2.3.2)
     * (Only valid when fEOP and fDD are set.)
     * @{ */
    unsigned fCE : 1;                             /**< CRC or alignment error. */
    unsigned : 4;                                 /**< Reserved, varies with different models... */
    unsigned fTCPE : 1;                           /**< TCP/UDP checksum error. */
    unsigned fIPE : 1;                            /**< IP Checksum error. */
    unsigned fRXE : 1;                            /**< RX Data error. */
    /** @} */
    /** @name Descriptor Special field (3.2.3.3)
     * @{ */
    unsigned u16Special : 16;                     /**< VLAN: Id, Canonical form, Priority. */
    /** @} */
};
typedef struct E1kRxDStatus E1KRXDST;
737
/**
 * Legacy receive descriptor (16 bytes): the guest supplies the buffer
 * address, the device writes back length, checksum and status on receive.
 */
struct E1kRxDesc_st
{
    uint64_t u64BufAddr; /**< Address of data buffer */
    uint16_t u16Length; /**< Length of data in buffer */
    uint16_t u16Checksum; /**< Packet checksum */
    E1KRXDST status;     /**< Status/errors/VLAN write-back, see E1KRXDST. */
};
typedef struct E1kRxDesc_st E1KRXDESC;
AssertCompileSize(E1KRXDESC, 16);
747
748#define E1K_DTYP_LEGACY -1
749#define E1K_DTYP_CONTEXT 0
750#define E1K_DTYP_DATA 1
751
/**
 * Legacy Transmit Descriptor (identified by a clear DEXT bit, see
 * E1K_DTYP_LEGACY and the extended descriptor types below).
 */
struct E1kTDLegacy
{
    uint64_t u64BufAddr; /**< Address of data buffer */
    struct TDLCmd_st
    {
        unsigned u16Length : 16; /**< Length of data in the buffer. */
        unsigned u8CSO : 8;      /**< Checksum offset — where to insert the checksum. */
        /* CMD field : 8 */
        unsigned fEOP : 1;       /**< End of packet. */
        unsigned fIFCS : 1;      /**< Insert Ethernet FCS/CRC. */
        unsigned fIC : 1;        /**< Insert checksum (at CSO). */
        unsigned fRS : 1;        /**< Report status (dw3.STA). */
        unsigned fRPS : 1;       /**< Report packet sent (82544GC/EI). */
        unsigned fDEXT : 1;      /**< Descriptor extension — clear for legacy descriptors. */
        unsigned fVLE : 1;       /**< VLAN enable. */
        unsigned fIDE : 1;       /**< Interrupt delay enable. */
    } cmd;
    struct TDLDw3_st
    {
        /* STA field */
        unsigned fDD : 1;        /**< Descriptor done. */
        unsigned fEC : 1;        /**< Excess collisions. */
        unsigned fLC : 1;        /**< Late collision. */
        unsigned fTURSV : 1;     /**< Reserved; transmit underrun on 82544GC/EI. */
        /* RSV field */
        unsigned u4RSV : 4;      /**< Reserved, MBZ. */
        /* CSS field */
        unsigned u8CSS : 8;      /**< Checksum start offset. */
        /* Special field*/
        unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
    } dw3;
};
784
785/**
786 * TCP/IP Context Transmit Descriptor, section 3.3.6.
787 */
788struct E1kTDContext
789{
790 struct CheckSum_st
791 {
792 /** TSE: Header start. !TSE: Checksum start. */
793 unsigned u8CSS : 8;
794 /** Checksum offset - where to store it. */
795 unsigned u8CSO : 8;
796 /** Checksum ending (inclusive) offset, 0 = end of packet. */
797 unsigned u16CSE : 16;
798 } ip;
799 struct CheckSum_st tu;
800 struct TDCDw2_st
801 {
802 /** TSE: The total number of payload bytes for this context. Sans header. */
803 unsigned u20PAYLEN : 20;
804 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
805 unsigned u4DTYP : 4;
806 /** TUCMD field, 8 bits
807 * @{ */
808 /** TSE: TCP (set) or UDP (clear). */
809 unsigned fTCP : 1;
810 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
811 * the IP header. Does not affect the checksumming.
812 * @remarks 82544GC/EI interprets a cleared field differently. */
813 unsigned fIP : 1;
814 /** TSE: TCP segmentation enable. When clear the context describes */
815 unsigned fTSE : 1;
816 /** Report status (only applies to dw3.fDD for here). */
817 unsigned fRS : 1;
818 /** Reserved, MBZ. */
819 unsigned fRSV1 : 1;
820 /** Descriptor extension, must be set for this descriptor type. */
821 unsigned fDEXT : 1;
822 /** Reserved, MBZ. */
823 unsigned fRSV2 : 1;
824 /** Interrupt delay enable. */
825 unsigned fIDE : 1;
826 /** @} */
827 } dw2;
828 struct TDCDw3_st
829 {
830 /** Descriptor Done. */
831 unsigned fDD : 1;
832 /** Reserved, MBZ. */
833 unsigned u7RSV : 7;
834 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP. */
835 unsigned u8HDRLEN : 8;
836 /** TSO: Maximum segment size. */
837 unsigned u16MSS : 16;
838 } dw3;
839};
840typedef struct E1kTDContext E1KTXCTX;
841
842/**
843 * TCP/IP Data Transmit Descriptor, section 3.3.7.
844 */
845struct E1kTDData
846{
847 uint64_t u64BufAddr; /**< Address of data buffer */
848 struct TDDCmd_st
849 {
850 /** The total length of data pointed to by this descriptor. */
851 unsigned u20DTALEN : 20;
852 /** The descriptor type - E1K_DTYP_DATA (1). */
853 unsigned u4DTYP : 4;
854 /** @name DCMD field, 8 bits (3.3.7.1).
855 * @{ */
856 /** End of packet. Note TSCTFC update. */
857 unsigned fEOP : 1;
858 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
859 unsigned fIFCS : 1;
860 /** Use the TSE context when set and the normal when clear. */
861 unsigned fTSE : 1;
862 /** Report status (dw3.STA). */
863 unsigned fRS : 1;
864 /** Reserved. 82544GC/EI defines this report packet set (RPS). */
865 unsigned fRPS : 1;
866 /** Descriptor extension, must be set for this descriptor type. */
867 unsigned fDEXT : 1;
868 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
869 * Insert dw3.SPECIAL after ethernet header. */
870 unsigned fVLE : 1;
871 /** Interrupt delay enable. */
872 unsigned fIDE : 1;
873 /** @} */
874 } cmd;
875 struct TDDDw3_st
876 {
877 /** @name STA field (3.3.7.2)
878 * @{ */
879 unsigned fDD : 1; /**< Descriptor done. */
880 unsigned fEC : 1; /**< Excess collision. */
881 unsigned fLC : 1; /**< Late collision. */
882 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
883 unsigned fTURSV : 1;
884 /** @} */
885 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
886 /** @name POPTS (Packet Option) field (3.3.7.3)
887 * @{ */
888 unsigned fIXSM : 1; /**< Insert IP checksum. */
889 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
890 unsigned u6RSV : 6; /**< Reserved, MBZ. */
891 /** @} */
892 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
893 * Requires fEOP, fVLE and CTRL.VME to be set.
894 * @{ */
895 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
896 /** @} */
897 } dw3;
898};
899typedef struct E1kTDData E1KTXDAT;
900
/**
 * Transmit descriptor overlay. The actual layout is selected by the DEXT bit
 * and, for extended descriptors, the DTYP field (see E1K_DTYP_*).
 */
union E1kTxDesc
{
    struct E1kTDLegacy legacy;   /**< DEXT clear: legacy descriptor. */
    struct E1kTDContext context; /**< DEXT set, DTYP = E1K_DTYP_CONTEXT. */
    struct E1kTDData data;       /**< DEXT set, DTYP = E1K_DTYP_DATA. */
};
typedef union E1kTxDesc E1KTXDESC;
AssertCompileSize(E1KTXDESC, 16);
909
#define RA_CTL_AS 0x0003 /**< Address Select bits — which packet address the entry is matched against. */
#define RA_CTL_AV 0x8000 /**< Address Valid — the entry participates in filtering. */

/**
 * Receive Address table: 16 entries of MAC address + control word, also
 * viewable as raw dwords (au32) for RAL/RAH register access.
 */
union E1kRecAddr
{
    uint32_t au32[32];
    struct RAArray
    {
        uint8_t  addr[6]; /**< MAC address to match. */
        uint16_t ctl;     /**< RA_CTL_* flags. */
    } array[16];
};
typedef struct E1kRecAddr::RAArray E1KRAELEM;
typedef union E1kRecAddr E1KRA;
AssertCompileSize(E1KRA, 8*16);
925
926#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
927#define E1K_IP_DF UINT16_C(0x4000) /**< dont fragment flag */
928#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
929#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
930
931/** @todo use+extend RTNETIPV4 */
/**
 * IPv4 header as laid out on the wire; multi-byte fields are big-endian
 * (network byte order) — convert with ntohs/ntohl before arithmetic.
 */
struct E1kIpHeader
{
    /* type of service / version / header length */
    uint16_t tos_ver_hl;
    /* total length */
    uint16_t total_len;
    /* identification */
    uint16_t ident;
    /* fragment offset field (plus the E1K_IP_* flag bits) */
    uint16_t offset;
    /* time to live / protocol*/
    uint16_t ttl_proto;
    /* checksum */
    uint16_t chksum;
    /* source IP address */
    uint32_t src;
    /* destination IP address */
    uint32_t dest;
};
AssertCompileSize(struct E1kIpHeader, 20);
952
953#define E1K_TCP_FIN UINT16_C(0x01)
954#define E1K_TCP_SYN UINT16_C(0x02)
955#define E1K_TCP_RST UINT16_C(0x04)
956#define E1K_TCP_PSH UINT16_C(0x08)
957#define E1K_TCP_ACK UINT16_C(0x10)
958#define E1K_TCP_URG UINT16_C(0x20)
959#define E1K_TCP_ECE UINT16_C(0x40)
960#define E1K_TCP_CWR UINT16_C(0x80)
961#define E1K_TCP_FLAGS UINT16_C(0x3f)
962
/** @todo use+extend RTNETTCP */
/** TCP header as laid out on the wire (fields in network byte order). */
struct E1kTcpHeader
{
    uint16_t src;          /**< Source port. */
    uint16_t dest;         /**< Destination port. */
    uint32_t seqno;        /**< Sequence number. */
    uint32_t ackno;        /**< Acknowledgment number. */
    uint16_t hdrlen_flags; /**< Data offset (header length) plus flags, see E1K_TCP_*. */
    uint16_t wnd;          /**< Window size. */
    uint16_t chksum;       /**< Checksum. */
    uint16_t urgp;         /**< Urgent pointer. */
};
AssertCompileSize(struct E1kTcpHeader, 20);
976
977
978#ifdef E1K_WITH_TXD_CACHE
979/** The current Saved state version. */
980# define E1K_SAVEDSTATE_VERSION 4
981/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
982# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
983#else /* !E1K_WITH_TXD_CACHE */
984/** The current Saved state version. */
985# define E1K_SAVEDSTATE_VERSION 3
986#endif /* !E1K_WITH_TXD_CACHE */
987/** Saved state version for VirtualBox 4.1 and earlier.
988 * These did not include VLAN tag fields. */
989#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
990/** Saved state version for VirtualBox 3.0 and earlier.
991 * This did not include the configuration part nor the E1kEEPROM. */
992#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
993
/**
 * Device state structure.
 *
 * Holds the current state of device.
 *
 * @note The structure is shared between R3, R0 and RC contexts; hence the
 *       per-context pointer triples and the explicit alignment padding
 *       members below.
 *
 * @implements PDMINETWORKDOWN
 * @implements PDMINETWORKCONFIG
 * @implements PDMILEDPORTS
 */
struct E1kState_st
{
    char szInstance[8]; /**< Instance name, e.g. E1000#1. */
    PDMIBASE IBase; /**< Base interface (interface query entry point). */
    PDMINETWORKDOWN INetworkDown; /**< Network-down (receive) interface. */
    PDMINETWORKCONFIG INetworkConfig; /**< Network configuration interface. */
    PDMILEDPORTS ILeds; /**< LED interface */
    R3PTRTYPE(PPDMIBASE) pDrvBase; /**< Attached network driver. */
    R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector; /**< LED connector interface. */

    PPDMDEVINSR3 pDevInsR3; /**< Device instance - R3. */
    R3PTRTYPE(PPDMQUEUE) pTxQueueR3; /**< Transmit queue - R3. */
    R3PTRTYPE(PPDMQUEUE) pCanRxQueueR3; /**< Rx wakeup signaller - R3. */
    PPDMINETWORKUPR3 pDrvR3; /**< Attached network driver - R3. */
    PTMTIMERR3 pRIDTimerR3; /**< Receive Interrupt Delay Timer - R3. */
    PTMTIMERR3 pRADTimerR3; /**< Receive Absolute Delay Timer - R3. */
    PTMTIMERR3 pTIDTimerR3; /**< Transmit Interrupt Delay Timer - R3. */
    PTMTIMERR3 pTADTimerR3; /**< Transmit Absolute Delay Timer - R3. */
    PTMTIMERR3 pTXDTimerR3; /**< Transmit Delay Timer - R3. */
    PTMTIMERR3 pIntTimerR3; /**< Late Interrupt Timer - R3. */
    PTMTIMERR3 pLUTimerR3; /**< Link Up(/Restore) Timer. */
    /** The scatter / gather buffer used for the current outgoing packet - R3. */
    R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;

    PPDMDEVINSR0 pDevInsR0; /**< Device instance - R0. */
    R0PTRTYPE(PPDMQUEUE) pTxQueueR0; /**< Transmit queue - R0. */
    R0PTRTYPE(PPDMQUEUE) pCanRxQueueR0; /**< Rx wakeup signaller - R0. */
    PPDMINETWORKUPR0 pDrvR0; /**< Attached network driver - R0. */
    PTMTIMERR0 pRIDTimerR0; /**< Receive Interrupt Delay Timer - R0. */
    PTMTIMERR0 pRADTimerR0; /**< Receive Absolute Delay Timer - R0. */
    PTMTIMERR0 pTIDTimerR0; /**< Transmit Interrupt Delay Timer - R0. */
    PTMTIMERR0 pTADTimerR0; /**< Transmit Absolute Delay Timer - R0. */
    PTMTIMERR0 pTXDTimerR0; /**< Transmit Delay Timer - R0. */
    PTMTIMERR0 pIntTimerR0; /**< Late Interrupt Timer - R0. */
    PTMTIMERR0 pLUTimerR0; /**< Link Up(/Restore) Timer - R0. */
    /** The scatter / gather buffer used for the current outgoing packet - R0. */
    R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;

    PPDMDEVINSRC pDevInsRC; /**< Device instance - RC. */
    RCPTRTYPE(PPDMQUEUE) pTxQueueRC; /**< Transmit queue - RC. */
    RCPTRTYPE(PPDMQUEUE) pCanRxQueueRC; /**< Rx wakeup signaller - RC. */
    PPDMINETWORKUPRC pDrvRC; /**< Attached network driver - RC. */
    PTMTIMERRC pRIDTimerRC; /**< Receive Interrupt Delay Timer - RC. */
    PTMTIMERRC pRADTimerRC; /**< Receive Absolute Delay Timer - RC. */
    PTMTIMERRC pTIDTimerRC; /**< Transmit Interrupt Delay Timer - RC. */
    PTMTIMERRC pTADTimerRC; /**< Transmit Absolute Delay Timer - RC. */
    PTMTIMERRC pTXDTimerRC; /**< Transmit Delay Timer - RC. */
    PTMTIMERRC pIntTimerRC; /**< Late Interrupt Timer - RC. */
    PTMTIMERRC pLUTimerRC; /**< Link Up(/Restore) Timer - RC. */
    /** The scatter / gather buffer used for the current outgoing packet - RC. */
    RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
    RTRCPTR RCPtrAlignment; /**< Alignment padding. */

#if HC_ARCH_BITS != 32
    uint32_t Alignment1; /**< Alignment padding (64-bit hosts only). */
#endif
    PDMCRITSECT cs; /**< Critical section - what is it protecting? */
    PDMCRITSECT csRx; /**< RX Critical section. */
#ifdef E1K_WITH_TX_CS
    PDMCRITSECT csTx; /**< TX Critical section. */
#endif /* E1K_WITH_TX_CS */
    /** Base address of memory-mapped registers. */
    RTGCPHYS addrMMReg;
    /** MAC address obtained from the configuration. */
    RTMAC macConfigured;
    /** Base port of I/O space region. */
    RTIOPORT addrIOPort;
    /** EMT: The PCI device. */
    PCIDEVICE pciDevice;
    /** EMT: Last time the interrupt was acknowledged. */
    uint64_t u64AckedAt;
    /** All: Used for eliminating spurious interrupts. */
    bool fIntRaised;
    /** EMT: false if the cable is disconnected by the GUI. */
    bool fCableConnected;
    /** EMT: Set when R0 execution is enabled (presumably from config; confirm in constructor). */
    bool fR0Enabled;
    /** EMT: Set when RC execution is enabled (presumably from config; confirm in constructor). */
    bool fGCEnabled;
    /** EMT: Compute Ethernet CRC for RX packets. */
    bool fEthernetCRC;

    bool Alignment2[3]; /**< Alignment padding. */
    /** Link up delay (in milliseconds). */
    uint32_t cMsLinkUpDelay;

    /** All: Device register storage. */
    uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
    /** TX/RX: Status LED. */
    PDMLED led;
    /** TX/RX: Number of packet being sent/received to show in debug log. */
    uint32_t u32PktNo;

    /** EMT: Offset of the register to be read via IO. */
    uint32_t uSelectedReg;
    /** EMT: Multicast Table Array. */
    uint32_t auMTA[128];
    /** EMT: Receive Address registers. */
    E1KRA aRecAddr;
    /** EMT: VLAN filter table array. */
    uint32_t auVFTA[128];
    /** EMT: Receive buffer size. */
    uint16_t u16RxBSize;
    /** EMT: Locked state -- no state alteration possible. */
    bool fLocked;
    /** EMT: */
    bool fDelayInts;
    /** All: */
    bool fIntMaskUsed;

    /** N/A: */
    bool volatile fMaybeOutOfSpace;
    /** EMT: Gets signalled when more RX descriptors become available. */
    RTSEMEVENT hEventMoreRxDescAvail;
#ifdef E1K_WITH_RXD_CACHE
    /** RX: Fetched RX descriptors. */
    E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
    //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
    /** RX: Actual number of fetched RX descriptors. */
    uint32_t nRxDFetched;
    /** RX: Index in cache of RX descriptor being processed. */
    uint32_t iRxDCurrent;
#endif /* E1K_WITH_RXD_CACHE */

    /** TX: Context used for TCP segmentation packets. */
    E1KTXCTX contextTSE;
    /** TX: Context used for ordinary packets. */
    E1KTXCTX contextNormal;
#ifdef E1K_WITH_TXD_CACHE
    /** TX: Fetched TX descriptors. */
    E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
    /** TX: Actual number of fetched TX descriptors. */
    uint8_t nTxDFetched;
    /** TX: Index in cache of TX descriptor being processed. */
    uint8_t iTxDCurrent;
    /** TX: Will this frame be sent as GSO. */
    bool fGSO;
    /** TX: False will force segmentation in e1000 instead of sending frames as GSO. */
    bool fGSOEnabled;
    /** TX: Number of bytes in next packet. */
    uint32_t cbTxAlloc;

#endif /* E1K_WITH_TXD_CACHE */
    /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
     * applicable to the current TSE mode. */
    PDMNETWORKGSO GsoCtx;
    /** Scratch space for holding the loopback / fallback scatter / gather
     * descriptor. */
    union
    {
        PDMSCATTERGATHER Sg;
        uint8_t padding[8 * sizeof(RTUINTPTR)];
    } uTxFallback;
    /** TX: Transmit packet buffer use for TSE fallback and loopback. */
    uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
    /** TX: Number of bytes assembled in TX packet buffer. */
    uint16_t u16TxPktLen;
    /** TX: IP checksum has to be inserted if true. */
    bool fIPcsum;
    /** TX: TCP/UDP checksum has to be inserted if true. */
    bool fTCPcsum;
    /** TX: VLAN tag has to be inserted if true. */
    bool fVTag;
    /** TX: TCI part of VLAN tag to be inserted. */
    uint16_t u16VTagTCI;
    /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
    uint32_t u32PayRemain;
    /** TX TSE fallback: Number of header bytes remaining in TSE context. */
    uint16_t u16HdrRemain;
    /** TX TSE fallback: Flags from template header. */
    uint16_t u16SavedFlags;
    /** TX TSE fallback: Partial checksum from template header. */
    uint32_t u32SavedCsum;
    /** ?: Emulated controller type. */
    E1KCHIP eChip;

    /** EMT: EEPROM emulation */
    E1kEEPROM eeprom;
    /** EMT: Physical interface emulation. */
    PHY phy;

#if 0
    /** Alignment padding. */
    uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
#endif

    STAMCOUNTER StatReceiveBytes;
    STAMCOUNTER StatTransmitBytes;
#if defined(VBOX_WITH_STATISTICS)
    STAMPROFILEADV StatMMIOReadRZ;
    STAMPROFILEADV StatMMIOReadR3;
    STAMPROFILEADV StatMMIOWriteRZ;
    STAMPROFILEADV StatMMIOWriteR3;
    STAMPROFILEADV StatEEPROMRead;
    STAMPROFILEADV StatEEPROMWrite;
    STAMPROFILEADV StatIOReadRZ;
    STAMPROFILEADV StatIOReadR3;
    STAMPROFILEADV StatIOWriteRZ;
    STAMPROFILEADV StatIOWriteR3;
    STAMPROFILEADV StatLateIntTimer;
    STAMCOUNTER StatLateInts;
    STAMCOUNTER StatIntsRaised;
    STAMCOUNTER StatIntsPrevented;
    STAMPROFILEADV StatReceive;
    STAMPROFILEADV StatReceiveCRC;
    STAMPROFILEADV StatReceiveFilter;
    STAMPROFILEADV StatReceiveStore;
    STAMPROFILEADV StatTransmitRZ;
    STAMPROFILEADV StatTransmitR3;
    STAMPROFILE StatTransmitSendRZ;
    STAMPROFILE StatTransmitSendR3;
    STAMPROFILE StatRxOverflow;
    STAMCOUNTER StatRxOverflowWakeup;
    STAMCOUNTER StatTxDescCtxNormal;
    STAMCOUNTER StatTxDescCtxTSE;
    STAMCOUNTER StatTxDescLegacy;
    STAMCOUNTER StatTxDescData;
    STAMCOUNTER StatTxDescTSEData;
    STAMCOUNTER StatTxPathFallback;
    STAMCOUNTER StatTxPathGSO;
    STAMCOUNTER StatTxPathRegular;
    STAMCOUNTER StatPHYAccesses;

#endif /* VBOX_WITH_STATISTICS */

#ifdef E1K_INT_STATS
    /* Internal stats */
    uint64_t u64ArmedAt;
    uint64_t uStatMaxTxDelay;
    uint32_t uStatInt;
    uint32_t uStatIntTry;
    uint32_t uStatIntLower;
    uint32_t uStatIntDly;
    int32_t iStatIntLost;
    int32_t iStatIntLostOne;
    uint32_t uStatDisDly;
    uint32_t uStatIntSkip;
    uint32_t uStatIntLate;
    uint32_t uStatIntMasked;
    uint32_t uStatIntEarly;
    uint32_t uStatIntRx;
    uint32_t uStatIntTx;
    uint32_t uStatIntICS;
    uint32_t uStatIntRDTR;
    uint32_t uStatIntRXDMT0;
    uint32_t uStatIntTXQE;
    uint32_t uStatTxNoRS;
    uint32_t uStatTxIDE;
    uint32_t uStatTxDelayed;
    uint32_t uStatTxDelayExp;
    uint32_t uStatTAD;
    uint32_t uStatTID;
    uint32_t uStatRAD;
    uint32_t uStatRID;
    uint32_t uStatRxFrm;
    uint32_t uStatTxFrm;
    uint32_t uStatDescCtx;
    uint32_t uStatDescDat;
    uint32_t uStatDescLeg;
    uint32_t uStatTx1514;
    uint32_t uStatTx2962;
    uint32_t uStatTx4410;
    uint32_t uStatTx5858;
    uint32_t uStatTx7306;
    uint32_t uStatTx8754;
    uint32_t uStatTx16384;
    uint32_t uStatTx32768;
    uint32_t uStatTxLarge;
    uint32_t uStatAlign;
#endif /* E1K_INT_STATS */
};
typedef struct E1kState_st E1KSTATE;
/** Pointer to the E1000 device state. */
typedef E1KSTATE *PE1KSTATE;
1277
1278#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1279
/* Forward declarations ******************************************************/

/** Transmits pending TX descriptors (entry point of the transmit path). */
static int e1kXmitPending(E1KSTATE *pState, bool fOnWorkerThread);

/*
 * Register read/write handlers.  These are wired up per register in the
 * s_e1kRegMap table below; all share the same (offset, index, value)
 * signature so the table can dispatch uniformly.
 */
static int e1kRegReadUnimplemented (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
static int e1kRegWriteUnimplemented(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
static int e1kRegReadAutoClear (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
static int e1kRegReadDefault (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
static int e1kRegWriteDefault (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
#if 0 /* unused */
static int e1kRegReadCTRL (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
#endif
static int e1kRegWriteCTRL (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
static int e1kRegReadEECD (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
static int e1kRegWriteEECD (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
static int e1kRegWriteEERD (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
static int e1kRegWriteMDIC (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
static int e1kRegReadICR (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
static int e1kRegWriteICR (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
static int e1kRegWriteICS (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
static int e1kRegWriteIMS (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
static int e1kRegWriteIMC (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
static int e1kRegWriteRCTL (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
static int e1kRegWritePBA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
static int e1kRegWriteRDT (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
static int e1kRegWriteRDTR (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
static int e1kRegWriteTDT (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
static int e1kRegReadMTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
static int e1kRegWriteMTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
static int e1kRegReadRA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
static int e1kRegWriteRA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
static int e1kRegReadVFTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
static int e1kRegWriteVFTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1312
1313/**
1314 * Register map table.
1315 *
1316 * Override fn_read and fn_write to get register-specific behavior.
1317 */
1318const static struct E1kRegMap_st
1319{
1320 /** Register offset in the register space. */
1321 uint32_t offset;
1322 /** Size in bytes. Registers of size > 4 are in fact tables. */
1323 uint32_t size;
1324 /** Readable bits. */
1325 uint32_t readable;
1326 /** Writable bits. */
1327 uint32_t writable;
1328 /** Read callback. */
1329 int (*pfnRead)(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1330 /** Write callback. */
1331 int (*pfnWrite)(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1332 /** Abbreviated name. */
1333 const char *abbrev;
1334 /** Full name. */
1335 const char *name;
1336} s_e1kRegMap[E1K_NUM_OF_REGS] =
1337{
1338 /* offset size read mask write mask read callback write callback abbrev full name */
1339 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1340 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1341 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1342 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1343 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1344 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1345 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1346 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1347 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1348 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1349 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1350 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1351 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1352 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1353 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1354 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1355 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1356 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1357 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1358 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1359 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1360 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1361 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1362 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1363 { 0x00e00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LEDCTL" , "LED Control" },
1364 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1365 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1366 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1367 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1368 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1369 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1370 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1371 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1372 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1373 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1374 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1375 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1376 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1377 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1378 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1379 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1380 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1381 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1382 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1383 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1384 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1385 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1386 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1387 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1388 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1389 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1390 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1391 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1392 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1393 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1394 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1395 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1396 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1397 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1398 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1399 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1400 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1401 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1402 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1403 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1404 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1405 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1406 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1407 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1408 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1409 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1410 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1411 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1412 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1413 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1414 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1415 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1416 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1417 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1418 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1419 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1420 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1421 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1422 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1423 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1424 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1425 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1426 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1427 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1428 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1429 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1430 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1431 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1432 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1433 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1434 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1435 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1436 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1437 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1438 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1439 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1440 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1441 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1442 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1443 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1444 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1445 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1446 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1447 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1448 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1449 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1450 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1451 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1452 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1453 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1454 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1455 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1456 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1457 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1458 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1459 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1460 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1461 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1462 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1463 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1464 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1465 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1466 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1467 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1468 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1469 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1470 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1471 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n) (82542)" },
1472 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n) (82542)" },
1473 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n) (82542)" }
1474};
1475
1476#ifdef DEBUG
1477
1478/**
1479 * Convert U32 value to hex string. Masked bytes are replaced with dots.
1480 *
1481 * @remarks The mask has byte (not bit) granularity (e.g. 000000FF).
1482 *
1483 * @returns The buffer.
1484 *
1485 * @param u32 The word to convert into string.
1486 * @param mask Selects which bytes to convert.
1487 * @param buf Where to put the result.
1488 */
1489static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1490{
1491 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1492 {
1493 if (mask & 0xF)
1494 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1495 else
1496 *ptr = '.';
1497 }
1498 buf[8] = 0;
1499 return buf;
1500}
1501
1502/**
1503 * Returns timer name for debug purposes.
1504 *
1505 * @returns The timer name.
1506 *
1507 * @param pState The device state structure.
1508 * @param pTimer The timer to get the name for.
1509 */
1510DECLINLINE(const char *) e1kGetTimerName(E1KSTATE *pState, PTMTIMER pTimer)
1511{
1512 if (pTimer == pState->CTX_SUFF(pTIDTimer))
1513 return "TID";
1514 if (pTimer == pState->CTX_SUFF(pTADTimer))
1515 return "TAD";
1516 if (pTimer == pState->CTX_SUFF(pRIDTimer))
1517 return "RID";
1518 if (pTimer == pState->CTX_SUFF(pRADTimer))
1519 return "RAD";
1520 if (pTimer == pState->CTX_SUFF(pIntTimer))
1521 return "Int";
1522 if (pTimer == pState->CTX_SUFF(pTXDTimer))
1523 return "TXD";
1524 return "unknown";
1525}
1526
1527#endif /* DEBUG */
1528
1529/**
1530 * Arm a timer.
1531 *
1532 * @param pState Pointer to the device state structure.
1533 * @param pTimer Pointer to the timer.
1534 * @param uExpireIn Expiration interval in microseconds.
1535 */
1536DECLINLINE(void) e1kArmTimer(E1KSTATE *pState, PTMTIMER pTimer, uint32_t uExpireIn)
1537{
1538 if (pState->fLocked)
1539 return;
1540
1541 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1542 INSTANCE(pState), e1kGetTimerName(pState, pTimer), uExpireIn));
1543 TMTimerSetMicro(pTimer, uExpireIn);
1544}
1545
1546/**
1547 * Cancel a timer.
1548 *
1549 * @param pState Pointer to the device state structure.
1550 * @param pTimer Pointer to the timer.
1551 */
1552DECLINLINE(void) e1kCancelTimer(E1KSTATE *pState, PTMTIMER pTimer)
1553{
1554 E1kLog2(("%s Stopping %s timer...\n",
1555 INSTANCE(pState), e1kGetTimerName(pState, pTimer)));
1556 int rc = TMTimerStop(pTimer);
1557 if (RT_FAILURE(rc))
1558 {
1559 E1kLog2(("%s e1kCancelTimer: TMTimerStop() failed with %Rrc\n",
1560 INSTANCE(pState), rc));
1561 }
1562}
1563
/* Shorthand wrappers for the main device critical section. */
#define e1kCsEnter(ps, rc) PDMCritSectEnter(&ps->cs, rc)
#define e1kCsLeave(ps) PDMCritSectLeave(&ps->cs)

/* Shorthand wrappers for the receive-path critical section. */
#define e1kCsRxEnter(ps, rc) PDMCritSectEnter(&ps->csRx, rc)
#define e1kCsRxLeave(ps) PDMCritSectLeave(&ps->csRx)
#define e1kCsRxIsOwner(ps) PDMCritSectIsOwner(&ps->csRx)

/* Transmit-path locking compiles to no-ops unless E1K_WITH_TX_CS is defined. */
#ifndef E1K_WITH_TX_CS
# define e1kCsTxEnter(ps, rc) VINF_SUCCESS
# define e1kCsTxLeave(ps) do { } while (0)
#else /* E1K_WITH_TX_CS */
# define e1kCsTxEnter(ps, rc) PDMCritSectEnter(&ps->csTx, rc)
# define e1kCsTxLeave(ps) PDMCritSectLeave(&ps->csTx)
#endif /* E1K_WITH_TX_CS */
1578
1579#ifdef IN_RING3
1580
1581/**
1582 * Wakeup the RX thread.
1583 */
1584static void e1kWakeupReceive(PPDMDEVINS pDevIns)
1585{
1586 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
1587 if ( pState->fMaybeOutOfSpace
1588 && pState->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
1589 {
1590 STAM_COUNTER_INC(&pState->StatRxOverflowWakeup);
1591 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", INSTANCE(pState)));
1592 RTSemEventSignal(pState->hEventMoreRxDescAvail);
1593 }
1594}
1595
1596/**
1597 * Hardware reset. Revert all registers to initial values.
1598 *
1599 * @param pState The device state structure.
1600 */
1601static void e1kHardReset(E1KSTATE *pState)
1602{
1603 E1kLog(("%s Hard reset triggered\n", INSTANCE(pState)));
1604 memset(pState->auRegs, 0, sizeof(pState->auRegs));
1605 memset(pState->aRecAddr.au32, 0, sizeof(pState->aRecAddr.au32));
1606#ifdef E1K_INIT_RA0
1607 memcpy(pState->aRecAddr.au32, pState->macConfigured.au8,
1608 sizeof(pState->macConfigured.au8));
1609 pState->aRecAddr.array[0].ctl |= RA_CTL_AV;
1610#endif /* E1K_INIT_RA0 */
1611 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1612 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1613 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1614 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1615 Assert(GET_BITS(RCTL, BSIZE) == 0);
1616 pState->u16RxBSize = 2048;
1617
1618 /* Reset promiscuous mode */
1619 if (pState->pDrvR3)
1620 pState->pDrvR3->pfnSetPromiscuousMode(pState->pDrvR3, false);
1621
1622#ifdef E1K_WITH_TXD_CACHE
1623 int rc = e1kCsTxEnter(pState, VERR_SEM_BUSY);
1624 if (RT_LIKELY(rc == VINF_SUCCESS))
1625 {
1626 pState->nTxDFetched = 0;
1627 pState->iTxDCurrent = 0;
1628 pState->fGSO = false;
1629 pState->cbTxAlloc = 0;
1630 e1kCsTxLeave(pState);
1631 }
1632#endif /* E1K_WITH_TXD_CACHE */
1633#ifdef E1K_WITH_RXD_CACHE
1634 if (RT_LIKELY(e1kCsRxEnter(pState, VERR_SEM_BUSY) == VINF_SUCCESS))
1635 {
1636 pState->iRxDCurrent = pState->nRxDFetched = 0;
1637 e1kCsRxLeave(pState);
1638 }
1639#endif /* E1K_WITH_RXD_CACHE */
1640}
1641
1642#endif /* IN_RING3 */
1643
1644/**
1645 * Compute Internet checksum.
1646 *
1647 * @remarks Refer to http://www.netfor2.com/checksum.html for short intro.
1648 *
1649 * @param pState The device state structure.
1650 * @param cpPacket The packet.
1651 * @param cb The size of the packet.
1652 * @param cszText A string denoting direction of packet transfer.
1653 *
1654 * @return The 1's complement of the 1's complement sum.
1655 *
1656 * @thread E1000_TX
1657 */
1658static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1659{
1660 uint32_t csum = 0;
1661 uint16_t *pu16 = (uint16_t *)pvBuf;
1662
1663 while (cb > 1)
1664 {
1665 csum += *pu16++;
1666 cb -= 2;
1667 }
1668 if (cb)
1669 csum += *(uint8_t*)pu16;
1670 while (csum >> 16)
1671 csum = (csum >> 16) + (csum & 0xFFFF);
1672 return ~csum;
1673}
1674
1675/**
1676 * Dump a packet to debug log.
1677 *
1678 * @param pState The device state structure.
1679 * @param cpPacket The packet.
1680 * @param cb The size of the packet.
1681 * @param cszText A string denoting direction of packet transfer.
1682 * @thread E1000_TX
1683 */
1684DECLINLINE(void) e1kPacketDump(E1KSTATE* pState, const uint8_t *cpPacket, size_t cb, const char *cszText)
1685{
1686#ifdef DEBUG
1687 if (RT_LIKELY(e1kCsEnter(pState, VERR_SEM_BUSY) == VINF_SUCCESS))
1688 {
1689 E1kLog(("%s --- %s packet #%d: ---\n",
1690 INSTANCE(pState), cszText, ++pState->u32PktNo));
1691 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1692 e1kCsLeave(pState);
1693 }
1694#else
1695 if (RT_LIKELY(e1kCsEnter(pState, VERR_SEM_BUSY) == VINF_SUCCESS))
1696 {
1697 E1kLogRel(("E1000: %s packet #%d, seq=%x ack=%x\n", cszText, pState->u32PktNo++, ntohl(*(uint32_t*)(cpPacket+0x26)), ntohl(*(uint32_t*)(cpPacket+0x2A))));
1698 e1kCsLeave(pState);
1699 }
1700#endif
1701}
1702
1703/**
1704 * Determine the type of transmit descriptor.
1705 *
1706 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1707 *
1708 * @param pDesc Pointer to descriptor union.
1709 * @thread E1000_TX
1710 */
1711DECLINLINE(int) e1kGetDescType(E1KTXDESC* pDesc)
1712{
1713 if (pDesc->legacy.cmd.fDEXT)
1714 return pDesc->context.dw2.u4DTYP;
1715 return E1K_DTYP_LEGACY;
1716}
1717
1718/**
1719 * Dump receive descriptor to debug log.
1720 *
1721 * @param pState The device state structure.
1722 * @param pDesc Pointer to the descriptor.
1723 * @thread E1000_RX
1724 */
1725static void e1kPrintRDesc(E1KSTATE* pState, E1KRXDESC* pDesc)
1726{
1727 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", INSTANCE(pState), pDesc->u16Length));
1728 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1729 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1730 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1731 pDesc->status.fPIF ? "PIF" : "pif",
1732 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1733 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1734 pDesc->status.fVP ? "VP" : "vp",
1735 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1736 pDesc->status.fEOP ? "EOP" : "eop",
1737 pDesc->status.fDD ? "DD" : "dd",
1738 pDesc->status.fRXE ? "RXE" : "rxe",
1739 pDesc->status.fIPE ? "IPE" : "ipe",
1740 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1741 pDesc->status.fCE ? "CE" : "ce",
1742 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1743 E1K_SPEC_VLAN(pDesc->status.u16Special),
1744 E1K_SPEC_PRI(pDesc->status.u16Special)));
1745}
1746
1747/**
1748 * Dump transmit descriptor to debug log.
1749 *
1750 * @param pState The device state structure.
1751 * @param pDesc Pointer to descriptor union.
1752 * @param cszDir A string denoting direction of descriptor transfer
1753 * @thread E1000_TX
1754 */
1755static void e1kPrintTDesc(E1KSTATE* pState, E1KTXDESC* pDesc, const char* cszDir,
1756 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
1757{
1758 /*
1759 * Unfortunately we cannot use our format handler here, we want R0 logging
1760 * as well.
1761 */
1762 switch (e1kGetDescType(pDesc))
1763 {
1764 case E1K_DTYP_CONTEXT:
1765 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
1766 INSTANCE(pState), cszDir, cszDir));
1767 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
1768 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
1769 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
1770 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
1771 pDesc->context.dw2.fIDE ? " IDE":"",
1772 pDesc->context.dw2.fRS ? " RS" :"",
1773 pDesc->context.dw2.fTSE ? " TSE":"",
1774 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
1775 pDesc->context.dw2.fTCP ? "TCP":"UDP",
1776 pDesc->context.dw2.u20PAYLEN,
1777 pDesc->context.dw3.u8HDRLEN,
1778 pDesc->context.dw3.u16MSS,
1779 pDesc->context.dw3.fDD?"DD":""));
1780 break;
1781 case E1K_DTYP_DATA:
1782 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
1783 INSTANCE(pState), cszDir, pDesc->data.cmd.u20DTALEN, cszDir));
1784 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1785 pDesc->data.u64BufAddr,
1786 pDesc->data.cmd.u20DTALEN));
1787 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
1788 pDesc->data.cmd.fIDE ? " IDE" :"",
1789 pDesc->data.cmd.fVLE ? " VLE" :"",
1790 pDesc->data.cmd.fRPS ? " RPS" :"",
1791 pDesc->data.cmd.fRS ? " RS" :"",
1792 pDesc->data.cmd.fTSE ? " TSE" :"",
1793 pDesc->data.cmd.fIFCS? " IFCS":"",
1794 pDesc->data.cmd.fEOP ? " EOP" :"",
1795 pDesc->data.dw3.fDD ? " DD" :"",
1796 pDesc->data.dw3.fEC ? " EC" :"",
1797 pDesc->data.dw3.fLC ? " LC" :"",
1798 pDesc->data.dw3.fTXSM? " TXSM":"",
1799 pDesc->data.dw3.fIXSM? " IXSM":"",
1800 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
1801 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
1802 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
1803 break;
1804 case E1K_DTYP_LEGACY:
1805 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
1806 INSTANCE(pState), cszDir, pDesc->legacy.cmd.u16Length, cszDir));
1807 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1808 pDesc->data.u64BufAddr,
1809 pDesc->legacy.cmd.u16Length));
1810 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
1811 pDesc->legacy.cmd.fIDE ? " IDE" :"",
1812 pDesc->legacy.cmd.fVLE ? " VLE" :"",
1813 pDesc->legacy.cmd.fRPS ? " RPS" :"",
1814 pDesc->legacy.cmd.fRS ? " RS" :"",
1815 pDesc->legacy.cmd.fIC ? " IC" :"",
1816 pDesc->legacy.cmd.fIFCS? " IFCS":"",
1817 pDesc->legacy.cmd.fEOP ? " EOP" :"",
1818 pDesc->legacy.dw3.fDD ? " DD" :"",
1819 pDesc->legacy.dw3.fEC ? " EC" :"",
1820 pDesc->legacy.dw3.fLC ? " LC" :"",
1821 pDesc->legacy.cmd.u8CSO,
1822 pDesc->legacy.dw3.u8CSS,
1823 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
1824 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
1825 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
1826 break;
1827 default:
1828 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
1829 INSTANCE(pState), cszDir, cszDir));
1830 break;
1831 }
1832}
1833
1834/**
1835 * Raise interrupt if not masked.
1836 *
1837 * @param pState The device state structure.
1838 */
1839static int e1kRaiseInterrupt(E1KSTATE *pState, int rcBusy, uint32_t u32IntCause = 0)
1840{
1841 int rc = e1kCsEnter(pState, rcBusy);
1842 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1843 return rc;
1844
1845 E1K_INC_ISTAT_CNT(pState->uStatIntTry);
1846 ICR |= u32IntCause;
1847 if (ICR & IMS)
1848 {
1849#if 0
1850 if (pState->fDelayInts)
1851 {
1852 E1K_INC_ISTAT_CNT(pState->uStatIntDly);
1853 pState->iStatIntLostOne = 1;
1854 E1kLog2(("%s e1kRaiseInterrupt: Delayed. ICR=%08x\n",
1855 INSTANCE(pState), ICR));
1856#define E1K_LOST_IRQ_THRSLD 20
1857//#define E1K_LOST_IRQ_THRSLD 200000000
1858 if (pState->iStatIntLost >= E1K_LOST_IRQ_THRSLD)
1859 {
1860 E1kLog2(("%s WARNING! Disabling delayed interrupt logic: delayed=%d, delivered=%d\n",
1861 INSTANCE(pState), pState->uStatIntDly, pState->uStatIntLate));
1862 pState->fIntMaskUsed = false;
1863 pState->uStatDisDly++;
1864 }
1865 }
1866 else
1867#endif
1868 if (pState->fIntRaised)
1869 {
1870 E1K_INC_ISTAT_CNT(pState->uStatIntSkip);
1871 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
1872 INSTANCE(pState), ICR & IMS));
1873 }
1874 else
1875 {
1876#ifdef E1K_ITR_ENABLED
1877 uint64_t tstamp = TMTimerGet(pState->CTX_SUFF(pIntTimer));
1878 /* interrupts/sec = 1 / (256 * 10E-9 * ITR) */
1879 E1kLog2(("%s e1kRaiseInterrupt: tstamp - pState->u64AckedAt = %d, ITR * 256 = %d\n",
1880 INSTANCE(pState), (uint32_t)(tstamp - pState->u64AckedAt), ITR * 256));
1881 //if (!!ITR && pState->fIntMaskUsed && tstamp - pState->u64AckedAt < ITR * 256)
1882 if (!!ITR && tstamp - pState->u64AckedAt < ITR * 256 && !(ICR & ICR_RXT0))
1883 {
1884 E1K_INC_ISTAT_CNT(pState->uStatIntEarly);
1885 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
1886 INSTANCE(pState), (uint32_t)(tstamp - pState->u64AckedAt), ITR * 256));
1887 }
1888 else
1889#endif
1890 {
1891
1892 /* Since we are delivering the interrupt now
1893 * there is no need to do it later -- stop the timer.
1894 */
1895 TMTimerStop(pState->CTX_SUFF(pIntTimer));
1896 E1K_INC_ISTAT_CNT(pState->uStatInt);
1897 STAM_COUNTER_INC(&pState->StatIntsRaised);
1898 /* Got at least one unmasked interrupt cause */
1899 pState->fIntRaised = true;
1900 /* Raise(1) INTA(0) */
1901 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
1902 PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 1);
1903 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
1904 INSTANCE(pState), ICR & IMS));
1905 }
1906 }
1907 }
1908 else
1909 {
1910 E1K_INC_ISTAT_CNT(pState->uStatIntMasked);
1911 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
1912 INSTANCE(pState), ICR, IMS));
1913 }
1914 e1kCsLeave(pState);
1915 return VINF_SUCCESS;
1916}
1917
1918/**
1919 * Compute the physical address of the descriptor.
1920 *
1921 * @returns the physical address of the descriptor.
1922 *
1923 * @param baseHigh High-order 32 bits of descriptor table address.
1924 * @param baseLow Low-order 32 bits of descriptor table address.
1925 * @param idxDesc The descriptor index in the table.
1926 */
1927DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
1928{
1929 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
1930 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
1931}
1932
1933/**
1934 * Advance the head pointer of the receive descriptor queue.
1935 *
1936 * @remarks RDH always points to the next available RX descriptor.
1937 *
1938 * @param pState The device state structure.
1939 */
1940DECLINLINE(void) e1kAdvanceRDH(E1KSTATE *pState)
1941{
1942 Assert(e1kCsRxIsOwner(pState));
1943 //e1kCsEnter(pState, RT_SRC_POS);
1944 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
1945 RDH = 0;
1946 /*
1947 * Compute current receive queue length and fire RXDMT0 interrupt
1948 * if we are low on receive buffers
1949 */
1950 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
1951 /*
1952 * The minimum threshold is controlled by RDMTS bits of RCTL:
1953 * 00 = 1/2 of RDLEN
1954 * 01 = 1/4 of RDLEN
1955 * 10 = 1/8 of RDLEN
1956 * 11 = reserved
1957 */
1958 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
1959 if (uRQueueLen <= uMinRQThreshold)
1960 {
1961 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
1962 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
1963 INSTANCE(pState), RDH, RDT, uRQueueLen, uMinRQThreshold));
1964 E1K_INC_ISTAT_CNT(pState->uStatIntRXDMT0);
1965 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXDMT0);
1966 }
1967 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
1968 INSTANCE(pState), RDH, RDT, uRQueueLen));
1969 //e1kCsLeave(pState);
1970}
1971
1972#ifdef E1K_WITH_RXD_CACHE
1973/**
1974 * Return the number of RX descriptor that belong to the hardware.
1975 *
1976 * @returns the number of available descriptors in RX ring.
1977 * @param pState The device state structure.
1978 * @thread ???
1979 */
1980DECLINLINE(uint32_t) e1kGetRxLen(E1KSTATE* pState)
1981{
1982 /**
1983 * Make sure RDT won't change during computation. EMT may modify RDT at
1984 * any moment.
1985 */
1986 uint32_t rdt = RDT;
1987 return (RDH > rdt ? RDLEN/sizeof(E1KRXDESC) : 0) + rdt - RDH;
1988}
1989
1990DECLINLINE(unsigned) e1kRxDInCache(E1KSTATE* pState)
1991{
1992 return pState->nRxDFetched > pState->iRxDCurrent ?
1993 pState->nRxDFetched - pState->iRxDCurrent : 0;
1994}
1995
1996DECLINLINE(unsigned) e1kRxDIsCacheEmpty(E1KSTATE* pState)
1997{
1998 return pState->iRxDCurrent >= pState->nRxDFetched;
1999}
2000
2001/**
2002 * Load receive descriptors from guest memory. The caller needs to be in Rx
2003 * critical section.
2004 *
2005 * We need two physical reads in case the tail wrapped around the end of RX
2006 * descriptor ring.
2007 *
2008 * @returns the actual number of descriptors fetched.
2009 * @param pState The device state structure.
2010 * @param pDesc Pointer to descriptor union.
2011 * @param addr Physical address in guest context.
2012 * @thread EMT, RX
2013 */
2014DECLINLINE(unsigned) e1kRxDPrefetch(E1KSTATE* pState)
2015{
2016 /* We've already loaded pState->nRxDFetched descriptors past RDH. */
2017 unsigned nDescsAvailable = e1kGetRxLen(pState) - e1kRxDInCache(pState);
2018 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pState->nRxDFetched);
2019 unsigned nDescsTotal = RDLEN / sizeof(E1KRXDESC);
2020 Assert(nDescsTotal != 0);
2021 if (nDescsTotal == 0)
2022 return 0;
2023 unsigned nFirstNotLoaded = (RDH + e1kRxDInCache(pState)) % nDescsTotal;
2024 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
2025 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
2026 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
2027 INSTANCE(pState), nDescsAvailable, nDescsToFetch, nDescsTotal,
2028 nFirstNotLoaded, nDescsInSingleRead));
2029 if (nDescsToFetch == 0)
2030 return 0;
2031 E1KRXDESC* pFirstEmptyDesc = &pState->aRxDescriptors[pState->nRxDFetched];
2032 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
2033 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
2034 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
2035 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
2036 // unsigned i, j;
2037 // for (i = pState->nRxDFetched; i < pState->nRxDFetched + nDescsInSingleRead; ++i)
2038 // {
2039 // pState->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pState->nRxDFetched) * sizeof(E1KRXDESC);
2040 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", INSTANCE(pState), i, pState->aRxDescAddr[i]));
2041 // }
2042 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
2043 INSTANCE(pState), nDescsInSingleRead,
2044 RDBAH, RDBAL + RDH * sizeof(E1KRXDESC),
2045 nFirstNotLoaded, RDLEN, RDH, RDT));
2046 if (nDescsToFetch > nDescsInSingleRead)
2047 {
2048 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
2049 ((uint64_t)RDBAH << 32) + RDBAL,
2050 pFirstEmptyDesc + nDescsInSingleRead,
2051 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
2052 // Assert(i == pState->nRxDFetched + nDescsInSingleRead);
2053 // for (j = 0; i < pState->nRxDFetched + nDescsToFetch; ++i, ++j)
2054 // {
2055 // pState->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
2056 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", INSTANCE(pState), i, pState->aRxDescAddr[i]));
2057 // }
2058 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
2059 INSTANCE(pState), nDescsToFetch - nDescsInSingleRead,
2060 RDBAH, RDBAL));
2061 }
2062 pState->nRxDFetched += nDescsToFetch;
2063 return nDescsToFetch;
2064}
2065
2066/**
2067 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2068 * RX ring if the cache is empty.
2069 *
2070 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2071 * go out of sync with RDH which will cause trouble when EMT checks if the
2072 * cache is empty to do pre-fetch @bugref(6217).
2073 *
2074 * @param pState The device state structure.
2075 * @thread RX
2076 */
2077DECLINLINE(E1KRXDESC*) e1kRxDGet(E1KSTATE* pState)
2078{
2079 Assert(e1kCsRxIsOwner(pState));
2080 /* Check the cache first. */
2081 if (pState->iRxDCurrent < pState->nRxDFetched)
2082 return &pState->aRxDescriptors[pState->iRxDCurrent];
2083 /* Cache is empty, reset it and check if we can fetch more. */
2084 pState->iRxDCurrent = pState->nRxDFetched = 0;
2085 if (e1kRxDPrefetch(pState))
2086 return &pState->aRxDescriptors[pState->iRxDCurrent];
2087 /* Out of Rx descriptors. */
2088 return NULL;
2089}
2090
2091/**
2092 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2093 * pointer. The descriptor gets written back to the RXD ring.
2094 *
2095 * @param pState The device state structure.
2096 * @param pDesc The descriptor being "returned" to the RX ring.
2097 * @thread RX
2098 */
2099DECLINLINE(void) e1kRxDPut(E1KSTATE* pState, E1KRXDESC* pDesc)
2100{
2101 Assert(e1kCsRxIsOwner(pState));
2102 pState->iRxDCurrent++;
2103 // Assert(pDesc >= pState->aRxDescriptors);
2104 // Assert(pDesc < pState->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2105 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2106 // uint32_t rdh = RDH;
2107 // Assert(pState->aRxDescAddr[pDesc - pState->aRxDescriptors] == addr);
2108 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns),
2109 e1kDescAddr(RDBAH, RDBAL, RDH),
2110 pDesc, sizeof(E1KRXDESC));
2111 e1kAdvanceRDH(pState);
2112 e1kPrintRDesc(pState, pDesc);
2113}
2114
2115/**
2116 * Store a fragment of received packet at the specifed address.
2117 *
2118 * @param pState The device state structure.
2119 * @param pDesc The next available RX descriptor.
2120 * @param pvBuf The fragment.
2121 * @param cb The size of the fragment.
2122 */
2123static DECLCALLBACK(void) e1kStoreRxFragment(E1KSTATE *pState, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2124{
2125 STAM_PROFILE_ADV_START(&pState->StatReceiveStore, a);
2126 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2127 INSTANCE(pState), cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2128 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2129 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2130 STAM_PROFILE_ADV_STOP(&pState->StatReceiveStore, a);
2131}
2132
2133#else /* !E1K_WITH_RXD_CACHE */
2134
2135/**
2136 * Store a fragment of received packet that fits into the next available RX
2137 * buffer.
2138 *
2139 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2140 *
2141 * @param pState The device state structure.
2142 * @param pDesc The next available RX descriptor.
2143 * @param pvBuf The fragment.
2144 * @param cb The size of the fragment.
2145 */
2146static DECLCALLBACK(void) e1kStoreRxFragment(E1KSTATE *pState, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2147{
2148 STAM_PROFILE_ADV_START(&pState->StatReceiveStore, a);
2149 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pState->szInstance, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2150 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2151 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2152 /* Write back the descriptor */
2153 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2154 e1kPrintRDesc(pState, pDesc);
2155 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2156 /* Advance head */
2157 e1kAdvanceRDH(pState);
2158 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", INSTANCE(pState), pDesc->fEOP, RDTR, RADV));
2159 if (pDesc->status.fEOP)
2160 {
2161 /* Complete packet has been stored -- it is time to let the guest know. */
2162#ifdef E1K_USE_RX_TIMERS
2163 if (RDTR)
2164 {
2165 /* Arm the timer to fire in RDTR usec (discard .024) */
2166 e1kArmTimer(pState, pState->CTX_SUFF(pRIDTimer), RDTR);
2167 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2168 if (RADV != 0 && !TMTimerIsActive(pState->CTX_SUFF(pRADTimer)))
2169 e1kArmTimer(pState, pState->CTX_SUFF(pRADTimer), RADV);
2170 }
2171 else
2172 {
2173#endif
2174 /* 0 delay means immediate interrupt */
2175 E1K_INC_ISTAT_CNT(pState->uStatIntRx);
2176 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXT0);
2177#ifdef E1K_USE_RX_TIMERS
2178 }
2179#endif
2180 }
2181 STAM_PROFILE_ADV_STOP(&pState->StatReceiveStore, a);
2182}
2183#endif /* !E1K_WITH_RXD_CACHE */
2184
2185/**
2186 * Returns true if it is a broadcast packet.
2187 *
2188 * @returns true if destination address indicates broadcast.
2189 * @param pvBuf The ethernet packet.
2190 */
2191DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2192{
2193 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2194 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2195}
2196
2197/**
2198 * Returns true if it is a multicast packet.
2199 *
2200 * @remarks returns true for broadcast packets as well.
2201 * @returns true if destination address indicates multicast.
2202 * @param pvBuf The ethernet packet.
2203 */
2204DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2205{
2206 return (*(char*)pvBuf) & 1;
2207}
2208
2209/**
2210 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2211 *
2212 * @remarks We emulate checksum offloading for major packets types only.
2213 *
2214 * @returns VBox status code.
2215 * @param pState The device state structure.
2216 * @param pFrame The available data.
2217 * @param cb Number of bytes available in the buffer.
2218 * @param status Bit fields containing status info.
2219 */
2220static int e1kRxChecksumOffload(E1KSTATE* pState, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2221{
2222 /** @todo
2223 * It is not safe to bypass checksum verification for packets coming
2224 * from real wire. We currently unable to tell where packets are
2225 * coming from so we tell the driver to ignore our checksum flags
2226 * and do verification in software.
2227 */
2228#if 0
2229 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2230
2231 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", INSTANCE(pState), uEtherType));
2232
2233 switch (uEtherType)
2234 {
2235 case 0x800: /* IPv4 */
2236 {
2237 pStatus->fIXSM = false;
2238 pStatus->fIPCS = true;
2239 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2240 /* TCP/UDP checksum offloading works with TCP and UDP only */
2241 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2242 break;
2243 }
2244 case 0x86DD: /* IPv6 */
2245 pStatus->fIXSM = false;
2246 pStatus->fIPCS = false;
2247 pStatus->fTCPCS = true;
2248 break;
2249 default: /* ARP, VLAN, etc. */
2250 pStatus->fIXSM = true;
2251 break;
2252 }
2253#else
2254 pStatus->fIXSM = true;
2255#endif
2256 return VINF_SUCCESS;
2257}
2258
2259/**
2260 * Pad and store received packet.
2261 *
2262 * @remarks Make sure that the packet appears to upper layer as one coming
2263 * from real Ethernet: pad it and insert FCS.
2264 *
2265 * @returns VBox status code.
2266 * @param pState The device state structure.
2267 * @param pvBuf The available data.
2268 * @param cb Number of bytes available in the buffer.
2269 * @param status Bit fields containing status info.
2270 */
2271static int e1kHandleRxPacket(E1KSTATE* pState, const void *pvBuf, size_t cb, E1KRXDST status)
2272{
2273#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2274 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2275 uint8_t *ptr = rxPacket;
2276
2277 int rc = e1kCsRxEnter(pState, VERR_SEM_BUSY);
2278 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2279 return rc;
2280
2281 if (cb > 70) /* unqualified guess */
2282 pState->led.Asserted.s.fReading = pState->led.Actual.s.fReading = 1;
2283
2284 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2285 Assert(cb > 16);
2286 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2287 E1kLog3(("%s Max RX packet size is %u\n", INSTANCE(pState), cbMax));
2288 if (status.fVP)
2289 {
2290 /* VLAN packet -- strip VLAN tag in VLAN mode */
2291 if ((CTRL & CTRL_VME) && cb > 16)
2292 {
2293 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2294 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2295 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2296 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2297 cb -= 4;
2298 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2299 INSTANCE(pState), status.u16Special, cb));
2300 }
2301 else
2302 status.fVP = false; /* Set VP only if we stripped the tag */
2303 }
2304 else
2305 memcpy(rxPacket, pvBuf, cb);
2306 /* Pad short packets */
2307 if (cb < 60)
2308 {
2309 memset(rxPacket + cb, 0, 60 - cb);
2310 cb = 60;
2311 }
2312 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2313 {
2314 STAM_PROFILE_ADV_START(&pState->StatReceiveCRC, a);
2315 /*
2316 * Add FCS if CRC stripping is not enabled. Since the value of CRC
2317 * is ignored by most of drivers we may as well save us the trouble
2318 * of calculating it (see EthernetCRC CFGM parameter).
2319 */
2320 if (pState->fEthernetCRC)
2321 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2322 cb += sizeof(uint32_t);
2323 STAM_PROFILE_ADV_STOP(&pState->StatReceiveCRC, a);
2324 E1kLog3(("%s Added FCS (cb=%u)\n", INSTANCE(pState), cb));
2325 }
2326 /* Compute checksum of complete packet */
2327 uint16_t checksum = e1kCSum16(rxPacket + GET_BITS(RXCSUM, PCSS), cb);
2328 e1kRxChecksumOffload(pState, rxPacket, cb, &status);
2329
2330 /* Update stats */
2331 E1K_INC_CNT32(GPRC);
2332 if (e1kIsBroadcast(pvBuf))
2333 E1K_INC_CNT32(BPRC);
2334 else if (e1kIsMulticast(pvBuf))
2335 E1K_INC_CNT32(MPRC);
2336 /* Update octet receive counter */
2337 E1K_ADD_CNT64(GORCL, GORCH, cb);
2338 STAM_REL_COUNTER_ADD(&pState->StatReceiveBytes, cb);
2339 if (cb == 64)
2340 E1K_INC_CNT32(PRC64);
2341 else if (cb < 128)
2342 E1K_INC_CNT32(PRC127);
2343 else if (cb < 256)
2344 E1K_INC_CNT32(PRC255);
2345 else if (cb < 512)
2346 E1K_INC_CNT32(PRC511);
2347 else if (cb < 1024)
2348 E1K_INC_CNT32(PRC1023);
2349 else
2350 E1K_INC_CNT32(PRC1522);
2351
2352 E1K_INC_ISTAT_CNT(pState->uStatRxFrm);
2353
2354#ifdef E1K_WITH_RXD_CACHE
2355 while (cb > 0)
2356 {
2357 E1KRXDESC *pDesc = e1kRxDGet(pState);
2358
2359 if (pDesc == NULL)
2360 {
2361 E1kLog(("%s Out of receive buffers, dropping the packet "
2362 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2363 INSTANCE(pState), cb, e1kRxDInCache(pState), RDH, RDT));
2364 break;
2365 }
2366#else /* !E1K_WITH_RXD_CACHE */
2367 if (RDH == RDT)
2368 {
2369 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2370 INSTANCE(pState)));
2371 }
2372 /* Store the packet to receive buffers */
2373 while (RDH != RDT)
2374 {
2375 /* Load the descriptor pointed by head */
2376 E1KRXDESC desc, *pDesc = &desc;
2377 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
2378 &desc, sizeof(desc));
2379#endif /* !E1K_WITH_RXD_CACHE */
2380 if (pDesc->u64BufAddr)
2381 {
2382 /* Update descriptor */
2383 pDesc->status = status;
2384 pDesc->u16Checksum = checksum;
2385 pDesc->status.fDD = true;
2386
2387 /*
2388 * We need to leave Rx critical section here or we risk deadlocking
2389 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2390 * page or has an access handler associated with it.
2391 * Note that it is safe to leave the critical section here since
2392 * e1kRegWriteRDT() never modifies RDH. It never touches already
2393 * fetched RxD cache entries either.
2394 */
2395 if (cb > pState->u16RxBSize)
2396 {
2397 pDesc->status.fEOP = false;
2398 e1kCsRxLeave(pState);
2399 e1kStoreRxFragment(pState, pDesc, ptr, pState->u16RxBSize);
2400 rc = e1kCsRxEnter(pState, VERR_SEM_BUSY);
2401 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2402 return rc;
2403 ptr += pState->u16RxBSize;
2404 cb -= pState->u16RxBSize;
2405 }
2406 else
2407 {
2408 pDesc->status.fEOP = true;
2409 e1kCsRxLeave(pState);
2410 e1kStoreRxFragment(pState, pDesc, ptr, cb);
2411#ifdef E1K_WITH_RXD_CACHE
2412 rc = e1kCsRxEnter(pState, VERR_SEM_BUSY);
2413 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2414 return rc;
2415 cb = 0;
2416#else /* !E1K_WITH_RXD_CACHE */
2417 pState->led.Actual.s.fReading = 0;
2418 return VINF_SUCCESS;
2419#endif /* !E1K_WITH_RXD_CACHE */
2420 }
2421 /*
2422 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2423 * is not defined.
2424 */
2425 }
2426#ifndef E1K_WITH_RXD_CACHE
2427 else
2428 {
2429#endif /* !E1K_WITH_RXD_CACHE */
2430 /* Write back the descriptor. */
2431 pDesc->status.fDD = true;
2432 e1kRxDPut(pState, pDesc);
2433#ifndef E1K_WITH_RXD_CACHE
2434 }
2435#endif /* !E1K_WITH_RXD_CACHE */
2436 }
2437
2438 if (cb > 0)
2439 E1kLog(("%s Out of receive buffers, dropping %u bytes", INSTANCE(pState), cb));
2440
2441 pState->led.Actual.s.fReading = 0;
2442
2443 e1kCsRxLeave(pState);
2444#ifdef E1K_WITH_RXD_CACHE
2445 /* Complete packet has been stored -- it is time to let the guest know. */
2446# ifdef E1K_USE_RX_TIMERS
2447 if (RDTR)
2448 {
2449 /* Arm the timer to fire in RDTR usec (discard .024) */
2450 e1kArmTimer(pState, pState->CTX_SUFF(pRIDTimer), RDTR);
2451 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2452 if (RADV != 0 && !TMTimerIsActive(pState->CTX_SUFF(pRADTimer)))
2453 e1kArmTimer(pState, pState->CTX_SUFF(pRADTimer), RADV);
2454 }
2455 else
2456 {
2457# endif /* E1K_USE_RX_TIMERS */
2458 /* 0 delay means immediate interrupt */
2459 E1K_INC_ISTAT_CNT(pState->uStatIntRx);
2460 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXT0);
2461# ifdef E1K_USE_RX_TIMERS
2462 }
2463# endif /* E1K_USE_RX_TIMERS */
2464#endif /* E1K_WITH_RXD_CACHE */
2465
2466 return VINF_SUCCESS;
2467#else
2468 return VERR_INTERNAL_ERROR_2;
2469#endif
2470}
2471
2472
2473/**
2474 * Bring the link up after the configured delay, 5 seconds by default.
2475 *
2476 * @param pState The device state structure.
2477 * @thread any
2478 */
2479DECLINLINE(void) e1kBringLinkUpDelayed(E1KSTATE* pState)
2480{
2481 E1kLog(("%s Will bring up the link in %d seconds...\n",
2482 INSTANCE(pState), pState->cMsLinkUpDelay / 1000));
2483 e1kArmTimer(pState, pState->CTX_SUFF(pLUTimer), pState->cMsLinkUpDelay * 1000);
2484}
2485
#if 0 /* unused */
/**
 * Read handler for Device Control register.
 *
 * When the MDIO direction bit is clear (MDIO used as input) and MDC is high,
 * the PHY's MDIO output is merged into the returned CTRL value; otherwise the
 * stored CTRL value is returned unchanged.
 *
 * @returns VBox status code.
 *
 * @param   pState      The device state structure.
 * @param   offset      Register offset in memory-mapped frame.
 * @param   index       Register index in register array.
 * @param   mask        Used to implement partial reads (8 and 16-bit).
 */
static int e1kRegReadCTRL(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
{
    E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
            INSTANCE(pState), (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
            (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
    if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
    {
        /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
        if (Phy::readMDIO(&pState->phy))
            *pu32Value = CTRL | CTRL_MDIO;
        else
            *pu32Value = CTRL & ~CTRL_MDIO;
        E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
                INSTANCE(pState), !!(*pu32Value & CTRL_MDIO)));
    }
    else
    {
        /* MDIO pin is used for output, ignore it */
        *pu32Value = CTRL;
    }
    return VINF_SUCCESS;
}
#endif /* unused */
2522
/**
 * Write handler for Device Control register.
 *
 * Handles device reset (RST), delayed link bring-up on "set link up" (SLU),
 * VLAN-mode logging, and software MDIO bit-banging access to the PHY
 * management interface.
 *
 * @param   pState      The device state structure.
 * @param   offset      Register offset in memory-mapped frame.
 * @param   index       Register index in register array.
 * @param   value       The value to store.
 * @param   mask        Used to implement partial writes (8 and 16-bit).
 * @thread  EMT
 */
static int e1kRegWriteCTRL(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
{
    int rc = VINF_SUCCESS;

    if (value & CTRL_RESET)
    { /* RST */
#ifndef IN_RING3
        /* The full device reset must be performed in ring-3.
         * NOTE(review): an I/O-port status code is returned from what may also
         * be an MMIO access path -- confirm IOM treats these interchangeably. */
        return VINF_IOM_R3_IOPORT_WRITE;
#else
        e1kHardReset(pState);
#endif
    }
    else
    {
        if (   (value & CTRL_SLU)
            && pState->fCableConnected
            && !(STATUS & STATUS_LU))
        {
            /* The driver indicates that we should bring up the link */
            /* Do so in 5 seconds (by default). */
            e1kBringLinkUpDelayed(pState);
            /*
             * Change the status (but not PHY status) anyway as Windows expects
             * it for 82543GC.
             */
            STATUS |= STATUS_LU;
        }
        if (value & CTRL_VME)
        {
            E1kLog(("%s VLAN Mode Enabled\n", INSTANCE(pState)));
        }
        E1kLog(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
                INSTANCE(pState), (value & CTRL_MDIO_DIR)?"OUT":"IN ",
                (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
        /* MDIO is only sampled/driven while the management clock (MDC) is high. */
        if (value & CTRL_MDC)
        {
            if (value & CTRL_MDIO_DIR)
            {
                E1kLog(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", INSTANCE(pState), !!(value & CTRL_MDIO)));
                /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
                Phy::writeMDIO(&pState->phy, !!(value & CTRL_MDIO));
            }
            else
            {
                /* MDIO is an input: reflect the PHY's MDIO output in the stored value. */
                if (Phy::readMDIO(&pState->phy))
                    value |= CTRL_MDIO;
                else
                    value &= ~CTRL_MDIO;
                E1kLog(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n",
                        INSTANCE(pState), !!(value & CTRL_MDIO)));
            }
        }
        rc = e1kRegWriteDefault(pState, offset, index, value);
    }

    return rc;
}
2592
2593/**
2594 * Write handler for EEPROM/Flash Control/Data register.
2595 *
2596 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2597 *
2598 * @param pState The device state structure.
2599 * @param offset Register offset in memory-mapped frame.
2600 * @param index Register index in register array.
2601 * @param value The value to store.
2602 * @param mask Used to implement partial writes (8 and 16-bit).
2603 * @thread EMT
2604 */
2605static int e1kRegWriteEECD(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2606{
2607#ifdef IN_RING3
2608 /* So far we are concerned with lower byte only */
2609 if ((EECD & EECD_EE_GNT) || pState->eChip == E1K_CHIP_82543GC)
2610 {
2611 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2612 /* Note: 82543GC does not need to request EEPROM access */
2613 STAM_PROFILE_ADV_START(&pState->StatEEPROMWrite, a);
2614 pState->eeprom.write(value & EECD_EE_WIRES);
2615 STAM_PROFILE_ADV_STOP(&pState->StatEEPROMWrite, a);
2616 }
2617 if (value & EECD_EE_REQ)
2618 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2619 else
2620 EECD &= ~EECD_EE_GNT;
2621 //e1kRegWriteDefault(pState, offset, index, value );
2622
2623 return VINF_SUCCESS;
2624#else /* !IN_RING3 */
2625 return VINF_IOM_R3_MMIO_WRITE;
2626#endif /* !IN_RING3 */
2627}
2628
2629/**
2630 * Read handler for EEPROM/Flash Control/Data register.
2631 *
2632 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2633 *
2634 * @returns VBox status code.
2635 *
2636 * @param pState The device state structure.
2637 * @param offset Register offset in memory-mapped frame.
2638 * @param index Register index in register array.
2639 * @param mask Used to implement partial reads (8 and 16-bit).
2640 * @thread EMT
2641 */
2642static int e1kRegReadEECD(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2643{
2644#ifdef IN_RING3
2645 uint32_t value;
2646 int rc = e1kRegReadDefault(pState, offset, index, &value);
2647 if (RT_SUCCESS(rc))
2648 {
2649 if ((value & EECD_EE_GNT) || pState->eChip == E1K_CHIP_82543GC)
2650 {
2651 /* Note: 82543GC does not need to request EEPROM access */
2652 /* Access to EEPROM granted -- get 4-wire bits to EEPROM device */
2653 STAM_PROFILE_ADV_START(&pState->StatEEPROMRead, a);
2654 value |= pState->eeprom.read();
2655 STAM_PROFILE_ADV_STOP(&pState->StatEEPROMRead, a);
2656 }
2657 *pu32Value = value;
2658 }
2659
2660 return rc;
2661#else /* !IN_RING3 */
2662 return VINF_IOM_R3_MMIO_READ;
2663#endif /* !IN_RING3 */
2664}
2665
2666/**
2667 * Write handler for EEPROM Read register.
2668 *
2669 * Handles EEPROM word access requests, reads EEPROM and stores the result
2670 * into DATA field.
2671 *
2672 * @param pState The device state structure.
2673 * @param offset Register offset in memory-mapped frame.
2674 * @param index Register index in register array.
2675 * @param value The value to store.
2676 * @param mask Used to implement partial writes (8 and 16-bit).
2677 * @thread EMT
2678 */
2679static int e1kRegWriteEERD(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2680{
2681#ifdef IN_RING3
2682 /* Make use of 'writable' and 'readable' masks. */
2683 e1kRegWriteDefault(pState, offset, index, value);
2684 /* DONE and DATA are set only if read was triggered by START. */
2685 if (value & EERD_START)
2686 {
2687 uint16_t tmp;
2688 STAM_PROFILE_ADV_START(&pState->StatEEPROMRead, a);
2689 if (pState->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2690 SET_BITS(EERD, DATA, tmp);
2691 EERD |= EERD_DONE;
2692 STAM_PROFILE_ADV_STOP(&pState->StatEEPROMRead, a);
2693 }
2694
2695 return VINF_SUCCESS;
2696#else /* !IN_RING3 */
2697 return VINF_IOM_R3_MMIO_WRITE;
2698#endif /* !IN_RING3 */
2699}
2700
2701
/**
 * Write handler for MDI Control register.
 *
 * Handles PHY read/write requests; forwards requests to internal PHY device.
 * Only PHY address 1 (the internal PHY) is accepted and interrupt-on-completion
 * is not implemented. On an invalid request the access is logged and ignored,
 * and the READY bit is left clear.
 *
 * @param   pState      The device state structure.
 * @param   offset      Register offset in memory-mapped frame.
 * @param   index       Register index in register array.
 * @param   value       The value to store.
 * @param   mask        Used to implement partial writes (8 and 16-bit).
 * @thread  EMT
 */
static int e1kRegWriteMDIC(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
{
    if (value & MDIC_INT_EN)
    {
        E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
                INSTANCE(pState)));
    }
    else if (value & MDIC_READY)
    {
        /* Software must clear READY when starting an operation. */
        E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
                INSTANCE(pState)));
    }
    else if (GET_BITS_V(value, MDIC, PHY) != 1)
    {
        /* Only the internal PHY (address 1) exists. */
        E1kLog(("%s ERROR! Access to invalid PHY detected, phy=%d.\n",
                INSTANCE(pState), GET_BITS_V(value, MDIC, PHY)));
    }
    else
    {
        /* Store the value */
        e1kRegWriteDefault(pState, offset, index, value);
        STAM_COUNTER_INC(&pState->StatPHYAccesses);
        /* Forward op to PHY; the emulated access completes synchronously. */
        if (value & MDIC_OP_READ)
            SET_BITS(MDIC, DATA, Phy::readRegister(&pState->phy, GET_BITS_V(value, MDIC, REG)));
        else
            Phy::writeRegister(&pState->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK);
        /* Let software know that we are done */
        MDIC |= MDIC_READY;
    }

    return VINF_SUCCESS;
}
2747
2748/**
2749 * Write handler for Interrupt Cause Read register.
2750 *
2751 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
2752 *
2753 * @param pState The device state structure.
2754 * @param offset Register offset in memory-mapped frame.
2755 * @param index Register index in register array.
2756 * @param value The value to store.
2757 * @param mask Used to implement partial writes (8 and 16-bit).
2758 * @thread EMT
2759 */
2760static int e1kRegWriteICR(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2761{
2762 ICR &= ~value;
2763
2764 return VINF_SUCCESS;
2765}
2766
/**
 * Read handler for Interrupt Cause Read register.
 *
 * Reading this register acknowledges all interrupts: the pending causes are
 * returned, ICR is cleared and the PCI interrupt line is lowered.
 *
 * @returns VBox status code.
 *
 * @param   pState      The device state structure.
 * @param   offset      Register offset in memory-mapped frame.
 * @param   index       Register index in register array.
 * @param   mask        Not used.
 * @thread  EMT
 */
static int e1kRegReadICR(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
{
    /* Serialize against interrupt raising/lowering elsewhere. */
    int rc = e1kCsEnter(pState, VINF_IOM_R3_MMIO_READ);
    if (RT_UNLIKELY(rc != VINF_SUCCESS))
        return rc;

    uint32_t value = 0;
    rc = e1kRegReadDefault(pState, offset, index, &value);
    if (RT_SUCCESS(rc))
    {
        if (value)
        {
            /*
             * Not clearing ICR causes QNX to hang as it reads ICR in a loop
             * with disabled interrupts.
             */
            //if (IMS)
            if (1)   /* Always auto-clear on read -- see the QNX note above. */
            {
                /*
                 * Interrupts were enabled -- we are supposedly at the very
                 * beginning of interrupt handler
                 */
                E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
                E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", INSTANCE(pState), ICR));
                /* Clear all pending interrupts */
                ICR = 0;
                pState->fIntRaised = false;
                /* Lower(0) INTA(0) */
                PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 0);

                /* Remember the acknowledge time; used by the interrupt delay logic. */
                pState->u64AckedAt = TMTimerGet(pState->CTX_SUFF(pIntTimer));
                if (pState->fIntMaskUsed)
                    pState->fDelayInts = true;
            }
            else
            {
                /*
                 * Interrupts are disabled -- in windows guests ICR read is done
                 * just before re-enabling interrupts
                 */
                E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", INSTANCE(pState), ICR));
            }
        }
        *pu32Value = value;
    }
    e1kCsLeave(pState);

    return rc;
}
2830
2831/**
2832 * Write handler for Interrupt Cause Set register.
2833 *
2834 * Bits corresponding to 1s in 'value' will be set in ICR register.
2835 *
2836 * @param pState The device state structure.
2837 * @param offset Register offset in memory-mapped frame.
2838 * @param index Register index in register array.
2839 * @param value The value to store.
2840 * @param mask Used to implement partial writes (8 and 16-bit).
2841 * @thread EMT
2842 */
2843static int e1kRegWriteICS(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2844{
2845 E1K_INC_ISTAT_CNT(pState->uStatIntICS);
2846 return e1kRaiseInterrupt(pState, VINF_IOM_R3_MMIO_WRITE, value & s_e1kRegMap[ICS_IDX].writable);
2847}
2848
/**
 * Write handler for Interrupt Mask Set register.
 *
 * Will trigger pending interrupts: if any newly unmasked cause is pending in
 * ICR, delivery is scheduled via the late-interrupt timer rather than raised
 * immediately.
 *
 * @param   pState      The device state structure.
 * @param   offset      Register offset in memory-mapped frame.
 * @param   index       Register index in register array.
 * @param   value       The value to store.
 * @param   mask        Used to implement partial writes (8 and 16-bit).
 * @thread  EMT
 */
static int e1kRegWriteIMS(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
{
    IMS |= value;
    E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
    E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", INSTANCE(pState)));
    /* Mask changes, we need to raise pending interrupts. */
    if ((ICR & IMS) && !pState->fLocked)
    {
        E1kLog2(("%s e1kRegWriteIMS: IRQ pending (%08x), arming late int timer...\n",
                 INSTANCE(pState), ICR));
        /* Raising an interrupt immediately causes win7 to hang upon NIC reconfiguration, see @bugref{5023}. */
        /* Instead, postpone delivery by ITR * 256 ns via the late-interrupt timer. */
        TMTimerSet(pState->CTX_SUFF(pIntTimer), TMTimerFromNano(pState->CTX_SUFF(pIntTimer), ITR * 256) +
                                                TMTimerGet(pState->CTX_SUFF(pIntTimer)));
    }

    return VINF_SUCCESS;
}
2878
/**
 * Write handler for Interrupt Mask Clear register.
 *
 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
 * If an interrupt is currently asserted, it is lowered here as well.
 *
 * @param   pState      The device state structure.
 * @param   offset      Register offset in memory-mapped frame.
 * @param   index       Register index in register array.
 * @param   value       The value to store.
 * @param   mask        Used to implement partial writes (8 and 16-bit).
 * @thread  EMT
 */
static int e1kRegWriteIMC(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
{
    /* Serialize against the interrupt-raising code. */
    int rc = e1kCsEnter(pState, VINF_IOM_R3_MMIO_WRITE);
    if (RT_UNLIKELY(rc != VINF_SUCCESS))
        return rc;
    if (pState->fIntRaised)
    {
        /*
         * Technically we should reset fIntRaised in ICR read handler, but it will cause
         * Windows to freeze since it may receive an interrupt while still in the very beginning
         * of interrupt handler.
         */
        E1K_INC_ISTAT_CNT(pState->uStatIntLower);
        STAM_COUNTER_INC(&pState->StatIntsPrevented);
        E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
        /* Lower(0) INTA(0) */
        PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 0);
        pState->fIntRaised = false;
        E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", INSTANCE(pState), ICR));
    }
    IMS &= ~value;
    E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", INSTANCE(pState)));
    e1kCsLeave(pState);

    return VINF_SUCCESS;
}
2917
/**
 * Write handler for Receive Control register.
 *
 * Propagates promiscuous-mode changes to the attached driver and recomputes
 * the receive buffer size from the BSIZE/BSEX fields.
 *
 * @param   pState      The device state structure.
 * @param   offset      Register offset in memory-mapped frame.
 * @param   index       Register index in register array.
 * @param   value       The value to store.
 * @param   mask        Used to implement partial writes (8 and 16-bit).
 * @thread  EMT
 */
static int e1kRegWriteRCTL(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
{
    /* Update promiscuous mode */
    bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
    if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
    {
        /* Promiscuity has changed, pass the knowledge on. */
#ifndef IN_RING3
        /* NOTE(review): an I/O-port status code is returned from what may also
         * be an MMIO access path -- confirm IOM treats these interchangeably. */
        return VINF_IOM_R3_IOPORT_WRITE;
#else
        if (pState->pDrvR3)
            pState->pDrvR3->pfnSetPromiscuousMode(pState->pDrvR3, fBecomePromiscous);
#endif
    }

    /* Adjust receive buffer size: BSIZE selects 2048/1024/512/256 bytes,
     * BSEX scales the result by 16. */
    unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
    if (value & RCTL_BSEX)
        cbRxBuf *= 16;
    if (cbRxBuf != pState->u16RxBSize)
        E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
                 INSTANCE(pState), cbRxBuf, pState->u16RxBSize));
    /* Stored unconditionally; the log above only fires on an actual change. */
    pState->u16RxBSize = cbRxBuf;

    /* Update the register */
    e1kRegWriteDefault(pState, offset, index, value);

    return VINF_SUCCESS;
}
2957
2958/**
2959 * Write handler for Packet Buffer Allocation register.
2960 *
2961 * TXA = 64 - RXA.
2962 *
2963 * @param pState The device state structure.
2964 * @param offset Register offset in memory-mapped frame.
2965 * @param index Register index in register array.
2966 * @param value The value to store.
2967 * @param mask Used to implement partial writes (8 and 16-bit).
2968 * @thread EMT
2969 */
2970static int e1kRegWritePBA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2971{
2972 e1kRegWriteDefault(pState, offset, index, value);
2973 PBA_st->txa = 64 - PBA_st->rxa;
2974
2975 return VINF_SUCCESS;
2976}
2977
/**
 * Write handler for Receive Descriptor Tail register.
 *
 * @remarks Write into RDT forces switch to HC and signal to
 *          e1kNetworkDown_WaitReceiveAvail().
 *
 * @returns VBox status code.
 *
 * @param   pState      The device state structure.
 * @param   offset      Register offset in memory-mapped frame.
 * @param   index       Register index in register array.
 * @param   value       The value to store.
 * @param   mask        Used to implement partial writes (8 and 16-bit).
 * @thread  EMT
 */
static int e1kRegWriteRDT(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
{
#ifndef IN_RING3
    /* XXX */
//    return VINF_IOM_R3_MMIO_WRITE;
#endif
    /* Take the RX lock; may force a ring-3 retry. */
    int rc = e1kCsRxEnter(pState, VINF_IOM_R3_MMIO_WRITE);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
        E1kLog(("%s e1kRegWriteRDT\n", INSTANCE(pState)));
        rc = e1kRegWriteDefault(pState, offset, index, value);
#ifdef E1K_WITH_RXD_CACHE
        /*
         * We need to fetch descriptors now as RDT may go whole circle
         * before we attempt to store a received packet. For example,
         * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
         * size being only 8 descriptors! Note that we fetch descriptors
         * only when the cache is empty to reduce the number of memory reads
         * in case of frequent RDT writes. Don't fetch anything when the
         * receiver is disabled either as RDH, RDT, RDLEN can be in some
         * messed up state.
         * Note that despite the cache may seem empty, meaning that there are
         * no more available descriptors in it, it may still be used by RX
         * thread which has not yet written the last descriptor back but has
         * temporarily released the RX lock in order to write the packet body
         * to descriptor's buffer. At this point we still going to do prefetch
         * but it won't actually fetch anything if there are no unused slots in
         * our "empty" cache (nRxDFetched==E1K_RXD_CACHE_SIZE). We must not
         * reset the cache here even if it appears empty. It will be reset at
         * a later point in e1kRxDGet().
         */
        if (e1kRxDIsCacheEmpty(pState) && (RCTL & RCTL_EN))
            e1kRxDPrefetch(pState);
#endif /* E1K_WITH_RXD_CACHE */
        e1kCsRxLeave(pState);
        if (RT_SUCCESS(rc))
        {
/** @todo bird: Use SUPSem* for this so we can signal it in ring-0 as well
 *        without requiring any context switches.  We should also check the
 *        wait condition before bothering to queue the item as we're currently
 *        queuing thousands of items per second here in a normal transmit
 *        scenario.  Expect performance changes when fixing this! */
#ifdef IN_RING3
            /* Signal that we have more receive descriptors available. */
            e1kWakeupReceive(pState->CTX_SUFF(pDevIns));
#else
            /* Ring-0/RC: hand the wake-up over to ring-3 via the can-rx queue. */
            PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pState->CTX_SUFF(pCanRxQueue));
            if (pItem)
                PDMQueueInsert(pState->CTX_SUFF(pCanRxQueue), pItem);
#endif
        }
    }
    return rc;
}
3047
3048/**
3049 * Write handler for Receive Delay Timer register.
3050 *
3051 * @param pState The device state structure.
3052 * @param offset Register offset in memory-mapped frame.
3053 * @param index Register index in register array.
3054 * @param value The value to store.
3055 * @param mask Used to implement partial writes (8 and 16-bit).
3056 * @thread EMT
3057 */
3058static int e1kRegWriteRDTR(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
3059{
3060 e1kRegWriteDefault(pState, offset, index, value);
3061 if (value & RDTR_FPD)
3062 {
3063 /* Flush requested, cancel both timers and raise interrupt */
3064#ifdef E1K_USE_RX_TIMERS
3065 e1kCancelTimer(pState, pState->CTX_SUFF(pRIDTimer));
3066 e1kCancelTimer(pState, pState->CTX_SUFF(pRADTimer));
3067#endif
3068 E1K_INC_ISTAT_CNT(pState->uStatIntRDTR);
3069 return e1kRaiseInterrupt(pState, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3070 }
3071
3072 return VINF_SUCCESS;
3073}
3074
3075DECLINLINE(uint32_t) e1kGetTxLen(E1KSTATE* pState)
3076{
3077 /**
3078 * Make sure TDT won't change during computation. EMT may modify TDT at
3079 * any moment.
3080 */
3081 uint32_t tdt = TDT;
3082 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
3083}
3084
3085#ifdef IN_RING3
3086#ifdef E1K_TX_DELAY
3087
/**
 * Transmit Delay Timer handler.
 *
 * Flushes any transmission that was postponed by the transmit-delay
 * optimization.
 *
 * @remarks We only get here when the timer expires.
 *
 * @param   pDevIns     Pointer to device instance structure.
 * @param   pTimer      Pointer to the timer.
 * @param   pvUser      Pointer to the device state (E1KSTATE).
 * @thread  EMT
 */
static DECLCALLBACK(void) e1kTxDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
{
    E1KSTATE *pState = (E1KSTATE *)pvUser;
    Assert(PDMCritSectIsOwner(&pState->csTx));

    E1K_INC_ISTAT_CNT(pState->uStatTxDelayExp);
#ifdef E1K_INT_STATS
    /* Track the worst-case delay between arming the timer and its expiry. */
    uint64_t u64Elapsed = RTTimeNanoTS() - pState->u64ArmedAt;
    if (u64Elapsed > pState->uStatMaxTxDelay)
        pState->uStatMaxTxDelay = u64Elapsed;
#endif
    /* Flush pending descriptors; VERR_TRY_AGAIN is an accepted outcome here. */
    int rc = e1kXmitPending(pState, false /*fOnWorkerThread*/);
    AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
}
3112#endif /* E1K_TX_DELAY */
3113
3114#ifdef E1K_USE_TX_TIMERS
3115
3116/**
3117 * Transmit Interrupt Delay Timer handler.
3118 *
3119 * @remarks We only get here when the timer expires.
3120 *
3121 * @param pDevIns Pointer to device instance structure.
3122 * @param pTimer Pointer to the timer.
3123 * @param pvUser NULL.
3124 * @thread EMT
3125 */
3126static DECLCALLBACK(void) e1kTxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3127{
3128 E1KSTATE *pState = (E1KSTATE *)pvUser;
3129
3130 E1K_INC_ISTAT_CNT(pState->uStatTID);
3131 /* Cancel absolute delay timer as we have already got attention */
3132#ifndef E1K_NO_TAD
3133 e1kCancelTimer(pState, pState->CTX_SUFF(pTADTimer));
3134#endif /* E1K_NO_TAD */
3135 e1kRaiseInterrupt(pState, ICR_TXDW);
3136}
3137
3138/**
3139 * Transmit Absolute Delay Timer handler.
3140 *
3141 * @remarks We only get here when the timer expires.
3142 *
3143 * @param pDevIns Pointer to device instance structure.
3144 * @param pTimer Pointer to the timer.
3145 * @param pvUser NULL.
3146 * @thread EMT
3147 */
3148static DECLCALLBACK(void) e1kTxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3149{
3150 E1KSTATE *pState = (E1KSTATE *)pvUser;
3151
3152 E1K_INC_ISTAT_CNT(pState->uStatTAD);
3153 /* Cancel interrupt delay timer as we have already got attention */
3154 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
3155 e1kRaiseInterrupt(pState, ICR_TXDW);
3156}
3157
3158#endif /* E1K_USE_TX_TIMERS */
3159#ifdef E1K_USE_RX_TIMERS
3160
3161/**
3162 * Receive Interrupt Delay Timer handler.
3163 *
3164 * @remarks We only get here when the timer expires.
3165 *
3166 * @param pDevIns Pointer to device instance structure.
3167 * @param pTimer Pointer to the timer.
3168 * @param pvUser NULL.
3169 * @thread EMT
3170 */
3171static DECLCALLBACK(void) e1kRxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3172{
3173 E1KSTATE *pState = (E1KSTATE *)pvUser;
3174
3175 E1K_INC_ISTAT_CNT(pState->uStatRID);
3176 /* Cancel absolute delay timer as we have already got attention */
3177 e1kCancelTimer(pState, pState->CTX_SUFF(pRADTimer));
3178 e1kRaiseInterrupt(pState, ICR_RXT0);
3179}
3180
3181/**
3182 * Receive Absolute Delay Timer handler.
3183 *
3184 * @remarks We only get here when the timer expires.
3185 *
3186 * @param pDevIns Pointer to device instance structure.
3187 * @param pTimer Pointer to the timer.
3188 * @param pvUser NULL.
3189 * @thread EMT
3190 */
3191static DECLCALLBACK(void) e1kRxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3192{
3193 E1KSTATE *pState = (E1KSTATE *)pvUser;
3194
3195 E1K_INC_ISTAT_CNT(pState->uStatRAD);
3196 /* Cancel interrupt delay timer as we have already got attention */
3197 e1kCancelTimer(pState, pState->CTX_SUFF(pRIDTimer));
3198 e1kRaiseInterrupt(pState, ICR_RXT0);
3199}
3200
3201#endif /* E1K_USE_RX_TIMERS */
3202
/**
 * Late Interrupt Timer handler.
 *
 * Delivers an interrupt that was deliberately postponed (see
 * e1kRegWriteIMS()). Raises with no new cause bits, so only causes already
 * pending in ICR are delivered.
 *
 * @param   pDevIns     Pointer to device instance structure.
 * @param   pTimer      Pointer to the timer.
 * @param   pvUser      Pointer to the device state (E1KSTATE).
 * @thread  EMT
 */
static DECLCALLBACK(void) e1kLateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
{
    E1KSTATE *pState = (E1KSTATE *)pvUser;

    STAM_PROFILE_ADV_START(&pState->StatLateIntTimer, a);
    STAM_COUNTER_INC(&pState->StatLateInts);
    E1K_INC_ISTAT_CNT(pState->uStatIntLate);
#if 0
    if (pState->iStatIntLost > -100)
        pState->iStatIntLost--;
#endif
    /* Cause mask 0: adds no new bits, just re-attempts delivery. */
    e1kRaiseInterrupt(pState, VERR_SEM_BUSY, 0);
    STAM_PROFILE_ADV_STOP(&pState->StatLateIntTimer, a);
}
3225
/**
 * Link Up Timer handler.
 *
 * Completes a delayed link bring-up: sets the MAC link-up status, updates the
 * PHY link state and raises a link-status-change interrupt.
 *
 * @param   pDevIns     Pointer to device instance structure.
 * @param   pTimer      Pointer to the timer.
 * @param   pvUser      Pointer to the device state (E1KSTATE).
 * @thread  EMT
 */
static DECLCALLBACK(void) e1kLinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
{
    E1KSTATE *pState = (E1KSTATE *)pvUser;

    /*
     * This can happen if we set the link status to down when the Link up timer was
     * already armed (shortly after e1kLoadDone() or when the cable was disconnected
     * and connect+disconnect the cable very quick.
     */
    if (!pState->fCableConnected)
        return;

    E1kLog(("%s e1kLinkUpTimer: Link is up\n", INSTANCE(pState)));
    /* Reflect link-up in both the MAC status register and the PHY. */
    STATUS |= STATUS_LU;
    Phy::setLinkStatus(&pState->phy, true);
    e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
}
3251
3252#endif /* IN_RING3 */
3253
3254/**
3255 * Sets up the GSO context according to the TSE new context descriptor.
3256 *
3257 * @param pGso The GSO context to setup.
3258 * @param pCtx The context descriptor.
3259 */
3260DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3261{
3262 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3263
3264 /*
3265 * See if the context descriptor describes something that could be TCP or
3266 * UDP over IPv[46].
3267 */
3268 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3269 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3270 {
3271 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3272 return;
3273 }
3274 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3275 {
3276 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3277 return;
3278 }
3279 if (RT_UNLIKELY( pCtx->dw2.fTCP
3280 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3281 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3282 {
3283 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3284 return;
3285 }
3286
3287 /* The end of the TCP/UDP checksum should stop at the end of the packet or at least after the headers. */
3288 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3289 {
3290 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3291 return;
3292 }
3293
3294 /* IPv4 checksum offset. */
3295 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3296 {
3297 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3298 return;
3299 }
3300
3301 /* TCP/UDP checksum offsets. */
3302 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3303 != ( pCtx->dw2.fTCP
3304 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3305 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3306 {
3307 E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS, pCtx->dw2.fTCP));
3308 return;
3309 }
3310
3311 /*
3312 * Because of internal networking using a 16-bit size field for GSO context
3313 * plus frame, we have to make sure we don't exceed this.
3314 */
3315 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3316 {
3317 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3318 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3319 return;
3320 }
3321
3322 /*
3323 * We're good for now - we'll do more checks when seeing the data.
3324 * So, figure the type of offloading and setup the context.
3325 */
3326 if (pCtx->dw2.fIP)
3327 {
3328 if (pCtx->dw2.fTCP)
3329 {
3330 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3331 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3332 }
3333 else
3334 {
3335 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3336 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3337 }
3338 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3339 * this yet it seems)... */
3340 }
3341 else
3342 {
3343 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /* @todo IPv6 UFO */
3344 if (pCtx->dw2.fTCP)
3345 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3346 else
3347 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3348 }
3349 pGso->offHdr1 = pCtx->ip.u8CSS;
3350 pGso->offHdr2 = pCtx->tu.u8CSS;
3351 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3352 pGso->cbMaxSeg = pCtx->dw3.u16MSS;
3353 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3354 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3355 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3356}
3357
3358/**
3359 * Checks if we can use GSO processing for the current TSE frame.
3360 *
3361 * @param pState The device state structure.
3362 * @param pGso The GSO context.
3363 * @param pData The first data descriptor of the frame.
3364 * @param pCtx The TSO context descriptor.
3365 */
3366DECLINLINE(bool) e1kCanDoGso(E1KSTATE *pState, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3367{
3368 if (!pData->cmd.fTSE)
3369 {
3370 E1kLog2(("e1kCanDoGso: !TSE\n"));
3371 return false;
3372 }
3373 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3374 {
3375 E1kLog(("e1kCanDoGso: VLE\n"));
3376 return false;
3377 }
3378 if (RT_UNLIKELY(!pState->fGSOEnabled))
3379 {
3380 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3381 return false;
3382 }
3383
3384 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3385 {
3386 case PDMNETWORKGSOTYPE_IPV4_TCP:
3387 case PDMNETWORKGSOTYPE_IPV4_UDP:
3388 if (!pData->dw3.fIXSM)
3389 {
3390 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3391 return false;
3392 }
3393 if (!pData->dw3.fTXSM)
3394 {
3395 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3396 return false;
3397 }
3398 /** @todo what more check should we perform here? Ethernet frame type? */
3399 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3400 return true;
3401
3402 case PDMNETWORKGSOTYPE_IPV6_TCP:
3403 case PDMNETWORKGSOTYPE_IPV6_UDP:
3404 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3405 {
3406 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3407 return false;
3408 }
3409 if (!pData->dw3.fTXSM)
3410 {
3411 E1kLog(("e1kCanDoGso: TXSM (IPv6)\n"));
3412 return false;
3413 }
3414 /** @todo what more check should we perform here? Ethernet frame type? */
3415 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3416 return true;
3417
3418 default:
3419 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3420 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3421 return false;
3422 }
3423}
3424
3425/**
3426 * Frees the current xmit buffer.
3427 *
3428 * @param pState The device state structure.
3429 */
3430static void e1kXmitFreeBuf(E1KSTATE *pState)
3431{
3432 PPDMSCATTERGATHER pSg = pState->CTX_SUFF(pTxSg);
3433 if (pSg)
3434 {
3435 pState->CTX_SUFF(pTxSg) = NULL;
3436
3437 if (pSg->pvAllocator != pState)
3438 {
3439 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
3440 if (pDrv)
3441 pDrv->pfnFreeBuf(pDrv, pSg);
3442 }
3443 else
3444 {
3445 /* loopback */
3446 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3447 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3448 pSg->fFlags = 0;
3449 pSg->pvAllocator = NULL;
3450 }
3451 }
3452}
3453
3454#ifndef E1K_WITH_TXD_CACHE
3455/**
3456 * Allocates an xmit buffer.
3457 *
3458 * @returns See PDMINETWORKUP::pfnAllocBuf.
3459 * @param pState The device state structure.
3460 * @param cbMin The minimum frame size.
3461 * @param fExactSize Whether cbMin is exact or if we have to max it
3462 * out to the max MTU size.
3463 * @param fGso Whether this is a GSO frame or not.
3464 */
3465DECLINLINE(int) e1kXmitAllocBuf(E1KSTATE *pState, size_t cbMin, bool fExactSize, bool fGso)
3466{
3467 /* Adjust cbMin if necessary. */
3468 if (!fExactSize)
3469 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3470
3471 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3472 if (RT_UNLIKELY(pState->CTX_SUFF(pTxSg)))
3473 e1kXmitFreeBuf(pState);
3474 Assert(pState->CTX_SUFF(pTxSg) == NULL);
3475
3476 /*
3477 * Allocate the buffer.
3478 */
3479 PPDMSCATTERGATHER pSg;
3480 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3481 {
3482 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
3483 if (RT_UNLIKELY(!pDrv))
3484 return VERR_NET_DOWN;
3485 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pState->GsoCtx : NULL, &pSg);
3486 if (RT_FAILURE(rc))
3487 {
3488 /* Suspend TX as we are out of buffers atm */
3489 STATUS |= STATUS_TXOFF;
3490 return rc;
3491 }
3492 }
3493 else
3494 {
3495 /* Create a loopback using the fallback buffer and preallocated SG. */
3496 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3497 pSg = &pState->uTxFallback.Sg;
3498 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3499 pSg->cbUsed = 0;
3500 pSg->cbAvailable = 0;
3501 pSg->pvAllocator = pState;
3502 pSg->pvUser = NULL; /* No GSO here. */
3503 pSg->cSegs = 1;
3504 pSg->aSegs[0].pvSeg = pState->aTxPacketFallback;
3505 pSg->aSegs[0].cbSeg = sizeof(pState->aTxPacketFallback);
3506 }
3507
3508 pState->CTX_SUFF(pTxSg) = pSg;
3509 return VINF_SUCCESS;
3510}
3511#else /* E1K_WITH_TXD_CACHE */
3512/**
3513 * Allocates an xmit buffer.
3514 *
3515 * @returns See PDMINETWORKUP::pfnAllocBuf.
3516 * @param pState The device state structure.
3517 * @param cbMin The minimum frame size.
3518 * @param fExactSize Whether cbMin is exact or if we have to max it
3519 * out to the max MTU size.
3520 * @param fGso Whether this is a GSO frame or not.
3521 */
3522DECLINLINE(int) e1kXmitAllocBuf(E1KSTATE *pState, bool fGso)
3523{
3524 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3525 if (RT_UNLIKELY(pState->CTX_SUFF(pTxSg)))
3526 e1kXmitFreeBuf(pState);
3527 Assert(pState->CTX_SUFF(pTxSg) == NULL);
3528
3529 /*
3530 * Allocate the buffer.
3531 */
3532 PPDMSCATTERGATHER pSg;
3533 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3534 {
3535 if (pState->cbTxAlloc == 0)
3536 {
3537 /* Zero packet, no need for the buffer */
3538 return VINF_SUCCESS;
3539 }
3540
3541 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
3542 if (RT_UNLIKELY(!pDrv))
3543 return VERR_NET_DOWN;
3544 int rc = pDrv->pfnAllocBuf(pDrv, pState->cbTxAlloc, fGso ? &pState->GsoCtx : NULL, &pSg);
3545 if (RT_FAILURE(rc))
3546 {
3547 /* Suspend TX as we are out of buffers atm */
3548 STATUS |= STATUS_TXOFF;
3549 return rc;
3550 }
3551 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3552 INSTANCE(pState), pState->cbTxAlloc,
3553 pState->fVTag ? "VLAN " : "",
3554 pState->fGSO ? "GSO " : ""));
3555 pState->cbTxAlloc = 0;
3556 }
3557 else
3558 {
3559 /* Create a loopback using the fallback buffer and preallocated SG. */
3560 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3561 pSg = &pState->uTxFallback.Sg;
3562 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3563 pSg->cbUsed = 0;
3564 pSg->cbAvailable = 0;
3565 pSg->pvAllocator = pState;
3566 pSg->pvUser = NULL; /* No GSO here. */
3567 pSg->cSegs = 1;
3568 pSg->aSegs[0].pvSeg = pState->aTxPacketFallback;
3569 pSg->aSegs[0].cbSeg = sizeof(pState->aTxPacketFallback);
3570 }
3571
3572 pState->CTX_SUFF(pTxSg) = pSg;
3573 return VINF_SUCCESS;
3574}
3575#endif /* E1K_WITH_TXD_CACHE */
3576
3577/**
3578 * Checks if it's a GSO buffer or not.
3579 *
3580 * @returns true / false.
3581 * @param pTxSg The scatter / gather buffer.
3582 */
3583DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3584{
3585#if 0
3586 if (!pTxSg)
3587 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
3588 if (pTxSg && pTxSg->pvUser)
3589 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
3590#endif
3591 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3592}
3593
3594#ifndef E1K_WITH_TXD_CACHE
/**
 * Load a single transmit descriptor from guest memory into *pDesc.
 *
 * @param   pState      The device state structure.
 * @param   pDesc       Pointer to descriptor union (output).
 * @param   addr        Physical address of the descriptor in guest context.
 * @thread  E1000_TX
 */
DECLINLINE(void) e1kLoadDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr)
{
    PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
}
3607#else /* E1K_WITH_TXD_CACHE */
/**
 * Load transmit descriptors from guest memory into the descriptor cache.
 *
 * We need two physical reads in case the tail wrapped around the end of TX
 * descriptor ring.
 *
 * @returns the actual number of descriptors fetched.
 * @param   pState      The device state structure.
 * @thread  E1000_TX
 */
DECLINLINE(unsigned) e1kTxDLoadMore(E1KSTATE* pState)
{
    Assert(pState->iTxDCurrent == 0);
    /* We've already loaded pState->nTxDFetched descriptors past TDH. */
    unsigned nDescsAvailable = e1kGetTxLen(pState) - pState->nTxDFetched;
    /* Fetch no more than fits into the remaining cache slots. */
    unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pState->nTxDFetched);
    unsigned nDescsTotal = TDLEN / sizeof(E1KTXDESC);
    /* Ring index of the first descriptor not yet in the cache (may wrap). */
    unsigned nFirstNotLoaded = (TDH + pState->nTxDFetched) % nDescsTotal;
    /* How many descriptors we can read before hitting the end of the ring. */
    unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
    E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u "
             "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
             INSTANCE(pState), nDescsAvailable, nDescsToFetch, nDescsTotal,
             nFirstNotLoaded, nDescsInSingleRead));
    if (nDescsToFetch == 0)
        return 0;
    E1KTXDESC* pFirstEmptyDesc = &pState->aTxDescriptors[pState->nTxDFetched];
    /* First read: from the current position up to (at most) the ring end. */
    PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
                      ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
                      pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
    E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
             INSTANCE(pState), nDescsInSingleRead,
             TDBAH, TDBAL + TDH * sizeof(E1KTXDESC),
             nFirstNotLoaded, TDLEN, TDH, TDT));
    if (nDescsToFetch > nDescsInSingleRead)
    {
        /* Second read: wrap around to the start of the ring for the rest. */
        PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
                          ((uint64_t)TDBAH << 32) + TDBAL,
                          pFirstEmptyDesc + nDescsInSingleRead,
                          (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
        E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
                 INSTANCE(pState), nDescsToFetch - nDescsInSingleRead,
                 TDBAH, TDBAL));
    }
    pState->nTxDFetched += nDescsToFetch;
    return nDescsToFetch;
}
3656
/**
 * Load transmit descriptors from guest memory only if there are no loaded
 * descriptors.
 *
 * @returns true if there are descriptors in cache.
 * @param   pState      The device state structure.
 * @thread  E1000_TX
 */
DECLINLINE(bool) e1kTxDLazyLoad(E1KSTATE* pState)
{
    if (pState->nTxDFetched == 0)
        return e1kTxDLoadMore(pState) != 0;
    return true;
}
3673#endif /* E1K_WITH_TXD_CACHE */
3674
/**
 * Write back transmit descriptor to guest memory.
 *
 * @param   pState      The device state structure.
 * @param   pDesc       Pointer to descriptor union.
 * @param   addr        Physical address in guest context.
 * @thread  E1000_TX
 */
DECLINLINE(void) e1kWriteBackDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr)
{
    /* Strictly only the last half of the descriptor (the status fields) needs
       to be written back, but the code writes the whole descriptor for
       simplicity. */
    e1kPrintTDesc(pState, pDesc, "^^^");
    PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
}
3689
/**
 * Transmit complete frame.
 *
 * Inserts the VLAN tag if configured, updates the transmit statistics
 * registers, and hands the frame either to the driver (normal path) or to
 * our own receive path (transceiver loopback).
 *
 * @remarks We skip the FCS since we're not responsible for sending anything to
 *          a real ethernet wire.
 *
 * @param   pState              The device state structure.
 * @param   fOnWorkerThread     Whether we're on a worker thread or an EMT.
 * @thread  E1000_TX
 */
static void e1kTransmitFrame(E1KSTATE* pState, bool fOnWorkerThread)
{
    PPDMSCATTERGATHER   pSg     = pState->CTX_SUFF(pTxSg);
    uint32_t            cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
    Assert(!pSg || pSg->cSegs == 1);

    if (cbFrame > 70) /* unqualified guess */
        pState->led.Asserted.s.fWriting = pState->led.Actual.s.fWriting = 1;

#ifdef E1K_INT_STATS
    /* Internal frame-size histogram. */
    if (cbFrame <= 1514)
        E1K_INC_ISTAT_CNT(pState->uStatTx1514);
    else if (cbFrame <= 2962)
        E1K_INC_ISTAT_CNT(pState->uStatTx2962);
    else if (cbFrame <= 4410)
        E1K_INC_ISTAT_CNT(pState->uStatTx4410);
    else if (cbFrame <= 5858)
        E1K_INC_ISTAT_CNT(pState->uStatTx5858);
    else if (cbFrame <= 7306)
        E1K_INC_ISTAT_CNT(pState->uStatTx7306);
    else if (cbFrame <= 8754)
        E1K_INC_ISTAT_CNT(pState->uStatTx8754);
    else if (cbFrame <= 16384)
        E1K_INC_ISTAT_CNT(pState->uStatTx16384);
    else if (cbFrame <= 32768)
        E1K_INC_ISTAT_CNT(pState->uStatTx32768);
    else
        E1K_INC_ISTAT_CNT(pState->uStatTxLarge);
#endif /* E1K_INT_STATS */

    /* Add VLAN tag */
    if (cbFrame > 12 && pState->fVTag)
    {
        E1kLog3(("%s Inserting VLAN tag %08x\n",
                 INSTANCE(pState), RT_BE2H_U16(VET) | (RT_BE2H_U16(pState->u16VTagTCI) << 16)));
        /* Shift everything after the two MAC addresses (12 bytes) up by 4 and
           write the 802.1Q tag into the gap at byte offset 12. */
        memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
        *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16(VET) | (RT_BE2H_U16(pState->u16VTagTCI) << 16);
        pSg->cbUsed += 4;
        cbFrame     += 4;
        Assert(pSg->cbUsed == cbFrame);
        Assert(pSg->cbUsed <= pSg->cbAvailable);
    }
/*    E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
            "%.*Rhxd\n"
            "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
            INSTANCE(pState), cbFrame, pSg->aSegs[0].pvSeg, INSTANCE(pState)));*/

    /* Update the stats */
    E1K_INC_CNT32(TPT);
    E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
    E1K_INC_CNT32(GPTC);
    if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
        E1K_INC_CNT32(BPTC);
    else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
        E1K_INC_CNT32(MPTC);
    /* Update octet transmit counter */
    E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
    if (pState->CTX_SUFF(pDrv))
        STAM_REL_COUNTER_ADD(&pState->StatTransmitBytes, cbFrame);
    /* Packets-transmitted size buckets (PTCxx registers). */
    if (cbFrame == 64)
        E1K_INC_CNT32(PTC64);
    else if (cbFrame < 128)
        E1K_INC_CNT32(PTC127);
    else if (cbFrame < 256)
        E1K_INC_CNT32(PTC255);
    else if (cbFrame < 512)
        E1K_INC_CNT32(PTC511);
    else if (cbFrame < 1024)
        E1K_INC_CNT32(PTC1023);
    else
        E1K_INC_CNT32(PTC1522);

    E1K_INC_ISTAT_CNT(pState->uStatTxFrm);

    /*
     * Dump and send the packet.
     */
    int rc = VERR_NET_DOWN;
    if (pSg && pSg->pvAllocator != pState)
    {
        /* Driver-allocated buffer: hand it down the network stack. */
        e1kPacketDump(pState, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");

        pState->CTX_SUFF(pTxSg) = NULL;
        PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
        if (pDrv)
        {
            /* Release critical section to avoid deadlock in CanReceive */
            //e1kCsLeave(pState);
            STAM_PROFILE_START(&pState->CTX_SUFF_Z(StatTransmitSend), a);
            rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
            STAM_PROFILE_STOP(&pState->CTX_SUFF_Z(StatTransmitSend), a);
            //e1kCsEnter(pState, RT_SRC_POS);
        }
    }
    else if (pSg)
    {
        /* Loopback buffer: feed the frame straight into our own RX path. */
        Assert(pSg->aSegs[0].pvSeg == pState->aTxPacketFallback);
        e1kPacketDump(pState, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");

        /** @todo do we actually need to check that we're in loopback mode here? */
        if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
        {
            E1KRXDST status;
            RT_ZERO(status);
            status.fPIF = true;
            e1kHandleRxPacket(pState, pSg->aSegs[0].pvSeg, cbFrame, status);
            rc = VINF_SUCCESS;
        }
        e1kXmitFreeBuf(pState);
    }
    else
        rc = VERR_NET_DOWN;
    if (RT_FAILURE(rc))
    {
        E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
        /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
    }

    pState->led.Actual.s.fWriting = 0;
}
3820
3821/**
3822 * Compute and write internet checksum (e1kCSum16) at the specified offset.
3823 *
3824 * @param pState The device state structure.
3825 * @param pPkt Pointer to the packet.
3826 * @param u16PktLen Total length of the packet.
3827 * @param cso Offset in packet to write checksum at.
3828 * @param css Offset in packet to start computing
3829 * checksum from.
3830 * @param cse Offset in packet to stop computing
3831 * checksum at.
3832 * @thread E1000_TX
3833 */
3834static void e1kInsertChecksum(E1KSTATE* pState, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
3835{
3836 if (css >= u16PktLen)
3837 {
3838 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
3839 INSTANCE(pState), cso, u16PktLen));
3840 return;
3841 }
3842
3843 if (cso >= u16PktLen - 1)
3844 {
3845 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
3846 INSTANCE(pState), cso, u16PktLen));
3847 return;
3848 }
3849
3850 if (cse == 0)
3851 cse = u16PktLen - 1;
3852 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
3853 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", INSTANCE(pState),
3854 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
3855 *(uint16_t*)(pPkt + cso) = u16ChkSum;
3856}
3857
3858/**
3859 * Add a part of descriptor's buffer to transmit frame.
3860 *
3861 * @remarks data.u64BufAddr is used unconditionally for both data
3862 * and legacy descriptors since it is identical to
3863 * legacy.u64BufAddr.
3864 *
3865 * @param pState The device state structure.
 * @param   PhysAddr            Physical address of the descriptor's data buffer in guest memory.
3867 * @param u16Len Length of buffer to the end of segment.
3868 * @param fSend Force packet sending.
3869 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3870 * @thread E1000_TX
3871 */
3872#ifndef E1K_WITH_TXD_CACHE
/**
 * TSE fallback: append one chunk of guest data to the frame being assembled
 * in aTxPacketFallback and, when fSend is set, finalize the IP/TCP checksums
 * and transmit the segment.
 */
static void e1kFallbackAddSegment(E1KSTATE* pState, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
{
    /* TCP header being transmitted */
    struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
            (pState->aTxPacketFallback + pState->contextTSE.tu.u8CSS);
    /* IP header being transmitted */
    struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
            (pState->aTxPacketFallback + pState->contextTSE.ip.u8CSS);

    E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
             INSTANCE(pState), u16Len, pState->u32PayRemain, pState->u16HdrRemain, fSend));
    Assert(pState->u32PayRemain + pState->u16HdrRemain > 0);

    /* Append the chunk from guest memory to the fallback frame buffer. */
    PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), PhysAddr,
                      pState->aTxPacketFallback + pState->u16TxPktLen, u16Len);
    E1kLog3(("%s Dump of the segment:\n"
             "%.*Rhxd\n"
             "%s --- End of dump ---\n",
             INSTANCE(pState), u16Len, pState->aTxPacketFallback + pState->u16TxPktLen, INSTANCE(pState)));
    pState->u16TxPktLen += u16Len;
    E1kLog3(("%s e1kFallbackAddSegment: pState->u16TxPktLen=%x\n",
             INSTANCE(pState), pState->u16TxPktLen));
    if (pState->u16HdrRemain > 0)
    {
        /* The header was not complete, check if it is now */
        if (u16Len >= pState->u16HdrRemain)
        {
            /* The rest is payload */
            u16Len -= pState->u16HdrRemain;
            pState->u16HdrRemain = 0;
            /* Save partial checksum and flags */
            pState->u32SavedCsum = pTcpHdr->chksum;
            pState->u16SavedFlags = pTcpHdr->hdrlen_flags;
            /* Clear FIN and PSH flags now and set them only in the last segment */
            pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
        }
        else
        {
            /* Still not */
            pState->u16HdrRemain -= u16Len;
            E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
                     INSTANCE(pState), pState->u16HdrRemain));
            return;
        }
    }

    pState->u32PayRemain -= u16Len;

    if (fSend)
    {
        /* Leave ethernet header intact */
        /* IP Total Length = payload + headers - ethernet header */
        pIpHdr->total_len = htons(pState->u16TxPktLen - pState->contextTSE.ip.u8CSS);
        E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
                 INSTANCE(pState), ntohs(pIpHdr->total_len)));
        /* Update IP Checksum */
        pIpHdr->chksum = 0;
        e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
                          pState->contextTSE.ip.u8CSO,
                          pState->contextTSE.ip.u8CSS,
                          pState->contextTSE.ip.u16CSE);

        /* Update TCP flags */
        /* Restore original FIN and PSH flags for the last segment */
        if (pState->u32PayRemain == 0)
        {
            pTcpHdr->hdrlen_flags = pState->u16SavedFlags;
            E1K_INC_CNT32(TSCTC);
        }
        /* Add TCP length to partial pseudo header sum */
        uint32_t csum = pState->u32SavedCsum
                      + htons(pState->u16TxPktLen - pState->contextTSE.tu.u8CSS);
        /* Fold carries back into the low 16 bits (ones' complement sum). */
        while (csum >> 16)
            csum = (csum >> 16) + (csum & 0xFFFF);
        pTcpHdr->chksum = csum;
        /* Compute final checksum */
        e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
                          pState->contextTSE.tu.u8CSO,
                          pState->contextTSE.tu.u8CSS,
                          pState->contextTSE.tu.u16CSE);

        /*
         * Transmit it. If we've use the SG already, allocate a new one before
         * we copy of the data.
         */
        if (!pState->CTX_SUFF(pTxSg))
            e1kXmitAllocBuf(pState, pState->u16TxPktLen + (pState->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
        if (pState->CTX_SUFF(pTxSg))
        {
            Assert(pState->u16TxPktLen <= pState->CTX_SUFF(pTxSg)->cbAvailable);
            Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
            if (pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pState->aTxPacketFallback)
                memcpy(pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->aTxPacketFallback, pState->u16TxPktLen);
            pState->CTX_SUFF(pTxSg)->cbUsed = pState->u16TxPktLen;
            pState->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pState->u16TxPktLen;
        }
        e1kTransmitFrame(pState, fOnWorkerThread);

        /* Update Sequence Number */
        pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pState->u16TxPktLen
                               - pState->contextTSE.dw3.u8HDRLEN);
        /* Increment IP identification */
        pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
    }
}
3978#else /* E1K_WITH_TXD_CACHE */
/**
 * TSE fallback: append one chunk of guest data to the frame being assembled
 * in aTxPacketFallback and, when fSend is set, finalize the IP/TCP checksums,
 * transmit the segment and allocate a buffer for the next one.
 *
 * @returns VBox status code (failure comes from the buffer allocation for the
 *          next segment; everything else is VINF_SUCCESS).
 */
static int e1kFallbackAddSegment(E1KSTATE* pState, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
{
    int rc = VINF_SUCCESS;
    /* TCP header being transmitted */
    struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
            (pState->aTxPacketFallback + pState->contextTSE.tu.u8CSS);
    /* IP header being transmitted */
    struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
            (pState->aTxPacketFallback + pState->contextTSE.ip.u8CSS);

    E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
             INSTANCE(pState), u16Len, pState->u32PayRemain, pState->u16HdrRemain, fSend));
    Assert(pState->u32PayRemain + pState->u16HdrRemain > 0);

    /* Append the chunk from guest memory to the fallback frame buffer. */
    PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), PhysAddr,
                      pState->aTxPacketFallback + pState->u16TxPktLen, u16Len);
    E1kLog3(("%s Dump of the segment:\n"
             "%.*Rhxd\n"
             "%s --- End of dump ---\n",
             INSTANCE(pState), u16Len, pState->aTxPacketFallback + pState->u16TxPktLen, INSTANCE(pState)));
    pState->u16TxPktLen += u16Len;
    E1kLog3(("%s e1kFallbackAddSegment: pState->u16TxPktLen=%x\n",
             INSTANCE(pState), pState->u16TxPktLen));
    if (pState->u16HdrRemain > 0)
    {
        /* The header was not complete, check if it is now */
        if (u16Len >= pState->u16HdrRemain)
        {
            /* The rest is payload */
            u16Len -= pState->u16HdrRemain;
            pState->u16HdrRemain = 0;
            /* Save partial checksum and flags */
            pState->u32SavedCsum = pTcpHdr->chksum;
            pState->u16SavedFlags = pTcpHdr->hdrlen_flags;
            /* Clear FIN and PSH flags now and set them only in the last segment */
            pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
        }
        else
        {
            /* Still not */
            pState->u16HdrRemain -= u16Len;
            E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
                     INSTANCE(pState), pState->u16HdrRemain));
            return rc;
        }
    }

    pState->u32PayRemain -= u16Len;

    if (fSend)
    {
        /* Leave ethernet header intact */
        /* IP Total Length = payload + headers - ethernet header */
        pIpHdr->total_len = htons(pState->u16TxPktLen - pState->contextTSE.ip.u8CSS);
        E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
                 INSTANCE(pState), ntohs(pIpHdr->total_len)));
        /* Update IP Checksum */
        pIpHdr->chksum = 0;
        e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
                          pState->contextTSE.ip.u8CSO,
                          pState->contextTSE.ip.u8CSS,
                          pState->contextTSE.ip.u16CSE);

        /* Update TCP flags */
        /* Restore original FIN and PSH flags for the last segment */
        if (pState->u32PayRemain == 0)
        {
            pTcpHdr->hdrlen_flags = pState->u16SavedFlags;
            E1K_INC_CNT32(TSCTC);
        }
        /* Add TCP length to partial pseudo header sum */
        uint32_t csum = pState->u32SavedCsum
                      + htons(pState->u16TxPktLen - pState->contextTSE.tu.u8CSS);
        /* Fold carries back into the low 16 bits (ones' complement sum). */
        while (csum >> 16)
            csum = (csum >> 16) + (csum & 0xFFFF);
        pTcpHdr->chksum = csum;
        /* Compute final checksum */
        e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
                          pState->contextTSE.tu.u8CSO,
                          pState->contextTSE.tu.u8CSS,
                          pState->contextTSE.tu.u16CSE);

        /*
         * Transmit it.
         */
        if (pState->CTX_SUFF(pTxSg))
        {
            Assert(pState->u16TxPktLen <= pState->CTX_SUFF(pTxSg)->cbAvailable);
            Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
            if (pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pState->aTxPacketFallback)
                memcpy(pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->aTxPacketFallback, pState->u16TxPktLen);
            pState->CTX_SUFF(pTxSg)->cbUsed = pState->u16TxPktLen;
            pState->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pState->u16TxPktLen;
        }
        e1kTransmitFrame(pState, fOnWorkerThread);

        /* Update Sequence Number */
        pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pState->u16TxPktLen
                               - pState->contextTSE.dw3.u8HDRLEN);
        /* Increment IP identification */
        pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);

        /* Allocate new buffer for the next segment. */
        if (pState->u32PayRemain)
        {
            pState->cbTxAlloc = RT_MIN(pState->u32PayRemain,
                                       pState->contextTSE.dw3.u16MSS)
                              + pState->contextTSE.dw3.u8HDRLEN
                              + (pState->fVTag ? 4 : 0);
            rc = e1kXmitAllocBuf(pState, false /* fGSO */);
        }
    }

    return rc;
}
4094#endif /* E1K_WITH_TXD_CACHE */
4095
4096#ifndef E1K_WITH_TXD_CACHE
/**
 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
 * frame.
 *
 * We construct the frame in the fallback buffer first and the copy it to the SG
 * buffer before passing it down to the network driver code.
 *
 * @returns Always false: complete segments are transmitted internally via
 *          e1kFallbackAddSegment, so the caller must not send the frame again.
 *
 * @param   pState              The device state structure.
 * @param   pDesc               Pointer to the descriptor to transmit.
 * @param   cbFragment          Length of descriptor's buffer.
 * @param   fOnWorkerThread     Whether we're on a worker thread or an EMT.
 * @thread  E1000_TX
 */
static bool e1kFallbackAddToFrame(E1KSTATE* pState, E1KTXDESC* pDesc, uint32_t cbFragment, bool fOnWorkerThread)
{
    PPDMSCATTERGATHER pTxSg = pState->CTX_SUFF(pTxSg);
    Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
    Assert(pDesc->data.cmd.fTSE);
    Assert(!e1kXmitIsGsoBuf(pTxSg));

    /* Maximum size of a single segment: headers plus one MSS of payload. */
    uint16_t u16MaxPktLen = pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw3.u16MSS;
    Assert(u16MaxPktLen != 0);
    Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);

    /*
     * Carve out segments.
     */
    do
    {
        /* Calculate how many bytes we have left in this TCP segment */
        uint32_t cb = u16MaxPktLen - pState->u16TxPktLen;
        if (cb > cbFragment)
        {
            /* This descriptor fits completely into current segment */
            cb = cbFragment;
            e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
        }
        else
        {
            e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
            /*
             * Rewind the packet tail pointer to the beginning of payload,
             * so we continue writing right beyond the header.
             */
            pState->u16TxPktLen = pState->contextTSE.dw3.u8HDRLEN;
        }

        pDesc->data.u64BufAddr += cb;
        cbFragment -= cb;
    } while (cbFragment > 0);

    if (pDesc->data.cmd.fEOP)
    {
        /* End of packet, next segment will contain header. */
        if (pState->u32PayRemain != 0)
            E1K_INC_CNT32(TSCTFC);
        pState->u16TxPktLen = 0;
        e1kXmitFreeBuf(pState);
    }

    return false;
}
4161#else /* E1K_WITH_TXD_CACHE */
4162/**
4163 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4164 * frame.
4165 *
4166 * We construct the frame in the fallback buffer first and the copy it to the SG
4167 * buffer before passing it down to the network driver code.
4168 *
4169 * @returns error code
4170 *
4171 * @param pState The device state structure.
4172 * @param pDesc Pointer to the descriptor to transmit.
4173 * @param cbFragment Length of descriptor's buffer.
4174 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4175 * @thread E1000_TX
4176 */
4177static int e1kFallbackAddToFrame(E1KSTATE* pState, E1KTXDESC* pDesc, bool fOnWorkerThread)
4178{
4179 int rc = VINF_SUCCESS;
4180 PPDMSCATTERGATHER pTxSg = pState->CTX_SUFF(pTxSg);
4181 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4182 Assert(pDesc->data.cmd.fTSE);
4183 Assert(!e1kXmitIsGsoBuf(pTxSg));
4184
4185 uint16_t u16MaxPktLen = pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw3.u16MSS;
4186 Assert(u16MaxPktLen != 0);
4187 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4188
4189 /*
4190 * Carve out segments.
4191 */
4192 do
4193 {
4194 /* Calculate how many bytes we have left in this TCP segment */
4195 uint32_t cb = u16MaxPktLen - pState->u16TxPktLen;
4196 if (cb > pDesc->data.cmd.u20DTALEN)
4197 {
4198 /* This descriptor fits completely into current segment */
4199 cb = pDesc->data.cmd.u20DTALEN;
4200 rc = e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4201 }
4202 else
4203 {
4204 rc = e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4205 /*
4206 * Rewind the packet tail pointer to the beginning of payload,
4207 * so we continue writing right beyond the header.
4208 */
4209 pState->u16TxPktLen = pState->contextTSE.dw3.u8HDRLEN;
4210 }
4211
4212 pDesc->data.u64BufAddr += cb;
4213 pDesc->data.cmd.u20DTALEN -= cb;
4214 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4215
4216 if (pDesc->data.cmd.fEOP)
4217 {
4218 /* End of packet, next segment will contain header. */
4219 if (pState->u32PayRemain != 0)
4220 E1K_INC_CNT32(TSCTFC);
4221 pState->u16TxPktLen = 0;
4222 e1kXmitFreeBuf(pState);
4223 }
4224
4225 return false;
4226}
4227#endif /* E1K_WITH_TXD_CACHE */
4228
4229
4230/**
4231 * Add descriptor's buffer to transmit frame.
4232 *
4233 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4234 * TSE frames we cannot handle as GSO.
4235 *
4236 * @returns true on success, false on failure.
4237 *
4238 * @param pThis The device state structure.
4239 * @param PhysAddr The physical address of the descriptor buffer.
4240 * @param cbFragment Length of descriptor's buffer.
4241 * @thread E1000_TX
4242 */
4243static bool e1kAddToFrame(E1KSTATE *pThis, RTGCPHYS PhysAddr, uint32_t cbFragment)
4244{
4245 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4246 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4247 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4248
4249 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4250 {
4251 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", INSTANCE(pThis), cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4252 return false;
4253 }
4254 if (RT_UNLIKELY( fGso && cbNewPkt > pTxSg->cbAvailable ))
4255 {
4256 E1kLog(("%s Transmit packet is too large: %u > %u(max)/GSO\n", INSTANCE(pThis), cbNewPkt, pTxSg->cbAvailable));
4257 return false;
4258 }
4259
4260 if (RT_LIKELY(pTxSg))
4261 {
4262 Assert(pTxSg->cSegs == 1);
4263 Assert(pTxSg->cbUsed == pThis->u16TxPktLen);
4264
4265 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4266 (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4267
4268 pTxSg->cbUsed = cbNewPkt;
4269 }
4270 pThis->u16TxPktLen = cbNewPkt;
4271
4272 return true;
4273}
4274
4275
4276/**
4277 * Write the descriptor back to guest memory and notify the guest.
4278 *
4279 * @param pState The device state structure.
4280 * @param pDesc Pointer to the descriptor have been transmitted.
4281 * @param addr Physical address of the descriptor in guest memory.
4282 * @thread E1000_TX
4283 */
4284static void e1kDescReport(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr)
4285{
4286 /*
4287 * We fake descriptor write-back bursting. Descriptors are written back as they are
4288 * processed.
4289 */
4290 /* Let's pretend we process descriptors. Write back with DD set. */
4291 /*
4292 * Prior to r71586 we tried to accomodate the case when write-back bursts
4293 * are enabled without actually implementing bursting by writing back all
4294 * descriptors, even the ones that do not have RS set. This caused kernel
4295 * panics with Linux SMP kernels, as the e1000 driver tried to free up skb
4296 * associated with written back descriptor if it happened to be a context
4297 * descriptor since context descriptors do not have skb associated to them.
4298 * Starting from r71586 we write back only the descriptors with RS set,
4299 * which is a little bit different from what the real hardware does in
4300 * case there is a chain of data descritors where some of them have RS set
4301 * and others do not. It is very uncommon scenario imho.
4302 * We need to check RPS as well since some legacy drivers use it instead of
4303 * RS even with newer cards.
4304 */
4305 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4306 {
4307 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4308 e1kWriteBackDesc(pState, pDesc, addr);
4309 if (pDesc->legacy.cmd.fEOP)
4310 {
4311#ifdef E1K_USE_TX_TIMERS
4312 if (pDesc->legacy.cmd.fIDE)
4313 {
4314 E1K_INC_ISTAT_CNT(pState->uStatTxIDE);
4315 //if (pState->fIntRaised)
4316 //{
4317 // /* Interrupt is already pending, no need for timers */
4318 // ICR |= ICR_TXDW;
4319 //}
4320 //else {
4321 /* Arm the timer to fire in TIVD usec (discard .024) */
4322 e1kArmTimer(pState, pState->CTX_SUFF(pTIDTimer), TIDV);
4323# ifndef E1K_NO_TAD
4324 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4325 E1kLog2(("%s Checking if TAD timer is running\n",
4326 INSTANCE(pState)));
4327 if (TADV != 0 && !TMTimerIsActive(pState->CTX_SUFF(pTADTimer)))
4328 e1kArmTimer(pState, pState->CTX_SUFF(pTADTimer), TADV);
4329# endif /* E1K_NO_TAD */
4330 }
4331 else
4332 {
4333 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4334 INSTANCE(pState)));
4335# ifndef E1K_NO_TAD
4336 /* Cancel both timers if armed and fire immediately. */
4337 e1kCancelTimer(pState, pState->CTX_SUFF(pTADTimer));
4338# endif /* E1K_NO_TAD */
4339#endif /* E1K_USE_TX_TIMERS */
4340 E1K_INC_ISTAT_CNT(pState->uStatIntTx);
4341 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXDW);
4342#ifdef E1K_USE_TX_TIMERS
4343 }
4344#endif /* E1K_USE_TX_TIMERS */
4345 }
4346 }
4347 else
4348 {
4349 E1K_INC_ISTAT_CNT(pState->uStatTxNoRS);
4350 }
4351}
4352
4353#ifndef E1K_WITH_TXD_CACHE
4354/**
4355 * Process Transmit Descriptor.
4356 *
4357 * E1000 supports three types of transmit descriptors:
4358 * - legacy data descriptors of older format (context-less).
4359 * - data the same as legacy but providing new offloading capabilities.
4360 * - context sets up the context for following data descriptors.
4361 *
4362 * @param pState The device state structure.
4363 * @param pDesc Pointer to descriptor union.
4364 * @param addr Physical address of descriptor in guest memory.
4365 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4366 * @thread E1000_TX
4367 */
4368static int e1kXmitDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr, bool fOnWorkerThread)
4369{
4370 int rc = VINF_SUCCESS;
4371 uint32_t cbVTag = 0;
4372
4373 e1kPrintTDesc(pState, pDesc, "vvv");
4374
4375#ifdef E1K_USE_TX_TIMERS
4376 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
4377#endif /* E1K_USE_TX_TIMERS */
4378
4379 switch (e1kGetDescType(pDesc))
4380 {
4381 case E1K_DTYP_CONTEXT:
4382 if (pDesc->context.dw2.fTSE)
4383 {
4384 pState->contextTSE = pDesc->context;
4385 pState->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4386 pState->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4387 e1kSetupGsoCtx(&pState->GsoCtx, &pDesc->context);
4388 STAM_COUNTER_INC(&pState->StatTxDescCtxTSE);
4389 }
4390 else
4391 {
4392 pState->contextNormal = pDesc->context;
4393 STAM_COUNTER_INC(&pState->StatTxDescCtxNormal);
4394 }
4395 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4396 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", INSTANCE(pState),
4397 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4398 pDesc->context.ip.u8CSS,
4399 pDesc->context.ip.u8CSO,
4400 pDesc->context.ip.u16CSE,
4401 pDesc->context.tu.u8CSS,
4402 pDesc->context.tu.u8CSO,
4403 pDesc->context.tu.u16CSE));
4404 E1K_INC_ISTAT_CNT(pState->uStatDescCtx);
4405 e1kDescReport(pState, pDesc, addr);
4406 break;
4407
4408 case E1K_DTYP_DATA:
4409 {
4410 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4411 {
4412 E1kLog2(("% Empty data descriptor, skipped.\n", INSTANCE(pState)));
4413 /** @todo Same as legacy when !TSE. See below. */
4414 break;
4415 }
4416 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4417 &pState->StatTxDescTSEData:
4418 &pState->StatTxDescData);
4419 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4420 E1K_INC_ISTAT_CNT(pState->uStatDescDat);
4421
4422 /*
4423 * The last descriptor of non-TSE packet must contain VLE flag.
4424 * TSE packets have VLE flag in the first descriptor. The later
4425 * case is taken care of a bit later when cbVTag gets assigned.
4426 *
4427 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4428 */
4429 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4430 {
4431 pState->fVTag = pDesc->data.cmd.fVLE;
4432 pState->u16VTagTCI = pDesc->data.dw3.u16Special;
4433 }
4434 /*
4435 * First fragment: Allocate new buffer and save the IXSM and TXSM
4436 * packet options as these are only valid in the first fragment.
4437 */
4438 if (pState->u16TxPktLen == 0)
4439 {
4440 pState->fIPcsum = pDesc->data.dw3.fIXSM;
4441 pState->fTCPcsum = pDesc->data.dw3.fTXSM;
4442 E1kLog2(("%s Saving checksum flags:%s%s; \n", INSTANCE(pState),
4443 pState->fIPcsum ? " IP" : "",
4444 pState->fTCPcsum ? " TCP/UDP" : ""));
4445 if (pDesc->data.cmd.fTSE)
4446 {
4447 /* 2) pDesc->data.cmd.fTSE && pState->u16TxPktLen == 0 */
4448 pState->fVTag = pDesc->data.cmd.fVLE;
4449 pState->u16VTagTCI = pDesc->data.dw3.u16Special;
4450 cbVTag = pState->fVTag ? 4 : 0;
4451 }
4452 else if (pDesc->data.cmd.fEOP)
4453 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4454 else
4455 cbVTag = 4;
4456 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", INSTANCE(pState), cbVTag));
4457 if (e1kCanDoGso(pState, &pState->GsoCtx, &pDesc->data, &pState->contextTSE))
4458 rc = e1kXmitAllocBuf(pState, pState->contextTSE.dw2.u20PAYLEN + pState->contextTSE.dw3.u8HDRLEN + cbVTag,
4459 true /*fExactSize*/, true /*fGso*/);
4460 else if (pDesc->data.cmd.fTSE)
4461 rc = e1kXmitAllocBuf(pState, pState->contextTSE.dw3.u16MSS + pState->contextTSE.dw3.u8HDRLEN + cbVTag,
4462 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4463 else
4464 rc = e1kXmitAllocBuf(pState, pDesc->data.cmd.u20DTALEN + cbVTag,
4465 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4466
4467 /**
4468 * @todo: Perhaps it is not that simple for GSO packets! We may
4469 * need to unwind some changes.
4470 */
4471 if (RT_FAILURE(rc))
4472 {
4473 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4474 break;
4475 }
4476 /** @todo Is there any way to indicating errors other than collisions? Like
4477 * VERR_NET_DOWN. */
4478 }
4479
4480 /*
4481 * Add the descriptor data to the frame. If the frame is complete,
4482 * transmit it and reset the u16TxPktLen field.
4483 */
4484 if (e1kXmitIsGsoBuf(pState->CTX_SUFF(pTxSg)))
4485 {
4486 STAM_COUNTER_INC(&pState->StatTxPathGSO);
4487 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4488 if (pDesc->data.cmd.fEOP)
4489 {
4490 if ( fRc
4491 && pState->CTX_SUFF(pTxSg)
4492 && pState->CTX_SUFF(pTxSg)->cbUsed == (size_t)pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN)
4493 {
4494 e1kTransmitFrame(pState, fOnWorkerThread);
4495 E1K_INC_CNT32(TSCTC);
4496 }
4497 else
4498 {
4499 if (fRc)
4500 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , INSTANCE(pState),
4501 pState->CTX_SUFF(pTxSg), pState->CTX_SUFF(pTxSg) ? pState->CTX_SUFF(pTxSg)->cbUsed : 0,
4502 pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN));
4503 e1kXmitFreeBuf(pState);
4504 E1K_INC_CNT32(TSCTFC);
4505 }
4506 pState->u16TxPktLen = 0;
4507 }
4508 }
4509 else if (!pDesc->data.cmd.fTSE)
4510 {
4511 STAM_COUNTER_INC(&pState->StatTxPathRegular);
4512 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4513 if (pDesc->data.cmd.fEOP)
4514 {
4515 if (fRc && pState->CTX_SUFF(pTxSg))
4516 {
4517 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
4518 if (pState->fIPcsum)
4519 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4520 pState->contextNormal.ip.u8CSO,
4521 pState->contextNormal.ip.u8CSS,
4522 pState->contextNormal.ip.u16CSE);
4523 if (pState->fTCPcsum)
4524 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4525 pState->contextNormal.tu.u8CSO,
4526 pState->contextNormal.tu.u8CSS,
4527 pState->contextNormal.tu.u16CSE);
4528 e1kTransmitFrame(pState, fOnWorkerThread);
4529 }
4530 else
4531 e1kXmitFreeBuf(pState);
4532 pState->u16TxPktLen = 0;
4533 }
4534 }
4535 else
4536 {
4537 STAM_COUNTER_INC(&pState->StatTxPathFallback);
4538 e1kFallbackAddToFrame(pState, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4539 }
4540
4541 e1kDescReport(pState, pDesc, addr);
4542 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4543 break;
4544 }
4545
4546 case E1K_DTYP_LEGACY:
4547 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4548 {
4549 E1kLog(("%s Empty legacy descriptor, skipped.\n", INSTANCE(pState)));
4550 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4551 break;
4552 }
4553 STAM_COUNTER_INC(&pState->StatTxDescLegacy);
4554 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4555
4556 /* First fragment: allocate new buffer. */
4557 if (pState->u16TxPktLen == 0)
4558 {
4559 if (pDesc->legacy.cmd.fEOP)
4560 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4561 else
4562 cbVTag = 4;
4563 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", INSTANCE(pState), cbVTag));
4564 /** @todo reset status bits? */
4565 rc = e1kXmitAllocBuf(pState, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
4566 if (RT_FAILURE(rc))
4567 {
4568 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4569 break;
4570 }
4571
4572 /** @todo Is there any way to indicating errors other than collisions? Like
4573 * VERR_NET_DOWN. */
4574 }
4575
4576 /* Add fragment to frame. */
4577 if (e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4578 {
4579 E1K_INC_ISTAT_CNT(pState->uStatDescLeg);
4580
4581 /* Last fragment: Transmit and reset the packet storage counter. */
4582 if (pDesc->legacy.cmd.fEOP)
4583 {
4584 pState->fVTag = pDesc->legacy.cmd.fVLE;
4585 pState->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4586 /** @todo Offload processing goes here. */
4587 e1kTransmitFrame(pState, fOnWorkerThread);
4588 pState->u16TxPktLen = 0;
4589 }
4590 }
4591 /* Last fragment + failure: free the buffer and reset the storage counter. */
4592 else if (pDesc->legacy.cmd.fEOP)
4593 {
4594 e1kXmitFreeBuf(pState);
4595 pState->u16TxPktLen = 0;
4596 }
4597
4598 e1kDescReport(pState, pDesc, addr);
4599 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4600 break;
4601
4602 default:
4603 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4604 INSTANCE(pState), e1kGetDescType(pDesc)));
4605 break;
4606 }
4607
4608 return rc;
4609}
4610#else /* E1K_WITH_TXD_CACHE */
4611/**
4612 * Process Transmit Descriptor.
4613 *
4614 * E1000 supports three types of transmit descriptors:
4615 * - legacy data descriptors of older format (context-less).
4616 * - data the same as legacy but providing new offloading capabilities.
4617 * - context sets up the context for following data descriptors.
4618 *
4619 * @param pState The device state structure.
4620 * @param pDesc Pointer to descriptor union.
4621 * @param addr Physical address of descriptor in guest memory.
4622 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4623 * @param cbPacketSize Size of the packet as previously computed.
4624 * @thread E1000_TX
4625 */
4626static int e1kXmitDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr,
4627 bool fOnWorkerThread)
4628{
4629 int rc = VINF_SUCCESS;
4630 uint32_t cbVTag = 0;
4631
4632 e1kPrintTDesc(pState, pDesc, "vvv");
4633
4634#ifdef E1K_USE_TX_TIMERS
4635 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
4636#endif /* E1K_USE_TX_TIMERS */
4637
4638 switch (e1kGetDescType(pDesc))
4639 {
4640 case E1K_DTYP_CONTEXT:
4641 /* The caller have already updated the context */
4642 E1K_INC_ISTAT_CNT(pState->uStatDescCtx);
4643 e1kDescReport(pState, pDesc, addr);
4644 break;
4645
4646 case E1K_DTYP_DATA:
4647 {
4648 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4649 &pState->StatTxDescTSEData:
4650 &pState->StatTxDescData);
4651 E1K_INC_ISTAT_CNT(pState->uStatDescDat);
4652 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4653 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4654 {
4655 E1kLog2(("% Empty data descriptor, skipped.\n", INSTANCE(pState)));
4656 }
4657 else
4658 {
4659 /*
4660 * Add the descriptor data to the frame. If the frame is complete,
4661 * transmit it and reset the u16TxPktLen field.
4662 */
4663 if (e1kXmitIsGsoBuf(pState->CTX_SUFF(pTxSg)))
4664 {
4665 STAM_COUNTER_INC(&pState->StatTxPathGSO);
4666 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4667 if (pDesc->data.cmd.fEOP)
4668 {
4669 if ( fRc
4670 && pState->CTX_SUFF(pTxSg)
4671 && pState->CTX_SUFF(pTxSg)->cbUsed == (size_t)pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN)
4672 {
4673 e1kTransmitFrame(pState, fOnWorkerThread);
4674 E1K_INC_CNT32(TSCTC);
4675 }
4676 else
4677 {
4678 if (fRc)
4679 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , INSTANCE(pState),
4680 pState->CTX_SUFF(pTxSg), pState->CTX_SUFF(pTxSg) ? pState->CTX_SUFF(pTxSg)->cbUsed : 0,
4681 pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN));
4682 e1kXmitFreeBuf(pState);
4683 E1K_INC_CNT32(TSCTFC);
4684 }
4685 pState->u16TxPktLen = 0;
4686 }
4687 }
4688 else if (!pDesc->data.cmd.fTSE)
4689 {
4690 STAM_COUNTER_INC(&pState->StatTxPathRegular);
4691 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4692 if (pDesc->data.cmd.fEOP)
4693 {
4694 if (fRc && pState->CTX_SUFF(pTxSg))
4695 {
4696 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
4697 if (pState->fIPcsum)
4698 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4699 pState->contextNormal.ip.u8CSO,
4700 pState->contextNormal.ip.u8CSS,
4701 pState->contextNormal.ip.u16CSE);
4702 if (pState->fTCPcsum)
4703 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4704 pState->contextNormal.tu.u8CSO,
4705 pState->contextNormal.tu.u8CSS,
4706 pState->contextNormal.tu.u16CSE);
4707 e1kTransmitFrame(pState, fOnWorkerThread);
4708 }
4709 else
4710 e1kXmitFreeBuf(pState);
4711 pState->u16TxPktLen = 0;
4712 }
4713 }
4714 else
4715 {
4716 STAM_COUNTER_INC(&pState->StatTxPathFallback);
4717 rc = e1kFallbackAddToFrame(pState, pDesc, fOnWorkerThread);
4718 }
4719 }
4720 e1kDescReport(pState, pDesc, addr);
4721 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4722 break;
4723 }
4724
4725 case E1K_DTYP_LEGACY:
4726 STAM_COUNTER_INC(&pState->StatTxDescLegacy);
4727 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4728 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4729 {
4730 E1kLog(("%s Empty legacy descriptor, skipped.\n", INSTANCE(pState)));
4731 }
4732 else
4733 {
4734 /* Add fragment to frame. */
4735 if (e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4736 {
4737 E1K_INC_ISTAT_CNT(pState->uStatDescLeg);
4738
4739 /* Last fragment: Transmit and reset the packet storage counter. */
4740 if (pDesc->legacy.cmd.fEOP)
4741 {
4742 if (pDesc->legacy.cmd.fIC)
4743 {
4744 e1kInsertChecksum(pState,
4745 (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
4746 pState->u16TxPktLen,
4747 pDesc->legacy.cmd.u8CSO,
4748 pDesc->legacy.dw3.u8CSS,
4749 0);
4750 }
4751 e1kTransmitFrame(pState, fOnWorkerThread);
4752 pState->u16TxPktLen = 0;
4753 }
4754 }
4755 /* Last fragment + failure: free the buffer and reset the storage counter. */
4756 else if (pDesc->legacy.cmd.fEOP)
4757 {
4758 e1kXmitFreeBuf(pState);
4759 pState->u16TxPktLen = 0;
4760 }
4761 }
4762 e1kDescReport(pState, pDesc, addr);
4763 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4764 break;
4765
4766 default:
4767 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4768 INSTANCE(pState), e1kGetDescType(pDesc)));
4769 break;
4770 }
4771
4772 return rc;
4773}
4774
4775
4776DECLINLINE(void) e1kUpdateTxContext(E1KSTATE* pState, E1KTXDESC* pDesc)
4777{
4778 if (pDesc->context.dw2.fTSE)
4779 {
4780 pState->contextTSE = pDesc->context;
4781 pState->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4782 pState->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4783 e1kSetupGsoCtx(&pState->GsoCtx, &pDesc->context);
4784 STAM_COUNTER_INC(&pState->StatTxDescCtxTSE);
4785 }
4786 else
4787 {
4788 pState->contextNormal = pDesc->context;
4789 STAM_COUNTER_INC(&pState->StatTxDescCtxNormal);
4790 }
4791 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4792 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", INSTANCE(pState),
4793 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4794 pDesc->context.ip.u8CSS,
4795 pDesc->context.ip.u8CSO,
4796 pDesc->context.ip.u16CSE,
4797 pDesc->context.tu.u8CSS,
4798 pDesc->context.tu.u8CSO,
4799 pDesc->context.tu.u16CSE));
4800}
4801
4802
/**
 * Locate a complete packet in the TX descriptor cache and precompute the
 * size of the buffer to allocate for it.
 *
 * Walks the cached descriptors starting at iTxDCurrent, applying context
 * descriptors on the way, until a descriptor with EOP is found. On success
 * cbTxAlloc, fGSO, fVTag and u16VTagTCI are set up in the device state.
 *
 * @returns true if a complete packet (or a run of only empty descriptors)
 *          was found, false if more descriptors must be fetched first.
 * @param pState The device state structure.
 * @thread E1000_TX
 */
static bool e1kLocateTxPacket(E1KSTATE *pState)
{
    LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
             INSTANCE(pState), pState->cbTxAlloc));
    /* Check if we have located the packet already. */
    if (pState->cbTxAlloc)
    {
        LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
                 INSTANCE(pState), pState->cbTxAlloc));
        return true;
    }

    bool fTSE = false;
    uint32_t cbPacket = 0;

    for (int i = pState->iTxDCurrent; i < pState->nTxDFetched; ++i)
    {
        E1KTXDESC *pDesc = &pState->aTxDescriptors[i];
        switch (e1kGetDescType(pDesc))
        {
            case E1K_DTYP_CONTEXT:
                /* Apply the context and move to the next descriptor; 'continue'
                 * deliberately skips the EOP check below since context
                 * descriptors never terminate a packet. */
                e1kUpdateTxContext(pState, pDesc);
                continue;
            case E1K_DTYP_LEGACY:
                /* Skip empty descriptors. */
                if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
                    break;
                cbPacket += pDesc->legacy.cmd.u16Length;
                pState->fGSO = false;
                break;
            case E1K_DTYP_DATA:
                /* Skip empty descriptors. */
                if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
                    break;
                if (cbPacket == 0)
                {
                    /*
                     * The first fragment: save IXSM and TXSM options
                     * as these are only valid in the first fragment.
                     */
                    pState->fIPcsum = pDesc->data.dw3.fIXSM;
                    pState->fTCPcsum = pDesc->data.dw3.fTXSM;
                    fTSE = pDesc->data.cmd.fTSE;
                    /*
                     * TSE descriptors have VLE bit properly set in
                     * the first fragment.
                     */
                    if (fTSE)
                    {
                        pState->fVTag = pDesc->data.cmd.fVLE;
                        pState->u16VTagTCI = pDesc->data.dw3.u16Special;
                    }
                    pState->fGSO = e1kCanDoGso(pState, &pState->GsoCtx, &pDesc->data, &pState->contextTSE);
                }
                cbPacket += pDesc->data.cmd.u20DTALEN;
                break;
            default:
                AssertMsgFailed(("Impossible descriptor type!"));
        }
        /* NOTE: all 'break's above (including the empty-descriptor ones) fall
         * through to this EOP check. */
        if (pDesc->legacy.cmd.fEOP)
        {
            /*
             * Non-TSE descriptors have VLE bit properly set in
             * the last fragment.
             */
            if (!fTSE)
            {
                pState->fVTag = pDesc->data.cmd.fVLE;
                pState->u16VTagTCI = pDesc->data.dw3.u16Special;
            }
            /*
             * Compute the required buffer size. If we cannot do GSO but still
             * have to do segmentation we allocate the first segment only.
             */
            pState->cbTxAlloc = (!fTSE || pState->fGSO) ?
                cbPacket :
                RT_MIN(cbPacket, pState->contextTSE.dw3.u16MSS + pState->contextTSE.dw3.u8HDRLEN);
            /* Reserve room for the VLAN tag we insert ourselves. */
            if (pState->fVTag)
                pState->cbTxAlloc += 4;
            LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
                     INSTANCE(pState), pState->cbTxAlloc));
            return true;
        }
    }

    if (cbPacket == 0 && pState->nTxDFetched - pState->iTxDCurrent > 0)
    {
        /* All descriptors were empty, we need to process them as a dummy packet */
        LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
                 INSTANCE(pState), pState->cbTxAlloc));
        return true;
    }
    LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d\n",
             INSTANCE(pState), pState->cbTxAlloc));
    return false;
}
4899
4900
/**
 * Transmit the packet currently located in the TX descriptor cache.
 *
 * Processes cached descriptors one by one until the end-of-packet marker,
 * advancing TDH and raising ICR.TXD_LOW when the ring runs low.
 *
 * @returns VBox status code; on failure TDH/iTxDCurrent are left pointing at
 *          the failed descriptor so it can be retried later.
 * @param pState The device state structure.
 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
 * @thread E1000_TX
 */
static int e1kXmitPacket(E1KSTATE *pState, bool fOnWorkerThread)
{
    int rc = VINF_SUCCESS;

    LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
             INSTANCE(pState), pState->iTxDCurrent, pState->nTxDFetched));

    while (pState->iTxDCurrent < pState->nTxDFetched)
    {
        E1KTXDESC *pDesc = &pState->aTxDescriptors[pState->iTxDCurrent];
        E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
                 INSTANCE(pState), TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
        rc = e1kXmitDesc(pState, pDesc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
        if (RT_FAILURE(rc))
            break;
        /* Advance the head, wrapping at the end of the ring. */
        if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
            TDH = 0;
        uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
        if (uLowThreshold != 0 && e1kGetTxLen(pState) <= uLowThreshold)
        {
            E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
                     INSTANCE(pState), e1kGetTxLen(pState), GET_BITS(TXDCTL, LWTHRESH)*8));
            e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXD_LOW);
        }
        ++pState->iTxDCurrent;
        /* Stop after the end-of-packet descriptor (context descriptors never
         * carry a meaningful EOP bit). */
        if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
            break;
    }

    LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
             INSTANCE(pState), rc, pState->iTxDCurrent, pState->nTxDFetched));
    return rc;
}
4934#endif /* E1K_WITH_TXD_CACHE */
4935
4936#ifndef E1K_WITH_TXD_CACHE
4937/**
4938 * Transmit pending descriptors.
4939 *
4940 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
4941 *
4942 * @param pState The E1000 state.
4943 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
4944 */
4945static int e1kXmitPending(E1KSTATE *pState, bool fOnWorkerThread)
4946{
4947 int rc = VINF_SUCCESS;
4948
4949 /* Check if transmitter is enabled. */
4950 if (!(TCTL & TCTL_EN))
4951 return VINF_SUCCESS;
4952 /*
4953 * Grab the xmit lock of the driver as well as the E1K device state.
4954 */
4955 rc = e1kCsTxEnter(pState, VERR_SEM_BUSY);
4956 if (RT_LIKELY(rc == VINF_SUCCESS))
4957 {
4958 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
4959 if (pDrv)
4960 {
4961 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
4962 if (RT_FAILURE(rc))
4963 {
4964 e1kCsTxLeave(pState);
4965 return rc;
4966 }
4967 }
4968 /*
4969 * Process all pending descriptors.
4970 * Note! Do not process descriptors in locked state
4971 */
4972 while (TDH != TDT && !pState->fLocked)
4973 {
4974 E1KTXDESC desc;
4975 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
4976 INSTANCE(pState), TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
4977
4978 e1kLoadDesc(pState, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
4979 rc = e1kXmitDesc(pState, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
4980 /* If we failed to transmit descriptor we will try it again later */
4981 if (RT_FAILURE(rc))
4982 break;
4983 if (++TDH * sizeof(desc) >= TDLEN)
4984 TDH = 0;
4985
4986 if (e1kGetTxLen(pState) <= GET_BITS(TXDCTL, LWTHRESH)*8)
4987 {
4988 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
4989 INSTANCE(pState), e1kGetTxLen(pState), GET_BITS(TXDCTL, LWTHRESH)*8));
4990 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXD_LOW);
4991 }
4992
4993 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4994 }
4995
4996 /// @todo: uncomment: pState->uStatIntTXQE++;
4997 /// @todo: uncomment: e1kRaiseInterrupt(pState, ICR_TXQE);
4998 /*
4999 * Release the lock.
5000 */
5001 if (pDrv)
5002 pDrv->pfnEndXmit(pDrv);
5003 e1kCsTxLeave(pState);
5004 }
5005
5006 return rc;
5007}
5008#else /* E1K_WITH_TXD_CACHE */
5009static void e1kDumpTxDCache(E1KSTATE *pState)
5010{
5011 unsigned i, cDescs = TDLEN / sizeof(E1KTXDESC);
5012 uint32_t tdh = TDH;
5013 LogRel(("-- Transmit Descriptors (%d total) --\n", cDescs));
5014 for (i = 0; i < cDescs; ++i)
5015 {
5016 E1KTXDESC desc;
5017 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), e1kDescAddr(TDBAH, TDBAL, i),
5018 &desc, sizeof(desc));
5019 if (i == tdh)
5020 LogRel((">>> "));
5021 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5022 }
5023 LogRel(("-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5024 pState->iTxDCurrent, TDH, pState->nTxDFetched, E1K_TXD_CACHE_SIZE));
5025 if (tdh > pState->iTxDCurrent)
5026 tdh -= pState->iTxDCurrent;
5027 else
5028 tdh = cDescs + tdh - pState->iTxDCurrent;
5029 for (i = 0; i < pState->nTxDFetched; ++i)
5030 {
5031 if (i == pState->iTxDCurrent)
5032 LogRel((">>> "));
5033 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pState->aTxDescriptors[i]));
5034 }
5035}
5036
5037/**
5038 * Transmit pending descriptors.
5039 *
5040 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5041 *
5042 * @param pState The E1000 state.
5043 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5044 */
5045static int e1kXmitPending(E1KSTATE *pState, bool fOnWorkerThread)
5046{
5047 int rc = VINF_SUCCESS;
5048
5049 /* Check if transmitter is enabled. */
5050 if (!(TCTL & TCTL_EN))
5051 return VINF_SUCCESS;
5052 /*
5053 * Grab the xmit lock of the driver as well as the E1K device state.
5054 */
5055 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
5056 if (pDrv)
5057 {
5058 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5059 if (RT_FAILURE(rc))
5060 return rc;
5061 }
5062
5063 /*
5064 * Process all pending descriptors.
5065 * Note! Do not process descriptors in locked state
5066 */
5067 rc = e1kCsTxEnter(pState, VERR_SEM_BUSY);
5068 if (RT_LIKELY(rc == VINF_SUCCESS))
5069 {
5070 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
5071 /*
5072 * fIncomplete is set whenever we try to fetch additional descriptors
5073 * for an incomplete packet. If fail to locate a complete packet on
5074 * the next iteration we need to reset the cache or we risk to get
5075 * stuck in this loop forever.
5076 */
5077 bool fIncomplete = false;
5078 while (!pState->fLocked && e1kTxDLazyLoad(pState))
5079 {
5080 while (e1kLocateTxPacket(pState))
5081 {
5082 fIncomplete = false;
5083 /* Found a complete packet, allocate it. */
5084 rc = e1kXmitAllocBuf(pState, pState->fGSO);
5085 /* If we're out of bandwidth we'll come back later. */
5086 if (RT_FAILURE(rc))
5087 goto out;
5088 /* Copy the packet to allocated buffer and send it. */
5089 rc = e1kXmitPacket(pState, fOnWorkerThread);
5090 /* If we're out of bandwidth we'll come back later. */
5091 if (RT_FAILURE(rc))
5092 goto out;
5093 }
5094 uint8_t u8Remain = pState->nTxDFetched - pState->iTxDCurrent;
5095 if (RT_UNLIKELY(fIncomplete))
5096 {
5097 static bool fTxDCacheDumped = false;
5098 /*
5099 * The descriptor cache is full, but we were unable to find
5100 * a complete packet in it. Drop the cache and hope that
5101 * the guest driver can recover from network card error.
5102 */
5103 LogRel(("%s No complete packets in%s TxD cache! "
5104 "Fetched=%d, current=%d, TX len=%d.\n",
5105 INSTANCE(pState),
5106 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5107 pState->nTxDFetched, pState->iTxDCurrent,
5108 e1kGetTxLen(pState)));
5109 if (!fTxDCacheDumped)
5110 {
5111 fTxDCacheDumped = true;
5112 e1kDumpTxDCache(pState);
5113 }
5114 pState->iTxDCurrent = pState->nTxDFetched = 0;
5115 /*
5116 * Returning an error at this point means Guru in R0
5117 * (see @bugref{6428}).
5118 */
5119# ifdef IN_RING3
5120 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5121# else /* !IN_RING3 */
5122 rc = VINF_IOM_R3_IOPORT_WRITE;
5123# endif /* !IN_RING3 */
5124 goto out;
5125 }
5126 if (u8Remain > 0)
5127 {
5128 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5129 "%d more are available\n",
5130 INSTANCE(pState), pState->iTxDCurrent, u8Remain,
5131 e1kGetTxLen(pState) - u8Remain));
5132
5133 /*
5134 * A packet was partially fetched. Move incomplete packet to
5135 * the beginning of cache buffer, then load more descriptors.
5136 */
5137 memmove(pState->aTxDescriptors,
5138 &pState->aTxDescriptors[pState->iTxDCurrent],
5139 u8Remain * sizeof(E1KTXDESC));
5140 pState->iTxDCurrent = 0;
5141 pState->nTxDFetched = u8Remain;
5142 e1kTxDLoadMore(pState);
5143 fIncomplete = true;
5144 }
5145 else
5146 pState->nTxDFetched = 0;
5147 pState->iTxDCurrent = 0;
5148 }
5149 if (!pState->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5150 {
5151 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5152 INSTANCE(pState)));
5153 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXD_LOW);
5154 }
5155out:
5156 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
5157
5158 /// @todo: uncomment: pState->uStatIntTXQE++;
5159 /// @todo: uncomment: e1kRaiseInterrupt(pState, ICR_TXQE);
5160
5161 e1kCsTxLeave(pState);
5162 }
5163
5164
5165 /*
5166 * Release the lock.
5167 */
5168 if (pDrv)
5169 pDrv->pfnEndXmit(pDrv);
5170 return rc;
5171}
5172#endif /* E1K_WITH_TXD_CACHE */
5173
5174#ifdef IN_RING3
5175
5176/**
5177 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5178 */
5179static DECLCALLBACK(void) e1kNetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5180{
5181 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
5182 /* Resume suspended transmission */
5183 STATUS &= ~STATUS_TXOFF;
5184 e1kXmitPending(pState, true /*fOnWorkerThread*/);
5185}
5186
5187/**
5188 * Callback for consuming from transmit queue. It gets called in R3 whenever
5189 * we enqueue something in R0/GC.
5190 *
5191 * @returns true
5192 * @param pDevIns Pointer to device instance structure.
5193 * @param pItem Pointer to the element being dequeued (not used).
5194 * @thread ???
5195 */
5196static DECLCALLBACK(bool) e1kTxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5197{
5198 NOREF(pItem);
5199 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5200 E1kLog2(("%s e1kTxQueueConsumer:\n", INSTANCE(pState)));
5201
5202 int rc = e1kXmitPending(pState, false /*fOnWorkerThread*/);
5203 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
5204
5205 return true;
5206}
5207
5208/**
5209 * Handler for the wakeup signaller queue.
5210 */
5211static DECLCALLBACK(bool) e1kCanRxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5212{
5213 e1kWakeupReceive(pDevIns);
5214 return true;
5215}
5216
5217#endif /* IN_RING3 */
5218
5219/**
5220 * Write handler for Transmit Descriptor Tail register.
5221 *
5222 * @param pState The device state structure.
5223 * @param offset Register offset in memory-mapped frame.
5224 * @param index Register index in register array.
5225 * @param value The value to store.
5226 * @param mask Used to implement partial writes (8 and 16-bit).
5227 * @thread EMT
5228 */
5229static int e1kRegWriteTDT(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5230{
5231 int rc = e1kRegWriteDefault(pState, offset, index, value);
5232
5233 /* All descriptors starting with head and not including tail belong to us. */
5234 /* Process them. */
5235 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5236 INSTANCE(pState), TDBAL, TDBAH, TDLEN, TDH, TDT));
5237
5238 /* Ignore TDT writes when the link is down. */
5239 if (TDH != TDT && (STATUS & STATUS_LU))
5240 {
5241 E1kLogRel(("E1000: TDT write: %d descriptors to process\n", e1kGetTxLen(pState)));
5242 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5243 INSTANCE(pState), e1kGetTxLen(pState)));
5244
5245 /* Transmit pending packets if possible, defer it if we cannot do it
5246 in the current context. */
5247#ifdef E1K_TX_DELAY
5248 rc = e1kCsTxEnter(pState, VERR_SEM_BUSY);
5249 if (RT_LIKELY(rc == VINF_SUCCESS))
5250 {
5251 if (!TMTimerIsActive(pState->CTX_SUFF(pTXDTimer)))
5252 {
5253#ifdef E1K_INT_STATS
5254 pState->u64ArmedAt = RTTimeNanoTS();
5255#endif
5256 e1kArmTimer(pState, pState->CTX_SUFF(pTXDTimer), E1K_TX_DELAY);
5257 }
5258 E1K_INC_ISTAT_CNT(pState->uStatTxDelayed);
5259 e1kCsTxLeave(pState);
5260 return rc;
5261 }
5262 /* We failed to enter the TX critical section -- transmit as usual. */
5263#endif /* E1K_TX_DELAY */
5264#ifndef IN_RING3
5265 if (!pState->CTX_SUFF(pDrv))
5266 {
5267 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pState->CTX_SUFF(pTxQueue));
5268 if (RT_UNLIKELY(pItem))
5269 PDMQueueInsert(pState->CTX_SUFF(pTxQueue), pItem);
5270 }
5271 else
5272#endif
5273 {
5274 rc = e1kXmitPending(pState, false /*fOnWorkerThread*/);
5275 if (rc == VERR_TRY_AGAIN)
5276 rc = VINF_SUCCESS;
5277 else if (rc == VERR_SEM_BUSY)
5278 rc = VINF_IOM_R3_IOPORT_WRITE;
5279 AssertRC(rc);
5280 }
5281 }
5282
5283 return rc;
5284}
5285
5286/**
5287 * Write handler for Multicast Table Array registers.
5288 *
5289 * @param pState The device state structure.
5290 * @param offset Register offset in memory-mapped frame.
5291 * @param index Register index in register array.
5292 * @param value The value to store.
5293 * @thread EMT
5294 */
5295static int e1kRegWriteMTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5296{
5297 AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->auMTA), VERR_DEV_IO_ERROR);
5298 pState->auMTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auMTA[0])] = value;
5299
5300 return VINF_SUCCESS;
5301}
5302
5303/**
5304 * Read handler for Multicast Table Array registers.
5305 *
5306 * @returns VBox status code.
5307 *
5308 * @param pState The device state structure.
5309 * @param offset Register offset in memory-mapped frame.
5310 * @param index Register index in register array.
5311 * @thread EMT
5312 */
5313static int e1kRegReadMTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5314{
5315 AssertReturn(offset - s_e1kRegMap[index].offset< sizeof(pState->auMTA), VERR_DEV_IO_ERROR);
5316 *pu32Value = pState->auMTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auMTA[0])];
5317
5318 return VINF_SUCCESS;
5319}
5320
5321/**
5322 * Write handler for Receive Address registers.
5323 *
5324 * @param pState The device state structure.
5325 * @param offset Register offset in memory-mapped frame.
5326 * @param index Register index in register array.
5327 * @param value The value to store.
5328 * @thread EMT
5329 */
5330static int e1kRegWriteRA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5331{
5332 AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->aRecAddr.au32), VERR_DEV_IO_ERROR);
5333 pState->aRecAddr.au32[(offset - s_e1kRegMap[index].offset)/sizeof(pState->aRecAddr.au32[0])] = value;
5334
5335 return VINF_SUCCESS;
5336}
5337
5338/**
5339 * Read handler for Receive Address registers.
5340 *
5341 * @returns VBox status code.
5342 *
5343 * @param pState The device state structure.
5344 * @param offset Register offset in memory-mapped frame.
5345 * @param index Register index in register array.
5346 * @thread EMT
5347 */
5348static int e1kRegReadRA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5349{
5350 AssertReturn(offset - s_e1kRegMap[index].offset< sizeof(pState->aRecAddr.au32), VERR_DEV_IO_ERROR);
5351 *pu32Value = pState->aRecAddr.au32[(offset - s_e1kRegMap[index].offset)/sizeof(pState->aRecAddr.au32[0])];
5352
5353 return VINF_SUCCESS;
5354}
5355
5356/**
5357 * Write handler for VLAN Filter Table Array registers.
5358 *
5359 * @param pState The device state structure.
5360 * @param offset Register offset in memory-mapped frame.
5361 * @param index Register index in register array.
5362 * @param value The value to store.
5363 * @thread EMT
5364 */
5365static int e1kRegWriteVFTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5366{
5367 AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->auVFTA), VINF_SUCCESS);
5368 pState->auVFTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auVFTA[0])] = value;
5369
5370 return VINF_SUCCESS;
5371}
5372
5373/**
5374 * Read handler for VLAN Filter Table Array registers.
5375 *
5376 * @returns VBox status code.
5377 *
5378 * @param pState The device state structure.
5379 * @param offset Register offset in memory-mapped frame.
5380 * @param index Register index in register array.
5381 * @thread EMT
5382 */
5383static int e1kRegReadVFTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5384{
5385 AssertReturn(offset - s_e1kRegMap[index].offset< sizeof(pState->auVFTA), VERR_DEV_IO_ERROR);
5386 *pu32Value = pState->auVFTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auVFTA[0])];
5387
5388 return VINF_SUCCESS;
5389}
5390
5391/**
5392 * Read handler for unimplemented registers.
5393 *
5394 * Merely reports reads from unimplemented registers.
5395 *
5396 * @returns VBox status code.
5397 *
5398 * @param pState The device state structure.
5399 * @param offset Register offset in memory-mapped frame.
5400 * @param index Register index in register array.
5401 * @thread EMT
5402 */
5403
5404static int e1kRegReadUnimplemented(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5405{
5406 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5407 INSTANCE(pState), offset, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5408 *pu32Value = 0;
5409
5410 return VINF_SUCCESS;
5411}
5412
5413/**
5414 * Default register read handler with automatic clear operation.
5415 *
5416 * Retrieves the value of register from register array in device state structure.
5417 * Then resets all bits.
5418 *
5419 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5420 * done in the caller.
5421 *
5422 * @returns VBox status code.
5423 *
5424 * @param pState The device state structure.
5425 * @param offset Register offset in memory-mapped frame.
5426 * @param index Register index in register array.
5427 * @thread EMT
5428 */
5429
5430static int e1kRegReadAutoClear(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5431{
5432 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5433 int rc = e1kRegReadDefault(pState, offset, index, pu32Value);
5434 pState->auRegs[index] = 0;
5435
5436 return rc;
5437}
5438
5439/**
5440 * Default register read handler.
5441 *
5442 * Retrieves the value of register from register array in device state structure.
5443 * Bits corresponding to 0s in 'readable' mask will always read as 0s.
5444 *
5445 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5446 * done in the caller.
5447 *
5448 * @returns VBox status code.
5449 *
5450 * @param pState The device state structure.
5451 * @param offset Register offset in memory-mapped frame.
5452 * @param index Register index in register array.
5453 * @thread EMT
5454 */
5455
5456static int e1kRegReadDefault(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5457{
5458 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5459 *pu32Value = pState->auRegs[index] & s_e1kRegMap[index].readable;
5460
5461 return VINF_SUCCESS;
5462}
5463
5464/**
5465 * Write handler for unimplemented registers.
5466 *
5467 * Merely reports writes to unimplemented registers.
5468 *
5469 * @param pState The device state structure.
5470 * @param offset Register offset in memory-mapped frame.
5471 * @param index Register index in register array.
5472 * @param value The value to store.
5473 * @thread EMT
5474 */
5475
5476 static int e1kRegWriteUnimplemented(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5477{
5478 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
5479 INSTANCE(pState), offset, value, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5480
5481 return VINF_SUCCESS;
5482}
5483
5484/**
5485 * Default register write handler.
5486 *
5487 * Stores the value to the register array in device state structure. Only bits
5488 * corresponding to 1s both in 'writable' and 'mask' will be stored.
5489 *
5490 * @returns VBox status code.
5491 *
5492 * @param pState The device state structure.
5493 * @param offset Register offset in memory-mapped frame.
5494 * @param index Register index in register array.
5495 * @param value The value to store.
5496 * @param mask Used to implement partial writes (8 and 16-bit).
5497 * @thread EMT
5498 */
5499
5500static int e1kRegWriteDefault(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5501{
5502 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5503 pState->auRegs[index] = (value & s_e1kRegMap[index].writable) |
5504 (pState->auRegs[index] & ~s_e1kRegMap[index].writable);
5505
5506 return VINF_SUCCESS;
5507}
5508
5509/**
5510 * Search register table for matching register.
5511 *
5512 * @returns Index in the register table or -1 if not found.
5513 *
5514 * @param pState The device state structure.
5515 * @param uOffset Register offset in memory-mapped region.
5516 * @thread EMT
5517 */
5518static int e1kRegLookup(E1KSTATE *pState, uint32_t uOffset)
5519{
5520 int index;
5521
5522 for (index = 0; index < E1K_NUM_OF_REGS; index++)
5523 {
5524 if (s_e1kRegMap[index].offset <= uOffset && uOffset < s_e1kRegMap[index].offset + s_e1kRegMap[index].size)
5525 {
5526 return index;
5527 }
5528 }
5529
5530 return -1;
5531}
5532
5533/**
5534 * Handle register read operation.
5535 *
5536 * Looks up and calls appropriate handler.
5537 *
5538 * @returns VBox status code.
5539 *
5540 * @param pState The device state structure.
5541 * @param uOffset Register offset in memory-mapped frame.
5542 * @param pv Where to store the result.
5543 * @param cb Number of bytes to read.
5544 * @thread EMT
5545 */
5546static int e1kRegRead(E1KSTATE *pState, uint32_t uOffset, void *pv, uint32_t cb)
5547{
5548 uint32_t u32 = 0;
5549 uint32_t mask = 0;
5550 uint32_t shift;
5551 int rc = VINF_SUCCESS;
5552 int index = e1kRegLookup(pState, uOffset);
5553 const char *szInst = INSTANCE(pState);
5554#ifdef DEBUG
5555 char buf[9];
5556#endif
5557
5558 /*
5559 * From the spec:
5560 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5561 * double word) is ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5562 */
5563
5564 /*
5565 * To be able to write bytes and short word we convert them
5566 * to properly shifted 32-bit words and masks. The idea is
5567 * to keep register-specific handlers simple. Most accesses
5568 * will be 32-bit anyway.
5569 */
5570 switch (cb)
5571 {
5572 case 1: mask = 0x000000FF; break;
5573 case 2: mask = 0x0000FFFF; break;
5574 case 4: mask = 0xFFFFFFFF; break;
5575 default:
5576 return PDMDevHlpDBGFStop(pState->CTX_SUFF(pDevIns), RT_SRC_POS,
5577 "%s e1kRegRead: unsupported op size: offset=%#10x cb=%#10x\n",
5578 szInst, uOffset, cb);
5579 }
5580 if (index != -1)
5581 {
5582 if (s_e1kRegMap[index].readable)
5583 {
5584 /* Make the mask correspond to the bits we are about to read. */
5585 shift = (uOffset - s_e1kRegMap[index].offset) % sizeof(uint32_t) * 8;
5586 mask <<= shift;
5587 if (!mask)
5588 return PDMDevHlpDBGFStop(pState->CTX_SUFF(pDevIns), RT_SRC_POS,
5589 "%s e1kRegRead: Zero mask: offset=%#10x cb=%#10x\n",
5590 szInst, uOffset, cb);
5591 /*
5592 * Read it. Pass the mask so the handler knows what has to be read.
5593 * Mask out irrelevant bits.
5594 */
5595 //rc = e1kCsEnter(pState, VERR_SEM_BUSY, RT_SRC_POS);
5596 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5597 return rc;
5598 //pState->fDelayInts = false;
5599 //pState->iStatIntLost += pState->iStatIntLostOne;
5600 //pState->iStatIntLostOne = 0;
5601 rc = s_e1kRegMap[index].pfnRead(pState, uOffset & 0xFFFFFFFC, index, &u32);
5602 u32 &= mask;
5603 //e1kCsLeave(pState);
5604 E1kLog2(("%s At %08X read %s from %s (%s)\n",
5605 szInst, uOffset, e1kU32toHex(u32, mask, buf), s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5606 /* Shift back the result. */
5607 u32 >>= shift;
5608 }
5609 else
5610 {
5611 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
5612 szInst, uOffset, e1kU32toHex(u32, mask, buf), s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5613 }
5614 }
5615 else
5616 {
5617 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
5618 szInst, uOffset, e1kU32toHex(u32, mask, buf)));
5619 }
5620
5621 memcpy(pv, &u32, cb);
5622 return rc;
5623}
5624
5625/**
5626 * Handle register write operation.
5627 *
5628 * Looks up and calls appropriate handler.
5629 *
5630 * @returns VBox status code.
5631 *
5632 * @param pState The device state structure.
5633 * @param offReg Register offset in memory-mapped frame.
5634 * @param pv Where to fetch the value.
5635 * @param cb Number of bytes to write.
5636 * @thread EMT
5637 */
5638static int e1kRegWrite(E1KSTATE *pState, uint32_t offReg, void const *pv, unsigned cb)
5639{
5640 int rc = VINF_SUCCESS;
5641 int index = e1kRegLookup(pState, offReg);
5642 uint32_t u32;
5643
5644 /*
5645 * From the spec:
5646 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5647 * double word) is ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5648 */
5649
5650 if (cb != 4)
5651 {
5652 E1kLog(("%s e1kRegWrite: Spec violation: unsupported op size: offset=%#10x cb=%#10x, ignored.\n",
5653 INSTANCE(pState), offReg, cb));
5654 return VINF_SUCCESS;
5655 }
5656 if (offReg & 3)
5657 {
5658 E1kLog(("%s e1kRegWrite: Spec violation: misaligned offset: %#10x cb=%#10x, ignored.\n",
5659 INSTANCE(pState), offReg, cb));
5660 return VINF_SUCCESS;
5661 }
5662 u32 = *(uint32_t*)pv;
5663 if (index != -1)
5664 {
5665 if (s_e1kRegMap[index].writable)
5666 {
5667 /*
5668 * Write it. Pass the mask so the handler knows what has to be written.
5669 * Mask out irrelevant bits.
5670 */
5671 E1kLog2(("%s At %08X write %08X to %s (%s)\n",
5672 INSTANCE(pState), offReg, u32, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5673 //rc = e1kCsEnter(pState, VERR_SEM_BUSY, RT_SRC_POS);
5674 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5675 return rc;
5676 //pState->fDelayInts = false;
5677 //pState->iStatIntLost += pState->iStatIntLostOne;
5678 //pState->iStatIntLostOne = 0;
5679 rc = s_e1kRegMap[index].pfnWrite(pState, offReg, index, u32);
5680 //e1kCsLeave(pState);
5681 }
5682 else
5683 {
5684 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
5685 INSTANCE(pState), offReg, u32, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5686 }
5687 }
5688 else
5689 {
5690 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
5691 INSTANCE(pState), offReg, u32));
5692 }
5693 return rc;
5694}
5695
5696
5697/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
5698
5699/**
5700 * I/O handler for memory-mapped read operations.
5701 *
5702 * @returns VBox status code.
5703 *
5704 * @param pDevIns The device instance.
5705 * @param pvUser User argument.
5706 * @param GCPhysAddr Physical address (in GC) where the read starts.
5707 * @param pv Where to store the result.
5708 * @param cb Number of bytes read.
5709 * @thread EMT
5710 */
5711PDMBOTHCBDECL(int) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
5712{
5713 NOREF(pvUser);
5714 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5715 uint32_t offReg = GCPhysAddr - pState->addrMMReg;
5716 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatMMIORead), a);
5717
5718 Assert(offReg < E1K_MM_SIZE);
5719
5720 int rc = e1kRegRead(pState, offReg, pv, cb);
5721 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatMMIORead), a);
5722 return rc;
5723}
5724
5725/**
5726 * Memory mapped I/O Handler for write operations.
5727 *
5728 * @returns VBox status code.
5729 *
5730 * @param pDevIns The device instance.
5731 * @param pvUser User argument.
5732 * @param GCPhysAddr Physical address (in GC) where the read starts.
5733 * @param pv Where to fetch the value.
5734 * @param cb Number of bytes to write.
5735 * @thread EMT
5736 */
5737PDMBOTHCBDECL(int) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
5738{
5739 NOREF(pvUser);
5740 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5741 uint32_t offReg = GCPhysAddr - pState->addrMMReg;
5742 int rc;
5743 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatMMIOWrite), a);
5744
5745 Assert(offReg < E1K_MM_SIZE);
5746 if (cb != 4)
5747 {
5748 E1kLog(("%s e1kMMIOWrite: invalid op size: offset=%#10x cb=%#10x", pDevIns, offReg, cb));
5749 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "e1kMMIOWrite: invalid op size: offset=%#10x cb=%#10x\n", offReg, cb);
5750 }
5751 else
5752 rc = e1kRegWrite(pState, offReg, pv, cb);
5753
5754 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatMMIOWrite), a);
5755 return rc;
5756}
5757
5758/**
5759 * Port I/O Handler for IN operations.
5760 *
5761 * @returns VBox status code.
5762 *
5763 * @param pDevIns The device instance.
5764 * @param pvUser Pointer to the device state structure.
5765 * @param port Port number used for the IN operation.
5766 * @param pu32 Where to store the result.
5767 * @param cb Number of bytes read.
5768 * @thread EMT
5769 */
5770PDMBOTHCBDECL(int) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT port, uint32_t *pu32, unsigned cb)
5771{
5772 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5773 int rc = VINF_SUCCESS;
5774 const char *szInst = INSTANCE(pState);
5775 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatIORead), a);
5776
5777 port -= pState->addrIOPort;
5778 if (cb != 4)
5779 {
5780 E1kLog(("%s e1kIOPortIn: invalid op size: port=%RTiop cb=%08x", szInst, port, cb));
5781 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: port=%RTiop cb=%08x\n", szInst, port, cb);
5782 }
5783 else
5784 switch (port)
5785 {
5786 case 0x00: /* IOADDR */
5787 *pu32 = pState->uSelectedReg;
5788 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", szInst, pState->uSelectedReg, *pu32));
5789 break;
5790 case 0x04: /* IODATA */
5791 rc = e1kRegRead(pState, pState->uSelectedReg, pu32, cb);
5792 /** @todo wrong return code triggers assertions in the debug build; fix please */
5793 if (rc == VINF_IOM_R3_MMIO_READ)
5794 rc = VINF_IOM_R3_IOPORT_READ;
5795
5796 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", szInst, pState->uSelectedReg, *pu32));
5797 break;
5798 default:
5799 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", szInst, port));
5800 //*pRC = VERR_IOM_IOPORT_UNUSED;
5801 }
5802
5803 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatIORead), a);
5804 return rc;
5805}
5806
5807
5808/**
5809 * Port I/O Handler for OUT operations.
5810 *
5811 * @returns VBox status code.
5812 *
5813 * @param pDevIns The device instance.
5814 * @param pvUser User argument.
5815 * @param Port Port number used for the IN operation.
5816 * @param u32 The value to output.
5817 * @param cb The value size in bytes.
5818 * @thread EMT
5819 */
5820PDMBOTHCBDECL(int) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT port, uint32_t u32, unsigned cb)
5821{
5822 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5823 int rc = VINF_SUCCESS;
5824 const char *szInst = INSTANCE(pState);
5825 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatIOWrite), a);
5826
5827 E1kLog2(("%s e1kIOPortOut: port=%RTiop value=%08x\n", szInst, port, u32));
5828 if (cb != 4)
5829 {
5830 E1kLog(("%s e1kIOPortOut: invalid op size: port=%RTiop cb=%08x\n", szInst, port, cb));
5831 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid op size: port=%RTiop cb=%#x\n", szInst, port, cb);
5832 }
5833 else
5834 {
5835 port -= pState->addrIOPort;
5836 switch (port)
5837 {
5838 case 0x00: /* IOADDR */
5839 pState->uSelectedReg = u32;
5840 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", szInst, pState->uSelectedReg));
5841 break;
5842 case 0x04: /* IODATA */
5843 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", szInst, pState->uSelectedReg, u32));
5844 rc = e1kRegWrite(pState, pState->uSelectedReg, &u32, cb);
5845 /** @todo wrong return code triggers assertions in the debug build; fix please */
5846 if (rc == VINF_IOM_R3_MMIO_WRITE)
5847 rc = VINF_IOM_R3_IOPORT_WRITE;
5848 break;
5849 default:
5850 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", szInst, port));
5851 /** @todo Do we need to return an error here?
5852 * bird: VINF_SUCCESS is fine for unhandled cases of an OUT handler. (If you're curious
5853 * about the guest code and a bit adventuresome, try rc = PDMDeviceDBGFStop(...);) */
5854 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", port);
5855 }
5856 }
5857
5858 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatIOWrite), a);
5859 return rc;
5860}
5861
5862#ifdef IN_RING3
5863
5864/**
5865 * Dump complete device state to log.
5866 *
5867 * @param pState Pointer to device state.
5868 */
5869static void e1kDumpState(E1KSTATE *pState)
5870{
5871 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
5872 {
5873 E1kLog2(("%s %8.8s = %08x\n", INSTANCE(pState),
5874 s_e1kRegMap[i].abbrev, pState->auRegs[i]));
5875 }
5876# ifdef E1K_INT_STATS
5877 LogRel(("%s Interrupt attempts: %d\n", INSTANCE(pState), pState->uStatIntTry));
5878 LogRel(("%s Interrupts raised : %d\n", INSTANCE(pState), pState->uStatInt));
5879 LogRel(("%s Interrupts lowered: %d\n", INSTANCE(pState), pState->uStatIntLower));
5880 LogRel(("%s Interrupts delayed: %d\n", INSTANCE(pState), pState->uStatIntDly));
5881 LogRel(("%s Disabled delayed: %d\n", INSTANCE(pState), pState->uStatDisDly));
5882 LogRel(("%s Interrupts skipped: %d\n", INSTANCE(pState), pState->uStatIntSkip));
5883 LogRel(("%s Masked interrupts : %d\n", INSTANCE(pState), pState->uStatIntMasked));
5884 LogRel(("%s Early interrupts : %d\n", INSTANCE(pState), pState->uStatIntEarly));
5885 LogRel(("%s Late interrupts : %d\n", INSTANCE(pState), pState->uStatIntLate));
5886 LogRel(("%s Lost interrupts : %d\n", INSTANCE(pState), pState->iStatIntLost));
5887 LogRel(("%s Interrupts by RX : %d\n", INSTANCE(pState), pState->uStatIntRx));
5888 LogRel(("%s Interrupts by TX : %d\n", INSTANCE(pState), pState->uStatIntTx));
5889 LogRel(("%s Interrupts by ICS : %d\n", INSTANCE(pState), pState->uStatIntICS));
5890 LogRel(("%s Interrupts by RDTR: %d\n", INSTANCE(pState), pState->uStatIntRDTR));
5891 LogRel(("%s Interrupts by RDMT: %d\n", INSTANCE(pState), pState->uStatIntRXDMT0));
5892 LogRel(("%s Interrupts by TXQE: %d\n", INSTANCE(pState), pState->uStatIntTXQE));
5893 LogRel(("%s TX int delay asked: %d\n", INSTANCE(pState), pState->uStatTxIDE));
5894 LogRel(("%s TX delayed: %d\n", INSTANCE(pState), pState->uStatTxDelayed));
5895 LogRel(("%s TX delay expired: %d\n", INSTANCE(pState), pState->uStatTxDelayExp));
5896 LogRel(("%s TX no report asked: %d\n", INSTANCE(pState), pState->uStatTxNoRS));
5897 LogRel(("%s TX abs timer expd : %d\n", INSTANCE(pState), pState->uStatTAD));
5898 LogRel(("%s TX int timer expd : %d\n", INSTANCE(pState), pState->uStatTID));
5899 LogRel(("%s RX abs timer expd : %d\n", INSTANCE(pState), pState->uStatRAD));
5900 LogRel(("%s RX int timer expd : %d\n", INSTANCE(pState), pState->uStatRID));
5901 LogRel(("%s TX CTX descriptors: %d\n", INSTANCE(pState), pState->uStatDescCtx));
5902 LogRel(("%s TX DAT descriptors: %d\n", INSTANCE(pState), pState->uStatDescDat));
5903 LogRel(("%s TX LEG descriptors: %d\n", INSTANCE(pState), pState->uStatDescLeg));
5904 LogRel(("%s Received frames : %d\n", INSTANCE(pState), pState->uStatRxFrm));
5905 LogRel(("%s Transmitted frames: %d\n", INSTANCE(pState), pState->uStatTxFrm));
5906 LogRel(("%s TX frames up to 1514: %d\n", INSTANCE(pState), pState->uStatTx1514));
5907 LogRel(("%s TX frames up to 2962: %d\n", INSTANCE(pState), pState->uStatTx2962));
5908 LogRel(("%s TX frames up to 4410: %d\n", INSTANCE(pState), pState->uStatTx4410));
5909 LogRel(("%s TX frames up to 5858: %d\n", INSTANCE(pState), pState->uStatTx5858));
5910 LogRel(("%s TX frames up to 7306: %d\n", INSTANCE(pState), pState->uStatTx7306));
5911 LogRel(("%s TX frames up to 8754: %d\n", INSTANCE(pState), pState->uStatTx8754));
5912 LogRel(("%s TX frames up to 16384: %d\n", INSTANCE(pState), pState->uStatTx16384));
5913 LogRel(("%s TX frames up to 32768: %d\n", INSTANCE(pState), pState->uStatTx32768));
5914 LogRel(("%s Larger TX frames : %d\n", INSTANCE(pState), pState->uStatTxLarge));
5915 LogRel(("%s Max TX Delay : %lld\n", INSTANCE(pState), pState->uStatMaxTxDelay));
5916# endif /* E1K_INT_STATS */
5917}
5918
5919/**
5920 * @callback_method_impl{FNPCIIOREGIONMAP}
5921 */
5922static DECLCALLBACK(int) e1kMap(PPCIDEVICE pPciDev, int iRegion, RTGCPHYS GCPhysAddress, uint32_t cb, PCIADDRESSSPACE enmType)
5923{
5924 int rc;
5925 E1KSTATE *pState = PDMINS_2_DATA(pPciDev->pDevIns, E1KSTATE*);
5926
5927 switch (enmType)
5928 {
5929 case PCI_ADDRESS_SPACE_IO:
5930 pState->addrIOPort = (RTIOPORT)GCPhysAddress;
5931 rc = PDMDevHlpIOPortRegister(pPciDev->pDevIns, pState->addrIOPort, cb, 0,
5932 e1kIOPortOut, e1kIOPortIn, NULL, NULL, "E1000");
5933 if (RT_FAILURE(rc))
5934 break;
5935 if (pState->fR0Enabled)
5936 {
5937 rc = PDMDevHlpIOPortRegisterR0(pPciDev->pDevIns, pState->addrIOPort, cb, 0,
5938 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
5939 if (RT_FAILURE(rc))
5940 break;
5941 }
5942 if (pState->fGCEnabled)
5943 {
5944 rc = PDMDevHlpIOPortRegisterRC(pPciDev->pDevIns, pState->addrIOPort, cb, 0,
5945 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
5946 }
5947 break;
5948 case PCI_ADDRESS_SPACE_MEM:
5949 pState->addrMMReg = GCPhysAddress;
5950 rc = PDMDevHlpMMIORegister(pPciDev->pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
5951 IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
5952 e1kMMIOWrite, e1kMMIORead, "E1000");
5953 if (pState->fR0Enabled)
5954 {
5955 rc = PDMDevHlpMMIORegisterR0(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTR0PTR /*pvUser*/,
5956 "e1kMMIOWrite", "e1kMMIORead");
5957 if (RT_FAILURE(rc))
5958 break;
5959 }
5960 if (pState->fGCEnabled)
5961 {
5962 rc = PDMDevHlpMMIORegisterRC(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTRCPTR /*pvUser*/,
5963 "e1kMMIOWrite", "e1kMMIORead");
5964 }
5965 break;
5966 default:
5967 /* We should never get here */
5968 AssertMsgFailed(("Invalid PCI address space param in map callback"));
5969 rc = VERR_INTERNAL_ERROR;
5970 break;
5971 }
5972 return rc;
5973}
5974
5975
5976/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
5977
5978/**
5979 * Check if the device can receive data now.
5980 * This must be called before the pfnRecieve() method is called.
5981 *
5982 * @returns Number of bytes the device can receive.
5983 * @param pInterface Pointer to the interface structure containing the called function pointer.
5984 * @thread EMT
5985 */
5986static int e1kCanReceive(E1KSTATE *pState)
5987{
5988#ifndef E1K_WITH_RXD_CACHE
5989 size_t cb;
5990
5991 if (RT_UNLIKELY(e1kCsRxEnter(pState, VERR_SEM_BUSY) != VINF_SUCCESS))
5992 return VERR_NET_NO_BUFFER_SPACE;
5993
5994 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
5995 {
5996 E1KRXDESC desc;
5997 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
5998 &desc, sizeof(desc));
5999 if (desc.status.fDD)
6000 cb = 0;
6001 else
6002 cb = pState->u16RxBSize;
6003 }
6004 else if (RDH < RDT)
6005 cb = (RDT - RDH) * pState->u16RxBSize;
6006 else if (RDH > RDT)
6007 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pState->u16RxBSize;
6008 else
6009 {
6010 cb = 0;
6011 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6012 }
6013 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6014 INSTANCE(pState), RDH, RDT, RDLEN, pState->u16RxBSize, cb));
6015
6016 e1kCsRxLeave(pState);
6017 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6018#else /* E1K_WITH_RXD_CACHE */
6019 int rc = VINF_SUCCESS;
6020
6021 if (RT_UNLIKELY(e1kCsRxEnter(pState, VERR_SEM_BUSY) != VINF_SUCCESS))
6022 return VERR_NET_NO_BUFFER_SPACE;
6023
6024 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6025 {
6026 E1KRXDESC desc;
6027 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6028 &desc, sizeof(desc));
6029 if (desc.status.fDD)
6030 rc = VERR_NET_NO_BUFFER_SPACE;
6031 }
6032 else if (e1kRxDIsCacheEmpty(pState) && RDH == RDT)
6033 {
6034 /* Cache is empty, so is the RX ring. */
6035 rc = VERR_NET_NO_BUFFER_SPACE;
6036 }
6037 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
6038 " u16RxBSize=%d rc=%Rrc\n", INSTANCE(pState),
6039 e1kRxDInCache(pState), RDH, RDT, RDLEN, pState->u16RxBSize, rc));
6040
6041 e1kCsRxLeave(pState);
6042 return rc;
6043#endif /* E1K_WITH_RXD_CACHE */
6044}
6045
6046/**
6047 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6048 */
6049static DECLCALLBACK(int) e1kNetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6050{
6051 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6052 int rc = e1kCanReceive(pState);
6053
6054 if (RT_SUCCESS(rc))
6055 return VINF_SUCCESS;
6056 if (RT_UNLIKELY(cMillies == 0))
6057 return VERR_NET_NO_BUFFER_SPACE;
6058
6059 rc = VERR_INTERRUPTED;
6060 ASMAtomicXchgBool(&pState->fMaybeOutOfSpace, true);
6061 STAM_PROFILE_START(&pState->StatRxOverflow, a);
6062 VMSTATE enmVMState;
6063 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pState->CTX_SUFF(pDevIns))) == VMSTATE_RUNNING
6064 || enmVMState == VMSTATE_RUNNING_LS))
6065 {
6066 int rc2 = e1kCanReceive(pState);
6067 if (RT_SUCCESS(rc2))
6068 {
6069 rc = VINF_SUCCESS;
6070 break;
6071 }
6072 E1kLogRel(("E1000 e1kNetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6073 E1kLog(("%s e1kNetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", INSTANCE(pState), cMillies));
6074 RTSemEventWait(pState->hEventMoreRxDescAvail, cMillies);
6075 }
6076 STAM_PROFILE_STOP(&pState->StatRxOverflow, a);
6077 ASMAtomicXchgBool(&pState->fMaybeOutOfSpace, false);
6078
6079 return rc;
6080}
6081
6082
6083/**
6084 * Matches the packet addresses against Receive Address table. Looks for
6085 * exact matches only.
6086 *
6087 * @returns true if address matches.
6088 * @param pState Pointer to the state structure.
6089 * @param pvBuf The ethernet packet.
6090 * @param cb Number of bytes available in the packet.
6091 * @thread EMT
6092 */
6093static bool e1kPerfectMatch(E1KSTATE *pState, const void *pvBuf)
6094{
6095 for (unsigned i = 0; i < RT_ELEMENTS(pState->aRecAddr.array); i++)
6096 {
6097 E1KRAELEM* ra = pState->aRecAddr.array + i;
6098
6099 /* Valid address? */
6100 if (ra->ctl & RA_CTL_AV)
6101 {
6102 Assert((ra->ctl & RA_CTL_AS) < 2);
6103 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6104 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6105 // INSTANCE(pState), pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6106 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6107 /*
6108 * Address Select:
6109 * 00b = Destination address
6110 * 01b = Source address
6111 * 10b = Reserved
6112 * 11b = Reserved
6113 * Since ethernet header is (DA, SA, len) we can use address
6114 * select as index.
6115 */
6116 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6117 ra->addr, sizeof(ra->addr)) == 0)
6118 return true;
6119 }
6120 }
6121
6122 return false;
6123}
6124
6125/**
6126 * Matches the packet addresses against Multicast Table Array.
6127 *
6128 * @remarks This is imperfect match since it matches not exact address but
6129 * a subset of addresses.
6130 *
6131 * @returns true if address matches.
6132 * @param pState Pointer to the state structure.
6133 * @param pvBuf The ethernet packet.
6134 * @param cb Number of bytes available in the packet.
6135 * @thread EMT
6136 */
6137static bool e1kImperfectMatch(E1KSTATE *pState, const void *pvBuf)
6138{
6139 /* Get bits 32..47 of destination address */
6140 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6141
6142 unsigned offset = GET_BITS(RCTL, MO);
6143 /*
6144 * offset means:
6145 * 00b = bits 36..47
6146 * 01b = bits 35..46
6147 * 10b = bits 34..45
6148 * 11b = bits 32..43
6149 */
6150 if (offset < 3)
6151 u16Bit = u16Bit >> (4 - offset);
6152 return ASMBitTest(pState->auMTA, u16Bit & 0xFFF);
6153}
6154
6155/**
6156 * Determines if the packet is to be delivered to upper layer.
6157 *
6158 * The following filters supported:
6159 * - Exact Unicast/Multicast
6160 * - Promiscuous Unicast/Multicast
6161 * - Multicast
6162 * - VLAN
6163 *
6164 * @returns true if packet is intended for this node.
6165 * @param pState Pointer to the state structure.
6166 * @param pvBuf The ethernet packet.
6167 * @param cb Number of bytes available in the packet.
6168 * @param pStatus Bit field to store status bits.
6169 * @thread EMT
6170 */
6171static bool e1kAddressFilter(E1KSTATE *pState, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6172{
6173 Assert(cb > 14);
6174 /* Assume that we fail to pass exact filter. */
6175 pStatus->fPIF = false;
6176 pStatus->fVP = false;
6177 /* Discard oversized packets */
6178 if (cb > E1K_MAX_RX_PKT_SIZE)
6179 {
6180 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6181 INSTANCE(pState), cb, E1K_MAX_RX_PKT_SIZE));
6182 E1K_INC_CNT32(ROC);
6183 return false;
6184 }
6185 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6186 {
6187 /* When long packet reception is disabled packets over 1522 are discarded */
6188 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6189 INSTANCE(pState), cb));
6190 E1K_INC_CNT32(ROC);
6191 return false;
6192 }
6193
6194 uint16_t *u16Ptr = (uint16_t*)pvBuf;
6195 /* Compare TPID with VLAN Ether Type */
6196 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6197 {
6198 pStatus->fVP = true;
6199 /* Is VLAN filtering enabled? */
6200 if (RCTL & RCTL_VFE)
6201 {
6202 /* It is 802.1q packet indeed, let's filter by VID */
6203 if (RCTL & RCTL_CFIEN)
6204 {
6205 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", INSTANCE(pState),
6206 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6207 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6208 !!(RCTL & RCTL_CFI)));
6209 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6210 {
6211 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6212 INSTANCE(pState), E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6213 return false;
6214 }
6215 }
6216 else
6217 E1kLog3(("%s VLAN filter: VLAN=%d\n", INSTANCE(pState),
6218 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6219 if (!ASMBitTest(pState->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6220 {
6221 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6222 INSTANCE(pState), E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6223 return false;
6224 }
6225 }
6226 }
6227 /* Broadcast filtering */
6228 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6229 return true;
6230 E1kLog2(("%s Packet filter: not a broadcast\n", INSTANCE(pState)));
6231 if (e1kIsMulticast(pvBuf))
6232 {
6233 /* Is multicast promiscuous enabled? */
6234 if (RCTL & RCTL_MPE)
6235 return true;
6236 E1kLog2(("%s Packet filter: no promiscuous multicast\n", INSTANCE(pState)));
6237 /* Try perfect matches first */
6238 if (e1kPerfectMatch(pState, pvBuf))
6239 {
6240 pStatus->fPIF = true;
6241 return true;
6242 }
6243 E1kLog2(("%s Packet filter: no perfect match\n", INSTANCE(pState)));
6244 if (e1kImperfectMatch(pState, pvBuf))
6245 return true;
6246 E1kLog2(("%s Packet filter: no imperfect match\n", INSTANCE(pState)));
6247 }
6248 else {
6249 /* Is unicast promiscuous enabled? */
6250 if (RCTL & RCTL_UPE)
6251 return true;
6252 E1kLog2(("%s Packet filter: no promiscuous unicast\n", INSTANCE(pState)));
6253 if (e1kPerfectMatch(pState, pvBuf))
6254 {
6255 pStatus->fPIF = true;
6256 return true;
6257 }
6258 E1kLog2(("%s Packet filter: no perfect match\n", INSTANCE(pState)));
6259 }
6260 E1kLog2(("%s Packet filter: packet discarded\n", INSTANCE(pState)));
6261 return false;
6262}
6263
6264/**
6265 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6266 */
6267static DECLCALLBACK(int) e1kNetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6268{
6269 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6270 int rc = VINF_SUCCESS;
6271
6272 /*
6273 * Drop packets if the VM is not running yet/anymore.
6274 */
6275 VMSTATE enmVMState = PDMDevHlpVMState(STATE_TO_DEVINS(pState));
6276 if ( enmVMState != VMSTATE_RUNNING
6277 && enmVMState != VMSTATE_RUNNING_LS)
6278 {
6279 E1kLog(("%s Dropping incoming packet as VM is not running.\n", INSTANCE(pState)));
6280 return VINF_SUCCESS;
6281 }
6282
6283 /* Discard incoming packets in locked state */
6284 if (!(RCTL & RCTL_EN) || pState->fLocked || !(STATUS & STATUS_LU))
6285 {
6286 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", INSTANCE(pState)));
6287 return VINF_SUCCESS;
6288 }
6289
6290 STAM_PROFILE_ADV_START(&pState->StatReceive, a);
6291
6292 //if (!e1kCsEnter(pState, RT_SRC_POS))
6293 // return VERR_PERMISSION_DENIED;
6294
6295 e1kPacketDump(pState, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6296
6297 /* Update stats */
6298 if (RT_LIKELY(e1kCsEnter(pState, VERR_SEM_BUSY) == VINF_SUCCESS))
6299 {
6300 E1K_INC_CNT32(TPR);
6301 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6302 e1kCsLeave(pState);
6303 }
6304 STAM_PROFILE_ADV_START(&pState->StatReceiveFilter, a);
6305 E1KRXDST status;
6306 RT_ZERO(status);
6307 bool fPassed = e1kAddressFilter(pState, pvBuf, cb, &status);
6308 STAM_PROFILE_ADV_STOP(&pState->StatReceiveFilter, a);
6309 if (fPassed)
6310 {
6311 rc = e1kHandleRxPacket(pState, pvBuf, cb, status);
6312 }
6313 //e1kCsLeave(pState);
6314 STAM_PROFILE_ADV_STOP(&pState->StatReceive, a);
6315
6316 return rc;
6317}
6318
6319
6320/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
6321
6322/**
6323 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
6324 */
6325static DECLCALLBACK(int) e1kQueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6326{
6327 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, ILeds);
6328 int rc = VERR_PDM_LUN_NOT_FOUND;
6329
6330 if (iLUN == 0)
6331 {
6332 *ppLed = &pState->led;
6333 rc = VINF_SUCCESS;
6334 }
6335 return rc;
6336}
6337
6338
6339/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
6340
6341/**
6342 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
6343 */
6344static DECLCALLBACK(int) e1kGetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6345{
6346 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6347 pState->eeprom.getMac(pMac);
6348 return VINF_SUCCESS;
6349}
6350
6351/**
6352 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
6353 */
6354static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kGetLinkState(PPDMINETWORKCONFIG pInterface)
6355{
6356 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6357 if (STATUS & STATUS_LU)
6358 return PDMNETWORKLINKSTATE_UP;
6359 return PDMNETWORKLINKSTATE_DOWN;
6360}
6361
6362/**
6363 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
6364 */
6365static DECLCALLBACK(int) e1kSetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6366{
6367 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6368 bool fOldUp = !!(STATUS & STATUS_LU);
6369 bool fNewUp = enmState == PDMNETWORKLINKSTATE_UP;
6370
6371 if ( fNewUp != fOldUp
6372 || (!fNewUp && pState->fCableConnected)) /* old state was connected but STATUS not
6373 * yet written by guest */
6374 {
6375 if (fNewUp)
6376 {
6377 E1kLog(("%s Link will be up in approximately %d secs\n",
6378 INSTANCE(pState), pState->cMsLinkUpDelay / 1000));
6379 pState->fCableConnected = true;
6380 STATUS &= ~STATUS_LU;
6381 Phy::setLinkStatus(&pState->phy, false);
6382 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
6383 /* Restore the link back in 5 seconds (by default). */
6384 e1kBringLinkUpDelayed(pState);
6385 }
6386 else
6387 {
6388 E1kLog(("%s Link is down\n", INSTANCE(pState)));
6389 pState->fCableConnected = false;
6390 STATUS &= ~STATUS_LU;
6391 Phy::setLinkStatus(&pState->phy, false);
6392 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
6393 }
6394 if (pState->pDrvR3)
6395 pState->pDrvR3->pfnNotifyLinkChanged(pState->pDrvR3, enmState);
6396 }
6397 return VINF_SUCCESS;
6398}
6399
6400
6401/* -=-=-=-=- PDMIBASE -=-=-=-=- */
6402
6403/**
6404 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6405 */
6406static DECLCALLBACK(void *) e1kQueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6407{
6408 E1KSTATE *pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, IBase);
6409 Assert(&pThis->IBase == pInterface);
6410
6411 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
6412 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThis->INetworkDown);
6413 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThis->INetworkConfig);
6414 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThis->ILeds);
6415 return NULL;
6416}
6417
6418
6419/* -=-=-=-=- Saved State -=-=-=-=- */
6420
6421/**
6422 * Saves the configuration.
6423 *
6424 * @param pState The E1K state.
6425 * @param pSSM The handle to the saved state.
6426 */
6427static void e1kSaveConfig(E1KSTATE *pState, PSSMHANDLE pSSM)
6428{
6429 SSMR3PutMem(pSSM, &pState->macConfigured, sizeof(pState->macConfigured));
6430 SSMR3PutU32(pSSM, pState->eChip);
6431}
6432
6433/**
6434 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
6435 */
6436static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
6437{
6438 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6439 e1kSaveConfig(pState, pSSM);
6440 return VINF_SSM_DONT_CALL_AGAIN;
6441}
6442
6443/**
6444 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
6445 */
6446static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6447{
6448 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6449
6450 int rc = e1kCsEnter(pState, VERR_SEM_BUSY);
6451 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6452 return rc;
6453 e1kCsLeave(pState);
6454 return VINF_SUCCESS;
6455#if 0
6456 /* 1) Prevent all threads from modifying the state and memory */
6457 //pState->fLocked = true;
6458 /* 2) Cancel all timers */
6459#ifdef E1K_TX_DELAY
6460 e1kCancelTimer(pState, pState->CTX_SUFF(pTXDTimer));
6461#endif /* E1K_TX_DELAY */
6462#ifdef E1K_USE_TX_TIMERS
6463 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
6464#ifndef E1K_NO_TAD
6465 e1kCancelTimer(pState, pState->CTX_SUFF(pTADTimer));
6466#endif /* E1K_NO_TAD */
6467#endif /* E1K_USE_TX_TIMERS */
6468#ifdef E1K_USE_RX_TIMERS
6469 e1kCancelTimer(pState, pState->CTX_SUFF(pRIDTimer));
6470 e1kCancelTimer(pState, pState->CTX_SUFF(pRADTimer));
6471#endif /* E1K_USE_RX_TIMERS */
6472 e1kCancelTimer(pState, pState->CTX_SUFF(pIntTimer));
6473 /* 3) Did I forget anything? */
6474 E1kLog(("%s Locked\n", INSTANCE(pState)));
6475 return VINF_SUCCESS;
6476#endif
6477}
6478
6479/**
6480 * @callback_method_impl{FNSSMDEVSAVEEXEC}
6481 */
6482static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6483{
6484 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6485
6486 e1kSaveConfig(pState, pSSM);
6487 pState->eeprom.save(pSSM);
6488 e1kDumpState(pState);
6489 SSMR3PutMem(pSSM, pState->auRegs, sizeof(pState->auRegs));
6490 SSMR3PutBool(pSSM, pState->fIntRaised);
6491 Phy::saveState(pSSM, &pState->phy);
6492 SSMR3PutU32(pSSM, pState->uSelectedReg);
6493 SSMR3PutMem(pSSM, pState->auMTA, sizeof(pState->auMTA));
6494 SSMR3PutMem(pSSM, &pState->aRecAddr, sizeof(pState->aRecAddr));
6495 SSMR3PutMem(pSSM, pState->auVFTA, sizeof(pState->auVFTA));
6496 SSMR3PutU64(pSSM, pState->u64AckedAt);
6497 SSMR3PutU16(pSSM, pState->u16RxBSize);
6498 //SSMR3PutBool(pSSM, pState->fDelayInts);
6499 //SSMR3PutBool(pSSM, pState->fIntMaskUsed);
6500 SSMR3PutU16(pSSM, pState->u16TxPktLen);
6501/** @todo State wrt to the TSE buffer is incomplete, so little point in
6502 * saving this actually. */
6503 SSMR3PutMem(pSSM, pState->aTxPacketFallback, pState->u16TxPktLen);
6504 SSMR3PutBool(pSSM, pState->fIPcsum);
6505 SSMR3PutBool(pSSM, pState->fTCPcsum);
6506 SSMR3PutMem(pSSM, &pState->contextTSE, sizeof(pState->contextTSE));
6507 SSMR3PutMem(pSSM, &pState->contextNormal, sizeof(pState->contextNormal));
6508 SSMR3PutBool(pSSM, pState->fVTag);
6509 SSMR3PutU16(pSSM, pState->u16VTagTCI);
6510#ifdef E1K_WITH_TXD_CACHE
6511#if 0
6512 SSMR3PutU8(pSSM, pState->nTxDFetched);
6513 SSMR3PutMem(pSSM, pState->aTxDescriptors,
6514 pState->nTxDFetched * sizeof(pState->aTxDescriptors[0]));
6515#else
6516 /*
6517 * There is no point in storing TX descriptor cache entries as we can simply
6518 * fetch them again. Moreover, normally the cache is always empty when we
6519 * save the state. Store zero entries for compatibility.
6520 */
6521 SSMR3PutU8(pSSM, 0);
6522#endif
6523#endif /* E1K_WITH_TXD_CACHE */
6524/**@todo GSO requires some more state here. */
6525 E1kLog(("%s State has been saved\n", INSTANCE(pState)));
6526 return VINF_SUCCESS;
6527}
6528
#if 0
/**
 * @callback_method_impl{FNSSMDEVSAVEDONE}
 *
 * Counterpart to the disabled locking experiment in e1kSavePrep; currently
 * compiled out.
 */
static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
{
    E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);

    /* If VM is being powered off unlocking will result in assertions in PGM */
    if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
        pState->fLocked = false;
    else
        E1kLog(("%s VM is not running -- remain locked\n", INSTANCE(pState)));
    E1kLog(("%s Unlocked\n", INSTANCE(pState)));
    return VINF_SUCCESS;
}
#endif
6546
6547/**
6548 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
6549 */
6550static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6551{
6552 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6553
6554 int rc = e1kCsEnter(pState, VERR_SEM_BUSY);
6555 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6556 return rc;
6557 e1kCsLeave(pState);
6558 return VINF_SUCCESS;
6559}
6560
6561/**
6562 * @callback_method_impl{FNSSMDEVLOADEXEC}
6563 */
6564static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
6565{
6566 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6567 int rc;
6568
6569 if ( uVersion != E1K_SAVEDSTATE_VERSION
6570#ifdef E1K_WITH_TXD_CACHE
6571 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
6572#endif /* E1K_WITH_TXD_CACHE */
6573 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
6574 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
6575 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
6576
6577 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
6578 || uPass != SSM_PASS_FINAL)
6579 {
6580 /* config checks */
6581 RTMAC macConfigured;
6582 rc = SSMR3GetMem(pSSM, &macConfigured, sizeof(macConfigured));
6583 AssertRCReturn(rc, rc);
6584 if ( memcmp(&macConfigured, &pState->macConfigured, sizeof(macConfigured))
6585 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
6586 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", INSTANCE(pState), &pState->macConfigured, &macConfigured));
6587
6588 E1KCHIP eChip;
6589 rc = SSMR3GetU32(pSSM, &eChip);
6590 AssertRCReturn(rc, rc);
6591 if (eChip != pState->eChip)
6592 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pState->eChip, eChip);
6593 }
6594
6595 if (uPass == SSM_PASS_FINAL)
6596 {
6597 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
6598 {
6599 rc = pState->eeprom.load(pSSM);
6600 AssertRCReturn(rc, rc);
6601 }
6602 /* the state */
6603 SSMR3GetMem(pSSM, &pState->auRegs, sizeof(pState->auRegs));
6604 SSMR3GetBool(pSSM, &pState->fIntRaised);
6605 /** @todo: PHY could be made a separate device with its own versioning */
6606 Phy::loadState(pSSM, &pState->phy);
6607 SSMR3GetU32(pSSM, &pState->uSelectedReg);
6608 SSMR3GetMem(pSSM, &pState->auMTA, sizeof(pState->auMTA));
6609 SSMR3GetMem(pSSM, &pState->aRecAddr, sizeof(pState->aRecAddr));
6610 SSMR3GetMem(pSSM, &pState->auVFTA, sizeof(pState->auVFTA));
6611 SSMR3GetU64(pSSM, &pState->u64AckedAt);
6612 SSMR3GetU16(pSSM, &pState->u16RxBSize);
6613 //SSMR3GetBool(pSSM, pState->fDelayInts);
6614 //SSMR3GetBool(pSSM, pState->fIntMaskUsed);
6615 SSMR3GetU16(pSSM, &pState->u16TxPktLen);
6616 SSMR3GetMem(pSSM, &pState->aTxPacketFallback[0], pState->u16TxPktLen);
6617 SSMR3GetBool(pSSM, &pState->fIPcsum);
6618 SSMR3GetBool(pSSM, &pState->fTCPcsum);
6619 SSMR3GetMem(pSSM, &pState->contextTSE, sizeof(pState->contextTSE));
6620 rc = SSMR3GetMem(pSSM, &pState->contextNormal, sizeof(pState->contextNormal));
6621 AssertRCReturn(rc, rc);
6622 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
6623 {
6624 SSMR3GetBool(pSSM, &pState->fVTag);
6625 rc = SSMR3GetU16(pSSM, &pState->u16VTagTCI);
6626 AssertRCReturn(rc, rc);
6627 }
6628 else
6629 {
6630 pState->fVTag = false;
6631 pState->u16VTagTCI = 0;
6632 }
6633#ifdef E1K_WITH_TXD_CACHE
6634 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
6635 {
6636 rc = SSMR3GetU8(pSSM, &pState->nTxDFetched);
6637 AssertRCReturn(rc, rc);
6638 if (pState->nTxDFetched)
6639 SSMR3GetMem(pSSM, pState->aTxDescriptors,
6640 pState->nTxDFetched * sizeof(pState->aTxDescriptors[0]));
6641 }
6642 else
6643 pState->nTxDFetched = 0;
6644 /*
6645 * @todo: Perhaps we should not store TXD cache as the entries can be
6646 * simply fetched again from guest's memory. Or can't they?
6647 */
6648#endif /* E1K_WITH_TXD_CACHE */
6649#ifdef E1K_WITH_RXD_CACHE
6650 /*
6651 * There is no point in storing the RX descriptor cache in the saved
6652 * state, we just need to make sure it is empty.
6653 */
6654 pState->iRxDCurrent = pState->nRxDFetched = 0;
6655#endif /* E1K_WITH_RXD_CACHE */
6656 /* derived state */
6657 e1kSetupGsoCtx(&pState->GsoCtx, &pState->contextTSE);
6658
6659 E1kLog(("%s State has been restored\n", INSTANCE(pState)));
6660 e1kDumpState(pState);
6661 }
6662 return VINF_SUCCESS;
6663}
6664
6665/**
6666 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
6667 */
6668static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6669{
6670 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6671
6672 /* Update promiscuous mode */
6673 if (pState->pDrvR3)
6674 pState->pDrvR3->pfnSetPromiscuousMode(pState->pDrvR3,
6675 !!(RCTL & (RCTL_UPE | RCTL_MPE)));
6676
6677 /*
6678 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
6679 * passed to us. We go through all this stuff if the link was up and we
6680 * wasn't teleported.
6681 */
6682 if ( (STATUS & STATUS_LU)
6683 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
6684 && pState->cMsLinkUpDelay)
6685 {
6686 E1kLog(("%s Link is down temporarily\n", INSTANCE(pState)));
6687 STATUS &= ~STATUS_LU;
6688 Phy::setLinkStatus(&pState->phy, false);
6689 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
6690 /* Restore the link back in five seconds (default). */
6691 e1kBringLinkUpDelayed(pState);
6692 }
6693 return VINF_SUCCESS;
6694}
6695
6696
6697
6698/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
6699
6700/**
6701 * @callback_method_impl{FNRTSTRFORMATTYPE}
6702 */
6703static DECLCALLBACK(size_t) e1kFmtRxDesc(PFNRTSTROUTPUT pfnOutput,
6704 void *pvArgOutput,
6705 const char *pszType,
6706 void const *pvValue,
6707 int cchWidth,
6708 int cchPrecision,
6709 unsigned fFlags,
6710 void *pvUser)
6711{
6712 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
6713 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
6714 if (!pDesc)
6715 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
6716
6717 size_t cbPrintf = 0;
6718 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
6719 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
6720 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
6721 pDesc->status.fPIF ? "PIF" : "pif",
6722 pDesc->status.fIPCS ? "IPCS" : "ipcs",
6723 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
6724 pDesc->status.fVP ? "VP" : "vp",
6725 pDesc->status.fIXSM ? "IXSM" : "ixsm",
6726 pDesc->status.fEOP ? "EOP" : "eop",
6727 pDesc->status.fDD ? "DD" : "dd",
6728 pDesc->status.fRXE ? "RXE" : "rxe",
6729 pDesc->status.fIPE ? "IPE" : "ipe",
6730 pDesc->status.fTCPE ? "TCPE" : "tcpe",
6731 pDesc->status.fCE ? "CE" : "ce",
6732 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
6733 E1K_SPEC_VLAN(pDesc->status.u16Special),
6734 E1K_SPEC_PRI(pDesc->status.u16Special));
6735 return cbPrintf;
6736}
6737
6738/**
6739 * @callback_method_impl{FNRTSTRFORMATTYPE}
6740 */
6741static DECLCALLBACK(size_t) e1kFmtTxDesc(PFNRTSTROUTPUT pfnOutput,
6742 void *pvArgOutput,
6743 const char *pszType,
6744 void const *pvValue,
6745 int cchWidth,
6746 int cchPrecision,
6747 unsigned fFlags,
6748 void *pvUser)
6749{
6750 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
6751 E1KTXDESC* pDesc = (E1KTXDESC*)pvValue;
6752 if (!pDesc)
6753 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
6754
6755 size_t cbPrintf = 0;
6756 switch (e1kGetDescType(pDesc))
6757 {
6758 case E1K_DTYP_CONTEXT:
6759 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
6760 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
6761 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
6762 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
6763 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
6764 pDesc->context.dw2.fIDE ? " IDE":"",
6765 pDesc->context.dw2.fRS ? " RS" :"",
6766 pDesc->context.dw2.fTSE ? " TSE":"",
6767 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
6768 pDesc->context.dw2.fTCP ? "TCP":"UDP",
6769 pDesc->context.dw2.u20PAYLEN,
6770 pDesc->context.dw3.u8HDRLEN,
6771 pDesc->context.dw3.u16MSS,
6772 pDesc->context.dw3.fDD?"DD":"");
6773 break;
6774 case E1K_DTYP_DATA:
6775 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
6776 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
6777 pDesc->data.u64BufAddr,
6778 pDesc->data.cmd.u20DTALEN,
6779 pDesc->data.cmd.fIDE ? " IDE" :"",
6780 pDesc->data.cmd.fVLE ? " VLE" :"",
6781 pDesc->data.cmd.fRPS ? " RPS" :"",
6782 pDesc->data.cmd.fRS ? " RS" :"",
6783 pDesc->data.cmd.fTSE ? " TSE" :"",
6784 pDesc->data.cmd.fIFCS? " IFCS":"",
6785 pDesc->data.cmd.fEOP ? " EOP" :"",
6786 pDesc->data.dw3.fDD ? " DD" :"",
6787 pDesc->data.dw3.fEC ? " EC" :"",
6788 pDesc->data.dw3.fLC ? " LC" :"",
6789 pDesc->data.dw3.fTXSM? " TXSM":"",
6790 pDesc->data.dw3.fIXSM? " IXSM":"",
6791 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
6792 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
6793 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
6794 break;
6795 case E1K_DTYP_LEGACY:
6796 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
6797 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
6798 pDesc->data.u64BufAddr,
6799 pDesc->legacy.cmd.u16Length,
6800 pDesc->legacy.cmd.fIDE ? " IDE" :"",
6801 pDesc->legacy.cmd.fVLE ? " VLE" :"",
6802 pDesc->legacy.cmd.fRPS ? " RPS" :"",
6803 pDesc->legacy.cmd.fRS ? " RS" :"",
6804 pDesc->legacy.cmd.fIC ? " IC" :"",
6805 pDesc->legacy.cmd.fIFCS? " IFCS":"",
6806 pDesc->legacy.cmd.fEOP ? " EOP" :"",
6807 pDesc->legacy.dw3.fDD ? " DD" :"",
6808 pDesc->legacy.dw3.fEC ? " EC" :"",
6809 pDesc->legacy.dw3.fLC ? " LC" :"",
6810 pDesc->legacy.cmd.u8CSO,
6811 pDesc->legacy.dw3.u8CSS,
6812 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
6813 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
6814 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
6815 break;
6816 default:
6817 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
6818 break;
6819 }
6820
6821 return cbPrintf;
6822}
6823
6824/** Initializes debug helpers (logging format types). */
6825static int e1kInitDebugHelpers(void)
6826{
6827 int rc = VINF_SUCCESS;
6828 static bool s_fHelpersRegistered = false;
6829 if (!s_fHelpersRegistered)
6830 {
6831 s_fHelpersRegistered = true;
6832 rc = RTStrFormatTypeRegister("e1krxd", e1kFmtRxDesc, NULL);
6833 AssertRCReturn(rc, rc);
6834 rc = RTStrFormatTypeRegister("e1ktxd", e1kFmtTxDesc, NULL);
6835 AssertRCReturn(rc, rc);
6836 }
6837 return rc;
6838}
6839
6840/**
6841 * Status info callback.
6842 *
6843 * @param pDevIns The device instance.
6844 * @param pHlp The output helpers.
6845 * @param pszArgs The arguments.
6846 */
6847static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
6848{
6849 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6850 unsigned i;
6851 // bool fRcvRing = false;
6852 // bool fXmtRing = false;
6853
6854 /*
6855 * Parse args.
6856 if (pszArgs)
6857 {
6858 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
6859 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
6860 }
6861 */
6862
6863 /*
6864 * Show info.
6865 */
6866 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%RTiop mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
6867 pDevIns->iInstance, pState->addrIOPort, pState->addrMMReg,
6868 &pState->macConfigured, g_Chips[pState->eChip].pcszName,
6869 pState->fGCEnabled ? " GC" : "", pState->fR0Enabled ? " R0" : "");
6870
6871 e1kCsEnter(pState, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
6872
6873 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6874 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", s_e1kRegMap[i].abbrev, pState->auRegs[i]);
6875
6876 for (i = 0; i < RT_ELEMENTS(pState->aRecAddr.array); i++)
6877 {
6878 E1KRAELEM* ra = pState->aRecAddr.array + i;
6879 if (ra->ctl & RA_CTL_AV)
6880 {
6881 const char *pcszTmp;
6882 switch (ra->ctl & RA_CTL_AS)
6883 {
6884 case 0: pcszTmp = "DST"; break;
6885 case 1: pcszTmp = "SRC"; break;
6886 default: pcszTmp = "reserved";
6887 }
6888 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
6889 }
6890 }
6891 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
6892 uint32_t rdh = RDH;
6893 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
6894 for (i = 0; i < cDescs; ++i)
6895 {
6896 E1KRXDESC desc;
6897 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
6898 &desc, sizeof(desc));
6899 if (i == rdh)
6900 pHlp->pfnPrintf(pHlp, ">>> ");
6901 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
6902 }
6903 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
6904 pState->iRxDCurrent, RDH, pState->nRxDFetched, E1K_RXD_CACHE_SIZE);
6905 if (rdh > pState->iRxDCurrent)
6906 rdh -= pState->iRxDCurrent;
6907 else
6908 rdh = cDescs + rdh - pState->iRxDCurrent;
6909 for (i = 0; i < pState->nRxDFetched; ++i)
6910 {
6911 if (i == pState->iRxDCurrent)
6912 pHlp->pfnPrintf(pHlp, ">>> ");
6913 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
6914 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
6915 &pState->aRxDescriptors[i]);
6916 }
6917
6918 cDescs = TDLEN / sizeof(E1KTXDESC);
6919 uint32_t tdh = TDH;
6920 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
6921 for (i = 0; i < cDescs; ++i)
6922 {
6923 E1KTXDESC desc;
6924 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
6925 &desc, sizeof(desc));
6926 if (i == tdh)
6927 pHlp->pfnPrintf(pHlp, ">>> ");
6928 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
6929 }
6930 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
6931 pState->iTxDCurrent, TDH, pState->nTxDFetched, E1K_TXD_CACHE_SIZE);
6932 if (tdh > pState->iTxDCurrent)
6933 tdh -= pState->iTxDCurrent;
6934 else
6935 tdh = cDescs + tdh - pState->iTxDCurrent;
6936 for (i = 0; i < pState->nTxDFetched; ++i)
6937 {
6938 if (i == pState->iTxDCurrent)
6939 pHlp->pfnPrintf(pHlp, ">>> ");
6940 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
6941 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
6942 &pState->aTxDescriptors[i]);
6943 }
6944
6945
6946#ifdef E1K_INT_STATS
6947 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pState->uStatIntTry);
6948 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pState->uStatInt);
6949 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pState->uStatIntLower);
6950 pHlp->pfnPrintf(pHlp, "Interrupts delayed: %d\n", pState->uStatIntDly);
6951 pHlp->pfnPrintf(pHlp, "Disabled delayed: %d\n", pState->uStatDisDly);
6952 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pState->uStatIntSkip);
6953 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pState->uStatIntMasked);
6954 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pState->uStatIntEarly);
6955 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pState->uStatIntLate);
6956 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pState->iStatIntLost);
6957 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pState->uStatIntRx);
6958 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pState->uStatIntTx);
6959 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pState->uStatIntICS);
6960 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pState->uStatIntRDTR);
6961 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pState->uStatIntRXDMT0);
6962 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pState->uStatIntTXQE);
6963 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pState->uStatTxIDE);
6964 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pState->uStatTxDelayed);
6965 pHlp->pfnPrintf(pHlp, "TX delayed expired: %d\n", pState->uStatTxDelayExp);
6966 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pState->uStatTxNoRS);
6967 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pState->uStatTAD);
6968 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pState->uStatTID);
6969 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pState->uStatRAD);
6970 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pState->uStatRID);
6971 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pState->uStatDescCtx);
6972 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pState->uStatDescDat);
6973 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pState->uStatDescLeg);
6974 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pState->uStatRxFrm);
6975 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pState->uStatTxFrm);
6976 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pState->uStatTx1514);
6977 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pState->uStatTx2962);
6978 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pState->uStatTx4410);
6979 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pState->uStatTx5858);
6980 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pState->uStatTx7306);
6981 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pState->uStatTx8754);
6982 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pState->uStatTx16384);
6983 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pState->uStatTx32768);
6984 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pState->uStatTxLarge);
6985#endif /* E1K_INT_STATS */
6986
6987 e1kCsLeave(pState);
6988}
6989
6990
6991
6992/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
6993
6994/**
6995 * Detach notification.
6996 *
6997 * One port on the network card has been disconnected from the network.
6998 *
6999 * @param pDevIns The device instance.
7000 * @param iLUN The logical unit which is being detached.
7001 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7002 */
7003static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7004{
7005 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7006 Log(("%s e1kR3Detach:\n", INSTANCE(pState)));
7007
7008 AssertLogRelReturnVoid(iLUN == 0);
7009
7010 PDMCritSectEnter(&pState->cs, VERR_SEM_BUSY);
7011
7012 /** @todo: r=pritesh still need to check if i missed
7013 * to clean something in this function
7014 */
7015
7016 /*
7017 * Zero some important members.
7018 */
7019 pState->pDrvBase = NULL;
7020 pState->pDrvR3 = NULL;
7021 pState->pDrvR0 = NIL_RTR0PTR;
7022 pState->pDrvRC = NIL_RTRCPTR;
7023
7024 PDMCritSectLeave(&pState->cs);
7025}
7026
7027/**
7028 * Attach the Network attachment.
7029 *
7030 * One port on the network card has been connected to a network.
7031 *
7032 * @returns VBox status code.
7033 * @param pDevIns The device instance.
7034 * @param iLUN The logical unit which is being attached.
7035 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7036 *
7037 * @remarks This code path is not used during construction.
7038 */
7039static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7040{
7041 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7042 LogFlow(("%s e1kR3Attach:\n", INSTANCE(pState)));
7043
7044 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7045
7046 PDMCritSectEnter(&pState->cs, VERR_SEM_BUSY);
7047
7048 /*
7049 * Attach the driver.
7050 */
7051 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pState->IBase, &pState->pDrvBase, "Network Port");
7052 if (RT_SUCCESS(rc))
7053 {
7054 if (rc == VINF_NAT_DNS)
7055 {
7056#ifdef RT_OS_LINUX
7057 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7058 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7059#else
7060 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7061 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7062#endif
7063 }
7064 pState->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMINETWORKUP);
7065 AssertMsgStmt(pState->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7066 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7067 if (RT_SUCCESS(rc))
7068 {
7069 PPDMIBASER0 pBaseR0 = PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMIBASER0);
7070 pState->pDrvR0 = pBaseR0 ? pBaseR0->pfnQueryInterface(pBaseR0, PDMINETWORKUP_IID) : NIL_RTR0PTR;
7071
7072 PPDMIBASERC pBaseRC = PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMIBASERC);
7073 pState->pDrvRC = pBaseRC ? pBaseRC->pfnQueryInterface(pBaseRC, PDMINETWORKUP_IID) : NIL_RTR0PTR;
7074 }
7075 }
7076 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7077 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7078 {
7079 /* This should never happen because this function is not called
7080 * if there is no driver to attach! */
7081 Log(("%s No attached driver!\n", INSTANCE(pState)));
7082 }
7083
7084 /*
7085 * Temporary set the link down if it was up so that the guest
7086 * will know that we have change the configuration of the
7087 * network card
7088 */
7089 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7090 {
7091 STATUS &= ~STATUS_LU;
7092 Phy::setLinkStatus(&pState->phy, false);
7093 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
7094 /* Restore the link back in 5 seconds (default). */
7095 e1kBringLinkUpDelayed(pState);
7096 }
7097
7098 PDMCritSectLeave(&pState->cs);
7099 return rc;
7100
7101}
7102
7103/**
7104 * @copydoc FNPDMDEVPOWEROFF
7105 */
7106static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7107{
7108 /* Poke thread waiting for buffer space. */
7109 e1kWakeupReceive(pDevIns);
7110}
7111
7112/**
7113 * @copydoc FNPDMDEVRESET
7114 */
7115static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7116{
7117 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7118#ifdef E1K_TX_DELAY
7119 e1kCancelTimer(pState, pState->CTX_SUFF(pTXDTimer));
7120#endif /* E1K_TX_DELAY */
7121 e1kCancelTimer(pState, pState->CTX_SUFF(pIntTimer));
7122 e1kCancelTimer(pState, pState->CTX_SUFF(pLUTimer));
7123 e1kXmitFreeBuf(pState);
7124 pState->u16TxPktLen = 0;
7125 pState->fIPcsum = false;
7126 pState->fTCPcsum = false;
7127 pState->fIntMaskUsed = false;
7128 pState->fDelayInts = false;
7129 pState->fLocked = false;
7130 pState->u64AckedAt = 0;
7131 e1kHardReset(pState);
7132}
7133
7134/**
7135 * @copydoc FNPDMDEVSUSPEND
7136 */
7137static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7138{
7139 /* Poke thread waiting for buffer space. */
7140 e1kWakeupReceive(pDevIns);
7141}
7142
7143/**
7144 * Device relocation callback.
7145 *
7146 * When this callback is called the device instance data, and if the
7147 * device have a GC component, is being relocated, or/and the selectors
7148 * have been changed. The device must use the chance to perform the
7149 * necessary pointer relocations and data updates.
7150 *
7151 * Before the GC code is executed the first time, this function will be
7152 * called with a 0 delta so GC pointer calculations can be one in one place.
7153 *
7154 * @param pDevIns Pointer to the device instance.
7155 * @param offDelta The relocation delta relative to the old location.
7156 *
7157 * @remark A relocation CANNOT fail.
7158 */
7159static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7160{
7161 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7162 pState->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7163 pState->pTxQueueRC = PDMQueueRCPtr(pState->pTxQueueR3);
7164 pState->pCanRxQueueRC = PDMQueueRCPtr(pState->pCanRxQueueR3);
7165#ifdef E1K_USE_RX_TIMERS
7166 pState->pRIDTimerRC = TMTimerRCPtr(pState->pRIDTimerR3);
7167 pState->pRADTimerRC = TMTimerRCPtr(pState->pRADTimerR3);
7168#endif /* E1K_USE_RX_TIMERS */
7169#ifdef E1K_USE_TX_TIMERS
7170 pState->pTIDTimerRC = TMTimerRCPtr(pState->pTIDTimerR3);
7171# ifndef E1K_NO_TAD
7172 pState->pTADTimerRC = TMTimerRCPtr(pState->pTADTimerR3);
7173# endif /* E1K_NO_TAD */
7174#endif /* E1K_USE_TX_TIMERS */
7175#ifdef E1K_TX_DELAY
7176 pState->pTXDTimerRC = TMTimerRCPtr(pState->pTXDTimerR3);
7177#endif /* E1K_TX_DELAY */
7178 pState->pIntTimerRC = TMTimerRCPtr(pState->pIntTimerR3);
7179 pState->pLUTimerRC = TMTimerRCPtr(pState->pLUTimerR3);
7180}
7181
7182/**
7183 * Destruct a device instance.
7184 *
7185 * We need to free non-VM resources only.
7186 *
7187 * @returns VBox status.
7188 * @param pDevIns The device instance data.
7189 * @thread EMT
7190 */
7191static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7192{
7193 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7194 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7195
7196 e1kDumpState(pState);
7197 E1kLog(("%s Destroying instance\n", INSTANCE(pState)));
7198 if (PDMCritSectIsInitialized(&pState->cs))
7199 {
7200 if (pState->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
7201 {
7202 RTSemEventSignal(pState->hEventMoreRxDescAvail);
7203 RTSemEventDestroy(pState->hEventMoreRxDescAvail);
7204 pState->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7205 }
7206#ifdef E1K_WITH_TX_CS
7207 PDMR3CritSectDelete(&pState->csTx);
7208#endif /* E1K_WITH_TX_CS */
7209 PDMR3CritSectDelete(&pState->csRx);
7210 PDMR3CritSectDelete(&pState->cs);
7211 }
7212 return VINF_SUCCESS;
7213}
7214
7215
7216/**
7217 * Set PCI configuration space registers.
7218 *
7219 * @param pci Reference to PCI device structure.
7220 * @thread EMT
7221 */
7222static DECLCALLBACK(void) e1kConfigurePciDev(PPCIDEVICE pPciDev, E1KCHIP eChip)
7223{
7224 Assert(eChip < RT_ELEMENTS(g_Chips));
7225 /* Configure PCI Device, assume 32-bit mode ******************************/
7226 PCIDevSetVendorId(pPciDev, g_Chips[eChip].uPCIVendorId);
7227 PCIDevSetDeviceId(pPciDev, g_Chips[eChip].uPCIDeviceId);
7228 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_Chips[eChip].uPCISubsystemVendorId);
7229 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_Chips[eChip].uPCISubsystemId);
7230
7231 PCIDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7232 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7233 PCIDevSetWord( pPciDev, VBOX_PCI_STATUS,
7234 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7235 /* Stepping A2 */
7236 PCIDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7237 /* Ethernet adapter */
7238 PCIDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7239 PCIDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7240 /* normal single function Ethernet controller */
7241 PCIDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7242 /* Memory Register Base Address */
7243 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7244 /* Memory Flash Base Address */
7245 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7246 /* IO Register Base Address */
7247 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7248 /* Expansion ROM Base Address */
7249 PCIDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7250 /* Capabilities Pointer */
7251 PCIDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7252 /* Interrupt Pin: INTA# */
7253 PCIDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7254 /* Max_Lat/Min_Gnt: very high priority and time slice */
7255 PCIDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7256 PCIDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7257
7258 /* PCI Power Management Registers ****************************************/
7259 /* Capability ID: PCI Power Management Registers */
7260 PCIDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7261 /* Next Item Pointer: PCI-X */
7262 PCIDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7263 /* Power Management Capabilities: PM disabled, DSI */
7264 PCIDevSetWord( pPciDev, 0xDC + 2,
7265 0x0002 | VBOX_PCI_PM_CAP_DSI);
7266 /* Power Management Control / Status Register: PM disabled */
7267 PCIDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7268 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7269 PCIDevSetByte( pPciDev, 0xDC + 6, 0x00);
7270 /* Data Register: PM disabled, always 0 */
7271 PCIDevSetByte( pPciDev, 0xDC + 7, 0x00);
7272
7273 /* PCI-X Configuration Registers *****************************************/
7274 /* Capability ID: PCI-X Configuration Registers */
7275 PCIDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7276#ifdef E1K_WITH_MSI
7277 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x80);
7278#else
7279 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7280 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7281#endif
7282 /* PCI-X Command: Enable Relaxed Ordering */
7283 PCIDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7284 /* PCI-X Status: 32-bit, 66MHz*/
7285 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7286 PCIDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
7287}
7288
7289/**
7290 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7291 */
7292static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7293{
7294 E1KSTATE *pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7295 int rc;
7296 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7297
7298 /*
7299 * Initialize the instance data (state).
7300 * Note! Caller has initialized it to ZERO already.
7301 */
7302 RTStrPrintf(pThis->szInstance, sizeof(pThis->szInstance), "E1000#%d", iInstance);
7303 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", INSTANCE(pThis), sizeof(E1KRXDESC)));
7304 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7305 pThis->pDevInsR3 = pDevIns;
7306 pThis->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
7307 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7308 pThis->u16TxPktLen = 0;
7309 pThis->fIPcsum = false;
7310 pThis->fTCPcsum = false;
7311 pThis->fIntMaskUsed = false;
7312 pThis->fDelayInts = false;
7313 pThis->fLocked = false;
7314 pThis->u64AckedAt = 0;
7315 pThis->led.u32Magic = PDMLED_MAGIC;
7316 pThis->u32PktNo = 1;
7317
7318 /* Interfaces */
7319 pThis->IBase.pfnQueryInterface = e1kQueryInterface;
7320
7321 pThis->INetworkDown.pfnWaitReceiveAvail = e1kNetworkDown_WaitReceiveAvail;
7322 pThis->INetworkDown.pfnReceive = e1kNetworkDown_Receive;
7323 pThis->INetworkDown.pfnXmitPending = e1kNetworkDown_XmitPending;
7324
7325 pThis->ILeds.pfnQueryStatusLed = e1kQueryStatusLed;
7326
7327 pThis->INetworkConfig.pfnGetMac = e1kGetMac;
7328 pThis->INetworkConfig.pfnGetLinkState = e1kGetLinkState;
7329 pThis->INetworkConfig.pfnSetLinkState = e1kSetLinkState;
7330
7331 /*
7332 * Validate configuration.
7333 */
7334 if (!CFGMR3AreValuesValid(pCfg, "MAC\0" "CableConnected\0" "AdapterType\0"
7335 "LineSpeed\0" "GCEnabled\0" "R0Enabled\0"
7336 "EthernetCRC\0" "GSOEnabled\0" "LinkUpDelay\0"))
7337 return PDMDEV_SET_ERROR(pDevIns, VERR_PDM_DEVINS_UNKNOWN_CFG_VALUES,
7338 N_("Invalid configuration for E1000 device"));
7339
7340 /** @todo: LineSpeed unused! */
7341
7342 pThis->fR0Enabled = true;
7343 pThis->fGCEnabled = true;
7344 pThis->fEthernetCRC = true;
7345 pThis->fGSOEnabled = true;
7346
7347 /* Get config params */
7348 rc = CFGMR3QueryBytes(pCfg, "MAC", pThis->macConfigured.au8,
7349 sizeof(pThis->macConfigured.au8));
7350 if (RT_FAILURE(rc))
7351 return PDMDEV_SET_ERROR(pDevIns, rc,
7352 N_("Configuration error: Failed to get MAC address"));
7353 rc = CFGMR3QueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
7354 if (RT_FAILURE(rc))
7355 return PDMDEV_SET_ERROR(pDevIns, rc,
7356 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7357 rc = CFGMR3QueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
7358 if (RT_FAILURE(rc))
7359 return PDMDEV_SET_ERROR(pDevIns, rc,
7360 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7361 Assert(pThis->eChip <= E1K_CHIP_82545EM);
7362 rc = CFGMR3QueryBoolDef(pCfg, "GCEnabled", &pThis->fGCEnabled, true);
7363 if (RT_FAILURE(rc))
7364 return PDMDEV_SET_ERROR(pDevIns, rc,
7365 N_("Configuration error: Failed to get the value of 'GCEnabled'"));
7366
7367 rc = CFGMR3QueryBoolDef(pCfg, "R0Enabled", &pThis->fR0Enabled, true);
7368 if (RT_FAILURE(rc))
7369 return PDMDEV_SET_ERROR(pDevIns, rc,
7370 N_("Configuration error: Failed to get the value of 'R0Enabled'"));
7371
7372 rc = CFGMR3QueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
7373 if (RT_FAILURE(rc))
7374 return PDMDEV_SET_ERROR(pDevIns, rc,
7375 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7376
7377 rc = CFGMR3QueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
7378 if (RT_FAILURE(rc))
7379 return PDMDEV_SET_ERROR(pDevIns, rc,
7380 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
7381
7382 rc = CFGMR3QueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 5000); /* ms */
7383 if (RT_FAILURE(rc))
7384 return PDMDEV_SET_ERROR(pDevIns, rc,
7385 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
7386 Assert(pThis->cMsLinkUpDelay <= 300000); /* less than 5 minutes */
7387 if (pThis->cMsLinkUpDelay > 5000)
7388 LogRel(("%s WARNING! Link up delay is set to %u seconds!\n", INSTANCE(pThis), pThis->cMsLinkUpDelay / 1000));
7389 else if (pThis->cMsLinkUpDelay == 0)
7390 LogRel(("%s WARNING! Link up delay is disabled!\n", INSTANCE(pThis)));
7391
7392 E1kLog(("%s Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s\n", INSTANCE(pThis),
7393 g_Chips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
7394 pThis->fEthernetCRC ? "on" : "off",
7395 pThis->fGSOEnabled ? "enabled" : "disabled"));
7396
7397 /* Initialize the EEPROM */
7398 pThis->eeprom.init(pThis->macConfigured);
7399
7400 /* Initialize internal PHY */
7401 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
7402 Phy::setLinkStatus(&pThis->phy, pThis->fCableConnected);
7403
7404 /* Initialize critical sections. We do our own locking. */
7405 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
7406 AssertRCReturn(rc, rc);
7407
7408 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "%s", pThis->szInstance);
7409 if (RT_FAILURE(rc))
7410 return rc;
7411 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "%sRX", pThis->szInstance);
7412 if (RT_FAILURE(rc))
7413 return rc;
7414#ifdef E1K_WITH_TX_CS
7415 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "%sTX", pThis->szInstance);
7416 if (RT_FAILURE(rc))
7417 return rc;
7418#endif /* E1K_WITH_TX_CS */
7419
7420 /* Saved state registration. */
7421 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
7422 NULL, e1kLiveExec, NULL,
7423 e1kSavePrep, e1kSaveExec, NULL,
7424 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
7425 if (RT_FAILURE(rc))
7426 return rc;
7427
7428 /* Set PCI config registers and register ourselves with the PCI bus. */
7429 e1kConfigurePciDev(&pThis->pciDevice, pThis->eChip);
7430 rc = PDMDevHlpPCIRegister(pDevIns, &pThis->pciDevice);
7431 if (RT_FAILURE(rc))
7432 return rc;
7433
7434#ifdef E1K_WITH_MSI
7435 PDMMSIREG aMsiReg;
7436 aMsiReg.cMsiVectors = 1;
7437 aMsiReg.iMsiCapOffset = 0x80;
7438 aMsiReg.iMsiNextOffset = 0x0;
7439 aMsiReg.fMsi64bit = false;
7440 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg);
7441 AssertRC(rc);
7442 if (RT_FAILURE (rc))
7443 return rc;
7444#endif
7445
7446
7447 /* Map our registers to memory space (region 0, see e1kConfigurePCI)*/
7448 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, e1kMap);
7449 if (RT_FAILURE(rc))
7450 return rc;
7451 /* Map our registers to IO space (region 2, see e1kConfigurePCI) */
7452 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, E1K_IOPORT_SIZE, PCI_ADDRESS_SPACE_IO, e1kMap);
7453 if (RT_FAILURE(rc))
7454 return rc;
7455
7456 /* Create transmit queue */
7457 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7458 e1kTxQueueConsumer, true, "E1000-Xmit", &pThis->pTxQueueR3);
7459 if (RT_FAILURE(rc))
7460 return rc;
7461 pThis->pTxQueueR0 = PDMQueueR0Ptr(pThis->pTxQueueR3);
7462 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7463
7464 /* Create the RX notifier signaller. */
7465 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7466 e1kCanRxQueueConsumer, true, "E1000-Rcv", &pThis->pCanRxQueueR3);
7467 if (RT_FAILURE(rc))
7468 return rc;
7469 pThis->pCanRxQueueR0 = PDMQueueR0Ptr(pThis->pCanRxQueueR3);
7470 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7471
7472#ifdef E1K_TX_DELAY
7473 /* Create Transmit Delay Timer */
7474 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxDelayTimer, pThis,
7475 TMTIMER_FLAGS_NO_CRIT_SECT,
7476 "E1000 Transmit Delay Timer", &pThis->pTXDTimerR3);
7477 if (RT_FAILURE(rc))
7478 return rc;
7479 pThis->pTXDTimerR0 = TMTimerR0Ptr(pThis->pTXDTimerR3);
7480 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7481 TMR3TimerSetCritSect(pThis->pTXDTimerR3, &pThis->csTx);
7482#endif /* E1K_TX_DELAY */
7483
7484#ifdef E1K_USE_TX_TIMERS
7485 /* Create Transmit Interrupt Delay Timer */
7486 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxIntDelayTimer, pThis,
7487 TMTIMER_FLAGS_NO_CRIT_SECT,
7488 "E1000 Transmit Interrupt Delay Timer", &pThis->pTIDTimerR3);
7489 if (RT_FAILURE(rc))
7490 return rc;
7491 pThis->pTIDTimerR0 = TMTimerR0Ptr(pThis->pTIDTimerR3);
7492 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7493
7494# ifndef E1K_NO_TAD
7495 /* Create Transmit Absolute Delay Timer */
7496 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxAbsDelayTimer, pThis,
7497 TMTIMER_FLAGS_NO_CRIT_SECT,
7498 "E1000 Transmit Absolute Delay Timer", &pThis->pTADTimerR3);
7499 if (RT_FAILURE(rc))
7500 return rc;
7501 pThis->pTADTimerR0 = TMTimerR0Ptr(pThis->pTADTimerR3);
7502 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7503# endif /* E1K_NO_TAD */
7504#endif /* E1K_USE_TX_TIMERS */
7505
7506#ifdef E1K_USE_RX_TIMERS
7507 /* Create Receive Interrupt Delay Timer */
7508 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxIntDelayTimer, pThis,
7509 TMTIMER_FLAGS_NO_CRIT_SECT,
7510 "E1000 Receive Interrupt Delay Timer", &pThis->pRIDTimerR3);
7511 if (RT_FAILURE(rc))
7512 return rc;
7513 pThis->pRIDTimerR0 = TMTimerR0Ptr(pThis->pRIDTimerR3);
7514 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7515
7516 /* Create Receive Absolute Delay Timer */
7517 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxAbsDelayTimer, pThis,
7518 TMTIMER_FLAGS_NO_CRIT_SECT,
7519 "E1000 Receive Absolute Delay Timer", &pThis->pRADTimerR3);
7520 if (RT_FAILURE(rc))
7521 return rc;
7522 pThis->pRADTimerR0 = TMTimerR0Ptr(pThis->pRADTimerR3);
7523 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7524#endif /* E1K_USE_RX_TIMERS */
7525
7526 /* Create Late Interrupt Timer */
7527 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLateIntTimer, pThis,
7528 TMTIMER_FLAGS_NO_CRIT_SECT,
7529 "E1000 Late Interrupt Timer", &pThis->pIntTimerR3);
7530 if (RT_FAILURE(rc))
7531 return rc;
7532 pThis->pIntTimerR0 = TMTimerR0Ptr(pThis->pIntTimerR3);
7533 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7534
7535 /* Create Link Up Timer */
7536 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLinkUpTimer, pThis,
7537 TMTIMER_FLAGS_NO_CRIT_SECT,
7538 "E1000 Link Up Timer", &pThis->pLUTimerR3);
7539 if (RT_FAILURE(rc))
7540 return rc;
7541 pThis->pLUTimerR0 = TMTimerR0Ptr(pThis->pLUTimerR3);
7542 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7543
7544 /* Register the info item */
7545 char szTmp[20];
7546 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
7547 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
7548
7549 /* Status driver */
7550 PPDMIBASE pBase;
7551 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThis->IBase, &pBase, "Status Port");
7552 if (RT_FAILURE(rc))
7553 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
7554 pThis->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
7555
7556 /* Network driver */
7557 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7558 if (RT_SUCCESS(rc))
7559 {
7560 if (rc == VINF_NAT_DNS)
7561 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7562 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7563 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7564 AssertMsgReturn(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
7565
7566 pThis->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7567 pThis->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7568 }
7569 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7570 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7571 {
7572 /* No error! */
7573 E1kLog(("%s This adapter is not attached to any network!\n", INSTANCE(pThis)));
7574 }
7575 else
7576 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
7577
7578 rc = RTSemEventCreate(&pThis->hEventMoreRxDescAvail);
7579 if (RT_FAILURE(rc))
7580 return rc;
7581
7582 rc = e1kInitDebugHelpers();
7583 if (RT_FAILURE(rc))
7584 return rc;
7585
7586 e1kHardReset(pThis);
7587
7588#if defined(VBOX_WITH_STATISTICS)
7589 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
7590 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
7591 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
7592 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
7593 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
7594 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
7595 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
7596 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
7597 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
7598 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
7599 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
7600 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occured", iInstance);
7601 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
7602 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
7603 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
7604 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
7605 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
7606 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
7607 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
7608 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflowWakeup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups", "/Devices/E1k%d/RxOverflowWakeup", iInstance);
7609#endif /* VBOX_WITH_STATISTICS */
7610 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
7611#if defined(VBOX_WITH_STATISTICS)
7612 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
7613 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
7614#endif /* VBOX_WITH_STATISTICS */
7615 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
7616#if defined(VBOX_WITH_STATISTICS)
7617 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
7618 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
7619
7620 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors","/Devices/E1k%d/TxDesc/ContexNormal", iInstance);
7621 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
7622 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
7623 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
7624 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
7625 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
7626 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
7627 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
7628 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
7629#endif /* VBOX_WITH_STATISTICS */
7630
7631#ifdef E1K_INT_STATS
7632 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "u64ArmedAt", "/Devices/E1k%d/u64ArmedAt", iInstance);
7633 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatMaxTxDelay", "/Devices/E1k%d/uStatMaxTxDelay", iInstance);
7634 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatInt, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatInt", "/Devices/E1k%d/uStatInt", iInstance);
7635 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTry", "/Devices/E1k%d/uStatIntTry", iInstance);
7636 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLower", "/Devices/E1k%d/uStatIntLower", iInstance);
7637 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntDly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntDly", "/Devices/E1k%d/uStatIntDly", iInstance);
7638 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLost", "/Devices/E1k%d/iStatIntLost", iInstance);
7639 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLostOne", "/Devices/E1k%d/iStatIntLostOne", iInstance);
7640 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDisDly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDisDly", "/Devices/E1k%d/uStatDisDly", iInstance);
7641 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntSkip", "/Devices/E1k%d/uStatIntSkip", iInstance);
7642 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLate", "/Devices/E1k%d/uStatIntLate", iInstance);
7643 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntMasked", "/Devices/E1k%d/uStatIntMasked", iInstance);
7644 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntEarly", "/Devices/E1k%d/uStatIntEarly", iInstance);
7645 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRx", "/Devices/E1k%d/uStatIntRx", iInstance);
7646 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTx", "/Devices/E1k%d/uStatIntTx", iInstance);
7647 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntICS", "/Devices/E1k%d/uStatIntICS", iInstance);
7648 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRDTR", "/Devices/E1k%d/uStatIntRDTR", iInstance);
7649 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRXDMT0", "/Devices/E1k%d/uStatIntRXDMT0", iInstance);
7650 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTXQE", "/Devices/E1k%d/uStatIntTXQE", iInstance);
7651 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxNoRS", "/Devices/E1k%d/uStatTxNoRS", iInstance);
7652 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxIDE", "/Devices/E1k%d/uStatTxIDE", iInstance);
7653 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayed", "/Devices/E1k%d/uStatTxDelayed", iInstance);
7654 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayExp", "/Devices/E1k%d/uStatTxDelayExp", iInstance);
7655 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTAD", "/Devices/E1k%d/uStatTAD", iInstance);
7656 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTID", "/Devices/E1k%d/uStatTID", iInstance);
7657 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRAD", "/Devices/E1k%d/uStatRAD", iInstance);
7658 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRID", "/Devices/E1k%d/uStatRID", iInstance);
7659 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRxFrm", "/Devices/E1k%d/uStatRxFrm", iInstance);
7660 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxFrm", "/Devices/E1k%d/uStatTxFrm", iInstance);
7661 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescCtx", "/Devices/E1k%d/uStatDescCtx", iInstance);
7662 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescDat", "/Devices/E1k%d/uStatDescDat", iInstance);
7663 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescLeg", "/Devices/E1k%d/uStatDescLeg", iInstance);
7664 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx1514", "/Devices/E1k%d/uStatTx1514", iInstance);
7665 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx2962", "/Devices/E1k%d/uStatTx2962", iInstance);
7666 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx4410", "/Devices/E1k%d/uStatTx4410", iInstance);
7667 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx5858", "/Devices/E1k%d/uStatTx5858", iInstance);
7668 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx7306", "/Devices/E1k%d/uStatTx7306", iInstance);
7669 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx8754", "/Devices/E1k%d/uStatTx8754", iInstance);
7670 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx16384", "/Devices/E1k%d/uStatTx16384", iInstance);
7671 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx32768", "/Devices/E1k%d/uStatTx32768", iInstance);
7672 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxLarge", "/Devices/E1k%d/uStatTxLarge", iInstance);
7673#endif /* E1K_INT_STATS */
7674
7675 return VINF_SUCCESS;
7676}
7677
7678/**
7679 * The device registration structure.
7680 */
7681const PDMDEVREG g_DeviceE1000 =
7682{
7683 /* Structure version. PDM_DEVREG_VERSION defines the current version. */
7684 PDM_DEVREG_VERSION,
7685 /* Device name. */
7686 "e1000",
7687 /* Name of guest context module (no path).
7688 * Only evalutated if PDM_DEVREG_FLAGS_RC is set. */
7689 "VBoxDDGC.gc",
7690 /* Name of ring-0 module (no path).
7691 * Only evalutated if PDM_DEVREG_FLAGS_RC is set. */
7692 "VBoxDDR0.r0",
7693 /* The description of the device. The UTF-8 string pointed to shall, like this structure,
7694 * remain unchanged from registration till VM destruction. */
7695 "Intel PRO/1000 MT Desktop Ethernet.\n",
7696
7697 /* Flags, combination of the PDM_DEVREG_FLAGS_* \#defines. */
7698 PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0,
7699 /* Device class(es), combination of the PDM_DEVREG_CLASS_* \#defines. */
7700 PDM_DEVREG_CLASS_NETWORK,
7701 /* Maximum number of instances (per VM). */
7702 ~0U,
7703 /* Size of the instance data. */
7704 sizeof(E1KSTATE),
7705
7706 /* Construct instance - required. */
7707 e1kR3Construct,
7708 /* Destruct instance - optional. */
7709 e1kR3Destruct,
7710 /* Relocation command - optional. */
7711 e1kR3Relocate,
7712 /* I/O Control interface - optional. */
7713 NULL,
7714 /* Power on notification - optional. */
7715 NULL,
7716 /* Reset notification - optional. */
7717 e1kR3Reset,
7718 /* Suspend notification - optional. */
7719 e1kR3Suspend,
7720 /* Resume notification - optional. */
7721 NULL,
7722 /* Attach command - optional. */
7723 e1kR3Attach,
7724 /* Detach notification - optional. */
7725 e1kR3Detach,
7726 /* Query a LUN base interface - optional. */
7727 NULL,
7728 /* Init complete notification - optional. */
7729 NULL,
7730 /* Power off notification - optional. */
7731 e1kR3PowerOff,
7732 /* pfnSoftReset */
7733 NULL,
7734 /* u32VersionEnd */
7735 PDM_DEVREG_VERSION
7736};
7737
7738#endif /* IN_RING3 */
7739#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette