VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp@ 73353

Last change on this file since 73353 was 71788, checked in by vboxsync, 7 years ago

DevVirtioNet: A couple of todos and paranoia.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 326.0 KB
1/* $Id: DevE1000.cpp 71788 2018-04-09 16:06:34Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2017 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DEV_E1000
33#include <iprt/crc.h>
34#include <iprt/ctype.h>
35#include <iprt/net.h>
36#include <iprt/semaphore.h>
37#include <iprt/string.h>
38#include <iprt/time.h>
39#include <iprt/uuid.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/vmm/pdmnetifs.h>
42#include <VBox/vmm/pdmnetinline.h>
43#include <VBox/param.h>
44#include "VBoxDD.h"
45
46#include "DevEEPROM.h"
47#include "DevE1000Phy.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** @name E1000 Build Options
54 * @{ */
55/** @def E1K_INIT_RA0
56 * E1K_INIT_RA0 forces E1000 to set the first entry in Receive Address filter
57 * table to MAC address obtained from CFGM. Most guests read MAC address from
58 * EEPROM and write it to RA[0] explicitly, but Mac OS X seems to depend on it
59 * being already set (see @bugref{4657}).
60 */
61#define E1K_INIT_RA0
62/** @def E1K_LSC_ON_RESET
63 * E1K_LSC_ON_RESET causes e1000 to generate Link Status Change
64 * interrupt after hard reset. This makes the E1K_LSC_ON_SLU option unnecessary.
65 * With an unplugged cable, LSC is triggered for the 82543GC only.
66 */
67#define E1K_LSC_ON_RESET
68/** @def E1K_LSC_ON_SLU
69 * E1K_LSC_ON_SLU causes E1000 to generate Link Status Change interrupt when
70 * the guest driver brings up the link via STATUS.LU bit. Again the only guest
71 * that requires it is Mac OS X (see @bugref{4657}).
72 */
73//#define E1K_LSC_ON_SLU
74/** @def E1K_INIT_LINKUP_DELAY
75 * E1K_INIT_LINKUP_DELAY prevents the link from going up while the driver is
76 * still initializing (see @bugref{8624}).
77 */
78#define E1K_INIT_LINKUP_DELAY_US (2000 * 1000)
79/** @def E1K_IMS_INT_DELAY_NS
80 * E1K_IMS_INT_DELAY_NS prevents interrupt storms in Windows guests on enabling
81 * interrupts (see @bugref{8624}).
82 */
83#define E1K_IMS_INT_DELAY_NS 100
84/** @def E1K_TX_DELAY
85 * E1K_TX_DELAY aims to improve the guest-to-host transfer rate for TCP streams
86 * by preventing packets from being sent immediately. It allows several packets
87 * to be sent in a batch, reducing the number of acknowledgments. Note that it
88 * effectively disables the R0 TX path, forcing sending in R3.
89 */
90//#define E1K_TX_DELAY 150
91/** @def E1K_USE_TX_TIMERS
92 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
93 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
94 * register. Enabling it showed no positive effects on existing guests so it
95 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
96 * Ethernet Controllers Software Developer’s Manual" for more detailed
97 * explanation.
98 */
99//#define E1K_USE_TX_TIMERS
100/** @def E1K_NO_TAD
101 * E1K_NO_TAD disables one of two timers enabled by E1K_USE_TX_TIMERS, the
102 * Transmit Absolute Delay time. This timer sets the maximum time interval
103 * during which TX interrupts can be postponed (delayed). It has no effect
104 * if E1K_USE_TX_TIMERS is not defined.
105 */
106//#define E1K_NO_TAD
107/** @def E1K_REL_DEBUG
108 * E1K_REL_DEBUG enables debug logging (E1kLog, E1kLog2, E1kLog3) in release builds.
109 */
110//#define E1K_REL_DEBUG
111/** @def E1K_INT_STATS
112 * E1K_INT_STATS enables collection of internal statistics used for
113 * debugging of delayed interrupts, etc.
114 */
115#define E1K_INT_STATS
116/** @def E1K_WITH_MSI
117 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
118 */
119//#define E1K_WITH_MSI
120/** @def E1K_WITH_TX_CS
121 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
122 */
123#define E1K_WITH_TX_CS
124/** @def E1K_WITH_TXD_CACHE
125 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
126 * single physical memory read (or two if it wraps around the end of TX
127 * descriptor ring). It is required for proper functioning of bandwidth
128 * resource control as it allows computing the exact sizes of packets prior
129 * to allocating their buffers (see @bugref{5582}).
130 */
131#define E1K_WITH_TXD_CACHE
132/** @def E1K_WITH_RXD_CACHE
133 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
134 * single physical memory read (or two if it wraps around the end of RX
135 * descriptor ring). Intel's packet driver for DOS needs this option in
136 * order to work properly (see @bugref{6217}).
137 */
138#define E1K_WITH_RXD_CACHE
139/** @def E1K_WITH_PREREG_MMIO
140 * E1K_WITH_PREREG_MMIO enables the new-style MMIO registration and is
141 * currently only done for testing the related PDM, IOM and PGM code. */
142//#define E1K_WITH_PREREG_MMIO
143/* @} */
144/* End of Options ************************************************************/
145
146#ifdef E1K_WITH_TXD_CACHE
147/**
148 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
149 * in the state structure. It limits the number of descriptors loaded in one
150 * batch read. For example, a Linux guest may use up to 20 descriptors per
151 * TSE packet. The largest TSE packet seen (Windows guest) was 45 descriptors.
152 */
153# define E1K_TXD_CACHE_SIZE 64u
154#endif /* E1K_WITH_TXD_CACHE */
155
156#ifdef E1K_WITH_RXD_CACHE
157/**
158 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
159 * in the state structure. It limits the number of descriptors loaded in one
160 * batch read. For example, an XP guest adds 15 RX descriptors at a time.
161 */
162# define E1K_RXD_CACHE_SIZE 16u
163#endif /* E1K_WITH_RXD_CACHE */
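/*
 * A minimal sketch of the batched descriptor fetch that the two cache options
 * above describe: reading up to cToFetch descriptors takes one physical read,
 * or two if the request wraps around the end of the descriptor ring. The
 * helper below is illustrative only and does not mirror how the device code
 * is structured; PDMDevHlpPhysRead is used as a stand-in for the actual access path.
 */
#if 0
static void e1kSketchFetchDescriptors(PPDMDEVINS pDevIns, RTGCPHYS GCPhysBase, size_t cbDesc,
                                      uint32_t cDescsInRing, uint32_t iFirst, uint32_t cToFetch,
                                      void *pvDst)
{
    uint32_t const cTillWrap = cDescsInRing - iFirst;
    if (cToFetch <= cTillWrap)
        /* The whole batch is contiguous in guest memory: a single read suffices. */
        PDMDevHlpPhysRead(pDevIns, GCPhysBase + iFirst * cbDesc, pvDst, cToFetch * cbDesc);
    else
    {
        /* The batch wraps: read up to the end of the ring, then continue from its start. */
        PDMDevHlpPhysRead(pDevIns, GCPhysBase + iFirst * cbDesc, pvDst, cTillWrap * cbDesc);
        PDMDevHlpPhysRead(pDevIns, GCPhysBase, (uint8_t *)pvDst + cTillWrap * cbDesc,
                          (cToFetch - cTillWrap) * cbDesc);
    }
}
#endif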
164
165
166/* Little helpers ************************************************************/
167#undef htons
168#undef ntohs
169#undef htonl
170#undef ntohl
171#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
172#define ntohs(x) htons(x)
173#define htonl(x) ASMByteSwapU32(x)
174#define ntohl(x) htonl(x)
175
176#ifndef DEBUG
177# ifdef E1K_REL_DEBUG
178# define DEBUG
179# define E1kLog(a) LogRel(a)
180# define E1kLog2(a) LogRel(a)
181# define E1kLog3(a) LogRel(a)
182# define E1kLogX(x, a) LogRel(a)
183//# define E1kLog3(a) do {} while (0)
184# else
185# define E1kLog(a) do {} while (0)
186# define E1kLog2(a) do {} while (0)
187# define E1kLog3(a) do {} while (0)
188# define E1kLogX(x, a) do {} while (0)
189# endif
190#else
191# define E1kLog(a) Log(a)
192# define E1kLog2(a) Log2(a)
193# define E1kLog3(a) Log3(a)
194# define E1kLogX(x, a) LogIt(x, LOG_GROUP, a)
195//# define E1kLog(a) do {} while (0)
196//# define E1kLog2(a) do {} while (0)
197//# define E1kLog3(a) do {} while (0)
198#endif
199
200#if 0
201# define LOG_ENABLED
202# define E1kLogRel(a) LogRel(a)
203# undef Log6
204# define Log6(a) LogRel(a)
205#else
206# define E1kLogRel(a) do { } while (0)
207#endif
208
209//#undef DEBUG
210
211#define STATE_TO_DEVINS(pThis) (((PE1KSTATE )pThis)->CTX_SUFF(pDevIns))
212#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
213
214#define E1K_INC_CNT32(cnt) \
215do { \
216 if (cnt < UINT32_MAX) \
217 cnt++; \
218} while (0)
219
220#define E1K_ADD_CNT64(cntLo, cntHi, val) \
221do { \
222 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
223 uint64_t tmp = u64Cnt; \
224 u64Cnt += val; \
225 if (tmp > u64Cnt ) \
226 u64Cnt = UINT64_MAX; \
227 cntLo = (uint32_t)u64Cnt; \
228 cntHi = (uint32_t)(u64Cnt >> 32); \
229} while (0)
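/*
 * Usage sketch for the counter helpers above: statistics registers saturate
 * rather than wrap. Accounting received octets into the GORCL/GORCH pair
 * (register aliases defined further down) could look like this; the
 * surrounding function is illustrative only.
 */
#if 0
static void e1kSketchCountGoodReceive(PE1KSTATE pThis, uint32_t cbFrame)
{
    E1K_ADD_CNT64(GORCL, GORCH, cbFrame); /* 64-bit saturating add split over two 32-bit registers. */
    E1K_INC_CNT32(GPRC);                  /* 32-bit saturating packet counter. */
}
#endif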
230
231#ifdef E1K_INT_STATS
232# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
233#else /* E1K_INT_STATS */
234# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
235#endif /* E1K_INT_STATS */
236
237
238/*****************************************************************************/
239
240typedef uint32_t E1KCHIP;
241#define E1K_CHIP_82540EM 0
242#define E1K_CHIP_82543GC 1
243#define E1K_CHIP_82545EM 2
244
245#ifdef IN_RING3
246/** Different E1000 chips. */
247static const struct E1kChips
248{
249 uint16_t uPCIVendorId;
250 uint16_t uPCIDeviceId;
251 uint16_t uPCISubsystemVendorId;
252 uint16_t uPCISubsystemId;
253 const char *pcszName;
254} g_aChips[] =
255{
256 /* Vendor Device SSVendor SubSys Name */
257 { 0x8086,
258 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
259# ifdef E1K_WITH_MSI
260 0x105E,
261# else
262 0x100E,
263# endif
264 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
265 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
266 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
267};
268#endif /* IN_RING3 */
269
270
271/* The size of the register area mapped to I/O space */
272#define E1K_IOPORT_SIZE 0x8
273/* The size of the memory-mapped register area */
274#define E1K_MM_SIZE 0x20000
275
276#define E1K_MAX_TX_PKT_SIZE 16288
277#define E1K_MAX_RX_PKT_SIZE 16384
278
279/*****************************************************************************/
280
281/** Gets the specified bits from the register. */
282#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
283#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
284#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
285#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
286#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
287
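/*
 * Usage sketch for the field-access macros above, using the EERD register
 * fields defined below (EERD_ADDR_*, EERD_DATA_*). The function is
 * illustrative only; the real handler, e1kRegWriteEERD, is declared further down.
 */
#if 0
static void e1kSketchEepromReadWord(PE1KSTATE pThis, uint32_t u32Written, uint16_t u16Data)
{
    uint32_t const uAddr = GET_BITS_V(u32Written, EERD, ADDR); /* Word address supplied by the guest. */
    RT_NOREF(uAddr);
    SET_BITS(EERD, DATA, (uint32_t)u16Data);                   /* Store the word read from the EEPROM... */
    EERD |= EERD_DONE;                                         /* ...and signal completion. */
}
#endif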
288#define CTRL_SLU UINT32_C(0x00000040)
289#define CTRL_MDIO UINT32_C(0x00100000)
290#define CTRL_MDC UINT32_C(0x00200000)
291#define CTRL_MDIO_DIR UINT32_C(0x01000000)
292#define CTRL_MDC_DIR UINT32_C(0x02000000)
293#define CTRL_RESET UINT32_C(0x04000000)
294#define CTRL_VME UINT32_C(0x40000000)
295
296#define STATUS_LU UINT32_C(0x00000002)
297#define STATUS_TXOFF UINT32_C(0x00000010)
298
299#define EECD_EE_WIRES UINT32_C(0x0F)
300#define EECD_EE_REQ UINT32_C(0x40)
301#define EECD_EE_GNT UINT32_C(0x80)
302
303#define EERD_START UINT32_C(0x00000001)
304#define EERD_DONE UINT32_C(0x00000010)
305#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
306#define EERD_DATA_SHIFT 16
307#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
308#define EERD_ADDR_SHIFT 8
309
310#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
311#define MDIC_DATA_SHIFT 0
312#define MDIC_REG_MASK UINT32_C(0x001F0000)
313#define MDIC_REG_SHIFT 16
314#define MDIC_PHY_MASK UINT32_C(0x03E00000)
315#define MDIC_PHY_SHIFT 21
316#define MDIC_OP_WRITE UINT32_C(0x04000000)
317#define MDIC_OP_READ UINT32_C(0x08000000)
318#define MDIC_READY UINT32_C(0x10000000)
319#define MDIC_INT_EN UINT32_C(0x20000000)
320#define MDIC_ERROR UINT32_C(0x40000000)
321
322#define TCTL_EN UINT32_C(0x00000002)
323#define TCTL_PSP UINT32_C(0x00000008)
324
325#define RCTL_EN UINT32_C(0x00000002)
326#define RCTL_UPE UINT32_C(0x00000008)
327#define RCTL_MPE UINT32_C(0x00000010)
328#define RCTL_LPE UINT32_C(0x00000020)
329#define RCTL_LBM_MASK UINT32_C(0x000000C0)
330#define RCTL_LBM_SHIFT 6
331#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
332#define RCTL_RDMTS_SHIFT 8
333#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
334#define RCTL_MO_MASK UINT32_C(0x00003000)
335#define RCTL_MO_SHIFT 12
336#define RCTL_BAM UINT32_C(0x00008000)
337#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
338#define RCTL_BSIZE_SHIFT 16
339#define RCTL_VFE UINT32_C(0x00040000)
340#define RCTL_CFIEN UINT32_C(0x00080000)
341#define RCTL_CFI UINT32_C(0x00100000)
342#define RCTL_BSEX UINT32_C(0x02000000)
343#define RCTL_SECRC UINT32_C(0x04000000)
344
345#define ICR_TXDW UINT32_C(0x00000001)
346#define ICR_TXQE UINT32_C(0x00000002)
347#define ICR_LSC UINT32_C(0x00000004)
348#define ICR_RXDMT0 UINT32_C(0x00000010)
349#define ICR_RXT0 UINT32_C(0x00000080)
350#define ICR_TXD_LOW UINT32_C(0x00008000)
351#define RDTR_FPD UINT32_C(0x80000000)
352
353#define PBA_st ((PBAST*)(pThis->auRegs + PBA_IDX))
354typedef struct
355{
356 unsigned rxa : 7;
357 unsigned rxa_r : 9;
358 unsigned txa : 16;
359} PBAST;
360AssertCompileSize(PBAST, 4);
361
362#define TXDCTL_WTHRESH_MASK 0x003F0000
363#define TXDCTL_WTHRESH_SHIFT 16
364#define TXDCTL_LWTHRESH_MASK 0xFE000000
365#define TXDCTL_LWTHRESH_SHIFT 25
366
367#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
368#define RXCSUM_PCSS_SHIFT 0
369
370/** @name Register access macros
371 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
372 * @{ */
373#define CTRL pThis->auRegs[CTRL_IDX]
374#define STATUS pThis->auRegs[STATUS_IDX]
375#define EECD pThis->auRegs[EECD_IDX]
376#define EERD pThis->auRegs[EERD_IDX]
377#define CTRL_EXT pThis->auRegs[CTRL_EXT_IDX]
378#define FLA pThis->auRegs[FLA_IDX]
379#define MDIC pThis->auRegs[MDIC_IDX]
380#define FCAL pThis->auRegs[FCAL_IDX]
381#define FCAH pThis->auRegs[FCAH_IDX]
382#define FCT pThis->auRegs[FCT_IDX]
383#define VET pThis->auRegs[VET_IDX]
384#define ICR pThis->auRegs[ICR_IDX]
385#define ITR pThis->auRegs[ITR_IDX]
386#define ICS pThis->auRegs[ICS_IDX]
387#define IMS pThis->auRegs[IMS_IDX]
388#define IMC pThis->auRegs[IMC_IDX]
389#define RCTL pThis->auRegs[RCTL_IDX]
390#define FCTTV pThis->auRegs[FCTTV_IDX]
391#define TXCW pThis->auRegs[TXCW_IDX]
392#define RXCW pThis->auRegs[RXCW_IDX]
393#define TCTL pThis->auRegs[TCTL_IDX]
394#define TIPG pThis->auRegs[TIPG_IDX]
395#define AIFS pThis->auRegs[AIFS_IDX]
396#define LEDCTL pThis->auRegs[LEDCTL_IDX]
397#define PBA pThis->auRegs[PBA_IDX]
398#define FCRTL pThis->auRegs[FCRTL_IDX]
399#define FCRTH pThis->auRegs[FCRTH_IDX]
400#define RDFH pThis->auRegs[RDFH_IDX]
401#define RDFT pThis->auRegs[RDFT_IDX]
402#define RDFHS pThis->auRegs[RDFHS_IDX]
403#define RDFTS pThis->auRegs[RDFTS_IDX]
404#define RDFPC pThis->auRegs[RDFPC_IDX]
405#define RDBAL pThis->auRegs[RDBAL_IDX]
406#define RDBAH pThis->auRegs[RDBAH_IDX]
407#define RDLEN pThis->auRegs[RDLEN_IDX]
408#define RDH pThis->auRegs[RDH_IDX]
409#define RDT pThis->auRegs[RDT_IDX]
410#define RDTR pThis->auRegs[RDTR_IDX]
411#define RXDCTL pThis->auRegs[RXDCTL_IDX]
412#define RADV pThis->auRegs[RADV_IDX]
413#define RSRPD pThis->auRegs[RSRPD_IDX]
414#define TXDMAC pThis->auRegs[TXDMAC_IDX]
415#define TDFH pThis->auRegs[TDFH_IDX]
416#define TDFT pThis->auRegs[TDFT_IDX]
417#define TDFHS pThis->auRegs[TDFHS_IDX]
418#define TDFTS pThis->auRegs[TDFTS_IDX]
419#define TDFPC pThis->auRegs[TDFPC_IDX]
420#define TDBAL pThis->auRegs[TDBAL_IDX]
421#define TDBAH pThis->auRegs[TDBAH_IDX]
422#define TDLEN pThis->auRegs[TDLEN_IDX]
423#define TDH pThis->auRegs[TDH_IDX]
424#define TDT pThis->auRegs[TDT_IDX]
425#define TIDV pThis->auRegs[TIDV_IDX]
426#define TXDCTL pThis->auRegs[TXDCTL_IDX]
427#define TADV pThis->auRegs[TADV_IDX]
428#define TSPMT pThis->auRegs[TSPMT_IDX]
429#define CRCERRS pThis->auRegs[CRCERRS_IDX]
430#define ALGNERRC pThis->auRegs[ALGNERRC_IDX]
431#define SYMERRS pThis->auRegs[SYMERRS_IDX]
432#define RXERRC pThis->auRegs[RXERRC_IDX]
433#define MPC pThis->auRegs[MPC_IDX]
434#define SCC pThis->auRegs[SCC_IDX]
435#define ECOL pThis->auRegs[ECOL_IDX]
436#define MCC pThis->auRegs[MCC_IDX]
437#define LATECOL pThis->auRegs[LATECOL_IDX]
438#define COLC pThis->auRegs[COLC_IDX]
439#define DC pThis->auRegs[DC_IDX]
440#define TNCRS pThis->auRegs[TNCRS_IDX]
441/* #define SEC pThis->auRegs[SEC_IDX] Conflict with sys/time.h */
442#define CEXTERR pThis->auRegs[CEXTERR_IDX]
443#define RLEC pThis->auRegs[RLEC_IDX]
444#define XONRXC pThis->auRegs[XONRXC_IDX]
445#define XONTXC pThis->auRegs[XONTXC_IDX]
446#define XOFFRXC pThis->auRegs[XOFFRXC_IDX]
447#define XOFFTXC pThis->auRegs[XOFFTXC_IDX]
448#define FCRUC pThis->auRegs[FCRUC_IDX]
449#define PRC64 pThis->auRegs[PRC64_IDX]
450#define PRC127 pThis->auRegs[PRC127_IDX]
451#define PRC255 pThis->auRegs[PRC255_IDX]
452#define PRC511 pThis->auRegs[PRC511_IDX]
453#define PRC1023 pThis->auRegs[PRC1023_IDX]
454#define PRC1522 pThis->auRegs[PRC1522_IDX]
455#define GPRC pThis->auRegs[GPRC_IDX]
456#define BPRC pThis->auRegs[BPRC_IDX]
457#define MPRC pThis->auRegs[MPRC_IDX]
458#define GPTC pThis->auRegs[GPTC_IDX]
459#define GORCL pThis->auRegs[GORCL_IDX]
460#define GORCH pThis->auRegs[GORCH_IDX]
461#define GOTCL pThis->auRegs[GOTCL_IDX]
462#define GOTCH pThis->auRegs[GOTCH_IDX]
463#define RNBC pThis->auRegs[RNBC_IDX]
464#define RUC pThis->auRegs[RUC_IDX]
465#define RFC pThis->auRegs[RFC_IDX]
466#define ROC pThis->auRegs[ROC_IDX]
467#define RJC pThis->auRegs[RJC_IDX]
468#define MGTPRC pThis->auRegs[MGTPRC_IDX]
469#define MGTPDC pThis->auRegs[MGTPDC_IDX]
470#define MGTPTC pThis->auRegs[MGTPTC_IDX]
471#define TORL pThis->auRegs[TORL_IDX]
472#define TORH pThis->auRegs[TORH_IDX]
473#define TOTL pThis->auRegs[TOTL_IDX]
474#define TOTH pThis->auRegs[TOTH_IDX]
475#define TPR pThis->auRegs[TPR_IDX]
476#define TPT pThis->auRegs[TPT_IDX]
477#define PTC64 pThis->auRegs[PTC64_IDX]
478#define PTC127 pThis->auRegs[PTC127_IDX]
479#define PTC255 pThis->auRegs[PTC255_IDX]
480#define PTC511 pThis->auRegs[PTC511_IDX]
481#define PTC1023 pThis->auRegs[PTC1023_IDX]
482#define PTC1522 pThis->auRegs[PTC1522_IDX]
483#define MPTC pThis->auRegs[MPTC_IDX]
484#define BPTC pThis->auRegs[BPTC_IDX]
485#define TSCTC pThis->auRegs[TSCTC_IDX]
486#define TSCTFC pThis->auRegs[TSCTFC_IDX]
487#define RXCSUM pThis->auRegs[RXCSUM_IDX]
488#define WUC pThis->auRegs[WUC_IDX]
489#define WUFC pThis->auRegs[WUFC_IDX]
490#define WUS pThis->auRegs[WUS_IDX]
491#define MANC pThis->auRegs[MANC_IDX]
492#define IPAV pThis->auRegs[IPAV_IDX]
493#define WUPL pThis->auRegs[WUPL_IDX]
494/** @} */
495
496/**
497 * Indices of memory-mapped registers in register table.
498 */
499typedef enum
500{
501 CTRL_IDX,
502 STATUS_IDX,
503 EECD_IDX,
504 EERD_IDX,
505 CTRL_EXT_IDX,
506 FLA_IDX,
507 MDIC_IDX,
508 FCAL_IDX,
509 FCAH_IDX,
510 FCT_IDX,
511 VET_IDX,
512 ICR_IDX,
513 ITR_IDX,
514 ICS_IDX,
515 IMS_IDX,
516 IMC_IDX,
517 RCTL_IDX,
518 FCTTV_IDX,
519 TXCW_IDX,
520 RXCW_IDX,
521 TCTL_IDX,
522 TIPG_IDX,
523 AIFS_IDX,
524 LEDCTL_IDX,
525 PBA_IDX,
526 FCRTL_IDX,
527 FCRTH_IDX,
528 RDFH_IDX,
529 RDFT_IDX,
530 RDFHS_IDX,
531 RDFTS_IDX,
532 RDFPC_IDX,
533 RDBAL_IDX,
534 RDBAH_IDX,
535 RDLEN_IDX,
536 RDH_IDX,
537 RDT_IDX,
538 RDTR_IDX,
539 RXDCTL_IDX,
540 RADV_IDX,
541 RSRPD_IDX,
542 TXDMAC_IDX,
543 TDFH_IDX,
544 TDFT_IDX,
545 TDFHS_IDX,
546 TDFTS_IDX,
547 TDFPC_IDX,
548 TDBAL_IDX,
549 TDBAH_IDX,
550 TDLEN_IDX,
551 TDH_IDX,
552 TDT_IDX,
553 TIDV_IDX,
554 TXDCTL_IDX,
555 TADV_IDX,
556 TSPMT_IDX,
557 CRCERRS_IDX,
558 ALGNERRC_IDX,
559 SYMERRS_IDX,
560 RXERRC_IDX,
561 MPC_IDX,
562 SCC_IDX,
563 ECOL_IDX,
564 MCC_IDX,
565 LATECOL_IDX,
566 COLC_IDX,
567 DC_IDX,
568 TNCRS_IDX,
569 SEC_IDX,
570 CEXTERR_IDX,
571 RLEC_IDX,
572 XONRXC_IDX,
573 XONTXC_IDX,
574 XOFFRXC_IDX,
575 XOFFTXC_IDX,
576 FCRUC_IDX,
577 PRC64_IDX,
578 PRC127_IDX,
579 PRC255_IDX,
580 PRC511_IDX,
581 PRC1023_IDX,
582 PRC1522_IDX,
583 GPRC_IDX,
584 BPRC_IDX,
585 MPRC_IDX,
586 GPTC_IDX,
587 GORCL_IDX,
588 GORCH_IDX,
589 GOTCL_IDX,
590 GOTCH_IDX,
591 RNBC_IDX,
592 RUC_IDX,
593 RFC_IDX,
594 ROC_IDX,
595 RJC_IDX,
596 MGTPRC_IDX,
597 MGTPDC_IDX,
598 MGTPTC_IDX,
599 TORL_IDX,
600 TORH_IDX,
601 TOTL_IDX,
602 TOTH_IDX,
603 TPR_IDX,
604 TPT_IDX,
605 PTC64_IDX,
606 PTC127_IDX,
607 PTC255_IDX,
608 PTC511_IDX,
609 PTC1023_IDX,
610 PTC1522_IDX,
611 MPTC_IDX,
612 BPTC_IDX,
613 TSCTC_IDX,
614 TSCTFC_IDX,
615 RXCSUM_IDX,
616 WUC_IDX,
617 WUFC_IDX,
618 WUS_IDX,
619 MANC_IDX,
620 IPAV_IDX,
621 WUPL_IDX,
622 MTA_IDX,
623 RA_IDX,
624 VFTA_IDX,
625 IP4AT_IDX,
626 IP6AT_IDX,
627 WUPM_IDX,
628 FFLT_IDX,
629 FFMT_IDX,
630 FFVT_IDX,
631 PBM_IDX,
632 RA_82542_IDX,
633 MTA_82542_IDX,
634 VFTA_82542_IDX,
635 E1K_NUM_OF_REGS
636} E1kRegIndex;
637
638#define E1K_NUM_OF_32BIT_REGS MTA_IDX
639/** The number of registers with strictly increasing offset. */
640#define E1K_NUM_OF_BINARY_SEARCHABLE (WUPL_IDX + 1)
641
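/*
 * A sketch of the lookup that the strictly-increasing-offset property above
 * enables: binary search by register offset over the first
 * E1K_NUM_OF_BINARY_SEARCHABLE entries of g_aE1kRegMap (defined further down).
 * The helper name is illustrative only.
 */
#if 0
static int e1kSketchLookupRegByOffset(uint32_t offReg)
{
    int iLow  = 0;
    int iHigh = E1K_NUM_OF_BINARY_SEARCHABLE - 1;
    while (iLow <= iHigh)
    {
        int const iMid = (iLow + iHigh) / 2;
        if (offReg < g_aE1kRegMap[iMid].offset)
            iHigh = iMid - 1;
        else if (offReg >= g_aE1kRegMap[iMid].offset + g_aE1kRegMap[iMid].size)
            iLow = iMid + 1;
        else
            return iMid;    /* The offset falls inside this register (or register table). */
    }
    return -1;              /* No register covers this offset. */
}
#endif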
642
643/**
644 * Define E1000-specific EEPROM layout.
645 */
646struct E1kEEPROM
647{
648 public:
649 EEPROM93C46 eeprom;
650
651#ifdef IN_RING3
652 /**
653 * Initialize EEPROM content.
654 *
655 * @param macAddr MAC address of E1000.
656 */
657 void init(RTMAC &macAddr)
658 {
659 eeprom.init();
660 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
661 eeprom.m_au16Data[0x04] = 0xFFFF;
662 /*
663 * bit 3 - full support for power management
664 * bit 10 - full duplex
665 */
666 eeprom.m_au16Data[0x0A] = 0x4408;
667 eeprom.m_au16Data[0x0B] = 0x001E;
668 eeprom.m_au16Data[0x0C] = 0x8086;
669 eeprom.m_au16Data[0x0D] = 0x100E;
670 eeprom.m_au16Data[0x0E] = 0x8086;
671 eeprom.m_au16Data[0x0F] = 0x3040;
672 eeprom.m_au16Data[0x21] = 0x7061;
673 eeprom.m_au16Data[0x22] = 0x280C;
674 eeprom.m_au16Data[0x23] = 0x00C8;
675 eeprom.m_au16Data[0x24] = 0x00C8;
676 eeprom.m_au16Data[0x2F] = 0x0602;
677 updateChecksum();
678 };
679
680 /**
681 * Compute the checksum as required by E1000 and store it
682 * in the last word.
683 */
684 void updateChecksum()
685 {
686 uint16_t u16Checksum = 0;
687
688 for (int i = 0; i < eeprom.SIZE-1; i++)
689 u16Checksum += eeprom.m_au16Data[i];
690 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
691 };
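    /*
     * The convention established by updateChecksum() above: the 16-bit sum of
     * all EEPROM words, including the checksum word itself, equals 0xBABA.
     * A verification sketch (the method name is illustrative only):
     */
#if 0
    bool sketchIsChecksumValid()
    {
        uint16_t u16Sum = 0;
        for (int i = 0; i < eeprom.SIZE; i++)
            u16Sum += eeprom.m_au16Data[i];
        return u16Sum == 0xBABA;
    };
#endif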
692
693 /**
694 * The first 6 bytes of the EEPROM contain the MAC address.
695 *
696 * @param pMac Where to store the MAC address of the E1000.
697 */
698 void getMac(PRTMAC pMac)
699 {
700 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
701 };
702
703 uint32_t read()
704 {
705 return eeprom.read();
706 }
707
708 void write(uint32_t u32Wires)
709 {
710 eeprom.write(u32Wires);
711 }
712
713 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
714 {
715 return eeprom.readWord(u32Addr, pu16Value);
716 }
717
718 int load(PSSMHANDLE pSSM)
719 {
720 return eeprom.load(pSSM);
721 }
722
723 void save(PSSMHANDLE pSSM)
724 {
725 eeprom.save(pSSM);
726 }
727#endif /* IN_RING3 */
728};
729
730
731#define E1K_SPEC_VLAN(s) (s & 0xFFF)
732#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
733#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
734
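/*
 * Usage sketch for the VLAN "special" field helpers above: decoding the TCI
 * stored in a descriptor's u16Special field. Illustrative only.
 */
#if 0
static void e1kSketchDecodeVlanTci(uint16_t u16Special)
{
    uint16_t const uVlanId = E1K_SPEC_VLAN(u16Special);         /* Bits 0..11: VLAN identifier. */
    bool     const fCfi    = E1K_SPEC_CFI(u16Special);          /* Bit 12: canonical form indicator. */
    uint8_t  const uPri    = (uint8_t)E1K_SPEC_PRI(u16Special); /* Bits 13..15: priority. */
    RT_NOREF3(uVlanId, fCfi, uPri);
}
#endif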
735struct E1kRxDStatus
736{
737 /** @name Descriptor Status field (3.2.3.1)
738 * @{ */
739 unsigned fDD : 1; /**< Descriptor Done. */
740 unsigned fEOP : 1; /**< End of packet. */
741 unsigned fIXSM : 1; /**< Ignore checksum indication. */
742 unsigned fVP : 1; /**< VLAN, matches VET. */
743 unsigned : 1;
744 unsigned fTCPCS : 1; /**< TCP Checksum calculated on the packet. */
745 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
746 unsigned fPIF : 1; /**< Passed in-exact filter */
747 /** @} */
748 /** @name Descriptor Errors field (3.2.3.2)
749 * (Only valid when fEOP and fDD are set.)
750 * @{ */
751 unsigned fCE : 1; /**< CRC or alignment error. */
752 unsigned : 4; /**< Reserved, varies with different models... */
753 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
754 unsigned fIPE : 1; /**< IP Checksum error. */
755 unsigned fRXE : 1; /**< RX Data error. */
756 /** @} */
757 /** @name Descriptor Special field (3.2.3.3)
758 * @{ */
759 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
760 /** @} */
761};
762typedef struct E1kRxDStatus E1KRXDST;
763
764struct E1kRxDesc_st
765{
766 uint64_t u64BufAddr; /**< Address of data buffer */
767 uint16_t u16Length; /**< Length of data in buffer */
768 uint16_t u16Checksum; /**< Packet checksum */
769 E1KRXDST status;
770};
771typedef struct E1kRxDesc_st E1KRXDESC;
772AssertCompileSize(E1KRXDESC, 16);
773
774#define E1K_DTYP_LEGACY -1
775#define E1K_DTYP_CONTEXT 0
776#define E1K_DTYP_DATA 1
777
778struct E1kTDLegacy
779{
780 uint64_t u64BufAddr; /**< Address of data buffer */
781 struct TDLCmd_st
782 {
783 unsigned u16Length : 16;
784 unsigned u8CSO : 8;
785 /* CMD field : 8 */
786 unsigned fEOP : 1;
787 unsigned fIFCS : 1;
788 unsigned fIC : 1;
789 unsigned fRS : 1;
790 unsigned fRPS : 1;
791 unsigned fDEXT : 1;
792 unsigned fVLE : 1;
793 unsigned fIDE : 1;
794 } cmd;
795 struct TDLDw3_st
796 {
797 /* STA field */
798 unsigned fDD : 1;
799 unsigned fEC : 1;
800 unsigned fLC : 1;
801 unsigned fTURSV : 1;
802 /* RSV field */
803 unsigned u4RSV : 4;
804 /* CSS field */
805 unsigned u8CSS : 8;
806 /* Special field*/
807 unsigned u16Special: 16;
808 } dw3;
809};
810
811/**
812 * TCP/IP Context Transmit Descriptor, section 3.3.6.
813 */
814struct E1kTDContext
815{
816 struct CheckSum_st
817 {
818 /** TSE: Header start. !TSE: Checksum start. */
819 unsigned u8CSS : 8;
820 /** Checksum offset - where to store it. */
821 unsigned u8CSO : 8;
822 /** Checksum ending (inclusive) offset, 0 = end of packet. */
823 unsigned u16CSE : 16;
824 } ip;
825 struct CheckSum_st tu;
826 struct TDCDw2_st
827 {
828 /** TSE: The total number of payload bytes for this context. Sans header. */
829 unsigned u20PAYLEN : 20;
830 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
831 unsigned u4DTYP : 4;
832 /** TUCMD field, 8 bits
833 * @{ */
834 /** TSE: TCP (set) or UDP (clear). */
835 unsigned fTCP : 1;
836 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
837 * the IP header. Does not affect the checksumming.
838 * @remarks 82544GC/EI interprets a cleared field differently. */
839 unsigned fIP : 1;
840 /** TSE: TCP segmentation enable. When clear the context describes checksum offload only. */
841 unsigned fTSE : 1;
842 /** Report status (only applies to dw3.fDD for here). */
843 unsigned fRS : 1;
844 /** Reserved, MBZ. */
845 unsigned fRSV1 : 1;
846 /** Descriptor extension, must be set for this descriptor type. */
847 unsigned fDEXT : 1;
848 /** Reserved, MBZ. */
849 unsigned fRSV2 : 1;
850 /** Interrupt delay enable. */
851 unsigned fIDE : 1;
852 /** @} */
853 } dw2;
854 struct TDCDw3_st
855 {
856 /** Descriptor Done. */
857 unsigned fDD : 1;
858 /** Reserved, MBZ. */
859 unsigned u7RSV : 7;
860 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
861 unsigned u8HDRLEN : 8;
862 /** TSO: Maximum segment size. */
863 unsigned u16MSS : 16;
864 } dw3;
865};
866typedef struct E1kTDContext E1KTXCTX;
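/*
 * A worked example of the checksum fields for a plain (non-TSE) IPv4/TCP frame
 * with a 14-byte Ethernet header and a 20-byte IP header. The offsets follow
 * from the header layout; the function itself is illustrative only.
 */
#if 0
static void e1kSketchFillChecksumContext(E1KTXCTX *pContext)
{
    pContext->ip.u8CSS  = 14;           /* IP checksum calculation starts at the IP header. */
    pContext->ip.u8CSO  = 14 + 10;      /* The checksum field sits at offset 10 in the IP header. */
    pContext->ip.u16CSE = 14 + 20 - 1;  /* Last byte of the IP header (inclusive). */
    pContext->tu.u8CSS  = 14 + 20;      /* TCP checksum covers the TCP header and payload. */
    pContext->tu.u8CSO  = 14 + 20 + 16; /* The checksum field sits at offset 16 in the TCP header. */
    pContext->tu.u16CSE = 0;            /* 0 = checksum up to the end of the packet. */
}
#endif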
867
868/**
869 * TCP/IP Data Transmit Descriptor, section 3.3.7.
870 */
871struct E1kTDData
872{
873 uint64_t u64BufAddr; /**< Address of data buffer */
874 struct TDDCmd_st
875 {
876 /** The total length of data pointed to by this descriptor. */
877 unsigned u20DTALEN : 20;
878 /** The descriptor type - E1K_DTYP_DATA (1). */
879 unsigned u4DTYP : 4;
880 /** @name DCMD field, 8 bits (3.3.7.1).
881 * @{ */
882 /** End of packet. Note TSCTFC update. */
883 unsigned fEOP : 1;
884 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
885 unsigned fIFCS : 1;
886 /** Use the TSE context when set and the normal when clear. */
887 unsigned fTSE : 1;
888 /** Report status (dw3.STA). */
889 unsigned fRS : 1;
890 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
891 unsigned fRPS : 1;
892 /** Descriptor extension, must be set for this descriptor type. */
893 unsigned fDEXT : 1;
894 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
895 * Insert dw3.SPECIAL after ethernet header. */
896 unsigned fVLE : 1;
897 /** Interrupt delay enable. */
898 unsigned fIDE : 1;
899 /** @} */
900 } cmd;
901 struct TDDDw3_st
902 {
903 /** @name STA field (3.3.7.2)
904 * @{ */
905 unsigned fDD : 1; /**< Descriptor done. */
906 unsigned fEC : 1; /**< Excess collision. */
907 unsigned fLC : 1; /**< Late collision. */
908 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
909 unsigned fTURSV : 1;
910 /** @} */
911 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
912 /** @name POPTS (Packet Option) field (3.3.7.3)
913 * @{ */
914 unsigned fIXSM : 1; /**< Insert IP checksum. */
915 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
916 unsigned u6RSV : 6; /**< Reserved, MBZ. */
917 /** @} */
918 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
919 * Requires fEOP, fVLE and CTRL.VME to be set.
920 * @{ */
921 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
922 /** @} */
923 } dw3;
924};
925typedef struct E1kTDData E1KTXDAT;
926
927union E1kTxDesc
928{
929 struct E1kTDLegacy legacy;
930 struct E1kTDContext context;
931 struct E1kTDData data;
932};
933typedef union E1kTxDesc E1KTXDESC;
934AssertCompileSize(E1KTXDESC, 16);
935
936#define RA_CTL_AS 0x0003
937#define RA_CTL_AV 0x8000
938
939union E1kRecAddr
940{
941 uint32_t au32[32];
942 struct RAArray
943 {
944 uint8_t addr[6];
945 uint16_t ctl;
946 } array[16];
947};
948typedef struct E1kRecAddr::RAArray E1KRAELEM;
949typedef union E1kRecAddr E1KRA;
950AssertCompileSize(E1KRA, 8*16);
951
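/*
 * The receive address table above is what RX filtering consults for exact
 * matches; E1K_INIT_RA0 (see the build options) pre-populates entry 0 with the
 * configured MAC address. A sketch of such a check, illustrative only:
 */
#if 0
static bool e1kSketchMatchesReceiveAddress(PE1KSTATE pThis, const uint8_t *pbDstMac)
{
    for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
    {
        E1KRAELEM const *pRa = &pThis->aRecAddr.array[i];
        if (   (pRa->ctl & RA_CTL_AV)                               /* Only entries marked valid count. */
            && memcmp(pRa->addr, pbDstMac, sizeof(pRa->addr)) == 0)
            return true;
    }
    return false;
}
#endif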
952#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
953#define E1K_IP_DF UINT16_C(0x4000) /**< don't fragment flag */
954#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
955#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
956
957/** @todo use+extend RTNETIPV4 */
958struct E1kIpHeader
959{
960 /* type of service / version / header length */
961 uint16_t tos_ver_hl;
962 /* total length */
963 uint16_t total_len;
964 /* identification */
965 uint16_t ident;
966 /* fragment offset field */
967 uint16_t offset;
968 /* time to live / protocol*/
969 uint16_t ttl_proto;
970 /* checksum */
971 uint16_t chksum;
972 /* source IP address */
973 uint32_t src;
974 /* destination IP address */
975 uint32_t dest;
976};
977AssertCompileSize(struct E1kIpHeader, 20);
978
979#define E1K_TCP_FIN UINT16_C(0x01)
980#define E1K_TCP_SYN UINT16_C(0x02)
981#define E1K_TCP_RST UINT16_C(0x04)
982#define E1K_TCP_PSH UINT16_C(0x08)
983#define E1K_TCP_ACK UINT16_C(0x10)
984#define E1K_TCP_URG UINT16_C(0x20)
985#define E1K_TCP_ECE UINT16_C(0x40)
986#define E1K_TCP_CWR UINT16_C(0x80)
987#define E1K_TCP_FLAGS UINT16_C(0x3f)
988
989/** @todo use+extend RTNETTCP */
990struct E1kTcpHeader
991{
992 uint16_t src;
993 uint16_t dest;
994 uint32_t seqno;
995 uint32_t ackno;
996 uint16_t hdrlen_flags;
997 uint16_t wnd;
998 uint16_t chksum;
999 uint16_t urgp;
1000};
1001AssertCompileSize(struct E1kTcpHeader, 20);
1002
1003
1004#ifdef E1K_WITH_TXD_CACHE
1005/** The current Saved state version. */
1006# define E1K_SAVEDSTATE_VERSION 4
1007/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
1008# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
1009#else /* !E1K_WITH_TXD_CACHE */
1010/** The current Saved state version. */
1011# define E1K_SAVEDSTATE_VERSION 3
1012#endif /* !E1K_WITH_TXD_CACHE */
1013/** Saved state version for VirtualBox 4.1 and earlier.
1014 * These did not include VLAN tag fields. */
1015#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
1016/** Saved state version for VirtualBox 3.0 and earlier.
1017 * This did not include the configuration part nor the E1kEEPROM. */
1018#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
1019
1020/**
1021 * Device state structure.
1022 *
1023 * Holds the current state of the device.
1024 *
1025 * @implements PDMINETWORKDOWN
1026 * @implements PDMINETWORKCONFIG
1027 * @implements PDMILEDPORTS
1028 */
1029struct E1kState_st
1030{
1031 char szPrf[8]; /**< Log prefix, e.g. E1000#1. */
1032 PDMIBASE IBase;
1033 PDMINETWORKDOWN INetworkDown;
1034 PDMINETWORKCONFIG INetworkConfig;
1035 PDMILEDPORTS ILeds; /**< LED interface */
1036 R3PTRTYPE(PPDMIBASE) pDrvBase; /**< Attached network driver. */
1037 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1038
1039 PPDMDEVINSR3 pDevInsR3; /**< Device instance - R3. */
1040 R3PTRTYPE(PPDMQUEUE) pTxQueueR3; /**< Transmit queue - R3. */
1041 R3PTRTYPE(PPDMQUEUE) pCanRxQueueR3; /**< Rx wakeup signaller - R3. */
1042 PPDMINETWORKUPR3 pDrvR3; /**< Attached network driver - R3. */
1043 PTMTIMERR3 pRIDTimerR3; /**< Receive Interrupt Delay Timer - R3. */
1044 PTMTIMERR3 pRADTimerR3; /**< Receive Absolute Delay Timer - R3. */
1045 PTMTIMERR3 pTIDTimerR3; /**< Transmit Interrupt Delay Timer - R3. */
1046 PTMTIMERR3 pTADTimerR3; /**< Transmit Absolute Delay Timer - R3. */
1047 PTMTIMERR3 pTXDTimerR3; /**< Transmit Delay Timer - R3. */
1048 PTMTIMERR3 pIntTimerR3; /**< Late Interrupt Timer - R3. */
1049 PTMTIMERR3 pLUTimerR3; /**< Link Up(/Restore) Timer. */
1050 /** The scatter / gather buffer used for the current outgoing packet - R3. */
1051 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1052
1053 PPDMDEVINSR0 pDevInsR0; /**< Device instance - R0. */
1054 R0PTRTYPE(PPDMQUEUE) pTxQueueR0; /**< Transmit queue - R0. */
1055 R0PTRTYPE(PPDMQUEUE) pCanRxQueueR0; /**< Rx wakeup signaller - R0. */
1056 PPDMINETWORKUPR0 pDrvR0; /**< Attached network driver - R0. */
1057 PTMTIMERR0 pRIDTimerR0; /**< Receive Interrupt Delay Timer - R0. */
1058 PTMTIMERR0 pRADTimerR0; /**< Receive Absolute Delay Timer - R0. */
1059 PTMTIMERR0 pTIDTimerR0; /**< Transmit Interrupt Delay Timer - R0. */
1060 PTMTIMERR0 pTADTimerR0; /**< Transmit Absolute Delay Timer - R0. */
1061 PTMTIMERR0 pTXDTimerR0; /**< Transmit Delay Timer - R0. */
1062 PTMTIMERR0 pIntTimerR0; /**< Late Interrupt Timer - R0. */
1063 PTMTIMERR0 pLUTimerR0; /**< Link Up(/Restore) Timer - R0. */
1064 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1065 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1066
1067 PPDMDEVINSRC pDevInsRC; /**< Device instance - RC. */
1068 RCPTRTYPE(PPDMQUEUE) pTxQueueRC; /**< Transmit queue - RC. */
1069 RCPTRTYPE(PPDMQUEUE) pCanRxQueueRC; /**< Rx wakeup signaller - RC. */
1070 PPDMINETWORKUPRC pDrvRC; /**< Attached network driver - RC. */
1071 PTMTIMERRC pRIDTimerRC; /**< Receive Interrupt Delay Timer - RC. */
1072 PTMTIMERRC pRADTimerRC; /**< Receive Absolute Delay Timer - RC. */
1073 PTMTIMERRC pTIDTimerRC; /**< Transmit Interrupt Delay Timer - RC. */
1074 PTMTIMERRC pTADTimerRC; /**< Transmit Absolute Delay Timer - RC. */
1075 PTMTIMERRC pTXDTimerRC; /**< Transmit Delay Timer - RC. */
1076 PTMTIMERRC pIntTimerRC; /**< Late Interrupt Timer - RC. */
1077 PTMTIMERRC pLUTimerRC; /**< Link Up(/Restore) Timer - RC. */
1078 /** The scatter / gather buffer used for the current outgoing packet - RC. */
1079 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1080 RTRCPTR RCPtrAlignment;
1081
1082#if HC_ARCH_BITS != 32
1083 uint32_t Alignment1;
1084#endif
1085 PDMCRITSECT cs; /**< Critical section - what is it protecting? */
1086 PDMCRITSECT csRx; /**< RX Critical section. */
1087#ifdef E1K_WITH_TX_CS
1088 PDMCRITSECT csTx; /**< TX Critical section. */
1089#endif /* E1K_WITH_TX_CS */
1090 /** Base address of memory-mapped registers. */
1091 RTGCPHYS addrMMReg;
1092 /** MAC address obtained from the configuration. */
1093 RTMAC macConfigured;
1094 /** Base port of I/O space region. */
1095 RTIOPORT IOPortBase;
1096 /** EMT: */
1097 PDMPCIDEV pciDevice;
1098 /** EMT: Last time the interrupt was acknowledged. */
1099 uint64_t u64AckedAt;
1100 /** All: Used for eliminating spurious interrupts. */
1101 bool fIntRaised;
1102 /** EMT: false if the cable is disconnected by the GUI. */
1103 bool fCableConnected;
1104 /** EMT: */
1105 bool fR0Enabled;
1106 /** EMT: */
1107 bool fRCEnabled;
1108 /** EMT: Compute Ethernet CRC for RX packets. */
1109 bool fEthernetCRC;
1110 /** All: throttle interrupts. */
1111 bool fItrEnabled;
1112 /** All: throttle RX interrupts. */
1113 bool fItrRxEnabled;
1114 /** All: Delay TX interrupts using TIDV/TADV. */
1115 bool fTidEnabled;
1116 /** Link up delay (in milliseconds). */
1117 uint32_t cMsLinkUpDelay;
1118
1119 /** All: Device register storage. */
1120 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1121 /** TX/RX: Status LED. */
1122 PDMLED led;
1123 /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1124 uint32_t u32PktNo;
1125
1126 /** EMT: Offset of the register to be read via IO. */
1127 uint32_t uSelectedReg;
1128 /** EMT: Multicast Table Array. */
1129 uint32_t auMTA[128];
1130 /** EMT: Receive Address registers. */
1131 E1KRA aRecAddr;
1132 /** EMT: VLAN filter table array. */
1133 uint32_t auVFTA[128];
1134 /** EMT: Receive buffer size. */
1135 uint16_t u16RxBSize;
1136 /** EMT: Locked state -- no state alteration possible. */
1137 bool fLocked;
1138 /** EMT: */
1139 bool fDelayInts;
1140 /** All: */
1141 bool fIntMaskUsed;
1142
1143 /** N/A: */
1144 bool volatile fMaybeOutOfSpace;
1145 /** EMT: Gets signalled when more RX descriptors become available. */
1146 RTSEMEVENT hEventMoreRxDescAvail;
1147#ifdef E1K_WITH_RXD_CACHE
1148 /** RX: Fetched RX descriptors. */
1149 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1150 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1151 /** RX: Actual number of fetched RX descriptors. */
1152 uint32_t nRxDFetched;
1153 /** RX: Index in cache of RX descriptor being processed. */
1154 uint32_t iRxDCurrent;
1155#endif /* E1K_WITH_RXD_CACHE */
1156
1157 /** TX: Context used for TCP segmentation packets. */
1158 E1KTXCTX contextTSE;
1159 /** TX: Context used for ordinary packets. */
1160 E1KTXCTX contextNormal;
1161#ifdef E1K_WITH_TXD_CACHE
1162 /** TX: Fetched TX descriptors. */
1163 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1164 /** TX: Actual number of fetched TX descriptors. */
1165 uint8_t nTxDFetched;
1166 /** TX: Index in cache of TX descriptor being processed. */
1167 uint8_t iTxDCurrent;
1168 /** TX: Will this frame be sent as GSO. */
1169 bool fGSO;
1170 /** Alignment padding. */
1171 bool fReserved;
1172 /** TX: Number of bytes in next packet. */
1173 uint32_t cbTxAlloc;
1174
1175#endif /* E1K_WITH_TXD_CACHE */
1176 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1177 * applicable to the current TSE mode. */
1178 PDMNETWORKGSO GsoCtx;
1179 /** Scratch space for holding the loopback / fallback scatter / gather
1180 * descriptor. */
1181 union
1182 {
1183 PDMSCATTERGATHER Sg;
1184 uint8_t padding[8 * sizeof(RTUINTPTR)];
1185 } uTxFallback;
1186 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1187 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1188 /** TX: Number of bytes assembled in TX packet buffer. */
1189 uint16_t u16TxPktLen;
1190 /** TX: False will force segmentation in e1000 instead of sending frames as GSO. */
1191 bool fGSOEnabled;
1192 /** TX: IP checksum has to be inserted if true. */
1193 bool fIPcsum;
1194 /** TX: TCP/UDP checksum has to be inserted if true. */
1195 bool fTCPcsum;
1196 /** TX: VLAN tag has to be inserted if true. */
1197 bool fVTag;
1198 /** TX: TCI part of VLAN tag to be inserted. */
1199 uint16_t u16VTagTCI;
1200 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1201 uint32_t u32PayRemain;
1202 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1203 uint16_t u16HdrRemain;
1204 /** TX TSE fallback: Flags from template header. */
1205 uint16_t u16SavedFlags;
1206 /** TX TSE fallback: Partial checksum from template header. */
1207 uint32_t u32SavedCsum;
1208 /** ?: Emulated controller type. */
1209 E1KCHIP eChip;
1210
1211 /** EMT: EEPROM emulation */
1212 E1kEEPROM eeprom;
1213 /** EMT: Physical interface emulation. */
1214 PHY phy;
1215
1216#if 0
1217 /** Alignment padding. */
1218 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1219#endif
1220
1221 STAMCOUNTER StatReceiveBytes;
1222 STAMCOUNTER StatTransmitBytes;
1223#if defined(VBOX_WITH_STATISTICS)
1224 STAMPROFILEADV StatMMIOReadRZ;
1225 STAMPROFILEADV StatMMIOReadR3;
1226 STAMPROFILEADV StatMMIOWriteRZ;
1227 STAMPROFILEADV StatMMIOWriteR3;
1228 STAMPROFILEADV StatEEPROMRead;
1229 STAMPROFILEADV StatEEPROMWrite;
1230 STAMPROFILEADV StatIOReadRZ;
1231 STAMPROFILEADV StatIOReadR3;
1232 STAMPROFILEADV StatIOWriteRZ;
1233 STAMPROFILEADV StatIOWriteR3;
1234 STAMPROFILEADV StatLateIntTimer;
1235 STAMCOUNTER StatLateInts;
1236 STAMCOUNTER StatIntsRaised;
1237 STAMCOUNTER StatIntsPrevented;
1238 STAMPROFILEADV StatReceive;
1239 STAMPROFILEADV StatReceiveCRC;
1240 STAMPROFILEADV StatReceiveFilter;
1241 STAMPROFILEADV StatReceiveStore;
1242 STAMPROFILEADV StatTransmitRZ;
1243 STAMPROFILEADV StatTransmitR3;
1244 STAMPROFILE StatTransmitSendRZ;
1245 STAMPROFILE StatTransmitSendR3;
1246 STAMPROFILE StatRxOverflow;
1247 STAMCOUNTER StatRxOverflowWakeup;
1248 STAMCOUNTER StatTxDescCtxNormal;
1249 STAMCOUNTER StatTxDescCtxTSE;
1250 STAMCOUNTER StatTxDescLegacy;
1251 STAMCOUNTER StatTxDescData;
1252 STAMCOUNTER StatTxDescTSEData;
1253 STAMCOUNTER StatTxPathFallback;
1254 STAMCOUNTER StatTxPathGSO;
1255 STAMCOUNTER StatTxPathRegular;
1256 STAMCOUNTER StatPHYAccesses;
1257 STAMCOUNTER aStatRegWrites[E1K_NUM_OF_REGS];
1258 STAMCOUNTER aStatRegReads[E1K_NUM_OF_REGS];
1259#endif /* VBOX_WITH_STATISTICS */
1260
1261#ifdef E1K_INT_STATS
1262 /* Internal stats */
1263 uint64_t u64ArmedAt;
1264 uint64_t uStatMaxTxDelay;
1265 uint32_t uStatInt;
1266 uint32_t uStatIntTry;
1267 uint32_t uStatIntLower;
1268 uint32_t uStatNoIntICR;
1269 int32_t iStatIntLost;
1270 int32_t iStatIntLostOne;
1271 uint32_t uStatIntIMS;
1272 uint32_t uStatIntSkip;
1273 uint32_t uStatIntLate;
1274 uint32_t uStatIntMasked;
1275 uint32_t uStatIntEarly;
1276 uint32_t uStatIntRx;
1277 uint32_t uStatIntTx;
1278 uint32_t uStatIntICS;
1279 uint32_t uStatIntRDTR;
1280 uint32_t uStatIntRXDMT0;
1281 uint32_t uStatIntTXQE;
1282 uint32_t uStatTxNoRS;
1283 uint32_t uStatTxIDE;
1284 uint32_t uStatTxDelayed;
1285 uint32_t uStatTxDelayExp;
1286 uint32_t uStatTAD;
1287 uint32_t uStatTID;
1288 uint32_t uStatRAD;
1289 uint32_t uStatRID;
1290 uint32_t uStatRxFrm;
1291 uint32_t uStatTxFrm;
1292 uint32_t uStatDescCtx;
1293 uint32_t uStatDescDat;
1294 uint32_t uStatDescLeg;
1295 uint32_t uStatTx1514;
1296 uint32_t uStatTx2962;
1297 uint32_t uStatTx4410;
1298 uint32_t uStatTx5858;
1299 uint32_t uStatTx7306;
1300 uint32_t uStatTx8754;
1301 uint32_t uStatTx16384;
1302 uint32_t uStatTx32768;
1303 uint32_t uStatTxLarge;
1304 uint32_t uStatAlign;
1305#endif /* E1K_INT_STATS */
1306};
1307typedef struct E1kState_st E1KSTATE;
1308/** Pointer to the E1000 device state. */
1309typedef E1KSTATE *PE1KSTATE;
1310
1311#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1312
1313/* Forward declarations ******************************************************/
1314static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread);
1315
1316static int e1kRegReadUnimplemented (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1317static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1318static int e1kRegReadAutoClear (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1319static int e1kRegReadDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1320static int e1kRegWriteDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1321#if 0 /* unused */
1322static int e1kRegReadCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1323#endif
1324static int e1kRegWriteCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1325static int e1kRegReadEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1326static int e1kRegWriteEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1327static int e1kRegWriteEERD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1328static int e1kRegWriteMDIC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1329static int e1kRegReadICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1330static int e1kRegWriteICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1331static int e1kRegWriteICS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1332static int e1kRegWriteIMS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1333static int e1kRegWriteIMC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1334static int e1kRegWriteRCTL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1335static int e1kRegWritePBA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1336static int e1kRegWriteRDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1337static int e1kRegWriteRDTR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1338static int e1kRegWriteTDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1339static int e1kRegReadMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1340static int e1kRegWriteMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1341static int e1kRegReadRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1342static int e1kRegWriteRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1343static int e1kRegReadVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1344static int e1kRegWriteVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1345
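/*
 * A sketch of how the read/write masks in the register map below can be
 * applied by the generic handlers. This only illustrates the table's intent;
 * it is not necessarily how e1kRegWriteDefault is actually implemented.
 */
#if 0
static int e1kSketchRegWriteMasked(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value)
{
    RT_NOREF(offset);
    /* Only bits marked writable may change; all other bits keep their current value. */
    pThis->auRegs[index] = (u32Value & g_aE1kRegMap[index].writable)
                         | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
    return VINF_SUCCESS;
}
#endif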
1346/**
1347 * Register map table.
1348 *
1349 * Override pfnRead and pfnWrite to get register-specific behavior.
1350 */
1351static const struct E1kRegMap_st
1352{
1353 /** Register offset in the register space. */
1354 uint32_t offset;
1355 /** Size in bytes. Registers of size > 4 are in fact tables. */
1356 uint32_t size;
1357 /** Readable bits. */
1358 uint32_t readable;
1359 /** Writable bits. */
1360 uint32_t writable;
1361 /** Read callback. */
1362 int (*pfnRead)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1363 /** Write callback. */
1364 int (*pfnWrite)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1365 /** Abbreviated name. */
1366 const char *abbrev;
1367 /** Full name. */
1368 const char *name;
1369} g_aE1kRegMap[E1K_NUM_OF_REGS] =
1370{
1371 /* offset size read mask write mask read callback write callback abbrev full name */
1372 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1373 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1374 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1375 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1376 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1377 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1378 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1379 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1380 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1381 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1382 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1383 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1384 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1385 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1386 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1387 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1388 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1389 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1390 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1391 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1392 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1393 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1394 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1395 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1396 { 0x00e00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LEDCTL" , "LED Control" },
1397 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1398 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1399 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1400 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1401 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1402 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1403 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1404 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1405 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1406 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1407 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1408 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1409 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1410 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1411 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1412 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1413 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1414 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1415 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1416 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1417 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1418 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1419 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1420 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1421 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1422 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1423 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1424 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1425 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1426 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1427 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1428 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1429 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1430 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1431 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1432 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1433 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1434 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1435 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1436 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1437 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1438 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1439 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1440 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1441 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1442 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1443 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1444 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1445 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1446 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1447 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1448 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1449 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1450 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1451 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1452 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1453 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1454 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1455 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1456 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1457 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1458 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1459 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1460 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1461 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1462 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1463 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1464 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1465 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1466 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1467 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1468 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1469 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1470 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1471 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1472 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1473 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1474 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1475 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1476 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1477 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1478 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1479 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1480 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1481 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1482 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1483 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1484 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1485 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1486 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1487 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1488 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1489 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1490 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1491 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1492 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1493 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1494 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1495 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1496 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1497 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1498 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1499 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1500 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1501 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1502 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1503 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1504 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
1505 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
1506 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
1507};
1508
1509#ifdef LOG_ENABLED
1510
1511/**
1512 * Convert U32 value to hex string. Masked bytes are replaced with dots.
1513 *
1514 * @remarks The mask has byte (not bit) granularity (e.g. 000000FF).
1515 *
1516 * @returns The buffer.
1517 *
1518 * @param u32 The word to convert into string.
1519 * @param mask Selects which bytes to convert.
1520 * @param buf Where to put the result.
1521 */
1522static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1523{
1524 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1525 {
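        /* Map each nibble to ASCII: 0-9 yield '0'-'9', 10-15 yield 'A'-'F' since 'A' - 10 == '7'. */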
1526 if (mask & 0xF)
1527 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1528 else
1529 *ptr = '.';
1530 }
1531 buf[8] = 0;
1532 return buf;
1533}
1534
1535/**
1536 * Returns timer name for debug purposes.
1537 *
1538 * @returns The timer name.
1539 *
1540 * @param pThis The device state structure.
1541 * @param pTimer The timer to get the name for.
1542 */
1543DECLINLINE(const char *) e1kGetTimerName(PE1KSTATE pThis, PTMTIMER pTimer)
1544{
1545 if (pTimer == pThis->CTX_SUFF(pTIDTimer))
1546 return "TID";
1547 if (pTimer == pThis->CTX_SUFF(pTADTimer))
1548 return "TAD";
1549 if (pTimer == pThis->CTX_SUFF(pRIDTimer))
1550 return "RID";
1551 if (pTimer == pThis->CTX_SUFF(pRADTimer))
1552 return "RAD";
1553 if (pTimer == pThis->CTX_SUFF(pIntTimer))
1554 return "Int";
1555 if (pTimer == pThis->CTX_SUFF(pTXDTimer))
1556 return "TXD";
1557 if (pTimer == pThis->CTX_SUFF(pLUTimer))
1558 return "LinkUp";
1559 return "unknown";
1560}
1561
1562#endif /* LOG_ENABLED */
1563
1564/**
1565 * Arm a timer.
1566 *
1567 * @param pThis Pointer to the device state structure.
1568 * @param pTimer Pointer to the timer.
1569 * @param uExpireIn Expiration interval in microseconds.
1570 */
1571DECLINLINE(void) e1kArmTimer(PE1KSTATE pThis, PTMTIMER pTimer, uint32_t uExpireIn)
1572{
1573 if (pThis->fLocked)
1574 return;
1575
1576 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1577 pThis->szPrf, e1kGetTimerName(pThis, pTimer), uExpireIn));
1578 TMTimerSetMicro(pTimer, uExpireIn);
1579}
1580
1581#ifdef IN_RING3
1582/**
1583 * Cancel a timer.
1584 *
1585 * @param pThis Pointer to the device state structure.
1586 * @param pTimer Pointer to the timer.
1587 */
1588DECLINLINE(void) e1kCancelTimer(PE1KSTATE pThis, PTMTIMER pTimer)
1589{
1590 E1kLog2(("%s Stopping %s timer...\n",
1591 pThis->szPrf, e1kGetTimerName(pThis, pTimer)));
1592 int rc = TMTimerStop(pTimer);
1593 if (RT_FAILURE(rc))
1594 E1kLog2(("%s e1kCancelTimer: TMTimerStop() failed with %Rrc\n",
1595 pThis->szPrf, rc));
1596 RT_NOREF1(pThis);
1597}
1598#endif /* IN_RING3 */
1599
1600#define e1kCsEnter(ps, rc) PDMCritSectEnter(&ps->cs, rc)
1601#define e1kCsLeave(ps) PDMCritSectLeave(&ps->cs)
1602
1603#define e1kCsRxEnter(ps, rc) PDMCritSectEnter(&ps->csRx, rc)
1604#define e1kCsRxLeave(ps) PDMCritSectLeave(&ps->csRx)
1605#define e1kCsRxIsOwner(ps) PDMCritSectIsOwner(&ps->csRx)
1606
1607#ifndef E1K_WITH_TX_CS
1608# define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1609# define e1kCsTxLeave(ps) do { } while (0)
1610#else /* E1K_WITH_TX_CS */
1611# define e1kCsTxEnter(ps, rc) PDMCritSectEnter(&ps->csTx, rc)
1612# define e1kCsTxLeave(ps) PDMCritSectLeave(&ps->csTx)
1613#endif /* E1K_WITH_TX_CS */
1614
1615#ifdef IN_RING3
1616
1617/**
1618 * Wakeup the RX thread.
1619 */
1620static void e1kWakeupReceive(PPDMDEVINS pDevIns)
1621{
1622 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
1623 if ( pThis->fMaybeOutOfSpace
1624 && pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
1625 {
1626 STAM_COUNTER_INC(&pThis->StatRxOverflowWakeup);
1627 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", pThis->szPrf));
1628 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
1629 }
1630}
1631
1632/**
1633 * Hardware reset. Revert all registers to initial values.
1634 *
1635 * @param pThis The device state structure.
1636 */
1637static void e1kHardReset(PE1KSTATE pThis)
1638{
1639 E1kLog(("%s Hard reset triggered\n", pThis->szPrf));
1640 memset(pThis->auRegs, 0, sizeof(pThis->auRegs));
1641 memset(pThis->aRecAddr.au32, 0, sizeof(pThis->aRecAddr.au32));
1642#ifdef E1K_INIT_RA0
1643 memcpy(pThis->aRecAddr.au32, pThis->macConfigured.au8,
1644 sizeof(pThis->macConfigured.au8));
1645 pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;
1646#endif /* E1K_INIT_RA0 */
1647 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1648 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1649 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1650 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1651 Assert(GET_BITS(RCTL, BSIZE) == 0);
1652 pThis->u16RxBSize = 2048;
1653
1654 /* Reset promiscuous mode */
1655 if (pThis->pDrvR3)
1656 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, false);
1657
1658#ifdef E1K_WITH_TXD_CACHE
1659 int rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
1660 if (RT_LIKELY(rc == VINF_SUCCESS))
1661 {
1662 pThis->nTxDFetched = 0;
1663 pThis->iTxDCurrent = 0;
1664 pThis->fGSO = false;
1665 pThis->cbTxAlloc = 0;
1666 e1kCsTxLeave(pThis);
1667 }
1668#endif /* E1K_WITH_TXD_CACHE */
1669#ifdef E1K_WITH_RXD_CACHE
1670 if (RT_LIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1671 {
1672 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
1673 e1kCsRxLeave(pThis);
1674 }
1675#endif /* E1K_WITH_RXD_CACHE */
1676#ifdef E1K_LSC_ON_RESET
1677 E1kLog(("%s Will trigger LSC in %d seconds...\n",
1678 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
1679 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), pThis->cMsLinkUpDelay * 1000);
1680#endif /* E1K_LSC_ON_RESET */
1681}
1682
1683#endif /* IN_RING3 */
1684
1685/**
1686 * Compute Internet checksum.
1687 *
1688 * @remarks Refer to http://www.netfor2.com/checksum.html for short intro.
1689 *
1690 * @param pvBuf The buffer holding the data to checksum.
1691 * @param cb The size of the buffer in bytes.
1694 *
1695 * @return The 1's complement of the 1's complement sum.
1696 *
1697 * @thread E1000_TX
1698 */
1699static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1700{
1701 uint32_t csum = 0;
1702 uint16_t *pu16 = (uint16_t *)pvBuf;
1703
1704 while (cb > 1)
1705 {
1706 csum += *pu16++;
1707 cb -= 2;
1708 }
1709 if (cb)
1710 csum += *(uint8_t*)pu16;
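    /* Fold the 32-bit sum into 16 bits by adding the carries back in (cf. RFC 1071). */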
1711 while (csum >> 16)
1712 csum = (csum >> 16) + (csum & 0xFFFF);
1713 return ~csum;
1714}
1715
1716/**
1717 * Dump a packet to debug log.
1718 *
1719 * @param pThis The device state structure.
1720 * @param cpPacket The packet.
1721 * @param cb The size of the packet.
1722 * @param pszText A string denoting direction of packet transfer.
1723 * @thread E1000_TX
1724 */
1725DECLINLINE(void) e1kPacketDump(PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *pszText)
1726{
1727#ifdef DEBUG
1728 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1729 {
1730 Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
1731 pThis->szPrf, pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cb));
1732 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1733 {
1734 Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
1735 pThis->szPrf, cpPacket+14+8, cpPacket+14+24));
1736 if (*(cpPacket+14+6) == 0x6)
1737 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1738 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1739 }
1740 else if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x800)
1741 {
1742 Log4(("%s --- IPv4: %RTnaipv4 => %RTnaipv4\n",
1743 pThis->szPrf, *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16)));
1744 if (*(cpPacket+14+9) == 0x6) /* IPv4 protocol field == TCP */
1745 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1746 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1747 }
1748 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1749 e1kCsLeave(pThis);
1750 }
1751#else
1752 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1753 {
1754 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1755 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
1756 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cpPacket+14+8, cpPacket+14+24,
1757 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1758 else
1759 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
1760 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket,
1761 *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16),
1762 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1763 e1kCsLeave(pThis);
1764 }
1765 RT_NOREF2(cb, pszText);
1766#endif
1767}
1768
1769/**
1770 * Determine the type of transmit descriptor.
1771 *
1772 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1773 *
1774 * @param pDesc Pointer to descriptor union.
1775 * @thread E1000_TX
1776 */
1777DECLINLINE(int) e1kGetDescType(E1KTXDESC *pDesc)
1778{
1779 if (pDesc->legacy.cmd.fDEXT)
1780 return pDesc->context.dw2.u4DTYP;
1781 return E1K_DTYP_LEGACY;
1782}
1783
1784
1785#ifdef E1K_WITH_RXD_CACHE
1786/**
1787 * Return the number of RX descriptors that belong to the hardware.
1788 *
1789 * @returns the number of available descriptors in RX ring.
1790 * @param pThis The device state structure.
1791 * @thread ???
1792 */
1793DECLINLINE(uint32_t) e1kGetRxLen(PE1KSTATE pThis)
1794{
1795 /**
1796 * Make sure RDT won't change during computation. EMT may modify RDT at
1797 * any moment.
1798 */
1799 uint32_t rdt = RDT;
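    /* Descriptors from RDH up to (but not including) RDT belong to the hardware; add the ring size if the range wraps. */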
1800 return (RDH > rdt ? RDLEN/sizeof(E1KRXDESC) : 0) + rdt - RDH;
1801}
1802
1803DECLINLINE(unsigned) e1kRxDInCache(PE1KSTATE pThis)
1804{
1805 return pThis->nRxDFetched > pThis->iRxDCurrent ?
1806 pThis->nRxDFetched - pThis->iRxDCurrent : 0;
1807}
1808
1809DECLINLINE(unsigned) e1kRxDIsCacheEmpty(PE1KSTATE pThis)
1810{
1811 return pThis->iRxDCurrent >= pThis->nRxDFetched;
1812}
1813
1814/**
1815 * Load receive descriptors from guest memory. The caller needs to be in Rx
1816 * critical section.
1817 *
1818 * We need two physical reads in case the tail wrapped around the end of RX
1819 * descriptor ring.
1820 *
1821 * @returns the actual number of descriptors fetched.
1822 * @param pThis The device state structure.
1825 * @thread EMT, RX
1826 */
1827DECLINLINE(unsigned) e1kRxDPrefetch(PE1KSTATE pThis)
1828{
1829 /* We've already loaded pThis->nRxDFetched descriptors past RDH. */
1830 unsigned nDescsAvailable = e1kGetRxLen(pThis) - e1kRxDInCache(pThis);
1831 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pThis->nRxDFetched);
1832 unsigned nDescsTotal = RDLEN / sizeof(E1KRXDESC);
1833 Assert(nDescsTotal != 0);
1834 if (nDescsTotal == 0)
1835 return 0;
1836 unsigned nFirstNotLoaded = (RDH + e1kRxDInCache(pThis)) % nDescsTotal;
1837 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
1838 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
1839 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
1840 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
1841 nFirstNotLoaded, nDescsInSingleRead));
1842 if (nDescsToFetch == 0)
1843 return 0;
1844 E1KRXDESC* pFirstEmptyDesc = &pThis->aRxDescriptors[pThis->nRxDFetched];
1845 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
1846 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
1847 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
1848 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
1849 // unsigned i, j;
1850 // for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
1851 // {
1852 // pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
1853 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
1854 // }
1855 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
1856 pThis->szPrf, nDescsInSingleRead,
1857 RDBAH, RDBAL + RDH * sizeof(E1KRXDESC),
1858 nFirstNotLoaded, RDLEN, RDH, RDT));
1859 if (nDescsToFetch > nDescsInSingleRead)
1860 {
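        /* The range wrapped past the end of the ring: fetch the remaining descriptors from the start of the ring. */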
1861 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
1862 ((uint64_t)RDBAH << 32) + RDBAL,
1863 pFirstEmptyDesc + nDescsInSingleRead,
1864 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
1865 // Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
1866 // for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
1867 // {
1868 // pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
1869 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
1870 // }
1871 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
1872 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
1873 RDBAH, RDBAL));
1874 }
1875 pThis->nRxDFetched += nDescsToFetch;
1876 return nDescsToFetch;
1877}
1878
1879# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
1880/**
1881 * Dump receive descriptor to debug log.
1882 *
1883 * @param pThis The device state structure.
1884 * @param pDesc Pointer to the descriptor.
1885 * @thread E1000_RX
1886 */
1887static void e1kPrintRDesc(PE1KSTATE pThis, E1KRXDESC *pDesc)
1888{
1889 RT_NOREF2(pThis, pDesc);
1890 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", pThis->szPrf, pDesc->u16Length));
1891 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1892 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1893 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1894 pDesc->status.fPIF ? "PIF" : "pif",
1895 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1896 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1897 pDesc->status.fVP ? "VP" : "vp",
1898 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1899 pDesc->status.fEOP ? "EOP" : "eop",
1900 pDesc->status.fDD ? "DD" : "dd",
1901 pDesc->status.fRXE ? "RXE" : "rxe",
1902 pDesc->status.fIPE ? "IPE" : "ipe",
1903 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1904 pDesc->status.fCE ? "CE" : "ce",
1905 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1906 E1K_SPEC_VLAN(pDesc->status.u16Special),
1907 E1K_SPEC_PRI(pDesc->status.u16Special)));
1908}
1909# endif /* IN_RING3 */
1910#endif /* E1K_WITH_RXD_CACHE */
1911
1912/**
1913 * Dump transmit descriptor to debug log.
1914 *
1915 * @param pThis The device state structure.
1916 * @param pDesc Pointer to descriptor union.
1917 * @param pszDir A string denoting direction of descriptor transfer
1918 * @thread E1000_TX
1919 */
1920static void e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, const char *pszDir,
1921 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
1922{
1923 RT_NOREF4(pThis, pDesc, pszDir, uLevel);
1924
1925 /*
1926 * Unfortunately we cannot use our format handler here, we want R0 logging
1927 * as well.
1928 */
1929 switch (e1kGetDescType(pDesc))
1930 {
1931 case E1K_DTYP_CONTEXT:
1932 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
1933 pThis->szPrf, pszDir, pszDir));
1934 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
1935 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
1936 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
1937 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
1938 pDesc->context.dw2.fIDE ? " IDE":"",
1939 pDesc->context.dw2.fRS ? " RS" :"",
1940 pDesc->context.dw2.fTSE ? " TSE":"",
1941 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
1942 pDesc->context.dw2.fTCP ? "TCP":"UDP",
1943 pDesc->context.dw2.u20PAYLEN,
1944 pDesc->context.dw3.u8HDRLEN,
1945 pDesc->context.dw3.u16MSS,
1946 pDesc->context.dw3.fDD?"DD":""));
1947 break;
1948 case E1K_DTYP_DATA:
1949 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
1950 pThis->szPrf, pszDir, pDesc->data.cmd.u20DTALEN, pszDir));
1951 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1952 pDesc->data.u64BufAddr,
1953 pDesc->data.cmd.u20DTALEN));
1954 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
1955 pDesc->data.cmd.fIDE ? " IDE" :"",
1956 pDesc->data.cmd.fVLE ? " VLE" :"",
1957 pDesc->data.cmd.fRPS ? " RPS" :"",
1958 pDesc->data.cmd.fRS ? " RS" :"",
1959 pDesc->data.cmd.fTSE ? " TSE" :"",
1960 pDesc->data.cmd.fIFCS? " IFCS":"",
1961 pDesc->data.cmd.fEOP ? " EOP" :"",
1962 pDesc->data.dw3.fDD ? " DD" :"",
1963 pDesc->data.dw3.fEC ? " EC" :"",
1964 pDesc->data.dw3.fLC ? " LC" :"",
1965 pDesc->data.dw3.fTXSM? " TXSM":"",
1966 pDesc->data.dw3.fIXSM? " IXSM":"",
1967 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
1968 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
1969 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
1970 break;
1971 case E1K_DTYP_LEGACY:
1972 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
1973 pThis->szPrf, pszDir, pDesc->legacy.cmd.u16Length, pszDir));
1974 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1975 pDesc->data.u64BufAddr,
1976 pDesc->legacy.cmd.u16Length));
1977 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
1978 pDesc->legacy.cmd.fIDE ? " IDE" :"",
1979 pDesc->legacy.cmd.fVLE ? " VLE" :"",
1980 pDesc->legacy.cmd.fRPS ? " RPS" :"",
1981 pDesc->legacy.cmd.fRS ? " RS" :"",
1982 pDesc->legacy.cmd.fIC ? " IC" :"",
1983 pDesc->legacy.cmd.fIFCS? " IFCS":"",
1984 pDesc->legacy.cmd.fEOP ? " EOP" :"",
1985 pDesc->legacy.dw3.fDD ? " DD" :"",
1986 pDesc->legacy.dw3.fEC ? " EC" :"",
1987 pDesc->legacy.dw3.fLC ? " LC" :"",
1988 pDesc->legacy.cmd.u8CSO,
1989 pDesc->legacy.dw3.u8CSS,
1990 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
1991 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
1992 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
1993 break;
1994 default:
1995 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
1996 pThis->szPrf, pszDir, pszDir));
1997 break;
1998 }
1999}
2000
2001/**
2002 * Raise an interrupt later.
2003 *
2004 * @param pThis The device state structure.
 * @param uNanoseconds Delay before raising the interrupt, in nanoseconds.
2005 */
2006inline void e1kPostponeInterrupt(PE1KSTATE pThis, uint64_t uNanoseconds)
2007{
2008 if (!TMTimerIsActive(pThis->CTX_SUFF(pIntTimer)))
2009 TMTimerSetNano(pThis->CTX_SUFF(pIntTimer), uNanoseconds);
2010}
2011
2012/**
2013 * Raise interrupt if not masked.
2014 *
2015 * @param pThis The device state structure.
 * @param rcBusy Status code to return when the critical section is busy.
 * @param u32IntCause Additional interrupt cause bits to set in ICR (default 0).
2016 */
2017static int e1kRaiseInterrupt(PE1KSTATE pThis, int rcBusy, uint32_t u32IntCause = 0)
2018{
2019 int rc = e1kCsEnter(pThis, rcBusy);
2020 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2021 return rc;
2022
2023 E1K_INC_ISTAT_CNT(pThis->uStatIntTry);
2024 ICR |= u32IntCause;
2025 if (ICR & IMS)
2026 {
2027 if (pThis->fIntRaised)
2028 {
2029 E1K_INC_ISTAT_CNT(pThis->uStatIntSkip);
2030 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
2031 pThis->szPrf, ICR & IMS));
2032 }
2033 else
2034 {
2035 uint64_t tsNow = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
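            /* ITR specifies the minimum inter-interrupt interval in 256 ns increments. */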
2036 if (!!ITR && tsNow - pThis->u64AckedAt < ITR * 256
2037 && pThis->fItrEnabled && (pThis->fItrRxEnabled || !(ICR & ICR_RXT0)))
2038 {
2039 E1K_INC_ISTAT_CNT(pThis->uStatIntEarly);
2040 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
2041 pThis->szPrf, (uint32_t)(tsNow - pThis->u64AckedAt), ITR * 256));
2042 e1kPostponeInterrupt(pThis, ITR * 256);
2043 }
2044 else
2045 {
2046
2047 /* Since we are delivering the interrupt now
2048 * there is no need to do it later -- stop the timer.
2049 */
2050 TMTimerStop(pThis->CTX_SUFF(pIntTimer));
2051 E1K_INC_ISTAT_CNT(pThis->uStatInt);
2052 STAM_COUNTER_INC(&pThis->StatIntsRaised);
2053 /* Got at least one unmasked interrupt cause */
2054 pThis->fIntRaised = true;
2055 /* Raise(1) INTA(0) */
2056 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
2057 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 1);
2058 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
2059 pThis->szPrf, ICR & IMS));
2060 }
2061 }
2062 }
2063 else
2064 {
2065 E1K_INC_ISTAT_CNT(pThis->uStatIntMasked);
2066 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
2067 pThis->szPrf, ICR, IMS));
2068 }
2069 e1kCsLeave(pThis);
2070 return VINF_SUCCESS;
2071}
2072
2073/**
2074 * Compute the physical address of the descriptor.
2075 *
2076 * @returns the physical address of the descriptor.
2077 *
2078 * @param baseHigh High-order 32 bits of descriptor table address.
2079 * @param baseLow Low-order 32 bits of descriptor table address.
2080 * @param idxDesc The descriptor index in the table.
2081 */
2082DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
2083{
2084 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
2085 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
2086}
2087
2088#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2089/**
2090 * Advance the head pointer of the receive descriptor queue.
2091 *
2092 * @remarks RDH always points to the next available RX descriptor.
2093 *
2094 * @param pThis The device state structure.
2095 */
2096DECLINLINE(void) e1kAdvanceRDH(PE1KSTATE pThis)
2097{
2098 Assert(e1kCsRxIsOwner(pThis));
2099 //e1kCsEnter(pThis, RT_SRC_POS);
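    /* Advance the head and wrap around to the start of the ring when it passes the end. */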
2100 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
2101 RDH = 0;
2102#ifdef E1K_WITH_RXD_CACHE
2103 /*
2104 * We need to fetch descriptors now as the guest may advance RDT all the way
2105 * to RDH as soon as we generate RXDMT0 interrupt. This is mostly to provide
2106 * compatibility with Phar Lap ETS, see @bugref{7346}. Note that we do not
2107 * check if the receiver is enabled. It must be, otherwise we won't get here
2108 * in the first place.
2109 *
2110 * Note that we should have moved both RDH and iRxDCurrent by now.
2111 */
2112 if (e1kRxDIsCacheEmpty(pThis))
2113 {
2114 /* Cache is empty, reset it and check if we can fetch more. */
2115 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2116 E1kLog3(("%s e1kAdvanceRDH: Rx cache is empty, RDH=%x RDT=%x "
2117 "iRxDCurrent=%x nRxDFetched=%x\n",
2118 pThis->szPrf, RDH, RDT, pThis->iRxDCurrent, pThis->nRxDFetched));
2119 e1kRxDPrefetch(pThis);
2120 }
2121#endif /* E1K_WITH_RXD_CACHE */
2122 /*
2123 * Compute current receive queue length and fire RXDMT0 interrupt
2124 * if we are low on receive buffers
2125 */
2126 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
2127 /*
2128 * The minimum threshold is controlled by RDMTS bits of RCTL:
2129 * 00 = 1/2 of RDLEN
2130 * 01 = 1/4 of RDLEN
2131 * 10 = 1/8 of RDLEN
2132 * 11 = reserved
2133 */
2134 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
2135 if (uRQueueLen <= uMinRQThreshold)
2136 {
2137 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
2138 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
2139 pThis->szPrf, RDH, RDT, uRQueueLen, uMinRQThreshold));
2140 E1K_INC_ISTAT_CNT(pThis->uStatIntRXDMT0);
2141 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXDMT0);
2142 }
2143 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
2144 pThis->szPrf, RDH, RDT, uRQueueLen));
2145 //e1kCsLeave(pThis);
2146}
2147#endif /* IN_RING3 */
2148
2149#ifdef E1K_WITH_RXD_CACHE
2150
2151# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2152
2153/**
2154 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2155 * RX ring if the cache is empty.
2156 *
2157 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2158 * go out of sync with RDH which will cause trouble when EMT checks if the
2159 * cache is empty to do pre-fetch, see @bugref{6217}.
2160 *
2161 * @param pThis The device state structure.
2162 * @thread RX
2163 */
2164DECLINLINE(E1KRXDESC*) e1kRxDGet(PE1KSTATE pThis)
2165{
2166 Assert(e1kCsRxIsOwner(pThis));
2167 /* Check the cache first. */
2168 if (pThis->iRxDCurrent < pThis->nRxDFetched)
2169 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2170 /* Cache is empty, reset it and check if we can fetch more. */
2171 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2172 if (e1kRxDPrefetch(pThis))
2173 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2174 /* Out of Rx descriptors. */
2175 return NULL;
2176}
2177
2178
2179/**
2180 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2181 * pointer. The descriptor gets written back to the RXD ring.
2182 *
2183 * @param pThis The device state structure.
2184 * @param pDesc The descriptor being "returned" to the RX ring.
2185 * @thread RX
2186 */
2187DECLINLINE(void) e1kRxDPut(PE1KSTATE pThis, E1KRXDESC* pDesc)
2188{
2189 Assert(e1kCsRxIsOwner(pThis));
2190 pThis->iRxDCurrent++;
2191 // Assert(pDesc >= pThis->aRxDescriptors);
2192 // Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2193 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2194 // uint32_t rdh = RDH;
2195 // Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
2196 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2197 e1kDescAddr(RDBAH, RDBAL, RDH),
2198 pDesc, sizeof(E1KRXDESC));
2199 /*
2200 * We need to print the descriptor before advancing RDH as it may fetch new
2201 * descriptors into the cache.
2202 */
2203 e1kPrintRDesc(pThis, pDesc);
2204 e1kAdvanceRDH(pThis);
2205}
2206
2207/**
2208 * Store a fragment of the received packet at the specified address.
2209 *
2210 * @param pThis The device state structure.
2211 * @param pDesc The next available RX descriptor.
2212 * @param pvBuf The fragment.
2213 * @param cb The size of the fragment.
2214 */
2215static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2216{
2217 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2218 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2219 pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2220 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2221 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2222 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2223}
2224
2225# endif /* IN_RING3 */
2226
2227#else /* !E1K_WITH_RXD_CACHE */
2228
2229/**
2230 * Store a fragment of received packet that fits into the next available RX
2231 * buffer.
2232 *
2233 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2234 *
2235 * @param pThis The device state structure.
2236 * @param pDesc The next available RX descriptor.
2237 * @param pvBuf The fragment.
2238 * @param cb The size of the fragment.
2239 */
2240static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2241{
2242 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2243 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2244 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2245 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2246 /* Write back the descriptor */
2247 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2248 e1kPrintRDesc(pThis, pDesc);
2249 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2250 /* Advance head */
2251 e1kAdvanceRDH(pThis);
2252 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
2253 if (pDesc->status.fEOP)
2254 {
2255 /* Complete packet has been stored -- it is time to let the guest know. */
2256#ifdef E1K_USE_RX_TIMERS
2257 if (RDTR)
2258 {
2259 /* Arm the timer to fire in RDTR usec (discard .024) */
2260 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2261 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2262 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2263 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2264 }
2265 else
2266 {
2267#endif
2268 /* 0 delay means immediate interrupt */
2269 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2270 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2271#ifdef E1K_USE_RX_TIMERS
2272 }
2273#endif
2274 }
2275 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2276}
2277
2278#endif /* !E1K_WITH_RXD_CACHE */
2279
2280/**
2281 * Returns true if it is a broadcast packet.
2282 *
2283 * @returns true if destination address indicates broadcast.
2284 * @param pvBuf The ethernet packet.
2285 */
2286DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2287{
2288 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2289 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2290}
2291
2292/**
2293 * Returns true if it is a multicast packet.
2294 *
2295 * @remarks returns true for broadcast packets as well.
2296 * @returns true if destination address indicates multicast.
2297 * @param pvBuf The ethernet packet.
2298 */
2299DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2300{
2301 return (*(char*)pvBuf) & 1;
2302}
2303
2304#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2305/**
2306 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2307 *
2308 * @remarks We emulate checksum offloading for major packet types only.
2309 *
2310 * @returns VBox status code.
2311 * @param pThis The device state structure.
2312 * @param pFrame The available data.
2313 * @param cb Number of bytes available in the buffer.
2314 * @param pStatus Pointer to the status bits to update (IXSM, IPCS, TCPCS).
2315 */
2316static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2317{
2318 /** @todo
2319 * It is not safe to bypass checksum verification for packets coming
2320 * from the real wire. We are currently unable to tell where packets are
2321 * coming from, so we tell the driver to ignore our checksum flags
2322 * and do verification in software.
2323 */
2324# if 0
2325 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2326
2327 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", pThis->szPrf, uEtherType));
2328
2329 switch (uEtherType)
2330 {
2331 case 0x800: /* IPv4 */
2332 {
2333 pStatus->fIXSM = false;
2334 pStatus->fIPCS = true;
2335 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2336 /* TCP/UDP checksum offloading works with TCP and UDP only */
2337 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2338 break;
2339 }
2340 case 0x86DD: /* IPv6 */
2341 pStatus->fIXSM = false;
2342 pStatus->fIPCS = false;
2343 pStatus->fTCPCS = true;
2344 break;
2345 default: /* ARP, VLAN, etc. */
2346 pStatus->fIXSM = true;
2347 break;
2348 }
2349# else
2350 pStatus->fIXSM = true;
2351 RT_NOREF_PV(pThis); RT_NOREF_PV(pFrame); RT_NOREF_PV(cb);
2352# endif
2353 return VINF_SUCCESS;
2354}
2355#endif /* IN_RING3 */
2356
2357/**
2358 * Pad and store received packet.
2359 *
2360 * @remarks Make sure that the packet appears to the upper layer as one coming
2361 * from real Ethernet hardware: pad it and insert the FCS.
2362 *
2363 * @returns VBox status code.
2364 * @param pThis The device state structure.
2365 * @param pvBuf The available data.
2366 * @param cb Number of bytes available in the buffer.
2367 * @param status Bit fields containing status info.
2368 */
2369static int e1kHandleRxPacket(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
2370{
2371#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2372 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2373 uint8_t *ptr = rxPacket;
2374
2375 int rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2376 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2377 return rc;
2378
2379 if (cb > 70) /* unqualified guess */
2380 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
2381
2382 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2383 Assert(cb > 16);
2384 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2385 E1kLog3(("%s Max RX packet size is %u\n", pThis->szPrf, cbMax));
2386 if (status.fVP)
2387 {
2388 /* VLAN packet -- strip VLAN tag in VLAN mode */
2389 if ((CTRL & CTRL_VME) && cb > 16)
2390 {
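            /* An 802.1Q tag occupies bytes 12-15 (TPID + TCI): keep the MAC addresses, record the TCI, and drop the 4-byte tag. */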
2391 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2392 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2393 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2394 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2395 cb -= 4;
2396 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2397 pThis->szPrf, status.u16Special, cb));
2398 }
2399 else
2400 status.fVP = false; /* Set VP only if we stripped the tag */
2401 }
2402 else
2403 memcpy(rxPacket, pvBuf, cb);
2404 /* Pad short packets */
2405 if (cb < 60)
2406 {
2407 memset(rxPacket + cb, 0, 60 - cb);
2408 cb = 60;
2409 }
2410 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2411 {
2412 STAM_PROFILE_ADV_START(&pThis->StatReceiveCRC, a);
2413 /*
2414 * Add FCS if CRC stripping is not enabled. Since the value of CRC
2415 * is ignored by most drivers, we may as well save ourselves the trouble
2416 * of calculating it (see EthernetCRC CFGM parameter).
2417 */
2418 if (pThis->fEthernetCRC)
2419 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2420 cb += sizeof(uint32_t);
2421 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveCRC, a);
2422 E1kLog3(("%s Added FCS (cb=%u)\n", pThis->szPrf, cb));
2423 }
2424 /* Compute checksum of complete packet */
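    /* RXCSUM.PCSS selects the byte offset at which checksumming starts. */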
2425 uint16_t checksum = e1kCSum16(rxPacket + GET_BITS(RXCSUM, PCSS), cb);
2426 e1kRxChecksumOffload(pThis, rxPacket, cb, &status);
2427
2428 /* Update stats */
2429 E1K_INC_CNT32(GPRC);
2430 if (e1kIsBroadcast(pvBuf))
2431 E1K_INC_CNT32(BPRC);
2432 else if (e1kIsMulticast(pvBuf))
2433 E1K_INC_CNT32(MPRC);
2434 /* Update octet receive counter */
2435 E1K_ADD_CNT64(GORCL, GORCH, cb);
2436 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
2437 if (cb == 64)
2438 E1K_INC_CNT32(PRC64);
2439 else if (cb < 128)
2440 E1K_INC_CNT32(PRC127);
2441 else if (cb < 256)
2442 E1K_INC_CNT32(PRC255);
2443 else if (cb < 512)
2444 E1K_INC_CNT32(PRC511);
2445 else if (cb < 1024)
2446 E1K_INC_CNT32(PRC1023);
2447 else
2448 E1K_INC_CNT32(PRC1522);
2449
2450 E1K_INC_ISTAT_CNT(pThis->uStatRxFrm);
2451
2452# ifdef E1K_WITH_RXD_CACHE
2453 while (cb > 0)
2454 {
2455 E1KRXDESC *pDesc = e1kRxDGet(pThis);
2456
2457 if (pDesc == NULL)
2458 {
2459 E1kLog(("%s Out of receive buffers, dropping the packet "
2460 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2461 pThis->szPrf, cb, e1kRxDInCache(pThis), RDH, RDT));
2462 break;
2463 }
2464# else /* !E1K_WITH_RXD_CACHE */
2465 if (RDH == RDT)
2466 {
2467 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2468 pThis->szPrf));
2469 }
2470 /* Store the packet to receive buffers */
2471 while (RDH != RDT)
2472 {
2473 /* Load the descriptor pointed by head */
2474 E1KRXDESC desc, *pDesc = &desc;
2475 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
2476 &desc, sizeof(desc));
2477# endif /* !E1K_WITH_RXD_CACHE */
2478 if (pDesc->u64BufAddr)
2479 {
2480 /* Update descriptor */
2481 pDesc->status = status;
2482 pDesc->u16Checksum = checksum;
2483 pDesc->status.fDD = true;
2484
2485 /*
2486 * We need to leave Rx critical section here or we risk deadlocking
2487 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2488 * page or has an access handler associated with it.
2489 * Note that it is safe to leave the critical section here since
2490 * e1kRegWriteRDT() never modifies RDH. It never touches already
2491 * fetched RxD cache entries either.
2492 */
2493 if (cb > pThis->u16RxBSize)
2494 {
2495 pDesc->status.fEOP = false;
2496 e1kCsRxLeave(pThis);
2497 e1kStoreRxFragment(pThis, pDesc, ptr, pThis->u16RxBSize);
2498 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2499 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2500 return rc;
2501 ptr += pThis->u16RxBSize;
2502 cb -= pThis->u16RxBSize;
2503 }
2504 else
2505 {
2506 pDesc->status.fEOP = true;
2507 e1kCsRxLeave(pThis);
2508 e1kStoreRxFragment(pThis, pDesc, ptr, cb);
2509# ifdef E1K_WITH_RXD_CACHE
2510 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2511 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2512 return rc;
2513 cb = 0;
2514# else /* !E1K_WITH_RXD_CACHE */
2515 pThis->led.Actual.s.fReading = 0;
2516 return VINF_SUCCESS;
2517# endif /* !E1K_WITH_RXD_CACHE */
2518 }
2519 /*
2520 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2521 * is not defined.
2522 */
2523 }
2524# ifdef E1K_WITH_RXD_CACHE
2525 /* Write back the descriptor. */
2526 pDesc->status.fDD = true;
2527 e1kRxDPut(pThis, pDesc);
2528# else /* !E1K_WITH_RXD_CACHE */
2529 else
2530 {
2531 /* Write back the descriptor. */
2532 pDesc->status.fDD = true;
2533 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2534 e1kDescAddr(RDBAH, RDBAL, RDH),
2535 pDesc, sizeof(E1KRXDESC));
2536 e1kAdvanceRDH(pThis);
2537 }
2538# endif /* !E1K_WITH_RXD_CACHE */
2539 }
2540
2541 if (cb > 0)
2542 E1kLog(("%s Out of receive buffers, dropping %u bytes\n", pThis->szPrf, cb));
2543
2544 pThis->led.Actual.s.fReading = 0;
2545
2546 e1kCsRxLeave(pThis);
2547# ifdef E1K_WITH_RXD_CACHE
2548 /* Complete packet has been stored -- it is time to let the guest know. */
2549# ifdef E1K_USE_RX_TIMERS
2550 if (RDTR)
2551 {
2552 /* Arm the timer to fire in RDTR usec (discard .024) */
2553 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2554 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2555 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2556 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2557 }
2558 else
2559 {
2560# endif /* E1K_USE_RX_TIMERS */
2561 /* 0 delay means immediate interrupt */
2562 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2563 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2564# ifdef E1K_USE_RX_TIMERS
2565 }
2566# endif /* E1K_USE_RX_TIMERS */
2567# endif /* E1K_WITH_RXD_CACHE */
2568
2569 return VINF_SUCCESS;
2570#else /* !IN_RING3 */
2571 RT_NOREF_PV(pThis); RT_NOREF_PV(pvBuf); RT_NOREF_PV(cb); RT_NOREF_PV(status);
2572 return VERR_INTERNAL_ERROR_2;
2573#endif /* !IN_RING3 */
2574}
2575
2576
2577#ifdef IN_RING3
2578/**
2579 * Bring the link up after the configured delay, 5 seconds by default.
2580 *
2581 * @param pThis The device state structure.
2582 * @thread any
2583 */
2584DECLINLINE(void) e1kBringLinkUpDelayed(PE1KSTATE pThis)
2585{
2586 E1kLog(("%s Will bring up the link in %d seconds...\n",
2587 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
2588 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), pThis->cMsLinkUpDelay * 1000);
2589}
2590
2591/**
2592 * Bring up the link immediately.
2593 *
2594 * @param pThis The device state structure.
2595 */
2596DECLINLINE(void) e1kR3LinkUp(PE1KSTATE pThis)
2597{
2598 E1kLog(("%s Link is up\n", pThis->szPrf));
2599 STATUS |= STATUS_LU;
2600 Phy::setLinkStatus(&pThis->phy, true);
2601 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2602 if (pThis->pDrvR3)
2603 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_UP);
2604 /* Process pending TX descriptors (see @bugref{8942}) */
2605 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pTxQueue));
2606 if (RT_UNLIKELY(pItem))
2607 PDMQueueInsert(pThis->CTX_SUFF(pTxQueue), pItem);
2608}
2609
2610/**
2611 * Bring down the link immediately.
2612 *
2613 * @param pThis The device state structure.
2614 */
2615DECLINLINE(void) e1kR3LinkDown(PE1KSTATE pThis)
2616{
2617 E1kLog(("%s Link is down\n", pThis->szPrf));
2618 STATUS &= ~STATUS_LU;
2619#ifdef E1K_LSC_ON_RESET
2620 Phy::setLinkStatus(&pThis->phy, false);
2621#endif /* E1K_LSC_ON_RESET */
2622 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2623 if (pThis->pDrvR3)
2624 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2625}
2626
2627/**
2628 * Bring down the link temporarily.
2629 *
2630 * @param pThis The device state structure.
2631 */
2632DECLINLINE(void) e1kR3LinkDownTemp(PE1KSTATE pThis)
2633{
2634 E1kLog(("%s Link is down temporarily\n", pThis->szPrf));
2635 STATUS &= ~STATUS_LU;
2636 Phy::setLinkStatus(&pThis->phy, false);
2637 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2638 /*
2639 * Notifying the associated driver that the link went down (even temporarily)
2640 * seems to be the right thing, but it was not done before. This may cause
2641 * a regression if the driver does not expect the link to go down as a result
2642 * of sending PDMNETWORKLINKSTATE_DOWN_RESUME to this device. Earlier versions
2643 * of code notified the driver that the link was up! See @bugref{7057}.
2644 */
2645 if (pThis->pDrvR3)
2646 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2647 e1kBringLinkUpDelayed(pThis);
2648}
2649#endif /* IN_RING3 */
2650
2651#if 0 /* unused */
2652/**
2653 * Read handler for Device Control register.
2654 *
2655 * Reflects the MDIO pin value driven by the PHY when the MDIO direction is input.
2656 *
2657 * @returns VBox status code.
2658 *
2659 * @param pThis The device state structure.
2660 * @param offset Register offset in memory-mapped frame.
2661 * @param index Register index in register array.
2662 * @param pu32Value Where to store the read value.
2663 */
2664static int e1kRegReadCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2665{
2666 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2667 pThis->szPrf, (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2668 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2669 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2670 {
2671 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2672 if (Phy::readMDIO(&pThis->phy))
2673 *pu32Value = CTRL | CTRL_MDIO;
2674 else
2675 *pu32Value = CTRL & ~CTRL_MDIO;
2676 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2677 pThis->szPrf, !!(*pu32Value & CTRL_MDIO)));
2678 }
2679 else
2680 {
2681 /* MDIO pin is used for output, ignore it */
2682 *pu32Value = CTRL;
2683 }
2684 return VINF_SUCCESS;
2685}
2686#endif /* unused */
2687
2688/**
2689 * A callback used by PHY to indicate that the link needs to be updated due to
2690 * reset of PHY.
2691 *
2692 * @param pPhy A pointer to phy member of the device state structure.
2693 * @thread any
2694 */
2695void e1kPhyLinkResetCallback(PPHY pPhy)
2696{
2697 /* PHY is aggregated into e1000, get pThis from pPhy. */
2698 PE1KSTATE pThis = RT_FROM_MEMBER(pPhy, E1KSTATE, phy);
2699 /* Make sure we have cable connected and MAC can talk to PHY */
2700 if (pThis->fCableConnected && (CTRL & CTRL_SLU))
2701 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), E1K_INIT_LINKUP_DELAY_US);
2702}
2703
2704/**
2705 * Write handler for Device Control register.
2706 *
2707 * Handles reset.
2708 *
2709 * @param pThis The device state structure.
2710 * @param offset Register offset in memory-mapped frame.
2711 * @param index Register index in register array.
2712 * @param value The value to store.
2714 * @thread EMT
2715 */
2716static int e1kRegWriteCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2717{
2718 int rc = VINF_SUCCESS;
2719
2720 if (value & CTRL_RESET)
2721 { /* RST */
2722#ifndef IN_RING3
2723 return VINF_IOM_R3_MMIO_WRITE;
2724#else
2725 e1kHardReset(pThis);
2726#endif
2727 }
2728 else
2729 {
2730#ifdef E1K_LSC_ON_SLU
2731 /*
2732 * When the guest changes the 'Set Link Up' bit from 0 to 1, we check whether
2733 * the link is down and the cable is connected; if so, we bring
2734 * the link up, see @bugref{8624}.
2735 */
2736 if ( (value & CTRL_SLU)
2737 && !(CTRL & CTRL_SLU)
2738 && pThis->fCableConnected
2739 && !(STATUS & STATUS_LU))
2740 {
2741 /* It should take about 2 seconds for the link to come up */
2742 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), E1K_INIT_LINKUP_DELAY_US);
2743 }
2744#else /* !E1K_LSC_ON_SLU */
2745 if ( (value & CTRL_SLU)
2746 && !(CTRL & CTRL_SLU)
2747 && pThis->fCableConnected
2748 && !TMTimerIsActive(pThis->CTX_SUFF(pLUTimer)))
2749 {
2750 /* PXE does not use LSC interrupts, see @bugref{9113}. */
2751 STATUS |= STATUS_LU;
2752 }
2753#endif /* !E1K_LSC_ON_SLU */
2754 if ((value & CTRL_VME) != (CTRL & CTRL_VME))
2755 {
2756 E1kLog(("%s VLAN Mode %s\n", pThis->szPrf, (value & CTRL_VME) ? "Enabled" : "Disabled"));
2757 }
2758 Log7(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2759 pThis->szPrf, (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2760 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2761 if (value & CTRL_MDC)
2762 {
2763 if (value & CTRL_MDIO_DIR)
2764 {
2765 Log7(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2766 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2767 Phy::writeMDIO(&pThis->phy, !!(value & CTRL_MDIO));
2768 }
2769 else
2770 {
2771 if (Phy::readMDIO(&pThis->phy))
2772 value |= CTRL_MDIO;
2773 else
2774 value &= ~CTRL_MDIO;
2775 Log7(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2776 }
2777 }
2778 rc = e1kRegWriteDefault(pThis, offset, index, value);
2779 }
2780
2781 return rc;
2782}
2783
2784/**
2785 * Write handler for EEPROM/Flash Control/Data register.
2786 *
2787 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2788 *
2789 * @param pThis The device state structure.
2790 * @param offset Register offset in memory-mapped frame.
2791 * @param index Register index in register array.
2792 * @param value The value to store.
2794 * @thread EMT
2795 */
2796static int e1kRegWriteEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2797{
2798 RT_NOREF(offset, index);
2799#ifdef IN_RING3
2800 /* So far we are concerned with lower byte only */
2801 if ((EECD & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2802 {
2803 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2804 /* Note: 82543GC does not need to request EEPROM access */
2805 STAM_PROFILE_ADV_START(&pThis->StatEEPROMWrite, a);
2806 pThis->eeprom.write(value & EECD_EE_WIRES);
2807 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMWrite, a);
2808 }
2809 if (value & EECD_EE_REQ)
2810 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2811 else
2812 EECD &= ~EECD_EE_GNT;
2813 //e1kRegWriteDefault(pThis, offset, index, value );
2814
2815 return VINF_SUCCESS;
2816#else /* !IN_RING3 */
2817 RT_NOREF(pThis, value);
2818 return VINF_IOM_R3_MMIO_WRITE;
2819#endif /* !IN_RING3 */
2820}
2821
2822/**
2823 * Read handler for EEPROM/Flash Control/Data register.
2824 *
2825 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2826 *
2827 * @returns VBox status code.
2828 *
2829 * @param pThis The device state structure.
2830 * @param offset Register offset in memory-mapped frame.
2831 * @param index Register index in register array.
2832 * @param pu32Value Where to store the read value.
2833 * @thread EMT
2834 */
2835static int e1kRegReadEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2836{
2837#ifdef IN_RING3
2838 uint32_t value;
2839 int rc = e1kRegReadDefault(pThis, offset, index, &value);
2840 if (RT_SUCCESS(rc))
2841 {
2842 if ((value & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2843 {
2844 /* Note: 82543GC does not need to request EEPROM access */
2845 /* Access to EEPROM granted -- get 4-wire bits to EEPROM device */
2846 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2847 value |= pThis->eeprom.read();
2848 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2849 }
2850 *pu32Value = value;
2851 }
2852
2853 return rc;
2854#else /* !IN_RING3 */
2855 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(pu32Value);
2856 return VINF_IOM_R3_MMIO_READ;
2857#endif /* !IN_RING3 */
2858}
2859
2860/**
2861 * Write handler for EEPROM Read register.
2862 *
2863 * Handles EEPROM word access requests, reads EEPROM and stores the result
2864 * into DATA field.
2865 *
2866 * @param pThis The device state structure.
2867 * @param offset Register offset in memory-mapped frame.
2868 * @param index Register index in register array.
2869 * @param value The value to store.
2871 * @thread EMT
2872 */
2873static int e1kRegWriteEERD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2874{
2875#ifdef IN_RING3
2876 /* Make use of 'writable' and 'readable' masks. */
2877 e1kRegWriteDefault(pThis, offset, index, value);
2878 /* DONE and DATA are set only if read was triggered by START. */
2879 if (value & EERD_START)
2880 {
2881 uint16_t tmp;
2882 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2883 if (pThis->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2884 SET_BITS(EERD, DATA, tmp);
2885 EERD |= EERD_DONE;
2886 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2887 }
2888
2889 return VINF_SUCCESS;
2890#else /* !IN_RING3 */
2891 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
2892 return VINF_IOM_R3_MMIO_WRITE;
2893#endif /* !IN_RING3 */
2894}
2895
2896
2897/**
2898 * Write handler for MDI Control register.
2899 *
2900 * Handles PHY read/write requests; forwards requests to internal PHY device.
2901 *
2902 * @param pThis The device state structure.
2903 * @param offset Register offset in memory-mapped frame.
2904 * @param index Register index in register array.
2905 * @param value The value to store.
2907 * @thread EMT
2908 */
2909static int e1kRegWriteMDIC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2910{
2911 if (value & MDIC_INT_EN)
2912 {
2913 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
2914 pThis->szPrf));
2915 }
2916 else if (value & MDIC_READY)
2917 {
2918 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
2919 pThis->szPrf));
2920 }
2921 else if (GET_BITS_V(value, MDIC, PHY) != 1)
2922 {
2923 E1kLog(("%s WARNING! Access to invalid PHY detected, phy=%d.\n",
2924 pThis->szPrf, GET_BITS_V(value, MDIC, PHY)));
2925 /*
2926 * Some drivers scan the MDIO bus for a PHY. We can work with these
2927 * drivers if we set MDIC_READY and MDIC_ERROR when there isn't a PHY
2928 * at the requested address, see @bugref{7346}.
2929 */
2930 MDIC = MDIC_READY | MDIC_ERROR;
2931 }
2932 else
2933 {
2934 /* Store the value */
2935 e1kRegWriteDefault(pThis, offset, index, value);
2936 STAM_COUNTER_INC(&pThis->StatPHYAccesses);
2937 /* Forward op to PHY */
2938 if (value & MDIC_OP_READ)
2939 SET_BITS(MDIC, DATA, Phy::readRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG)));
2940 else
2941 Phy::writeRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK);
2942 /* Let software know that we are done */
2943 MDIC |= MDIC_READY;
2944 }
2945
2946 return VINF_SUCCESS;
2947}
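/* Illustrative MDIC read cycle as handled above (the register number is the
 * standard IEEE 802.3 clause-22 one, not something defined in this file): the
 * guest writes OP=read, PHY=1, REG=2 (PHY identifier 1); the emulation forwards
 * the read to Phy::readRegister(), stores the result in MDIC.DATA and sets
 * MDIC_READY so the driver's polling loop completes. */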
2948
2949/**
2950 * Write handler for Interrupt Cause Read register.
2951 *
2952 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
2953 *
2954 * @param pThis The device state structure.
2955 * @param offset Register offset in memory-mapped frame.
2956 * @param index Register index in register array.
2957 * @param value The value to store.
2959 * @thread EMT
2960 */
2961static int e1kRegWriteICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2962{
2963 ICR &= ~value;
2964
2965 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index);
2966 return VINF_SUCCESS;
2967}
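/* Illustrative write-1-to-clear example for the handler above (assuming the
 * usual 8254x ICR bit layout with TXDW = 0x01, TXQE = 0x02, RXT0 = 0x80):
 * with ICR = 0x00000083 a guest write of 0x00000080 acknowledges only RXT0,
 * leaving ICR = 0x00000003. */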
2968
2969/**
2970 * Read handler for Interrupt Cause Read register.
2971 *
2972 * Reading this register acknowledges all interrupts.
2973 *
2974 * @returns VBox status code.
2975 *
2976 * @param pThis The device state structure.
2977 * @param offset Register offset in memory-mapped frame.
2978 * @param index Register index in register array.
2979 * @param pu32Value Where to store the read value.
2980 * @thread EMT
2981 */
2982static int e1kRegReadICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2983{
2984 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_READ);
2985 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2986 return rc;
2987
2988 uint32_t value = 0;
2989 rc = e1kRegReadDefault(pThis, offset, index, &value);
2990 if (RT_SUCCESS(rc))
2991 {
2992 if (value)
2993 {
2994 if (!pThis->fIntRaised)
2995 E1K_INC_ISTAT_CNT(pThis->uStatNoIntICR);
2996 /*
2997 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
2998 * with disabled interrupts.
2999 */
3000 //if (IMS)
3001 if (1)
3002 {
3003 /*
3004 * Interrupts were enabled -- we are supposedly at the very
3005 * beginning of interrupt handler
3006 */
3007 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
3008 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", pThis->szPrf, ICR));
3009 /* Clear all pending interrupts */
3010 ICR = 0;
3011 pThis->fIntRaised = false;
3012 /* Lower(0) INTA(0) */
3013 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
3014
3015 pThis->u64AckedAt = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
3016 if (pThis->fIntMaskUsed)
3017 pThis->fDelayInts = true;
3018 }
3019 else
3020 {
3021 /*
3022 * Interrupts are disabled -- in windows guests ICR read is done
3023 * just before re-enabling interrupts
3024 */
3025 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
3026 }
3027 }
3028 *pu32Value = value;
3029 }
3030 e1kCsLeave(pThis);
3031
3032 return rc;
3033}
3034
3035/**
3036 * Write handler for Interrupt Cause Set register.
3037 *
3038 * Bits corresponding to 1s in 'value' will be set in ICR register.
3039 *
3040 * @param pThis The device state structure.
3041 * @param offset Register offset in memory-mapped frame.
3042 * @param index Register index in register array.
3043 * @param value The value to store.
3045 * @thread EMT
3046 */
3047static int e1kRegWriteICS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3048{
3049 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3050 E1K_INC_ISTAT_CNT(pThis->uStatIntICS);
3051 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, value & g_aE1kRegMap[ICS_IDX].writable);
3052}
3053
3054/**
3055 * Write handler for Interrupt Mask Set register.
3056 *
3057 * Will trigger pending interrupts.
3058 *
3059 * @param pThis The device state structure.
3060 * @param offset Register offset in memory-mapped frame.
3061 * @param index Register index in register array.
3062 * @param value The value to store.
3064 * @thread EMT
3065 */
3066static int e1kRegWriteIMS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3067{
3068 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3069
3070 IMS |= value;
3071 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
3072 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", pThis->szPrf));
3073 /*
3074 * We cannot raise an interrupt here as it will occasionally cause an interrupt storm
3075 * in Windows guests (see @bugref{8624}, @bugref{5023}).
3076 */
3077 if ((ICR & IMS) && !pThis->fLocked)
3078 {
3079 E1K_INC_ISTAT_CNT(pThis->uStatIntIMS);
3080 e1kPostponeInterrupt(pThis, E1K_IMS_INT_DELAY_NS);
3081 }
3082
3083 return VINF_SUCCESS;
3084}
3085
3086/**
3087 * Write handler for Interrupt Mask Clear register.
3088 *
3089 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
3090 *
3091 * @param pThis The device state structure.
3092 * @param offset Register offset in memory-mapped frame.
3093 * @param index Register index in register array.
3094 * @param value The value to store.
3096 * @thread EMT
3097 */
3098static int e1kRegWriteIMC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3099{
3100 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3101
3102 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3103 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3104 return rc;
3105 if (pThis->fIntRaised)
3106 {
3107 /*
3108 * Technically we should reset fIntRaised in ICR read handler, but it will cause
3109 * Windows to freeze since it may receive an interrupt while still in the very beginning
3110 * of interrupt handler.
3111 */
3112 E1K_INC_ISTAT_CNT(pThis->uStatIntLower);
3113 STAM_COUNTER_INC(&pThis->StatIntsPrevented);
3114 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
3115 /* Lower(0) INTA(0) */
3116 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
3117 pThis->fIntRaised = false;
3118 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
3119 }
3120 IMS &= ~value;
3121 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", pThis->szPrf));
3122 e1kCsLeave(pThis);
3123
3124 return VINF_SUCCESS;
3125}
3126
3127/**
3128 * Write handler for Receive Control register.
3129 *
3130 * @param pThis The device state structure.
3131 * @param offset Register offset in memory-mapped frame.
3132 * @param index Register index in register array.
3133 * @param value The value to store.
3135 * @thread EMT
3136 */
3137static int e1kRegWriteRCTL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3138{
3139 /* Update promiscuous mode */
3140 bool fBecomePromiscuous = !!(value & (RCTL_UPE | RCTL_MPE));
3141 if (fBecomePromiscuous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
3142 {
3143 /* Promiscuity has changed, pass the knowledge on. */
3144#ifndef IN_RING3
3145 return VINF_IOM_R3_MMIO_WRITE;
3146#else
3147 if (pThis->pDrvR3)
3148 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, fBecomePromiscuous);
3149#endif
3150 }
3151
3152 /* Adjust receive buffer size */
3153 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
3154 if (value & RCTL_BSEX)
3155 cbRxBuf *= 16;
3156 if (cbRxBuf != pThis->u16RxBSize)
3157 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
3158 pThis->szPrf, cbRxBuf, pThis->u16RxBSize));
3159 pThis->u16RxBSize = cbRxBuf;
3160
3161 /* Update the register */
3162 e1kRegWriteDefault(pThis, offset, index, value);
3163
3164 return VINF_SUCCESS;
3165}
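/* Illustrative decoding of the receive buffer size computed above (derived
 * from the formula in the code, not measured): BSIZE=0 gives 2048-byte
 * buffers, BSIZE=3 gives 256-byte buffers, and BSIZE=3 with BSEX set gives
 * 256 * 16 = 4096 bytes. */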
3166
3167/**
3168 * Write handler for Packet Buffer Allocation register.
3169 *
3170 * TXA = 64 - RXA.
3171 *
3172 * @param pThis The device state structure.
3173 * @param offset Register offset in memory-mapped frame.
3174 * @param index Register index in register array.
3175 * @param value The value to store.
3177 * @thread EMT
3178 */
3179static int e1kRegWritePBA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3180{
3181 e1kRegWriteDefault(pThis, offset, index, value);
3182 PBA_st->txa = 64 - PBA_st->rxa;
3183
3184 return VINF_SUCCESS;
3185}
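/* Illustrative example of the invariant kept above (the code implies a 64 KB
 * packet buffer): a guest writing RXA = 48 ends up with TXA = 64 - 48 = 16,
 * i.e. 48 KB for receive and 16 KB for transmit. */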
3186
3187/**
3188 * Write handler for Receive Descriptor Tail register.
3189 *
3190 * @remarks Writing to RDT forces a switch to HC and signals
3191 * e1kR3NetworkDown_WaitReceiveAvail().
3192 *
3193 * @returns VBox status code.
3194 *
3195 * @param pThis The device state structure.
3196 * @param offset Register offset in memory-mapped frame.
3197 * @param index Register index in register array.
3198 * @param value The value to store.
3200 * @thread EMT
3201 */
3202static int e1kRegWriteRDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3203{
3204#ifndef IN_RING3
3205 /* XXX */
3206// return VINF_IOM_R3_MMIO_WRITE;
3207#endif
3208 int rc = e1kCsRxEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3209 if (RT_LIKELY(rc == VINF_SUCCESS))
3210 {
3211 E1kLog(("%s e1kRegWriteRDT\n", pThis->szPrf));
3212#ifndef E1K_WITH_RXD_CACHE
3213 /*
3214 * Some drivers advance RDT too far, so that it equals RDH. This
3215 * somehow manages to work with real hardware but not with this
3216 * emulated device. We can work with these drivers if we just
3217 * write 1 less when we see a driver writing RDT equal to RDH,
3218 * see @bugref{7346}.
3219 */
3220 if (value == RDH)
3221 {
3222 if (RDH == 0)
3223 value = (RDLEN / sizeof(E1KRXDESC)) - 1;
3224 else
3225 value = RDH - 1;
3226 }
3227#endif /* !E1K_WITH_RXD_CACHE */
3228 rc = e1kRegWriteDefault(pThis, offset, index, value);
3229#ifdef E1K_WITH_RXD_CACHE
3230 /*
3231 * We need to fetch descriptors now as RDT may wrap all the way around
3232 * before we attempt to store a received packet. For example,
3233 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
3234 * size being only 8 descriptors! Note that we fetch descriptors
3235 * only when the cache is empty to reduce the number of memory reads
3236 * in case of frequent RDT writes. Don't fetch anything when the
3237 * receiver is disabled either as RDH, RDT, RDLEN can be in some
3238 * messed up state.
3239 * Note that even though the cache may seem empty, i.e. contain no more
3240 * available descriptors, it may still be in use by the RX thread, which
3241 * has not yet written the last descriptor back but has temporarily
3242 * released the RX lock in order to write the packet body to the
3243 * descriptor's buffer. At this point we are still going to do the
3244 * prefetch, but it won't actually fetch anything if there are no unused
3245 * slots in our "empty" cache (nRxDFetched==E1K_RXD_CACHE_SIZE). We must not
3246 * reset the cache here even if it appears empty. It will be reset at
3247 * a later point in e1kRxDGet().
3248 */
3249 if (e1kRxDIsCacheEmpty(pThis) && (RCTL & RCTL_EN))
3250 e1kRxDPrefetch(pThis);
3251#endif /* E1K_WITH_RXD_CACHE */
3252 e1kCsRxLeave(pThis);
3253 if (RT_SUCCESS(rc))
3254 {
3255/** @todo bird: Use SUPSem* for this so we can signal it in ring-0 as well
3256 * without requiring any context switches. We should also check the
3257 * wait condition before bothering to queue the item as we're currently
3258 * queuing thousands of items per second here in a normal transmit
3259 * scenario. Expect performance changes when fixing this! */
3260#ifdef IN_RING3
3261 /* Signal that we have more receive descriptors available. */
3262 e1kWakeupReceive(pThis->CTX_SUFF(pDevIns));
3263#else
3264 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pCanRxQueue));
3265 if (pItem)
3266 PDMQueueInsert(pThis->CTX_SUFF(pCanRxQueue), pItem);
3267#endif
3268 }
3269 }
3270 return rc;
3271}
3272
3273/**
3274 * Write handler for Receive Delay Timer register.
3275 *
3276 * @param pThis The device state structure.
3277 * @param offset Register offset in memory-mapped frame.
3278 * @param index Register index in register array.
3279 * @param value The value to store.
3281 * @thread EMT
3282 */
3283static int e1kRegWriteRDTR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3284{
3285 e1kRegWriteDefault(pThis, offset, index, value);
3286 if (value & RDTR_FPD)
3287 {
3288 /* Flush requested, cancel both timers and raise interrupt */
3289#ifdef E1K_USE_RX_TIMERS
3290 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3291 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3292#endif
3293 E1K_INC_ISTAT_CNT(pThis->uStatIntRDTR);
3294 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3295 }
3296
3297 return VINF_SUCCESS;
3298}
3299
3300DECLINLINE(uint32_t) e1kGetTxLen(PE1KSTATE pThis)
3301{
3302 /*
3303 * Make sure TDT won't change during computation. EMT may modify TDT at
3304 * any moment.
3305 */
3306 uint32_t tdt = TDT;
3307 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
3308}
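/* Worked example for e1kGetTxLen (illustrative values only): with a ring of
 * TDLEN/sizeof(E1KTXDESC) = 8 descriptors, TDH = 5 and TDT = 2 the tail has
 * wrapped, so the result is 8 + 2 - 5 = 5 pending descriptors; with TDH = 2
 * and TDT = 5 (no wrap) it is simply 5 - 2 = 3. */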
3309
3310#ifdef IN_RING3
3311
3312# ifdef E1K_TX_DELAY
3313/**
3314 * Transmit Delay Timer handler.
3315 *
3316 * @remarks We only get here when the timer expires.
3317 *
3318 * @param pDevIns Pointer to device instance structure.
3319 * @param pTimer Pointer to the timer.
3320 * @param pvUser Pointer to the device state structure.
3321 * @thread EMT
3322 */
3323static DECLCALLBACK(void) e1kTxDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3324{
3325 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3326 Assert(PDMCritSectIsOwner(&pThis->csTx));
3327
3328 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayExp);
3329# ifdef E1K_INT_STATS
3330 uint64_t u64Elapsed = RTTimeNanoTS() - pThis->u64ArmedAt;
3331 if (u64Elapsed > pThis->uStatMaxTxDelay)
3332 pThis->uStatMaxTxDelay = u64Elapsed;
3333# endif
3334 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
3335 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3336}
3337# endif /* E1K_TX_DELAY */
3338
3339//# ifdef E1K_USE_TX_TIMERS
3340
3341/**
3342 * Transmit Interrupt Delay Timer handler.
3343 *
3344 * @remarks We only get here when the timer expires.
3345 *
3346 * @param pDevIns Pointer to device instance structure.
3347 * @param pTimer Pointer to the timer.
3348 * @param pvUser Pointer to the device state structure.
3349 * @thread EMT
3350 */
3351static DECLCALLBACK(void) e1kTxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3352{
3353 RT_NOREF(pDevIns);
3354 RT_NOREF(pTimer);
3355 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3356
3357 E1K_INC_ISTAT_CNT(pThis->uStatTID);
3358 /* Cancel absolute delay timer as we have already got attention */
3359# ifndef E1K_NO_TAD
3360 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
3361# endif
3362 e1kRaiseInterrupt(pThis, ICR_TXDW);
3363}
3364
3365/**
3366 * Transmit Absolute Delay Timer handler.
3367 *
3368 * @remarks We only get here when the timer expires.
3369 *
3370 * @param pDevIns Pointer to device instance structure.
3371 * @param pTimer Pointer to the timer.
3372 * @param pvUser Pointer to the device state structure.
3373 * @thread EMT
3374 */
3375static DECLCALLBACK(void) e1kTxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3376{
3377 RT_NOREF(pDevIns);
3378 RT_NOREF(pTimer);
3379 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3380
3381 E1K_INC_ISTAT_CNT(pThis->uStatTAD);
3382 /* Cancel interrupt delay timer as we have already got attention */
3383 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
3384 e1kRaiseInterrupt(pThis, ICR_TXDW);
3385}
3386
3387//# endif /* E1K_USE_TX_TIMERS */
3388# ifdef E1K_USE_RX_TIMERS
3389
3390/**
3391 * Receive Interrupt Delay Timer handler.
3392 *
3393 * @remarks We only get here when the timer expires.
3394 *
3395 * @param pDevIns Pointer to device instance structure.
3396 * @param pTimer Pointer to the timer.
3397 * @param pvUser Pointer to the device state structure.
3398 * @thread EMT
3399 */
3400static DECLCALLBACK(void) e1kRxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3401{
3402 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3403
3404 E1K_INC_ISTAT_CNT(pThis->uStatRID);
3405 /* Cancel absolute delay timer as we have already got attention */
3406 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3407 e1kRaiseInterrupt(pThis, ICR_RXT0);
3408}
3409
3410/**
3411 * Receive Absolute Delay Timer handler.
3412 *
3413 * @remarks We only get here when the timer expires.
3414 *
3415 * @param pDevIns Pointer to device instance structure.
3416 * @param pTimer Pointer to the timer.
3417 * @param pvUser Pointer to the device state structure.
3418 * @thread EMT
3419 */
3420static DECLCALLBACK(void) e1kRxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3421{
3422 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3423
3424 E1K_INC_ISTAT_CNT(pThis->uStatRAD);
3425 /* Cancel interrupt delay timer as we have already got attention */
3426 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3427 e1kRaiseInterrupt(pThis, ICR_RXT0);
3428}
3429
3430# endif /* E1K_USE_RX_TIMERS */
3431
3432/**
3433 * Late Interrupt Timer handler.
3434 *
3435 * @param pDevIns Pointer to device instance structure.
3436 * @param pTimer Pointer to the timer.
3437 * @param pvUser Pointer to the device state structure.
3438 * @thread EMT
3439 */
3440static DECLCALLBACK(void) e1kLateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3441{
3442 RT_NOREF(pDevIns, pTimer);
3443 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3444
3445 STAM_PROFILE_ADV_START(&pThis->StatLateIntTimer, a);
3446 STAM_COUNTER_INC(&pThis->StatLateInts);
3447 E1K_INC_ISTAT_CNT(pThis->uStatIntLate);
3448# if 0
3449 if (pThis->iStatIntLost > -100)
3450 pThis->iStatIntLost--;
3451# endif
3452 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, 0);
3453 STAM_PROFILE_ADV_STOP(&pThis->StatLateIntTimer, a);
3454}
3455
3456/**
3457 * Link Up Timer handler.
3458 *
3459 * @param pDevIns Pointer to device instance structure.
3460 * @param pTimer Pointer to the timer.
3461 * @param pvUser Pointer to the device state structure.
3462 * @thread EMT
3463 */
3464static DECLCALLBACK(void) e1kLinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3465{
3466 RT_NOREF(pDevIns, pTimer);
3467 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3468
3469 /*
3470 * This can happen if we set the link status to down while the link-up timer was
3471 * already armed (shortly after e1kLoadDone(), or when the cable is disconnected
3472 * and reconnected very quickly). Moreover, 82543GC triggers LSC
3473 * on reset even if the cable is unplugged (see @bugref{8942}).
3474 */
3475 if (pThis->fCableConnected)
3476 {
3477 /* 82543GC does not have an internal PHY */
3478 if (pThis->eChip == E1K_CHIP_82543GC || (CTRL & CTRL_SLU))
3479 e1kR3LinkUp(pThis);
3480 }
3481#ifdef E1K_LSC_ON_RESET
3482 else if (pThis->eChip == E1K_CHIP_82543GC)
3483 e1kR3LinkDown(pThis);
3484#endif /* E1K_LSC_ON_RESET */
3485}
3486
3487#endif /* IN_RING3 */
3488
3489/**
3490 * Sets up the GSO context according to the TSE new context descriptor.
3491 *
3492 * @param pGso The GSO context to setup.
3493 * @param pCtx The context descriptor.
3494 */
3495DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3496{
3497 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3498
3499 /*
3500 * See if the context descriptor describes something that could be TCP or
3501 * UDP over IPv[46].
3502 */
3503 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3504 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3505 {
3506 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3507 return;
3508 }
3509 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3510 {
3511 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3512 return;
3513 }
3514 if (RT_UNLIKELY( pCtx->dw2.fTCP
3515 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3516 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3517 {
3518 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3519 return;
3520 }
3521
3522 /* The TCP/UDP checksum range should end at the end of the packet or at least after the headers. */
3523 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3524 {
3525 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3526 return;
3527 }
3528
3529 /* IPv4 checksum offset. */
3530 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3531 {
3532 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3533 return;
3534 }
3535
3536 /* TCP/UDP checksum offsets. */
3537 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3538 != ( pCtx->dw2.fTCP
3539 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3540 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3541 {
3542 E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->tu.u8CSO, pCtx->tu.u8CSS, pCtx->dw2.fTCP));
3543 return;
3544 }
3545
3546 /*
3547 * Because internal networking uses a 16-bit size field for the GSO context
3548 * plus frame, we have to make sure we don't exceed it.
3549 */
3550 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3551 {
3552 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3553 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3554 return;
3555 }
3556
3557 /*
3558 * We're good for now - we'll do more checks when seeing the data.
3559 * So, figure the type of offloading and setup the context.
3560 */
3561 if (pCtx->dw2.fIP)
3562 {
3563 if (pCtx->dw2.fTCP)
3564 {
3565 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3566 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3567 }
3568 else
3569 {
3570 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3571 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3572 }
3573 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3574 * this yet it seems)... */
3575 }
3576 else
3577 {
3578 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /** @todo IPv6 UFO */
3579 if (pCtx->dw2.fTCP)
3580 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3581 else
3582 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3583 }
3584 pGso->offHdr1 = pCtx->ip.u8CSS;
3585 pGso->offHdr2 = pCtx->tu.u8CSS;
3586 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3587 pGso->cbMaxSeg = pCtx->dw3.u16MSS;
3588 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3589 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3590 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3591}
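/* Worked example of a context that passes the checks above (illustrative,
 * plain TCP/IPv4 without options): IPCSS = 14 (Ethernet header),
 * IPCSO = 14 + 10 = 24 (ip_sum offset), TUCSS = 14 + 20 = 34,
 * TUCSO = 34 + 16 = 50 (th_sum offset), HDRLEN = 34 + 20 = 54 and, say,
 * MSS = 1460 for a 1500-byte MTU. */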
3592
3593/**
3594 * Checks if we can use GSO processing for the current TSE frame.
3595 *
3596 * @param pThis The device state structure.
3597 * @param pGso The GSO context.
3598 * @param pData The first data descriptor of the frame.
3599 * @param pCtx The TSO context descriptor.
3600 */
3601DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3602{
3603 if (!pData->cmd.fTSE)
3604 {
3605 E1kLog2(("e1kCanDoGso: !TSE\n"));
3606 return false;
3607 }
3608 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3609 {
3610 E1kLog(("e1kCanDoGso: VLE\n"));
3611 return false;
3612 }
3613 if (RT_UNLIKELY(!pThis->fGSOEnabled))
3614 {
3615 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3616 return false;
3617 }
3618
3619 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3620 {
3621 case PDMNETWORKGSOTYPE_IPV4_TCP:
3622 case PDMNETWORKGSOTYPE_IPV4_UDP:
3623 if (!pData->dw3.fIXSM)
3624 {
3625 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3626 return false;
3627 }
3628 if (!pData->dw3.fTXSM)
3629 {
3630 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3631 return false;
3632 }
3633 /** @todo what other checks should we perform here? Ethernet frame type? */
3634 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3635 return true;
3636
3637 case PDMNETWORKGSOTYPE_IPV6_TCP:
3638 case PDMNETWORKGSOTYPE_IPV6_UDP:
3639 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3640 {
3641 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3642 return false;
3643 }
3644 if (!pData->dw3.fTXSM)
3645 {
3646 E1kLog(("e1kCanDoGso: !TXSM (IPv6)\n"));
3647 return false;
3648 }
3649 /** @todo what other checks should we perform here? Ethernet frame type? */
3650 E1kLog2(("e1kCanDoGso: OK, IPv6\n"));
3651 return true;
3652
3653 default:
3654 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3655 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3656 return false;
3657 }
3658}
3659
3660/**
3661 * Frees the current xmit buffer.
3662 *
3663 * @param pThis The device state structure.
3664 */
3665static void e1kXmitFreeBuf(PE1KSTATE pThis)
3666{
3667 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3668 if (pSg)
3669 {
3670 pThis->CTX_SUFF(pTxSg) = NULL;
3671
3672 if (pSg->pvAllocator != pThis)
3673 {
3674 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3675 if (pDrv)
3676 pDrv->pfnFreeBuf(pDrv, pSg);
3677 }
3678 else
3679 {
3680 /* loopback */
3681 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3682 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3683 pSg->fFlags = 0;
3684 pSg->pvAllocator = NULL;
3685 }
3686 }
3687}
3688
3689#ifndef E1K_WITH_TXD_CACHE
3690/**
3691 * Allocates an xmit buffer.
3692 *
3693 * @returns See PDMINETWORKUP::pfnAllocBuf.
3694 * @param pThis The device state structure.
3695 * @param cbMin The minimum frame size.
3696 * @param fExactSize Whether cbMin is exact or if we have to max it
3697 * out to the max MTU size.
3698 * @param fGso Whether this is a GSO frame or not.
3699 */
3700DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, size_t cbMin, bool fExactSize, bool fGso)
3701{
3702 /* Adjust cbMin if necessary. */
3703 if (!fExactSize)
3704 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3705
3706 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3707 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3708 e1kXmitFreeBuf(pThis);
3709 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3710
3711 /*
3712 * Allocate the buffer.
3713 */
3714 PPDMSCATTERGATHER pSg;
3715 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3716 {
3717 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3718 if (RT_UNLIKELY(!pDrv))
3719 return VERR_NET_DOWN;
3720 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pThis->GsoCtx : NULL, &pSg);
3721 if (RT_FAILURE(rc))
3722 {
3723 /* Suspend TX as we are out of buffers atm */
3724 STATUS |= STATUS_TXOFF;
3725 return rc;
3726 }
3727 }
3728 else
3729 {
3730 /* Create a loopback using the fallback buffer and preallocated SG. */
3731 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3732 pSg = &pThis->uTxFallback.Sg;
3733 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3734 pSg->cbUsed = 0;
3735 pSg->cbAvailable = 0;
3736 pSg->pvAllocator = pThis;
3737 pSg->pvUser = NULL; /* No GSO here. */
3738 pSg->cSegs = 1;
3739 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3740 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3741 }
3742
3743 pThis->CTX_SUFF(pTxSg) = pSg;
3744 return VINF_SUCCESS;
3745}
3746#else /* E1K_WITH_TXD_CACHE */
3747/**
3748 * Allocates an xmit buffer.
3749 *
3750 * @returns See PDMINETWORKUP::pfnAllocBuf.
3751 * @param pThis The device state structure.
3755 * @param fGso Whether this is a GSO frame or not.
3756 */
3757DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, bool fGso)
3758{
3759 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3760 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3761 e1kXmitFreeBuf(pThis);
3762 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3763
3764 /*
3765 * Allocate the buffer.
3766 */
3767 PPDMSCATTERGATHER pSg;
3768 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3769 {
3770 if (pThis->cbTxAlloc == 0)
3771 {
3772 /* Zero packet, no need for the buffer */
3773 return VINF_SUCCESS;
3774 }
3775
3776 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3777 if (RT_UNLIKELY(!pDrv))
3778 return VERR_NET_DOWN;
3779 int rc = pDrv->pfnAllocBuf(pDrv, pThis->cbTxAlloc, fGso ? &pThis->GsoCtx : NULL, &pSg);
3780 if (RT_FAILURE(rc))
3781 {
3782 /* Suspend TX as we are out of buffers atm */
3783 STATUS |= STATUS_TXOFF;
3784 return rc;
3785 }
3786 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3787 pThis->szPrf, pThis->cbTxAlloc,
3788 pThis->fVTag ? "VLAN " : "",
3789 pThis->fGSO ? "GSO " : ""));
3790 }
3791 else
3792 {
3793 /* Create a loopback using the fallback buffer and preallocated SG. */
3794 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3795 pSg = &pThis->uTxFallback.Sg;
3796 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3797 pSg->cbUsed = 0;
3798 pSg->cbAvailable = 0;
3799 pSg->pvAllocator = pThis;
3800 pSg->pvUser = NULL; /* No GSO here. */
3801 pSg->cSegs = 1;
3802 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3803 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3804 }
3805 pThis->cbTxAlloc = 0;
3806
3807 pThis->CTX_SUFF(pTxSg) = pSg;
3808 return VINF_SUCCESS;
3809}
3810#endif /* E1K_WITH_TXD_CACHE */
3811
3812/**
3813 * Checks if it's a GSO buffer or not.
3814 *
3815 * @returns true / false.
3816 * @param pTxSg The scatter / gather buffer.
3817 */
3818DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3819{
3820#if 0
3821 if (!pTxSg)
3822 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
3823 if (pTxSg && !pTxSg->pvUser)
3824 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
3825#endif
3826 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3827}
3828
3829#ifndef E1K_WITH_TXD_CACHE
3830/**
3831 * Load transmit descriptor from guest memory.
3832 *
3833 * @param pThis The device state structure.
3834 * @param pDesc Pointer to descriptor union.
3835 * @param addr Physical address in guest context.
3836 * @thread E1000_TX
3837 */
3838DECLINLINE(void) e1kLoadDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3839{
3840 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3841}
3842#else /* E1K_WITH_TXD_CACHE */
3843/**
3844 * Load transmit descriptors from guest memory.
3845 *
3846 * We need two physical reads in case the tail wrapped around the end of TX
3847 * descriptor ring.
3848 *
3849 * @returns the actual number of descriptors fetched.
3850 * @param pThis The device state structure.
3853 * @thread E1000_TX
3854 */
3855DECLINLINE(unsigned) e1kTxDLoadMore(PE1KSTATE pThis)
3856{
3857 Assert(pThis->iTxDCurrent == 0);
3858 /* We've already loaded pThis->nTxDFetched descriptors past TDH. */
3859 unsigned nDescsAvailable = e1kGetTxLen(pThis) - pThis->nTxDFetched;
3860 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pThis->nTxDFetched);
3861 unsigned nDescsTotal = TDLEN / sizeof(E1KTXDESC);
3862 unsigned nFirstNotLoaded = (TDH + pThis->nTxDFetched) % nDescsTotal;
3863 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
3864 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u "
3865 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
3866 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
3867 nFirstNotLoaded, nDescsInSingleRead));
3868 if (nDescsToFetch == 0)
3869 return 0;
3870 E1KTXDESC* pFirstEmptyDesc = &pThis->aTxDescriptors[pThis->nTxDFetched];
3871 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3872 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3873 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
3874 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3875 pThis->szPrf, nDescsInSingleRead,
3876 TDBAH, TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3877 nFirstNotLoaded, TDLEN, TDH, TDT));
3878 if (nDescsToFetch > nDescsInSingleRead)
3879 {
3880 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3881 ((uint64_t)TDBAH << 32) + TDBAL,
3882 pFirstEmptyDesc + nDescsInSingleRead,
3883 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
3884 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
3885 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
3886 TDBAH, TDBAL));
3887 }
3888 pThis->nTxDFetched += nDescsToFetch;
3889 return nDescsToFetch;
3890}
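/* Worked example of the two-read wrap-around above (illustrative values):
 * with a ring of TDLEN/sizeof(E1KTXDESC) = 8 descriptors, TDH = 6, TDT = 3,
 * an empty cache (nTxDFetched = 0) and E1K_TXD_CACHE_SIZE >= 5 there are
 * 5 descriptors to fetch; the first read covers indices 6..7 and the second
 * read picks up the remaining 3 descriptors from the start of the ring. */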
3891
3892/**
3893 * Load transmit descriptors from guest memory only if there are no loaded
3894 * descriptors.
3895 *
3896 * @returns true if there are descriptors in cache.
3897 * @param pThis The device state structure.
3900 * @thread E1000_TX
3901 */
3902DECLINLINE(bool) e1kTxDLazyLoad(PE1KSTATE pThis)
3903{
3904 if (pThis->nTxDFetched == 0)
3905 return e1kTxDLoadMore(pThis) != 0;
3906 return true;
3907}
3908#endif /* E1K_WITH_TXD_CACHE */
3909
3910/**
3911 * Write back transmit descriptor to guest memory.
3912 *
3913 * @param pThis The device state structure.
3914 * @param pDesc Pointer to descriptor union.
3915 * @param addr Physical address in guest context.
3916 * @thread E1000_TX
3917 */
3918DECLINLINE(void) e1kWriteBackDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3919{
3920 /* Only the last half of the descriptor has to be written back. */
3921 e1kPrintTDesc(pThis, pDesc, "^^^");
3922 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3923}
3924
3925/**
3926 * Transmit complete frame.
3927 *
3928 * @remarks We skip the FCS since we're not responsible for sending anything to
3929 * a real ethernet wire.
3930 *
3931 * @param pThis The device state structure.
3932 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3933 * @thread E1000_TX
3934 */
3935static void e1kTransmitFrame(PE1KSTATE pThis, bool fOnWorkerThread)
3936{
3937 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3938 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
3939 Assert(!pSg || pSg->cSegs == 1);
3940
3941 if (cbFrame > 70) /* unqualified guess */
3942 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
3943
3944#ifdef E1K_INT_STATS
3945 if (cbFrame <= 1514)
3946 E1K_INC_ISTAT_CNT(pThis->uStatTx1514);
3947 else if (cbFrame <= 2962)
3948 E1K_INC_ISTAT_CNT(pThis->uStatTx2962);
3949 else if (cbFrame <= 4410)
3950 E1K_INC_ISTAT_CNT(pThis->uStatTx4410);
3951 else if (cbFrame <= 5858)
3952 E1K_INC_ISTAT_CNT(pThis->uStatTx5858);
3953 else if (cbFrame <= 7306)
3954 E1K_INC_ISTAT_CNT(pThis->uStatTx7306);
3955 else if (cbFrame <= 8754)
3956 E1K_INC_ISTAT_CNT(pThis->uStatTx8754);
3957 else if (cbFrame <= 16384)
3958 E1K_INC_ISTAT_CNT(pThis->uStatTx16384);
3959 else if (cbFrame <= 32768)
3960 E1K_INC_ISTAT_CNT(pThis->uStatTx32768);
3961 else
3962 E1K_INC_ISTAT_CNT(pThis->uStatTxLarge);
3963#endif /* E1K_INT_STATS */
3964
3965 /* Add VLAN tag */
3966 if (cbFrame > 12 && pThis->fVTag)
3967 {
3968 E1kLog3(("%s Inserting VLAN tag %08x\n",
3969 pThis->szPrf, RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16)));
3970 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
3971 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16);
3972 pSg->cbUsed += 4;
3973 cbFrame += 4;
3974 Assert(pSg->cbUsed == cbFrame);
3975 Assert(pSg->cbUsed <= pSg->cbAvailable);
3976 }
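    /* Illustrative frame layout after the insertion above (assuming the
     * default 802.1Q TPID 0x8100 in VET): bytes 0..11 still hold the
     * destination and source MAC addresses, bytes 12..15 now carry the tag
     * (TPID followed by the TCI from the descriptor), and the original
     * EtherType/length field starts at byte 16. */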
3977/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
3978 "%.*Rhxd\n"
3979 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
3980 pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
3981
3982 /* Update the stats */
3983 E1K_INC_CNT32(TPT);
3984 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
3985 E1K_INC_CNT32(GPTC);
3986 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
3987 E1K_INC_CNT32(BPTC);
3988 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
3989 E1K_INC_CNT32(MPTC);
3990 /* Update octet transmit counter */
3991 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
3992 if (pThis->CTX_SUFF(pDrv))
3993 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, cbFrame);
3994 if (cbFrame == 64)
3995 E1K_INC_CNT32(PTC64);
3996 else if (cbFrame < 128)
3997 E1K_INC_CNT32(PTC127);
3998 else if (cbFrame < 256)
3999 E1K_INC_CNT32(PTC255);
4000 else if (cbFrame < 512)
4001 E1K_INC_CNT32(PTC511);
4002 else if (cbFrame < 1024)
4003 E1K_INC_CNT32(PTC1023);
4004 else
4005 E1K_INC_CNT32(PTC1522);
4006
4007 E1K_INC_ISTAT_CNT(pThis->uStatTxFrm);
4008
4009 /*
4010 * Dump and send the packet.
4011 */
4012 int rc = VERR_NET_DOWN;
4013 if (pSg && pSg->pvAllocator != pThis)
4014 {
4015 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
4016
4017 pThis->CTX_SUFF(pTxSg) = NULL;
4018 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
4019 if (pDrv)
4020 {
4021 /* Release critical section to avoid deadlock in CanReceive */
4022 //e1kCsLeave(pThis);
4023 STAM_PROFILE_START(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4024 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
4025 STAM_PROFILE_STOP(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4026 //e1kCsEnter(pThis, RT_SRC_POS);
4027 }
4028 }
4029 else if (pSg)
4030 {
4031 Assert(pSg->aSegs[0].pvSeg == pThis->aTxPacketFallback);
4032 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
4033
4034 /** @todo do we actually need to check that we're in loopback mode here? */
4035 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
4036 {
4037 E1KRXDST status;
4038 RT_ZERO(status);
4039 status.fPIF = true;
4040 e1kHandleRxPacket(pThis, pSg->aSegs[0].pvSeg, cbFrame, status);
4041 rc = VINF_SUCCESS;
4042 }
4043 e1kXmitFreeBuf(pThis);
4044 }
4045 else
4046 rc = VERR_NET_DOWN;
4047 if (RT_FAILURE(rc))
4048 {
4049 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
4050 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
4051 }
4052
4053 pThis->led.Actual.s.fWriting = 0;
4054}
4055
4056/**
4057 * Compute and write internet checksum (e1kCSum16) at the specified offset.
4058 *
4059 * @param pThis The device state structure.
4060 * @param pPkt Pointer to the packet.
4061 * @param u16PktLen Total length of the packet.
4062 * @param cso Offset in packet to write checksum at.
4063 * @param css Offset in packet to start computing
4064 * checksum from.
4065 * @param cse Offset in packet to stop computing
4066 * checksum at.
4067 * @thread E1000_TX
4068 */
4069static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
4070{
4071 RT_NOREF1(pThis);
4072
4073 if (css >= u16PktLen)
4074 {
4075 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
4076 pThis->szPrf, css, u16PktLen));
4077 return;
4078 }
4079
4080 if (cso >= u16PktLen - 1)
4081 {
4082 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
4083 pThis->szPrf, cso, u16PktLen));
4084 return;
4085 }
4086
4087 if (cse == 0)
4088 cse = u16PktLen - 1;
4089 else if (cse < css)
4090 {
4091 E1kLog2(("%s css(%X) is greater than cse(%X), checksum is not inserted\n",
4092 pThis->szPrf, css, cse));
4093 return;
4094 }
4095
4096 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
4097 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", pThis->szPrf,
4098 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
4099 *(uint16_t*)(pPkt + cso) = u16ChkSum;
4100}
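/* Illustrative parameters for the helper above (assumed values, plain
 * TCP/IPv4 with 14-byte Ethernet and 20-byte IP headers): the IP header
 * checksum would use css = 14, cso = 24, cse = 33 (bytes 14..33), while the
 * TCP checksum would use css = 34, cso = 50, cse = 0, where cse = 0 makes
 * the sum run to the last byte of the packet as handled above. */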
4101
4102/**
4103 * Add a part of descriptor's buffer to transmit frame.
4104 *
4105 * @remarks data.u64BufAddr is used unconditionally for both data
4106 * and legacy descriptors since it is identical to
4107 * legacy.u64BufAddr.
4108 *
4109 * @param pThis The device state structure.
4110 * @param PhysAddr Physical address of the descriptor's buffer.
4111 * @param u16Len Length of buffer to the end of segment.
4112 * @param fSend Force packet sending.
4113 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4114 * @thread E1000_TX
4115 */
4116#ifndef E1K_WITH_TXD_CACHE
4117static void e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4118{
4119 /* TCP header being transmitted */
4120 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
4121 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4122 /* IP header being transmitted */
4123 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
4124 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4125
4126 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4127 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4128 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4129
4130 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4131 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4132 E1kLog3(("%s Dump of the segment:\n"
4133 "%.*Rhxd\n"
4134 "%s --- End of dump ---\n",
4135 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4136 pThis->u16TxPktLen += u16Len;
4137 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4138 pThis->szPrf, pThis->u16TxPktLen));
4139 if (pThis->u16HdrRemain > 0)
4140 {
4141 /* The header was not complete, check if it is now */
4142 if (u16Len >= pThis->u16HdrRemain)
4143 {
4144 /* The rest is payload */
4145 u16Len -= pThis->u16HdrRemain;
4146 pThis->u16HdrRemain = 0;
4147 /* Save partial checksum and flags */
4148 pThis->u32SavedCsum = pTcpHdr->chksum;
4149 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4150 /* Clear FIN and PSH flags now and set them only in the last segment */
4151 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4152 }
4153 else
4154 {
4155 /* Still not */
4156 pThis->u16HdrRemain -= u16Len;
4157 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4158 pThis->szPrf, pThis->u16HdrRemain));
4159 return;
4160 }
4161 }
4162
4163 pThis->u32PayRemain -= u16Len;
4164
4165 if (fSend)
4166 {
4167 /* Leave ethernet header intact */
4168 /* IP Total Length = payload + headers - ethernet header */
4169 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4170 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4171 pThis->szPrf, ntohs(pIpHdr->total_len)));
4172 /* Update IP Checksum */
4173 pIpHdr->chksum = 0;
4174 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4175 pThis->contextTSE.ip.u8CSO,
4176 pThis->contextTSE.ip.u8CSS,
4177 pThis->contextTSE.ip.u16CSE);
4178
4179 /* Update TCP flags */
4180 /* Restore original FIN and PSH flags for the last segment */
4181 if (pThis->u32PayRemain == 0)
4182 {
4183 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4184 E1K_INC_CNT32(TSCTC);
4185 }
4186 /* Add TCP length to partial pseudo header sum */
4187 uint32_t csum = pThis->u32SavedCsum
4188 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4189 while (csum >> 16)
4190 csum = (csum >> 16) + (csum & 0xFFFF);
4191 pTcpHdr->chksum = csum;
4192 /* Compute final checksum */
4193 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4194 pThis->contextTSE.tu.u8CSO,
4195 pThis->contextTSE.tu.u8CSS,
4196 pThis->contextTSE.tu.u16CSE);
4197
4198 /*
4199 * Transmit it. If we've used the SG already, allocate a new one before
4200 * we copy the data.
4201 */
4202 if (!pThis->CTX_SUFF(pTxSg))
4203 e1kXmitAllocBuf(pThis, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
4204 if (pThis->CTX_SUFF(pTxSg))
4205 {
4206 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4207 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4208 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4209 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4210 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4211 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4212 }
4213 e1kTransmitFrame(pThis, fOnWorkerThread);
4214
4215 /* Update Sequence Number */
4216 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4217 - pThis->contextTSE.dw3.u8HDRLEN);
4218 /* Increment IP identification */
4219 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4220 }
4221}
4222#else /* E1K_WITH_TXD_CACHE */
4223static int e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4224{
4225 int rc = VINF_SUCCESS;
4226 /* TCP header being transmitted */
4227 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
4228 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4229 /* IP header being transmitted */
4230 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
4231 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4232
4233 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4234 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4235 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4236
4237 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4238 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4239 E1kLog3(("%s Dump of the segment:\n"
4240 "%.*Rhxd\n"
4241 "%s --- End of dump ---\n",
4242 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4243 pThis->u16TxPktLen += u16Len;
4244 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4245 pThis->szPrf, pThis->u16TxPktLen));
4246 if (pThis->u16HdrRemain > 0)
4247 {
4248 /* The header was not complete, check if it is now */
4249 if (u16Len >= pThis->u16HdrRemain)
4250 {
4251 /* The rest is payload */
4252 u16Len -= pThis->u16HdrRemain;
4253 pThis->u16HdrRemain = 0;
4254 /* Save partial checksum and flags */
4255 pThis->u32SavedCsum = pTcpHdr->chksum;
4256 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4257 /* Clear FIN and PSH flags now and set them only in the last segment */
4258 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4259 }
4260 else
4261 {
4262 /* Still not */
4263 pThis->u16HdrRemain -= u16Len;
4264 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4265 pThis->szPrf, pThis->u16HdrRemain));
4266 return rc;
4267 }
4268 }
4269
4270 pThis->u32PayRemain -= u16Len;
4271
4272 if (fSend)
4273 {
4274 /* Leave ethernet header intact */
4275 /* IP Total Length = payload + headers - ethernet header */
4276 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4277 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4278 pThis->szPrf, ntohs(pIpHdr->total_len)));
4279 /* Update IP Checksum */
4280 pIpHdr->chksum = 0;
4281 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4282 pThis->contextTSE.ip.u8CSO,
4283 pThis->contextTSE.ip.u8CSS,
4284 pThis->contextTSE.ip.u16CSE);
4285
4286 /* Update TCP flags */
4287 /* Restore original FIN and PSH flags for the last segment */
4288 if (pThis->u32PayRemain == 0)
4289 {
4290 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4291 E1K_INC_CNT32(TSCTC);
4292 }
4293 /* Add TCP length to partial pseudo header sum */
4294 uint32_t csum = pThis->u32SavedCsum
4295 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4296 while (csum >> 16)
4297 csum = (csum >> 16) + (csum & 0xFFFF);
4298 pTcpHdr->chksum = csum;
4299 /* Compute final checksum */
4300 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4301 pThis->contextTSE.tu.u8CSO,
4302 pThis->contextTSE.tu.u8CSS,
4303 pThis->contextTSE.tu.u16CSE);
4304
4305 /*
4306 * Transmit it.
4307 */
4308 if (pThis->CTX_SUFF(pTxSg))
4309 {
4310 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4311 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4312 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4313 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4314 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4315 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4316 }
4317 e1kTransmitFrame(pThis, fOnWorkerThread);
4318
4319 /* Update Sequence Number */
4320 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4321 - pThis->contextTSE.dw3.u8HDRLEN);
4322 /* Increment IP identification */
4323 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4324
4325 /* Allocate new buffer for the next segment. */
4326 if (pThis->u32PayRemain)
4327 {
4328 pThis->cbTxAlloc = RT_MIN(pThis->u32PayRemain,
4329 pThis->contextTSE.dw3.u16MSS)
4330 + pThis->contextTSE.dw3.u8HDRLEN
4331 + (pThis->fVTag ? 4 : 0);
4332 rc = e1kXmitAllocBuf(pThis, false /* fGSO */);
4333 }
4334 }
4335
4336 return rc;
4337}
4338#endif /* E1K_WITH_TXD_CACHE */
4339
4340#ifndef E1K_WITH_TXD_CACHE
4341/**
4342 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4343 * frame.
4344 *
4345 * We construct the frame in the fallback buffer first and then copy it to the SG
4346 * buffer before passing it down to the network driver code.
4347 *
4348 * @returns true if the frame should be transmitted, false if not.
4349 *
4350 * @param pThis The device state structure.
4351 * @param pDesc Pointer to the descriptor to transmit.
4352 * @param cbFragment Length of descriptor's buffer.
4353 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4354 * @thread E1000_TX
4355 */
4356static bool e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4357{
4358 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4359 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4360 Assert(pDesc->data.cmd.fTSE);
4361 Assert(!e1kXmitIsGsoBuf(pTxSg));
4362
4363 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4364 Assert(u16MaxPktLen != 0);
4365 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4366
4367 /*
4368 * Carve out segments.
4369 */
4370 do
4371 {
4372 /* Calculate how many bytes we have left in this TCP segment */
4373 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4374 if (cb > cbFragment)
4375 {
4376 /* This descriptor fits completely into current segment */
4377 cb = cbFragment;
4378 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4379 }
4380 else
4381 {
4382 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4383 /*
4384 * Rewind the packet tail pointer to the beginning of payload,
4385 * so we continue writing right beyond the header.
4386 */
4387 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4388 }
4389
4390 pDesc->data.u64BufAddr += cb;
4391 cbFragment -= cb;
4392 } while (cbFragment > 0);
4393
4394 if (pDesc->data.cmd.fEOP)
4395 {
4396 /* End of packet, next segment will contain header. */
4397 if (pThis->u32PayRemain != 0)
4398 E1K_INC_CNT32(TSCTFC);
4399 pThis->u16TxPktLen = 0;
4400 e1kXmitFreeBuf(pThis);
4401 }
4402
4403 return false;
4404}
4405#else /* E1K_WITH_TXD_CACHE */
4406/**
4407 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4408 * frame.
4409 *
4410 * We construct the frame in the fallback buffer first and then copy it to the SG
4411 * buffer before passing it down to the network driver code.
4412 *
4413 * @returns VBox status code.
4414 *
4415 * @param pThis The device state structure.
4416 * @param pDesc Pointer to the descriptor to transmit.
4418 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4419 * @thread E1000_TX
4420 */
4421static int e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, bool fOnWorkerThread)
4422{
4423#ifdef VBOX_STRICT
4424 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4425 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4426 Assert(pDesc->data.cmd.fTSE);
4427 Assert(!e1kXmitIsGsoBuf(pTxSg));
4428#endif
4429
4430 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4431
4432 /*
4433 * Carve out segments.
4434 */
4435 int rc = VINF_SUCCESS;
4436 do
4437 {
4438 /* Calculate how many bytes we have left in this TCP segment */
4439 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4440 if (cb > pDesc->data.cmd.u20DTALEN)
4441 {
4442 /* This descriptor fits completely into current segment */
4443 cb = pDesc->data.cmd.u20DTALEN;
4444 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4445 }
4446 else
4447 {
4448 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4449 /*
4450 * Rewind the packet tail pointer to the beginning of payload,
4451 * so we continue writing right beyond the header.
4452 */
4453 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4454 }
4455
4456 pDesc->data.u64BufAddr += cb;
4457 pDesc->data.cmd.u20DTALEN -= cb;
4458 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4459
4460 if (pDesc->data.cmd.fEOP)
4461 {
4462 /* End of packet, next segment will contain header. */
4463 if (pThis->u32PayRemain != 0)
4464 E1K_INC_CNT32(TSCTFC);
4465 pThis->u16TxPktLen = 0;
4466 e1kXmitFreeBuf(pThis);
4467 }
4468
4469 return VINF_SUCCESS; /// @todo consider rc;
4470}
4471#endif /* E1K_WITH_TXD_CACHE */
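/* Worked example of the TSE fallback segmentation above (illustrative
 * values): with HDRLEN = 54 and MSS = 1460 the per-segment limit is
 * 54 + 1460 = 1514 bytes, so a context with PAYLEN = 4000 goes out as three
 * frames carrying 1460, 1460 and 1080 payload bytes, each prefixed with a
 * copy of the same 54-byte header; FIN/PSH are restored only on the last
 * one, as done in e1kFallbackAddSegment(). */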
4472
4473
4474/**
4475 * Add descriptor's buffer to transmit frame.
4476 *
4477 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4478 * TSE frames we cannot handle as GSO.
4479 *
4480 * @returns true on success, false on failure.
4481 *
4482 * @param pThis The device state structure.
4483 * @param PhysAddr The physical address of the descriptor buffer.
4484 * @param cbFragment Length of descriptor's buffer.
4485 * @thread E1000_TX
4486 */
4487static bool e1kAddToFrame(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint32_t cbFragment)
4488{
4489 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4490 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4491 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4492
4493 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4494 {
4495 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4496 return false;
4497 }
4498 if (RT_UNLIKELY( fGso && cbNewPkt > pTxSg->cbAvailable ))
4499 {
4500 E1kLog(("%s Transmit packet is too large: %u > %u(max)/GSO\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
4501 return false;
4502 }
4503
4504 if (RT_LIKELY(pTxSg))
4505 {
4506 Assert(pTxSg->cSegs == 1);
4507 Assert(pTxSg->cbUsed == pThis->u16TxPktLen);
4508
4509 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4510 (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4511
4512 pTxSg->cbUsed = cbNewPkt;
4513 }
4514 pThis->u16TxPktLen = cbNewPkt;
4515
4516 return true;
4517}
4518
4519
4520/**
4521 * Write the descriptor back to guest memory and notify the guest.
4522 *
4523 * @param pThis The device state structure.
4524 * @param   pDesc       Pointer to the descriptor that has been transmitted.
4525 * @param addr Physical address of the descriptor in guest memory.
4526 * @thread E1000_TX
4527 */
4528static void e1kDescReport(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4529{
4530 /*
4531 * We fake descriptor write-back bursting. Descriptors are written back as they are
4532 * processed.
4533 */
4534 /* Let's pretend we process descriptors. Write back with DD set. */
4535 /*
4536     * Prior to r71586 we tried to accommodate the case where write-back bursts
4537     * are enabled, without actually implementing bursting, by writing back all
4538     * descriptors, even the ones that do not have RS set. This caused kernel
4539     * panics with Linux SMP kernels, as the e1000 driver tried to free the skb
4540     * associated with a written-back descriptor even when it happened to be a
4541     * context descriptor, and context descriptors have no skb associated with
4542     * them. Starting with r71586 we write back only the descriptors with RS set,
4543     * which differs slightly from what the real hardware does when a chain of
4544     * data descriptors has RS set on some of them and not on others. That is a
4545     * very uncommon scenario, though.
4546 * We need to check RPS as well since some legacy drivers use it instead of
4547 * RS even with newer cards.
4548 */
4549 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4550 {
4551 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4552 e1kWriteBackDesc(pThis, pDesc, addr);
4553 if (pDesc->legacy.cmd.fEOP)
4554 {
4555//#ifdef E1K_USE_TX_TIMERS
4556 if (pThis->fTidEnabled && pDesc->legacy.cmd.fIDE)
4557 {
4558 E1K_INC_ISTAT_CNT(pThis->uStatTxIDE);
4559 //if (pThis->fIntRaised)
4560 //{
4561 // /* Interrupt is already pending, no need for timers */
4562 // ICR |= ICR_TXDW;
4563 //}
4564 //else {
4565                /* Arm the timer to fire in TIDV usec (discard .024) */
4566 e1kArmTimer(pThis, pThis->CTX_SUFF(pTIDTimer), TIDV);
4567# ifndef E1K_NO_TAD
4568 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4569 E1kLog2(("%s Checking if TAD timer is running\n",
4570 pThis->szPrf));
4571 if (TADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pTADTimer)))
4572 e1kArmTimer(pThis, pThis->CTX_SUFF(pTADTimer), TADV);
4573# endif /* E1K_NO_TAD */
4574 }
4575 else
4576 {
4577 if (pThis->fTidEnabled)
4578 {
4579 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4580 pThis->szPrf));
4581 /* Cancel both timers if armed and fire immediately. */
4582# ifndef E1K_NO_TAD
4583 TMTimerStop(pThis->CTX_SUFF(pTADTimer));
4584# endif
4585 TMTimerStop(pThis->CTX_SUFF(pTIDTimer));
4586 }
4587//#endif /* E1K_USE_TX_TIMERS */
4588 E1K_INC_ISTAT_CNT(pThis->uStatIntTx);
4589 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXDW);
4590//#ifdef E1K_USE_TX_TIMERS
4591 }
4592//#endif /* E1K_USE_TX_TIMERS */
4593 }
4594 }
4595 else
4596 {
4597 E1K_INC_ISTAT_CNT(pThis->uStatTxNoRS);
4598 }
4599}
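/*
 * Illustrative sketch only, excluded from the build: the reporting policy
 * implemented by e1kDescReport() above, reduced to its decision tree. The
 * enum and helper names are hypothetical.
 */
#if 0 /* example only */
typedef enum E1KEXAMPLEREPORT
{
    E1KEXAMPLEREPORT_NONE,        /* neither RS nor RPS: no write-back at all        */
    E1KEXAMPLEREPORT_WRITEBACK,   /* write DD back; not EOP, so no interrupt yet     */
    E1KEXAMPLEREPORT_ARM_TIMERS,  /* write DD back; EOP + IDE: delay TXDW via timers */
    E1KEXAMPLEREPORT_IRQ_NOW      /* write DD back; EOP, no delay: raise TXDW now    */
} E1KEXAMPLEREPORT;

static E1KEXAMPLEREPORT e1kExampleReportAction(bool fRS, bool fRPS, bool fEOP,
                                               bool fTidEnabled, bool fIDE)
{
    if (!fRS && !fRPS)
        return E1KEXAMPLEREPORT_NONE;
    if (!fEOP)
        return E1KEXAMPLEREPORT_WRITEBACK;
    return fTidEnabled && fIDE ? E1KEXAMPLEREPORT_ARM_TIMERS
                               : E1KEXAMPLEREPORT_IRQ_NOW;
}
#endif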
4600
4601#ifndef E1K_WITH_TXD_CACHE
4602
4603/**
4604 * Process Transmit Descriptor.
4605 *
4606 * E1000 supports three types of transmit descriptors:
4607 * - legacy data descriptors of older format (context-less).
4608 * - data the same as legacy but providing new offloading capabilities.
4609 * - context sets up the context for following data descriptors.
4610 *
4611 * @param pThis The device state structure.
4612 * @param pDesc Pointer to descriptor union.
4613 * @param addr Physical address of descriptor in guest memory.
4614 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4615 * @thread E1000_TX
4616 */
4617static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr, bool fOnWorkerThread)
4618{
4619 int rc = VINF_SUCCESS;
4620 uint32_t cbVTag = 0;
4621
4622 e1kPrintTDesc(pThis, pDesc, "vvv");
4623
4624//#ifdef E1K_USE_TX_TIMERS
4625 if (pThis->fTidEnabled)
4626 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
4627//#endif /* E1K_USE_TX_TIMERS */
4628
4629 switch (e1kGetDescType(pDesc))
4630 {
4631 case E1K_DTYP_CONTEXT:
4632 if (pDesc->context.dw2.fTSE)
4633 {
4634 pThis->contextTSE = pDesc->context;
4635 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4636 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4637 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4638 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4639 }
4640 else
4641 {
4642 pThis->contextNormal = pDesc->context;
4643 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4644 }
4645 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4646 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4647 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4648 pDesc->context.ip.u8CSS,
4649 pDesc->context.ip.u8CSO,
4650 pDesc->context.ip.u16CSE,
4651 pDesc->context.tu.u8CSS,
4652 pDesc->context.tu.u8CSO,
4653 pDesc->context.tu.u16CSE));
4654 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4655 e1kDescReport(pThis, pDesc, addr);
4656 break;
4657
4658 case E1K_DTYP_DATA:
4659 {
4660 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4661 {
4662                E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4663 /** @todo Same as legacy when !TSE. See below. */
4664 break;
4665 }
4666 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4667 &pThis->StatTxDescTSEData:
4668 &pThis->StatTxDescData);
4669 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4670 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4671
4672 /*
4673 * The last descriptor of non-TSE packet must contain VLE flag.
4674 * TSE packets have VLE flag in the first descriptor. The later
4675 * case is taken care of a bit later when cbVTag gets assigned.
4676 *
4677 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4678 */
4679 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4680 {
4681 pThis->fVTag = pDesc->data.cmd.fVLE;
4682 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4683 }
4684 /*
4685 * First fragment: Allocate new buffer and save the IXSM and TXSM
4686 * packet options as these are only valid in the first fragment.
4687 */
4688 if (pThis->u16TxPktLen == 0)
4689 {
4690 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4691 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4692 E1kLog2(("%s Saving checksum flags:%s%s; \n", pThis->szPrf,
4693 pThis->fIPcsum ? " IP" : "",
4694 pThis->fTCPcsum ? " TCP/UDP" : ""));
4695 if (pDesc->data.cmd.fTSE)
4696 {
4697 /* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
4698 pThis->fVTag = pDesc->data.cmd.fVLE;
4699 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4700 cbVTag = pThis->fVTag ? 4 : 0;
4701 }
4702 else if (pDesc->data.cmd.fEOP)
4703 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4704 else
4705 cbVTag = 4;
4706 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4707 if (e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE))
4708 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4709 true /*fExactSize*/, true /*fGso*/);
4710 else if (pDesc->data.cmd.fTSE)
4711 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4712 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4713 else
4714 rc = e1kXmitAllocBuf(pThis, pDesc->data.cmd.u20DTALEN + cbVTag,
4715 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4716
4717 /**
4718 * @todo: Perhaps it is not that simple for GSO packets! We may
4719 * need to unwind some changes.
4720 */
4721 if (RT_FAILURE(rc))
4722 {
4723 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4724 break;
4725 }
4726                /** @todo Is there any way to indicate errors other than collisions? Like
4727 * VERR_NET_DOWN. */
4728 }
4729
4730 /*
4731 * Add the descriptor data to the frame. If the frame is complete,
4732 * transmit it and reset the u16TxPktLen field.
4733 */
4734 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4735 {
4736 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4737 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4738 if (pDesc->data.cmd.fEOP)
4739 {
4740 if ( fRc
4741 && pThis->CTX_SUFF(pTxSg)
4742 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4743 {
4744 e1kTransmitFrame(pThis, fOnWorkerThread);
4745 E1K_INC_CNT32(TSCTC);
4746 }
4747 else
4748 {
4749 if (fRc)
4750 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4751 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4752 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4753 e1kXmitFreeBuf(pThis);
4754 E1K_INC_CNT32(TSCTFC);
4755 }
4756 pThis->u16TxPktLen = 0;
4757 }
4758 }
4759 else if (!pDesc->data.cmd.fTSE)
4760 {
4761 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4762 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4763 if (pDesc->data.cmd.fEOP)
4764 {
4765 if (fRc && pThis->CTX_SUFF(pTxSg))
4766 {
4767 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4768 if (pThis->fIPcsum)
4769 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4770 pThis->contextNormal.ip.u8CSO,
4771 pThis->contextNormal.ip.u8CSS,
4772 pThis->contextNormal.ip.u16CSE);
4773 if (pThis->fTCPcsum)
4774 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4775 pThis->contextNormal.tu.u8CSO,
4776 pThis->contextNormal.tu.u8CSS,
4777 pThis->contextNormal.tu.u16CSE);
4778 e1kTransmitFrame(pThis, fOnWorkerThread);
4779 }
4780 else
4781 e1kXmitFreeBuf(pThis);
4782 pThis->u16TxPktLen = 0;
4783 }
4784 }
4785 else
4786 {
4787 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4788 e1kFallbackAddToFrame(pThis, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4789 }
4790
4791 e1kDescReport(pThis, pDesc, addr);
4792 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4793 break;
4794 }
4795
4796 case E1K_DTYP_LEGACY:
4797 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4798 {
4799 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4800 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4801 break;
4802 }
4803 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4804 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4805
4806 /* First fragment: allocate new buffer. */
4807 if (pThis->u16TxPktLen == 0)
4808 {
4809 if (pDesc->legacy.cmd.fEOP)
4810 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4811 else
4812 cbVTag = 4;
4813 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4814 /** @todo reset status bits? */
4815 rc = e1kXmitAllocBuf(pThis, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
4816 if (RT_FAILURE(rc))
4817 {
4818 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4819 break;
4820 }
4821
4822                /** @todo Is there any way to indicate errors other than collisions? Like
4823 * VERR_NET_DOWN. */
4824 }
4825
4826 /* Add fragment to frame. */
4827 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4828 {
4829 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4830
4831 /* Last fragment: Transmit and reset the packet storage counter. */
4832 if (pDesc->legacy.cmd.fEOP)
4833 {
4834 pThis->fVTag = pDesc->legacy.cmd.fVLE;
4835 pThis->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4836 /** @todo Offload processing goes here. */
4837 e1kTransmitFrame(pThis, fOnWorkerThread);
4838 pThis->u16TxPktLen = 0;
4839 }
4840 }
4841 /* Last fragment + failure: free the buffer and reset the storage counter. */
4842 else if (pDesc->legacy.cmd.fEOP)
4843 {
4844 e1kXmitFreeBuf(pThis);
4845 pThis->u16TxPktLen = 0;
4846 }
4847
4848 e1kDescReport(pThis, pDesc, addr);
4849 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4850 break;
4851
4852 default:
4853 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4854 pThis->szPrf, e1kGetDescType(pDesc)));
4855 break;
4856 }
4857
4858 return rc;
4859}
4860
4861#else /* E1K_WITH_TXD_CACHE */
4862
4863/**
4864 * Process Transmit Descriptor.
4865 *
4866 * E1000 supports three types of transmit descriptors:
4867 * - legacy data descriptors of older format (context-less).
4868 * - data the same as legacy but providing new offloading capabilities.
4869 * - context sets up the context for following data descriptors.
4870 *
4871 * @param pThis The device state structure.
4872 * @param pDesc Pointer to descriptor union.
4873 * @param addr Physical address of descriptor in guest memory.
4874 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4876 * @thread E1000_TX
4877 */
4878static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr,
4879 bool fOnWorkerThread)
4880{
4881 int rc = VINF_SUCCESS;
4882
4883 e1kPrintTDesc(pThis, pDesc, "vvv");
4884
4885//#ifdef E1K_USE_TX_TIMERS
4886 if (pThis->fTidEnabled)
4887 TMTimerStop(pThis->CTX_SUFF(pTIDTimer));
4888//#endif /* E1K_USE_TX_TIMERS */
4889
4890 switch (e1kGetDescType(pDesc))
4891 {
4892 case E1K_DTYP_CONTEXT:
4893            /* The caller has already updated the context. */
4894 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4895 e1kDescReport(pThis, pDesc, addr);
4896 break;
4897
4898 case E1K_DTYP_DATA:
4899 {
4900 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4901 &pThis->StatTxDescTSEData:
4902 &pThis->StatTxDescData);
4903 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4904 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4905 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4906 {
4907                E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4908 }
4909 else
4910 {
4911 /*
4912 * Add the descriptor data to the frame. If the frame is complete,
4913 * transmit it and reset the u16TxPktLen field.
4914 */
4915 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4916 {
4917 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4918 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4919 if (pDesc->data.cmd.fEOP)
4920 {
4921 if ( fRc
4922 && pThis->CTX_SUFF(pTxSg)
4923 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4924 {
4925 e1kTransmitFrame(pThis, fOnWorkerThread);
4926 E1K_INC_CNT32(TSCTC);
4927 }
4928 else
4929 {
4930 if (fRc)
4931 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4932 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4933 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4934 e1kXmitFreeBuf(pThis);
4935 E1K_INC_CNT32(TSCTFC);
4936 }
4937 pThis->u16TxPktLen = 0;
4938 }
4939 }
4940 else if (!pDesc->data.cmd.fTSE)
4941 {
4942 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4943 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4944 if (pDesc->data.cmd.fEOP)
4945 {
4946 if (fRc && pThis->CTX_SUFF(pTxSg))
4947 {
4948 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4949 if (pThis->fIPcsum)
4950 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4951 pThis->contextNormal.ip.u8CSO,
4952 pThis->contextNormal.ip.u8CSS,
4953 pThis->contextNormal.ip.u16CSE);
4954 if (pThis->fTCPcsum)
4955 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4956 pThis->contextNormal.tu.u8CSO,
4957 pThis->contextNormal.tu.u8CSS,
4958 pThis->contextNormal.tu.u16CSE);
4959 e1kTransmitFrame(pThis, fOnWorkerThread);
4960 }
4961 else
4962 e1kXmitFreeBuf(pThis);
4963 pThis->u16TxPktLen = 0;
4964 }
4965 }
4966 else
4967 {
4968 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4969 rc = e1kFallbackAddToFrame(pThis, pDesc, fOnWorkerThread);
4970 }
4971 }
4972 e1kDescReport(pThis, pDesc, addr);
4973 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4974 break;
4975 }
4976
4977 case E1K_DTYP_LEGACY:
4978 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4979 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4980 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4981 {
4982 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4983 }
4984 else
4985 {
4986 /* Add fragment to frame. */
4987 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4988 {
4989 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4990
4991 /* Last fragment: Transmit and reset the packet storage counter. */
4992 if (pDesc->legacy.cmd.fEOP)
4993 {
4994 if (pDesc->legacy.cmd.fIC)
4995 {
4996 e1kInsertChecksum(pThis,
4997 (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
4998 pThis->u16TxPktLen,
4999 pDesc->legacy.cmd.u8CSO,
5000 pDesc->legacy.dw3.u8CSS,
5001 0);
5002 }
5003 e1kTransmitFrame(pThis, fOnWorkerThread);
5004 pThis->u16TxPktLen = 0;
5005 }
5006 }
5007 /* Last fragment + failure: free the buffer and reset the storage counter. */
5008 else if (pDesc->legacy.cmd.fEOP)
5009 {
5010 e1kXmitFreeBuf(pThis);
5011 pThis->u16TxPktLen = 0;
5012 }
5013 }
5014 e1kDescReport(pThis, pDesc, addr);
5015 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5016 break;
5017
5018 default:
5019 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
5020 pThis->szPrf, e1kGetDescType(pDesc)));
5021 break;
5022 }
5023
5024 return rc;
5025}
5026
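/**
 * Update the transmit context from a context descriptor.
 *
 * Stores the TSE or normal context in the device state. For TSE contexts the
 * MSS is clamped so that a single segment cannot exceed the maximum packet
 * size, and the GSO context is (re)initialized.
 *
 * @param   pThis       The device state structure.
 * @param   pDesc       Pointer to the context descriptor.
 * @thread  E1000_TX
 */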
5027DECLINLINE(void) e1kUpdateTxContext(PE1KSTATE pThis, E1KTXDESC *pDesc)
5028{
5029 if (pDesc->context.dw2.fTSE)
5030 {
5031 pThis->contextTSE = pDesc->context;
5032 uint32_t cbMaxSegmentSize = pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + 4; /*VTAG*/
5033 if (RT_UNLIKELY(cbMaxSegmentSize > E1K_MAX_TX_PKT_SIZE))
5034 {
5035 pThis->contextTSE.dw3.u16MSS = E1K_MAX_TX_PKT_SIZE - pThis->contextTSE.dw3.u8HDRLEN - 4; /*VTAG*/
5036 LogRelMax(10, ("%s: Transmit packet is too large: %u > %u(max). Adjusted MSS to %u.\n",
5037 pThis->szPrf, cbMaxSegmentSize, E1K_MAX_TX_PKT_SIZE, pThis->contextTSE.dw3.u16MSS));
5038 }
5039 pThis->u32PayRemain = pThis->contextTSE.dw2.u20PAYLEN;
5040 pThis->u16HdrRemain = pThis->contextTSE.dw3.u8HDRLEN;
5041 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
5042 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
5043 }
5044 else
5045 {
5046 pThis->contextNormal = pDesc->context;
5047 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
5048 }
5049 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
5050 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
5051 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
5052 pDesc->context.ip.u8CSS,
5053 pDesc->context.ip.u8CSO,
5054 pDesc->context.ip.u16CSE,
5055 pDesc->context.tu.u8CSS,
5056 pDesc->context.tu.u8CSO,
5057 pDesc->context.tu.u16CSE));
5058}
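/*
 * Illustrative sketch only, excluded from the build: the MSS clamping done in
 * e1kUpdateTxContext() above. A TSE segment on the wire occupies HDRLEN + MSS
 * bytes plus 4 bytes for a possible VLAN tag and must not exceed
 * E1K_MAX_TX_PKT_SIZE, so an oversized MSS supplied by the guest is reduced
 * accordingly. The helper name is hypothetical.
 */
#if 0 /* example only */
static uint16_t e1kExampleClampMss(uint16_t u16Mss, uint8_t u8HdrLen, uint32_t cbMax)
{
    uint32_t cbSegment = (uint32_t)u16Mss + u8HdrLen + 4; /* +4 for the VLAN tag */
    if (cbSegment > cbMax)
        return (uint16_t)(cbMax - u8HdrLen - 4);
    return u16Mss;
}
#endif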
5059
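/**
 * Locate a complete packet in the transmit descriptor cache.
 *
 * Walks the fetched descriptors starting at iTxDCurrent, applying context
 * descriptors on the way, until a descriptor with EOP set is found. On
 * success the VLAN tag, checksum offload flags and GSO capability are
 * recorded and cbTxAlloc is set to the buffer size needed for the packet
 * (or for its first segment if we have to do segmentation ourselves).
 *
 * @returns true if a complete packet (or a run of empty descriptors) was
 *          found, false if more descriptors need to be fetched.
 * @param   pThis       The device state structure.
 * @thread  E1000_TX
 */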
5060static bool e1kLocateTxPacket(PE1KSTATE pThis)
5061{
5062 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
5063 pThis->szPrf, pThis->cbTxAlloc));
5064 /* Check if we have located the packet already. */
5065 if (pThis->cbTxAlloc)
5066 {
5067 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
5068 pThis->szPrf, pThis->cbTxAlloc));
5069 return true;
5070 }
5071
5072 bool fTSE = false;
5073 uint32_t cbPacket = 0;
5074
5075 for (int i = pThis->iTxDCurrent; i < pThis->nTxDFetched; ++i)
5076 {
5077 E1KTXDESC *pDesc = &pThis->aTxDescriptors[i];
5078 switch (e1kGetDescType(pDesc))
5079 {
5080 case E1K_DTYP_CONTEXT:
5081 e1kUpdateTxContext(pThis, pDesc);
5082 continue;
5083 case E1K_DTYP_LEGACY:
5084 /* Skip empty descriptors. */
5085 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
5086 break;
5087 cbPacket += pDesc->legacy.cmd.u16Length;
5088 pThis->fGSO = false;
5089 break;
5090 case E1K_DTYP_DATA:
5091 /* Skip empty descriptors. */
5092 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
5093 break;
5094 if (cbPacket == 0)
5095 {
5096 /*
5097 * The first fragment: save IXSM and TXSM options
5098 * as these are only valid in the first fragment.
5099 */
5100 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
5101 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
5102 fTSE = pDesc->data.cmd.fTSE;
5103 /*
5104 * TSE descriptors have VLE bit properly set in
5105 * the first fragment.
5106 */
5107 if (fTSE)
5108 {
5109 pThis->fVTag = pDesc->data.cmd.fVLE;
5110 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5111 }
5112 pThis->fGSO = e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE);
5113 }
5114 cbPacket += pDesc->data.cmd.u20DTALEN;
5115 break;
5116 default:
5117 AssertMsgFailed(("Impossible descriptor type!"));
5118 }
5119 if (pDesc->legacy.cmd.fEOP)
5120 {
5121 /*
5122 * Non-TSE descriptors have VLE bit properly set in
5123 * the last fragment.
5124 */
5125 if (!fTSE)
5126 {
5127 pThis->fVTag = pDesc->data.cmd.fVLE;
5128 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5129 }
5130 /*
5131 * Compute the required buffer size. If we cannot do GSO but still
5132 * have to do segmentation we allocate the first segment only.
5133 */
5134 pThis->cbTxAlloc = (!fTSE || pThis->fGSO) ?
5135 cbPacket :
5136 RT_MIN(cbPacket, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN);
5137 if (pThis->fVTag)
5138 pThis->cbTxAlloc += 4;
5139 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
5140 pThis->szPrf, pThis->cbTxAlloc));
5141 return true;
5142 }
5143 }
5144
5145 if (cbPacket == 0 && pThis->nTxDFetched - pThis->iTxDCurrent > 0)
5146 {
5147 /* All descriptors were empty, we need to process them as a dummy packet */
5148 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
5149 pThis->szPrf, pThis->cbTxAlloc));
5150 return true;
5151 }
5152 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d\n",
5153 pThis->szPrf, pThis->cbTxAlloc));
5154 return false;
5155}
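/*
 * Illustrative sketch only, excluded from the build: how e1kLocateTxPacket()
 * above sizes the transmit buffer once a complete packet has been found. GSO
 * and non-TSE packets get a buffer for the whole packet, while a TSE packet
 * that we have to segment ourselves only needs room for one segment
 * (HDRLEN + MSS). Four bytes are added when a VLAN tag will be inserted.
 * The helper name is hypothetical.
 */
#if 0 /* example only */
static uint32_t e1kExampleCalcTxAlloc(uint32_t cbPacket, bool fTSE, bool fGSO,
                                      bool fVTag, uint16_t u16Mss, uint8_t u8HdrLen)
{
    uint32_t cbAlloc = (!fTSE || fGSO)
                     ? cbPacket
                     : RT_MIN(cbPacket, (uint32_t)u16Mss + u8HdrLen);
    if (fVTag)
        cbAlloc += 4; /* room for the 802.1q tag */
    return cbAlloc;
}
#endif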
5156
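/**
 * Transmit the packet currently held in the transmit descriptor cache.
 *
 * Processes cached descriptors up to and including the end-of-packet
 * descriptor, advancing TDH (with wrap-around) for each one and raising
 * ICR.TXD_LOW when the number of unprocessed descriptors drops to the low
 * threshold configured in TXDCTL.
 *
 * @returns VBox status code.
 * @param   pThis           The device state structure.
 * @param   fOnWorkerThread Whether we're on a worker thread or an EMT.
 * @thread  E1000_TX
 */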
5157static int e1kXmitPacket(PE1KSTATE pThis, bool fOnWorkerThread)
5158{
5159 int rc = VINF_SUCCESS;
5160
5161 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
5162 pThis->szPrf, pThis->iTxDCurrent, pThis->nTxDFetched));
5163
5164 while (pThis->iTxDCurrent < pThis->nTxDFetched)
5165 {
5166 E1KTXDESC *pDesc = &pThis->aTxDescriptors[pThis->iTxDCurrent];
5167 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5168 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
5169 rc = e1kXmitDesc(pThis, pDesc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5170 if (RT_FAILURE(rc))
5171 break;
5172 if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
5173 TDH = 0;
5174 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
5175 if (uLowThreshold != 0 && e1kGetTxLen(pThis) <= uLowThreshold)
5176 {
5177 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5178 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5179 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5180 }
5181 ++pThis->iTxDCurrent;
5182 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
5183 break;
5184 }
5185
5186 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
5187 pThis->szPrf, rc, pThis->iTxDCurrent, pThis->nTxDFetched));
5188 return rc;
5189}
5190
5191#endif /* E1K_WITH_TXD_CACHE */
5192#ifndef E1K_WITH_TXD_CACHE
5193
5194/**
5195 * Transmit pending descriptors.
5196 *
5197 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5198 *
5199 * @param pThis The E1000 state.
5200 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5201 */
5202static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5203{
5204 int rc = VINF_SUCCESS;
5205
5206 /* Check if transmitter is enabled. */
5207 if (!(TCTL & TCTL_EN))
5208 return VINF_SUCCESS;
5209 /*
5210 * Grab the xmit lock of the driver as well as the E1K device state.
5211 */
5212 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5213 if (RT_LIKELY(rc == VINF_SUCCESS))
5214 {
5215 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5216 if (pDrv)
5217 {
5218 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5219 if (RT_FAILURE(rc))
5220 {
5221 e1kCsTxLeave(pThis);
5222 return rc;
5223 }
5224 }
5225 /*
5226 * Process all pending descriptors.
5227 * Note! Do not process descriptors in locked state
5228 */
5229 while (TDH != TDT && !pThis->fLocked)
5230 {
5231 E1KTXDESC desc;
5232 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5233 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
5234
5235 e1kLoadDesc(pThis, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
5236 rc = e1kXmitDesc(pThis, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5237 /* If we failed to transmit descriptor we will try it again later */
5238 if (RT_FAILURE(rc))
5239 break;
5240 if (++TDH * sizeof(desc) >= TDLEN)
5241 TDH = 0;
5242
5243 if (e1kGetTxLen(pThis) <= GET_BITS(TXDCTL, LWTHRESH)*8)
5244 {
5245 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5246 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5247 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5248 }
5249
5250 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5251 }
5252
5253 /// @todo uncomment: pThis->uStatIntTXQE++;
5254 /// @todo uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5255 /*
5256 * Release the lock.
5257 */
5258 if (pDrv)
5259 pDrv->pfnEndXmit(pDrv);
5260 e1kCsTxLeave(pThis);
5261 }
5262
5263 return rc;
5264}
5265
5266#else /* E1K_WITH_TXD_CACHE */
5267
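/**
 * Dump the transmit descriptor ring and the descriptor cache to the release
 * log; used when no complete packet can be located in the cache.
 *
 * @param   pThis       The device state structure.
 */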
5268static void e1kDumpTxDCache(PE1KSTATE pThis)
5269{
5270 unsigned i, cDescs = TDLEN / sizeof(E1KTXDESC);
5271 uint32_t tdh = TDH;
5272 LogRel(("E1000: -- Transmit Descriptors (%d total) --\n", cDescs));
5273 for (i = 0; i < cDescs; ++i)
5274 {
5275 E1KTXDESC desc;
5276 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(TDBAH, TDBAL, i),
5277 &desc, sizeof(desc));
5278 if (i == tdh)
5279 LogRel(("E1000: >>> "));
5280 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5281 }
5282 LogRel(("E1000: -- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5283 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE));
5284 if (tdh > pThis->iTxDCurrent)
5285 tdh -= pThis->iTxDCurrent;
5286 else
5287 tdh = cDescs + tdh - pThis->iTxDCurrent;
5288 for (i = 0; i < pThis->nTxDFetched; ++i)
5289 {
5290 if (i == pThis->iTxDCurrent)
5291 LogRel(("E1000: >>> "));
5292 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
5293 }
5294}
5295
5296/**
5297 * Transmit pending descriptors.
5298 *
5299 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5300 *
5301 * @param pThis The E1000 state.
5302 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5303 */
5304static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5305{
5306 int rc = VINF_SUCCESS;
5307
5308 /* Check if transmitter is enabled. */
5309 if (!(TCTL & TCTL_EN))
5310 return VINF_SUCCESS;
5311 /*
5312 * Grab the xmit lock of the driver as well as the E1K device state.
5313 */
5314 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5315 if (pDrv)
5316 {
5317 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5318 if (RT_FAILURE(rc))
5319 return rc;
5320 }
5321
5322 /*
5323 * Process all pending descriptors.
5324 * Note! Do not process descriptors in locked state
5325 */
5326 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5327 if (RT_LIKELY(rc == VINF_SUCCESS))
5328 {
5329 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5330 /*
5331 * fIncomplete is set whenever we try to fetch additional descriptors
5332         * for an incomplete packet. If we fail to locate a complete packet on
5333         * the next iteration, we need to reset the cache or we risk getting
5334         * stuck in this loop forever.
5335 */
5336 bool fIncomplete = false;
5337 while (!pThis->fLocked && e1kTxDLazyLoad(pThis))
5338 {
5339 while (e1kLocateTxPacket(pThis))
5340 {
5341 fIncomplete = false;
5342 /* Found a complete packet, allocate it. */
5343 rc = e1kXmitAllocBuf(pThis, pThis->fGSO);
5344 /* If we're out of bandwidth we'll come back later. */
5345 if (RT_FAILURE(rc))
5346 goto out;
5347 /* Copy the packet to allocated buffer and send it. */
5348 rc = e1kXmitPacket(pThis, fOnWorkerThread);
5349 /* If we're out of bandwidth we'll come back later. */
5350 if (RT_FAILURE(rc))
5351 goto out;
5352 }
5353 uint8_t u8Remain = pThis->nTxDFetched - pThis->iTxDCurrent;
5354 if (RT_UNLIKELY(fIncomplete))
5355 {
5356 static bool fTxDCacheDumped = false;
5357 /*
5358 * The descriptor cache is full, but we were unable to find
5359 * a complete packet in it. Drop the cache and hope that
5360                 * the guest driver can recover from the network card error.
5361 */
5362 LogRel(("%s: No complete packets in%s TxD cache! "
5363 "Fetched=%d, current=%d, TX len=%d.\n",
5364 pThis->szPrf,
5365 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5366 pThis->nTxDFetched, pThis->iTxDCurrent,
5367 e1kGetTxLen(pThis)));
5368 if (!fTxDCacheDumped)
5369 {
5370 fTxDCacheDumped = true;
5371 e1kDumpTxDCache(pThis);
5372 }
5373 pThis->iTxDCurrent = pThis->nTxDFetched = 0;
5374 /*
5375 * Returning an error at this point means Guru in R0
5376 * (see @bugref{6428}).
5377 */
5378# ifdef IN_RING3
5379 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5380# else /* !IN_RING3 */
5381 rc = VINF_IOM_R3_MMIO_WRITE;
5382# endif /* !IN_RING3 */
5383 goto out;
5384 }
5385 if (u8Remain > 0)
5386 {
5387 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5388 "%d more are available\n",
5389 pThis->szPrf, pThis->iTxDCurrent, u8Remain,
5390 e1kGetTxLen(pThis) - u8Remain));
5391
5392 /*
5393 * A packet was partially fetched. Move incomplete packet to
5394 * the beginning of cache buffer, then load more descriptors.
5395 */
5396 memmove(pThis->aTxDescriptors,
5397 &pThis->aTxDescriptors[pThis->iTxDCurrent],
5398 u8Remain * sizeof(E1KTXDESC));
5399 pThis->iTxDCurrent = 0;
5400 pThis->nTxDFetched = u8Remain;
5401 e1kTxDLoadMore(pThis);
5402 fIncomplete = true;
5403 }
5404 else
5405 pThis->nTxDFetched = 0;
5406 pThis->iTxDCurrent = 0;
5407 }
5408 if (!pThis->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5409 {
5410 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5411 pThis->szPrf));
5412 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5413 }
5414out:
5415 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5416
5417 /// @todo uncomment: pThis->uStatIntTXQE++;
5418 /// @todo uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5419
5420 e1kCsTxLeave(pThis);
5421 }
5422
5423
5424 /*
5425 * Release the lock.
5426 */
5427 if (pDrv)
5428 pDrv->pfnEndXmit(pDrv);
5429 return rc;
5430}
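/*
 * Illustrative sketch only, excluded from the build: the cache compaction
 * step performed by e1kXmitPending() above when a packet has only been
 * partially fetched. The unprocessed tail of the cache is moved to the front
 * so that e1kTxDLoadMore() can append the missing descriptors. The helper
 * name is hypothetical; it operates on the same fields as the code above.
 */
#if 0 /* example only */
static void e1kExampleCompactTxdCache(PE1KSTATE pThis)
{
    uint8_t const cRemain = pThis->nTxDFetched - pThis->iTxDCurrent;
    memmove(pThis->aTxDescriptors,
            &pThis->aTxDescriptors[pThis->iTxDCurrent],
            cRemain * sizeof(E1KTXDESC));
    pThis->iTxDCurrent = 0;
    pThis->nTxDFetched = cRemain;
}
#endif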
5431
5432#endif /* E1K_WITH_TXD_CACHE */
5433#ifdef IN_RING3
5434
5435/**
5436 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5437 */
5438static DECLCALLBACK(void) e1kR3NetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5439{
5440 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
5441 /* Resume suspended transmission */
5442 STATUS &= ~STATUS_TXOFF;
5443 e1kXmitPending(pThis, true /*fOnWorkerThread*/);
5444}
5445
5446/**
5447 * Callback for consuming from transmit queue. It gets called in R3 whenever
5448 * we enqueue something in R0/GC.
5449 *
5450 * @returns true
5451 * @param pDevIns Pointer to device instance structure.
5452 * @param pItem Pointer to the element being dequeued (not used).
5453 * @thread ???
5454 */
5455static DECLCALLBACK(bool) e1kTxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5456{
5457 NOREF(pItem);
5458 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5459 E1kLog2(("%s e1kTxQueueConsumer:\n", pThis->szPrf));
5460
5461 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/); NOREF(rc);
5462#ifndef DEBUG_andy /** @todo r=andy Happens for me a lot, mute this for me. */
5463 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
5464#endif
5465 return true;
5466}
5467
5468/**
5469 * Handler for the wakeup signaller queue.
5470 */
5471static DECLCALLBACK(bool) e1kCanRxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5472{
5473 RT_NOREF(pItem);
5474 e1kWakeupReceive(pDevIns);
5475 return true;
5476}
5477
5478#endif /* IN_RING3 */
5479
5480/**
5481 * Write handler for Transmit Descriptor Tail register.
5482 *
5483 * @param pThis The device state structure.
5484 * @param offset Register offset in memory-mapped frame.
5485 * @param index Register index in register array.
5486 * @param value The value to store.
5488 * @thread EMT
5489 */
5490static int e1kRegWriteTDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5491{
5492 int rc = e1kRegWriteDefault(pThis, offset, index, value);
5493
5494 /* All descriptors starting with head and not including tail belong to us. */
5495 /* Process them. */
5496 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5497 pThis->szPrf, TDBAL, TDBAH, TDLEN, TDH, TDT));
5498
5499 /* Ignore TDT writes when the link is down. */
5500 if (TDH != TDT && (STATUS & STATUS_LU))
5501 {
5502 Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", TDH, TDT, e1kGetTxLen(pThis)));
5503 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5504 pThis->szPrf, e1kGetTxLen(pThis)));
5505
5506 /* Transmit pending packets if possible, defer it if we cannot do it
5507 in the current context. */
5508#ifdef E1K_TX_DELAY
5509 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5510 if (RT_LIKELY(rc == VINF_SUCCESS))
5511 {
5512 if (!TMTimerIsActive(pThis->CTX_SUFF(pTXDTimer)))
5513 {
5514#ifdef E1K_INT_STATS
5515 pThis->u64ArmedAt = RTTimeNanoTS();
5516#endif
5517 e1kArmTimer(pThis, pThis->CTX_SUFF(pTXDTimer), E1K_TX_DELAY);
5518 }
5519 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayed);
5520 e1kCsTxLeave(pThis);
5521 return rc;
5522 }
5523 /* We failed to enter the TX critical section -- transmit as usual. */
5524#endif /* E1K_TX_DELAY */
5525#ifndef IN_RING3
5526 if (!pThis->CTX_SUFF(pDrv))
5527 {
5528 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pTxQueue));
5529 if (RT_UNLIKELY(pItem))
5530 PDMQueueInsert(pThis->CTX_SUFF(pTxQueue), pItem);
5531 }
5532 else
5533#endif
5534 {
5535 rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
5536 if (rc == VERR_TRY_AGAIN)
5537 rc = VINF_SUCCESS;
5538 else if (rc == VERR_SEM_BUSY)
5539 rc = VINF_IOM_R3_MMIO_WRITE;
5540 AssertRC(rc);
5541 }
5542 }
5543
5544 return rc;
5545}
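/*
 * Illustrative sketch only, excluded from the build: after a TDT write, the
 * descriptors from TDH up to (but not including) TDT belong to the device,
 * and their count, modulo the ring size, is what e1kGetTxLen() effectively
 * reports. The helper name is hypothetical.
 */
#if 0 /* example only */
static uint32_t e1kExampleTxPending(uint32_t uTdh, uint32_t uTdt, uint32_t cDescsInRing)
{
    /* Ring arithmetic: wrap around when the tail is behind the head. */
    return uTdt >= uTdh ? uTdt - uTdh : cDescsInRing - uTdh + uTdt;
}
#endif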
5546
5547/**
5548 * Write handler for Multicast Table Array registers.
5549 *
5550 * @param pThis The device state structure.
5551 * @param offset Register offset in memory-mapped frame.
5552 * @param index Register index in register array.
5553 * @param value The value to store.
5554 * @thread EMT
5555 */
5556static int e1kRegWriteMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5557{
5558 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5559 pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])] = value;
5560
5561 return VINF_SUCCESS;
5562}
5563
5564/**
5565 * Read handler for Multicast Table Array registers.
5566 *
5567 * @returns VBox status code.
5568 *
5569 * @param pThis The device state structure.
5570 * @param offset Register offset in memory-mapped frame.
5571 * @param index Register index in register array.
5572 * @thread EMT
5573 */
5574static int e1kRegReadMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5575{
5576    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5577 *pu32Value = pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])];
5578
5579 return VINF_SUCCESS;
5580}
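/*
 * Illustrative sketch only, excluded from the build: MTA, RA and VFTA are
 * banks of consecutive 32-bit registers backed by plain arrays, so the
 * handlers in this group all reduce the access to a simple array index.
 * The helper name is hypothetical.
 */
#if 0 /* example only */
static unsigned e1kExampleBankIndex(uint32_t offReg, uint32_t offBankStart)
{
    /* Each register in the bank is one 32-bit word wide. */
    return (offReg - offBankStart) / sizeof(uint32_t);
}
#endif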
5581
5582/**
5583 * Write handler for Receive Address registers.
5584 *
5585 * @param pThis The device state structure.
5586 * @param offset Register offset in memory-mapped frame.
5587 * @param index Register index in register array.
5588 * @param value The value to store.
5589 * @thread EMT
5590 */
5591static int e1kRegWriteRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5592{
5593 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5594 pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
5595
5596 return VINF_SUCCESS;
5597}
5598
5599/**
5600 * Read handler for Receive Address registers.
5601 *
5602 * @returns VBox status code.
5603 *
5604 * @param pThis The device state structure.
5605 * @param offset Register offset in memory-mapped frame.
5606 * @param index Register index in register array.
5607 * @thread EMT
5608 */
5609static int e1kRegReadRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5610{
5611    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5612 *pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
5613
5614 return VINF_SUCCESS;
5615}
5616
5617/**
5618 * Write handler for VLAN Filter Table Array registers.
5619 *
5620 * @param pThis The device state structure.
5621 * @param offset Register offset in memory-mapped frame.
5622 * @param index Register index in register array.
5623 * @param value The value to store.
5624 * @thread EMT
5625 */
5626static int e1kRegWriteVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5627{
5628 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VINF_SUCCESS);
5629 pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])] = value;
5630
5631 return VINF_SUCCESS;
5632}
5633
5634/**
5635 * Read handler for VLAN Filter Table Array registers.
5636 *
5637 * @returns VBox status code.
5638 *
5639 * @param pThis The device state structure.
5640 * @param offset Register offset in memory-mapped frame.
5641 * @param index Register index in register array.
5642 * @thread EMT
5643 */
5644static int e1kRegReadVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5645{
5646    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VERR_DEV_IO_ERROR);
5647 *pu32Value = pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])];
5648
5649 return VINF_SUCCESS;
5650}
5651
5652/**
5653 * Read handler for unimplemented registers.
5654 *
5655 * Merely reports reads from unimplemented registers.
5656 *
5657 * @returns VBox status code.
5658 *
5659 * @param pThis The device state structure.
5660 * @param offset Register offset in memory-mapped frame.
5661 * @param index Register index in register array.
5662 * @thread EMT
5663 */
5664static int e1kRegReadUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5665{
5666 RT_NOREF3(pThis, offset, index);
5667 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5668 pThis->szPrf, offset, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5669 *pu32Value = 0;
5670
5671 return VINF_SUCCESS;
5672}
5673
5674/**
5675 * Default register read handler with automatic clear operation.
5676 *
5677 * Retrieves the value of register from register array in device state structure.
5678 * Then resets all bits.
5679 *
5680 * @remarks Masking and shifting of the result are done in the caller.
5682 *
5683 * @returns VBox status code.
5684 *
5685 * @param pThis The device state structure.
5686 * @param offset Register offset in memory-mapped frame.
5687 * @param index Register index in register array.
5688 * @thread EMT
5689 */
5690static int e1kRegReadAutoClear(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5691{
5692 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5693 int rc = e1kRegReadDefault(pThis, offset, index, pu32Value);
5694 pThis->auRegs[index] = 0;
5695
5696 return rc;
5697}
5698
5699/**
5700 * Default register read handler.
5701 *
5702 * Retrieves the value of register from register array in device state structure.
5703 * Bits corresponding to 0s in 'readable' mask will always read as 0s.
5704 *
5705 * @remarks Masking and shifting of the result are done in the caller.
5707 *
5708 * @returns VBox status code.
5709 *
5710 * @param pThis The device state structure.
5711 * @param offset Register offset in memory-mapped frame.
5712 * @param index Register index in register array.
5713 * @thread EMT
5714 */
5715static int e1kRegReadDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5716{
5717 RT_NOREF_PV(offset);
5718
5719 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5720 *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
5721
5722 return VINF_SUCCESS;
5723}
5724
5725/**
5726 * Write handler for unimplemented registers.
5727 *
5728 * Merely reports writes to unimplemented registers.
5729 *
5730 * @param pThis The device state structure.
5731 * @param offset Register offset in memory-mapped frame.
5732 * @param index Register index in register array.
5733 * @param value The value to store.
5734 * @thread EMT
5735 */
5737static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5738{
5739 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
5740
5741 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
5742 pThis->szPrf, offset, value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5743
5744 return VINF_SUCCESS;
5745}
5746
5747/**
5748 * Default register write handler.
5749 *
5750 * Stores the value to the register array in device state structure. Only bits
5751 * corresponding to 1s both in 'writable' and 'mask' will be stored.
5752 *
5753 * @returns VBox status code.
5754 *
5755 * @param pThis The device state structure.
5756 * @param offset Register offset in memory-mapped frame.
5757 * @param index Register index in register array.
5758 * @param value The value to store.
5760 * @thread EMT
5761 */
5763static int e1kRegWriteDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5764{
5765 RT_NOREF_PV(offset);
5766
5767 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5768 pThis->auRegs[index] = (value & g_aE1kRegMap[index].writable)
5769 | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
5770
5771 return VINF_SUCCESS;
5772}
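/*
 * Illustrative sketch only, excluded from the build: the masking semantics
 * shared by e1kRegReadDefault() and e1kRegWriteDefault() above. Reads return
 * only the readable bits; writes update only the writable bits and preserve
 * the rest. The helper names are hypothetical.
 */
#if 0 /* example only */
static uint32_t e1kExampleMaskedRead(uint32_t uReg, uint32_t fReadable)
{
    return uReg & fReadable;
}

static uint32_t e1kExampleMaskedWrite(uint32_t uOld, uint32_t uNew, uint32_t fWritable)
{
    return (uNew & fWritable) | (uOld & ~fWritable);
}
#endif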
5773
5774/**
5775 * Search register table for matching register.
5776 *
5777 * @returns Index in the register table or -1 if not found.
5778 *
5779 * @param offReg Register offset in memory-mapped region.
5780 * @thread EMT
5781 */
5782static int e1kRegLookup(uint32_t offReg)
5783{
5784
5785#if 0
5786 int index;
5787
5788 for (index = 0; index < E1K_NUM_OF_REGS; index++)
5789 {
5790 if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
5791 {
5792 return index;
5793 }
5794 }
5795#else
5796 int iStart = 0;
5797 int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
5798 for (;;)
5799 {
5800 int i = (iEnd - iStart) / 2 + iStart;
5801 uint32_t offCur = g_aE1kRegMap[i].offset;
5802 if (offReg < offCur)
5803 {
5804 if (i == iStart)
5805 break;
5806 iEnd = i;
5807 }
5808 else if (offReg >= offCur + g_aE1kRegMap[i].size)
5809 {
5810 i++;
5811 if (i == iEnd)
5812 break;
5813 iStart = i;
5814 }
5815 else
5816 return i;
5817 Assert(iEnd > iStart);
5818 }
5819
5820 for (unsigned i = E1K_NUM_OF_BINARY_SEARCHABLE; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5821 if (offReg - g_aE1kRegMap[i].offset < g_aE1kRegMap[i].size)
5822 return i;
5823
5824# ifdef VBOX_STRICT
5825 for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5826 Assert(offReg - g_aE1kRegMap[i].offset >= g_aE1kRegMap[i].size);
5827# endif
5828
5829#endif
5830
5831 return -1;
5832}
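/*
 * Illustrative sketch only, excluded from the build: the lookup above relies
 * on the first E1K_NUM_OF_BINARY_SEARCHABLE entries of g_aE1kRegMap being
 * sorted by offset; the remaining entries are scanned linearly. The helper
 * below is a hypothetical, generic restatement of the binary-search part
 * using plain offset/size arrays.
 */
#if 0 /* example only */
static int e1kExampleRangeSearch(const uint32_t *paOff, const uint32_t *paSize,
                                 int cEntries, uint32_t off)
{
    int iStart = 0;
    int iEnd   = cEntries;
    while (iStart < iEnd)
    {
        int i = iStart + (iEnd - iStart) / 2;
        if (off < paOff[i])
            iEnd = i;                       /* go left  */
        else if (off >= paOff[i] + paSize[i])
            iStart = i + 1;                 /* go right */
        else
            return i;                       /* off falls inside entry i */
    }
    return -1;
}
#endif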
5833
5834/**
5835 * Handle unaligned register read operation.
5836 *
5837 * Looks up and calls appropriate handler.
5838 *
5839 * @returns VBox status code.
5840 *
5841 * @param pThis The device state structure.
5842 * @param offReg Register offset in memory-mapped frame.
5843 * @param pv Where to store the result.
5844 * @param cb Number of bytes to read.
5845 * @thread EMT
5846 * @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
5847 * accesses we have to take care of that ourselves.
5848 */
5849static int e1kRegReadUnaligned(PE1KSTATE pThis, uint32_t offReg, void *pv, uint32_t cb)
5850{
5851 uint32_t u32 = 0;
5852 uint32_t shift;
5853 int rc = VINF_SUCCESS;
5854 int index = e1kRegLookup(offReg);
5855#ifdef LOG_ENABLED
5856 char buf[9];
5857#endif
5858
5859 /*
5860 * From the spec:
5861 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5862     * double word) are ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5863 */
5864
5865 /*
5866 * To be able to read bytes and short word we convert them to properly
5867 * shifted 32-bit words and masks. The idea is to keep register-specific
5868 * handlers simple. Most accesses will be 32-bit anyway.
5869 */
5870 uint32_t mask;
5871 switch (cb)
5872 {
5873 case 4: mask = 0xFFFFFFFF; break;
5874 case 2: mask = 0x0000FFFF; break;
5875 case 1: mask = 0x000000FF; break;
5876 default:
5877 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
5878 "unsupported op size: offset=%#10x cb=%#10x\n", offReg, cb);
5879 }
5880 if (index != -1)
5881 {
5882 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
5883 if (g_aE1kRegMap[index].readable)
5884 {
5885 /* Make the mask correspond to the bits we are about to read. */
5886 shift = (offReg - g_aE1kRegMap[index].offset) % sizeof(uint32_t) * 8;
5887 mask <<= shift;
5888 if (!mask)
5889 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
5890 /*
5891             * Read the full register, then mask out the irrelevant bits and
5892             * shift the result back into place.
5893 */
5894 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5895 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5896 return rc;
5897 //pThis->fDelayInts = false;
5898 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5899 //pThis->iStatIntLostOne = 0;
5900 rc = g_aE1kRegMap[index].pfnRead(pThis, offReg & 0xFFFFFFFC, index, &u32);
5901 u32 &= mask;
5902 //e1kCsLeave(pThis);
5903 E1kLog2(("%s At %08X read %s from %s (%s)\n",
5904 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5905 Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
5906 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5907 /* Shift back the result. */
5908 u32 >>= shift;
5909 }
5910 else
5911 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
5912 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5913 if (IOM_SUCCESS(rc))
5914 STAM_COUNTER_INC(&pThis->aStatRegReads[index]);
5915 }
5916 else
5917 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
5918 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf)));
5919
5920 memcpy(pv, &u32, cb);
5921 return rc;
5922}
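/*
 * Illustrative sketch only, excluded from the build: the shift/mask dance
 * performed by e1kRegReadUnaligned() above, assuming the register itself is
 * dword aligned. For example, a 1-byte read at register offset + 2 uses
 * shift = 16 and mask = 0x00FF0000: the full 32-bit register is read, masked,
 * and the result shifted back down. The helper name is hypothetical.
 */
#if 0 /* example only */
static uint32_t e1kExampleSubDwordRead(uint32_t uReg32, uint32_t offReg, uint32_t cb)
{
    uint32_t mask  = cb == 4 ? UINT32_C(0xFFFFFFFF)
                   : cb == 2 ? UINT32_C(0x0000FFFF)
                   :           UINT32_C(0x000000FF);
    uint32_t shift = (offReg & 3) * 8;      /* byte position within the dword */
    return (uReg32 & (mask << shift)) >> shift;
}
#endif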
5923
5924/**
5925 * Handle 4 byte aligned and sized read operation.
5926 *
5927 * Looks up and calls appropriate handler.
5928 *
5929 * @returns VBox status code.
5930 *
5931 * @param pThis The device state structure.
5932 * @param offReg Register offset in memory-mapped frame.
5933 * @param pu32 Where to store the result.
5934 * @thread EMT
5935 */
5936static int e1kRegReadAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t *pu32)
5937{
5938 Assert(!(offReg & 3));
5939
5940 /*
5941 * Lookup the register and check that it's readable.
5942 */
5943 int rc = VINF_SUCCESS;
5944 int idxReg = e1kRegLookup(offReg);
5945 if (RT_LIKELY(idxReg != -1))
5946 {
5947 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
5948        if (RT_LIKELY(g_aE1kRegMap[idxReg].readable))
5949 {
5950 /*
5951             * Read it. Aligned 32-bit reads need no masking or shifting.
5953 */
5954 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5955 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
5956 // return rc;
5957 //pThis->fDelayInts = false;
5958 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5959 //pThis->iStatIntLostOne = 0;
5960 rc = g_aE1kRegMap[idxReg].pfnRead(pThis, offReg & 0xFFFFFFFC, idxReg, pu32);
5961 //e1kCsLeave(pThis);
5962 Log6(("%s At %08X read %08X from %s (%s)\n",
5963 pThis->szPrf, offReg, *pu32, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5964 if (IOM_SUCCESS(rc))
5965 STAM_COUNTER_INC(&pThis->aStatRegReads[idxReg]);
5966 }
5967 else
5968 E1kLog(("%s At %08X read attempt from non-readable register %s (%s)\n",
5969 pThis->szPrf, offReg, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5970 }
5971 else
5972 E1kLog(("%s At %08X read attempt from non-existing register\n", pThis->szPrf, offReg));
5973 return rc;
5974}
5975
5976/**
5977 * Handle 4 byte sized and aligned register write operation.
5978 *
5979 * Looks up and calls appropriate handler.
5980 *
5981 * @returns VBox status code.
5982 *
5983 * @param pThis The device state structure.
5984 * @param offReg Register offset in memory-mapped frame.
5985 * @param u32Value The value to write.
5986 * @thread EMT
5987 */
5988static int e1kRegWriteAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
5989{
5990 int rc = VINF_SUCCESS;
5991 int index = e1kRegLookup(offReg);
5992 if (RT_LIKELY(index != -1))
5993 {
5994 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
5995 if (RT_LIKELY(g_aE1kRegMap[index].writable))
5996 {
5997 /*
5998             * Write it. Aligned 32-bit writes need no masking or shifting.
6000 */
6001 Log6(("%s At %08X write %08X to %s (%s)\n",
6002 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6003 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
6004 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
6005 // return rc;
6006 //pThis->fDelayInts = false;
6007 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6008 //pThis->iStatIntLostOne = 0;
6009 rc = g_aE1kRegMap[index].pfnWrite(pThis, offReg, index, u32Value);
6010 //e1kCsLeave(pThis);
6011 }
6012 else
6013 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
6014 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6015 if (IOM_SUCCESS(rc))
6016 STAM_COUNTER_INC(&pThis->aStatRegWrites[index]);
6017 }
6018 else
6019 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
6020 pThis->szPrf, offReg, u32Value));
6021 return rc;
6022}
6023
6024
6025/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
6026
6027/**
6028 * @callback_method_impl{FNIOMMMIOREAD}
6029 */
6030PDMBOTHCBDECL(int) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
6031{
6032 RT_NOREF2(pvUser, cb);
6033 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6034 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6035
6036 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
6037 Assert(offReg < E1K_MM_SIZE);
6038 Assert(cb == 4);
6039 Assert(!(GCPhysAddr & 3));
6040
6041 int rc = e1kRegReadAlignedU32(pThis, offReg, (uint32_t *)pv);
6042
6043 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6044 return rc;
6045}
6046
6047/**
6048 * @callback_method_impl{FNIOMMMIOWRITE}
6049 */
6050PDMBOTHCBDECL(int) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
6051{
6052 RT_NOREF2(pvUser, cb);
6053 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6054 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6055
6056 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
6057 Assert(offReg < E1K_MM_SIZE);
6058 Assert(cb == 4);
6059 Assert(!(GCPhysAddr & 3));
6060
6061 int rc = e1kRegWriteAlignedU32(pThis, offReg, *(uint32_t const *)pv);
6062
6063 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6064 return rc;
6065}
6066
6067/**
6068 * @callback_method_impl{FNIOMIOPORTIN}
6069 */
6070PDMBOTHCBDECL(int) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t *pu32, unsigned cb)
6071{
6072 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6073 int rc;
6074 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIORead), a);
6075 RT_NOREF_PV(pvUser);
6076
6077 uPort -= pThis->IOPortBase;
6078 if (RT_LIKELY(cb == 4))
6079 switch (uPort)
6080 {
6081 case 0x00: /* IOADDR */
6082 *pu32 = pThis->uSelectedReg;
6083 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6084 rc = VINF_SUCCESS;
6085 break;
6086
6087 case 0x04: /* IODATA */
6088 if (!(pThis->uSelectedReg & 3))
6089 rc = e1kRegReadAlignedU32(pThis, pThis->uSelectedReg, pu32);
6090 else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
6091 rc = e1kRegReadUnaligned(pThis, pThis->uSelectedReg, pu32, cb);
6092 if (rc == VINF_IOM_R3_MMIO_READ)
6093 rc = VINF_IOM_R3_IOPORT_READ;
6094 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6095 break;
6096
6097 default:
6098 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", pThis->szPrf, uPort));
6099 //rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
6100 rc = VINF_SUCCESS;
6101 }
6102 else
6103 {
6104 E1kLog(("%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x", pThis->szPrf, uPort, cb));
6105 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb);
6106 }
6107 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIORead), a);
6108 return rc;
6109}
6110
6111
6112/**
6113 * @callback_method_impl{FNIOMIOPORTOUT}
6114 */
6115PDMBOTHCBDECL(int) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t u32, unsigned cb)
6116{
6117 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6118 int rc;
6119 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6120 RT_NOREF_PV(pvUser);
6121
6122 E1kLog2(("%s e1kIOPortOut: uPort=%RTiop value=%08x\n", pThis->szPrf, uPort, u32));
6123 if (RT_LIKELY(cb == 4))
6124 {
6125 uPort -= pThis->IOPortBase;
6126 switch (uPort)
6127 {
6128 case 0x00: /* IOADDR */
6129 pThis->uSelectedReg = u32;
6130 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
6131 rc = VINF_SUCCESS;
6132 break;
6133
6134 case 0x04: /* IODATA */
6135 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
6136 if (RT_LIKELY(!(pThis->uSelectedReg & 3)))
6137 {
6138 rc = e1kRegWriteAlignedU32(pThis, pThis->uSelectedReg, u32);
6139 if (rc == VINF_IOM_R3_MMIO_WRITE)
6140 rc = VINF_IOM_R3_IOPORT_WRITE;
6141 }
6142 else
6143 rc = PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
6144 "Spec violation: misaligned offset: %#10x, ignored.\n", pThis->uSelectedReg);
6145 break;
6146
6147 default:
6148 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", pThis->szPrf, uPort));
6149 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", uPort);
6150 }
6151 }
6152 else
6153 {
6154 E1kLog(("%s e1kIOPortOut: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb));
6155 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s: invalid op size: uPort=%RTiop cb=%#x\n", pThis->szPrf, uPort, cb);
6156 }
6157
6158 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6159 return rc;
6160}
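/*
 * Illustrative sketch only, excluded from the build: the indirect access
 * protocol served by e1kIOPortIn/e1kIOPortOut above, as seen from a
 * hypothetical guest driver. The register offset is written to IOADDR
 * (base + 0), then the register content is transferred through IODATA
 * (base + 4). The guest_outl/guest_inl prototypes are placeholders for
 * whatever port I/O primitives the guest actually uses.
 */
#if 0 /* example only */
extern void     guest_outl(uint16_t uPort, uint32_t u32); /* placeholder */
extern uint32_t guest_inl(uint16_t uPort);                /* placeholder */

static uint32_t e1kExampleGuestReadReg(uint16_t uIoBase, uint32_t offReg)
{
    guest_outl(uIoBase + 0, offReg);        /* IOADDR: select the register */
    return guest_inl(uIoBase + 4);          /* IODATA: read its value      */
}
#endif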
6161
6162#ifdef IN_RING3
6163
6164/**
6165 * Dump complete device state to log.
6166 *
6167 * @param pThis Pointer to device state.
6168 */
6169static void e1kDumpState(PE1KSTATE pThis)
6170{
6171 RT_NOREF(pThis);
6172 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6173 E1kLog2(("%s: %8.8s = %08x\n", pThis->szPrf, g_aE1kRegMap[i].abbrev, pThis->auRegs[i]));
6174# ifdef E1K_INT_STATS
6175 LogRel(("%s: Interrupt attempts: %d\n", pThis->szPrf, pThis->uStatIntTry));
6176 LogRel(("%s: Interrupts raised : %d\n", pThis->szPrf, pThis->uStatInt));
6177 LogRel(("%s: Interrupts lowered: %d\n", pThis->szPrf, pThis->uStatIntLower));
6178 LogRel(("%s: ICR outside ISR : %d\n", pThis->szPrf, pThis->uStatNoIntICR));
6179 LogRel(("%s: IMS raised ints : %d\n", pThis->szPrf, pThis->uStatIntIMS));
6180 LogRel(("%s: Interrupts skipped: %d\n", pThis->szPrf, pThis->uStatIntSkip));
6181 LogRel(("%s: Masked interrupts : %d\n", pThis->szPrf, pThis->uStatIntMasked));
6182 LogRel(("%s: Early interrupts : %d\n", pThis->szPrf, pThis->uStatIntEarly));
6183 LogRel(("%s: Late interrupts : %d\n", pThis->szPrf, pThis->uStatIntLate));
6184 LogRel(("%s: Lost interrupts : %d\n", pThis->szPrf, pThis->iStatIntLost));
6185 LogRel(("%s: Interrupts by RX : %d\n", pThis->szPrf, pThis->uStatIntRx));
6186 LogRel(("%s: Interrupts by TX : %d\n", pThis->szPrf, pThis->uStatIntTx));
6187 LogRel(("%s: Interrupts by ICS : %d\n", pThis->szPrf, pThis->uStatIntICS));
6188 LogRel(("%s: Interrupts by RDTR: %d\n", pThis->szPrf, pThis->uStatIntRDTR));
6189 LogRel(("%s: Interrupts by RDMT: %d\n", pThis->szPrf, pThis->uStatIntRXDMT0));
6190 LogRel(("%s: Interrupts by TXQE: %d\n", pThis->szPrf, pThis->uStatIntTXQE));
6191 LogRel(("%s: TX int delay asked: %d\n", pThis->szPrf, pThis->uStatTxIDE));
6192 LogRel(("%s: TX delayed: %d\n", pThis->szPrf, pThis->uStatTxDelayed));
6193 LogRel(("%s: TX delay expired: %d\n", pThis->szPrf, pThis->uStatTxDelayExp));
6194 LogRel(("%s: TX no report asked: %d\n", pThis->szPrf, pThis->uStatTxNoRS));
6195 LogRel(("%s: TX abs timer expd : %d\n", pThis->szPrf, pThis->uStatTAD));
6196 LogRel(("%s: TX int timer expd : %d\n", pThis->szPrf, pThis->uStatTID));
6197 LogRel(("%s: RX abs timer expd : %d\n", pThis->szPrf, pThis->uStatRAD));
6198 LogRel(("%s: RX int timer expd : %d\n", pThis->szPrf, pThis->uStatRID));
6199 LogRel(("%s: TX CTX descriptors: %d\n", pThis->szPrf, pThis->uStatDescCtx));
6200 LogRel(("%s: TX DAT descriptors: %d\n", pThis->szPrf, pThis->uStatDescDat));
6201 LogRel(("%s: TX LEG descriptors: %d\n", pThis->szPrf, pThis->uStatDescLeg));
6202 LogRel(("%s: Received frames : %d\n", pThis->szPrf, pThis->uStatRxFrm));
6203 LogRel(("%s: Transmitted frames: %d\n", pThis->szPrf, pThis->uStatTxFrm));
6204 LogRel(("%s: TX frames up to 1514: %d\n", pThis->szPrf, pThis->uStatTx1514));
6205 LogRel(("%s: TX frames up to 2962: %d\n", pThis->szPrf, pThis->uStatTx2962));
6206 LogRel(("%s: TX frames up to 4410: %d\n", pThis->szPrf, pThis->uStatTx4410));
6207 LogRel(("%s: TX frames up to 5858: %d\n", pThis->szPrf, pThis->uStatTx5858));
6208 LogRel(("%s: TX frames up to 7306: %d\n", pThis->szPrf, pThis->uStatTx7306));
6209 LogRel(("%s: TX frames up to 8754: %d\n", pThis->szPrf, pThis->uStatTx8754));
6210 LogRel(("%s: TX frames up to 16384: %d\n", pThis->szPrf, pThis->uStatTx16384));
6211 LogRel(("%s: TX frames up to 32768: %d\n", pThis->szPrf, pThis->uStatTx32768));
6212 LogRel(("%s: Larger TX frames : %d\n", pThis->szPrf, pThis->uStatTxLarge));
6213 LogRel(("%s: Max TX Delay : %lld\n", pThis->szPrf, pThis->uStatMaxTxDelay));
6214# endif /* E1K_INT_STATS */
6215}
6216
6217/**
6218 * @callback_method_impl{FNPCIIOREGIONMAP}
6219 */
6220static DECLCALLBACK(int) e1kMap(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
6221 RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
6222{
6223 RT_NOREF(pPciDev, iRegion);
6224 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE *);
6225 int rc;
6226
6227 switch (enmType)
6228 {
6229 case PCI_ADDRESS_SPACE_IO:
6230 pThis->IOPortBase = (RTIOPORT)GCPhysAddress;
6231 rc = PDMDevHlpIOPortRegister(pDevIns, pThis->IOPortBase, cb, NULL /*pvUser*/,
6232 e1kIOPortOut, e1kIOPortIn, NULL, NULL, "E1000");
6233 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6234 rc = PDMDevHlpIOPortRegisterR0(pDevIns, pThis->IOPortBase, cb, NIL_RTR0PTR /*pvUser*/,
6235 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6236 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6237 rc = PDMDevHlpIOPortRegisterRC(pDevIns, pThis->IOPortBase, cb, NIL_RTRCPTR /*pvUser*/,
6238 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6239 break;
6240
6241 case PCI_ADDRESS_SPACE_MEM:
6242 /*
6243 * From the spec:
6244 * For registers that should be accessed as 32-bit double words,
6245 * partial writes (less than a 32-bit double word) are ignored.
6246 * Partial reads return all 32 bits of data regardless of the
6247 * byte enables.
6248 */
6249#ifdef E1K_WITH_PREREG_MMIO
6250 pThis->addrMMReg = GCPhysAddress;
6251 if (GCPhysAddress == NIL_RTGCPHYS)
6252 rc = VINF_SUCCESS;
6253 else
6254 {
6255 Assert(!(GCPhysAddress & 7));
6256 rc = PDMDevHlpMMIOExMap(pDevIns, pPciDev, iRegion, GCPhysAddress);
6257 }
6258#else
6259 pThis->addrMMReg = GCPhysAddress; Assert(!(GCPhysAddress & 7));
6260 rc = PDMDevHlpMMIORegister(pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
6261 IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD,
6262 e1kMMIOWrite, e1kMMIORead, "E1000");
6263 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6264 rc = PDMDevHlpMMIORegisterR0(pDevIns, GCPhysAddress, cb, NIL_RTR0PTR /*pvUser*/,
6265 "e1kMMIOWrite", "e1kMMIORead");
6266 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6267 rc = PDMDevHlpMMIORegisterRC(pDevIns, GCPhysAddress, cb, NIL_RTRCPTR /*pvUser*/,
6268 "e1kMMIOWrite", "e1kMMIORead");
6269#endif
6270 break;
6271
6272 default:
6273 /* We should never get here */
6274 AssertMsgFailed(("Invalid PCI address space param in map callback"));
6275 rc = VERR_INTERNAL_ERROR;
6276 break;
6277 }
6278 return rc;
6279}
6280
6281
6282/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
6283
6284/**
6285 * Check if the device can receive data now.
6286 * This must be called before the pfnReceive() method is called.
6287 *
6288 * @returns VBox status code: VINF_SUCCESS if the device can receive data, VERR_NET_NO_BUFFER_SPACE otherwise.
6289 * @param pThis Pointer to the device state structure.
6290 * @thread EMT
6291 */
6292static int e1kCanReceive(PE1KSTATE pThis)
6293{
6294#ifndef E1K_WITH_RXD_CACHE
6295 size_t cb;
6296
6297 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6298 return VERR_NET_NO_BUFFER_SPACE;
6299
6300 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6301 {
6302 E1KRXDESC desc;
6303 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6304 &desc, sizeof(desc));
6305 if (desc.status.fDD)
6306 cb = 0;
6307 else
6308 cb = pThis->u16RxBSize;
6309 }
6310 else if (RDH < RDT)
6311 cb = (RDT - RDH) * pThis->u16RxBSize;
6312 else if (RDH > RDT)
6313 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pThis->u16RxBSize;
6314 else
6315 {
6316 cb = 0;
6317 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6318 }
6319 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6320 pThis->szPrf, RDH, RDT, RDLEN, pThis->u16RxBSize, cb));
6321
6322 e1kCsRxLeave(pThis);
6323 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6324#else /* E1K_WITH_RXD_CACHE */
6325 int rc = VINF_SUCCESS;
6326
6327 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6328 return VERR_NET_NO_BUFFER_SPACE;
6329
6330 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6331 {
6332 E1KRXDESC desc;
6333 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6334 &desc, sizeof(desc));
6335 if (desc.status.fDD)
6336 rc = VERR_NET_NO_BUFFER_SPACE;
6337 }
6338 else if (e1kRxDIsCacheEmpty(pThis) && RDH == RDT)
6339 {
6340 /* Cache is empty, so is the RX ring. */
6341 rc = VERR_NET_NO_BUFFER_SPACE;
6342 }
6343 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
6344 " u16RxBSize=%d rc=%Rrc\n", pThis->szPrf,
6345 e1kRxDInCache(pThis), RDH, RDT, RDLEN, pThis->u16RxBSize, rc));
6346
6347 e1kCsRxLeave(pThis);
6348 return rc;
6349#endif /* E1K_WITH_RXD_CACHE */
6350}
6351
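/*
 * Illustrative sketch, not part of the device code: the ring arithmetic used
 * in the non-cached branch of e1kCanReceive() above. The RX descriptor ring
 * is circular, so the number of usable descriptors depends on whether RDH has
 * wrapped past RDT. The example values in the trailing comment are made up.
 */
#if 0
static size_t e1kSketchRxSpace(uint32_t rdh, uint32_t rdt, uint32_t cDescs, uint16_t cbBuf)
{
    if (rdh == rdt)
        return 0;                                   /* out of RX descriptors */
    if (rdh < rdt)
        return (size_t)(rdt - rdh) * cbBuf;         /* straight segment      */
    return (size_t)(cDescs - rdh + rdt) * cbBuf;    /* wrapped segment       */
}
/* e1kSketchRxSpace(14, 2, 16, 2048) yields 4 descriptors' worth, i.e. 8192 bytes. */
#endif
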
6352/**
6353 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6354 */
6355static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6356{
6357 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6358 int rc = e1kCanReceive(pThis);
6359
6360 if (RT_SUCCESS(rc))
6361 return VINF_SUCCESS;
6362 if (RT_UNLIKELY(cMillies == 0))
6363 return VERR_NET_NO_BUFFER_SPACE;
6364
6365 rc = VERR_INTERRUPTED;
6366 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, true);
6367 STAM_PROFILE_START(&pThis->StatRxOverflow, a);
6368 VMSTATE enmVMState;
6369 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pThis->CTX_SUFF(pDevIns))) == VMSTATE_RUNNING
6370 || enmVMState == VMSTATE_RUNNING_LS))
6371 {
6372 int rc2 = e1kCanReceive(pThis);
6373 if (RT_SUCCESS(rc2))
6374 {
6375 rc = VINF_SUCCESS;
6376 break;
6377 }
6378 E1kLogRel(("E1000: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6379 E1kLog(("%s: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", pThis->szPrf, cMillies));
6380 RTSemEventWait(pThis->hEventMoreRxDescAvail, cMillies);
6381 }
6382 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
6383 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, false);
6384
6385 return rc;
6386}
6387
6388
6389/**
6390 * Matches the packet addresses against the Receive Address table. Looks for
6391 * exact matches only.
6392 *
6393 * @returns true if address matches.
6394 * @param pThis Pointer to the state structure.
6395 * @param pvBuf The ethernet packet.
6396 * @param cb Number of bytes available in the packet.
6397 * @thread EMT
6398 */
6399static bool e1kPerfectMatch(PE1KSTATE pThis, const void *pvBuf)
6400{
6401 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
6402 {
6403 E1KRAELEM* ra = pThis->aRecAddr.array + i;
6404
6405 /* Valid address? */
6406 if (ra->ctl & RA_CTL_AV)
6407 {
6408 Assert((ra->ctl & RA_CTL_AS) < 2);
6409 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6410 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6411 // pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6412 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6413 /*
6414 * Address Select:
6415 * 00b = Destination address
6416 * 01b = Source address
6417 * 10b = Reserved
6418 * 11b = Reserved
6419 * Since ethernet header is (DA, SA, len) we can use address
6420 * select as index.
6421 */
6422 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6423 ra->addr, sizeof(ra->addr)) == 0)
6424 return true;
6425 }
6426 }
6427
6428 return false;
6429}
6430
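/*
 * Illustrative sketch, not part of the device code: the Address Select field
 * used above simply scales into the Ethernet header, because the header
 * starts with the destination address (6 bytes) followed by the source
 * address (6 bytes).
 */
#if 0
static const uint8_t *e1kSketchAddrSelect(const uint8_t *pbFrame, unsigned uAddrSel)
{
    /* uAddrSel = 0 -> offset 0 (destination), uAddrSel = 1 -> offset 6 (source). */
    return pbFrame + 6 * uAddrSel;
}
#endif
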
6431/**
6432 * Matches the packet addresses against the Multicast Table Array.
6433 *
6434 * @remarks This is an imperfect match since it matches not an exact address
6435 * but a subset of addresses.
6436 *
6437 * @returns true if address matches.
6438 * @param pThis Pointer to the state structure.
6439 * @param pvBuf The ethernet packet.
6440 * @param cb Number of bytes available in the packet.
6441 * @thread EMT
6442 */
6443static bool e1kImperfectMatch(PE1KSTATE pThis, const void *pvBuf)
6444{
6445 /* Get bits 32..47 of destination address */
6446 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6447
6448 unsigned offset = GET_BITS(RCTL, MO);
6449 /*
6450 * offset means:
6451 * 00b = bits 36..47
6452 * 01b = bits 35..46
6453 * 10b = bits 34..45
6454 * 11b = bits 32..43
6455 */
6456 if (offset < 3)
6457 u16Bit = u16Bit >> (4 - offset);
6458 return ASMBitTest(pThis->auMTA, u16Bit & 0xFFF);
6459}
6460
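/*
 * Illustrative sketch, not part of the device code: how the code above picks
 * an MTA bit when RCTL.MO = 00b. The destination MAC is an arbitrary example.
 */
#if 0
static unsigned e1kSketchMtaBit(void)
{
    /* Example destination MAC 01:00:5E:00:00:FB -- bytes 4..5 are 0x00, 0xFB. */
    const uint8_t abDst[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB };
    /* Equivalent of the ((uint16_t*)pvBuf)[2] read above on a little-endian host: 0xFB00. */
    uint16_t u16Bit  = (uint16_t)(abDst[4] | ((uint16_t)abDst[5] << 8));
    unsigned offset = 0;                    /* RCTL.MO = 00b -> bits 36..47   */
    if (offset < 3)
        u16Bit = u16Bit >> (4 - offset);    /* 0xFB00 >> 4 = 0x0FB0           */
    return u16Bit & 0xFFF;                  /* bit 0xFB0 of auMTA gets tested */
}
#endif
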
6461/**
6462 * Determines if the packet is to be delivered to the upper layer.
6463 *
6464 * The following filters are supported:
6465 * - Exact Unicast/Multicast
6466 * - Promiscuous Unicast/Multicast
6467 * - Multicast
6468 * - VLAN
6469 *
6470 * @returns true if packet is intended for this node.
6471 * @param pThis Pointer to the state structure.
6472 * @param pvBuf The ethernet packet.
6473 * @param cb Number of bytes available in the packet.
6474 * @param pStatus Bit field to store status bits.
6475 * @thread EMT
6476 */
6477static bool e1kAddressFilter(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6478{
6479 Assert(cb > 14);
6480 /* Assume that we fail to pass exact filter. */
6481 pStatus->fPIF = false;
6482 pStatus->fVP = false;
6483 /* Discard oversized packets */
6484 if (cb > E1K_MAX_RX_PKT_SIZE)
6485 {
6486 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6487 pThis->szPrf, cb, E1K_MAX_RX_PKT_SIZE));
6488 E1K_INC_CNT32(ROC);
6489 return false;
6490 }
6491 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6492 {
6493 /* When long packet reception is disabled, packets over 1522 bytes are discarded */
6494 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6495 pThis->szPrf, cb));
6496 E1K_INC_CNT32(ROC);
6497 return false;
6498 }
6499
6500 uint16_t *u16Ptr = (uint16_t*)pvBuf;
6501 /* Compare TPID with VLAN Ether Type */
6502 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6503 {
6504 pStatus->fVP = true;
6505 /* Is VLAN filtering enabled? */
6506 if (RCTL & RCTL_VFE)
6507 {
6508 /* It is 802.1q packet indeed, let's filter by VID */
6509 if (RCTL & RCTL_CFIEN)
6510 {
6511 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", pThis->szPrf,
6512 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6513 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6514 !!(RCTL & RCTL_CFI)));
6515 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6516 {
6517 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6518 pThis->szPrf, E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6519 return false;
6520 }
6521 }
6522 else
6523 E1kLog3(("%s VLAN filter: VLAN=%d\n", pThis->szPrf,
6524 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6525 if (!ASMBitTest(pThis->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6526 {
6527 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6528 pThis->szPrf, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6529 return false;
6530 }
6531 }
6532 }
6533 /* Broadcast filtering */
6534 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6535 return true;
6536 E1kLog2(("%s Packet filter: not a broadcast\n", pThis->szPrf));
6537 if (e1kIsMulticast(pvBuf))
6538 {
6539 /* Is multicast promiscuous enabled? */
6540 if (RCTL & RCTL_MPE)
6541 return true;
6542 E1kLog2(("%s Packet filter: no promiscuous multicast\n", pThis->szPrf));
6543 /* Try perfect matches first */
6544 if (e1kPerfectMatch(pThis, pvBuf))
6545 {
6546 pStatus->fPIF = true;
6547 return true;
6548 }
6549 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6550 if (e1kImperfectMatch(pThis, pvBuf))
6551 return true;
6552 E1kLog2(("%s Packet filter: no imperfect match\n", pThis->szPrf));
6553 }
6554 else {
6555 /* Is unicast promiscuous enabled? */
6556 if (RCTL & RCTL_UPE)
6557 return true;
6558 E1kLog2(("%s Packet filter: no promiscuous unicast\n", pThis->szPrf));
6559 if (e1kPerfectMatch(pThis, pvBuf))
6560 {
6561 pStatus->fPIF = true;
6562 return true;
6563 }
6564 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6565 }
6566 E1kLog2(("%s Packet filter: packet discarded\n", pThis->szPrf));
6567 return false;
6568}
6569
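/*
 * For reference, the filter cascade implemented above, in evaluation order
 * (the first rule that applies decides):
 *   1. oversized frame (or > 1522 bytes with RCTL.LPE clear)      -> drop
 *   2. VLAN-tagged and RCTL.VFE set: CFI or VFTA mismatch         -> drop
 *   3. broadcast and RCTL.BAM set                                 -> accept
 *   4. multicast: RCTL.MPE, perfect RA match (PIF), or MTA match  -> accept
 *   5. unicast:   RCTL.UPE or perfect RA match (PIF)              -> accept
 *   6. anything else                                              -> drop
 */
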
6570/**
6571 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6572 */
6573static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6574{
6575 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6576 int rc = VINF_SUCCESS;
6577
6578 /*
6579 * Drop packets if the VM is not running yet/anymore.
6580 */
6581 VMSTATE enmVMState = PDMDevHlpVMState(STATE_TO_DEVINS(pThis));
6582 if ( enmVMState != VMSTATE_RUNNING
6583 && enmVMState != VMSTATE_RUNNING_LS)
6584 {
6585 E1kLog(("%s Dropping incoming packet as VM is not running.\n", pThis->szPrf));
6586 return VINF_SUCCESS;
6587 }
6588
6589 /* Discard incoming packets in locked state */
6590 if (!(RCTL & RCTL_EN) || pThis->fLocked || !(STATUS & STATUS_LU))
6591 {
6592 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", pThis->szPrf));
6593 return VINF_SUCCESS;
6594 }
6595
6596 STAM_PROFILE_ADV_START(&pThis->StatReceive, a);
6597
6598 //if (!e1kCsEnter(pThis, RT_SRC_POS))
6599 // return VERR_PERMISSION_DENIED;
6600
6601 e1kPacketDump(pThis, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6602
6603 /* Update stats */
6604 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
6605 {
6606 E1K_INC_CNT32(TPR);
6607 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6608 e1kCsLeave(pThis);
6609 }
6610 STAM_PROFILE_ADV_START(&pThis->StatReceiveFilter, a);
6611 E1KRXDST status;
6612 RT_ZERO(status);
6613 bool fPassed = e1kAddressFilter(pThis, pvBuf, cb, &status);
6614 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveFilter, a);
6615 if (fPassed)
6616 {
6617 rc = e1kHandleRxPacket(pThis, pvBuf, cb, status);
6618 }
6619 //e1kCsLeave(pThis);
6620 STAM_PROFILE_ADV_STOP(&pThis->StatReceive, a);
6621
6622 return rc;
6623}
6624
6625
6626/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
6627
6628/**
6629 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
6630 */
6631static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6632{
6633 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, ILeds);
6634 int rc = VERR_PDM_LUN_NOT_FOUND;
6635
6636 if (iLUN == 0)
6637 {
6638 *ppLed = &pThis->led;
6639 rc = VINF_SUCCESS;
6640 }
6641 return rc;
6642}
6643
6644
6645/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
6646
6647/**
6648 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
6649 */
6650static DECLCALLBACK(int) e1kR3GetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6651{
6652 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6653 pThis->eeprom.getMac(pMac);
6654 return VINF_SUCCESS;
6655}
6656
6657/**
6658 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
6659 */
6660static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kR3GetLinkState(PPDMINETWORKCONFIG pInterface)
6661{
6662 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6663 if (STATUS & STATUS_LU)
6664 return PDMNETWORKLINKSTATE_UP;
6665 return PDMNETWORKLINKSTATE_DOWN;
6666}
6667
6668/**
6669 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
6670 */
6671static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6672{
6673 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6674
6675 E1kLog(("%s e1kR3SetLinkState: enmState=%d\n", pThis->szPrf, enmState));
6676 switch (enmState)
6677 {
6678 case PDMNETWORKLINKSTATE_UP:
6679 pThis->fCableConnected = true;
6680 /* If link was down, bring it up after a while. */
6681 if (!(STATUS & STATUS_LU))
6682 e1kBringLinkUpDelayed(pThis);
6683 break;
6684 case PDMNETWORKLINKSTATE_DOWN:
6685 pThis->fCableConnected = false;
6686 /* Always set the phy link state to down, regardless of the STATUS_LU bit.
6687 * We might have to set the link state before the driver initializes us. */
6688 Phy::setLinkStatus(&pThis->phy, false);
6689 /* If link was up, bring it down. */
6690 if (STATUS & STATUS_LU)
6691 e1kR3LinkDown(pThis);
6692 break;
6693 case PDMNETWORKLINKSTATE_DOWN_RESUME:
6694 /*
6695 * There is not much sense in bringing down the link if it has not come up yet.
6696 * If it is up though, we bring it down temporarily, then bring it up again.
6697 */
6698 if (STATUS & STATUS_LU)
6699 e1kR3LinkDownTemp(pThis);
6700 break;
6701 default:
6702 ;
6703 }
6704 return VINF_SUCCESS;
6705}
6706
6707
6708/* -=-=-=-=- PDMIBASE -=-=-=-=- */
6709
6710/**
6711 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6712 */
6713static DECLCALLBACK(void *) e1kR3QueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6714{
6715 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, IBase);
6716 Assert(&pThis->IBase == pInterface);
6717
6718 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
6719 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThis->INetworkDown);
6720 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThis->INetworkConfig);
6721 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThis->ILeds);
6722 return NULL;
6723}
6724
6725
6726/* -=-=-=-=- Saved State -=-=-=-=- */
6727
6728/**
6729 * Saves the configuration.
6730 *
6731 * @param pThis The E1K state.
6732 * @param pSSM The handle to the saved state.
6733 */
6734static void e1kSaveConfig(PE1KSTATE pThis, PSSMHANDLE pSSM)
6735{
6736 SSMR3PutMem(pSSM, &pThis->macConfigured, sizeof(pThis->macConfigured));
6737 SSMR3PutU32(pSSM, pThis->eChip);
6738}
6739
6740/**
6741 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
6742 */
6743static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
6744{
6745 RT_NOREF(uPass);
6746 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6747 e1kSaveConfig(pThis, pSSM);
6748 return VINF_SSM_DONT_CALL_AGAIN;
6749}
6750
6751/**
6752 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
6753 */
6754static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6755{
6756 RT_NOREF(pSSM);
6757 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6758
6759 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6760 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6761 return rc;
6762 e1kCsLeave(pThis);
6763 return VINF_SUCCESS;
6764#if 0
6765 /* 1) Prevent all threads from modifying the state and memory */
6766 //pThis->fLocked = true;
6767 /* 2) Cancel all timers */
6768#ifdef E1K_TX_DELAY
6769 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
6770#endif /* E1K_TX_DELAY */
6771//#ifdef E1K_USE_TX_TIMERS
6772 if (pThis->fTidEnabled)
6773 {
6774 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
6775#ifndef E1K_NO_TAD
6776 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
6777#endif /* E1K_NO_TAD */
6778 }
6779//#endif /* E1K_USE_TX_TIMERS */
6780#ifdef E1K_USE_RX_TIMERS
6781 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
6782 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
6783#endif /* E1K_USE_RX_TIMERS */
6784 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
6785 /* 3) Did I forget anything? */
6786 E1kLog(("%s Locked\n", pThis->szPrf));
6787 return VINF_SUCCESS;
6788#endif
6789}
6790
6791/**
6792 * @callback_method_impl{FNSSMDEVSAVEEXEC}
6793 */
6794static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6795{
6796 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6797
6798 e1kSaveConfig(pThis, pSSM);
6799 pThis->eeprom.save(pSSM);
6800 e1kDumpState(pThis);
6801 SSMR3PutMem(pSSM, pThis->auRegs, sizeof(pThis->auRegs));
6802 SSMR3PutBool(pSSM, pThis->fIntRaised);
6803 Phy::saveState(pSSM, &pThis->phy);
6804 SSMR3PutU32(pSSM, pThis->uSelectedReg);
6805 SSMR3PutMem(pSSM, pThis->auMTA, sizeof(pThis->auMTA));
6806 SSMR3PutMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6807 SSMR3PutMem(pSSM, pThis->auVFTA, sizeof(pThis->auVFTA));
6808 SSMR3PutU64(pSSM, pThis->u64AckedAt);
6809 SSMR3PutU16(pSSM, pThis->u16RxBSize);
6810 //SSMR3PutBool(pSSM, pThis->fDelayInts);
6811 //SSMR3PutBool(pSSM, pThis->fIntMaskUsed);
6812 SSMR3PutU16(pSSM, pThis->u16TxPktLen);
6813/** @todo State wrt the TSE buffer is incomplete, so there is little point in
6814 * saving this actually. */
6815 SSMR3PutMem(pSSM, pThis->aTxPacketFallback, pThis->u16TxPktLen);
6816 SSMR3PutBool(pSSM, pThis->fIPcsum);
6817 SSMR3PutBool(pSSM, pThis->fTCPcsum);
6818 SSMR3PutMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6819 SSMR3PutMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6820 SSMR3PutBool(pSSM, pThis->fVTag);
6821 SSMR3PutU16(pSSM, pThis->u16VTagTCI);
6822#ifdef E1K_WITH_TXD_CACHE
6823#if 0
6824 SSMR3PutU8(pSSM, pThis->nTxDFetched);
6825 SSMR3PutMem(pSSM, pThis->aTxDescriptors,
6826 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6827#else
6828 /*
6829 * There is no point in storing TX descriptor cache entries as we can simply
6830 * fetch them again. Moreover, normally the cache is always empty when we
6831 * save the state. Store zero entries for compatibility.
6832 */
6833 SSMR3PutU8(pSSM, 0);
6834#endif
6835#endif /* E1K_WITH_TXD_CACHE */
6836/** @todo GSO requires some more state here. */
6837 E1kLog(("%s State has been saved\n", pThis->szPrf));
6838 return VINF_SUCCESS;
6839}
6840
6841#if 0
6842/**
6843 * @callback_method_impl{FNSSMDEVSAVEDONE}
6844 */
6845static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6846{
6847 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6848
6849 /* If VM is being powered off unlocking will result in assertions in PGM */
6850 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
6851 pThis->fLocked = false;
6852 else
6853 E1kLog(("%s VM is not running -- remain locked\n", pThis->szPrf));
6854 E1kLog(("%s Unlocked\n", pThis->szPrf));
6855 return VINF_SUCCESS;
6856}
6857#endif
6858
6859/**
6860 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
6861 */
6862static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6863{
6864 RT_NOREF(pSSM);
6865 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6866
6867 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6868 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6869 return rc;
6870 e1kCsLeave(pThis);
6871 return VINF_SUCCESS;
6872}
6873
6874/**
6875 * @callback_method_impl{FNSSMDEVLOADEXEC}
6876 */
6877static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
6878{
6879 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6880 int rc;
6881
6882 if ( uVersion != E1K_SAVEDSTATE_VERSION
6883#ifdef E1K_WITH_TXD_CACHE
6884 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
6885#endif /* E1K_WITH_TXD_CACHE */
6886 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
6887 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
6888 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
6889
6890 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
6891 || uPass != SSM_PASS_FINAL)
6892 {
6893 /* config checks */
6894 RTMAC macConfigured;
6895 rc = SSMR3GetMem(pSSM, &macConfigured, sizeof(macConfigured));
6896 AssertRCReturn(rc, rc);
6897 if ( memcmp(&macConfigured, &pThis->macConfigured, sizeof(macConfigured))
6898 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
6899 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
6900
6901 E1KCHIP eChip;
6902 rc = SSMR3GetU32(pSSM, &eChip);
6903 AssertRCReturn(rc, rc);
6904 if (eChip != pThis->eChip)
6905 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
6906 }
6907
6908 if (uPass == SSM_PASS_FINAL)
6909 {
6910 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
6911 {
6912 rc = pThis->eeprom.load(pSSM);
6913 AssertRCReturn(rc, rc);
6914 }
6915 /* the state */
6916 SSMR3GetMem(pSSM, &pThis->auRegs, sizeof(pThis->auRegs));
6917 SSMR3GetBool(pSSM, &pThis->fIntRaised);
6918 /** @todo PHY could be made a separate device with its own versioning */
6919 Phy::loadState(pSSM, &pThis->phy);
6920 SSMR3GetU32(pSSM, &pThis->uSelectedReg);
6921 SSMR3GetMem(pSSM, &pThis->auMTA, sizeof(pThis->auMTA));
6922 SSMR3GetMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6923 SSMR3GetMem(pSSM, &pThis->auVFTA, sizeof(pThis->auVFTA));
6924 SSMR3GetU64(pSSM, &pThis->u64AckedAt);
6925 SSMR3GetU16(pSSM, &pThis->u16RxBSize);
6926 //SSMR3GetBool(pSSM, pThis->fDelayInts);
6927 //SSMR3GetBool(pSSM, pThis->fIntMaskUsed);
6928 SSMR3GetU16(pSSM, &pThis->u16TxPktLen);
6929 SSMR3GetMem(pSSM, &pThis->aTxPacketFallback[0], pThis->u16TxPktLen);
6930 SSMR3GetBool(pSSM, &pThis->fIPcsum);
6931 SSMR3GetBool(pSSM, &pThis->fTCPcsum);
6932 SSMR3GetMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6933 rc = SSMR3GetMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6934 AssertRCReturn(rc, rc);
6935 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
6936 {
6937 SSMR3GetBool(pSSM, &pThis->fVTag);
6938 rc = SSMR3GetU16(pSSM, &pThis->u16VTagTCI);
6939 AssertRCReturn(rc, rc);
6940 }
6941 else
6942 {
6943 pThis->fVTag = false;
6944 pThis->u16VTagTCI = 0;
6945 }
6946#ifdef E1K_WITH_TXD_CACHE
6947 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
6948 {
6949 rc = SSMR3GetU8(pSSM, &pThis->nTxDFetched);
6950 AssertRCReturn(rc, rc);
6951 if (pThis->nTxDFetched)
6952 SSMR3GetMem(pSSM, pThis->aTxDescriptors,
6953 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6954 }
6955 else
6956 pThis->nTxDFetched = 0;
6957 /*
6958 * @todo Perhaps we should not store the TXD cache as the entries can be
6959 * simply fetched again from the guest's memory. Or can't they?
6960 */
6961#endif /* E1K_WITH_TXD_CACHE */
6962#ifdef E1K_WITH_RXD_CACHE
6963 /*
6964 * There is no point in storing the RX descriptor cache in the saved
6965 * state, we just need to make sure it is empty.
6966 */
6967 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
6968#endif /* E1K_WITH_RXD_CACHE */
6969 /* derived state */
6970 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
6971
6972 E1kLog(("%s State has been restored\n", pThis->szPrf));
6973 e1kDumpState(pThis);
6974 }
6975 return VINF_SUCCESS;
6976}
6977
6978/**
6979 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
6980 */
6981static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6982{
6983 RT_NOREF(pSSM);
6984 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6985
6986 /* Update promiscuous mode */
6987 if (pThis->pDrvR3)
6988 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3,
6989 !!(RCTL & (RCTL_UPE | RCTL_MPE)));
6990
6991 /*
6992 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
6993 * passed to us. We go through all this stuff if the link was up and we
6994 * weren't teleported.
6995 */
6996 if ( (STATUS & STATUS_LU)
6997 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
6998 && pThis->cMsLinkUpDelay)
6999 {
7000 e1kR3LinkDownTemp(pThis);
7001 }
7002 return VINF_SUCCESS;
7003}
7004
7005
7006
7007/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
7008
7009/**
7010 * @callback_method_impl{FNRTSTRFORMATTYPE}
7011 */
7012static DECLCALLBACK(size_t) e1kFmtRxDesc(PFNRTSTROUTPUT pfnOutput,
7013 void *pvArgOutput,
7014 const char *pszType,
7015 void const *pvValue,
7016 int cchWidth,
7017 int cchPrecision,
7018 unsigned fFlags,
7019 void *pvUser)
7020{
7021 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7022 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
7023 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
7024 if (!pDesc)
7025 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
7026
7027 size_t cbPrintf = 0;
7028 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
7029 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
7030 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
7031 pDesc->status.fPIF ? "PIF" : "pif",
7032 pDesc->status.fIPCS ? "IPCS" : "ipcs",
7033 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
7034 pDesc->status.fVP ? "VP" : "vp",
7035 pDesc->status.fIXSM ? "IXSM" : "ixsm",
7036 pDesc->status.fEOP ? "EOP" : "eop",
7037 pDesc->status.fDD ? "DD" : "dd",
7038 pDesc->status.fRXE ? "RXE" : "rxe",
7039 pDesc->status.fIPE ? "IPE" : "ipe",
7040 pDesc->status.fTCPE ? "TCPE" : "tcpe",
7041 pDesc->status.fCE ? "CE" : "ce",
7042 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
7043 E1K_SPEC_VLAN(pDesc->status.u16Special),
7044 E1K_SPEC_PRI(pDesc->status.u16Special));
7045 return cbPrintf;
7046}
7047
7048/**
7049 * @callback_method_impl{FNRTSTRFORMATTYPE}
7050 */
7051static DECLCALLBACK(size_t) e1kFmtTxDesc(PFNRTSTROUTPUT pfnOutput,
7052 void *pvArgOutput,
7053 const char *pszType,
7054 void const *pvValue,
7055 int cchWidth,
7056 int cchPrecision,
7057 unsigned fFlags,
7058 void *pvUser)
7059{
7060 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7061 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
7062 E1KTXDESC *pDesc = (E1KTXDESC*)pvValue;
7063 if (!pDesc)
7064 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
7065
7066 size_t cbPrintf = 0;
7067 switch (e1kGetDescType(pDesc))
7068 {
7069 case E1K_DTYP_CONTEXT:
7070 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
7071 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
7072 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
7073 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
7074 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
7075 pDesc->context.dw2.fIDE ? " IDE":"",
7076 pDesc->context.dw2.fRS ? " RS" :"",
7077 pDesc->context.dw2.fTSE ? " TSE":"",
7078 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
7079 pDesc->context.dw2.fTCP ? "TCP":"UDP",
7080 pDesc->context.dw2.u20PAYLEN,
7081 pDesc->context.dw3.u8HDRLEN,
7082 pDesc->context.dw3.u16MSS,
7083 pDesc->context.dw3.fDD?"DD":"");
7084 break;
7085 case E1K_DTYP_DATA:
7086 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
7087 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
7088 pDesc->data.u64BufAddr,
7089 pDesc->data.cmd.u20DTALEN,
7090 pDesc->data.cmd.fIDE ? " IDE" :"",
7091 pDesc->data.cmd.fVLE ? " VLE" :"",
7092 pDesc->data.cmd.fRPS ? " RPS" :"",
7093 pDesc->data.cmd.fRS ? " RS" :"",
7094 pDesc->data.cmd.fTSE ? " TSE" :"",
7095 pDesc->data.cmd.fIFCS? " IFCS":"",
7096 pDesc->data.cmd.fEOP ? " EOP" :"",
7097 pDesc->data.dw3.fDD ? " DD" :"",
7098 pDesc->data.dw3.fEC ? " EC" :"",
7099 pDesc->data.dw3.fLC ? " LC" :"",
7100 pDesc->data.dw3.fTXSM? " TXSM":"",
7101 pDesc->data.dw3.fIXSM? " IXSM":"",
7102 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
7103 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
7104 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
7105 break;
7106 case E1K_DTYP_LEGACY:
7107 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
7108 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
7109 pDesc->data.u64BufAddr,
7110 pDesc->legacy.cmd.u16Length,
7111 pDesc->legacy.cmd.fIDE ? " IDE" :"",
7112 pDesc->legacy.cmd.fVLE ? " VLE" :"",
7113 pDesc->legacy.cmd.fRPS ? " RPS" :"",
7114 pDesc->legacy.cmd.fRS ? " RS" :"",
7115 pDesc->legacy.cmd.fIC ? " IC" :"",
7116 pDesc->legacy.cmd.fIFCS? " IFCS":"",
7117 pDesc->legacy.cmd.fEOP ? " EOP" :"",
7118 pDesc->legacy.dw3.fDD ? " DD" :"",
7119 pDesc->legacy.dw3.fEC ? " EC" :"",
7120 pDesc->legacy.dw3.fLC ? " LC" :"",
7121 pDesc->legacy.cmd.u8CSO,
7122 pDesc->legacy.dw3.u8CSS,
7123 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
7124 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
7125 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
7126 break;
7127 default:
7128 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
7129 break;
7130 }
7131
7132 return cbPrintf;
7133}
7134
7135/** Initializes debug helpers (logging format types). */
7136static int e1kInitDebugHelpers(void)
7137{
7138 int rc = VINF_SUCCESS;
7139 static bool s_fHelpersRegistered = false;
7140 if (!s_fHelpersRegistered)
7141 {
7142 s_fHelpersRegistered = true;
7143 rc = RTStrFormatTypeRegister("e1krxd", e1kFmtRxDesc, NULL);
7144 AssertRCReturn(rc, rc);
7145 rc = RTStrFormatTypeRegister("e1ktxd", e1kFmtTxDesc, NULL);
7146 AssertRCReturn(rc, rc);
7147 }
7148 return rc;
7149}
7150
7151/**
7152 * Status info callback.
7153 *
7154 * @param pDevIns The device instance.
7155 * @param pHlp The output helpers.
7156 * @param pszArgs The arguments.
7157 */
7158static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
7159{
7160 RT_NOREF(pszArgs);
7161 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7162 unsigned i;
7163 // bool fRcvRing = false;
7164 // bool fXmtRing = false;
7165
7166 /*
7167 * Parse args.
7168 if (pszArgs)
7169 {
7170 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
7171 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
7172 }
7173 */
7174
7175 /*
7176 * Show info.
7177 */
7178 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%RTiop mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7179 pDevIns->iInstance, pThis->IOPortBase, pThis->addrMMReg,
7180 &pThis->macConfigured, g_aChips[pThis->eChip].pcszName,
7181 pThis->fRCEnabled ? " GC" : "", pThis->fR0Enabled ? " R0" : "");
7182
7183 e1kCsEnter(pThis, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
7184
7185 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7186 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", g_aE1kRegMap[i].abbrev, pThis->auRegs[i]);
7187
7188 for (i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
7189 {
7190 E1KRAELEM* ra = pThis->aRecAddr.array + i;
7191 if (ra->ctl & RA_CTL_AV)
7192 {
7193 const char *pcszTmp;
7194 switch (ra->ctl & RA_CTL_AS)
7195 {
7196 case 0: pcszTmp = "DST"; break;
7197 case 1: pcszTmp = "SRC"; break;
7198 default: pcszTmp = "reserved";
7199 }
7200 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7201 }
7202 }
7203 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7204 uint32_t rdh = RDH;
7205 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7206 for (i = 0; i < cDescs; ++i)
7207 {
7208 E1KRXDESC desc;
7209 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7210 &desc, sizeof(desc));
7211 if (i == rdh)
7212 pHlp->pfnPrintf(pHlp, ">>> ");
7213 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7214 }
7215#ifdef E1K_WITH_RXD_CACHE
7216 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7217 pThis->iRxDCurrent, RDH, pThis->nRxDFetched, E1K_RXD_CACHE_SIZE);
7218 if (rdh > pThis->iRxDCurrent)
7219 rdh -= pThis->iRxDCurrent;
7220 else
7221 rdh = cDescs + rdh - pThis->iRxDCurrent;
7222 for (i = 0; i < pThis->nRxDFetched; ++i)
7223 {
7224 if (i == pThis->iRxDCurrent)
7225 pHlp->pfnPrintf(pHlp, ">>> ");
7226 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7227 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7228 &pThis->aRxDescriptors[i]);
7229 }
7230#endif /* E1K_WITH_RXD_CACHE */
7231
7232 cDescs = TDLEN / sizeof(E1KTXDESC);
7233 uint32_t tdh = TDH;
7234 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7235 for (i = 0; i < cDescs; ++i)
7236 {
7237 E1KTXDESC desc;
7238 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7239 &desc, sizeof(desc));
7240 if (i == tdh)
7241 pHlp->pfnPrintf(pHlp, ">>> ");
7242 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7243 }
7244#ifdef E1K_WITH_TXD_CACHE
7245 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7246 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE);
7247 if (tdh > pThis->iTxDCurrent)
7248 tdh -= pThis->iTxDCurrent;
7249 else
7250 tdh = cDescs + tdh - pThis->iTxDCurrent;
7251 for (i = 0; i < pThis->nTxDFetched; ++i)
7252 {
7253 if (i == pThis->iTxDCurrent)
7254 pHlp->pfnPrintf(pHlp, ">>> ");
7255 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7256 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7257 &pThis->aTxDescriptors[i]);
7258 }
7259#endif /* E1K_WITH_TXD_CACHE */
7260
7261
7262#ifdef E1K_INT_STATS
7263 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pThis->uStatIntTry);
7264 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pThis->uStatInt);
7265 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pThis->uStatIntLower);
7266 pHlp->pfnPrintf(pHlp, "ICR outside ISR : %d\n", pThis->uStatNoIntICR);
7267 pHlp->pfnPrintf(pHlp, "IMS raised ints : %d\n", pThis->uStatIntIMS);
7268 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pThis->uStatIntSkip);
7269 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pThis->uStatIntMasked);
7270 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pThis->uStatIntEarly);
7271 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pThis->uStatIntLate);
7272 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pThis->iStatIntLost);
7273 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pThis->uStatIntRx);
7274 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pThis->uStatIntTx);
7275 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pThis->uStatIntICS);
7276 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pThis->uStatIntRDTR);
7277 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pThis->uStatIntRXDMT0);
7278 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pThis->uStatIntTXQE);
7279 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pThis->uStatTxIDE);
7280 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pThis->uStatTxDelayed);
7281 pHlp->pfnPrintf(pHlp, "TX delayed expired: %d\n", pThis->uStatTxDelayExp);
7282 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pThis->uStatTxNoRS);
7283 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pThis->uStatTAD);
7284 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pThis->uStatTID);
7285 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pThis->uStatRAD);
7286 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pThis->uStatRID);
7287 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pThis->uStatDescCtx);
7288 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pThis->uStatDescDat);
7289 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pThis->uStatDescLeg);
7290 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pThis->uStatRxFrm);
7291 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pThis->uStatTxFrm);
7292 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pThis->uStatTx1514);
7293 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pThis->uStatTx2962);
7294 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pThis->uStatTx4410);
7295 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pThis->uStatTx5858);
7296 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pThis->uStatTx7306);
7297 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pThis->uStatTx8754);
7298 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pThis->uStatTx16384);
7299 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pThis->uStatTx32768);
7300 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pThis->uStatTxLarge);
7301#endif /* E1K_INT_STATS */
7302
7303 e1kCsLeave(pThis);
7304}
7305
7306
7307
7308/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
7309
7310/**
7311 * Detach notification.
7312 *
7313 * One port on the network card has been disconnected from the network.
7314 *
7315 * @param pDevIns The device instance.
7316 * @param iLUN The logical unit which is being detached.
7317 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7318 */
7319static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7320{
7321 RT_NOREF(fFlags);
7322 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7323 Log(("%s e1kR3Detach:\n", pThis->szPrf));
7324
7325 AssertLogRelReturnVoid(iLUN == 0);
7326
7327 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7328
7329 /** @todo r=pritesh still need to check if I missed
7330 * cleaning something up in this function
7331 */
7332
7333 /*
7334 * Zero some important members.
7335 */
7336 pThis->pDrvBase = NULL;
7337 pThis->pDrvR3 = NULL;
7338 pThis->pDrvR0 = NIL_RTR0PTR;
7339 pThis->pDrvRC = NIL_RTRCPTR;
7340
7341 PDMCritSectLeave(&pThis->cs);
7342}
7343
7344/**
7345 * Attach the Network attachment.
7346 *
7347 * One port on the network card has been connected to a network.
7348 *
7349 * @returns VBox status code.
7350 * @param pDevIns The device instance.
7351 * @param iLUN The logical unit which is being attached.
7352 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7353 *
7354 * @remarks This code path is not used during construction.
7355 */
7356static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7357{
7358 RT_NOREF(fFlags);
7359 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7360 LogFlow(("%s e1kR3Attach:\n", pThis->szPrf));
7361
7362 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7363
7364 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7365
7366 /*
7367 * Attach the driver.
7368 */
7369 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7370 if (RT_SUCCESS(rc))
7371 {
7372 if (rc == VINF_NAT_DNS)
7373 {
7374#ifdef RT_OS_LINUX
7375 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7376 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7377#else
7378 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7379 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7380#endif
7381 }
7382 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7383 AssertMsgStmt(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7384 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7385 if (RT_SUCCESS(rc))
7386 {
7387 PPDMIBASER0 pBaseR0 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0);
7388 pThis->pDrvR0 = pBaseR0 ? pBaseR0->pfnQueryInterface(pBaseR0, PDMINETWORKUP_IID) : NIL_RTR0PTR;
7389
7390 PPDMIBASERC pBaseRC = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC);
7391 pThis->pDrvRC = pBaseRC ? pBaseRC->pfnQueryInterface(pBaseRC, PDMINETWORKUP_IID) : NIL_RTRCPTR;
7392 }
7393 }
7394 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7395 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7396 {
7397 /* This should never happen because this function is not called
7398 * if there is no driver to attach! */
7399 Log(("%s No attached driver!\n", pThis->szPrf));
7400 }
7401
7402 /*
7403 * Temporarily set the link down if it was up so that the guest
7404 * will know that we have changed the configuration of the
7405 * network card.
7406 */
7407 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7408 e1kR3LinkDownTemp(pThis);
7409
7410 PDMCritSectLeave(&pThis->cs);
7411 return rc;
7412
7413}
7414
7415/**
7416 * @copydoc FNPDMDEVPOWEROFF
7417 */
7418static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7419{
7420 /* Poke thread waiting for buffer space. */
7421 e1kWakeupReceive(pDevIns);
7422}
7423
7424/**
7425 * @copydoc FNPDMDEVRESET
7426 */
7427static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7428{
7429 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7430#ifdef E1K_TX_DELAY
7431 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
7432#endif /* E1K_TX_DELAY */
7433 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
7434 e1kCancelTimer(pThis, pThis->CTX_SUFF(pLUTimer));
7435 e1kXmitFreeBuf(pThis);
7436 pThis->u16TxPktLen = 0;
7437 pThis->fIPcsum = false;
7438 pThis->fTCPcsum = false;
7439 pThis->fIntMaskUsed = false;
7440 pThis->fDelayInts = false;
7441 pThis->fLocked = false;
7442 pThis->u64AckedAt = 0;
7443 e1kHardReset(pThis);
7444}
7445
7446/**
7447 * @copydoc FNPDMDEVSUSPEND
7448 */
7449static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7450{
7451 /* Poke thread waiting for buffer space. */
7452 e1kWakeupReceive(pDevIns);
7453}
7454
7455/**
7456 * Device relocation callback.
7457 *
7458 * When this callback is called, the device instance data (and, if the
7459 * device has a GC component, its GC code) is being relocated and/or the
7460 * selectors have been changed. The device must use this chance to perform
7461 * the necessary pointer relocations and data updates.
7462 *
7463 * Before the GC code is executed for the first time, this function will be
7464 * called with a 0 delta so that GC pointer calculations can be done in one place.
7465 *
7466 * @param pDevIns Pointer to the device instance.
7467 * @param offDelta The relocation delta relative to the old location.
7468 *
7469 * @remark A relocation CANNOT fail.
7470 */
7471static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7472{
7473 RT_NOREF(offDelta);
7474 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7475 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7476 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7477 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7478#ifdef E1K_USE_RX_TIMERS
7479 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7480 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7481#endif /* E1K_USE_RX_TIMERS */
7482//#ifdef E1K_USE_TX_TIMERS
7483 if (pThis->fTidEnabled)
7484 {
7485 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7486# ifndef E1K_NO_TAD
7487 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7488# endif /* E1K_NO_TAD */
7489 }
7490//#endif /* E1K_USE_TX_TIMERS */
7491#ifdef E1K_TX_DELAY
7492 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7493#endif /* E1K_TX_DELAY */
7494 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7495 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7496}
7497
7498/**
7499 * Destruct a device instance.
7500 *
7501 * We need to free non-VM resources only.
7502 *
7503 * @returns VBox status code.
7504 * @param pDevIns The device instance data.
7505 * @thread EMT
7506 */
7507static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7508{
7509 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7510 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7511
7512 e1kDumpState(pThis);
7513 E1kLog(("%s Destroying instance\n", pThis->szPrf));
7514 if (PDMCritSectIsInitialized(&pThis->cs))
7515 {
7516 if (pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
7517 {
7518 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
7519 RTSemEventDestroy(pThis->hEventMoreRxDescAvail);
7520 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7521 }
7522#ifdef E1K_WITH_TX_CS
7523 PDMR3CritSectDelete(&pThis->csTx);
7524#endif /* E1K_WITH_TX_CS */
7525 PDMR3CritSectDelete(&pThis->csRx);
7526 PDMR3CritSectDelete(&pThis->cs);
7527 }
7528 return VINF_SUCCESS;
7529}
7530
7531
7532/**
7533 * Set PCI configuration space registers.
7534 *
7535 * @param pPciDev Pointer to the PCI device structure.
7536 * @thread EMT
7537 */
7538static DECLCALLBACK(void) e1kConfigurePciDev(PPDMPCIDEV pPciDev, E1KCHIP eChip)
7539{
7540 Assert(eChip < RT_ELEMENTS(g_aChips));
7541 /* Configure PCI Device, assume 32-bit mode ******************************/
7542 PCIDevSetVendorId(pPciDev, g_aChips[eChip].uPCIVendorId);
7543 PCIDevSetDeviceId(pPciDev, g_aChips[eChip].uPCIDeviceId);
7544 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_aChips[eChip].uPCISubsystemVendorId);
7545 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_aChips[eChip].uPCISubsystemId);
7546
7547 PCIDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7548 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7549 PCIDevSetWord( pPciDev, VBOX_PCI_STATUS,
7550 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7551 /* Stepping A2 */
7552 PCIDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7553 /* Ethernet adapter */
7554 PCIDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7555 PCIDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7556 /* normal single function Ethernet controller */
7557 PCIDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7558 /* Memory Register Base Address */
7559 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7560 /* Memory Flash Base Address */
7561 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7562 /* IO Register Base Address */
7563 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7564 /* Expansion ROM Base Address */
7565 PCIDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7566 /* Capabilities Pointer */
7567 PCIDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7568 /* Interrupt Pin: INTA# */
7569 PCIDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7570 /* Max_Lat/Min_Gnt: very high priority and time slice */
7571 PCIDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7572 PCIDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7573
7574 /* PCI Power Management Registers ****************************************/
7575 /* Capability ID: PCI Power Management Registers */
7576 PCIDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7577 /* Next Item Pointer: PCI-X */
7578 PCIDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7579 /* Power Management Capabilities: PM disabled, DSI */
7580 PCIDevSetWord( pPciDev, 0xDC + 2,
7581 0x0002 | VBOX_PCI_PM_CAP_DSI);
7582 /* Power Management Control / Status Register: PM disabled */
7583 PCIDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7584 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7585 PCIDevSetByte( pPciDev, 0xDC + 6, 0x00);
7586 /* Data Register: PM disabled, always 0 */
7587 PCIDevSetByte( pPciDev, 0xDC + 7, 0x00);
7588
7589 /* PCI-X Configuration Registers *****************************************/
7590 /* Capability ID: PCI-X Configuration Registers */
7591 PCIDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7592#ifdef E1K_WITH_MSI
7593 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x80);
7594#else
7595 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7596 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7597#endif
7598 /* PCI-X Command: Enable Relaxed Ordering */
7599 PCIDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7600 /* PCI-X Status: 32-bit, 66MHz*/
7601 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7602 PCIDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
7603}
7604
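/*
 * For reference, the PCI capability chain programmed above:
 *   VBOX_PCI_CAPABILITY_LIST (0x34) -> 0xDC : Power Management capability
 *   0xDC + 1 (next pointer)         -> 0xE4 : PCI-X capability
 *   0xE4 + 1 (next pointer)         -> 0x80 : MSI capability if E1K_WITH_MSI
 *                                             is defined, 0x00 (end of list)
 *                                             otherwise.
 */
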
7605/**
7606 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7607 */
7608static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7609{
7610 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7611 int rc;
7612 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7613
7614 /*
7615 * Initialize the instance data (state).
7616 * Note! Caller has initialized it to ZERO already.
7617 */
7618 RTStrPrintf(pThis->szPrf, sizeof(pThis->szPrf), "E1000#%d", iInstance);
7619 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", pThis->szPrf, sizeof(E1KRXDESC)));
7620 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7621 pThis->pDevInsR3 = pDevIns;
7622 pThis->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
7623 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7624 pThis->u16TxPktLen = 0;
7625 pThis->fIPcsum = false;
7626 pThis->fTCPcsum = false;
7627 pThis->fIntMaskUsed = false;
7628 pThis->fDelayInts = false;
7629 pThis->fLocked = false;
7630 pThis->u64AckedAt = 0;
7631 pThis->led.u32Magic = PDMLED_MAGIC;
7632 pThis->u32PktNo = 1;
7633
7634 /* Interfaces */
7635 pThis->IBase.pfnQueryInterface = e1kR3QueryInterface;
7636
7637 pThis->INetworkDown.pfnWaitReceiveAvail = e1kR3NetworkDown_WaitReceiveAvail;
7638 pThis->INetworkDown.pfnReceive = e1kR3NetworkDown_Receive;
7639 pThis->INetworkDown.pfnXmitPending = e1kR3NetworkDown_XmitPending;
7640
7641 pThis->ILeds.pfnQueryStatusLed = e1kR3QueryStatusLed;
7642
7643 pThis->INetworkConfig.pfnGetMac = e1kR3GetMac;
7644 pThis->INetworkConfig.pfnGetLinkState = e1kR3GetLinkState;
7645 pThis->INetworkConfig.pfnSetLinkState = e1kR3SetLinkState;
7646
7647 /*
7648 * Internal validations.
7649 */
7650 for (uint32_t iReg = 1; iReg < E1K_NUM_OF_BINARY_SEARCHABLE; iReg++)
7651 AssertLogRelMsgReturn( g_aE1kRegMap[iReg].offset > g_aE1kRegMap[iReg - 1].offset
7652 && g_aE1kRegMap[iReg].offset + g_aE1kRegMap[iReg].size
7653 >= g_aE1kRegMap[iReg - 1].offset + g_aE1kRegMap[iReg - 1].size,
7654 ("%s@%#xLB%#x vs %s@%#xLB%#x\n",
7655 g_aE1kRegMap[iReg].abbrev, g_aE1kRegMap[iReg].offset, g_aE1kRegMap[iReg].size,
7656 g_aE1kRegMap[iReg - 1].abbrev, g_aE1kRegMap[iReg - 1].offset, g_aE1kRegMap[iReg - 1].size),
7657 VERR_INTERNAL_ERROR_4);
7658
7659 /*
7660 * Validate configuration.
7661 */
7662 if (!CFGMR3AreValuesValid(pCfg, "MAC\0" "CableConnected\0" "AdapterType\0"
7663 "LineSpeed\0" "GCEnabled\0" "R0Enabled\0"
7664 "ItrEnabled\0" "ItrRxEnabled\0"
7665 "EthernetCRC\0" "GSOEnabled\0" "LinkUpDelay\0"))
7666 return PDMDEV_SET_ERROR(pDevIns, VERR_PDM_DEVINS_UNKNOWN_CFG_VALUES,
7667 N_("Invalid configuration for E1000 device"));
7668
7669 /** @todo LineSpeed unused! */
7670
7671 /* Get config params */
7672 rc = CFGMR3QueryBytes(pCfg, "MAC", pThis->macConfigured.au8, sizeof(pThis->macConfigured.au8));
7673 if (RT_FAILURE(rc))
7674 return PDMDEV_SET_ERROR(pDevIns, rc,
7675 N_("Configuration error: Failed to get MAC address"));
7676 rc = CFGMR3QueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
7677 if (RT_FAILURE(rc))
7678 return PDMDEV_SET_ERROR(pDevIns, rc,
7679 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7680 rc = CFGMR3QueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
7681 if (RT_FAILURE(rc))
7682 return PDMDEV_SET_ERROR(pDevIns, rc,
7683 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7684 Assert(pThis->eChip <= E1K_CHIP_82545EM);
7685 rc = CFGMR3QueryBoolDef(pCfg, "GCEnabled", &pThis->fRCEnabled, true);
7686 if (RT_FAILURE(rc))
7687 return PDMDEV_SET_ERROR(pDevIns, rc,
7688 N_("Configuration error: Failed to get the value of 'GCEnabled'"));
7689
7690 rc = CFGMR3QueryBoolDef(pCfg, "R0Enabled", &pThis->fR0Enabled, true);
7691 if (RT_FAILURE(rc))
7692 return PDMDEV_SET_ERROR(pDevIns, rc,
7693 N_("Configuration error: Failed to get the value of 'R0Enabled'"));
7694
7695 rc = CFGMR3QueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
7696 if (RT_FAILURE(rc))
7697 return PDMDEV_SET_ERROR(pDevIns, rc,
7698 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7699
7700 rc = CFGMR3QueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
7701 if (RT_FAILURE(rc))
7702 return PDMDEV_SET_ERROR(pDevIns, rc,
7703 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
7704
7705 rc = CFGMR3QueryBoolDef(pCfg, "ItrEnabled", &pThis->fItrEnabled, false);
7706 if (RT_FAILURE(rc))
7707 return PDMDEV_SET_ERROR(pDevIns, rc,
7708 N_("Configuration error: Failed to get the value of 'ItrEnabled'"));
7709
7710 rc = CFGMR3QueryBoolDef(pCfg, "ItrRxEnabled", &pThis->fItrRxEnabled, true);
7711 if (RT_FAILURE(rc))
7712 return PDMDEV_SET_ERROR(pDevIns, rc,
7713 N_("Configuration error: Failed to get the value of 'ItrRxEnabled'"));
7714
7715 rc = CFGMR3QueryBoolDef(pCfg, "TidEnabled", &pThis->fTidEnabled, false);
7716 if (RT_FAILURE(rc))
7717 return PDMDEV_SET_ERROR(pDevIns, rc,
7718 N_("Configuration error: Failed to get the value of 'TidEnabled'"));
7719
7720 rc = CFGMR3QueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 3000); /* ms */
7721 if (RT_FAILURE(rc))
7722 return PDMDEV_SET_ERROR(pDevIns, rc,
7723 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
 7724     Assert(pThis->cMsLinkUpDelay <= 300000); /* at most 5 minutes */
7725 if (pThis->cMsLinkUpDelay > 5000)
7726 LogRel(("%s: WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
7727 else if (pThis->cMsLinkUpDelay == 0)
7728 LogRel(("%s: WARNING! Link up delay is disabled!\n", pThis->szPrf));
7729
7730 LogRel(("%s: Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s Itr=%s ItrRx=%s TID=%s R0=%s GC=%s\n", pThis->szPrf,
7731 g_aChips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
7732 pThis->fEthernetCRC ? "on" : "off",
7733 pThis->fGSOEnabled ? "enabled" : "disabled",
7734 pThis->fItrEnabled ? "enabled" : "disabled",
7735 pThis->fItrRxEnabled ? "enabled" : "disabled",
7736 pThis->fTidEnabled ? "enabled" : "disabled",
7737 pThis->fR0Enabled ? "enabled" : "disabled",
7738 pThis->fRCEnabled ? "enabled" : "disabled"));
7739
7740 /* Initialize the EEPROM. */
7741 pThis->eeprom.init(pThis->macConfigured);
7742
7743 /* Initialize internal PHY. */
7744 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
7745
7746 /* Initialize critical sections. We do our own locking. */
7747 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
7748 AssertRCReturn(rc, rc);
7749
7750 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "E1000#%d", iInstance);
7751 if (RT_FAILURE(rc))
7752 return rc;
7753 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "E1000#%dRX", iInstance);
7754 if (RT_FAILURE(rc))
7755 return rc;
7756#ifdef E1K_WITH_TX_CS
7757 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "E1000#%dTX", iInstance);
7758 if (RT_FAILURE(rc))
7759 return rc;
7760#endif /* E1K_WITH_TX_CS */
7761
7762 /* Saved state registration. */
7763 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
7764 NULL, e1kLiveExec, NULL,
7765 e1kSavePrep, e1kSaveExec, NULL,
7766 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
7767 if (RT_FAILURE(rc))
7768 return rc;
7769
7770 /* Set PCI config registers and register ourselves with the PCI bus. */
7771 e1kConfigurePciDev(&pThis->pciDevice, pThis->eChip);
7772 rc = PDMDevHlpPCIRegister(pDevIns, &pThis->pciDevice);
7773 if (RT_FAILURE(rc))
7774 return rc;
7775
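     /* Optionally advertise MSI support: a single vector with 32-bit message addresses only,
      * with the capability structure placed at config space offset 0x80. */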
7776#ifdef E1K_WITH_MSI
7777 PDMMSIREG MsiReg;
7778 RT_ZERO(MsiReg);
7779 MsiReg.cMsiVectors = 1;
7780 MsiReg.iMsiCapOffset = 0x80;
7781 MsiReg.iMsiNextOffset = 0x0;
7782 MsiReg.fMsi64bit = false;
7783 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
7784 AssertRCReturn(rc, rc);
7785#endif
7786
7787
 7788     /* Map our registers to memory space (region 0, see e1kConfigurePciDev) */
7789 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, e1kMap);
7790 if (RT_FAILURE(rc))
7791 return rc;
7792#ifdef E1K_WITH_PREREG_MMIO
7793 rc = PDMDevHlpMMIOExPreRegister(pDevIns, 0, E1K_MM_SIZE, IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD, "E1000",
7794 NULL /*pvUserR3*/, e1kMMIOWrite, e1kMMIORead, NULL /*pfnFillR3*/,
7795 NIL_RTR0PTR /*pvUserR0*/, pThis->fR0Enabled ? "e1kMMIOWrite" : NULL,
7796 pThis->fR0Enabled ? "e1kMMIORead" : NULL, NULL /*pszFillR0*/,
7797 NIL_RTRCPTR /*pvUserRC*/, pThis->fRCEnabled ? "e1kMMIOWrite" : NULL,
7798 pThis->fRCEnabled ? "e1kMMIORead" : NULL, NULL /*pszFillRC*/);
7799 AssertLogRelRCReturn(rc, rc);
7800#endif
 7801     /* Map our registers to IO space (region 2, see e1kConfigurePciDev) */
7802 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, E1K_IOPORT_SIZE, PCI_ADDRESS_SPACE_IO, e1kMap);
7803 if (RT_FAILURE(rc))
7804 return rc;
7805
7806 /* Create transmit queue */
7807 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7808 e1kTxQueueConsumer, true, "E1000-Xmit", &pThis->pTxQueueR3);
7809 if (RT_FAILURE(rc))
7810 return rc;
7811 pThis->pTxQueueR0 = PDMQueueR0Ptr(pThis->pTxQueueR3);
7812 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7813
7814 /* Create the RX notifier signaller. */
7815 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7816 e1kCanRxQueueConsumer, true, "E1000-Rcv", &pThis->pCanRxQueueR3);
7817 if (RT_FAILURE(rc))
7818 return rc;
7819 pThis->pCanRxQueueR0 = PDMQueueR0Ptr(pThis->pCanRxQueueR3);
7820 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7821
7822#ifdef E1K_TX_DELAY
7823 /* Create Transmit Delay Timer */
7824 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxDelayTimer, pThis,
7825 TMTIMER_FLAGS_NO_CRIT_SECT,
7826 "E1000 Transmit Delay Timer", &pThis->pTXDTimerR3);
7827 if (RT_FAILURE(rc))
7828 return rc;
7829 pThis->pTXDTimerR0 = TMTimerR0Ptr(pThis->pTXDTimerR3);
7830 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
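     /* The timer was created with TMTIMER_FLAGS_NO_CRIT_SECT, so associate it with the TX
      * critical section explicitly to keep its callback serialized with the transmit path. */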
7831 TMR3TimerSetCritSect(pThis->pTXDTimerR3, &pThis->csTx);
7832#endif /* E1K_TX_DELAY */
7833
7834//#ifdef E1K_USE_TX_TIMERS
7835 if (pThis->fTidEnabled)
7836 {
7837 /* Create Transmit Interrupt Delay Timer */
7838 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxIntDelayTimer, pThis,
7839 TMTIMER_FLAGS_NO_CRIT_SECT,
7840 "E1000 Transmit Interrupt Delay Timer", &pThis->pTIDTimerR3);
7841 if (RT_FAILURE(rc))
7842 return rc;
7843 pThis->pTIDTimerR0 = TMTimerR0Ptr(pThis->pTIDTimerR3);
7844 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7845
7846# ifndef E1K_NO_TAD
7847 /* Create Transmit Absolute Delay Timer */
7848 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxAbsDelayTimer, pThis,
7849 TMTIMER_FLAGS_NO_CRIT_SECT,
7850 "E1000 Transmit Absolute Delay Timer", &pThis->pTADTimerR3);
7851 if (RT_FAILURE(rc))
7852 return rc;
7853 pThis->pTADTimerR0 = TMTimerR0Ptr(pThis->pTADTimerR3);
7854 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
 7855# endif /* !E1K_NO_TAD */
7856 }
7857//#endif /* E1K_USE_TX_TIMERS */
7858
7859#ifdef E1K_USE_RX_TIMERS
7860 /* Create Receive Interrupt Delay Timer */
7861 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxIntDelayTimer, pThis,
7862 TMTIMER_FLAGS_NO_CRIT_SECT,
7863 "E1000 Receive Interrupt Delay Timer", &pThis->pRIDTimerR3);
7864 if (RT_FAILURE(rc))
7865 return rc;
7866 pThis->pRIDTimerR0 = TMTimerR0Ptr(pThis->pRIDTimerR3);
7867 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7868
7869 /* Create Receive Absolute Delay Timer */
7870 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxAbsDelayTimer, pThis,
7871 TMTIMER_FLAGS_NO_CRIT_SECT,
7872 "E1000 Receive Absolute Delay Timer", &pThis->pRADTimerR3);
7873 if (RT_FAILURE(rc))
7874 return rc;
7875 pThis->pRADTimerR0 = TMTimerR0Ptr(pThis->pRADTimerR3);
7876 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7877#endif /* E1K_USE_RX_TIMERS */
7878
7879 /* Create Late Interrupt Timer */
7880 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLateIntTimer, pThis,
7881 TMTIMER_FLAGS_NO_CRIT_SECT,
7882 "E1000 Late Interrupt Timer", &pThis->pIntTimerR3);
7883 if (RT_FAILURE(rc))
7884 return rc;
7885 pThis->pIntTimerR0 = TMTimerR0Ptr(pThis->pIntTimerR3);
7886 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7887
7888 /* Create Link Up Timer */
7889 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLinkUpTimer, pThis,
7890 TMTIMER_FLAGS_NO_CRIT_SECT,
7891 "E1000 Link Up Timer", &pThis->pLUTimerR3);
7892 if (RT_FAILURE(rc))
7893 return rc;
7894 pThis->pLUTimerR0 = TMTimerR0Ptr(pThis->pLUTimerR3);
7895 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7896
7897 /* Register the info item */
7898 char szTmp[20];
7899 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
7900 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
7901
7902 /* Status driver */
7903 PPDMIBASE pBase;
7904 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThis->IBase, &pBase, "Status Port");
7905 if (RT_FAILURE(rc))
7906 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
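     /* The status driver is not required to implement PDMILEDCONNECTORS; a NULL connector
      * simply means nobody is monitoring the LED. */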
7907 pThis->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
7908
7909 /* Network driver */
7910 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7911 if (RT_SUCCESS(rc))
7912 {
7913 if (rc == VINF_NAT_DNS)
7914 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7915 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7916 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7917 AssertMsgReturn(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
7918
7919 pThis->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7920 pThis->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7921 }
7922 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7923 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7924 {
7925 /* No error! */
7926 E1kLog(("%s This adapter is not attached to any network!\n", pThis->szPrf));
7927 }
7928 else
7929 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
7930
7931 rc = RTSemEventCreate(&pThis->hEventMoreRxDescAvail);
7932 if (RT_FAILURE(rc))
7933 return rc;
7934
7935 rc = e1kInitDebugHelpers();
7936 if (RT_FAILURE(rc))
7937 return rc;
7938
7939 e1kHardReset(pThis);
7940
7941 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Public/Net/E1k%u/BytesReceived", iInstance);
7942 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Public/Net/E1k%u/BytesTransmitted", iInstance);
7943
7944 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
7945 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
7946
7947#if defined(VBOX_WITH_STATISTICS)
7948 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
7949 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
7950 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
7951 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
7952 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
7953 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
7954 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
7955 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
7956 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
7957 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
7958 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
7959 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occured", iInstance);
7960 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
7961 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
7962 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
7963 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
7964 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
7965 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
7966 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
7967 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflowWakeup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups", "/Devices/E1k%d/RxOverflowWakeup", iInstance);
7968 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
7969 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
7970 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
7971 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
7972
7973 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors","/Devices/E1k%d/TxDesc/ContexNormal", iInstance);
7974 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
7975 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
7976 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
7977 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
7978 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
7979 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
7980 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
7981 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
7982 for (unsigned iReg = 0; iReg < E1K_NUM_OF_REGS; iReg++)
7983 {
7984 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7985 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Reads", iInstance, g_aE1kRegMap[iReg].abbrev);
7986 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7987 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Writes", iInstance, g_aE1kRegMap[iReg].abbrev);
7988 }
7989#endif /* VBOX_WITH_STATISTICS */
7990
7991#ifdef E1K_INT_STATS
7992 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "u64ArmedAt", "/Devices/E1k%d/u64ArmedAt", iInstance);
7993 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatMaxTxDelay", "/Devices/E1k%d/uStatMaxTxDelay", iInstance);
7994 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatInt, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatInt", "/Devices/E1k%d/uStatInt", iInstance);
7995 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTry", "/Devices/E1k%d/uStatIntTry", iInstance);
7996 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLower", "/Devices/E1k%d/uStatIntLower", iInstance);
7997 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatNoIntICR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatNoIntICR", "/Devices/E1k%d/uStatNoIntICR", iInstance);
7998 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLost", "/Devices/E1k%d/iStatIntLost", iInstance);
7999 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLostOne", "/Devices/E1k%d/iStatIntLostOne", iInstance);
8000 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntIMS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntIMS", "/Devices/E1k%d/uStatIntIMS", iInstance);
8001 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntSkip", "/Devices/E1k%d/uStatIntSkip", iInstance);
8002 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLate", "/Devices/E1k%d/uStatIntLate", iInstance);
8003 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntMasked", "/Devices/E1k%d/uStatIntMasked", iInstance);
8004 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntEarly", "/Devices/E1k%d/uStatIntEarly", iInstance);
8005 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRx", "/Devices/E1k%d/uStatIntRx", iInstance);
8006 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTx", "/Devices/E1k%d/uStatIntTx", iInstance);
8007 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntICS", "/Devices/E1k%d/uStatIntICS", iInstance);
8008 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRDTR", "/Devices/E1k%d/uStatIntRDTR", iInstance);
8009 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRXDMT0", "/Devices/E1k%d/uStatIntRXDMT0", iInstance);
8010 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTXQE", "/Devices/E1k%d/uStatIntTXQE", iInstance);
8011 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxNoRS", "/Devices/E1k%d/uStatTxNoRS", iInstance);
8012 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxIDE", "/Devices/E1k%d/uStatTxIDE", iInstance);
8013 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayed", "/Devices/E1k%d/uStatTxDelayed", iInstance);
8014 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayExp", "/Devices/E1k%d/uStatTxDelayExp", iInstance);
8015 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTAD", "/Devices/E1k%d/uStatTAD", iInstance);
8016 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTID", "/Devices/E1k%d/uStatTID", iInstance);
8017 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRAD", "/Devices/E1k%d/uStatRAD", iInstance);
8018 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRID", "/Devices/E1k%d/uStatRID", iInstance);
8019 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRxFrm", "/Devices/E1k%d/uStatRxFrm", iInstance);
8020 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxFrm", "/Devices/E1k%d/uStatTxFrm", iInstance);
8021 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescCtx", "/Devices/E1k%d/uStatDescCtx", iInstance);
8022 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescDat", "/Devices/E1k%d/uStatDescDat", iInstance);
8023 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescLeg", "/Devices/E1k%d/uStatDescLeg", iInstance);
8024 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx1514", "/Devices/E1k%d/uStatTx1514", iInstance);
8025 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx2962", "/Devices/E1k%d/uStatTx2962", iInstance);
8026 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx4410", "/Devices/E1k%d/uStatTx4410", iInstance);
8027 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx5858", "/Devices/E1k%d/uStatTx5858", iInstance);
8028 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx7306", "/Devices/E1k%d/uStatTx7306", iInstance);
8029 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx8754", "/Devices/E1k%d/uStatTx8754", iInstance);
8030 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx16384", "/Devices/E1k%d/uStatTx16384", iInstance);
8031 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx32768", "/Devices/E1k%d/uStatTx32768", iInstance);
8032 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxLarge", "/Devices/E1k%d/uStatTxLarge", iInstance);
8033#endif /* E1K_INT_STATS */
8034
8035 return VINF_SUCCESS;
8036}
8037
8038/**
8039 * The device registration structure.
8040 */
8041const PDMDEVREG g_DeviceE1000 =
8042{
8043 /* Structure version. PDM_DEVREG_VERSION defines the current version. */
8044 PDM_DEVREG_VERSION,
8045 /* Device name. */
8046 "e1000",
 8047    /* Name of guest context module (no path).
 8048     * Only evaluated if PDM_DEVREG_FLAGS_RC is set. */
 8049    "VBoxDDRC.rc",
 8050    /* Name of ring-0 module (no path).
 8051     * Only evaluated if PDM_DEVREG_FLAGS_R0 is set. */
 8052    "VBoxDDR0.r0",
8053 /* The description of the device. The UTF-8 string pointed to shall, like this structure,
8054 * remain unchanged from registration till VM destruction. */
8055 "Intel PRO/1000 MT Desktop Ethernet.\n",
8056
8057 /* Flags, combination of the PDM_DEVREG_FLAGS_* \#defines. */
8058 PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0,
8059 /* Device class(es), combination of the PDM_DEVREG_CLASS_* \#defines. */
8060 PDM_DEVREG_CLASS_NETWORK,
8061 /* Maximum number of instances (per VM). */
8062 ~0U,
8063 /* Size of the instance data. */
8064 sizeof(E1KSTATE),
8065
8066 /* pfnConstruct */
8067 e1kR3Construct,
8068 /* pfnDestruct */
8069 e1kR3Destruct,
8070 /* pfnRelocate */
8071 e1kR3Relocate,
8072 /* pfnMemSetup */
8073 NULL,
8074 /* pfnPowerOn */
8075 NULL,
8076 /* pfnReset */
8077 e1kR3Reset,
8078 /* pfnSuspend */
8079 e1kR3Suspend,
8080 /* pfnResume */
8081 NULL,
8082 /* pfnAttach */
8083 e1kR3Attach,
 8084    /* pfnDetach */
8085 e1kR3Detach,
8086 /* pfnQueryInterface */
8087 NULL,
8088 /* pfnInitComplete */
8089 NULL,
8090 /* pfnPowerOff */
8091 e1kR3PowerOff,
8092 /* pfnSoftReset */
8093 NULL,
8094
8095 /* u32VersionEnd */
8096 PDM_DEVREG_VERSION
8097};
8098
8099#endif /* IN_RING3 */
8100#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */