VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp@ 75593

Last change on this file since 75593 was 75330, checked in by vboxsync, 6 years ago

Dev/E1000 bugref:9291 Proper handling of empty TX descriptors with EOP set

1/* $Id: DevE1000.cpp 75330 2018-11-08 17:45:51Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2017 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DEV_E1000
33#include <iprt/crc.h>
34#include <iprt/ctype.h>
35#include <iprt/net.h>
36#include <iprt/semaphore.h>
37#include <iprt/string.h>
38#include <iprt/time.h>
39#include <iprt/uuid.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/vmm/pdmnetifs.h>
42#include <VBox/vmm/pdmnetinline.h>
43#include <VBox/param.h>
44#include "VBoxDD.h"
45
46#include "DevEEPROM.h"
47#include "DevE1000Phy.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** @name E1000 Build Options
54 * @{ */
55/** @def E1K_INIT_RA0
56 * E1K_INIT_RA0 forces E1000 to set the first entry in the Receive Address
57 * filter table to the MAC address obtained from CFGM. Most guests read the MAC
58 * address from EEPROM and write it to RA[0] explicitly, but Mac OS X seems to
59 * depend on it being already set (see @bugref{4657}).
60 */
61#define E1K_INIT_RA0
62/** @def E1K_LSC_ON_RESET
63 * E1K_LSC_ON_RESET causes e1000 to generate Link Status Change
64 * interrupt after hard reset. This makes the E1K_LSC_ON_SLU option unnecessary.
65 * With an unplugged cable, LSC is triggered for 82543GC only.
66 */
67#define E1K_LSC_ON_RESET
68/** @def E1K_LSC_ON_SLU
69 * E1K_LSC_ON_SLU causes E1000 to generate Link Status Change interrupt when
70 * the guest driver brings up the link via STATUS.LU bit. Again the only guest
71 * that requires it is Mac OS X (see @bugref{4657}).
72 */
73//#define E1K_LSC_ON_SLU
74/** @def E1K_INIT_LINKUP_DELAY
75 * E1K_INIT_LINKUP_DELAY prevents the link from coming up while the driver is
76 * still initializing (see @bugref{8624}).
77 */
78#define E1K_INIT_LINKUP_DELAY_US (2000 * 1000)
79/** @def E1K_IMS_INT_DELAY_NS
80 * E1K_IMS_INT_DELAY_NS prevents interrupt storms in Windows guests on enabling
81 * interrupts (see @bugref{8624}).
82 */
83#define E1K_IMS_INT_DELAY_NS 100
84/** @def E1K_TX_DELAY
85 * E1K_TX_DELAY aims to improve guest-host transfer rate for TCP streams by
86 * preventing packets from being sent immediately. It allows several packets
87 * to be sent in a batch, reducing the number of acknowledgments. Note that it
88 * effectively disables the R0 TX path, forcing transmission in R3.
89 */
90//#define E1K_TX_DELAY 150
91/** @def E1K_USE_TX_TIMERS
92 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
93 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
94 * register. Enabling it showed no positive effects on existing guests so it
95 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
96 * Ethernet Controllers Software Developer’s Manual" for more detailed
97 * explanation.
98 */
99//#define E1K_USE_TX_TIMERS
100/** @def E1K_NO_TAD
101 * E1K_NO_TAD disables one of two timers enabled by E1K_USE_TX_TIMERS, the
102 * Transmit Absolute Delay time. This timer sets the maximum time interval
103 * during which TX interrupts can be postponed (delayed). It has no effect
104 * if E1K_USE_TX_TIMERS is not defined.
105 */
106//#define E1K_NO_TAD
107/** @def E1K_REL_DEBUG
108 * E1K_REL_DEBUG enables debug logging (E1kLog, E1kLog2, E1kLog3) in release builds.
109 */
110//#define E1K_REL_DEBUG
111/** @def E1K_INT_STATS
112 * E1K_INT_STATS enables collection of internal statistics used for
113 * debugging of delayed interrupts, etc.
114 */
115#define E1K_INT_STATS
116/** @def E1K_WITH_MSI
117 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
118 */
119//#define E1K_WITH_MSI
120/** @def E1K_WITH_TX_CS
121 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
122 */
123#define E1K_WITH_TX_CS
124/** @def E1K_WITH_TXD_CACHE
125 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
126 * single physical memory read (or two if it wraps around the end of TX
127 * descriptor ring). It is required for proper functioning of bandwidth
128 * resource control as it allows computing the exact sizes of packets prior
129 * to allocating their buffers (see @bugref{5582}).
130 */
131#define E1K_WITH_TXD_CACHE
132/** @def E1K_WITH_RXD_CACHE
133 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
134 * single physical memory read (or two if it wraps around the end of RX
135 * descriptor ring). Intel's packet driver for DOS needs this option in
136 * order to work properly (see @bugref{6217}).
137 */
138#define E1K_WITH_RXD_CACHE
139/** @def E1K_WITH_PREREG_MMIO
140 * E1K_WITH_PREREG_MMIO enables a new style MMIO registration and is
141 * currently only done for testing the related PDM, IOM and PGM code. */
142//#define E1K_WITH_PREREG_MMIO
143/** @} */
144/* End of Options ************************************************************/
145
146#ifdef E1K_WITH_TXD_CACHE
147/**
148 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
149 * in the state structure. It limits the number of descriptors loaded in one
150 * batch read. For example, a Linux guest may use up to 20 descriptors per
151 * TSE packet. The largest TSE packet seen (Windows guest) used 45 descriptors.
152 */
153# define E1K_TXD_CACHE_SIZE 64u
154#endif /* E1K_WITH_TXD_CACHE */
155
156#ifdef E1K_WITH_RXD_CACHE
157/**
158 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
159 * in the state structure. It limits the number of descriptors loaded in one
160 * batch read. For example, an XP guest adds 15 RX descriptors at a time.
161 */
162# define E1K_RXD_CACHE_SIZE 16u
163#endif /* E1K_WITH_RXD_CACHE */
164
165
166/* Little helpers ************************************************************/
167#undef htons
168#undef ntohs
169#undef htonl
170#undef ntohl
171#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
172#define ntohs(x) htons(x)
173#define htonl(x) ASMByteSwapU32(x)
174#define ntohl(x) htonl(x)
175
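/* Behaviour example (mechanical, from the definitions above): these helpers
 * swap byte order unconditionally, which corresponds to host-to-network
 * conversion on a little-endian host, e.g.
 * @code
 *     htons(0x1234)     // evaluates to 0x3412
 *     htonl(0xC0A80001) // evaluates to 0x0100A8C0
 * @endcode
 */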
176#ifndef DEBUG
177# ifdef E1K_REL_DEBUG
178# define DEBUG
179# define E1kLog(a) LogRel(a)
180# define E1kLog2(a) LogRel(a)
181# define E1kLog3(a) LogRel(a)
182# define E1kLogX(x, a) LogRel(a)
183//# define E1kLog3(a) do {} while (0)
184# else
185# define E1kLog(a) do {} while (0)
186# define E1kLog2(a) do {} while (0)
187# define E1kLog3(a) do {} while (0)
188# define E1kLogX(x, a) do {} while (0)
189# endif
190#else
191# define E1kLog(a) Log(a)
192# define E1kLog2(a) Log2(a)
193# define E1kLog3(a) Log3(a)
194# define E1kLogX(x, a) LogIt(x, LOG_GROUP, a)
195//# define E1kLog(a) do {} while (0)
196//# define E1kLog2(a) do {} while (0)
197//# define E1kLog3(a) do {} while (0)
198#endif
199
200#if 0
201# define LOG_ENABLED
202# define E1kLogRel(a) LogRel(a)
203# undef Log6
204# define Log6(a) LogRel(a)
205#else
206# define E1kLogRel(a) do { } while (0)
207#endif
208
209//#undef DEBUG
210
211#define STATE_TO_DEVINS(pThis) (((PE1KSTATE )pThis)->CTX_SUFF(pDevIns))
212#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
213
214#define E1K_INC_CNT32(cnt) \
215do { \
216 if (cnt < UINT32_MAX) \
217 cnt++; \
218} while (0)
219
220#define E1K_ADD_CNT64(cntLo, cntHi, val) \
221do { \
222 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
223 uint64_t tmp = u64Cnt; \
224 u64Cnt += val; \
225 if (tmp > u64Cnt ) \
226 u64Cnt = UINT64_MAX; \
227 cntLo = (uint32_t)u64Cnt; \
228 cntHi = (uint32_t)(u64Cnt >> 32); \
229} while (0)
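/* Usage sketch (illustrative, not lifted from the code below): 64-bit statistic
 * counters split across a low/high register pair, such as GORCL/GORCH, are
 * advanced with the saturating E1K_ADD_CNT64 helper, while plain 32-bit
 * counters use E1K_INC_CNT32:
 * @code
 *     E1K_INC_CNT32(GPRC);                  // good packets received
 *     E1K_ADD_CNT64(GORCL, GORCH, cbFrame); // cbFrame is a hypothetical byte count
 * @endcode
 */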
230
231#ifdef E1K_INT_STATS
232# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
233#else /* E1K_INT_STATS */
234# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
235#endif /* E1K_INT_STATS */
236
237
238/*****************************************************************************/
239
240typedef uint32_t E1KCHIP;
241#define E1K_CHIP_82540EM 0
242#define E1K_CHIP_82543GC 1
243#define E1K_CHIP_82545EM 2
244
245#ifdef IN_RING3
246/** Different E1000 chips. */
247static const struct E1kChips
248{
249 uint16_t uPCIVendorId;
250 uint16_t uPCIDeviceId;
251 uint16_t uPCISubsystemVendorId;
252 uint16_t uPCISubsystemId;
253 const char *pcszName;
254} g_aChips[] =
255{
256 /* Vendor Device SSVendor SubSys Name */
257 { 0x8086,
258 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
259# ifdef E1K_WITH_MSI
260 0x105E,
261# else
262 0x100E,
263# endif
264 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
265 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
266 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
267};
268#endif /* IN_RING3 */
269
270
271/* The size of the register area mapped to I/O space */
272#define E1K_IOPORT_SIZE 0x8
273/* The size of the memory-mapped register area */
274#define E1K_MM_SIZE 0x20000
275
276#define E1K_MAX_TX_PKT_SIZE 16288
277#define E1K_MAX_RX_PKT_SIZE 16384
278
279/*****************************************************************************/
280
281/** Gets the specified bits from the register. */
282#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
283#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
284#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
285#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
286#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
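/* Expansion example (mechanical, follows directly from the definitions above),
 * using the RCTL buffer size field defined further down:
 * @code
 *     GET_BITS(RCTL, BSIZE)    // -> ((RCTL & RCTL_BSIZE_MASK) >> RCTL_BSIZE_SHIFT)
 *     SET_BITS(RCTL, BSIZE, 1) // -> RCTL = (RCTL & ~RCTL_BSIZE_MASK) | (1 << RCTL_BSIZE_SHIFT)
 *                              //    (wrapped in a do/while(0) statement)
 * @endcode
 */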
287
288#define CTRL_SLU UINT32_C(0x00000040)
289#define CTRL_MDIO UINT32_C(0x00100000)
290#define CTRL_MDC UINT32_C(0x00200000)
291#define CTRL_MDIO_DIR UINT32_C(0x01000000)
292#define CTRL_MDC_DIR UINT32_C(0x02000000)
293#define CTRL_RESET UINT32_C(0x04000000)
294#define CTRL_VME UINT32_C(0x40000000)
295
296#define STATUS_LU UINT32_C(0x00000002)
297#define STATUS_TXOFF UINT32_C(0x00000010)
298
299#define EECD_EE_WIRES UINT32_C(0x0F)
300#define EECD_EE_REQ UINT32_C(0x40)
301#define EECD_EE_GNT UINT32_C(0x80)
302
303#define EERD_START UINT32_C(0x00000001)
304#define EERD_DONE UINT32_C(0x00000010)
305#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
306#define EERD_DATA_SHIFT 16
307#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
308#define EERD_ADDR_SHIFT 8
309
310#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
311#define MDIC_DATA_SHIFT 0
312#define MDIC_REG_MASK UINT32_C(0x001F0000)
313#define MDIC_REG_SHIFT 16
314#define MDIC_PHY_MASK UINT32_C(0x03E00000)
315#define MDIC_PHY_SHIFT 21
316#define MDIC_OP_WRITE UINT32_C(0x04000000)
317#define MDIC_OP_READ UINT32_C(0x08000000)
318#define MDIC_READY UINT32_C(0x10000000)
319#define MDIC_INT_EN UINT32_C(0x20000000)
320#define MDIC_ERROR UINT32_C(0x40000000)
321
322#define TCTL_EN UINT32_C(0x00000002)
323#define TCTL_PSP UINT32_C(0x00000008)
324
325#define RCTL_EN UINT32_C(0x00000002)
326#define RCTL_UPE UINT32_C(0x00000008)
327#define RCTL_MPE UINT32_C(0x00000010)
328#define RCTL_LPE UINT32_C(0x00000020)
329#define RCTL_LBM_MASK UINT32_C(0x000000C0)
330#define RCTL_LBM_SHIFT 6
331#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
332#define RCTL_RDMTS_SHIFT 8
333#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
334#define RCTL_MO_MASK UINT32_C(0x00003000)
335#define RCTL_MO_SHIFT 12
336#define RCTL_BAM UINT32_C(0x00008000)
337#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
338#define RCTL_BSIZE_SHIFT 16
339#define RCTL_VFE UINT32_C(0x00040000)
340#define RCTL_CFIEN UINT32_C(0x00080000)
341#define RCTL_CFI UINT32_C(0x00100000)
342#define RCTL_BSEX UINT32_C(0x02000000)
343#define RCTL_SECRC UINT32_C(0x04000000)
344
345#define ICR_TXDW UINT32_C(0x00000001)
346#define ICR_TXQE UINT32_C(0x00000002)
347#define ICR_LSC UINT32_C(0x00000004)
348#define ICR_RXDMT0 UINT32_C(0x00000010)
349#define ICR_RXT0 UINT32_C(0x00000080)
350#define ICR_TXD_LOW UINT32_C(0x00008000)
351#define RDTR_FPD UINT32_C(0x80000000)
352
353#define PBA_st ((PBAST*)(pThis->auRegs + PBA_IDX))
354typedef struct
355{
356 unsigned rxa : 7;
357 unsigned rxa_r : 9;
358 unsigned txa : 16;
359} PBAST;
360AssertCompileSize(PBAST, 4);
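/* Access example (illustrative): PBA_st overlays the PBA register, so the
 * packet buffer split can be read or written as bit fields, e.g.
 * @code
 *     unsigned uRxAlloc = PBA_st->rxa; // receive buffer allocation
 *     unsigned uTxAlloc = PBA_st->txa; // transmit buffer allocation
 * @endcode
 * (Per the 8254x manual these allocations are expressed in kilobytes; treat
 * that as an assumption here.)
 */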
361
362#define TXDCTL_WTHRESH_MASK 0x003F0000
363#define TXDCTL_WTHRESH_SHIFT 16
364#define TXDCTL_LWTHRESH_MASK 0xFE000000
365#define TXDCTL_LWTHRESH_SHIFT 25
366
367#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
368#define RXCSUM_PCSS_SHIFT 0
369
370/** @name Register access macros
371 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
372 * @{ */
373#define CTRL pThis->auRegs[CTRL_IDX]
374#define STATUS pThis->auRegs[STATUS_IDX]
375#define EECD pThis->auRegs[EECD_IDX]
376#define EERD pThis->auRegs[EERD_IDX]
377#define CTRL_EXT pThis->auRegs[CTRL_EXT_IDX]
378#define FLA pThis->auRegs[FLA_IDX]
379#define MDIC pThis->auRegs[MDIC_IDX]
380#define FCAL pThis->auRegs[FCAL_IDX]
381#define FCAH pThis->auRegs[FCAH_IDX]
382#define FCT pThis->auRegs[FCT_IDX]
383#define VET pThis->auRegs[VET_IDX]
384#define ICR pThis->auRegs[ICR_IDX]
385#define ITR pThis->auRegs[ITR_IDX]
386#define ICS pThis->auRegs[ICS_IDX]
387#define IMS pThis->auRegs[IMS_IDX]
388#define IMC pThis->auRegs[IMC_IDX]
389#define RCTL pThis->auRegs[RCTL_IDX]
390#define FCTTV pThis->auRegs[FCTTV_IDX]
391#define TXCW pThis->auRegs[TXCW_IDX]
392#define RXCW pThis->auRegs[RXCW_IDX]
393#define TCTL pThis->auRegs[TCTL_IDX]
394#define TIPG pThis->auRegs[TIPG_IDX]
395#define AIFS pThis->auRegs[AIFS_IDX]
396#define LEDCTL pThis->auRegs[LEDCTL_IDX]
397#define PBA pThis->auRegs[PBA_IDX]
398#define FCRTL pThis->auRegs[FCRTL_IDX]
399#define FCRTH pThis->auRegs[FCRTH_IDX]
400#define RDFH pThis->auRegs[RDFH_IDX]
401#define RDFT pThis->auRegs[RDFT_IDX]
402#define RDFHS pThis->auRegs[RDFHS_IDX]
403#define RDFTS pThis->auRegs[RDFTS_IDX]
404#define RDFPC pThis->auRegs[RDFPC_IDX]
405#define RDBAL pThis->auRegs[RDBAL_IDX]
406#define RDBAH pThis->auRegs[RDBAH_IDX]
407#define RDLEN pThis->auRegs[RDLEN_IDX]
408#define RDH pThis->auRegs[RDH_IDX]
409#define RDT pThis->auRegs[RDT_IDX]
410#define RDTR pThis->auRegs[RDTR_IDX]
411#define RXDCTL pThis->auRegs[RXDCTL_IDX]
412#define RADV pThis->auRegs[RADV_IDX]
413#define RSRPD pThis->auRegs[RSRPD_IDX]
414#define TXDMAC pThis->auRegs[TXDMAC_IDX]
415#define TDFH pThis->auRegs[TDFH_IDX]
416#define TDFT pThis->auRegs[TDFT_IDX]
417#define TDFHS pThis->auRegs[TDFHS_IDX]
418#define TDFTS pThis->auRegs[TDFTS_IDX]
419#define TDFPC pThis->auRegs[TDFPC_IDX]
420#define TDBAL pThis->auRegs[TDBAL_IDX]
421#define TDBAH pThis->auRegs[TDBAH_IDX]
422#define TDLEN pThis->auRegs[TDLEN_IDX]
423#define TDH pThis->auRegs[TDH_IDX]
424#define TDT pThis->auRegs[TDT_IDX]
425#define TIDV pThis->auRegs[TIDV_IDX]
426#define TXDCTL pThis->auRegs[TXDCTL_IDX]
427#define TADV pThis->auRegs[TADV_IDX]
428#define TSPMT pThis->auRegs[TSPMT_IDX]
429#define CRCERRS pThis->auRegs[CRCERRS_IDX]
430#define ALGNERRC pThis->auRegs[ALGNERRC_IDX]
431#define SYMERRS pThis->auRegs[SYMERRS_IDX]
432#define RXERRC pThis->auRegs[RXERRC_IDX]
433#define MPC pThis->auRegs[MPC_IDX]
434#define SCC pThis->auRegs[SCC_IDX]
435#define ECOL pThis->auRegs[ECOL_IDX]
436#define MCC pThis->auRegs[MCC_IDX]
437#define LATECOL pThis->auRegs[LATECOL_IDX]
438#define COLC pThis->auRegs[COLC_IDX]
439#define DC pThis->auRegs[DC_IDX]
440#define TNCRS pThis->auRegs[TNCRS_IDX]
441/* #define SEC pThis->auRegs[SEC_IDX] Conflict with sys/time.h */
442#define CEXTERR pThis->auRegs[CEXTERR_IDX]
443#define RLEC pThis->auRegs[RLEC_IDX]
444#define XONRXC pThis->auRegs[XONRXC_IDX]
445#define XONTXC pThis->auRegs[XONTXC_IDX]
446#define XOFFRXC pThis->auRegs[XOFFRXC_IDX]
447#define XOFFTXC pThis->auRegs[XOFFTXC_IDX]
448#define FCRUC pThis->auRegs[FCRUC_IDX]
449#define PRC64 pThis->auRegs[PRC64_IDX]
450#define PRC127 pThis->auRegs[PRC127_IDX]
451#define PRC255 pThis->auRegs[PRC255_IDX]
452#define PRC511 pThis->auRegs[PRC511_IDX]
453#define PRC1023 pThis->auRegs[PRC1023_IDX]
454#define PRC1522 pThis->auRegs[PRC1522_IDX]
455#define GPRC pThis->auRegs[GPRC_IDX]
456#define BPRC pThis->auRegs[BPRC_IDX]
457#define MPRC pThis->auRegs[MPRC_IDX]
458#define GPTC pThis->auRegs[GPTC_IDX]
459#define GORCL pThis->auRegs[GORCL_IDX]
460#define GORCH pThis->auRegs[GORCH_IDX]
461#define GOTCL pThis->auRegs[GOTCL_IDX]
462#define GOTCH pThis->auRegs[GOTCH_IDX]
463#define RNBC pThis->auRegs[RNBC_IDX]
464#define RUC pThis->auRegs[RUC_IDX]
465#define RFC pThis->auRegs[RFC_IDX]
466#define ROC pThis->auRegs[ROC_IDX]
467#define RJC pThis->auRegs[RJC_IDX]
468#define MGTPRC pThis->auRegs[MGTPRC_IDX]
469#define MGTPDC pThis->auRegs[MGTPDC_IDX]
470#define MGTPTC pThis->auRegs[MGTPTC_IDX]
471#define TORL pThis->auRegs[TORL_IDX]
472#define TORH pThis->auRegs[TORH_IDX]
473#define TOTL pThis->auRegs[TOTL_IDX]
474#define TOTH pThis->auRegs[TOTH_IDX]
475#define TPR pThis->auRegs[TPR_IDX]
476#define TPT pThis->auRegs[TPT_IDX]
477#define PTC64 pThis->auRegs[PTC64_IDX]
478#define PTC127 pThis->auRegs[PTC127_IDX]
479#define PTC255 pThis->auRegs[PTC255_IDX]
480#define PTC511 pThis->auRegs[PTC511_IDX]
481#define PTC1023 pThis->auRegs[PTC1023_IDX]
482#define PTC1522 pThis->auRegs[PTC1522_IDX]
483#define MPTC pThis->auRegs[MPTC_IDX]
484#define BPTC pThis->auRegs[BPTC_IDX]
485#define TSCTC pThis->auRegs[TSCTC_IDX]
486#define TSCTFC pThis->auRegs[TSCTFC_IDX]
487#define RXCSUM pThis->auRegs[RXCSUM_IDX]
488#define WUC pThis->auRegs[WUC_IDX]
489#define WUFC pThis->auRegs[WUFC_IDX]
490#define WUS pThis->auRegs[WUS_IDX]
491#define MANC pThis->auRegs[MANC_IDX]
492#define IPAV pThis->auRegs[IPAV_IDX]
493#define WUPL pThis->auRegs[WUPL_IDX]
494/** @} */
495
496/**
497 * Indices of memory-mapped registers in register table.
498 */
499typedef enum
500{
501 CTRL_IDX,
502 STATUS_IDX,
503 EECD_IDX,
504 EERD_IDX,
505 CTRL_EXT_IDX,
506 FLA_IDX,
507 MDIC_IDX,
508 FCAL_IDX,
509 FCAH_IDX,
510 FCT_IDX,
511 VET_IDX,
512 ICR_IDX,
513 ITR_IDX,
514 ICS_IDX,
515 IMS_IDX,
516 IMC_IDX,
517 RCTL_IDX,
518 FCTTV_IDX,
519 TXCW_IDX,
520 RXCW_IDX,
521 TCTL_IDX,
522 TIPG_IDX,
523 AIFS_IDX,
524 LEDCTL_IDX,
525 PBA_IDX,
526 FCRTL_IDX,
527 FCRTH_IDX,
528 RDFH_IDX,
529 RDFT_IDX,
530 RDFHS_IDX,
531 RDFTS_IDX,
532 RDFPC_IDX,
533 RDBAL_IDX,
534 RDBAH_IDX,
535 RDLEN_IDX,
536 RDH_IDX,
537 RDT_IDX,
538 RDTR_IDX,
539 RXDCTL_IDX,
540 RADV_IDX,
541 RSRPD_IDX,
542 TXDMAC_IDX,
543 TDFH_IDX,
544 TDFT_IDX,
545 TDFHS_IDX,
546 TDFTS_IDX,
547 TDFPC_IDX,
548 TDBAL_IDX,
549 TDBAH_IDX,
550 TDLEN_IDX,
551 TDH_IDX,
552 TDT_IDX,
553 TIDV_IDX,
554 TXDCTL_IDX,
555 TADV_IDX,
556 TSPMT_IDX,
557 CRCERRS_IDX,
558 ALGNERRC_IDX,
559 SYMERRS_IDX,
560 RXERRC_IDX,
561 MPC_IDX,
562 SCC_IDX,
563 ECOL_IDX,
564 MCC_IDX,
565 LATECOL_IDX,
566 COLC_IDX,
567 DC_IDX,
568 TNCRS_IDX,
569 SEC_IDX,
570 CEXTERR_IDX,
571 RLEC_IDX,
572 XONRXC_IDX,
573 XONTXC_IDX,
574 XOFFRXC_IDX,
575 XOFFTXC_IDX,
576 FCRUC_IDX,
577 PRC64_IDX,
578 PRC127_IDX,
579 PRC255_IDX,
580 PRC511_IDX,
581 PRC1023_IDX,
582 PRC1522_IDX,
583 GPRC_IDX,
584 BPRC_IDX,
585 MPRC_IDX,
586 GPTC_IDX,
587 GORCL_IDX,
588 GORCH_IDX,
589 GOTCL_IDX,
590 GOTCH_IDX,
591 RNBC_IDX,
592 RUC_IDX,
593 RFC_IDX,
594 ROC_IDX,
595 RJC_IDX,
596 MGTPRC_IDX,
597 MGTPDC_IDX,
598 MGTPTC_IDX,
599 TORL_IDX,
600 TORH_IDX,
601 TOTL_IDX,
602 TOTH_IDX,
603 TPR_IDX,
604 TPT_IDX,
605 PTC64_IDX,
606 PTC127_IDX,
607 PTC255_IDX,
608 PTC511_IDX,
609 PTC1023_IDX,
610 PTC1522_IDX,
611 MPTC_IDX,
612 BPTC_IDX,
613 TSCTC_IDX,
614 TSCTFC_IDX,
615 RXCSUM_IDX,
616 WUC_IDX,
617 WUFC_IDX,
618 WUS_IDX,
619 MANC_IDX,
620 IPAV_IDX,
621 WUPL_IDX,
622 MTA_IDX,
623 RA_IDX,
624 VFTA_IDX,
625 IP4AT_IDX,
626 IP6AT_IDX,
627 WUPM_IDX,
628 FFLT_IDX,
629 FFMT_IDX,
630 FFVT_IDX,
631 PBM_IDX,
632 RA_82542_IDX,
633 MTA_82542_IDX,
634 VFTA_82542_IDX,
635 E1K_NUM_OF_REGS
636} E1kRegIndex;
637
638#define E1K_NUM_OF_32BIT_REGS MTA_IDX
639/** The number of registers with strictly increasing offset. */
640#define E1K_NUM_OF_BINARY_SEARCHABLE (WUPL_IDX + 1)
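/* Lookup sketch (illustrative only, not the original implementation): since the
 * first E1K_NUM_OF_BINARY_SEARCHABLE entries of g_aE1kRegMap (defined below)
 * have strictly increasing, non-overlapping offsets, a register index could be
 * found by bisection, e.g.:
 * @code
 *     static int e1kExampleRegLookup(uint32_t offReg) // hypothetical helper
 *     {
 *         int iLow = 0, iHigh = E1K_NUM_OF_BINARY_SEARCHABLE;
 *         while (iLow < iHigh)
 *         {
 *             int iMid = (iLow + iHigh) / 2;
 *             if (offReg < g_aE1kRegMap[iMid].offset)
 *                 iHigh = iMid;
 *             else if (offReg >= g_aE1kRegMap[iMid].offset + g_aE1kRegMap[iMid].size)
 *                 iLow = iMid + 1;
 *             else
 *                 return iMid; // offReg falls within this register
 *         }
 *         return -1; // not among the binary-searchable registers
 *     }
 * @endcode
 */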
641
642
643/**
644 * Define E1000-specific EEPROM layout.
645 */
646struct E1kEEPROM
647{
648 public:
649 EEPROM93C46 eeprom;
650
651#ifdef IN_RING3
652 /**
653 * Initialize EEPROM content.
654 *
655 * @param macAddr MAC address of E1000.
656 */
657 void init(RTMAC &macAddr)
658 {
659 eeprom.init();
660 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
661 eeprom.m_au16Data[0x04] = 0xFFFF;
662 /*
663 * bit 3 - full support for power management
664 * bit 10 - full duplex
665 */
666 eeprom.m_au16Data[0x0A] = 0x4408;
667 eeprom.m_au16Data[0x0B] = 0x001E;
668 eeprom.m_au16Data[0x0C] = 0x8086;
669 eeprom.m_au16Data[0x0D] = 0x100E;
670 eeprom.m_au16Data[0x0E] = 0x8086;
671 eeprom.m_au16Data[0x0F] = 0x3040;
672 eeprom.m_au16Data[0x21] = 0x7061;
673 eeprom.m_au16Data[0x22] = 0x280C;
674 eeprom.m_au16Data[0x23] = 0x00C8;
675 eeprom.m_au16Data[0x24] = 0x00C8;
676 eeprom.m_au16Data[0x2F] = 0x0602;
677 updateChecksum();
678 };
679
680 /**
681 * Compute the checksum as required by E1000 and store it
682 * in the last word.
683 */
684 void updateChecksum()
685 {
686 uint16_t u16Checksum = 0;
687
688 for (int i = 0; i < eeprom.SIZE-1; i++)
689 u16Checksum += eeprom.m_au16Data[i];
690 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
691 };
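    /*
     * Checksum invariant (illustrative note, not part of the original source):
     * after updateChecksum() the 16-bit sum of all EEPROM words equals 0xBABA,
     * which is the value the Intel EEPROM checksum convention expects. A
     * minimal self-check could look like this:
     * @code
     *     uint16_t u16Sum = 0;
     *     for (int i = 0; i < eeprom.SIZE; i++)
     *         u16Sum += eeprom.m_au16Data[i];
     *     Assert(u16Sum == 0xBABA);
     * @endcode
     */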
692
693 /**
694 * The first 6 bytes of the EEPROM contain the MAC address.
695 *
696 * @param pMac Where to store the MAC address of the E1000.
697 */
698 void getMac(PRTMAC pMac)
699 {
700 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
701 };
702
703 uint32_t read()
704 {
705 return eeprom.read();
706 }
707
708 void write(uint32_t u32Wires)
709 {
710 eeprom.write(u32Wires);
711 }
712
713 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
714 {
715 return eeprom.readWord(u32Addr, pu16Value);
716 }
717
718 int load(PSSMHANDLE pSSM)
719 {
720 return eeprom.load(pSSM);
721 }
722
723 void save(PSSMHANDLE pSSM)
724 {
725 eeprom.save(pSSM);
726 }
727#endif /* IN_RING3 */
728};
729
730
731#define E1K_SPEC_VLAN(s) (s & 0xFFF)
732#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
733#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
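/* Decomposition example (mechanical, from the definitions above): for a VLAN
 * TCI value of 0x3123,
 * @code
 *     E1K_SPEC_VLAN(0x3123) // == 0x123 (VLAN identifier, bits 0-11)
 *     E1K_SPEC_CFI(0x3123)  // == 1     (canonical form indicator, bit 12)
 *     E1K_SPEC_PRI(0x3123)  // == 1     (priority, bits 13-15)
 * @endcode
 */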
734
735struct E1kRxDStatus
736{
737 /** @name Descriptor Status field (3.2.3.1)
738 * @{ */
739 unsigned fDD : 1; /**< Descriptor Done. */
740 unsigned fEOP : 1; /**< End of packet. */
741 unsigned fIXSM : 1; /**< Ignore checksum indication. */
742 unsigned fVP : 1; /**< VLAN, matches VET. */
743 unsigned : 1;
744 unsigned fTCPCS : 1; /**< TCP Checksum calculated on the packet. */
745 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
746 unsigned fPIF : 1; /**< Passed in-exact filter */
747 /** @} */
748 /** @name Descriptor Errors field (3.2.3.2)
749 * (Only valid when fEOP and fDD are set.)
750 * @{ */
751 unsigned fCE : 1; /**< CRC or alignment error. */
752 unsigned : 4; /**< Reserved, varies with different models... */
753 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
754 unsigned fIPE : 1; /**< IP Checksum error. */
755 unsigned fRXE : 1; /**< RX Data error. */
756 /** @} */
757 /** @name Descriptor Special field (3.2.3.3)
758 * @{ */
759 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
760 /** @} */
761};
762typedef struct E1kRxDStatus E1KRXDST;
763
764struct E1kRxDesc_st
765{
766 uint64_t u64BufAddr; /**< Address of data buffer */
767 uint16_t u16Length; /**< Length of data in buffer */
768 uint16_t u16Checksum; /**< Packet checksum */
769 E1KRXDST status;
770};
771typedef struct E1kRxDesc_st E1KRXDESC;
772AssertCompileSize(E1KRXDESC, 16);
773
774#define E1K_DTYP_LEGACY -1
775#define E1K_DTYP_CONTEXT 0
776#define E1K_DTYP_DATA 1
777
778struct E1kTDLegacy
779{
780 uint64_t u64BufAddr; /**< Address of data buffer */
781 struct TDLCmd_st
782 {
783 unsigned u16Length : 16;
784 unsigned u8CSO : 8;
785 /* CMD field : 8 */
786 unsigned fEOP : 1;
787 unsigned fIFCS : 1;
788 unsigned fIC : 1;
789 unsigned fRS : 1;
790 unsigned fRPS : 1;
791 unsigned fDEXT : 1;
792 unsigned fVLE : 1;
793 unsigned fIDE : 1;
794 } cmd;
795 struct TDLDw3_st
796 {
797 /* STA field */
798 unsigned fDD : 1;
799 unsigned fEC : 1;
800 unsigned fLC : 1;
801 unsigned fTURSV : 1;
802 /* RSV field */
803 unsigned u4RSV : 4;
804 /* CSS field */
805 unsigned u8CSS : 8;
806 /* Special field*/
807 unsigned u16Special: 16;
808 } dw3;
809};
810
811/**
812 * TCP/IP Context Transmit Descriptor, section 3.3.6.
813 */
814struct E1kTDContext
815{
816 struct CheckSum_st
817 {
818 /** TSE: Header start. !TSE: Checksum start. */
819 unsigned u8CSS : 8;
820 /** Checksum offset - where to store it. */
821 unsigned u8CSO : 8;
822 /** Checksum ending (inclusive) offset, 0 = end of packet. */
823 unsigned u16CSE : 16;
824 } ip;
825 struct CheckSum_st tu;
826 struct TDCDw2_st
827 {
828 /** TSE: The total number of payload bytes for this context. Sans header. */
829 unsigned u20PAYLEN : 20;
830 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
831 unsigned u4DTYP : 4;
832 /** TUCMD field, 8 bits
833 * @{ */
834 /** TSE: TCP (set) or UDP (clear). */
835 unsigned fTCP : 1;
836 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
837 * the IP header. Does not affect the checksumming.
838 * @remarks 82544GC/EI interprets a cleared field differently. */
839 unsigned fIP : 1;
840 /** TSE: TCP segmentation enable. When clear, the context describes an ordinary (non-TSE) checksum offload packet. */
841 unsigned fTSE : 1;
842 /** Report status (only dw3.fDD applies here). */
843 unsigned fRS : 1;
844 /** Reserved, MBZ. */
845 unsigned fRSV1 : 1;
846 /** Descriptor extension, must be set for this descriptor type. */
847 unsigned fDEXT : 1;
848 /** Reserved, MBZ. */
849 unsigned fRSV2 : 1;
850 /** Interrupt delay enable. */
851 unsigned fIDE : 1;
852 /** @} */
853 } dw2;
854 struct TDCDw3_st
855 {
856 /** Descriptor Done. */
857 unsigned fDD : 1;
858 /** Reserved, MBZ. */
859 unsigned u7RSV : 7;
860 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
861 unsigned u8HDRLEN : 8;
862 /** TSO: Maximum segment size. */
863 unsigned u16MSS : 16;
864 } dw3;
865};
866typedef struct E1kTDContext E1KTXCTX;
867
868/**
869 * TCP/IP Data Transmit Descriptor, section 3.3.7.
870 */
871struct E1kTDData
872{
873 uint64_t u64BufAddr; /**< Address of data buffer */
874 struct TDDCmd_st
875 {
876 /** The total length of data pointed to by this descriptor. */
877 unsigned u20DTALEN : 20;
878 /** The descriptor type - E1K_DTYP_DATA (1). */
879 unsigned u4DTYP : 4;
880 /** @name DCMD field, 8 bits (3.3.7.1).
881 * @{ */
882 /** End of packet. Note TSCTFC update. */
883 unsigned fEOP : 1;
884 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
885 unsigned fIFCS : 1;
886 /** Use the TSE context when set and the normal when clear. */
887 unsigned fTSE : 1;
888 /** Report status (dw3.STA). */
889 unsigned fRS : 1;
890 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
891 unsigned fRPS : 1;
892 /** Descriptor extension, must be set for this descriptor type. */
893 unsigned fDEXT : 1;
894 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
895 * Insert dw3.SPECIAL after ethernet header. */
896 unsigned fVLE : 1;
897 /** Interrupt delay enable. */
898 unsigned fIDE : 1;
899 /** @} */
900 } cmd;
901 struct TDDDw3_st
902 {
903 /** @name STA field (3.3.7.2)
904 * @{ */
905 unsigned fDD : 1; /**< Descriptor done. */
906 unsigned fEC : 1; /**< Excess collision. */
907 unsigned fLC : 1; /**< Late collision. */
908 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
909 unsigned fTURSV : 1;
910 /** @} */
911 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
912 /** @name POPTS (Packet Option) field (3.3.7.3)
913 * @{ */
914 unsigned fIXSM : 1; /**< Insert IP checksum. */
915 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
916 unsigned u6RSV : 6; /**< Reserved, MBZ. */
917 /** @} */
918 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
919 * Requires fEOP, fVLE and CTRL.VME to be set.
920 * @{ */
921 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
922 /** @} */
923 } dw3;
924};
925typedef struct E1kTDData E1KTXDAT;
926
927union E1kTxDesc
928{
929 struct E1kTDLegacy legacy;
930 struct E1kTDContext context;
931 struct E1kTDData data;
932};
933typedef union E1kTxDesc E1KTXDESC;
934AssertCompileSize(E1KTXDESC, 16);
935
936#define RA_CTL_AS 0x0003
937#define RA_CTL_AV 0x8000
938
939union E1kRecAddr
940{
941 uint32_t au32[32];
942 struct RAArray
943 {
944 uint8_t addr[6];
945 uint16_t ctl;
946 } array[16];
947};
948typedef struct E1kRecAddr::RAArray E1KRAELEM;
949typedef union E1kRecAddr E1KRA;
950AssertCompileSize(E1KRA, 8*16);
951
952#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
953#define E1K_IP_DF UINT16_C(0x4000) /**< don't fragment flag */
954#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
955#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
956
957/** @todo use+extend RTNETIPV4 */
958struct E1kIpHeader
959{
960 /* type of service / version / header length */
961 uint16_t tos_ver_hl;
962 /* total length */
963 uint16_t total_len;
964 /* identification */
965 uint16_t ident;
966 /* fragment offset field */
967 uint16_t offset;
968 /* time to live / protocol */
969 uint16_t ttl_proto;
970 /* checksum */
971 uint16_t chksum;
972 /* source IP address */
973 uint32_t src;
974 /* destination IP address */
975 uint32_t dest;
976};
977AssertCompileSize(struct E1kIpHeader, 20);
978
979#define E1K_TCP_FIN UINT16_C(0x01)
980#define E1K_TCP_SYN UINT16_C(0x02)
981#define E1K_TCP_RST UINT16_C(0x04)
982#define E1K_TCP_PSH UINT16_C(0x08)
983#define E1K_TCP_ACK UINT16_C(0x10)
984#define E1K_TCP_URG UINT16_C(0x20)
985#define E1K_TCP_ECE UINT16_C(0x40)
986#define E1K_TCP_CWR UINT16_C(0x80)
987#define E1K_TCP_FLAGS UINT16_C(0x3f)
988
989/** @todo use+extend RTNETTCP */
990struct E1kTcpHeader
991{
992 uint16_t src;
993 uint16_t dest;
994 uint32_t seqno;
995 uint32_t ackno;
996 uint16_t hdrlen_flags;
997 uint16_t wnd;
998 uint16_t chksum;
999 uint16_t urgp;
1000};
1001AssertCompileSize(struct E1kTcpHeader, 20);
1002
1003
1004#ifdef E1K_WITH_TXD_CACHE
1005/** The current Saved state version. */
1006# define E1K_SAVEDSTATE_VERSION 4
1007/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
1008# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
1009#else /* !E1K_WITH_TXD_CACHE */
1010/** The current Saved state version. */
1011# define E1K_SAVEDSTATE_VERSION 3
1012#endif /* !E1K_WITH_TXD_CACHE */
1013/** Saved state version for VirtualBox 4.1 and earlier.
1014 * These did not include VLAN tag fields. */
1015#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
1016/** Saved state version for VirtualBox 3.0 and earlier.
1017 * This did not include the configuration part nor the E1kEEPROM. */
1018#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
1019
1020/**
1021 * Device state structure.
1022 *
1023 * Holds the current state of the device.
1024 *
1025 * @implements PDMINETWORKDOWN
1026 * @implements PDMINETWORKCONFIG
1027 * @implements PDMILEDPORTS
1028 */
1029struct E1kState_st
1030{
1031 char szPrf[8]; /**< Log prefix, e.g. E1000#1. */
1032 PDMIBASE IBase;
1033 PDMINETWORKDOWN INetworkDown;
1034 PDMINETWORKCONFIG INetworkConfig;
1035 PDMILEDPORTS ILeds; /**< LED interface */
1036 R3PTRTYPE(PPDMIBASE) pDrvBase; /**< Attached network driver. */
1037 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1038
1039 PPDMDEVINSR3 pDevInsR3; /**< Device instance - R3. */
1040 R3PTRTYPE(PPDMQUEUE) pTxQueueR3; /**< Transmit queue - R3. */
1041 R3PTRTYPE(PPDMQUEUE) pCanRxQueueR3; /**< Rx wakeup signaller - R3. */
1042 PPDMINETWORKUPR3 pDrvR3; /**< Attached network driver - R3. */
1043 PTMTIMERR3 pRIDTimerR3; /**< Receive Interrupt Delay Timer - R3. */
1044 PTMTIMERR3 pRADTimerR3; /**< Receive Absolute Delay Timer - R3. */
1045 PTMTIMERR3 pTIDTimerR3; /**< Transmit Interrupt Delay Timer - R3. */
1046 PTMTIMERR3 pTADTimerR3; /**< Transmit Absolute Delay Timer - R3. */
1047 PTMTIMERR3 pTXDTimerR3; /**< Transmit Delay Timer - R3. */
1048 PTMTIMERR3 pIntTimerR3; /**< Late Interrupt Timer - R3. */
1049 PTMTIMERR3 pLUTimerR3; /**< Link Up(/Restore) Timer. */
1050 /** The scatter / gather buffer used for the current outgoing packet - R3. */
1051 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1052
1053 PPDMDEVINSR0 pDevInsR0; /**< Device instance - R0. */
1054 R0PTRTYPE(PPDMQUEUE) pTxQueueR0; /**< Transmit queue - R0. */
1055 R0PTRTYPE(PPDMQUEUE) pCanRxQueueR0; /**< Rx wakeup signaller - R0. */
1056 PPDMINETWORKUPR0 pDrvR0; /**< Attached network driver - R0. */
1057 PTMTIMERR0 pRIDTimerR0; /**< Receive Interrupt Delay Timer - R0. */
1058 PTMTIMERR0 pRADTimerR0; /**< Receive Absolute Delay Timer - R0. */
1059 PTMTIMERR0 pTIDTimerR0; /**< Transmit Interrupt Delay Timer - R0. */
1060 PTMTIMERR0 pTADTimerR0; /**< Transmit Absolute Delay Timer - R0. */
1061 PTMTIMERR0 pTXDTimerR0; /**< Transmit Delay Timer - R0. */
1062 PTMTIMERR0 pIntTimerR0; /**< Late Interrupt Timer - R0. */
1063 PTMTIMERR0 pLUTimerR0; /**< Link Up(/Restore) Timer - R0. */
1064 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1065 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1066
1067 PPDMDEVINSRC pDevInsRC; /**< Device instance - RC. */
1068 RCPTRTYPE(PPDMQUEUE) pTxQueueRC; /**< Transmit queue - RC. */
1069 RCPTRTYPE(PPDMQUEUE) pCanRxQueueRC; /**< Rx wakeup signaller - RC. */
1070 PPDMINETWORKUPRC pDrvRC; /**< Attached network driver - RC. */
1071 PTMTIMERRC pRIDTimerRC; /**< Receive Interrupt Delay Timer - RC. */
1072 PTMTIMERRC pRADTimerRC; /**< Receive Absolute Delay Timer - RC. */
1073 PTMTIMERRC pTIDTimerRC; /**< Transmit Interrupt Delay Timer - RC. */
1074 PTMTIMERRC pTADTimerRC; /**< Transmit Absolute Delay Timer - RC. */
1075 PTMTIMERRC pTXDTimerRC; /**< Transmit Delay Timer - RC. */
1076 PTMTIMERRC pIntTimerRC; /**< Late Interrupt Timer - RC. */
1077 PTMTIMERRC pLUTimerRC; /**< Link Up(/Restore) Timer - RC. */
1078 /** The scatter / gather buffer used for the current outgoing packet - RC. */
1079 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1080 RTRCPTR RCPtrAlignment;
1081
1082#if HC_ARCH_BITS != 32
1083 uint32_t Alignment1;
1084#endif
1085 PDMCRITSECT cs; /**< Critical section - what is it protecting? */
1086 PDMCRITSECT csRx; /**< RX Critical section. */
1087#ifdef E1K_WITH_TX_CS
1088 PDMCRITSECT csTx; /**< TX Critical section. */
1089#endif /* E1K_WITH_TX_CS */
1090 /** Base address of memory-mapped registers. */
1091 RTGCPHYS addrMMReg;
1092 /** MAC address obtained from the configuration. */
1093 RTMAC macConfigured;
1094 /** Base port of I/O space region. */
1095 RTIOPORT IOPortBase;
1096 /** EMT: */
1097 PDMPCIDEV pciDevice;
1098 /** EMT: Last time the interrupt was acknowledged. */
1099 uint64_t u64AckedAt;
1100 /** All: Used for eliminating spurious interrupts. */
1101 bool fIntRaised;
1102 /** EMT: false if the cable is disconnected by the GUI. */
1103 bool fCableConnected;
1104 /** EMT: */
1105 bool fR0Enabled;
1106 /** EMT: */
1107 bool fRCEnabled;
1108 /** EMT: Compute Ethernet CRC for RX packets. */
1109 bool fEthernetCRC;
1110 /** All: throttle interrupts. */
1111 bool fItrEnabled;
1112 /** All: throttle RX interrupts. */
1113 bool fItrRxEnabled;
1114 /** All: Delay TX interrupts using TIDV/TADV. */
1115 bool fTidEnabled;
1116 /** Link up delay (in milliseconds). */
1117 uint32_t cMsLinkUpDelay;
1118
1119 /** All: Device register storage. */
1120 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1121 /** TX/RX: Status LED. */
1122 PDMLED led;
1123 /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1124 uint32_t u32PktNo;
1125
1126 /** EMT: Offset of the register to be read via IO. */
1127 uint32_t uSelectedReg;
1128 /** EMT: Multicast Table Array. */
1129 uint32_t auMTA[128];
1130 /** EMT: Receive Address registers. */
1131 E1KRA aRecAddr;
1132 /** EMT: VLAN filter table array. */
1133 uint32_t auVFTA[128];
1134 /** EMT: Receive buffer size. */
1135 uint16_t u16RxBSize;
1136 /** EMT: Locked state -- no state alteration possible. */
1137 bool fLocked;
1138 /** EMT: */
1139 bool fDelayInts;
1140 /** All: */
1141 bool fIntMaskUsed;
1142
1143 /** N/A: */
1144 bool volatile fMaybeOutOfSpace;
1145 /** EMT: Gets signalled when more RX descriptors become available. */
1146 RTSEMEVENT hEventMoreRxDescAvail;
1147#ifdef E1K_WITH_RXD_CACHE
1148 /** RX: Fetched RX descriptors. */
1149 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1150 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1151 /** RX: Actual number of fetched RX descriptors. */
1152 uint32_t nRxDFetched;
1153 /** RX: Index in cache of RX descriptor being processed. */
1154 uint32_t iRxDCurrent;
1155#endif /* E1K_WITH_RXD_CACHE */
1156
1157 /** TX: Context used for TCP segmentation packets. */
1158 E1KTXCTX contextTSE;
1159 /** TX: Context used for ordinary packets. */
1160 E1KTXCTX contextNormal;
1161#ifdef E1K_WITH_TXD_CACHE
1162 /** TX: Fetched TX descriptors. */
1163 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1164 /** TX: Actual number of fetched TX descriptors. */
1165 uint8_t nTxDFetched;
1166 /** TX: Index in cache of TX descriptor being processed. */
1167 uint8_t iTxDCurrent;
1168 /** TX: Will this frame be sent as GSO. */
1169 bool fGSO;
1170 /** Alignment padding. */
1171 bool fReserved;
1172 /** TX: Number of bytes in next packet. */
1173 uint32_t cbTxAlloc;
1174
1175#endif /* E1K_WITH_TXD_CACHE */
1176 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1177 * applicable to the current TSE mode. */
1178 PDMNETWORKGSO GsoCtx;
1179 /** Scratch space for holding the loopback / fallback scatter / gather
1180 * descriptor. */
1181 union
1182 {
1183 PDMSCATTERGATHER Sg;
1184 uint8_t padding[8 * sizeof(RTUINTPTR)];
1185 } uTxFallback;
1186 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1187 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1188 /** TX: Number of bytes assembled in TX packet buffer. */
1189 uint16_t u16TxPktLen;
1190 /** TX: When false, e1000 performs segmentation itself instead of sending frames as GSO. */
1191 bool fGSOEnabled;
1192 /** TX: IP checksum has to be inserted if true. */
1193 bool fIPcsum;
1194 /** TX: TCP/UDP checksum has to be inserted if true. */
1195 bool fTCPcsum;
1196 /** TX: VLAN tag has to be inserted if true. */
1197 bool fVTag;
1198 /** TX: TCI part of VLAN tag to be inserted. */
1199 uint16_t u16VTagTCI;
1200 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1201 uint32_t u32PayRemain;
1202 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1203 uint16_t u16HdrRemain;
1204 /** TX TSE fallback: Flags from template header. */
1205 uint16_t u16SavedFlags;
1206 /** TX TSE fallback: Partial checksum from template header. */
1207 uint32_t u32SavedCsum;
1208 /** ?: Emulated controller type. */
1209 E1KCHIP eChip;
1210
1211 /** EMT: EEPROM emulation */
1212 E1kEEPROM eeprom;
1213 /** EMT: Physical interface emulation. */
1214 PHY phy;
1215
1216#if 0
1217 /** Alignment padding. */
1218 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1219#endif
1220
1221 STAMCOUNTER StatReceiveBytes;
1222 STAMCOUNTER StatTransmitBytes;
1223#if defined(VBOX_WITH_STATISTICS)
1224 STAMPROFILEADV StatMMIOReadRZ;
1225 STAMPROFILEADV StatMMIOReadR3;
1226 STAMPROFILEADV StatMMIOWriteRZ;
1227 STAMPROFILEADV StatMMIOWriteR3;
1228 STAMPROFILEADV StatEEPROMRead;
1229 STAMPROFILEADV StatEEPROMWrite;
1230 STAMPROFILEADV StatIOReadRZ;
1231 STAMPROFILEADV StatIOReadR3;
1232 STAMPROFILEADV StatIOWriteRZ;
1233 STAMPROFILEADV StatIOWriteR3;
1234 STAMPROFILEADV StatLateIntTimer;
1235 STAMCOUNTER StatLateInts;
1236 STAMCOUNTER StatIntsRaised;
1237 STAMCOUNTER StatIntsPrevented;
1238 STAMPROFILEADV StatReceive;
1239 STAMPROFILEADV StatReceiveCRC;
1240 STAMPROFILEADV StatReceiveFilter;
1241 STAMPROFILEADV StatReceiveStore;
1242 STAMPROFILEADV StatTransmitRZ;
1243 STAMPROFILEADV StatTransmitR3;
1244 STAMPROFILE StatTransmitSendRZ;
1245 STAMPROFILE StatTransmitSendR3;
1246 STAMPROFILE StatRxOverflow;
1247 STAMCOUNTER StatRxOverflowWakeup;
1248 STAMCOUNTER StatTxDescCtxNormal;
1249 STAMCOUNTER StatTxDescCtxTSE;
1250 STAMCOUNTER StatTxDescLegacy;
1251 STAMCOUNTER StatTxDescData;
1252 STAMCOUNTER StatTxDescTSEData;
1253 STAMCOUNTER StatTxPathFallback;
1254 STAMCOUNTER StatTxPathGSO;
1255 STAMCOUNTER StatTxPathRegular;
1256 STAMCOUNTER StatPHYAccesses;
1257 STAMCOUNTER aStatRegWrites[E1K_NUM_OF_REGS];
1258 STAMCOUNTER aStatRegReads[E1K_NUM_OF_REGS];
1259#endif /* VBOX_WITH_STATISTICS */
1260
1261#ifdef E1K_INT_STATS
1262 /* Internal stats */
1263 uint64_t u64ArmedAt;
1264 uint64_t uStatMaxTxDelay;
1265 uint32_t uStatInt;
1266 uint32_t uStatIntTry;
1267 uint32_t uStatIntLower;
1268 uint32_t uStatNoIntICR;
1269 int32_t iStatIntLost;
1270 int32_t iStatIntLostOne;
1271 uint32_t uStatIntIMS;
1272 uint32_t uStatIntSkip;
1273 uint32_t uStatIntLate;
1274 uint32_t uStatIntMasked;
1275 uint32_t uStatIntEarly;
1276 uint32_t uStatIntRx;
1277 uint32_t uStatIntTx;
1278 uint32_t uStatIntICS;
1279 uint32_t uStatIntRDTR;
1280 uint32_t uStatIntRXDMT0;
1281 uint32_t uStatIntTXQE;
1282 uint32_t uStatTxNoRS;
1283 uint32_t uStatTxIDE;
1284 uint32_t uStatTxDelayed;
1285 uint32_t uStatTxDelayExp;
1286 uint32_t uStatTAD;
1287 uint32_t uStatTID;
1288 uint32_t uStatRAD;
1289 uint32_t uStatRID;
1290 uint32_t uStatRxFrm;
1291 uint32_t uStatTxFrm;
1292 uint32_t uStatDescCtx;
1293 uint32_t uStatDescDat;
1294 uint32_t uStatDescLeg;
1295 uint32_t uStatTx1514;
1296 uint32_t uStatTx2962;
1297 uint32_t uStatTx4410;
1298 uint32_t uStatTx5858;
1299 uint32_t uStatTx7306;
1300 uint32_t uStatTx8754;
1301 uint32_t uStatTx16384;
1302 uint32_t uStatTx32768;
1303 uint32_t uStatTxLarge;
1304 uint32_t uStatAlign;
1305#endif /* E1K_INT_STATS */
1306};
1307typedef struct E1kState_st E1KSTATE;
1308/** Pointer to the E1000 device state. */
1309typedef E1KSTATE *PE1KSTATE;
1310
1311#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1312
1313/* Forward declarations ******************************************************/
1314static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread);
1315
1316static int e1kRegReadUnimplemented (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1317static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1318static int e1kRegReadAutoClear (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1319static int e1kRegReadDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1320static int e1kRegWriteDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1321#if 0 /* unused */
1322static int e1kRegReadCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1323#endif
1324static int e1kRegWriteCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1325static int e1kRegReadEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1326static int e1kRegWriteEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1327static int e1kRegWriteEERD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1328static int e1kRegWriteMDIC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1329static int e1kRegReadICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1330static int e1kRegWriteICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1331static int e1kRegWriteICS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1332static int e1kRegWriteIMS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1333static int e1kRegWriteIMC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1334static int e1kRegWriteRCTL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1335static int e1kRegWritePBA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1336static int e1kRegWriteRDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1337static int e1kRegWriteRDTR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1338static int e1kRegWriteTDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1339static int e1kRegReadMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1340static int e1kRegWriteMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1341static int e1kRegReadRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1342static int e1kRegWriteRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1343static int e1kRegReadVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1344static int e1kRegWriteVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1345
1346/**
1347 * Register map table.
1348 *
1349 * Override pfnRead and pfnWrite to get register-specific behavior.
1350 */
1351static const struct E1kRegMap_st
1352{
1353 /** Register offset in the register space. */
1354 uint32_t offset;
1355 /** Size in bytes. Registers of size > 4 are in fact tables. */
1356 uint32_t size;
1357 /** Readable bits. */
1358 uint32_t readable;
1359 /** Writable bits. */
1360 uint32_t writable;
1361 /** Read callback. */
1362 int (*pfnRead)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1363 /** Write callback. */
1364 int (*pfnWrite)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1365 /** Abbreviated name. */
1366 const char *abbrev;
1367 /** Full name. */
1368 const char *name;
1369} g_aE1kRegMap[E1K_NUM_OF_REGS] =
1370{
1371 /* offset size read mask write mask read callback write callback abbrev full name */
1372 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1373 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1374 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1375 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1376 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1377 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1378 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1379 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1380 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1381 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1382 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1383 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1384 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1385 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1386 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1387 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1388 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1389 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1390 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1391 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1392 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1393 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1394 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1395 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1396 { 0x00e00, 0x00004, 0xCFCFCFCF, 0xCFCFCFCF, e1kRegReadDefault , e1kRegWriteDefault , "LEDCTL" , "LED Control" },
1397 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1398 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1399 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1400 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1401 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1402 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1403 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1404 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1405 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1406 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1407 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1408 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1409 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1410 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1411 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1412 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1413 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1414 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1415 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1416 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1417 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1418 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1419 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1420 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1421 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1422 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1423 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1424 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1425 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1426 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1427 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1428 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1429 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1430 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1431 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1432 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1433 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1434 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1435 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1436 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1437 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1438 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1439 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1440 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1441 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1442 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1443 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1444 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1445 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1446 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1447 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1448 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1449 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1450 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1451 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1452 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1453 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1454 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1455 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1456 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1457 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1458 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1459 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1460 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1461 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1462 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1463 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1464 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1465 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1466 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1467 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1468 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1469 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1470 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1471 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1472 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1473 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1474 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1475 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1476 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1477 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1478 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1479 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1480 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1481 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1482 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1483 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1484 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1485 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1486 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1487 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1488 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1489 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1490 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1491 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1492 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1493 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1494 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1495 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1496 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1497 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1498 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1499 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1500 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1501 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1502 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1503 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1504 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
1505 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
1506 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
1507};
1508
1509#ifdef LOG_ENABLED
1510
1511/**
1512 * Convert U32 value to hex string. Masked bytes are replaced with dots.
1513 *
1514 * @remarks The mask has nibble (hex digit, not bit or byte) granularity (e.g. 000000FF).
1515 *
1516 * @returns The buffer.
1517 *
1518 * @param u32 The word to convert into string.
1519 * @param mask Selects which bytes to convert.
1520 * @param buf Where to put the result.
1521 */
1522static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1523{
1524 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1525 {
1526 if (mask & 0xF)
1527 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1528 else
1529 *ptr = '.';
1530 }
1531 buf[8] = 0;
1532 return buf;
1533}
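/*
 * Example (illustrative, local names are made up): only the nibbles selected
 * by the mask are converted, the rest become dots:
 *
 *   char szBuf[9];
 *   e1kU32toHex(0x12345678, 0x0000FFFF, szBuf); // szBuf now holds "....5678"
 */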
1534
1535/**
1536 * Returns timer name for debug purposes.
1537 *
1538 * @returns The timer name.
1539 *
1540 * @param pThis The device state structure.
1541 * @param pTimer The timer to get the name for.
1542 */
1543DECLINLINE(const char *) e1kGetTimerName(PE1KSTATE pThis, PTMTIMER pTimer)
1544{
1545 if (pTimer == pThis->CTX_SUFF(pTIDTimer))
1546 return "TID";
1547 if (pTimer == pThis->CTX_SUFF(pTADTimer))
1548 return "TAD";
1549 if (pTimer == pThis->CTX_SUFF(pRIDTimer))
1550 return "RID";
1551 if (pTimer == pThis->CTX_SUFF(pRADTimer))
1552 return "RAD";
1553 if (pTimer == pThis->CTX_SUFF(pIntTimer))
1554 return "Int";
1555 if (pTimer == pThis->CTX_SUFF(pTXDTimer))
1556 return "TXD";
1557 if (pTimer == pThis->CTX_SUFF(pLUTimer))
1558 return "LinkUp";
1559 return "unknown";
1560}
1561
1562#endif /* LOG_ENABLED */
1563
1564/**
1565 * Arm a timer.
1566 *
1567 * @param pThis Pointer to the device state structure.
1568 * @param pTimer Pointer to the timer.
1569 * @param uExpireIn Expiration interval in microseconds.
1570 */
1571DECLINLINE(void) e1kArmTimer(PE1KSTATE pThis, PTMTIMER pTimer, uint32_t uExpireIn)
1572{
1573 if (pThis->fLocked)
1574 return;
1575
1576 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1577 pThis->szPrf, e1kGetTimerName(pThis, pTimer), uExpireIn));
1578 TMTimerSetMicro(pTimer, uExpireIn);
1579}
1580
1581#ifdef IN_RING3
1582/**
1583 * Cancel a timer.
1584 *
1585 * @param pThis Pointer to the device state structure.
1586 * @param pTimer Pointer to the timer.
1587 */
1588DECLINLINE(void) e1kCancelTimer(PE1KSTATE pThis, PTMTIMER pTimer)
1589{
1590 E1kLog2(("%s Stopping %s timer...\n",
1591 pThis->szPrf, e1kGetTimerName(pThis, pTimer)));
1592 int rc = TMTimerStop(pTimer);
1593 if (RT_FAILURE(rc))
1594 E1kLog2(("%s e1kCancelTimer: TMTimerStop() failed with %Rrc\n",
1595 pThis->szPrf, rc));
1596 RT_NOREF1(pThis);
1597}
1598#endif /* IN_RING3 */
1599
1600#define e1kCsEnter(ps, rc) PDMCritSectEnter(&ps->cs, rc)
1601#define e1kCsLeave(ps) PDMCritSectLeave(&ps->cs)
1602
1603#define e1kCsRxEnter(ps, rc) PDMCritSectEnter(&ps->csRx, rc)
1604#define e1kCsRxLeave(ps) PDMCritSectLeave(&ps->csRx)
1605#define e1kCsRxIsOwner(ps) PDMCritSectIsOwner(&ps->csRx)
1606
1607#ifndef E1K_WITH_TX_CS
1608# define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1609# define e1kCsTxLeave(ps) do { } while (0)
1610#else /* E1K_WITH_TX_CS */
1611# define e1kCsTxEnter(ps, rc) PDMCritSectEnter(&ps->csTx, rc)
1612# define e1kCsTxLeave(ps) PDMCritSectLeave(&ps->csTx)
1613#endif /* E1K_WITH_TX_CS */
1614
1615#ifdef IN_RING3
1616
1617/**
1618 * Wakeup the RX thread.
1619 */
1620static void e1kWakeupReceive(PPDMDEVINS pDevIns)
1621{
1622 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
1623 if ( pThis->fMaybeOutOfSpace
1624 && pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
1625 {
1626 STAM_COUNTER_INC(&pThis->StatRxOverflowWakeup);
1627 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", pThis->szPrf));
1628 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
1629 }
1630}
1631
1632/**
1633 * Hardware reset. Revert all registers to initial values.
1634 *
1635 * @param pThis The device state structure.
1636 */
1637static void e1kHardReset(PE1KSTATE pThis)
1638{
1639 E1kLog(("%s Hard reset triggered\n", pThis->szPrf));
1640 memset(pThis->auRegs, 0, sizeof(pThis->auRegs));
1641 memset(pThis->aRecAddr.au32, 0, sizeof(pThis->aRecAddr.au32));
1642#ifdef E1K_INIT_RA0
1643 memcpy(pThis->aRecAddr.au32, pThis->macConfigured.au8,
1644 sizeof(pThis->macConfigured.au8));
1645 pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;
1646#endif /* E1K_INIT_RA0 */
1647 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1648 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1649 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1650 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1651 Assert(GET_BITS(RCTL, BSIZE) == 0);
1652 pThis->u16RxBSize = 2048;
1653
1654 uint16_t u16LedCtl = 0x0602; /* LED0/LINK_UP#, LED2/LINK100# */
1655 pThis->eeprom.readWord(0x2F, &u16LedCtl); /* Read LEDCTL defaults from EEPROM */
1656 LEDCTL = 0x07008300 | (((uint32_t)u16LedCtl & 0xCF00) << 8) | (u16LedCtl & 0xCF); /* Only LED0 and LED2 defaults come from EEPROM */
1657
1658 /* Reset promiscuous mode */
1659 if (pThis->pDrvR3)
1660 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, false);
1661
1662#ifdef E1K_WITH_TXD_CACHE
1663 int rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
1664 if (RT_LIKELY(rc == VINF_SUCCESS))
1665 {
1666 pThis->nTxDFetched = 0;
1667 pThis->iTxDCurrent = 0;
1668 pThis->fGSO = false;
1669 pThis->cbTxAlloc = 0;
1670 e1kCsTxLeave(pThis);
1671 }
1672#endif /* E1K_WITH_TXD_CACHE */
1673#ifdef E1K_WITH_RXD_CACHE
1674 if (RT_LIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1675 {
1676 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
1677 e1kCsRxLeave(pThis);
1678 }
1679#endif /* E1K_WITH_RXD_CACHE */
1680#ifdef E1K_LSC_ON_RESET
1681 E1kLog(("%s Will trigger LSC in %d seconds...\n",
1682 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
1683 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), pThis->cMsLinkUpDelay * 1000);
1684#endif /* E1K_LSC_ON_RESET */
1685}
1686
1687#endif /* IN_RING3 */
1688
1689/**
1690 * Compute Internet checksum.
1691 *
1692 * @remarks Refer to http://www.netfor2.com/checksum.html for short intro.
1693 *
1694 * @param pvBuf The buffer containing the data to checksum.
1695 * @param cb The size of the buffer in bytes.
1698 *
1699 * @return The 1's complement of the 1's complement sum.
1700 *
1701 * @thread E1000_TX
1702 */
1703static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1704{
1705 uint32_t csum = 0;
1706 uint16_t *pu16 = (uint16_t *)pvBuf;
1707
1708 while (cb > 1)
1709 {
1710 csum += *pu16++;
1711 cb -= 2;
1712 }
1713 if (cb)
1714 csum += *(uint8_t*)pu16;
1715 while (csum >> 16)
1716 csum = (csum >> 16) + (csum & 0xFFFF);
1717 return ~csum;
1718}
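/*
 * Example (illustrative, names are made up): the value returned for a buffer
 * whose checksum field is zeroed is exactly what belongs in that field, and
 * re-running the sum over the completed buffer folds to zero:
 *
 *   uint16_t uSum = e1kCSum16(pvHdr, cbHdr);  // checksum field cleared beforehand
 *   // ... store uSum into the header's checksum field ...
 *   Assert(e1kCSum16(pvHdr, cbHdr) == 0);     // a buffer with a valid checksum sums to 0
 */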
1719
1720/**
1721 * Dump a packet to debug log.
1722 *
1723 * @param pThis The device state structure.
1724 * @param cpPacket The packet.
1725 * @param cb The size of the packet.
1726 * @param pszText A string denoting direction of packet transfer.
1727 * @thread E1000_TX
1728 */
1729DECLINLINE(void) e1kPacketDump(PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *pszText)
1730{
1731#ifdef DEBUG
1732 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1733 {
1734 Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
1735 pThis->szPrf, pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cb));
1736 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1737 {
1738 Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
1739 pThis->szPrf, cpPacket+14+8, cpPacket+14+24));
1740 if (*(cpPacket+14+6) == 0x6)
1741 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1742 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1743 }
1744 else if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x800)
1745 {
1746 Log4(("%s --- IPv4: %RTnaipv4 => %RTnaipv4\n",
1747 pThis->szPrf, *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16)));
1748 if (*(cpPacket+14+9) == 0x6) /* IPv4 protocol field */
1749 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1750 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1751 }
1752 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1753 e1kCsLeave(pThis);
1754 }
1755#else
1756 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1757 {
1758 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1759 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
1760 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cpPacket+14+8, cpPacket+14+24,
1761 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1762 else
1763 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
1764 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket,
1765 *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16),
1766 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1767 e1kCsLeave(pThis);
1768 }
1769 RT_NOREF2(cb, pszText);
1770#endif
1771}
1772
1773/**
1774 * Determine the type of transmit descriptor.
1775 *
1776 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1777 *
1778 * @param pDesc Pointer to descriptor union.
1779 * @thread E1000_TX
1780 */
1781DECLINLINE(int) e1kGetDescType(E1KTXDESC *pDesc)
1782{
1783 if (pDesc->legacy.cmd.fDEXT)
1784 return pDesc->context.dw2.u4DTYP;
1785 return E1K_DTYP_LEGACY;
1786}
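/*
 * Note: per the 8254x specification a descriptor with DEXT clear is a legacy
 * descriptor; with DEXT set the DTYP field distinguishes TCP/IP context
 * descriptors (0000b) from TCP/IP data descriptors (0001b), which is what
 * E1K_DTYP_CONTEXT and E1K_DTYP_DATA refer to in e1kPrintTDesc() below.
 */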
1787
1788
1789#ifdef E1K_WITH_RXD_CACHE
1790/**
1791 * Return the number of RX descriptors that belong to the hardware.
1792 *
1793 * @returns the number of available descriptors in RX ring.
1794 * @param pThis The device state structure.
1795 * @thread ???
1796 */
1797DECLINLINE(uint32_t) e1kGetRxLen(PE1KSTATE pThis)
1798{
1799 /**
1800 * Make sure RDT won't change during computation. EMT may modify RDT at
1801 * any moment.
1802 */
1803 uint32_t rdt = RDT;
1804 return (RDH > rdt ? RDLEN/sizeof(E1KRXDESC) : 0) + rdt - RDH;
1805}
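/*
 * Example (illustrative): with RDLEN = 4096 (256 descriptors), RDH = 250 and
 * RDT = 10 the tail has wrapped around the end of the ring, so the hardware
 * owns descriptors 250..255 and 0..9, i.e. 256 + 10 - 250 = 16 descriptors.
 */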
1806
1807DECLINLINE(unsigned) e1kRxDInCache(PE1KSTATE pThis)
1808{
1809 return pThis->nRxDFetched > pThis->iRxDCurrent ?
1810 pThis->nRxDFetched - pThis->iRxDCurrent : 0;
1811}
1812
1813DECLINLINE(unsigned) e1kRxDIsCacheEmpty(PE1KSTATE pThis)
1814{
1815 return pThis->iRxDCurrent >= pThis->nRxDFetched;
1816}
1817
1818/**
1819 * Load receive descriptors from guest memory. The caller needs to be in Rx
1820 * critical section.
1821 *
1822 * We need two physical reads in case the tail wrapped around the end of RX
1823 * descriptor ring.
1824 *
1825 * @returns the actual number of descriptors fetched.
1826 * @param pThis The device state structure.
1829 * @thread EMT, RX
1830 */
1831DECLINLINE(unsigned) e1kRxDPrefetch(PE1KSTATE pThis)
1832{
1833 /* We've already loaded pThis->nRxDFetched descriptors past RDH. */
1834 unsigned nDescsAvailable = e1kGetRxLen(pThis) - e1kRxDInCache(pThis);
1835 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pThis->nRxDFetched);
1836 unsigned nDescsTotal = RDLEN / sizeof(E1KRXDESC);
1837 Assert(nDescsTotal != 0);
1838 if (nDescsTotal == 0)
1839 return 0;
1840 unsigned nFirstNotLoaded = (RDH + e1kRxDInCache(pThis)) % nDescsTotal;
1841 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
1842 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
1843 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
1844 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
1845 nFirstNotLoaded, nDescsInSingleRead));
1846 if (nDescsToFetch == 0)
1847 return 0;
1848 E1KRXDESC* pFirstEmptyDesc = &pThis->aRxDescriptors[pThis->nRxDFetched];
1849 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
1850 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
1851 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
1852 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
1853 // unsigned i, j;
1854 // for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
1855 // {
1856 // pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
1857 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
1858 // }
1859 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
1860 pThis->szPrf, nDescsInSingleRead,
1861 RDBAH, RDBAL + RDH * sizeof(E1KRXDESC),
1862 nFirstNotLoaded, RDLEN, RDH, RDT));
1863 if (nDescsToFetch > nDescsInSingleRead)
1864 {
1865 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
1866 ((uint64_t)RDBAH << 32) + RDBAL,
1867 pFirstEmptyDesc + nDescsInSingleRead,
1868 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
1869 // Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
1870 // for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
1871 // {
1872 // pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
1873 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
1874 // }
1875 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
1876 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
1877 RDBAH, RDBAL));
1878 }
1879 pThis->nRxDFetched += nDescsToFetch;
1880 return nDescsToFetch;
1881}
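/*
 * Example (illustrative): with a 256-descriptor ring, RDH plus the already
 * cached descriptors pointing at entry 250 and 16 descriptors to fetch, the
 * first physical read covers entries 250..255 and the second read wraps
 * around to cover entries 0..9.
 */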
1882
1883# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
1884/**
1885 * Dump receive descriptor to debug log.
1886 *
1887 * @param pThis The device state structure.
1888 * @param pDesc Pointer to the descriptor.
1889 * @thread E1000_RX
1890 */
1891static void e1kPrintRDesc(PE1KSTATE pThis, E1KRXDESC *pDesc)
1892{
1893 RT_NOREF2(pThis, pDesc);
1894 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", pThis->szPrf, pDesc->u16Length));
1895 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1896 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1897 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1898 pDesc->status.fPIF ? "PIF" : "pif",
1899 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1900 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1901 pDesc->status.fVP ? "VP" : "vp",
1902 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1903 pDesc->status.fEOP ? "EOP" : "eop",
1904 pDesc->status.fDD ? "DD" : "dd",
1905 pDesc->status.fRXE ? "RXE" : "rxe",
1906 pDesc->status.fIPE ? "IPE" : "ipe",
1907 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1908 pDesc->status.fCE ? "CE" : "ce",
1909 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1910 E1K_SPEC_VLAN(pDesc->status.u16Special),
1911 E1K_SPEC_PRI(pDesc->status.u16Special)));
1912}
1913# endif /* IN_RING3 */
1914#endif /* E1K_WITH_RXD_CACHE */
1915
1916/**
1917 * Dump transmit descriptor to debug log.
1918 *
1919 * @param pThis The device state structure.
1920 * @param pDesc Pointer to descriptor union.
1921 * @param pszDir A string denoting direction of descriptor transfer
1922 * @thread E1000_TX
1923 */
1924static void e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, const char *pszDir,
1925 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
1926{
1927 RT_NOREF4(pThis, pDesc, pszDir, uLevel);
1928
1929 /*
1930 * Unfortunately we cannot use our format handler here, we want R0 logging
1931 * as well.
1932 */
1933 switch (e1kGetDescType(pDesc))
1934 {
1935 case E1K_DTYP_CONTEXT:
1936 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
1937 pThis->szPrf, pszDir, pszDir));
1938 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
1939 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
1940 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
1941 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
1942 pDesc->context.dw2.fIDE ? " IDE":"",
1943 pDesc->context.dw2.fRS ? " RS" :"",
1944 pDesc->context.dw2.fTSE ? " TSE":"",
1945 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
1946 pDesc->context.dw2.fTCP ? "TCP":"UDP",
1947 pDesc->context.dw2.u20PAYLEN,
1948 pDesc->context.dw3.u8HDRLEN,
1949 pDesc->context.dw3.u16MSS,
1950 pDesc->context.dw3.fDD?"DD":""));
1951 break;
1952 case E1K_DTYP_DATA:
1953 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
1954 pThis->szPrf, pszDir, pDesc->data.cmd.u20DTALEN, pszDir));
1955 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1956 pDesc->data.u64BufAddr,
1957 pDesc->data.cmd.u20DTALEN));
1958 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
1959 pDesc->data.cmd.fIDE ? " IDE" :"",
1960 pDesc->data.cmd.fVLE ? " VLE" :"",
1961 pDesc->data.cmd.fRPS ? " RPS" :"",
1962 pDesc->data.cmd.fRS ? " RS" :"",
1963 pDesc->data.cmd.fTSE ? " TSE" :"",
1964 pDesc->data.cmd.fIFCS? " IFCS":"",
1965 pDesc->data.cmd.fEOP ? " EOP" :"",
1966 pDesc->data.dw3.fDD ? " DD" :"",
1967 pDesc->data.dw3.fEC ? " EC" :"",
1968 pDesc->data.dw3.fLC ? " LC" :"",
1969 pDesc->data.dw3.fTXSM? " TXSM":"",
1970 pDesc->data.dw3.fIXSM? " IXSM":"",
1971 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
1972 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
1973 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
1974 break;
1975 case E1K_DTYP_LEGACY:
1976 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
1977 pThis->szPrf, pszDir, pDesc->legacy.cmd.u16Length, pszDir));
1978 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1979 pDesc->data.u64BufAddr,
1980 pDesc->legacy.cmd.u16Length));
1981 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
1982 pDesc->legacy.cmd.fIDE ? " IDE" :"",
1983 pDesc->legacy.cmd.fVLE ? " VLE" :"",
1984 pDesc->legacy.cmd.fRPS ? " RPS" :"",
1985 pDesc->legacy.cmd.fRS ? " RS" :"",
1986 pDesc->legacy.cmd.fIC ? " IC" :"",
1987 pDesc->legacy.cmd.fIFCS? " IFCS":"",
1988 pDesc->legacy.cmd.fEOP ? " EOP" :"",
1989 pDesc->legacy.dw3.fDD ? " DD" :"",
1990 pDesc->legacy.dw3.fEC ? " EC" :"",
1991 pDesc->legacy.dw3.fLC ? " LC" :"",
1992 pDesc->legacy.cmd.u8CSO,
1993 pDesc->legacy.dw3.u8CSS,
1994 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
1995 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
1996 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
1997 break;
1998 default:
1999 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
2000 pThis->szPrf, pszDir, pszDir));
2001 break;
2002 }
2003}
2004
2005/**
2006 * Raise an interrupt later.
2007 *
2008 * @param pThis The device state structure.
 * @param uNanoseconds The delay before raising the interrupt, in nanoseconds.
2009 */
2010inline void e1kPostponeInterrupt(PE1KSTATE pThis, uint64_t uNanoseconds)
2011{
2012 if (!TMTimerIsActive(pThis->CTX_SUFF(pIntTimer)))
2013 TMTimerSetNano(pThis->CTX_SUFF(pIntTimer), uNanoseconds);
2014}
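/*
 * Note (illustrative): ITR is programmed in 256 ns units (hence the
 * 'ITR * 256' math in e1kRaiseInterrupt() below), so e.g. ITR = 500 spaces
 * interrupts at least 128 usec apart, i.e. roughly 7800 interrupts per
 * second at most while throttling is in effect.
 */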
2015
2016/**
2017 * Raise interrupt if not masked.
2018 *
2019 * @returns VBox status code.
 * @param pThis The device state structure.
 * @param rcBusy Status code to return if the critical section cannot be entered.
 * @param u32IntCause Additional interrupt cause bits to set in ICR.
2020 */
2021static int e1kRaiseInterrupt(PE1KSTATE pThis, int rcBusy, uint32_t u32IntCause = 0)
2022{
2023 int rc = e1kCsEnter(pThis, rcBusy);
2024 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2025 return rc;
2026
2027 E1K_INC_ISTAT_CNT(pThis->uStatIntTry);
2028 ICR |= u32IntCause;
2029 if (ICR & IMS)
2030 {
2031 if (pThis->fIntRaised)
2032 {
2033 E1K_INC_ISTAT_CNT(pThis->uStatIntSkip);
2034 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
2035 pThis->szPrf, ICR & IMS));
2036 }
2037 else
2038 {
2039 uint64_t tsNow = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
2040 if (!!ITR && tsNow - pThis->u64AckedAt < ITR * 256
2041 && pThis->fItrEnabled && (pThis->fItrRxEnabled || !(ICR & ICR_RXT0)))
2042 {
2043 E1K_INC_ISTAT_CNT(pThis->uStatIntEarly);
2044 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
2045 pThis->szPrf, (uint32_t)(tsNow - pThis->u64AckedAt), ITR * 256));
2046 e1kPostponeInterrupt(pThis, ITR * 256);
2047 }
2048 else
2049 {
2050
2051 /* Since we are delivering the interrupt now
2052 * there is no need to do it later -- stop the timer.
2053 */
2054 TMTimerStop(pThis->CTX_SUFF(pIntTimer));
2055 E1K_INC_ISTAT_CNT(pThis->uStatInt);
2056 STAM_COUNTER_INC(&pThis->StatIntsRaised);
2057 /* Got at least one unmasked interrupt cause */
2058 pThis->fIntRaised = true;
2059 /* Raise(1) INTA(0) */
2060 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
2061 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 1);
2062 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
2063 pThis->szPrf, ICR & IMS));
2064 }
2065 }
2066 }
2067 else
2068 {
2069 E1K_INC_ISTAT_CNT(pThis->uStatIntMasked);
2070 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
2071 pThis->szPrf, ICR, IMS));
2072 }
2073 e1kCsLeave(pThis);
2074 return VINF_SUCCESS;
2075}
2076
2077/**
2078 * Compute the physical address of the descriptor.
2079 *
2080 * @returns the physical address of the descriptor.
2081 *
2082 * @param baseHigh High-order 32 bits of descriptor table address.
2083 * @param baseLow Low-order 32 bits of descriptor table address.
2084 * @param idxDesc The descriptor index in the table.
2085 */
2086DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
2087{
2088 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
2089 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
2090}
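/*
 * Example (illustrative): descriptors are 16 bytes each, so with
 * baseHigh = 0, baseLow = 0x10000 and idxDesc = 3 the descriptor is read
 * from or written to guest physical address 0x10030.
 */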
2091
2092#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2093/**
2094 * Advance the head pointer of the receive descriptor queue.
2095 *
2096 * @remarks RDH always points to the next available RX descriptor.
2097 *
2098 * @param pThis The device state structure.
2099 */
2100DECLINLINE(void) e1kAdvanceRDH(PE1KSTATE pThis)
2101{
2102 Assert(e1kCsRxIsOwner(pThis));
2103 //e1kCsEnter(pThis, RT_SRC_POS);
2104 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
2105 RDH = 0;
2106#ifdef E1K_WITH_RXD_CACHE
2107 /*
2108 * We need to fetch descriptors now as the guest may advance RDT all the way
2109 * to RDH as soon as we generate RXDMT0 interrupt. This is mostly to provide
2110 * compatibility with Phar Lap ETS, see @bugref{7346}. Note that we do not
2111 * check if the receiver is enabled. It must be, otherwise we won't get here
2112 * in the first place.
2113 *
2114 * Note that we should have moved both RDH and iRxDCurrent by now.
2115 */
2116 if (e1kRxDIsCacheEmpty(pThis))
2117 {
2118 /* Cache is empty, reset it and check if we can fetch more. */
2119 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2120 E1kLog3(("%s e1kAdvanceRDH: Rx cache is empty, RDH=%x RDT=%x "
2121 "iRxDCurrent=%x nRxDFetched=%x\n",
2122 pThis->szPrf, RDH, RDT, pThis->iRxDCurrent, pThis->nRxDFetched));
2123 e1kRxDPrefetch(pThis);
2124 }
2125#endif /* E1K_WITH_RXD_CACHE */
2126 /*
2127 * Compute current receive queue length and fire RXDMT0 interrupt
2128 * if we are low on receive buffers
2129 */
2130 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
2131 /*
2132 * The minimum threshold is controlled by RDMTS bits of RCTL:
2133 * 00 = 1/2 of RDLEN
2134 * 01 = 1/4 of RDLEN
2135 * 10 = 1/8 of RDLEN
2136 * 11 = reserved
2137 */
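    /*
     * Example (illustrative): RDLEN = 4096 gives a 256-descriptor ring, so
     * with RDMTS = 01b the RXDMT0 interrupt is raised once 256 / (2 << 1) = 64
     * or fewer descriptors remain available to the hardware.
     */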
2138 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
2139 if (uRQueueLen <= uMinRQThreshold)
2140 {
2141 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
2142 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
2143 pThis->szPrf, RDH, RDT, uRQueueLen, uMinRQThreshold));
2144 E1K_INC_ISTAT_CNT(pThis->uStatIntRXDMT0);
2145 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXDMT0);
2146 }
2147 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
2148 pThis->szPrf, RDH, RDT, uRQueueLen));
2149 //e1kCsLeave(pThis);
2150}
2151#endif /* IN_RING3 */
2152
2153#ifdef E1K_WITH_RXD_CACHE
2154
2155# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2156
2157/**
2158 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2159 * RX ring if the cache is empty.
2160 *
2161 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2162 * go out of sync with RDH which will cause trouble when EMT checks if the
2163 * cache is empty to do pre-fetch @bugref{6217}.
2164 *
2165 * @param pThis The device state structure.
2166 * @thread RX
2167 */
2168DECLINLINE(E1KRXDESC*) e1kRxDGet(PE1KSTATE pThis)
2169{
2170 Assert(e1kCsRxIsOwner(pThis));
2171 /* Check the cache first. */
2172 if (pThis->iRxDCurrent < pThis->nRxDFetched)
2173 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2174 /* Cache is empty, reset it and check if we can fetch more. */
2175 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2176 if (e1kRxDPrefetch(pThis))
2177 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2178 /* Out of Rx descriptors. */
2179 return NULL;
2180}
2181
2182
2183/**
2184 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2185 * pointer. The descriptor gets written back to the RXD ring.
2186 *
2187 * @param pThis The device state structure.
2188 * @param pDesc The descriptor being "returned" to the RX ring.
2189 * @thread RX
2190 */
2191DECLINLINE(void) e1kRxDPut(PE1KSTATE pThis, E1KRXDESC* pDesc)
2192{
2193 Assert(e1kCsRxIsOwner(pThis));
2194 pThis->iRxDCurrent++;
2195 // Assert(pDesc >= pThis->aRxDescriptors);
2196 // Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2197 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2198 // uint32_t rdh = RDH;
2199 // Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
2200 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2201 e1kDescAddr(RDBAH, RDBAL, RDH),
2202 pDesc, sizeof(E1KRXDESC));
2203 /*
2204 * We need to print the descriptor before advancing RDH as it may fetch new
2205 * descriptors into the cache.
2206 */
2207 e1kPrintRDesc(pThis, pDesc);
2208 e1kAdvanceRDH(pThis);
2209}
2210
2211/**
2212 * Store a fragment of a received packet at the specified address.
2213 *
2214 * @param pThis The device state structure.
2215 * @param pDesc The next available RX descriptor.
2216 * @param pvBuf The fragment.
2217 * @param cb The size of the fragment.
2218 */
2219static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2220{
2221 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2222 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2223 pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2224 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2225 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2226 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2227}
2228
2229# endif /* IN_RING3 */
2230
2231#else /* !E1K_WITH_RXD_CACHE */
2232
2233/**
2234 * Store a fragment of received packet that fits into the next available RX
2235 * buffer.
2236 *
2237 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2238 *
2239 * @param pThis The device state structure.
2240 * @param pDesc The next available RX descriptor.
2241 * @param pvBuf The fragment.
2242 * @param cb The size of the fragment.
2243 */
2244static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2245{
2246 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2247 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2248 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2249 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2250 /* Write back the descriptor */
2251 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2252 e1kPrintRDesc(pThis, pDesc);
2253 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2254 /* Advance head */
2255 e1kAdvanceRDH(pThis);
2256 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
2257 if (pDesc->status.fEOP)
2258 {
2259 /* Complete packet has been stored -- it is time to let the guest know. */
2260#ifdef E1K_USE_RX_TIMERS
2261 if (RDTR)
2262 {
2263 /* Arm the timer to fire in RDTR usec (discard .024) */
2264 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2265 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2266 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2267 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2268 }
2269 else
2270 {
2271#endif
2272 /* 0 delay means immediate interrupt */
2273 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2274 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2275#ifdef E1K_USE_RX_TIMERS
2276 }
2277#endif
2278 }
2279 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2280}
2281
2282#endif /* !E1K_WITH_RXD_CACHE */
2283
2284/**
2285 * Returns true if it is a broadcast packet.
2286 *
2287 * @returns true if destination address indicates broadcast.
2288 * @param pvBuf The ethernet packet.
2289 */
2290DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2291{
2292 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2293 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2294}
2295
2296/**
2297 * Returns true if it is a multicast packet.
2298 *
2299 * @remarks returns true for broadcast packets as well.
2300 * @returns true if destination address indicates multicast.
2301 * @param pvBuf The ethernet packet.
2302 */
2303DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2304{
2305 return (*(char*)pvBuf) & 1;
2306}
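/*
 * Note (illustrative): the check above tests the I/G bit, the least
 * significant bit of the first address octet. 01:00:5E:00:00:01 (first octet
 * 0x01) is multicast, and FF:FF:FF:FF:FF:FF has the bit set as well, which is
 * why broadcast frames also count as multicast here.
 */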
2307
2308#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2309/**
2310 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2311 *
2312 * @remarks We emulate checksum offloading for major packets types only.
2313 *
2314 * @returns VBox status code.
2315 * @param pThis The device state structure.
2316 * @param pFrame The available data.
2317 * @param cb Number of bytes available in the buffer.
2318 * @param pStatus Bit fields containing status info (updated in place).
2319 */
2320static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2321{
2322 /** @todo
2323 * It is not safe to bypass checksum verification for packets coming
2324 * from the real wire. We are currently unable to tell where packets are
2325 * coming from, so we tell the driver to ignore our checksum flags
2326 * and do verification in software.
2327 */
2328# if 0
2329 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2330
2331 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", pThis->szPrf, uEtherType));
2332
2333 switch (uEtherType)
2334 {
2335 case 0x800: /* IPv4 */
2336 {
2337 pStatus->fIXSM = false;
2338 pStatus->fIPCS = true;
2339 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2340 /* TCP/UDP checksum offloading works with TCP and UDP only */
2341 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2342 break;
2343 }
2344 case 0x86DD: /* IPv6 */
2345 pStatus->fIXSM = false;
2346 pStatus->fIPCS = false;
2347 pStatus->fTCPCS = true;
2348 break;
2349 default: /* ARP, VLAN, etc. */
2350 pStatus->fIXSM = true;
2351 break;
2352 }
2353# else
2354 pStatus->fIXSM = true;
2355 RT_NOREF_PV(pThis); RT_NOREF_PV(pFrame); RT_NOREF_PV(cb);
2356# endif
2357 return VINF_SUCCESS;
2358}
2359#endif /* IN_RING3 */
2360
2361/**
2362 * Pad and store received packet.
2363 *
2364 * @remarks Make sure that the packet appears to the upper layer as one coming
2365 * from a real Ethernet device: pad it and insert the FCS.
2366 *
2367 * @returns VBox status code.
2368 * @param pThis The device state structure.
2369 * @param pvBuf The available data.
2370 * @param cb Number of bytes available in the buffer.
2371 * @param status Bit fields containing status info.
2372 */
2373static int e1kHandleRxPacket(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
2374{
2375#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2376 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2377 uint8_t *ptr = rxPacket;
2378
2379 int rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2380 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2381 return rc;
2382
2383 if (cb > 70) /* unqualified guess */
2384 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
2385
2386 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2387 Assert(cb > 16);
2388 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2389 E1kLog3(("%s Max RX packet size is %u\n", pThis->szPrf, cbMax));
2390 if (status.fVP)
2391 {
2392 /* VLAN packet -- strip VLAN tag in VLAN mode */
2393 if ((CTRL & CTRL_VME) && cb > 16)
2394 {
2395 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2396 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2397 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2398 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2399 cb -= 4;
2400 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2401 pThis->szPrf, status.u16Special, cb));
2402 }
2403 else
2404 status.fVP = false; /* Set VP only if we stripped the tag */
2405 }
2406 else
2407 memcpy(rxPacket, pvBuf, cb);
2408 /* Pad short packets */
2409 if (cb < 60)
2410 {
2411 memset(rxPacket + cb, 0, 60 - cb);
2412 cb = 60;
2413 }
2414 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2415 {
2416 STAM_PROFILE_ADV_START(&pThis->StatReceiveCRC, a);
2417 /*
2418 * Add FCS if CRC stripping is not enabled. Since the value of CRC
2419 * is ignored by most of drivers we may as well save us the trouble
2420 * of calculating it (see EthernetCRC CFGM parameter).
2421 */
2422 if (pThis->fEthernetCRC)
2423 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2424 cb += sizeof(uint32_t);
2425 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveCRC, a);
2426 E1kLog3(("%s Added FCS (cb=%u)\n", pThis->szPrf, cb));
2427 }
2428 /* Compute checksum of complete packet */
2429 uint16_t checksum = e1kCSum16(rxPacket + GET_BITS(RXCSUM, PCSS), cb);
2430 e1kRxChecksumOffload(pThis, rxPacket, cb, &status);
2431
2432 /* Update stats */
2433 E1K_INC_CNT32(GPRC);
2434 if (e1kIsBroadcast(pvBuf))
2435 E1K_INC_CNT32(BPRC);
2436 else if (e1kIsMulticast(pvBuf))
2437 E1K_INC_CNT32(MPRC);
2438 /* Update octet receive counter */
2439 E1K_ADD_CNT64(GORCL, GORCH, cb);
2440 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
2441 if (cb == 64)
2442 E1K_INC_CNT32(PRC64);
2443 else if (cb < 128)
2444 E1K_INC_CNT32(PRC127);
2445 else if (cb < 256)
2446 E1K_INC_CNT32(PRC255);
2447 else if (cb < 512)
2448 E1K_INC_CNT32(PRC511);
2449 else if (cb < 1024)
2450 E1K_INC_CNT32(PRC1023);
2451 else
2452 E1K_INC_CNT32(PRC1522);
2453
2454 E1K_INC_ISTAT_CNT(pThis->uStatRxFrm);
2455
2456# ifdef E1K_WITH_RXD_CACHE
2457 while (cb > 0)
2458 {
2459 E1KRXDESC *pDesc = e1kRxDGet(pThis);
2460
2461 if (pDesc == NULL)
2462 {
2463 E1kLog(("%s Out of receive buffers, dropping the packet "
2464 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2465 pThis->szPrf, cb, e1kRxDInCache(pThis), RDH, RDT));
2466 break;
2467 }
2468# else /* !E1K_WITH_RXD_CACHE */
2469 if (RDH == RDT)
2470 {
2471 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2472 pThis->szPrf));
2473 }
2474 /* Store the packet to receive buffers */
2475 while (RDH != RDT)
2476 {
2477 /* Load the descriptor pointed by head */
2478 E1KRXDESC desc, *pDesc = &desc;
2479 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
2480 &desc, sizeof(desc));
2481# endif /* !E1K_WITH_RXD_CACHE */
2482 if (pDesc->u64BufAddr)
2483 {
2484 /* Update descriptor */
2485 pDesc->status = status;
2486 pDesc->u16Checksum = checksum;
2487 pDesc->status.fDD = true;
2488
2489 /*
2490 * We need to leave Rx critical section here or we risk deadlocking
2491 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2492 * page or has an access handler associated with it.
2493 * Note that it is safe to leave the critical section here since
2494 * e1kRegWriteRDT() never modifies RDH. It never touches already
2495 * fetched RxD cache entries either.
2496 */
2497 if (cb > pThis->u16RxBSize)
2498 {
2499 pDesc->status.fEOP = false;
2500 e1kCsRxLeave(pThis);
2501 e1kStoreRxFragment(pThis, pDesc, ptr, pThis->u16RxBSize);
2502 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2503 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2504 return rc;
2505 ptr += pThis->u16RxBSize;
2506 cb -= pThis->u16RxBSize;
2507 }
2508 else
2509 {
2510 pDesc->status.fEOP = true;
2511 e1kCsRxLeave(pThis);
2512 e1kStoreRxFragment(pThis, pDesc, ptr, cb);
2513# ifdef E1K_WITH_RXD_CACHE
2514 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2515 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2516 return rc;
2517 cb = 0;
2518# else /* !E1K_WITH_RXD_CACHE */
2519 pThis->led.Actual.s.fReading = 0;
2520 return VINF_SUCCESS;
2521# endif /* !E1K_WITH_RXD_CACHE */
2522 }
2523 /*
2524 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2525 * is not defined.
2526 */
2527 }
2528# ifdef E1K_WITH_RXD_CACHE
2529 /* Write back the descriptor. */
2530 pDesc->status.fDD = true;
2531 e1kRxDPut(pThis, pDesc);
2532# else /* !E1K_WITH_RXD_CACHE */
2533 else
2534 {
2535 /* Write back the descriptor. */
2536 pDesc->status.fDD = true;
2537 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2538 e1kDescAddr(RDBAH, RDBAL, RDH),
2539 pDesc, sizeof(E1KRXDESC));
2540 e1kAdvanceRDH(pThis);
2541 }
2542# endif /* !E1K_WITH_RXD_CACHE */
2543 }
2544
2545 if (cb > 0)
2546 E1kLog(("%s Out of receive buffers, dropping %u bytes\n", pThis->szPrf, cb));
2547
2548 pThis->led.Actual.s.fReading = 0;
2549
2550 e1kCsRxLeave(pThis);
2551# ifdef E1K_WITH_RXD_CACHE
2552 /* Complete packet has been stored -- it is time to let the guest know. */
2553# ifdef E1K_USE_RX_TIMERS
2554 if (RDTR)
2555 {
2556 /* Arm the timer to fire in RDTR usec (discard .024) */
2557 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2558 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2559 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2560 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2561 }
2562 else
2563 {
2564# endif /* E1K_USE_RX_TIMERS */
2565 /* 0 delay means immediate interrupt */
2566 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2567 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2568# ifdef E1K_USE_RX_TIMERS
2569 }
2570# endif /* E1K_USE_RX_TIMERS */
2571# endif /* E1K_WITH_RXD_CACHE */
2572
2573 return VINF_SUCCESS;
2574#else /* !IN_RING3 */
2575 RT_NOREF_PV(pThis); RT_NOREF_PV(pvBuf); RT_NOREF_PV(cb); RT_NOREF_PV(status);
2576 return VERR_INTERNAL_ERROR_2;
2577#endif /* !IN_RING3 */
2578}
2579
2580
2581#ifdef IN_RING3
2582/**
2583 * Bring the link up after the configured delay, 5 seconds by default.
2584 *
2585 * @param pThis The device state structure.
2586 * @thread any
2587 */
2588DECLINLINE(void) e1kBringLinkUpDelayed(PE1KSTATE pThis)
2589{
2590 E1kLog(("%s Will bring up the link in %d seconds...\n",
2591 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
2592 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), pThis->cMsLinkUpDelay * 1000);
2593}
2594
2595/**
2596 * Bring up the link immediately.
2597 *
2598 * @param pThis The device state structure.
2599 */
2600DECLINLINE(void) e1kR3LinkUp(PE1KSTATE pThis)
2601{
2602 E1kLog(("%s Link is up\n", pThis->szPrf));
2603 STATUS |= STATUS_LU;
2604 Phy::setLinkStatus(&pThis->phy, true);
2605 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2606 if (pThis->pDrvR3)
2607 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_UP);
2608 /* Process pending TX descriptors (see @bugref{8942}) */
2609 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pTxQueue));
2610 if (RT_LIKELY(pItem))
2611 PDMQueueInsert(pThis->CTX_SUFF(pTxQueue), pItem);
2612}
2613
2614/**
2615 * Bring down the link immediately.
2616 *
2617 * @param pThis The device state structure.
2618 */
2619DECLINLINE(void) e1kR3LinkDown(PE1KSTATE pThis)
2620{
2621 E1kLog(("%s Link is down\n", pThis->szPrf));
2622 STATUS &= ~STATUS_LU;
2623#ifdef E1K_LSC_ON_RESET
2624 Phy::setLinkStatus(&pThis->phy, false);
2625#endif /* E1K_LSC_ON_RESET */
2626 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2627 if (pThis->pDrvR3)
2628 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2629}
2630
2631/**
2632 * Bring down the link temporarily.
2633 *
2634 * @param pThis The device state structure.
2635 */
2636DECLINLINE(void) e1kR3LinkDownTemp(PE1KSTATE pThis)
2637{
2638 E1kLog(("%s Link is down temporarily\n", pThis->szPrf));
2639 STATUS &= ~STATUS_LU;
2640 Phy::setLinkStatus(&pThis->phy, false);
2641 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2642 /*
2643 * Notifying the associated driver that the link went down (even temporarily)
2644 * seems to be the right thing, but it was not done before. This may cause
2645 * a regression if the driver does not expect the link to go down as a result
2646 * of sending PDMNETWORKLINKSTATE_DOWN_RESUME to this device. Earlier versions
2647 * of code notified the driver that the link was up! See @bugref{7057}.
2648 */
2649 if (pThis->pDrvR3)
2650 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2651 e1kBringLinkUpDelayed(pThis);
2652}
2653#endif /* IN_RING3 */
2654
2655#if 0 /* unused */
2656/**
2657 * Read handler for Device Control register.
2658 *
2659 * Merges the current MDIO input pin state from the PHY into the returned value.
2660 *
2661 * @returns VBox status code.
2662 *
2663 * @param pThis The device state structure.
2664 * @param offset Register offset in memory-mapped frame.
2665 * @param index Register index in register array.
2666 * @param mask Used to implement partial reads (8 and 16-bit).
2667 */
2668static int e1kRegReadCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2669{
2670 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2671 pThis->szPrf, (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2672 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2673 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2674 {
2675 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2676 if (Phy::readMDIO(&pThis->phy))
2677 *pu32Value = CTRL | CTRL_MDIO;
2678 else
2679 *pu32Value = CTRL & ~CTRL_MDIO;
2680 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2681 pThis->szPrf, !!(*pu32Value & CTRL_MDIO)));
2682 }
2683 else
2684 {
2685 /* MDIO pin is used for output, ignore it */
2686 *pu32Value = CTRL;
2687 }
2688 return VINF_SUCCESS;
2689}
2690#endif /* unused */
2691
2692/**
2693 * A callback used by PHY to indicate that the link needs to be updated due to
2694 * reset of PHY.
2695 *
2696 * @param pPhy A pointer to phy member of the device state structure.
2697 * @thread any
2698 */
2699void e1kPhyLinkResetCallback(PPHY pPhy)
2700{
2701 /* PHY is aggregated into e1000, get pThis from pPhy. */
2702 PE1KSTATE pThis = RT_FROM_MEMBER(pPhy, E1KSTATE, phy);
2703 /* Make sure we have cable connected and MAC can talk to PHY */
2704 if (pThis->fCableConnected && (CTRL & CTRL_SLU))
2705 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), E1K_INIT_LINKUP_DELAY_US);
2706}
2707
2708/**
2709 * Write handler for Device Control register.
2710 *
2711 * Handles reset.
2712 *
2713 * @param pThis The device state structure.
2714 * @param offset Register offset in memory-mapped frame.
2715 * @param index Register index in register array.
2716 * @param value The value to store.
2717 * @param mask Used to implement partial writes (8 and 16-bit).
2718 * @thread EMT
2719 */
2720static int e1kRegWriteCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2721{
2722 int rc = VINF_SUCCESS;
2723
2724 if (value & CTRL_RESET)
2725 { /* RST */
2726#ifndef IN_RING3
2727 return VINF_IOM_R3_MMIO_WRITE;
2728#else
2729 e1kHardReset(pThis);
2730#endif
2731 }
2732 else
2733 {
2734#ifdef E1K_LSC_ON_SLU
2735 /*
2736 * When the guest changes 'Set Link Up' bit from 0 to 1 we check if
2737 * the link is down and the cable is connected, and if they are we
2738 * bring the link up, see @bugref{8624}.
2739 */
2740 if ( (value & CTRL_SLU)
2741 && !(CTRL & CTRL_SLU)
2742 && pThis->fCableConnected
2743 && !(STATUS & STATUS_LU))
2744 {
2745 /* It should take about 2 seconds for the link to come up */
2746 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), E1K_INIT_LINKUP_DELAY_US);
2747 }
2748#else /* !E1K_LSC_ON_SLU */
2749 if ( (value & CTRL_SLU)
2750 && !(CTRL & CTRL_SLU)
2751 && pThis->fCableConnected
2752 && !TMTimerIsActive(pThis->CTX_SUFF(pLUTimer)))
2753 {
2754 /* PXE does not use LSC interrupts, see @bugref{9113}. */
2755 STATUS |= STATUS_LU;
2756 }
2757#endif /* !E1K_LSC_ON_SLU */
2758 if ((value & CTRL_VME) != (CTRL & CTRL_VME))
2759 {
2760 E1kLog(("%s VLAN Mode %s\n", pThis->szPrf, (value & CTRL_VME) ? "Enabled" : "Disabled"));
2761 }
2762 Log7(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2763 pThis->szPrf, (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2764 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2765 if (value & CTRL_MDC)
2766 {
2767 if (value & CTRL_MDIO_DIR)
2768 {
2769 Log7(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2770 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2771 Phy::writeMDIO(&pThis->phy, !!(value & CTRL_MDIO));
2772 }
2773 else
2774 {
2775 if (Phy::readMDIO(&pThis->phy))
2776 value |= CTRL_MDIO;
2777 else
2778 value &= ~CTRL_MDIO;
2779 Log7(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2780 }
2781 }
2782 rc = e1kRegWriteDefault(pThis, offset, index, value);
2783 }
2784
2785 return rc;
2786}
2787
2788/**
2789 * Write handler for EEPROM/Flash Control/Data register.
2790 *
2791 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2792 *
2793 * @param pThis The device state structure.
2794 * @param offset Register offset in memory-mapped frame.
2795 * @param index Register index in register array.
2796 * @param value The value to store.
2797 * @param mask Used to implement partial writes (8 and 16-bit).
2798 * @thread EMT
2799 */
2800static int e1kRegWriteEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2801{
2802 RT_NOREF(offset, index);
2803#ifdef IN_RING3
2804 /* So far we are concerned with lower byte only */
2805 if ((EECD & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2806 {
2807 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2808 /* Note: 82543GC does not need to request EEPROM access */
2809 STAM_PROFILE_ADV_START(&pThis->StatEEPROMWrite, a);
2810 pThis->eeprom.write(value & EECD_EE_WIRES);
2811 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMWrite, a);
2812 }
2813 if (value & EECD_EE_REQ)
2814 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2815 else
2816 EECD &= ~EECD_EE_GNT;
2817 //e1kRegWriteDefault(pThis, offset, index, value );
2818
2819 return VINF_SUCCESS;
2820#else /* !IN_RING3 */
2821 RT_NOREF(pThis, value);
2822 return VINF_IOM_R3_MMIO_WRITE;
2823#endif /* !IN_RING3 */
2824}
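/*
 * Note (illustrative): a guest bit-bangs the EEPROM by setting EECD.EE_REQ,
 * waiting until this handler reports EECD.EE_GNT, and then toggling the
 * clock/chip-select/data wires covered by EECD_EE_WIRES; the 82543GC variant
 * skips the request/grant handshake entirely, as modelled above.
 */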
2825
2826/**
2827 * Read handler for EEPROM/Flash Control/Data register.
2828 *
2829 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2830 *
2831 * @returns VBox status code.
2832 *
2833 * @param pThis The device state structure.
2834 * @param offset Register offset in memory-mapped frame.
2835 * @param index Register index in register array.
2836 * @param mask Used to implement partial reads (8 and 16-bit).
2837 * @thread EMT
2838 */
2839static int e1kRegReadEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2840{
2841#ifdef IN_RING3
2842 uint32_t value;
2843 int rc = e1kRegReadDefault(pThis, offset, index, &value);
2844 if (RT_SUCCESS(rc))
2845 {
2846 if ((value & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2847 {
2848 /* Note: 82543GC does not need to request EEPROM access */
2849 /* Access to EEPROM granted -- get 4-wire bits to EEPROM device */
2850 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2851 value |= pThis->eeprom.read();
2852 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2853 }
2854 *pu32Value = value;
2855 }
2856
2857 return rc;
2858#else /* !IN_RING3 */
2859 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(pu32Value);
2860 return VINF_IOM_R3_MMIO_READ;
2861#endif /* !IN_RING3 */
2862}
2863
2864/**
2865 * Write handler for EEPROM Read register.
2866 *
2867 * Handles EEPROM word access requests, reads EEPROM and stores the result
2868 * into DATA field.
2869 *
2870 * @param pThis The device state structure.
2871 * @param offset Register offset in memory-mapped frame.
2872 * @param index Register index in register array.
2873 * @param value The value to store.
2875 * @thread EMT
2876 */
2877static int e1kRegWriteEERD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2878{
2879#ifdef IN_RING3
2880 /* Make use of 'writable' and 'readable' masks. */
2881 e1kRegWriteDefault(pThis, offset, index, value);
2882 /* DONE and DATA are set only if read was triggered by START. */
2883 if (value & EERD_START)
2884 {
2885 uint16_t tmp;
2886 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2887 if (pThis->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2888 SET_BITS(EERD, DATA, tmp);
2889 EERD |= EERD_DONE;
2890 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2891 }
2892
2893 return VINF_SUCCESS;
2894#else /* !IN_RING3 */
2895 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
2896 return VINF_IOM_R3_MMIO_WRITE;
2897#endif /* !IN_RING3 */
2898}
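/*
 * Illustrative sketch of the guest-side access pattern the handler above
 * services (not device code; mmioWrite32/mmioRead32, EERD_OFFSET and
 * EERD_ADDR_SHIFT are hypothetical placeholders, only the EERD_* flag names
 * and the GET_BITS_V macro are taken from this file):
 */
#if 0
    /* Request word 'uWordAddr': write the address together with the START bit. */
    mmioWrite32(EERD_OFFSET, EERD_START | (uWordAddr << EERD_ADDR_SHIFT /* hypothetical */));
    /* Poll until the device reports completion... */
    uint32_t eerd;
    do
        eerd = mmioRead32(EERD_OFFSET);
    while (!(eerd & EERD_DONE));
    /* ...then extract the word from the same DATA field the handler fills in. */
    uint16_t u16Word = GET_BITS_V(eerd, EERD, DATA);
#endif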
2899
2900
2901/**
2902 * Write handler for MDI Control register.
2903 *
2904 * Handles PHY read/write requests; forwards requests to internal PHY device.
2905 *
2906 * @param pThis The device state structure.
2907 * @param offset Register offset in memory-mapped frame.
2908 * @param index Register index in register array.
2909 * @param value The value to store.
2911 * @thread EMT
2912 */
2913static int e1kRegWriteMDIC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2914{
2915 if (value & MDIC_INT_EN)
2916 {
2917 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
2918 pThis->szPrf));
2919 }
2920 else if (value & MDIC_READY)
2921 {
2922 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
2923 pThis->szPrf));
2924 }
2925 else if (GET_BITS_V(value, MDIC, PHY) != 1)
2926 {
2927 E1kLog(("%s WARNING! Access to invalid PHY detected, phy=%d.\n",
2928 pThis->szPrf, GET_BITS_V(value, MDIC, PHY)));
2929 /*
2930 * Some drivers scan the MDIO bus for a PHY. We can work with these
2931 * drivers if we set MDIC_READY and MDIC_ERROR when there isn't a PHY
2932 * at the requested address, see @bugref{7346}.
2933 */
2934 MDIC = MDIC_READY | MDIC_ERROR;
2935 }
2936 else
2937 {
2938 /* Store the value */
2939 e1kRegWriteDefault(pThis, offset, index, value);
2940 STAM_COUNTER_INC(&pThis->StatPHYAccesses);
2941 /* Forward op to PHY */
2942 if (value & MDIC_OP_READ)
2943 SET_BITS(MDIC, DATA, Phy::readRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG)));
2944 else
2945 Phy::writeRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK);
2946 /* Let software know that we are done */
2947 MDIC |= MDIC_READY;
2948 }
2949
2950 return VINF_SUCCESS;
2951}
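/*
 * Illustrative sketch of the PHY read sequence whose device side the MDIC
 * handler above implements (not device code; mmioWrite32/mmioRead32,
 * MDIC_OFFSET and MDIC_MAKE_PHY_REG are hypothetical placeholders, the MDIC_*
 * flag names are the ones used above):
 */
#if 0
    /* Kick off a read of PHY register 'uPhyReg' on the internal PHY (address 1). */
    mmioWrite32(MDIC_OFFSET, MDIC_OP_READ | MDIC_MAKE_PHY_REG(1 /*phy*/, uPhyReg));
    /* The handler completes the operation synchronously and sets MDIC_READY. */
    uint32_t mdic;
    do
        mdic = mmioRead32(MDIC_OFFSET);
    while (!(mdic & MDIC_READY));
    uint16_t u16Value = mdic & MDIC_DATA_MASK;  /* Valid unless MDIC_ERROR is set. */
#endif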
2952
2953/**
2954 * Write handler for Interrupt Cause Read register.
2955 *
2956 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
2957 *
2958 * @param pThis The device state structure.
2959 * @param offset Register offset in memory-mapped frame.
2960 * @param index Register index in register array.
2961 * @param value The value to store.
2963 * @thread EMT
2964 */
2965static int e1kRegWriteICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2966{
2967 ICR &= ~value;
2968
2969 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index);
2970 return VINF_SUCCESS;
2971}
2972
2973/**
2974 * Read handler for Interrupt Cause Read register.
2975 *
2976 * Reading this register acknowledges all interrupts.
2977 *
2978 * @returns VBox status code.
2979 *
2980 * @param pThis The device state structure.
2981 * @param offset Register offset in memory-mapped frame.
2982 * @param index Register index in register array.
2983 * @param   pu32Value   Where to store the read value.
2984 * @thread EMT
2985 */
2986static int e1kRegReadICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2987{
2988 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_READ);
2989 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2990 return rc;
2991
2992 uint32_t value = 0;
2993 rc = e1kRegReadDefault(pThis, offset, index, &value);
2994 if (RT_SUCCESS(rc))
2995 {
2996 if (value)
2997 {
2998 if (!pThis->fIntRaised)
2999 E1K_INC_ISTAT_CNT(pThis->uStatNoIntICR);
3000 /*
3001 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
3002 * with disabled interrupts.
3003 */
3004 //if (IMS)
3005 if (1)
3006 {
3007 /*
3008 * Interrupts were enabled -- we are supposedly at the very
3009 * beginning of interrupt handler
3010 */
3011 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
3012 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", pThis->szPrf, ICR));
3013 /* Clear all pending interrupts */
3014 ICR = 0;
3015 pThis->fIntRaised = false;
3016 /* Lower(0) INTA(0) */
3017 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
3018
3019 pThis->u64AckedAt = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
3020 if (pThis->fIntMaskUsed)
3021 pThis->fDelayInts = true;
3022 }
3023 else
3024 {
3025 /*
3026                 * Interrupts are disabled -- in Windows guests the ICR read is done
3027                 * just before re-enabling interrupts.
3028 */
3029 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
3030 }
3031 }
3032 *pu32Value = value;
3033 }
3034 e1kCsLeave(pThis);
3035
3036 return rc;
3037}
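/*
 * Illustrative sketch of the guest ISR pattern the read-to-clear behaviour
 * above is written for (not device code; mmioRead32, ICR_OFFSET and the
 * guestHandle* helpers are hypothetical placeholders, the ICR_* cause bits
 * are the ones used in this file):
 */
#if 0
    uint32_t icr = mmioRead32(ICR_OFFSET);  /* A single read acknowledges and clears all causes. */
    if (icr & ICR_RXT0)                     /* Receive timer expired / packets pending. */
        guestHandleRx();
    if (icr & ICR_TXDW)                     /* Transmit descriptor written back. */
        guestHandleTxDone();
    /* A second read here would return 0 until a new cause is latched. */
#endif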
3038
3039/**
3040 * Write handler for Interrupt Cause Set register.
3041 *
3042 * Bits corresponding to 1s in 'value' will be set in ICR register.
3043 *
3044 * @param pThis The device state structure.
3045 * @param offset Register offset in memory-mapped frame.
3046 * @param index Register index in register array.
3047 * @param value The value to store.
3049 * @thread EMT
3050 */
3051static int e1kRegWriteICS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3052{
3053 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3054 E1K_INC_ISTAT_CNT(pThis->uStatIntICS);
3055 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, value & g_aE1kRegMap[ICS_IDX].writable);
3056}
3057
3058/**
3059 * Write handler for Interrupt Mask Set register.
3060 *
3061 * Will trigger pending interrupts.
3062 *
3063 * @param pThis The device state structure.
3064 * @param offset Register offset in memory-mapped frame.
3065 * @param index Register index in register array.
3066 * @param value The value to store.
3068 * @thread EMT
3069 */
3070static int e1kRegWriteIMS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3071{
3072 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3073
3074 IMS |= value;
3075 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
3076 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", pThis->szPrf));
3077 /*
3078 * We cannot raise an interrupt here as it will occasionally cause an interrupt storm
3079 * in Windows guests (see @bugref{8624}, @bugref{5023}).
3080 */
3081 if ((ICR & IMS) && !pThis->fLocked)
3082 {
3083 E1K_INC_ISTAT_CNT(pThis->uStatIntIMS);
3084 e1kPostponeInterrupt(pThis, E1K_IMS_INT_DELAY_NS);
3085 }
3086
3087 return VINF_SUCCESS;
3088}
3089
3090/**
3091 * Write handler for Interrupt Mask Clear register.
3092 *
3093 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
3094 *
3095 * @param pThis The device state structure.
3096 * @param offset Register offset in memory-mapped frame.
3097 * @param index Register index in register array.
3098 * @param value The value to store.
3100 * @thread EMT
3101 */
3102static int e1kRegWriteIMC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3103{
3104 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3105
3106 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3107 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3108 return rc;
3109 if (pThis->fIntRaised)
3110 {
3111 /*
3112         * Technically we should reset fIntRaised in the ICR read handler, but doing so causes
3113         * Windows to freeze, since it may receive another interrupt while still at the very
3114         * beginning of its interrupt handler.
3115 */
3116 E1K_INC_ISTAT_CNT(pThis->uStatIntLower);
3117 STAM_COUNTER_INC(&pThis->StatIntsPrevented);
3118 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
3119 /* Lower(0) INTA(0) */
3120 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
3121 pThis->fIntRaised = false;
3122 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
3123 }
3124 IMS &= ~value;
3125 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", pThis->szPrf));
3126 e1kCsLeave(pThis);
3127
3128 return VINF_SUCCESS;
3129}
3130
3131/**
3132 * Write handler for Receive Control register.
3133 *
3134 * @param pThis The device state structure.
3135 * @param offset Register offset in memory-mapped frame.
3136 * @param index Register index in register array.
3137 * @param value The value to store.
3139 * @thread EMT
3140 */
3141static int e1kRegWriteRCTL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3142{
3143 /* Update promiscuous mode */
3144 bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
3145 if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
3146 {
3147 /* Promiscuity has changed, pass the knowledge on. */
3148#ifndef IN_RING3
3149 return VINF_IOM_R3_MMIO_WRITE;
3150#else
3151 if (pThis->pDrvR3)
3152 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, fBecomePromiscous);
3153#endif
3154 }
3155
3156 /* Adjust receive buffer size */
3157 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
3158 if (value & RCTL_BSEX)
3159 cbRxBuf *= 16;
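    /* E.g. BSIZE=01b gives 2048 >> 1 = 1024 bytes; with BSEX set that becomes 1024 * 16 = 16384 bytes. */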
3160 if (cbRxBuf != pThis->u16RxBSize)
3161 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
3162 pThis->szPrf, cbRxBuf, pThis->u16RxBSize));
3163 pThis->u16RxBSize = cbRxBuf;
3164
3165 /* Update the register */
3166 e1kRegWriteDefault(pThis, offset, index, value);
3167
3168 return VINF_SUCCESS;
3169}
3170
3171/**
3172 * Write handler for Packet Buffer Allocation register.
3173 *
3174 * TXA = 64 - RXA.
3175 *
3176 * @param pThis The device state structure.
3177 * @param offset Register offset in memory-mapped frame.
3178 * @param index Register index in register array.
3179 * @param value The value to store.
3181 * @thread EMT
3182 */
3183static int e1kRegWritePBA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3184{
3185 e1kRegWriteDefault(pThis, offset, index, value);
3186 PBA_st->txa = 64 - PBA_st->rxa;
3187
3188 return VINF_SUCCESS;
3189}
3190
3191/**
3192 * Write handler for Receive Descriptor Tail register.
3193 *
3194 * @remarks A write to RDT forces a switch to HC and signals
3195 *          e1kR3NetworkDown_WaitReceiveAvail().
3196 *
3197 * @returns VBox status code.
3198 *
3199 * @param pThis The device state structure.
3200 * @param offset Register offset in memory-mapped frame.
3201 * @param index Register index in register array.
3202 * @param value The value to store.
3204 * @thread EMT
3205 */
3206static int e1kRegWriteRDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3207{
3208#ifndef IN_RING3
3209 /* XXX */
3210// return VINF_IOM_R3_MMIO_WRITE;
3211#endif
3212 int rc = e1kCsRxEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3213 if (RT_LIKELY(rc == VINF_SUCCESS))
3214 {
3215 E1kLog(("%s e1kRegWriteRDT\n", pThis->szPrf));
3216#ifndef E1K_WITH_RXD_CACHE
3217 /*
3218 * Some drivers advance RDT too far, so that it equals RDH. This
3219 * somehow manages to work with real hardware but not with this
3220 * emulated device. We can work with these drivers if we just
3221 * write 1 less when we see a driver writing RDT equal to RDH,
3222 * see @bugref{7346}.
3223 */
3224 if (value == RDH)
3225 {
3226 if (RDH == 0)
3227 value = (RDLEN / sizeof(E1KRXDESC)) - 1;
3228 else
3229 value = RDH - 1;
3230 }
3231#endif /* !E1K_WITH_RXD_CACHE */
3232 rc = e1kRegWriteDefault(pThis, offset, index, value);
3233#ifdef E1K_WITH_RXD_CACHE
3234 /*
3235 * We need to fetch descriptors now as RDT may go whole circle
3236 * before we attempt to store a received packet. For example,
3237 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
3238 * size being only 8 descriptors! Note that we fetch descriptors
3239 * only when the cache is empty to reduce the number of memory reads
3240 * in case of frequent RDT writes. Don't fetch anything when the
3241 * receiver is disabled either as RDH, RDT, RDLEN can be in some
3242 * messed up state.
3243         * Note that even though the cache may seem empty, meaning that there are
3244         * no more available descriptors in it, it may still be in use by the RX
3245         * thread, which has not yet written the last descriptor back but has
3246         * temporarily released the RX lock in order to write the packet body
3247         * to the descriptor's buffer. At this point we are still going to prefetch,
3248 * but it won't actually fetch anything if there are no unused slots in
3249 * our "empty" cache (nRxDFetched==E1K_RXD_CACHE_SIZE). We must not
3250 * reset the cache here even if it appears empty. It will be reset at
3251 * a later point in e1kRxDGet().
3252 */
3253 if (e1kRxDIsCacheEmpty(pThis) && (RCTL & RCTL_EN))
3254 e1kRxDPrefetch(pThis);
3255#endif /* E1K_WITH_RXD_CACHE */
3256 e1kCsRxLeave(pThis);
3257 if (RT_SUCCESS(rc))
3258 {
3259/** @todo bird: Use SUPSem* for this so we can signal it in ring-0 as well
3260 * without requiring any context switches. We should also check the
3261 * wait condition before bothering to queue the item as we're currently
3262 * queuing thousands of items per second here in a normal transmit
3263 * scenario. Expect performance changes when fixing this! */
3264#ifdef IN_RING3
3265 /* Signal that we have more receive descriptors available. */
3266 e1kWakeupReceive(pThis->CTX_SUFF(pDevIns));
3267#else
3268 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pCanRxQueue));
3269 if (pItem)
3270 PDMQueueInsert(pThis->CTX_SUFF(pCanRxQueue), pItem);
3271#endif
3272 }
3273 }
3274 return rc;
3275}
3276
3277/**
3278 * Write handler for Receive Delay Timer register.
3279 *
3280 * @param pThis The device state structure.
3281 * @param offset Register offset in memory-mapped frame.
3282 * @param index Register index in register array.
3283 * @param value The value to store.
3285 * @thread EMT
3286 */
3287static int e1kRegWriteRDTR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3288{
3289 e1kRegWriteDefault(pThis, offset, index, value);
3290 if (value & RDTR_FPD)
3291 {
3292 /* Flush requested, cancel both timers and raise interrupt */
3293#ifdef E1K_USE_RX_TIMERS
3294 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3295 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3296#endif
3297 E1K_INC_ISTAT_CNT(pThis->uStatIntRDTR);
3298 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3299 }
3300
3301 return VINF_SUCCESS;
3302}
3303
3304DECLINLINE(uint32_t) e1kGetTxLen(PE1KSTATE pThis)
3305{
3306 /**
3307 * Make sure TDT won't change during computation. EMT may modify TDT at
3308 * any moment.
3309 */
3310 uint32_t tdt = TDT;
3311 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
3312}
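/*
 * Illustrative sketch, not device code: a stand-alone rendition of the ring
 * arithmetic used by e1kGetTxLen() above. The distance from TDH (head, the
 * device's consumer index) to TDT (tail, the guest's producer index) is taken
 * modulo the ring size, e.g. with 8 descriptors, TDH=6 and TDT=2 there are
 * 8 + 2 - 6 = 4 descriptors ready for transmission.
 */
#if 0
static unsigned ringDistanceSketch(unsigned uHead, unsigned uTail, unsigned cDescsInRing)
{
    /* Same formula as above: add the ring size when the head is ahead of the tail. */
    return (uHead > uTail ? cDescsInRing : 0) + uTail - uHead;
}
#endif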
3313
3314#ifdef IN_RING3
3315
3316# ifdef E1K_TX_DELAY
3317/**
3318 * Transmit Delay Timer handler.
3319 *
3320 * @remarks We only get here when the timer expires.
3321 *
3322 * @param pDevIns Pointer to device instance structure.
3323 * @param pTimer Pointer to the timer.
3324 * @param   pvUser      Pointer to the device state structure.
3325 * @thread EMT
3326 */
3327static DECLCALLBACK(void) e1kTxDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3328{
3329 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3330 Assert(PDMCritSectIsOwner(&pThis->csTx));
3331
3332 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayExp);
3333# ifdef E1K_INT_STATS
3334 uint64_t u64Elapsed = RTTimeNanoTS() - pThis->u64ArmedAt;
3335 if (u64Elapsed > pThis->uStatMaxTxDelay)
3336 pThis->uStatMaxTxDelay = u64Elapsed;
3337# endif
3338 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
3339 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3340}
3341# endif /* E1K_TX_DELAY */
3342
3343//# ifdef E1K_USE_TX_TIMERS
3344
3345/**
3346 * Transmit Interrupt Delay Timer handler.
3347 *
3348 * @remarks We only get here when the timer expires.
3349 *
3350 * @param pDevIns Pointer to device instance structure.
3351 * @param pTimer Pointer to the timer.
3352 * @param   pvUser      Pointer to the device state structure.
3353 * @thread EMT
3354 */
3355static DECLCALLBACK(void) e1kTxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3356{
3357 RT_NOREF(pDevIns);
3358 RT_NOREF(pTimer);
3359 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3360
3361 E1K_INC_ISTAT_CNT(pThis->uStatTID);
3362 /* Cancel absolute delay timer as we have already got attention */
3363# ifndef E1K_NO_TAD
3364 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
3365# endif
3366 e1kRaiseInterrupt(pThis, ICR_TXDW);
3367}
3368
3369/**
3370 * Transmit Absolute Delay Timer handler.
3371 *
3372 * @remarks We only get here when the timer expires.
3373 *
3374 * @param pDevIns Pointer to device instance structure.
3375 * @param pTimer Pointer to the timer.
3376 * @param   pvUser      Pointer to the device state structure.
3377 * @thread EMT
3378 */
3379static DECLCALLBACK(void) e1kTxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3380{
3381 RT_NOREF(pDevIns);
3382 RT_NOREF(pTimer);
3383 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3384
3385 E1K_INC_ISTAT_CNT(pThis->uStatTAD);
3386 /* Cancel interrupt delay timer as we have already got attention */
3387 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
3388 e1kRaiseInterrupt(pThis, ICR_TXDW);
3389}
3390
3391//# endif /* E1K_USE_TX_TIMERS */
3392# ifdef E1K_USE_RX_TIMERS
3393
3394/**
3395 * Receive Interrupt Delay Timer handler.
3396 *
3397 * @remarks We only get here when the timer expires.
3398 *
3399 * @param pDevIns Pointer to device instance structure.
3400 * @param pTimer Pointer to the timer.
3401 * @param   pvUser      Pointer to the device state structure.
3402 * @thread EMT
3403 */
3404static DECLCALLBACK(void) e1kRxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3405{
3406 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3407
3408 E1K_INC_ISTAT_CNT(pThis->uStatRID);
3409 /* Cancel absolute delay timer as we have already got attention */
3410 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3411 e1kRaiseInterrupt(pThis, ICR_RXT0);
3412}
3413
3414/**
3415 * Receive Absolute Delay Timer handler.
3416 *
3417 * @remarks We only get here when the timer expires.
3418 *
3419 * @param pDevIns Pointer to device instance structure.
3420 * @param pTimer Pointer to the timer.
3421 * @param   pvUser      Pointer to the device state structure.
3422 * @thread EMT
3423 */
3424static DECLCALLBACK(void) e1kRxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3425{
3426 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3427
3428 E1K_INC_ISTAT_CNT(pThis->uStatRAD);
3429 /* Cancel interrupt delay timer as we have already got attention */
3430 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3431 e1kRaiseInterrupt(pThis, ICR_RXT0);
3432}
3433
3434# endif /* E1K_USE_RX_TIMERS */
3435
3436/**
3437 * Late Interrupt Timer handler.
3438 *
3439 * @param pDevIns Pointer to device instance structure.
3440 * @param pTimer Pointer to the timer.
3441 * @param   pvUser      Pointer to the device state structure.
3442 * @thread EMT
3443 */
3444static DECLCALLBACK(void) e1kLateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3445{
3446 RT_NOREF(pDevIns, pTimer);
3447 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3448
3449 STAM_PROFILE_ADV_START(&pThis->StatLateIntTimer, a);
3450 STAM_COUNTER_INC(&pThis->StatLateInts);
3451 E1K_INC_ISTAT_CNT(pThis->uStatIntLate);
3452# if 0
3453 if (pThis->iStatIntLost > -100)
3454 pThis->iStatIntLost--;
3455# endif
3456 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, 0);
3457 STAM_PROFILE_ADV_STOP(&pThis->StatLateIntTimer, a);
3458}
3459
3460/**
3461 * Link Up Timer handler.
3462 *
3463 * @param pDevIns Pointer to device instance structure.
3464 * @param pTimer Pointer to the timer.
3465 * @param   pvUser      Pointer to the device state structure.
3466 * @thread EMT
3467 */
3468static DECLCALLBACK(void) e1kLinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3469{
3470 RT_NOREF(pDevIns, pTimer);
3471 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3472
3473 /*
3474     * This can happen if we set the link status to down while the link-up timer was
3475     * already armed (shortly after e1kLoadDone(), or when the cable was disconnected
3476     * and then re-connected very quickly). Moreover, 82543GC triggers LSC
3477     * on reset even if the cable is unplugged (see @bugref{8942}).
3478 */
3479 if (pThis->fCableConnected)
3480 {
3481 /* 82543GC does not have an internal PHY */
3482 if (pThis->eChip == E1K_CHIP_82543GC || (CTRL & CTRL_SLU))
3483 e1kR3LinkUp(pThis);
3484 }
3485#ifdef E1K_LSC_ON_RESET
3486 else if (pThis->eChip == E1K_CHIP_82543GC)
3487 e1kR3LinkDown(pThis);
3488#endif /* E1K_LSC_ON_RESET */
3489}
3490
3491#endif /* IN_RING3 */
3492
3493/**
3494 * Sets up the GSO context according to the new TSE context descriptor.
3495 *
3496 * @param pGso The GSO context to setup.
3497 * @param pCtx The context descriptor.
3498 */
3499DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3500{
3501 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3502
3503 /*
3504 * See if the context descriptor describes something that could be TCP or
3505 * UDP over IPv[46].
3506 */
3507 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3508 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3509 {
3510 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3511 return;
3512 }
3513 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3514 {
3515 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3516 return;
3517 }
3518 if (RT_UNLIKELY( pCtx->dw2.fTCP
3519 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3520 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3521 {
3522 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3523 return;
3524 }
3525
3526    /* The TCP/UDP checksumming should stop at the end of the packet or at least after the headers. */
3527 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3528 {
3529 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3530 return;
3531 }
3532
3533 /* IPv4 checksum offset. */
3534 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3535 {
3536 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3537 return;
3538 }
3539
3540 /* TCP/UDP checksum offsets. */
3541 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3542 != ( pCtx->dw2.fTCP
3543 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3544 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3545 {
3546        E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->tu.u8CSO, pCtx->tu.u8CSS, pCtx->dw2.fTCP));
3547 return;
3548 }
3549
3550 /*
3551     * Because internal networking uses a 16-bit size field for the GSO context
3552     * plus frame, we have to make sure we don't exceed it.
3553 */
3554 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3555 {
3556 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3557 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3558 return;
3559 }
3560
3561 /*
3562 * We're good for now - we'll do more checks when seeing the data.
3563 * So, figure the type of offloading and setup the context.
3564 */
3565 if (pCtx->dw2.fIP)
3566 {
3567 if (pCtx->dw2.fTCP)
3568 {
3569 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3570 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3571 }
3572 else
3573 {
3574 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3575 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3576 }
3577 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3578 * this yet it seems)... */
3579 }
3580 else
3581 {
3582 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /** @todo IPv6 UFO */
3583 if (pCtx->dw2.fTCP)
3584 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3585 else
3586 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3587 }
3588 pGso->offHdr1 = pCtx->ip.u8CSS;
3589 pGso->offHdr2 = pCtx->tu.u8CSS;
3590 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3591 pGso->cbMaxSeg = pCtx->dw3.u16MSS;
3592 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3593 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3594 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3595}
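/*
 * Illustrative example (assumed values, not taken from a real descriptor):
 * for a plain IPv4/TCP TSE frame without a VLAN tag the context descriptor
 * would typically carry IPCSS=14 (IP header right after the 14-byte Ethernet
 * header), TUCSS=34 (TCP header after a 20-byte IP header) and HDRLEN=54,
 * which e1kSetupGsoCtx() above would translate into this GSO context:
 */
#if 0
    pGso->u8Type      = PDMNETWORKGSOTYPE_IPV4_TCP;
    pGso->offHdr1     = 14;     /* start of the IP header  (IPCSS)  */
    pGso->offHdr2     = 34;     /* start of the TCP header (TUCSS)  */
    pGso->cbHdrsTotal = 54;     /* Ethernet + IP + TCP     (HDRLEN) */
    pGso->cbHdrsSeg   = 54;
    pGso->cbMaxSeg    = 1460;   /* MSS for a standard 1500-byte MTU */
#endif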
3596
3597/**
3598 * Checks if we can use GSO processing for the current TSE frame.
3599 *
3600 * @param pThis The device state structure.
3601 * @param pGso The GSO context.
3602 * @param pData The first data descriptor of the frame.
3603 * @param pCtx The TSO context descriptor.
3604 */
3605DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3606{
3607 if (!pData->cmd.fTSE)
3608 {
3609 E1kLog2(("e1kCanDoGso: !TSE\n"));
3610 return false;
3611 }
3612 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3613 {
3614 E1kLog(("e1kCanDoGso: VLE\n"));
3615 return false;
3616 }
3617 if (RT_UNLIKELY(!pThis->fGSOEnabled))
3618 {
3619 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3620 return false;
3621 }
3622
3623 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3624 {
3625 case PDMNETWORKGSOTYPE_IPV4_TCP:
3626 case PDMNETWORKGSOTYPE_IPV4_UDP:
3627 if (!pData->dw3.fIXSM)
3628 {
3629 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3630 return false;
3631 }
3632 if (!pData->dw3.fTXSM)
3633 {
3634 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3635 return false;
3636 }
3637            /** @todo what other checks should we perform here? Ethernet frame type? */
3638 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3639 return true;
3640
3641 case PDMNETWORKGSOTYPE_IPV6_TCP:
3642 case PDMNETWORKGSOTYPE_IPV6_UDP:
3643 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3644 {
3645 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3646 return false;
3647 }
3648 if (!pData->dw3.fTXSM)
3649 {
3650                E1kLog(("e1kCanDoGso: !TXSM (IPv6)\n"));
3651 return false;
3652 }
3653            /** @todo what other checks should we perform here? Ethernet frame type? */
3654            E1kLog2(("e1kCanDoGso: OK, IPv6\n"));
3655 return true;
3656
3657 default:
3658 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3659 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3660 return false;
3661 }
3662}
3663
3664/**
3665 * Frees the current xmit buffer.
3666 *
3667 * @param pThis The device state structure.
3668 */
3669static void e1kXmitFreeBuf(PE1KSTATE pThis)
3670{
3671 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3672 if (pSg)
3673 {
3674 pThis->CTX_SUFF(pTxSg) = NULL;
3675
3676 if (pSg->pvAllocator != pThis)
3677 {
3678 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3679 if (pDrv)
3680 pDrv->pfnFreeBuf(pDrv, pSg);
3681 }
3682 else
3683 {
3684 /* loopback */
3685 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3686 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3687 pSg->fFlags = 0;
3688 pSg->pvAllocator = NULL;
3689 }
3690 }
3691}
3692
3693#ifndef E1K_WITH_TXD_CACHE
3694/**
3695 * Allocates an xmit buffer.
3696 *
3697 * @returns See PDMINETWORKUP::pfnAllocBuf.
3698 * @param pThis The device state structure.
3699 * @param cbMin The minimum frame size.
3700 * @param fExactSize Whether cbMin is exact or if we have to max it
3701 * out to the max MTU size.
3702 * @param fGso Whether this is a GSO frame or not.
3703 */
3704DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, size_t cbMin, bool fExactSize, bool fGso)
3705{
3706 /* Adjust cbMin if necessary. */
3707 if (!fExactSize)
3708 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3709
3710 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3711 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3712 e1kXmitFreeBuf(pThis);
3713 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3714
3715 /*
3716 * Allocate the buffer.
3717 */
3718 PPDMSCATTERGATHER pSg;
3719 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3720 {
3721 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3722 if (RT_UNLIKELY(!pDrv))
3723 return VERR_NET_DOWN;
3724 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pThis->GsoCtx : NULL, &pSg);
3725 if (RT_FAILURE(rc))
3726 {
3727 /* Suspend TX as we are out of buffers atm */
3728 STATUS |= STATUS_TXOFF;
3729 return rc;
3730 }
3731 }
3732 else
3733 {
3734 /* Create a loopback using the fallback buffer and preallocated SG. */
3735 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3736 pSg = &pThis->uTxFallback.Sg;
3737 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3738 pSg->cbUsed = 0;
3739 pSg->cbAvailable = 0;
3740 pSg->pvAllocator = pThis;
3741 pSg->pvUser = NULL; /* No GSO here. */
3742 pSg->cSegs = 1;
3743 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3744 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3745 }
3746
3747 pThis->CTX_SUFF(pTxSg) = pSg;
3748 return VINF_SUCCESS;
3749}
3750#else /* E1K_WITH_TXD_CACHE */
3751/**
3752 * Allocates an xmit buffer.
3753 *
3754 * @returns See PDMINETWORKUP::pfnAllocBuf.
3755 * @param pThis The device state structure.
3759 * @param fGso Whether this is a GSO frame or not.
3760 */
3761DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, bool fGso)
3762{
3763 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3764 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3765 e1kXmitFreeBuf(pThis);
3766 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3767
3768 /*
3769 * Allocate the buffer.
3770 */
3771 PPDMSCATTERGATHER pSg;
3772 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3773 {
3774 if (pThis->cbTxAlloc == 0)
3775 {
3776 /* Zero packet, no need for the buffer */
3777 return VINF_SUCCESS;
3778 }
3779
3780 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3781 if (RT_UNLIKELY(!pDrv))
3782 return VERR_NET_DOWN;
3783 int rc = pDrv->pfnAllocBuf(pDrv, pThis->cbTxAlloc, fGso ? &pThis->GsoCtx : NULL, &pSg);
3784 if (RT_FAILURE(rc))
3785 {
3786 /* Suspend TX as we are out of buffers atm */
3787 STATUS |= STATUS_TXOFF;
3788 return rc;
3789 }
3790 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3791 pThis->szPrf, pThis->cbTxAlloc,
3792 pThis->fVTag ? "VLAN " : "",
3793 pThis->fGSO ? "GSO " : ""));
3794 }
3795 else
3796 {
3797 /* Create a loopback using the fallback buffer and preallocated SG. */
3798 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3799 pSg = &pThis->uTxFallback.Sg;
3800 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3801 pSg->cbUsed = 0;
3802 pSg->cbAvailable = 0;
3803 pSg->pvAllocator = pThis;
3804 pSg->pvUser = NULL; /* No GSO here. */
3805 pSg->cSegs = 1;
3806 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3807 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3808 }
3809 pThis->cbTxAlloc = 0;
3810
3811 pThis->CTX_SUFF(pTxSg) = pSg;
3812 return VINF_SUCCESS;
3813}
3814#endif /* E1K_WITH_TXD_CACHE */
3815
3816/**
3817 * Checks if it's a GSO buffer or not.
3818 *
3819 * @returns true / false.
3820 * @param pTxSg The scatter / gather buffer.
3821 */
3822DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3823{
3824#if 0
3825 if (!pTxSg)
3826 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
3827 if (pTxSg && pTxSg->pvUser)
3828        E1kLog(("e1kXmitIsGsoBuf: pvUser is not NULL\n"));
3829#endif
3830 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3831}
3832
3833#ifndef E1K_WITH_TXD_CACHE
3834/**
3835 * Load transmit descriptor from guest memory.
3836 *
3837 * @param pThis The device state structure.
3838 * @param pDesc Pointer to descriptor union.
3839 * @param addr Physical address in guest context.
3840 * @thread E1000_TX
3841 */
3842DECLINLINE(void) e1kLoadDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3843{
3844 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3845}
3846#else /* E1K_WITH_TXD_CACHE */
3847/**
3848 * Load transmit descriptors from guest memory.
3849 *
3850 * We need two physical reads in case the area to fetch wraps around the end of
3851 * the TX descriptor ring.
3852 *
3853 * @returns the actual number of descriptors fetched.
3854 * @param pThis The device state structure.
3857 * @thread E1000_TX
3858 */
3859DECLINLINE(unsigned) e1kTxDLoadMore(PE1KSTATE pThis)
3860{
3861 Assert(pThis->iTxDCurrent == 0);
3862 /* We've already loaded pThis->nTxDFetched descriptors past TDH. */
3863 unsigned nDescsAvailable = e1kGetTxLen(pThis) - pThis->nTxDFetched;
3864 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pThis->nTxDFetched);
3865 unsigned nDescsTotal = TDLEN / sizeof(E1KTXDESC);
3866 unsigned nFirstNotLoaded = (TDH + pThis->nTxDFetched) % nDescsTotal;
3867 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
3868 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u "
3869 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
3870 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
3871 nFirstNotLoaded, nDescsInSingleRead));
3872 if (nDescsToFetch == 0)
3873 return 0;
3874 E1KTXDESC* pFirstEmptyDesc = &pThis->aTxDescriptors[pThis->nTxDFetched];
3875 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3876 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3877 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
3878 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3879 pThis->szPrf, nDescsInSingleRead,
3880 TDBAH, TDBAL + TDH * sizeof(E1KTXDESC),
3881 nFirstNotLoaded, TDLEN, TDH, TDT));
3882 if (nDescsToFetch > nDescsInSingleRead)
3883 {
3884 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3885 ((uint64_t)TDBAH << 32) + TDBAL,
3886 pFirstEmptyDesc + nDescsInSingleRead,
3887 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
3888 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
3889 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
3890 TDBAH, TDBAL));
3891 }
3892 pThis->nTxDFetched += nDescsToFetch;
3893 return nDescsToFetch;
3894}
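/*
 * Illustrative worked example (assumed numbers, not device code): with a ring
 * of 8 descriptors, an empty cache, TDH=6 and 5 descriptors to fetch,
 * nFirstNotLoaded is (6 + 0) % 8 = 6, so the first physical read covers the
 * 2 descriptors left at the end of the ring and the second read wraps to the
 * ring base (TDBAH:TDBAL) for the remaining 3, exactly as done above.
 */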
3895
3896/**
3897 * Load transmit descriptors from guest memory only if there are no loaded
3898 * descriptors.
3899 *
3900 * @returns true if there are descriptors in cache.
3901 * @param pThis The device state structure.
3904 * @thread E1000_TX
3905 */
3906DECLINLINE(bool) e1kTxDLazyLoad(PE1KSTATE pThis)
3907{
3908 if (pThis->nTxDFetched == 0)
3909 return e1kTxDLoadMore(pThis) != 0;
3910 return true;
3911}
3912#endif /* E1K_WITH_TXD_CACHE */
3913
3914/**
3915 * Write back transmit descriptor to guest memory.
3916 *
3917 * @param pThis The device state structure.
3918 * @param pDesc Pointer to descriptor union.
3919 * @param addr Physical address in guest context.
3920 * @thread E1000_TX
3921 */
3922DECLINLINE(void) e1kWriteBackDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3923{
3924    /* Strictly only the last half of the descriptor has to be written back; we write back the whole descriptor for simplicity. */
3925 e1kPrintTDesc(pThis, pDesc, "^^^");
3926 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3927}
3928
3929/**
3930 * Transmit complete frame.
3931 *
3932 * @remarks We skip the FCS since we're not responsible for sending anything to
3933 * a real ethernet wire.
3934 *
3935 * @param pThis The device state structure.
3936 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3937 * @thread E1000_TX
3938 */
3939static void e1kTransmitFrame(PE1KSTATE pThis, bool fOnWorkerThread)
3940{
3941 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3942 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
3943 Assert(!pSg || pSg->cSegs == 1);
3944
3945 if (cbFrame > 70) /* unqualified guess */
3946 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
3947
3948#ifdef E1K_INT_STATS
3949 if (cbFrame <= 1514)
3950 E1K_INC_ISTAT_CNT(pThis->uStatTx1514);
3951 else if (cbFrame <= 2962)
3952 E1K_INC_ISTAT_CNT(pThis->uStatTx2962);
3953 else if (cbFrame <= 4410)
3954 E1K_INC_ISTAT_CNT(pThis->uStatTx4410);
3955 else if (cbFrame <= 5858)
3956 E1K_INC_ISTAT_CNT(pThis->uStatTx5858);
3957 else if (cbFrame <= 7306)
3958 E1K_INC_ISTAT_CNT(pThis->uStatTx7306);
3959 else if (cbFrame <= 8754)
3960 E1K_INC_ISTAT_CNT(pThis->uStatTx8754);
3961 else if (cbFrame <= 16384)
3962 E1K_INC_ISTAT_CNT(pThis->uStatTx16384);
3963 else if (cbFrame <= 32768)
3964 E1K_INC_ISTAT_CNT(pThis->uStatTx32768);
3965 else
3966 E1K_INC_ISTAT_CNT(pThis->uStatTxLarge);
3967#endif /* E1K_INT_STATS */
3968
3969 /* Add VLAN tag */
3970 if (cbFrame > 12 && pThis->fVTag)
3971 {
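        /*
         * Shift everything after the two MAC addresses (bytes 0..11) up by 4 bytes
         * and drop the 802.1Q tag (VET followed by the saved TCI) into the gap at
         * byte offsets 12..15.
         */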
3972 E1kLog3(("%s Inserting VLAN tag %08x\n",
3973 pThis->szPrf, RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16)));
3974 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
3975 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16);
3976 pSg->cbUsed += 4;
3977 cbFrame += 4;
3978 Assert(pSg->cbUsed == cbFrame);
3979 Assert(pSg->cbUsed <= pSg->cbAvailable);
3980 }
3981/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
3982 "%.*Rhxd\n"
3983 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
3984 pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
3985
3986 /* Update the stats */
3987 E1K_INC_CNT32(TPT);
3988 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
3989 E1K_INC_CNT32(GPTC);
3990 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
3991 E1K_INC_CNT32(BPTC);
3992 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
3993 E1K_INC_CNT32(MPTC);
3994 /* Update octet transmit counter */
3995 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
3996 if (pThis->CTX_SUFF(pDrv))
3997 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, cbFrame);
3998 if (cbFrame == 64)
3999 E1K_INC_CNT32(PTC64);
4000 else if (cbFrame < 128)
4001 E1K_INC_CNT32(PTC127);
4002 else if (cbFrame < 256)
4003 E1K_INC_CNT32(PTC255);
4004 else if (cbFrame < 512)
4005 E1K_INC_CNT32(PTC511);
4006 else if (cbFrame < 1024)
4007 E1K_INC_CNT32(PTC1023);
4008 else
4009 E1K_INC_CNT32(PTC1522);
4010
4011 E1K_INC_ISTAT_CNT(pThis->uStatTxFrm);
4012
4013 /*
4014 * Dump and send the packet.
4015 */
4016 int rc = VERR_NET_DOWN;
4017 if (pSg && pSg->pvAllocator != pThis)
4018 {
4019 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
4020
4021 pThis->CTX_SUFF(pTxSg) = NULL;
4022 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
4023 if (pDrv)
4024 {
4025 /* Release critical section to avoid deadlock in CanReceive */
4026 //e1kCsLeave(pThis);
4027 STAM_PROFILE_START(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4028 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
4029 STAM_PROFILE_STOP(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4030 //e1kCsEnter(pThis, RT_SRC_POS);
4031 }
4032 }
4033 else if (pSg)
4034 {
4035 Assert(pSg->aSegs[0].pvSeg == pThis->aTxPacketFallback);
4036 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
4037
4038 /** @todo do we actually need to check that we're in loopback mode here? */
4039 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
4040 {
4041 E1KRXDST status;
4042 RT_ZERO(status);
4043 status.fPIF = true;
4044 e1kHandleRxPacket(pThis, pSg->aSegs[0].pvSeg, cbFrame, status);
4045 rc = VINF_SUCCESS;
4046 }
4047 e1kXmitFreeBuf(pThis);
4048 }
4049 else
4050 rc = VERR_NET_DOWN;
4051 if (RT_FAILURE(rc))
4052 {
4053 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
4054 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
4055 }
4056
4057 pThis->led.Actual.s.fWriting = 0;
4058}
4059
4060/**
4061 * Compute and write internet checksum (e1kCSum16) at the specified offset.
4062 *
4063 * @param pThis The device state structure.
4064 * @param pPkt Pointer to the packet.
4065 * @param u16PktLen Total length of the packet.
4066 * @param cso Offset in packet to write checksum at.
4067 * @param css Offset in packet to start computing
4068 * checksum from.
4069 * @param cse Offset in packet to stop computing
4070 * checksum at.
4071 * @thread E1000_TX
4072 */
4073static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
4074{
4075 RT_NOREF1(pThis);
4076
4077 if (css >= u16PktLen)
4078 {
4079 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
4080                 pThis->szPrf, css, u16PktLen));
4081 return;
4082 }
4083
4084 if (cso >= u16PktLen - 1)
4085 {
4086 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
4087 pThis->szPrf, cso, u16PktLen));
4088 return;
4089 }
4090
4091 if (cse == 0)
4092 cse = u16PktLen - 1;
4093 else if (cse < css)
4094 {
4095 E1kLog2(("%s css(%X) is greater than cse(%X), checksum is not inserted\n",
4096 pThis->szPrf, css, cse));
4097 return;
4098 }
4099
4100 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
4101 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", pThis->szPrf,
4102 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
4103 *(uint16_t*)(pPkt + cso) = u16ChkSum;
4104}
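/*
 * Illustrative sketch, not the actual e1kCSum16() (which is defined elsewhere
 * in this file): the function above relies on the standard Internet checksum
 * of RFC 1071, which a stand-alone rendition could compute like this:
 */
#if 0
static uint16_t inetCSumSketch(const uint8_t *pb, size_t cb)
{
    uint32_t uSum = 0;
    while (cb > 1)                      /* Sum the data as 16-bit words. */
    {
        uSum += *(const uint16_t *)pb;
        pb   += 2;
        cb   -= 2;
    }
    if (cb)                             /* An odd trailing byte is padded with zero. */
        uSum += *pb;
    while (uSum >> 16)                  /* Fold the carries back into the low word. */
        uSum = (uSum >> 16) + (uSum & 0xffff);
    return (uint16_t)~uSum;             /* One's complement of the sum. */
}
#endif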
4105
4106/**
4107 * Add a part of descriptor's buffer to transmit frame.
4108 *
4109 * @remarks data.u64BufAddr is used unconditionally for both data
4110 * and legacy descriptors since it is identical to
4111 * legacy.u64BufAddr.
4112 *
4113 * @param pThis The device state structure.
4114 * @param   PhysAddr        Physical address of the part of the descriptor's buffer to add.
4115 * @param u16Len Length of buffer to the end of segment.
4116 * @param fSend Force packet sending.
4117 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4118 * @thread E1000_TX
4119 */
4120#ifndef E1K_WITH_TXD_CACHE
4121static void e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4122{
4123 /* TCP header being transmitted */
4124 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
4125 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4126 /* IP header being transmitted */
4127 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
4128 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4129
4130 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4131 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4132 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4133
4134 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4135 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4136 E1kLog3(("%s Dump of the segment:\n"
4137 "%.*Rhxd\n"
4138 "%s --- End of dump ---\n",
4139 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4140 pThis->u16TxPktLen += u16Len;
4141 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4142 pThis->szPrf, pThis->u16TxPktLen));
4143 if (pThis->u16HdrRemain > 0)
4144 {
4145 /* The header was not complete, check if it is now */
4146 if (u16Len >= pThis->u16HdrRemain)
4147 {
4148 /* The rest is payload */
4149 u16Len -= pThis->u16HdrRemain;
4150 pThis->u16HdrRemain = 0;
4151 /* Save partial checksum and flags */
4152 pThis->u32SavedCsum = pTcpHdr->chksum;
4153 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4154 /* Clear FIN and PSH flags now and set them only in the last segment */
4155 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4156 }
4157 else
4158 {
4159 /* Still not */
4160 pThis->u16HdrRemain -= u16Len;
4161 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4162 pThis->szPrf, pThis->u16HdrRemain));
4163 return;
4164 }
4165 }
4166
4167 pThis->u32PayRemain -= u16Len;
4168
4169 if (fSend)
4170 {
4171 /* Leave ethernet header intact */
4172 /* IP Total Length = payload + headers - ethernet header */
4173 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4174 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4175 pThis->szPrf, ntohs(pIpHdr->total_len)));
4176 /* Update IP Checksum */
4177 pIpHdr->chksum = 0;
4178 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4179 pThis->contextTSE.ip.u8CSO,
4180 pThis->contextTSE.ip.u8CSS,
4181 pThis->contextTSE.ip.u16CSE);
4182
4183 /* Update TCP flags */
4184 /* Restore original FIN and PSH flags for the last segment */
4185 if (pThis->u32PayRemain == 0)
4186 {
4187 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4188 E1K_INC_CNT32(TSCTC);
4189 }
4190 /* Add TCP length to partial pseudo header sum */
4191 uint32_t csum = pThis->u32SavedCsum
4192 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4193 while (csum >> 16)
4194 csum = (csum >> 16) + (csum & 0xFFFF);
4195 pTcpHdr->chksum = csum;
4196 /* Compute final checksum */
4197 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4198 pThis->contextTSE.tu.u8CSO,
4199 pThis->contextTSE.tu.u8CSS,
4200 pThis->contextTSE.tu.u16CSE);
4201
4202 /*
4203         * Transmit it. If we've used the SG already, allocate a new one before
4204         * we copy the data.
4205 */
4206 if (!pThis->CTX_SUFF(pTxSg))
4207 e1kXmitAllocBuf(pThis, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
4208 if (pThis->CTX_SUFF(pTxSg))
4209 {
4210 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4211 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4212 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4213 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4214 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4215 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4216 }
4217 e1kTransmitFrame(pThis, fOnWorkerThread);
4218
4219 /* Update Sequence Number */
4220 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4221 - pThis->contextTSE.dw3.u8HDRLEN);
4222 /* Increment IP identification */
4223 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4224 }
4225}
4226#else /* E1K_WITH_TXD_CACHE */
4227static int e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4228{
4229 int rc = VINF_SUCCESS;
4230 /* TCP header being transmitted */
4231 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
4232 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4233 /* IP header being transmitted */
4234 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
4235 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4236
4237 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4238 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4239 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4240
4241 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4242 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4243 E1kLog3(("%s Dump of the segment:\n"
4244 "%.*Rhxd\n"
4245 "%s --- End of dump ---\n",
4246 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4247 pThis->u16TxPktLen += u16Len;
4248 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4249 pThis->szPrf, pThis->u16TxPktLen));
4250 if (pThis->u16HdrRemain > 0)
4251 {
4252 /* The header was not complete, check if it is now */
4253 if (u16Len >= pThis->u16HdrRemain)
4254 {
4255 /* The rest is payload */
4256 u16Len -= pThis->u16HdrRemain;
4257 pThis->u16HdrRemain = 0;
4258 /* Save partial checksum and flags */
4259 pThis->u32SavedCsum = pTcpHdr->chksum;
4260 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4261 /* Clear FIN and PSH flags now and set them only in the last segment */
4262 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4263 }
4264 else
4265 {
4266 /* Still not */
4267 pThis->u16HdrRemain -= u16Len;
4268 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4269 pThis->szPrf, pThis->u16HdrRemain));
4270 return rc;
4271 }
4272 }
4273
4274 pThis->u32PayRemain -= u16Len;
4275
4276 if (fSend)
4277 {
4278 /* Leave ethernet header intact */
4279 /* IP Total Length = payload + headers - ethernet header */
4280 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4281 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4282 pThis->szPrf, ntohs(pIpHdr->total_len)));
4283 /* Update IP Checksum */
4284 pIpHdr->chksum = 0;
4285 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4286 pThis->contextTSE.ip.u8CSO,
4287 pThis->contextTSE.ip.u8CSS,
4288 pThis->contextTSE.ip.u16CSE);
4289
4290 /* Update TCP flags */
4291 /* Restore original FIN and PSH flags for the last segment */
4292 if (pThis->u32PayRemain == 0)
4293 {
4294 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4295 E1K_INC_CNT32(TSCTC);
4296 }
4297 /* Add TCP length to partial pseudo header sum */
4298 uint32_t csum = pThis->u32SavedCsum
4299 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4300 while (csum >> 16)
4301 csum = (csum >> 16) + (csum & 0xFFFF);
4302 pTcpHdr->chksum = csum;
4303 /* Compute final checksum */
4304 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4305 pThis->contextTSE.tu.u8CSO,
4306 pThis->contextTSE.tu.u8CSS,
4307 pThis->contextTSE.tu.u16CSE);
4308
4309 /*
4310 * Transmit it.
4311 */
4312 if (pThis->CTX_SUFF(pTxSg))
4313 {
4314 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4315 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4316 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4317 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4318 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4319 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4320 }
4321 e1kTransmitFrame(pThis, fOnWorkerThread);
4322
4323 /* Update Sequence Number */
4324 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4325 - pThis->contextTSE.dw3.u8HDRLEN);
4326 /* Increment IP identification */
4327 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4328
4329 /* Allocate new buffer for the next segment. */
4330 if (pThis->u32PayRemain)
4331 {
4332 pThis->cbTxAlloc = RT_MIN(pThis->u32PayRemain,
4333 pThis->contextTSE.dw3.u16MSS)
4334 + pThis->contextTSE.dw3.u8HDRLEN
4335 + (pThis->fVTag ? 4 : 0);
4336 rc = e1kXmitAllocBuf(pThis, false /* fGSO */);
4337 }
4338 }
4339
4340 return rc;
4341}
4342#endif /* E1K_WITH_TXD_CACHE */
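/*
 * Illustrative worked example (assumed numbers): with HDRLEN=54 and MSS=1460
 * from the TSE context descriptor, a 4000-byte TCP payload spread over the
 * data descriptors goes out as three frames carrying 1460 + 1460 + 1080
 * payload bytes, each prefixed with a fresh copy of the 54-byte headers and
 * with total_len, the IP checksum, the TCP checksum, the sequence number and
 * the IP identification updated per segment by e1kFallbackAddSegment() above.
 */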
4343
4344#ifndef E1K_WITH_TXD_CACHE
4345/**
4346 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4347 * frame.
4348 *
4349 * We construct the frame in the fallback buffer first and then copy it to the SG
4350 * buffer before passing it down to the network driver code.
4351 *
4352 * @returns true if the frame should be transmitted, false if not.
4353 *
4354 * @param pThis The device state structure.
4355 * @param pDesc Pointer to the descriptor to transmit.
4356 * @param cbFragment Length of descriptor's buffer.
4357 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4358 * @thread E1000_TX
4359 */
4360static bool e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4361{
4362 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4363 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4364 Assert(pDesc->data.cmd.fTSE);
4365 Assert(!e1kXmitIsGsoBuf(pTxSg));
4366
4367 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4368 Assert(u16MaxPktLen != 0);
4369 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4370
4371 /*
4372 * Carve out segments.
4373 */
4374 do
4375 {
4376 /* Calculate how many bytes we have left in this TCP segment */
4377 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4378 if (cb > cbFragment)
4379 {
4380 /* This descriptor fits completely into current segment */
4381 cb = cbFragment;
4382 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4383 }
4384 else
4385 {
4386 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4387 /*
4388 * Rewind the packet tail pointer to the beginning of payload,
4389 * so we continue writing right beyond the header.
4390 */
4391 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4392 }
4393
4394 pDesc->data.u64BufAddr += cb;
4395 cbFragment -= cb;
4396 } while (cbFragment > 0);
4397
4398 if (pDesc->data.cmd.fEOP)
4399 {
4400 /* End of packet, next segment will contain header. */
4401 if (pThis->u32PayRemain != 0)
4402 E1K_INC_CNT32(TSCTFC);
4403 pThis->u16TxPktLen = 0;
4404 e1kXmitFreeBuf(pThis);
4405 }
4406
4407 return false;
4408}
4409#else /* E1K_WITH_TXD_CACHE */
4410/**
4411 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4412 * frame.
4413 *
4414 * We construct the frame in the fallback buffer first and then copy it to the SG
4415 * buffer before passing it down to the network driver code.
4416 *
4417 * @returns VBox status code.
4418 *
4419 * @param pThis The device state structure.
4420 * @param pDesc Pointer to the descriptor to transmit.
4422 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4423 * @thread E1000_TX
4424 */
4425static int e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, bool fOnWorkerThread)
4426{
4427#ifdef VBOX_STRICT
4428 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4429 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4430 Assert(pDesc->data.cmd.fTSE);
4431 Assert(!e1kXmitIsGsoBuf(pTxSg));
4432#endif
4433
4434 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4435
4436 /*
4437 * Carve out segments.
4438 */
4439 int rc = VINF_SUCCESS;
4440 do
4441 {
4442 /* Calculate how many bytes we have left in this TCP segment */
4443 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4444 if (cb > pDesc->data.cmd.u20DTALEN)
4445 {
4446 /* This descriptor fits completely into current segment */
4447 cb = pDesc->data.cmd.u20DTALEN;
4448 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4449 }
4450 else
4451 {
4452 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4453 /*
4454 * Rewind the packet tail pointer to the beginning of payload,
4455 * so we continue writing right beyond the header.
4456 */
4457 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4458 }
4459
4460 pDesc->data.u64BufAddr += cb;
4461 pDesc->data.cmd.u20DTALEN -= cb;
4462 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4463
4464 if (pDesc->data.cmd.fEOP)
4465 {
4466 /* End of packet, next segment will contain header. */
4467 if (pThis->u32PayRemain != 0)
4468 E1K_INC_CNT32(TSCTFC);
4469 pThis->u16TxPktLen = 0;
4470 e1kXmitFreeBuf(pThis);
4471 }
4472
4473 return VINF_SUCCESS; /// @todo consider rc;
4474}
4475#endif /* E1K_WITH_TXD_CACHE */
4476
4477
4478/**
4479 * Add descriptor's buffer to transmit frame.
4480 *
4481 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4482 * TSE frames we cannot handle as GSO.
4483 *
4484 * @returns true on success, false on failure.
4485 *
4486 * @param pThis The device state structure.
4487 * @param PhysAddr The physical address of the descriptor buffer.
4488 * @param cbFragment Length of descriptor's buffer.
4489 * @thread E1000_TX
4490 */
4491static bool e1kAddToFrame(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint32_t cbFragment)
4492{
4493 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4494 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4495 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4496
4497 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4498 {
4499 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4500 return false;
4501 }
4502 if (RT_UNLIKELY( fGso && cbNewPkt > pTxSg->cbAvailable ))
4503 {
4504 E1kLog(("%s Transmit packet is too large: %u > %u(max)/GSO\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
4505 return false;
4506 }
4507
4508 if (RT_LIKELY(pTxSg))
4509 {
4510 Assert(pTxSg->cSegs == 1);
4511 Assert(pTxSg->cbUsed == pThis->u16TxPktLen);
4512
4513 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4514 (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4515
4516 pTxSg->cbUsed = cbNewPkt;
4517 }
4518 pThis->u16TxPktLen = cbNewPkt;
4519
4520 return true;
4521}
4522
4523
4524/**
4525 * Write the descriptor back to guest memory and notify the guest.
4526 *
4527 * @param pThis The device state structure.
4528 * @param pDesc Pointer to the descriptor that has been transmitted.
4529 * @param addr Physical address of the descriptor in guest memory.
4530 * @thread E1000_TX
4531 */
4532static void e1kDescReport(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4533{
4534 /*
4535     * We do not implement descriptor write-back bursting; descriptors are written
4536     * back one by one as they are processed.
4537 */
4538 /* Let's pretend we process descriptors. Write back with DD set. */
4539 /*
4540     * Prior to r71586 we tried to accommodate the case when write-back bursts
4541     * are enabled without actually implementing bursting by writing back all
4542     * descriptors, even the ones that do not have RS set. This caused kernel
4543     * panics with Linux SMP kernels, as the e1000 driver tried to free the skb
4544     * associated with a written-back descriptor if it happened to be a context
4545     * descriptor, since context descriptors have no skb associated with them.
4546     * Starting from r71586 we write back only the descriptors with RS set,
4547     * which is a little bit different from what the real hardware does in
4548     * case there is a chain of data descriptors where some of them have RS set
4549     * and others do not. That is a very uncommon scenario, though.
4550 * We need to check RPS as well since some legacy drivers use it instead of
4551 * RS even with newer cards.
4552 */
4553 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4554 {
4555 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4556 e1kWriteBackDesc(pThis, pDesc, addr);
4557 if (pDesc->legacy.cmd.fEOP)
4558 {
4559//#ifdef E1K_USE_TX_TIMERS
4560 if (pThis->fTidEnabled && pDesc->legacy.cmd.fIDE)
4561 {
4562 E1K_INC_ISTAT_CNT(pThis->uStatTxIDE);
4563 //if (pThis->fIntRaised)
4564 //{
4565 // /* Interrupt is already pending, no need for timers */
4566 // ICR |= ICR_TXDW;
4567 //}
4568 //else {
4569 /* Arm the timer to fire in TIVD usec (discard .024) */
4570 e1kArmTimer(pThis, pThis->CTX_SUFF(pTIDTimer), TIDV);
4571# ifndef E1K_NO_TAD
4572 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4573 E1kLog2(("%s Checking if TAD timer is running\n",
4574 pThis->szPrf));
4575 if (TADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pTADTimer)))
4576 e1kArmTimer(pThis, pThis->CTX_SUFF(pTADTimer), TADV);
4577# endif /* E1K_NO_TAD */
4578 }
4579 else
4580 {
4581 if (pThis->fTidEnabled)
4582 {
4583 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4584 pThis->szPrf));
4585 /* Cancel both timers if armed and fire immediately. */
4586# ifndef E1K_NO_TAD
4587 TMTimerStop(pThis->CTX_SUFF(pTADTimer));
4588# endif
4589 TMTimerStop(pThis->CTX_SUFF(pTIDTimer));
4590 }
4591//#endif /* E1K_USE_TX_TIMERS */
4592 E1K_INC_ISTAT_CNT(pThis->uStatIntTx);
4593 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXDW);
4594//#ifdef E1K_USE_TX_TIMERS
4595 }
4596//#endif /* E1K_USE_TX_TIMERS */
4597 }
4598 }
4599 else
4600 {
4601 E1K_INC_ISTAT_CNT(pThis->uStatTxNoRS);
4602 }
4603}
4604
4605#ifndef E1K_WITH_TXD_CACHE
4606
4607/**
4608 * Process Transmit Descriptor.
4609 *
4610 * E1000 supports three types of transmit descriptors:
4611 * - legacy: data descriptors in the older (context-less) format.
4612 * - data: same as legacy but providing new offloading capabilities.
4613 * - context: sets up the context for the following data descriptors.
4614 *
4615 * @param pThis The device state structure.
4616 * @param pDesc Pointer to descriptor union.
4617 * @param addr Physical address of descriptor in guest memory.
4618 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4619 * @thread E1000_TX
4620 */
4621static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr, bool fOnWorkerThread)
4622{
4623 int rc = VINF_SUCCESS;
4624 uint32_t cbVTag = 0;
4625
4626 e1kPrintTDesc(pThis, pDesc, "vvv");
4627
4628//#ifdef E1K_USE_TX_TIMERS
4629 if (pThis->fTidEnabled)
4630 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
4631//#endif /* E1K_USE_TX_TIMERS */
4632
4633 switch (e1kGetDescType(pDesc))
4634 {
4635 case E1K_DTYP_CONTEXT:
4636 if (pDesc->context.dw2.fTSE)
4637 {
4638 pThis->contextTSE = pDesc->context;
4639 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4640 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4641 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4642 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4643 }
4644 else
4645 {
4646 pThis->contextNormal = pDesc->context;
4647 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4648 }
4649 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4650 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4651 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4652 pDesc->context.ip.u8CSS,
4653 pDesc->context.ip.u8CSO,
4654 pDesc->context.ip.u16CSE,
4655 pDesc->context.tu.u8CSS,
4656 pDesc->context.tu.u8CSO,
4657 pDesc->context.tu.u16CSE));
4658 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4659 e1kDescReport(pThis, pDesc, addr);
4660 break;
4661
4662 case E1K_DTYP_DATA:
4663 {
4664 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4665 {
4666                E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4667 /** @todo Same as legacy when !TSE. See below. */
4668 break;
4669 }
4670 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4671 &pThis->StatTxDescTSEData:
4672 &pThis->StatTxDescData);
4673 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4674 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4675
4676 /*
4677 * The last descriptor of non-TSE packet must contain VLE flag.
4678             * The last descriptor of a non-TSE packet must have the VLE flag set.
4679             * TSE packets have the VLE flag in the first descriptor. The latter
4680             * case is taken care of a bit later when cbVTag gets assigned.
4681 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4682 */
4683 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4684 {
4685 pThis->fVTag = pDesc->data.cmd.fVLE;
4686 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4687 }
4688 /*
4689 * First fragment: Allocate new buffer and save the IXSM and TXSM
4690 * packet options as these are only valid in the first fragment.
4691 */
4692 if (pThis->u16TxPktLen == 0)
4693 {
4694 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4695 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4696 E1kLog2(("%s Saving checksum flags:%s%s; \n", pThis->szPrf,
4697 pThis->fIPcsum ? " IP" : "",
4698 pThis->fTCPcsum ? " TCP/UDP" : ""));
4699 if (pDesc->data.cmd.fTSE)
4700 {
4701 /* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
4702 pThis->fVTag = pDesc->data.cmd.fVLE;
4703 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4704 cbVTag = pThis->fVTag ? 4 : 0;
4705 }
4706 else if (pDesc->data.cmd.fEOP)
4707 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4708 else
4709 cbVTag = 4;
4710 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4711 if (e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE))
4712 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4713 true /*fExactSize*/, true /*fGso*/);
4714 else if (pDesc->data.cmd.fTSE)
4715 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4716 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4717 else
4718 rc = e1kXmitAllocBuf(pThis, pDesc->data.cmd.u20DTALEN + cbVTag,
4719 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4720
4721 /**
4722             * @todo Perhaps it is not that simple for GSO packets! We may
4723 * need to unwind some changes.
4724 */
4725 if (RT_FAILURE(rc))
4726 {
4727 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4728 break;
4729 }
4730            /** @todo Is there any way to indicate errors other than collisions? Like
4731 * VERR_NET_DOWN. */
4732 }
4733
4734 /*
4735 * Add the descriptor data to the frame. If the frame is complete,
4736 * transmit it and reset the u16TxPktLen field.
4737 */
4738 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4739 {
4740 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4741 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4742 if (pDesc->data.cmd.fEOP)
4743 {
4744 if ( fRc
4745 && pThis->CTX_SUFF(pTxSg)
4746 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4747 {
4748 e1kTransmitFrame(pThis, fOnWorkerThread);
4749 E1K_INC_CNT32(TSCTC);
4750 }
4751 else
4752 {
4753 if (fRc)
4754 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4755 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4756 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4757 e1kXmitFreeBuf(pThis);
4758 E1K_INC_CNT32(TSCTFC);
4759 }
4760 pThis->u16TxPktLen = 0;
4761 }
4762 }
4763 else if (!pDesc->data.cmd.fTSE)
4764 {
4765 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4766 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4767 if (pDesc->data.cmd.fEOP)
4768 {
4769 if (fRc && pThis->CTX_SUFF(pTxSg))
4770 {
4771 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4772 if (pThis->fIPcsum)
4773 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4774 pThis->contextNormal.ip.u8CSO,
4775 pThis->contextNormal.ip.u8CSS,
4776 pThis->contextNormal.ip.u16CSE);
4777 if (pThis->fTCPcsum)
4778 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4779 pThis->contextNormal.tu.u8CSO,
4780 pThis->contextNormal.tu.u8CSS,
4781 pThis->contextNormal.tu.u16CSE);
4782 e1kTransmitFrame(pThis, fOnWorkerThread);
4783 }
4784 else
4785 e1kXmitFreeBuf(pThis);
4786 pThis->u16TxPktLen = 0;
4787 }
4788 }
4789 else
4790 {
4791 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4792 e1kFallbackAddToFrame(pThis, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4793 }
4794
4795 e1kDescReport(pThis, pDesc, addr);
4796 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4797 break;
4798 }
4799
4800 case E1K_DTYP_LEGACY:
4801 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4802 {
4803 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4804 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4805 break;
4806 }
4807 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4808 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4809
4810 /* First fragment: allocate new buffer. */
4811 if (pThis->u16TxPktLen == 0)
4812 {
4813 if (pDesc->legacy.cmd.fEOP)
4814 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4815 else
4816 cbVTag = 4;
4817 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4818 /** @todo reset status bits? */
4819 rc = e1kXmitAllocBuf(pThis, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
4820 if (RT_FAILURE(rc))
4821 {
4822 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4823 break;
4824 }
4825
4826            /** @todo Is there any way to indicate errors other than collisions? Like
4827 * VERR_NET_DOWN. */
4828 }
4829
4830 /* Add fragment to frame. */
4831 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4832 {
4833 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4834
4835 /* Last fragment: Transmit and reset the packet storage counter. */
4836 if (pDesc->legacy.cmd.fEOP)
4837 {
4838 pThis->fVTag = pDesc->legacy.cmd.fVLE;
4839 pThis->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4840 /** @todo Offload processing goes here. */
4841 e1kTransmitFrame(pThis, fOnWorkerThread);
4842 pThis->u16TxPktLen = 0;
4843 }
4844 }
4845 /* Last fragment + failure: free the buffer and reset the storage counter. */
4846 else if (pDesc->legacy.cmd.fEOP)
4847 {
4848 e1kXmitFreeBuf(pThis);
4849 pThis->u16TxPktLen = 0;
4850 }
4851
4852 e1kDescReport(pThis, pDesc, addr);
4853 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4854 break;
4855
4856 default:
4857 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4858 pThis->szPrf, e1kGetDescType(pDesc)));
4859 break;
4860 }
4861
4862 return rc;
4863}
4864
4865#else /* E1K_WITH_TXD_CACHE */
4866
4867/**
4868 * Process Transmit Descriptor.
4869 *
4870 * E1000 supports three types of transmit descriptors:
4871 * - legacy: data descriptors in the older (context-less) format.
4872 * - data: same as legacy but providing new offloading capabilities.
4873 * - context: sets up the context for the following data descriptors.
4874 *
4875 * @param pThis The device state structure.
4876 * @param pDesc Pointer to descriptor union.
4877 * @param addr Physical address of descriptor in guest memory.
4878 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4880 * @thread E1000_TX
4881 */
4882static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr,
4883 bool fOnWorkerThread)
4884{
4885 int rc = VINF_SUCCESS;
4886
4887 e1kPrintTDesc(pThis, pDesc, "vvv");
4888
4889//#ifdef E1K_USE_TX_TIMERS
4890 if (pThis->fTidEnabled)
4891 TMTimerStop(pThis->CTX_SUFF(pTIDTimer));
4892//#endif /* E1K_USE_TX_TIMERS */
4893
4894 switch (e1kGetDescType(pDesc))
4895 {
4896 case E1K_DTYP_CONTEXT:
4897            /* The caller has already updated the context. */
4898 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4899 e1kDescReport(pThis, pDesc, addr);
4900 break;
4901
4902 case E1K_DTYP_DATA:
4903 {
4904 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4905 &pThis->StatTxDescTSEData:
4906 &pThis->StatTxDescData);
4907 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4908 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4909 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4910 {
4911                E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4912 if (pDesc->data.cmd.fEOP)
4913 {
4914 e1kTransmitFrame(pThis, fOnWorkerThread);
4915 pThis->u16TxPktLen = 0;
4916 }
4917 }
4918 else
4919 {
4920 /*
4921 * Add the descriptor data to the frame. If the frame is complete,
4922 * transmit it and reset the u16TxPktLen field.
4923 */
4924 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4925 {
4926 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4927 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4928 if (pDesc->data.cmd.fEOP)
4929 {
4930 if ( fRc
4931 && pThis->CTX_SUFF(pTxSg)
4932 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4933 {
4934 e1kTransmitFrame(pThis, fOnWorkerThread);
4935 E1K_INC_CNT32(TSCTC);
4936 }
4937 else
4938 {
4939 if (fRc)
4940 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4941 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4942 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4943 e1kXmitFreeBuf(pThis);
4944 E1K_INC_CNT32(TSCTFC);
4945 }
4946 pThis->u16TxPktLen = 0;
4947 }
4948 }
4949 else if (!pDesc->data.cmd.fTSE)
4950 {
4951 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4952 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4953 if (pDesc->data.cmd.fEOP)
4954 {
4955 if (fRc && pThis->CTX_SUFF(pTxSg))
4956 {
4957 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4958 if (pThis->fIPcsum)
4959 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4960 pThis->contextNormal.ip.u8CSO,
4961 pThis->contextNormal.ip.u8CSS,
4962 pThis->contextNormal.ip.u16CSE);
4963 if (pThis->fTCPcsum)
4964 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4965 pThis->contextNormal.tu.u8CSO,
4966 pThis->contextNormal.tu.u8CSS,
4967 pThis->contextNormal.tu.u16CSE);
4968 e1kTransmitFrame(pThis, fOnWorkerThread);
4969 }
4970 else
4971 e1kXmitFreeBuf(pThis);
4972 pThis->u16TxPktLen = 0;
4973 }
4974 }
4975 else
4976 {
4977 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4978 rc = e1kFallbackAddToFrame(pThis, pDesc, fOnWorkerThread);
4979 }
4980 }
4981 e1kDescReport(pThis, pDesc, addr);
4982 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4983 break;
4984 }
4985
4986 case E1K_DTYP_LEGACY:
4987 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4988 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4989 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4990 {
4991 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4992 }
4993 else
4994 {
4995 /* Add fragment to frame. */
4996 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4997 {
4998 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4999
5000 /* Last fragment: Transmit and reset the packet storage counter. */
5001 if (pDesc->legacy.cmd.fEOP)
5002 {
5003 if (pDesc->legacy.cmd.fIC)
5004 {
5005 e1kInsertChecksum(pThis,
5006 (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
5007 pThis->u16TxPktLen,
5008 pDesc->legacy.cmd.u8CSO,
5009 pDesc->legacy.dw3.u8CSS,
5010 0);
5011 }
5012 e1kTransmitFrame(pThis, fOnWorkerThread);
5013 pThis->u16TxPktLen = 0;
5014 }
5015 }
5016 /* Last fragment + failure: free the buffer and reset the storage counter. */
5017 else if (pDesc->legacy.cmd.fEOP)
5018 {
5019 e1kXmitFreeBuf(pThis);
5020 pThis->u16TxPktLen = 0;
5021 }
5022 }
5023 e1kDescReport(pThis, pDesc, addr);
5024 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5025 break;
5026
5027 default:
5028 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
5029 pThis->szPrf, e1kGetDescType(pDesc)));
5030 break;
5031 }
5032
5033 return rc;
5034}
5035
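/**
 * Update the transmit context from a context descriptor.
 *
 * For TSE contexts the MSS is clamped so that a full segment fits into the
 * maximum packet size and the GSO context is (re-)initialized; normal
 * contexts are simply stored for later checksum offloading.
 *
 * @param   pThis       The device state structure.
 * @param   pDesc       Pointer to the context descriptor.
 * @thread  E1000_TX
 */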
5036DECLINLINE(void) e1kUpdateTxContext(PE1KSTATE pThis, E1KTXDESC *pDesc)
5037{
5038 if (pDesc->context.dw2.fTSE)
5039 {
5040 pThis->contextTSE = pDesc->context;
5041 uint32_t cbMaxSegmentSize = pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + 4; /*VTAG*/
5042 if (RT_UNLIKELY(cbMaxSegmentSize > E1K_MAX_TX_PKT_SIZE))
5043 {
5044 pThis->contextTSE.dw3.u16MSS = E1K_MAX_TX_PKT_SIZE - pThis->contextTSE.dw3.u8HDRLEN - 4; /*VTAG*/
5045 LogRelMax(10, ("%s: Transmit packet is too large: %u > %u(max). Adjusted MSS to %u.\n",
5046 pThis->szPrf, cbMaxSegmentSize, E1K_MAX_TX_PKT_SIZE, pThis->contextTSE.dw3.u16MSS));
5047 }
5048 pThis->u32PayRemain = pThis->contextTSE.dw2.u20PAYLEN;
5049 pThis->u16HdrRemain = pThis->contextTSE.dw3.u8HDRLEN;
5050 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
5051 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
5052 }
5053 else
5054 {
5055 pThis->contextNormal = pDesc->context;
5056 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
5057 }
5058 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
5059 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
5060 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
5061 pDesc->context.ip.u8CSS,
5062 pDesc->context.ip.u8CSO,
5063 pDesc->context.ip.u16CSE,
5064 pDesc->context.tu.u8CSS,
5065 pDesc->context.tu.u8CSO,
5066 pDesc->context.tu.u16CSE));
5067}
5068
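/**
 * Locate a complete packet in the TX descriptor cache.
 *
 * Scans the cached descriptors starting at iTxDCurrent: context descriptors
 * update the transmit context, data/legacy descriptors are summed up until a
 * descriptor with EOP is reached.  On success cbTxAlloc is set to the buffer
 * size needed for the packet (or its first segment if it has to be split).
 *
 * @returns true if a complete packet (or a run of empty descriptors) was
 *          found in the cache, false if more descriptors need to be fetched.
 *
 * @param   pThis       The device state structure.
 * @thread  E1000_TX
 */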
5069static bool e1kLocateTxPacket(PE1KSTATE pThis)
5070{
5071 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
5072 pThis->szPrf, pThis->cbTxAlloc));
5073 /* Check if we have located the packet already. */
5074 if (pThis->cbTxAlloc)
5075 {
5076 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
5077 pThis->szPrf, pThis->cbTxAlloc));
5078 return true;
5079 }
5080
5081 bool fTSE = false;
5082 uint32_t cbPacket = 0;
5083
5084 for (int i = pThis->iTxDCurrent; i < pThis->nTxDFetched; ++i)
5085 {
5086 E1KTXDESC *pDesc = &pThis->aTxDescriptors[i];
5087 switch (e1kGetDescType(pDesc))
5088 {
5089 case E1K_DTYP_CONTEXT:
5090 e1kUpdateTxContext(pThis, pDesc);
5091 continue;
5092 case E1K_DTYP_LEGACY:
5093 /* Skip empty descriptors. */
5094 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
5095 break;
5096 cbPacket += pDesc->legacy.cmd.u16Length;
5097 pThis->fGSO = false;
5098 break;
5099 case E1K_DTYP_DATA:
5100 /* Skip empty descriptors. */
5101 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
5102 break;
5103 if (cbPacket == 0)
5104 {
5105 /*
5106 * The first fragment: save IXSM and TXSM options
5107 * as these are only valid in the first fragment.
5108 */
5109 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
5110 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
5111 fTSE = pDesc->data.cmd.fTSE;
5112 /*
5113 * TSE descriptors have VLE bit properly set in
5114 * the first fragment.
5115 */
5116 if (fTSE)
5117 {
5118 pThis->fVTag = pDesc->data.cmd.fVLE;
5119 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5120 }
5121 pThis->fGSO = e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE);
5122 }
5123 cbPacket += pDesc->data.cmd.u20DTALEN;
5124 break;
5125 default:
5126 AssertMsgFailed(("Impossible descriptor type!"));
5127 }
5128 if (pDesc->legacy.cmd.fEOP)
5129 {
5130 /*
5131 * Non-TSE descriptors have VLE bit properly set in
5132 * the last fragment.
5133 */
5134 if (!fTSE)
5135 {
5136 pThis->fVTag = pDesc->data.cmd.fVLE;
5137 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5138 }
5139 /*
5140 * Compute the required buffer size. If we cannot do GSO but still
5141 * have to do segmentation we allocate the first segment only.
5142 */
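            /*
             * Illustrative example (numbers are not from the spec): a TSE
             * packet with u16MSS = 1460 and u8HDRLEN = 54 that cannot be sent
             * as GSO gets a 1514-byte allocation (+4 if a VLAN tag is to be
             * inserted), i.e. just the first segment, while a GSO-capable
             * packet gets a buffer for the entire payload at once.
             */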
5143 pThis->cbTxAlloc = (!fTSE || pThis->fGSO) ?
5144 cbPacket :
5145 RT_MIN(cbPacket, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN);
5146 if (pThis->fVTag)
5147 pThis->cbTxAlloc += 4;
5148 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
5149 pThis->szPrf, pThis->cbTxAlloc));
5150 return true;
5151 }
5152 }
5153
5154 if (cbPacket == 0 && pThis->nTxDFetched - pThis->iTxDCurrent > 0)
5155 {
5156        /* All descriptors were empty; we need to process them as a dummy packet. */
5157 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
5158 pThis->szPrf, pThis->cbTxAlloc));
5159 return true;
5160 }
5161 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d\n",
5162 pThis->szPrf, pThis->cbTxAlloc));
5163 return false;
5164}
5165
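/**
 * Transmit the packet currently held in the TX descriptor cache.
 *
 * Passes the cached descriptors, starting at iTxDCurrent, to e1kXmitDesc()
 * one by one, advancing TDH (with wrap-around) and raising ICR.TXD_LOW when
 * the ring runs below the low threshold, until the end-of-packet descriptor
 * has been processed or an error occurs.
 *
 * @returns VBox status code.
 *
 * @param   pThis            The device state structure.
 * @param   fOnWorkerThread  Whether we're on a worker thread or an EMT.
 * @thread  E1000_TX
 */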
5166static int e1kXmitPacket(PE1KSTATE pThis, bool fOnWorkerThread)
5167{
5168 int rc = VINF_SUCCESS;
5169
5170 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
5171 pThis->szPrf, pThis->iTxDCurrent, pThis->nTxDFetched));
5172
5173 while (pThis->iTxDCurrent < pThis->nTxDFetched)
5174 {
5175 E1KTXDESC *pDesc = &pThis->aTxDescriptors[pThis->iTxDCurrent];
5176 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5177 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
5178 rc = e1kXmitDesc(pThis, pDesc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5179 if (RT_FAILURE(rc))
5180 break;
5181 if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
5182 TDH = 0;
5183 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
5184 if (uLowThreshold != 0 && e1kGetTxLen(pThis) <= uLowThreshold)
5185 {
5186 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5187 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5188 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5189 }
5190 ++pThis->iTxDCurrent;
5191 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
5192 break;
5193 }
5194
5195 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
5196 pThis->szPrf, rc, pThis->iTxDCurrent, pThis->nTxDFetched));
5197 return rc;
5198}
5199
5200#endif /* E1K_WITH_TXD_CACHE */
5201#ifndef E1K_WITH_TXD_CACHE
5202
5203/**
5204 * Transmit pending descriptors.
5205 *
5206 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5207 *
5208 * @param pThis The E1000 state.
5209 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5210 */
5211static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5212{
5213 int rc = VINF_SUCCESS;
5214
5215 /* Check if transmitter is enabled. */
5216 if (!(TCTL & TCTL_EN))
5217 return VINF_SUCCESS;
5218 /*
5219 * Grab the xmit lock of the driver as well as the E1K device state.
5220 */
5221 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5222 if (RT_LIKELY(rc == VINF_SUCCESS))
5223 {
5224 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5225 if (pDrv)
5226 {
5227 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5228 if (RT_FAILURE(rc))
5229 {
5230 e1kCsTxLeave(pThis);
5231 return rc;
5232 }
5233 }
5234 /*
5235 * Process all pending descriptors.
5236 * Note! Do not process descriptors in locked state
5237 */
5238 while (TDH != TDT && !pThis->fLocked)
5239 {
5240 E1KTXDESC desc;
5241 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5242 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
5243
5244 e1kLoadDesc(pThis, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
5245 rc = e1kXmitDesc(pThis, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5246 /* If we failed to transmit descriptor we will try it again later */
5247 if (RT_FAILURE(rc))
5248 break;
5249 if (++TDH * sizeof(desc) >= TDLEN)
5250 TDH = 0;
5251
5252 if (e1kGetTxLen(pThis) <= GET_BITS(TXDCTL, LWTHRESH)*8)
5253 {
5254 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5255 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5256 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5257 }
5258
5259 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5260 }
5261
5262 /// @todo uncomment: pThis->uStatIntTXQE++;
5263 /// @todo uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5264 /*
5265 * Release the lock.
5266 */
5267 if (pDrv)
5268 pDrv->pfnEndXmit(pDrv);
5269 e1kCsTxLeave(pThis);
5270 }
5271
5272 return rc;
5273}
5274
5275#else /* E1K_WITH_TXD_CACHE */
5276
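/**
 * Dump the guest's transmit descriptor ring and the device's TX descriptor
 * cache to the release log (used once when no complete packet can be located
 * in the cache).
 *
 * @param   pThis       The device state structure.
 */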
5277static void e1kDumpTxDCache(PE1KSTATE pThis)
5278{
5279 unsigned i, cDescs = TDLEN / sizeof(E1KTXDESC);
5280 uint32_t tdh = TDH;
5281 LogRel(("E1000: -- Transmit Descriptors (%d total) --\n", cDescs));
5282 for (i = 0; i < cDescs; ++i)
5283 {
5284 E1KTXDESC desc;
5285 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(TDBAH, TDBAL, i),
5286 &desc, sizeof(desc));
5287 if (i == tdh)
5288 LogRel(("E1000: >>> "));
5289 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5290 }
5291 LogRel(("E1000: -- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5292 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE));
5293 if (tdh > pThis->iTxDCurrent)
5294 tdh -= pThis->iTxDCurrent;
5295 else
5296 tdh = cDescs + tdh - pThis->iTxDCurrent;
5297 for (i = 0; i < pThis->nTxDFetched; ++i)
5298 {
5299 if (i == pThis->iTxDCurrent)
5300 LogRel(("E1000: >>> "));
5301 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
5302 }
5303}
5304
5305/**
5306 * Transmit pending descriptors.
5307 *
5308 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5309 *
5310 * @param pThis The E1000 state.
5311 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5312 */
5313static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5314{
5315 int rc = VINF_SUCCESS;
5316
5317 /* Check if transmitter is enabled. */
5318 if (!(TCTL & TCTL_EN))
5319 return VINF_SUCCESS;
5320 /*
5321 * Grab the xmit lock of the driver as well as the E1K device state.
5322 */
5323 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5324 if (pDrv)
5325 {
5326 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5327 if (RT_FAILURE(rc))
5328 return rc;
5329 }
5330
5331 /*
5332 * Process all pending descriptors.
5333 * Note! Do not process descriptors in locked state
5334 */
5335 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5336 if (RT_LIKELY(rc == VINF_SUCCESS))
5337 {
5338 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5339 /*
5340 * fIncomplete is set whenever we try to fetch additional descriptors
5341         * for an incomplete packet. If we fail to locate a complete packet on
5342         * the next iteration, we need to reset the cache or we risk getting
5343         * stuck in this loop forever.
5344 */
5345 bool fIncomplete = false;
5346 while (!pThis->fLocked && e1kTxDLazyLoad(pThis))
5347 {
5348 while (e1kLocateTxPacket(pThis))
5349 {
5350 fIncomplete = false;
5351 /* Found a complete packet, allocate it. */
5352 rc = e1kXmitAllocBuf(pThis, pThis->fGSO);
5353 /* If we're out of bandwidth we'll come back later. */
5354 if (RT_FAILURE(rc))
5355 goto out;
5356 /* Copy the packet to allocated buffer and send it. */
5357 rc = e1kXmitPacket(pThis, fOnWorkerThread);
5358 /* If we're out of bandwidth we'll come back later. */
5359 if (RT_FAILURE(rc))
5360 goto out;
5361 }
5362 uint8_t u8Remain = pThis->nTxDFetched - pThis->iTxDCurrent;
5363 if (RT_UNLIKELY(fIncomplete))
5364 {
5365 static bool fTxDCacheDumped = false;
5366 /*
5367 * The descriptor cache is full, but we were unable to find
5368 * a complete packet in it. Drop the cache and hope that
5369                 * the guest driver can recover from the network card error.
5370 */
5371 LogRel(("%s: No complete packets in%s TxD cache! "
5372 "Fetched=%d, current=%d, TX len=%d.\n",
5373 pThis->szPrf,
5374 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5375 pThis->nTxDFetched, pThis->iTxDCurrent,
5376 e1kGetTxLen(pThis)));
5377 if (!fTxDCacheDumped)
5378 {
5379 fTxDCacheDumped = true;
5380 e1kDumpTxDCache(pThis);
5381 }
5382 pThis->iTxDCurrent = pThis->nTxDFetched = 0;
5383 /*
5384 * Returning an error at this point means Guru in R0
5385 * (see @bugref{6428}).
5386 */
5387# ifdef IN_RING3
5388 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5389# else /* !IN_RING3 */
5390 rc = VINF_IOM_R3_MMIO_WRITE;
5391# endif /* !IN_RING3 */
5392 goto out;
5393 }
5394 if (u8Remain > 0)
5395 {
5396 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5397 "%d more are available\n",
5398 pThis->szPrf, pThis->iTxDCurrent, u8Remain,
5399 e1kGetTxLen(pThis) - u8Remain));
5400
5401 /*
5402 * A packet was partially fetched. Move incomplete packet to
5403 * the beginning of cache buffer, then load more descriptors.
5404 */
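                /*
                 * Illustrative example: with nTxDFetched = 8 and
                 * iTxDCurrent = 5, the three descriptors of the unfinished
                 * packet are moved to cache slots 0..2 and additional
                 * descriptors are fetched in after them.
                 */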
5405 memmove(pThis->aTxDescriptors,
5406 &pThis->aTxDescriptors[pThis->iTxDCurrent],
5407 u8Remain * sizeof(E1KTXDESC));
5408 pThis->iTxDCurrent = 0;
5409 pThis->nTxDFetched = u8Remain;
5410 e1kTxDLoadMore(pThis);
5411 fIncomplete = true;
5412 }
5413 else
5414 pThis->nTxDFetched = 0;
5415 pThis->iTxDCurrent = 0;
5416 }
5417 if (!pThis->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5418 {
5419 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5420 pThis->szPrf));
5421 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5422 }
5423out:
5424 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5425
5426 /// @todo uncomment: pThis->uStatIntTXQE++;
5427 /// @todo uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5428
5429 e1kCsTxLeave(pThis);
5430 }
5431
5432
5433 /*
5434 * Release the lock.
5435 */
5436 if (pDrv)
5437 pDrv->pfnEndXmit(pDrv);
5438 return rc;
5439}
5440
5441#endif /* E1K_WITH_TXD_CACHE */
5442#ifdef IN_RING3
5443
5444/**
5445 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5446 */
5447static DECLCALLBACK(void) e1kR3NetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5448{
5449 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
5450 /* Resume suspended transmission */
5451 STATUS &= ~STATUS_TXOFF;
5452 e1kXmitPending(pThis, true /*fOnWorkerThread*/);
5453}
5454
5455/**
5456 * Callback for consuming from transmit queue. It gets called in R3 whenever
5457 * we enqueue something in R0/GC.
5458 *
5459 * @returns true
5460 * @param pDevIns Pointer to device instance structure.
5461 * @param pItem Pointer to the element being dequeued (not used).
5462 * @thread ???
5463 */
5464static DECLCALLBACK(bool) e1kTxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5465{
5466 NOREF(pItem);
5467 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5468 E1kLog2(("%s e1kTxQueueConsumer:\n", pThis->szPrf));
5469
5470 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/); NOREF(rc);
5471#ifndef DEBUG_andy /** @todo r=andy Happens for me a lot, mute this for me. */
5472 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
5473#endif
5474 return true;
5475}
5476
5477/**
5478 * Handler for the wakeup signaller queue.
5479 */
5480static DECLCALLBACK(bool) e1kCanRxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5481{
5482 RT_NOREF(pItem);
5483 e1kWakeupReceive(pDevIns);
5484 return true;
5485}
5486
5487#endif /* IN_RING3 */
5488
5489/**
5490 * Write handler for Transmit Descriptor Tail register.
5491 *
5492 * @param pThis The device state structure.
5493 * @param offset Register offset in memory-mapped frame.
5494 * @param index Register index in register array.
5495 * @param value The value to store.
5497 * @thread EMT
5498 */
5499static int e1kRegWriteTDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5500{
5501 int rc = e1kRegWriteDefault(pThis, offset, index, value);
5502
5503 /* All descriptors starting with head and not including tail belong to us. */
5504 /* Process them. */
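    /*
     * Illustrative example: with TDH = 2 and TDT = 5 the guest has handed
     * descriptors 2, 3 and 4 over to the device; descriptor 5 (the tail)
     * still belongs to the guest driver.
     */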
5505 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5506 pThis->szPrf, TDBAL, TDBAH, TDLEN, TDH, TDT));
5507
5508 /* Ignore TDT writes when the link is down. */
5509 if (TDH != TDT && (STATUS & STATUS_LU))
5510 {
5511 Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", TDH, TDT, e1kGetTxLen(pThis)));
5512 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5513 pThis->szPrf, e1kGetTxLen(pThis)));
5514
5515 /* Transmit pending packets if possible, defer it if we cannot do it
5516 in the current context. */
5517#ifdef E1K_TX_DELAY
5518 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5519 if (RT_LIKELY(rc == VINF_SUCCESS))
5520 {
5521 if (!TMTimerIsActive(pThis->CTX_SUFF(pTXDTimer)))
5522 {
5523#ifdef E1K_INT_STATS
5524 pThis->u64ArmedAt = RTTimeNanoTS();
5525#endif
5526 e1kArmTimer(pThis, pThis->CTX_SUFF(pTXDTimer), E1K_TX_DELAY);
5527 }
5528 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayed);
5529 e1kCsTxLeave(pThis);
5530 return rc;
5531 }
5532 /* We failed to enter the TX critical section -- transmit as usual. */
5533#endif /* E1K_TX_DELAY */
5534#ifndef IN_RING3
5535 if (!pThis->CTX_SUFF(pDrv))
5536 {
5537 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pTxQueue));
5538 if (RT_UNLIKELY(pItem))
5539 PDMQueueInsert(pThis->CTX_SUFF(pTxQueue), pItem);
5540 }
5541 else
5542#endif
5543 {
5544 rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
5545 if (rc == VERR_TRY_AGAIN)
5546 rc = VINF_SUCCESS;
5547 else if (rc == VERR_SEM_BUSY)
5548 rc = VINF_IOM_R3_MMIO_WRITE;
5549 AssertRC(rc);
5550 }
5551 }
5552
5553 return rc;
5554}
5555
5556/**
5557 * Write handler for Multicast Table Array registers.
5558 *
5559 * @param pThis The device state structure.
5560 * @param offset Register offset in memory-mapped frame.
5561 * @param index Register index in register array.
5562 * @param value The value to store.
5563 * @thread EMT
5564 */
5565static int e1kRegWriteMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5566{
5567 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5568 pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])] = value;
5569
5570 return VINF_SUCCESS;
5571}
5572
5573/**
5574 * Read handler for Multicast Table Array registers.
5575 *
5576 * @returns VBox status code.
5577 *
5578 * @param pThis The device state structure.
5579 * @param offset Register offset in memory-mapped frame.
5580 * @param index Register index in register array.
5581 * @thread EMT
5582 */
5583static int e1kRegReadMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5584{
5585    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5586 *pu32Value = pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])];
5587
5588 return VINF_SUCCESS;
5589}
5590
5591/**
5592 * Write handler for Receive Address registers.
5593 *
5594 * @param pThis The device state structure.
5595 * @param offset Register offset in memory-mapped frame.
5596 * @param index Register index in register array.
5597 * @param value The value to store.
5598 * @thread EMT
5599 */
5600static int e1kRegWriteRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5601{
5602 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5603 pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
5604
5605 return VINF_SUCCESS;
5606}
5607
5608/**
5609 * Read handler for Receive Address registers.
5610 *
5611 * @returns VBox status code.
5612 *
5613 * @param pThis The device state structure.
5614 * @param offset Register offset in memory-mapped frame.
5615 * @param index Register index in register array.
5616 * @thread EMT
5617 */
5618static int e1kRegReadRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5619{
5620    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5621 *pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
5622
5623 return VINF_SUCCESS;
5624}
5625
5626/**
5627 * Write handler for VLAN Filter Table Array registers.
5628 *
5629 * @param pThis The device state structure.
5630 * @param offset Register offset in memory-mapped frame.
5631 * @param index Register index in register array.
5632 * @param value The value to store.
5633 * @thread EMT
5634 */
5635static int e1kRegWriteVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5636{
5637 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VINF_SUCCESS);
5638 pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])] = value;
5639
5640 return VINF_SUCCESS;
5641}
5642
5643/**
5644 * Read handler for VLAN Filter Table Array registers.
5645 *
5646 * @returns VBox status code.
5647 *
5648 * @param pThis The device state structure.
5649 * @param offset Register offset in memory-mapped frame.
5650 * @param index Register index in register array.
5651 * @thread EMT
5652 */
5653static int e1kRegReadVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5654{
5655    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VERR_DEV_IO_ERROR);
5656 *pu32Value = pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])];
5657
5658 return VINF_SUCCESS;
5659}
5660
5661/**
5662 * Read handler for unimplemented registers.
5663 *
5664 * Merely reports reads from unimplemented registers.
5665 *
5666 * @returns VBox status code.
5667 *
5668 * @param pThis The device state structure.
5669 * @param offset Register offset in memory-mapped frame.
5670 * @param index Register index in register array.
5671 * @thread EMT
5672 */
5673static int e1kRegReadUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5674{
5675 RT_NOREF3(pThis, offset, index);
5676 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5677 pThis->szPrf, offset, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5678 *pu32Value = 0;
5679
5680 return VINF_SUCCESS;
5681}
5682
5683/**
5684 * Default register read handler with automatic clear operation.
5685 *
5686 * Retrieves the value of register from register array in device state structure.
5687 * Then resets all bits.
5688 *
5689 * @remarks Masking and shifting of partial accesses is done in the caller;
5690 *          this handler always operates on the whole 32-bit register.
5691 *
5692 * @returns VBox status code.
5693 *
5694 * @param pThis The device state structure.
5695 * @param offset Register offset in memory-mapped frame.
5696 * @param index Register index in register array.
5697 * @thread EMT
5698 */
5699static int e1kRegReadAutoClear(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5700{
5701 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5702 int rc = e1kRegReadDefault(pThis, offset, index, pu32Value);
5703 pThis->auRegs[index] = 0;
5704
5705 return rc;
5706}
5707
5708/**
5709 * Default register read handler.
5710 *
5711 * Retrieves the value of register from register array in device state structure.
5712 * Bits corresponding to 0s in 'readable' mask will always read as 0s.
5713 *
5714 * @remarks Masking and shifting of partial accesses is done in the caller;
5715 *          this handler always operates on the whole 32-bit register.
5716 *
5717 * @returns VBox status code.
5718 *
5719 * @param pThis The device state structure.
5720 * @param offset Register offset in memory-mapped frame.
5721 * @param index Register index in register array.
5722 * @thread EMT
5723 */
5724static int e1kRegReadDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5725{
5726 RT_NOREF_PV(offset);
5727
5728 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5729 *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
5730
5731 return VINF_SUCCESS;
5732}
5733
5734/**
5735 * Write handler for unimplemented registers.
5736 *
5737 * Merely reports writes to unimplemented registers.
5738 *
5739 * @param pThis The device state structure.
5740 * @param offset Register offset in memory-mapped frame.
5741 * @param index Register index in register array.
5742 * @param value The value to store.
5743 * @thread EMT
5744 */
5745
5746static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5747{
5748 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
5749
5750 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
5751 pThis->szPrf, offset, value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5752
5753 return VINF_SUCCESS;
5754}
5755
5756/**
5757 * Default register write handler.
5758 *
5759 * Stores the value to the register array in device state structure. Only bits
5760 * corresponding to 1s in the 'writable' mask will be stored.
5761 *
5762 * @returns VBox status code.
5763 *
5764 * @param pThis The device state structure.
5765 * @param offset Register offset in memory-mapped frame.
5766 * @param index Register index in register array.
5767 * @param value The value to store.
5769 * @thread EMT
5770 */
5771
5772static int e1kRegWriteDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5773{
5774 RT_NOREF_PV(offset);
5775
5776 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5777 pThis->auRegs[index] = (value & g_aE1kRegMap[index].writable)
5778 | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
5779
5780 return VINF_SUCCESS;
5781}
5782
5783/**
5784 * Search register table for matching register.
5785 *
5786 * @returns Index in the register table or -1 if not found.
5787 *
5788 * @param offReg Register offset in memory-mapped region.
5789 * @thread EMT
5790 */
5791static int e1kRegLookup(uint32_t offReg)
5792{
5793
5794#if 0
5795 int index;
5796
5797 for (index = 0; index < E1K_NUM_OF_REGS; index++)
5798 {
5799 if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
5800 {
5801 return index;
5802 }
5803 }
5804#else
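    /*
     * The first E1K_NUM_OF_BINARY_SEARCHABLE entries of g_aE1kRegMap are
     * assumed to be sorted by offset so they can be binary-searched; the
     * remaining entries are scanned linearly further down.
     */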
5805 int iStart = 0;
5806 int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
5807 for (;;)
5808 {
5809 int i = (iEnd - iStart) / 2 + iStart;
5810 uint32_t offCur = g_aE1kRegMap[i].offset;
5811 if (offReg < offCur)
5812 {
5813 if (i == iStart)
5814 break;
5815 iEnd = i;
5816 }
5817 else if (offReg >= offCur + g_aE1kRegMap[i].size)
5818 {
5819 i++;
5820 if (i == iEnd)
5821 break;
5822 iStart = i;
5823 }
5824 else
5825 return i;
5826 Assert(iEnd > iStart);
5827 }
5828
5829 for (unsigned i = E1K_NUM_OF_BINARY_SEARCHABLE; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5830 if (offReg - g_aE1kRegMap[i].offset < g_aE1kRegMap[i].size)
5831 return i;
5832
5833# ifdef VBOX_STRICT
5834 for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5835 Assert(offReg - g_aE1kRegMap[i].offset >= g_aE1kRegMap[i].size);
5836# endif
5837
5838#endif
5839
5840 return -1;
5841}
5842
5843/**
5844 * Handle unaligned register read operation.
5845 *
5846 * Looks up and calls appropriate handler.
5847 *
5848 * @returns VBox status code.
5849 *
5850 * @param pThis The device state structure.
5851 * @param offReg Register offset in memory-mapped frame.
5852 * @param pv Where to store the result.
5853 * @param cb Number of bytes to read.
5854 * @thread EMT
5855 * @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
5856 * accesses we have to take care of that ourselves.
5857 */
5858static int e1kRegReadUnaligned(PE1KSTATE pThis, uint32_t offReg, void *pv, uint32_t cb)
5859{
5860 uint32_t u32 = 0;
5861 uint32_t shift;
5862 int rc = VINF_SUCCESS;
5863 int index = e1kRegLookup(offReg);
5864#ifdef LOG_ENABLED
5865 char buf[9];
5866#endif
5867
5868 /*
5869 * From the spec:
5870 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5871 * double word) is ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5872 */
5873
5874 /*
5875     * To be able to read bytes and short words we convert them to properly
5876 * shifted 32-bit words and masks. The idea is to keep register-specific
5877 * handlers simple. Most accesses will be 32-bit anyway.
5878 */
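    /*
     * Worked example (illustrative): a 2-byte read at register offset + 2
     * yields shift = 16 and mask = 0xFFFF0000; the full 32-bit register is
     * read, masked, and the result is shifted back down by 16 bits.
     */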
5879 uint32_t mask;
5880 switch (cb)
5881 {
5882 case 4: mask = 0xFFFFFFFF; break;
5883 case 2: mask = 0x0000FFFF; break;
5884 case 1: mask = 0x000000FF; break;
5885 default:
5886 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
5887 "unsupported op size: offset=%#10x cb=%#10x\n", offReg, cb);
5888 }
5889 if (index != -1)
5890 {
5891 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
5892 if (g_aE1kRegMap[index].readable)
5893 {
5894 /* Make the mask correspond to the bits we are about to read. */
5895 shift = (offReg - g_aE1kRegMap[index].offset) % sizeof(uint32_t) * 8;
5896 mask <<= shift;
5897 if (!mask)
5898 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
5899 /*
5900 * Read it. Pass the mask so the handler knows what has to be read.
5901 * Mask out irrelevant bits.
5902 */
5903 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5904 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5905 return rc;
5906 //pThis->fDelayInts = false;
5907 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5908 //pThis->iStatIntLostOne = 0;
5909 rc = g_aE1kRegMap[index].pfnRead(pThis, offReg & 0xFFFFFFFC, index, &u32);
5910 u32 &= mask;
5911 //e1kCsLeave(pThis);
5912 E1kLog2(("%s At %08X read %s from %s (%s)\n",
5913 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5914 Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
5915 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5916 /* Shift back the result. */
5917 u32 >>= shift;
5918 }
5919 else
5920 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
5921 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5922 if (IOM_SUCCESS(rc))
5923 STAM_COUNTER_INC(&pThis->aStatRegReads[index]);
5924 }
5925 else
5926 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
5927 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf)));
5928
5929 memcpy(pv, &u32, cb);
5930 return rc;
5931}
5932
5933/**
5934 * Handle 4 byte aligned and sized read operation.
5935 *
5936 * Looks up and calls appropriate handler.
5937 *
5938 * @returns VBox status code.
5939 *
5940 * @param pThis The device state structure.
5941 * @param offReg Register offset in memory-mapped frame.
5942 * @param pu32 Where to store the result.
5943 * @thread EMT
5944 */
5945static int e1kRegReadAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t *pu32)
5946{
5947 Assert(!(offReg & 3));
5948
5949 /*
5950 * Lookup the register and check that it's readable.
5951 */
5952 int rc = VINF_SUCCESS;
5953 int idxReg = e1kRegLookup(offReg);
5954 if (RT_LIKELY(idxReg != -1))
5955 {
5956 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
5957 if (RT_UNLIKELY(g_aE1kRegMap[idxReg].readable))
5958 {
5959 /*
5960 * Read it. Pass the mask so the handler knows what has to be read.
5961 * Mask out irrelevant bits.
5962 */
5963 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5964 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
5965 // return rc;
5966 //pThis->fDelayInts = false;
5967 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5968 //pThis->iStatIntLostOne = 0;
5969 rc = g_aE1kRegMap[idxReg].pfnRead(pThis, offReg & 0xFFFFFFFC, idxReg, pu32);
5970 //e1kCsLeave(pThis);
5971 Log6(("%s At %08X read %08X from %s (%s)\n",
5972 pThis->szPrf, offReg, *pu32, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5973 if (IOM_SUCCESS(rc))
5974 STAM_COUNTER_INC(&pThis->aStatRegReads[idxReg]);
5975 }
5976 else
5977 E1kLog(("%s At %08X read attempt from non-readable register %s (%s)\n",
5978 pThis->szPrf, offReg, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5979 }
5980 else
5981 E1kLog(("%s At %08X read attempt from non-existing register\n", pThis->szPrf, offReg));
5982 return rc;
5983}
5984
5985/**
5986 * Handle 4 byte sized and aligned register write operation.
5987 *
5988 * Looks up and calls appropriate handler.
5989 *
5990 * @returns VBox status code.
5991 *
5992 * @param pThis The device state structure.
5993 * @param offReg Register offset in memory-mapped frame.
5994 * @param u32Value The value to write.
5995 * @thread EMT
5996 */
5997static int e1kRegWriteAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
5998{
5999 int rc = VINF_SUCCESS;
6000 int index = e1kRegLookup(offReg);
6001 if (RT_LIKELY(index != -1))
6002 {
6003 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6004 if (RT_LIKELY(g_aE1kRegMap[index].writable))
6005 {
6006 /*
6007 * Write it. Pass the mask so the handler knows what has to be written.
6008 * Mask out irrelevant bits.
6009 */
6010 Log6(("%s At %08X write %08X to %s (%s)\n",
6011 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6012 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
6013 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
6014 // return rc;
6015 //pThis->fDelayInts = false;
6016 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6017 //pThis->iStatIntLostOne = 0;
6018 rc = g_aE1kRegMap[index].pfnWrite(pThis, offReg, index, u32Value);
6019 //e1kCsLeave(pThis);
6020 }
6021 else
6022 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
6023 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6024 if (IOM_SUCCESS(rc))
6025 STAM_COUNTER_INC(&pThis->aStatRegWrites[index]);
6026 }
6027 else
6028 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
6029 pThis->szPrf, offReg, u32Value));
6030 return rc;
6031}
6032
6033
6034/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
6035
6036/**
6037 * @callback_method_impl{FNIOMMMIOREAD}
6038 */
6039PDMBOTHCBDECL(int) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
6040{
6041 RT_NOREF2(pvUser, cb);
6042 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6043 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6044
6045 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
6046 Assert(offReg < E1K_MM_SIZE);
6047 Assert(cb == 4);
6048 Assert(!(GCPhysAddr & 3));
6049
6050 int rc = e1kRegReadAlignedU32(pThis, offReg, (uint32_t *)pv);
6051
6052 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6053 return rc;
6054}
6055
6056/**
6057 * @callback_method_impl{FNIOMMMIOWRITE}
6058 */
6059PDMBOTHCBDECL(int) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
6060{
6061 RT_NOREF2(pvUser, cb);
6062 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6063 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6064
6065 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
6066 Assert(offReg < E1K_MM_SIZE);
6067 Assert(cb == 4);
6068 Assert(!(GCPhysAddr & 3));
6069
6070 int rc = e1kRegWriteAlignedU32(pThis, offReg, *(uint32_t const *)pv);
6071
6072 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6073 return rc;
6074}
6075
6076/**
6077 * @callback_method_impl{FNIOMIOPORTIN}
6078 */
6079PDMBOTHCBDECL(int) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t *pu32, unsigned cb)
6080{
6081 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6082 int rc;
6083 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIORead), a);
6084 RT_NOREF_PV(pvUser);
6085
6086 uPort -= pThis->IOPortBase;
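    /*
     * The I/O space is a two-register window: the guest first writes a
     * register offset to IOADDR (base + 0) and then accesses the selected
     * register through IODATA (base + 4).
     */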
6087 if (RT_LIKELY(cb == 4))
6088 switch (uPort)
6089 {
6090 case 0x00: /* IOADDR */
6091 *pu32 = pThis->uSelectedReg;
6092 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6093 rc = VINF_SUCCESS;
6094 break;
6095
6096 case 0x04: /* IODATA */
6097 if (!(pThis->uSelectedReg & 3))
6098 rc = e1kRegReadAlignedU32(pThis, pThis->uSelectedReg, pu32);
6099 else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
6100 rc = e1kRegReadUnaligned(pThis, pThis->uSelectedReg, pu32, cb);
6101 if (rc == VINF_IOM_R3_MMIO_READ)
6102 rc = VINF_IOM_R3_IOPORT_READ;
6103 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6104 break;
6105
6106 default:
6107 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", pThis->szPrf, uPort));
6108 //rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
6109 rc = VINF_SUCCESS;
6110 }
6111 else
6112 {
6113        E1kLog(("%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb));
6114 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb);
6115 }
6116 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIORead), a);
6117 return rc;
6118}
6119
6120
6121/**
6122 * @callback_method_impl{FNIOMIOPORTOUT}
6123 */
6124PDMBOTHCBDECL(int) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t u32, unsigned cb)
6125{
6126 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6127 int rc;
6128 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6129 RT_NOREF_PV(pvUser);
6130
6131 E1kLog2(("%s e1kIOPortOut: uPort=%RTiop value=%08x\n", pThis->szPrf, uPort, u32));
6132 if (RT_LIKELY(cb == 4))
6133 {
6134 uPort -= pThis->IOPortBase;
6135 switch (uPort)
6136 {
6137 case 0x00: /* IOADDR */
6138 pThis->uSelectedReg = u32;
6139 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
6140 rc = VINF_SUCCESS;
6141 break;
6142
6143 case 0x04: /* IODATA */
6144 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
6145 if (RT_LIKELY(!(pThis->uSelectedReg & 3)))
6146 {
6147 rc = e1kRegWriteAlignedU32(pThis, pThis->uSelectedReg, u32);
6148 if (rc == VINF_IOM_R3_MMIO_WRITE)
6149 rc = VINF_IOM_R3_IOPORT_WRITE;
6150 }
6151 else
6152 rc = PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
6153 "Spec violation: misaligned offset: %#10x, ignored.\n", pThis->uSelectedReg);
6154 break;
6155
6156 default:
6157 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", pThis->szPrf, uPort));
6158 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", uPort);
6159 }
6160 }
6161 else
6162 {
6163 E1kLog(("%s e1kIOPortOut: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb));
6164 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s: invalid op size: uPort=%RTiop cb=%#x\n", pThis->szPrf, uPort, cb);
6165 }
6166
6167 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6168 return rc;
6169}
6170
6171#ifdef IN_RING3
6172
6173/**
6174 * Dump complete device state to log.
6175 *
6176 * @param pThis Pointer to device state.
6177 */
6178static void e1kDumpState(PE1KSTATE pThis)
6179{
6180 RT_NOREF(pThis);
6181 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6182 E1kLog2(("%s: %8.8s = %08x\n", pThis->szPrf, g_aE1kRegMap[i].abbrev, pThis->auRegs[i]));
6183# ifdef E1K_INT_STATS
6184 LogRel(("%s: Interrupt attempts: %d\n", pThis->szPrf, pThis->uStatIntTry));
6185 LogRel(("%s: Interrupts raised : %d\n", pThis->szPrf, pThis->uStatInt));
6186 LogRel(("%s: Interrupts lowered: %d\n", pThis->szPrf, pThis->uStatIntLower));
6187 LogRel(("%s: ICR outside ISR : %d\n", pThis->szPrf, pThis->uStatNoIntICR));
6188 LogRel(("%s: IMS raised ints : %d\n", pThis->szPrf, pThis->uStatIntIMS));
6189 LogRel(("%s: Interrupts skipped: %d\n", pThis->szPrf, pThis->uStatIntSkip));
6190 LogRel(("%s: Masked interrupts : %d\n", pThis->szPrf, pThis->uStatIntMasked));
6191 LogRel(("%s: Early interrupts : %d\n", pThis->szPrf, pThis->uStatIntEarly));
6192 LogRel(("%s: Late interrupts : %d\n", pThis->szPrf, pThis->uStatIntLate));
6193 LogRel(("%s: Lost interrupts : %d\n", pThis->szPrf, pThis->iStatIntLost));
6194 LogRel(("%s: Interrupts by RX : %d\n", pThis->szPrf, pThis->uStatIntRx));
6195 LogRel(("%s: Interrupts by TX : %d\n", pThis->szPrf, pThis->uStatIntTx));
6196 LogRel(("%s: Interrupts by ICS : %d\n", pThis->szPrf, pThis->uStatIntICS));
6197 LogRel(("%s: Interrupts by RDTR: %d\n", pThis->szPrf, pThis->uStatIntRDTR));
6198 LogRel(("%s: Interrupts by RDMT: %d\n", pThis->szPrf, pThis->uStatIntRXDMT0));
6199 LogRel(("%s: Interrupts by TXQE: %d\n", pThis->szPrf, pThis->uStatIntTXQE));
6200 LogRel(("%s: TX int delay asked: %d\n", pThis->szPrf, pThis->uStatTxIDE));
6201 LogRel(("%s: TX delayed: %d\n", pThis->szPrf, pThis->uStatTxDelayed));
6202 LogRel(("%s: TX delay expired: %d\n", pThis->szPrf, pThis->uStatTxDelayExp));
6203 LogRel(("%s: TX no report asked: %d\n", pThis->szPrf, pThis->uStatTxNoRS));
6204 LogRel(("%s: TX abs timer expd : %d\n", pThis->szPrf, pThis->uStatTAD));
6205 LogRel(("%s: TX int timer expd : %d\n", pThis->szPrf, pThis->uStatTID));
6206 LogRel(("%s: RX abs timer expd : %d\n", pThis->szPrf, pThis->uStatRAD));
6207 LogRel(("%s: RX int timer expd : %d\n", pThis->szPrf, pThis->uStatRID));
6208 LogRel(("%s: TX CTX descriptors: %d\n", pThis->szPrf, pThis->uStatDescCtx));
6209 LogRel(("%s: TX DAT descriptors: %d\n", pThis->szPrf, pThis->uStatDescDat));
6210 LogRel(("%s: TX LEG descriptors: %d\n", pThis->szPrf, pThis->uStatDescLeg));
6211 LogRel(("%s: Received frames : %d\n", pThis->szPrf, pThis->uStatRxFrm));
6212 LogRel(("%s: Transmitted frames: %d\n", pThis->szPrf, pThis->uStatTxFrm));
6213 LogRel(("%s: TX frames up to 1514: %d\n", pThis->szPrf, pThis->uStatTx1514));
6214 LogRel(("%s: TX frames up to 2962: %d\n", pThis->szPrf, pThis->uStatTx2962));
6215 LogRel(("%s: TX frames up to 4410: %d\n", pThis->szPrf, pThis->uStatTx4410));
6216 LogRel(("%s: TX frames up to 5858: %d\n", pThis->szPrf, pThis->uStatTx5858));
6217 LogRel(("%s: TX frames up to 7306: %d\n", pThis->szPrf, pThis->uStatTx7306));
6218 LogRel(("%s: TX frames up to 8754: %d\n", pThis->szPrf, pThis->uStatTx8754));
6219 LogRel(("%s: TX frames up to 16384: %d\n", pThis->szPrf, pThis->uStatTx16384));
6220 LogRel(("%s: TX frames up to 32768: %d\n", pThis->szPrf, pThis->uStatTx32768));
6221 LogRel(("%s: Larger TX frames : %d\n", pThis->szPrf, pThis->uStatTxLarge));
6222 LogRel(("%s: Max TX Delay : %lld\n", pThis->szPrf, pThis->uStatMaxTxDelay));
6223# endif /* E1K_INT_STATS */
6224}
6225
6226/**
6227 * @callback_method_impl{FNPCIIOREGIONMAP}
6228 */
6229static DECLCALLBACK(int) e1kMap(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
6230 RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
6231{
6232 RT_NOREF(pPciDev, iRegion);
6233 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE *);
6234 int rc;
6235
6236 switch (enmType)
6237 {
6238 case PCI_ADDRESS_SPACE_IO:
6239 pThis->IOPortBase = (RTIOPORT)GCPhysAddress;
6240 rc = PDMDevHlpIOPortRegister(pDevIns, pThis->IOPortBase, cb, NULL /*pvUser*/,
6241 e1kIOPortOut, e1kIOPortIn, NULL, NULL, "E1000");
6242 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6243 rc = PDMDevHlpIOPortRegisterR0(pDevIns, pThis->IOPortBase, cb, NIL_RTR0PTR /*pvUser*/,
6244 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6245 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6246 rc = PDMDevHlpIOPortRegisterRC(pDevIns, pThis->IOPortBase, cb, NIL_RTRCPTR /*pvUser*/,
6247 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6248 break;
6249
6250 case PCI_ADDRESS_SPACE_MEM:
6251 /*
6252 * From the spec:
6253 * For registers that should be accessed as 32-bit double words,
6254             * partial writes (less than a 32-bit double word) are ignored.
6255 * Partial reads return all 32 bits of data regardless of the
6256 * byte enables.
6257 */
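            /* The IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD flags passed to the
             * MMIO registration below are presumably how IOM is asked to enforce this dword-only
             * access pattern on our behalf. */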
6258#ifdef E1K_WITH_PREREG_MMIO
6259 pThis->addrMMReg = GCPhysAddress;
6260 if (GCPhysAddress == NIL_RTGCPHYS)
6261 rc = VINF_SUCCESS;
6262 else
6263 {
6264 Assert(!(GCPhysAddress & 7));
6265 rc = PDMDevHlpMMIOExMap(pDevIns, pPciDev, iRegion, GCPhysAddress);
6266 }
6267#else
6268 pThis->addrMMReg = GCPhysAddress; Assert(!(GCPhysAddress & 7));
6269 rc = PDMDevHlpMMIORegister(pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
6270 IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD,
6271 e1kMMIOWrite, e1kMMIORead, "E1000");
6272 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6273 rc = PDMDevHlpMMIORegisterR0(pDevIns, GCPhysAddress, cb, NIL_RTR0PTR /*pvUser*/,
6274 "e1kMMIOWrite", "e1kMMIORead");
6275 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6276 rc = PDMDevHlpMMIORegisterRC(pDevIns, GCPhysAddress, cb, NIL_RTRCPTR /*pvUser*/,
6277 "e1kMMIOWrite", "e1kMMIORead");
6278#endif
6279 break;
6280
6281 default:
6282 /* We should never get here */
6283 AssertMsgFailed(("Invalid PCI address space param in map callback"));
6284 rc = VERR_INTERNAL_ERROR;
6285 break;
6286 }
6287 return rc;
6288}
6289
6290
6291/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
6292
6293/**
6294 * Check if the device can receive data now.
6295 * This must be called before the pfnReceive() method is called.
6296 *
6297 * @returns VBox status code - VINF_SUCCESS when there is buffer space available, VERR_NET_NO_BUFFER_SPACE otherwise.
6298 * @param   pThis           Pointer to the device state structure.
6299 * @thread EMT
6300 */
6301static int e1kCanReceive(PE1KSTATE pThis)
6302{
6303#ifndef E1K_WITH_RXD_CACHE
6304 size_t cb;
6305
6306 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6307 return VERR_NET_NO_BUFFER_SPACE;
6308
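    /* The available buffer space is derived from the ring pointers below; as an illustration,
     * with RDLEN/sizeof(E1KRXDESC) == 8 descriptors, RDH=6 and RDT=2 the wrap-around branch
     * yields (8 - 6 + 2) = 4 descriptors' worth of buffer space. */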
6309 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6310 {
6311 E1KRXDESC desc;
6312 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6313 &desc, sizeof(desc));
6314 if (desc.status.fDD)
6315 cb = 0;
6316 else
6317 cb = pThis->u16RxBSize;
6318 }
6319 else if (RDH < RDT)
6320 cb = (RDT - RDH) * pThis->u16RxBSize;
6321 else if (RDH > RDT)
6322 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pThis->u16RxBSize;
6323 else
6324 {
6325 cb = 0;
6326 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6327 }
6328 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6329 pThis->szPrf, RDH, RDT, RDLEN, pThis->u16RxBSize, cb));
6330
6331 e1kCsRxLeave(pThis);
6332 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6333#else /* E1K_WITH_RXD_CACHE */
6334 int rc = VINF_SUCCESS;
6335
6336 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6337 return VERR_NET_NO_BUFFER_SPACE;
6338
6339 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6340 {
6341 E1KRXDESC desc;
6342 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6343 &desc, sizeof(desc));
6344 if (desc.status.fDD)
6345 rc = VERR_NET_NO_BUFFER_SPACE;
6346 }
6347 else if (e1kRxDIsCacheEmpty(pThis) && RDH == RDT)
6348 {
6349 /* Cache is empty, so is the RX ring. */
6350 rc = VERR_NET_NO_BUFFER_SPACE;
6351 }
6352 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
6353 " u16RxBSize=%d rc=%Rrc\n", pThis->szPrf,
6354 e1kRxDInCache(pThis), RDH, RDT, RDLEN, pThis->u16RxBSize, rc));
6355
6356 e1kCsRxLeave(pThis);
6357 return rc;
6358#endif /* E1K_WITH_RXD_CACHE */
6359}
6360
6361/**
6362 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6363 */
6364static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6365{
6366 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6367 int rc = e1kCanReceive(pThis);
6368
6369 if (RT_SUCCESS(rc))
6370 return VINF_SUCCESS;
6371 if (RT_UNLIKELY(cMillies == 0))
6372 return VERR_NET_NO_BUFFER_SPACE;
6373
6374 rc = VERR_INTERRUPTED;
6375 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, true);
6376 STAM_PROFILE_START(&pThis->StatRxOverflow, a);
6377 VMSTATE enmVMState;
6378 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pThis->CTX_SUFF(pDevIns))) == VMSTATE_RUNNING
6379 || enmVMState == VMSTATE_RUNNING_LS))
6380 {
6381 int rc2 = e1kCanReceive(pThis);
6382 if (RT_SUCCESS(rc2))
6383 {
6384 rc = VINF_SUCCESS;
6385 break;
6386 }
6387 E1kLogRel(("E1000: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6388 E1kLog(("%s: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", pThis->szPrf, cMillies));
6389 RTSemEventWait(pThis->hEventMoreRxDescAvail, cMillies);
6390 }
6391 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
6392 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, false);
6393
6394 return rc;
6395}
6396
6397
6398/**
6399 * Matches the packet addresses against the Receive Address table. Looks for
6400 * exact matches only.
6401 *
6402 * @returns true if address matches.
6403 * @param pThis Pointer to the state structure.
6404 * @param   pvBuf           The Ethernet packet.
6406 * @thread EMT
6407 */
6408static bool e1kPerfectMatch(PE1KSTATE pThis, const void *pvBuf)
6409{
6410 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
6411 {
6412 E1KRAELEM* ra = pThis->aRecAddr.array + i;
6413
6414 /* Valid address? */
6415 if (ra->ctl & RA_CTL_AV)
6416 {
6417 Assert((ra->ctl & RA_CTL_AS) < 2);
6418 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6419 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6420 // pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6421 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6422 /*
6423 * Address Select:
6424 * 00b = Destination address
6425 * 01b = Source address
6426 * 10b = Reserved
6427 * 11b = Reserved
6428             * Since the Ethernet header is (DA, SA, len) we can use the address
6429             * select value as an index.
6430 */
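            /* E.g. AS=00b compares the 6-byte destination MAC at offset 0, while AS=01b compares
             * the source MAC at offset sizeof(ra->addr) == 6, immediately following it. */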
6431 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6432 ra->addr, sizeof(ra->addr)) == 0)
6433 return true;
6434 }
6435 }
6436
6437 return false;
6438}
6439
6440/**
6441 * Matches the packet addresses against the Multicast Table Array.
6442 *
6443 * @remarks This is an imperfect match since it matches a subset of addresses
6444 *          rather than an exact address.
6445 *
6446 * @returns true if address matches.
6447 * @param pThis Pointer to the state structure.
6448 * @param   pvBuf           The Ethernet packet.
6450 * @thread EMT
6451 */
6452static bool e1kImperfectMatch(PE1KSTATE pThis, const void *pvBuf)
6453{
6454 /* Get bits 32..47 of destination address */
6455 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6456
6457 unsigned offset = GET_BITS(RCTL, MO);
6458 /*
6459 * offset means:
6460 * 00b = bits 36..47
6461 * 01b = bits 35..46
6462 * 10b = bits 34..45
6463 * 11b = bits 32..43
6464 */
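    /* E.g. with MO=00b the value is shifted right by 4 so that bits 36..47 of the destination
     * address remain; the 12-bit result then selects one of the 4096 bits in the MTA below. */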
6465 if (offset < 3)
6466 u16Bit = u16Bit >> (4 - offset);
6467 return ASMBitTest(pThis->auMTA, u16Bit & 0xFFF);
6468}
6469
6470/**
6471 * Determines if the packet is to be delivered to upper layer.
6472 *
6473 * The following filters are supported:
6474 * - Exact Unicast/Multicast
6475 * - Promiscuous Unicast/Multicast
6476 * - Multicast
6477 * - VLAN
6478 *
6479 * @returns true if packet is intended for this node.
6480 * @param pThis Pointer to the state structure.
6481 * @param pvBuf The ethernet packet.
6482 * @param cb Number of bytes available in the packet.
6483 * @param pStatus Bit field to store status bits.
6484 * @thread EMT
6485 */
6486static bool e1kAddressFilter(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6487{
6488 Assert(cb > 14);
6489 /* Assume that we fail to pass exact filter. */
6490 pStatus->fPIF = false;
6491 pStatus->fVP = false;
6492 /* Discard oversized packets */
6493 if (cb > E1K_MAX_RX_PKT_SIZE)
6494 {
6495 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6496 pThis->szPrf, cb, E1K_MAX_RX_PKT_SIZE));
6497 E1K_INC_CNT32(ROC);
6498 return false;
6499 }
6500 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6501 {
6502 /* When long packet reception is disabled packets over 1522 are discarded */
6503 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6504 pThis->szPrf, cb));
6505 E1K_INC_CNT32(ROC);
6506 return false;
6507 }
6508
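    /* Frame bytes 12-13 (u16Ptr[6]) hold the EtherType/TPID which is matched against VET below;
     * in an 802.1q frame bytes 14-15 (u16Ptr[7]) hold the TCI whose CFI and VLAN ID get checked. */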
6509 uint16_t *u16Ptr = (uint16_t*)pvBuf;
6510 /* Compare TPID with VLAN Ether Type */
6511 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6512 {
6513 pStatus->fVP = true;
6514 /* Is VLAN filtering enabled? */
6515 if (RCTL & RCTL_VFE)
6516 {
6517            /* It is an 802.1q packet indeed, let's filter by VID */
6518 if (RCTL & RCTL_CFIEN)
6519 {
6520 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", pThis->szPrf,
6521 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6522 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6523 !!(RCTL & RCTL_CFI)));
6524 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6525 {
6526 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6527 pThis->szPrf, E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6528 return false;
6529 }
6530 }
6531 else
6532 E1kLog3(("%s VLAN filter: VLAN=%d\n", pThis->szPrf,
6533 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6534 if (!ASMBitTest(pThis->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6535 {
6536 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6537 pThis->szPrf, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6538 return false;
6539 }
6540 }
6541 }
6542 /* Broadcast filtering */
6543 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6544 return true;
6545 E1kLog2(("%s Packet filter: not a broadcast\n", pThis->szPrf));
6546 if (e1kIsMulticast(pvBuf))
6547 {
6548 /* Is multicast promiscuous enabled? */
6549 if (RCTL & RCTL_MPE)
6550 return true;
6551 E1kLog2(("%s Packet filter: no promiscuous multicast\n", pThis->szPrf));
6552 /* Try perfect matches first */
6553 if (e1kPerfectMatch(pThis, pvBuf))
6554 {
6555 pStatus->fPIF = true;
6556 return true;
6557 }
6558 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6559 if (e1kImperfectMatch(pThis, pvBuf))
6560 return true;
6561 E1kLog2(("%s Packet filter: no imperfect match\n", pThis->szPrf));
6562 }
6563 else {
6564 /* Is unicast promiscuous enabled? */
6565 if (RCTL & RCTL_UPE)
6566 return true;
6567 E1kLog2(("%s Packet filter: no promiscuous unicast\n", pThis->szPrf));
6568 if (e1kPerfectMatch(pThis, pvBuf))
6569 {
6570 pStatus->fPIF = true;
6571 return true;
6572 }
6573 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6574 }
6575 E1kLog2(("%s Packet filter: packet discarded\n", pThis->szPrf));
6576 return false;
6577}
6578
6579/**
6580 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6581 */
6582static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6583{
6584 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6585 int rc = VINF_SUCCESS;
6586
6587 /*
6588 * Drop packets if the VM is not running yet/anymore.
6589 */
6590 VMSTATE enmVMState = PDMDevHlpVMState(STATE_TO_DEVINS(pThis));
6591 if ( enmVMState != VMSTATE_RUNNING
6592 && enmVMState != VMSTATE_RUNNING_LS)
6593 {
6594 E1kLog(("%s Dropping incoming packet as VM is not running.\n", pThis->szPrf));
6595 return VINF_SUCCESS;
6596 }
6597
6598 /* Discard incoming packets in locked state */
6599 if (!(RCTL & RCTL_EN) || pThis->fLocked || !(STATUS & STATUS_LU))
6600 {
6601 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", pThis->szPrf));
6602 return VINF_SUCCESS;
6603 }
6604
6605 STAM_PROFILE_ADV_START(&pThis->StatReceive, a);
6606
6607 //if (!e1kCsEnter(pThis, RT_SRC_POS))
6608 // return VERR_PERMISSION_DENIED;
6609
6610 e1kPacketDump(pThis, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6611
6612 /* Update stats */
6613 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
6614 {
6615 E1K_INC_CNT32(TPR);
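        /* Total octets received are counted as at least 64 bytes per frame (the minimum
         * Ethernet frame size), hence the clamp below. */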
6616 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6617 e1kCsLeave(pThis);
6618 }
6619 STAM_PROFILE_ADV_START(&pThis->StatReceiveFilter, a);
6620 E1KRXDST status;
6621 RT_ZERO(status);
6622 bool fPassed = e1kAddressFilter(pThis, pvBuf, cb, &status);
6623 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveFilter, a);
6624 if (fPassed)
6625 {
6626 rc = e1kHandleRxPacket(pThis, pvBuf, cb, status);
6627 }
6628 //e1kCsLeave(pThis);
6629 STAM_PROFILE_ADV_STOP(&pThis->StatReceive, a);
6630
6631 return rc;
6632}
6633
6634
6635/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
6636
6637/**
6638 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
6639 */
6640static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6641{
6642 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, ILeds);
6643 int rc = VERR_PDM_LUN_NOT_FOUND;
6644
6645 if (iLUN == 0)
6646 {
6647 *ppLed = &pThis->led;
6648 rc = VINF_SUCCESS;
6649 }
6650 return rc;
6651}
6652
6653
6654/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
6655
6656/**
6657 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
6658 */
6659static DECLCALLBACK(int) e1kR3GetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6660{
6661 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6662 pThis->eeprom.getMac(pMac);
6663 return VINF_SUCCESS;
6664}
6665
6666/**
6667 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
6668 */
6669static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kR3GetLinkState(PPDMINETWORKCONFIG pInterface)
6670{
6671 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6672 if (STATUS & STATUS_LU)
6673 return PDMNETWORKLINKSTATE_UP;
6674 return PDMNETWORKLINKSTATE_DOWN;
6675}
6676
6677/**
6678 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
6679 */
6680static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6681{
6682 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6683
6684 E1kLog(("%s e1kR3SetLinkState: enmState=%d\n", pThis->szPrf, enmState));
6685 switch (enmState)
6686 {
6687 case PDMNETWORKLINKSTATE_UP:
6688 pThis->fCableConnected = true;
6689 /* If link was down, bring it up after a while. */
6690 if (!(STATUS & STATUS_LU))
6691 e1kBringLinkUpDelayed(pThis);
6692 break;
6693 case PDMNETWORKLINKSTATE_DOWN:
6694 pThis->fCableConnected = false;
6695 /* Always set the phy link state to down, regardless of the STATUS_LU bit.
6696 * We might have to set the link state before the driver initializes us. */
6697 Phy::setLinkStatus(&pThis->phy, false);
6698 /* If link was up, bring it down. */
6699 if (STATUS & STATUS_LU)
6700 e1kR3LinkDown(pThis);
6701 break;
6702 case PDMNETWORKLINKSTATE_DOWN_RESUME:
6703 /*
6704 * There is not much sense in bringing down the link if it has not come up yet.
6705             * If it is up though, we bring it down temporarily, then bring it up again.
6706 */
6707 if (STATUS & STATUS_LU)
6708 e1kR3LinkDownTemp(pThis);
6709 break;
6710 default:
6711 ;
6712 }
6713 return VINF_SUCCESS;
6714}
6715
6716
6717/* -=-=-=-=- PDMIBASE -=-=-=-=- */
6718
6719/**
6720 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6721 */
6722static DECLCALLBACK(void *) e1kR3QueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6723{
6724 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, IBase);
6725 Assert(&pThis->IBase == pInterface);
6726
6727 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
6728 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThis->INetworkDown);
6729 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThis->INetworkConfig);
6730 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThis->ILeds);
6731 return NULL;
6732}
6733
6734
6735/* -=-=-=-=- Saved State -=-=-=-=- */
6736
6737/**
6738 * Saves the configuration.
6739 *
6740 * @param pThis The E1K state.
6741 * @param pSSM The handle to the saved state.
6742 */
6743static void e1kSaveConfig(PE1KSTATE pThis, PSSMHANDLE pSSM)
6744{
6745 SSMR3PutMem(pSSM, &pThis->macConfigured, sizeof(pThis->macConfigured));
6746 SSMR3PutU32(pSSM, pThis->eChip);
6747}
6748
6749/**
6750 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
6751 */
6752static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
6753{
6754 RT_NOREF(uPass);
6755 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6756 e1kSaveConfig(pThis, pSSM);
6757 return VINF_SSM_DONT_CALL_AGAIN;
6758}
6759
6760/**
6761 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
6762 */
6763static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6764{
6765 RT_NOREF(pSSM);
6766 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6767
6768 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6769 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6770 return rc;
6771 e1kCsLeave(pThis);
6772 return VINF_SUCCESS;
6773#if 0
6774 /* 1) Prevent all threads from modifying the state and memory */
6775 //pThis->fLocked = true;
6776 /* 2) Cancel all timers */
6777#ifdef E1K_TX_DELAY
6778 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
6779#endif /* E1K_TX_DELAY */
6780//#ifdef E1K_USE_TX_TIMERS
6781 if (pThis->fTidEnabled)
6782 {
6783 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
6784#ifndef E1K_NO_TAD
6785 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
6786#endif /* E1K_NO_TAD */
6787 }
6788//#endif /* E1K_USE_TX_TIMERS */
6789#ifdef E1K_USE_RX_TIMERS
6790 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
6791 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
6792#endif /* E1K_USE_RX_TIMERS */
6793 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
6794 /* 3) Did I forget anything? */
6795 E1kLog(("%s Locked\n", pThis->szPrf));
6796 return VINF_SUCCESS;
6797#endif
6798}
6799
6800/**
6801 * @callback_method_impl{FNSSMDEVSAVEEXEC}
6802 */
6803static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6804{
6805 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6806
6807 e1kSaveConfig(pThis, pSSM);
6808 pThis->eeprom.save(pSSM);
6809 e1kDumpState(pThis);
6810 SSMR3PutMem(pSSM, pThis->auRegs, sizeof(pThis->auRegs));
6811 SSMR3PutBool(pSSM, pThis->fIntRaised);
6812 Phy::saveState(pSSM, &pThis->phy);
6813 SSMR3PutU32(pSSM, pThis->uSelectedReg);
6814 SSMR3PutMem(pSSM, pThis->auMTA, sizeof(pThis->auMTA));
6815 SSMR3PutMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6816 SSMR3PutMem(pSSM, pThis->auVFTA, sizeof(pThis->auVFTA));
6817 SSMR3PutU64(pSSM, pThis->u64AckedAt);
6818 SSMR3PutU16(pSSM, pThis->u16RxBSize);
6819 //SSMR3PutBool(pSSM, pThis->fDelayInts);
6820 //SSMR3PutBool(pSSM, pThis->fIntMaskUsed);
6821 SSMR3PutU16(pSSM, pThis->u16TxPktLen);
6822/** @todo State wrt the TSE buffer is incomplete, so there is little point in
6823 *        actually saving it. */
6824 SSMR3PutMem(pSSM, pThis->aTxPacketFallback, pThis->u16TxPktLen);
6825 SSMR3PutBool(pSSM, pThis->fIPcsum);
6826 SSMR3PutBool(pSSM, pThis->fTCPcsum);
6827 SSMR3PutMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6828 SSMR3PutMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6829 SSMR3PutBool(pSSM, pThis->fVTag);
6830 SSMR3PutU16(pSSM, pThis->u16VTagTCI);
6831#ifdef E1K_WITH_TXD_CACHE
6832#if 0
6833 SSMR3PutU8(pSSM, pThis->nTxDFetched);
6834 SSMR3PutMem(pSSM, pThis->aTxDescriptors,
6835 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6836#else
6837 /*
6838 * There is no point in storing TX descriptor cache entries as we can simply
6839 * fetch them again. Moreover, normally the cache is always empty when we
6840 * save the state. Store zero entries for compatibility.
6841 */
6842 SSMR3PutU8(pSSM, 0);
6843#endif
6844#endif /* E1K_WITH_TXD_CACHE */
6845/** @todo GSO requires some more state here. */
6846 E1kLog(("%s State has been saved\n", pThis->szPrf));
6847 return VINF_SUCCESS;
6848}
6849
6850#if 0
6851/**
6852 * @callback_method_impl{FNSSMDEVSAVEDONE}
6853 */
6854static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6855{
6856 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6857
6858 /* If VM is being powered off unlocking will result in assertions in PGM */
6859 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
6860 pThis->fLocked = false;
6861 else
6862 E1kLog(("%s VM is not running -- remain locked\n", pThis->szPrf));
6863 E1kLog(("%s Unlocked\n", pThis->szPrf));
6864 return VINF_SUCCESS;
6865}
6866#endif
6867
6868/**
6869 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
6870 */
6871static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6872{
6873 RT_NOREF(pSSM);
6874 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6875
6876 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6877 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6878 return rc;
6879 e1kCsLeave(pThis);
6880 return VINF_SUCCESS;
6881}
6882
6883/**
6884 * @callback_method_impl{FNSSMDEVLOADEXEC}
6885 */
6886static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
6887{
6888 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6889 int rc;
6890
6891 if ( uVersion != E1K_SAVEDSTATE_VERSION
6892#ifdef E1K_WITH_TXD_CACHE
6893 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
6894#endif /* E1K_WITH_TXD_CACHE */
6895 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
6896 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
6897 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
6898
6899 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
6900 || uPass != SSM_PASS_FINAL)
6901 {
6902 /* config checks */
6903 RTMAC macConfigured;
6904 rc = SSMR3GetMem(pSSM, &macConfigured, sizeof(macConfigured));
6905 AssertRCReturn(rc, rc);
6906 if ( memcmp(&macConfigured, &pThis->macConfigured, sizeof(macConfigured))
6907 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
6908 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
6909
6910 E1KCHIP eChip;
6911 rc = SSMR3GetU32(pSSM, &eChip);
6912 AssertRCReturn(rc, rc);
6913 if (eChip != pThis->eChip)
6914 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
6915 }
6916
6917 if (uPass == SSM_PASS_FINAL)
6918 {
6919 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
6920 {
6921 rc = pThis->eeprom.load(pSSM);
6922 AssertRCReturn(rc, rc);
6923 }
6924 /* the state */
6925 SSMR3GetMem(pSSM, &pThis->auRegs, sizeof(pThis->auRegs));
6926 SSMR3GetBool(pSSM, &pThis->fIntRaised);
6927 /** @todo PHY could be made a separate device with its own versioning */
6928 Phy::loadState(pSSM, &pThis->phy);
6929 SSMR3GetU32(pSSM, &pThis->uSelectedReg);
6930 SSMR3GetMem(pSSM, &pThis->auMTA, sizeof(pThis->auMTA));
6931 SSMR3GetMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6932 SSMR3GetMem(pSSM, &pThis->auVFTA, sizeof(pThis->auVFTA));
6933 SSMR3GetU64(pSSM, &pThis->u64AckedAt);
6934 SSMR3GetU16(pSSM, &pThis->u16RxBSize);
6935 //SSMR3GetBool(pSSM, pThis->fDelayInts);
6936 //SSMR3GetBool(pSSM, pThis->fIntMaskUsed);
6937 SSMR3GetU16(pSSM, &pThis->u16TxPktLen);
6938 SSMR3GetMem(pSSM, &pThis->aTxPacketFallback[0], pThis->u16TxPktLen);
6939 SSMR3GetBool(pSSM, &pThis->fIPcsum);
6940 SSMR3GetBool(pSSM, &pThis->fTCPcsum);
6941 SSMR3GetMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6942 rc = SSMR3GetMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6943 AssertRCReturn(rc, rc);
6944 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
6945 {
6946 SSMR3GetBool(pSSM, &pThis->fVTag);
6947 rc = SSMR3GetU16(pSSM, &pThis->u16VTagTCI);
6948 AssertRCReturn(rc, rc);
6949 }
6950 else
6951 {
6952 pThis->fVTag = false;
6953 pThis->u16VTagTCI = 0;
6954 }
6955#ifdef E1K_WITH_TXD_CACHE
6956 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
6957 {
6958 rc = SSMR3GetU8(pSSM, &pThis->nTxDFetched);
6959 AssertRCReturn(rc, rc);
6960 if (pThis->nTxDFetched)
6961 SSMR3GetMem(pSSM, pThis->aTxDescriptors,
6962 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6963 }
6964 else
6965 pThis->nTxDFetched = 0;
6966 /*
6967     * @todo Perhaps we should not store the TXD cache as the entries can be
6968     *       simply fetched again from the guest's memory. Or can't they?
6969 */
6970#endif /* E1K_WITH_TXD_CACHE */
6971#ifdef E1K_WITH_RXD_CACHE
6972 /*
6973 * There is no point in storing the RX descriptor cache in the saved
6974 * state, we just need to make sure it is empty.
6975 */
6976 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
6977#endif /* E1K_WITH_RXD_CACHE */
6978 /* derived state */
6979 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
6980
6981 E1kLog(("%s State has been restored\n", pThis->szPrf));
6982 e1kDumpState(pThis);
6983 }
6984 return VINF_SUCCESS;
6985}
6986
6987/**
6988 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
6989 */
6990static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6991{
6992 RT_NOREF(pSSM);
6993 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6994
6995 /* Update promiscuous mode */
6996 if (pThis->pDrvR3)
6997 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3,
6998 !!(RCTL & (RCTL_UPE | RCTL_MPE)));
6999
7000 /*
7001 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
7002 * passed to us. We go through all this stuff if the link was up and we
7003     * weren't teleported.
7004 */
7005 if ( (STATUS & STATUS_LU)
7006 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
7007 && pThis->cMsLinkUpDelay)
7008 {
7009 e1kR3LinkDownTemp(pThis);
7010 }
7011 return VINF_SUCCESS;
7012}
7013
7014
7015
7016/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
7017
7018/**
7019 * @callback_method_impl{FNRTSTRFORMATTYPE}
7020 */
7021static DECLCALLBACK(size_t) e1kFmtRxDesc(PFNRTSTROUTPUT pfnOutput,
7022 void *pvArgOutput,
7023 const char *pszType,
7024 void const *pvValue,
7025 int cchWidth,
7026 int cchPrecision,
7027 unsigned fFlags,
7028 void *pvUser)
7029{
7030 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7031 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
7032 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
7033 if (!pDesc)
7034 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
7035
7036 size_t cbPrintf = 0;
7037 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
7038 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
7039 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
7040 pDesc->status.fPIF ? "PIF" : "pif",
7041 pDesc->status.fIPCS ? "IPCS" : "ipcs",
7042 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
7043 pDesc->status.fVP ? "VP" : "vp",
7044 pDesc->status.fIXSM ? "IXSM" : "ixsm",
7045 pDesc->status.fEOP ? "EOP" : "eop",
7046 pDesc->status.fDD ? "DD" : "dd",
7047 pDesc->status.fRXE ? "RXE" : "rxe",
7048 pDesc->status.fIPE ? "IPE" : "ipe",
7049 pDesc->status.fTCPE ? "TCPE" : "tcpe",
7050 pDesc->status.fCE ? "CE" : "ce",
7051 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
7052 E1K_SPEC_VLAN(pDesc->status.u16Special),
7053 E1K_SPEC_PRI(pDesc->status.u16Special));
7054 return cbPrintf;
7055}
7056
7057/**
7058 * @callback_method_impl{FNRTSTRFORMATTYPE}
7059 */
7060static DECLCALLBACK(size_t) e1kFmtTxDesc(PFNRTSTROUTPUT pfnOutput,
7061 void *pvArgOutput,
7062 const char *pszType,
7063 void const *pvValue,
7064 int cchWidth,
7065 int cchPrecision,
7066 unsigned fFlags,
7067 void *pvUser)
7068{
7069 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7070 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
7071 E1KTXDESC *pDesc = (E1KTXDESC*)pvValue;
7072 if (!pDesc)
7073 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
7074
7075 size_t cbPrintf = 0;
7076 switch (e1kGetDescType(pDesc))
7077 {
7078 case E1K_DTYP_CONTEXT:
7079 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
7080 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
7081 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
7082 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
7083 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
7084 pDesc->context.dw2.fIDE ? " IDE":"",
7085 pDesc->context.dw2.fRS ? " RS" :"",
7086 pDesc->context.dw2.fTSE ? " TSE":"",
7087 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
7088 pDesc->context.dw2.fTCP ? "TCP":"UDP",
7089 pDesc->context.dw2.u20PAYLEN,
7090 pDesc->context.dw3.u8HDRLEN,
7091 pDesc->context.dw3.u16MSS,
7092 pDesc->context.dw3.fDD?"DD":"");
7093 break;
7094 case E1K_DTYP_DATA:
7095 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
7096 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
7097 pDesc->data.u64BufAddr,
7098 pDesc->data.cmd.u20DTALEN,
7099 pDesc->data.cmd.fIDE ? " IDE" :"",
7100 pDesc->data.cmd.fVLE ? " VLE" :"",
7101 pDesc->data.cmd.fRPS ? " RPS" :"",
7102 pDesc->data.cmd.fRS ? " RS" :"",
7103 pDesc->data.cmd.fTSE ? " TSE" :"",
7104 pDesc->data.cmd.fIFCS? " IFCS":"",
7105 pDesc->data.cmd.fEOP ? " EOP" :"",
7106 pDesc->data.dw3.fDD ? " DD" :"",
7107 pDesc->data.dw3.fEC ? " EC" :"",
7108 pDesc->data.dw3.fLC ? " LC" :"",
7109 pDesc->data.dw3.fTXSM? " TXSM":"",
7110 pDesc->data.dw3.fIXSM? " IXSM":"",
7111 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
7112 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
7113 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
7114 break;
7115 case E1K_DTYP_LEGACY:
7116 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
7117 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
7118 pDesc->data.u64BufAddr,
7119 pDesc->legacy.cmd.u16Length,
7120 pDesc->legacy.cmd.fIDE ? " IDE" :"",
7121 pDesc->legacy.cmd.fVLE ? " VLE" :"",
7122 pDesc->legacy.cmd.fRPS ? " RPS" :"",
7123 pDesc->legacy.cmd.fRS ? " RS" :"",
7124 pDesc->legacy.cmd.fIC ? " IC" :"",
7125 pDesc->legacy.cmd.fIFCS? " IFCS":"",
7126 pDesc->legacy.cmd.fEOP ? " EOP" :"",
7127 pDesc->legacy.dw3.fDD ? " DD" :"",
7128 pDesc->legacy.dw3.fEC ? " EC" :"",
7129 pDesc->legacy.dw3.fLC ? " LC" :"",
7130 pDesc->legacy.cmd.u8CSO,
7131 pDesc->legacy.dw3.u8CSS,
7132 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
7133 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
7134 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
7135 break;
7136 default:
7137 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
7138 break;
7139 }
7140
7141 return cbPrintf;
7142}
7143
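/* Once registered, the custom types below allow descriptors to be logged with the %R[e1krxd] and
 * %R[e1ktxd] format extensions, as e1kInfo() does for the receive and transmit rings. */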
7144/** Initializes debug helpers (logging format types). */
7145static int e1kInitDebugHelpers(void)
7146{
7147 int rc = VINF_SUCCESS;
7148 static bool s_fHelpersRegistered = false;
7149 if (!s_fHelpersRegistered)
7150 {
7151 s_fHelpersRegistered = true;
7152 rc = RTStrFormatTypeRegister("e1krxd", e1kFmtRxDesc, NULL);
7153 AssertRCReturn(rc, rc);
7154 rc = RTStrFormatTypeRegister("e1ktxd", e1kFmtTxDesc, NULL);
7155 AssertRCReturn(rc, rc);
7156 }
7157 return rc;
7158}
7159
7160/**
7161 * Status info callback.
7162 *
7163 * @param pDevIns The device instance.
7164 * @param pHlp The output helpers.
7165 * @param pszArgs The arguments.
7166 */
7167static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
7168{
7169 RT_NOREF(pszArgs);
7170 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7171 unsigned i;
7172 // bool fRcvRing = false;
7173 // bool fXmtRing = false;
7174
7175 /*
7176 * Parse args.
7177 if (pszArgs)
7178 {
7179 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
7180 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
7181 }
7182 */
7183
7184 /*
7185 * Show info.
7186 */
7187 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%RTiop mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7188 pDevIns->iInstance, pThis->IOPortBase, pThis->addrMMReg,
7189 &pThis->macConfigured, g_aChips[pThis->eChip].pcszName,
7190 pThis->fRCEnabled ? " GC" : "", pThis->fR0Enabled ? " R0" : "");
7191
7192 e1kCsEnter(pThis, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
7193
7194 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7195 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", g_aE1kRegMap[i].abbrev, pThis->auRegs[i]);
7196
7197 for (i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
7198 {
7199 E1KRAELEM* ra = pThis->aRecAddr.array + i;
7200 if (ra->ctl & RA_CTL_AV)
7201 {
7202 const char *pcszTmp;
7203 switch (ra->ctl & RA_CTL_AS)
7204 {
7205 case 0: pcszTmp = "DST"; break;
7206 case 1: pcszTmp = "SRC"; break;
7207 default: pcszTmp = "reserved";
7208 }
7209 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7210 }
7211 }
7212 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7213 uint32_t rdh = RDH;
7214 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7215 for (i = 0; i < cDescs; ++i)
7216 {
7217 E1KRXDESC desc;
7218 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7219 &desc, sizeof(desc));
7220 if (i == rdh)
7221 pHlp->pfnPrintf(pHlp, ">>> ");
7222 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7223 }
7224#ifdef E1K_WITH_RXD_CACHE
7225 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7226 pThis->iRxDCurrent, RDH, pThis->nRxDFetched, E1K_RXD_CACHE_SIZE);
7227 if (rdh > pThis->iRxDCurrent)
7228 rdh -= pThis->iRxDCurrent;
7229 else
7230 rdh = cDescs + rdh - pThis->iRxDCurrent;
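    /* rdh now holds the guest ring index of the first cached descriptor; e.g. with cDescs=8,
     * RDH=1 and iRxDCurrent=3 the cache starts at ring index 8 + 1 - 3 = 6. */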
7231 for (i = 0; i < pThis->nRxDFetched; ++i)
7232 {
7233 if (i == pThis->iRxDCurrent)
7234 pHlp->pfnPrintf(pHlp, ">>> ");
7235 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7236 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7237 &pThis->aRxDescriptors[i]);
7238 }
7239#endif /* E1K_WITH_RXD_CACHE */
7240
7241 cDescs = TDLEN / sizeof(E1KTXDESC);
7242 uint32_t tdh = TDH;
7243 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7244 for (i = 0; i < cDescs; ++i)
7245 {
7246 E1KTXDESC desc;
7247 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7248 &desc, sizeof(desc));
7249 if (i == tdh)
7250 pHlp->pfnPrintf(pHlp, ">>> ");
7251 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7252 }
7253#ifdef E1K_WITH_TXD_CACHE
7254 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7255 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE);
7256 if (tdh > pThis->iTxDCurrent)
7257 tdh -= pThis->iTxDCurrent;
7258 else
7259 tdh = cDescs + tdh - pThis->iTxDCurrent;
7260 for (i = 0; i < pThis->nTxDFetched; ++i)
7261 {
7262 if (i == pThis->iTxDCurrent)
7263 pHlp->pfnPrintf(pHlp, ">>> ");
7264 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7265 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7266 &pThis->aTxDescriptors[i]);
7267 }
7268#endif /* E1K_WITH_TXD_CACHE */
7269
7270
7271#ifdef E1K_INT_STATS
7272 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pThis->uStatIntTry);
7273 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pThis->uStatInt);
7274 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pThis->uStatIntLower);
7275 pHlp->pfnPrintf(pHlp, "ICR outside ISR : %d\n", pThis->uStatNoIntICR);
7276 pHlp->pfnPrintf(pHlp, "IMS raised ints : %d\n", pThis->uStatIntIMS);
7277 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pThis->uStatIntSkip);
7278 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pThis->uStatIntMasked);
7279 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pThis->uStatIntEarly);
7280 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pThis->uStatIntLate);
7281 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pThis->iStatIntLost);
7282 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pThis->uStatIntRx);
7283 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pThis->uStatIntTx);
7284 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pThis->uStatIntICS);
7285 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pThis->uStatIntRDTR);
7286 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pThis->uStatIntRXDMT0);
7287 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pThis->uStatIntTXQE);
7288 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pThis->uStatTxIDE);
7289 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pThis->uStatTxDelayed);
7290 pHlp->pfnPrintf(pHlp, "TX delayed expired: %d\n", pThis->uStatTxDelayExp);
7291 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pThis->uStatTxNoRS);
7292 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pThis->uStatTAD);
7293 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pThis->uStatTID);
7294 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pThis->uStatRAD);
7295 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pThis->uStatRID);
7296 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pThis->uStatDescCtx);
7297 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pThis->uStatDescDat);
7298 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pThis->uStatDescLeg);
7299 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pThis->uStatRxFrm);
7300 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pThis->uStatTxFrm);
7301 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pThis->uStatTx1514);
7302 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pThis->uStatTx2962);
7303 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pThis->uStatTx4410);
7304 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pThis->uStatTx5858);
7305 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pThis->uStatTx7306);
7306 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pThis->uStatTx8754);
7307 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pThis->uStatTx16384);
7308 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pThis->uStatTx32768);
7309 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pThis->uStatTxLarge);
7310#endif /* E1K_INT_STATS */
7311
7312 e1kCsLeave(pThis);
7313}
7314
7315
7316
7317/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
7318
7319/**
7320 * Detach notification.
7321 *
7322 * One port on the network card has been disconnected from the network.
7323 *
7324 * @param pDevIns The device instance.
7325 * @param iLUN The logical unit which is being detached.
7326 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7327 */
7328static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7329{
7330 RT_NOREF(fFlags);
7331 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7332 Log(("%s e1kR3Detach:\n", pThis->szPrf));
7333
7334 AssertLogRelReturnVoid(iLUN == 0);
7335
7336 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7337
7338    /** @todo r=pritesh still need to check if I missed
7339     *  cleaning something up in this function
7340 */
7341
7342 /*
7343 * Zero some important members.
7344 */
7345 pThis->pDrvBase = NULL;
7346 pThis->pDrvR3 = NULL;
7347 pThis->pDrvR0 = NIL_RTR0PTR;
7348 pThis->pDrvRC = NIL_RTRCPTR;
7349
7350 PDMCritSectLeave(&pThis->cs);
7351}
7352
7353/**
7354 * Attach the Network attachment.
7355 *
7356 * One port on the network card has been connected to a network.
7357 *
7358 * @returns VBox status code.
7359 * @param pDevIns The device instance.
7360 * @param iLUN The logical unit which is being attached.
7361 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7362 *
7363 * @remarks This code path is not used during construction.
7364 */
7365static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7366{
7367 RT_NOREF(fFlags);
7368 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7369 LogFlow(("%s e1kR3Attach:\n", pThis->szPrf));
7370
7371 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7372
7373 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7374
7375 /*
7376 * Attach the driver.
7377 */
7378 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7379 if (RT_SUCCESS(rc))
7380 {
7381 if (rc == VINF_NAT_DNS)
7382 {
7383#ifdef RT_OS_LINUX
7384 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7385 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7386#else
7387 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7388 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7389#endif
7390 }
7391 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7392 AssertMsgStmt(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7393 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7394 if (RT_SUCCESS(rc))
7395 {
7396 PPDMIBASER0 pBaseR0 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0);
7397 pThis->pDrvR0 = pBaseR0 ? pBaseR0->pfnQueryInterface(pBaseR0, PDMINETWORKUP_IID) : NIL_RTR0PTR;
7398
7399 PPDMIBASERC pBaseRC = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC);
7400            pThis->pDrvRC = pBaseRC ? pBaseRC->pfnQueryInterface(pBaseRC, PDMINETWORKUP_IID) : NIL_RTRCPTR;
7401 }
7402 }
7403 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7404 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7405 {
7406 /* This should never happen because this function is not called
7407 * if there is no driver to attach! */
7408 Log(("%s No attached driver!\n", pThis->szPrf));
7409 }
7410
7411 /*
7412     * Temporarily set the link down if it was up so that the guest
7413     * will know that we have changed the configuration of the
7414     * network card.
7415 */
7416 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7417 e1kR3LinkDownTemp(pThis);
7418
7419 PDMCritSectLeave(&pThis->cs);
7420 return rc;
7421
7422}
7423
7424/**
7425 * @copydoc FNPDMDEVPOWEROFF
7426 */
7427static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7428{
7429 /* Poke thread waiting for buffer space. */
7430 e1kWakeupReceive(pDevIns);
7431}
7432
7433/**
7434 * @copydoc FNPDMDEVRESET
7435 */
7436static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7437{
7438 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7439#ifdef E1K_TX_DELAY
7440 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
7441#endif /* E1K_TX_DELAY */
7442 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
7443 e1kCancelTimer(pThis, pThis->CTX_SUFF(pLUTimer));
7444 e1kXmitFreeBuf(pThis);
7445 pThis->u16TxPktLen = 0;
7446 pThis->fIPcsum = false;
7447 pThis->fTCPcsum = false;
7448 pThis->fIntMaskUsed = false;
7449 pThis->fDelayInts = false;
7450 pThis->fLocked = false;
7451 pThis->u64AckedAt = 0;
7452 e1kHardReset(pThis);
7453}
7454
7455/**
7456 * @copydoc FNPDMDEVSUSPEND
7457 */
7458static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7459{
7460 /* Poke thread waiting for buffer space. */
7461 e1kWakeupReceive(pDevIns);
7462}
7463
7464/**
7465 * Device relocation callback.
7466 *
7467 * When this callback is called the device instance data, and if the
7468 * device has a GC component, is being relocated, and/or the selectors
7469 * have been changed. The device must use the chance to perform the
7470 * necessary pointer relocations and data updates.
7471 *
7472 * Before the GC code is executed the first time, this function will be
7473 * called with a 0 delta so GC pointer calculations can be done in one place.
7474 *
7475 * @param pDevIns Pointer to the device instance.
7476 * @param offDelta The relocation delta relative to the old location.
7477 *
7478 * @remark A relocation CANNOT fail.
7479 */
7480static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7481{
7482 RT_NOREF(offDelta);
7483 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7484 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7485 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7486 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7487#ifdef E1K_USE_RX_TIMERS
7488 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7489 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7490#endif /* E1K_USE_RX_TIMERS */
7491//#ifdef E1K_USE_TX_TIMERS
7492 if (pThis->fTidEnabled)
7493 {
7494 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7495# ifndef E1K_NO_TAD
7496 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7497# endif /* E1K_NO_TAD */
7498 }
7499//#endif /* E1K_USE_TX_TIMERS */
7500#ifdef E1K_TX_DELAY
7501 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7502#endif /* E1K_TX_DELAY */
7503 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7504 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7505}
7506
7507/**
7508 * Destruct a device instance.
7509 *
7510 * We need to free non-VM resources only.
7511 *
7512 * @returns VBox status code.
7513 * @param pDevIns The device instance data.
7514 * @thread EMT
7515 */
7516static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7517{
7518 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7519 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7520
7521 e1kDumpState(pThis);
7522 E1kLog(("%s Destroying instance\n", pThis->szPrf));
7523 if (PDMCritSectIsInitialized(&pThis->cs))
7524 {
7525 if (pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
7526 {
7527 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
7528 RTSemEventDestroy(pThis->hEventMoreRxDescAvail);
7529 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7530 }
7531#ifdef E1K_WITH_TX_CS
7532 PDMR3CritSectDelete(&pThis->csTx);
7533#endif /* E1K_WITH_TX_CS */
7534 PDMR3CritSectDelete(&pThis->csRx);
7535 PDMR3CritSectDelete(&pThis->cs);
7536 }
7537 return VINF_SUCCESS;
7538}
7539
7540
7541/**
7542 * Set PCI configuration space registers.
7543 *
7544 * @param   pPciDev         Pointer to the PCI device structure.
 *                              @param   eChip           The emulated chip type (selects IDs from g_aChips).
7545 * @thread EMT
7546 */
7547static DECLCALLBACK(void) e1kConfigurePciDev(PPDMPCIDEV pPciDev, E1KCHIP eChip)
7548{
7549 Assert(eChip < RT_ELEMENTS(g_aChips));
7550 /* Configure PCI Device, assume 32-bit mode ******************************/
7551 PCIDevSetVendorId(pPciDev, g_aChips[eChip].uPCIVendorId);
7552 PCIDevSetDeviceId(pPciDev, g_aChips[eChip].uPCIDeviceId);
7553 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_aChips[eChip].uPCISubsystemVendorId);
7554 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_aChips[eChip].uPCISubsystemId);
7555
7556 PCIDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7557 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7558 PCIDevSetWord( pPciDev, VBOX_PCI_STATUS,
7559 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7560 /* Stepping A2 */
7561 PCIDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7562 /* Ethernet adapter */
7563 PCIDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7564 PCIDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7565 /* normal single function Ethernet controller */
7566 PCIDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7567 /* Memory Register Base Address */
7568 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7569 /* Memory Flash Base Address */
7570 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7571 /* IO Register Base Address */
7572 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7573 /* Expansion ROM Base Address */
7574 PCIDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7575 /* Capabilities Pointer */
7576 PCIDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7577 /* Interrupt Pin: INTA# */
7578 PCIDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7579 /* Max_Lat/Min_Gnt: very high priority and time slice */
7580 PCIDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7581 PCIDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7582
7583 /* PCI Power Management Registers ****************************************/
7584 /* Capability ID: PCI Power Management Registers */
7585 PCIDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7586 /* Next Item Pointer: PCI-X */
7587 PCIDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7588 /* Power Management Capabilities: PM disabled, DSI */
7589 PCIDevSetWord( pPciDev, 0xDC + 2,
7590 0x0002 | VBOX_PCI_PM_CAP_DSI);
7591 /* Power Management Control / Status Register: PM disabled */
7592 PCIDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7593 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7594 PCIDevSetByte( pPciDev, 0xDC + 6, 0x00);
7595 /* Data Register: PM disabled, always 0 */
7596 PCIDevSetByte( pPciDev, 0xDC + 7, 0x00);
7597
7598 /* PCI-X Configuration Registers *****************************************/
7599 /* Capability ID: PCI-X Configuration Registers */
7600 PCIDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7601#ifdef E1K_WITH_MSI
7602 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x80);
7603#else
7604 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7605 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7606#endif
7607 /* PCI-X Command: Enable Relaxed Ordering */
7608 PCIDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7609    /* PCI-X Status: 32-bit, 66MHz */
7610 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7611 PCIDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
7612}
7613
7614/**
7615 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7616 */
7617static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7618{
7619 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7620 int rc;
7621 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7622
7623 /*
7624 * Initialize the instance data (state).
7625 * Note! Caller has initialized it to ZERO already.
7626 */
7627 RTStrPrintf(pThis->szPrf, sizeof(pThis->szPrf), "E1000#%d", iInstance);
7628 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", pThis->szPrf, sizeof(E1KRXDESC)));
7629 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7630 pThis->pDevInsR3 = pDevIns;
7631 pThis->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
7632 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7633 pThis->u16TxPktLen = 0;
7634 pThis->fIPcsum = false;
7635 pThis->fTCPcsum = false;
7636 pThis->fIntMaskUsed = false;
7637 pThis->fDelayInts = false;
7638 pThis->fLocked = false;
7639 pThis->u64AckedAt = 0;
7640 pThis->led.u32Magic = PDMLED_MAGIC;
7641 pThis->u32PktNo = 1;
7642
7643 /* Interfaces */
7644 pThis->IBase.pfnQueryInterface = e1kR3QueryInterface;
7645
7646 pThis->INetworkDown.pfnWaitReceiveAvail = e1kR3NetworkDown_WaitReceiveAvail;
7647 pThis->INetworkDown.pfnReceive = e1kR3NetworkDown_Receive;
7648 pThis->INetworkDown.pfnXmitPending = e1kR3NetworkDown_XmitPending;
7649
7650 pThis->ILeds.pfnQueryStatusLed = e1kR3QueryStatusLed;
7651
7652 pThis->INetworkConfig.pfnGetMac = e1kR3GetMac;
7653 pThis->INetworkConfig.pfnGetLinkState = e1kR3GetLinkState;
7654 pThis->INetworkConfig.pfnSetLinkState = e1kR3SetLinkState;
7655
7656 /*
7657 * Internal validations.
7658 */
7659 for (uint32_t iReg = 1; iReg < E1K_NUM_OF_BINARY_SEARCHABLE; iReg++)
7660 AssertLogRelMsgReturn( g_aE1kRegMap[iReg].offset > g_aE1kRegMap[iReg - 1].offset
7661 && g_aE1kRegMap[iReg].offset + g_aE1kRegMap[iReg].size
7662 >= g_aE1kRegMap[iReg - 1].offset + g_aE1kRegMap[iReg - 1].size,
7663 ("%s@%#xLB%#x vs %s@%#xLB%#x\n",
7664 g_aE1kRegMap[iReg].abbrev, g_aE1kRegMap[iReg].offset, g_aE1kRegMap[iReg].size,
7665 g_aE1kRegMap[iReg - 1].abbrev, g_aE1kRegMap[iReg - 1].offset, g_aE1kRegMap[iReg - 1].size),
7666 VERR_INTERNAL_ERROR_4);
7667
7668 /*
7669 * Validate configuration.
7670 */
7671 if (!CFGMR3AreValuesValid(pCfg, "MAC\0" "CableConnected\0" "AdapterType\0"
7672 "LineSpeed\0" "GCEnabled\0" "R0Enabled\0"
7673 "ItrEnabled\0" "ItrRxEnabled\0"
7674 "EthernetCRC\0" "GSOEnabled\0" "LinkUpDelay\0"))
7675 return PDMDEV_SET_ERROR(pDevIns, VERR_PDM_DEVINS_UNKNOWN_CFG_VALUES,
7676 N_("Invalid configuration for E1000 device"));
7677
7678 /** @todo LineSpeed unused! */
7679
7680 /* Get config params */
7681 rc = CFGMR3QueryBytes(pCfg, "MAC", pThis->macConfigured.au8, sizeof(pThis->macConfigured.au8));
7682 if (RT_FAILURE(rc))
7683 return PDMDEV_SET_ERROR(pDevIns, rc,
7684 N_("Configuration error: Failed to get MAC address"));
7685 rc = CFGMR3QueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
7686 if (RT_FAILURE(rc))
7687 return PDMDEV_SET_ERROR(pDevIns, rc,
7688 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7689 rc = CFGMR3QueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
7690 if (RT_FAILURE(rc))
7691 return PDMDEV_SET_ERROR(pDevIns, rc,
7692 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7693 Assert(pThis->eChip <= E1K_CHIP_82545EM);
7694 rc = CFGMR3QueryBoolDef(pCfg, "GCEnabled", &pThis->fRCEnabled, true);
7695 if (RT_FAILURE(rc))
7696 return PDMDEV_SET_ERROR(pDevIns, rc,
7697 N_("Configuration error: Failed to get the value of 'GCEnabled'"));
7698
7699 rc = CFGMR3QueryBoolDef(pCfg, "R0Enabled", &pThis->fR0Enabled, true);
7700 if (RT_FAILURE(rc))
7701 return PDMDEV_SET_ERROR(pDevIns, rc,
7702 N_("Configuration error: Failed to get the value of 'R0Enabled'"));
7703
7704 rc = CFGMR3QueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
7705 if (RT_FAILURE(rc))
7706 return PDMDEV_SET_ERROR(pDevIns, rc,
7707 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7708
7709 rc = CFGMR3QueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
7710 if (RT_FAILURE(rc))
7711 return PDMDEV_SET_ERROR(pDevIns, rc,
7712 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
7713
7714 rc = CFGMR3QueryBoolDef(pCfg, "ItrEnabled", &pThis->fItrEnabled, false);
7715 if (RT_FAILURE(rc))
7716 return PDMDEV_SET_ERROR(pDevIns, rc,
7717 N_("Configuration error: Failed to get the value of 'ItrEnabled'"));
7718
7719 rc = CFGMR3QueryBoolDef(pCfg, "ItrRxEnabled", &pThis->fItrRxEnabled, true);
7720 if (RT_FAILURE(rc))
7721 return PDMDEV_SET_ERROR(pDevIns, rc,
7722 N_("Configuration error: Failed to get the value of 'ItrRxEnabled'"));
7723
7724 rc = CFGMR3QueryBoolDef(pCfg, "TidEnabled", &pThis->fTidEnabled, false);
7725 if (RT_FAILURE(rc))
7726 return PDMDEV_SET_ERROR(pDevIns, rc,
7727 N_("Configuration error: Failed to get the value of 'TidEnabled'"));
7728
7729 rc = CFGMR3QueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 3000); /* ms */
7730 if (RT_FAILURE(rc))
7731 return PDMDEV_SET_ERROR(pDevIns, rc,
7732 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
7733    Assert(pThis->cMsLinkUpDelay <= 300000); /* at most 5 minutes */
7734 if (pThis->cMsLinkUpDelay > 5000)
7735 LogRel(("%s: WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
7736 else if (pThis->cMsLinkUpDelay == 0)
7737 LogRel(("%s: WARNING! Link up delay is disabled!\n", pThis->szPrf));
7738
7739 LogRel(("%s: Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s Itr=%s ItrRx=%s TID=%s R0=%s GC=%s\n", pThis->szPrf,
7740 g_aChips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
7741 pThis->fEthernetCRC ? "on" : "off",
7742 pThis->fGSOEnabled ? "enabled" : "disabled",
7743 pThis->fItrEnabled ? "enabled" : "disabled",
7744 pThis->fItrRxEnabled ? "enabled" : "disabled",
7745 pThis->fTidEnabled ? "enabled" : "disabled",
7746 pThis->fR0Enabled ? "enabled" : "disabled",
7747 pThis->fRCEnabled ? "enabled" : "disabled"));
7748
7749 /* Initialize the EEPROM. */
7750 pThis->eeprom.init(pThis->macConfigured);
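    /* Seed the emulated EEPROM with the MAC address configured above. */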
7751
7752 /* Initialize internal PHY. */
7753 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
7754
7755 /* Initialize critical sections. We do our own locking. */
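    /* The PDM-level critical section is replaced with a NOP section; the device
     * serializes access itself: cs guards general device state, csRx the receive
     * path and (when E1K_WITH_TX_CS is defined) csTx the transmit path. */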
7756 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
7757 AssertRCReturn(rc, rc);
7758
7759 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "E1000#%d", iInstance);
7760 if (RT_FAILURE(rc))
7761 return rc;
7762 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "E1000#%dRX", iInstance);
7763 if (RT_FAILURE(rc))
7764 return rc;
7765#ifdef E1K_WITH_TX_CS
7766 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "E1000#%dTX", iInstance);
7767 if (RT_FAILURE(rc))
7768 return rc;
7769#endif /* E1K_WITH_TX_CS */
7770
7771 /* Saved state registration. */
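    /* e1kLiveExec contributes to the live phase of saving/teleporting; the NULL
     * arguments are optional hooks this device does not use. */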
7772 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
7773 NULL, e1kLiveExec, NULL,
7774 e1kSavePrep, e1kSaveExec, NULL,
7775 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
7776 if (RT_FAILURE(rc))
7777 return rc;
7778
7779 /* Set PCI config registers and register ourselves with the PCI bus. */
7780 e1kConfigurePciDev(&pThis->pciDevice, pThis->eChip);
7781 rc = PDMDevHlpPCIRegister(pDevIns, &pThis->pciDevice);
7782 if (RT_FAILURE(rc))
7783 return rc;
7784
7785#ifdef E1K_WITH_MSI
7786 PDMMSIREG MsiReg;
7787 RT_ZERO(MsiReg);
7788 MsiReg.cMsiVectors = 1;
7789 MsiReg.iMsiCapOffset = 0x80;
7790 MsiReg.iMsiNextOffset = 0x0;
7791 MsiReg.fMsi64bit = false;
7792 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
7793 AssertRCReturn(rc, rc);
7794#endif
7795
7796
7797    /* Map our registers to memory space (region 0, see e1kConfigurePciDev). */
7798 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, e1kMap);
7799 if (RT_FAILURE(rc))
7800 return rc;
7801#ifdef E1K_WITH_PREREG_MMIO
7802 rc = PDMDevHlpMMIOExPreRegister(pDevIns, 0, E1K_MM_SIZE, IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD, "E1000",
7803 NULL /*pvUserR3*/, e1kMMIOWrite, e1kMMIORead, NULL /*pfnFillR3*/,
7804 NIL_RTR0PTR /*pvUserR0*/, pThis->fR0Enabled ? "e1kMMIOWrite" : NULL,
7805 pThis->fR0Enabled ? "e1kMMIORead" : NULL, NULL /*pszFillR0*/,
7806 NIL_RTRCPTR /*pvUserRC*/, pThis->fRCEnabled ? "e1kMMIOWrite" : NULL,
7807 pThis->fRCEnabled ? "e1kMMIORead" : NULL, NULL /*pszFillRC*/);
7808 AssertLogRelRCReturn(rc, rc);
7809#endif
7810    /* Map our registers to I/O port space (region 2, see e1kConfigurePciDev). */
7811 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, E1K_IOPORT_SIZE, PCI_ADDRESS_SPACE_IO, e1kMap);
7812 if (RT_FAILURE(rc))
7813 return rc;
7814
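    /* Two PDM queues follow: 'E1000-Xmit' defers transmit work so it can be
     * kicked from any context and processed in ring-3, while 'E1000-Rcv' is used
     * to signal that receiving may proceed again, e.g. once the guest has made
     * receive descriptors available. */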
7815 /* Create transmit queue */
7816 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7817 e1kTxQueueConsumer, true, "E1000-Xmit", &pThis->pTxQueueR3);
7818 if (RT_FAILURE(rc))
7819 return rc;
7820 pThis->pTxQueueR0 = PDMQueueR0Ptr(pThis->pTxQueueR3);
7821 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7822
7823 /* Create the RX notifier signaller. */
7824 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7825 e1kCanRxQueueConsumer, true, "E1000-Rcv", &pThis->pCanRxQueueR3);
7826 if (RT_FAILURE(rc))
7827 return rc;
7828 pThis->pCanRxQueueR0 = PDMQueueR0Ptr(pThis->pCanRxQueueR3);
7829 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7830
7831#ifdef E1K_TX_DELAY
7832 /* Create Transmit Delay Timer */
7833 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxDelayTimer, pThis,
7834 TMTIMER_FLAGS_NO_CRIT_SECT,
7835 "E1000 Transmit Delay Timer", &pThis->pTXDTimerR3);
7836 if (RT_FAILURE(rc))
7837 return rc;
7838 pThis->pTXDTimerR0 = TMTimerR0Ptr(pThis->pTXDTimerR3);
7839 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7840 TMR3TimerSetCritSect(pThis->pTXDTimerR3, &pThis->csTx);
7841#endif /* E1K_TX_DELAY */
7842
7843//#ifdef E1K_USE_TX_TIMERS
7844 if (pThis->fTidEnabled)
7845 {
7846 /* Create Transmit Interrupt Delay Timer */
7847 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxIntDelayTimer, pThis,
7848 TMTIMER_FLAGS_NO_CRIT_SECT,
7849 "E1000 Transmit Interrupt Delay Timer", &pThis->pTIDTimerR3);
7850 if (RT_FAILURE(rc))
7851 return rc;
7852 pThis->pTIDTimerR0 = TMTimerR0Ptr(pThis->pTIDTimerR3);
7853 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7854
7855# ifndef E1K_NO_TAD
7856 /* Create Transmit Absolute Delay Timer */
7857 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxAbsDelayTimer, pThis,
7858 TMTIMER_FLAGS_NO_CRIT_SECT,
7859 "E1000 Transmit Absolute Delay Timer", &pThis->pTADTimerR3);
7860 if (RT_FAILURE(rc))
7861 return rc;
7862 pThis->pTADTimerR0 = TMTimerR0Ptr(pThis->pTADTimerR3);
7863 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7864# endif /* !E1K_NO_TAD */
7865 }
7866//#endif /* E1K_USE_TX_TIMERS */
7867
7868#ifdef E1K_USE_RX_TIMERS
7869 /* Create Receive Interrupt Delay Timer */
7870 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxIntDelayTimer, pThis,
7871 TMTIMER_FLAGS_NO_CRIT_SECT,
7872 "E1000 Receive Interrupt Delay Timer", &pThis->pRIDTimerR3);
7873 if (RT_FAILURE(rc))
7874 return rc;
7875 pThis->pRIDTimerR0 = TMTimerR0Ptr(pThis->pRIDTimerR3);
7876 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7877
7878 /* Create Receive Absolute Delay Timer */
7879 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxAbsDelayTimer, pThis,
7880 TMTIMER_FLAGS_NO_CRIT_SECT,
7881 "E1000 Receive Absolute Delay Timer", &pThis->pRADTimerR3);
7882 if (RT_FAILURE(rc))
7883 return rc;
7884 pThis->pRADTimerR0 = TMTimerR0Ptr(pThis->pRADTimerR3);
7885 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7886#endif /* E1K_USE_RX_TIMERS */
7887
7888 /* Create Late Interrupt Timer */
7889 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLateIntTimer, pThis,
7890 TMTIMER_FLAGS_NO_CRIT_SECT,
7891 "E1000 Late Interrupt Timer", &pThis->pIntTimerR3);
7892 if (RT_FAILURE(rc))
7893 return rc;
7894 pThis->pIntTimerR0 = TMTimerR0Ptr(pThis->pIntTimerR3);
7895 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7896
7897 /* Create Link Up Timer */
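    /* Used to postpone reporting link-up to the guest, e.g. to honour the
     * LinkUpDelay value handled above. */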
7898 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLinkUpTimer, pThis,
7899 TMTIMER_FLAGS_NO_CRIT_SECT,
7900 "E1000 Link Up Timer", &pThis->pLUTimerR3);
7901 if (RT_FAILURE(rc))
7902 return rc;
7903 pThis->pLUTimerR0 = TMTimerR0Ptr(pThis->pLUTimerR3);
7904 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7905
7906 /* Register the info item */
7907 char szTmp[20];
7908 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
7909 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
7910
7911 /* Status driver */
7912 PPDMIBASE pBase;
7913 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThis->IBase, &pBase, "Status Port");
7914 if (RT_FAILURE(rc))
7915 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
7916 pThis->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
7917
7918 /* Network driver */
7919 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7920 if (RT_SUCCESS(rc))
7921 {
7922 if (rc == VINF_NAT_DNS)
7923 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7924                                       N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays when trying to do so"));
7925 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7926 AssertMsgReturn(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
7927
7928 pThis->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7929 pThis->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7930 }
7931 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7932 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7933 {
7934 /* No error! */
7935 E1kLog(("%s This adapter is not attached to any network!\n", pThis->szPrf));
7936 }
7937 else
7938 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
7939
7940 rc = RTSemEventCreate(&pThis->hEventMoreRxDescAvail);
7941 if (RT_FAILURE(rc))
7942 return rc;
7943
7944 rc = e1kInitDebugHelpers();
7945 if (RT_FAILURE(rc))
7946 return rc;
7947
7948 e1kHardReset(pThis);
7949
7950 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Public/Net/E1k%u/BytesReceived", iInstance);
7951 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Public/Net/E1k%u/BytesTransmitted", iInstance);
7952
7953 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
7954 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
7955
7956#if defined(VBOX_WITH_STATISTICS)
7957 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
7958 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
7959 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
7960 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
7961 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
7962 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
7963 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
7964 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
7965 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
7966 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
7967 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
7968 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occured", iInstance);
7969 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
7970 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
7971 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
7972 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
7973 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
7974 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
7975 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
7976 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflowWakeup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups", "/Devices/E1k%d/RxOverflowWakeup", iInstance);
7977 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
7978 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
7979 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
7980 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
7981
7982 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors","/Devices/E1k%d/TxDesc/ContexNormal", iInstance);
7983 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
7984 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
7985 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
7986 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
7987 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
7988 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
7989 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
7990 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
7991 for (unsigned iReg = 0; iReg < E1K_NUM_OF_REGS; iReg++)
7992 {
7993 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7994 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Reads", iInstance, g_aE1kRegMap[iReg].abbrev);
7995 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7996 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Writes", iInstance, g_aE1kRegMap[iReg].abbrev);
7997 }
7998#endif /* VBOX_WITH_STATISTICS */
7999
8000#ifdef E1K_INT_STATS
8001 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "u64ArmedAt", "/Devices/E1k%d/u64ArmedAt", iInstance);
8002 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatMaxTxDelay", "/Devices/E1k%d/uStatMaxTxDelay", iInstance);
8003 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatInt, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatInt", "/Devices/E1k%d/uStatInt", iInstance);
8004 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTry", "/Devices/E1k%d/uStatIntTry", iInstance);
8005 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLower", "/Devices/E1k%d/uStatIntLower", iInstance);
8006 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatNoIntICR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatNoIntICR", "/Devices/E1k%d/uStatNoIntICR", iInstance);
8007 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLost", "/Devices/E1k%d/iStatIntLost", iInstance);
8008 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLostOne", "/Devices/E1k%d/iStatIntLostOne", iInstance);
8009 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntIMS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntIMS", "/Devices/E1k%d/uStatIntIMS", iInstance);
8010 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntSkip", "/Devices/E1k%d/uStatIntSkip", iInstance);
8011 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLate", "/Devices/E1k%d/uStatIntLate", iInstance);
8012 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntMasked", "/Devices/E1k%d/uStatIntMasked", iInstance);
8013 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntEarly", "/Devices/E1k%d/uStatIntEarly", iInstance);
8014 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRx", "/Devices/E1k%d/uStatIntRx", iInstance);
8015 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTx", "/Devices/E1k%d/uStatIntTx", iInstance);
8016 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntICS", "/Devices/E1k%d/uStatIntICS", iInstance);
8017 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRDTR", "/Devices/E1k%d/uStatIntRDTR", iInstance);
8018 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRXDMT0", "/Devices/E1k%d/uStatIntRXDMT0", iInstance);
8019 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTXQE", "/Devices/E1k%d/uStatIntTXQE", iInstance);
8020 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxNoRS", "/Devices/E1k%d/uStatTxNoRS", iInstance);
8021 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxIDE", "/Devices/E1k%d/uStatTxIDE", iInstance);
8022 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayed", "/Devices/E1k%d/uStatTxDelayed", iInstance);
8023 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayExp", "/Devices/E1k%d/uStatTxDelayExp", iInstance);
8024 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTAD", "/Devices/E1k%d/uStatTAD", iInstance);
8025 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTID", "/Devices/E1k%d/uStatTID", iInstance);
8026 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRAD", "/Devices/E1k%d/uStatRAD", iInstance);
8027 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRID", "/Devices/E1k%d/uStatRID", iInstance);
8028 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRxFrm", "/Devices/E1k%d/uStatRxFrm", iInstance);
8029 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxFrm", "/Devices/E1k%d/uStatTxFrm", iInstance);
8030 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescCtx", "/Devices/E1k%d/uStatDescCtx", iInstance);
8031 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescDat", "/Devices/E1k%d/uStatDescDat", iInstance);
8032 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescLeg", "/Devices/E1k%d/uStatDescLeg", iInstance);
8033 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx1514", "/Devices/E1k%d/uStatTx1514", iInstance);
8034 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx2962", "/Devices/E1k%d/uStatTx2962", iInstance);
8035 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx4410", "/Devices/E1k%d/uStatTx4410", iInstance);
8036 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx5858", "/Devices/E1k%d/uStatTx5858", iInstance);
8037 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx7306", "/Devices/E1k%d/uStatTx7306", iInstance);
8038 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx8754", "/Devices/E1k%d/uStatTx8754", iInstance);
8039 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx16384", "/Devices/E1k%d/uStatTx16384", iInstance);
8040 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx32768", "/Devices/E1k%d/uStatTx32768", iInstance);
8041 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxLarge", "/Devices/E1k%d/uStatTxLarge", iInstance);
8042#endif /* E1K_INT_STATS */
8043
8044 return VINF_SUCCESS;
8045}
8046
8047/**
8048 * The device registration structure.
8049 */
8050const PDMDEVREG g_DeviceE1000 =
8051{
8052 /* Structure version. PDM_DEVREG_VERSION defines the current version. */
8053 PDM_DEVREG_VERSION,
8054 /* Device name. */
8055 "e1000",
8056 /* Name of guest context module (no path).
8057     * Only evaluated if PDM_DEVREG_FLAGS_RC is set. */
8058 "VBoxDDRC.rc",
8059 /* Name of ring-0 module (no path).
8060     * Only evaluated if PDM_DEVREG_FLAGS_R0 is set. */
8061 "VBoxDDR0.r0",
8062 /* The description of the device. The UTF-8 string pointed to shall, like this structure,
8063 * remain unchanged from registration till VM destruction. */
8064 "Intel PRO/1000 MT Desktop Ethernet.\n",
8065
8066 /* Flags, combination of the PDM_DEVREG_FLAGS_* \#defines. */
8067 PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0,
8068 /* Device class(es), combination of the PDM_DEVREG_CLASS_* \#defines. */
8069 PDM_DEVREG_CLASS_NETWORK,
8070 /* Maximum number of instances (per VM). */
8071 ~0U,
8072 /* Size of the instance data. */
8073 sizeof(E1KSTATE),
8074
8075 /* pfnConstruct */
8076 e1kR3Construct,
8077 /* pfnDestruct */
8078 e1kR3Destruct,
8079 /* pfnRelocate */
8080 e1kR3Relocate,
8081 /* pfnMemSetup */
8082 NULL,
8083 /* pfnPowerOn */
8084 NULL,
8085 /* pfnReset */
8086 e1kR3Reset,
8087 /* pfnSuspend */
8088 e1kR3Suspend,
8089 /* pfnResume */
8090 NULL,
8091 /* pfnAttach */
8092 e1kR3Attach,
8093    /* pfnDetach */
8094 e1kR3Detach,
8095 /* pfnQueryInterface */
8096 NULL,
8097 /* pfnInitComplete */
8098 NULL,
8099 /* pfnPowerOff */
8100 e1kR3PowerOff,
8101 /* pfnSoftReset */
8102 NULL,
8103
8104 /* u32VersionEnd */
8105 PDM_DEVREG_VERSION
8106};
8107
8108#endif /* IN_RING3 */
8109#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */