VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp@ 81471

Last change on this file: r81471, checked in by vboxsync, 5 years ago:

DevE1000: Missing PDMDEV_CHECK_VERSIONS_RETURN() in ring-0 constructor. bugref:9218

1/* $Id: DevE1000.cpp 81471 2019-10-23 01:42:27Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2019 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DEV_E1000
33#include <iprt/crc.h>
34#include <iprt/ctype.h>
35#include <iprt/net.h>
36#include <iprt/semaphore.h>
37#include <iprt/string.h>
38#include <iprt/time.h>
39#include <iprt/uuid.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/vmm/pdmnetifs.h>
42#include <VBox/vmm/pdmnetinline.h>
43#include <VBox/param.h>
44#include "VBoxDD.h"
45
46#include "DevEEPROM.h"
47#include "DevE1000Phy.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** @name E1000 Build Options
54 * @{ */
55/** @def E1K_INIT_RA0
56 * E1K_INIT_RA0 forces E1000 to set the first entry in the Receive Address filter
57 * table to the MAC address obtained from CFGM. Most guests read the MAC address
58 * from EEPROM and write it to RA[0] explicitly, but Mac OS X seems to depend on
59 * it being already set (see @bugref{4657}).
60 */
61#define E1K_INIT_RA0
62/** @def E1K_LSC_ON_RESET
63 * E1K_LSC_ON_RESET causes the E1000 to generate a Link Status Change
64 * interrupt after a hard reset. This makes the E1K_LSC_ON_SLU option unnecessary.
65 * With the cable unplugged, LSC is triggered for the 82543GC only.
66 */
67#define E1K_LSC_ON_RESET
68/** @def E1K_LSC_ON_SLU
69 * E1K_LSC_ON_SLU causes E1000 to generate a Link Status Change interrupt when
70 * the guest driver brings up the link via the STATUS.LU bit. Again, the only guest
71 * that requires it is Mac OS X (see @bugref{4657}).
72 */
73//#define E1K_LSC_ON_SLU
74/** @def E1K_INIT_LINKUP_DELAY
75 * E1K_INIT_LINKUP_DELAY prevents the link from going up while the driver is
76 * still initializing (see @bugref{8624}).
77 */
78#define E1K_INIT_LINKUP_DELAY_US (2000 * 1000)
79/** @def E1K_IMS_INT_DELAY_NS
80 * E1K_IMS_INT_DELAY_NS prevents interrupt storms in Windows guests on enabling
81 * interrupts (see @bugref{8624}).
82 */
83#define E1K_IMS_INT_DELAY_NS 100
84/** @def E1K_TX_DELAY
85 * E1K_TX_DELAY aims to improve the guest-host transfer rate for TCP streams by
86 * preventing packets from being sent immediately. It allows several packets to
87 * be sent in one batch, reducing the number of acknowledgments. Note that it
88 * effectively disables the R0 TX path, forcing sending in R3.
89 */
90//#define E1K_TX_DELAY 150
91/** @def E1K_USE_TX_TIMERS
92 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
93 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
94 * register. Enabling it showed no positive effect on existing guests, so it
95 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
96 * Ethernet Controllers Software Developer’s Manual" for more detailed
97 * explanation.
98 */
99//#define E1K_USE_TX_TIMERS
100/** @def E1K_NO_TAD
101 * E1K_NO_TAD disables one of two timers enabled by E1K_USE_TX_TIMERS, the
102 * Transmit Absolute Delay time. This timer sets the maximum time interval
103 * during which TX interrupts can be postponed (delayed). It has no effect
104 * if E1K_USE_TX_TIMERS is not defined.
105 */
106//#define E1K_NO_TAD
107/** @def E1K_REL_DEBUG
108 * E1K_REL_DEBUG enables debug logging at levels 1, 2 and 3 in release builds.
109 */
110//#define E1K_REL_DEBUG
111/** @def E1K_INT_STATS
112 * E1K_INT_STATS enables collection of internal statistics used for
113 * debugging delayed interrupts, etc.
114 */
115#define E1K_INT_STATS
116/** @def E1K_WITH_MSI
117 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
118 */
119//#define E1K_WITH_MSI
120/** @def E1K_WITH_TX_CS
121 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
122 */
123#define E1K_WITH_TX_CS
124/** @def E1K_WITH_TXD_CACHE
125 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
126 * single physical memory read (or two if it wraps around the end of the TX
127 * descriptor ring). It is required for proper functioning of bandwidth
128 * resource control as it allows computing the exact sizes of packets prior
129 * to allocating their buffers (see @bugref{5582}).
130 */
131#define E1K_WITH_TXD_CACHE
132/** @def E1K_WITH_RXD_CACHE
133 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
134 * single physical memory read (or two if it wraps around the end of the RX
135 * descriptor ring). Intel's packet driver for DOS needs this option in
136 * order to work properly (see @bugref{6217}).
137 */
138#define E1K_WITH_RXD_CACHE
139/** @def E1K_WITH_PREREG_MMIO
140 * E1K_WITH_PREREG_MMIO enables new-style MMIO registration and is
141 * currently only used for testing the related PDM, IOM and PGM code. */
142//#define E1K_WITH_PREREG_MMIO
143/* @} */
144/* End of Options ************************************************************/
145
146#ifdef E1K_WITH_TXD_CACHE
147/**
148 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
149 * in the state structure. It limits the number of descriptors loaded in one
150 * batch read. For example, a Linux guest may use up to 20 descriptors per
151 * TSE packet. The largest TSE packet seen (in a Windows guest) was 45 descriptors.
152 */
153# define E1K_TXD_CACHE_SIZE 64u
154#endif /* E1K_WITH_TXD_CACHE */
155
156#ifdef E1K_WITH_RXD_CACHE
157/**
158 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
159 * in the state structure. It limits the number of descriptors loaded in one
160 * batch read. For example, a Windows XP guest adds 15 RX descriptors at a time.
161 */
162# define E1K_RXD_CACHE_SIZE 16u
163#endif /* E1K_WITH_RXD_CACHE */
164
165
166/* Little helpers ************************************************************/
167#undef htons
168#undef ntohs
169#undef htonl
170#undef ntohl
171#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
172#define ntohs(x) htons(x)
173#define htonl(x) ASMByteSwapU32(x)
174#define ntohl(x) htonl(x)
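/* For illustration: the helpers above simply swap byte order, so on the
 * little-endian hosts VirtualBox runs on a host value ends up in network
 * (big-endian) order, e.g.
 *   htons(0x1234)               == 0x3412
 *   htonl(UINT32_C(0x11223344)) == UINT32_C(0x44332211)
 */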
175
176#ifndef DEBUG
177# ifdef E1K_REL_DEBUG
178# define DEBUG
179# define E1kLog(a) LogRel(a)
180# define E1kLog2(a) LogRel(a)
181# define E1kLog3(a) LogRel(a)
182# define E1kLogX(x, a) LogRel(a)
183//# define E1kLog3(a) do {} while (0)
184# else
185# define E1kLog(a) do {} while (0)
186# define E1kLog2(a) do {} while (0)
187# define E1kLog3(a) do {} while (0)
188# define E1kLogX(x, a) do {} while (0)
189# endif
190#else
191# define E1kLog(a) Log(a)
192# define E1kLog2(a) Log2(a)
193# define E1kLog3(a) Log3(a)
194# define E1kLogX(x, a) LogIt(x, LOG_GROUP, a)
195//# define E1kLog(a) do {} while (0)
196//# define E1kLog2(a) do {} while (0)
197//# define E1kLog3(a) do {} while (0)
198#endif
199
200#if 0
201# define LOG_ENABLED
202# define E1kLogRel(a) LogRel(a)
203# undef Log6
204# define Log6(a) LogRel(a)
205#else
206# define E1kLogRel(a) do { } while (0)
207#endif
208
209//#undef DEBUG
210
211#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
212
213#define E1K_INC_CNT32(cnt) \
214do { \
215 if (cnt < UINT32_MAX) \
216 cnt++; \
217} while (0)
218
219#define E1K_ADD_CNT64(cntLo, cntHi, val) \
220do { \
221 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
222 uint64_t tmp = u64Cnt; \
223 u64Cnt += val; \
224 if (tmp > u64Cnt ) \
225 u64Cnt = UINT64_MAX; \
226 cntLo = (uint32_t)u64Cnt; \
227 cntHi = (uint32_t)(u64Cnt >> 32); \
228} while (0)
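/* Usage sketch (illustrative, not a specific call site in this file): the
 * 64-bit statistics counters are split across a Low/High register pair, so
 * e.g. the "Good Octets Received" count would be bumped for a frame of
 * cbFrame bytes with a saturating add:
 *   E1K_ADD_CNT64(GORCL, GORCH, cbFrame);
 * Once the combined 64-bit value would wrap, it sticks at UINT64_MAX instead.
 */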
229
230#ifdef E1K_INT_STATS
231# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
232#else /* E1K_INT_STATS */
233# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
234#endif /* E1K_INT_STATS */
235
236
237/*****************************************************************************/
238
239typedef uint32_t E1KCHIP;
240#define E1K_CHIP_82540EM 0
241#define E1K_CHIP_82543GC 1
242#define E1K_CHIP_82545EM 2
243
244#ifdef IN_RING3
245/** Different E1000 chips. */
246static const struct E1kChips
247{
248 uint16_t uPCIVendorId;
249 uint16_t uPCIDeviceId;
250 uint16_t uPCISubsystemVendorId;
251 uint16_t uPCISubsystemId;
252 const char *pcszName;
253} g_aChips[] =
254{
255 /* Vendor Device SSVendor SubSys Name */
256 { 0x8086,
257 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
258# ifdef E1K_WITH_MSI
259 0x105E,
260# else
261 0x100E,
262# endif
263 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
264 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
265 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
266};
267#endif /* IN_RING3 */
268
269
270/* The size of register area mapped to I/O space */
271#define E1K_IOPORT_SIZE 0x8
272/* The size of memory-mapped register area */
273#define E1K_MM_SIZE 0x20000
274
275#define E1K_MAX_TX_PKT_SIZE 16288
276#define E1K_MAX_RX_PKT_SIZE 16384
277
278/*****************************************************************************/
279
280/** Gets the specified bits from the register. */
281#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
282#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
283#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
284#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
285#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
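/* Expansion example: these macros rely on the <REG>_<FIELD>_MASK/_SHIFT naming
 * convention used below, e.g. with RCTL_BSIZE_MASK/RCTL_BSIZE_SHIFT:
 *   GET_BITS(RCTL, BSIZE)    -> (RCTL & RCTL_BSIZE_MASK) >> RCTL_BSIZE_SHIFT
 *   SET_BITS(RCTL, BSIZE, 2) -> RCTL = (RCTL & ~RCTL_BSIZE_MASK)
 *                                      | (2 << RCTL_BSIZE_SHIFT)
 */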
286
287#define CTRL_SLU UINT32_C(0x00000040)
288#define CTRL_MDIO UINT32_C(0x00100000)
289#define CTRL_MDC UINT32_C(0x00200000)
290#define CTRL_MDIO_DIR UINT32_C(0x01000000)
291#define CTRL_MDC_DIR UINT32_C(0x02000000)
292#define CTRL_RESET UINT32_C(0x04000000)
293#define CTRL_VME UINT32_C(0x40000000)
294
295#define STATUS_LU UINT32_C(0x00000002)
296#define STATUS_TXOFF UINT32_C(0x00000010)
297
298#define EECD_EE_WIRES UINT32_C(0x0F)
299#define EECD_EE_REQ UINT32_C(0x40)
300#define EECD_EE_GNT UINT32_C(0x80)
301
302#define EERD_START UINT32_C(0x00000001)
303#define EERD_DONE UINT32_C(0x00000010)
304#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
305#define EERD_DATA_SHIFT 16
306#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
307#define EERD_ADDR_SHIFT 8
308
309#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
310#define MDIC_DATA_SHIFT 0
311#define MDIC_REG_MASK UINT32_C(0x001F0000)
312#define MDIC_REG_SHIFT 16
313#define MDIC_PHY_MASK UINT32_C(0x03E00000)
314#define MDIC_PHY_SHIFT 21
315#define MDIC_OP_WRITE UINT32_C(0x04000000)
316#define MDIC_OP_READ UINT32_C(0x08000000)
317#define MDIC_READY UINT32_C(0x10000000)
318#define MDIC_INT_EN UINT32_C(0x20000000)
319#define MDIC_ERROR UINT32_C(0x40000000)
320
321#define TCTL_EN UINT32_C(0x00000002)
322#define TCTL_PSP UINT32_C(0x00000008)
323
324#define RCTL_EN UINT32_C(0x00000002)
325#define RCTL_UPE UINT32_C(0x00000008)
326#define RCTL_MPE UINT32_C(0x00000010)
327#define RCTL_LPE UINT32_C(0x00000020)
328#define RCTL_LBM_MASK UINT32_C(0x000000C0)
329#define RCTL_LBM_SHIFT 6
330#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
331#define RCTL_RDMTS_SHIFT 8
332#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
333#define RCTL_MO_MASK UINT32_C(0x00003000)
334#define RCTL_MO_SHIFT 12
335#define RCTL_BAM UINT32_C(0x00008000)
336#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
337#define RCTL_BSIZE_SHIFT 16
338#define RCTL_VFE UINT32_C(0x00040000)
339#define RCTL_CFIEN UINT32_C(0x00080000)
340#define RCTL_CFI UINT32_C(0x00100000)
341#define RCTL_BSEX UINT32_C(0x02000000)
342#define RCTL_SECRC UINT32_C(0x04000000)
343
344#define ICR_TXDW UINT32_C(0x00000001)
345#define ICR_TXQE UINT32_C(0x00000002)
346#define ICR_LSC UINT32_C(0x00000004)
347#define ICR_RXDMT0 UINT32_C(0x00000010)
348#define ICR_RXT0 UINT32_C(0x00000080)
349#define ICR_TXD_LOW UINT32_C(0x00008000)
350#define RDTR_FPD UINT32_C(0x80000000)
351
352#define PBA_st ((PBAST*)(pThis->auRegs + PBA_IDX))
353typedef struct
354{
355 unsigned rxa : 7;
356 unsigned rxa_r : 9;
357 unsigned txa : 16;
358} PBAST;
359AssertCompileSize(PBAST, 4);
360
361#define TXDCTL_WTHRESH_MASK 0x003F0000
362#define TXDCTL_WTHRESH_SHIFT 16
363#define TXDCTL_LWTHRESH_MASK 0xFE000000
364#define TXDCTL_LWTHRESH_SHIFT 25
365
366#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
367#define RXCSUM_PCSS_SHIFT 0
368
369/** @name Register access macros
370 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
371 * @{ */
372#define CTRL pThis->auRegs[CTRL_IDX]
373#define STATUS pThis->auRegs[STATUS_IDX]
374#define EECD pThis->auRegs[EECD_IDX]
375#define EERD pThis->auRegs[EERD_IDX]
376#define CTRL_EXT pThis->auRegs[CTRL_EXT_IDX]
377#define FLA pThis->auRegs[FLA_IDX]
378#define MDIC pThis->auRegs[MDIC_IDX]
379#define FCAL pThis->auRegs[FCAL_IDX]
380#define FCAH pThis->auRegs[FCAH_IDX]
381#define FCT pThis->auRegs[FCT_IDX]
382#define VET pThis->auRegs[VET_IDX]
383#define ICR pThis->auRegs[ICR_IDX]
384#define ITR pThis->auRegs[ITR_IDX]
385#define ICS pThis->auRegs[ICS_IDX]
386#define IMS pThis->auRegs[IMS_IDX]
387#define IMC pThis->auRegs[IMC_IDX]
388#define RCTL pThis->auRegs[RCTL_IDX]
389#define FCTTV pThis->auRegs[FCTTV_IDX]
390#define TXCW pThis->auRegs[TXCW_IDX]
391#define RXCW pThis->auRegs[RXCW_IDX]
392#define TCTL pThis->auRegs[TCTL_IDX]
393#define TIPG pThis->auRegs[TIPG_IDX]
394#define AIFS pThis->auRegs[AIFS_IDX]
395#define LEDCTL pThis->auRegs[LEDCTL_IDX]
396#define PBA pThis->auRegs[PBA_IDX]
397#define FCRTL pThis->auRegs[FCRTL_IDX]
398#define FCRTH pThis->auRegs[FCRTH_IDX]
399#define RDFH pThis->auRegs[RDFH_IDX]
400#define RDFT pThis->auRegs[RDFT_IDX]
401#define RDFHS pThis->auRegs[RDFHS_IDX]
402#define RDFTS pThis->auRegs[RDFTS_IDX]
403#define RDFPC pThis->auRegs[RDFPC_IDX]
404#define RDBAL pThis->auRegs[RDBAL_IDX]
405#define RDBAH pThis->auRegs[RDBAH_IDX]
406#define RDLEN pThis->auRegs[RDLEN_IDX]
407#define RDH pThis->auRegs[RDH_IDX]
408#define RDT pThis->auRegs[RDT_IDX]
409#define RDTR pThis->auRegs[RDTR_IDX]
410#define RXDCTL pThis->auRegs[RXDCTL_IDX]
411#define RADV pThis->auRegs[RADV_IDX]
412#define RSRPD pThis->auRegs[RSRPD_IDX]
413#define TXDMAC pThis->auRegs[TXDMAC_IDX]
414#define TDFH pThis->auRegs[TDFH_IDX]
415#define TDFT pThis->auRegs[TDFT_IDX]
416#define TDFHS pThis->auRegs[TDFHS_IDX]
417#define TDFTS pThis->auRegs[TDFTS_IDX]
418#define TDFPC pThis->auRegs[TDFPC_IDX]
419#define TDBAL pThis->auRegs[TDBAL_IDX]
420#define TDBAH pThis->auRegs[TDBAH_IDX]
421#define TDLEN pThis->auRegs[TDLEN_IDX]
422#define TDH pThis->auRegs[TDH_IDX]
423#define TDT pThis->auRegs[TDT_IDX]
424#define TIDV pThis->auRegs[TIDV_IDX]
425#define TXDCTL pThis->auRegs[TXDCTL_IDX]
426#define TADV pThis->auRegs[TADV_IDX]
427#define TSPMT pThis->auRegs[TSPMT_IDX]
428#define CRCERRS pThis->auRegs[CRCERRS_IDX]
429#define ALGNERRC pThis->auRegs[ALGNERRC_IDX]
430#define SYMERRS pThis->auRegs[SYMERRS_IDX]
431#define RXERRC pThis->auRegs[RXERRC_IDX]
432#define MPC pThis->auRegs[MPC_IDX]
433#define SCC pThis->auRegs[SCC_IDX]
434#define ECOL pThis->auRegs[ECOL_IDX]
435#define MCC pThis->auRegs[MCC_IDX]
436#define LATECOL pThis->auRegs[LATECOL_IDX]
437#define COLC pThis->auRegs[COLC_IDX]
438#define DC pThis->auRegs[DC_IDX]
439#define TNCRS pThis->auRegs[TNCRS_IDX]
440/* #define SEC pThis->auRegs[SEC_IDX] Conflict with sys/time.h */
441#define CEXTERR pThis->auRegs[CEXTERR_IDX]
442#define RLEC pThis->auRegs[RLEC_IDX]
443#define XONRXC pThis->auRegs[XONRXC_IDX]
444#define XONTXC pThis->auRegs[XONTXC_IDX]
445#define XOFFRXC pThis->auRegs[XOFFRXC_IDX]
446#define XOFFTXC pThis->auRegs[XOFFTXC_IDX]
447#define FCRUC pThis->auRegs[FCRUC_IDX]
448#define PRC64 pThis->auRegs[PRC64_IDX]
449#define PRC127 pThis->auRegs[PRC127_IDX]
450#define PRC255 pThis->auRegs[PRC255_IDX]
451#define PRC511 pThis->auRegs[PRC511_IDX]
452#define PRC1023 pThis->auRegs[PRC1023_IDX]
453#define PRC1522 pThis->auRegs[PRC1522_IDX]
454#define GPRC pThis->auRegs[GPRC_IDX]
455#define BPRC pThis->auRegs[BPRC_IDX]
456#define MPRC pThis->auRegs[MPRC_IDX]
457#define GPTC pThis->auRegs[GPTC_IDX]
458#define GORCL pThis->auRegs[GORCL_IDX]
459#define GORCH pThis->auRegs[GORCH_IDX]
460#define GOTCL pThis->auRegs[GOTCL_IDX]
461#define GOTCH pThis->auRegs[GOTCH_IDX]
462#define RNBC pThis->auRegs[RNBC_IDX]
463#define RUC pThis->auRegs[RUC_IDX]
464#define RFC pThis->auRegs[RFC_IDX]
465#define ROC pThis->auRegs[ROC_IDX]
466#define RJC pThis->auRegs[RJC_IDX]
467#define MGTPRC pThis->auRegs[MGTPRC_IDX]
468#define MGTPDC pThis->auRegs[MGTPDC_IDX]
469#define MGTPTC pThis->auRegs[MGTPTC_IDX]
470#define TORL pThis->auRegs[TORL_IDX]
471#define TORH pThis->auRegs[TORH_IDX]
472#define TOTL pThis->auRegs[TOTL_IDX]
473#define TOTH pThis->auRegs[TOTH_IDX]
474#define TPR pThis->auRegs[TPR_IDX]
475#define TPT pThis->auRegs[TPT_IDX]
476#define PTC64 pThis->auRegs[PTC64_IDX]
477#define PTC127 pThis->auRegs[PTC127_IDX]
478#define PTC255 pThis->auRegs[PTC255_IDX]
479#define PTC511 pThis->auRegs[PTC511_IDX]
480#define PTC1023 pThis->auRegs[PTC1023_IDX]
481#define PTC1522 pThis->auRegs[PTC1522_IDX]
482#define MPTC pThis->auRegs[MPTC_IDX]
483#define BPTC pThis->auRegs[BPTC_IDX]
484#define TSCTC pThis->auRegs[TSCTC_IDX]
485#define TSCTFC pThis->auRegs[TSCTFC_IDX]
486#define RXCSUM pThis->auRegs[RXCSUM_IDX]
487#define WUC pThis->auRegs[WUC_IDX]
488#define WUFC pThis->auRegs[WUFC_IDX]
489#define WUS pThis->auRegs[WUS_IDX]
490#define MANC pThis->auRegs[MANC_IDX]
491#define IPAV pThis->auRegs[IPAV_IDX]
492#define WUPL pThis->auRegs[WUPL_IDX]
493/** @} */
494
495/**
496 * Indices of memory-mapped registers in register table.
497 */
498typedef enum
499{
500 CTRL_IDX,
501 STATUS_IDX,
502 EECD_IDX,
503 EERD_IDX,
504 CTRL_EXT_IDX,
505 FLA_IDX,
506 MDIC_IDX,
507 FCAL_IDX,
508 FCAH_IDX,
509 FCT_IDX,
510 VET_IDX,
511 ICR_IDX,
512 ITR_IDX,
513 ICS_IDX,
514 IMS_IDX,
515 IMC_IDX,
516 RCTL_IDX,
517 FCTTV_IDX,
518 TXCW_IDX,
519 RXCW_IDX,
520 TCTL_IDX,
521 TIPG_IDX,
522 AIFS_IDX,
523 LEDCTL_IDX,
524 PBA_IDX,
525 FCRTL_IDX,
526 FCRTH_IDX,
527 RDFH_IDX,
528 RDFT_IDX,
529 RDFHS_IDX,
530 RDFTS_IDX,
531 RDFPC_IDX,
532 RDBAL_IDX,
533 RDBAH_IDX,
534 RDLEN_IDX,
535 RDH_IDX,
536 RDT_IDX,
537 RDTR_IDX,
538 RXDCTL_IDX,
539 RADV_IDX,
540 RSRPD_IDX,
541 TXDMAC_IDX,
542 TDFH_IDX,
543 TDFT_IDX,
544 TDFHS_IDX,
545 TDFTS_IDX,
546 TDFPC_IDX,
547 TDBAL_IDX,
548 TDBAH_IDX,
549 TDLEN_IDX,
550 TDH_IDX,
551 TDT_IDX,
552 TIDV_IDX,
553 TXDCTL_IDX,
554 TADV_IDX,
555 TSPMT_IDX,
556 CRCERRS_IDX,
557 ALGNERRC_IDX,
558 SYMERRS_IDX,
559 RXERRC_IDX,
560 MPC_IDX,
561 SCC_IDX,
562 ECOL_IDX,
563 MCC_IDX,
564 LATECOL_IDX,
565 COLC_IDX,
566 DC_IDX,
567 TNCRS_IDX,
568 SEC_IDX,
569 CEXTERR_IDX,
570 RLEC_IDX,
571 XONRXC_IDX,
572 XONTXC_IDX,
573 XOFFRXC_IDX,
574 XOFFTXC_IDX,
575 FCRUC_IDX,
576 PRC64_IDX,
577 PRC127_IDX,
578 PRC255_IDX,
579 PRC511_IDX,
580 PRC1023_IDX,
581 PRC1522_IDX,
582 GPRC_IDX,
583 BPRC_IDX,
584 MPRC_IDX,
585 GPTC_IDX,
586 GORCL_IDX,
587 GORCH_IDX,
588 GOTCL_IDX,
589 GOTCH_IDX,
590 RNBC_IDX,
591 RUC_IDX,
592 RFC_IDX,
593 ROC_IDX,
594 RJC_IDX,
595 MGTPRC_IDX,
596 MGTPDC_IDX,
597 MGTPTC_IDX,
598 TORL_IDX,
599 TORH_IDX,
600 TOTL_IDX,
601 TOTH_IDX,
602 TPR_IDX,
603 TPT_IDX,
604 PTC64_IDX,
605 PTC127_IDX,
606 PTC255_IDX,
607 PTC511_IDX,
608 PTC1023_IDX,
609 PTC1522_IDX,
610 MPTC_IDX,
611 BPTC_IDX,
612 TSCTC_IDX,
613 TSCTFC_IDX,
614 RXCSUM_IDX,
615 WUC_IDX,
616 WUFC_IDX,
617 WUS_IDX,
618 MANC_IDX,
619 IPAV_IDX,
620 WUPL_IDX,
621 MTA_IDX,
622 RA_IDX,
623 VFTA_IDX,
624 IP4AT_IDX,
625 IP6AT_IDX,
626 WUPM_IDX,
627 FFLT_IDX,
628 FFMT_IDX,
629 FFVT_IDX,
630 PBM_IDX,
631 RA_82542_IDX,
632 MTA_82542_IDX,
633 VFTA_82542_IDX,
634 E1K_NUM_OF_REGS
635} E1kRegIndex;
636
637#define E1K_NUM_OF_32BIT_REGS MTA_IDX
638/** The number of registers with strictly increasing offset. */
639#define E1K_NUM_OF_BINARY_SEARCHABLE (WUPL_IDX + 1)
640
641
642/**
643 * Define E1000-specific EEPROM layout.
644 */
645struct E1kEEPROM
646{
647 public:
648 EEPROM93C46 eeprom;
649
650#ifdef IN_RING3
651 /**
652 * Initialize EEPROM content.
653 *
654 * @param macAddr MAC address of E1000.
655 */
656 void init(RTMAC &macAddr)
657 {
658 eeprom.init();
659 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
660 eeprom.m_au16Data[0x04] = 0xFFFF;
661 /*
662 * bit 3 - full support for power management
663 * bit 10 - full duplex
664 */
665 eeprom.m_au16Data[0x0A] = 0x4408;
666 eeprom.m_au16Data[0x0B] = 0x001E;
667 eeprom.m_au16Data[0x0C] = 0x8086;
668 eeprom.m_au16Data[0x0D] = 0x100E;
669 eeprom.m_au16Data[0x0E] = 0x8086;
670 eeprom.m_au16Data[0x0F] = 0x3040;
671 eeprom.m_au16Data[0x21] = 0x7061;
672 eeprom.m_au16Data[0x22] = 0x280C;
673 eeprom.m_au16Data[0x23] = 0x00C8;
674 eeprom.m_au16Data[0x24] = 0x00C8;
675 eeprom.m_au16Data[0x2F] = 0x0602;
676 updateChecksum();
677 };
678
679 /**
680 * Compute the checksum as required by E1000 and store it
681 * in the last word.
682 */
683 void updateChecksum()
684 {
685 uint16_t u16Checksum = 0;
686
687 for (int i = 0; i < eeprom.SIZE-1; i++)
688 u16Checksum += eeprom.m_au16Data[i];
689 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
690 };
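    /* Checksum invariant (illustrative check only): after updateChecksum()
     * the 16-bit sum of all eeprom.SIZE words, including the checksum word
     * itself, equals 0xBABA, which is what 8254x drivers verify:
     *   uint16_t u16Sum = 0;
     *   for (int i = 0; i < eeprom.SIZE; i++)
     *       u16Sum += eeprom.m_au16Data[i];
     *   Assert(u16Sum == 0xBABA);
     */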
691
692 /**
693 * The first 6 bytes of the EEPROM contain the MAC address.
694 *
695 * @param pMac Where to store the MAC address of the E1000.
696 */
697 void getMac(PRTMAC pMac)
698 {
699 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
700 };
701
702 uint32_t read()
703 {
704 return eeprom.read();
705 }
706
707 void write(uint32_t u32Wires)
708 {
709 eeprom.write(u32Wires);
710 }
711
712 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
713 {
714 return eeprom.readWord(u32Addr, pu16Value);
715 }
716
717 int load(PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
718 {
719 return eeprom.load(pHlp, pSSM);
720 }
721
722 void save(PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
723 {
724 eeprom.save(pHlp, pSSM);
725 }
726#endif /* IN_RING3 */
727};
728
729
730#define E1K_SPEC_VLAN(s) (s & 0xFFF)
731#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
732#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
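/* Decoding example: for a VLAN tag TCI of 0xA064 the macros above give
 * E1K_SPEC_VLAN = 0x064 (VLAN id 100), E1K_SPEC_CFI = 0 (canonical format)
 * and E1K_SPEC_PRI = 5 (priority 5). */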
733
734struct E1kRxDStatus
735{
736 /** @name Descriptor Status field (3.2.3.1)
737 * @{ */
738 unsigned fDD : 1; /**< Descriptor Done. */
739 unsigned fEOP : 1; /**< End of packet. */
740 unsigned fIXSM : 1; /**< Ignore checksum indication. */
741 unsigned fVP : 1; /**< VLAN, matches VET. */
742 unsigned : 1;
743 unsigned fTCPCS : 1; /**< TCP Checksum calculated on the packet. */
744 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
745 unsigned fPIF : 1; /**< Passed in-exact filter */
746 /** @} */
747 /** @name Descriptor Errors field (3.2.3.2)
748 * (Only valid when fEOP and fDD are set.)
749 * @{ */
750 unsigned fCE : 1; /**< CRC or alignment error. */
751 unsigned : 4; /**< Reserved, varies with different models... */
752 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
753 unsigned fIPE : 1; /**< IP Checksum error. */
754 unsigned fRXE : 1; /**< RX Data error. */
755 /** @} */
756 /** @name Descriptor Special field (3.2.3.3)
757 * @{ */
758 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
759 /** @} */
760};
761typedef struct E1kRxDStatus E1KRXDST;
762
763struct E1kRxDesc_st
764{
765 uint64_t u64BufAddr; /**< Address of data buffer */
766 uint16_t u16Length; /**< Length of data in buffer */
767 uint16_t u16Checksum; /**< Packet checksum */
768 E1KRXDST status;
769};
770typedef struct E1kRxDesc_st E1KRXDESC;
771AssertCompileSize(E1KRXDESC, 16);
772
773#define E1K_DTYP_LEGACY -1
774#define E1K_DTYP_CONTEXT 0
775#define E1K_DTYP_DATA 1
776
777struct E1kTDLegacy
778{
779 uint64_t u64BufAddr; /**< Address of data buffer */
780 struct TDLCmd_st
781 {
782 unsigned u16Length : 16;
783 unsigned u8CSO : 8;
784 /* CMD field : 8 */
785 unsigned fEOP : 1;
786 unsigned fIFCS : 1;
787 unsigned fIC : 1;
788 unsigned fRS : 1;
789 unsigned fRPS : 1;
790 unsigned fDEXT : 1;
791 unsigned fVLE : 1;
792 unsigned fIDE : 1;
793 } cmd;
794 struct TDLDw3_st
795 {
796 /* STA field */
797 unsigned fDD : 1;
798 unsigned fEC : 1;
799 unsigned fLC : 1;
800 unsigned fTURSV : 1;
801 /* RSV field */
802 unsigned u4RSV : 4;
803 /* CSS field */
804 unsigned u8CSS : 8;
805 /* Special field*/
806 unsigned u16Special: 16;
807 } dw3;
808};
809
810/**
811 * TCP/IP Context Transmit Descriptor, section 3.3.6.
812 */
813struct E1kTDContext
814{
815 struct CheckSum_st
816 {
817 /** TSE: Header start. !TSE: Checksum start. */
818 unsigned u8CSS : 8;
819 /** Checksum offset - where to store it. */
820 unsigned u8CSO : 8;
821 /** Checksum ending (inclusive) offset, 0 = end of packet. */
822 unsigned u16CSE : 16;
823 } ip;
824 struct CheckSum_st tu;
825 struct TDCDw2_st
826 {
827 /** TSE: The total number of payload bytes for this context. Sans header. */
828 unsigned u20PAYLEN : 20;
829 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
830 unsigned u4DTYP : 4;
831 /** TUCMD field, 8 bits
832 * @{ */
833 /** TSE: TCP (set) or UDP (clear). */
834 unsigned fTCP : 1;
835 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
836 * the IP header. Does not affect the checksumming.
837 * @remarks 82544GC/EI interprets a cleared field differently. */
838 unsigned fIP : 1;
839 /** TSE: TCP segmentation enable. When clear, the context describes checksum offloading only. */
840 unsigned fTSE : 1;
841 /** Report status (only applies to dw3.fDD for here). */
842 unsigned fRS : 1;
843 /** Reserved, MBZ. */
844 unsigned fRSV1 : 1;
845 /** Descriptor extension, must be set for this descriptor type. */
846 unsigned fDEXT : 1;
847 /** Reserved, MBZ. */
848 unsigned fRSV2 : 1;
849 /** Interrupt delay enable. */
850 unsigned fIDE : 1;
851 /** @} */
852 } dw2;
853 struct TDCDw3_st
854 {
855 /** Descriptor Done. */
856 unsigned fDD : 1;
857 /** Reserved, MBZ. */
858 unsigned u7RSV : 7;
859 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
860 unsigned u8HDRLEN : 8;
861 /** TSO: Maximum segment size. */
862 unsigned u16MSS : 16;
863 } dw3;
864};
865typedef struct E1kTDContext E1KTXCTX;
866
867/**
868 * TCP/IP Data Transmit Descriptor, section 3.3.7.
869 */
870struct E1kTDData
871{
872 uint64_t u64BufAddr; /**< Address of data buffer */
873 struct TDDCmd_st
874 {
875 /** The total length of data pointed to by this descriptor. */
876 unsigned u20DTALEN : 20;
877 /** The descriptor type - E1K_DTYP_DATA (1). */
878 unsigned u4DTYP : 4;
879 /** @name DCMD field, 8 bits (3.3.7.1).
880 * @{ */
881 /** End of packet. Note TSCTFC update. */
882 unsigned fEOP : 1;
883 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
884 unsigned fIFCS : 1;
885 /** Use the TSE context when set and the normal when clear. */
886 unsigned fTSE : 1;
887 /** Report status (dw3.STA). */
888 unsigned fRS : 1;
889 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
890 unsigned fRPS : 1;
891 /** Descriptor extension, must be set for this descriptor type. */
892 unsigned fDEXT : 1;
893 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
894 * Insert dw3.SPECIAL after ethernet header. */
895 unsigned fVLE : 1;
896 /** Interrupt delay enable. */
897 unsigned fIDE : 1;
898 /** @} */
899 } cmd;
900 struct TDDDw3_st
901 {
902 /** @name STA field (3.3.7.2)
903 * @{ */
904 unsigned fDD : 1; /**< Descriptor done. */
905 unsigned fEC : 1; /**< Excess collision. */
906 unsigned fLC : 1; /**< Late collision. */
907 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
908 unsigned fTURSV : 1;
909 /** @} */
910 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
911 /** @name POPTS (Packet Option) field (3.3.7.3)
912 * @{ */
913 unsigned fIXSM : 1; /**< Insert IP checksum. */
914 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
915 unsigned u6RSV : 6; /**< Reserved, MBZ. */
916 /** @} */
917 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
918 * Requires fEOP, fVLE and CTRL.VME to be set.
919 * @{ */
920 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
921 /** @} */
922 } dw3;
923};
924typedef struct E1kTDData E1KTXDAT;
925
926union E1kTxDesc
927{
928 struct E1kTDLegacy legacy;
929 struct E1kTDContext context;
930 struct E1kTDData data;
931};
932typedef union E1kTxDesc E1KTXDESC;
933AssertCompileSize(E1KTXDESC, 16);
934
935#define RA_CTL_AS 0x0003
936#define RA_CTL_AV 0x8000
937
938union E1kRecAddr
939{
940 uint32_t au32[32];
941 struct RAArray
942 {
943 uint8_t addr[6];
944 uint16_t ctl;
945 } array[16];
946};
947typedef struct E1kRecAddr::RAArray E1KRAELEM;
948typedef union E1kRecAddr E1KRA;
949AssertCompileSize(E1KRA, 8*16);
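/* Hypothetical sketch of how E1K_INIT_RA0 primes the first receive-address
 * entry from the configured MAC (variable names here are illustrative only):
 *   E1KRA ra;
 *   memcpy(ra.array[0].addr, macConfigured.au8, sizeof(ra.array[0].addr));
 *   ra.array[0].ctl = RA_CTL_AV;  // RA_CTL_AV marks the entry as valid
 */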
950
951#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
952#define E1K_IP_DF UINT16_C(0x4000) /**< don't fragment flag */
953#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
954#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
955
956/** @todo use+extend RTNETIPV4 */
957struct E1kIpHeader
958{
959 /* type of service / version / header length */
960 uint16_t tos_ver_hl;
961 /* total length */
962 uint16_t total_len;
963 /* identification */
964 uint16_t ident;
965 /* fragment offset field */
966 uint16_t offset;
967 /* time to live / protocol*/
968 uint16_t ttl_proto;
969 /* checksum */
970 uint16_t chksum;
971 /* source IP address */
972 uint32_t src;
973 /* destination IP address */
974 uint32_t dest;
975};
976AssertCompileSize(struct E1kIpHeader, 20);
977
978#define E1K_TCP_FIN UINT16_C(0x01)
979#define E1K_TCP_SYN UINT16_C(0x02)
980#define E1K_TCP_RST UINT16_C(0x04)
981#define E1K_TCP_PSH UINT16_C(0x08)
982#define E1K_TCP_ACK UINT16_C(0x10)
983#define E1K_TCP_URG UINT16_C(0x20)
984#define E1K_TCP_ECE UINT16_C(0x40)
985#define E1K_TCP_CWR UINT16_C(0x80)
986#define E1K_TCP_FLAGS UINT16_C(0x3f)
987
988/** @todo use+extend RTNETTCP */
989struct E1kTcpHeader
990{
991 uint16_t src;
992 uint16_t dest;
993 uint32_t seqno;
994 uint32_t ackno;
995 uint16_t hdrlen_flags;
996 uint16_t wnd;
997 uint16_t chksum;
998 uint16_t urgp;
999};
1000AssertCompileSize(struct E1kTcpHeader, 20);
1001
1002
1003#ifdef E1K_WITH_TXD_CACHE
1004/** The current Saved state version. */
1005# define E1K_SAVEDSTATE_VERSION 4
1006/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
1007# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
1008#else /* !E1K_WITH_TXD_CACHE */
1009/** The current Saved state version. */
1010# define E1K_SAVEDSTATE_VERSION 3
1011#endif /* !E1K_WITH_TXD_CACHE */
1012/** Saved state version for VirtualBox 4.1 and earlier.
1013 * These did not include VLAN tag fields. */
1014#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
1015/** Saved state version for VirtualBox 3.0 and earlier.
1016 * This did not include the configuration part nor the E1kEEPROM. */
1017#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
1018
1019/**
1020 * E1000 shared device state.
1021 *
1022 * This is shared between ring-0 and ring-3.
1023 */
1024typedef struct E1KSTATE
1025{
1026 char szPrf[8]; /**< Log prefix, e.g. E1000#1. */
1027
1028 /** Handle to PCI region \#0, the MMIO region. */
1029 IOMIOPORTHANDLE hMmioRegion;
1030 /** Handle to PCI region \#2, the I/O ports. */
1031 IOMIOPORTHANDLE hIoPorts;
1032
1033 /** Receive Interrupt Delay Timer. */
1034 TMTIMERHANDLE hRIDTimer;
1035 /** Receive Absolute Delay Timer. */
1036 TMTIMERHANDLE hRADTimer;
1037 /** Transmit Interrupt Delay Timer. */
1038 TMTIMERHANDLE hTIDTimer;
1039 /** Transmit Absolute Delay Timer. */
1040 TMTIMERHANDLE hTADTimer;
1041 /** Transmit Delay Timer. */
1042 TMTIMERHANDLE hTXDTimer;
1043 /** Late Interrupt Timer. */
1044 TMTIMERHANDLE hIntTimer;
1045 /** Link Up(/Restore) Timer. */
1046 TMTIMERHANDLE hLUTimer;
1047
1048 /** Transmit task. */
1049 PDMTASKHANDLE hTxTask;
1050
1051 /** Critical section - what is it protecting? */
1052 PDMCRITSECT cs;
1053 /** RX Critical section. */
1054 PDMCRITSECT csRx;
1055#ifdef E1K_WITH_TX_CS
1056 /** TX Critical section. */
1057 PDMCRITSECT csTx;
1058#endif /* E1K_WITH_TX_CS */
1059 /** Base address of memory-mapped registers. */
1060 RTGCPHYS addrMMReg;
1061 /** MAC address obtained from the configuration. */
1062 RTMAC macConfigured;
1063 /** Base port of I/O space region. */
1064 RTIOPORT IOPortBase;
1065 /** EMT: Last time the interrupt was acknowledged. */
1066 uint64_t u64AckedAt;
1067 /** All: Used for eliminating spurious interrupts. */
1068 bool fIntRaised;
1069 /** EMT: false if the cable is disconnected by the GUI. */
1070 bool fCableConnected;
1071 /** EMT: Compute Ethernet CRC for RX packets. */
1072 bool fEthernetCRC;
1073 /** All: throttle interrupts. */
1074 bool fItrEnabled;
1075 /** All: throttle RX interrupts. */
1076 bool fItrRxEnabled;
1077 /** All: Delay TX interrupts using TIDV/TADV. */
1078 bool fTidEnabled;
1079 bool afPadding[2];
1080 /** Link up delay (in milliseconds). */
1081 uint32_t cMsLinkUpDelay;
1082
1083 /** All: Device register storage. */
1084 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1085 /** TX/RX: Status LED. */
1086 PDMLED led;
1087 /** TX/RX: Number of packet being sent/received to show in debug log. */
1088 uint32_t u32PktNo;
1089
1090 /** EMT: Offset of the register to be read via IO. */
1091 uint32_t uSelectedReg;
1092 /** EMT: Multicast Table Array. */
1093 uint32_t auMTA[128];
1094 /** EMT: Receive Address registers. */
1095 E1KRA aRecAddr;
1096 /** EMT: VLAN filter table array. */
1097 uint32_t auVFTA[128];
1098 /** EMT: Receive buffer size. */
1099 uint16_t u16RxBSize;
1100 /** EMT: Locked state -- no state alteration possible. */
1101 bool fLocked;
1102 /** EMT: */
1103 bool fDelayInts;
1104 /** All: */
1105 bool fIntMaskUsed;
1106
1107 /** N/A: */
1108 bool volatile fMaybeOutOfSpace;
1109 /** EMT: Gets signalled when more RX descriptors become available. */
1110 SUPSEMEVENT hEventMoreRxDescAvail;
1111#ifdef E1K_WITH_RXD_CACHE
1112 /** RX: Fetched RX descriptors. */
1113 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1114 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1115 /** RX: Actual number of fetched RX descriptors. */
1116 uint32_t nRxDFetched;
1117 /** RX: Index in cache of RX descriptor being processed. */
1118 uint32_t iRxDCurrent;
1119#endif /* E1K_WITH_RXD_CACHE */
1120
1121 /** TX: Context used for TCP segmentation packets. */
1122 E1KTXCTX contextTSE;
1123 /** TX: Context used for ordinary packets. */
1124 E1KTXCTX contextNormal;
1125#ifdef E1K_WITH_TXD_CACHE
1126 /** TX: Fetched TX descriptors. */
1127 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1128 /** TX: Actual number of fetched TX descriptors. */
1129 uint8_t nTxDFetched;
1130 /** TX: Index in cache of TX descriptor being processed. */
1131 uint8_t iTxDCurrent;
1132 /** TX: Will this frame be sent as GSO. */
1133 bool fGSO;
1134 /** Alignment padding. */
1135 bool fReserved;
1136 /** TX: Number of bytes in next packet. */
1137 uint32_t cbTxAlloc;
1138
1139#endif /* E1K_WITH_TXD_CACHE */
1140 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1141 * applicable to the current TSE mode. */
1142 PDMNETWORKGSO GsoCtx;
1143 /** Scratch space for holding the loopback / fallback scatter / gather
1144 * descriptor. */
1145 union
1146 {
1147 PDMSCATTERGATHER Sg;
1148 uint8_t padding[8 * sizeof(RTUINTPTR)];
1149 } uTxFallback;
1150 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1151 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1152 /** TX: Number of bytes assembled in TX packet buffer. */
1153 uint16_t u16TxPktLen;
1154 /** TX: When false, forces segmentation in the E1000 instead of sending frames as GSO. */
1155 bool fGSOEnabled;
1156 /** TX: IP checksum has to be inserted if true. */
1157 bool fIPcsum;
1158 /** TX: TCP/UDP checksum has to be inserted if true. */
1159 bool fTCPcsum;
1160 /** TX: VLAN tag has to be inserted if true. */
1161 bool fVTag;
1162 /** TX: TCI part of VLAN tag to be inserted. */
1163 uint16_t u16VTagTCI;
1164 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1165 uint32_t u32PayRemain;
1166 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1167 uint16_t u16HdrRemain;
1168 /** TX TSE fallback: Flags from template header. */
1169 uint16_t u16SavedFlags;
1170 /** TX TSE fallback: Partial checksum from template header. */
1171 uint32_t u32SavedCsum;
1172 /** ?: Emulated controller type. */
1173 E1KCHIP eChip;
1174
1175 /** EMT: Physical interface emulation. */
1176 PHY phy;
1177
1178#if 0
1179 /** Alignment padding. */
1180 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1181#endif
1182
1183 STAMCOUNTER StatReceiveBytes;
1184 STAMCOUNTER StatTransmitBytes;
1185#if defined(VBOX_WITH_STATISTICS)
1186 STAMPROFILEADV StatMMIOReadRZ;
1187 STAMPROFILEADV StatMMIOReadR3;
1188 STAMPROFILEADV StatMMIOWriteRZ;
1189 STAMPROFILEADV StatMMIOWriteR3;
1190 STAMPROFILEADV StatEEPROMRead;
1191 STAMPROFILEADV StatEEPROMWrite;
1192 STAMPROFILEADV StatIOReadRZ;
1193 STAMPROFILEADV StatIOReadR3;
1194 STAMPROFILEADV StatIOWriteRZ;
1195 STAMPROFILEADV StatIOWriteR3;
1196 STAMPROFILEADV StatLateIntTimer;
1197 STAMCOUNTER StatLateInts;
1198 STAMCOUNTER StatIntsRaised;
1199 STAMCOUNTER StatIntsPrevented;
1200 STAMPROFILEADV StatReceive;
1201 STAMPROFILEADV StatReceiveCRC;
1202 STAMPROFILEADV StatReceiveFilter;
1203 STAMPROFILEADV StatReceiveStore;
1204 STAMPROFILEADV StatTransmitRZ;
1205 STAMPROFILEADV StatTransmitR3;
1206 STAMPROFILE StatTransmitSendRZ;
1207 STAMPROFILE StatTransmitSendR3;
1208 STAMPROFILE StatRxOverflow;
1209 STAMCOUNTER StatRxOverflowWakeupRZ;
1210 STAMCOUNTER StatRxOverflowWakeupR3;
1211 STAMCOUNTER StatTxDescCtxNormal;
1212 STAMCOUNTER StatTxDescCtxTSE;
1213 STAMCOUNTER StatTxDescLegacy;
1214 STAMCOUNTER StatTxDescData;
1215 STAMCOUNTER StatTxDescTSEData;
1216 STAMCOUNTER StatTxPathFallback;
1217 STAMCOUNTER StatTxPathGSO;
1218 STAMCOUNTER StatTxPathRegular;
1219 STAMCOUNTER StatPHYAccesses;
1220 STAMCOUNTER aStatRegWrites[E1K_NUM_OF_REGS];
1221 STAMCOUNTER aStatRegReads[E1K_NUM_OF_REGS];
1222#endif /* VBOX_WITH_STATISTICS */
1223
1224#ifdef E1K_INT_STATS
1225 /* Internal stats */
1226 uint64_t u64ArmedAt;
1227 uint64_t uStatMaxTxDelay;
1228 uint32_t uStatInt;
1229 uint32_t uStatIntTry;
1230 uint32_t uStatIntLower;
1231 uint32_t uStatNoIntICR;
1232 int32_t iStatIntLost;
1233 int32_t iStatIntLostOne;
1234 uint32_t uStatIntIMS;
1235 uint32_t uStatIntSkip;
1236 uint32_t uStatIntLate;
1237 uint32_t uStatIntMasked;
1238 uint32_t uStatIntEarly;
1239 uint32_t uStatIntRx;
1240 uint32_t uStatIntTx;
1241 uint32_t uStatIntICS;
1242 uint32_t uStatIntRDTR;
1243 uint32_t uStatIntRXDMT0;
1244 uint32_t uStatIntTXQE;
1245 uint32_t uStatTxNoRS;
1246 uint32_t uStatTxIDE;
1247 uint32_t uStatTxDelayed;
1248 uint32_t uStatTxDelayExp;
1249 uint32_t uStatTAD;
1250 uint32_t uStatTID;
1251 uint32_t uStatRAD;
1252 uint32_t uStatRID;
1253 uint32_t uStatRxFrm;
1254 uint32_t uStatTxFrm;
1255 uint32_t uStatDescCtx;
1256 uint32_t uStatDescDat;
1257 uint32_t uStatDescLeg;
1258 uint32_t uStatTx1514;
1259 uint32_t uStatTx2962;
1260 uint32_t uStatTx4410;
1261 uint32_t uStatTx5858;
1262 uint32_t uStatTx7306;
1263 uint32_t uStatTx8754;
1264 uint32_t uStatTx16384;
1265 uint32_t uStatTx32768;
1266 uint32_t uStatTxLarge;
1267 uint32_t uStatAlign;
1268#endif /* E1K_INT_STATS */
1269} E1KSTATE;
1270/** Pointer to the E1000 device state. */
1271typedef E1KSTATE *PE1KSTATE;
1272
1273/**
1274 * E1000 ring-3 device state
1275 *
1276 * @implements PDMINETWORKDOWN
1277 * @implements PDMINETWORKCONFIG
1278 * @implements PDMILEDPORTS
1279 */
1280typedef struct E1KSTATER3
1281{
1282 PDMIBASE IBase;
1283 PDMINETWORKDOWN INetworkDown;
1284 PDMINETWORKCONFIG INetworkConfig;
1285 /** LED interface */
1286 PDMILEDPORTS ILeds;
1287 /** Attached network driver. */
1288 R3PTRTYPE(PPDMIBASE) pDrvBase;
1289 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1290
1291 /** Pointer to the shared state. */
1292 R3PTRTYPE(PE1KSTATE) pShared;
1293
1294 /** Device instance. */
1295 PPDMDEVINSR3 pDevInsR3;
1296 /** Attached network driver. */
1297 PPDMINETWORKUPR3 pDrvR3;
1298 /** The scatter / gather buffer used for the current outgoing packet. */
1299 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1300
1301 /** EMT: EEPROM emulation */
1302 E1kEEPROM eeprom;
1303} E1KSTATER3;
1304/** Pointer to the E1000 ring-3 device state. */
1305typedef E1KSTATER3 *PE1KSTATER3;
1306
1307
1308/**
1309 * E1000 ring-0 device state
1310 */
1311typedef struct E1KSTATER0
1312{
1313 /** Device instance. */
1314 PPDMDEVINSR0 pDevInsR0;
1315 /** Attached network driver. */
1316 PPDMINETWORKUPR0 pDrvR0;
1317 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1318 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1319} E1KSTATER0;
1320/** Pointer to the E1000 ring-0 device state. */
1321typedef E1KSTATER0 *PE1KSTATER0;
1322
1323
1324/**
1325 * E1000 raw-mode device state
1326 */
1327typedef struct E1KSTATERC
1328{
1329 /** Device instance. */
1330 PPDMDEVINSRC pDevInsRC;
1331 /** Attached network driver. */
1332 PPDMINETWORKUPRC pDrvRC;
1333 /** The scatter / gather buffer used for the current outgoing packet. */
1334 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1335} E1KSTATERC;
1336/** Pointer to the E1000 raw-mode device state. */
1337typedef E1KSTATERC *PE1KSTATERC;
1338
1339
1340/** @def PE1KSTATECC
1341 * Pointer to the instance data for the current context. */
1342#ifdef IN_RING3
1343typedef E1KSTATER3 E1KSTATECC;
1344typedef PE1KSTATER3 PE1KSTATECC;
1345#elif defined(IN_RING0)
1346typedef E1KSTATER0 E1KSTATECC;
1347typedef PE1KSTATER0 PE1KSTATECC;
1348#elif defined(IN_RC)
1349typedef E1KSTATERC E1KSTATECC;
1350typedef PE1KSTATERC PE1KSTATECC;
1351#else
1352# error "Not IN_RING3, IN_RING0 or IN_RC"
1353#endif
1354
1355
1356#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1357
1358/* Forward declarations ******************************************************/
1359static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread);
1360
1361/**
1362 * E1000 register read handler.
1363 */
1364typedef int (FNE1KREGREAD)(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1365/**
1366 * E1000 register write handler.
1367 */
1368typedef int (FNE1KREGWRITE)(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1369
1370static FNE1KREGREAD e1kRegReadUnimplemented;
1371static FNE1KREGWRITE e1kRegWriteUnimplemented;
1372static FNE1KREGREAD e1kRegReadAutoClear;
1373static FNE1KREGREAD e1kRegReadDefault;
1374static FNE1KREGWRITE e1kRegWriteDefault;
1375#if 0 /* unused */
1376static FNE1KREGREAD e1kRegReadCTRL;
1377#endif
1378static FNE1KREGWRITE e1kRegWriteCTRL;
1379static FNE1KREGREAD e1kRegReadEECD;
1380static FNE1KREGWRITE e1kRegWriteEECD;
1381static FNE1KREGWRITE e1kRegWriteEERD;
1382static FNE1KREGWRITE e1kRegWriteMDIC;
1383static FNE1KREGREAD e1kRegReadICR;
1384static FNE1KREGWRITE e1kRegWriteICR;
1385static FNE1KREGWRITE e1kRegWriteICS;
1386static FNE1KREGWRITE e1kRegWriteIMS;
1387static FNE1KREGWRITE e1kRegWriteIMC;
1388static FNE1KREGWRITE e1kRegWriteRCTL;
1389static FNE1KREGWRITE e1kRegWritePBA;
1390static FNE1KREGWRITE e1kRegWriteRDT;
1391static FNE1KREGWRITE e1kRegWriteRDTR;
1392static FNE1KREGWRITE e1kRegWriteTDT;
1393static FNE1KREGREAD e1kRegReadMTA;
1394static FNE1KREGWRITE e1kRegWriteMTA;
1395static FNE1KREGREAD e1kRegReadRA;
1396static FNE1KREGWRITE e1kRegWriteRA;
1397static FNE1KREGREAD e1kRegReadVFTA;
1398static FNE1KREGWRITE e1kRegWriteVFTA;
1399
1400/**
1401 * Register map table.
1402 *
1403 * Override pfnRead and pfnWrite to get register-specific behavior.
1404 */
1405static const struct E1kRegMap_st
1406{
1407 /** Register offset in the register space. */
1408 uint32_t offset;
1409 /** Size in bytes. Registers of size > 4 are in fact tables. */
1410 uint32_t size;
1411 /** Readable bits. */
1412 uint32_t readable;
1413 /** Writable bits. */
1414 uint32_t writable;
1415 /** Read callback. */
1416 FNE1KREGREAD *pfnRead;
1417 /** Write callback. */
1418 FNE1KREGWRITE *pfnWrite;
1419 /** Abbreviated name. */
1420 const char *abbrev;
1421 /** Full name. */
1422 const char *name;
1423} g_aE1kRegMap[E1K_NUM_OF_REGS] =
1424{
1425 /* offset size read mask write mask read callback write callback abbrev full name */
1426 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1427 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1428 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1429 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1430 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1431 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1432 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1433 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1434 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1435 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1436 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1437 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1438 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1439 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1440 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1441 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1442 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1443 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1444 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1445 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1446 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1447 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1448 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1449 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1450 { 0x00e00, 0x00004, 0xCFCFCFCF, 0xCFCFCFCF, e1kRegReadDefault , e1kRegWriteDefault , "LEDCTL" , "LED Control" },
1451 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1452 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1453 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1454 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1455 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1456 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1457 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1458 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1459 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1460 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1461 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1462 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1463 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1464 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1465 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1466 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1467 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1468 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1469 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1470 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1471 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1472 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1473 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1474 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1475 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1476 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1477 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1478 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1479 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1480 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1481 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1482 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1483 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1484 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1485 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1486 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1487 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1488 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1489 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1490 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1491 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1492 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1493 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1494 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1495 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1496 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1497 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1498 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1499 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1500 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1501 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1502 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1503 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1504 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1505 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1506 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1507 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1508 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1509 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1510 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1511 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1512 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1513 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1514 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1515 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1516 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1517 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1518 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1519 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1520 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1521 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1522 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1523 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1524 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1525 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1526 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1527 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1528 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1529 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1530 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1531 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1532 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1533 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1534 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1535 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1536 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1537 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1538 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1539 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1540 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1541 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1542 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1543 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1544 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1545 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1546 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1547 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1548 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1549 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1550 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1551 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1552 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1553 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1554 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1555 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1556 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1557 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1558 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
1559 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
1560 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
1561};
1562
1563#ifdef LOG_ENABLED
1564
1565/**
1566 * Convert a U32 value to a hex string. Nibbles not selected by the mask are replaced with dots.
1567 *
1568 * @remarks The mask has half-byte (nibble), not bit, granularity (e.g. 0000000F).
1569 *
1570 * @returns The buffer.
1571 *
1572 * @param u32 The word to convert into string.
1573 * @param mask Selects which nibbles to convert.
1574 * @param buf Where to put the result.
1575 */
1576static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1577{
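    /* Convert one nibble per output character, the least significant nibble ending up in buf[7];
       for values 10..15 adding '7' instead of '0' yields upper-case 'A'..'F'. */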
1578 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1579 {
1580 if (mask & 0xF)
1581 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1582 else
1583 *ptr = '.';
1584 }
1585 buf[8] = 0;
1586 return buf;
1587}
1588
1589/**
1590 * Returns timer name for debug purposes.
1591 *
1592 * @returns The timer name.
1593 *
1594 * @param pThis The device state structure.
1595 * @param hTimer The timer to name.
1596 */
1597DECLINLINE(const char *) e1kGetTimerName(PE1KSTATE pThis, TMTIMERHANDLE hTimer)
1598{
1599 if (hTimer == pThis->hTIDTimer)
1600 return "TID";
1601 if (hTimer == pThis->hTADTimer)
1602 return "TAD";
1603 if (hTimer == pThis->hRIDTimer)
1604 return "RID";
1605 if (hTimer == pThis->hRADTimer)
1606 return "RAD";
1607 if (hTimer == pThis->hIntTimer)
1608 return "Int";
1609 if (hTimer == pThis->hTXDTimer)
1610 return "TXD";
1611 if (hTimer == pThis->hLUTimer)
1612 return "LinkUp";
1613 return "unknown";
1614}
1615
1616#endif /* LOG_ENABLED */
1617
1618/**
1619 * Arm a timer.
1620 *
1621 * @param pDevIns The device instance.
1622 * @param pThis Pointer to the device state structure.
1623 * @param hTimer The timer to arm.
1624 * @param uExpireIn Expiration interval in microseconds.
1625 */
1626DECLINLINE(void) e1kArmTimer(PPDMDEVINS pDevIns, PE1KSTATE pThis, TMTIMERHANDLE hTimer, uint32_t uExpireIn)
1627{
1628 if (pThis->fLocked)
1629 return;
1630
1631 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1632 pThis->szPrf, e1kGetTimerName(pThis, hTimer), uExpireIn));
1633 int rc = PDMDevHlpTimerSetMicro(pDevIns, hTimer, uExpireIn);
1634 AssertRC(rc);
1635}
1636
1637#ifdef IN_RING3
1638/**
1639 * Cancel a timer.
1640 *
1641 * @param pDevIns The device instance.
1642 * @param pThis Pointer to the device state structure.
1643 * @param hTimer The timer handle.
1644 */
1645DECLINLINE(void) e1kCancelTimer(PPDMDEVINS pDevIns, PE1KSTATE pThis, TMTIMERHANDLE hTimer)
1646{
1647 E1kLog2(("%s Stopping %s timer...\n",
1648 pThis->szPrf, e1kGetTimerName(pThis, hTimer)));
1649 int rc = PDMDevHlpTimerStop(pDevIns, hTimer);
1650 if (RT_FAILURE(rc))
1651 E1kLog2(("%s e1kCancelTimer: TMTimerStop(%s) failed with %Rrc\n",
1652 pThis->szPrf, e1kGetTimerName(pThis, hTimer), rc));
1653 RT_NOREF_PV(pThis);
1654}
1655#endif /* IN_RING3 */
1656
1657#define e1kCsEnter(ps, rc) PDMDevHlpCritSectEnter(pDevIns, &ps->cs, rc)
1658#define e1kCsLeave(ps) PDMDevHlpCritSectLeave(pDevIns, &ps->cs)
1659
1660#define e1kCsRxEnter(ps, rc) PDMDevHlpCritSectEnter(pDevIns, &ps->csRx, rc)
1661#define e1kCsRxLeave(ps) PDMDevHlpCritSectLeave(pDevIns, &ps->csRx)
1662#define e1kCsRxIsOwner(ps) PDMDevHlpCritSectIsOwner(pDevIns, &ps->csRx)
1663
1664#ifndef E1K_WITH_TX_CS
1665# define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1666# define e1kCsTxLeave(ps) do { } while (0)
1667#else /* E1K_WITH_TX_CS */
1668# define e1kCsTxEnter(ps, rc) PDMDevHlpCritSectEnter(pDevIns, &ps->csTx, rc)
1669# define e1kCsTxLeave(ps) PDMDevHlpCritSectLeave(pDevIns, &ps->csTx)
1670#endif /* E1K_WITH_TX_CS */
1671
1672
1673/**
1674 * Wakeup the RX thread.
1675 */
1676static void e1kWakeupReceive(PPDMDEVINS pDevIns, PE1KSTATE pThis)
1677{
1678 if ( pThis->fMaybeOutOfSpace
1679 && pThis->hEventMoreRxDescAvail != NIL_SUPSEMEVENT)
1680 {
1681 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatRxOverflowWakeup));
1682 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", pThis->szPrf));
1683 int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventMoreRxDescAvail);
1684 AssertRC(rc);
1685 }
1686}
1687
1688#ifdef IN_RING3
1689
1690/**
1691 * Hardware reset. Revert all registers to initial values.
1692 *
1693 * @param pDevIns The device instance.
1694 * @param pThis The device state structure.
1695 * @param pThisCC The current context instance data.
1696 */
1697static void e1kR3HardReset(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
1698{
1699 E1kLog(("%s Hard reset triggered\n", pThis->szPrf));
1700 /* No interrupts should survive device reset, see @bugref{9556}. */
1701 if (pThis->fIntRaised)
1702 {
1703 /* Lower(0) INTA(0) */
1704 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
1705 pThis->fIntRaised = false;
1706 E1kLog(("%s e1kR3HardReset: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
1707 }
1708 memset(pThis->auRegs, 0, sizeof(pThis->auRegs));
1709 memset(pThis->aRecAddr.au32, 0, sizeof(pThis->aRecAddr.au32));
1710#ifdef E1K_INIT_RA0
1711 memcpy(pThis->aRecAddr.au32, pThis->macConfigured.au8,
1712 sizeof(pThis->macConfigured.au8));
1713 pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;
1714#endif /* E1K_INIT_RA0 */
1715 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1716 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1717 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1718 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1719 Assert(GET_BITS(RCTL, BSIZE) == 0);
1720 pThis->u16RxBSize = 2048;
1721
1722 uint16_t u16LedCtl = 0x0602; /* LED0/LINK_UP#, LED2/LINK100# */
1723 pThisCC->eeprom.readWord(0x2F, &u16LedCtl); /* Read LEDCTL defaults from EEPROM */
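    /* The EEPROM word at 0x2F is assumed to hold the LED0 default in its low byte and the LED2
       default in its high byte; the statement below moves LED2 into LEDCTL[23:16], keeps LED0 in
       LEDCTL[7:0], and leaves the remaining LED fields at their 0x07008300 defaults. */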
1724 LEDCTL = 0x07008300 | (((uint32_t)u16LedCtl & 0xCF00) << 8) | (u16LedCtl & 0xCF); /* Only LED0 and LED2 defaults come from EEPROM */
1725
1726 /* Reset promiscuous mode */
1727 if (pThisCC->pDrvR3)
1728 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, false);
1729
1730#ifdef E1K_WITH_TXD_CACHE
1731 int rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
1732 if (RT_LIKELY(rc == VINF_SUCCESS))
1733 {
1734 pThis->nTxDFetched = 0;
1735 pThis->iTxDCurrent = 0;
1736 pThis->fGSO = false;
1737 pThis->cbTxAlloc = 0;
1738 e1kCsTxLeave(pThis);
1739 }
1740#endif /* E1K_WITH_TXD_CACHE */
1741#ifdef E1K_WITH_RXD_CACHE
1742 if (RT_LIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1743 {
1744 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
1745 e1kCsRxLeave(pThis);
1746 }
1747#endif /* E1K_WITH_RXD_CACHE */
1748#ifdef E1K_LSC_ON_RESET
1749 E1kLog(("%s Will trigger LSC in %d seconds...\n",
1750 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
1751 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, pThis->cMsLinkUpDelay * 1000);
1752#endif /* E1K_LSC_ON_RESET */
1753}
1754
1755#endif /* IN_RING3 */
1756
1757/**
1758 * Compute Internet checksum.
1759 *
1760 * @remarks Refer to http://www.netfor2.com/checksum.html for short intro.
1761 *
1762 * @param pvBuf The buffer to checksum.
1763 * @param cb The size of the buffer in bytes.
1766 *
1767 * @return The 1's complement of the 1's complement sum.
1768 *
1769 * @thread E1000_TX
1770 */
1771static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1772{
1773 uint32_t csum = 0;
1774 uint16_t *pu16 = (uint16_t *)pvBuf;
1775
1776 while (cb > 1)
1777 {
1778 csum += *pu16++;
1779 cb -= 2;
1780 }
1781 if (cb)
1782 csum += *(uint8_t*)pu16;
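    /* Fold the 32-bit accumulator into 16 bits by repeatedly adding the carries back in. */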
1783 while (csum >> 16)
1784 csum = (csum >> 16) + (csum & 0xFFFF);
1785 return ~csum;
1786}
1787
1788/**
1789 * Dump a packet to debug log.
1790 *
1791 * @param pDevIns The device instance.
1792 * @param pThis The device state structure.
1793 * @param cpPacket The packet.
1794 * @param cb The size of the packet.
1795 * @param pszText A string denoting direction of packet transfer.
1796 * @thread E1000_TX
1797 */
1798DECLINLINE(void) e1kPacketDump(PPDMDEVINS pDevIns, PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *pszText)
1799{
1800#ifdef DEBUG
1801 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1802 {
1803 Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
1804 pThis->szPrf, pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cb));
1805 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1806 {
1807 Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
1808 pThis->szPrf, cpPacket+14+8, cpPacket+14+24));
1809 if (*(cpPacket+14+6) == 0x6)
1810 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1811 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1812 }
1813 else if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x800)
1814 {
1815 Log4(("%s --- IPv4: %RTnaipv4 => %RTnaipv4\n",
1816 pThis->szPrf, *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16)));
1817 if (*(cpPacket+14+6) == 0x6)
1818 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1819 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1820 }
1821 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1822 e1kCsLeave(pThis);
1823 }
1824#else
1825 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1826 {
1827 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1828 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
1829 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cpPacket+14+8, cpPacket+14+24,
1830 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1831 else
1832 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
1833 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket,
1834 *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16),
1835 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1836 e1kCsLeave(pThis);
1837 }
1838 RT_NOREF2(cb, pszText);
1839#endif
1840}
1841
1842/**
1843 * Determine the type of transmit descriptor.
1844 *
1845 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1846 *
1847 * @param pDesc Pointer to descriptor union.
1848 * @thread E1000_TX
1849 */
1850DECLINLINE(int) e1kGetDescType(E1KTXDESC *pDesc)
1851{
1852 if (pDesc->legacy.cmd.fDEXT)
1853 return pDesc->context.dw2.u4DTYP;
1854 return E1K_DTYP_LEGACY;
1855}
1856
1857
1858#ifdef E1K_WITH_RXD_CACHE
1859/**
1860 * Return the number of RX descriptors that belong to the hardware.
1861 *
1862 * @returns the number of available descriptors in RX ring.
1863 * @param pThis The device state structure.
1864 * @thread ???
1865 */
1866DECLINLINE(uint32_t) e1kGetRxLen(PE1KSTATE pThis)
1867{
1868 /*
1869 * Make sure RDT won't change during computation. EMT may modify RDT at
1870 * any moment.
1871 */
1872 uint32_t rdt = RDT;
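    /*
     * The ring is circular, so the descriptors owned by hardware may wrap past the end of the
     * ring. E.g. with 8 descriptors, RDH=6 and RDT=2 gives 8 + 2 - 6 = 4 available descriptors.
     */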
1873 return (RDH > rdt ? RDLEN/sizeof(E1KRXDESC) : 0) + rdt - RDH;
1874}
1875
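/**
 * Returns the number of RX descriptors currently in the local cache, i.e. fetched
 * from the ring but not yet consumed.
 */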
1876DECLINLINE(unsigned) e1kRxDInCache(PE1KSTATE pThis)
1877{
1878 return pThis->nRxDFetched > pThis->iRxDCurrent ?
1879 pThis->nRxDFetched - pThis->iRxDCurrent : 0;
1880}
1881
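/**
 * Returns true if every descriptor fetched into the local cache has been consumed.
 */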
1882DECLINLINE(unsigned) e1kRxDIsCacheEmpty(PE1KSTATE pThis)
1883{
1884 return pThis->iRxDCurrent >= pThis->nRxDFetched;
1885}
1886
1887/**
1888 * Load receive descriptors from guest memory. The caller needs to be in Rx
1889 * critical section.
1890 *
1891 * We need two physical reads in case the tail wrapped around the end of RX
1892 * descriptor ring.
1893 *
1894 * @returns the actual number of descriptors fetched.
1895 * @param pDevIns The device instance.
1896 * @param pThis The device state structure.
1897 * @thread EMT, RX
1898 */
1899DECLINLINE(unsigned) e1kRxDPrefetch(PPDMDEVINS pDevIns, PE1KSTATE pThis)
1900{
1901 /* We've already loaded pThis->nRxDFetched descriptors past RDH. */
1902 unsigned nDescsAvailable = e1kGetRxLen(pThis) - e1kRxDInCache(pThis);
1903 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pThis->nRxDFetched);
1904 unsigned nDescsTotal = RDLEN / sizeof(E1KRXDESC);
1905 Assert(nDescsTotal != 0);
1906 if (nDescsTotal == 0)
1907 return 0;
1908 unsigned nFirstNotLoaded = (RDH + e1kRxDInCache(pThis)) % nDescsTotal;
1909 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
1910 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
1911 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
1912 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
1913 nFirstNotLoaded, nDescsInSingleRead));
1914 if (nDescsToFetch == 0)
1915 return 0;
1916 E1KRXDESC* pFirstEmptyDesc = &pThis->aRxDescriptors[pThis->nRxDFetched];
1917 PDMDevHlpPhysRead(pDevIns,
1918 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
1919 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
1920 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
1921 // unsigned i, j;
1922 // for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
1923 // {
1924 // pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
1925 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
1926 // }
1927 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
1928 pThis->szPrf, nDescsInSingleRead,
1929 RDBAH, RDBAL + RDH * sizeof(E1KRXDESC),
1930 nFirstNotLoaded, RDLEN, RDH, RDT));
1931 if (nDescsToFetch > nDescsInSingleRead)
1932 {
1933 PDMDevHlpPhysRead(pDevIns,
1934 ((uint64_t)RDBAH << 32) + RDBAL,
1935 pFirstEmptyDesc + nDescsInSingleRead,
1936 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
1937 // Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
1938 // for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
1939 // {
1940 // pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
1941 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
1942 // }
1943 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
1944 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
1945 RDBAH, RDBAL));
1946 }
1947 pThis->nRxDFetched += nDescsToFetch;
1948 return nDescsToFetch;
1949}
1950
1951# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
1952/**
1953 * Dump receive descriptor to debug log.
1954 *
1955 * @param pThis The device state structure.
1956 * @param pDesc Pointer to the descriptor.
1957 * @thread E1000_RX
1958 */
1959static void e1kPrintRDesc(PE1KSTATE pThis, E1KRXDESC *pDesc)
1960{
1961 RT_NOREF2(pThis, pDesc);
1962 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", pThis->szPrf, pDesc->u16Length));
1963 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1964 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1965 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1966 pDesc->status.fPIF ? "PIF" : "pif",
1967 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1968 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1969 pDesc->status.fVP ? "VP" : "vp",
1970 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1971 pDesc->status.fEOP ? "EOP" : "eop",
1972 pDesc->status.fDD ? "DD" : "dd",
1973 pDesc->status.fRXE ? "RXE" : "rxe",
1974 pDesc->status.fIPE ? "IPE" : "ipe",
1975 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1976 pDesc->status.fCE ? "CE" : "ce",
1977 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1978 E1K_SPEC_VLAN(pDesc->status.u16Special),
1979 E1K_SPEC_PRI(pDesc->status.u16Special)));
1980}
1981# endif /* IN_RING3 */
1982#endif /* E1K_WITH_RXD_CACHE */
1983
1984/**
1985 * Dump transmit descriptor to debug log.
1986 *
1987 * @param pThis The device state structure.
1988 * @param pDesc Pointer to descriptor union.
1989 * @param pszDir A string denoting direction of descriptor transfer
1990 * @thread E1000_TX
1991 */
1992static void e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, const char *pszDir,
1993 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
1994{
1995 RT_NOREF4(pThis, pDesc, pszDir, uLevel);
1996
1997 /*
1998 * Unfortunately we cannot use our format handler here, we want R0 logging
1999 * as well.
2000 */
2001 switch (e1kGetDescType(pDesc))
2002 {
2003 case E1K_DTYP_CONTEXT:
2004 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
2005 pThis->szPrf, pszDir, pszDir));
2006 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
2007 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
2008 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
2009 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
2010 pDesc->context.dw2.fIDE ? " IDE":"",
2011 pDesc->context.dw2.fRS ? " RS" :"",
2012 pDesc->context.dw2.fTSE ? " TSE":"",
2013 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
2014 pDesc->context.dw2.fTCP ? "TCP":"UDP",
2015 pDesc->context.dw2.u20PAYLEN,
2016 pDesc->context.dw3.u8HDRLEN,
2017 pDesc->context.dw3.u16MSS,
2018 pDesc->context.dw3.fDD?"DD":""));
2019 break;
2020 case E1K_DTYP_DATA:
2021 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
2022 pThis->szPrf, pszDir, pDesc->data.cmd.u20DTALEN, pszDir));
2023 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
2024 pDesc->data.u64BufAddr,
2025 pDesc->data.cmd.u20DTALEN));
2026 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
2027 pDesc->data.cmd.fIDE ? " IDE" :"",
2028 pDesc->data.cmd.fVLE ? " VLE" :"",
2029 pDesc->data.cmd.fRPS ? " RPS" :"",
2030 pDesc->data.cmd.fRS ? " RS" :"",
2031 pDesc->data.cmd.fTSE ? " TSE" :"",
2032 pDesc->data.cmd.fIFCS? " IFCS":"",
2033 pDesc->data.cmd.fEOP ? " EOP" :"",
2034 pDesc->data.dw3.fDD ? " DD" :"",
2035 pDesc->data.dw3.fEC ? " EC" :"",
2036 pDesc->data.dw3.fLC ? " LC" :"",
2037 pDesc->data.dw3.fTXSM? " TXSM":"",
2038 pDesc->data.dw3.fIXSM? " IXSM":"",
2039 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
2040 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
2041 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
2042 break;
2043 case E1K_DTYP_LEGACY:
2044 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
2045 pThis->szPrf, pszDir, pDesc->legacy.cmd.u16Length, pszDir));
2046 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
2047 pDesc->data.u64BufAddr,
2048 pDesc->legacy.cmd.u16Length));
2049 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
2050 pDesc->legacy.cmd.fIDE ? " IDE" :"",
2051 pDesc->legacy.cmd.fVLE ? " VLE" :"",
2052 pDesc->legacy.cmd.fRPS ? " RPS" :"",
2053 pDesc->legacy.cmd.fRS ? " RS" :"",
2054 pDesc->legacy.cmd.fIC ? " IC" :"",
2055 pDesc->legacy.cmd.fIFCS? " IFCS":"",
2056 pDesc->legacy.cmd.fEOP ? " EOP" :"",
2057 pDesc->legacy.dw3.fDD ? " DD" :"",
2058 pDesc->legacy.dw3.fEC ? " EC" :"",
2059 pDesc->legacy.dw3.fLC ? " LC" :"",
2060 pDesc->legacy.cmd.u8CSO,
2061 pDesc->legacy.dw3.u8CSS,
2062 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
2063 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
2064 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
2065 break;
2066 default:
2067 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
2068 pThis->szPrf, pszDir, pszDir));
2069 break;
2070 }
2071}
2072
2073/**
2074 * Raise an interrupt later.
2075 *
2076 * @param pThis The device state structure.
2077 */
2078DECLINLINE(void) e1kPostponeInterrupt(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint64_t nsDeadline)
2079{
2080 if (!PDMDevHlpTimerIsActive(pDevIns, pThis->hIntTimer))
2081 PDMDevHlpTimerSetNano(pDevIns, pThis->hIntTimer, nsDeadline);
2082}
2083
2084/**
2085 * Raise interrupt if not masked.
2086 *
2087 * @param pThis The device state structure.
2088 */
2089static int e1kRaiseInterrupt(PPDMDEVINS pDevIns, PE1KSTATE pThis, int rcBusy, uint32_t u32IntCause = 0)
2090{
2091 int rc = e1kCsEnter(pThis, rcBusy);
2092 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2093 return rc;
2094
2095 E1K_INC_ISTAT_CNT(pThis->uStatIntTry);
2096 ICR |= u32IntCause;
2097 if (ICR & IMS)
2098 {
2099 if (pThis->fIntRaised)
2100 {
2101 E1K_INC_ISTAT_CNT(pThis->uStatIntSkip);
2102 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
2103 pThis->szPrf, ICR & IMS));
2104 }
2105 else
2106 {
2107 uint64_t tsNow = PDMDevHlpTimerGet(pDevIns, pThis->hIntTimer);
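            /* ITR is programmed in 256 ns units, so ITR * 256 is the minimum interval in nanoseconds between interrupts. */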
2108 if (!!ITR && tsNow - pThis->u64AckedAt < ITR * 256
2109 && pThis->fItrEnabled && (pThis->fItrRxEnabled || !(ICR & ICR_RXT0)))
2110 {
2111 E1K_INC_ISTAT_CNT(pThis->uStatIntEarly);
2112 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
2113 pThis->szPrf, (uint32_t)(tsNow - pThis->u64AckedAt), ITR * 256));
2114 e1kPostponeInterrupt(pDevIns, pThis, ITR * 256);
2115 }
2116 else
2117 {
2118
2119 /* Since we are delivering the interrupt now
2120 * there is no need to do it later -- stop the timer.
2121 */
2122 PDMDevHlpTimerStop(pDevIns, pThis->hIntTimer);
2123 E1K_INC_ISTAT_CNT(pThis->uStatInt);
2124 STAM_COUNTER_INC(&pThis->StatIntsRaised);
2125 /* Got at least one unmasked interrupt cause */
2126 pThis->fIntRaised = true;
2127 /* Raise(1) INTA(0) */
2128 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
2129 PDMDevHlpPCISetIrq(pDevIns, 0, 1);
2130 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
2131 pThis->szPrf, ICR & IMS));
2132 }
2133 }
2134 }
2135 else
2136 {
2137 E1K_INC_ISTAT_CNT(pThis->uStatIntMasked);
2138 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
2139 pThis->szPrf, ICR, IMS));
2140 }
2141 e1kCsLeave(pThis);
2142 return VINF_SUCCESS;
2143}
2144
2145/**
2146 * Compute the physical address of the descriptor.
2147 *
2148 * @returns the physical address of the descriptor.
2149 *
2150 * @param baseHigh High-order 32 bits of descriptor table address.
2151 * @param baseLow Low-order 32 bits of descriptor table address.
2152 * @param idxDesc The descriptor index in the table.
2153 */
2154DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
2155{
2156 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
2157 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
2158}
2159
2160#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2161/**
2162 * Advance the head pointer of the receive descriptor queue.
2163 *
2164 * @remarks RDH always points to the next available RX descriptor.
2165 *
2166 * @param pDevIns The device instance.
2167 * @param pThis The device state structure.
2168 */
2169DECLINLINE(void) e1kAdvanceRDH(PPDMDEVINS pDevIns, PE1KSTATE pThis)
2170{
2171 Assert(e1kCsRxIsOwner(pThis));
2172 //e1kCsEnter(pThis, RT_SRC_POS);
2173 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
2174 RDH = 0;
2175#ifdef E1K_WITH_RXD_CACHE
2176 /*
2177 * We need to fetch descriptors now as the guest may advance RDT all the way
2178 * to RDH as soon as we generate RXDMT0 interrupt. This is mostly to provide
2179 * compatibility with Phar Lap ETS, see @bugref{7346}. Note that we do not
2180 * check if the receiver is enabled. It must be, otherwise we won't get here
2181 * in the first place.
2182 *
2183 * Note that we should have moved both RDH and iRxDCurrent by now.
2184 */
2185 if (e1kRxDIsCacheEmpty(pThis))
2186 {
2187 /* Cache is empty, reset it and check if we can fetch more. */
2188 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2189 E1kLog3(("%s e1kAdvanceRDH: Rx cache is empty, RDH=%x RDT=%x "
2190 "iRxDCurrent=%x nRxDFetched=%x\n",
2191 pThis->szPrf, RDH, RDT, pThis->iRxDCurrent, pThis->nRxDFetched));
2192 e1kRxDPrefetch(pDevIns, pThis);
2193 }
2194#endif /* E1K_WITH_RXD_CACHE */
2195 /*
2196 * Compute current receive queue length and fire RXDMT0 interrupt
2197 * if we are low on receive buffers
2198 */
2199 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
2200 /*
2201 * The minimum threshold is controlled by RDMTS bits of RCTL:
2202 * 00 = 1/2 of RDLEN
2203 * 01 = 1/4 of RDLEN
2204 * 10 = 1/8 of RDLEN
2205 * 11 = reserved
2206 */
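    /* E.g. with 256 descriptors and RDMTS=01b the threshold is 256 / (2 << 1) = 64 descriptors. */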
2207 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
2208 if (uRQueueLen <= uMinRQThreshold)
2209 {
2210 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
2211 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
2212 pThis->szPrf, RDH, RDT, uRQueueLen, uMinRQThreshold));
2213 E1K_INC_ISTAT_CNT(pThis->uStatIntRXDMT0);
2214 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXDMT0);
2215 }
2216 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
2217 pThis->szPrf, RDH, RDT, uRQueueLen));
2218 //e1kCsLeave(pThis);
2219}
2220#endif /* IN_RING3 */
2221
2222#ifdef E1K_WITH_RXD_CACHE
2223
2224# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2225
2226/**
2227 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2228 * RX ring if the cache is empty.
2229 *
2230 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2231 * go out of sync with RDH which will cause trouble when EMT checks if the
2232 * cache is empty to do pre-fetch @bugref{6217}.
2233 *
2234 * @param pDevIns The device instance.
2235 * @param pThis The device state structure.
2236 * @thread RX
2237 */
2238DECLINLINE(E1KRXDESC *) e1kRxDGet(PPDMDEVINS pDevIns, PE1KSTATE pThis)
2239{
2240 Assert(e1kCsRxIsOwner(pThis));
2241 /* Check the cache first. */
2242 if (pThis->iRxDCurrent < pThis->nRxDFetched)
2243 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2244 /* Cache is empty, reset it and check if we can fetch more. */
2245 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2246 if (e1kRxDPrefetch(pDevIns, pThis))
2247 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2248 /* Out of Rx descriptors. */
2249 return NULL;
2250}
2251
2252
2253/**
2254 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2255 * pointer. The descriptor gets written back to the RXD ring.
2256 *
2257 * @param pDevIns The device instance.
2258 * @param pThis The device state structure.
2259 * @param pDesc The descriptor being "returned" to the RX ring.
2260 * @thread RX
2261 */
2262DECLINLINE(void) e1kRxDPut(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC* pDesc)
2263{
2264 Assert(e1kCsRxIsOwner(pThis));
2265 pThis->iRxDCurrent++;
2266 // Assert(pDesc >= pThis->aRxDescriptors);
2267 // Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2268 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2269 // uint32_t rdh = RDH;
2270 // Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
2271 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2272 /*
2273 * We need to print the descriptor before advancing RDH as it may fetch new
2274 * descriptors into the cache.
2275 */
2276 e1kPrintRDesc(pThis, pDesc);
2277 e1kAdvanceRDH(pDevIns, pThis);
2278}
2279
2280/**
2281 * Store a fragment of a received packet at the specified address.
2282 *
2283 * @param pDevIns The device instance.
2284 * @param pThis The device state structure.
2285 * @param pDesc The next available RX descriptor.
2286 * @param pvBuf The fragment.
2287 * @param cb The size of the fragment.
2288 */
2289static void e1kStoreRxFragment(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2290{
2291 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2292 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2293 pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2294 PDMDevHlpPCIPhysWrite(pDevIns, pDesc->u64BufAddr, pvBuf, cb);
2295 pDesc->u16Length = (uint16_t)cb;
2296 Assert(pDesc->u16Length == cb);
2297 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2298 RT_NOREF(pThis);
2299}
2300
2301# endif /* IN_RING3 */
2302
2303#else /* !E1K_WITH_RXD_CACHE */
2304
2305/**
2306 * Store a fragment of a received packet that fits into the next available RX
2307 * buffer.
2308 *
2309 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2310 *
2311 * @param pDevIns The device instance.
2312 * @param pThis The device state structure.
2313 * @param pDesc The next available RX descriptor.
2314 * @param pvBuf The fragment.
2315 * @param cb The size of the fragment.
2316 */
2317static void e1kStoreRxFragment(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2318{
2319 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2320 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2321 PDMDevHlpPCIPhysWrite(pDevIns, pDesc->u64BufAddr, pvBuf, cb);
2322 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2323 /* Write back the descriptor */
2324 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2325 e1kPrintRDesc(pThis, pDesc);
2326 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2327 /* Advance head */
2328 e1kAdvanceRDH(pDevIns, pThis);
2329 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
2330 if (pDesc->status.fEOP)
2331 {
2332 /* Complete packet has been stored -- it is time to let the guest know. */
2333#ifdef E1K_USE_RX_TIMERS
2334 if (RDTR)
2335 {
2336 /* Arm the timer to fire in RDTR usec (discard .024) */
2337 e1kArmTimer(pDevIns, pThis, pThis->hRIDTimer, RDTR);
2338 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2339 if (RADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hRADTimer))
2340 e1kArmTimer(pDevIns, pThis, pThis->hRADTimer, RADV);
2341 }
2342 else
2343 {
2344#endif
2345 /* 0 delay means immediate interrupt */
2346 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2347 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
2348#ifdef E1K_USE_RX_TIMERS
2349 }
2350#endif
2351 }
2352 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2353}
2354
2355#endif /* !E1K_WITH_RXD_CACHE */
2356
2357/**
2358 * Returns true if it is a broadcast packet.
2359 *
2360 * @returns true if destination address indicates broadcast.
2361 * @param pvBuf The ethernet packet.
2362 */
2363DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2364{
2365 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2366 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2367}
2368
2369/**
2370 * Returns true if it is a multicast packet.
2371 *
2372 * @remarks returns true for broadcast packets as well.
2373 * @returns true if destination address indicates multicast.
2374 * @param pvBuf The ethernet packet.
2375 */
2376DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2377{
2378 return (*(char*)pvBuf) & 1;
2379}
2380
2381#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2382/**
2383 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2384 *
2385 * @remarks We emulate checksum offloading for major packet types only.
2386 *
2387 * @returns VBox status code.
2388 * @param pThis The device state structure.
2389 * @param pFrame The available data.
2390 * @param cb Number of bytes available in the buffer.
2391 * @param pStatus Bit fields containing status info.
2392 */
2393static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2394{
2395 /** @todo
2396 * It is not safe to bypass checksum verification for packets coming
2397 * from the real wire. We are currently unable to tell where packets are
2398 * coming from, so we tell the driver to ignore our checksum flags
2399 * and do verification in software.
2400 */
2401# if 0
2402 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2403
2404 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", pThis->szPrf, uEtherType));
2405
2406 switch (uEtherType)
2407 {
2408 case 0x800: /* IPv4 */
2409 {
2410 pStatus->fIXSM = false;
2411 pStatus->fIPCS = true;
2412 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2413 /* TCP/UDP checksum offloading works with TCP and UDP only */
2414 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2415 break;
2416 }
2417 case 0x86DD: /* IPv6 */
2418 pStatus->fIXSM = false;
2419 pStatus->fIPCS = false;
2420 pStatus->fTCPCS = true;
2421 break;
2422 default: /* ARP, VLAN, etc. */
2423 pStatus->fIXSM = true;
2424 break;
2425 }
2426# else
2427 pStatus->fIXSM = true;
2428 RT_NOREF_PV(pThis); RT_NOREF_PV(pFrame); RT_NOREF_PV(cb);
2429# endif
2430 return VINF_SUCCESS;
2431}
2432#endif /* IN_RING3 */
2433
2434/**
2435 * Pad and store received packet.
2436 *
2437 * @remarks Make sure that the packet appears to the upper layer as one coming
2438 * from a real Ethernet wire: pad it and insert the FCS.
2439 *
2440 * @returns VBox status code.
2441 * @param pDevIns The device instance.
2442 * @param pThis The device state structure.
2443 * @param pvBuf The available data.
2444 * @param cb Number of bytes available in the buffer.
2445 * @param status Bit fields containing status info.
2446 */
2447static int e1kHandleRxPacket(PPDMDEVINS pDevIns, PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
2448{
2449#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2450 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2451 uint8_t *ptr = rxPacket;
2452
2453 int rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2454 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2455 return rc;
2456
2457 if (cb > 70) /* unqualified guess */
2458 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
2459
2460 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2461 Assert(cb > 16);
2462 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2463 E1kLog3(("%s Max RX packet size is %u\n", pThis->szPrf, cbMax));
2464 if (status.fVP)
2465 {
2466 /* VLAN packet -- strip VLAN tag in VLAN mode */
2467 if ((CTRL & CTRL_VME) && cb > 16)
2468 {
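            /* The 802.1Q header occupies bytes 12-15: the TPID at 12-13 and the tag (TCI) at 14-15, i.e. u16Ptr[7]. */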
2469 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2470 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2471 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2472 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2473 cb -= 4;
2474 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2475 pThis->szPrf, status.u16Special, cb));
2476 }
2477 else
2478 status.fVP = false; /* Set VP only if we stripped the tag */
2479 }
2480 else
2481 memcpy(rxPacket, pvBuf, cb);
2482 /* Pad short packets to the 60-byte minimum frame size (64 bytes including the FCS) */
2483 if (cb < 60)
2484 {
2485 memset(rxPacket + cb, 0, 60 - cb);
2486 cb = 60;
2487 }
2488 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2489 {
2490 STAM_PROFILE_ADV_START(&pThis->StatReceiveCRC, a);
2491 /*
2492 * Add FCS if CRC stripping is not enabled. Since the value of CRC
2493 * is ignored by most of drivers we may as well save us the trouble
2494 * of calculating it (see EthernetCRC CFGM parameter).
2495 */
2496 if (pThis->fEthernetCRC)
2497 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2498 cb += sizeof(uint32_t);
2499 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveCRC, a);
2500 E1kLog3(("%s Added FCS (cb=%u)\n", pThis->szPrf, cb));
2501 }
2502 /* Compute checksum of complete packet */
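    /* RXCSUM.PCSS (Packet Checksum Start) gives the offset at which the packet checksum begins. */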
2503 size_t cbCSumStart = RT_MIN(GET_BITS(RXCSUM, PCSS), cb);
2504 uint16_t checksum = e1kCSum16(rxPacket + cbCSumStart, cb - cbCSumStart);
2505 e1kRxChecksumOffload(pThis, rxPacket, cb, &status);
2506
2507 /* Update stats */
2508 E1K_INC_CNT32(GPRC);
2509 if (e1kIsBroadcast(pvBuf))
2510 E1K_INC_CNT32(BPRC);
2511 else if (e1kIsMulticast(pvBuf))
2512 E1K_INC_CNT32(MPRC);
2513 /* Update octet receive counter */
2514 E1K_ADD_CNT64(GORCL, GORCH, cb);
2515 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
2516 if (cb == 64)
2517 E1K_INC_CNT32(PRC64);
2518 else if (cb < 128)
2519 E1K_INC_CNT32(PRC127);
2520 else if (cb < 256)
2521 E1K_INC_CNT32(PRC255);
2522 else if (cb < 512)
2523 E1K_INC_CNT32(PRC511);
2524 else if (cb < 1024)
2525 E1K_INC_CNT32(PRC1023);
2526 else
2527 E1K_INC_CNT32(PRC1522);
2528
2529 E1K_INC_ISTAT_CNT(pThis->uStatRxFrm);
2530
2531# ifdef E1K_WITH_RXD_CACHE
2532 while (cb > 0)
2533 {
2534 E1KRXDESC *pDesc = e1kRxDGet(pDevIns, pThis);
2535
2536 if (pDesc == NULL)
2537 {
2538 E1kLog(("%s Out of receive buffers, dropping the packet "
2539 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2540 pThis->szPrf, cb, e1kRxDInCache(pThis), RDH, RDT));
2541 break;
2542 }
2543# else /* !E1K_WITH_RXD_CACHE */
2544 if (RDH == RDT)
2545 {
2546 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2547 pThis->szPrf));
2548 }
2549 /* Store the packet to receive buffers */
2550 while (RDH != RDT)
2551 {
2552 /* Load the descriptor pointed by head */
2553 E1KRXDESC desc, *pDesc = &desc;
2554 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
2555# endif /* !E1K_WITH_RXD_CACHE */
2556 if (pDesc->u64BufAddr)
2557 {
2558 uint16_t u16RxBufferSize = pThis->u16RxBSize; /* see @bugref{9427} */
2559
2560 /* Update descriptor */
2561 pDesc->status = status;
2562 pDesc->u16Checksum = checksum;
2563 pDesc->status.fDD = true;
2564
2565 /*
2566 * We need to leave Rx critical section here or we risk deadlocking
2567 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2568 * page or has an access handler associated with it.
2569 * Note that it is safe to leave the critical section here since
2570 * e1kRegWriteRDT() never modifies RDH. It never touches already
2571 * fetched RxD cache entries either.
2572 */
2573 if (cb > u16RxBufferSize)
2574 {
2575 pDesc->status.fEOP = false;
2576 e1kCsRxLeave(pThis);
2577 e1kStoreRxFragment(pDevIns, pThis, pDesc, ptr, u16RxBufferSize);
2578 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2579 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2580 return rc;
2581 ptr += u16RxBufferSize;
2582 cb -= u16RxBufferSize;
2583 }
2584 else
2585 {
2586 pDesc->status.fEOP = true;
2587 e1kCsRxLeave(pThis);
2588 e1kStoreRxFragment(pDevIns, pThis, pDesc, ptr, cb);
2589# ifdef E1K_WITH_RXD_CACHE
2590 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2591 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2592 return rc;
2593 cb = 0;
2594# else /* !E1K_WITH_RXD_CACHE */
2595 pThis->led.Actual.s.fReading = 0;
2596 return VINF_SUCCESS;
2597# endif /* !E1K_WITH_RXD_CACHE */
2598 }
2599 /*
2600 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2601 * is not defined.
2602 */
2603 }
2604# ifdef E1K_WITH_RXD_CACHE
2605 /* Write back the descriptor. */
2606 pDesc->status.fDD = true;
2607 e1kRxDPut(pDevIns, pThis, pDesc);
2608# else /* !E1K_WITH_RXD_CACHE */
2609 else
2610 {
2611 /* Write back the descriptor. */
2612 pDesc->status.fDD = true;
2613 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2614 e1kAdvanceRDH(pDevIns, pThis);
2615 }
2616# endif /* !E1K_WITH_RXD_CACHE */
2617 }
2618
2619 if (cb > 0)
2620 E1kLog(("%s Out of receive buffers, dropping %u bytes\n", pThis->szPrf, cb));
2621
2622 pThis->led.Actual.s.fReading = 0;
2623
2624 e1kCsRxLeave(pThis);
2625# ifdef E1K_WITH_RXD_CACHE
2626 /* Complete packet has been stored -- it is time to let the guest know. */
2627# ifdef E1K_USE_RX_TIMERS
2628 if (RDTR)
2629 {
2630 /* Arm the timer to fire in RDTR usec (discard .024) */
2631 e1kArmTimer(pDevIns, pThis, pThis->hRIDTimer, RDTR);
2632 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2633 if (RADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hRADTimer))
2634 e1kArmTimer(pDevIns, pThis, pThis->hRADTimer, RADV);
2635 }
2636 else
2637 {
2638# endif /* E1K_USE_RX_TIMERS */
2639 /* 0 delay means immediate interrupt */
2640 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2641 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
2642# ifdef E1K_USE_RX_TIMERS
2643 }
2644# endif /* E1K_USE_RX_TIMERS */
2645# endif /* E1K_WITH_RXD_CACHE */
2646
2647 return VINF_SUCCESS;
2648#else /* !IN_RING3 */
2649 RT_NOREF(pDevIns, pThis, pvBuf, cb, status);
2650 return VERR_INTERNAL_ERROR_2;
2651#endif /* !IN_RING3 */
2652}
2653
2654
2655#ifdef IN_RING3
2656/**
2657 * Bring the link up after the configured delay, 5 seconds by default.
2658 *
2659 * @param pDevIns The device instance.
2660 * @param pThis The device state structure.
2661 * @thread any
2662 */
2663DECLINLINE(void) e1kBringLinkUpDelayed(PPDMDEVINS pDevIns, PE1KSTATE pThis)
2664{
2665 E1kLog(("%s Will bring up the link in %d seconds...\n",
2666 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
2667 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, pThis->cMsLinkUpDelay * 1000);
2668}
2669
2670/**
2671 * Bring up the link immediately.
2672 *
2673 * @param pDevIns The device instance.
2674 * @param pThis The device state structure.
2675 * @param pThisCC The current context instance data.
2676 */
2677DECLINLINE(void) e1kR3LinkUp(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2678{
2679 E1kLog(("%s Link is up\n", pThis->szPrf));
2680 STATUS |= STATUS_LU;
2681 Phy::setLinkStatus(&pThis->phy, true);
2682 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2683 if (pThisCC->pDrvR3)
2684 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_UP);
2685 /* Trigger processing of pending TX descriptors (see @bugref{8942}). */
2686 PDMDevHlpTaskTrigger(pDevIns, pThis->hTxTask);
2687}
2688
2689/**
2690 * Bring down the link immediately.
2691 *
2692 * @param pDevIns The device instance.
2693 * @param pThis The device state structure.
2694 * @param pThisCC The current context instance data.
2695 */
2696DECLINLINE(void) e1kR3LinkDown(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2697{
2698 E1kLog(("%s Link is down\n", pThis->szPrf));
2699 STATUS &= ~STATUS_LU;
2700#ifdef E1K_LSC_ON_RESET
2701 Phy::setLinkStatus(&pThis->phy, false);
2702#endif /* E1K_LSC_ON_RESET */
2703 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2704 if (pThisCC->pDrvR3)
2705 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2706}
2707
2708/**
2709 * Bring down the link temporarily.
2710 *
2711 * @param pDevIns The device instance.
2712 * @param pThis The device state structure.
2713 * @param pThisCC The current context instance data.
2714 */
2715DECLINLINE(void) e1kR3LinkDownTemp(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2716{
2717 E1kLog(("%s Link is down temporarily\n", pThis->szPrf));
2718 STATUS &= ~STATUS_LU;
2719 Phy::setLinkStatus(&pThis->phy, false);
2720 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2721 /*
2722 * Notifying the associated driver that the link went down (even temporarily)
2723 * seems to be the right thing, but it was not done before. This may cause
2724 * a regression if the driver does not expect the link to go down as a result
2725 * of sending PDMNETWORKLINKSTATE_DOWN_RESUME to this device. Earlier versions
2726 * of code notified the driver that the link was up! See @bugref{7057}.
2727 */
2728 if (pThisCC->pDrvR3)
2729 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2730 e1kBringLinkUpDelayed(pDevIns, pThis);
2731}
2732#endif /* IN_RING3 */
2733
2734#if 0 /* unused */
2735/**
2736 * Read handler for Device Control register.
2737 *
2738 * Reflects the current MDIO pin state from the PHY when MDIO is configured as an input.
2739 *
2740 * @returns VBox status code.
2741 *
2742 * @param pThis The device state structure.
2743 * @param offset Register offset in memory-mapped frame.
2744 * @param index Register index in register array.
2745 * @param mask Used to implement partial reads (8 and 16-bit).
2746 */
2747static int e1kRegReadCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2748{
2749 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2750 pThis->szPrf, (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2751 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2752 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2753 {
2754 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2755 if (Phy::readMDIO(&pThis->phy))
2756 *pu32Value = CTRL | CTRL_MDIO;
2757 else
2758 *pu32Value = CTRL & ~CTRL_MDIO;
2759 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2760 pThis->szPrf, !!(*pu32Value & CTRL_MDIO)));
2761 }
2762 else
2763 {
2764 /* MDIO pin is used for output, ignore it */
2765 *pu32Value = CTRL;
2766 }
2767 return VINF_SUCCESS;
2768}
2769#endif /* unused */
2770
2771/**
2772 * A callback used by PHY to indicate that the link needs to be updated due to
2773 * reset of PHY.
2774 *
2775 * @param pDevIns The device instance.
2776 * @thread any
2777 */
2778void e1kPhyLinkResetCallback(PPDMDEVINS pDevIns)
2779{
2780 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
2781
2782 /* Make sure we have cable connected and MAC can talk to PHY */
2783 if (pThis->fCableConnected && (CTRL & CTRL_SLU))
2784 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, E1K_INIT_LINKUP_DELAY_US);
2785}
2786
2787/**
2788 * Write handler for Device Control register.
2789 *
2790 * Handles reset.
2791 *
2792 * @param pThis The device state structure.
2793 * @param offset Register offset in memory-mapped frame.
2794 * @param index Register index in register array.
2795 * @param value The value to store.
2796 * @param mask Used to implement partial writes (8 and 16-bit).
2797 * @thread EMT
2798 */
2799static int e1kRegWriteCTRL(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2800{
2801 int rc = VINF_SUCCESS;
2802
2803 if (value & CTRL_RESET)
2804 { /* RST */
2805#ifndef IN_RING3
2806 return VINF_IOM_R3_MMIO_WRITE;
2807#else
2808 e1kR3HardReset(pDevIns, pThis, PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC));
2809#endif
2810 }
2811 else
2812 {
2813#ifdef E1K_LSC_ON_SLU
2814 /*
2815 * When the guest changes 'Set Link Up' bit from 0 to 1 we check if
2816 * the link is down and the cable is connected, and if they are we
2817 * bring the link up, see @bugref{8624}.
2818 */
2819 if ( (value & CTRL_SLU)
2820 && !(CTRL & CTRL_SLU)
2821 && pThis->fCableConnected
2822 && !(STATUS & STATUS_LU))
2823 {
2824 /* It should take about 2 seconds for the link to come up */
2825 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, E1K_INIT_LINKUP_DELAY_US);
2826 }
2827#else /* !E1K_LSC_ON_SLU */
2828 if ( (value & CTRL_SLU)
2829 && !(CTRL & CTRL_SLU)
2830 && pThis->fCableConnected
2831 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hLUTimer))
2832 {
2833 /* PXE does not use LSC interrupts, see @bugref{9113}. */
2834 STATUS |= STATUS_LU;
2835 }
2836#endif /* !E1K_LSC_ON_SLU */
2837 if ((value & CTRL_VME) != (CTRL & CTRL_VME))
2838 {
2839 E1kLog(("%s VLAN Mode %s\n", pThis->szPrf, (value & CTRL_VME) ? "Enabled" : "Disabled"));
2840 }
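        /* Software bit-bangs the MII management interface by toggling the MDC/MDIO bits in CTRL. */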
2841 Log7(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2842 pThis->szPrf, (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2843 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2844 if (value & CTRL_MDC)
2845 {
2846 if (value & CTRL_MDIO_DIR)
2847 {
2848 Log7(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2849 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2850 Phy::writeMDIO(&pThis->phy, !!(value & CTRL_MDIO), pDevIns);
2851 }
2852 else
2853 {
2854 if (Phy::readMDIO(&pThis->phy))
2855 value |= CTRL_MDIO;
2856 else
2857 value &= ~CTRL_MDIO;
2858 Log7(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2859 }
2860 }
2861 rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
2862 }
2863
2864 return rc;
2865}
2866
2867/**
2868 * Write handler for EEPROM/Flash Control/Data register.
2869 *
2870 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2871 *
2872 * @param pThis The device state structure.
2873 * @param offset Register offset in memory-mapped frame.
2874 * @param index Register index in register array.
2875 * @param value The value to store.
2876 * @param mask Used to implement partial writes (8 and 16-bit).
2877 * @thread EMT
2878 */
2879static int e1kRegWriteEECD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2880{
2881 RT_NOREF(pDevIns, offset, index);
2882#ifdef IN_RING3
2883 /* So far we are concerned with the lower byte only */
2884 if ((EECD & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2885 {
2886 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2887 /* Note: 82543GC does not need to request EEPROM access */
2888 STAM_PROFILE_ADV_START(&pThis->StatEEPROMWrite, a);
2889 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
2890 pThisCC->eeprom.write(value & EECD_EE_WIRES);
2891 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMWrite, a);
2892 }
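    /* Grant EEPROM access as soon as software requests it; revoke the grant when EE_REQ is cleared. */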
2893 if (value & EECD_EE_REQ)
2894 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2895 else
2896 EECD &= ~EECD_EE_GNT;
2897 //e1kRegWriteDefault(pThis, offset, index, value );
2898
2899 return VINF_SUCCESS;
2900#else /* !IN_RING3 */
2901 RT_NOREF(pThis, value);
2902 return VINF_IOM_R3_MMIO_WRITE;
2903#endif /* !IN_RING3 */
2904}
2905
2906/**
2907 * Read handler for EEPROM/Flash Control/Data register.
2908 *
2909 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2910 *
2911 * @returns VBox status code.
2912 *
2913 * @param pThis The device state structure.
2914 * @param offset Register offset in memory-mapped frame.
2915 * @param index Register index in register array.
2916 * @param mask Used to implement partial reads (8 and 16-bit).
2917 * @thread EMT
2918 */
2919static int e1kRegReadEECD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2920{
2921#ifdef IN_RING3
2922 uint32_t value;
2923 int rc = e1kRegReadDefault(pDevIns, pThis, offset, index, &value);
2924 if (RT_SUCCESS(rc))
2925 {
2926 if ((value & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2927 {
2928 /* Note: 82543GC does not need to request EEPROM access */
2929 /* Access to EEPROM granted -- get 4-wire bits to EEPROM device */
2930 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2931 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
2932 value |= pThisCC->eeprom.read();
2933 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2934 }
2935 *pu32Value = value;
2936 }
2937
2938 return rc;
2939#else /* !IN_RING3 */
2940 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(pu32Value);
2941 return VINF_IOM_R3_MMIO_READ;
2942#endif /* !IN_RING3 */
2943}
2944
2945/**
2946 * Write handler for EEPROM Read register.
2947 *
2948 * Handles EEPROM word access requests, reads EEPROM and stores the result
2949 * into DATA field.
2950 *
2951 * @param pDevIns The device instance.
2952 * @param pThis The device state structure.
2953 * @param offset Register offset in memory-mapped frame.
2954 * @param index Register index in register array.
2955 * @param value The value to store.
2956 * @thread EMT
2957 */
2958static int e1kRegWriteEERD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2959{
2960#ifdef IN_RING3
2961 /* Make use of 'writable' and 'readable' masks. */
2962 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
2963 /* DONE and DATA are set only if read was triggered by START. */
2964 if (value & EERD_START)
2965 {
2966 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2967 uint16_t tmp;
2968 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
2969 if (pThisCC->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2970 SET_BITS(EERD, DATA, tmp);
2971 EERD |= EERD_DONE;
2972 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2973 }
2974
2975 return VINF_SUCCESS;
2976#else /* !IN_RING3 */
2977 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
2978 return VINF_IOM_R3_MMIO_WRITE;
2979#endif /* !IN_RING3 */
2980}
2981
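/*
 * Example (illustrative, not part of the device code): the EERD protocol the
 * handler above implements, as seen from a guest driver on an 82540EM-style
 * chip -- write START plus the word address, poll DONE, read DATA from the
 * upper half of the register.  mmioWrite32/mmioRead32 and E1K_REG_EERD are
 * hypothetical guest-side helpers; the bit layout differs on 82541xx/82547xx.
 *
 *     uint16_t guestEepromReadWord(uint8_t uWordAddr)
 *     {
 *         mmioWrite32(E1K_REG_EERD, EERD_START | ((uint32_t)uWordAddr << 8));
 *         uint32_t u32;
 *         do
 *             u32 = mmioRead32(E1K_REG_EERD); // this emulation sets DONE right away
 *         while (!(u32 & EERD_DONE));
 *         return (uint16_t)(u32 >> 16);       // DATA field
 *     }
 */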
2982
2983/**
2984 * Write handler for MDI Control register.
2985 *
2986 * Handles PHY read/write requests; forwards requests to internal PHY device.
2987 *
2988 * @param pDevIns The device instance.
2989 * @param pThis The device state structure.
2990 * @param offset Register offset in memory-mapped frame.
2991 * @param index Register index in register array.
2992 * @param value The value to store.
2993 * @thread EMT
2994 */
2995static int e1kRegWriteMDIC(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2996{
2997 if (value & MDIC_INT_EN)
2998 {
2999 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
3000 pThis->szPrf));
3001 }
3002 else if (value & MDIC_READY)
3003 {
3004 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
3005 pThis->szPrf));
3006 }
3007 else if (GET_BITS_V(value, MDIC, PHY) != 1)
3008 {
3009 E1kLog(("%s WARNING! Access to invalid PHY detected, phy=%d.\n",
3010 pThis->szPrf, GET_BITS_V(value, MDIC, PHY)));
3011 /*
3012 * Some drivers scan the MDIO bus for a PHY. We can work with these
3013 * drivers if we set MDIC_READY and MDIC_ERROR when there isn't a PHY
3014 * at the requested address, see @bugref{7346}.
3015 */
3016 MDIC = MDIC_READY | MDIC_ERROR;
3017 }
3018 else
3019 {
3020 /* Store the value */
3021 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3022 STAM_COUNTER_INC(&pThis->StatPHYAccesses);
3023 /* Forward op to PHY */
3024 if (value & MDIC_OP_READ)
3025 SET_BITS(MDIC, DATA, Phy::readRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), pDevIns));
3026 else
3027 Phy::writeRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK, pDevIns);
3028 /* Let software know that we are done */
3029 MDIC |= MDIC_READY;
3030 }
3031
3032 return VINF_SUCCESS;
3033}
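/*
 * Example (illustrative, not part of the device code): the MDIC handshake that
 * e1kRegWriteMDIC() above serves.  A guest driver writes the opcode, the PHY
 * address (the internal PHY answers at address 1) and the register number,
 * then polls READY and extracts the data.  mmioWrite32/mmioRead32 and the
 * *_SHIFT constants are hypothetical guest-side helpers; with this emulation
 * READY is already set when the write completes.
 *
 *     uint16_t guestPhyRead(unsigned uPhyReg)
 *     {
 *         mmioWrite32(E1K_REG_MDIC, MDIC_OP_READ | (1 << MDIC_PHY_SHIFT) | (uPhyReg << MDIC_REG_SHIFT));
 *         uint32_t u32;
 *         do
 *             u32 = mmioRead32(E1K_REG_MDIC);
 *         while (!(u32 & MDIC_READY));
 *         return (uint16_t)(u32 & MDIC_DATA_MASK);
 *     }
 */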
3034
3035/**
3036 * Write handler for Interrupt Cause Read register.
3037 *
3038 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
3039 *
3040 * @param pDevIns The device instance.
3041 * @param pThis The device state structure.
3042 * @param offset Register offset in memory-mapped frame.
3043 * @param index Register index in register array.
3044 * @param value The value to store.
3045 * @thread EMT
3046 */
3047static int e1kRegWriteICR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3048{
3049 ICR &= ~value;
3050
3051 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index);
3052 return VINF_SUCCESS;
3053}
3054
3055/**
3056 * Read handler for Interrupt Cause Read register.
3057 *
3058 * Reading this register acknowledges all interrupts.
3059 *
3060 * @returns VBox status code.
3061 *
3062 * @param pThis The device state structure.
3063 * @param offset Register offset in memory-mapped frame.
3064 * @param index Register index in register array.
3065 * @param pu32Value Where to store the value read.
3066 * @thread EMT
3067 */
3068static int e1kRegReadICR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
3069{
3070 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_READ);
3071 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3072 return rc;
3073
3074 uint32_t value = 0;
3075 rc = e1kRegReadDefault(pDevIns, pThis, offset, index, &value);
3076 if (RT_SUCCESS(rc))
3077 {
3078 if (value)
3079 {
3080 if (!pThis->fIntRaised)
3081 E1K_INC_ISTAT_CNT(pThis->uStatNoIntICR);
3082 /*
3083 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
3084 * with disabled interrupts.
3085 */
3086 //if (IMS)
3087 if (1)
3088 {
3089 /*
3090 * Interrupts were enabled -- we are supposedly at the very
3091 * beginning of the interrupt handler
3092 */
3093 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
3094 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", pThis->szPrf, ICR));
3095 /* Clear all pending interrupts */
3096 ICR = 0;
3097 pThis->fIntRaised = false;
3098 /* Lower(0) INTA(0) */
3099 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
3100
3101 pThis->u64AckedAt = PDMDevHlpTimerGet(pDevIns, pThis->hIntTimer);
3102 if (pThis->fIntMaskUsed)
3103 pThis->fDelayInts = true;
3104 }
3105 else
3106 {
3107 /*
3108 * Interrupts are disabled -- in Windows guests the ICR read is done
3109 * just before re-enabling interrupts
3110 */
3111 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
3112 }
3113 }
3114 *pu32Value = value;
3115 }
3116 e1kCsLeave(pThis);
3117
3118 return rc;
3119}
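/*
 * Example (illustrative, not part of the device code): the acknowledge pattern
 * the ICR read handler above is tuned for.  A guest ISR reads ICR exactly once,
 * which both reports and clears every pending cause; the causes are then
 * dispatched from the saved copy (guest-side helpers are hypothetical):
 *
 *     uint32_t icr = mmioRead32(E1K_REG_ICR);  // ack + clear in one access
 *     if (icr & ICR_RXT0)
 *         guestServiceReceive();
 *     if (icr & ICR_TXDW)
 *         guestServiceTxDone();
 */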
3120
3121/**
3122 * Write handler for Interrupt Cause Set register.
3123 *
3124 * Bits corresponding to 1s in 'value' will be set in ICR register.
3125 *
3126 * @param pDevIns The device instance.
3127 * @param pThis The device state structure.
3128 * @param offset Register offset in memory-mapped frame.
3129 * @param index Register index in register array.
3130 * @param value The value to store.
3131 * @thread EMT
3132 */
3133static int e1kRegWriteICS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3134{
3135 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3136 E1K_INC_ISTAT_CNT(pThis->uStatIntICS);
3137 return e1kRaiseInterrupt(pDevIns, pThis, VINF_IOM_R3_MMIO_WRITE, value & g_aE1kRegMap[ICS_IDX].writable);
3138}
3139
3140/**
3141 * Write handler for Interrupt Mask Set register.
3142 *
3143 * Will trigger pending interrupts.
3144 *
3145 * @param pDevIns The device instance.
3146 * @param pThis The device state structure.
3147 * @param offset Register offset in memory-mapped frame.
3148 * @param index Register index in register array.
3149 * @param value The value to store.
3150 * @thread EMT
3151 */
3152static int e1kRegWriteIMS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3153{
3154 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3155
3156 IMS |= value;
3157 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
3158 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", pThis->szPrf));
3159 /*
3160 * We cannot raise an interrupt here as it will occasionally cause an interrupt storm
3161 * in Windows guests (see @bugref{8624}, @bugref{5023}).
3162 */
3163 if ((ICR & IMS) && !pThis->fLocked)
3164 {
3165 E1K_INC_ISTAT_CNT(pThis->uStatIntIMS);
3166 e1kPostponeInterrupt(pDevIns, pThis, E1K_IMS_INT_DELAY_NS);
3167 }
3168
3169 return VINF_SUCCESS;
3170}
3171
3172/**
3173 * Write handler for Interrupt Mask Clear register.
3174 *
3175 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
3176 *
3177 * @param pDevIns The device instance.
3178 * @param pThis The device state structure.
3179 * @param offset Register offset in memory-mapped frame.
3180 * @param index Register index in register array.
3181 * @param value The value to store.
3182 * @thread EMT
3183 */
3184static int e1kRegWriteIMC(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3185{
3186 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3187
3188 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3189 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3190 return rc;
3191 if (pThis->fIntRaised)
3192 {
3193 /*
3194 * Technically we should reset fIntRaised in the ICR read handler, but that would cause
3195 * Windows to freeze, since it may receive an interrupt while still at the very beginning
3196 * of its interrupt handler.
3197 */
3198 E1K_INC_ISTAT_CNT(pThis->uStatIntLower);
3199 STAM_COUNTER_INC(&pThis->StatIntsPrevented);
3200 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
3201 /* Lower(0) INTA(0) */
3202 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
3203 pThis->fIntRaised = false;
3204 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
3205 }
3206 IMS &= ~value;
3207 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", pThis->szPrf));
3208 e1kCsLeave(pThis);
3209
3210 return VINF_SUCCESS;
3211}
3212
3213/**
3214 * Write handler for Receive Control register.
3215 *
3216 * @param pDevIns The device instance.
3217 * @param pThis The device state structure.
3218 * @param offset Register offset in memory-mapped frame.
3219 * @param index Register index in register array.
3220 * @param value The value to store.
3221 * @thread EMT
3222 */
3223static int e1kRegWriteRCTL(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3224{
3225 /* Update promiscuous mode */
3226 bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
3227 if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
3228 {
3229 /* Promiscuity has changed, pass the knowledge on. */
3230#ifndef IN_RING3
3231 return VINF_IOM_R3_MMIO_WRITE;
3232#else
3233 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3234 if (pThisCC->pDrvR3)
3235 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, fBecomePromiscous);
3236#endif
3237 }
3238
3239 /* Adjust receive buffer size */
3240 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
3241 if (value & RCTL_BSEX)
3242 cbRxBuf *= 16;
3243 if (cbRxBuf > E1K_MAX_RX_PKT_SIZE)
3244 cbRxBuf = E1K_MAX_RX_PKT_SIZE;
3245 if (cbRxBuf != pThis->u16RxBSize)
3246 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
3247 pThis->szPrf, cbRxBuf, pThis->u16RxBSize));
3248 pThis->u16RxBSize = cbRxBuf;
3249
3250 /* Update the register */
3251 return e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3252}
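/*
 * Worked example (illustrative values) for the buffer size computation in
 * e1kRegWriteRCTL() above: the size is 2048 >> BSIZE, multiplied by 16 when
 * BSEX is set, and finally clipped to E1K_MAX_RX_PKT_SIZE.
 *
 *     BSIZE=0, BSEX=0  ->  2048 bytes
 *     BSIZE=3, BSEX=0  ->   256 bytes
 *     BSIZE=1, BSEX=1  ->  1024 * 16 = 16384 bytes
 *     BSIZE=0, BSEX=1  ->  2048 * 16 = 32768 bytes, clipped to E1K_MAX_RX_PKT_SIZE
 */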
3253
3254/**
3255 * Write handler for Packet Buffer Allocation register.
3256 *
3257 * TXA = 64 - RXA.
3258 *
3259 * @param pDevIns The device instance.
3260 * @param pThis The device state structure.
3261 * @param offset Register offset in memory-mapped frame.
3262 * @param index Register index in register array.
3263 * @param value The value to store.
3264 * @thread EMT
3265 */
3266static int e1kRegWritePBA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3267{
3268 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3269 PBA_st->txa = 64 - PBA_st->rxa;
3270
3271 return VINF_SUCCESS;
3272}
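/*
 * Example (illustrative values): the packet buffer split maintained by
 * e1kRegWritePBA() above always adds up to 64 (the register is in KB units),
 * so a guest writing RXA = 48 ends up with TXA = 64 - 48 = 16.
 */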
3273
3274/**
3275 * Write handler for Receive Descriptor Tail register.
3276 *
3277 * @remarks Write into RDT forces switch to HC and signal to
3278 * e1kR3NetworkDown_WaitReceiveAvail().
3279 *
3280 * @returns VBox status code.
3281 *
3282 * @param pDevIns The device instance.
3283 * @param pThis The device state structure.
3284 * @param offset Register offset in memory-mapped frame.
3285 * @param index Register index in register array.
3286 * @param value The value to store.
3287 * @thread EMT
3288 */
3289static int e1kRegWriteRDT(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3290{
3291#ifndef IN_RING3
3292 /* XXX */
3293// return VINF_IOM_R3_MMIO_WRITE;
3294#endif
3295 int rc = e1kCsRxEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3296 if (RT_LIKELY(rc == VINF_SUCCESS))
3297 {
3298 E1kLog(("%s e1kRegWriteRDT\n", pThis->szPrf));
3299#ifndef E1K_WITH_RXD_CACHE
3300 /*
3301 * Some drivers advance RDT too far, so that it equals RDH. This
3302 * somehow manages to work with real hardware but not with this
3303 * emulated device. We can work with these drivers if we just
3304 * write 1 less when we see a driver writing RDT equal to RDH,
3305 * see @bugref{7346}.
3306 */
3307 if (value == RDH)
3308 {
3309 if (RDH == 0)
3310 value = (RDLEN / sizeof(E1KRXDESC)) - 1;
3311 else
3312 value = RDH - 1;
3313 }
3314#endif /* !E1K_WITH_RXD_CACHE */
3315 rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3316#ifdef E1K_WITH_RXD_CACHE
3317 /*
3318 * We need to fetch descriptors now as RDT may go full circle
3319 * before we attempt to store a received packet. For example,
3320 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
3321 * size being only 8 descriptors! Note that we fetch descriptors
3322 * only when the cache is empty to reduce the number of memory reads
3323 * in case of frequent RDT writes. Don't fetch anything when the
3324 * receiver is disabled either as RDH, RDT, RDLEN can be in some
3325 * messed up state.
3326 * Note that even though the cache may seem empty, meaning that there are
3327 * no more available descriptors in it, it may still be in use by the RX
3328 * thread, which has not yet written the last descriptor back but has
3329 * temporarily released the RX lock in order to write the packet body
3330 * to the descriptor's buffer. At this point we are still going to prefetch,
3331 * but it won't actually fetch anything if there are no unused slots in
3332 * our "empty" cache (nRxDFetched==E1K_RXD_CACHE_SIZE). We must not
3333 * reset the cache here even if it appears empty. It will be reset at
3334 * a later point in e1kRxDGet().
3335 */
3336 if (e1kRxDIsCacheEmpty(pThis) && (RCTL & RCTL_EN))
3337 e1kRxDPrefetch(pDevIns, pThis);
3338#endif /* E1K_WITH_RXD_CACHE */
3339 e1kCsRxLeave(pThis);
3340 if (RT_SUCCESS(rc))
3341 {
3342 /* Signal that we have more receive descriptors available. */
3343 e1kWakeupReceive(pDevIns, pThis);
3344 }
3345 }
3346 return rc;
3347}
3348
3349/**
3350 * Write handler for Receive Delay Timer register.
3351 *
3352 * @param pDevIns The device instance.
3353 * @param pThis The device state structure.
3354 * @param offset Register offset in memory-mapped frame.
3355 * @param index Register index in register array.
3356 * @param value The value to store.
3357 * @thread EMT
3358 */
3359static int e1kRegWriteRDTR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3360{
3361 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3362 if (value & RDTR_FPD)
3363 {
3364 /* Flush requested, cancel both timers and raise interrupt */
3365#ifdef E1K_USE_RX_TIMERS
3366 e1kCancelTimer(pDevIns, pThis, pThis->hRIDTimer);
3367 e1kCancelTimer(pDevIns, pThis, pThis->hRADTimer);
3368#endif
3369 E1K_INC_ISTAT_CNT(pThis->uStatIntRDTR);
3370 return e1kRaiseInterrupt(pDevIns, pThis, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3371 }
3372
3373 return VINF_SUCCESS;
3374}
3375
3376DECLINLINE(uint32_t) e1kGetTxLen(PE1KSTATE pThis)
3377{
3378 /**
3379 * Make sure TDT won't change during computation. EMT may modify TDT at
3380 * any moment.
3381 */
3382 uint32_t tdt = TDT;
3383 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
3384}
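/*
 * Worked example (made-up numbers) for the ring arithmetic in e1kGetTxLen()
 * above, assuming a 16-descriptor ring (TDLEN = 16 * sizeof(E1KTXDESC)):
 *
 *     TDH = 2,  TDT = 7  ->   0 + 7 - 2  = 5 descriptors pending
 *     TDH = 14, TDT = 3  ->  16 + 3 - 14 = 5 descriptors pending (tail wrapped)
 *     TDH = 5,  TDT = 5  ->   0 descriptors pending (ring empty)
 */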
3385
3386#ifdef IN_RING3
3387
3388# ifdef E1K_TX_DELAY
3389/**
3390 * Transmit Delay Timer handler.
3391 *
3392 * @remarks We only get here when the timer expires.
3393 *
3394 * @param pDevIns Pointer to device instance structure.
3395 * @param pTimer Pointer to the timer.
3396 * @param pvUser NULL.
3397 * @thread EMT
3398 */
3399static DECLCALLBACK(void) e1kR3TxDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3400{
3401 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3402 Assert(PDMCritSectIsOwner(&pThis->csTx));
3403
3404 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayExp);
3405# ifdef E1K_INT_STATS
3406 uint64_t u64Elapsed = RTTimeNanoTS() - pThis->u64ArmedAt;
3407 if (u64Elapsed > pThis->uStatMaxTxDelay)
3408 pThis->uStatMaxTxDelay = u64Elapsed;
3409# endif
3410 int rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
3411 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3412}
3413# endif /* E1K_TX_DELAY */
3414
3415//# ifdef E1K_USE_TX_TIMERS
3416
3417/**
3418 * Transmit Interrupt Delay Timer handler.
3419 *
3420 * @remarks We only get here when the timer expires.
3421 *
3422 * @param pDevIns Pointer to device instance structure.
3423 * @param pTimer Pointer to the timer.
3424 * @param pvUser NULL.
3425 * @thread EMT
3426 */
3427static DECLCALLBACK(void) e1kR3TxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3428{
3429 RT_NOREF(pDevIns);
3430 RT_NOREF(pTimer);
3431 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3432
3433 E1K_INC_ISTAT_CNT(pThis->uStatTID);
3434 /* Cancel absolute delay timer as we have already got attention */
3435# ifndef E1K_NO_TAD
3436 e1kCancelTimer(pDevIns, pThis, pThis->hTADTimer);
3437# endif
3438 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXDW);
3439}
3440
3441/**
3442 * Transmit Absolute Delay Timer handler.
3443 *
3444 * @remarks We only get here when the timer expires.
3445 *
3446 * @param pDevIns Pointer to device instance structure.
3447 * @param pTimer Pointer to the timer.
3448 * @param pvUser NULL.
3449 * @thread EMT
3450 */
3451static DECLCALLBACK(void) e1kR3TxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3452{
3453 RT_NOREF(pDevIns);
3454 RT_NOREF(pTimer);
3455 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3456
3457 E1K_INC_ISTAT_CNT(pThis->uStatTAD);
3458 /* Cancel interrupt delay timer as we have already got attention */
3459 e1kCancelTimer(pDevIns, pThis, pThis->hTIDTimer);
3460 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXDW);
3461}
3462
3463//# endif /* E1K_USE_TX_TIMERS */
3464# ifdef E1K_USE_RX_TIMERS
3465
3466/**
3467 * Receive Interrupt Delay Timer handler.
3468 *
3469 * @remarks We only get here when the timer expires.
3470 *
3471 * @param pDevIns Pointer to device instance structure.
3472 * @param pTimer Pointer to the timer.
3473 * @param pvUser NULL.
3474 * @thread EMT
3475 */
3476static DECLCALLBACK(void) e1kR3RxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3477{
3478 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3479
3480 E1K_INC_ISTAT_CNT(pThis->uStatRID);
3481 /* Cancel absolute delay timer as we have already got attention */
3482 e1kCancelTimer(pDevIns, pThis, pThis->hRADTimer);
3483 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
3484}
3485
3486/**
3487 * Receive Absolute Delay Timer handler.
3488 *
3489 * @remarks We only get here when the timer expires.
3490 *
3491 * @param pDevIns Pointer to device instance structure.
3492 * @param pTimer Pointer to the timer.
3493 * @param pvUser NULL.
3494 * @thread EMT
3495 */
3496static DECLCALLBACK(void) e1kR3RxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3497{
3498 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3499
3500 E1K_INC_ISTAT_CNT(pThis->uStatRAD);
3501 /* Cancel interrupt delay timer as we have already got attention */
3502 e1kCancelTimer(pDevIns, pThis, pThis->hRIDTimer);
3503 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
3504}
3505
3506# endif /* E1K_USE_RX_TIMERS */
3507
3508/**
3509 * Late Interrupt Timer handler.
3510 *
3511 * @param pDevIns Pointer to device instance structure.
3512 * @param pTimer Pointer to the timer.
3513 * @param pvUser NULL.
3514 * @thread EMT
3515 */
3516static DECLCALLBACK(void) e1kR3LateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3517{
3518 RT_NOREF(pDevIns, pTimer);
3519 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3520
3521 STAM_PROFILE_ADV_START(&pThis->StatLateIntTimer, a);
3522 STAM_COUNTER_INC(&pThis->StatLateInts);
3523 E1K_INC_ISTAT_CNT(pThis->uStatIntLate);
3524# if 0
3525 if (pThis->iStatIntLost > -100)
3526 pThis->iStatIntLost--;
3527# endif
3528 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, 0);
3529 STAM_PROFILE_ADV_STOP(&pThis->StatLateIntTimer, a);
3530}
3531
3532/**
3533 * Link Up Timer handler.
3534 *
3535 * @param pDevIns Pointer to device instance structure.
3536 * @param pTimer Pointer to the timer.
3537 * @param pvUser NULL.
3538 * @thread EMT
3539 */
3540static DECLCALLBACK(void) e1kR3LinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3541{
3542 RT_NOREF(pTimer);
3543 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3544 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3545
3546 /*
3547 * This can happen if we set the link status to down when the link-up timer was
3548 * already armed (shortly after e1kLoadDone(), or when the cable was disconnected
3549 * and reconnected very quickly). Moreover, 82543GC triggers LSC on reset even if
3550 * the cable is unplugged (see @bugref{8942}).
3551 */
3552 if (pThis->fCableConnected)
3553 {
3554 /* 82543GC does not have an internal PHY */
3555 if (pThis->eChip == E1K_CHIP_82543GC || (CTRL & CTRL_SLU))
3556 e1kR3LinkUp(pDevIns, pThis, pThisCC);
3557 }
3558# ifdef E1K_LSC_ON_RESET
3559 else if (pThis->eChip == E1K_CHIP_82543GC)
3560 e1kR3LinkDown(pDevIns, pThis, pThisCC);
3561# endif /* E1K_LSC_ON_RESET */
3562}
3563
3564#endif /* IN_RING3 */
3565
3566/**
3567 * Sets up the GSO context according to the TSE new context descriptor.
3568 *
3569 * @param pGso The GSO context to setup.
3570 * @param pCtx The context descriptor.
3571 */
3572DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3573{
3574 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3575
3576 /*
3577 * See if the context descriptor describes something that could be TCP or
3578 * UDP over IPv[46].
3579 */
3580 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3581 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3582 {
3583 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3584 return;
3585 }
3586 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3587 {
3588 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3589 return;
3590 }
3591 if (RT_UNLIKELY( pCtx->dw2.fTCP
3592 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3593 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3594 {
3595 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3596 return;
3597 }
3598
3599 /* The end of the TCP/UDP checksum should stop at the end of the packet or at least after the headers. */
3600 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3601 {
3602 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3603 return;
3604 }
3605
3606 /* IPv4 checksum offset. */
3607 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3608 {
3609 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3610 return;
3611 }
3612
3613 /* TCP/UDP checksum offsets. */
3614 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3615 != ( pCtx->dw2.fTCP
3616 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3617 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3618 {
3619 E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->tu.u8CSO, pCtx->tu.u8CSS, pCtx->dw2.fTCP));
3620 return;
3621 }
3622
3623 /*
3624 * Because internal networking uses a 16-bit size field for the GSO context
3625 * plus frame, we have to make sure we don't exceed it.
3626 */
3627 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3628 {
3629 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3630 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3631 return;
3632 }
3633
3634 /*
3635 * We're good for now - we'll do more checks when seeing the data.
3636 * So, figure the type of offloading and setup the context.
3637 */
3638 if (pCtx->dw2.fIP)
3639 {
3640 if (pCtx->dw2.fTCP)
3641 {
3642 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3643 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3644 }
3645 else
3646 {
3647 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3648 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3649 }
3650 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3651 * this yet it seems)... */
3652 }
3653 else
3654 {
3655 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /** @todo IPv6 UFO */
3656 if (pCtx->dw2.fTCP)
3657 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3658 else
3659 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3660 }
3661 pGso->offHdr1 = pCtx->ip.u8CSS;
3662 pGso->offHdr2 = pCtx->tu.u8CSS;
3663 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3664 pGso->cbMaxSeg = pCtx->dw3.u16MSS;
3665 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3666 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3667 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3668}
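/*
 * Example (made-up values, not from a real trace) of what e1kSetupGsoCtx()
 * above derives from a typical untagged TCP/IPv4 TSE context -- 14-byte
 * Ethernet header, 20-byte IP header, 20-byte TCP header, MSS 1460:
 *
 *     IPCSS = 14, TUCSS = 34, HDRLEN = 54, MSS = 1460
 *       ->  u8Type      = PDMNETWORKGSOTYPE_IPV4_TCP
 *           offHdr1     = 14    (start of the IP header)
 *           offHdr2     = 34    (start of the TCP header)
 *           cbHdrsSeg   = 54    (headers prepended to every segment)
 *           cbHdrsTotal = 54
 *           cbMaxSeg    = 1460  (payload bytes per segment)
 */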
3669
3670/**
3671 * Checks if we can use GSO processing for the current TSE frame.
3672 *
3673 * @param pThis The device state structure.
3674 * @param pGso The GSO context.
3675 * @param pData The first data descriptor of the frame.
3676 * @param pCtx The TSO context descriptor.
3677 */
3678DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3679{
3680 if (!pData->cmd.fTSE)
3681 {
3682 E1kLog2(("e1kCanDoGso: !TSE\n"));
3683 return false;
3684 }
3685 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3686 {
3687 E1kLog(("e1kCanDoGso: VLE\n"));
3688 return false;
3689 }
3690 if (RT_UNLIKELY(!pThis->fGSOEnabled))
3691 {
3692 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3693 return false;
3694 }
3695
3696 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3697 {
3698 case PDMNETWORKGSOTYPE_IPV4_TCP:
3699 case PDMNETWORKGSOTYPE_IPV4_UDP:
3700 if (!pData->dw3.fIXSM)
3701 {
3702 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3703 return false;
3704 }
3705 if (!pData->dw3.fTXSM)
3706 {
3707 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3708 return false;
3709 }
3710 /** @todo what other checks should we perform here? Ethernet frame type? */
3711 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3712 return true;
3713
3714 case PDMNETWORKGSOTYPE_IPV6_TCP:
3715 case PDMNETWORKGSOTYPE_IPV6_UDP:
3716 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3717 {
3718 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3719 return false;
3720 }
3721 if (!pData->dw3.fTXSM)
3722 {
3723 E1kLog(("e1kCanDoGso: !TXSM (IPv6)\n"));
3724 return false;
3725 }
3726 /** @todo what other checks should we perform here? Ethernet frame type? */
3727 E1kLog2(("e1kCanDoGso: OK, IPv6\n"));
3728 return true;
3729
3730 default:
3731 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3732 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3733 return false;
3734 }
3735}
3736
3737/**
3738 * Frees the current xmit buffer.
3739 *
3740 * @param pThis The device state structure.
3741 */
3742static void e1kXmitFreeBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC)
3743{
3744 PPDMSCATTERGATHER pSg = pThisCC->CTX_SUFF(pTxSg);
3745 if (pSg)
3746 {
3747 pThisCC->CTX_SUFF(pTxSg) = NULL;
3748
3749 if (pSg->pvAllocator != pThis)
3750 {
3751 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3752 if (pDrv)
3753 pDrv->pfnFreeBuf(pDrv, pSg);
3754 }
3755 else
3756 {
3757 /* loopback */
3758 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3759 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3760 pSg->fFlags = 0;
3761 pSg->pvAllocator = NULL;
3762 }
3763 }
3764}
3765
3766#ifndef E1K_WITH_TXD_CACHE
3767/**
3768 * Allocates an xmit buffer.
3769 *
3770 * @returns See PDMINETWORKUP::pfnAllocBuf.
3771 * @param pThis The device state structure.
3772 * @param cbMin The minimum frame size.
3773 * @param fExactSize Whether cbMin is exact or if we have to max it
3774 * out to the max MTU size.
3775 * @param fGso Whether this is a GSO frame or not.
3776 */
3777DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC, size_t cbMin, bool fExactSize, bool fGso)
3778{
3779 /* Adjust cbMin if necessary. */
3780 if (!fExactSize)
3781 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3782
3783 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3784 if (RT_UNLIKELY(pThisCC->CTX_SUFF(pTxSg)))
3785 e1kXmitFreeBuf(pThis, pThisCC);
3786 Assert(pThisCC->CTX_SUFF(pTxSg) == NULL);
3787
3788 /*
3789 * Allocate the buffer.
3790 */
3791 PPDMSCATTERGATHER pSg;
3792 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3793 {
3794 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3795 if (RT_UNLIKELY(!pDrv))
3796 return VERR_NET_DOWN;
3797 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pThis->GsoCtx : NULL, &pSg);
3798 if (RT_FAILURE(rc))
3799 {
3800 /* Suspend TX as we are out of buffers atm */
3801 STATUS |= STATUS_TXOFF;
3802 return rc;
3803 }
3804 }
3805 else
3806 {
3807 /* Create a loopback using the fallback buffer and preallocated SG. */
3808 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3809 pSg = &pThis->uTxFallback.Sg;
3810 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3811 pSg->cbUsed = 0;
3812 pSg->cbAvailable = 0;
3813 pSg->pvAllocator = pThis;
3814 pSg->pvUser = NULL; /* No GSO here. */
3815 pSg->cSegs = 1;
3816 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3817 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3818 }
3819
3820 pThisCC->CTX_SUFF(pTxSg) = pSg;
3821 return VINF_SUCCESS;
3822}
3823#else /* E1K_WITH_TXD_CACHE */
3824/**
3825 * Allocates an xmit buffer.
3826 *
3827 * @returns See PDMINETWORKUP::pfnAllocBuf.
3828 * @param pThis The device state structure.
3829 * @param cbMin The minimum frame size.
3830 * @param fExactSize Whether cbMin is exact or if we have to max it
3831 * out to the max MTU size.
3832 * @param fGso Whether this is a GSO frame or not.
3833 */
3834DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC, bool fGso)
3835{
3836 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3837 if (RT_UNLIKELY(pThisCC->CTX_SUFF(pTxSg)))
3838 e1kXmitFreeBuf(pThis, pThisCC);
3839 Assert(pThisCC->CTX_SUFF(pTxSg) == NULL);
3840
3841 /*
3842 * Allocate the buffer.
3843 */
3844 PPDMSCATTERGATHER pSg;
3845 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3846 {
3847 if (pThis->cbTxAlloc == 0)
3848 {
3849 /* Zero packet, no need for the buffer */
3850 return VINF_SUCCESS;
3851 }
3852
3853 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3854 if (RT_UNLIKELY(!pDrv))
3855 return VERR_NET_DOWN;
3856 int rc = pDrv->pfnAllocBuf(pDrv, pThis->cbTxAlloc, fGso ? &pThis->GsoCtx : NULL, &pSg);
3857 if (RT_FAILURE(rc))
3858 {
3859 /* Suspend TX as we are out of buffers atm */
3860 STATUS |= STATUS_TXOFF;
3861 return rc;
3862 }
3863 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3864 pThis->szPrf, pThis->cbTxAlloc,
3865 pThis->fVTag ? "VLAN " : "",
3866 pThis->fGSO ? "GSO " : ""));
3867 }
3868 else
3869 {
3870 /* Create a loopback using the fallback buffer and preallocated SG. */
3871 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3872 pSg = &pThis->uTxFallback.Sg;
3873 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3874 pSg->cbUsed = 0;
3875 pSg->cbAvailable = sizeof(pThis->aTxPacketFallback);
3876 pSg->pvAllocator = pThis;
3877 pSg->pvUser = NULL; /* No GSO here. */
3878 pSg->cSegs = 1;
3879 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3880 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3881 }
3882 pThis->cbTxAlloc = 0;
3883
3884 pThisCC->CTX_SUFF(pTxSg) = pSg;
3885 return VINF_SUCCESS;
3886}
3887#endif /* E1K_WITH_TXD_CACHE */
3888
3889/**
3890 * Checks if it's a GSO buffer or not.
3891 *
3892 * @returns true / false.
3893 * @param pTxSg The scatter / gather buffer.
3894 */
3895DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3896{
3897#if 0
3898 if (!pTxSg)
3899 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
3900 if (pTxSg && !pTxSg->pvUser)
3901 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
3902#endif
3903 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3904}
3905
3906#ifndef E1K_WITH_TXD_CACHE
3907/**
3908 * Load transmit descriptor from guest memory.
3909 *
3910 * @param pDevIns The device instance.
3911 * @param pDesc Pointer to descriptor union.
3912 * @param addr Physical address in guest context.
3913 * @thread E1000_TX
3914 */
3915DECLINLINE(void) e1kLoadDesc(PPDMDEVINS pDevIns, E1KTXDESC *pDesc, RTGCPHYS addr)
3916{
3917 PDMDevHlpPhysRead(pDevIns, addr, pDesc, sizeof(E1KTXDESC));
3918}
3919#else /* E1K_WITH_TXD_CACHE */
3920/**
3921 * Load transmit descriptors from guest memory.
3922 *
3923 * We need two physical reads in case the tail wrapped around the end of TX
3924 * descriptor ring.
3925 *
3926 * @returns the actual number of descriptors fetched.
3927 * @param pDevIns The device instance.
3928 * @param pThis The device state structure.
3929 * @thread E1000_TX
3930 */
3931DECLINLINE(unsigned) e1kTxDLoadMore(PPDMDEVINS pDevIns, PE1KSTATE pThis)
3932{
3933 Assert(pThis->iTxDCurrent == 0);
3934 /* We've already loaded pThis->nTxDFetched descriptors past TDH. */
3935 unsigned nDescsAvailable = e1kGetTxLen(pThis) - pThis->nTxDFetched;
3936 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pThis->nTxDFetched);
3937 unsigned nDescsTotal = TDLEN / sizeof(E1KTXDESC);
3938 unsigned nFirstNotLoaded = (TDH + pThis->nTxDFetched) % nDescsTotal;
3939 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
3940 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
3941 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
3942 nFirstNotLoaded, nDescsInSingleRead));
3943 if (nDescsToFetch == 0)
3944 return 0;
3945 E1KTXDESC* pFirstEmptyDesc = &pThis->aTxDescriptors[pThis->nTxDFetched];
3946 PDMDevHlpPhysRead(pDevIns,
3947 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3948 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
3949 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3950 pThis->szPrf, nDescsInSingleRead,
3951 TDBAH, TDBAL + TDH * sizeof(E1KTXDESC),
3952 nFirstNotLoaded, TDLEN, TDH, TDT));
3953 if (nDescsToFetch > nDescsInSingleRead)
3954 {
3955 PDMDevHlpPhysRead(pDevIns,
3956 ((uint64_t)TDBAH << 32) + TDBAL,
3957 pFirstEmptyDesc + nDescsInSingleRead,
3958 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
3959 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
3960 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
3961 TDBAH, TDBAL));
3962 }
3963 pThis->nTxDFetched += nDescsToFetch;
3964 return nDescsToFetch;
3965}
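/*
 * Worked example (made-up numbers) of the wrap-around handling in
 * e1kTxDLoadMore() above, assuming a 16-descriptor ring, an empty cache and
 * E1K_TXD_CACHE_SIZE >= 6:
 *
 *     TDH = 13, TDT = 3  ->  6 descriptors pending; the first PhysRead fetches
 *     descriptors 13..15 (up to the end of the ring), the second one fetches
 *     descriptors 0..2 from the base of the ring.
 */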
3966
3967/**
3968 * Load transmit descriptors from guest memory only if there are no loaded
3969 * descriptors.
3970 *
3971 * @returns true if there are descriptors in cache.
3972 * @param pDevIns The device instance.
3973 * @param pThis The device state structure.
3974 * @thread E1000_TX
3975 */
3976DECLINLINE(bool) e1kTxDLazyLoad(PPDMDEVINS pDevIns, PE1KSTATE pThis)
3977{
3978 if (pThis->nTxDFetched == 0)
3979 return e1kTxDLoadMore(pDevIns, pThis) != 0;
3980 return true;
3981}
3982#endif /* E1K_WITH_TXD_CACHE */
3983
3984/**
3985 * Write back transmit descriptor to guest memory.
3986 *
3987 * @param pDevIns The device instance.
3988 * @param pThis The device state structure.
3989 * @param pDesc Pointer to descriptor union.
3990 * @param addr Physical address in guest context.
3991 * @thread E1000_TX
3992 */
3993DECLINLINE(void) e1kWriteBackDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3994{
3995 /* Only the last half of the descriptor has to be written back. */
3996 e1kPrintTDesc(pThis, pDesc, "^^^");
3997 PDMDevHlpPCIPhysWrite(pDevIns, addr, pDesc, sizeof(E1KTXDESC));
3998}
3999
4000/**
4001 * Transmit complete frame.
4002 *
4003 * @remarks We skip the FCS since we're not responsible for sending anything to
4004 * a real ethernet wire.
4005 *
4006 * @param pDevIns The device instance.
4007 * @param pThis The device state structure.
4008 * @param pThisCC The current context instance data.
4009 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4010 * @thread E1000_TX
4011 */
4012static void e1kTransmitFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, bool fOnWorkerThread)
4013{
4014 PPDMSCATTERGATHER pSg = pThisCC->CTX_SUFF(pTxSg);
4015 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
4016 Assert(!pSg || pSg->cSegs == 1);
4017
4018 if (cbFrame > 70) /* unqualified guess */
4019 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
4020
4021#ifdef E1K_INT_STATS
4022 if (cbFrame <= 1514)
4023 E1K_INC_ISTAT_CNT(pThis->uStatTx1514);
4024 else if (cbFrame <= 2962)
4025 E1K_INC_ISTAT_CNT(pThis->uStatTx2962);
4026 else if (cbFrame <= 4410)
4027 E1K_INC_ISTAT_CNT(pThis->uStatTx4410);
4028 else if (cbFrame <= 5858)
4029 E1K_INC_ISTAT_CNT(pThis->uStatTx5858);
4030 else if (cbFrame <= 7306)
4031 E1K_INC_ISTAT_CNT(pThis->uStatTx7306);
4032 else if (cbFrame <= 8754)
4033 E1K_INC_ISTAT_CNT(pThis->uStatTx8754);
4034 else if (cbFrame <= 16384)
4035 E1K_INC_ISTAT_CNT(pThis->uStatTx16384);
4036 else if (cbFrame <= 32768)
4037 E1K_INC_ISTAT_CNT(pThis->uStatTx32768);
4038 else
4039 E1K_INC_ISTAT_CNT(pThis->uStatTxLarge);
4040#endif /* E1K_INT_STATS */
4041
4042 /* Add VLAN tag */
4043 if (cbFrame > 12 && pThis->fVTag)
4044 {
4045 E1kLog3(("%s Inserting VLAN tag %08x\n",
4046 pThis->szPrf, RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16)));
4047 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
4048 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16);
4049 pSg->cbUsed += 4;
4050 cbFrame += 4;
4051 Assert(pSg->cbUsed == cbFrame);
4052 Assert(pSg->cbUsed <= pSg->cbAvailable);
4053 }
4054/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
4055 "%.*Rhxd\n"
4056 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
4057 pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
4058
4059 /* Update the stats */
4060 E1K_INC_CNT32(TPT);
4061 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
4062 E1K_INC_CNT32(GPTC);
4063 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
4064 E1K_INC_CNT32(BPTC);
4065 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
4066 E1K_INC_CNT32(MPTC);
4067 /* Update octet transmit counter */
4068 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
4069 if (pThisCC->CTX_SUFF(pDrv))
4070 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, cbFrame);
4071 if (cbFrame == 64)
4072 E1K_INC_CNT32(PTC64);
4073 else if (cbFrame < 128)
4074 E1K_INC_CNT32(PTC127);
4075 else if (cbFrame < 256)
4076 E1K_INC_CNT32(PTC255);
4077 else if (cbFrame < 512)
4078 E1K_INC_CNT32(PTC511);
4079 else if (cbFrame < 1024)
4080 E1K_INC_CNT32(PTC1023);
4081 else
4082 E1K_INC_CNT32(PTC1522);
4083
4084 E1K_INC_ISTAT_CNT(pThis->uStatTxFrm);
4085
4086 /*
4087 * Dump and send the packet.
4088 */
4089 int rc = VERR_NET_DOWN;
4090 if (pSg && pSg->pvAllocator != pThis)
4091 {
4092 e1kPacketDump(pDevIns, pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
4093
4094 pThisCC->CTX_SUFF(pTxSg) = NULL;
4095 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
4096 if (pDrv)
4097 {
4098 /* Release critical section to avoid deadlock in CanReceive */
4099 //e1kCsLeave(pThis);
4100 STAM_PROFILE_START(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4101 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
4102 STAM_PROFILE_STOP(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4103 //e1kCsEnter(pThis, RT_SRC_POS);
4104 }
4105 }
4106 else if (pSg)
4107 {
4108 Assert(pSg->aSegs[0].pvSeg == pThis->aTxPacketFallback);
4109 e1kPacketDump(pDevIns, pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
4110
4111 /** @todo do we actually need to check that we're in loopback mode here? */
4112 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
4113 {
4114 E1KRXDST status;
4115 RT_ZERO(status);
4116 status.fPIF = true;
4117 e1kHandleRxPacket(pDevIns, pThis, pSg->aSegs[0].pvSeg, cbFrame, status);
4118 rc = VINF_SUCCESS;
4119 }
4120 e1kXmitFreeBuf(pThis, pThisCC);
4121 }
4122 else
4123 rc = VERR_NET_DOWN;
4124 if (RT_FAILURE(rc))
4125 {
4126 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
4127 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
4128 }
4129
4130 pThis->led.Actual.s.fWriting = 0;
4131}
4132
4133/**
4134 * Compute and write internet checksum (e1kCSum16) at the specified offset.
4135 *
4136 * @param pThis The device state structure.
4137 * @param pPkt Pointer to the packet.
4138 * @param u16PktLen Total length of the packet.
4139 * @param cso Offset in packet to write checksum at.
4140 * @param css Offset in packet to start computing
4141 * checksum from.
4142 * @param cse Offset in packet to stop computing
4143 * checksum at.
4144 * @thread E1000_TX
4145 */
4146static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
4147{
4148 RT_NOREF1(pThis);
4149
4150 if (css >= u16PktLen)
4151 {
4152 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
4153 pThis->szPrf, css, u16PktLen));
4154 return;
4155 }
4156
4157 if (cso >= u16PktLen - 1)
4158 {
4159 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
4160 pThis->szPrf, cso, u16PktLen));
4161 return;
4162 }
4163
4164 if (cse == 0)
4165 cse = u16PktLen - 1;
4166 else if (cse < css)
4167 {
4168 E1kLog2(("%s css(%X) is greater than cse(%X), checksum is not inserted\n",
4169 pThis->szPrf, css, cse));
4170 return;
4171 }
4172
4173 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
4174 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", pThis->szPrf,
4175 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
4176 *(uint16_t*)(pPkt + cso) = u16ChkSum;
4177}
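/*
 * Example (illustrative offsets) of how the CSS/CSO/CSE fields drive
 * e1kInsertChecksum() above for a 64-byte frame with an IPv4 header at
 * offset 14 and a TCP header at offset 34:
 *
 *     css = 14, cso = 24, cse = 33  ->  checksum bytes 14..33 (the 20-byte IP
 *                                       header), store the result at offset 24
 *     css = 34, cso = 50, cse = 0   ->  cse == 0 means "up to the last byte",
 *                                       i.e. checksum bytes 34..63
 */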
4178
4179/**
4180 * Add a part of descriptor's buffer to transmit frame.
4181 *
4182 * @remarks data.u64BufAddr is used unconditionally for both data
4183 * and legacy descriptors since it is identical to
4184 * legacy.u64BufAddr.
4185 *
4186 * @param pDevIns The device instance.
4187 * @param pThis The device state structure.
4188 * @param pDesc Pointer to the descriptor to transmit.
4189 * @param u16Len Length of buffer to the end of segment.
4190 * @param fSend Force packet sending.
4191 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4192 * @thread E1000_TX
4193 */
4194#ifndef E1K_WITH_TXD_CACHE
4195static void e1kFallbackAddSegment(PPDMDEVINS pDevIns, PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4196{
4197 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
4198 /* TCP header being transmitted */
4199 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4200 /* IP header being transmitted */
4201 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4202
4203 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4204 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4205 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4206
4207 PDMDevHlpPhysRead(pDevIns, PhysAddr, pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4208 E1kLog3(("%s Dump of the segment:\n"
4209 "%.*Rhxd\n"
4210 "%s --- End of dump ---\n",
4211 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4212 pThis->u16TxPktLen += u16Len;
4213 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4214 pThis->szPrf, pThis->u16TxPktLen));
4215 if (pThis->u16HdrRemain > 0)
4216 {
4217 /* The header was not complete, check if it is now */
4218 if (u16Len >= pThis->u16HdrRemain)
4219 {
4220 /* The rest is payload */
4221 u16Len -= pThis->u16HdrRemain;
4222 pThis->u16HdrRemain = 0;
4223 /* Save partial checksum and flags */
4224 pThis->u32SavedCsum = pTcpHdr->chksum;
4225 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4226 /* Clear FIN and PSH flags now and set them only in the last segment */
4227 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4228 }
4229 else
4230 {
4231 /* Still not */
4232 pThis->u16HdrRemain -= u16Len;
4233 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4234 pThis->szPrf, pThis->u16HdrRemain));
4235 return;
4236 }
4237 }
4238
4239 pThis->u32PayRemain -= u16Len;
4240
4241 if (fSend)
4242 {
4243 /* Leave ethernet header intact */
4244 /* IP Total Length = payload + headers - ethernet header */
4245 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4246 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4247 pThis->szPrf, ntohs(pIpHdr->total_len)));
4248 /* Update IP Checksum */
4249 pIpHdr->chksum = 0;
4250 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4251 pThis->contextTSE.ip.u8CSO,
4252 pThis->contextTSE.ip.u8CSS,
4253 pThis->contextTSE.ip.u16CSE);
4254
4255 /* Update TCP flags */
4256 /* Restore original FIN and PSH flags for the last segment */
4257 if (pThis->u32PayRemain == 0)
4258 {
4259 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4260 E1K_INC_CNT32(TSCTC);
4261 }
4262 /* Add TCP length to partial pseudo header sum */
4263 uint32_t csum = pThis->u32SavedCsum
4264 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4265 while (csum >> 16)
4266 csum = (csum >> 16) + (csum & 0xFFFF);
4267 pTcpHdr->chksum = csum;
4268 /* Compute final checksum */
4269 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4270 pThis->contextTSE.tu.u8CSO,
4271 pThis->contextTSE.tu.u8CSS,
4272 pThis->contextTSE.tu.u16CSE);
4273
4274 /*
4275 * Transmit it. If we've used the SG already, allocate a new one before
4276 * we copy the data.
4277 */
4278 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4279 if (!pTxSg)
4280 {
4281 e1kXmitAllocBuf(pThis, pThisCC, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
4282 pTxSg = pThisCC->CTX_SUFF(pTxSg);
4283 }
4284 if (pTxSg)
4285 {
4286 Assert(pThis->u16TxPktLen <= pThisCC->CTX_SUFF(pTxSg)->cbAvailable);
4287 Assert(pTxSg->cSegs == 1);
4288 if (pTxSg->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4289 memcpy(pTxSg->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4290 pTxSg->cbUsed = pThis->u16TxPktLen;
4291 pTxSg->aSegs[0].cbSeg = pThis->u16TxPktLen;
4292 }
4293 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4294
4295 /* Update Sequence Number */
4296 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4297 - pThis->contextTSE.dw3.u8HDRLEN);
4298 /* Increment IP identification */
4299 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4300 }
4301}
4302#else /* E1K_WITH_TXD_CACHE */
4303static int e1kFallbackAddSegment(PPDMDEVINS pDevIns, PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4304{
4305 int rc = VINF_SUCCESS;
4306 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
4307 /* TCP header being transmitted */
4308 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4309 /* IP header being transmitted */
4310 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4311
4312 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4313 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4314 AssertReturn(pThis->u32PayRemain + pThis->u16HdrRemain > 0, VINF_SUCCESS);
4315
4316 if (pThis->u16TxPktLen + u16Len <= sizeof(pThis->aTxPacketFallback))
4317 PDMDevHlpPhysRead(pDevIns, PhysAddr, pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4318 else
4319 E1kLog(("%s e1kFallbackAddSegment: writing beyond aTxPacketFallback, u16TxPktLen=%d(0x%x) + u16Len=%d(0x%x) > %d\n",
4320 pThis->szPrf, pThis->u16TxPktLen, pThis->u16TxPktLen, u16Len, u16Len, sizeof(pThis->aTxPacketFallback)));
4321 E1kLog3(("%s Dump of the segment:\n"
4322 "%.*Rhxd\n"
4323 "%s --- End of dump ---\n",
4324 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4325 pThis->u16TxPktLen += u16Len;
4326 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4327 pThis->szPrf, pThis->u16TxPktLen));
4328 if (pThis->u16HdrRemain > 0)
4329 {
4330 /* The header was not complete, check if it is now */
4331 if (u16Len >= pThis->u16HdrRemain)
4332 {
4333 /* The rest is payload */
4334 u16Len -= pThis->u16HdrRemain;
4335 pThis->u16HdrRemain = 0;
4336 /* Save partial checksum and flags */
4337 pThis->u32SavedCsum = pTcpHdr->chksum;
4338 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4339 /* Clear FIN and PSH flags now and set them only in the last segment */
4340 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4341 }
4342 else
4343 {
4344 /* Still not */
4345 pThis->u16HdrRemain -= u16Len;
4346 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4347 pThis->szPrf, pThis->u16HdrRemain));
4348 return rc;
4349 }
4350 }
4351
4352 if (u16Len > pThis->u32PayRemain)
4353 pThis->u32PayRemain = 0;
4354 else
4355 pThis->u32PayRemain -= u16Len;
4356
4357 if (fSend)
4358 {
4359 /* Leave ethernet header intact */
4360 /* IP Total Length = payload + headers - ethernet header */
4361 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4362 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4363 pThis->szPrf, ntohs(pIpHdr->total_len)));
4364 /* Update IP Checksum */
4365 pIpHdr->chksum = 0;
4366 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4367 pThis->contextTSE.ip.u8CSO,
4368 pThis->contextTSE.ip.u8CSS,
4369 pThis->contextTSE.ip.u16CSE);
4370
4371 /* Update TCP flags */
4372 /* Restore original FIN and PSH flags for the last segment */
4373 if (pThis->u32PayRemain == 0)
4374 {
4375 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4376 E1K_INC_CNT32(TSCTC);
4377 }
4378 /* Add TCP length to partial pseudo header sum */
4379 uint32_t csum = pThis->u32SavedCsum
4380 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4381 while (csum >> 16)
4382 csum = (csum >> 16) + (csum & 0xFFFF);
4383 pTcpHdr->chksum = csum;
4384 /* Compute final checksum */
4385 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4386 pThis->contextTSE.tu.u8CSO,
4387 pThis->contextTSE.tu.u8CSS,
4388 pThis->contextTSE.tu.u16CSE);
4389
4390 /*
4391 * Transmit it.
4392 */
4393 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4394 if (pTxSg)
4395 {
4396 /* Make sure the packet fits into the allocated buffer */
4397 size_t cbCopy = RT_MIN(pThis->u16TxPktLen, pThisCC->CTX_SUFF(pTxSg)->cbAvailable);
4398#ifdef DEBUG
4399 if (pThis->u16TxPktLen > pTxSg->cbAvailable)
4400 E1kLog(("%s e1kFallbackAddSegment: truncating packet, u16TxPktLen=%d(0x%x) > cbAvailable=%d(0x%x)\n",
4401 pThis->szPrf, pThis->u16TxPktLen, pThis->u16TxPktLen, pTxSg->cbAvailable, pTxSg->cbAvailable));
4402#endif /* DEBUG */
4403 Assert(pTxSg->cSegs == 1);
4404 if (pTxSg->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4405 memcpy(pTxSg->aSegs[0].pvSeg, pThis->aTxPacketFallback, cbCopy);
4406 pTxSg->cbUsed = cbCopy;
4407 pTxSg->aSegs[0].cbSeg = cbCopy;
4408 }
4409 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4410
4411 /* Update Sequence Number */
4412 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4413 - pThis->contextTSE.dw3.u8HDRLEN);
4414 /* Increment IP identification */
4415 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4416
4417 /* Allocate new buffer for the next segment. */
4418 if (pThis->u32PayRemain)
4419 {
4420 pThis->cbTxAlloc = RT_MIN(pThis->u32PayRemain,
4421 pThis->contextTSE.dw3.u16MSS)
4422 + pThis->contextTSE.dw3.u8HDRLEN
4423 + (pThis->fVTag ? 4 : 0);
4424 rc = e1kXmitAllocBuf(pThis, pThisCC, false /* fGSO */);
4425 }
4426 }
4427
4428 return rc;
4429}
4430#endif /* E1K_WITH_TXD_CACHE */
4431
4432#ifndef E1K_WITH_TXD_CACHE
4433/**
4434 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4435 * frame.
4436 *
4437 * We construct the frame in the fallback buffer first and then copy it to the SG
4438 * buffer before passing it down to the network driver code.
4439 *
4440 * @returns true if the frame should be transmitted, false if not.
4441 *
4442 * @param pDevIns The device instance.
4443 * @param pDesc Pointer to the descriptor to transmit.
4444 * @param cbFragment Length of descriptor's buffer.
4445 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4446 * @thread E1000_TX
4447 */
4448 static bool e1kFallbackAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4449 {
4450 PPDMSCATTERGATHER pTxSg = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC)->CTX_SUFF(pTxSg);
4451 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4452 Assert(pDesc->data.cmd.fTSE);
4453 Assert(!e1kXmitIsGsoBuf(pTxSg));
4454
4455 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4456 Assert(u16MaxPktLen != 0);
4457 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4458
4459 /*
4460 * Carve out segments.
4461 */
4462 do
4463 {
4464 /* Calculate how many bytes we have left in this TCP segment */
4465 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4466 if (cb > cbFragment)
4467 {
4468 /* This descriptor fits completely into current segment */
4469 cb = cbFragment;
4470 e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4471 }
4472 else
4473 {
4474 e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4475 /*
4476 * Rewind the packet tail pointer to the beginning of payload,
4477 * so we continue writing right beyond the header.
4478 */
4479 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4480 }
4481
4482 pDesc->data.u64BufAddr += cb;
4483 cbFragment -= cb;
4484 } while (cbFragment > 0);
4485
4486 if (pDesc->data.cmd.fEOP)
4487 {
4488 /* End of packet, next segment will contain header. */
4489 if (pThis->u32PayRemain != 0)
4490 E1K_INC_CNT32(TSCTFC);
4491 pThis->u16TxPktLen = 0;
4492 e1kXmitFreeBuf(pThis, PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC));
4493 }
4494
4495 return false;
4496}
4497#else /* E1K_WITH_TXD_CACHE */
4498/**
4499 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4500 * frame.
4501 *
4502 * We construct the frame in the fallback buffer first and then copy it to the SG
4503 * buffer before passing it down to the network driver code.
4504 *
4505 * @returns error code
4506 *
4507 * @param pDevIns The device instance.
4508 * @param pThis The device state structure.
4509 * @param pDesc Pointer to the descriptor to transmit.
4510 * @param cbFragment Length of descriptor's buffer.
4511 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4512 * @thread E1000_TX
4513 */
4514static int e1kFallbackAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, bool fOnWorkerThread)
4515{
4516#ifdef VBOX_STRICT
4517 PPDMSCATTERGATHER pTxSg = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC)->CTX_SUFF(pTxSg);
4518 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4519 Assert(pDesc->data.cmd.fTSE);
4520 Assert(!e1kXmitIsGsoBuf(pTxSg));
4521#endif
4522
4523 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4524 /* We cannot produce empty packets, ignore all TX descriptors (see @bugref{9571}) */
4525 if (u16MaxPktLen == 0)
4526 return VINF_SUCCESS;
4527
4528 /*
4529 * Carve out segments.
4530 */
4531 int rc = VINF_SUCCESS;
4532 do
4533 {
4534 /* Calculate how many bytes we have left in this TCP segment */
4535 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4536 if (cb > pDesc->data.cmd.u20DTALEN)
4537 {
4538 /* This descriptor fits completely into current segment */
4539 cb = pDesc->data.cmd.u20DTALEN;
4540 rc = e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4541 }
4542 else
4543 {
4544 rc = e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4545 /*
4546 * Rewind the packet tail pointer to the beginning of payload,
4547 * so we continue writing right beyond the header.
4548 */
4549 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4550 }
4551
4552 pDesc->data.u64BufAddr += cb;
4553 pDesc->data.cmd.u20DTALEN -= cb;
4554 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4555
4556 if (pDesc->data.cmd.fEOP)
4557 {
4558 /* End of packet, next segment will contain header. */
4559 if (pThis->u32PayRemain != 0)
4560 E1K_INC_CNT32(TSCTFC);
4561 pThis->u16TxPktLen = 0;
4562 e1kXmitFreeBuf(pThis, PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC));
4563 }
4564
4565 return VINF_SUCCESS; /// @todo consider rc;
4566}
4567#endif /* E1K_WITH_TXD_CACHE */
4568
4569
4570/**
4571 * Add descriptor's buffer to transmit frame.
4572 *
4573 * This deals with GSO and normal frames; e1kFallbackAddToFrame deals with the
4574 * TSE frames we cannot handle as GSO.
4575 *
4576 * @returns true on success, false on failure.
4577 *
4578 * @param pDevIns The device instance.
4579 * @param pThis The device state structure.
4580 * @param pThisCC The current context instance data.
4581 * @param PhysAddr The physical address of the descriptor buffer.
4582 * @param cbFragment Length of descriptor's buffer.
4583 * @thread E1000_TX
4584 */
4585static bool e1kAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, RTGCPHYS PhysAddr, uint32_t cbFragment)
4586{
4587 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4588 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4589 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4590
4591 LogFlow(("%s e1kAddToFrame: ENTER cbFragment=%d u16TxPktLen=%d cbUsed=%d cbAvailable=%d fGSO=%s\n",
4592 pThis->szPrf, cbFragment, pThis->u16TxPktLen, pTxSg->cbUsed, pTxSg->cbAvailable,
4593 fGso ? "true" : "false"));
4594 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4595 {
4596 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4597 return false;
4598 }
4599 if (RT_UNLIKELY( cbNewPkt > pTxSg->cbAvailable ))
4600 {
4601 E1kLog(("%s Transmit packet is too large: %u > %u(available)\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
4602 return false;
4603 }
4604
4605 if (RT_LIKELY(pTxSg))
4606 {
4607 Assert(pTxSg->cSegs == 1);
4608 if (pTxSg->cbUsed != pThis->u16TxPktLen)
4609 E1kLog(("%s e1kAddToFrame: pTxSg->cbUsed=%d(0x%x) != u16TxPktLen=%d(0x%x)\n",
4610 pThis->szPrf, pTxSg->cbUsed, pTxSg->cbUsed, pThis->u16TxPktLen, pThis->u16TxPktLen));
4611
4612 PDMDevHlpPhysRead(pDevIns, PhysAddr, (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4613
4614 pTxSg->cbUsed = cbNewPkt;
4615 }
4616 pThis->u16TxPktLen = cbNewPkt;
4617
4618 return true;
4619}
4620
4621
4622/**
4623 * Write the descriptor back to guest memory and notify the guest.
4624 *
 * @param pDevIns The device instance.
4625 * @param pThis The device state structure.
4626 * @param pDesc Pointer to the descriptor that has been transmitted.
4627 * @param addr Physical address of the descriptor in guest memory.
4628 * @thread E1000_TX
4629 */
4630static void e1kDescReport(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4631{
4632 /*
4633 * We do not really implement descriptor write-back bursting; descriptors are
4634 * written back as they are processed.
4635 */
4636 /* Let's pretend we process descriptors. Write back with DD set. */
4637 /*
4638 * Prior to r71586 we tried to accommodate the case when write-back bursts
4639 * are enabled without actually implementing bursting by writing back all
4640 * descriptors, even the ones that do not have RS set. This caused kernel
4641 * panics with Linux SMP kernels, as the e1000 driver tried to free up the skb
4642 * associated with the written-back descriptor if it happened to be a context
4643 * descriptor, since context descriptors do not have an skb associated with them.
4644 * Starting from r71586 we write back only the descriptors with RS set,
4645 * which is a little bit different from what the real hardware does in
4646 * case there is a chain of data descriptors where some of them have RS set
4647 * and others do not. It is a very uncommon scenario, imho.
4648 * We need to check RPS as well since some legacy drivers use it instead of
4649 * RS even with newer cards.
4650 */
4651 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4652 {
4653 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4654 e1kWriteBackDesc(pDevIns, pThis, pDesc, addr);
4655 if (pDesc->legacy.cmd.fEOP)
4656 {
4657//#ifdef E1K_USE_TX_TIMERS
4658 if (pThis->fTidEnabled && pDesc->legacy.cmd.fIDE)
4659 {
4660 E1K_INC_ISTAT_CNT(pThis->uStatTxIDE);
4661 //if (pThis->fIntRaised)
4662 //{
4663 // /* Interrupt is already pending, no need for timers */
4664 // ICR |= ICR_TXDW;
4665 //}
4666 //else {
4667 /* Arm the timer to fire in TIDV usec (discard .024) */
4668 e1kArmTimer(pDevIns, pThis, pThis->hTIDTimer, TIDV);
4669# ifndef E1K_NO_TAD
4670 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4671 E1kLog2(("%s Checking if TAD timer is running\n",
4672 pThis->szPrf));
4673 if (TADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hTADTimer))
4674 e1kArmTimer(pDevIns, pThis, pThis->hTADTimer, TADV);
4675# endif /* E1K_NO_TAD */
4676 }
4677 else
4678 {
4679 if (pThis->fTidEnabled)
4680 {
4681 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4682 pThis->szPrf));
4683 /* Cancel both timers if armed and fire immediately. */
4684# ifndef E1K_NO_TAD
4685 PDMDevHlpTimerStop(pDevIns, pThis->hTADTimer);
4686# endif
4687 PDMDevHlpTimerStop(pDevIns, pThis->hTIDTimer);
4688 }
4689//#endif /* E1K_USE_TX_TIMERS */
4690 E1K_INC_ISTAT_CNT(pThis->uStatIntTx);
4691 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXDW);
4692//#ifdef E1K_USE_TX_TIMERS
4693 }
4694//#endif /* E1K_USE_TX_TIMERS */
4695 }
4696 }
4697 else
4698 {
4699 E1K_INC_ISTAT_CNT(pThis->uStatTxNoRS);
4700 }
4701}
4702
4703#ifndef E1K_WITH_TXD_CACHE
4704
4705/**
4706 * Process Transmit Descriptor.
4707 *
4708 * E1000 supports three types of transmit descriptors:
4709 * - legacy data descriptors of the older format (context-less).
4710 * - data descriptors, same as legacy but providing new offloading capabilities.
4711 * - context descriptors, which set up the context for the following data descriptors.
4712 *
4713 * @param pDevIns The device instance.
4714 * @param pThis The device state structure.
4715 * @param pThisCC The current context instance data.
4716 * @param pDesc Pointer to descriptor union.
4717 * @param addr Physical address of descriptor in guest memory.
4718 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4719 * @thread E1000_TX
4720 */
4721static int e1kXmitDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, E1KTXDESC *pDesc,
4722 RTGCPHYS addr, bool fOnWorkerThread)
4723{
4724 int rc = VINF_SUCCESS;
4725 uint32_t cbVTag = 0;
4726
4727 e1kPrintTDesc(pThis, pDesc, "vvv");
4728
4729//#ifdef E1K_USE_TX_TIMERS
4730 if (pThis->fTidEnabled)
4731 e1kCancelTimer(pDevIns, pThis, pThis->hTIDTimer);
4732//#endif /* E1K_USE_TX_TIMERS */
4733
4734 switch (e1kGetDescType(pDesc))
4735 {
4736 case E1K_DTYP_CONTEXT:
4737 if (pDesc->context.dw2.fTSE)
4738 {
4739 pThis->contextTSE = pDesc->context;
4740 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4741 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4742 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4743 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4744 }
4745 else
4746 {
4747 pThis->contextNormal = pDesc->context;
4748 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4749 }
4750 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4751 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4752 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4753 pDesc->context.ip.u8CSS,
4754 pDesc->context.ip.u8CSO,
4755 pDesc->context.ip.u16CSE,
4756 pDesc->context.tu.u8CSS,
4757 pDesc->context.tu.u8CSO,
4758 pDesc->context.tu.u16CSE));
4759 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4760 e1kDescReport(pDevIns, pThis, pDesc, addr);
4761 break;
4762
4763 case E1K_DTYP_DATA:
4764 {
4765 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4766 {
4767 E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4768 /** @todo Same as legacy when !TSE. See below. */
4769 break;
4770 }
4771 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4772 &pThis->StatTxDescTSEData:
4773 &pThis->StatTxDescData);
4774 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4775 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4776
4777 /*
4778 * The last descriptor of non-TSE packet must contain VLE flag.
4779 * TSE packets have VLE flag in the first descriptor. The later
4780 * case is taken care of a bit later when cbVTag gets assigned.
4781 *
4782 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4783 */
4784 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4785 {
4786 pThis->fVTag = pDesc->data.cmd.fVLE;
4787 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4788 }
4789 /*
4790 * First fragment: Allocate new buffer and save the IXSM and TXSM
4791 * packet options as these are only valid in the first fragment.
4792 */
4793 if (pThis->u16TxPktLen == 0)
4794 {
4795 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4796 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4797 E1kLog2(("%s Saving checksum flags:%s%s; \n", pThis->szPrf,
4798 pThis->fIPcsum ? " IP" : "",
4799 pThis->fTCPcsum ? " TCP/UDP" : ""));
4800 if (pDesc->data.cmd.fTSE)
4801 {
4802 /* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
4803 pThis->fVTag = pDesc->data.cmd.fVLE;
4804 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4805 cbVTag = pThis->fVTag ? 4 : 0;
4806 }
4807 else if (pDesc->data.cmd.fEOP)
4808 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4809 else
4810 cbVTag = 4;
4811 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4812 if (e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE))
4813 rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4814 true /*fExactSize*/, true /*fGso*/);
4815 else if (pDesc->data.cmd.fTSE)
4816 rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4817 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4818 else
4819 rc = e1kXmitAllocBuf(pThis, pThisCC, pDesc->data.cmd.u20DTALEN + cbVTag,
4820 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4821
4822 /** @todo Perhaps it is not that simple for GSO packets! We may
4823 * need to unwind some changes. */
4826 if (RT_FAILURE(rc))
4827 {
4828 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4829 break;
4830 }
4831 /** @todo Is there any way to indicate errors other than collisions? Like
4832 * VERR_NET_DOWN. */
4833 }
4834
4835 /*
4836 * Add the descriptor data to the frame. If the frame is complete,
4837 * transmit it and reset the u16TxPktLen field.
4838 */
4839 if (e1kXmitIsGsoBuf(pThisCC->CTX_SUFF(pTxSg)))
4840 {
4841 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4842 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4843 if (pDesc->data.cmd.fEOP)
4844 {
4845 if ( fRc
4846 && pThisCC->CTX_SUFF(pTxSg)
4847 && pThisCC->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4848 {
4849 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4850 E1K_INC_CNT32(TSCTC);
4851 }
4852 else
4853 {
4854 if (fRc)
4855 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4856 pThisCC->CTX_SUFF(pTxSg), pThisCC->CTX_SUFF(pTxSg) ? pThisCC->CTX_SUFF(pTxSg)->cbUsed : 0,
4857 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4858 e1kXmitFreeBuf(pThis, pThisCC);
4859 E1K_INC_CNT32(TSCTFC);
4860 }
4861 pThis->u16TxPktLen = 0;
4862 }
4863 }
4864 else if (!pDesc->data.cmd.fTSE)
4865 {
4866 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4867 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4868 if (pDesc->data.cmd.fEOP)
4869 {
4870 if (fRc && pThisCC->CTX_SUFF(pTxSg))
4871 {
4872 Assert(pThisCC->CTX_SUFF(pTxSg)->cSegs == 1);
4873 if (pThis->fIPcsum)
4874 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4875 pThis->contextNormal.ip.u8CSO,
4876 pThis->contextNormal.ip.u8CSS,
4877 pThis->contextNormal.ip.u16CSE);
4878 if (pThis->fTCPcsum)
4879 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4880 pThis->contextNormal.tu.u8CSO,
4881 pThis->contextNormal.tu.u8CSS,
4882 pThis->contextNormal.tu.u16CSE);
4883 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4884 }
4885 else
4886 e1kXmitFreeBuf(pThis, pThisCC);
4887 pThis->u16TxPktLen = 0;
4888 }
4889 }
4890 else
4891 {
4892 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4893 e1kFallbackAddToFrame(pDevIns, pThis, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4894 }
4895
4896 e1kDescReport(pDevIns, pThis, pDesc, addr);
4897 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4898 break;
4899 }
4900
4901 case E1K_DTYP_LEGACY:
4902 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4903 {
4904 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4905 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4906 break;
4907 }
4908 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4909 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4910
4911 /* First fragment: allocate new buffer. */
4912 if (pThis->u16TxPktLen == 0)
4913 {
4914 if (pDesc->legacy.cmd.fEOP)
4915 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4916 else
4917 cbVTag = 4;
4918 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4919 /** @todo reset status bits? */
4920 rc = e1kXmitAllocBuf(pThis, pThisCC, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
4921 if (RT_FAILURE(rc))
4922 {
4923 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4924 break;
4925 }
4926
4927 /** @todo Is there any way to indicate errors other than collisions? Like
4928 * VERR_NET_DOWN. */
4929 }
4930
4931 /* Add fragment to frame. */
4932 if (e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4933 {
4934 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4935
4936 /* Last fragment: Transmit and reset the packet storage counter. */
4937 if (pDesc->legacy.cmd.fEOP)
4938 {
4939 pThis->fVTag = pDesc->legacy.cmd.fVLE;
4940 pThis->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4941 /** @todo Offload processing goes here. */
4942 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4943 pThis->u16TxPktLen = 0;
4944 }
4945 }
4946 /* Last fragment + failure: free the buffer and reset the storage counter. */
4947 else if (pDesc->legacy.cmd.fEOP)
4948 {
4949 e1kXmitFreeBuf(pThis, pThisCC);
4950 pThis->u16TxPktLen = 0;
4951 }
4952
4953 e1kDescReport(pDevIns, pThis, pDesc, addr);
4954 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4955 break;
4956
4957 default:
4958 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4959 pThis->szPrf, e1kGetDescType(pDesc)));
4960 break;
4961 }
4962
4963 return rc;
4964}
4965
4966#else /* E1K_WITH_TXD_CACHE */
4967
4968/**
4969 * Process Transmit Descriptor.
4970 *
4971 * E1000 supports three types of transmit descriptors:
4972 * - legacy data descriptors of the older format (context-less).
4973 * - data descriptors, same as legacy but providing new offloading capabilities.
4974 * - context descriptors, which set up the context for the following data descriptors.
4975 *
4976 * @param pDevIns The device instance.
4977 * @param pThis The device state structure.
4978 * @param pThisCC The current context instance data.
4979 * @param pDesc Pointer to descriptor union.
4980 * @param addr Physical address of descriptor in guest memory.
4981 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4983 * @thread E1000_TX
4984 */
4985static int e1kXmitDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, E1KTXDESC *pDesc,
4986 RTGCPHYS addr, bool fOnWorkerThread)
4987{
4988 int rc = VINF_SUCCESS;
4989
4990 e1kPrintTDesc(pThis, pDesc, "vvv");
4991
4992 if (pDesc->legacy.dw3.fDD)
4993 {
4994 E1kLog(("%s e1kXmitDesc: skipping bad descriptor ^^^\n", pThis->szPrf));
4995 e1kDescReport(pDevIns, pThis, pDesc, addr);
4996 return VINF_SUCCESS;
4997 }
4998
4999//#ifdef E1K_USE_TX_TIMERS
5000 if (pThis->fTidEnabled)
5001 PDMDevHlpTimerStop(pDevIns, pThis->hTIDTimer);
5002//#endif /* E1K_USE_TX_TIMERS */
5003
5004 switch (e1kGetDescType(pDesc))
5005 {
5006 case E1K_DTYP_CONTEXT:
5007 /* The caller has already updated the context */
5008 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
5009 e1kDescReport(pDevIns, pThis, pDesc, addr);
5010 break;
5011
5012 case E1K_DTYP_DATA:
5013 {
5014 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
5015 &pThis->StatTxDescTSEData:
5016 &pThis->StatTxDescData);
5017 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
5018 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5019 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
5020 {
5021 E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
5022 if (pDesc->data.cmd.fEOP)
5023 {
5024 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5025 pThis->u16TxPktLen = 0;
5026 }
5027 }
5028 else
5029 {
5030 /*
5031 * Add the descriptor data to the frame. If the frame is complete,
5032 * transmit it and reset the u16TxPktLen field.
5033 */
5034 if (e1kXmitIsGsoBuf(pThisCC->CTX_SUFF(pTxSg)))
5035 {
5036 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
5037 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5038 if (pDesc->data.cmd.fEOP)
5039 {
5040 if ( fRc
5041 && pThisCC->CTX_SUFF(pTxSg)
5042 && pThisCC->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
5043 {
5044 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5045 E1K_INC_CNT32(TSCTC);
5046 }
5047 else
5048 {
5049 if (fRc)
5050 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
5051 pThisCC->CTX_SUFF(pTxSg), pThisCC->CTX_SUFF(pTxSg) ? pThisCC->CTX_SUFF(pTxSg)->cbUsed : 0,
5052 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
5053 e1kXmitFreeBuf(pThis, pThisCC);
5054 E1K_INC_CNT32(TSCTFC);
5055 }
5056 pThis->u16TxPktLen = 0;
5057 }
5058 }
5059 else if (!pDesc->data.cmd.fTSE)
5060 {
5061 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
5062 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5063 if (pDesc->data.cmd.fEOP)
5064 {
5065 if (fRc && pThisCC->CTX_SUFF(pTxSg))
5066 {
5067 Assert(pThisCC->CTX_SUFF(pTxSg)->cSegs == 1);
5068 if (pThis->fIPcsum)
5069 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5070 pThis->contextNormal.ip.u8CSO,
5071 pThis->contextNormal.ip.u8CSS,
5072 pThis->contextNormal.ip.u16CSE);
5073 if (pThis->fTCPcsum)
5074 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5075 pThis->contextNormal.tu.u8CSO,
5076 pThis->contextNormal.tu.u8CSS,
5077 pThis->contextNormal.tu.u16CSE);
5078 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5079 }
5080 else
5081 e1kXmitFreeBuf(pThis, pThisCC);
5082 pThis->u16TxPktLen = 0;
5083 }
5084 }
5085 else
5086 {
5087 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
5088 rc = e1kFallbackAddToFrame(pDevIns, pThis, pDesc, fOnWorkerThread);
5089 }
5090 }
5091 e1kDescReport(pDevIns, pThis, pDesc, addr);
5092 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5093 break;
5094 }
5095
5096 case E1K_DTYP_LEGACY:
5097 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
5098 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5099 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
5100 {
5101 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
5102 }
5103 else
5104 {
5105 /* Add fragment to frame. */
5106 if (e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
5107 {
5108 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
5109
5110 /* Last fragment: Transmit and reset the packet storage counter. */
5111 if (pDesc->legacy.cmd.fEOP)
5112 {
5113 if (pDesc->legacy.cmd.fIC)
5114 {
5115 e1kInsertChecksum(pThis,
5116 (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
5117 pThis->u16TxPktLen,
5118 pDesc->legacy.cmd.u8CSO,
5119 pDesc->legacy.dw3.u8CSS,
5120 0);
5121 }
5122 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5123 pThis->u16TxPktLen = 0;
5124 }
5125 }
5126 /* Last fragment + failure: free the buffer and reset the storage counter. */
5127 else if (pDesc->legacy.cmd.fEOP)
5128 {
5129 e1kXmitFreeBuf(pThis, pThisCC);
5130 pThis->u16TxPktLen = 0;
5131 }
5132 }
5133 e1kDescReport(pDevIns, pThis, pDesc, addr);
5134 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5135 break;
5136
5137 default:
5138 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
5139 pThis->szPrf, e1kGetDescType(pDesc)));
5140 break;
5141 }
5142
5143 return rc;
5144}
5145
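/**
 * Update the transmit context with the data of a context descriptor.
 *
 * Stores the descriptor in either the TSE or the normal context, clamps the MSS
 * so that a single TSE segment cannot exceed E1K_MAX_TX_PKT_SIZE, and
 * (re)initializes the GSO context for TSE descriptors.
 *
 * @param pThis The device state structure.
 * @param pDesc Pointer to the context descriptor.
 * @thread E1000_TX
 */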
5146DECLINLINE(void) e1kUpdateTxContext(PE1KSTATE pThis, E1KTXDESC *pDesc)
5147{
5148 if (pDesc->context.dw2.fTSE)
5149 {
5150 pThis->contextTSE = pDesc->context;
5151 uint32_t cbMaxSegmentSize = pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + 4; /*VTAG*/
5152 if (RT_UNLIKELY(cbMaxSegmentSize > E1K_MAX_TX_PKT_SIZE))
5153 {
5154 pThis->contextTSE.dw3.u16MSS = E1K_MAX_TX_PKT_SIZE - pThis->contextTSE.dw3.u8HDRLEN - 4; /*VTAG*/
5155 LogRelMax(10, ("%s: Transmit packet is too large: %u > %u(max). Adjusted MSS to %u.\n",
5156 pThis->szPrf, cbMaxSegmentSize, E1K_MAX_TX_PKT_SIZE, pThis->contextTSE.dw3.u16MSS));
5157 }
5158 pThis->u32PayRemain = pThis->contextTSE.dw2.u20PAYLEN;
5159 pThis->u16HdrRemain = pThis->contextTSE.dw3.u8HDRLEN;
5160 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
5161 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
5162 }
5163 else
5164 {
5165 pThis->contextNormal = pDesc->context;
5166 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
5167 }
5168 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
5169 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
5170 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
5171 pDesc->context.ip.u8CSS,
5172 pDesc->context.ip.u8CSO,
5173 pDesc->context.ip.u16CSE,
5174 pDesc->context.tu.u8CSS,
5175 pDesc->context.tu.u8CSO,
5176 pDesc->context.tu.u16CSE));
5177}
5178
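/**
 * Locate a complete packet in the transmit descriptor cache.
 *
 * Walks the fetched descriptors starting at iTxDCurrent, updating the transmit
 * context from context descriptors and skipping invalid or empty descriptors,
 * until a descriptor with EOP is found, and computes the size of the buffer to
 * allocate for the packet (cbTxAlloc).
 *
 * @returns true if a complete packet has been located (or all fetched
 *          descriptors turned out to be empty), false if more descriptors
 *          need to be fetched.
 *
 * @param pThis The device state structure.
 * @thread E1000_TX
 */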
5179static bool e1kLocateTxPacket(PE1KSTATE pThis)
5180{
5181 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
5182 pThis->szPrf, pThis->cbTxAlloc));
5183 /* Check if we have located the packet already. */
5184 if (pThis->cbTxAlloc)
5185 {
5186 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
5187 pThis->szPrf, pThis->cbTxAlloc));
5188 return true;
5189 }
5190
5191 bool fTSE = false;
5192 uint32_t cbPacket = 0;
5193
5194 for (int i = pThis->iTxDCurrent; i < pThis->nTxDFetched; ++i)
5195 {
5196 E1KTXDESC *pDesc = &pThis->aTxDescriptors[i];
5197 switch (e1kGetDescType(pDesc))
5198 {
5199 case E1K_DTYP_CONTEXT:
5200 if (cbPacket == 0)
5201 e1kUpdateTxContext(pThis, pDesc);
5202 else
5203 E1kLog(("%s e1kLocateTxPacket: ignoring a context descriptor in the middle of a packet, cbPacket=%d\n",
5204 pThis->szPrf, cbPacket));
5205 continue;
5206 case E1K_DTYP_LEGACY:
5207 /* Skip invalid descriptors. */
5208 if (cbPacket > 0 && (pThis->fGSO || fTSE))
5209 {
5210 E1kLog(("%s e1kLocateTxPacket: ignoring a legacy descriptor in the segmentation context, cbPacket=%d\n",
5211 pThis->szPrf, cbPacket));
5212 pDesc->legacy.dw3.fDD = true; /* Make sure it is skipped by processing */
5213 continue;
5214 }
5215 /* Skip empty descriptors. */
5216 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
5217 break;
5218 cbPacket += pDesc->legacy.cmd.u16Length;
5219 pThis->fGSO = false;
5220 break;
5221 case E1K_DTYP_DATA:
5222 /* Skip invalid descriptors. */
5223 if (cbPacket > 0 && (bool)pDesc->data.cmd.fTSE != fTSE)
5224 {
5225 E1kLog(("%s e1kLocateTxPacket: ignoring %sTSE descriptor in the %ssegmentation context, cbPacket=%d\n",
5226 pThis->szPrf, pDesc->data.cmd.fTSE ? "" : "non-", fTSE ? "" : "non-", cbPacket));
5227 pDesc->data.dw3.fDD = true; /* Make sure it is skipped by processing */
5228 continue;
5229 }
5230 /* Skip empty descriptors. */
5231 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
5232 break;
5233 if (cbPacket == 0)
5234 {
5235 /*
5236 * The first fragment: save IXSM and TXSM options
5237 * as these are only valid in the first fragment.
5238 */
5239 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
5240 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
5241 fTSE = pDesc->data.cmd.fTSE;
5242 /*
5243 * TSE descriptors have VLE bit properly set in
5244 * the first fragment.
5245 */
5246 if (fTSE)
5247 {
5248 pThis->fVTag = pDesc->data.cmd.fVLE;
5249 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5250 }
5251 pThis->fGSO = e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE);
5252 }
5253 cbPacket += pDesc->data.cmd.u20DTALEN;
5254 break;
5255 default:
5256 AssertMsgFailed(("Impossible descriptor type!"));
5257 }
5258 if (pDesc->legacy.cmd.fEOP)
5259 {
5260 /*
5261 * Non-TSE descriptors have VLE bit properly set in
5262 * the last fragment.
5263 */
5264 if (!fTSE)
5265 {
5266 pThis->fVTag = pDesc->data.cmd.fVLE;
5267 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5268 }
5269 /*
5270 * Compute the required buffer size. If we cannot do GSO but still
5271 * have to do segmentation we allocate the first segment only.
5272 */
5273 pThis->cbTxAlloc = (!fTSE || pThis->fGSO) ?
5274 cbPacket :
5275 RT_MIN(cbPacket, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN);
5276 if (pThis->fVTag)
5277 pThis->cbTxAlloc += 4;
5278 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d cbPacket=%d%s%s\n",
5279 pThis->szPrf, pThis->cbTxAlloc, cbPacket,
5280 pThis->fGSO ? " GSO" : "", fTSE ? " TSE" : ""));
5281 return true;
5282 }
5283 }
5284
5285 if (cbPacket == 0 && pThis->nTxDFetched - pThis->iTxDCurrent > 0)
5286 {
5287 /* All descriptors were empty, we need to process them as a dummy packet */
5288 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
5289 pThis->szPrf, pThis->cbTxAlloc));
5290 return true;
5291 }
5292 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d cbPacket=%d\n",
5293 pThis->szPrf, pThis->cbTxAlloc, cbPacket));
5294 return false;
5295}
5296
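/**
 * Process the cached descriptors of a single transmit packet.
 *
 * Feeds the descriptors to e1kXmitDesc() one by one, advancing TDH (with
 * wrap-around) and raising ICR.TXD_LOW when the number of unprocessed
 * descriptors drops to the low threshold, until the end-of-packet descriptor
 * has been handled, an error occurs, or the cache is exhausted.
 *
 * @returns VBox status code.
 *
 * @param pDevIns The device instance.
 * @param pThis The device state structure.
 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
 * @thread E1000_TX
 */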
5297static int e1kXmitPacket(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5298{
5299 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5300 int rc = VINF_SUCCESS;
5301
5302 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
5303 pThis->szPrf, pThis->iTxDCurrent, pThis->nTxDFetched));
5304
5305 while (pThis->iTxDCurrent < pThis->nTxDFetched)
5306 {
5307 E1KTXDESC *pDesc = &pThis->aTxDescriptors[pThis->iTxDCurrent];
5308 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5309 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
5310 rc = e1kXmitDesc(pDevIns, pThis, pThisCC, pDesc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5311 if (RT_FAILURE(rc))
5312 break;
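 /* Advance TDH, wrapping around at the end of the descriptor ring. */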
5313 if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
5314 TDH = 0;
5315 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
5316 if (uLowThreshold != 0 && e1kGetTxLen(pThis) <= uLowThreshold)
5317 {
5318 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5319 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5320 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5321 }
5322 ++pThis->iTxDCurrent;
5323 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
5324 break;
5325 }
5326
5327 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
5328 pThis->szPrf, rc, pThis->iTxDCurrent, pThis->nTxDFetched));
5329 return rc;
5330}
5331
5332#endif /* E1K_WITH_TXD_CACHE */
5333#ifndef E1K_WITH_TXD_CACHE
5334
5335/**
5336 * Transmit pending descriptors.
5337 *
5338 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5339 *
5340 * @param pDevIns The device instance.
5341 * @param pThis The E1000 state.
5342 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5343 */
5344static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5345{
5346 int rc = VINF_SUCCESS;
5347 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5348
5349 /* Check if transmitter is enabled. */
5350 if (!(TCTL & TCTL_EN))
5351 return VINF_SUCCESS;
5352 /*
5353 * Grab the xmit lock of the driver as well as the E1K device state.
5354 */
5355 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5356 if (RT_LIKELY(rc == VINF_SUCCESS))
5357 {
5358 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5359 if (pDrv)
5360 {
5361 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5362 if (RT_FAILURE(rc))
5363 {
5364 e1kCsTxLeave(pThis);
5365 return rc;
5366 }
5367 }
5368 /*
5369 * Process all pending descriptors.
5370 * Note! Do not process descriptors in locked state
5371 */
5372 while (TDH != TDT && !pThis->fLocked)
5373 {
5374 E1KTXDESC desc;
5375 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5376 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
5377
5378 e1kLoadDesc(pDevIns, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
5379 rc = e1kXmitDesc(pDevIns, pThis, pThisCC, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5380 /* If we failed to transmit descriptor we will try it again later */
5381 if (RT_FAILURE(rc))
5382 break;
5383 if (++TDH * sizeof(desc) >= TDLEN)
5384 TDH = 0;
5385
5386 if (e1kGetTxLen(pThis) <= GET_BITS(TXDCTL, LWTHRESH)*8)
5387 {
5388 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5389 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5390 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5391 }
5392
5393 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5394 }
5395
5396 /// @todo uncomment: pThis->uStatIntTXQE++;
5397 /// @todo uncomment: e1kRaiseInterrupt(pDevIns, pThis, ICR_TXQE);
5398 /*
5399 * Release the lock.
5400 */
5401 if (pDrv)
5402 pDrv->pfnEndXmit(pDrv);
5403 e1kCsTxLeave(pThis);
5404 }
5405
5406 return rc;
5407}
5408
5409#else /* E1K_WITH_TXD_CACHE */
5410
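/**
 * Dump the transmit descriptor ring and the descriptor cache to the release log.
 *
 * Used for diagnostics when no complete packet can be located in the descriptor
 * cache.
 *
 * @param pDevIns The device instance.
 * @param pThis The device state structure.
 */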
5411static void e1kDumpTxDCache(PPDMDEVINS pDevIns, PE1KSTATE pThis)
5412{
5413 unsigned i, cDescs = TDLEN / sizeof(E1KTXDESC);
5414 uint32_t tdh = TDH;
5415 LogRel(("E1000: -- Transmit Descriptors (%d total) --\n", cDescs));
5416 for (i = 0; i < cDescs; ++i)
5417 {
5418 E1KTXDESC desc;
5419 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i), &desc, sizeof(desc));
5420 if (i == tdh)
5421 LogRel(("E1000: >>> "));
5422 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5423 }
5424 LogRel(("E1000: -- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5425 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE));
5426 if (tdh > pThis->iTxDCurrent)
5427 tdh -= pThis->iTxDCurrent;
5428 else
5429 tdh = cDescs + tdh - pThis->iTxDCurrent;
5430 for (i = 0; i < pThis->nTxDFetched; ++i)
5431 {
5432 if (i == pThis->iTxDCurrent)
5433 LogRel(("E1000: >>> "));
5434 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
5435 }
5436}
5437
5438/**
5439 * Transmit pending descriptors.
5440 *
5441 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5442 *
5443 * @param pDevIns The device instance.
5444 * @param pThis The E1000 state.
5445 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5446 */
5447static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5448{
5449 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5450 int rc = VINF_SUCCESS;
5451
5452 /* Check if transmitter is enabled. */
5453 if (!(TCTL & TCTL_EN))
5454 return VINF_SUCCESS;
5455 /*
5456 * Grab the xmit lock of the driver as well as the E1K device state.
5457 */
5458 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
5459 if (pDrv)
5460 {
5461 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5462 if (RT_FAILURE(rc))
5463 return rc;
5464 }
5465
5466 /*
5467 * Process all pending descriptors.
5468 * Note! Do not process descriptors in locked state
5469 */
5470 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5471 if (RT_LIKELY(rc == VINF_SUCCESS))
5472 {
5473 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5474 /*
5475 * fIncomplete is set whenever we try to fetch additional descriptors
5476 * for an incomplete packet. If we fail to locate a complete packet on
5477 * the next iteration, we need to reset the cache or we risk getting
5478 * stuck in this loop forever.
5479 */
5480 bool fIncomplete = false;
5481 while (!pThis->fLocked && e1kTxDLazyLoad(pDevIns, pThis))
5482 {
5483 while (e1kLocateTxPacket(pThis))
5484 {
5485 fIncomplete = false;
5486 /* Found a complete packet, allocate it. */
5487 rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->fGSO);
5488 /* If we're out of bandwidth we'll come back later. */
5489 if (RT_FAILURE(rc))
5490 goto out;
5491 /* Copy the packet to allocated buffer and send it. */
5492 rc = e1kXmitPacket(pDevIns, pThis, fOnWorkerThread);
5493 /* If we're out of bandwidth we'll come back later. */
5494 if (RT_FAILURE(rc))
5495 goto out;
5496 }
5497 uint8_t u8Remain = pThis->nTxDFetched - pThis->iTxDCurrent;
5498 if (RT_UNLIKELY(fIncomplete))
5499 {
5500 static bool fTxDCacheDumped = false;
5501 /*
5502 * The descriptor cache is full, but we were unable to find
5503 * a complete packet in it. Drop the cache and hope that
5504 * the guest driver can recover from network card error.
5505 */
5506 LogRel(("%s: No complete packets in%s TxD cache! "
5507 "Fetched=%d, current=%d, TX len=%d.\n",
5508 pThis->szPrf,
5509 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5510 pThis->nTxDFetched, pThis->iTxDCurrent,
5511 e1kGetTxLen(pThis)));
5512 if (!fTxDCacheDumped)
5513 {
5514 fTxDCacheDumped = true;
5515 e1kDumpTxDCache(pDevIns, pThis);
5516 }
5517 pThis->iTxDCurrent = pThis->nTxDFetched = 0;
5518 /*
5519 * Returning an error at this point means Guru in R0
5520 * (see @bugref{6428}).
5521 */
5522# ifdef IN_RING3
5523 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5524# else /* !IN_RING3 */
5525 rc = VINF_IOM_R3_MMIO_WRITE;
5526# endif /* !IN_RING3 */
5527 goto out;
5528 }
5529 if (u8Remain > 0)
5530 {
5531 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5532 "%d more are available\n",
5533 pThis->szPrf, pThis->iTxDCurrent, u8Remain,
5534 e1kGetTxLen(pThis) - u8Remain));
5535
5536 /*
5537 * A packet was partially fetched. Move incomplete packet to
5538 * the beginning of cache buffer, then load more descriptors.
5539 */
5540 memmove(pThis->aTxDescriptors,
5541 &pThis->aTxDescriptors[pThis->iTxDCurrent],
5542 u8Remain * sizeof(E1KTXDESC));
5543 pThis->iTxDCurrent = 0;
5544 pThis->nTxDFetched = u8Remain;
5545 e1kTxDLoadMore(pDevIns, pThis);
5546 fIncomplete = true;
5547 }
5548 else
5549 pThis->nTxDFetched = 0;
5550 pThis->iTxDCurrent = 0;
5551 }
5552 if (!pThis->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5553 {
5554 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5555 pThis->szPrf));
5556 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5557 }
5558out:
5559 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5560
5561 /// @todo uncomment: pThis->uStatIntTXQE++;
5562 /// @todo uncomment: e1kRaiseInterrupt(pDevIns, pThis, ICR_TXQE);
5563
5564 e1kCsTxLeave(pThis);
5565 }
5566
5567
5568 /*
5569 * Release the lock.
5570 */
5571 if (pDrv)
5572 pDrv->pfnEndXmit(pDrv);
5573 return rc;
5574}
5575
5576#endif /* E1K_WITH_TXD_CACHE */
5577#ifdef IN_RING3
5578
5579/**
5580 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5581 */
5582static DECLCALLBACK(void) e1kR3NetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5583{
5584 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
5585 PE1KSTATE pThis = pThisCC->pShared;
5586 /* Resume suspended transmission */
5587 STATUS &= ~STATUS_TXOFF;
5588 e1kXmitPending(pThisCC->pDevInsR3, pThis, true /*fOnWorkerThread*/);
5589}
5590
5591/**
5592 * @callback_method_impl{FNPDMTASKDEV,
5593 * Executes e1kXmitPending at the behest of ring-0/raw-mode.}
5594 * @note Not executed on EMT.
5595 */
5596static DECLCALLBACK(void) e1kR3TxTaskCallback(PPDMDEVINS pDevIns, void *pvUser)
5597{
5598 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5599 E1kLog2(("%s e1kR3TxTaskCallback:\n", pThis->szPrf));
5600
5601 int rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
5602 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN || rc == VERR_NET_DOWN, ("%Rrc\n", rc));
5603
5604 RT_NOREF(rc, pvUser);
5605}
5606
5607#endif /* IN_RING3 */
5608
5609/**
5610 * Write handler for Transmit Descriptor Tail register.
5611 *
5612 * @param pThis The device state structure.
5613 * @param offset Register offset in memory-mapped frame.
5614 * @param index Register index in register array.
5615 * @param value The value to store.
5617 * @thread EMT
5618 */
5619static int e1kRegWriteTDT(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5620{
5621 int rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
5622
5623 /* All descriptors starting with head and not including tail belong to us. */
5624 /* Process them. */
5625 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5626 pThis->szPrf, TDBAL, TDBAH, TDLEN, TDH, TDT));
5627
5628 /* Ignore TDT writes when the link is down. */
5629 if (TDH != TDT && (STATUS & STATUS_LU))
5630 {
5631 Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", TDH, TDT, e1kGetTxLen(pThis)));
5632 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5633 pThis->szPrf, e1kGetTxLen(pThis)));
5634
5635 /* Transmit pending packets if possible, defer it if we cannot do it
5636 in the current context. */
5637#ifdef E1K_TX_DELAY
5638 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5639 if (RT_LIKELY(rc == VINF_SUCCESS))
5640 {
5641 if (!PDMDevHlpTimerIsActive(pDevIns, pThis->hTXDTimer))
5642 {
5643# ifdef E1K_INT_STATS
5644 pThis->u64ArmedAt = RTTimeNanoTS();
5645# endif
5646 e1kArmTimer(pDevIns, pThis, pThis->hTXDTimer, E1K_TX_DELAY);
5647 }
5648 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayed);
5649 e1kCsTxLeave(pThis);
5650 return rc;
5651 }
5652 /* We failed to enter the TX critical section -- transmit as usual. */
5653#endif /* E1K_TX_DELAY */
5654#ifndef IN_RING3
5655 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5656 if (!pThisCC->CTX_SUFF(pDrv))
5657 {
5658 PDMDevHlpTaskTrigger(pDevIns, pThis->hTxTask);
5659 rc = VINF_SUCCESS;
5660 }
5661 else
5662#endif
5663 {
5664 rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
5665 if (rc == VERR_TRY_AGAIN)
5666 rc = VINF_SUCCESS;
5667#ifndef IN_RING3
5668 else if (rc == VERR_SEM_BUSY)
5669 rc = VINF_IOM_R3_MMIO_WRITE;
5670#endif
5671 AssertRC(rc);
5672 }
5673 }
5674
5675 return rc;
5676}
5677
5678/**
5679 * Write handler for Multicast Table Array registers.
5680 *
5681 * @param pThis The device state structure.
5682 * @param offset Register offset in memory-mapped frame.
5683 * @param index Register index in register array.
5684 * @param value The value to store.
5685 * @thread EMT
5686 */
5687static int e1kRegWriteMTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5688{
5689 RT_NOREF_PV(pDevIns);
5690 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5691 pThis->auMTA[(offset - g_aE1kRegMap[index].offset) / sizeof(pThis->auMTA[0])] = value;
5692
5693 return VINF_SUCCESS;
5694}
5695
5696/**
5697 * Read handler for Multicast Table Array registers.
5698 *
5699 * @returns VBox status code.
5700 *
5701 * @param pThis The device state structure.
5702 * @param offset Register offset in memory-mapped frame.
5703 * @param index Register index in register array.
5704 * @thread EMT
5705 */
5706static int e1kRegReadMTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5707{
5708 RT_NOREF_PV(pDevIns);
5709 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5710 *pu32Value = pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])];
5711
5712 return VINF_SUCCESS;
5713}
5714
5715/**
5716 * Write handler for Receive Address registers.
5717 *
5718 * @param pThis The device state structure.
5719 * @param offset Register offset in memory-mapped frame.
5720 * @param index Register index in register array.
5721 * @param value The value to store.
5722 * @thread EMT
5723 */
5724static int e1kRegWriteRA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5725{
5726 RT_NOREF_PV(pDevIns);
5727 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5728 pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
5729
5730 return VINF_SUCCESS;
5731}
5732
5733/**
5734 * Read handler for Receive Address registers.
5735 *
5736 * @returns VBox status code.
5737 *
5738 * @param pThis The device state structure.
5739 * @param offset Register offset in memory-mapped frame.
5740 * @param index Register index in register array.
5741 * @thread EMT
5742 */
5743static int e1kRegReadRA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5744{
5745 RT_NOREF_PV(pDevIns);
5746 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5747 *pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
5748
5749 return VINF_SUCCESS;
5750}
5751
5752/**
5753 * Write handler for VLAN Filter Table Array registers.
5754 *
5755 * @param pThis The device state structure.
5756 * @param offset Register offset in memory-mapped frame.
5757 * @param index Register index in register array.
5758 * @param value The value to store.
5759 * @thread EMT
5760 */
5761static int e1kRegWriteVFTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5762{
5763 RT_NOREF_PV(pDevIns);
5764 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VINF_SUCCESS);
5765 pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])] = value;
5766
5767 return VINF_SUCCESS;
5768}
5769
5770/**
5771 * Read handler for VLAN Filter Table Array registers.
5772 *
5773 * @returns VBox status code.
5774 *
5775 * @param pThis The device state structure.
5776 * @param offset Register offset in memory-mapped frame.
5777 * @param index Register index in register array.
5778 * @thread EMT
5779 */
5780static int e1kRegReadVFTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5781{
5782 RT_NOREF_PV(pDevIns);
5783 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VERR_DEV_IO_ERROR);
5784 *pu32Value = pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])];
5785
5786 return VINF_SUCCESS;
5787}
5788
5789/**
5790 * Read handler for unimplemented registers.
5791 *
5792 * Merely reports reads from unimplemented registers.
5793 *
5794 * @returns VBox status code.
5795 *
5796 * @param pThis The device state structure.
5797 * @param offset Register offset in memory-mapped frame.
5798 * @param index Register index in register array.
5799 * @thread EMT
5800 */
5801static int e1kRegReadUnimplemented(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5802{
5803 RT_NOREF(pDevIns, pThis, offset, index);
5804 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5805 pThis->szPrf, offset, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5806 *pu32Value = 0;
5807
5808 return VINF_SUCCESS;
5809}
5810
5811/**
5812 * Default register read handler with automatic clear operation.
5813 *
5814 * Retrieves the value of register from register array in device state structure.
5815 * Then resets all bits.
5816 *
5817 * @remarks Masking and shifting of the result are done in the caller.
5819 *
5820 * @returns VBox status code.
5821 *
5822 * @param pThis The device state structure.
5823 * @param offset Register offset in memory-mapped frame.
5824 * @param index Register index in register array.
5825 * @thread EMT
5826 */
5827static int e1kRegReadAutoClear(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5828{
5829 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5830 int rc = e1kRegReadDefault(pDevIns, pThis, offset, index, pu32Value);
5831 pThis->auRegs[index] = 0;
5832
5833 return rc;
5834}
5835
5836/**
5837 * Default register read handler.
5838 *
5839 * Retrieves the value of register from register array in device state structure.
5840 * Bits corresponding to 0s in 'readable' mask will always read as 0s.
5841 *
5842 * @remarks Masking and shifting of the result are done in the caller.
5844 *
5845 * @returns VBox status code.
5846 *
5847 * @param pThis The device state structure.
5848 * @param offset Register offset in memory-mapped frame.
5849 * @param index Register index in register array.
5850 * @thread EMT
5851 */
5852static int e1kRegReadDefault(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5853{
5854 RT_NOREF_PV(pDevIns); RT_NOREF_PV(offset);
5855
5856 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5857 *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
5858
5859 return VINF_SUCCESS;
5860}
5861
5862/**
5863 * Write handler for unimplemented registers.
5864 *
5865 * Merely reports writes to unimplemented registers.
5866 *
5867 * @param pThis The device state structure.
5868 * @param offset Register offset in memory-mapped frame.
5869 * @param index Register index in register array.
5870 * @param value The value to store.
5871 * @thread EMT
5872 */
5874static int e1kRegWriteUnimplemented(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5875{
5876 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
5877
5878 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
5879 pThis->szPrf, offset, value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5880
5881 return VINF_SUCCESS;
5882}
5883
5884/**
5885 * Default register write handler.
5886 *
5887 * Stores the value to the register array in the device state structure. Only the
5888 * bits set in the 'writable' mask will be stored.
5889 *
5890 * @returns VBox status code.
5891 *
5892 * @param pThis The device state structure.
5893 * @param offset Register offset in memory-mapped frame.
5894 * @param index Register index in register array.
5895 * @param value The value to store.
5897 * @thread EMT
5898 */
5900static int e1kRegWriteDefault(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5901{
5902 RT_NOREF(pDevIns, offset);
5903
5904 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5905 pThis->auRegs[index] = (value & g_aE1kRegMap[index].writable)
5906 | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
5907
5908 return VINF_SUCCESS;
5909}
5910
5911/**
5912 * Search register table for matching register.
5913 *
5914 * @returns Index in the register table or -1 if not found.
5915 *
5916 * @param offReg Register offset in memory-mapped region.
5917 * @thread EMT
5918 */
5919static int e1kRegLookup(uint32_t offReg)
5920{
5921
5922#if 0
5923 int index;
5924
5925 for (index = 0; index < E1K_NUM_OF_REGS; index++)
5926 {
5927 if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
5928 {
5929 return index;
5930 }
5931 }
5932#else
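/*
 * Binary search over the first E1K_NUM_OF_BINARY_SEARCHABLE entries of the
 * register map (sorted by offset), followed by a linear scan over the
 * remaining entries.
 */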
5933 int iStart = 0;
5934 int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
5935 for (;;)
5936 {
5937 int i = (iEnd - iStart) / 2 + iStart;
5938 uint32_t offCur = g_aE1kRegMap[i].offset;
5939 if (offReg < offCur)
5940 {
5941 if (i == iStart)
5942 break;
5943 iEnd = i;
5944 }
5945 else if (offReg >= offCur + g_aE1kRegMap[i].size)
5946 {
5947 i++;
5948 if (i == iEnd)
5949 break;
5950 iStart = i;
5951 }
5952 else
5953 return i;
5954 Assert(iEnd > iStart);
5955 }
5956
5957 for (unsigned i = E1K_NUM_OF_BINARY_SEARCHABLE; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5958 if (offReg - g_aE1kRegMap[i].offset < g_aE1kRegMap[i].size)
5959 return i;
5960
5961# ifdef VBOX_STRICT
5962 for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5963 Assert(offReg - g_aE1kRegMap[i].offset >= g_aE1kRegMap[i].size);
5964# endif
5965
5966#endif
5967
5968 return -1;
5969}
5970
5971/**
5972 * Handle unaligned register read operation.
5973 *
5974 * Looks up and calls appropriate handler.
5975 *
5976 * @returns VBox status code.
5977 *
5978 * @param pDevIns The device instance.
5979 * @param pThis The device state structure.
5980 * @param offReg Register offset in memory-mapped frame.
5981 * @param pv Where to store the result.
5982 * @param cb Number of bytes to read.
5983 * @thread EMT
5984 * @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
5985 * accesses we have to take care of that ourselves.
5986 */
5987static int e1kRegReadUnaligned(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, void *pv, uint32_t cb)
5988{
5989 uint32_t u32 = 0;
5990 uint32_t shift;
5991 int rc = VINF_SUCCESS;
5992 int index = e1kRegLookup(offReg);
5993#ifdef LOG_ENABLED
5994 char buf[9];
5995#endif
5996
5997 /*
5998 * From the spec:
5999 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
6000 * double word) is ignored. Partial reads return all 32 bits of data regardless of the byte enables.
6001 */
6002
6003 /*
6004 * To be able to read bytes and short word we convert them to properly
6005 * shifted 32-bit words and masks. The idea is to keep register-specific
6006 * handlers simple. Most accesses will be 32-bit anyway.
6007 */
6008 uint32_t mask;
6009 switch (cb)
6010 {
6011 case 4: mask = 0xFFFFFFFF; break;
6012 case 2: mask = 0x0000FFFF; break;
6013 case 1: mask = 0x000000FF; break;
6014 default:
6015 return PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "unsupported op size: offset=%#10x cb=%#10x\n", offReg, cb);
6016 }
6017 if (index != -1)
6018 {
6019 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6020 if (g_aE1kRegMap[index].readable)
6021 {
6022 /* Make the mask correspond to the bits we are about to read. */
6023 shift = (offReg - g_aE1kRegMap[index].offset) % sizeof(uint32_t) * 8;
6024 mask <<= shift;
6025 if (!mask)
6026 return PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
6027 /*
6028 * Read it. Pass the mask so the handler knows what has to be read.
6029 * Mask out irrelevant bits.
6030 */
6031 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
6032 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
6033 //    return rc;
6034 //pThis->fDelayInts = false;
6035 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6036 //pThis->iStatIntLostOne = 0;
6037 rc = g_aE1kRegMap[index].pfnRead(pDevIns, pThis, offReg & 0xFFFFFFFC, index, &u32);
6038 u32 &= mask;
6039 //e1kCsLeave(pThis);
6040 E1kLog2(("%s At %08X read %s from %s (%s)\n",
6041 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6042 Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
6043 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6044 /* Shift back the result. */
6045 u32 >>= shift;
6046 }
6047 else
6048 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
6049 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6050 if (IOM_SUCCESS(rc))
6051 STAM_COUNTER_INC(&pThis->aStatRegReads[index]);
6052 }
6053 else
6054 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
6055 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf)));
6056
6057 memcpy(pv, &u32, cb);
6058 return rc;
6059}
6060
6061/**
6062 * Handle 4 byte aligned and sized read operation.
6063 *
6064 * Looks up and calls appropriate handler.
6065 *
6066 * @returns VBox status code.
6067 *
6068 * @param pDevIns The device instance.
6069 * @param pThis The device state structure.
6070 * @param offReg Register offset in memory-mapped frame.
6071 * @param pu32 Where to store the result.
6072 * @thread EMT
6073 */
6074static VBOXSTRICTRC e1kRegReadAlignedU32(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, uint32_t *pu32)
6075{
6076 Assert(!(offReg & 3));
6077
6078 /*
6079 * Lookup the register and check that it's readable.
6080 */
6081 VBOXSTRICTRC rc = VINF_SUCCESS;
6082 int idxReg = e1kRegLookup(offReg);
6083 if (RT_LIKELY(idxReg != -1))
6084 {
6085 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6086 if (RT_LIKELY(g_aE1kRegMap[idxReg].readable))
6087 {
6088 /*
6089 * Read it. Pass the mask so the handler knows what has to be read.
6090 * Mask out irrelevant bits.
6091 */
6092 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
6093 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
6094 // return rc;
6095 //pThis->fDelayInts = false;
6096 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6097 //pThis->iStatIntLostOne = 0;
6098 rc = g_aE1kRegMap[idxReg].pfnRead(pDevIns, pThis, offReg & 0xFFFFFFFC, idxReg, pu32);
6099 //e1kCsLeave(pThis);
6100 Log6(("%s At %08X read %08X from %s (%s)\n",
6101 pThis->szPrf, offReg, *pu32, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
6102 if (IOM_SUCCESS(rc))
6103 STAM_COUNTER_INC(&pThis->aStatRegReads[idxReg]);
6104 }
6105 else
6106 E1kLog(("%s At %08X read attempt from non-readable register %s (%s)\n",
6107 pThis->szPrf, offReg, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
6108 }
6109 else
6110 E1kLog(("%s At %08X read attempt from non-existing register\n", pThis->szPrf, offReg));
6111 return rc;
6112}
6113
6114/**
6115 * Handle 4 byte sized and aligned register write operation.
6116 *
6117 * Looks up and calls appropriate handler.
6118 *
6119 * @returns VBox status code.
6120 *
6121 * @param pDevIns The device instance.
6122 * @param pThis The device state structure.
6123 * @param offReg Register offset in memory-mapped frame.
6124 * @param u32Value The value to write.
6125 * @thread EMT
6126 */
6127static VBOXSTRICTRC e1kRegWriteAlignedU32(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
6128{
6129 VBOXSTRICTRC rc = VINF_SUCCESS;
6130 int index = e1kRegLookup(offReg);
6131 if (RT_LIKELY(index != -1))
6132 {
6133 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6134 if (RT_LIKELY(g_aE1kRegMap[index].writable))
6135 {
6136 /*
6137 * Write it. Pass the mask so the handler knows what has to be written.
6138 * Mask out irrelevant bits.
6139 */
6140 Log6(("%s At %08X write %08X to %s (%s)\n",
6141 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6142 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
6143 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
6144 // return rc;
6145 //pThis->fDelayInts = false;
6146 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6147 //pThis->iStatIntLostOne = 0;
6148 rc = g_aE1kRegMap[index].pfnWrite(pDevIns, pThis, offReg, index, u32Value);
6149 //e1kCsLeave(pThis);
6150 }
6151 else
6152 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
6153 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6154 if (IOM_SUCCESS(rc))
6155 STAM_COUNTER_INC(&pThis->aStatRegWrites[index]);
6156 }
6157 else
6158 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
6159 pThis->szPrf, offReg, u32Value));
6160 return rc;
6161}
6162
6163
6164/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
6165
6166/**
6167 * @callback_method_impl{FNIOMMMIONEWREAD}
6168 */
6169static DECLCALLBACK(VBOXSTRICTRC) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, uint32_t cb)
6170{
6171 RT_NOREF2(pvUser, cb);
6172 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6173 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6174
6175 Assert(off < E1K_MM_SIZE);
6176 Assert(cb == 4);
6177 Assert(!(off & 3));
6178
6179 VBOXSTRICTRC rcStrict = e1kRegReadAlignedU32(pDevIns, pThis, (uint32_t)off, (uint32_t *)pv);
6180
6181 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6182 return rcStrict;
6183}
6184
6185/**
6186 * @callback_method_impl{FNIOMMMIONEWWRITE}
6187 */
6188static DECLCALLBACK(VBOXSTRICTRC) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, uint32_t cb)
6189{
6190 RT_NOREF2(pvUser, cb);
6191 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6192 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6193
6194 Assert(off < E1K_MM_SIZE);
6195 Assert(cb == 4);
6196 Assert(!(off & 3));
6197
6198 VBOXSTRICTRC rcStrict = e1kRegWriteAlignedU32(pDevIns, pThis, (uint32_t)off, *(uint32_t const *)pv);
6199
6200 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6201 return rcStrict;
6202}
6203
6204/**
6205 * @callback_method_impl{FNIOMIOPORTNEWIN}
6206 */
6207static DECLCALLBACK(VBOXSTRICTRC) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
6208{
6209 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6210 VBOXSTRICTRC rc;
6211 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIORead), a);
6212 RT_NOREF_PV(pvUser);
6213
6214 if (RT_LIKELY(cb == 4))
6215 switch (offPort)
6216 {
6217 case 0x00: /* IOADDR */
6218 *pu32 = pThis->uSelectedReg;
6219 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6220 rc = VINF_SUCCESS;
6221 break;
6222
6223 case 0x04: /* IODATA */
6224 if (!(pThis->uSelectedReg & 3))
6225 rc = e1kRegReadAlignedU32(pDevIns, pThis, pThis->uSelectedReg, pu32);
6226 else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
6227 rc = e1kRegReadUnaligned(pDevIns, pThis, pThis->uSelectedReg, pu32, cb);
6228 if (rc == VINF_IOM_R3_MMIO_READ)
6229 rc = VINF_IOM_R3_IOPORT_READ;
6230 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6231 break;
6232
6233 default:
6234 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", pThis->szPrf, offPort));
6235 /** @todo r=bird: Check what real hardware returns here. */
6236 //rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
6237 rc = VINF_IOM_MMIO_UNUSED_00; /* used to return VINF_SUCCESS and not touch *pu32, which amounted to this. */
6238 break;
6239 }
6240 else
6241 {
6242        E1kLog(("%s e1kIOPortIn: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb));
6243 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb);
6244 *pu32 = 0; /** @todo r=bird: Check what real hardware returns here. (Didn't used to set a value here, picked zero as that's what we'd end up in most cases.) */
6245 }
6246 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIORead), a);
6247 return rc;
6248}
6249
6250
6251/**
6252 * @callback_method_impl{FNIOMIOPORTNEWOUT}
6253 */
6254static DECLCALLBACK(VBOXSTRICTRC) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
6255{
6256 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6257 VBOXSTRICTRC rc;
6258 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6259 RT_NOREF_PV(pvUser);
6260
6261 E1kLog2(("%s e1kIOPortOut: offPort=%RTiop value=%08x\n", pThis->szPrf, offPort, u32));
6262 if (RT_LIKELY(cb == 4))
6263 {
6264 switch (offPort)
6265 {
6266 case 0x00: /* IOADDR */
6267 pThis->uSelectedReg = u32;
6268 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
6269 rc = VINF_SUCCESS;
6270 break;
6271
6272 case 0x04: /* IODATA */
6273 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
6274 if (RT_LIKELY(!(pThis->uSelectedReg & 3)))
6275 {
6276 rc = e1kRegWriteAlignedU32(pDevIns, pThis, pThis->uSelectedReg, u32);
6277 if (rc == VINF_IOM_R3_MMIO_WRITE)
6278 rc = VINF_IOM_R3_IOPORT_WRITE;
6279 }
6280 else
6281 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
6282 "Spec violation: misaligned offset: %#10x, ignored.\n", pThis->uSelectedReg);
6283 break;
6284
6285 default:
6286 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", pThis->szPrf, offPort));
6287 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", offPort);
6288 }
6289 }
6290 else
6291 {
6292 E1kLog(("%s e1kIOPortOut: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb));
6293 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s: invalid op size: offPort=%RTiop cb=%#x\n", pThis->szPrf, offPort, cb);
6294 }
6295
6296 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6297 return rc;
6298}
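
/*
 * Illustrative sketch only (not compiled): how a guest driver typically reaches
 * a device register through the IOADDR/IODATA window handled by e1kIOPortIn()
 * and e1kIOPortOut() above.  The helper names are made up for the example and
 * ASMOutU32/ASMInU32 (iprt/asm-amd64-x86.h) stand in for whatever port I/O
 * primitives the guest actually uses.
 */
#if 0
static uint32_t guestReadRegViaIoWindow(RTIOPORT uPortBase, uint32_t offReg)
{
    ASMOutU32(uPortBase + 0x00, offReg);    /* IOADDR: select the register offset. */
    return ASMInU32(uPortBase + 0x04);      /* IODATA: read the selected register. */
}

static void guestWriteRegViaIoWindow(RTIOPORT uPortBase, uint32_t offReg, uint32_t u32Value)
{
    ASMOutU32(uPortBase + 0x00, offReg);    /* IOADDR: select the register offset. */
    ASMOutU32(uPortBase + 0x04, u32Value);  /* IODATA: write the selected register. */
}
#endif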
6299
6300#ifdef IN_RING3
6301
6302/**
6303 * Dump complete device state to log.
6304 *
6305 * @param pThis Pointer to device state.
6306 */
6307static void e1kDumpState(PE1KSTATE pThis)
6308{
6309 RT_NOREF(pThis);
6310 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6311 E1kLog2(("%s: %8.8s = %08x\n", pThis->szPrf, g_aE1kRegMap[i].abbrev, pThis->auRegs[i]));
6312# ifdef E1K_INT_STATS
6313 LogRel(("%s: Interrupt attempts: %d\n", pThis->szPrf, pThis->uStatIntTry));
6314 LogRel(("%s: Interrupts raised : %d\n", pThis->szPrf, pThis->uStatInt));
6315 LogRel(("%s: Interrupts lowered: %d\n", pThis->szPrf, pThis->uStatIntLower));
6316 LogRel(("%s: ICR outside ISR : %d\n", pThis->szPrf, pThis->uStatNoIntICR));
6317 LogRel(("%s: IMS raised ints : %d\n", pThis->szPrf, pThis->uStatIntIMS));
6318 LogRel(("%s: Interrupts skipped: %d\n", pThis->szPrf, pThis->uStatIntSkip));
6319 LogRel(("%s: Masked interrupts : %d\n", pThis->szPrf, pThis->uStatIntMasked));
6320 LogRel(("%s: Early interrupts : %d\n", pThis->szPrf, pThis->uStatIntEarly));
6321 LogRel(("%s: Late interrupts : %d\n", pThis->szPrf, pThis->uStatIntLate));
6322 LogRel(("%s: Lost interrupts : %d\n", pThis->szPrf, pThis->iStatIntLost));
6323 LogRel(("%s: Interrupts by RX : %d\n", pThis->szPrf, pThis->uStatIntRx));
6324 LogRel(("%s: Interrupts by TX : %d\n", pThis->szPrf, pThis->uStatIntTx));
6325 LogRel(("%s: Interrupts by ICS : %d\n", pThis->szPrf, pThis->uStatIntICS));
6326 LogRel(("%s: Interrupts by RDTR: %d\n", pThis->szPrf, pThis->uStatIntRDTR));
6327 LogRel(("%s: Interrupts by RDMT: %d\n", pThis->szPrf, pThis->uStatIntRXDMT0));
6328 LogRel(("%s: Interrupts by TXQE: %d\n", pThis->szPrf, pThis->uStatIntTXQE));
6329 LogRel(("%s: TX int delay asked: %d\n", pThis->szPrf, pThis->uStatTxIDE));
6330 LogRel(("%s: TX delayed: %d\n", pThis->szPrf, pThis->uStatTxDelayed));
6331 LogRel(("%s: TX delay expired: %d\n", pThis->szPrf, pThis->uStatTxDelayExp));
6332 LogRel(("%s: TX no report asked: %d\n", pThis->szPrf, pThis->uStatTxNoRS));
6333 LogRel(("%s: TX abs timer expd : %d\n", pThis->szPrf, pThis->uStatTAD));
6334 LogRel(("%s: TX int timer expd : %d\n", pThis->szPrf, pThis->uStatTID));
6335 LogRel(("%s: RX abs timer expd : %d\n", pThis->szPrf, pThis->uStatRAD));
6336 LogRel(("%s: RX int timer expd : %d\n", pThis->szPrf, pThis->uStatRID));
6337 LogRel(("%s: TX CTX descriptors: %d\n", pThis->szPrf, pThis->uStatDescCtx));
6338 LogRel(("%s: TX DAT descriptors: %d\n", pThis->szPrf, pThis->uStatDescDat));
6339 LogRel(("%s: TX LEG descriptors: %d\n", pThis->szPrf, pThis->uStatDescLeg));
6340 LogRel(("%s: Received frames : %d\n", pThis->szPrf, pThis->uStatRxFrm));
6341 LogRel(("%s: Transmitted frames: %d\n", pThis->szPrf, pThis->uStatTxFrm));
6342 LogRel(("%s: TX frames up to 1514: %d\n", pThis->szPrf, pThis->uStatTx1514));
6343 LogRel(("%s: TX frames up to 2962: %d\n", pThis->szPrf, pThis->uStatTx2962));
6344 LogRel(("%s: TX frames up to 4410: %d\n", pThis->szPrf, pThis->uStatTx4410));
6345 LogRel(("%s: TX frames up to 5858: %d\n", pThis->szPrf, pThis->uStatTx5858));
6346 LogRel(("%s: TX frames up to 7306: %d\n", pThis->szPrf, pThis->uStatTx7306));
6347 LogRel(("%s: TX frames up to 8754: %d\n", pThis->szPrf, pThis->uStatTx8754));
6348 LogRel(("%s: TX frames up to 16384: %d\n", pThis->szPrf, pThis->uStatTx16384));
6349 LogRel(("%s: TX frames up to 32768: %d\n", pThis->szPrf, pThis->uStatTx32768));
6350 LogRel(("%s: Larger TX frames : %d\n", pThis->szPrf, pThis->uStatTxLarge));
6351 LogRel(("%s: Max TX Delay : %lld\n", pThis->szPrf, pThis->uStatMaxTxDelay));
6352# endif /* E1K_INT_STATS */
6353}
6354
6355/**
6356 * @callback_method_impl{FNPCIIOREGIONMAP}
6357 *
6358 * @todo Can remove this one later, it's really just here for taking down
6359 * addresses for e1kInfo(), an alignment assertion and sentimentality.
6360 */
6361static DECLCALLBACK(int) e1kR3Map(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
6362 RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
6363{
6364 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6365 E1kLog(("%s e1kR3Map: iRegion=%u GCPhysAddress=%RGp\n", pThis->szPrf, iRegion, GCPhysAddress));
6366 RT_NOREF(pPciDev, iRegion, cb);
6367 Assert(pPciDev == pDevIns->apPciDevs[0]);
6368
6369 switch (enmType)
6370 {
6371 case PCI_ADDRESS_SPACE_IO:
6372 pThis->IOPortBase = (RTIOPORT)GCPhysAddress;
6373 break;
6374
6375 case PCI_ADDRESS_SPACE_MEM:
6376 pThis->addrMMReg = GCPhysAddress;
6377 Assert(!(GCPhysAddress & 7) || GCPhysAddress == NIL_RTGCPHYS);
6378 break;
6379
6380 default:
6381 /* We should never get here */
6382 AssertMsgFailedReturn(("Invalid PCI address space param in map callback"), VERR_INTERNAL_ERROR);
6383 }
6384 return VINF_SUCCESS;
6385}
6386
6387
6388/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
6389
6390/**
6391 * Check if the device can receive data now.
6392 * This must be called before the pfnReceive() method is called.
6393 *
6394 * @returns VINF_SUCCESS if the device can receive data, VERR_NET_NO_BUFFER_SPACE otherwise.
6395 * @param pDevIns The device instance.
6396 * @param pThis The instance data.
6397 * @thread EMT
6398 */
6399static int e1kCanReceive(PPDMDEVINS pDevIns, PE1KSTATE pThis)
6400{
6401#ifndef E1K_WITH_RXD_CACHE
6402 size_t cb;
6403
6404 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6405 return VERR_NET_NO_BUFFER_SPACE;
6406
6407 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6408 {
6409 E1KRXDESC desc;
6410 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
6411 if (desc.status.fDD)
6412 cb = 0;
6413 else
6414 cb = pThis->u16RxBSize;
6415 }
6416 else if (RDH < RDT)
6417 cb = (RDT - RDH) * pThis->u16RxBSize;
6418 else if (RDH > RDT)
6419 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pThis->u16RxBSize;
6420 else
6421 {
6422 cb = 0;
6423 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6424 }
6425 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6426 pThis->szPrf, RDH, RDT, RDLEN, pThis->u16RxBSize, cb));
6427
6428 e1kCsRxLeave(pThis);
6429 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6430#else /* E1K_WITH_RXD_CACHE */
6431 int rc = VINF_SUCCESS;
6432
6433 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6434 return VERR_NET_NO_BUFFER_SPACE;
6435
6436 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6437 {
6438 E1KRXDESC desc;
6439 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
6440 if (desc.status.fDD)
6441 rc = VERR_NET_NO_BUFFER_SPACE;
6442 }
6443 else if (e1kRxDIsCacheEmpty(pThis) && RDH == RDT)
6444 {
6445 /* Cache is empty, so is the RX ring. */
6446 rc = VERR_NET_NO_BUFFER_SPACE;
6447 }
6448 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
6449 " u16RxBSize=%d rc=%Rrc\n", pThis->szPrf,
6450 e1kRxDInCache(pThis), RDH, RDT, RDLEN, pThis->u16RxBSize, rc));
6451
6452 e1kCsRxLeave(pThis);
6453 return rc;
6454#endif /* E1K_WITH_RXD_CACHE */
6455}
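
/*
 * Sketch of the ring arithmetic used in e1kCanReceive() above, pulled out as a
 * standalone (not compiled) helper for clarity: how many descriptors the guest
 * currently offers between head (RDH) and tail (RDT) of a ring with cDescs
 * entries.  The helper name is illustrative only.
 */
#if 0
static unsigned e1kSketchRxDescsAvailable(unsigned rdh, unsigned rdt, unsigned cDescs)
{
    if (rdh < rdt)
        return rdt - rdh;               /* No wrap-around: [RDH..RDT) is available. */
    if (rdh > rdt)
        return cDescs - rdh + rdt;      /* Wrapped: [RDH..end) plus [0..RDT). */
    return 0;                           /* RDH == RDT: the ring is empty, nothing to receive into. */
}
#endif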
6456
6457/**
6458 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6459 */
6460static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6461{
6462 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
6463 PE1KSTATE pThis = pThisCC->pShared;
6464 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6465
6466 int rc = e1kCanReceive(pDevIns, pThis);
6467
6468 if (RT_SUCCESS(rc))
6469 return VINF_SUCCESS;
6470 if (RT_UNLIKELY(cMillies == 0))
6471 return VERR_NET_NO_BUFFER_SPACE;
6472
6473 rc = VERR_INTERRUPTED;
6474 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, true);
6475 STAM_PROFILE_START(&pThis->StatRxOverflow, a);
6476 VMSTATE enmVMState;
6477 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pDevIns)) == VMSTATE_RUNNING
6478 || enmVMState == VMSTATE_RUNNING_LS))
6479 {
6480 int rc2 = e1kCanReceive(pDevIns, pThis);
6481 if (RT_SUCCESS(rc2))
6482 {
6483 rc = VINF_SUCCESS;
6484 break;
6485 }
6486 E1kLogRel(("E1000: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6487 E1kLog(("%s: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", pThis->szPrf, cMillies));
6488 PDMDevHlpSUPSemEventWaitNoResume(pDevIns, pThis->hEventMoreRxDescAvail, cMillies);
6489 }
6490 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
6491 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, false);
6492
6493 return rc;
6494}
6495
6496
6497/**
6498 * Matches the packet addresses against Receive Address table. Looks for
6499 * exact matches only.
6500 *
6501 * @returns true if address matches.
6502 * @param pThis Pointer to the state structure.
6503 * @param pvBuf The ethernet packet.
6505 * @thread EMT
6506 */
6507static bool e1kPerfectMatch(PE1KSTATE pThis, const void *pvBuf)
6508{
6509 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
6510 {
6511 E1KRAELEM* ra = pThis->aRecAddr.array + i;
6512
6513 /* Valid address? */
6514 if (ra->ctl & RA_CTL_AV)
6515 {
6516 Assert((ra->ctl & RA_CTL_AS) < 2);
6517 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6518 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6519 // pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6520 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6521 /*
6522 * Address Select:
6523 * 00b = Destination address
6524 * 01b = Source address
6525 * 10b = Reserved
6526 * 11b = Reserved
6527 * Since ethernet header is (DA, SA, len) we can use address
6528 * select as index.
6529 */
6530 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6531 ra->addr, sizeof(ra->addr)) == 0)
6532 return true;
6533 }
6534 }
6535
6536 return false;
6537}
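
/*
 * Illustrative sketch (not compiled): the Address Select indexing used in
 * e1kPerfectMatch() above.  With standard Ethernet framing the destination
 * address is at offset 0 and the source address at offset 6, so AS=00b/01b can
 * be used directly to pick which 6 bytes to compare with RA[n].  The helper
 * name is made up for the example.
 */
#if 0
static const uint8_t *e1kSketchAddrForAS(const uint8_t *pbFrame, unsigned uAddrSel)
{
    Assert(uAddrSel < 2);               /* 10b and 11b are reserved. */
    return pbFrame + 6 * uAddrSel;      /* 00b -> DA at offset 0, 01b -> SA at offset 6. */
}
#endif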
6538
6539/**
6540 * Matches the packet addresses against Multicast Table Array.
6541 *
6542 * @remarks This is an imperfect match since only a 12-bit portion of the
6543 *          address is tested, so a whole group of addresses shares each filter bit.
6544 *
6545 * @returns true if address matches.
6546 * @param pThis Pointer to the state structure.
6547 * @param pvBuf The ethernet packet.
6549 * @thread EMT
6550 */
6551static bool e1kImperfectMatch(PE1KSTATE pThis, const void *pvBuf)
6552{
6553 /* Get bits 32..47 of destination address */
6554 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6555
6556 unsigned offset = GET_BITS(RCTL, MO);
6557 /*
6558 * offset means:
6559 * 00b = bits 36..47
6560 * 01b = bits 35..46
6561 * 10b = bits 34..45
6562 * 11b = bits 32..43
6563 */
6564 if (offset < 3)
6565 u16Bit = u16Bit >> (4 - offset);
6566 return ASMBitTest(pThis->auMTA, u16Bit & 0xFFF);
6567}
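
/*
 * Illustrative sketch (not compiled): how a guest driver would derive the MTA
 * bit to set for a multicast address so that e1kImperfectMatch() above accepts
 * it, assuming MO=00b (bits 36..47 of the address).  The helper name and the
 * little-endian byte handling are assumptions made for the example.
 */
#if 0
static void e1kSketchMtaHash(uint8_t const *pbMac, uint32_t *pidxMta, uint32_t *piBit)
{
    /* Bits 32..47 of the address live in bytes 4 and 5. */
    uint16_t u16  = (uint16_t)pbMac[4] | ((uint16_t)pbMac[5] << 8);
    uint16_t uBit = (u16 >> 4) & 0xFFF;     /* MO=00b: shift by 4, keep 12 bits. */
    *pidxMta = uBit >> 5;                   /* Index of the 32-bit MTA register. */
    *piBit   = uBit & 31;                   /* Bit position within that register. */
}
#endif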
6568
6569/**
6570 * Determines if the packet is to be delivered to upper layer.
6571 *
6572 * The following filters are supported:
6573 * - Exact Unicast/Multicast
6574 * - Promiscuous Unicast/Multicast
6575 * - Multicast
6576 * - VLAN
6577 *
6578 * @returns true if packet is intended for this node.
6579 * @param pThis Pointer to the state structure.
6580 * @param pvBuf The ethernet packet.
6581 * @param cb Number of bytes available in the packet.
6582 * @param pStatus Bit field to store status bits.
6583 * @thread EMT
6584 */
6585static bool e1kAddressFilter(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6586{
6587 Assert(cb > 14);
6588 /* Assume that we fail to pass exact filter. */
6589 pStatus->fPIF = false;
6590 pStatus->fVP = false;
6591 /* Discard oversized packets */
6592 if (cb > E1K_MAX_RX_PKT_SIZE)
6593 {
6594 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6595 pThis->szPrf, cb, E1K_MAX_RX_PKT_SIZE));
6596 E1K_INC_CNT32(ROC);
6597 return false;
6598 }
6599 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6600 {
6601        /* When long packet reception is disabled, packets over 1522 bytes are discarded. */
6602 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6603 pThis->szPrf, cb));
6604 E1K_INC_CNT32(ROC);
6605 return false;
6606 }
6607
6608 uint16_t *u16Ptr = (uint16_t*)pvBuf;
6609 /* Compare TPID with VLAN Ether Type */
6610 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6611 {
6612 pStatus->fVP = true;
6613 /* Is VLAN filtering enabled? */
6614 if (RCTL & RCTL_VFE)
6615 {
6616 /* It is 802.1q packet indeed, let's filter by VID */
6617 if (RCTL & RCTL_CFIEN)
6618 {
6619 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", pThis->szPrf,
6620 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6621 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6622 !!(RCTL & RCTL_CFI)));
6623 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6624 {
6625 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6626 pThis->szPrf, E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6627 return false;
6628 }
6629 }
6630 else
6631 E1kLog3(("%s VLAN filter: VLAN=%d\n", pThis->szPrf,
6632 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6633 if (!ASMBitTest(pThis->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6634 {
6635 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6636 pThis->szPrf, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6637 return false;
6638 }
6639 }
6640 }
6641 /* Broadcast filtering */
6642 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6643 return true;
6644 E1kLog2(("%s Packet filter: not a broadcast\n", pThis->szPrf));
6645 if (e1kIsMulticast(pvBuf))
6646 {
6647 /* Is multicast promiscuous enabled? */
6648 if (RCTL & RCTL_MPE)
6649 return true;
6650 E1kLog2(("%s Packet filter: no promiscuous multicast\n", pThis->szPrf));
6651 /* Try perfect matches first */
6652 if (e1kPerfectMatch(pThis, pvBuf))
6653 {
6654 pStatus->fPIF = true;
6655 return true;
6656 }
6657 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6658 if (e1kImperfectMatch(pThis, pvBuf))
6659 return true;
6660 E1kLog2(("%s Packet filter: no imperfect match\n", pThis->szPrf));
6661 }
6662 else {
6663 /* Is unicast promiscuous enabled? */
6664 if (RCTL & RCTL_UPE)
6665 return true;
6666 E1kLog2(("%s Packet filter: no promiscuous unicast\n", pThis->szPrf));
6667 if (e1kPerfectMatch(pThis, pvBuf))
6668 {
6669 pStatus->fPIF = true;
6670 return true;
6671 }
6672 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6673 }
6674 E1kLog2(("%s Packet filter: packet discarded\n", pThis->szPrf));
6675 return false;
6676}
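
/*
 * Minimal usage sketch (not compiled): running a broadcast frame through
 * e1kAddressFilter() above.  With RCTL.BAM set such a frame is accepted
 * regardless of the RA/MTA contents; the helper name is hypothetical.
 */
#if 0
static bool e1kSketchAcceptsBroadcast(PE1KSTATE pThis)
{
    uint8_t abFrame[60] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };   /* DA = broadcast, rest zeroed. */
    E1KRXDST status;
    RT_ZERO(status);
    return e1kAddressFilter(pThis, abFrame, sizeof(abFrame), &status);
}
#endif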
6677
6678/**
6679 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6680 */
6681static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6682{
6683 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
6684 PE1KSTATE pThis = pThisCC->pShared;
6685 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6686 int rc = VINF_SUCCESS;
6687
6688 /*
6689 * Drop packets if the VM is not running yet/anymore.
6690 */
6691 VMSTATE enmVMState = PDMDevHlpVMState(pDevIns);
6692 if ( enmVMState != VMSTATE_RUNNING
6693 && enmVMState != VMSTATE_RUNNING_LS)
6694 {
6695 E1kLog(("%s Dropping incoming packet as VM is not running.\n", pThis->szPrf));
6696 return VINF_SUCCESS;
6697 }
6698
6699 /* Discard incoming packets in locked state */
6700 if (!(RCTL & RCTL_EN) || pThis->fLocked || !(STATUS & STATUS_LU))
6701 {
6702 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", pThis->szPrf));
6703 return VINF_SUCCESS;
6704 }
6705
6706 STAM_PROFILE_ADV_START(&pThis->StatReceive, a);
6707
6708 //if (!e1kCsEnter(pThis, RT_SRC_POS))
6709 // return VERR_PERMISSION_DENIED;
6710
6711 e1kPacketDump(pDevIns, pThis, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6712
6713 /* Update stats */
6714 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
6715 {
6716 E1K_INC_CNT32(TPR);
6717 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6718 e1kCsLeave(pThis);
6719 }
6720 STAM_PROFILE_ADV_START(&pThis->StatReceiveFilter, a);
6721 E1KRXDST status;
6722 RT_ZERO(status);
6723 bool fPassed = e1kAddressFilter(pThis, pvBuf, cb, &status);
6724 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveFilter, a);
6725 if (fPassed)
6726 {
6727 rc = e1kHandleRxPacket(pDevIns, pThis, pvBuf, cb, status);
6728 }
6729 //e1kCsLeave(pThis);
6730 STAM_PROFILE_ADV_STOP(&pThis->StatReceive, a);
6731
6732 return rc;
6733}
6734
6735
6736/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
6737
6738/**
6739 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
6740 */
6741static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6742{
6743 if (iLUN == 0)
6744 {
6745 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, ILeds);
6746 *ppLed = &pThisCC->pShared->led;
6747 return VINF_SUCCESS;
6748 }
6749 return VERR_PDM_LUN_NOT_FOUND;
6750}
6751
6752
6753/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
6754
6755/**
6756 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
6757 */
6758static DECLCALLBACK(int) e1kR3GetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6759{
6760 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
6761 pThisCC->eeprom.getMac(pMac);
6762 return VINF_SUCCESS;
6763}
6764
6765/**
6766 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
6767 */
6768static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kR3GetLinkState(PPDMINETWORKCONFIG pInterface)
6769{
6770 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
6771 PE1KSTATE pThis = pThisCC->pShared;
6772 if (STATUS & STATUS_LU)
6773 return PDMNETWORKLINKSTATE_UP;
6774 return PDMNETWORKLINKSTATE_DOWN;
6775}
6776
6777/**
6778 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
6779 */
6780static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6781{
6782 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
6783 PE1KSTATE pThis = pThisCC->pShared;
6784 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6785
6786 E1kLog(("%s e1kR3SetLinkState: enmState=%d\n", pThis->szPrf, enmState));
6787 switch (enmState)
6788 {
6789 case PDMNETWORKLINKSTATE_UP:
6790 pThis->fCableConnected = true;
6791 /* If link was down, bring it up after a while. */
6792 if (!(STATUS & STATUS_LU))
6793 e1kBringLinkUpDelayed(pDevIns, pThis);
6794 break;
6795 case PDMNETWORKLINKSTATE_DOWN:
6796 pThis->fCableConnected = false;
6797 /* Always set the phy link state to down, regardless of the STATUS_LU bit.
6798 * We might have to set the link state before the driver initializes us. */
6799 Phy::setLinkStatus(&pThis->phy, false);
6800 /* If link was up, bring it down. */
6801 if (STATUS & STATUS_LU)
6802 e1kR3LinkDown(pDevIns, pThis, pThisCC);
6803 break;
6804 case PDMNETWORKLINKSTATE_DOWN_RESUME:
6805 /*
6806 * There is not much sense in bringing down the link if it has not come up yet.
6807          * If it is up though, we bring it down temporarily, then bring it up again.
6808 */
6809 if (STATUS & STATUS_LU)
6810 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
6811 break;
6812 default:
6813 ;
6814 }
6815 return VINF_SUCCESS;
6816}
6817
6818
6819/* -=-=-=-=- PDMIBASE -=-=-=-=- */
6820
6821/**
6822 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6823 */
6824static DECLCALLBACK(void *) e1kR3QueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6825{
6826 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, IBase);
6827 Assert(&pThisCC->IBase == pInterface);
6828
6829 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThisCC->IBase);
6830 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThisCC->INetworkDown);
6831 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThisCC->INetworkConfig);
6832 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThisCC->ILeds);
6833 return NULL;
6834}
6835
6836
6837/* -=-=-=-=- Saved State -=-=-=-=- */
6838
6839/**
6840 * Saves the configuration.
6841 *
 * @param pHlp The device helper table.
6842 * @param pThis The E1K state.
6843 * @param pSSM The handle to the saved state.
6844 */
6845static void e1kSaveConfig(PCPDMDEVHLPR3 pHlp, PE1KSTATE pThis, PSSMHANDLE pSSM)
6846{
6847 pHlp->pfnSSMPutMem(pSSM, &pThis->macConfigured, sizeof(pThis->macConfigured));
6848 pHlp->pfnSSMPutU32(pSSM, pThis->eChip);
6849}
6850
6851/**
6852 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
6853 */
6854static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
6855{
6856 RT_NOREF(uPass);
6857 e1kSaveConfig(pDevIns->pHlpR3, PDMINS_2_DATA(pDevIns, PE1KSTATE), pSSM);
6858 return VINF_SSM_DONT_CALL_AGAIN;
6859}
6860
6861/**
6862 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
6863 */
6864static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6865{
6866 RT_NOREF(pSSM);
6867 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6868
6869 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6870 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6871 return rc;
6872 e1kCsLeave(pThis);
6873 return VINF_SUCCESS;
6874#if 0
6875 /* 1) Prevent all threads from modifying the state and memory */
6876 //pThis->fLocked = true;
6877 /* 2) Cancel all timers */
6878#ifdef E1K_TX_DELAY
6879 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
6880#endif /* E1K_TX_DELAY */
6881//#ifdef E1K_USE_TX_TIMERS
6882 if (pThis->fTidEnabled)
6883 {
6884 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
6885#ifndef E1K_NO_TAD
6886 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
6887#endif /* E1K_NO_TAD */
6888 }
6889//#endif /* E1K_USE_TX_TIMERS */
6890#ifdef E1K_USE_RX_TIMERS
6891 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
6892 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
6893#endif /* E1K_USE_RX_TIMERS */
6894 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
6895 /* 3) Did I forget anything? */
6896 E1kLog(("%s Locked\n", pThis->szPrf));
6897 return VINF_SUCCESS;
6898#endif
6899}
6900
6901/**
6902 * @callback_method_impl{FNSSMDEVSAVEEXEC}
6903 */
6904static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6905{
6906 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6907 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
6908 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
6909
6910 e1kSaveConfig(pHlp, pThis, pSSM);
6911 pThisCC->eeprom.save(pHlp, pSSM);
6912 e1kDumpState(pThis);
6913 pHlp->pfnSSMPutMem(pSSM, pThis->auRegs, sizeof(pThis->auRegs));
6914 pHlp->pfnSSMPutBool(pSSM, pThis->fIntRaised);
6915 Phy::saveState(pHlp, pSSM, &pThis->phy);
6916 pHlp->pfnSSMPutU32(pSSM, pThis->uSelectedReg);
6917 pHlp->pfnSSMPutMem(pSSM, pThis->auMTA, sizeof(pThis->auMTA));
6918 pHlp->pfnSSMPutMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6919 pHlp->pfnSSMPutMem(pSSM, pThis->auVFTA, sizeof(pThis->auVFTA));
6920 pHlp->pfnSSMPutU64(pSSM, pThis->u64AckedAt);
6921 pHlp->pfnSSMPutU16(pSSM, pThis->u16RxBSize);
6922 //pHlp->pfnSSMPutBool(pSSM, pThis->fDelayInts);
6923 //pHlp->pfnSSMPutBool(pSSM, pThis->fIntMaskUsed);
6924 pHlp->pfnSSMPutU16(pSSM, pThis->u16TxPktLen);
6925/** @todo State wrt to the TSE buffer is incomplete, so little point in
6926 * saving this actually. */
6927 pHlp->pfnSSMPutMem(pSSM, pThis->aTxPacketFallback, pThis->u16TxPktLen);
6928 pHlp->pfnSSMPutBool(pSSM, pThis->fIPcsum);
6929 pHlp->pfnSSMPutBool(pSSM, pThis->fTCPcsum);
6930 pHlp->pfnSSMPutMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6931 pHlp->pfnSSMPutMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6932 pHlp->pfnSSMPutBool(pSSM, pThis->fVTag);
6933 pHlp->pfnSSMPutU16(pSSM, pThis->u16VTagTCI);
6934#ifdef E1K_WITH_TXD_CACHE
6935# if 0
6936 pHlp->pfnSSMPutU8(pSSM, pThis->nTxDFetched);
6937 pHlp->pfnSSMPutMem(pSSM, pThis->aTxDescriptors,
6938 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6939# else
6940 /*
6941 * There is no point in storing TX descriptor cache entries as we can simply
6942 * fetch them again. Moreover, normally the cache is always empty when we
6943 * save the state. Store zero entries for compatibility.
6944 */
6945 pHlp->pfnSSMPutU8(pSSM, 0);
6946# endif
6947#endif /* E1K_WITH_TXD_CACHE */
6948/** @todo GSO requires some more state here. */
6949 E1kLog(("%s State has been saved\n", pThis->szPrf));
6950 return VINF_SUCCESS;
6951}
6952
6953#if 0
6954/**
6955 * @callback_method_impl{FNSSMDEVSAVEDONE}
6956 */
6957static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6958{
6959 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6960
6961 /* If VM is being powered off unlocking will result in assertions in PGM */
6962 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
6963 pThis->fLocked = false;
6964 else
6965 E1kLog(("%s VM is not running -- remain locked\n", pThis->szPrf));
6966 E1kLog(("%s Unlocked\n", pThis->szPrf));
6967 return VINF_SUCCESS;
6968}
6969#endif
6970
6971/**
6972 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
6973 */
6974static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6975{
6976 RT_NOREF(pSSM);
6977 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6978
6979 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6980 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6981 return rc;
6982 e1kCsLeave(pThis);
6983 return VINF_SUCCESS;
6984}
6985
6986/**
6987 * @callback_method_impl{FNSSMDEVLOADEXEC}
6988 */
6989static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
6990{
6991 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6992 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
6993 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
6994 int rc;
6995
6996 if ( uVersion != E1K_SAVEDSTATE_VERSION
6997#ifdef E1K_WITH_TXD_CACHE
6998 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
6999#endif /* E1K_WITH_TXD_CACHE */
7000 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
7001 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
7002 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
7003
7004 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
7005 || uPass != SSM_PASS_FINAL)
7006 {
7007 /* config checks */
7008 RTMAC macConfigured;
7009 rc = pHlp->pfnSSMGetMem(pSSM, &macConfigured, sizeof(macConfigured));
7010 AssertRCReturn(rc, rc);
7011 if ( memcmp(&macConfigured, &pThis->macConfigured, sizeof(macConfigured))
7012 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
7013 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
7014
7015 E1KCHIP eChip;
7016 rc = pHlp->pfnSSMGetU32(pSSM, &eChip);
7017 AssertRCReturn(rc, rc);
7018 if (eChip != pThis->eChip)
7019 return pHlp->pfnSSMSetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
7020 }
7021
7022 if (uPass == SSM_PASS_FINAL)
7023 {
7024 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
7025 {
7026 rc = pThisCC->eeprom.load(pHlp, pSSM);
7027 AssertRCReturn(rc, rc);
7028 }
7029 /* the state */
7030 pHlp->pfnSSMGetMem(pSSM, &pThis->auRegs, sizeof(pThis->auRegs));
7031 pHlp->pfnSSMGetBool(pSSM, &pThis->fIntRaised);
7032 /** @todo PHY could be made a separate device with its own versioning */
7033 Phy::loadState(pHlp, pSSM, &pThis->phy);
7034 pHlp->pfnSSMGetU32(pSSM, &pThis->uSelectedReg);
7035 pHlp->pfnSSMGetMem(pSSM, &pThis->auMTA, sizeof(pThis->auMTA));
7036 pHlp->pfnSSMGetMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
7037 pHlp->pfnSSMGetMem(pSSM, &pThis->auVFTA, sizeof(pThis->auVFTA));
7038 pHlp->pfnSSMGetU64(pSSM, &pThis->u64AckedAt);
7039 pHlp->pfnSSMGetU16(pSSM, &pThis->u16RxBSize);
7040 //pHlp->pfnSSMGetBool(pSSM, pThis->fDelayInts);
7041 //pHlp->pfnSSMGetBool(pSSM, pThis->fIntMaskUsed);
7042 rc = pHlp->pfnSSMGetU16(pSSM, &pThis->u16TxPktLen);
7043 AssertRCReturn(rc, rc);
7044 if (pThis->u16TxPktLen > sizeof(pThis->aTxPacketFallback))
7045 pThis->u16TxPktLen = sizeof(pThis->aTxPacketFallback);
7046 pHlp->pfnSSMGetMem(pSSM, &pThis->aTxPacketFallback[0], pThis->u16TxPktLen);
7047 pHlp->pfnSSMGetBool(pSSM, &pThis->fIPcsum);
7048 pHlp->pfnSSMGetBool(pSSM, &pThis->fTCPcsum);
7049 pHlp->pfnSSMGetMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
7050 rc = pHlp->pfnSSMGetMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
7051 AssertRCReturn(rc, rc);
7052 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
7053 {
7054 pHlp->pfnSSMGetBool(pSSM, &pThis->fVTag);
7055 rc = pHlp->pfnSSMGetU16(pSSM, &pThis->u16VTagTCI);
7056 AssertRCReturn(rc, rc);
7057 }
7058 else
7059 {
7060 pThis->fVTag = false;
7061 pThis->u16VTagTCI = 0;
7062 }
7063#ifdef E1K_WITH_TXD_CACHE
7064 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
7065 {
7066 rc = pHlp->pfnSSMGetU8(pSSM, &pThis->nTxDFetched);
7067 AssertRCReturn(rc, rc);
7068 if (pThis->nTxDFetched)
7069 pHlp->pfnSSMGetMem(pSSM, pThis->aTxDescriptors,
7070 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
7071 }
7072 else
7073 pThis->nTxDFetched = 0;
7074 /**
7075 * @todo Perhaps we should not store TXD cache as the entries can be
7076 * simply fetched again from guest's memory. Or can't they?
7077 */
7078#endif /* E1K_WITH_TXD_CACHE */
7079#ifdef E1K_WITH_RXD_CACHE
7080 /*
7081 * There is no point in storing the RX descriptor cache in the saved
7082 * state, we just need to make sure it is empty.
7083 */
7084 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
7085#endif /* E1K_WITH_RXD_CACHE */
7086 rc = pHlp->pfnSSMHandleGetStatus(pSSM);
7087 AssertRCReturn(rc, rc);
7088
7089 /* derived state */
7090 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
7091
7092 E1kLog(("%s State has been restored\n", pThis->szPrf));
7093 e1kDumpState(pThis);
7094 }
7095 return VINF_SUCCESS;
7096}
7097
7098/**
7099 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
7100 */
7101static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7102{
7103 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7104 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7105 RT_NOREF(pSSM);
7106
7107 /* Update promiscuous mode */
7108 if (pThisCC->pDrvR3)
7109 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, !!(RCTL & (RCTL_UPE | RCTL_MPE)));
7110
7111 /*
7112 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
7113 * passed to us. We go through all this stuff if the link was up and we
7114 * weren't teleported.
7115 */
7116 if ( (STATUS & STATUS_LU)
7117 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
7118 && pThis->cMsLinkUpDelay)
7119 {
7120 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
7121 }
7122 return VINF_SUCCESS;
7123}
7124
7125
7126
7127/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
7128
7129/**
7130 * @callback_method_impl{FNRTSTRFORMATTYPE}
7131 */
7132static DECLCALLBACK(size_t) e1kFmtRxDesc(PFNRTSTROUTPUT pfnOutput,
7133 void *pvArgOutput,
7134 const char *pszType,
7135 void const *pvValue,
7136 int cchWidth,
7137 int cchPrecision,
7138 unsigned fFlags,
7139 void *pvUser)
7140{
7141 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7142 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
7143 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
7144 if (!pDesc)
7145 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
7146
7147 size_t cbPrintf = 0;
7148 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
7149 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
7150 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
7151 pDesc->status.fPIF ? "PIF" : "pif",
7152 pDesc->status.fIPCS ? "IPCS" : "ipcs",
7153 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
7154 pDesc->status.fVP ? "VP" : "vp",
7155 pDesc->status.fIXSM ? "IXSM" : "ixsm",
7156 pDesc->status.fEOP ? "EOP" : "eop",
7157 pDesc->status.fDD ? "DD" : "dd",
7158 pDesc->status.fRXE ? "RXE" : "rxe",
7159 pDesc->status.fIPE ? "IPE" : "ipe",
7160 pDesc->status.fTCPE ? "TCPE" : "tcpe",
7161 pDesc->status.fCE ? "CE" : "ce",
7162 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
7163 E1K_SPEC_VLAN(pDesc->status.u16Special),
7164 E1K_SPEC_PRI(pDesc->status.u16Special));
7165 return cbPrintf;
7166}
7167
7168/**
7169 * @callback_method_impl{FNRTSTRFORMATTYPE}
7170 */
7171static DECLCALLBACK(size_t) e1kFmtTxDesc(PFNRTSTROUTPUT pfnOutput,
7172 void *pvArgOutput,
7173 const char *pszType,
7174 void const *pvValue,
7175 int cchWidth,
7176 int cchPrecision,
7177 unsigned fFlags,
7178 void *pvUser)
7179{
7180 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7181 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
7182 E1KTXDESC *pDesc = (E1KTXDESC*)pvValue;
7183 if (!pDesc)
7184 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
7185
7186 size_t cbPrintf = 0;
7187 switch (e1kGetDescType(pDesc))
7188 {
7189 case E1K_DTYP_CONTEXT:
7190 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
7191 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
7192 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
7193 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
7194 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
7195 pDesc->context.dw2.fIDE ? " IDE":"",
7196 pDesc->context.dw2.fRS ? " RS" :"",
7197 pDesc->context.dw2.fTSE ? " TSE":"",
7198 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
7199 pDesc->context.dw2.fTCP ? "TCP":"UDP",
7200 pDesc->context.dw2.u20PAYLEN,
7201 pDesc->context.dw3.u8HDRLEN,
7202 pDesc->context.dw3.u16MSS,
7203 pDesc->context.dw3.fDD?"DD":"");
7204 break;
7205 case E1K_DTYP_DATA:
7206 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
7207 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
7208 pDesc->data.u64BufAddr,
7209 pDesc->data.cmd.u20DTALEN,
7210 pDesc->data.cmd.fIDE ? " IDE" :"",
7211 pDesc->data.cmd.fVLE ? " VLE" :"",
7212 pDesc->data.cmd.fRPS ? " RPS" :"",
7213 pDesc->data.cmd.fRS ? " RS" :"",
7214 pDesc->data.cmd.fTSE ? " TSE" :"",
7215 pDesc->data.cmd.fIFCS? " IFCS":"",
7216 pDesc->data.cmd.fEOP ? " EOP" :"",
7217 pDesc->data.dw3.fDD ? " DD" :"",
7218 pDesc->data.dw3.fEC ? " EC" :"",
7219 pDesc->data.dw3.fLC ? " LC" :"",
7220 pDesc->data.dw3.fTXSM? " TXSM":"",
7221 pDesc->data.dw3.fIXSM? " IXSM":"",
7222 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
7223 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
7224 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
7225 break;
7226 case E1K_DTYP_LEGACY:
7227 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
7228 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
7229 pDesc->data.u64BufAddr,
7230 pDesc->legacy.cmd.u16Length,
7231 pDesc->legacy.cmd.fIDE ? " IDE" :"",
7232 pDesc->legacy.cmd.fVLE ? " VLE" :"",
7233 pDesc->legacy.cmd.fRPS ? " RPS" :"",
7234 pDesc->legacy.cmd.fRS ? " RS" :"",
7235 pDesc->legacy.cmd.fIC ? " IC" :"",
7236 pDesc->legacy.cmd.fIFCS? " IFCS":"",
7237 pDesc->legacy.cmd.fEOP ? " EOP" :"",
7238 pDesc->legacy.dw3.fDD ? " DD" :"",
7239 pDesc->legacy.dw3.fEC ? " EC" :"",
7240 pDesc->legacy.dw3.fLC ? " LC" :"",
7241 pDesc->legacy.cmd.u8CSO,
7242 pDesc->legacy.dw3.u8CSS,
7243 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
7244 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
7245 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
7246 break;
7247 default:
7248 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
7249 break;
7250 }
7251
7252 return cbPrintf;
7253}
7254
7255/** Initializes debug helpers (logging format types). */
7256static int e1kInitDebugHelpers(void)
7257{
7258 int rc = VINF_SUCCESS;
7259 static bool s_fHelpersRegistered = false;
7260 if (!s_fHelpersRegistered)
7261 {
7262 s_fHelpersRegistered = true;
7263 rc = RTStrFormatTypeRegister("e1krxd", e1kFmtRxDesc, NULL);
7264 AssertRCReturn(rc, rc);
7265 rc = RTStrFormatTypeRegister("e1ktxd", e1kFmtTxDesc, NULL);
7266 AssertRCReturn(rc, rc);
7267 }
7268 return rc;
7269}
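
/*
 * Usage sketch (not compiled): once e1kInitDebugHelpers() has registered the
 * custom types, descriptors can be logged through the IPRT %R[...] format
 * extension, e.g. as done by e1kInfo() below.  The helper name is hypothetical.
 */
#if 0
static void e1kSketchLogRxDesc(PE1KSTATE pThis, E1KRXDESC const *pDesc)
{
    E1kLog2(("%s %R[e1krxd]\n", pThis->szPrf, pDesc));
}
#endif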
7270
7271/**
7272 * Status info callback.
7273 *
7274 * @param pDevIns The device instance.
7275 * @param pHlp The output helpers.
7276 * @param pszArgs The arguments.
7277 */
7278static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
7279{
7280 RT_NOREF(pszArgs);
7281 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7282 unsigned i;
7283 // bool fRcvRing = false;
7284 // bool fXmtRing = false;
7285
7286 /*
7287 * Parse args.
7288 if (pszArgs)
7289 {
7290 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
7291 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
7292 }
7293 */
7294
7295 /*
7296 * Show info.
7297 */
7298 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%RTiop mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7299 pDevIns->iInstance, pThis->IOPortBase, pThis->addrMMReg,
7300 &pThis->macConfigured, g_aChips[pThis->eChip].pcszName,
7301 pDevIns->fRCEnabled ? " RC" : "", pDevIns->fR0Enabled ? " R0" : "");
7302
7303 e1kCsEnter(pThis, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
7304
7305 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7306 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", g_aE1kRegMap[i].abbrev, pThis->auRegs[i]);
7307
7308 for (i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
7309 {
7310 E1KRAELEM* ra = pThis->aRecAddr.array + i;
7311 if (ra->ctl & RA_CTL_AV)
7312 {
7313 const char *pcszTmp;
7314 switch (ra->ctl & RA_CTL_AS)
7315 {
7316 case 0: pcszTmp = "DST"; break;
7317 case 1: pcszTmp = "SRC"; break;
7318 default: pcszTmp = "reserved";
7319 }
7320 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7321 }
7322 }
7323 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7324 uint32_t rdh = RDH;
7325 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7326 for (i = 0; i < cDescs; ++i)
7327 {
7328 E1KRXDESC desc;
7329 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7330 &desc, sizeof(desc));
7331 if (i == rdh)
7332 pHlp->pfnPrintf(pHlp, ">>> ");
7333 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7334 }
7335#ifdef E1K_WITH_RXD_CACHE
7336 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7337 pThis->iRxDCurrent, RDH, pThis->nRxDFetched, E1K_RXD_CACHE_SIZE);
7338 if (rdh > pThis->iRxDCurrent)
7339 rdh -= pThis->iRxDCurrent;
7340 else
7341 rdh = cDescs + rdh - pThis->iRxDCurrent;
7342 for (i = 0; i < pThis->nRxDFetched; ++i)
7343 {
7344 if (i == pThis->iRxDCurrent)
7345 pHlp->pfnPrintf(pHlp, ">>> ");
7346 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7347 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7348 &pThis->aRxDescriptors[i]);
7349 }
7350#endif /* E1K_WITH_RXD_CACHE */
7351
7352 cDescs = TDLEN / sizeof(E1KTXDESC);
7353 uint32_t tdh = TDH;
7354 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7355 for (i = 0; i < cDescs; ++i)
7356 {
7357 E1KTXDESC desc;
7358 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7359 &desc, sizeof(desc));
7360 if (i == tdh)
7361 pHlp->pfnPrintf(pHlp, ">>> ");
7362 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7363 }
7364#ifdef E1K_WITH_TXD_CACHE
7365 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7366 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE);
7367 if (tdh > pThis->iTxDCurrent)
7368 tdh -= pThis->iTxDCurrent;
7369 else
7370 tdh = cDescs + tdh - pThis->iTxDCurrent;
7371 for (i = 0; i < pThis->nTxDFetched; ++i)
7372 {
7373 if (i == pThis->iTxDCurrent)
7374 pHlp->pfnPrintf(pHlp, ">>> ");
7375 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7376 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7377 &pThis->aTxDescriptors[i]);
7378 }
7379#endif /* E1K_WITH_TXD_CACHE */
7380
7381
7382#ifdef E1K_INT_STATS
7383 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pThis->uStatIntTry);
7384 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pThis->uStatInt);
7385 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pThis->uStatIntLower);
7386 pHlp->pfnPrintf(pHlp, "ICR outside ISR : %d\n", pThis->uStatNoIntICR);
7387 pHlp->pfnPrintf(pHlp, "IMS raised ints : %d\n", pThis->uStatIntIMS);
7388 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pThis->uStatIntSkip);
7389 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pThis->uStatIntMasked);
7390 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pThis->uStatIntEarly);
7391 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pThis->uStatIntLate);
7392 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pThis->iStatIntLost);
7393 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pThis->uStatIntRx);
7394 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pThis->uStatIntTx);
7395 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pThis->uStatIntICS);
7396 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pThis->uStatIntRDTR);
7397 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pThis->uStatIntRXDMT0);
7398 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pThis->uStatIntTXQE);
7399 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pThis->uStatTxIDE);
7400 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pThis->uStatTxDelayed);
7401    pHlp->pfnPrintf(pHlp, "TX delay expired: %d\n", pThis->uStatTxDelayExp);
7402 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pThis->uStatTxNoRS);
7403 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pThis->uStatTAD);
7404 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pThis->uStatTID);
7405 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pThis->uStatRAD);
7406 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pThis->uStatRID);
7407 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pThis->uStatDescCtx);
7408 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pThis->uStatDescDat);
7409 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pThis->uStatDescLeg);
7410 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pThis->uStatRxFrm);
7411 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pThis->uStatTxFrm);
7412 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pThis->uStatTx1514);
7413 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pThis->uStatTx2962);
7414 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pThis->uStatTx4410);
7415 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pThis->uStatTx5858);
7416 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pThis->uStatTx7306);
7417 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pThis->uStatTx8754);
7418 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pThis->uStatTx16384);
7419 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pThis->uStatTx32768);
7420 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pThis->uStatTxLarge);
7421#endif /* E1K_INT_STATS */
7422
7423 e1kCsLeave(pThis);
7424}
7425
7426
7427
7428/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
7429
7430/**
7431 * Detach notification.
7432 *
7433 * One port on the network card has been disconnected from the network.
7434 *
7435 * @param pDevIns The device instance.
7436 * @param iLUN The logical unit which is being detached.
7437 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7438 */
7439static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7440{
7441 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7442 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7443 Log(("%s e1kR3Detach:\n", pThis->szPrf));
7444 RT_NOREF(fFlags);
7445
7446 AssertLogRelReturnVoid(iLUN == 0);
7447
7448 PDMDevHlpCritSectEnter(pDevIns, &pThis->cs, VERR_SEM_BUSY);
7449
7450    /** @todo r=pritesh still need to check if I missed
7451     * cleaning something up in this function
7452 */
7453
7454 /*
7455 * Zero some important members.
7456 */
7457 pThisCC->pDrvBase = NULL;
7458 pThisCC->pDrvR3 = NULL;
7459#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
7460 pThisR0->pDrvR0 = NIL_RTR0PTR;
7461 pThisRC->pDrvRC = NIL_RTRCPTR;
7462#endif
7463
7464 PDMDevHlpCritSectLeave(pDevIns, &pThis->cs);
7465}
7466
7467/**
7468 * Attach the Network attachment.
7469 *
7470 * One port on the network card has been connected to a network.
7471 *
7472 * @returns VBox status code.
7473 * @param pDevIns The device instance.
7474 * @param iLUN The logical unit which is being attached.
7475 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7476 *
7477 * @remarks This code path is not used during construction.
7478 */
7479static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7480{
7481 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7482 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7483 LogFlow(("%s e1kR3Attach:\n", pThis->szPrf));
7484 RT_NOREF(fFlags);
7485
7486 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7487
7488 PDMDevHlpCritSectEnter(pDevIns, &pThis->cs, VERR_SEM_BUSY);
7489
7490 /*
7491 * Attach the driver.
7492 */
7493 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThisCC->IBase, &pThisCC->pDrvBase, "Network Port");
7494 if (RT_SUCCESS(rc))
7495 {
7496 if (rc == VINF_NAT_DNS)
7497 {
7498#ifdef RT_OS_LINUX
7499 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7500 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7501#else
7502 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7503 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7504#endif
7505 }
7506 pThisCC->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMINETWORKUP);
7507 AssertMsgStmt(pThisCC->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7508 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7509 if (RT_SUCCESS(rc))
7510 {
7511#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
7512 pThisR0->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7513 pThisRC->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7514#endif
7515 }
7516 }
7517 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7518 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7519 {
7520 /* This should never happen because this function is not called
7521 * if there is no driver to attach! */
7522 Log(("%s No attached driver!\n", pThis->szPrf));
7523 }
7524
7525 /*
7526     * Temporarily set the link down if it was up so that the guest will know
7527     * that we have changed the configuration of the network card.
7528 */
7529 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7530 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
7531
7532 PDMDevHlpCritSectLeave(pDevIns, &pThis->cs);
7533 return rc;
7534}
7535
7536/**
7537 * @copydoc FNPDMDEVPOWEROFF
7538 */
7539static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7540{
7541 /* Poke thread waiting for buffer space. */
7542 e1kWakeupReceive(pDevIns, PDMINS_2_DATA(pDevIns, PE1KSTATE));
7543}
7544
7545/**
7546 * @copydoc FNPDMDEVRESET
7547 */
7548static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7549{
7550 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7551 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7552#ifdef E1K_TX_DELAY
7553 e1kCancelTimer(pDevIns, pThis, pThis->hTXDTimer);
7554#endif /* E1K_TX_DELAY */
7555 e1kCancelTimer(pDevIns, pThis, pThis->hIntTimer);
7556 e1kCancelTimer(pDevIns, pThis, pThis->hLUTimer);
7557 e1kXmitFreeBuf(pThis, pThisCC);
7558 pThis->u16TxPktLen = 0;
7559 pThis->fIPcsum = false;
7560 pThis->fTCPcsum = false;
7561 pThis->fIntMaskUsed = false;
7562 pThis->fDelayInts = false;
7563 pThis->fLocked = false;
7564 pThis->u64AckedAt = 0;
7565 e1kR3HardReset(pDevIns, pThis, pThisCC);
7566}
7567
7568/**
7569 * @copydoc FNPDMDEVSUSPEND
7570 */
7571static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7572{
7573 /* Poke thread waiting for buffer space. */
7574 e1kWakeupReceive(pDevIns, PDMINS_2_DATA(pDevIns, PE1KSTATE));
7575}
7576
7577/**
7578 * Device relocation callback.
7579 *
7580 * When this callback is called the device instance data, and if the
7581 * device has a GC component, is being relocated, and/or the selectors
7582 * have been changed. The device must use the chance to perform the
7583 * necessary pointer relocations and data updates.
7584 *
7585 * Before the GC code is executed the first time, this function will be
7586 * called with a 0 delta so GC pointer calculations can be done in one place.
7587 *
7588 * @param pDevIns Pointer to the device instance.
7589 * @param offDelta The relocation delta relative to the old location.
7590 *
7591 * @remark A relocation CANNOT fail.
7592 */
7593static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7594{
7595 PE1KSTATERC pThisRC = PDMINS_2_DATA_RC(pDevIns, PE1KSTATERC);
7596 if (pThisRC)
7597 pThisRC->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7598 RT_NOREF(offDelta);
7599}
7600
7601/**
7602 * Destruct a device instance.
7603 *
7604 * We need to free non-VM resources only.
7605 *
7606 * @returns VBox status code.
7607 * @param pDevIns The device instance data.
7608 * @thread EMT
7609 */
7610static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7611{
7612 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7613 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7614
7615 e1kDumpState(pThis);
7616 E1kLog(("%s Destroying instance\n", pThis->szPrf));
7617 if (PDMDevHlpCritSectIsInitialized(pDevIns, &pThis->cs))
7618 {
7619 if (pThis->hEventMoreRxDescAvail != NIL_SUPSEMEVENT)
7620 {
7621 PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventMoreRxDescAvail);
7622 RTThreadYield();
7623 PDMDevHlpSUPSemEventClose(pDevIns, pThis->hEventMoreRxDescAvail);
7624 pThis->hEventMoreRxDescAvail = NIL_SUPSEMEVENT;
7625 }
7626#ifdef E1K_WITH_TX_CS
7627 PDMDevHlpCritSectDelete(pDevIns, &pThis->csTx);
7628#endif /* E1K_WITH_TX_CS */
7629 PDMDevHlpCritSectDelete(pDevIns, &pThis->csRx);
7630 PDMDevHlpCritSectDelete(pDevIns, &pThis->cs);
7631 }
7632 return VINF_SUCCESS;
7633}
7634
7635
7636/**
7637 * Set PCI configuration space registers.
7638 *
7639 * @param pPciDev Pointer to the PCI device structure.
 * @param eChip The chip variant being emulated (index into g_aChips).
7640 * @thread EMT
7641 */
7642static void e1kR3ConfigurePciDev(PPDMPCIDEV pPciDev, E1KCHIP eChip)
7643{
7644 Assert(eChip < RT_ELEMENTS(g_aChips));
7645 /* Configure PCI Device, assume 32-bit mode ******************************/
7646 PDMPciDevSetVendorId(pPciDev, g_aChips[eChip].uPCIVendorId);
7647 PDMPciDevSetDeviceId(pPciDev, g_aChips[eChip].uPCIDeviceId);
7648 PDMPciDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_aChips[eChip].uPCISubsystemVendorId);
7649 PDMPciDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_aChips[eChip].uPCISubsystemId);
7650
7651 PDMPciDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7652 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7653 PDMPciDevSetWord( pPciDev, VBOX_PCI_STATUS,
7654 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7655 /* Stepping A2 */
7656 PDMPciDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7657 /* Ethernet adapter */
7658 PDMPciDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7659 PDMPciDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7660 /* normal single function Ethernet controller */
7661 PDMPciDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7662 /* Memory Register Base Address */
7663 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7664 /* Memory Flash Base Address */
7665 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7666 /* IO Register Base Address */
7667 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7668 /* Expansion ROM Base Address */
7669 PDMPciDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7670 /* Capabilities Pointer */
7671 PDMPciDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7672 /* Interrupt Pin: INTA# */
7673 PDMPciDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7674 /* Max_Lat/Min_Gnt: very high priority and time slice */
7675 PDMPciDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7676 PDMPciDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7677
7678 /* PCI Power Management Registers ****************************************/
7679 /* Capability ID: PCI Power Management Registers */
7680 PDMPciDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7681 /* Next Item Pointer: PCI-X */
7682 PDMPciDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7683 /* Power Management Capabilities: PM disabled, DSI */
7684 PDMPciDevSetWord( pPciDev, 0xDC + 2,
7685 0x0002 | VBOX_PCI_PM_CAP_DSI);
7686 /* Power Management Control / Status Register: PM disabled */
7687 PDMPciDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7688 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7689 PDMPciDevSetByte( pPciDev, 0xDC + 6, 0x00);
7690 /* Data Register: PM disabled, always 0 */
7691 PDMPciDevSetByte( pPciDev, 0xDC + 7, 0x00);
7692
7693 /* PCI-X Configuration Registers *****************************************/
7694 /* Capability ID: PCI-X Configuration Registers */
7695 PDMPciDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7696#ifdef E1K_WITH_MSI
7697 PDMPciDevSetByte( pPciDev, 0xE4 + 1, 0x80); /* Next Item Pointer: MSI capability at 0x80 */
7698#else
7699 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7700 PDMPciDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7701#endif
7702 /* PCI-X Command: Enable Relaxed Ordering */
7703 PDMPciDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7704 /* PCI-X Status: 32-bit, 66 MHz */
7705 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7706 PDMPciDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
7707}
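/*
 * The registers set above link the standard PCI capability chain: the
 * capabilities pointer at 0x34 points to the power management capability at
 * 0xDC, whose next pointer leads to the PCI-X capability at 0xE4; the PCI-X
 * next pointer either terminates the chain (0x00) or, with E1K_WITH_MSI, goes
 * on to the MSI capability at 0x80.  A minimal, self-contained sketch of
 * walking such a chain from a raw 256-byte config-space image follows; the
 * function and the abCfg buffer are hypothetical, not part of this device.
 */
#if 0 /* illustrative sketch, not compiled */
static void e1kSketchDumpCapChain(const uint8_t abCfg[256])
{
    uint8_t offCap = abCfg[0x34];                   /* standard capabilities pointer */
    while (offCap >= 0x40)                          /* capabilities live above the standard header */
    {
        uint8_t const idCap   = abCfg[offCap];      /* capability ID           */
        uint8_t const offNext = abCfg[offCap + 1];  /* next capability pointer */
        LogRel(("cap %#04x at %#04x, next %#04x\n", idCap, offCap, offNext));
        offCap = offNext;
    }
}
#endif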
7708
7709/**
7710 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7711 */
7712static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7713{
7714 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7715 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7716 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7717 int rc;
7718
7719 /*
7720 * Initialize the instance data (state).
7721 * Note! Caller has initialized it to ZERO already.
7722 */
7723 RTStrPrintf(pThis->szPrf, sizeof(pThis->szPrf), "E1000#%d", iInstance);
7724 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", pThis->szPrf, sizeof(E1KRXDESC)));
7725 pThis->hEventMoreRxDescAvail = NIL_SUPSEMEVENT;
7726 pThis->u16TxPktLen = 0;
7727 pThis->fIPcsum = false;
7728 pThis->fTCPcsum = false;
7729 pThis->fIntMaskUsed = false;
7730 pThis->fDelayInts = false;
7731 pThis->fLocked = false;
7732 pThis->u64AckedAt = 0;
7733 pThis->led.u32Magic = PDMLED_MAGIC;
7734 pThis->u32PktNo = 1;
7735
7736 pThisCC->pDevInsR3 = pDevIns;
7737 pThisCC->pShared = pThis;
7738
7739 /* Interfaces */
7740 pThisCC->IBase.pfnQueryInterface = e1kR3QueryInterface;
7741
7742 pThisCC->INetworkDown.pfnWaitReceiveAvail = e1kR3NetworkDown_WaitReceiveAvail;
7743 pThisCC->INetworkDown.pfnReceive = e1kR3NetworkDown_Receive;
7744 pThisCC->INetworkDown.pfnXmitPending = e1kR3NetworkDown_XmitPending;
7745
7746 pThisCC->ILeds.pfnQueryStatusLed = e1kR3QueryStatusLed;
7747
7748 pThisCC->INetworkConfig.pfnGetMac = e1kR3GetMac;
7749 pThisCC->INetworkConfig.pfnGetLinkState = e1kR3GetLinkState;
7750 pThisCC->INetworkConfig.pfnSetLinkState = e1kR3SetLinkState;
7751
7752 /*
7753 * Internal validations.
7754 */
7755 for (uint32_t iReg = 1; iReg < E1K_NUM_OF_BINARY_SEARCHABLE; iReg++)
7756 AssertLogRelMsgReturn( g_aE1kRegMap[iReg].offset > g_aE1kRegMap[iReg - 1].offset
7757 && g_aE1kRegMap[iReg].offset + g_aE1kRegMap[iReg].size
7758 >= g_aE1kRegMap[iReg - 1].offset + g_aE1kRegMap[iReg - 1].size,
7759 ("%s@%#xLB%#x vs %s@%#xLB%#x\n",
7760 g_aE1kRegMap[iReg].abbrev, g_aE1kRegMap[iReg].offset, g_aE1kRegMap[iReg].size,
7761 g_aE1kRegMap[iReg - 1].abbrev, g_aE1kRegMap[iReg - 1].offset, g_aE1kRegMap[iReg - 1].size),
7762 VERR_INTERNAL_ERROR_4);
7763
7764 /*
7765 * Validate configuration.
7766 */
7767 PDMDEV_VALIDATE_CONFIG_RETURN(pDevIns,
7768 "MAC|"
7769 "CableConnected|"
7770 "AdapterType|"
7771 "LineSpeed|"
7772 "ItrEnabled|"
7773 "ItrRxEnabled|"
7774 "EthernetCRC|"
7775 "GSOEnabled|"
7776 "LinkUpDelay", "");
7777
7778 /** @todo LineSpeed unused! */
7779
7780 /*
7781 * Get config params
7782 */
7783 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
7784 rc = pHlp->pfnCFGMQueryBytes(pCfg, "MAC", pThis->macConfigured.au8, sizeof(pThis->macConfigured.au8));
7785 if (RT_FAILURE(rc))
7786 return PDMDEV_SET_ERROR(pDevIns, rc,
7787 N_("Configuration error: Failed to get MAC address"));
7788 rc = pHlp->pfnCFGMQueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
7789 if (RT_FAILURE(rc))
7790 return PDMDEV_SET_ERROR(pDevIns, rc,
7791 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7792 rc = pHlp->pfnCFGMQueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
7793 if (RT_FAILURE(rc))
7794 return PDMDEV_SET_ERROR(pDevIns, rc,
7795 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7796 Assert(pThis->eChip <= E1K_CHIP_82545EM);
7797
7798 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
7799 if (RT_FAILURE(rc))
7800 return PDMDEV_SET_ERROR(pDevIns, rc,
7801 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7802
7803 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
7804 if (RT_FAILURE(rc))
7805 return PDMDEV_SET_ERROR(pDevIns, rc,
7806 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
7807
7808 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "ItrEnabled", &pThis->fItrEnabled, false);
7809 if (RT_FAILURE(rc))
7810 return PDMDEV_SET_ERROR(pDevIns, rc,
7811 N_("Configuration error: Failed to get the value of 'ItrEnabled'"));
7812
7813 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "ItrRxEnabled", &pThis->fItrRxEnabled, true);
7814 if (RT_FAILURE(rc))
7815 return PDMDEV_SET_ERROR(pDevIns, rc,
7816 N_("Configuration error: Failed to get the value of 'ItrRxEnabled'"));
7817
7818 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "TidEnabled", &pThis->fTidEnabled, false);
7819 if (RT_FAILURE(rc))
7820 return PDMDEV_SET_ERROR(pDevIns, rc,
7821 N_("Configuration error: Failed to get the value of 'TidEnabled'"));
7822
7823 rc = pHlp->pfnCFGMQueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 3000); /* ms */
7824 if (RT_FAILURE(rc))
7825 return PDMDEV_SET_ERROR(pDevIns, rc,
7826 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
7827 Assert(pThis->cMsLinkUpDelay <= 300000); /* at most 5 minutes */
7828 if (pThis->cMsLinkUpDelay > 5000)
7829 LogRel(("%s: WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
7830 else if (pThis->cMsLinkUpDelay == 0)
7831 LogRel(("%s: WARNING! Link up delay is disabled!\n", pThis->szPrf));
7832
7833 LogRel(("%s: Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s Itr=%s ItrRx=%s TID=%s R0=%s RC=%s\n", pThis->szPrf,
7834 g_aChips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
7835 pThis->fEthernetCRC ? "on" : "off",
7836 pThis->fGSOEnabled ? "enabled" : "disabled",
7837 pThis->fItrEnabled ? "enabled" : "disabled",
7838 pThis->fItrRxEnabled ? "enabled" : "disabled",
7839 pThis->fTidEnabled ? "enabled" : "disabled",
7840 pDevIns->fR0Enabled ? "enabled" : "disabled",
7841 pDevIns->fRCEnabled ? "enabled" : "disabled"));
7842
7843 /*
7844 * Initialize sub-components and register everything with the VMM.
7845 */
7846
7847 /* Initialize the EEPROM. */
7848 pThisCC->eeprom.init(pThis->macConfigured);
7849
7850 /* Initialize internal PHY. */
7851 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
7852
7853 /* Initialize critical sections. We do our own locking. */
7854 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
7855 AssertRCReturn(rc, rc);
7856
7857 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "E1000#%d", iInstance);
7858 AssertRCReturn(rc, rc);
7859 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "E1000#%dRX", iInstance);
7860 AssertRCReturn(rc, rc);
7861#ifdef E1K_WITH_TX_CS
7862 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "E1000#%dTX", iInstance);
7863 AssertRCReturn(rc, rc);
7864#endif
7865
7866 /* Saved state registration. */
7867 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
7868 NULL, e1kLiveExec, NULL,
7869 e1kSavePrep, e1kSaveExec, NULL,
7870 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
7871 AssertRCReturn(rc, rc);
7872
7873 /* Set PCI config registers and register ourselves with the PCI bus. */
7874 PDMPCIDEV_ASSERT_VALID(pDevIns, pDevIns->apPciDevs[0]);
7875 e1kR3ConfigurePciDev(pDevIns->apPciDevs[0], pThis->eChip);
7876 rc = PDMDevHlpPCIRegister(pDevIns, pDevIns->apPciDevs[0]);
7877 AssertRCReturn(rc, rc);
7878
7879#ifdef E1K_WITH_MSI
7880 PDMMSIREG MsiReg;
7881 RT_ZERO(MsiReg);
7882 MsiReg.cMsiVectors = 1;
7883 MsiReg.iMsiCapOffset = 0x80;
7884 MsiReg.iMsiNextOffset = 0x0;
7885 MsiReg.fMsi64bit = false;
7886 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
7887 AssertRCReturn(rc, rc);
7888#endif
7889
7890 /*
7891 * Map our registers to memory space (region 0, see e1kR3ConfigurePciDev)
7892 * From the spec (regarding flags):
7893 * For registers that should be accessed as 32-bit double words,
7894 * partial writes (less than a 32-bit double word) are ignored.
7895 * Partial reads return all 32 bits of data regardless of the
7896 * byte enables.
7897 */
7898 rc = PDMDevHlpMmioCreateEx(pDevIns, E1K_MM_SIZE, IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD,
7899 pDevIns->apPciDevs[0], 0 /*iPciRegion*/,
7900 e1kMMIOWrite, e1kMMIORead, NULL /*pfnFill*/, NULL /*pvUser*/, "E1000", &pThis->hMmioRegion);
7901 AssertRCReturn(rc, rc);
7902 rc = PDMDevHlpPCIIORegionRegisterMmio(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, pThis->hMmioRegion, e1kR3Map);
7903 AssertRCReturn(rc, rc);
7904
7905 /* Map our registers to IO space (region 2, see e1kR3ConfigurePciDev); an illustrative access sketch follows this constructor. */
7906 static IOMIOPORTDESC const s_aExtDescs[] =
7907 {
7908 { "IOADDR", "IOADDR", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL },
7909 { "IODATA", "IODATA", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL },
7910 { NULL, NULL, NULL, NULL }
7911 };
7912 rc = PDMDevHlpIoPortCreate(pDevIns, E1K_IOPORT_SIZE, pDevIns->apPciDevs[0], 2 /*iPciRegion*/,
7913 e1kIOPortOut, e1kIOPortIn, NULL /*pvUser*/, "E1000", s_aExtDescs, &pThis->hIoPorts);
7914 AssertRCReturn(rc, rc);
7915 rc = PDMDevHlpPCIIORegionRegisterIo(pDevIns, 2, E1K_IOPORT_SIZE, pThis->hIoPorts, e1kR3Map);
7916 AssertRCReturn(rc, rc);
7917
7918 /* Create transmit queue */
7919 rc = PDMDevHlpTaskCreate(pDevIns, PDMTASK_F_RZ, "E1000-Xmit", e1kR3TxTaskCallback, NULL, &pThis->hTxTask);
7920 AssertRCReturn(rc, rc);
7921
7922#ifdef E1K_TX_DELAY
7923 /* Create Transmit Delay Timer */
7924 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7925 "E1000 Transmit Delay Timer", &pThis->hTXDTimer);
7926 AssertRCReturn(rc, rc);
7927 rc = PDMDevHlpTimerSetCritSect(pDevIns, pThis->hTXDTimer, &pThis->csTx);
7928 AssertRCReturn(rc, rc);
7929#endif /* E1K_TX_DELAY */
7930
7931//#ifdef E1K_USE_TX_TIMERS
7932 if (pThis->fTidEnabled)
7933 {
7934 /* Create Transmit Interrupt Delay Timer */
7935 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxIntDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7936 "E1000 Transmit Interrupt Delay Timer", &pThis->hTIDTimer);
7937 AssertRCReturn(rc, rc);
7938
7939# ifndef E1K_NO_TAD
7940 /* Create Transmit Absolute Delay Timer */
7941 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxAbsDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7942 "E1000 Transmit Absolute Delay Timer", &pThis->hTADTimer);
7943 AssertRCReturn(rc, rc);
7944# endif /* E1K_NO_TAD */
7945 }
7946//#endif /* E1K_USE_TX_TIMERS */
7947
7948#ifdef E1K_USE_RX_TIMERS
7949 /* Create Receive Interrupt Delay Timer */
7950 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3RxIntDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7951 "E1000 Receive Interrupt Delay Timer", &pThis->hRIDTimer);
7952 AssertRCReturn(rc, rc);
7953
7954 /* Create Receive Absolute Delay Timer */
7955 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3RxAbsDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7956 "E1000 Receive Absolute Delay Timer", &pThis->hRADTimer);
7957 AssertRCReturn(rc, rc);
7958#endif /* E1K_USE_RX_TIMERS */
7959
7960 /* Create Late Interrupt Timer */
7961 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3LateIntTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7962 "E1000 Late Interrupt Timer", &pThis->hIntTimer);
7963 AssertRCReturn(rc, rc);
7964
7965 /* Create Link Up Timer */
7966 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3LinkUpTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7967 "E1000 Link Up Timer", &pThis->hLUTimer);
7968 AssertRCReturn(rc, rc);
7969
7970 /* Register the info item */
7971 char szTmp[20];
7972 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
7973 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
7974
7975 /* Status driver */
7976 PPDMIBASE pBase;
7977 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThisCC->IBase, &pBase, "Status Port");
7978 if (RT_FAILURE(rc))
7979 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
7980 pThisCC->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
7981
7982 /* Network driver */
7983 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThisCC->IBase, &pThisCC->pDrvBase, "Network Port");
7984 if (RT_SUCCESS(rc))
7985 {
7986 if (rc == VINF_NAT_DNS)
7987 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7988 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7989 pThisCC->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMINETWORKUP);
7990 AssertMsgReturn(pThisCC->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
7991
7992#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
7993 pThisR0->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7994 pThisRC->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7995#endif
7996 }
7997 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7998 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7999 {
8000 /* No error! */
8001 E1kLog(("%s This adapter is not attached to any network!\n", pThis->szPrf));
8002 }
8003 else
8004 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
8005
8006 rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->hEventMoreRxDescAvail);
8007 AssertRCReturn(rc, rc);
8008
8009 rc = e1kInitDebugHelpers();
8010 AssertRCReturn(rc, rc);
8011
8012 e1kR3HardReset(pDevIns, pThis, pThisCC);
8013
8014 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Public/Net/E1k%u/BytesReceived", iInstance);
8015 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Public/Net/E1k%u/BytesTransmitted", iInstance);
8016
8017 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
8018 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
8019
8020#if defined(VBOX_WITH_STATISTICS)
8021 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
8022 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
8023 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
8024 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
8025 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
8026 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
8027 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
8028 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
8029 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
8030 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
8031 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
8032 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occured", iInstance);
8033 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
8034 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
8035 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
8036 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
8037 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
8038 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
8039 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
8040 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflowWakeupRZ, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups in RZ", "/Devices/E1k%d/RxOverflowWakeupRZ", iInstance);
8041 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflowWakeupR3, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups in R3", "/Devices/E1k%d/RxOverflowWakeupR3", iInstance);
8042 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
8043 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
8044 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
8045 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
8046
8047 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors","/Devices/E1k%d/TxDesc/ContexNormal", iInstance);
8048 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
8049 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
8050 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
8051 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
8052 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
8053 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
8054 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
8055 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
8056 for (unsigned iReg = 0; iReg < E1K_NUM_OF_REGS; iReg++)
8057 {
8058 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
8059 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Reads", iInstance, g_aE1kRegMap[iReg].abbrev);
8060 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
8061 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Writes", iInstance, g_aE1kRegMap[iReg].abbrev);
8062 }
8063#endif /* VBOX_WITH_STATISTICS */
8064
8065#ifdef E1K_INT_STATS
8066 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "u64ArmedAt", "/Devices/E1k%d/u64ArmedAt", iInstance);
8067 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatMaxTxDelay", "/Devices/E1k%d/uStatMaxTxDelay", iInstance);
8068 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatInt, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatInt", "/Devices/E1k%d/uStatInt", iInstance);
8069 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTry", "/Devices/E1k%d/uStatIntTry", iInstance);
8070 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLower", "/Devices/E1k%d/uStatIntLower", iInstance);
8071 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatNoIntICR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatNoIntICR", "/Devices/E1k%d/uStatNoIntICR", iInstance);
8072 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLost", "/Devices/E1k%d/iStatIntLost", iInstance);
8073 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLostOne", "/Devices/E1k%d/iStatIntLostOne", iInstance);
8074 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntIMS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntIMS", "/Devices/E1k%d/uStatIntIMS", iInstance);
8075 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntSkip", "/Devices/E1k%d/uStatIntSkip", iInstance);
8076 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLate", "/Devices/E1k%d/uStatIntLate", iInstance);
8077 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntMasked", "/Devices/E1k%d/uStatIntMasked", iInstance);
8078 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntEarly", "/Devices/E1k%d/uStatIntEarly", iInstance);
8079 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRx", "/Devices/E1k%d/uStatIntRx", iInstance);
8080 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTx", "/Devices/E1k%d/uStatIntTx", iInstance);
8081 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntICS", "/Devices/E1k%d/uStatIntICS", iInstance);
8082 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRDTR", "/Devices/E1k%d/uStatIntRDTR", iInstance);
8083 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRXDMT0", "/Devices/E1k%d/uStatIntRXDMT0", iInstance);
8084 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTXQE", "/Devices/E1k%d/uStatIntTXQE", iInstance);
8085 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxNoRS", "/Devices/E1k%d/uStatTxNoRS", iInstance);
8086 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxIDE", "/Devices/E1k%d/uStatTxIDE", iInstance);
8087 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayed", "/Devices/E1k%d/uStatTxDelayed", iInstance);
8088 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayExp", "/Devices/E1k%d/uStatTxDelayExp", iInstance);
8089 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTAD", "/Devices/E1k%d/uStatTAD", iInstance);
8090 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTID", "/Devices/E1k%d/uStatTID", iInstance);
8091 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRAD", "/Devices/E1k%d/uStatRAD", iInstance);
8092 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRID", "/Devices/E1k%d/uStatRID", iInstance);
8093 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRxFrm", "/Devices/E1k%d/uStatRxFrm", iInstance);
8094 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxFrm", "/Devices/E1k%d/uStatTxFrm", iInstance);
8095 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescCtx", "/Devices/E1k%d/uStatDescCtx", iInstance);
8096 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescDat", "/Devices/E1k%d/uStatDescDat", iInstance);
8097 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescLeg", "/Devices/E1k%d/uStatDescLeg", iInstance);
8098 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx1514", "/Devices/E1k%d/uStatTx1514", iInstance);
8099 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx2962", "/Devices/E1k%d/uStatTx2962", iInstance);
8100 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx4410", "/Devices/E1k%d/uStatTx4410", iInstance);
8101 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx5858", "/Devices/E1k%d/uStatTx5858", iInstance);
8102 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx7306", "/Devices/E1k%d/uStatTx7306", iInstance);
8103 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx8754", "/Devices/E1k%d/uStatTx8754", iInstance);
8104 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx16384", "/Devices/E1k%d/uStatTx16384", iInstance);
8105 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx32768", "/Devices/E1k%d/uStatTx32768", iInstance);
8106 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxLarge", "/Devices/E1k%d/uStatTxLarge", iInstance);
8107#endif /* E1K_INT_STATS */
8108
8109 return VINF_SUCCESS;
8110}
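/*
 * Access illustration for the two regions mapped in the constructor above: the
 * MMIO region is registered with IOMMMIO_FLAGS_READ_DWORD and
 * IOMMMIO_FLAGS_WRITE_ONLY_DWORD, so the handlers only ever see whole 32-bit
 * accesses, matching the spec excerpt quoted there.  The I/O region (BAR2) is
 * the usual indirect window described by s_aExtDescs: the register offset is
 * written to IOADDR at BAR2+0 and the data is then transferred through IODATA
 * at BAR2+4.  A minimal guest-side sketch, assuming a hypothetical uIoBase
 * taken from BAR2 (illustrative only, not part of the device code):
 */
#if 0 /* illustrative sketch, not compiled */
static uint32_t e1kSketchIoWindowRead(RTIOPORT uIoBase, uint32_t offReg)
{
    ASMOutU32(uIoBase + 0, offReg);   /* IOADDR: select the register             */
    return ASMInU32(uIoBase + 4);     /* IODATA: read the selected register back */
}
/* Usage: uint32_t uCtrl = e1kSketchIoWindowRead(uIoBase, 0); offset 0 is CTRL. */
#endif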
8111
8112#else /* !IN_RING3 */
8113
8114/**
8115 * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
8116 */
8117static DECLCALLBACK(int) e1kRZConstruct(PPDMDEVINS pDevIns)
8118{
8119 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
8120 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
8121 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
8122
8123 /* Initialize context specific state data: */
8124 pThisCC->CTX_SUFF(pDevIns) = pDevIns;
8125 /** @todo @bugref{9218} ring-0 driver stuff */
8126 pThisCC->CTX_SUFF(pDrv) = NULL;
8127 pThisCC->CTX_SUFF(pTxSg) = NULL;
8128
8129 /* Configure critical sections the same way: */
8130 int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
8131 AssertRCReturn(rc, rc);
8132
8133 /* Set up MMIO and I/O port callbacks for this context: */
8134 rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmioRegion, e1kMMIOWrite, e1kMMIORead, NULL /*pvUser*/);
8135 AssertRCReturn(rc, rc);
8136
8137 rc = PDMDevHlpIoPortSetUpContext(pDevIns, pThis->hIoPorts, e1kIOPortOut, e1kIOPortIn, NULL /*pvUser*/);
8138 AssertRCReturn(rc, rc);
8139
8140 return VINF_SUCCESS;
8141}
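/*
 * Unlike e1kR3Construct, this context constructor neither parses the
 * configuration nor creates timers or attaches drivers: the shared E1KSTATE
 * has already been set up by ring-3, so ring-0/raw-mode only registers its own
 * MMIO and I/O port handlers and mirrors the no-op device critical section.
 */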
8142
8143#endif /* !IN_RING3 */
8144
8145/**
8146 * The device registration structure.
8147 */
8148const PDMDEVREG g_DeviceE1000 =
8149{
8150 /* .u32Version = */ PDM_DEVREG_VERSION,
8151 /* .uReserved0 = */ 0,
8152 /* .szName = */ "e1000",
8153 /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0 | PDM_DEVREG_FLAGS_NEW_STYLE,
8154 /* .fClass = */ PDM_DEVREG_CLASS_NETWORK,
8155 /* .cMaxInstances = */ ~0U,
8156 /* .uSharedVersion = */ 42,
8157 /* .cbInstanceShared = */ sizeof(E1KSTATE),
8158 /* .cbInstanceCC = */ sizeof(E1KSTATECC),
8159 /* .cbInstanceRC = */ sizeof(E1KSTATERC),
8160 /* .cMaxPciDevices = */ 1,
8161 /* .cMaxMsixVectors = */ 0,
8162 /* .pszDescription = */ "Intel PRO/1000 MT Desktop Ethernet.",
8163#if defined(IN_RING3)
8164 /* .pszRCMod = */ "VBoxDDRC.rc",
8165 /* .pszR0Mod = */ "VBoxDDR0.r0",
8166 /* .pfnConstruct = */ e1kR3Construct,
8167 /* .pfnDestruct = */ e1kR3Destruct,
8168 /* .pfnRelocate = */ e1kR3Relocate,
8169 /* .pfnMemSetup = */ NULL,
8170 /* .pfnPowerOn = */ NULL,
8171 /* .pfnReset = */ e1kR3Reset,
8172 /* .pfnSuspend = */ e1kR3Suspend,
8173 /* .pfnResume = */ NULL,
8174 /* .pfnAttach = */ e1kR3Attach,
8175 /* .pfnDetach = */ e1kR3Detach,
8176 /* .pfnQueryInterface = */ NULL,
8177 /* .pfnInitComplete = */ NULL,
8178 /* .pfnPowerOff = */ e1kR3PowerOff,
8179 /* .pfnSoftReset = */ NULL,
8180 /* .pfnReserved0 = */ NULL,
8181 /* .pfnReserved1 = */ NULL,
8182 /* .pfnReserved2 = */ NULL,
8183 /* .pfnReserved3 = */ NULL,
8184 /* .pfnReserved4 = */ NULL,
8185 /* .pfnReserved5 = */ NULL,
8186 /* .pfnReserved6 = */ NULL,
8187 /* .pfnReserved7 = */ NULL,
8188#elif defined(IN_RING0)
8189 /* .pfnEarlyConstruct = */ NULL,
8190 /* .pfnConstruct = */ e1kRZConstruct,
8191 /* .pfnDestruct = */ NULL,
8192 /* .pfnFinalDestruct = */ NULL,
8193 /* .pfnRequest = */ NULL,
8194 /* .pfnReserved0 = */ NULL,
8195 /* .pfnReserved1 = */ NULL,
8196 /* .pfnReserved2 = */ NULL,
8197 /* .pfnReserved3 = */ NULL,
8198 /* .pfnReserved4 = */ NULL,
8199 /* .pfnReserved5 = */ NULL,
8200 /* .pfnReserved6 = */ NULL,
8201 /* .pfnReserved7 = */ NULL,
8202#elif defined(IN_RC)
8203 /* .pfnConstruct = */ e1kRZConstruct,
8204 /* .pfnReserved0 = */ NULL,
8205 /* .pfnReserved1 = */ NULL,
8206 /* .pfnReserved2 = */ NULL,
8207 /* .pfnReserved3 = */ NULL,
8208 /* .pfnReserved4 = */ NULL,
8209 /* .pfnReserved5 = */ NULL,
8210 /* .pfnReserved6 = */ NULL,
8211 /* .pfnReserved7 = */ NULL,
8212#else
8213# error "Not in IN_RING3, IN_RING0 or IN_RC!"
8214#endif
8215 /* .u32VersionEnd = */ PDM_DEVREG_VERSION
8216};
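/*
 * For reference: a PDMDEVREG like g_DeviceE1000 is normally handed to PDM by
 * the device module's registration entry point (in VirtualBox that code lives
 * in VBoxDD.cpp, not in this file).  A minimal sketch of that pattern, shown
 * for illustration only:
 */
#if 0 /* illustrative sketch, not compiled */
extern "C" DECLEXPORT(int) VBoxDevicesRegister(PPDMDEVREGCB pCallbacks, uint32_t u32Version)
{
    RT_NOREF(u32Version); /* a real implementation would verify this against the VMM version */
    /* Hand the registration record to PDM; additional devices would be
       registered here the same way. */
    return pCallbacks->pfnRegister(pCallbacks, &g_DeviceE1000);
}
#endif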
8217
8218#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */