VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp@87093

Last change on this file since 87093 was 87071, checked in by vboxsync, 4 years ago

Dev/E1000: (bugref:9883) No more zero UDP checksums.

1/* $Id: DevE1000.cpp 87071 2020-12-09 15:24:48Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2020 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DEV_E1000
33#include <iprt/crc.h>
34#include <iprt/ctype.h>
35#include <iprt/net.h>
36#include <iprt/semaphore.h>
37#include <iprt/string.h>
38#include <iprt/time.h>
39#include <iprt/uuid.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/vmm/pdmnetifs.h>
42#include <VBox/vmm/pdmnetinline.h>
43#include <VBox/param.h>
44#include "VBoxDD.h"
45
46#include "DevEEPROM.h"
47#include "DevE1000Phy.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** @name E1000 Build Options
54 * @{ */
55/** @def E1K_INIT_RA0
56 * E1K_INIT_RA0 forces the E1000 to set the first entry of the Receive Address
57 * filter table to the MAC address obtained from CFGM. Most guests read the MAC
58 * address from the EEPROM and write it to RA[0] explicitly, but Mac OS X seems
59 * to depend on it being already set (see @bugref{4657}).
60 */
61#define E1K_INIT_RA0
62/** @def E1K_LSC_ON_RESET
63 * E1K_LSC_ON_RESET causes the E1000 to generate a Link Status Change
64 * interrupt after a hard reset. This makes the E1K_LSC_ON_SLU option unnecessary.
65 * With the cable unplugged, LSC is triggered for the 82543GC only.
66 */
67#define E1K_LSC_ON_RESET
68/** @def E1K_LSC_ON_SLU
69 * E1K_LSC_ON_SLU causes the E1000 to generate a Link Status Change interrupt when
70 * the guest driver brings up the link via the STATUS.LU bit. Again, the only guest
71 * that requires it is Mac OS X (see @bugref{4657}).
72 */
73//#define E1K_LSC_ON_SLU
74/** @def E1K_INIT_LINKUP_DELAY_US
75 * E1K_INIT_LINKUP_DELAY_US prevents the link from coming up while the driver is
76 * still initializing (see @bugref{8624}).
77 */
78#define E1K_INIT_LINKUP_DELAY_US (2000 * 1000)
79/** @def E1K_IMS_INT_DELAY_NS
80 * E1K_IMS_INT_DELAY_NS prevents interrupt storms in Windows guests on enabling
81 * interrupts (see @bugref{8624}).
82 */
83#define E1K_IMS_INT_DELAY_NS 100
84/** @def E1K_TX_DELAY
85 * E1K_TX_DELAY aims to improve the guest-to-host transfer rate for TCP streams
86 * by preventing packets from being sent immediately. It allows several packets
87 * to be sent in one batch, reducing the number of acknowledgments. Note that it
88 * effectively disables the R0 TX path, forcing transmission in R3.
89 */
90//#define E1K_TX_DELAY 150
91/** @def E1K_USE_TX_TIMERS
92 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
93 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
94 * register. Enabling it showed no positive effect on existing guests, so it
95 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
96 * Ethernet Controllers Software Developer’s Manual" for more detailed
97 * explanation.
98 */
99//#define E1K_USE_TX_TIMERS
100/** @def E1K_NO_TAD
101 * E1K_NO_TAD disables one of the two timers enabled by E1K_USE_TX_TIMERS, the
102 * Transmit Absolute Delay timer. This timer sets the maximum time interval
103 * during which TX interrupts can be postponed (delayed). It has no effect
104 * if E1K_USE_TX_TIMERS is not defined.
105 */
106//#define E1K_NO_TAD
107/** @def E1K_REL_DEBUG
108 * E1K_REL_DEBUG enables debug logging at levels 1-3 (l1, l2, l3) in release builds.
109 */
110//#define E1K_REL_DEBUG
111/** @def E1K_INT_STATS
112 * E1K_INT_STATS enables collection of internal statistics used for
113 * debugging of delayed interrupts, etc.
114 */
115#define E1K_INT_STATS
116/** @def E1K_WITH_MSI
117 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
118 */
119//#define E1K_WITH_MSI
120/** @def E1K_WITH_TX_CS
121 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
122 */
123#define E1K_WITH_TX_CS
124/** @def E1K_WITH_TXD_CACHE
125 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
126 * single physical memory read (or two if it wraps around the end of TX
127 * descriptor ring). It is required for proper functioning of bandwidth
128 * resource control as it allows computing the exact sizes of packets prior
129 * to allocating their buffers (see @bugref{5582}).
130 */
131#define E1K_WITH_TXD_CACHE
132/** @def E1K_WITH_RXD_CACHE
133 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
134 * single physical memory read (or two if it wraps around the end of RX
135 * descriptor ring). Intel's packet driver for DOS needs this option in
136 * order to work properly (see @bugref{6217}).
137 */
138#define E1K_WITH_RXD_CACHE
139/** @def E1K_WITH_PREREG_MMIO
140 * E1K_WITH_PREREG_MMIO enables a new style MMIO registration and is
141 * currently only done for testing the related PDM, IOM and PGM code. */
142//#define E1K_WITH_PREREG_MMIO
143/* @} */
144/* End of Options ************************************************************/
145
146#ifdef E1K_WITH_TXD_CACHE
147/**
148 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
149 * in the state structure. It limits the number of descriptors loaded in one
150 * batch read. For example, a Linux guest may use up to 20 descriptors per
151 * TSE packet. The largest TSE packet seen (from a Windows guest) used 45 descriptors.
152 */
153# define E1K_TXD_CACHE_SIZE 64u
154#endif /* E1K_WITH_TXD_CACHE */
155
156#ifdef E1K_WITH_RXD_CACHE
157/**
158 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
159 * in the state structure. It limits the number of descriptors loaded in one
160 * batch read. For example, an XP guest adds 15 RX descriptors at a time.
161 */
162# define E1K_RXD_CACHE_SIZE 16u
163#endif /* E1K_WITH_RXD_CACHE */
164
165
166/* Little helpers ************************************************************/
167#undef htons
168#undef ntohs
169#undef htonl
170#undef ntohl
171#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
172#define ntohs(x) htons(x)
173#define htonl(x) ASMByteSwapU32(x)
174#define ntohl(x) htonl(x)
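/* Note (illustrative, not from the original source): unlike the libc versions,
 * these helpers byte-swap unconditionally, e.g.
 *     htons(0x0800) == 0x0008   and   htonl(0x12345678) == 0x78563412
 * which yields network byte order on the little-endian hosts this device code
 * is built for. */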
175
176#ifndef DEBUG
177# ifdef E1K_REL_DEBUG
178# define DEBUG
179# define E1kLog(a) LogRel(a)
180# define E1kLog2(a) LogRel(a)
181# define E1kLog3(a) LogRel(a)
182# define E1kLogX(x, a) LogRel(a)
183//# define E1kLog3(a) do {} while (0)
184# else
185# define E1kLog(a) do {} while (0)
186# define E1kLog2(a) do {} while (0)
187# define E1kLog3(a) do {} while (0)
188# define E1kLogX(x, a) do {} while (0)
189# endif
190#else
191# define E1kLog(a) Log(a)
192# define E1kLog2(a) Log2(a)
193# define E1kLog3(a) Log3(a)
194# define E1kLogX(x, a) LogIt(x, LOG_GROUP, a)
195//# define E1kLog(a) do {} while (0)
196//# define E1kLog2(a) do {} while (0)
197//# define E1kLog3(a) do {} while (0)
198#endif
199
200#if 0
201# define LOG_ENABLED
202# define E1kLogRel(a) LogRel(a)
203# undef Log6
204# define Log6(a) LogRel(a)
205#else
206# define E1kLogRel(a) do { } while (0)
207#endif
208
209//#undef DEBUG
210
211#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
212
213#define E1K_INC_CNT32(cnt) \
214do { \
215 if (cnt < UINT32_MAX) \
216 cnt++; \
217} while (0)
218
219#define E1K_ADD_CNT64(cntLo, cntHi, val) \
220do { \
221 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
222 uint64_t tmp = u64Cnt; \
223 u64Cnt += val; \
224 if (tmp > u64Cnt ) \
225 u64Cnt = UINT64_MAX; \
226 cntLo = (uint32_t)u64Cnt; \
227 cntHi = (uint32_t)(u64Cnt >> 32); \
228} while (0)
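/* A minimal usage sketch (illustrative, not from the original source):
 * E1K_ADD_CNT64 maintains a 64-bit statistics counter split across a low/high
 * 32-bit register pair, saturating at UINT64_MAX instead of wrapping. For
 * instance, accumulating received octets into the Good Octets Received pair
 * could look like
 *     E1K_ADD_CNT64(GORCL, GORCH, cbFrame);   // cbFrame: hypothetical byte count
 * where GORCL/GORCH are the register aliases defined further below. */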
229
230#ifdef E1K_INT_STATS
231# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
232#else /* E1K_INT_STATS */
233# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
234#endif /* E1K_INT_STATS */
235
236
237/*****************************************************************************/
238
239typedef uint32_t E1KCHIP;
240#define E1K_CHIP_82540EM 0
241#define E1K_CHIP_82543GC 1
242#define E1K_CHIP_82545EM 2
243
244#ifdef IN_RING3
245/** Different E1000 chips. */
246static const struct E1kChips
247{
248 uint16_t uPCIVendorId;
249 uint16_t uPCIDeviceId;
250 uint16_t uPCISubsystemVendorId;
251 uint16_t uPCISubsystemId;
252 const char *pcszName;
253} g_aChips[] =
254{
255 /* Vendor Device SSVendor SubSys Name */
256 { 0x8086,
257 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
258# ifdef E1K_WITH_MSI
259 0x105E,
260# else
261 0x100E,
262# endif
263 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
264 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
265 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
266};
267#endif /* IN_RING3 */
268
269
270/* The size of register area mapped to I/O space */
271#define E1K_IOPORT_SIZE 0x8
272/* The size of memory-mapped register area */
273#define E1K_MM_SIZE 0x20000
274
275#define E1K_MAX_TX_PKT_SIZE 16288
276#define E1K_MAX_RX_PKT_SIZE 16384
277
278/*****************************************************************************/
279
280/** Gets the specified bits from the register. */
281#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
282#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
283#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
284#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
285#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
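/* Illustrative expansion (assuming the register aliases defined below): the
 * accessor macros paste the register and field names together to pick the
 * matching mask and shift, so
 *     GET_BITS(EERD, ADDR)
 * expands to
 *     ((EERD & EERD_ADDR_MASK) >> EERD_ADDR_SHIFT)
 * i.e. the EEPROM word address encoded in the EERD register. */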
286
287#define CTRL_SLU UINT32_C(0x00000040)
288#define CTRL_MDIO UINT32_C(0x00100000)
289#define CTRL_MDC UINT32_C(0x00200000)
290#define CTRL_MDIO_DIR UINT32_C(0x01000000)
291#define CTRL_MDC_DIR UINT32_C(0x02000000)
292#define CTRL_RESET UINT32_C(0x04000000)
293#define CTRL_VME UINT32_C(0x40000000)
294
295#define STATUS_LU UINT32_C(0x00000002)
296#define STATUS_TXOFF UINT32_C(0x00000010)
297
298#define EECD_EE_WIRES UINT32_C(0x0F)
299#define EECD_EE_REQ UINT32_C(0x40)
300#define EECD_EE_GNT UINT32_C(0x80)
301
302#define EERD_START UINT32_C(0x00000001)
303#define EERD_DONE UINT32_C(0x00000010)
304#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
305#define EERD_DATA_SHIFT 16
306#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
307#define EERD_ADDR_SHIFT 8
308
309#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
310#define MDIC_DATA_SHIFT 0
311#define MDIC_REG_MASK UINT32_C(0x001F0000)
312#define MDIC_REG_SHIFT 16
313#define MDIC_PHY_MASK UINT32_C(0x03E00000)
314#define MDIC_PHY_SHIFT 21
315#define MDIC_OP_WRITE UINT32_C(0x04000000)
316#define MDIC_OP_READ UINT32_C(0x08000000)
317#define MDIC_READY UINT32_C(0x10000000)
318#define MDIC_INT_EN UINT32_C(0x20000000)
319#define MDIC_ERROR UINT32_C(0x40000000)
320
321#define TCTL_EN UINT32_C(0x00000002)
322#define TCTL_PSP UINT32_C(0x00000008)
323
324#define RCTL_EN UINT32_C(0x00000002)
325#define RCTL_UPE UINT32_C(0x00000008)
326#define RCTL_MPE UINT32_C(0x00000010)
327#define RCTL_LPE UINT32_C(0x00000020)
328#define RCTL_LBM_MASK UINT32_C(0x000000C0)
329#define RCTL_LBM_SHIFT 6
330#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
331#define RCTL_RDMTS_SHIFT 8
332#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
333#define RCTL_MO_MASK UINT32_C(0x00003000)
334#define RCTL_MO_SHIFT 12
335#define RCTL_BAM UINT32_C(0x00008000)
336#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
337#define RCTL_BSIZE_SHIFT 16
338#define RCTL_VFE UINT32_C(0x00040000)
339#define RCTL_CFIEN UINT32_C(0x00080000)
340#define RCTL_CFI UINT32_C(0x00100000)
341#define RCTL_BSEX UINT32_C(0x02000000)
342#define RCTL_SECRC UINT32_C(0x04000000)
343
344#define ICR_TXDW UINT32_C(0x00000001)
345#define ICR_TXQE UINT32_C(0x00000002)
346#define ICR_LSC UINT32_C(0x00000004)
347#define ICR_RXDMT0 UINT32_C(0x00000010)
348#define ICR_RXT0 UINT32_C(0x00000080)
349#define ICR_TXD_LOW UINT32_C(0x00008000)
350#define RDTR_FPD UINT32_C(0x80000000)
351
352#define PBA_st ((PBAST*)(pThis->auRegs + PBA_IDX))
353typedef struct
354{
355 unsigned rxa : 7;
356 unsigned rxa_r : 9;
357 unsigned txa : 16;
358} PBAST;
359AssertCompileSize(PBAST, 4);
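/* Illustrative decode (values chosen for the example; PBA sizes are in KB per
 * the 8254x manual): a PBA value of 0x00100030 viewed through PBA_st gives
 * rxa = 0x30 (48 KB receive buffer) and txa = 0x10 (16 KB transmit buffer). */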
360
361#define TXDCTL_WTHRESH_MASK 0x003F0000
362#define TXDCTL_WTHRESH_SHIFT 16
363#define TXDCTL_LWTHRESH_MASK 0xFE000000
364#define TXDCTL_LWTHRESH_SHIFT 25
365
366#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
367#define RXCSUM_PCSS_SHIFT 0
368
369/** @name Register access macros
370 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
371 * @{ */
372#define CTRL pThis->auRegs[CTRL_IDX]
373#define STATUS pThis->auRegs[STATUS_IDX]
374#define EECD pThis->auRegs[EECD_IDX]
375#define EERD pThis->auRegs[EERD_IDX]
376#define CTRL_EXT pThis->auRegs[CTRL_EXT_IDX]
377#define FLA pThis->auRegs[FLA_IDX]
378#define MDIC pThis->auRegs[MDIC_IDX]
379#define FCAL pThis->auRegs[FCAL_IDX]
380#define FCAH pThis->auRegs[FCAH_IDX]
381#define FCT pThis->auRegs[FCT_IDX]
382#define VET pThis->auRegs[VET_IDX]
383#define ICR pThis->auRegs[ICR_IDX]
384#define ITR pThis->auRegs[ITR_IDX]
385#define ICS pThis->auRegs[ICS_IDX]
386#define IMS pThis->auRegs[IMS_IDX]
387#define IMC pThis->auRegs[IMC_IDX]
388#define RCTL pThis->auRegs[RCTL_IDX]
389#define FCTTV pThis->auRegs[FCTTV_IDX]
390#define TXCW pThis->auRegs[TXCW_IDX]
391#define RXCW pThis->auRegs[RXCW_IDX]
392#define TCTL pThis->auRegs[TCTL_IDX]
393#define TIPG pThis->auRegs[TIPG_IDX]
394#define AIFS pThis->auRegs[AIFS_IDX]
395#define LEDCTL pThis->auRegs[LEDCTL_IDX]
396#define PBA pThis->auRegs[PBA_IDX]
397#define FCRTL pThis->auRegs[FCRTL_IDX]
398#define FCRTH pThis->auRegs[FCRTH_IDX]
399#define RDFH pThis->auRegs[RDFH_IDX]
400#define RDFT pThis->auRegs[RDFT_IDX]
401#define RDFHS pThis->auRegs[RDFHS_IDX]
402#define RDFTS pThis->auRegs[RDFTS_IDX]
403#define RDFPC pThis->auRegs[RDFPC_IDX]
404#define RDBAL pThis->auRegs[RDBAL_IDX]
405#define RDBAH pThis->auRegs[RDBAH_IDX]
406#define RDLEN pThis->auRegs[RDLEN_IDX]
407#define RDH pThis->auRegs[RDH_IDX]
408#define RDT pThis->auRegs[RDT_IDX]
409#define RDTR pThis->auRegs[RDTR_IDX]
410#define RXDCTL pThis->auRegs[RXDCTL_IDX]
411#define RADV pThis->auRegs[RADV_IDX]
412#define RSRPD pThis->auRegs[RSRPD_IDX]
413#define TXDMAC pThis->auRegs[TXDMAC_IDX]
414#define TDFH pThis->auRegs[TDFH_IDX]
415#define TDFT pThis->auRegs[TDFT_IDX]
416#define TDFHS pThis->auRegs[TDFHS_IDX]
417#define TDFTS pThis->auRegs[TDFTS_IDX]
418#define TDFPC pThis->auRegs[TDFPC_IDX]
419#define TDBAL pThis->auRegs[TDBAL_IDX]
420#define TDBAH pThis->auRegs[TDBAH_IDX]
421#define TDLEN pThis->auRegs[TDLEN_IDX]
422#define TDH pThis->auRegs[TDH_IDX]
423#define TDT pThis->auRegs[TDT_IDX]
424#define TIDV pThis->auRegs[TIDV_IDX]
425#define TXDCTL pThis->auRegs[TXDCTL_IDX]
426#define TADV pThis->auRegs[TADV_IDX]
427#define TSPMT pThis->auRegs[TSPMT_IDX]
428#define CRCERRS pThis->auRegs[CRCERRS_IDX]
429#define ALGNERRC pThis->auRegs[ALGNERRC_IDX]
430#define SYMERRS pThis->auRegs[SYMERRS_IDX]
431#define RXERRC pThis->auRegs[RXERRC_IDX]
432#define MPC pThis->auRegs[MPC_IDX]
433#define SCC pThis->auRegs[SCC_IDX]
434#define ECOL pThis->auRegs[ECOL_IDX]
435#define MCC pThis->auRegs[MCC_IDX]
436#define LATECOL pThis->auRegs[LATECOL_IDX]
437#define COLC pThis->auRegs[COLC_IDX]
438#define DC pThis->auRegs[DC_IDX]
439#define TNCRS pThis->auRegs[TNCRS_IDX]
440/* #define SEC pThis->auRegs[SEC_IDX] Conflict with sys/time.h */
441#define CEXTERR pThis->auRegs[CEXTERR_IDX]
442#define RLEC pThis->auRegs[RLEC_IDX]
443#define XONRXC pThis->auRegs[XONRXC_IDX]
444#define XONTXC pThis->auRegs[XONTXC_IDX]
445#define XOFFRXC pThis->auRegs[XOFFRXC_IDX]
446#define XOFFTXC pThis->auRegs[XOFFTXC_IDX]
447#define FCRUC pThis->auRegs[FCRUC_IDX]
448#define PRC64 pThis->auRegs[PRC64_IDX]
449#define PRC127 pThis->auRegs[PRC127_IDX]
450#define PRC255 pThis->auRegs[PRC255_IDX]
451#define PRC511 pThis->auRegs[PRC511_IDX]
452#define PRC1023 pThis->auRegs[PRC1023_IDX]
453#define PRC1522 pThis->auRegs[PRC1522_IDX]
454#define GPRC pThis->auRegs[GPRC_IDX]
455#define BPRC pThis->auRegs[BPRC_IDX]
456#define MPRC pThis->auRegs[MPRC_IDX]
457#define GPTC pThis->auRegs[GPTC_IDX]
458#define GORCL pThis->auRegs[GORCL_IDX]
459#define GORCH pThis->auRegs[GORCH_IDX]
460#define GOTCL pThis->auRegs[GOTCL_IDX]
461#define GOTCH pThis->auRegs[GOTCH_IDX]
462#define RNBC pThis->auRegs[RNBC_IDX]
463#define RUC pThis->auRegs[RUC_IDX]
464#define RFC pThis->auRegs[RFC_IDX]
465#define ROC pThis->auRegs[ROC_IDX]
466#define RJC pThis->auRegs[RJC_IDX]
467#define MGTPRC pThis->auRegs[MGTPRC_IDX]
468#define MGTPDC pThis->auRegs[MGTPDC_IDX]
469#define MGTPTC pThis->auRegs[MGTPTC_IDX]
470#define TORL pThis->auRegs[TORL_IDX]
471#define TORH pThis->auRegs[TORH_IDX]
472#define TOTL pThis->auRegs[TOTL_IDX]
473#define TOTH pThis->auRegs[TOTH_IDX]
474#define TPR pThis->auRegs[TPR_IDX]
475#define TPT pThis->auRegs[TPT_IDX]
476#define PTC64 pThis->auRegs[PTC64_IDX]
477#define PTC127 pThis->auRegs[PTC127_IDX]
478#define PTC255 pThis->auRegs[PTC255_IDX]
479#define PTC511 pThis->auRegs[PTC511_IDX]
480#define PTC1023 pThis->auRegs[PTC1023_IDX]
481#define PTC1522 pThis->auRegs[PTC1522_IDX]
482#define MPTC pThis->auRegs[MPTC_IDX]
483#define BPTC pThis->auRegs[BPTC_IDX]
484#define TSCTC pThis->auRegs[TSCTC_IDX]
485#define TSCTFC pThis->auRegs[TSCTFC_IDX]
486#define RXCSUM pThis->auRegs[RXCSUM_IDX]
487#define WUC pThis->auRegs[WUC_IDX]
488#define WUFC pThis->auRegs[WUFC_IDX]
489#define WUS pThis->auRegs[WUS_IDX]
490#define MANC pThis->auRegs[MANC_IDX]
491#define IPAV pThis->auRegs[IPAV_IDX]
492#define WUPL pThis->auRegs[WUPL_IDX]
493/** @} */
494
495/**
496 * Indices of memory-mapped registers in register table.
497 */
498typedef enum
499{
500 CTRL_IDX,
501 STATUS_IDX,
502 EECD_IDX,
503 EERD_IDX,
504 CTRL_EXT_IDX,
505 FLA_IDX,
506 MDIC_IDX,
507 FCAL_IDX,
508 FCAH_IDX,
509 FCT_IDX,
510 VET_IDX,
511 ICR_IDX,
512 ITR_IDX,
513 ICS_IDX,
514 IMS_IDX,
515 IMC_IDX,
516 RCTL_IDX,
517 FCTTV_IDX,
518 TXCW_IDX,
519 RXCW_IDX,
520 TCTL_IDX,
521 TIPG_IDX,
522 AIFS_IDX,
523 LEDCTL_IDX,
524 PBA_IDX,
525 FCRTL_IDX,
526 FCRTH_IDX,
527 RDFH_IDX,
528 RDFT_IDX,
529 RDFHS_IDX,
530 RDFTS_IDX,
531 RDFPC_IDX,
532 RDBAL_IDX,
533 RDBAH_IDX,
534 RDLEN_IDX,
535 RDH_IDX,
536 RDT_IDX,
537 RDTR_IDX,
538 RXDCTL_IDX,
539 RADV_IDX,
540 RSRPD_IDX,
541 TXDMAC_IDX,
542 TDFH_IDX,
543 TDFT_IDX,
544 TDFHS_IDX,
545 TDFTS_IDX,
546 TDFPC_IDX,
547 TDBAL_IDX,
548 TDBAH_IDX,
549 TDLEN_IDX,
550 TDH_IDX,
551 TDT_IDX,
552 TIDV_IDX,
553 TXDCTL_IDX,
554 TADV_IDX,
555 TSPMT_IDX,
556 CRCERRS_IDX,
557 ALGNERRC_IDX,
558 SYMERRS_IDX,
559 RXERRC_IDX,
560 MPC_IDX,
561 SCC_IDX,
562 ECOL_IDX,
563 MCC_IDX,
564 LATECOL_IDX,
565 COLC_IDX,
566 DC_IDX,
567 TNCRS_IDX,
568 SEC_IDX,
569 CEXTERR_IDX,
570 RLEC_IDX,
571 XONRXC_IDX,
572 XONTXC_IDX,
573 XOFFRXC_IDX,
574 XOFFTXC_IDX,
575 FCRUC_IDX,
576 PRC64_IDX,
577 PRC127_IDX,
578 PRC255_IDX,
579 PRC511_IDX,
580 PRC1023_IDX,
581 PRC1522_IDX,
582 GPRC_IDX,
583 BPRC_IDX,
584 MPRC_IDX,
585 GPTC_IDX,
586 GORCL_IDX,
587 GORCH_IDX,
588 GOTCL_IDX,
589 GOTCH_IDX,
590 RNBC_IDX,
591 RUC_IDX,
592 RFC_IDX,
593 ROC_IDX,
594 RJC_IDX,
595 MGTPRC_IDX,
596 MGTPDC_IDX,
597 MGTPTC_IDX,
598 TORL_IDX,
599 TORH_IDX,
600 TOTL_IDX,
601 TOTH_IDX,
602 TPR_IDX,
603 TPT_IDX,
604 PTC64_IDX,
605 PTC127_IDX,
606 PTC255_IDX,
607 PTC511_IDX,
608 PTC1023_IDX,
609 PTC1522_IDX,
610 MPTC_IDX,
611 BPTC_IDX,
612 TSCTC_IDX,
613 TSCTFC_IDX,
614 RXCSUM_IDX,
615 WUC_IDX,
616 WUFC_IDX,
617 WUS_IDX,
618 MANC_IDX,
619 IPAV_IDX,
620 WUPL_IDX,
621 MTA_IDX,
622 RA_IDX,
623 VFTA_IDX,
624 IP4AT_IDX,
625 IP6AT_IDX,
626 WUPM_IDX,
627 FFLT_IDX,
628 FFMT_IDX,
629 FFVT_IDX,
630 PBM_IDX,
631 RA_82542_IDX,
632 MTA_82542_IDX,
633 VFTA_82542_IDX,
634 E1K_NUM_OF_REGS
635} E1kRegIndex;
636
637#define E1K_NUM_OF_32BIT_REGS MTA_IDX
638/** The number of registers with strictly increasing offset. */
639#define E1K_NUM_OF_BINARY_SEARCHABLE (WUPL_IDX + 1)
640
641
642/**
643 * Define E1000-specific EEPROM layout.
644 */
645struct E1kEEPROM
646{
647 public:
648 EEPROM93C46 eeprom;
649
650#ifdef IN_RING3
651 /**
652 * Initialize EEPROM content.
653 *
654 * @param macAddr MAC address of E1000.
655 */
656 void init(RTMAC &macAddr)
657 {
658 eeprom.init();
659 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
660 eeprom.m_au16Data[0x04] = 0xFFFF;
661 /*
662 * bit 3 - full support for power management
663 * bit 10 - full duplex
664 */
665 eeprom.m_au16Data[0x0A] = 0x4408;
666 eeprom.m_au16Data[0x0B] = 0x001E;
667 eeprom.m_au16Data[0x0C] = 0x8086;
668 eeprom.m_au16Data[0x0D] = 0x100E;
669 eeprom.m_au16Data[0x0E] = 0x8086;
670 eeprom.m_au16Data[0x0F] = 0x3040;
671 eeprom.m_au16Data[0x21] = 0x7061;
672 eeprom.m_au16Data[0x22] = 0x280C;
673 eeprom.m_au16Data[0x23] = 0x00C8;
674 eeprom.m_au16Data[0x24] = 0x00C8;
675 eeprom.m_au16Data[0x2F] = 0x0602;
676 updateChecksum();
677 };
678
679 /**
680 * Compute the checksum as required by E1000 and store it
681 * in the last word.
682 */
683 void updateChecksum()
684 {
685 uint16_t u16Checksum = 0;
686
687 for (int i = 0; i < eeprom.SIZE-1; i++)
688 u16Checksum += eeprom.m_au16Data[i];
689 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
690 };
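/* Sanity-check sketch (illustrative, assuming the 93C46 EEPROM holds
 * eeprom.SIZE == 64 16-bit words): after updateChecksum() the 16-bit sum of
 * all words, including the checksum word itself, equals 0xBABA, which is the
 * invariant guest drivers verify when validating the EEPROM:
 *     uint16_t sum = 0;
 *     for (int i = 0; i < eeprom.SIZE; i++)
 *         sum += eeprom.m_au16Data[i];
 *     Assert(sum == 0xBABA);
 */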
691
692 /**
693 * The first 6 bytes of the EEPROM contain the MAC address.
694 *
695 * @param pMac Where to store the MAC address of the E1000.
696 */
697 void getMac(PRTMAC pMac)
698 {
699 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
700 };
701
702 uint32_t read()
703 {
704 return eeprom.read();
705 }
706
707 void write(uint32_t u32Wires)
708 {
709 eeprom.write(u32Wires);
710 }
711
712 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
713 {
714 return eeprom.readWord(u32Addr, pu16Value);
715 }
716
717 int load(PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
718 {
719 return eeprom.load(pHlp, pSSM);
720 }
721
722 void save(PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
723 {
724 eeprom.save(pHlp, pSSM);
725 }
726#endif /* IN_RING3 */
727};
728
729
730#define E1K_SPEC_VLAN(s) (s & 0xFFF)
731#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
732#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
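/* Worked example (illustrative): for a VLAN special field s = 0x6123,
 * E1K_SPEC_VLAN(s) = 0x123 (VLAN ID), E1K_SPEC_CFI(s) = 0 and
 * E1K_SPEC_PRI(s) = 3, matching the 802.1Q TCI layout of
 * 12-bit ID / 1-bit CFI / 3-bit priority. */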
733
734struct E1kRxDStatus
735{
736 /** @name Descriptor Status field (3.2.3.1)
737 * @{ */
738 unsigned fDD : 1; /**< Descriptor Done. */
739 unsigned fEOP : 1; /**< End of packet. */
740 unsigned fIXSM : 1; /**< Ignore checksum indication. */
741 unsigned fVP : 1; /**< VLAN, matches VET. */
742 unsigned : 1;
743    unsigned fTCPCS : 1;    /**< TCP Checksum calculated on the packet. */
744 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
745 unsigned fPIF : 1; /**< Passed in-exact filter */
746 /** @} */
747 /** @name Descriptor Errors field (3.2.3.2)
748 * (Only valid when fEOP and fDD are set.)
749 * @{ */
750 unsigned fCE : 1; /**< CRC or alignment error. */
751 unsigned : 4; /**< Reserved, varies with different models... */
752 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
753 unsigned fIPE : 1; /**< IP Checksum error. */
754 unsigned fRXE : 1; /**< RX Data error. */
755 /** @} */
756 /** @name Descriptor Special field (3.2.3.3)
757 * @{ */
758 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
759 /** @} */
760};
761typedef struct E1kRxDStatus E1KRXDST;
762
763struct E1kRxDesc_st
764{
765 uint64_t u64BufAddr; /**< Address of data buffer */
766 uint16_t u16Length; /**< Length of data in buffer */
767 uint16_t u16Checksum; /**< Packet checksum */
768 E1KRXDST status;
769};
770typedef struct E1kRxDesc_st E1KRXDESC;
771AssertCompileSize(E1KRXDESC, 16);
772
773#define E1K_DTYP_LEGACY -1
774#define E1K_DTYP_CONTEXT 0
775#define E1K_DTYP_DATA 1
776
777struct E1kTDLegacy
778{
779 uint64_t u64BufAddr; /**< Address of data buffer */
780 struct TDLCmd_st
781 {
782 unsigned u16Length : 16;
783 unsigned u8CSO : 8;
784 /* CMD field : 8 */
785 unsigned fEOP : 1;
786 unsigned fIFCS : 1;
787 unsigned fIC : 1;
788 unsigned fRS : 1;
789 unsigned fRPS : 1;
790 unsigned fDEXT : 1;
791 unsigned fVLE : 1;
792 unsigned fIDE : 1;
793 } cmd;
794 struct TDLDw3_st
795 {
796 /* STA field */
797 unsigned fDD : 1;
798 unsigned fEC : 1;
799 unsigned fLC : 1;
800 unsigned fTURSV : 1;
801 /* RSV field */
802 unsigned u4RSV : 4;
803 /* CSS field */
804 unsigned u8CSS : 8;
805 /* Special field*/
806 unsigned u16Special: 16;
807 } dw3;
808};
809
810/**
811 * TCP/IP Context Transmit Descriptor, section 3.3.6.
812 */
813struct E1kTDContext
814{
815 struct CheckSum_st
816 {
817 /** TSE: Header start. !TSE: Checksum start. */
818 unsigned u8CSS : 8;
819 /** Checksum offset - where to store it. */
820 unsigned u8CSO : 8;
821 /** Checksum ending (inclusive) offset, 0 = end of packet. */
822 unsigned u16CSE : 16;
823 } ip;
824 struct CheckSum_st tu;
825 struct TDCDw2_st
826 {
827 /** TSE: The total number of payload bytes for this context. Sans header. */
828 unsigned u20PAYLEN : 20;
829 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
830 unsigned u4DTYP : 4;
831 /** TUCMD field, 8 bits
832 * @{ */
833 /** TSE: TCP (set) or UDP (clear). */
834 unsigned fTCP : 1;
835 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
836 * the IP header. Does not affect the checksumming.
837 * @remarks 82544GC/EI interprets a cleared field differently. */
838 unsigned fIP : 1;
839        /** TSE: TCP segmentation enable. When clear, the context describes an ordinary (non-TSE) packet. */
840 unsigned fTSE : 1;
841 /** Report status (only applies to dw3.fDD for here). */
842 unsigned fRS : 1;
843 /** Reserved, MBZ. */
844 unsigned fRSV1 : 1;
845 /** Descriptor extension, must be set for this descriptor type. */
846 unsigned fDEXT : 1;
847 /** Reserved, MBZ. */
848 unsigned fRSV2 : 1;
849 /** Interrupt delay enable. */
850 unsigned fIDE : 1;
851 /** @} */
852 } dw2;
853 struct TDCDw3_st
854 {
855 /** Descriptor Done. */
856 unsigned fDD : 1;
857 /** Reserved, MBZ. */
858 unsigned u7RSV : 7;
859        /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
860 unsigned u8HDRLEN : 8;
861 /** TSO: Maximum segment size. */
862 unsigned u16MSS : 16;
863 } dw3;
864};
865typedef struct E1kTDContext E1KTXCTX;
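/* Illustrative TSE context (values chosen for the example): a guest offloading
 * an 8760-byte TCP payload over Ethernet/IPv4 would set dw3.u8HDRLEN = 54
 * (14 Ethernet + 20 IP + 20 TCP), dw3.u16MSS = 1460 and dw2.u20PAYLEN = 8760,
 * causing the payload to be carved into 6 segments of 1460 bytes, each
 * prefixed with an updated copy of the 54-byte prototype header. */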
866
867/**
868 * TCP/IP Data Transmit Descriptor, section 3.3.7.
869 */
870struct E1kTDData
871{
872 uint64_t u64BufAddr; /**< Address of data buffer */
873 struct TDDCmd_st
874 {
875 /** The total length of data pointed to by this descriptor. */
876 unsigned u20DTALEN : 20;
877 /** The descriptor type - E1K_DTYP_DATA (1). */
878 unsigned u4DTYP : 4;
879 /** @name DCMD field, 8 bits (3.3.7.1).
880 * @{ */
881 /** End of packet. Note TSCTFC update. */
882 unsigned fEOP : 1;
883 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
884 unsigned fIFCS : 1;
885 /** Use the TSE context when set and the normal when clear. */
886 unsigned fTSE : 1;
887 /** Report status (dw3.STA). */
888 unsigned fRS : 1;
889        /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
890 unsigned fRPS : 1;
891 /** Descriptor extension, must be set for this descriptor type. */
892 unsigned fDEXT : 1;
893 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
894 * Insert dw3.SPECIAL after ethernet header. */
895 unsigned fVLE : 1;
896 /** Interrupt delay enable. */
897 unsigned fIDE : 1;
898 /** @} */
899 } cmd;
900 struct TDDDw3_st
901 {
902 /** @name STA field (3.3.7.2)
903 * @{ */
904 unsigned fDD : 1; /**< Descriptor done. */
905 unsigned fEC : 1; /**< Excess collision. */
906 unsigned fLC : 1; /**< Late collision. */
907 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
908 unsigned fTURSV : 1;
909 /** @} */
910 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
911 /** @name POPTS (Packet Option) field (3.3.7.3)
912 * @{ */
913 unsigned fIXSM : 1; /**< Insert IP checksum. */
914 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
915 unsigned u6RSV : 6; /**< Reserved, MBZ. */
916 /** @} */
917 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
918 * Requires fEOP, fVLE and CTRL.VME to be set.
919 * @{ */
920 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
921 /** @} */
922 } dw3;
923};
924typedef struct E1kTDData E1KTXDAT;
925
926union E1kTxDesc
927{
928 struct E1kTDLegacy legacy;
929 struct E1kTDContext context;
930 struct E1kTDData data;
931};
932typedef union E1kTxDesc E1KTXDESC;
933AssertCompileSize(E1KTXDESC, 16);
934
935#define RA_CTL_AS 0x0003
936#define RA_CTL_AV 0x8000
937
938union E1kRecAddr
939{
940 uint32_t au32[32];
941 struct RAArray
942 {
943 uint8_t addr[6];
944 uint16_t ctl;
945 } array[16];
946};
947typedef struct E1kRecAddr::RAArray E1KRAELEM;
948typedef union E1kRecAddr E1KRA;
949AssertCompileSize(E1KRA, 8*16);
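/* Note (cross-reference, illustrative): each of the 16 entries holds a 6-byte
 * MAC address plus a control word; an entry participates in receive filtering
 * only when its RA_CTL_AV (address valid) bit is set. With E1K_INIT_RA0
 * defined above, RA[0] is pre-populated with the MAC address from CFGM. */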
950
951#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
952#define E1K_IP_DF           UINT16_C(0x4000)    /**< don't fragment flag */
953#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
954#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
955
956/** @todo use+extend RTNETIPV4 */
957struct E1kIpHeader
958{
959 /* type of service / version / header length */
960 uint16_t tos_ver_hl;
961 /* total length */
962 uint16_t total_len;
963 /* identification */
964 uint16_t ident;
965 /* fragment offset field */
966 uint16_t offset;
967    /* time to live / protocol */
968 uint16_t ttl_proto;
969 /* checksum */
970 uint16_t chksum;
971 /* source IP address */
972 uint32_t src;
973 /* destination IP address */
974 uint32_t dest;
975};
976AssertCompileSize(struct E1kIpHeader, 20);
977
978#define E1K_TCP_FIN UINT16_C(0x01)
979#define E1K_TCP_SYN UINT16_C(0x02)
980#define E1K_TCP_RST UINT16_C(0x04)
981#define E1K_TCP_PSH UINT16_C(0x08)
982#define E1K_TCP_ACK UINT16_C(0x10)
983#define E1K_TCP_URG UINT16_C(0x20)
984#define E1K_TCP_ECE UINT16_C(0x40)
985#define E1K_TCP_CWR UINT16_C(0x80)
986#define E1K_TCP_FLAGS UINT16_C(0x3f)
987
988/** @todo use+extend RTNETTCP */
989struct E1kTcpHeader
990{
991 uint16_t src;
992 uint16_t dest;
993 uint32_t seqno;
994 uint32_t ackno;
995 uint16_t hdrlen_flags;
996 uint16_t wnd;
997 uint16_t chksum;
998 uint16_t urgp;
999};
1000AssertCompileSize(struct E1kTcpHeader, 20);
1001
1002
1003#ifdef E1K_WITH_TXD_CACHE
1004/** The current Saved state version. */
1005# define E1K_SAVEDSTATE_VERSION 4
1006/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
1007# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
1008#else /* !E1K_WITH_TXD_CACHE */
1009/** The current Saved state version. */
1010# define E1K_SAVEDSTATE_VERSION 3
1011#endif /* !E1K_WITH_TXD_CACHE */
1012/** Saved state version for VirtualBox 4.1 and earlier.
1013 * These did not include VLAN tag fields. */
1014#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
1015/** Saved state version for VirtualBox 3.0 and earlier.
1016 * This did not include the configuration part nor the E1kEEPROM. */
1017#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
1018
1019/**
1020 * E1000 shared device state.
1021 *
1022 * This is shared between ring-0 and ring-3.
1023 */
1024typedef struct E1KSTATE
1025{
1026 char szPrf[8]; /**< Log prefix, e.g. E1000#1. */
1027
1028 /** Handle to PCI region \#0, the MMIO region. */
1029    IOMMMIOHANDLE           hMmioRegion;
1030 /** Handle to PCI region \#2, the I/O ports. */
1031 IOMIOPORTHANDLE hIoPorts;
1032
1033 /** Receive Interrupt Delay Timer. */
1034 TMTIMERHANDLE hRIDTimer;
1035 /** Receive Absolute Delay Timer. */
1036 TMTIMERHANDLE hRADTimer;
1037 /** Transmit Interrupt Delay Timer. */
1038 TMTIMERHANDLE hTIDTimer;
1039 /** Transmit Absolute Delay Timer. */
1040 TMTIMERHANDLE hTADTimer;
1041 /** Transmit Delay Timer. */
1042 TMTIMERHANDLE hTXDTimer;
1043 /** Late Interrupt Timer. */
1044 TMTIMERHANDLE hIntTimer;
1045 /** Link Up(/Restore) Timer. */
1046 TMTIMERHANDLE hLUTimer;
1047
1048 /** Transmit task. */
1049 PDMTASKHANDLE hTxTask;
1050
1051 /** Critical section - what is it protecting? */
1052 PDMCRITSECT cs;
1053 /** RX Critical section. */
1054 PDMCRITSECT csRx;
1055#ifdef E1K_WITH_TX_CS
1056 /** TX Critical section. */
1057 PDMCRITSECT csTx;
1058#endif /* E1K_WITH_TX_CS */
1059 /** MAC address obtained from the configuration. */
1060 RTMAC macConfigured;
1061 uint16_t u16Padding0;
1062 /** EMT: Last time the interrupt was acknowledged. */
1063 uint64_t u64AckedAt;
1064 /** All: Used for eliminating spurious interrupts. */
1065 bool fIntRaised;
1066 /** EMT: false if the cable is disconnected by the GUI. */
1067 bool fCableConnected;
1068 /** EMT: Compute Ethernet CRC for RX packets. */
1069 bool fEthernetCRC;
1070 /** All: throttle interrupts. */
1071 bool fItrEnabled;
1072 /** All: throttle RX interrupts. */
1073 bool fItrRxEnabled;
1074 /** All: Delay TX interrupts using TIDV/TADV. */
1075 bool fTidEnabled;
1076 bool afPadding[2];
1077 /** Link up delay (in milliseconds). */
1078 uint32_t cMsLinkUpDelay;
1079
1080 /** All: Device register storage. */
1081 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1082 /** TX/RX: Status LED. */
1083 PDMLED led;
1084    /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1085 uint32_t u32PktNo;
1086
1087 /** EMT: Offset of the register to be read via IO. */
1088 uint32_t uSelectedReg;
1089 /** EMT: Multicast Table Array. */
1090 uint32_t auMTA[128];
1091 /** EMT: Receive Address registers. */
1092 E1KRA aRecAddr;
1093 /** EMT: VLAN filter table array. */
1094 uint32_t auVFTA[128];
1095 /** EMT: Receive buffer size. */
1096 uint16_t u16RxBSize;
1097 /** EMT: Locked state -- no state alteration possible. */
1098 bool fLocked;
1099 /** EMT: */
1100 bool fDelayInts;
1101 /** All: */
1102 bool fIntMaskUsed;
1103
1104 /** N/A: */
1105 bool volatile fMaybeOutOfSpace;
1106 /** EMT: Gets signalled when more RX descriptors become available. */
1107 SUPSEMEVENT hEventMoreRxDescAvail;
1108#ifdef E1K_WITH_RXD_CACHE
1109 /** RX: Fetched RX descriptors. */
1110 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1111 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1112 /** RX: Actual number of fetched RX descriptors. */
1113 uint32_t nRxDFetched;
1114 /** RX: Index in cache of RX descriptor being processed. */
1115 uint32_t iRxDCurrent;
1116#endif /* E1K_WITH_RXD_CACHE */
1117
1118 /** TX: Context used for TCP segmentation packets. */
1119 E1KTXCTX contextTSE;
1120 /** TX: Context used for ordinary packets. */
1121 E1KTXCTX contextNormal;
1122#ifdef E1K_WITH_TXD_CACHE
1123 /** TX: Fetched TX descriptors. */
1124 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1125 /** TX: Actual number of fetched TX descriptors. */
1126 uint8_t nTxDFetched;
1127 /** TX: Index in cache of TX descriptor being processed. */
1128 uint8_t iTxDCurrent;
1129 /** TX: Will this frame be sent as GSO. */
1130 bool fGSO;
1131 /** Alignment padding. */
1132 bool fReserved;
1133 /** TX: Number of bytes in next packet. */
1134 uint32_t cbTxAlloc;
1135
1136#endif /* E1K_WITH_TXD_CACHE */
1137 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1138 * applicable to the current TSE mode. */
1139 PDMNETWORKGSO GsoCtx;
1140 /** Scratch space for holding the loopback / fallback scatter / gather
1141 * descriptor. */
1142 union
1143 {
1144 PDMSCATTERGATHER Sg;
1145 uint8_t padding[8 * sizeof(RTUINTPTR)];
1146 } uTxFallback;
1147    /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1148 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1149 /** TX: Number of bytes assembled in TX packet buffer. */
1150 uint16_t u16TxPktLen;
1151    /** TX: When false, forces segmentation in the E1000 instead of sending frames as GSO. */
1152 bool fGSOEnabled;
1153 /** TX: IP checksum has to be inserted if true. */
1154 bool fIPcsum;
1155 /** TX: TCP/UDP checksum has to be inserted if true. */
1156 bool fTCPcsum;
1157 /** TX: VLAN tag has to be inserted if true. */
1158 bool fVTag;
1159 /** TX: TCI part of VLAN tag to be inserted. */
1160 uint16_t u16VTagTCI;
1161 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1162 uint32_t u32PayRemain;
1163 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1164 uint16_t u16HdrRemain;
1165 /** TX TSE fallback: Flags from template header. */
1166 uint16_t u16SavedFlags;
1167 /** TX TSE fallback: Partial checksum from template header. */
1168 uint32_t u32SavedCsum;
1169 /** ?: Emulated controller type. */
1170 E1KCHIP eChip;
1171
1172 /** EMT: Physical interface emulation. */
1173 PHY phy;
1174
1175#if 0
1176 /** Alignment padding. */
1177 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1178#endif
1179
1180 STAMCOUNTER StatReceiveBytes;
1181 STAMCOUNTER StatTransmitBytes;
1182#if defined(VBOX_WITH_STATISTICS)
1183 STAMPROFILEADV StatMMIOReadRZ;
1184 STAMPROFILEADV StatMMIOReadR3;
1185 STAMPROFILEADV StatMMIOWriteRZ;
1186 STAMPROFILEADV StatMMIOWriteR3;
1187 STAMPROFILEADV StatEEPROMRead;
1188 STAMPROFILEADV StatEEPROMWrite;
1189 STAMPROFILEADV StatIOReadRZ;
1190 STAMPROFILEADV StatIOReadR3;
1191 STAMPROFILEADV StatIOWriteRZ;
1192 STAMPROFILEADV StatIOWriteR3;
1193 STAMPROFILEADV StatLateIntTimer;
1194 STAMCOUNTER StatLateInts;
1195 STAMCOUNTER StatIntsRaised;
1196 STAMCOUNTER StatIntsPrevented;
1197 STAMPROFILEADV StatReceive;
1198 STAMPROFILEADV StatReceiveCRC;
1199 STAMPROFILEADV StatReceiveFilter;
1200 STAMPROFILEADV StatReceiveStore;
1201 STAMPROFILEADV StatTransmitRZ;
1202 STAMPROFILEADV StatTransmitR3;
1203 STAMPROFILE StatTransmitSendRZ;
1204 STAMPROFILE StatTransmitSendR3;
1205 STAMPROFILE StatRxOverflow;
1206 STAMCOUNTER StatRxOverflowWakeupRZ;
1207 STAMCOUNTER StatRxOverflowWakeupR3;
1208 STAMCOUNTER StatTxDescCtxNormal;
1209 STAMCOUNTER StatTxDescCtxTSE;
1210 STAMCOUNTER StatTxDescLegacy;
1211 STAMCOUNTER StatTxDescData;
1212 STAMCOUNTER StatTxDescTSEData;
1213 STAMCOUNTER StatTxPathFallback;
1214 STAMCOUNTER StatTxPathGSO;
1215 STAMCOUNTER StatTxPathRegular;
1216 STAMCOUNTER StatPHYAccesses;
1217 STAMCOUNTER aStatRegWrites[E1K_NUM_OF_REGS];
1218 STAMCOUNTER aStatRegReads[E1K_NUM_OF_REGS];
1219#endif /* VBOX_WITH_STATISTICS */
1220
1221#ifdef E1K_INT_STATS
1222 /* Internal stats */
1223 uint64_t u64ArmedAt;
1224 uint64_t uStatMaxTxDelay;
1225 uint32_t uStatInt;
1226 uint32_t uStatIntTry;
1227 uint32_t uStatIntLower;
1228 uint32_t uStatNoIntICR;
1229 int32_t iStatIntLost;
1230 int32_t iStatIntLostOne;
1231 uint32_t uStatIntIMS;
1232 uint32_t uStatIntSkip;
1233 uint32_t uStatIntLate;
1234 uint32_t uStatIntMasked;
1235 uint32_t uStatIntEarly;
1236 uint32_t uStatIntRx;
1237 uint32_t uStatIntTx;
1238 uint32_t uStatIntICS;
1239 uint32_t uStatIntRDTR;
1240 uint32_t uStatIntRXDMT0;
1241 uint32_t uStatIntTXQE;
1242 uint32_t uStatTxNoRS;
1243 uint32_t uStatTxIDE;
1244 uint32_t uStatTxDelayed;
1245 uint32_t uStatTxDelayExp;
1246 uint32_t uStatTAD;
1247 uint32_t uStatTID;
1248 uint32_t uStatRAD;
1249 uint32_t uStatRID;
1250 uint32_t uStatRxFrm;
1251 uint32_t uStatTxFrm;
1252 uint32_t uStatDescCtx;
1253 uint32_t uStatDescDat;
1254 uint32_t uStatDescLeg;
1255 uint32_t uStatTx1514;
1256 uint32_t uStatTx2962;
1257 uint32_t uStatTx4410;
1258 uint32_t uStatTx5858;
1259 uint32_t uStatTx7306;
1260 uint32_t uStatTx8754;
1261 uint32_t uStatTx16384;
1262 uint32_t uStatTx32768;
1263 uint32_t uStatTxLarge;
1264 uint32_t uStatAlign;
1265#endif /* E1K_INT_STATS */
1266} E1KSTATE;
1267/** Pointer to the E1000 device state. */
1268typedef E1KSTATE *PE1KSTATE;
1269
1270/**
1271 * E1000 ring-3 device state
1272 *
1273 * @implements PDMINETWORKDOWN
1274 * @implements PDMINETWORKCONFIG
1275 * @implements PDMILEDPORTS
1276 */
1277typedef struct E1KSTATER3
1278{
1279 PDMIBASE IBase;
1280 PDMINETWORKDOWN INetworkDown;
1281 PDMINETWORKCONFIG INetworkConfig;
1282 /** LED interface */
1283 PDMILEDPORTS ILeds;
1284 /** Attached network driver. */
1285 R3PTRTYPE(PPDMIBASE) pDrvBase;
1286 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1287
1288 /** Pointer to the shared state. */
1289 R3PTRTYPE(PE1KSTATE) pShared;
1290
1291 /** Device instance. */
1292 PPDMDEVINSR3 pDevInsR3;
1293 /** Attached network driver. */
1294 PPDMINETWORKUPR3 pDrvR3;
1295 /** The scatter / gather buffer used for the current outgoing packet. */
1296 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1297
1298 /** EMT: EEPROM emulation */
1299 E1kEEPROM eeprom;
1300} E1KSTATER3;
1301/** Pointer to the E1000 ring-3 device state. */
1302typedef E1KSTATER3 *PE1KSTATER3;
1303
1304
1305/**
1306 * E1000 ring-0 device state
1307 */
1308typedef struct E1KSTATER0
1309{
1310 /** Device instance. */
1311 PPDMDEVINSR0 pDevInsR0;
1312 /** Attached network driver. */
1313 PPDMINETWORKUPR0 pDrvR0;
1314 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1315 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1316} E1KSTATER0;
1317/** Pointer to the E1000 ring-0 device state. */
1318typedef E1KSTATER0 *PE1KSTATER0;
1319
1320
1321/**
1322 * E1000 raw-mode device state
1323 */
1324typedef struct E1KSTATERC
1325{
1326 /** Device instance. */
1327 PPDMDEVINSRC pDevInsRC;
1328 /** Attached network driver. */
1329 PPDMINETWORKUPRC pDrvRC;
1330 /** The scatter / gather buffer used for the current outgoing packet. */
1331 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1332} E1KSTATERC;
1333/** Pointer to the E1000 raw-mode device state. */
1334typedef E1KSTATERC *PE1KSTATERC;
1335
1336
1337/** @def PE1KSTATECC
1338 * Pointer to the instance data for the current context. */
1339#ifdef IN_RING3
1340typedef E1KSTATER3 E1KSTATECC;
1341typedef PE1KSTATER3 PE1KSTATECC;
1342#elif defined(IN_RING0)
1343typedef E1KSTATER0 E1KSTATECC;
1344typedef PE1KSTATER0 PE1KSTATECC;
1345#elif defined(IN_RC)
1346typedef E1KSTATERC E1KSTATECC;
1347typedef PE1KSTATERC PE1KSTATECC;
1348#else
1349# error "Not IN_RING3, IN_RING0 or IN_RC"
1350#endif
1351
1352
1353#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1354
1355/* Forward declarations ******************************************************/
1356static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread);
1357
1358/**
1359 * E1000 register read handler.
1360 */
1361typedef int (FNE1KREGREAD)(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1362/**
1363 * E1000 register write handler.
1364 */
1365typedef int (FNE1KREGWRITE)(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1366
1367static FNE1KREGREAD e1kRegReadUnimplemented;
1368static FNE1KREGWRITE e1kRegWriteUnimplemented;
1369static FNE1KREGREAD e1kRegReadAutoClear;
1370static FNE1KREGREAD e1kRegReadDefault;
1371static FNE1KREGWRITE e1kRegWriteDefault;
1372#if 0 /* unused */
1373static FNE1KREGREAD e1kRegReadCTRL;
1374#endif
1375static FNE1KREGWRITE e1kRegWriteCTRL;
1376static FNE1KREGREAD e1kRegReadEECD;
1377static FNE1KREGWRITE e1kRegWriteEECD;
1378static FNE1KREGWRITE e1kRegWriteEERD;
1379static FNE1KREGWRITE e1kRegWriteMDIC;
1380static FNE1KREGREAD e1kRegReadICR;
1381static FNE1KREGWRITE e1kRegWriteICR;
1382static FNE1KREGWRITE e1kRegWriteICS;
1383static FNE1KREGWRITE e1kRegWriteIMS;
1384static FNE1KREGWRITE e1kRegWriteIMC;
1385static FNE1KREGWRITE e1kRegWriteRCTL;
1386static FNE1KREGWRITE e1kRegWritePBA;
1387static FNE1KREGWRITE e1kRegWriteRDT;
1388static FNE1KREGWRITE e1kRegWriteRDTR;
1389static FNE1KREGWRITE e1kRegWriteTDT;
1390static FNE1KREGREAD e1kRegReadMTA;
1391static FNE1KREGWRITE e1kRegWriteMTA;
1392static FNE1KREGREAD e1kRegReadRA;
1393static FNE1KREGWRITE e1kRegWriteRA;
1394static FNE1KREGREAD e1kRegReadVFTA;
1395static FNE1KREGWRITE e1kRegWriteVFTA;
1396
1397/**
1398 * Register map table.
1399 *
1400 * Override pfnRead and pfnWrite to get register-specific behavior.
1401 */
1402static const struct E1kRegMap_st
1403{
1404 /** Register offset in the register space. */
1405 uint32_t offset;
1406 /** Size in bytes. Registers of size > 4 are in fact tables. */
1407 uint32_t size;
1408 /** Readable bits. */
1409 uint32_t readable;
1410 /** Writable bits. */
1411 uint32_t writable;
1412 /** Read callback. */
1413 FNE1KREGREAD *pfnRead;
1414 /** Write callback. */
1415 FNE1KREGWRITE *pfnWrite;
1416 /** Abbreviated name. */
1417 const char *abbrev;
1418 /** Full name. */
1419 const char *name;
1420} g_aE1kRegMap[E1K_NUM_OF_REGS] =
1421{
1422 /* offset size read mask write mask read callback write callback abbrev full name */
1423 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1424 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1425 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1426 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1427 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1428 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1429 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1430 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1431 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1432 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1433 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1434 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1435 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1436 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1437 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1438 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1439 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1440 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1441 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1442 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1443 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1444 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1445 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1446 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1447 { 0x00e00, 0x00004, 0xCFCFCFCF, 0xCFCFCFCF, e1kRegReadDefault , e1kRegWriteDefault , "LEDCTL" , "LED Control" },
1448 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1449 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1450 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1451 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1452 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1453 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1454 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1455 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1456 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1457 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1458 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1459 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1460 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1461 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1462 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1463 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1464 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1465 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1466 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1467 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1468 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1469 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1470 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1471 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1472 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1473 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1474 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1475 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1476 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1477 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1478 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1479 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1480 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1481 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1482 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1483 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1484 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1485 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1486 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1487 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1488 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1489 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1490 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1491 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1492 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1493 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1494 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1495 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1496 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1497 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1498 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1499 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1500 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1501 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1502 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1503 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1504 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1505 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1506 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1507 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1508 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1509 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1510 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1511 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1512 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1513 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1514 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1515 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1516 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1517 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1518 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1519 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1520 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1521 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1522 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1523 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1524 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1525 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1526 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1527 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1528 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1529 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1530 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1531 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1532 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1533 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1534 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1535 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1536 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1537 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1538 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1539 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1540 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1541 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1542 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1543 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1544 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1545 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1546 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1547 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1548 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1549 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1550 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1551 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1552 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1553 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1554 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1555 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
1556 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
1557 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
1558};
1559
1560#ifdef LOG_ENABLED
1561
1562/**
1563 * Convert U32 value to hex string. Masked bytes are replaced with dots.
1564 *
1565 * @remarks The mask has half-byte (not bit) granularity (e.g. 0000000F selects only the last nibble).
1566 *
1567 * @returns The buffer.
1568 *
1569 * @param u32 The word to convert into string.
1570 * @param mask Selects which nibbles to convert.
1571 * @param buf Where to put the result.
1572 */
1573static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1574{
1575 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1576 {
1577 if (mask & 0xF)
1578 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1579 else
1580 *ptr = '.';
1581 }
1582 buf[8] = 0;
1583 return buf;
1584}
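/*
 * Illustrative sketch (example only, not part of the device code; the helper
 * name below is hypothetical): expected behaviour of e1kU32toHex(). With mask
 * 0x0000FFFF only the low four nibbles are rendered, the rest become dots.
 */
#if 0 /* example only, never compiled */
static void e1kU32toHexExample(void)
{
    char szBuf[9];
    e1kU32toHex(0xDEADBEEF, 0x0000FFFF, szBuf); /* szBuf now contains "....BEEF" */
}
#endif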
1585
1586/**
1587 * Returns timer name for debug purposes.
1588 *
1589 * @returns The timer name.
1590 *
1591 * @param pThis The device state structure.
1592 * @param hTimer The timer to name.
1593 */
1594DECLINLINE(const char *) e1kGetTimerName(PE1KSTATE pThis, TMTIMERHANDLE hTimer)
1595{
1596 if (hTimer == pThis->hTIDTimer)
1597 return "TID";
1598 if (hTimer == pThis->hTADTimer)
1599 return "TAD";
1600 if (hTimer == pThis->hRIDTimer)
1601 return "RID";
1602 if (hTimer == pThis->hRADTimer)
1603 return "RAD";
1604 if (hTimer == pThis->hIntTimer)
1605 return "Int";
1606 if (hTimer == pThis->hTXDTimer)
1607 return "TXD";
1608 if (hTimer == pThis->hLUTimer)
1609 return "LinkUp";
1610 return "unknown";
1611}
1612
1613#endif /* LOG_ENABLED */
1614
1615/**
1616 * Arm a timer.
1617 *
1618 * @param pDevIns The device instance.
1619 * @param pThis Pointer to the device state structure.
1620 * @param hTimer The timer to arm.
1621 * @param uExpireIn Expiration interval in microseconds.
1622 */
1623DECLINLINE(void) e1kArmTimer(PPDMDEVINS pDevIns, PE1KSTATE pThis, TMTIMERHANDLE hTimer, uint32_t uExpireIn)
1624{
1625 if (pThis->fLocked)
1626 return;
1627
1628 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1629 pThis->szPrf, e1kGetTimerName(pThis, hTimer), uExpireIn));
1630 int rc = PDMDevHlpTimerSetMicro(pDevIns, hTimer, uExpireIn);
1631 AssertRC(rc);
1632}
1633
1634#ifdef IN_RING3
1635/**
1636 * Cancel a timer.
1637 *
1638 * @param pDevIns The device instance.
1639 * @param pThis Pointer to the device state structure.
1640 * @param hTimer Handle of the timer to cancel.
1641 */
1642DECLINLINE(void) e1kCancelTimer(PPDMDEVINS pDevIns, PE1KSTATE pThis, TMTIMERHANDLE hTimer)
1643{
1644 E1kLog2(("%s Stopping %s timer...\n",
1645 pThis->szPrf, e1kGetTimerName(pThis, hTimer)));
1646 int rc = PDMDevHlpTimerStop(pDevIns, hTimer);
1647 if (RT_FAILURE(rc))
1648 E1kLog2(("%s e1kCancelTimer: TMTimerStop(%s) failed with %Rrc\n",
1649 pThis->szPrf, e1kGetTimerName(pThis, hTimer), rc));
1650 RT_NOREF_PV(pThis);
1651}
1652#endif /* IN_RING3 */
1653
1654#define e1kCsEnter(ps, rc) PDMDevHlpCritSectEnter(pDevIns, &ps->cs, rc)
1655#define e1kCsLeave(ps) PDMDevHlpCritSectLeave(pDevIns, &ps->cs)
1656
1657#define e1kCsRxEnter(ps, rc) PDMDevHlpCritSectEnter(pDevIns, &ps->csRx, rc)
1658#define e1kCsRxLeave(ps) PDMDevHlpCritSectLeave(pDevIns, &ps->csRx)
1659#define e1kCsRxIsOwner(ps) PDMDevHlpCritSectIsOwner(pDevIns, &ps->csRx)
1660
1661#ifndef E1K_WITH_TX_CS
1662# define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1663# define e1kCsTxLeave(ps) do { } while (0)
1664#else /* E1K_WITH_TX_CS */
1665# define e1kCsTxEnter(ps, rc) PDMDevHlpCritSectEnter(pDevIns, &ps->csTx, rc)
1666# define e1kCsTxLeave(ps) PDMDevHlpCritSectLeave(pDevIns, &ps->csTx)
1667#endif /* E1K_WITH_TX_CS */
1668
1669
1670/**
1671 * Wakeup the RX thread.
1672 */
1673static void e1kWakeupReceive(PPDMDEVINS pDevIns, PE1KSTATE pThis)
1674{
1675 if ( pThis->fMaybeOutOfSpace
1676 && pThis->hEventMoreRxDescAvail != NIL_SUPSEMEVENT)
1677 {
1678 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatRxOverflowWakeup));
1679 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", pThis->szPrf));
1680 int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventMoreRxDescAvail);
1681 AssertRC(rc);
1682 }
1683}
1684
1685#ifdef IN_RING3
1686
1687/**
1688 * Hardware reset. Revert all registers to initial values.
1689 *
1690 * @param pDevIns The device instance.
1691 * @param pThis The device state structure.
1692 * @param pThisCC The current context instance data.
1693 */
1694static void e1kR3HardReset(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
1695{
1696 E1kLog(("%s Hard reset triggered\n", pThis->szPrf));
1697 /* No interrupts should survive device reset, see @bugref{9556}. */
1698 if (pThis->fIntRaised)
1699 {
1700 /* Lower(0) INTA(0) */
1701 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
1702 pThis->fIntRaised = false;
1703 E1kLog(("%s e1kR3HardReset: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
1704 }
1705 memset(pThis->auRegs, 0, sizeof(pThis->auRegs));
1706 memset(pThis->aRecAddr.au32, 0, sizeof(pThis->aRecAddr.au32));
1707#ifdef E1K_INIT_RA0
1708 memcpy(pThis->aRecAddr.au32, pThis->macConfigured.au8,
1709 sizeof(pThis->macConfigured.au8));
1710 pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;
1711#endif /* E1K_INIT_RA0 */
1712 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1713 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1714 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1715 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1716 Assert(GET_BITS(RCTL, BSIZE) == 0);
1717 pThis->u16RxBSize = 2048;
1718
1719 uint16_t u16LedCtl = 0x0602; /* LED0/LINK_UP#, LED2/LINK100# */
1720 pThisCC->eeprom.readWord(0x2F, &u16LedCtl); /* Read LEDCTL defaults from EEPROM */
1721 LEDCTL = 0x07008300 | (((uint32_t)u16LedCtl & 0xCF00) << 8) | (u16LedCtl & 0xCF); /* Only LED0 and LED2 defaults come from EEPROM */
1722
1723 /* Reset promiscuous mode */
1724 if (pThisCC->pDrvR3)
1725 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, false);
1726
1727#ifdef E1K_WITH_TXD_CACHE
1728 int rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
1729 if (RT_LIKELY(rc == VINF_SUCCESS))
1730 {
1731 pThis->nTxDFetched = 0;
1732 pThis->iTxDCurrent = 0;
1733 pThis->fGSO = false;
1734 pThis->cbTxAlloc = 0;
1735 e1kCsTxLeave(pThis);
1736 }
1737#endif /* E1K_WITH_TXD_CACHE */
1738#ifdef E1K_WITH_RXD_CACHE
1739 if (RT_LIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1740 {
1741 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
1742 e1kCsRxLeave(pThis);
1743 }
1744#endif /* E1K_WITH_RXD_CACHE */
1745#ifdef E1K_LSC_ON_RESET
1746 E1kLog(("%s Will trigger LSC in %d seconds...\n",
1747 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
1748 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, pThis->cMsLinkUpDelay * 1000);
1749#endif /* E1K_LSC_ON_RESET */
1750}
1751
1752#endif /* IN_RING3 */
1753
1754/**
1755 * Compute Internet checksum.
1756 *
1757 * @remarks Refer to http://www.netfor2.com/checksum.html for short intro.
1758 *
1759 * @param pvBuf The buffer to compute the checksum of.
1760 * @param cb The number of bytes in the buffer.
1763 *
1764 * @return The 1's complement of the 1's complement sum.
1765 *
1766 * @thread E1000_TX
1767 */
1768static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1769{
1770 uint32_t csum = 0;
1771 uint16_t *pu16 = (uint16_t *)pvBuf;
1772
1773 while (cb > 1)
1774 {
1775 csum += *pu16++;
1776 cb -= 2;
1777 }
1778 if (cb)
1779 csum += *(uint8_t*)pu16;
1780 while (csum >> 16)
1781 csum = (csum >> 16) + (csum & 0xFFFF);
1782 Assert(csum < 65536);
1783 return (uint16_t)~csum;
1784}
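/*
 * Illustrative sketch (example only, not part of the device code; the helper
 * name is hypothetical): how e1kCSum16() folds carries. For the two 16-bit
 * words 0xFFFF and 0x0002 the 32-bit sum is 0x10001, folding the carry gives
 * 0x0002, and the final complement is 0xFFFD.
 */
#if 0 /* example only, never compiled */
static void e1kCSum16Example(void)
{
    static const uint16_t s_au16Words[2] = { 0xFFFF, 0x0002 };
    uint16_t uCSum = e1kCSum16(s_au16Words, sizeof(s_au16Words));
    Assert(uCSum == 0xFFFD);
}
#endif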
1785
1786/**
1787 * Dump a packet to debug log.
1788 *
1789 * @param pDevIns The device instance.
1790 * @param pThis The device state structure.
1791 * @param cpPacket The packet.
1792 * @param cb The size of the packet.
1793 * @param pszText A string denoting direction of packet transfer.
1794 * @thread E1000_TX
1795 */
1796DECLINLINE(void) e1kPacketDump(PPDMDEVINS pDevIns, PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *pszText)
1797{
1798#ifdef DEBUG
1799 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1800 {
1801 Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
1802 pThis->szPrf, pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cb));
1803 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1804 {
1805 Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
1806 pThis->szPrf, cpPacket+14+8, cpPacket+14+24));
1807 if (*(cpPacket+14+6) == 0x6)
1808 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1809 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1810 }
1811 else if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x800)
1812 {
1813 Log4(("%s --- IPv4: %RTnaipv4 => %RTnaipv4\n",
1814 pThis->szPrf, *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16)));
1815 if (*(cpPacket+14+6) == 0x6)
1816 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1817 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1818 }
1819 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1820 e1kCsLeave(pThis);
1821 }
1822#else
1823 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1824 {
1825 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1826 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
1827 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cpPacket+14+8, cpPacket+14+24,
1828 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1829 else
1830 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
1831 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket,
1832 *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16),
1833 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1834 e1kCsLeave(pThis);
1835 }
1836 RT_NOREF2(cb, pszText);
1837#endif
1838}
1839
1840/**
1841 * Determine the type of transmit descriptor.
1842 *
1843 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1844 *
1845 * @param pDesc Pointer to descriptor union.
1846 * @thread E1000_TX
1847 */
1848DECLINLINE(int) e1kGetDescType(E1KTXDESC *pDesc)
1849{
1850 if (pDesc->legacy.cmd.fDEXT)
1851 return pDesc->context.dw2.u4DTYP;
1852 return E1K_DTYP_LEGACY;
1853}
1854
1855
1856#ifdef E1K_WITH_RXD_CACHE
1857/**
1858 * Return the number of RX descriptors that belong to the hardware.
1859 *
1860 * @returns the number of available descriptors in RX ring.
1861 * @param pThis The device state structure.
1862 * @thread ???
1863 */
1864DECLINLINE(uint32_t) e1kGetRxLen(PE1KSTATE pThis)
1865{
1866 /**
1867 * Make sure RDT won't change during computation. EMT may modify RDT at
1868 * any moment.
1869 */
1870 uint32_t rdt = RDT;
1871 return (RDH > rdt ? RDLEN/sizeof(E1KRXDESC) : 0) + rdt - RDH;
1872}
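/*
 * Illustrative sketch (example only, not part of the device code; the helper
 * is hypothetical): the wrap-around arithmetic used by e1kGetRxLen() above.
 */
#if 0 /* example only, never compiled */
static uint32_t e1kRingLenExample(uint32_t rdh, uint32_t rdt, uint32_t cDescsTotal)
{
    /* Descriptors owned by hardware: e.g. rdh=14, rdt=3, cDescsTotal=16 -> 5;
       rdh=3, rdt=14 -> 11. */
    return (rdh > rdt ? cDescsTotal : 0) + rdt - rdh;
}
#endif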
1873
1874DECLINLINE(unsigned) e1kRxDInCache(PE1KSTATE pThis)
1875{
1876 return pThis->nRxDFetched > pThis->iRxDCurrent ?
1877 pThis->nRxDFetched - pThis->iRxDCurrent : 0;
1878}
1879
1880DECLINLINE(unsigned) e1kRxDIsCacheEmpty(PE1KSTATE pThis)
1881{
1882 return pThis->iRxDCurrent >= pThis->nRxDFetched;
1883}
1884
1885/**
1886 * Load receive descriptors from guest memory. The caller needs to be in Rx
1887 * critical section.
1888 *
1889 * We need two physical reads in case the tail wrapped around the end of RX
1890 * descriptor ring.
1891 *
1892 * @returns the actual number of descriptors fetched.
1893 * @param pDevIns The device instance.
1894 * @param pThis The device state structure.
1895 * @thread EMT, RX
1896 */
1897DECLINLINE(unsigned) e1kRxDPrefetch(PPDMDEVINS pDevIns, PE1KSTATE pThis)
1898{
1899 /* We've already loaded pThis->nRxDFetched descriptors past RDH. */
1900 unsigned nDescsAvailable = e1kGetRxLen(pThis) - e1kRxDInCache(pThis);
1901 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pThis->nRxDFetched);
1902 unsigned nDescsTotal = RDLEN / sizeof(E1KRXDESC);
1903 Assert(nDescsTotal != 0);
1904 if (nDescsTotal == 0)
1905 return 0;
1906 unsigned nFirstNotLoaded = (RDH + e1kRxDInCache(pThis)) % nDescsTotal;
1907 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
1908 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
1909 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
1910 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
1911 nFirstNotLoaded, nDescsInSingleRead));
1912 if (nDescsToFetch == 0)
1913 return 0;
1914 E1KRXDESC* pFirstEmptyDesc = &pThis->aRxDescriptors[pThis->nRxDFetched];
1915 PDMDevHlpPCIPhysRead(pDevIns,
1916 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
1917 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
1918 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
1919 // unsigned i, j;
1920 // for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
1921 // {
1922 // pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
1923 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
1924 // }
1925 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
1926 pThis->szPrf, nDescsInSingleRead,
1927 RDBAH, RDBAL + RDH * sizeof(E1KRXDESC),
1928 nFirstNotLoaded, RDLEN, RDH, RDT));
1929 if (nDescsToFetch > nDescsInSingleRead)
1930 {
1931 PDMDevHlpPCIPhysRead(pDevIns,
1932 ((uint64_t)RDBAH << 32) + RDBAL,
1933 pFirstEmptyDesc + nDescsInSingleRead,
1934 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
1935 // Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
1936 // for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
1937 // {
1938 // pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
1939 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
1940 // }
1941 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
1942 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
1943 RDBAH, RDBAL));
1944 }
1945 pThis->nRxDFetched += nDescsToFetch;
1946 return nDescsToFetch;
1947}
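/*
 * Illustrative sketch (example only, not part of the device code; names are
 * hypothetical): why e1kRxDPrefetch() may need two physical reads. With 16
 * descriptors in the ring and the first unloaded descriptor at index 14,
 * fetching 4 descriptors is split into a read of 2 (indices 14-15) and a
 * second read of 2 (indices 0-1) from the start of the ring.
 */
#if 0 /* example only, never compiled */
static void e1kPrefetchSplitExample(void)
{
    unsigned const cDescsTotal      = 16;
    unsigned const iFirstNotLoaded  = 14;
    unsigned const cDescsToFetch    = 4;
    unsigned const cDescsFirstRead  = RT_MIN(cDescsToFetch, cDescsTotal - iFirstNotLoaded); /* = 2 */
    unsigned const cDescsSecondRead = cDescsToFetch - cDescsFirstRead;                      /* = 2 */
    RT_NOREF(cDescsSecondRead);
}
#endif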
1948
1949# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
1950/**
1951 * Dump receive descriptor to debug log.
1952 *
1953 * @param pThis The device state structure.
1954 * @param pDesc Pointer to the descriptor.
1955 * @thread E1000_RX
1956 */
1957static void e1kPrintRDesc(PE1KSTATE pThis, E1KRXDESC *pDesc)
1958{
1959 RT_NOREF2(pThis, pDesc);
1960 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", pThis->szPrf, pDesc->u16Length));
1961 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1962 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1963 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1964 pDesc->status.fPIF ? "PIF" : "pif",
1965 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1966 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1967 pDesc->status.fVP ? "VP" : "vp",
1968 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1969 pDesc->status.fEOP ? "EOP" : "eop",
1970 pDesc->status.fDD ? "DD" : "dd",
1971 pDesc->status.fRXE ? "RXE" : "rxe",
1972 pDesc->status.fIPE ? "IPE" : "ipe",
1973 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1974 pDesc->status.fCE ? "CE" : "ce",
1975 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1976 E1K_SPEC_VLAN(pDesc->status.u16Special),
1977 E1K_SPEC_PRI(pDesc->status.u16Special)));
1978}
1979# endif /* IN_RING3 */
1980#endif /* E1K_WITH_RXD_CACHE */
1981
1982/**
1983 * Dump transmit descriptor to debug log.
1984 *
1985 * @param pThis The device state structure.
1986 * @param pDesc Pointer to descriptor union.
1987 * @param pszDir A string denoting direction of descriptor transfer
1988 * @thread E1000_TX
1989 */
1990static void e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, const char *pszDir,
1991 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
1992{
1993 RT_NOREF4(pThis, pDesc, pszDir, uLevel);
1994
1995 /*
1996 * Unfortunately we cannot use our format handler here, we want R0 logging
1997 * as well.
1998 */
1999 switch (e1kGetDescType(pDesc))
2000 {
2001 case E1K_DTYP_CONTEXT:
2002 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
2003 pThis->szPrf, pszDir, pszDir));
2004 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
2005 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
2006 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
2007 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
2008 pDesc->context.dw2.fIDE ? " IDE":"",
2009 pDesc->context.dw2.fRS ? " RS" :"",
2010 pDesc->context.dw2.fTSE ? " TSE":"",
2011 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
2012 pDesc->context.dw2.fTCP ? "TCP":"UDP",
2013 pDesc->context.dw2.u20PAYLEN,
2014 pDesc->context.dw3.u8HDRLEN,
2015 pDesc->context.dw3.u16MSS,
2016 pDesc->context.dw3.fDD?"DD":""));
2017 break;
2018 case E1K_DTYP_DATA:
2019 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
2020 pThis->szPrf, pszDir, pDesc->data.cmd.u20DTALEN, pszDir));
2021 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
2022 pDesc->data.u64BufAddr,
2023 pDesc->data.cmd.u20DTALEN));
2024 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
2025 pDesc->data.cmd.fIDE ? " IDE" :"",
2026 pDesc->data.cmd.fVLE ? " VLE" :"",
2027 pDesc->data.cmd.fRPS ? " RPS" :"",
2028 pDesc->data.cmd.fRS ? " RS" :"",
2029 pDesc->data.cmd.fTSE ? " TSE" :"",
2030 pDesc->data.cmd.fIFCS? " IFCS":"",
2031 pDesc->data.cmd.fEOP ? " EOP" :"",
2032 pDesc->data.dw3.fDD ? " DD" :"",
2033 pDesc->data.dw3.fEC ? " EC" :"",
2034 pDesc->data.dw3.fLC ? " LC" :"",
2035 pDesc->data.dw3.fTXSM? " TXSM":"",
2036 pDesc->data.dw3.fIXSM? " IXSM":"",
2037 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
2038 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
2039 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
2040 break;
2041 case E1K_DTYP_LEGACY:
2042 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
2043 pThis->szPrf, pszDir, pDesc->legacy.cmd.u16Length, pszDir));
2044 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
2045 pDesc->data.u64BufAddr,
2046 pDesc->legacy.cmd.u16Length));
2047 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
2048 pDesc->legacy.cmd.fIDE ? " IDE" :"",
2049 pDesc->legacy.cmd.fVLE ? " VLE" :"",
2050 pDesc->legacy.cmd.fRPS ? " RPS" :"",
2051 pDesc->legacy.cmd.fRS ? " RS" :"",
2052 pDesc->legacy.cmd.fIC ? " IC" :"",
2053 pDesc->legacy.cmd.fIFCS? " IFCS":"",
2054 pDesc->legacy.cmd.fEOP ? " EOP" :"",
2055 pDesc->legacy.dw3.fDD ? " DD" :"",
2056 pDesc->legacy.dw3.fEC ? " EC" :"",
2057 pDesc->legacy.dw3.fLC ? " LC" :"",
2058 pDesc->legacy.cmd.u8CSO,
2059 pDesc->legacy.dw3.u8CSS,
2060 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
2061 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
2062 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
2063 break;
2064 default:
2065 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
2066 pThis->szPrf, pszDir, pszDir));
2067 break;
2068 }
2069}
2070
2071/**
2072 * Raise an interrupt later.
2073 *
2074 * @param pThis The device state structure.
2075 */
2076DECLINLINE(void) e1kPostponeInterrupt(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint64_t nsDeadline)
2077{
2078 if (!PDMDevHlpTimerIsActive(pDevIns, pThis->hIntTimer))
2079 PDMDevHlpTimerSetNano(pDevIns, pThis->hIntTimer, nsDeadline);
2080}
2081
2082/**
2083 * Raise interrupt if not masked.
2084 *
2085 * @param pThis The device state structure.
2086 */
2087static int e1kRaiseInterrupt(PPDMDEVINS pDevIns, PE1KSTATE pThis, int rcBusy, uint32_t u32IntCause = 0)
2088{
2089 int rc = e1kCsEnter(pThis, rcBusy);
2090 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2091 return rc;
2092
2093 E1K_INC_ISTAT_CNT(pThis->uStatIntTry);
2094 ICR |= u32IntCause;
2095 if (ICR & IMS)
2096 {
2097 if (pThis->fIntRaised)
2098 {
2099 E1K_INC_ISTAT_CNT(pThis->uStatIntSkip);
2100 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
2101 pThis->szPrf, ICR & IMS));
2102 }
2103 else
2104 {
2105 uint64_t tsNow = PDMDevHlpTimerGet(pDevIns, pThis->hIntTimer);
2106 if (!!ITR && tsNow - pThis->u64AckedAt < ITR * 256
2107 && pThis->fItrEnabled && (pThis->fItrRxEnabled || !(ICR & ICR_RXT0)))
2108 {
2109 E1K_INC_ISTAT_CNT(pThis->uStatIntEarly);
2110 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
2111 pThis->szPrf, (uint32_t)(tsNow - pThis->u64AckedAt), ITR * 256));
2112 e1kPostponeInterrupt(pDevIns, pThis, ITR * 256);
2113 }
2114 else
2115 {
2116
2117 /* Since we are delivering the interrupt now
2118 * there is no need to do it later -- stop the timer.
2119 */
2120 PDMDevHlpTimerStop(pDevIns, pThis->hIntTimer);
2121 E1K_INC_ISTAT_CNT(pThis->uStatInt);
2122 STAM_COUNTER_INC(&pThis->StatIntsRaised);
2123 /* Got at least one unmasked interrupt cause */
2124 pThis->fIntRaised = true;
2125 /* Raise(1) INTA(0) */
2126 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
2127 PDMDevHlpPCISetIrq(pDevIns, 0, 1);
2128 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
2129 pThis->szPrf, ICR & IMS));
2130 }
2131 }
2132 }
2133 else
2134 {
2135 E1K_INC_ISTAT_CNT(pThis->uStatIntMasked);
2136 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
2137 pThis->szPrf, ICR, IMS));
2138 }
2139 e1kCsLeave(pThis);
2140 return VINF_SUCCESS;
2141}
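/*
 * Illustrative sketch (example only, not part of the device code; the helper
 * is hypothetical): the interrupt throttling interval used above. ITR is
 * expressed in 256 ns units, so ITR = 1000 enforces a minimum of
 * 1000 * 256 = 256000 ns between interrupts (at most ~3906 interrupts/s);
 * anything arriving earlier is postponed via e1kPostponeInterrupt().
 */
#if 0 /* example only, never compiled */
static uint64_t e1kItrIntervalExample(uint32_t uItr)
{
    return (uint64_t)uItr * 256; /* minimum inter-interrupt interval in ns */
}
#endif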
2142
2143/**
2144 * Compute the physical address of the descriptor.
2145 *
2146 * @returns the physical address of the descriptor.
2147 *
2148 * @param baseHigh High-order 32 bits of descriptor table address.
2149 * @param baseLow Low-order 32 bits of descriptor table address.
2150 * @param idxDesc The descriptor index in the table.
2151 */
2152DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
2153{
2154 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
2155 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
2156}
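/*
 * Illustrative sketch (example only, not part of the device code): the address
 * math in e1kDescAddr() with 16-byte descriptors. RDBAH=0x00000001,
 * RDBAL=0x00020000 and index 3 yield 0x100020000 + 3 * 16 = 0x100020030.
 */
#if 0 /* example only, never compiled */
static void e1kDescAddrExample(void)
{
    RTGCPHYS const GCPhys = e1kDescAddr(0x00000001, 0x00020000, 3);
    Assert(GCPhys == UINT64_C(0x100020030));
}
#endif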
2157
2158#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2159/**
2160 * Advance the head pointer of the receive descriptor queue.
2161 *
2162 * @remarks RDH always points to the next available RX descriptor.
2163 *
2164 * @param pDevIns The device instance.
2165 * @param pThis The device state structure.
2166 */
2167DECLINLINE(void) e1kAdvanceRDH(PPDMDEVINS pDevIns, PE1KSTATE pThis)
2168{
2169 Assert(e1kCsRxIsOwner(pThis));
2170 //e1kCsEnter(pThis, RT_SRC_POS);
2171 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
2172 RDH = 0;
2173#ifdef E1K_WITH_RXD_CACHE
2174 /*
2175 * We need to fetch descriptors now as the guest may advance RDT all the way
2176 * to RDH as soon as we generate RXDMT0 interrupt. This is mostly to provide
2177 * compatibility with Phar Lap ETS, see @bugref{7346}. Note that we do not
2178 * check if the receiver is enabled. It must be, otherwise we won't get here
2179 * in the first place.
2180 *
2181 * Note that we should have moved both RDH and iRxDCurrent by now.
2182 */
2183 if (e1kRxDIsCacheEmpty(pThis))
2184 {
2185 /* Cache is empty, reset it and check if we can fetch more. */
2186 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2187 E1kLog3(("%s e1kAdvanceRDH: Rx cache is empty, RDH=%x RDT=%x "
2188 "iRxDCurrent=%x nRxDFetched=%x\n",
2189 pThis->szPrf, RDH, RDT, pThis->iRxDCurrent, pThis->nRxDFetched));
2190 e1kRxDPrefetch(pDevIns, pThis);
2191 }
2192#endif /* E1K_WITH_RXD_CACHE */
2193 /*
2194 * Compute current receive queue length and fire RXDMT0 interrupt
2195 * if we are low on receive buffers
2196 */
2197 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
2198 /*
2199 * The minimum threshold is controlled by RDMTS bits of RCTL:
2200 * 00 = 1/2 of RDLEN
2201 * 01 = 1/4 of RDLEN
2202 * 10 = 1/8 of RDLEN
2203 * 11 = reserved
2204 */
2205 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
2206 if (uRQueueLen <= uMinRQThreshold)
2207 {
2208 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
2209 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
2210 pThis->szPrf, RDH, RDT, uRQueueLen, uMinRQThreshold));
2211 E1K_INC_ISTAT_CNT(pThis->uStatIntRXDMT0);
2212 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXDMT0);
2213 }
2214 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
2215 pThis->szPrf, RDH, RDT, uRQueueLen));
2216 //e1kCsLeave(pThis);
2217}
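/*
 * Illustrative sketch (example only, not part of the device code; the helper
 * is hypothetical): the RXDMT0 threshold computed above. With a 256-descriptor
 * ring, RCTL.RDMTS = 01b selects 1/4 of the ring, i.e. 256 / (2 << 1) = 64
 * descriptors; the interrupt fires once 64 or fewer descriptors remain.
 */
#if 0 /* example only, never compiled */
static uint32_t e1kRdmt0ThresholdExample(uint32_t cDescsTotal, uint32_t uRdmts)
{
    /* RDMTS: 00b -> 1/2, 01b -> 1/4, 10b -> 1/8 of the ring size. */
    return cDescsTotal / (2 << uRdmts);
}
#endif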
2218#endif /* IN_RING3 */
2219
2220#ifdef E1K_WITH_RXD_CACHE
2221
2222# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2223
2224/**
2225 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2226 * RX ring if the cache is empty.
2227 *
2228 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2229 * go out of sync with RDH which will cause trouble when EMT checks if the
2230 * cache is empty to do pre-fetch, see @bugref{6217}.
2231 *
2232 * @param pDevIns The device instance.
2233 * @param pThis The device state structure.
2234 * @thread RX
2235 */
2236DECLINLINE(E1KRXDESC *) e1kRxDGet(PPDMDEVINS pDevIns, PE1KSTATE pThis)
2237{
2238 Assert(e1kCsRxIsOwner(pThis));
2239 /* Check the cache first. */
2240 if (pThis->iRxDCurrent < pThis->nRxDFetched)
2241 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2242 /* Cache is empty, reset it and check if we can fetch more. */
2243 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2244 if (e1kRxDPrefetch(pDevIns, pThis))
2245 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2246 /* Out of Rx descriptors. */
2247 return NULL;
2248}
2249
2250
2251/**
2252 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2253 * pointer. The descriptor gets written back to the RXD ring.
2254 *
2255 * @param pDevIns The device instance.
2256 * @param pThis The device state structure.
2257 * @param pDesc The descriptor being "returned" to the RX ring.
2258 * @thread RX
2259 */
2260DECLINLINE(void) e1kRxDPut(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC* pDesc)
2261{
2262 Assert(e1kCsRxIsOwner(pThis));
2263 pThis->iRxDCurrent++;
2264 // Assert(pDesc >= pThis->aRxDescriptors);
2265 // Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2266 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2267 // uint32_t rdh = RDH;
2268 // Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
2269 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2270 /*
2271 * We need to print the descriptor before advancing RDH as it may fetch new
2272 * descriptors into the cache.
2273 */
2274 e1kPrintRDesc(pThis, pDesc);
2275 e1kAdvanceRDH(pDevIns, pThis);
2276}
2277
2278/**
2279 * Store a fragment of a received packet at the specified address.
2280 *
2281 * @param pDevIns The device instance.
2282 * @param pThis The device state structure.
2283 * @param pDesc The next available RX descriptor.
2284 * @param pvBuf The fragment.
2285 * @param cb The size of the fragment.
2286 */
2287static void e1kStoreRxFragment(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2288{
2289 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2290 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2291 pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2292 PDMDevHlpPCIPhysWrite(pDevIns, pDesc->u64BufAddr, pvBuf, cb);
2293 pDesc->u16Length = (uint16_t)cb;
2294 Assert(pDesc->u16Length == cb);
2295 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2296 RT_NOREF(pThis);
2297}
2298
2299# endif /* IN_RING3 */
2300
2301#else /* !E1K_WITH_RXD_CACHE */
2302
2303/**
2304 * Store a fragment of a received packet that fits into the next available RX
2305 * buffer.
2306 *
2307 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2308 *
2309 * @param pDevIns The device instance.
2310 * @param pThis The device state structure.
2311 * @param pDesc The next available RX descriptor.
2312 * @param pvBuf The fragment.
2313 * @param cb The size of the fragment.
2314 */
2315static void e1kStoreRxFragment(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2316{
2317 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2318 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2319 PDMDevHlpPCIPhysWrite(pDevIns, pDesc->u64BufAddr, pvBuf, cb);
2320 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2321 /* Write back the descriptor */
2322 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2323 e1kPrintRDesc(pThis, pDesc);
2324 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2325 /* Advance head */
2326 e1kAdvanceRDH(pDevIns, pThis);
2327 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
2328 if (pDesc->status.fEOP)
2329 {
2330 /* Complete packet has been stored -- it is time to let the guest know. */
2331#ifdef E1K_USE_RX_TIMERS
2332 if (RDTR)
2333 {
2334 /* Arm the timer to fire in RDTR usec (discard .024) */
2335 e1kArmTimer(pDevIns, pThis, pThis->hRIDTimer, RDTR);
2336 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2337 if (RADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hRADTimer))
2338 e1kArmTimer(pDevIns, pThis, pThis->hRADTimer, RADV);
2339 }
2340 else
2341 {
2342#endif
2343 /* 0 delay means immediate interrupt */
2344 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2345 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
2346#ifdef E1K_USE_RX_TIMERS
2347 }
2348#endif
2349 }
2350 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2351}
2352
2353#endif /* !E1K_WITH_RXD_CACHE */
2354
2355/**
2356 * Returns true if it is a broadcast packet.
2357 *
2358 * @returns true if destination address indicates broadcast.
2359 * @param pvBuf The ethernet packet.
2360 */
2361DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2362{
2363 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2364 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2365}
2366
2367/**
2368 * Returns true if it is a multicast packet.
2369 *
2370 * @remarks returns true for broadcast packets as well.
2371 * @returns true if destination address indicates multicast.
2372 * @param pvBuf The ethernet packet.
2373 */
2374DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2375{
2376 return (*(char*)pvBuf) & 1;
2377}
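/*
 * Illustrative sketch (example only, not part of the device code; the helper
 * is hypothetical): the test above keys off the I/G bit, the least significant
 * bit of the first destination byte. 01:00:5E:xx:xx:xx (IPv4 multicast) and
 * FF:FF:FF:FF:FF:FF (broadcast) have it set, 52:54:00:xx:xx:xx does not.
 */
#if 0 /* example only, never compiled */
static void e1kMulticastExample(void)
{
    static const uint8_t s_abMcast[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x01 };
    static const uint8_t s_abUcast[6] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };
    Assert( e1kIsMulticast(s_abMcast));
    Assert(!e1kIsMulticast(s_abUcast));
}
#endif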
2378
2379#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2380/**
2381 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2382 *
2383 * @remarks We emulate checksum offloading for major packets types only.
2384 *
2385 * @returns VBox status code.
2386 * @param pThis The device state structure.
2387 * @param pFrame The received frame.
2388 * @param cb Number of bytes available in the frame.
2389 * @param pStatus Where to store the checksum status bits (IXSM, IPCS, TCPCS).
2390 */
2391static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2392{
2393 /** @todo
2394 * It is not safe to bypass checksum verification for packets coming
2395 * from the real wire. We are currently unable to tell where packets
2396 * are coming from, so we tell the driver to ignore our checksum flags
2397 * and do the verification in software.
2398 */
2399# if 0
2400 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2401
2402 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", pThis->szPrf, uEtherType));
2403
2404 switch (uEtherType)
2405 {
2406 case 0x800: /* IPv4 */
2407 {
2408 pStatus->fIXSM = false;
2409 pStatus->fIPCS = true;
2410 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2411 /* TCP/UDP checksum offloading works with TCP and UDP only */
2412 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2413 break;
2414 }
2415 case 0x86DD: /* IPv6 */
2416 pStatus->fIXSM = false;
2417 pStatus->fIPCS = false;
2418 pStatus->fTCPCS = true;
2419 break;
2420 default: /* ARP, VLAN, etc. */
2421 pStatus->fIXSM = true;
2422 break;
2423 }
2424# else
2425 pStatus->fIXSM = true;
2426 RT_NOREF_PV(pThis); RT_NOREF_PV(pFrame); RT_NOREF_PV(cb);
2427# endif
2428 return VINF_SUCCESS;
2429}
2430#endif /* IN_RING3 */
2431
2432/**
2433 * Pad and store received packet.
2434 *
2435 * @remarks Make sure that the packet appears to upper layer as one coming
2436 * from real Ethernet: pad it and insert FCS.
2437 *
2438 * @returns VBox status code.
2439 * @param pDevIns The device instance.
2440 * @param pThis The device state structure.
2441 * @param pvBuf The available data.
2442 * @param cb Number of bytes available in the buffer.
2443 * @param status Bit fields containing status info.
2444 */
2445static int e1kHandleRxPacket(PPDMDEVINS pDevIns, PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
2446{
2447#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2448 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2449 uint8_t *ptr = rxPacket;
2450
2451 int rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2452 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2453 return rc;
2454
2455 if (cb > 70) /* unqualified guess */
2456 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
2457
2458 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2459 Assert(cb > 16);
2460 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2461 E1kLog3(("%s Max RX packet size is %u\n", pThis->szPrf, cbMax));
2462 if (status.fVP)
2463 {
2464 /* VLAN packet -- strip VLAN tag in VLAN mode */
2465 if ((CTRL & CTRL_VME) && cb > 16)
2466 {
2467 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2468 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2469 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2470 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2471 cb -= 4;
2472 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2473 pThis->szPrf, status.u16Special, cb));
2474 }
2475 else
2476 {
2477 status.fVP = false; /* Set VP only if we stripped the tag */
2478 memcpy(rxPacket, pvBuf, cb);
2479 }
2480 }
2481 else
2482 memcpy(rxPacket, pvBuf, cb);
2483 /* Pad short packets */
2484 if (cb < 60)
2485 {
2486 memset(rxPacket + cb, 0, 60 - cb);
2487 cb = 60;
2488 }
2489 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2490 {
2491 STAM_PROFILE_ADV_START(&pThis->StatReceiveCRC, a);
2492 /*
2493 * Add FCS if CRC stripping is not enabled. Since the value of CRC
2494 * is ignored by most of drivers we may as well save us the trouble
2495 * of calculating it (see EthernetCRC CFGM parameter).
2496 */
2497 if (pThis->fEthernetCRC)
2498 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2499 cb += sizeof(uint32_t);
2500 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveCRC, a);
2501 E1kLog3(("%s Added FCS (cb=%u)\n", pThis->szPrf, cb));
2502 }
2503 /* Compute checksum of complete packet */
2504 size_t cbCSumStart = RT_MIN(GET_BITS(RXCSUM, PCSS), cb);
2505 uint16_t checksum = e1kCSum16(rxPacket + cbCSumStart, cb - cbCSumStart);
2506 e1kRxChecksumOffload(pThis, rxPacket, cb, &status);
2507
2508 /* Update stats */
2509 E1K_INC_CNT32(GPRC);
2510 if (e1kIsBroadcast(pvBuf))
2511 E1K_INC_CNT32(BPRC);
2512 else if (e1kIsMulticast(pvBuf))
2513 E1K_INC_CNT32(MPRC);
2514 /* Update octet receive counter */
2515 E1K_ADD_CNT64(GORCL, GORCH, cb);
2516 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
2517 if (cb == 64)
2518 E1K_INC_CNT32(PRC64);
2519 else if (cb < 128)
2520 E1K_INC_CNT32(PRC127);
2521 else if (cb < 256)
2522 E1K_INC_CNT32(PRC255);
2523 else if (cb < 512)
2524 E1K_INC_CNT32(PRC511);
2525 else if (cb < 1024)
2526 E1K_INC_CNT32(PRC1023);
2527 else
2528 E1K_INC_CNT32(PRC1522);
2529
2530 E1K_INC_ISTAT_CNT(pThis->uStatRxFrm);
2531
2532# ifdef E1K_WITH_RXD_CACHE
2533 while (cb > 0)
2534 {
2535 E1KRXDESC *pDesc = e1kRxDGet(pDevIns, pThis);
2536
2537 if (pDesc == NULL)
2538 {
2539 E1kLog(("%s Out of receive buffers, dropping the packet "
2540 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2541 pThis->szPrf, cb, e1kRxDInCache(pThis), RDH, RDT));
2542 break;
2543 }
2544# else /* !E1K_WITH_RXD_CACHE */
2545 if (RDH == RDT)
2546 {
2547 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2548 pThis->szPrf));
2549 }
2550 /* Store the packet to receive buffers */
2551 while (RDH != RDT)
2552 {
2553 /* Load the descriptor pointed by head */
2554 E1KRXDESC desc, *pDesc = &desc;
2555 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
2556# endif /* !E1K_WITH_RXD_CACHE */
2557 if (pDesc->u64BufAddr)
2558 {
2559 uint16_t u16RxBufferSize = pThis->u16RxBSize; /* see @bugref{9427} */
2560
2561 /* Update descriptor */
2562 pDesc->status = status;
2563 pDesc->u16Checksum = checksum;
2564 pDesc->status.fDD = true;
2565
2566 /*
2567 * We need to leave Rx critical section here or we risk deadlocking
2568 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2569 * page or has an access handler associated with it.
2570 * Note that it is safe to leave the critical section here since
2571 * e1kRegWriteRDT() never modifies RDH. It never touches already
2572 * fetched RxD cache entries either.
2573 */
2574 if (cb > u16RxBufferSize)
2575 {
2576 pDesc->status.fEOP = false;
2577 e1kCsRxLeave(pThis);
2578 e1kStoreRxFragment(pDevIns, pThis, pDesc, ptr, u16RxBufferSize);
2579 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2580 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2581 return rc;
2582 ptr += u16RxBufferSize;
2583 cb -= u16RxBufferSize;
2584 }
2585 else
2586 {
2587 pDesc->status.fEOP = true;
2588 e1kCsRxLeave(pThis);
2589 e1kStoreRxFragment(pDevIns, pThis, pDesc, ptr, cb);
2590# ifdef E1K_WITH_RXD_CACHE
2591 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2592 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2593 return rc;
2594 cb = 0;
2595# else /* !E1K_WITH_RXD_CACHE */
2596 pThis->led.Actual.s.fReading = 0;
2597 return VINF_SUCCESS;
2598# endif /* !E1K_WITH_RXD_CACHE */
2599 }
2600 /*
2601 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2602 * is not defined.
2603 */
2604 }
2605# ifdef E1K_WITH_RXD_CACHE
2606 /* Write back the descriptor. */
2607 pDesc->status.fDD = true;
2608 e1kRxDPut(pDevIns, pThis, pDesc);
2609# else /* !E1K_WITH_RXD_CACHE */
2610 else
2611 {
2612 /* Write back the descriptor. */
2613 pDesc->status.fDD = true;
2614 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2615 e1kAdvanceRDH(pDevIns, pThis);
2616 }
2617# endif /* !E1K_WITH_RXD_CACHE */
2618 }
2619
2620 if (cb > 0)
2621 E1kLog(("%s Out of receive buffers, dropping %u bytes\n", pThis->szPrf, cb));
2622
2623 pThis->led.Actual.s.fReading = 0;
2624
2625 e1kCsRxLeave(pThis);
2626# ifdef E1K_WITH_RXD_CACHE
2627 /* Complete packet has been stored -- it is time to let the guest know. */
2628# ifdef E1K_USE_RX_TIMERS
2629 if (RDTR)
2630 {
2631 /* Arm the timer to fire in RDTR usec (discard .024) */
2632 e1kArmTimer(pDevIns, pThis, pThis->hRIDTimer, RDTR);
2633 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2634 if (RADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hRADTimer))
2635 e1kArmTimer(pDevIns, pThis, pThis->hRADTimer, RADV);
2636 }
2637 else
2638 {
2639# endif /* E1K_USE_RX_TIMERS */
2640 /* 0 delay means immediate interrupt */
2641 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2642 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
2643# ifdef E1K_USE_RX_TIMERS
2644 }
2645# endif /* E1K_USE_RX_TIMERS */
2646# endif /* E1K_WITH_RXD_CACHE */
2647
2648 return VINF_SUCCESS;
2649#else /* !IN_RING3 */
2650 RT_NOREF(pDevIns, pThis, pvBuf, cb, status);
2651 return VERR_INTERNAL_ERROR_2;
2652#endif /* !IN_RING3 */
2653}
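/*
 * Illustrative sketch (example only, not part of the device code; the helper
 * and its parameters are hypothetical): the VLAN tag stripping performed in
 * e1kHandleRxPacket(). In a tagged frame the TPID (0x8100) occupies bytes
 * 12-13 and the TCI bytes 14-15, so u16Ptr[7] is the TCI; the 12 address bytes
 * are kept, the 4 tag bytes are dropped and the rest of the frame is moved up.
 */
#if 0 /* example only, never compiled */
static void e1kVlanStripExample(const uint8_t *pbTagged, size_t cbTagged, uint8_t *pbOut, uint16_t *pu16Tci)
{
    memcpy(pbOut, pbTagged, 12);                                /* dst + src MAC addresses */
    *pu16Tci = RT_BE2H_U16(*(const uint16_t *)(pbTagged + 14)); /* TCI following the 0x8100 TPID */
    memcpy(pbOut + 12, pbTagged + 16, cbTagged - 16);           /* payload with the tag removed */
}
#endif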
2654
2655
2656#ifdef IN_RING3
2657/**
2658 * Bring the link up after the configured delay, 5 seconds by default.
2659 *
2660 * @param pDevIns The device instance.
2661 * @param pThis The device state structure.
2662 * @thread any
2663 */
2664DECLINLINE(void) e1kBringLinkUpDelayed(PPDMDEVINS pDevIns, PE1KSTATE pThis)
2665{
2666 E1kLog(("%s Will bring up the link in %d seconds...\n",
2667 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
2668 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, pThis->cMsLinkUpDelay * 1000);
2669}
2670
2671/**
2672 * Bring up the link immediately.
2673 *
2674 * @param pDevIns The device instance.
2675 * @param pThis The device state structure.
2676 * @param pThisCC The current context instance data.
2677 */
2678DECLINLINE(void) e1kR3LinkUp(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2679{
2680 E1kLog(("%s Link is up\n", pThis->szPrf));
2681 STATUS |= STATUS_LU;
2682 Phy::setLinkStatus(&pThis->phy, true);
2683 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2684 if (pThisCC->pDrvR3)
2685 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_UP);
2686 /* Trigger processing of pending TX descriptors (see @bugref{8942}). */
2687 PDMDevHlpTaskTrigger(pDevIns, pThis->hTxTask);
2688}
2689
2690/**
2691 * Bring down the link immediately.
2692 *
2693 * @param pDevIns The device instance.
2694 * @param pThis The device state structure.
2695 * @param pThisCC The current context instance data.
2696 */
2697DECLINLINE(void) e1kR3LinkDown(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2698{
2699 E1kLog(("%s Link is down\n", pThis->szPrf));
2700 STATUS &= ~STATUS_LU;
2701#ifdef E1K_LSC_ON_RESET
2702 Phy::setLinkStatus(&pThis->phy, false);
2703#endif /* E1K_LSC_ON_RESET */
2704 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2705 if (pThisCC->pDrvR3)
2706 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2707}
2708
2709/**
2710 * Bring down the link temporarily.
2711 *
2712 * @param pDevIns The device instance.
2713 * @param pThis The device state structure.
2714 * @param pThisCC The current context instance data.
2715 */
2716DECLINLINE(void) e1kR3LinkDownTemp(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2717{
2718 E1kLog(("%s Link is down temporarily\n", pThis->szPrf));
2719 STATUS &= ~STATUS_LU;
2720 Phy::setLinkStatus(&pThis->phy, false);
2721 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2722 /*
2723 * Notifying the associated driver that the link went down (even temporarily)
2724 * seems to be the right thing, but it was not done before. This may cause
2725 * a regression if the driver does not expect the link to go down as a result
2726 * of sending PDMNETWORKLINKSTATE_DOWN_RESUME to this device. Earlier versions
2727 * of the code notified the driver that the link was up! See @bugref{7057}.
2728 */
2729 if (pThisCC->pDrvR3)
2730 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2731 e1kBringLinkUpDelayed(pDevIns, pThis);
2732}
2733#endif /* IN_RING3 */
2734
2735#if 0 /* unused */
2736/**
2737 * Read handler for Device Status register.
2738 *
2739 * Get the link status from PHY.
2740 *
2741 * @returns VBox status code.
2742 *
2743 * @param pThis The device state structure.
2744 * @param offset Register offset in memory-mapped frame.
2745 * @param index Register index in register array.
2746 * @param mask Used to implement partial reads (8 and 16-bit).
2747 */
2748static int e1kRegReadCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2749{
2750 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2751 pThis->szPrf, (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2752 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2753 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2754 {
2755 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2756 if (Phy::readMDIO(&pThis->phy))
2757 *pu32Value = CTRL | CTRL_MDIO;
2758 else
2759 *pu32Value = CTRL & ~CTRL_MDIO;
2760 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2761 pThis->szPrf, !!(*pu32Value & CTRL_MDIO)));
2762 }
2763 else
2764 {
2765 /* MDIO pin is used for output, ignore it */
2766 *pu32Value = CTRL;
2767 }
2768 return VINF_SUCCESS;
2769}
2770#endif /* unused */
2771
2772/**
2773 * A callback used by PHY to indicate that the link needs to be updated due to
2774 * reset of PHY.
2775 *
2776 * @param pDevIns The device instance.
2777 * @thread any
2778 */
2779void e1kPhyLinkResetCallback(PPDMDEVINS pDevIns)
2780{
2781 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
2782
2783 /* Make sure we have cable connected and MAC can talk to PHY */
2784 if (pThis->fCableConnected && (CTRL & CTRL_SLU))
2785 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, E1K_INIT_LINKUP_DELAY_US);
2786}
2787
2788/**
2789 * Write handler for Device Control register.
2790 *
2791 * Handles reset.
2792 *
2793 * @param pThis The device state structure.
2794 * @param offset Register offset in memory-mapped frame.
2795 * @param index Register index in register array.
2796 * @param value The value to store.
2797 * @param mask Used to implement partial writes (8 and 16-bit).
2798 * @thread EMT
2799 */
2800static int e1kRegWriteCTRL(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2801{
2802 int rc = VINF_SUCCESS;
2803
2804 if (value & CTRL_RESET)
2805 { /* RST */
2806#ifndef IN_RING3
2807 return VINF_IOM_R3_MMIO_WRITE;
2808#else
2809 e1kR3HardReset(pDevIns, pThis, PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC));
2810#endif
2811 }
2812 else
2813 {
2814#ifdef E1K_LSC_ON_SLU
2815 /*
2816 * When the guest changes 'Set Link Up' bit from 0 to 1 we check if
2817 * the link is down and the cable is connected, and if they are we
2818 * bring the link up, see @bugref{8624}.
2819 */
2820 if ( (value & CTRL_SLU)
2821 && !(CTRL & CTRL_SLU)
2822 && pThis->fCableConnected
2823 && !(STATUS & STATUS_LU))
2824 {
2825 /* It should take about 2 seconds for the link to come up */
2826 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, E1K_INIT_LINKUP_DELAY_US);
2827 }
2828#else /* !E1K_LSC_ON_SLU */
2829 if ( (value & CTRL_SLU)
2830 && !(CTRL & CTRL_SLU)
2831 && pThis->fCableConnected
2832 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hLUTimer))
2833 {
2834 /* PXE does not use LSC interrupts, see @bugref{9113}. */
2835 STATUS |= STATUS_LU;
2836 }
2837#endif /* !E1K_LSC_ON_SLU */
2838 if ((value & CTRL_VME) != (CTRL & CTRL_VME))
2839 {
2840 E1kLog(("%s VLAN Mode %s\n", pThis->szPrf, (value & CTRL_VME) ? "Enabled" : "Disabled"));
2841 }
2842 Log7(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2843 pThis->szPrf, (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2844 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2845 if (value & CTRL_MDC)
2846 {
2847 if (value & CTRL_MDIO_DIR)
2848 {
2849 Log7(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2850 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2851 Phy::writeMDIO(&pThis->phy, !!(value & CTRL_MDIO), pDevIns);
2852 }
2853 else
2854 {
2855 if (Phy::readMDIO(&pThis->phy))
2856 value |= CTRL_MDIO;
2857 else
2858 value &= ~CTRL_MDIO;
2859 Log7(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2860 }
2861 }
2862 rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
2863 }
2864
2865 return rc;
2866}
2867
2868/**
2869 * Write handler for EEPROM/Flash Control/Data register.
2870 *
2871 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2872 *
2873 * @param pThis The device state structure.
2874 * @param offset Register offset in memory-mapped frame.
2875 * @param index Register index in register array.
2876 * @param value The value to store.
2877 * @param mask Used to implement partial writes (8 and 16-bit).
2878 * @thread EMT
2879 */
2880static int e1kRegWriteEECD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2881{
2882 RT_NOREF(pDevIns, offset, index);
2883#ifdef IN_RING3
2884 /* So far we are concerned with lower byte only */
2885 if ((EECD & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2886 {
2887 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2888 /* Note: 82543GC does not need to request EEPROM access */
2889 STAM_PROFILE_ADV_START(&pThis->StatEEPROMWrite, a);
2890 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
2891 pThisCC->eeprom.write(value & EECD_EE_WIRES);
2892 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMWrite, a);
2893 }
2894 if (value & EECD_EE_REQ)
2895 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2896 else
2897 EECD &= ~EECD_EE_GNT;
2898 //e1kRegWriteDefault(pThis, offset, index, value );
2899
2900 return VINF_SUCCESS;
2901#else /* !IN_RING3 */
2902 RT_NOREF(pThis, value);
2903 return VINF_IOM_R3_MMIO_WRITE;
2904#endif /* !IN_RING3 */
2905}
2906
2907/**
2908 * Read handler for EEPROM/Flash Control/Data register.
2909 *
2910 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2911 *
2912 * @returns VBox status code.
2913 *
2914 * @param pThis The device state structure.
2915 * @param offset Register offset in memory-mapped frame.
2916 * @param index Register index in register array.
2917 * @param mask Used to implement partial reads (8 and 16-bit).
2918 * @thread EMT
2919 */
2920static int e1kRegReadEECD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2921{
2922#ifdef IN_RING3
2923 uint32_t value = 0; /* Get rid of false positive in parfait. */
2924 int rc = e1kRegReadDefault(pDevIns, pThis, offset, index, &value);
2925 if (RT_SUCCESS(rc))
2926 {
2927 if ((value & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2928 {
2929 /* Note: 82543GC does not need to request EEPROM access */
2930 /* Access to EEPROM granted -- get 4-wire bits to EEPROM device */
2931 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2932 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
2933 value |= pThisCC->eeprom.read();
2934 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2935 }
2936 *pu32Value = value;
2937 }
2938
2939 return rc;
2940#else /* !IN_RING3 */
2941 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(pu32Value);
2942 return VINF_IOM_R3_MMIO_READ;
2943#endif /* !IN_RING3 */
2944}
2945
2946/**
2947 * Write handler for EEPROM Read register.
2948 *
2949 * Handles EEPROM word access requests, reads EEPROM and stores the result
2950 * into DATA field.
2951 *
2952 * @param pThis The device state structure.
2953 * @param offset Register offset in memory-mapped frame.
2954 * @param index Register index in register array.
2955 * @param value The value to store.
2956 * @param mask Used to implement partial writes (8 and 16-bit).
2957 * @thread EMT
2958 */
2959static int e1kRegWriteEERD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2960{
2961#ifdef IN_RING3
2962 /* Make use of 'writable' and 'readable' masks. */
2963 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
2964 /* DONE and DATA are set only if read was triggered by START. */
2965 if (value & EERD_START)
2966 {
2967 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2968 uint16_t tmp;
2969 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
2970 if (pThisCC->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2971 SET_BITS(EERD, DATA, tmp);
2972 EERD |= EERD_DONE;
2973 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2974 }
2975
2976 return VINF_SUCCESS;
2977#else /* !IN_RING3 */
2978 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
2979 return VINF_IOM_R3_MMIO_WRITE;
2980#endif /* !IN_RING3 */
2981}
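
/*
 * Illustrative sketch (not part of the emulation): a guest typically reads an
 * EEPROM word through EERD by writing START together with the word address,
 * then polling DONE and extracting DATA. The MMIO helpers (mmioRead32,
 * mmioWrite32) and the field shift/offset constants are placeholders, not
 * definitions from this file; the handler above completes the read synchronously.
 */
#if 0
static uint16_t guestEepromReadWord(uint8_t uWordAddr)
{
    /* EERD_OFFSET, EERD_ADDR_SHIFT and EERD_DATA_SHIFT are hypothetical names. */
    mmioWrite32(EERD_OFFSET, EERD_START | ((uint32_t)uWordAddr << EERD_ADDR_SHIFT));
    uint32_t val;
    do
        val = mmioRead32(EERD_OFFSET);
    while (!(val & EERD_DONE));                 /* wait for the read to complete */
    return (uint16_t)(val >> EERD_DATA_SHIFT);  /* 16-bit EEPROM word */
}
#endif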
2982
2983
2984/**
2985 * Write handler for MDI Control register.
2986 *
2987 * Handles PHY read/write requests; forwards requests to internal PHY device.
2988 *
2989 * @param pThis The device state structure.
2990 * @param offset Register offset in memory-mapped frame.
2991 * @param index Register index in register array.
2992 * @param value The value to store.
2993 * @param mask Used to implement partial writes (8 and 16-bit).
2994 * @thread EMT
2995 */
2996static int e1kRegWriteMDIC(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2997{
2998 if (value & MDIC_INT_EN)
2999 {
3000 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
3001 pThis->szPrf));
3002 }
3003 else if (value & MDIC_READY)
3004 {
3005 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
3006 pThis->szPrf));
3007 }
3008 else if (GET_BITS_V(value, MDIC, PHY) != 1)
3009 {
3010 E1kLog(("%s WARNING! Access to invalid PHY detected, phy=%d.\n",
3011 pThis->szPrf, GET_BITS_V(value, MDIC, PHY)));
3012 /*
3013 * Some drivers scan the MDIO bus for a PHY. We can work with these
3014 * drivers if we set MDIC_READY and MDIC_ERROR when there isn't a PHY
3015 * at the requested address, see @bugref{7346}.
3016 */
3017 MDIC = MDIC_READY | MDIC_ERROR;
3018 }
3019 else
3020 {
3021 /* Store the value */
3022 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3023 STAM_COUNTER_INC(&pThis->StatPHYAccesses);
3024 /* Forward op to PHY */
3025 if (value & MDIC_OP_READ)
3026 SET_BITS(MDIC, DATA, Phy::readRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), pDevIns));
3027 else
3028 Phy::writeRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK, pDevIns);
3029 /* Let software know that we are done */
3030 MDIC |= MDIC_READY;
3031 }
3032
3033 return VINF_SUCCESS;
3034}
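
/*
 * Illustrative sketch (not part of the emulation): this is roughly how a guest
 * driver reads a PHY register through MDIC -- write the opcode, PHY address and
 * register number, then poll READY and pick up DATA. MDIC_OP_READ, MDIC_READY
 * and MDIC_DATA_MASK are the bit definitions used above; the MMIO helpers and
 * the field shift/offset constants are placeholders. The handler above performs
 * the operation and sets READY synchronously.
 */
#if 0
static uint16_t guestPhyRead(uint8_t uPhyReg)
{
    /* PHY address 1 is the only one this emulation responds to (see the check above).
       MDIC_OFFSET, MDIC_PHY_SHIFT and MDIC_REG_SHIFT are hypothetical names. */
    mmioWrite32(MDIC_OFFSET, MDIC_OP_READ | (1U << MDIC_PHY_SHIFT) | ((uint32_t)uPhyReg << MDIC_REG_SHIFT));
    uint32_t mdic;
    do
        mdic = mmioRead32(MDIC_OFFSET);
    while (!(mdic & MDIC_READY));           /* wait for the MDI cycle to finish */
    return (uint16_t)(mdic & MDIC_DATA_MASK);
}
#endif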
3035
3036/**
3037 * Write handler for Interrupt Cause Read register.
3038 *
3039 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
3040 *
3041 * @param pThis The device state structure.
3042 * @param offset Register offset in memory-mapped frame.
3043 * @param index Register index in register array.
3044 * @param value The value to store.
3045 * @param mask Used to implement partial writes (8 and 16-bit).
3046 * @thread EMT
3047 */
3048static int e1kRegWriteICR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3049{
3050 ICR &= ~value;
3051
3052 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index);
3053 return VINF_SUCCESS;
3054}
3055
3056/**
3057 * Read handler for Interrupt Cause Read register.
3058 *
3059 * Reading this register acknowledges all interrupts.
3060 *
3061 * @returns VBox status code.
3062 *
3063 * @param pThis The device state structure.
3064 * @param offset Register offset in memory-mapped frame.
3065 * @param index Register index in register array.
3066 * @param mask Not used.
3067 * @thread EMT
3068 */
3069static int e1kRegReadICR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
3070{
3071 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_READ);
3072 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3073 return rc;
3074
3075 uint32_t value = 0;
3076 rc = e1kRegReadDefault(pDevIns, pThis, offset, index, &value);
3077 if (RT_SUCCESS(rc))
3078 {
3079 if (value)
3080 {
3081 if (!pThis->fIntRaised)
3082 E1K_INC_ISTAT_CNT(pThis->uStatNoIntICR);
3083 /*
3084 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
3085 * with disabled interrupts.
3086 */
3087 //if (IMS)
3088 if (1)
3089 {
3090 /*
3091 * Interrupts were enabled -- we are supposedly at the very
3092 * beginning of the interrupt handler
3093 */
3094 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
3095 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", pThis->szPrf, ICR));
3096 /* Clear all pending interrupts */
3097 ICR = 0;
3098 pThis->fIntRaised = false;
3099 /* Lower(0) INTA(0) */
3100 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
3101
3102 pThis->u64AckedAt = PDMDevHlpTimerGet(pDevIns, pThis->hIntTimer);
3103 if (pThis->fIntMaskUsed)
3104 pThis->fDelayInts = true;
3105 }
3106 else
3107 {
3108 /*
3109 * Interrupts are disabled -- in Windows guests the ICR read is done
3110 * just before re-enabling interrupts
3111 */
3112 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
3113 }
3114 }
3115 *pu32Value = value;
3116 }
3117 e1kCsLeave(pThis);
3118
3119 return rc;
3120}
3121
3122/**
3123 * Write handler for Interrupt Cause Set register.
3124 *
3125 * Bits corresponding to 1s in 'value' will be set in ICR register.
3126 *
3127 * @param pThis The device state structure.
3128 * @param offset Register offset in memory-mapped frame.
3129 * @param index Register index in register array.
3130 * @param value The value to store.
3131 * @param mask Used to implement partial writes (8 and 16-bit).
3132 * @thread EMT
3133 */
3134static int e1kRegWriteICS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3135{
3136 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3137 E1K_INC_ISTAT_CNT(pThis->uStatIntICS);
3138 return e1kRaiseInterrupt(pDevIns, pThis, VINF_IOM_R3_MMIO_WRITE, value & g_aE1kRegMap[ICS_IDX].writable);
3139}
3140
3141/**
3142 * Write handler for Interrupt Mask Set register.
3143 *
3144 * Will trigger pending interrupts.
3145 *
3146 * @param pThis The device state structure.
3147 * @param offset Register offset in memory-mapped frame.
3148 * @param index Register index in register array.
3149 * @param value The value to store.
3150 * @param mask Used to implement partial writes (8 and 16-bit).
3151 * @thread EMT
3152 */
3153static int e1kRegWriteIMS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3154{
3155 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3156
3157 IMS |= value;
3158 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
3159 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", pThis->szPrf));
3160 /*
3161 * We cannot raise an interrupt here as it will occasionally cause an interrupt storm
3162 * in Windows guests (see @bugref{8624}, @bugref{5023}).
3163 */
3164 if ((ICR & IMS) && !pThis->fLocked)
3165 {
3166 E1K_INC_ISTAT_CNT(pThis->uStatIntIMS);
3167 e1kPostponeInterrupt(pDevIns, pThis, E1K_IMS_INT_DELAY_NS);
3168 }
3169
3170 return VINF_SUCCESS;
3171}
3172
3173/**
3174 * Write handler for Interrupt Mask Clear register.
3175 *
3176 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
3177 *
3178 * @param pThis The device state structure.
3179 * @param offset Register offset in memory-mapped frame.
3180 * @param index Register index in register array.
3181 * @param value The value to store.
3182 * @param mask Used to implement partial writes (8 and 16-bit).
3183 * @thread EMT
3184 */
3185static int e1kRegWriteIMC(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3186{
3187 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3188
3189 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3190 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3191 return rc;
3192 if (pThis->fIntRaised)
3193 {
3194 /*
3195 * Technically we should reset fIntRaised in ICR read handler, but it will cause
3196 * Windows to freeze since it may receive an interrupt while still in the very beginning
3197 * of interrupt handler.
3198 */
3199 E1K_INC_ISTAT_CNT(pThis->uStatIntLower);
3200 STAM_COUNTER_INC(&pThis->StatIntsPrevented);
3201 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
3202 /* Lower(0) INTA(0) */
3203 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
3204 pThis->fIntRaised = false;
3205 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
3206 }
3207 IMS &= ~value;
3208 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", pThis->szPrf));
3209 e1kCsLeave(pThis);
3210
3211 return VINF_SUCCESS;
3212}
3213
3214/**
3215 * Write handler for Receive Control register.
3216 *
3217 * @param pThis The device state structure.
3218 * @param offset Register offset in memory-mapped frame.
3219 * @param index Register index in register array.
3220 * @param value The value to store.
3221 * @param mask Used to implement partial writes (8 and 16-bit).
3222 * @thread EMT
3223 */
3224static int e1kRegWriteRCTL(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3225{
3226 /* Update promiscuous mode */
3227 bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
3228 if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
3229 {
3230 /* Promiscuity has changed, pass the knowledge on. */
3231#ifndef IN_RING3
3232 return VINF_IOM_R3_MMIO_WRITE;
3233#else
3234 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3235 if (pThisCC->pDrvR3)
3236 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, fBecomePromiscous);
3237#endif
3238 }
3239
3240 /* Adjust receive buffer size */
3241 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
3242 if (value & RCTL_BSEX)
3243 cbRxBuf *= 16;
3244 if (cbRxBuf > E1K_MAX_RX_PKT_SIZE)
3245 cbRxBuf = E1K_MAX_RX_PKT_SIZE;
3246 if (cbRxBuf != pThis->u16RxBSize)
3247 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
3248 pThis->szPrf, cbRxBuf, pThis->u16RxBSize));
3249 Assert(cbRxBuf < 65536);
3250 pThis->u16RxBSize = (uint16_t)cbRxBuf;
3251
3252 /* Update the register */
3253 return e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3254}
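
/*
 * Worked example for the buffer size computation above (illustrative only):
 * BSIZE=00b with BSEX=0 gives 2048 >> 0 = 2048 bytes; BSIZE=11b with BSEX=0
 * gives 2048 >> 3 = 256 bytes; BSIZE=01b with BSEX=1 gives (2048 >> 1) * 16 =
 * 16384 bytes, which is then clipped to E1K_MAX_RX_PKT_SIZE if it exceeds it.
 */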
3255
3256/**
3257 * Write handler for Packet Buffer Allocation register.
3258 *
3259 * TXA = 64 - RXA.
3260 *
3261 * @param pThis The device state structure.
3262 * @param offset Register offset in memory-mapped frame.
3263 * @param index Register index in register array.
3264 * @param value The value to store.
3265 * @param mask Used to implement partial writes (8 and 16-bit).
3266 * @thread EMT
3267 */
3268static int e1kRegWritePBA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3269{
3270 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3271 PBA_st->txa = 64 - PBA_st->rxa;
3272
3273 return VINF_SUCCESS;
3274}
3275
3276/**
3277 * Write handler for Receive Descriptor Tail register.
3278 *
3279 * @remarks A write to RDT forces a switch to ring-3 (HC) and signals
3280 * e1kR3NetworkDown_WaitReceiveAvail().
3281 *
3282 * @returns VBox status code.
3283 *
3284 * @param pThis The device state structure.
3285 * @param offset Register offset in memory-mapped frame.
3286 * @param index Register index in register array.
3287 * @param value The value to store.
3288 * @param mask Used to implement partial writes (8 and 16-bit).
3289 * @thread EMT
3290 */
3291static int e1kRegWriteRDT(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3292{
3293#ifndef IN_RING3
3294 /* XXX */
3295// return VINF_IOM_R3_MMIO_WRITE;
3296#endif
3297 int rc = e1kCsRxEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3298 if (RT_LIKELY(rc == VINF_SUCCESS))
3299 {
3300 E1kLog(("%s e1kRegWriteRDT\n", pThis->szPrf));
3301#ifndef E1K_WITH_RXD_CACHE
3302 /*
3303 * Some drivers advance RDT too far, so that it equals RDH. This
3304 * somehow manages to work with real hardware but not with this
3305 * emulated device. We can work with these drivers if we just
3306 * write 1 less when we see a driver writing RDT equal to RDH,
3307 * see @bugref{7346}.
3308 */
3309 if (value == RDH)
3310 {
3311 if (RDH == 0)
3312 value = (RDLEN / sizeof(E1KRXDESC)) - 1;
3313 else
3314 value = RDH - 1;
3315 }
3316#endif /* !E1K_WITH_RXD_CACHE */
3317 rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3318#ifdef E1K_WITH_RXD_CACHE
3319 /*
3320 * We need to fetch descriptors now as RDT may go a whole circle
3321 * before we attempt to store a received packet. For example,
3322 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
3323 * size being only 8 descriptors! Note that we fetch descriptors
3324 * only when the cache is empty to reduce the number of memory reads
3325 * in case of frequent RDT writes. Don't fetch anything when the
3326 * receiver is disabled either, as RDH, RDT and RDLEN can be in some
3327 * messed up state.
3328 * Note that even though the cache may seem empty, meaning that there are
3329 * no more available descriptors in it, it may still be in use by the RX
3330 * thread, which has not yet written the last descriptor back but has
3331 * temporarily released the RX lock in order to write the packet body
3332 * to the descriptor's buffer. At this point we are still going to do the
3333 * prefetch, but it won't actually fetch anything if there are no unused
3334 * slots in our "empty" cache (nRxDFetched == E1K_RXD_CACHE_SIZE). We must
3335 * not reset the cache here even if it appears empty; it will be reset at
3336 * a later point in e1kRxDGet().
3337 */
3338 if (e1kRxDIsCacheEmpty(pThis) && (RCTL & RCTL_EN))
3339 e1kRxDPrefetch(pDevIns, pThis);
3340#endif /* E1K_WITH_RXD_CACHE */
3341 e1kCsRxLeave(pThis);
3342 if (RT_SUCCESS(rc))
3343 {
3344 /* Signal that we have more receive descriptors available. */
3345 e1kWakeupReceive(pDevIns, pThis);
3346 }
3347 }
3348 return rc;
3349}
3350
3351/**
3352 * Write handler for Receive Delay Timer register.
3353 *
3354 * @param pThis The device state structure.
3355 * @param offset Register offset in memory-mapped frame.
3356 * @param index Register index in register array.
3357 * @param value The value to store.
3358 * @param mask Used to implement partial writes (8 and 16-bit).
3359 * @thread EMT
3360 */
3361static int e1kRegWriteRDTR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3362{
3363 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3364 if (value & RDTR_FPD)
3365 {
3366 /* Flush requested, cancel both timers and raise interrupt */
3367#ifdef E1K_USE_RX_TIMERS
3368 e1kCancelTimer(pDevIns, pThis, pThis->hRIDTimer);
3369 e1kCancelTimer(pDevIns, pThis, pThis->hRADTimer);
3370#endif
3371 E1K_INC_ISTAT_CNT(pThis->uStatIntRDTR);
3372 return e1kRaiseInterrupt(pDevIns, pThis, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3373 }
3374
3375 return VINF_SUCCESS;
3376}
3377
3378DECLINLINE(uint32_t) e1kGetTxLen(PE1KSTATE pThis)
3379{
3380 /*
3381 * Make sure TDT won't change during computation. EMT may modify TDT at
3382 * any moment.
3383 */
3384 uint32_t tdt = TDT;
3385 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
3386}
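
/*
 * Worked example (illustrative only): with a 256-descriptor ring
 * (TDLEN = 256 * sizeof(E1KTXDESC)), TDH = 250 and TDT = 10, the guest has
 * queued descriptors 250..255 and 0..9, and the expression above yields
 * 256 + 10 - 250 = 16 descriptors pending transmission. Without wrap-around
 * (TDH = 4, TDT = 10) it simply yields TDT - TDH = 6.
 */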
3387
3388#ifdef IN_RING3
3389
3390# ifdef E1K_TX_DELAY
3391/**
3392 * Transmit Delay Timer handler.
3393 *
3394 * @remarks We only get here when the timer expires.
3395 *
3396 * @param pDevIns Pointer to device instance structure.
3397 * @param pTimer Pointer to the timer.
3398 * @param pvUser NULL.
3399 * @thread EMT
3400 */
3401static DECLCALLBACK(void) e1kR3TxDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3402{
3403 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3404 Assert(PDMCritSectIsOwner(&pThis->csTx));
3405
3406 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayExp);
3407# ifdef E1K_INT_STATS
3408 uint64_t u64Elapsed = RTTimeNanoTS() - pThis->u64ArmedAt;
3409 if (u64Elapsed > pThis->uStatMaxTxDelay)
3410 pThis->uStatMaxTxDelay = u64Elapsed;
3411# endif
3412 int rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
3413 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3414}
3415# endif /* E1K_TX_DELAY */
3416
3417//# ifdef E1K_USE_TX_TIMERS
3418
3419/**
3420 * Transmit Interrupt Delay Timer handler.
3421 *
3422 * @remarks We only get here when the timer expires.
3423 *
3424 * @param pDevIns Pointer to device instance structure.
3425 * @param pTimer Pointer to the timer.
3426 * @param pvUser NULL.
3427 * @thread EMT
3428 */
3429static DECLCALLBACK(void) e1kR3TxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3430{
3431 RT_NOREF(pDevIns);
3432 RT_NOREF(pTimer);
3433 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3434
3435 E1K_INC_ISTAT_CNT(pThis->uStatTID);
3436 /* Cancel absolute delay timer as we have already got attention */
3437# ifndef E1K_NO_TAD
3438 e1kCancelTimer(pDevIns, pThis, pThis->hTADTimer);
3439# endif
3440 e1kRaiseInterrupt(pDevIns, pThis, ICR_TXDW);
3441}
3442
3443/**
3444 * Transmit Absolute Delay Timer handler.
3445 *
3446 * @remarks We only get here when the timer expires.
3447 *
3448 * @param pDevIns Pointer to device instance structure.
3449 * @param pTimer Pointer to the timer.
3450 * @param pvUser NULL.
3451 * @thread EMT
3452 */
3453static DECLCALLBACK(void) e1kR3TxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3454{
3455 RT_NOREF(pDevIns);
3456 RT_NOREF(pTimer);
3457 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3458
3459 E1K_INC_ISTAT_CNT(pThis->uStatTAD);
3460 /* Cancel interrupt delay timer as we have already got attention */
3461 e1kCancelTimer(pDevIns, pThis, pThis->hTIDTimer);
3462 e1kRaiseInterrupt(pDevIns, pThis, ICR_TXDW);
3463}
3464
3465//# endif /* E1K_USE_TX_TIMERS */
3466# ifdef E1K_USE_RX_TIMERS
3467
3468/**
3469 * Receive Interrupt Delay Timer handler.
3470 *
3471 * @remarks We only get here when the timer expires.
3472 *
3473 * @param pDevIns Pointer to device instance structure.
3474 * @param pTimer Pointer to the timer.
3475 * @param pvUser NULL.
3476 * @thread EMT
3477 */
3478static DECLCALLBACK(void) e1kR3RxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3479{
3480 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3481
3482 E1K_INC_ISTAT_CNT(pThis->uStatRID);
3483 /* Cancel absolute delay timer as we have already got attention */
3484 e1kCancelTimer(pDevIns, pThis, pThis->hRADTimer);
3485 e1kRaiseInterrupt(pDevIns, pThis, ICR_RXT0);
3486}
3487
3488/**
3489 * Receive Absolute Delay Timer handler.
3490 *
3491 * @remarks We only get here when the timer expires.
3492 *
3493 * @param pDevIns Pointer to device instance structure.
3494 * @param pTimer Pointer to the timer.
3495 * @param pvUser NULL.
3496 * @thread EMT
3497 */
3498static DECLCALLBACK(void) e1kR3RxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3499{
3500 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3501
3502 E1K_INC_ISTAT_CNT(pThis->uStatRAD);
3503 /* Cancel interrupt delay timer as we have already got attention */
3504 e1kCancelTimer(pDevIns, pThis, pThis->hRIDTimer);
3505 e1kRaiseInterrupt(pDevIns, pThis, ICR_RXT0);
3506}
3507
3508# endif /* E1K_USE_RX_TIMERS */
3509
3510/**
3511 * Late Interrupt Timer handler.
3512 *
3513 * @param pDevIns Pointer to device instance structure.
3514 * @param pTimer Pointer to the timer.
3515 * @param pvUser NULL.
3516 * @thread EMT
3517 */
3518static DECLCALLBACK(void) e1kR3LateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3519{
3520 RT_NOREF(pDevIns, pTimer);
3521 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3522
3523 STAM_PROFILE_ADV_START(&pThis->StatLateIntTimer, a);
3524 STAM_COUNTER_INC(&pThis->StatLateInts);
3525 E1K_INC_ISTAT_CNT(pThis->uStatIntLate);
3526# if 0
3527 if (pThis->iStatIntLost > -100)
3528 pThis->iStatIntLost--;
3529# endif
3530 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, 0);
3531 STAM_PROFILE_ADV_STOP(&pThis->StatLateIntTimer, a);
3532}
3533
3534/**
3535 * Link Up Timer handler.
3536 *
3537 * @param pDevIns Pointer to device instance structure.
3538 * @param pTimer Pointer to the timer.
3539 * @param pvUser NULL.
3540 * @thread EMT
3541 */
3542static DECLCALLBACK(void) e1kR3LinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3543{
3544 RT_NOREF(pTimer);
3545 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3546 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3547
3548 /*
3549 * This can happen if we set the link status to down while the link-up timer was
3550 * already armed (shortly after e1kLoadDone()), or when the cable is disconnected
3551 * and reconnected very quickly. Moreover, 82543GC triggers LSC on reset even if
3552 * the cable is unplugged (see @bugref{8942}).
3553 */
3554 if (pThis->fCableConnected)
3555 {
3556 /* 82543GC does not have an internal PHY */
3557 if (pThis->eChip == E1K_CHIP_82543GC || (CTRL & CTRL_SLU))
3558 e1kR3LinkUp(pDevIns, pThis, pThisCC);
3559 }
3560# ifdef E1K_LSC_ON_RESET
3561 else if (pThis->eChip == E1K_CHIP_82543GC)
3562 e1kR3LinkDown(pDevIns, pThis, pThisCC);
3563# endif /* E1K_LSC_ON_RESET */
3564}
3565
3566#endif /* IN_RING3 */
3567
3568/**
3569 * Sets up the GSO context according to the TSE new context descriptor.
3570 *
3571 * @param pGso The GSO context to setup.
3572 * @param pCtx The context descriptor.
3573 */
3574DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3575{
3576 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3577
3578 /*
3579 * See if the context descriptor describes something that could be TCP or
3580 * UDP over IPv[46].
3581 */
3582 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3583 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3584 {
3585 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3586 return;
3587 }
3588 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3589 {
3590 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3591 return;
3592 }
3593 if (RT_UNLIKELY( pCtx->dw2.fTCP
3594 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3595 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3596 {
3597 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3598 return;
3599 }
3600
3601 /* The TCP/UDP checksum region should end at the end of the packet or at least past the headers. */
3602 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3603 {
3604 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3605 return;
3606 }
3607
3608 /* IPv4 checksum offset. */
3609 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3610 {
3611 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3612 return;
3613 }
3614
3615 /* TCP/UDP checksum offsets. */
3616 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3617 != ( pCtx->dw2.fTCP
3618 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3619 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3620 {
3621 E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->tu.u8CSO, pCtx->tu.u8CSS, pCtx->dw2.fTCP));
3622 return;
3623 }
3624
3625 /*
3626 * Because internal networking uses a 16-bit size field for the GSO context
3627 * plus frame, we have to make sure we don't exceed it.
3628 */
3629 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3630 {
3631 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3632 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3633 return;
3634 }
3635
3636 /*
3637 * We're good for now - we'll do more checks when seeing the data.
3638 * So, figure out the type of offloading and set up the context.
3639 */
3640 if (pCtx->dw2.fIP)
3641 {
3642 if (pCtx->dw2.fTCP)
3643 {
3644 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3645 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3646 }
3647 else
3648 {
3649 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3650 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3651 }
3652 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3653 * this yet it seems)... */
3654 }
3655 else
3656 {
3657 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /** @todo IPv6 UFO */
3658 if (pCtx->dw2.fTCP)
3659 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3660 else
3661 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3662 }
3663 pGso->offHdr1 = pCtx->ip.u8CSS;
3664 pGso->offHdr2 = pCtx->tu.u8CSS;
3665 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3666 pGso->cbMaxSeg = pCtx->dw3.u16MSS + (pGso->u8Type == PDMNETWORKGSOTYPE_IPV4_UDP ? pGso->offHdr2 : 0);
3667 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3668 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3669 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3670}
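
/*
 * Illustrative example (hypothetical guest-programmed values): for a plain
 * IPv4/TCP TSE frame without a VLAN tag a driver would typically set
 * IPCSS=14, IPCSO=24, TUCSS=34, TUCSO=50, HDRLEN=54 and MSS=1460 in the
 * context descriptor. Those values pass the checks above and produce
 * u8Type=PDMNETWORKGSOTYPE_IPV4_TCP, offHdr1=14, offHdr2=34,
 * cbHdrsSeg=cbHdrsTotal=54 and cbMaxSeg=1460.
 */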
3671
3672/**
3673 * Checks if we can use GSO processing for the current TSE frame.
3674 *
3675 * @param pThis The device state structure.
3676 * @param pGso The GSO context.
3677 * @param pData The first data descriptor of the frame.
3678 * @param pCtx The TSO context descriptor.
3679 */
3680DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3681{
3682 if (!pData->cmd.fTSE)
3683 {
3684 E1kLog2(("e1kCanDoGso: !TSE\n"));
3685 return false;
3686 }
3687 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3688 {
3689 E1kLog(("e1kCanDoGso: VLE\n"));
3690 return false;
3691 }
3692 if (RT_UNLIKELY(!pThis->fGSOEnabled))
3693 {
3694 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3695 return false;
3696 }
3697
3698 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3699 {
3700 case PDMNETWORKGSOTYPE_IPV4_TCP:
3701 case PDMNETWORKGSOTYPE_IPV4_UDP:
3702 if (!pData->dw3.fIXSM)
3703 {
3704 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3705 return false;
3706 }
3707 if (!pData->dw3.fTXSM)
3708 {
3709 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3710 return false;
3711 }
3712 /** @todo what more checks should we perform here? Ethernet frame type? */
3713 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3714 return true;
3715
3716 case PDMNETWORKGSOTYPE_IPV6_TCP:
3717 case PDMNETWORKGSOTYPE_IPV6_UDP:
3718 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3719 {
3720 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3721 return false;
3722 }
3723 if (!pData->dw3.fTXSM)
3724 {
3725 E1kLog(("e1kCanDoGso: !TXSM (IPv6)\n"));
3726 return false;
3727 }
3728 /** @todo what more checks should we perform here? Ethernet frame type? */
3729 E1kLog2(("e1kCanDoGso: OK, IPv6\n"));
3730 return true;
3731
3732 default:
3733 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3734 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3735 return false;
3736 }
3737}
3738
3739/**
3740 * Frees the current xmit buffer.
3741 *
3742 * @param pThis The device state structure.
3743 */
3744static void e1kXmitFreeBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC)
3745{
3746 PPDMSCATTERGATHER pSg = pThisCC->CTX_SUFF(pTxSg);
3747 if (pSg)
3748 {
3749 pThisCC->CTX_SUFF(pTxSg) = NULL;
3750
3751 if (pSg->pvAllocator != pThis)
3752 {
3753 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3754 if (pDrv)
3755 pDrv->pfnFreeBuf(pDrv, pSg);
3756 }
3757 else
3758 {
3759 /* loopback */
3760 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3761 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3762 pSg->fFlags = 0;
3763 pSg->pvAllocator = NULL;
3764 }
3765 }
3766}
3767
3768#ifndef E1K_WITH_TXD_CACHE
3769/**
3770 * Allocates an xmit buffer.
3771 *
3772 * @returns See PDMINETWORKUP::pfnAllocBuf.
3773 * @param pThis The device state structure.
3774 * @param cbMin The minimum frame size.
3775 * @param fExactSize Whether cbMin is exact or if we have to max it
3776 * out to the max MTU size.
3777 * @param fGso Whether this is a GSO frame or not.
3778 */
3779DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC, size_t cbMin, bool fExactSize, bool fGso)
3780{
3781 /* Adjust cbMin if necessary. */
3782 if (!fExactSize)
3783 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3784
3785 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3786 if (RT_UNLIKELY(pThisCC->CTX_SUFF(pTxSg)))
3787 e1kXmitFreeBuf(pThis, pThisCC);
3788 Assert(pThisCC->CTX_SUFF(pTxSg) == NULL);
3789
3790 /*
3791 * Allocate the buffer.
3792 */
3793 PPDMSCATTERGATHER pSg;
3794 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3795 {
3796 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3797 if (RT_UNLIKELY(!pDrv))
3798 return VERR_NET_DOWN;
3799 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pThis->GsoCtx : NULL, &pSg);
3800 if (RT_FAILURE(rc))
3801 {
3802 /* Suspend TX as we are out of buffers atm */
3803 STATUS |= STATUS_TXOFF;
3804 return rc;
3805 }
3806 }
3807 else
3808 {
3809 /* Create a loopback using the fallback buffer and preallocated SG. */
3810 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3811 pSg = &pThis->uTxFallback.Sg;
3812 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3813 pSg->cbUsed = 0;
3814 pSg->cbAvailable = 0;
3815 pSg->pvAllocator = pThis;
3816 pSg->pvUser = NULL; /* No GSO here. */
3817 pSg->cSegs = 1;
3818 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3819 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3820 }
3821
3822 pThisCC->CTX_SUFF(pTxSg) = pSg;
3823 return VINF_SUCCESS;
3824}
3825#else /* E1K_WITH_TXD_CACHE */
3826/**
3827 * Allocates an xmit buffer.
3828 *
3829 * @returns See PDMINETWORKUP::pfnAllocBuf.
3830 * @param pThis The device state structure.
3831 * @param cbMin The minimum frame size.
3832 * @param fExactSize Whether cbMin is exact or if we have to max it
3833 * out to the max MTU size.
3834 * @param fGso Whether this is a GSO frame or not.
3835 */
3836DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC, bool fGso)
3837{
3838 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3839 if (RT_UNLIKELY(pThisCC->CTX_SUFF(pTxSg)))
3840 e1kXmitFreeBuf(pThis, pThisCC);
3841 Assert(pThisCC->CTX_SUFF(pTxSg) == NULL);
3842
3843 /*
3844 * Allocate the buffer.
3845 */
3846 PPDMSCATTERGATHER pSg;
3847 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3848 {
3849 if (pThis->cbTxAlloc == 0)
3850 {
3851 /* Zero packet, no need for the buffer */
3852 return VINF_SUCCESS;
3853 }
3854 if (fGso && pThis->GsoCtx.u8Type == PDMNETWORKGSOTYPE_INVALID)
3855 {
3856 E1kLog3(("Invalid GSO context, won't allocate this packet, cb=%u %s%s\n",
3857 pThis->cbTxAlloc, pThis->fVTag ? "VLAN " : "", pThis->fGSO ? "GSO " : ""));
3858 /* No valid GSO context is available, ignore this packet. */
3859 pThis->cbTxAlloc = 0;
3860 return VINF_SUCCESS;
3861 }
3862
3863 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3864 if (RT_UNLIKELY(!pDrv))
3865 return VERR_NET_DOWN;
3866 int rc = pDrv->pfnAllocBuf(pDrv, pThis->cbTxAlloc, fGso ? &pThis->GsoCtx : NULL, &pSg);
3867 if (RT_FAILURE(rc))
3868 {
3869 /* Suspend TX as we are out of buffers atm */
3870 STATUS |= STATUS_TXOFF;
3871 return rc;
3872 }
3873 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3874 pThis->szPrf, pThis->cbTxAlloc,
3875 pThis->fVTag ? "VLAN " : "",
3876 pThis->fGSO ? "GSO " : ""));
3877 }
3878 else
3879 {
3880 /* Create a loopback using the fallback buffer and preallocated SG. */
3881 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3882 pSg = &pThis->uTxFallback.Sg;
3883 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3884 pSg->cbUsed = 0;
3885 pSg->cbAvailable = sizeof(pThis->aTxPacketFallback);
3886 pSg->pvAllocator = pThis;
3887 pSg->pvUser = NULL; /* No GSO here. */
3888 pSg->cSegs = 1;
3889 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3890 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3891 }
3892 pThis->cbTxAlloc = 0;
3893
3894 pThisCC->CTX_SUFF(pTxSg) = pSg;
3895 return VINF_SUCCESS;
3896}
3897#endif /* E1K_WITH_TXD_CACHE */
3898
3899/**
3900 * Checks if it's a GSO buffer or not.
3901 *
3902 * @returns true / false.
3903 * @param pTxSg The scatter / gather buffer.
3904 */
3905DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3906{
3907#if 0
3908 if (!pTxSg)
3909 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
3910 if (pTxSg && pTxSg->pvUser)
3911 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
3912#endif
3913 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3914}
3915
3916#ifndef E1K_WITH_TXD_CACHE
3917/**
3918 * Load transmit descriptor from guest memory.
3919 *
3920 * @param pDevIns The device instance.
3921 * @param pDesc Pointer to descriptor union.
3922 * @param addr Physical address in guest context.
3923 * @thread E1000_TX
3924 */
3925DECLINLINE(void) e1kLoadDesc(PPDMDEVINS pDevIns, E1KTXDESC *pDesc, RTGCPHYS addr)
3926{
3927 PDMDevHlpPCIPhysRead(pDevIns, addr, pDesc, sizeof(E1KTXDESC));
3928}
3929#else /* E1K_WITH_TXD_CACHE */
3930/**
3931 * Load transmit descriptors from guest memory.
3932 *
3933 * We need two physical reads in case the tail has wrapped around the end of the
3934 * TX descriptor ring.
3935 *
3936 * @returns the actual number of descriptors fetched.
3937 * @param pDevIns The device instance.
3938 * @param pThis The device state structure.
3939 * @thread E1000_TX
3940 */
3941DECLINLINE(unsigned) e1kTxDLoadMore(PPDMDEVINS pDevIns, PE1KSTATE pThis)
3942{
3943 Assert(pThis->iTxDCurrent == 0);
3944 /* We've already loaded pThis->nTxDFetched descriptors past TDH. */
3945 unsigned nDescsAvailable = e1kGetTxLen(pThis) - pThis->nTxDFetched;
3946 /* The following two lines ensure that pThis->nTxDFetched never overflows. */
3947 AssertCompile(E1K_TXD_CACHE_SIZE < (256 * sizeof(pThis->nTxDFetched)));
3948 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pThis->nTxDFetched);
3949 unsigned nDescsTotal = TDLEN / sizeof(E1KTXDESC);
3950 Assert(nDescsTotal != 0);
3951 if (nDescsTotal == 0)
3952 return 0;
3953 unsigned nFirstNotLoaded = (TDH + pThis->nTxDFetched) % nDescsTotal;
3954 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
3955 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
3956 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
3957 nFirstNotLoaded, nDescsInSingleRead));
3958 if (nDescsToFetch == 0)
3959 return 0;
3960 E1KTXDESC* pFirstEmptyDesc = &pThis->aTxDescriptors[pThis->nTxDFetched];
3961 PDMDevHlpPCIPhysRead(pDevIns,
3962 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3963 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
3964 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3965 pThis->szPrf, nDescsInSingleRead,
3966 TDBAH, TDBAL + TDH * sizeof(E1KTXDESC),
3967 nFirstNotLoaded, TDLEN, TDH, TDT));
3968 if (nDescsToFetch > nDescsInSingleRead)
3969 {
3970 PDMDevHlpPCIPhysRead(pDevIns,
3971 ((uint64_t)TDBAH << 32) + TDBAL,
3972 pFirstEmptyDesc + nDescsInSingleRead,
3973 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
3974 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
3975 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
3976 TDBAH, TDBAL));
3977 }
3978 pThis->nTxDFetched += (uint8_t)nDescsToFetch;
3979 return nDescsToFetch;
3980}
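
/*
 * Illustrative example: with an 8-descriptor ring (TDLEN = 8 * sizeof(E1KTXDESC)),
 * TDH=6, an empty cache and 4 descriptors to fetch, nFirstNotLoaded is 6 and
 * nDescsInSingleRead is 2, so the first read covers descriptors 6..7 and the
 * second read starts over at the ring base (TDBAH:TDBAL) to pick up 0..1.
 */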
3981
3982/**
3983 * Load transmit descriptors from guest memory only if there are no loaded
3984 * descriptors.
3985 *
3986 * @returns true if there are descriptors in cache.
3987 * @param pDevIns The device instance.
3988 * @param pThis The device state structure.
3989 * @thread E1000_TX
3990 */
3991DECLINLINE(bool) e1kTxDLazyLoad(PPDMDEVINS pDevIns, PE1KSTATE pThis)
3992{
3993 if (pThis->nTxDFetched == 0)
3994 return e1kTxDLoadMore(pDevIns, pThis) != 0;
3995 return true;
3996}
3997#endif /* E1K_WITH_TXD_CACHE */
3998
3999/**
4000 * Write back transmit descriptor to guest memory.
4001 *
4002 * @param pDevIns The device instance.
4003 * @param pThis The device state structure.
4004 * @param pDesc Pointer to descriptor union.
4005 * @param addr Physical address in guest context.
4006 * @thread E1000_TX
4007 */
4008DECLINLINE(void) e1kWriteBackDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4009{
4010 /* Strictly speaking only the write-back (second) half of the descriptor needs updating, but we write back the whole descriptor for simplicity. */
4011 e1kPrintTDesc(pThis, pDesc, "^^^");
4012 PDMDevHlpPCIPhysWrite(pDevIns, addr, pDesc, sizeof(E1KTXDESC));
4013}
4014
4015/**
4016 * Transmit complete frame.
4017 *
4018 * @remarks We skip the FCS since we're not responsible for sending anything to
4019 * a real ethernet wire.
4020 *
4021 * @param pDevIns The device instance.
4022 * @param pThis The device state structure.
4023 * @param pThisCC The current context instance data.
4024 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4025 * @thread E1000_TX
4026 */
4027static void e1kTransmitFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, bool fOnWorkerThread)
4028{
4029 PPDMSCATTERGATHER pSg = pThisCC->CTX_SUFF(pTxSg);
4030 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
4031 Assert(!pSg || pSg->cSegs == 1);
4032
4033 if (cbFrame > 70) /* unqualified guess */
4034 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
4035
4036#ifdef E1K_INT_STATS
4037 if (cbFrame <= 1514)
4038 E1K_INC_ISTAT_CNT(pThis->uStatTx1514);
4039 else if (cbFrame <= 2962)
4040 E1K_INC_ISTAT_CNT(pThis->uStatTx2962);
4041 else if (cbFrame <= 4410)
4042 E1K_INC_ISTAT_CNT(pThis->uStatTx4410);
4043 else if (cbFrame <= 5858)
4044 E1K_INC_ISTAT_CNT(pThis->uStatTx5858);
4045 else if (cbFrame <= 7306)
4046 E1K_INC_ISTAT_CNT(pThis->uStatTx7306);
4047 else if (cbFrame <= 8754)
4048 E1K_INC_ISTAT_CNT(pThis->uStatTx8754);
4049 else if (cbFrame <= 16384)
4050 E1K_INC_ISTAT_CNT(pThis->uStatTx16384);
4051 else if (cbFrame <= 32768)
4052 E1K_INC_ISTAT_CNT(pThis->uStatTx32768);
4053 else
4054 E1K_INC_ISTAT_CNT(pThis->uStatTxLarge);
4055#endif /* E1K_INT_STATS */
4056
4057 /* Add VLAN tag */
4058 if (cbFrame > 12 && pThis->fVTag)
4059 {
4060 E1kLog3(("%s Inserting VLAN tag %08x\n",
4061 pThis->szPrf, RT_BE2H_U16((uint16_t)VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16)));
4062 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
4063 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16((uint16_t)VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16);
4064 pSg->cbUsed += 4;
4065 cbFrame += 4;
4066 Assert(pSg->cbUsed == cbFrame);
4067 Assert(pSg->cbUsed <= pSg->cbAvailable);
4068 }
4069/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
4070 "%.*Rhxd\n"
4071 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
4072 pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
4073
4074 /* Update the stats */
4075 E1K_INC_CNT32(TPT);
4076 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
4077 E1K_INC_CNT32(GPTC);
4078 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
4079 E1K_INC_CNT32(BPTC);
4080 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
4081 E1K_INC_CNT32(MPTC);
4082 /* Update octet transmit counter */
4083 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
4084 if (pThisCC->CTX_SUFF(pDrv))
4085 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, cbFrame);
4086 if (cbFrame == 64)
4087 E1K_INC_CNT32(PTC64);
4088 else if (cbFrame < 128)
4089 E1K_INC_CNT32(PTC127);
4090 else if (cbFrame < 256)
4091 E1K_INC_CNT32(PTC255);
4092 else if (cbFrame < 512)
4093 E1K_INC_CNT32(PTC511);
4094 else if (cbFrame < 1024)
4095 E1K_INC_CNT32(PTC1023);
4096 else
4097 E1K_INC_CNT32(PTC1522);
4098
4099 E1K_INC_ISTAT_CNT(pThis->uStatTxFrm);
4100
4101 /*
4102 * Dump and send the packet.
4103 */
4104 int rc = VERR_NET_DOWN;
4105 if (pSg && pSg->pvAllocator != pThis)
4106 {
4107 e1kPacketDump(pDevIns, pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
4108
4109 pThisCC->CTX_SUFF(pTxSg) = NULL;
4110 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
4111 if (pDrv)
4112 {
4113 /* Release critical section to avoid deadlock in CanReceive */
4114 //e1kCsLeave(pThis);
4115 STAM_PROFILE_START(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4116 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
4117 STAM_PROFILE_STOP(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4118 //e1kCsEnter(pThis, RT_SRC_POS);
4119 }
4120 }
4121 else if (pSg)
4122 {
4123 Assert(pSg->aSegs[0].pvSeg == pThis->aTxPacketFallback);
4124 e1kPacketDump(pDevIns, pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
4125
4126 /** @todo do we actually need to check that we're in loopback mode here? */
4127 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
4128 {
4129 E1KRXDST status;
4130 RT_ZERO(status);
4131 status.fPIF = true;
4132 e1kHandleRxPacket(pDevIns, pThis, pSg->aSegs[0].pvSeg, cbFrame, status);
4133 rc = VINF_SUCCESS;
4134 }
4135 e1kXmitFreeBuf(pThis, pThisCC);
4136 }
4137 else
4138 rc = VERR_NET_DOWN;
4139 if (RT_FAILURE(rc))
4140 {
4141 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
4142 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
4143 }
4144
4145 pThis->led.Actual.s.fWriting = 0;
4146}
4147
4148/**
4149 * Compute and write internet checksum (e1kCSum16) at the specified offset.
4150 *
4151 * @param pThis The device state structure.
4152 * @param pPkt Pointer to the packet.
4153 * @param u16PktLen Total length of the packet.
4154 * @param cso Offset in packet to write checksum at.
4155 * @param css Offset in packet to start computing
4156 * checksum from.
4157 * @param cse Offset in packet to stop computing
4158 * checksum at.
4159 * @param fUdp Replace 0 checksum with all 1s.
4160 * @thread E1000_TX
4161 */
4162static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse, bool fUdp = false)
4163{
4164 RT_NOREF1(pThis);
4165
4166 if (css >= u16PktLen)
4167 {
4168 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
4169 pThis->szPrf, css, u16PktLen));
4170 return;
4171 }
4172
4173 if (cso >= u16PktLen - 1)
4174 {
4175 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
4176 pThis->szPrf, cso, u16PktLen));
4177 return;
4178 }
4179
4180 if (cse == 0 || cse >= u16PktLen)
4181 cse = u16PktLen - 1;
4182 else if (cse < css)
4183 {
4184 E1kLog2(("%s css(%X) is greater than cse(%X), checksum is not inserted\n",
4185 pThis->szPrf, css, cse));
4186 return;
4187 }
4188
4189 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
4190 if (fUdp && u16ChkSum == 0)
4191 u16ChkSum = ~u16ChkSum; /* 0 means no checksum computed in case of UDP (see @bugref{9883}) */
4192 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", pThis->szPrf,
4193 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
4194 *(uint16_t*)(pPkt + cso) = u16ChkSum;
4195}
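
/*
 * For reference, a minimal sketch of the Internet checksum (RFC 1071) that
 * e1kCSum16 is expected to compute over [pPkt+css, pPkt+cse]: sum the data as
 * 16-bit words, fold the carries back into the low 16 bits and return the
 * one's complement. This is an illustration only, not the actual helper.
 */
#if 0
static uint16_t exampleCSum16(const uint8_t *pb, size_t cb)
{
    uint32_t sum = 0;
    while (cb > 1)
    {
        sum += ((uint32_t)pb[0] << 8) | pb[1];  /* big-endian 16-bit words */
        pb += 2;
        cb -= 2;
    }
    if (cb)                                     /* odd trailing byte */
        sum += (uint32_t)pb[0] << 8;
    while (sum >> 16)                           /* fold carries */
        sum = (sum >> 16) + (sum & 0xFFFF);
    return (uint16_t)~sum;
}
#endif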
4196
4197/**
4198 * Add a part of descriptor's buffer to transmit frame.
4199 *
4200 * @remarks data.u64BufAddr is used unconditionally for both data
4201 * and legacy descriptors since it is identical to
4202 * legacy.u64BufAddr.
4203 *
4204 * @param pDevIns The device instance.
4205 * @param pThis The device state structure.
4206 * @param PhysAddr The physical address of the descriptor's data buffer.
4207 * @param u16Len Length of buffer to the end of segment.
4208 * @param fSend Force packet sending.
4209 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4210 * @thread E1000_TX
4211 */
4212#ifndef E1K_WITH_TXD_CACHE
4213static void e1kFallbackAddSegment(PPDMDEVINS pDevIns, PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4214{
4215 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
4216 /* TCP header being transmitted */
4217 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4218 /* IP header being transmitted */
4219 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4220
4221 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4222 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4223 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4224
4225 PDMDevHlpPCIPhysRead(pDevIns, PhysAddr, pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4226 E1kLog3(("%s Dump of the segment:\n"
4227 "%.*Rhxd\n"
4228 "%s --- End of dump ---\n",
4229 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4230 pThis->u16TxPktLen += u16Len;
4231 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4232 pThis->szPrf, pThis->u16TxPktLen));
4233 if (pThis->u16HdrRemain > 0)
4234 {
4235 /* The header was not complete, check if it is now */
4236 if (u16Len >= pThis->u16HdrRemain)
4237 {
4238 /* The rest is payload */
4239 u16Len -= pThis->u16HdrRemain;
4240 pThis->u16HdrRemain = 0;
4241 /* Save partial checksum and flags */
4242 pThis->u32SavedCsum = pTcpHdr->chksum;
4243 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4244 /* Clear FIN and PSH flags now and set them only in the last segment */
4245 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4246 }
4247 else
4248 {
4249 /* Still not */
4250 pThis->u16HdrRemain -= u16Len;
4251 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4252 pThis->szPrf, pThis->u16HdrRemain));
4253 return;
4254 }
4255 }
4256
4257 pThis->u32PayRemain -= u16Len;
4258
4259 if (fSend)
4260 {
4261 /* Leave ethernet header intact */
4262 /* IP Total Length = payload + headers - ethernet header */
4263 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4264 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4265 pThis->szPrf, ntohs(pIpHdr->total_len)));
4266 /* Update IP Checksum */
4267 pIpHdr->chksum = 0;
4268 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4269 pThis->contextTSE.ip.u8CSO,
4270 pThis->contextTSE.ip.u8CSS,
4271 pThis->contextTSE.ip.u16CSE);
4272
4273 /* Update TCP flags */
4274 /* Restore original FIN and PSH flags for the last segment */
4275 if (pThis->u32PayRemain == 0)
4276 {
4277 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4278 E1K_INC_CNT32(TSCTC);
4279 }
4280 /* Add TCP length to partial pseudo header sum */
4281 uint32_t csum = pThis->u32SavedCsum
4282 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4283 while (csum >> 16)
4284 csum = (csum >> 16) + (csum & 0xFFFF);
4285 pTcpHdr->chksum = csum;
4286 /* Compute final checksum */
4287 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4288 pThis->contextTSE.tu.u8CSO,
4289 pThis->contextTSE.tu.u8CSS,
4290 pThis->contextTSE.tu.u16CSE);
4291
4292 /*
4293 * Transmit it. If we've used the SG already, allocate a new one before
4294 * we copy the data.
4295 */
4296 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4297 if (!pTxSg)
4298 {
4299 e1kXmitAllocBuf(pThis, pThisCC, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
4300 pTxSg = pThisCC->CTX_SUFF(pTxSg);
4301 }
4302 if (pTxSg)
4303 {
4304 Assert(pThis->u16TxPktLen <= pThisCC->CTX_SUFF(pTxSg)->cbAvailable);
4305 Assert(pTxSg->cSegs == 1);
4306 if (pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4307 memcpy(pTxSg->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4308 pTxSg->cbUsed = pThis->u16TxPktLen;
4309 pTxSg->aSegs[0].cbSeg = pThis->u16TxPktLen;
4310 }
4311 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4312
4313 /* Update Sequence Number */
4314 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4315 - pThis->contextTSE.dw3.u8HDRLEN);
4316 /* Increment IP identification */
4317 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4318 }
4319}
4320#else /* E1K_WITH_TXD_CACHE */
4321static int e1kFallbackAddSegment(PPDMDEVINS pDevIns, PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4322{
4323 int rc = VINF_SUCCESS;
4324 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
4325 /* TCP header being transmitted */
4326 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4327 /* IP header being transmitted */
4328 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4329
4330 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4331 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4332 AssertReturn(pThis->u32PayRemain + pThis->u16HdrRemain > 0, VINF_SUCCESS);
4333
4334 if (pThis->u16TxPktLen + u16Len <= sizeof(pThis->aTxPacketFallback))
4335 PDMDevHlpPCIPhysRead(pDevIns, PhysAddr, pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4336 else
4337 E1kLog(("%s e1kFallbackAddSegment: writing beyond aTxPacketFallback, u16TxPktLen=%d(0x%x) + u16Len=%d(0x%x) > %d\n",
4338 pThis->szPrf, pThis->u16TxPktLen, pThis->u16TxPktLen, u16Len, u16Len, sizeof(pThis->aTxPacketFallback)));
4339 E1kLog3(("%s Dump of the segment:\n"
4340 "%.*Rhxd\n"
4341 "%s --- End of dump ---\n",
4342 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4343 pThis->u16TxPktLen += u16Len;
4344 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4345 pThis->szPrf, pThis->u16TxPktLen));
4346 if (pThis->u16HdrRemain > 0)
4347 {
4348 /* The header was not complete, check if it is now */
4349 if (u16Len >= pThis->u16HdrRemain)
4350 {
4351 /* The rest is payload */
4352 u16Len -= pThis->u16HdrRemain;
4353 pThis->u16HdrRemain = 0;
4354 /* Save partial checksum and flags */
4355 pThis->u32SavedCsum = pTcpHdr->chksum;
4356 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4357 /* Clear FIN and PSH flags now and set them only in the last segment */
4358 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4359 }
4360 else
4361 {
4362 /* Still not */
4363 pThis->u16HdrRemain -= u16Len;
4364 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4365 pThis->szPrf, pThis->u16HdrRemain));
4366 return rc;
4367 }
4368 }
4369
4370 if (u16Len > pThis->u32PayRemain)
4371 pThis->u32PayRemain = 0;
4372 else
4373 pThis->u32PayRemain -= u16Len;
4374
4375 if (fSend)
4376 {
4377 /* Leave ethernet header intact */
4378 /* IP Total Length = payload + headers - ethernet header */
4379 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4380 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4381 pThis->szPrf, ntohs(pIpHdr->total_len)));
4382 /* Update IP Checksum */
4383 pIpHdr->chksum = 0;
4384 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4385 pThis->contextTSE.ip.u8CSO,
4386 pThis->contextTSE.ip.u8CSS,
4387 pThis->contextTSE.ip.u16CSE);
4388
4389 /* Update TCP flags */
4390 /* Restore original FIN and PSH flags for the last segment */
4391 if (pThis->u32PayRemain == 0)
4392 {
4393 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4394 E1K_INC_CNT32(TSCTC);
4395 }
4396 /* Add TCP length to partial pseudo header sum */
4397 uint32_t csum = pThis->u32SavedCsum
4398 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4399 while (csum >> 16)
4400 csum = (csum >> 16) + (csum & 0xFFFF);
4401 Assert(csum < 65536);
4402 pTcpHdr->chksum = (uint16_t)csum;
4403 /* Compute final checksum */
4404 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4405 pThis->contextTSE.tu.u8CSO,
4406 pThis->contextTSE.tu.u8CSS,
4407 pThis->contextTSE.tu.u16CSE);
4408
4409 /*
4410 * Transmit it.
4411 */
4412 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4413 if (pTxSg)
4414 {
4415 /* Make sure the packet fits into the allocated buffer */
4416 size_t cbCopy = RT_MIN(pThis->u16TxPktLen, pThisCC->CTX_SUFF(pTxSg)->cbAvailable);
4417#ifdef DEBUG
4418 if (pThis->u16TxPktLen > pTxSg->cbAvailable)
4419 E1kLog(("%s e1kFallbackAddSegment: truncating packet, u16TxPktLen=%d(0x%x) > cbAvailable=%d(0x%x)\n",
4420 pThis->szPrf, pThis->u16TxPktLen, pThis->u16TxPktLen, pTxSg->cbAvailable, pTxSg->cbAvailable));
4421#endif /* DEBUG */
4422 Assert(pTxSg->cSegs == 1);
4423 if (pTxSg->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4424 memcpy(pTxSg->aSegs[0].pvSeg, pThis->aTxPacketFallback, cbCopy);
4425 pTxSg->cbUsed = cbCopy;
4426 pTxSg->aSegs[0].cbSeg = cbCopy;
4427 }
4428 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4429
4430 /* Update Sequence Number */
4431 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4432 - pThis->contextTSE.dw3.u8HDRLEN);
4433 /* Increment IP identification */
4434 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4435
4436 /* Allocate new buffer for the next segment. */
4437 if (pThis->u32PayRemain)
4438 {
4439 pThis->cbTxAlloc = RT_MIN(pThis->u32PayRemain,
4440 pThis->contextTSE.dw3.u16MSS)
4441 + pThis->contextTSE.dw3.u8HDRLEN;
4442 /* Do not add VLAN tags to empty packets. */
4443 if (pThis->fVTag && pThis->cbTxAlloc > 0)
4444 pThis->cbTxAlloc += 4;
4445 rc = e1kXmitAllocBuf(pThis, pThisCC, false /* fGSO */);
4446 }
4447 }
4448
4449 return rc;
4450}
4451#endif /* E1K_WITH_TXD_CACHE */
4452
4453#ifndef E1K_WITH_TXD_CACHE
4454/**
4455 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4456 * frame.
4457 *
4458 * We construct the frame in the fallback buffer first and then copy it to the SG
4459 * buffer before passing it down to the network driver code.
4460 *
4461 * @returns true if the frame should be transmitted, false if not.
4462 *
4463 * @param pThis The device state structure.
4464 * @param pDesc Pointer to the descriptor to transmit.
4465 * @param cbFragment Length of descriptor's buffer.
4466 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4467 * @thread E1000_TX
4468 */
4469static bool e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4470{
4471 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4472 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4473 Assert(pDesc->data.cmd.fTSE);
4474 Assert(!e1kXmitIsGsoBuf(pTxSg));
4475
4476 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4477 Assert(u16MaxPktLen != 0);
4478 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4479
4480 /*
4481 * Carve out segments.
4482 */
4483 do
4484 {
4485 /* Calculate how many bytes we have left in this TCP segment */
4486 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4487 if (cb > cbFragment)
4488 {
4489 /* This descriptor fits completely into current segment */
4490 cb = cbFragment;
4491 e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4492 }
4493 else
4494 {
4495 e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4496 /*
4497 * Rewind the packet tail pointer to the beginning of payload,
4498 * so we continue writing right beyond the header.
4499 */
4500 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4501 }
4502
4503 pDesc->data.u64BufAddr += cb;
4504 cbFragment -= cb;
4505 } while (cbFragment > 0);
4506
4507 if (pDesc->data.cmd.fEOP)
4508 {
4509 /* End of packet, next segment will contain header. */
4510 if (pThis->u32PayRemain != 0)
4511 E1K_INC_CNT32(TSCTFC);
4512 pThis->u16TxPktLen = 0;
4513 e1kXmitFreeBuf(pThis, PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC));
4514 }
4515
4516 return false;
4517}
4518#else /* E1K_WITH_TXD_CACHE */
4519/**
4520 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4521 * frame.
4522 *
4523 * We construct the frame in the fallback buffer first and then copy it to the SG
4524 * buffer before passing it down to the network driver code.
4525 *
4526 * @returns VBox status code.
4527 *
4528 * @param pDevIns The device instance.
4529 * @param pThis The device state structure.
4530 * @param pDesc Pointer to the descriptor to transmit.
4532 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4533 * @thread E1000_TX
4534 */
4535static int e1kFallbackAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, bool fOnWorkerThread)
4536{
4537#ifdef VBOX_STRICT
4538 PPDMSCATTERGATHER pTxSg = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC)->CTX_SUFF(pTxSg);
4539 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4540 Assert(pDesc->data.cmd.fTSE);
4541 Assert(!e1kXmitIsGsoBuf(pTxSg));
4542#endif
4543
4544 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4545 /* We cannot produce empty packets, ignore all TX descriptors (see @bugref{9571}) */
4546 if (u16MaxPktLen == 0)
4547 return VINF_SUCCESS;
4548
4549 /*
4550 * Carve out segments.
4551 */
4552 int rc = VINF_SUCCESS;
4553 do
4554 {
4555 /* Calculate how many bytes we have left in this TCP segment */
4556 uint16_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4557 if (cb > pDesc->data.cmd.u20DTALEN)
4558 {
4559 /* This descriptor fits completely into current segment */
4560                cb = (uint16_t)pDesc->data.cmd.u20DTALEN; /* u20DTALEN at this point is guaranteed to fit into 16 bits. */
4561 rc = e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4562 }
4563 else
4564 {
4565 rc = e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4566 /*
4567 * Rewind the packet tail pointer to the beginning of payload,
4568 * so we continue writing right beyond the header.
4569 */
4570 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4571 }
4572
4573 pDesc->data.u64BufAddr += cb;
4574 pDesc->data.cmd.u20DTALEN -= cb;
4575 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4576
4577 if (pDesc->data.cmd.fEOP)
4578 {
4579 /* End of packet, next segment will contain header. */
4580 if (pThis->u32PayRemain != 0)
4581 E1K_INC_CNT32(TSCTFC);
4582 pThis->u16TxPktLen = 0;
4583 e1kXmitFreeBuf(pThis, PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC));
4584 }
4585
4586 return VINF_SUCCESS; /// @todo consider rc;
4587}
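/*
 * Segmentation sketch (illustrative only, values assumed): with a TSE context of
 * u8HDRLEN = 54 and u16MSS = 1460, u16MaxPktLen is 1514. A 4000-byte data
 * descriptor arriving with u16TxPktLen = 54 (header already stored) is carved
 * into payload chunks of 1460, 1460 and 1080 bytes; the first two calls to
 * e1kFallbackAddSegment() pass fSend = true, while the last one passes the
 * descriptor's fEOP, leaving an unfinished segment for following descriptors
 * unless this was the final descriptor of the packet.
 */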
4588#endif /* E1K_WITH_TXD_CACHE */
4589
4590
4591/**
4592 * Add descriptor's buffer to transmit frame.
4593 *
4594 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4595 * TSE frames we cannot handle as GSO.
4596 *
4597 * @returns true on success, false on failure.
4598 *
4599 * @param pDevIns The device instance.
4600 * @param pThisCC The current context instance data.
4601 * @param pThis The device state structure.
4602 * @param PhysAddr The physical address of the descriptor buffer.
4603 * @param cbFragment Length of descriptor's buffer.
4604 * @thread E1000_TX
4605 */
4606static bool e1kAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, RTGCPHYS PhysAddr, uint32_t cbFragment)
4607{
4608 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4609 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4610 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4611
4612 LogFlow(("%s e1kAddToFrame: ENTER cbFragment=%d u16TxPktLen=%d cbUsed=%d cbAvailable=%d fGSO=%s\n",
4613 pThis->szPrf, cbFragment, pThis->u16TxPktLen, pTxSg->cbUsed, pTxSg->cbAvailable,
4614 fGso ? "true" : "false"));
4615 PCPDMNETWORKGSO pGso = (PCPDMNETWORKGSO)pTxSg->pvUser;
4616 if (pGso)
4617 {
4618 if (RT_UNLIKELY(pGso->cbMaxSeg == 0))
4619 {
4620 E1kLog(("%s zero-sized fragments are not allowed\n", pThis->szPrf));
4621 return false;
4622 }
4623 if (RT_UNLIKELY(pGso->u8Type == PDMNETWORKGSOTYPE_IPV4_UDP))
4624 {
4625 E1kLog(("%s UDP fragmentation is no longer supported\n", pThis->szPrf));
4626 return false;
4627 }
4628 }
4629 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4630 {
4631 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4632 return false;
4633 }
4634 if (RT_UNLIKELY( cbNewPkt > pTxSg->cbAvailable ))
4635 {
4636 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
4637 return false;
4638 }
4639
4640 if (RT_LIKELY(pTxSg))
4641 {
4642 Assert(pTxSg->cSegs == 1);
4643 if (pTxSg->cbUsed != pThis->u16TxPktLen)
4644 E1kLog(("%s e1kAddToFrame: pTxSg->cbUsed=%d(0x%x) != u16TxPktLen=%d(0x%x)\n",
4645 pThis->szPrf, pTxSg->cbUsed, pTxSg->cbUsed, pThis->u16TxPktLen, pThis->u16TxPktLen));
4646
4647 PDMDevHlpPCIPhysRead(pDevIns, PhysAddr, (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4648
4649 pTxSg->cbUsed = cbNewPkt;
4650 }
4651 pThis->u16TxPktLen = cbNewPkt;
4652
4653 return true;
4654}
4655
4656
4657/**
4658 * Write the descriptor back to guest memory and notify the guest.
4659 *
4660 * @param pThis The device state structure.
4661 * @param   pDesc       Pointer to the descriptor that has been transmitted.
4662 * @param addr Physical address of the descriptor in guest memory.
4663 * @thread E1000_TX
4664 */
4665static void e1kDescReport(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4666{
4667 /*
4668 * We fake descriptor write-back bursting. Descriptors are written back as they are
4669 * processed.
4670 */
4671 /* Let's pretend we process descriptors. Write back with DD set. */
4672 /*
4673     * Prior to r71586 we tried to accommodate the case when write-back bursts
4674 * are enabled without actually implementing bursting by writing back all
4675 * descriptors, even the ones that do not have RS set. This caused kernel
4676     * panics with Linux SMP kernels, as the e1000 driver tried to free up the skb
4677     * associated with the written-back descriptor if it happened to be a context
4678     * descriptor, since context descriptors do not have an skb associated with them.
4679     * Starting from r71586 we write back only the descriptors with RS set,
4680     * which is a little bit different from what the real hardware does in
4681     * case there is a chain of data descriptors where some of them have RS set
4682     * and others do not. It is a very uncommon scenario, imho.
4683 * We need to check RPS as well since some legacy drivers use it instead of
4684 * RS even with newer cards.
4685 */
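    /*
     * Example (illustrative): in a three-descriptor legacy chain where only the
     * last descriptor has RS set, only that last descriptor is written back with
     * DD below; drivers that want per-descriptor status are expected to set RS
     * (or RPS) on every descriptor they care about.
     */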
4686 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4687 {
4688 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4689 e1kWriteBackDesc(pDevIns, pThis, pDesc, addr);
4690 if (pDesc->legacy.cmd.fEOP)
4691 {
4692//#ifdef E1K_USE_TX_TIMERS
4693 if (pThis->fTidEnabled && pDesc->legacy.cmd.fIDE)
4694 {
4695 E1K_INC_ISTAT_CNT(pThis->uStatTxIDE);
4696 //if (pThis->fIntRaised)
4697 //{
4698 // /* Interrupt is already pending, no need for timers */
4699 // ICR |= ICR_TXDW;
4700 //}
4701 //else {
4702                    /* Arm the timer to fire in TIDV usec (discard .024) */
4703 e1kArmTimer(pDevIns, pThis, pThis->hTIDTimer, TIDV);
4704# ifndef E1K_NO_TAD
4705 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4706 E1kLog2(("%s Checking if TAD timer is running\n",
4707 pThis->szPrf));
4708 if (TADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hTADTimer))
4709 e1kArmTimer(pDevIns, pThis, pThis->hTADTimer, TADV);
4710# endif /* E1K_NO_TAD */
4711 }
4712 else
4713 {
4714 if (pThis->fTidEnabled)
4715 {
4716 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4717 pThis->szPrf));
4718 /* Cancel both timers if armed and fire immediately. */
4719# ifndef E1K_NO_TAD
4720 PDMDevHlpTimerStop(pDevIns, pThis->hTADTimer);
4721# endif
4722 PDMDevHlpTimerStop(pDevIns, pThis->hTIDTimer);
4723 }
4724//#endif /* E1K_USE_TX_TIMERS */
4725 E1K_INC_ISTAT_CNT(pThis->uStatIntTx);
4726 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXDW);
4727//#ifdef E1K_USE_TX_TIMERS
4728 }
4729//#endif /* E1K_USE_TX_TIMERS */
4730 }
4731 }
4732 else
4733 {
4734 E1K_INC_ISTAT_CNT(pThis->uStatTxNoRS);
4735 }
4736}
4737
4738#ifndef E1K_WITH_TXD_CACHE
4739
4740/**
4741 * Process Transmit Descriptor.
4742 *
4743 * E1000 supports three types of transmit descriptors:
4744 * - legacy data descriptors of older format (context-less).
4745 * - data the same as legacy but providing new offloading capabilities.
4746 * - context sets up the context for following data descriptors.
4747 *
4748 * @param pDevIns The device instance.
4749 * @param pThis The device state structure.
4750 * @param pThisCC The current context instance data.
4751 * @param pDesc Pointer to descriptor union.
4752 * @param addr Physical address of descriptor in guest memory.
4753 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4754 * @thread E1000_TX
4755 */
4756static int e1kXmitDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, E1KTXDESC *pDesc,
4757 RTGCPHYS addr, bool fOnWorkerThread)
4758{
4759 int rc = VINF_SUCCESS;
4760 uint32_t cbVTag = 0;
4761
4762 e1kPrintTDesc(pThis, pDesc, "vvv");
4763
4764//#ifdef E1K_USE_TX_TIMERS
4765 if (pThis->fTidEnabled)
4766 e1kCancelTimer(pDevIns, pThis, pThis->hTIDTimer);
4767//#endif /* E1K_USE_TX_TIMERS */
4768
4769 switch (e1kGetDescType(pDesc))
4770 {
4771 case E1K_DTYP_CONTEXT:
4772 if (pDesc->context.dw2.fTSE)
4773 {
4774 pThis->contextTSE = pDesc->context;
4775 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4776 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4777 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4778 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4779 }
4780 else
4781 {
4782 pThis->contextNormal = pDesc->context;
4783 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4784 }
4785 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4786 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4787 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4788 pDesc->context.ip.u8CSS,
4789 pDesc->context.ip.u8CSO,
4790 pDesc->context.ip.u16CSE,
4791 pDesc->context.tu.u8CSS,
4792 pDesc->context.tu.u8CSO,
4793 pDesc->context.tu.u16CSE));
4794 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4795 e1kDescReport(pThis, pDesc, addr);
4796 break;
4797
4798 case E1K_DTYP_DATA:
4799 {
4800 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4801 {
4802                E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4803 /** @todo Same as legacy when !TSE. See below. */
4804 break;
4805 }
4806 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4807 &pThis->StatTxDescTSEData:
4808 &pThis->StatTxDescData);
4809 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4810 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4811
4812 /*
4813 * The last descriptor of non-TSE packet must contain VLE flag.
4814             * TSE packets have VLE flag in the first descriptor. The latter
4815 * case is taken care of a bit later when cbVTag gets assigned.
4816 *
4817 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4818 */
4819 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4820 {
4821 pThis->fVTag = pDesc->data.cmd.fVLE;
4822 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4823 }
4824 /*
4825 * First fragment: Allocate new buffer and save the IXSM and TXSM
4826 * packet options as these are only valid in the first fragment.
4827 */
4828 if (pThis->u16TxPktLen == 0)
4829 {
4830 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4831 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4832 E1kLog2(("%s Saving checksum flags:%s%s; \n", pThis->szPrf,
4833 pThis->fIPcsum ? " IP" : "",
4834 pThis->fTCPcsum ? " TCP/UDP" : ""));
4835 if (pDesc->data.cmd.fTSE)
4836 {
4837 /* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
4838 pThis->fVTag = pDesc->data.cmd.fVLE;
4839 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4840 cbVTag = pThis->fVTag ? 4 : 0;
4841 }
4842 else if (pDesc->data.cmd.fEOP)
4843 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4844 else
4845 cbVTag = 4;
4846 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4847 if (e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE))
4848 rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4849 true /*fExactSize*/, true /*fGso*/);
4850 else if (pDesc->data.cmd.fTSE)
4851                    rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4852 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4853 else
4854 rc = e1kXmitAllocBuf(pThis, pThisCC, pDesc->data.cmd.u20DTALEN + cbVTag,
4855 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4856
4857 /**
4858 * @todo: Perhaps it is not that simple for GSO packets! We may
4859 * need to unwind some changes.
4860 */
4861 if (RT_FAILURE(rc))
4862 {
4863 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4864 break;
4865 }
4866                /** @todo Is there any way to indicate errors other than collisions? Like
4867 * VERR_NET_DOWN. */
4868 }
4869
4870 /*
4871 * Add the descriptor data to the frame. If the frame is complete,
4872 * transmit it and reset the u16TxPktLen field.
4873 */
4874 if (e1kXmitIsGsoBuf(pThisCC->CTX_SUFF(pTxSg)))
4875 {
4876 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4877 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4878 if (pDesc->data.cmd.fEOP)
4879 {
4880 if ( fRc
4881 && pThisCC->CTX_SUFF(pTxSg)
4882 && pThisCC->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4883 {
4884 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4885 E1K_INC_CNT32(TSCTC);
4886 }
4887 else
4888 {
4889 if (fRc)
4890 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4891 pThisCC->CTX_SUFF(pTxSg), pThisCC->CTX_SUFF(pTxSg) ? pThisCC->CTX_SUFF(pTxSg)->cbUsed : 0,
4892 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4893 e1kXmitFreeBuf(pThis);
4894 E1K_INC_CNT32(TSCTFC);
4895 }
4896 pThis->u16TxPktLen = 0;
4897 }
4898 }
4899 else if (!pDesc->data.cmd.fTSE)
4900 {
4901 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4902 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4903 if (pDesc->data.cmd.fEOP)
4904 {
4905 if (fRc && pThisCC->CTX_SUFF(pTxSg))
4906 {
4907 Assert(pThisCC->CTX_SUFF(pTxSg)->cSegs == 1);
4908 if (pThis->fIPcsum)
4909 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4910 pThis->contextNormal.ip.u8CSO,
4911 pThis->contextNormal.ip.u8CSS,
4912 pThis->contextNormal.ip.u16CSE);
4913 if (pThis->fTCPcsum)
4914 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4915 pThis->contextNormal.tu.u8CSO,
4916 pThis->contextNormal.tu.u8CSS,
4917 pThis->contextNormal.tu.u16CSE,
4918 !pThis->contextNormal.dw2.fTCP);
4919 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4920 }
4921 else
4922 e1kXmitFreeBuf(pThis);
4923 pThis->u16TxPktLen = 0;
4924 }
4925 }
4926 else
4927 {
4928 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4929 e1kFallbackAddToFrame(pDevIns, pThis, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4930 }
4931
4932 e1kDescReport(pThis, pDesc, addr);
4933 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4934 break;
4935 }
4936
4937 case E1K_DTYP_LEGACY:
4938 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4939 {
4940 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4941 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4942 break;
4943 }
4944 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4945 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4946
4947 /* First fragment: allocate new buffer. */
4948 if (pThis->u16TxPktLen == 0)
4949 {
4950 if (pDesc->legacy.cmd.fEOP)
4951 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4952 else
4953 cbVTag = 4;
4954 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4955 /** @todo reset status bits? */
4956 rc = e1kXmitAllocBuf(pThis, pThisCC, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
4957 if (RT_FAILURE(rc))
4958 {
4959 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4960 break;
4961 }
4962
4963                /** @todo Is there any way to indicate errors other than collisions? Like
4964 * VERR_NET_DOWN. */
4965 }
4966
4967 /* Add fragment to frame. */
4968 if (e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4969 {
4970 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4971
4972 /* Last fragment: Transmit and reset the packet storage counter. */
4973 if (pDesc->legacy.cmd.fEOP)
4974 {
4975 pThis->fVTag = pDesc->legacy.cmd.fVLE;
4976 pThis->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4977 /** @todo Offload processing goes here. */
4978 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4979 pThis->u16TxPktLen = 0;
4980 }
4981 }
4982 /* Last fragment + failure: free the buffer and reset the storage counter. */
4983 else if (pDesc->legacy.cmd.fEOP)
4984 {
4985 e1kXmitFreeBuf(pThis);
4986 pThis->u16TxPktLen = 0;
4987 }
4988
4989 e1kDescReport(pThis, pDesc, addr);
4990 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4991 break;
4992
4993 default:
4994 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4995 pThis->szPrf, e1kGetDescType(pDesc)));
4996 break;
4997 }
4998
4999 return rc;
5000}
5001
5002#else /* E1K_WITH_TXD_CACHE */
5003
5004/**
5005 * Process Transmit Descriptor.
5006 *
5007 * E1000 supports three types of transmit descriptors:
5008 * - legacy data descriptors of older format (context-less).
5009 * - data the same as legacy but providing new offloading capabilities.
5010 * - context sets up the context for following data descriptors.
5011 *
5012 * @param pDevIns The device instance.
5013 * @param pThis The device state structure.
5014 * @param pThisCC The current context instance data.
5015 * @param pDesc Pointer to descriptor union.
5016 * @param addr Physical address of descriptor in guest memory.
5017 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
5019 * @thread E1000_TX
5020 */
5021static int e1kXmitDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, E1KTXDESC *pDesc,
5022 RTGCPHYS addr, bool fOnWorkerThread)
5023{
5024 int rc = VINF_SUCCESS;
5025
5026 e1kPrintTDesc(pThis, pDesc, "vvv");
5027
5028 if (pDesc->legacy.dw3.fDD)
5029 {
5030 E1kLog(("%s e1kXmitDesc: skipping bad descriptor ^^^\n", pThis->szPrf));
5031 e1kDescReport(pDevIns, pThis, pDesc, addr);
5032 return VINF_SUCCESS;
5033 }
5034
5035//#ifdef E1K_USE_TX_TIMERS
5036 if (pThis->fTidEnabled)
5037 PDMDevHlpTimerStop(pDevIns, pThis->hTIDTimer);
5038//#endif /* E1K_USE_TX_TIMERS */
5039
5040 switch (e1kGetDescType(pDesc))
5041 {
5042 case E1K_DTYP_CONTEXT:
5043            /* The caller has already updated the context. */
5044 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
5045 e1kDescReport(pDevIns, pThis, pDesc, addr);
5046 break;
5047
5048 case E1K_DTYP_DATA:
5049 {
5050 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
5051 &pThis->StatTxDescTSEData:
5052 &pThis->StatTxDescData);
5053 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
5054 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5055 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
5056 {
5057                E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
5058 if (pDesc->data.cmd.fEOP)
5059 {
5060 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5061 pThis->u16TxPktLen = 0;
5062 }
5063 }
5064 else
5065 {
5066 /*
5067 * Add the descriptor data to the frame. If the frame is complete,
5068 * transmit it and reset the u16TxPktLen field.
5069 */
5070 if (e1kXmitIsGsoBuf(pThisCC->CTX_SUFF(pTxSg)))
5071 {
5072 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
5073 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5074 if (pDesc->data.cmd.fEOP)
5075 {
5076 if ( fRc
5077 && pThisCC->CTX_SUFF(pTxSg)
5078 && pThisCC->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
5079 {
5080 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5081 E1K_INC_CNT32(TSCTC);
5082 }
5083 else
5084 {
5085 if (fRc)
5086 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
5087 pThisCC->CTX_SUFF(pTxSg), pThisCC->CTX_SUFF(pTxSg) ? pThisCC->CTX_SUFF(pTxSg)->cbUsed : 0,
5088 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
5089 e1kXmitFreeBuf(pThis, pThisCC);
5090 E1K_INC_CNT32(TSCTFC);
5091 }
5092 pThis->u16TxPktLen = 0;
5093 }
5094 }
5095 else if (!pDesc->data.cmd.fTSE)
5096 {
5097 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
5098 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5099 if (pDesc->data.cmd.fEOP)
5100 {
5101 if (fRc && pThisCC->CTX_SUFF(pTxSg))
5102 {
5103 Assert(pThisCC->CTX_SUFF(pTxSg)->cSegs == 1);
5104 if (pThis->fIPcsum)
5105 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5106 pThis->contextNormal.ip.u8CSO,
5107 pThis->contextNormal.ip.u8CSS,
5108 pThis->contextNormal.ip.u16CSE);
5109 if (pThis->fTCPcsum)
5110 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5111 pThis->contextNormal.tu.u8CSO,
5112 pThis->contextNormal.tu.u8CSS,
5113 pThis->contextNormal.tu.u16CSE,
5114 !pThis->contextNormal.dw2.fTCP);
5115 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5116 }
5117 else
5118 e1kXmitFreeBuf(pThis, pThisCC);
5119 pThis->u16TxPktLen = 0;
5120 }
5121 }
5122 else
5123 {
5124 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
5125 rc = e1kFallbackAddToFrame(pDevIns, pThis, pDesc, fOnWorkerThread);
5126 }
5127 }
5128 e1kDescReport(pDevIns, pThis, pDesc, addr);
5129 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5130 break;
5131 }
5132
5133 case E1K_DTYP_LEGACY:
5134 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
5135 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5136 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
5137 {
5138 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
5139 }
5140 else
5141 {
5142 /* Add fragment to frame. */
5143 if (e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
5144 {
5145 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
5146
5147 /* Last fragment: Transmit and reset the packet storage counter. */
5148 if (pDesc->legacy.cmd.fEOP)
5149 {
5150 if (pDesc->legacy.cmd.fIC)
5151 {
5152 e1kInsertChecksum(pThis,
5153 (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
5154 pThis->u16TxPktLen,
5155 pDesc->legacy.cmd.u8CSO,
5156 pDesc->legacy.dw3.u8CSS,
5157 0);
5158 }
5159 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5160 pThis->u16TxPktLen = 0;
5161 }
5162 }
5163 /* Last fragment + failure: free the buffer and reset the storage counter. */
5164 else if (pDesc->legacy.cmd.fEOP)
5165 {
5166 e1kXmitFreeBuf(pThis, pThisCC);
5167 pThis->u16TxPktLen = 0;
5168 }
5169 }
5170 e1kDescReport(pDevIns, pThis, pDesc, addr);
5171 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5172 break;
5173
5174 default:
5175 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
5176 pThis->szPrf, e1kGetDescType(pDesc)));
5177 break;
5178 }
5179
5180 return rc;
5181}
5182
5183DECLINLINE(void) e1kUpdateTxContext(PE1KSTATE pThis, E1KTXDESC *pDesc)
5184{
5185 if (pDesc->context.dw2.fTSE)
5186 {
5187 pThis->contextTSE = pDesc->context;
5188 uint32_t cbMaxSegmentSize = pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + 4; /*VTAG*/
5189 if (RT_UNLIKELY(cbMaxSegmentSize > E1K_MAX_TX_PKT_SIZE))
5190 {
5191 pThis->contextTSE.dw3.u16MSS = E1K_MAX_TX_PKT_SIZE - pThis->contextTSE.dw3.u8HDRLEN - 4; /*VTAG*/
5192 LogRelMax(10, ("%s: Transmit packet is too large: %u > %u(max). Adjusted MSS to %u.\n",
5193 pThis->szPrf, cbMaxSegmentSize, E1K_MAX_TX_PKT_SIZE, pThis->contextTSE.dw3.u16MSS));
5194 }
5195 pThis->u32PayRemain = pThis->contextTSE.dw2.u20PAYLEN;
5196 pThis->u16HdrRemain = pThis->contextTSE.dw3.u8HDRLEN;
5197 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
5198 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
5199 }
5200 else
5201 {
5202 pThis->contextNormal = pDesc->context;
5203 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
5204 }
5205 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
5206 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
5207 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
5208 pDesc->context.ip.u8CSS,
5209 pDesc->context.ip.u8CSO,
5210 pDesc->context.ip.u16CSE,
5211 pDesc->context.tu.u8CSS,
5212 pDesc->context.tu.u8CSO,
5213 pDesc->context.tu.u16CSE));
5214}
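/*
 * Clamping sketch (illustrative, assuming E1K_MAX_TX_PKT_SIZE = 16288): a TSE
 * context with u8HDRLEN = 54 and u16MSS = 16300 would produce segments of
 * 16300 + 54 + 4 = 16358 bytes including a potential VLAN tag, so the code
 * above reduces u16MSS to 16288 - 54 - 4 = 16230.
 */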
5215
5216static bool e1kLocateTxPacket(PE1KSTATE pThis)
5217{
5218 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
5219 pThis->szPrf, pThis->cbTxAlloc));
5220 /* Check if we have located the packet already. */
5221 if (pThis->cbTxAlloc)
5222 {
5223 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
5224 pThis->szPrf, pThis->cbTxAlloc));
5225 return true;
5226 }
5227
5228 bool fTSE = false;
5229 uint32_t cbPacket = 0;
5230
5231 for (int i = pThis->iTxDCurrent; i < pThis->nTxDFetched; ++i)
5232 {
5233 E1KTXDESC *pDesc = &pThis->aTxDescriptors[i];
5234 switch (e1kGetDescType(pDesc))
5235 {
5236 case E1K_DTYP_CONTEXT:
5237 if (cbPacket == 0)
5238 e1kUpdateTxContext(pThis, pDesc);
5239 else
5240 E1kLog(("%s e1kLocateTxPacket: ignoring a context descriptor in the middle of a packet, cbPacket=%d\n",
5241 pThis->szPrf, cbPacket));
5242 continue;
5243 case E1K_DTYP_LEGACY:
5244 /* Skip invalid descriptors. */
5245 if (cbPacket > 0 && (pThis->fGSO || fTSE))
5246 {
5247 E1kLog(("%s e1kLocateTxPacket: ignoring a legacy descriptor in the segmentation context, cbPacket=%d\n",
5248 pThis->szPrf, cbPacket));
5249 pDesc->legacy.dw3.fDD = true; /* Make sure it is skipped by processing */
5250 continue;
5251 }
5252 /* Skip empty descriptors. */
5253 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
5254 break;
5255 cbPacket += pDesc->legacy.cmd.u16Length;
5256 pThis->fGSO = false;
5257 break;
5258 case E1K_DTYP_DATA:
5259 /* Skip invalid descriptors. */
5260 if (cbPacket > 0 && (bool)pDesc->data.cmd.fTSE != fTSE)
5261 {
5262 E1kLog(("%s e1kLocateTxPacket: ignoring %sTSE descriptor in the %ssegmentation context, cbPacket=%d\n",
5263 pThis->szPrf, pDesc->data.cmd.fTSE ? "" : "non-", fTSE ? "" : "non-", cbPacket));
5264 pDesc->data.dw3.fDD = true; /* Make sure it is skipped by processing */
5265 continue;
5266 }
5267 /* Skip empty descriptors. */
5268 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
5269 break;
5270 if (cbPacket == 0)
5271 {
5272 /*
5273 * The first fragment: save IXSM and TXSM options
5274 * as these are only valid in the first fragment.
5275 */
5276 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
5277 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
5278 fTSE = pDesc->data.cmd.fTSE;
5279 /*
5280 * TSE descriptors have VLE bit properly set in
5281 * the first fragment.
5282 */
5283 if (fTSE)
5284 {
5285 pThis->fVTag = pDesc->data.cmd.fVLE;
5286 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5287 }
5288 pThis->fGSO = e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE);
5289 }
5290 cbPacket += pDesc->data.cmd.u20DTALEN;
5291 break;
5292 default:
5293 AssertMsgFailed(("Impossible descriptor type!"));
5294 continue;
5295 }
5296 if (pDesc->legacy.cmd.fEOP)
5297 {
5298 /*
5299 * Non-TSE descriptors have VLE bit properly set in
5300 * the last fragment.
5301 */
5302 if (!fTSE)
5303 {
5304 pThis->fVTag = pDesc->data.cmd.fVLE;
5305 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5306 }
5307 /*
5308 * Compute the required buffer size. If we cannot do GSO but still
5309 * have to do segmentation we allocate the first segment only.
5310 */
5311 pThis->cbTxAlloc = (!fTSE || pThis->fGSO) ?
5312 cbPacket :
5313 RT_MIN(cbPacket, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN);
5314 /* Do not add VLAN tags to empty packets. */
5315 if (pThis->fVTag && pThis->cbTxAlloc > 0)
5316 pThis->cbTxAlloc += 4;
5317 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d cbPacket=%d%s%s\n",
5318 pThis->szPrf, pThis->cbTxAlloc, cbPacket,
5319 pThis->fGSO ? " GSO" : "", fTSE ? " TSE" : ""));
5320 return true;
5321 }
5322 }
5323
5324 if (cbPacket == 0 && pThis->nTxDFetched - pThis->iTxDCurrent > 0)
5325 {
5326 /* All descriptors were empty, we need to process them as a dummy packet */
5327 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
5328 pThis->szPrf, pThis->cbTxAlloc));
5329 return true;
5330 }
5331 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d cbPacket=%d\n",
5332 pThis->szPrf, pThis->cbTxAlloc, cbPacket));
5333 return false;
5334}
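/*
 * Sizing sketch (illustrative, values assumed): a TSE packet of cbPacket = 8000
 * bytes with u8HDRLEN = 54 and u16MSS = 1460 that cannot be offloaded as GSO
 * gets only its first segment allocated: cbTxAlloc = min(8000, 1460 + 54) =
 * 1514, plus 4 bytes if a VLAN tag is to be inserted. A GSO-capable or non-TSE
 * packet gets the full cbPacket bytes.
 */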
5335
5336static int e1kXmitPacket(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5337{
5338 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5339 int rc = VINF_SUCCESS;
5340
5341 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
5342 pThis->szPrf, pThis->iTxDCurrent, pThis->nTxDFetched));
5343
5344 while (pThis->iTxDCurrent < pThis->nTxDFetched)
5345 {
5346 E1KTXDESC *pDesc = &pThis->aTxDescriptors[pThis->iTxDCurrent];
5347 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5348 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
5349 rc = e1kXmitDesc(pDevIns, pThis, pThisCC, pDesc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5350 if (RT_FAILURE(rc))
5351 break;
5352 if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
5353 TDH = 0;
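        /*
         * Illustrative note: TDLEN is in bytes, so with an assumed 4096-byte ring
         * of 16-byte descriptors TDH wraps from 255 back to 0 here.
         */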
5354 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
5355 if (uLowThreshold != 0 && e1kGetTxLen(pThis) <= uLowThreshold)
5356 {
5357 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5358 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5359 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5360 }
5361 ++pThis->iTxDCurrent;
5362 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
5363 break;
5364 }
5365
5366 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
5367 pThis->szPrf, rc, pThis->iTxDCurrent, pThis->nTxDFetched));
5368 return rc;
5369}
5370
5371#endif /* E1K_WITH_TXD_CACHE */
5372#ifndef E1K_WITH_TXD_CACHE
5373
5374/**
5375 * Transmit pending descriptors.
5376 *
5377 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5378 *
5379 * @param pDevIns The device instance.
5380 * @param pThis The E1000 state.
5381 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5382 */
5383static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5384{
5385 int rc = VINF_SUCCESS;
5386 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5387
5388 /* Check if transmitter is enabled. */
5389 if (!(TCTL & TCTL_EN))
5390 return VINF_SUCCESS;
5391 /*
5392 * Grab the xmit lock of the driver as well as the E1K device state.
5393 */
5394 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5395 if (RT_LIKELY(rc == VINF_SUCCESS))
5396 {
5397 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5398 if (pDrv)
5399 {
5400 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5401 if (RT_FAILURE(rc))
5402 {
5403 e1kCsTxLeave(pThis);
5404 return rc;
5405 }
5406 }
5407 /*
5408 * Process all pending descriptors.
5409 * Note! Do not process descriptors in locked state
5410 */
5411 while (TDH != TDT && !pThis->fLocked)
5412 {
5413 E1KTXDESC desc;
5414 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5415 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
5416
5417 e1kLoadDesc(pDevIns, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
5418 rc = e1kXmitDesc(pDevIns, pThis, pThisCC, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5419 /* If we failed to transmit descriptor we will try it again later */
5420 if (RT_FAILURE(rc))
5421 break;
5422 if (++TDH * sizeof(desc) >= TDLEN)
5423 TDH = 0;
5424
5425 if (e1kGetTxLen(pThis) <= GET_BITS(TXDCTL, LWTHRESH)*8)
5426 {
5427 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5428 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5429 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5430 }
5431
5432 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5433 }
5434
5435 /// @todo uncomment: pThis->uStatIntTXQE++;
5436 /// @todo uncomment: e1kRaiseInterrupt(pDevIns, pThis, ICR_TXQE);
5437 /*
5438 * Release the lock.
5439 */
5440 if (pDrv)
5441 pDrv->pfnEndXmit(pDrv);
5442 e1kCsTxLeave(pThis);
5443 }
5444
5445 return rc;
5446}
5447
5448#else /* E1K_WITH_TXD_CACHE */
5449
5450static void e1kDumpTxDCache(PPDMDEVINS pDevIns, PE1KSTATE pThis)
5451{
5452 unsigned i, cDescs = TDLEN / sizeof(E1KTXDESC);
5453 uint32_t tdh = TDH;
5454 LogRel(("E1000: -- Transmit Descriptors (%d total) --\n", cDescs));
5455 for (i = 0; i < cDescs; ++i)
5456 {
5457 E1KTXDESC desc;
5458 PDMDevHlpPCIPhysRead(pDevIns , e1kDescAddr(TDBAH, TDBAL, i), &desc, sizeof(desc));
5459 if (i == tdh)
5460 LogRel(("E1000: >>> "));
5461 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5462 }
5463 LogRel(("E1000: -- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5464 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE));
5465 if (tdh > pThis->iTxDCurrent)
5466 tdh -= pThis->iTxDCurrent;
5467 else
5468 tdh = cDescs + tdh - pThis->iTxDCurrent;
5469 for (i = 0; i < pThis->nTxDFetched; ++i)
5470 {
5471 if (i == pThis->iTxDCurrent)
5472 LogRel(("E1000: >>> "));
5473 if (cDescs)
5474 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
5475 else
5476 LogRel(("E1000: <lost>: %R[e1ktxd]\n", &pThis->aTxDescriptors[i]));
5477 }
5478}
5479
5480/**
5481 * Transmit pending descriptors.
5482 *
5483 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5484 *
5485 * @param pDevIns The device instance.
5486 * @param pThis The E1000 state.
5487 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5488 */
5489static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5490{
5491 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5492 int rc = VINF_SUCCESS;
5493
5494 /* Check if transmitter is enabled. */
5495 if (!(TCTL & TCTL_EN))
5496 return VINF_SUCCESS;
5497 /*
5498 * Grab the xmit lock of the driver as well as the E1K device state.
5499 */
5500 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
5501 if (pDrv)
5502 {
5503 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5504 if (RT_FAILURE(rc))
5505 return rc;
5506 }
5507
5508 /*
5509 * Process all pending descriptors.
5510 * Note! Do not process descriptors in locked state
5511 */
5512 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5513 if (RT_LIKELY(rc == VINF_SUCCESS))
5514 {
5515 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5516 /*
5517 * fIncomplete is set whenever we try to fetch additional descriptors
5518         * for an incomplete packet. If we fail to locate a complete packet on
5519         * the next iteration, we need to reset the cache or we risk getting
5520         * stuck in this loop forever.
5521 */
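        /*
         * Example (illustrative): if the guest posts a full cache worth of data
         * descriptors without ever setting EOP, the e1kLocateTxPacket() loop below
         * finds no complete packet, a reload does not help either, and the cache
         * is dropped with a "No complete packets" warning instead of spinning
         * forever.
         */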
5522 bool fIncomplete = false;
5523 while (!pThis->fLocked && e1kTxDLazyLoad(pDevIns, pThis))
5524 {
5525 while (e1kLocateTxPacket(pThis))
5526 {
5527 fIncomplete = false;
5528 /* Found a complete packet, allocate it. */
5529 rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->fGSO);
5530 /* If we're out of bandwidth we'll come back later. */
5531 if (RT_FAILURE(rc))
5532 goto out;
5533 /* Copy the packet to allocated buffer and send it. */
5534 rc = e1kXmitPacket(pDevIns, pThis, fOnWorkerThread);
5535 /* If we're out of bandwidth we'll come back later. */
5536 if (RT_FAILURE(rc))
5537 goto out;
5538 }
5539 uint8_t u8Remain = pThis->nTxDFetched - pThis->iTxDCurrent;
5540 if (RT_UNLIKELY(fIncomplete))
5541 {
5542 static bool fTxDCacheDumped = false;
5543 /*
5544 * The descriptor cache is full, but we were unable to find
5545 * a complete packet in it. Drop the cache and hope that
5546 * the guest driver can recover from network card error.
5547 */
5548 LogRel(("%s: No complete packets in%s TxD cache! "
5549 "Fetched=%d, current=%d, TX len=%d.\n",
5550 pThis->szPrf,
5551 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5552 pThis->nTxDFetched, pThis->iTxDCurrent,
5553 e1kGetTxLen(pThis)));
5554 if (!fTxDCacheDumped)
5555 {
5556 fTxDCacheDumped = true;
5557 e1kDumpTxDCache(pDevIns, pThis);
5558 }
5559 pThis->iTxDCurrent = pThis->nTxDFetched = 0;
5560 /*
5561 * Returning an error at this point means Guru in R0
5562 * (see @bugref{6428}).
5563 */
5564# ifdef IN_RING3
5565 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5566# else /* !IN_RING3 */
5567 rc = VINF_IOM_R3_MMIO_WRITE;
5568# endif /* !IN_RING3 */
5569 goto out;
5570 }
5571 if (u8Remain > 0)
5572 {
5573 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5574 "%d more are available\n",
5575 pThis->szPrf, pThis->iTxDCurrent, u8Remain,
5576 e1kGetTxLen(pThis) - u8Remain));
5577
5578 /*
5579 * A packet was partially fetched. Move incomplete packet to
5580 * the beginning of cache buffer, then load more descriptors.
5581 */
5582 memmove(pThis->aTxDescriptors,
5583 &pThis->aTxDescriptors[pThis->iTxDCurrent],
5584 u8Remain * sizeof(E1KTXDESC));
5585 pThis->iTxDCurrent = 0;
5586 pThis->nTxDFetched = u8Remain;
5587 e1kTxDLoadMore(pDevIns, pThis);
5588 fIncomplete = true;
5589 }
5590 else
5591 pThis->nTxDFetched = 0;
5592 pThis->iTxDCurrent = 0;
5593 }
5594 if (!pThis->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5595 {
5596 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5597 pThis->szPrf));
5598 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5599 }
5600out:
5601 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5602
5603 /// @todo uncomment: pThis->uStatIntTXQE++;
5604 /// @todo uncomment: e1kRaiseInterrupt(pDevIns, pThis, ICR_TXQE);
5605
5606 e1kCsTxLeave(pThis);
5607 }
5608
5609
5610 /*
5611 * Release the lock.
5612 */
5613 if (pDrv)
5614 pDrv->pfnEndXmit(pDrv);
5615 return rc;
5616}
5617
5618#endif /* E1K_WITH_TXD_CACHE */
5619#ifdef IN_RING3
5620
5621/**
5622 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5623 */
5624static DECLCALLBACK(void) e1kR3NetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5625{
5626 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
5627 PE1KSTATE pThis = pThisCC->pShared;
5628 /* Resume suspended transmission */
5629 STATUS &= ~STATUS_TXOFF;
5630 e1kXmitPending(pThisCC->pDevInsR3, pThis, true /*fOnWorkerThread*/);
5631}
5632
5633/**
5634 * @callback_method_impl{FNPDMTASKDEV,
5635 * Executes e1kXmitPending at the behest of ring-0/raw-mode.}
5636 * @note Not executed on EMT.
5637 */
5638static DECLCALLBACK(void) e1kR3TxTaskCallback(PPDMDEVINS pDevIns, void *pvUser)
5639{
5640 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
5641 E1kLog2(("%s e1kR3TxTaskCallback:\n", pThis->szPrf));
5642
5643 int rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
5644 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN || rc == VERR_NET_DOWN, ("%Rrc\n", rc));
5645
5646 RT_NOREF(rc, pvUser);
5647}
5648
5649#endif /* IN_RING3 */
5650
5651/**
5652 * Write handler for Transmit Descriptor Tail register.
5653 *
5654 * @param pThis The device state structure.
5655 * @param offset Register offset in memory-mapped frame.
5656 * @param index Register index in register array.
5657 * @param value The value to store.
5658 * @param mask Used to implement partial writes (8 and 16-bit).
5659 * @thread EMT
5660 */
5661static int e1kRegWriteTDT(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5662{
5663 int rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
5664
5665 /* All descriptors starting with head and not including tail belong to us. */
5666 /* Process them. */
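    /*
     * Example (illustrative): with TDH = 10 and TDT = 14, descriptors 10..13 are
     * owned by the device and get processed; TDH == TDT means the ring is empty,
     * hence the check below requires the two to differ.
     */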
5667 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5668 pThis->szPrf, TDBAL, TDBAH, TDLEN, TDH, TDT));
5669
5670 /* Ignore TDT writes when the link is down. */
5671 if (TDH != TDT && (STATUS & STATUS_LU))
5672 {
5673 Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", TDH, TDT, e1kGetTxLen(pThis)));
5674 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5675 pThis->szPrf, e1kGetTxLen(pThis)));
5676
5677 /* Transmit pending packets if possible, defer it if we cannot do it
5678 in the current context. */
5679#ifdef E1K_TX_DELAY
5680 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5681 if (RT_LIKELY(rc == VINF_SUCCESS))
5682 {
5683            if (!PDMDevHlpTimerIsActive(pDevIns, pThis->hTXDTimer))
5684 {
5685# ifdef E1K_INT_STATS
5686 pThis->u64ArmedAt = RTTimeNanoTS();
5687# endif
5688 e1kArmTimer(pDevIns, pThis, pThis->hTXDTimer, E1K_TX_DELAY);
5689 }
5690 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayed);
5691 e1kCsTxLeave(pThis);
5692 return rc;
5693 }
5694 /* We failed to enter the TX critical section -- transmit as usual. */
5695#endif /* E1K_TX_DELAY */
5696#ifndef IN_RING3
5697 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5698 if (!pThisCC->CTX_SUFF(pDrv))
5699 {
5700 PDMDevHlpTaskTrigger(pDevIns, pThis->hTxTask);
5701 rc = VINF_SUCCESS;
5702 }
5703 else
5704#endif
5705 {
5706 rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
5707 if (rc == VERR_TRY_AGAIN)
5708 rc = VINF_SUCCESS;
5709#ifndef IN_RING3
5710 else if (rc == VERR_SEM_BUSY)
5711 rc = VINF_IOM_R3_MMIO_WRITE;
5712#endif
5713 AssertRC(rc);
5714 }
5715 }
5716
5717 return rc;
5718}
5719
5720/**
5721 * Write handler for Multicast Table Array registers.
5722 *
5723 * @param pThis The device state structure.
5724 * @param offset Register offset in memory-mapped frame.
5725 * @param index Register index in register array.
5726 * @param value The value to store.
5727 * @thread EMT
5728 */
5729static int e1kRegWriteMTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5730{
5731 RT_NOREF_PV(pDevIns);
5732 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5733 pThis->auMTA[(offset - g_aE1kRegMap[index].offset) / sizeof(pThis->auMTA[0])] = value;
5734
5735 return VINF_SUCCESS;
5736}
5737
5738/**
5739 * Read handler for Multicast Table Array registers.
5740 *
5741 * @returns VBox status code.
5742 *
5743 * @param pThis The device state structure.
5744 * @param offset Register offset in memory-mapped frame.
5745 * @param index Register index in register array.
5746 * @thread EMT
5747 */
5748static int e1kRegReadMTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5749{
5750 RT_NOREF_PV(pDevIns);
5751 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5752 *pu32Value = pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])];
5753
5754 return VINF_SUCCESS;
5755}
5756
5757/**
5758 * Write handler for Receive Address registers.
5759 *
5760 * @param pThis The device state structure.
5761 * @param offset Register offset in memory-mapped frame.
5762 * @param index Register index in register array.
5763 * @param value The value to store.
5764 * @thread EMT
5765 */
5766static int e1kRegWriteRA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5767{
5768 RT_NOREF_PV(pDevIns);
5769 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5770 pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
5771
5772 return VINF_SUCCESS;
5773}
5774
5775/**
5776 * Read handler for Receive Address registers.
5777 *
5778 * @returns VBox status code.
5779 *
5780 * @param pThis The device state structure.
5781 * @param offset Register offset in memory-mapped frame.
5782 * @param index Register index in register array.
5783 * @thread EMT
5784 */
5785static int e1kRegReadRA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5786{
5787 RT_NOREF_PV(pDevIns);
5788    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5789 *pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
5790
5791 return VINF_SUCCESS;
5792}
5793
5794/**
5795 * Write handler for VLAN Filter Table Array registers.
5796 *
5797 * @param pThis The device state structure.
5798 * @param offset Register offset in memory-mapped frame.
5799 * @param index Register index in register array.
5800 * @param value The value to store.
5801 * @thread EMT
5802 */
5803static int e1kRegWriteVFTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5804{
5805 RT_NOREF_PV(pDevIns);
5806 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VINF_SUCCESS);
5807 pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])] = value;
5808
5809 return VINF_SUCCESS;
5810}
5811
5812/**
5813 * Read handler for VLAN Filter Table Array registers.
5814 *
5815 * @returns VBox status code.
5816 *
5817 * @param pThis The device state structure.
5818 * @param offset Register offset in memory-mapped frame.
5819 * @param index Register index in register array.
5820 * @thread EMT
5821 */
5822static int e1kRegReadVFTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5823{
5824 RT_NOREF_PV(pDevIns);
5825    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VERR_DEV_IO_ERROR);
5826 *pu32Value = pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])];
5827
5828 return VINF_SUCCESS;
5829}
5830
5831/**
5832 * Read handler for unimplemented registers.
5833 *
5834 * Merely reports reads from unimplemented registers.
5835 *
5836 * @returns VBox status code.
5837 *
5838 * @param pThis The device state structure.
5839 * @param offset Register offset in memory-mapped frame.
5840 * @param index Register index in register array.
5841 * @thread EMT
5842 */
5843static int e1kRegReadUnimplemented(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5844{
5845 RT_NOREF(pDevIns, pThis, offset, index);
5846 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5847 pThis->szPrf, offset, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5848 *pu32Value = 0;
5849
5850 return VINF_SUCCESS;
5851}
5852
5853/**
5854 * Default register read handler with automatic clear operation.
5855 *
5856 * Retrieves the value of register from register array in device state structure.
5857 * Then resets all bits.
5858 *
5859 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5860 * done in the caller.
5861 *
5862 * @returns VBox status code.
5863 *
5864 * @param pThis The device state structure.
5865 * @param offset Register offset in memory-mapped frame.
5866 * @param index Register index in register array.
5867 * @thread EMT
5868 */
5869static int e1kRegReadAutoClear(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5870{
5871 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5872 int rc = e1kRegReadDefault(pDevIns, pThis, offset, index, pu32Value);
5873 pThis->auRegs[index] = 0;
5874
5875 return rc;
5876}
5877
5878/**
5879 * Default register read handler.
5880 *
5881 * Retrieves the value of register from register array in device state structure.
5882 * Bits corresponding to 0s in 'readable' mask will always read as 0s.
5883 *
5884 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5885 * done in the caller.
5886 *
5887 * @returns VBox status code.
5888 *
5889 * @param pThis The device state structure.
5890 * @param offset Register offset in memory-mapped frame.
5891 * @param index Register index in register array.
5892 * @thread EMT
5893 */
5894static int e1kRegReadDefault(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5895{
5896 RT_NOREF_PV(pDevIns); RT_NOREF_PV(offset);
5897
5898 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5899 *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
5900
5901 return VINF_SUCCESS;
5902}
5903
5904/**
5905 * Write handler for unimplemented registers.
5906 *
5907 * Merely reports writes to unimplemented registers.
5908 *
5909 * @param pThis The device state structure.
5910 * @param offset Register offset in memory-mapped frame.
5911 * @param index Register index in register array.
5912 * @param value The value to store.
5913 * @thread EMT
5914 */
5915
5916static int e1kRegWriteUnimplemented(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5917{
5918 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
5919
5920 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
5921 pThis->szPrf, offset, value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5922
5923 return VINF_SUCCESS;
5924}
5925
5926/**
5927 * Default register write handler.
5928 *
5929 * Stores the value to the register array in device state structure. Only bits
5930 * corresponding to 1s both in 'writable' and 'mask' will be stored.
5931 *
5932 * @returns VBox status code.
5933 *
5934 * @param pThis The device state structure.
5935 * @param offset Register offset in memory-mapped frame.
5936 * @param index Register index in register array.
5937 * @param value The value to store.
5938 * @param mask Used to implement partial writes (8 and 16-bit).
5939 * @thread EMT
5940 */
5941
5942static int e1kRegWriteDefault(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5943{
5944 RT_NOREF(pDevIns, offset);
5945
5946 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5947 pThis->auRegs[index] = (value & g_aE1kRegMap[index].writable)
5948 | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
5949
5950 return VINF_SUCCESS;
5951}
5952
5953/**
5954 * Search register table for matching register.
5955 *
5956 * @returns Index in the register table or -1 if not found.
5957 *
5958 * @param offReg Register offset in memory-mapped region.
5959 * @thread EMT
5960 */
5961static int e1kRegLookup(uint32_t offReg)
5962{
5963
5964#if 0
5965 int index;
5966
5967 for (index = 0; index < E1K_NUM_OF_REGS; index++)
5968 {
5969 if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
5970 {
5971 return index;
5972 }
5973 }
5974#else
5975 int iStart = 0;
5976 int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
5977 for (;;)
5978 {
5979 int i = (iEnd - iStart) / 2 + iStart;
5980 uint32_t offCur = g_aE1kRegMap[i].offset;
5981 if (offReg < offCur)
5982 {
5983 if (i == iStart)
5984 break;
5985 iEnd = i;
5986 }
5987 else if (offReg >= offCur + g_aE1kRegMap[i].size)
5988 {
5989 i++;
5990 if (i == iEnd)
5991 break;
5992 iStart = i;
5993 }
5994 else
5995 return i;
5996 Assert(iEnd > iStart);
5997 }
5998
5999 for (unsigned i = E1K_NUM_OF_BINARY_SEARCHABLE; i < RT_ELEMENTS(g_aE1kRegMap); i++)
6000 if (offReg - g_aE1kRegMap[i].offset < g_aE1kRegMap[i].size)
6001 return (int)i;
6002
6003# ifdef VBOX_STRICT
6004 for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
6005 Assert(offReg - g_aE1kRegMap[i].offset >= g_aE1kRegMap[i].size);
6006# endif
6007
6008#endif
6009
6010 return -1;
6011}
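/*
 * Lookup sketch (illustrative): the first E1K_NUM_OF_BINARY_SEARCHABLE entries
 * of g_aE1kRegMap are kept sorted by offset and bisected above; the remaining
 * entries are scanned linearly afterwards. A miss in both passes returns -1 and
 * the access is logged as targeting a non-existing register.
 */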
6012
6013/**
6014 * Handle unaligned register read operation.
6015 *
6016 * Looks up and calls appropriate handler.
6017 *
6018 * @returns VBox status code.
6019 *
6020 * @param pDevIns The device instance.
6021 * @param pThis The device state structure.
6022 * @param offReg Register offset in memory-mapped frame.
6023 * @param pv Where to store the result.
6024 * @param cb Number of bytes to read.
6025 * @thread EMT
6026 * @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
6027 * accesses we have to take care of that ourselves.
6028 */
6029static int e1kRegReadUnaligned(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, void *pv, uint32_t cb)
6030{
6031 uint32_t u32 = 0;
6032 uint32_t shift;
6033 int rc = VINF_SUCCESS;
6034 int index = e1kRegLookup(offReg);
6035#ifdef LOG_ENABLED
6036 char buf[9];
6037#endif
6038
6039 /*
6040 * From the spec:
6041 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
6042 * double word) is ignored. Partial reads return all 32 bits of data regardless of the byte enables.
6043 */
6044
6045 /*
6046 * To be able to read bytes and short word we convert them to properly
6047 * shifted 32-bit words and masks. The idea is to keep register-specific
6048 * handlers simple. Most accesses will be 32-bit anyway.
6049 */
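    /*
     * Worked example (illustrative): a 1-byte read at offset 0x0D of a 32-bit
     * register located at 0x0C yields shift = 8 and mask = 0x0000FF00; the
     * handler reads the whole 32-bit register, the mask keeps bits 15:8, and the
     * result is shifted right by 8 before being copied back to the caller.
     */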
6050 uint32_t mask;
6051 switch (cb)
6052 {
6053 case 4: mask = 0xFFFFFFFF; break;
6054 case 2: mask = 0x0000FFFF; break;
6055 case 1: mask = 0x000000FF; break;
6056 default:
6057 return PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "unsupported op size: offset=%#10x cb=%#10x\n", offReg, cb);
6058 }
6059 if (index >= 0)
6060 {
6061 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6062 if (g_aE1kRegMap[index].readable)
6063 {
6064 /* Make the mask correspond to the bits we are about to read. */
6065 shift = (offReg - g_aE1kRegMap[index].offset) % sizeof(uint32_t) * 8;
6066 mask <<= shift;
6067 if (!mask)
6068 return PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
6069 /*
6070 * Read it. Pass the mask so the handler knows what has to be read.
6071 * Mask out irrelevant bits.
6072 */
6073 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
6074 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6075 return rc;
6076 //pThis->fDelayInts = false;
6077 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6078 //pThis->iStatIntLostOne = 0;
6079 rc = g_aE1kRegMap[index].pfnRead(pDevIns, pThis, offReg & 0xFFFFFFFC, (uint32_t)index, &u32);
6080 u32 &= mask;
6081 //e1kCsLeave(pThis);
6082 E1kLog2(("%s At %08X read %s from %s (%s)\n",
6083 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6084 Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
6085 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6086 /* Shift back the result. */
6087 u32 >>= shift;
6088 }
6089 else
6090 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
6091 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6092 if (IOM_SUCCESS(rc))
6093 STAM_COUNTER_INC(&pThis->aStatRegReads[index]);
6094 }
6095 else
6096 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
6097 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf)));
6098
6099 memcpy(pv, &u32, cb);
6100 return rc;
6101}
6102
6103/**
6104 * Handle 4 byte aligned and sized read operation.
6105 *
6106 * Looks up and calls appropriate handler.
6107 *
6108 * @returns VBox status code.
6109 *
6110 * @param pDevIns The device instance.
6111 * @param pThis The device state structure.
6112 * @param offReg Register offset in memory-mapped frame.
6113 * @param pu32 Where to store the result.
6114 * @thread EMT
6115 */
6116static VBOXSTRICTRC e1kRegReadAlignedU32(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, uint32_t *pu32)
6117{
6118 Assert(!(offReg & 3));
6119
6120 /*
6121 * Lookup the register and check that it's readable.
6122 */
6123 VBOXSTRICTRC rc = VINF_SUCCESS;
6124 int idxReg = e1kRegLookup(offReg);
6125 if (RT_LIKELY(idxReg >= 0))
6126 {
6127 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6128        if (RT_LIKELY(g_aE1kRegMap[idxReg].readable))
6129 {
6130 /*
6131 * Read it. Pass the mask so the handler knows what has to be read.
6132 * Mask out irrelevant bits.
6133 */
6134 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
6135 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
6136 // return rc;
6137 //pThis->fDelayInts = false;
6138 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6139 //pThis->iStatIntLostOne = 0;
6140 rc = g_aE1kRegMap[idxReg].pfnRead(pDevIns, pThis, offReg & 0xFFFFFFFC, (uint32_t)idxReg, pu32);
6141 //e1kCsLeave(pThis);
6142 Log6(("%s At %08X read %08X from %s (%s)\n",
6143 pThis->szPrf, offReg, *pu32, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
6144 if (IOM_SUCCESS(rc))
6145 STAM_COUNTER_INC(&pThis->aStatRegReads[idxReg]);
6146 }
6147 else
6148 E1kLog(("%s At %08X read attempt from non-readable register %s (%s)\n",
6149 pThis->szPrf, offReg, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
6150 }
6151 else
6152 E1kLog(("%s At %08X read attempt from non-existing register\n", pThis->szPrf, offReg));
6153 return rc;
6154}
6155
6156/**
6157 * Handle 4 byte sized and aligned register write operation.
6158 *
6159 * Looks up and calls appropriate handler.
6160 *
6161 * @returns VBox status code.
6162 *
6163 * @param pDevIns The device instance.
6164 * @param pThis The device state structure.
6165 * @param offReg Register offset in memory-mapped frame.
6166 * @param u32Value The value to write.
6167 * @thread EMT
6168 */
6169static VBOXSTRICTRC e1kRegWriteAlignedU32(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
6170{
6171 VBOXSTRICTRC rc = VINF_SUCCESS;
6172 int index = e1kRegLookup(offReg);
6173 if (RT_LIKELY(index >= 0))
6174 {
6175 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6176 if (RT_LIKELY(g_aE1kRegMap[index].writable))
6177 {
6178 /*
6179 * Write it. Pass the mask so the handler knows what has to be written.
6180 * Mask out irrelevant bits.
6181 */
6182 Log6(("%s At %08X write %08X to %s (%s)\n",
6183 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6184 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
6185 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
6186 // return rc;
6187 //pThis->fDelayInts = false;
6188 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6189 //pThis->iStatIntLostOne = 0;
6190 rc = g_aE1kRegMap[index].pfnWrite(pDevIns, pThis, offReg, (uint32_t)index, u32Value);
6191 //e1kCsLeave(pThis);
6192 }
6193 else
6194 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
6195 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6196 if (IOM_SUCCESS(rc))
6197 STAM_COUNTER_INC(&pThis->aStatRegWrites[index]);
6198 }
6199 else
6200 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
6201 pThis->szPrf, offReg, u32Value));
6202 return rc;
6203}
6204
6205
6206/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
6207
6208/**
6209 * @callback_method_impl{FNIOMMMIONEWREAD}
6210 */
6211static DECLCALLBACK(VBOXSTRICTRC) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, uint32_t cb)
6212{
6213 RT_NOREF2(pvUser, cb);
6214 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6215 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6216
6217 Assert(off < E1K_MM_SIZE);
6218 Assert(cb == 4);
6219 Assert(!(off & 3));
6220
6221 VBOXSTRICTRC rcStrict = e1kRegReadAlignedU32(pDevIns, pThis, (uint32_t)off, (uint32_t *)pv);
6222
6223 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6224 return rcStrict;
6225}
6226
6227/**
6228 * @callback_method_impl{FNIOMMMIONEWWRITE}
6229 */
6230static DECLCALLBACK(VBOXSTRICTRC) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, uint32_t cb)
6231{
6232 RT_NOREF2(pvUser, cb);
6233 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6234 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6235
6236 Assert(off < E1K_MM_SIZE);
6237 Assert(cb == 4);
6238 Assert(!(off & 3));
6239
6240 VBOXSTRICTRC rcStrict = e1kRegWriteAlignedU32(pDevIns, pThis, (uint32_t)off, *(uint32_t const *)pv);
6241
6242 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6243 return rcStrict;
6244}
6245
6246/**
6247 * @callback_method_impl{FNIOMIOPORTNEWIN}
6248 */
6249static DECLCALLBACK(VBOXSTRICTRC) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
6250{
6251 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6252 VBOXSTRICTRC rc;
6253 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIORead), a);
6254 RT_NOREF_PV(pvUser);
6255
6256 if (RT_LIKELY(cb == 4))
6257 switch (offPort)
6258 {
6259 case 0x00: /* IOADDR */
6260 *pu32 = pThis->uSelectedReg;
6261 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6262 rc = VINF_SUCCESS;
6263 break;
6264
6265 case 0x04: /* IODATA */
6266 if (!(pThis->uSelectedReg & 3))
6267 rc = e1kRegReadAlignedU32(pDevIns, pThis, pThis->uSelectedReg, pu32);
6268 else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
6269 rc = e1kRegReadUnaligned(pDevIns, pThis, pThis->uSelectedReg, pu32, cb);
6270 if (rc == VINF_IOM_R3_MMIO_READ)
6271 rc = VINF_IOM_R3_IOPORT_READ;
6272 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6273 break;
6274
6275 default:
6276 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", pThis->szPrf, offPort));
6277 /** @todo r=bird: Check what real hardware returns here. */
6278 //rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
6279 rc = VINF_IOM_MMIO_UNUSED_00; /* used to return VINF_SUCCESS and not touch *pu32, which amounted to this. */
6280 break;
6281 }
6282 else
6283 {
6284 E1kLog(("%s e1kIOPortIn: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb));
6285 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb);
6286 *pu32 = 0; /** @todo r=bird: Check what real hardware returns here. (Didn't used to set a value here, picked zero as that's what we'd end up in most cases.) */
6287 }
6288 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIORead), a);
6289 return rc;
6290}
6291
6292
6293/**
6294 * @callback_method_impl{FNIOMIOPORTNEWOUT}
6295 */
6296static DECLCALLBACK(VBOXSTRICTRC) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
6297{
6298 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6299 VBOXSTRICTRC rc;
6300 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6301 RT_NOREF_PV(pvUser);
6302
6303 E1kLog2(("%s e1kIOPortOut: offPort=%RTiop value=%08x\n", pThis->szPrf, offPort, u32));
6304 if (RT_LIKELY(cb == 4))
6305 {
6306 switch (offPort)
6307 {
6308 case 0x00: /* IOADDR */
6309 pThis->uSelectedReg = u32;
6310 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
6311 rc = VINF_SUCCESS;
6312 break;
6313
6314 case 0x04: /* IODATA */
6315 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
6316 if (RT_LIKELY(!(pThis->uSelectedReg & 3)))
6317 {
6318 rc = e1kRegWriteAlignedU32(pDevIns, pThis, pThis->uSelectedReg, u32);
6319 if (rc == VINF_IOM_R3_MMIO_WRITE)
6320 rc = VINF_IOM_R3_IOPORT_WRITE;
6321 }
6322 else
6323 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
6324 "Spec violation: misaligned offset: %#10x, ignored.\n", pThis->uSelectedReg);
6325 break;
6326
6327 default:
6328 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", pThis->szPrf, offPort));
6329 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", offPort);
6330 }
6331 }
6332 else
6333 {
6334 E1kLog(("%s e1kIOPortOut: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb));
6335 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s: invalid op size: offPort=%RTiop cb=%#x\n", pThis->szPrf, offPort, cb);
6336 }
6337
6338 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6339 return rc;
6340}
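/*
 * Illustrative sketch (not part of the device, hence #if 0): how a guest
 * driver typically uses the two-register I/O window handled above.  It writes
 * the target register's offset to IOADDR (I/O BAR + 0x00) and then accesses
 * the register's value through IODATA (I/O BAR + 0x04).  The outl/inl helpers
 * are hypothetical stand-ins for whatever port I/O primitives the guest uses.
 */
#if 0
static uint32_t guestSketchReadReg(uint16_t uIoBase, uint32_t offReg)
{
    outl(uIoBase + 0x00, offReg);      /* select the register via IOADDR */
    return inl(uIoBase + 0x04);        /* read its value through IODATA  */
}

static void guestSketchWriteReg(uint16_t uIoBase, uint32_t offReg, uint32_t u32Value)
{
    outl(uIoBase + 0x00, offReg);      /* select the register via IOADDR  */
    outl(uIoBase + 0x04, u32Value);    /* write its value through IODATA  */
}
#endif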
6341
6342#ifdef IN_RING3
6343
6344/**
6345 * Dump complete device state to log.
6346 *
6347 * @param pThis Pointer to device state.
6348 */
6349static void e1kDumpState(PE1KSTATE pThis)
6350{
6351 RT_NOREF(pThis);
6352 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6353 E1kLog2(("%s: %8.8s = %08x\n", pThis->szPrf, g_aE1kRegMap[i].abbrev, pThis->auRegs[i]));
6354# ifdef E1K_INT_STATS
6355 LogRel(("%s: Interrupt attempts: %d\n", pThis->szPrf, pThis->uStatIntTry));
6356 LogRel(("%s: Interrupts raised : %d\n", pThis->szPrf, pThis->uStatInt));
6357 LogRel(("%s: Interrupts lowered: %d\n", pThis->szPrf, pThis->uStatIntLower));
6358 LogRel(("%s: ICR outside ISR : %d\n", pThis->szPrf, pThis->uStatNoIntICR));
6359 LogRel(("%s: IMS raised ints : %d\n", pThis->szPrf, pThis->uStatIntIMS));
6360 LogRel(("%s: Interrupts skipped: %d\n", pThis->szPrf, pThis->uStatIntSkip));
6361 LogRel(("%s: Masked interrupts : %d\n", pThis->szPrf, pThis->uStatIntMasked));
6362 LogRel(("%s: Early interrupts : %d\n", pThis->szPrf, pThis->uStatIntEarly));
6363 LogRel(("%s: Late interrupts : %d\n", pThis->szPrf, pThis->uStatIntLate));
6364 LogRel(("%s: Lost interrupts : %d\n", pThis->szPrf, pThis->iStatIntLost));
6365 LogRel(("%s: Interrupts by RX : %d\n", pThis->szPrf, pThis->uStatIntRx));
6366 LogRel(("%s: Interrupts by TX : %d\n", pThis->szPrf, pThis->uStatIntTx));
6367 LogRel(("%s: Interrupts by ICS : %d\n", pThis->szPrf, pThis->uStatIntICS));
6368 LogRel(("%s: Interrupts by RDTR: %d\n", pThis->szPrf, pThis->uStatIntRDTR));
6369 LogRel(("%s: Interrupts by RDMT: %d\n", pThis->szPrf, pThis->uStatIntRXDMT0));
6370 LogRel(("%s: Interrupts by TXQE: %d\n", pThis->szPrf, pThis->uStatIntTXQE));
6371 LogRel(("%s: TX int delay asked: %d\n", pThis->szPrf, pThis->uStatTxIDE));
6372 LogRel(("%s: TX delayed: %d\n", pThis->szPrf, pThis->uStatTxDelayed));
6373 LogRel(("%s: TX delay expired: %d\n", pThis->szPrf, pThis->uStatTxDelayExp));
6374 LogRel(("%s: TX no report asked: %d\n", pThis->szPrf, pThis->uStatTxNoRS));
6375 LogRel(("%s: TX abs timer expd : %d\n", pThis->szPrf, pThis->uStatTAD));
6376 LogRel(("%s: TX int timer expd : %d\n", pThis->szPrf, pThis->uStatTID));
6377 LogRel(("%s: RX abs timer expd : %d\n", pThis->szPrf, pThis->uStatRAD));
6378 LogRel(("%s: RX int timer expd : %d\n", pThis->szPrf, pThis->uStatRID));
6379 LogRel(("%s: TX CTX descriptors: %d\n", pThis->szPrf, pThis->uStatDescCtx));
6380 LogRel(("%s: TX DAT descriptors: %d\n", pThis->szPrf, pThis->uStatDescDat));
6381 LogRel(("%s: TX LEG descriptors: %d\n", pThis->szPrf, pThis->uStatDescLeg));
6382 LogRel(("%s: Received frames : %d\n", pThis->szPrf, pThis->uStatRxFrm));
6383 LogRel(("%s: Transmitted frames: %d\n", pThis->szPrf, pThis->uStatTxFrm));
6384 LogRel(("%s: TX frames up to 1514: %d\n", pThis->szPrf, pThis->uStatTx1514));
6385 LogRel(("%s: TX frames up to 2962: %d\n", pThis->szPrf, pThis->uStatTx2962));
6386 LogRel(("%s: TX frames up to 4410: %d\n", pThis->szPrf, pThis->uStatTx4410));
6387 LogRel(("%s: TX frames up to 5858: %d\n", pThis->szPrf, pThis->uStatTx5858));
6388 LogRel(("%s: TX frames up to 7306: %d\n", pThis->szPrf, pThis->uStatTx7306));
6389 LogRel(("%s: TX frames up to 8754: %d\n", pThis->szPrf, pThis->uStatTx8754));
6390 LogRel(("%s: TX frames up to 16384: %d\n", pThis->szPrf, pThis->uStatTx16384));
6391 LogRel(("%s: TX frames up to 32768: %d\n", pThis->szPrf, pThis->uStatTx32768));
6392 LogRel(("%s: Larger TX frames : %d\n", pThis->szPrf, pThis->uStatTxLarge));
6393 LogRel(("%s: Max TX Delay : %lld\n", pThis->szPrf, pThis->uStatMaxTxDelay));
6394# endif /* E1K_INT_STATS */
6395}
6396
6397
6398/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
6399
6400/**
6401 * Check if the device can receive data now.
6402 * This must be called before the pfnReceive() method is called.
6403 *
6404 * @returns VBox status code (VINF_SUCCESS if receiving is possible, VERR_NET_NO_BUFFER_SPACE otherwise).
6405 * @param pDevIns The device instance.
6406 * @param pThis The instance data.
6407 * @thread EMT
6408 */
6409static int e1kCanReceive(PPDMDEVINS pDevIns, PE1KSTATE pThis)
6410{
6411#ifndef E1K_WITH_RXD_CACHE
6412 size_t cb;
6413
6414 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6415 return VERR_NET_NO_BUFFER_SPACE;
6416
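 /* Note: with a single-descriptor ring RDH always equals RDT, so the usual
  * head/tail arithmetic below would report an empty ring; presumably that is
  * why this case peeks at the descriptor's DD bit instead. */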
6417 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6418 {
6419 E1KRXDESC desc;
6420 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
6421 if (desc.status.fDD)
6422 cb = 0;
6423 else
6424 cb = pThis->u16RxBSize;
6425 }
6426 else if (RDH < RDT)
6427 cb = (RDT - RDH) * pThis->u16RxBSize;
6428 else if (RDH > RDT)
6429 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pThis->u16RxBSize;
6430 else
6431 {
6432 cb = 0;
6433 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6434 }
6435 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6436 pThis->szPrf, RDH, RDT, RDLEN, pThis->u16RxBSize, cb));
6437
6438 e1kCsRxLeave(pThis);
6439 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6440#else /* E1K_WITH_RXD_CACHE */
6441 int rc = VINF_SUCCESS;
6442
6443 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6444 return VERR_NET_NO_BUFFER_SPACE;
6445
6446 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6447 {
6448 E1KRXDESC desc;
6449 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
6450 if (desc.status.fDD)
6451 rc = VERR_NET_NO_BUFFER_SPACE;
6452 }
6453 else if (e1kRxDIsCacheEmpty(pThis) && RDH == RDT)
6454 {
6455 /* Cache is empty, so is the RX ring. */
6456 rc = VERR_NET_NO_BUFFER_SPACE;
6457 }
6458 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
6459 " u16RxBSize=%d rc=%Rrc\n", pThis->szPrf,
6460 e1kRxDInCache(pThis), RDH, RDT, RDLEN, pThis->u16RxBSize, rc));
6461
6462 e1kCsRxLeave(pThis);
6463 return rc;
6464#endif /* E1K_WITH_RXD_CACHE */
6465}
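/*
 * Illustrative sketch (not part of the device, hence #if 0): the free-buffer
 * computation used by the non-cached branch of e1kCanReceive() above, as a
 * stand-alone helper.  Descriptors between head (RDH) and tail (RDT) are the
 * ones the guest has handed to the device; the ring wraps at cTotal entries
 * and head == tail means the device is out of receive descriptors.
 */
#if 0
static unsigned rxSketchFreeDescs(unsigned uHead, unsigned uTail, unsigned cTotal)
{
    if (uHead < uTail)
        return uTail - uHead;              /* no wrap-around                 */
    if (uHead > uTail)
        return cTotal - uHead + uTail;     /* wraps past the end of the ring */
    return 0;                              /* head == tail: nothing left     */
}
#endif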
6466
6467/**
6468 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6469 */
6470static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6471{
6472 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
6473 PE1KSTATE pThis = pThisCC->pShared;
6474 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6475
6476 int rc = e1kCanReceive(pDevIns, pThis);
6477
6478 if (RT_SUCCESS(rc))
6479 return VINF_SUCCESS;
6480 if (RT_UNLIKELY(cMillies == 0))
6481 return VERR_NET_NO_BUFFER_SPACE;
6482
6483 rc = VERR_INTERRUPTED;
6484 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, true);
6485 STAM_PROFILE_START(&pThis->StatRxOverflow, a);
6486 VMSTATE enmVMState;
6487 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pDevIns)) == VMSTATE_RUNNING
6488 || enmVMState == VMSTATE_RUNNING_LS))
6489 {
6490 int rc2 = e1kCanReceive(pDevIns, pThis);
6491 if (RT_SUCCESS(rc2))
6492 {
6493 rc = VINF_SUCCESS;
6494 break;
6495 }
6496 E1kLogRel(("E1000: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6497 E1kLog(("%s: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", pThis->szPrf, cMillies));
6498 PDMDevHlpSUPSemEventWaitNoResume(pDevIns, pThis->hEventMoreRxDescAvail, cMillies);
6499 }
6500 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
6501 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, false);
6502
6503 return rc;
6504}
6505
6506
6507/**
6508 * Matches the packet addresses against Receive Address table. Looks for
6509 * exact matches only.
6510 *
6511 * @returns true if address matches.
6512 * @param pThis Pointer to the state structure.
6513 * @param pvBuf The ethernet packet.
6514 * @param cb Number of bytes available in the packet.
6515 * @thread EMT
6516 */
6517static bool e1kPerfectMatch(PE1KSTATE pThis, const void *pvBuf)
6518{
6519 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
6520 {
6521 E1KRAELEM* ra = pThis->aRecAddr.array + i;
6522
6523 /* Valid address? */
6524 if (ra->ctl & RA_CTL_AV)
6525 {
6526 Assert((ra->ctl & RA_CTL_AS) < 2);
6527 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6528 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6529 // pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6530 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6531 /*
6532 * Address Select:
6533 * 00b = Destination address
6534 * 01b = Source address
6535 * 10b = Reserved
6536 * 11b = Reserved
6537 * Since ethernet header is (DA, SA, len) we can use address
6538 * select as index.
6539 */
6540 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6541 ra->addr, sizeof(ra->addr)) == 0)
6542 return true;
6543 }
6544 }
6545
6546 return false;
6547}
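/*
 * Illustrative sketch (not part of the device, hence #if 0): the address-select
 * trick used by e1kPerfectMatch() above.  An Ethernet header starts with the
 * 6-byte destination address followed by the 6-byte source address, so the
 * 2-bit Address Select value doubles as an index: 0 selects bytes 0..5 (DA),
 * 1 selects bytes 6..11 (SA).
 */
#if 0
static const uint8_t *etherSketchAddrForSelect(const uint8_t *pbFrame, unsigned uAddrSel)
{
    return pbFrame + 6 * uAddrSel;  /* 0 -> destination address, 1 -> source address */
}
#endif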
6548
6549/**
6550 * Matches the packet addresses against Multicast Table Array.
6551 *
6552 * @remarks This is an imperfect match since it matches a subset of addresses
6553 *          rather than one exact address.
6554 *
6555 * @returns true if address matches.
6556 * @param pThis Pointer to the state structure.
6557 * @param pvBuf The ethernet packet.
6558 * @param cb Number of bytes available in the packet.
6559 * @thread EMT
6560 */
6561static bool e1kImperfectMatch(PE1KSTATE pThis, const void *pvBuf)
6562{
6563 /* Get bits 32..47 of destination address */
6564 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6565
6566 unsigned offset = GET_BITS(RCTL, MO);
6567 /*
6568 * offset means:
6569 * 00b = bits 36..47
6570 * 01b = bits 35..46
6571 * 10b = bits 34..45
6572 * 11b = bits 32..43
6573 */
6574 if (offset < 3)
6575 u16Bit = u16Bit >> (4 - offset);
6576 return ASMBitTest(pThis->auMTA, u16Bit & 0xFFF);
6577}
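/*
 * Illustrative sketch (not part of the device, hence #if 0): the Multicast
 * Table Array lookup performed by e1kImperfectMatch() above, written as a
 * stand-alone helper.  u16AddrHigh is the destination address's bytes 4 and 5
 * as they appear in the frame (address bits 32..47), uMoOffset is RCTL.MO
 * (0..3) and pauMTA would be the 128 x 32-bit table kept by the device.
 */
#if 0
static bool mtaSketchMatch(uint32_t const *pauMTA, uint16_t u16AddrHigh, unsigned uMoOffset)
{
    if (uMoOffset < 3)
        u16AddrHigh >>= 4 - uMoOffset;          /* MO=0 -> bits 36..47, MO=1 -> 35..46, MO=2 -> 34..45 */
    unsigned const iBit = u16AddrHigh & 0xFFF;  /* MO=3 uses bits 32..43 without shifting */
    return (pauMTA[iBit / 32] >> (iBit % 32)) & 1;
}
#endif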
6578
6579/**
6580 * Determines if the packet is to be delivered to upper layer.
6581 *
6582 * The following filters are supported:
6583 * - Exact Unicast/Multicast
6584 * - Promiscuous Unicast/Multicast
6585 * - Multicast
6586 * - VLAN
6587 *
6588 * @returns true if packet is intended for this node.
6589 * @param pThis Pointer to the state structure.
6590 * @param pvBuf The ethernet packet.
6591 * @param cb Number of bytes available in the packet.
6592 * @param pStatus Bit field to store status bits.
6593 * @thread EMT
6594 */
6595static bool e1kAddressFilter(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6596{
6597 Assert(cb > 14);
6598 /* Assume that we fail to pass exact filter. */
6599 pStatus->fPIF = false;
6600 pStatus->fVP = false;
6601 /* Discard oversized packets */
6602 if (cb > E1K_MAX_RX_PKT_SIZE)
6603 {
6604 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6605 pThis->szPrf, cb, E1K_MAX_RX_PKT_SIZE));
6606 E1K_INC_CNT32(ROC);
6607 return false;
6608 }
6609 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6610 {
6611 /* When long packet reception is disabled packets over 1522 are discarded */
6612 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6613 pThis->szPrf, cb));
6614 E1K_INC_CNT32(ROC);
6615 return false;
6616 }
6617
6618 uint16_t *u16Ptr = (uint16_t*)pvBuf;
6619 /* Compare TPID with VLAN Ether Type */
6620 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6621 {
6622 pStatus->fVP = true;
6623 /* Is VLAN filtering enabled? */
6624 if (RCTL & RCTL_VFE)
6625 {
6626 /* It is 802.1q packet indeed, let's filter by VID */
6627 if (RCTL & RCTL_CFIEN)
6628 {
6629 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", pThis->szPrf,
6630 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6631 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6632 !!(RCTL & RCTL_CFI)));
6633 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6634 {
6635 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6636 pThis->szPrf, E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6637 return false;
6638 }
6639 }
6640 else
6641 E1kLog3(("%s VLAN filter: VLAN=%d\n", pThis->szPrf,
6642 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6643 if (!ASMBitTest(pThis->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6644 {
6645 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6646 pThis->szPrf, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6647 return false;
6648 }
6649 }
6650 }
6651 /* Broadcast filtering */
6652 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6653 return true;
6654 E1kLog2(("%s Packet filter: not a broadcast\n", pThis->szPrf));
6655 if (e1kIsMulticast(pvBuf))
6656 {
6657 /* Is multicast promiscuous enabled? */
6658 if (RCTL & RCTL_MPE)
6659 return true;
6660 E1kLog2(("%s Packet filter: no promiscuous multicast\n", pThis->szPrf));
6661 /* Try perfect matches first */
6662 if (e1kPerfectMatch(pThis, pvBuf))
6663 {
6664 pStatus->fPIF = true;
6665 return true;
6666 }
6667 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6668 if (e1kImperfectMatch(pThis, pvBuf))
6669 return true;
6670 E1kLog2(("%s Packet filter: no imperfect match\n", pThis->szPrf));
6671 }
6672 else {
6673 /* Is unicast promiscuous enabled? */
6674 if (RCTL & RCTL_UPE)
6675 return true;
6676 E1kLog2(("%s Packet filter: no promiscuous unicast\n", pThis->szPrf));
6677 if (e1kPerfectMatch(pThis, pvBuf))
6678 {
6679 pStatus->fPIF = true;
6680 return true;
6681 }
6682 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6683 }
6684 E1kLog2(("%s Packet filter: packet discarded\n", pThis->szPrf));
6685 return false;
6686}
6687
6688/**
6689 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6690 */
6691static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6692{
6693 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
6694 PE1KSTATE pThis = pThisCC->pShared;
6695 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6696 int rc = VINF_SUCCESS;
6697
6698 /*
6699 * Drop packets if the VM is not running yet/anymore.
6700 */
6701 VMSTATE enmVMState = PDMDevHlpVMState(pDevIns);
6702 if ( enmVMState != VMSTATE_RUNNING
6703 && enmVMState != VMSTATE_RUNNING_LS)
6704 {
6705 E1kLog(("%s Dropping incoming packet as VM is not running.\n", pThis->szPrf));
6706 return VINF_SUCCESS;
6707 }
6708
6709 /* Discard incoming packets in locked state */
6710 if (!(RCTL & RCTL_EN) || pThis->fLocked || !(STATUS & STATUS_LU))
6711 {
6712 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", pThis->szPrf));
6713 return VINF_SUCCESS;
6714 }
6715
6716 STAM_PROFILE_ADV_START(&pThis->StatReceive, a);
6717
6718 //if (!e1kCsEnter(pThis, RT_SRC_POS))
6719 // return VERR_PERMISSION_DENIED;
6720
6721 e1kPacketDump(pDevIns, pThis, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6722
6723 /* Update stats */
6724 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
6725 {
6726 E1K_INC_CNT32(TPR);
6727 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6728 e1kCsLeave(pThis);
6729 }
6730 STAM_PROFILE_ADV_START(&pThis->StatReceiveFilter, a);
6731 E1KRXDST status;
6732 RT_ZERO(status);
6733 bool fPassed = e1kAddressFilter(pThis, pvBuf, cb, &status);
6734 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveFilter, a);
6735 if (fPassed)
6736 {
6737 rc = e1kHandleRxPacket(pDevIns, pThis, pvBuf, cb, status);
6738 }
6739 //e1kCsLeave(pThis);
6740 STAM_PROFILE_ADV_STOP(&pThis->StatReceive, a);
6741
6742 return rc;
6743}
6744
6745
6746/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
6747
6748/**
6749 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
6750 */
6751static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6752{
6753 if (iLUN == 0)
6754 {
6755 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, ILeds);
6756 *ppLed = &pThisCC->pShared->led;
6757 return VINF_SUCCESS;
6758 }
6759 return VERR_PDM_LUN_NOT_FOUND;
6760}
6761
6762
6763/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
6764
6765/**
6766 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
6767 */
6768static DECLCALLBACK(int) e1kR3GetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6769{
6770 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
6771 pThisCC->eeprom.getMac(pMac);
6772 return VINF_SUCCESS;
6773}
6774
6775/**
6776 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
6777 */
6778static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kR3GetLinkState(PPDMINETWORKCONFIG pInterface)
6779{
6780 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
6781 PE1KSTATE pThis = pThisCC->pShared;
6782 if (STATUS & STATUS_LU)
6783 return PDMNETWORKLINKSTATE_UP;
6784 return PDMNETWORKLINKSTATE_DOWN;
6785}
6786
6787/**
6788 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
6789 */
6790static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6791{
6792 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
6793 PE1KSTATE pThis = pThisCC->pShared;
6794 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6795
6796 E1kLog(("%s e1kR3SetLinkState: enmState=%d\n", pThis->szPrf, enmState));
6797 switch (enmState)
6798 {
6799 case PDMNETWORKLINKSTATE_UP:
6800 pThis->fCableConnected = true;
6801 /* If link was down, bring it up after a while. */
6802 if (!(STATUS & STATUS_LU))
6803 e1kBringLinkUpDelayed(pDevIns, pThis);
6804 break;
6805 case PDMNETWORKLINKSTATE_DOWN:
6806 pThis->fCableConnected = false;
6807 /* Always set the phy link state to down, regardless of the STATUS_LU bit.
6808 * We might have to set the link state before the driver initializes us. */
6809 Phy::setLinkStatus(&pThis->phy, false);
6810 /* If link was up, bring it down. */
6811 if (STATUS & STATUS_LU)
6812 e1kR3LinkDown(pDevIns, pThis, pThisCC);
6813 break;
6814 case PDMNETWORKLINKSTATE_DOWN_RESUME:
6815 /*
6816 * There is not much sense in bringing down the link if it has not come up yet.
6817 * If it is up though, we bring it down temporarily, then bring it up again.
6818 */
6819 if (STATUS & STATUS_LU)
6820 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
6821 break;
6822 default:
6823 ;
6824 }
6825 return VINF_SUCCESS;
6826}
6827
6828
6829/* -=-=-=-=- PDMIBASE -=-=-=-=- */
6830
6831/**
6832 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6833 */
6834static DECLCALLBACK(void *) e1kR3QueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6835{
6836 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, IBase);
6837 Assert(&pThisCC->IBase == pInterface);
6838
6839 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThisCC->IBase);
6840 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThisCC->INetworkDown);
6841 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThisCC->INetworkConfig);
6842 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThisCC->ILeds);
6843 return NULL;
6844}
6845
6846
6847/* -=-=-=-=- Saved State -=-=-=-=- */
6848
6849/**
6850 * Saves the configuration.
6851 *
6852 * @param pThis The E1K state.
6853 * @param pSSM The handle to the saved state.
6854 */
6855static void e1kSaveConfig(PCPDMDEVHLPR3 pHlp, PE1KSTATE pThis, PSSMHANDLE pSSM)
6856{
6857 pHlp->pfnSSMPutMem(pSSM, &pThis->macConfigured, sizeof(pThis->macConfigured));
6858 pHlp->pfnSSMPutU32(pSSM, pThis->eChip);
6859}
6860
6861/**
6862 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
6863 */
6864static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
6865{
6866 RT_NOREF(uPass);
6867 e1kSaveConfig(pDevIns->pHlpR3, PDMDEVINS_2_DATA(pDevIns, PE1KSTATE), pSSM);
6868 return VINF_SSM_DONT_CALL_AGAIN;
6869}
6870
6871/**
6872 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
6873 */
6874static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6875{
6876 RT_NOREF(pSSM);
6877 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6878
6879 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6880 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6881 return rc;
6882 e1kCsLeave(pThis);
6883 return VINF_SUCCESS;
6884#if 0
6885 /* 1) Prevent all threads from modifying the state and memory */
6886 //pThis->fLocked = true;
6887 /* 2) Cancel all timers */
6888#ifdef E1K_TX_DELAY
6889 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
6890#endif /* E1K_TX_DELAY */
6891//#ifdef E1K_USE_TX_TIMERS
6892 if (pThis->fTidEnabled)
6893 {
6894 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
6895#ifndef E1K_NO_TAD
6896 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
6897#endif /* E1K_NO_TAD */
6898 }
6899//#endif /* E1K_USE_TX_TIMERS */
6900#ifdef E1K_USE_RX_TIMERS
6901 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
6902 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
6903#endif /* E1K_USE_RX_TIMERS */
6904 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
6905 /* 3) Did I forget anything? */
6906 E1kLog(("%s Locked\n", pThis->szPrf));
6907 return VINF_SUCCESS;
6908#endif
6909}
6910
6911/**
6912 * @callback_method_impl{FNSSMDEVSAVEEXEC}
6913 */
6914static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6915{
6916 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6917 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
6918 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
6919
6920 e1kSaveConfig(pHlp, pThis, pSSM);
6921 pThisCC->eeprom.save(pHlp, pSSM);
6922 e1kDumpState(pThis);
6923 pHlp->pfnSSMPutMem(pSSM, pThis->auRegs, sizeof(pThis->auRegs));
6924 pHlp->pfnSSMPutBool(pSSM, pThis->fIntRaised);
6925 Phy::saveState(pHlp, pSSM, &pThis->phy);
6926 pHlp->pfnSSMPutU32(pSSM, pThis->uSelectedReg);
6927 pHlp->pfnSSMPutMem(pSSM, pThis->auMTA, sizeof(pThis->auMTA));
6928 pHlp->pfnSSMPutMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6929 pHlp->pfnSSMPutMem(pSSM, pThis->auVFTA, sizeof(pThis->auVFTA));
6930 pHlp->pfnSSMPutU64(pSSM, pThis->u64AckedAt);
6931 pHlp->pfnSSMPutU16(pSSM, pThis->u16RxBSize);
6932 //pHlp->pfnSSMPutBool(pSSM, pThis->fDelayInts);
6933 //pHlp->pfnSSMPutBool(pSSM, pThis->fIntMaskUsed);
6934 pHlp->pfnSSMPutU16(pSSM, pThis->u16TxPktLen);
6935/** @todo State wrt to the TSE buffer is incomplete, so little point in
6936 * saving this actually. */
6937 pHlp->pfnSSMPutMem(pSSM, pThis->aTxPacketFallback, pThis->u16TxPktLen);
6938 pHlp->pfnSSMPutBool(pSSM, pThis->fIPcsum);
6939 pHlp->pfnSSMPutBool(pSSM, pThis->fTCPcsum);
6940 pHlp->pfnSSMPutMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6941 pHlp->pfnSSMPutMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6942 pHlp->pfnSSMPutBool(pSSM, pThis->fVTag);
6943 pHlp->pfnSSMPutU16(pSSM, pThis->u16VTagTCI);
6944#ifdef E1K_WITH_TXD_CACHE
6945# if 0
6946 pHlp->pfnSSMPutU8(pSSM, pThis->nTxDFetched);
6947 pHlp->pfnSSMPutMem(pSSM, pThis->aTxDescriptors,
6948 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6949# else
6950 /*
6951 * There is no point in storing TX descriptor cache entries as we can simply
6952 * fetch them again. Moreover, normally the cache is always empty when we
6953 * save the state. Store zero entries for compatibility.
6954 */
6955 pHlp->pfnSSMPutU8(pSSM, 0);
6956# endif
6957#endif /* E1K_WITH_TXD_CACHE */
6958/** @todo GSO requires some more state here. */
6959 E1kLog(("%s State has been saved\n", pThis->szPrf));
6960 return VINF_SUCCESS;
6961}
6962
6963#if 0
6964/**
6965 * @callback_method_impl{FNSSMDEVSAVEDONE}
6966 */
6967static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6968{
6969 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6970
6971 /* If VM is being powered off unlocking will result in assertions in PGM */
6972 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
6973 pThis->fLocked = false;
6974 else
6975 E1kLog(("%s VM is not running -- remain locked\n", pThis->szPrf));
6976 E1kLog(("%s Unlocked\n", pThis->szPrf));
6977 return VINF_SUCCESS;
6978}
6979#endif
6980
6981/**
6982 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
6983 */
6984static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6985{
6986 RT_NOREF(pSSM);
6987 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6988
6989 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6990 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6991 return rc;
6992 e1kCsLeave(pThis);
6993 return VINF_SUCCESS;
6994}
6995
6996/**
6997 * @callback_method_impl{FNSSMDEVLOADEXEC}
6998 */
6999static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
7000{
7001 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7002 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7003 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
7004 int rc;
7005
7006 if ( uVersion != E1K_SAVEDSTATE_VERSION
7007#ifdef E1K_WITH_TXD_CACHE
7008 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
7009#endif /* E1K_WITH_TXD_CACHE */
7010 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
7011 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
7012 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
7013
7014 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
7015 || uPass != SSM_PASS_FINAL)
7016 {
7017 /* config checks */
7018 RTMAC macConfigured;
7019 rc = pHlp->pfnSSMGetMem(pSSM, &macConfigured, sizeof(macConfigured));
7020 AssertRCReturn(rc, rc);
7021 if ( memcmp(&macConfigured, &pThis->macConfigured, sizeof(macConfigured))
7022 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
7023 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
7024
7025 E1KCHIP eChip;
7026 rc = pHlp->pfnSSMGetU32(pSSM, &eChip);
7027 AssertRCReturn(rc, rc);
7028 if (eChip != pThis->eChip)
7029 return pHlp->pfnSSMSetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
7030 }
7031
7032 if (uPass == SSM_PASS_FINAL)
7033 {
7034 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
7035 {
7036 rc = pThisCC->eeprom.load(pHlp, pSSM);
7037 AssertRCReturn(rc, rc);
7038 }
7039 /* the state */
7040 pHlp->pfnSSMGetMem(pSSM, &pThis->auRegs, sizeof(pThis->auRegs));
7041 pHlp->pfnSSMGetBool(pSSM, &pThis->fIntRaised);
7042 /** @todo PHY could be made a separate device with its own versioning */
7043 Phy::loadState(pHlp, pSSM, &pThis->phy);
7044 pHlp->pfnSSMGetU32(pSSM, &pThis->uSelectedReg);
7045 pHlp->pfnSSMGetMem(pSSM, &pThis->auMTA, sizeof(pThis->auMTA));
7046 pHlp->pfnSSMGetMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
7047 pHlp->pfnSSMGetMem(pSSM, &pThis->auVFTA, sizeof(pThis->auVFTA));
7048 pHlp->pfnSSMGetU64(pSSM, &pThis->u64AckedAt);
7049 pHlp->pfnSSMGetU16(pSSM, &pThis->u16RxBSize);
7050 //pHlp->pfnSSMGetBool(pSSM, pThis->fDelayInts);
7051 //pHlp->pfnSSMGetBool(pSSM, pThis->fIntMaskUsed);
7052 rc = pHlp->pfnSSMGetU16(pSSM, &pThis->u16TxPktLen);
7053 AssertRCReturn(rc, rc);
7054 if (pThis->u16TxPktLen > sizeof(pThis->aTxPacketFallback))
7055 pThis->u16TxPktLen = sizeof(pThis->aTxPacketFallback);
7056 pHlp->pfnSSMGetMem(pSSM, &pThis->aTxPacketFallback[0], pThis->u16TxPktLen);
7057 pHlp->pfnSSMGetBool(pSSM, &pThis->fIPcsum);
7058 pHlp->pfnSSMGetBool(pSSM, &pThis->fTCPcsum);
7059 pHlp->pfnSSMGetMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
7060 rc = pHlp->pfnSSMGetMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
7061 AssertRCReturn(rc, rc);
7062 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
7063 {
7064 pHlp->pfnSSMGetBool(pSSM, &pThis->fVTag);
7065 rc = pHlp->pfnSSMGetU16(pSSM, &pThis->u16VTagTCI);
7066 AssertRCReturn(rc, rc);
7067 }
7068 else
7069 {
7070 pThis->fVTag = false;
7071 pThis->u16VTagTCI = 0;
7072 }
7073#ifdef E1K_WITH_TXD_CACHE
7074 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
7075 {
7076 rc = pHlp->pfnSSMGetU8(pSSM, &pThis->nTxDFetched);
7077 AssertRCReturn(rc, rc);
7078 if (pThis->nTxDFetched)
7079 pHlp->pfnSSMGetMem(pSSM, pThis->aTxDescriptors,
7080 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
7081 }
7082 else
7083 pThis->nTxDFetched = 0;
7084 /**
7085 * @todo Perhaps we should not store TXD cache as the entries can be
7086 * simply fetched again from guest's memory. Or can't they?
7087 */
7088#endif /* E1K_WITH_TXD_CACHE */
7089#ifdef E1K_WITH_RXD_CACHE
7090 /*
7091 * There is no point in storing the RX descriptor cache in the saved
7092 * state, we just need to make sure it is empty.
7093 */
7094 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
7095#endif /* E1K_WITH_RXD_CACHE */
7096 rc = pHlp->pfnSSMHandleGetStatus(pSSM);
7097 AssertRCReturn(rc, rc);
7098
7099 /* derived state */
7100 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
7101
7102 E1kLog(("%s State has been restored\n", pThis->szPrf));
7103 e1kDumpState(pThis);
7104 }
7105 return VINF_SUCCESS;
7106}
7107
7108/**
7109 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
7110 */
7111static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7112{
7113 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7114 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7115 RT_NOREF(pSSM);
7116
7117 /* Update promiscuous mode */
7118 if (pThisCC->pDrvR3)
7119 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, !!(RCTL & (RCTL_UPE | RCTL_MPE)));
7120
7121 /*
7122 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
7123 * passed to us. We only go through all this if the link was up and we
7124 * weren't teleported.
7125 */
7126 if ( (STATUS & STATUS_LU)
7127 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
7128 && pThis->cMsLinkUpDelay)
7129 {
7130 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
7131 }
7132 return VINF_SUCCESS;
7133}
7134
7135
7136
7137/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
7138
7139/**
7140 * @callback_method_impl{FNRTSTRFORMATTYPE}
7141 */
7142static DECLCALLBACK(size_t) e1kFmtRxDesc(PFNRTSTROUTPUT pfnOutput,
7143 void *pvArgOutput,
7144 const char *pszType,
7145 void const *pvValue,
7146 int cchWidth,
7147 int cchPrecision,
7148 unsigned fFlags,
7149 void *pvUser)
7150{
7151 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7152 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
7153 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
7154 if (!pDesc)
7155 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
7156
7157 size_t cbPrintf = 0;
7158 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
7159 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
7160 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
7161 pDesc->status.fPIF ? "PIF" : "pif",
7162 pDesc->status.fIPCS ? "IPCS" : "ipcs",
7163 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
7164 pDesc->status.fVP ? "VP" : "vp",
7165 pDesc->status.fIXSM ? "IXSM" : "ixsm",
7166 pDesc->status.fEOP ? "EOP" : "eop",
7167 pDesc->status.fDD ? "DD" : "dd",
7168 pDesc->status.fRXE ? "RXE" : "rxe",
7169 pDesc->status.fIPE ? "IPE" : "ipe",
7170 pDesc->status.fTCPE ? "TCPE" : "tcpe",
7171 pDesc->status.fCE ? "CE" : "ce",
7172 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
7173 E1K_SPEC_VLAN(pDesc->status.u16Special),
7174 E1K_SPEC_PRI(pDesc->status.u16Special));
7175 return cbPrintf;
7176}
7177
7178/**
7179 * @callback_method_impl{FNRTSTRFORMATTYPE}
7180 */
7181static DECLCALLBACK(size_t) e1kFmtTxDesc(PFNRTSTROUTPUT pfnOutput,
7182 void *pvArgOutput,
7183 const char *pszType,
7184 void const *pvValue,
7185 int cchWidth,
7186 int cchPrecision,
7187 unsigned fFlags,
7188 void *pvUser)
7189{
7190 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7191 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
7192 E1KTXDESC *pDesc = (E1KTXDESC*)pvValue;
7193 if (!pDesc)
7194 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
7195
7196 size_t cbPrintf = 0;
7197 switch (e1kGetDescType(pDesc))
7198 {
7199 case E1K_DTYP_CONTEXT:
7200 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
7201 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
7202 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
7203 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
7204 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
7205 pDesc->context.dw2.fIDE ? " IDE":"",
7206 pDesc->context.dw2.fRS ? " RS" :"",
7207 pDesc->context.dw2.fTSE ? " TSE":"",
7208 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
7209 pDesc->context.dw2.fTCP ? "TCP":"UDP",
7210 pDesc->context.dw2.u20PAYLEN,
7211 pDesc->context.dw3.u8HDRLEN,
7212 pDesc->context.dw3.u16MSS,
7213 pDesc->context.dw3.fDD?"DD":"");
7214 break;
7215 case E1K_DTYP_DATA:
7216 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
7217 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
7218 pDesc->data.u64BufAddr,
7219 pDesc->data.cmd.u20DTALEN,
7220 pDesc->data.cmd.fIDE ? " IDE" :"",
7221 pDesc->data.cmd.fVLE ? " VLE" :"",
7222 pDesc->data.cmd.fRPS ? " RPS" :"",
7223 pDesc->data.cmd.fRS ? " RS" :"",
7224 pDesc->data.cmd.fTSE ? " TSE" :"",
7225 pDesc->data.cmd.fIFCS? " IFCS":"",
7226 pDesc->data.cmd.fEOP ? " EOP" :"",
7227 pDesc->data.dw3.fDD ? " DD" :"",
7228 pDesc->data.dw3.fEC ? " EC" :"",
7229 pDesc->data.dw3.fLC ? " LC" :"",
7230 pDesc->data.dw3.fTXSM? " TXSM":"",
7231 pDesc->data.dw3.fIXSM? " IXSM":"",
7232 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
7233 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
7234 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
7235 break;
7236 case E1K_DTYP_LEGACY:
7237 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
7238 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
7239 pDesc->data.u64BufAddr,
7240 pDesc->legacy.cmd.u16Length,
7241 pDesc->legacy.cmd.fIDE ? " IDE" :"",
7242 pDesc->legacy.cmd.fVLE ? " VLE" :"",
7243 pDesc->legacy.cmd.fRPS ? " RPS" :"",
7244 pDesc->legacy.cmd.fRS ? " RS" :"",
7245 pDesc->legacy.cmd.fIC ? " IC" :"",
7246 pDesc->legacy.cmd.fIFCS? " IFCS":"",
7247 pDesc->legacy.cmd.fEOP ? " EOP" :"",
7248 pDesc->legacy.dw3.fDD ? " DD" :"",
7249 pDesc->legacy.dw3.fEC ? " EC" :"",
7250 pDesc->legacy.dw3.fLC ? " LC" :"",
7251 pDesc->legacy.cmd.u8CSO,
7252 pDesc->legacy.dw3.u8CSS,
7253 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
7254 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
7255 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
7256 break;
7257 default:
7258 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
7259 break;
7260 }
7261
7262 return cbPrintf;
7263}
7264
7265/** Initializes debug helpers (logging format types). */
7266static int e1kInitDebugHelpers(void)
7267{
7268 int rc = VINF_SUCCESS;
7269 static bool s_fHelpersRegistered = false;
7270 if (!s_fHelpersRegistered)
7271 {
7272 s_fHelpersRegistered = true;
7273 rc = RTStrFormatTypeRegister("e1krxd", e1kFmtRxDesc, NULL);
7274 AssertRCReturn(rc, rc);
7275 rc = RTStrFormatTypeRegister("e1ktxd", e1kFmtTxDesc, NULL);
7276 AssertRCReturn(rc, rc);
7277 }
7278 return rc;
7279}
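/*
 * Illustrative sketch (not part of the device, hence #if 0): once the
 * "e1krxd"/"e1ktxd" types are registered, descriptors can be printed with the
 * regular IPRT %R[...] format specifier, as e1kInfo() below does.  The helper
 * here is hypothetical and only shows the calling convention.
 */
#if 0
static void e1kSketchDumpDescs(PCDBGFINFOHLP pHlp, E1KRXDESC const *pRxd, E1KTXDESC const *pTxd)
{
    pHlp->pfnPrintf(pHlp, "RX: %R[e1krxd]\n", pRxd);   /* formatted by e1kFmtRxDesc */
    pHlp->pfnPrintf(pHlp, "TX: %R[e1ktxd]\n", pTxd);   /* formatted by e1kFmtTxDesc */
}
#endif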
7280
7281/**
7282 * Status info callback.
7283 *
7284 * @param pDevIns The device instance.
7285 * @param pHlp The output helpers.
7286 * @param pszArgs The arguments.
7287 */
7288static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
7289{
7290 RT_NOREF(pszArgs);
7291 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7292 unsigned i;
7293 // bool fRcvRing = false;
7294 // bool fXmtRing = false;
7295
7296 /*
7297 * Parse args.
7298 if (pszArgs)
7299 {
7300 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
7301 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
7302 }
7303 */
7304
7305 /*
7306 * Show info.
7307 */
7308 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%04x mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7309 pDevIns->iInstance,
7310 PDMDevHlpIoPortGetMappingAddress(pDevIns, pThis->hIoPorts),
7311 PDMDevHlpMmioGetMappingAddress(pDevIns, pThis->hMmioRegion),
7312 &pThis->macConfigured, g_aChips[pThis->eChip].pcszName,
7313 pDevIns->fRCEnabled ? " RC" : "", pDevIns->fR0Enabled ? " R0" : "");
7314
7315 e1kCsEnter(pThis, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
7316
7317 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7318 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", g_aE1kRegMap[i].abbrev, pThis->auRegs[i]);
7319
7320 for (i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
7321 {
7322 E1KRAELEM* ra = pThis->aRecAddr.array + i;
7323 if (ra->ctl & RA_CTL_AV)
7324 {
7325 const char *pcszTmp;
7326 switch (ra->ctl & RA_CTL_AS)
7327 {
7328 case 0: pcszTmp = "DST"; break;
7329 case 1: pcszTmp = "SRC"; break;
7330 default: pcszTmp = "reserved";
7331 }
7332 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7333 }
7334 }
7335 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7336 uint32_t rdh = RDH;
7337 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7338 for (i = 0; i < cDescs; ++i)
7339 {
7340 E1KRXDESC desc;
7341 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7342 &desc, sizeof(desc));
7343 if (i == rdh)
7344 pHlp->pfnPrintf(pHlp, ">>> ");
7345 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7346 }
7347#ifdef E1K_WITH_RXD_CACHE
7348 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7349 pThis->iRxDCurrent, RDH, pThis->nRxDFetched, E1K_RXD_CACHE_SIZE);
7350 if (rdh > pThis->iRxDCurrent)
7351 rdh -= pThis->iRxDCurrent;
7352 else
7353 rdh = cDescs + rdh - pThis->iRxDCurrent;
7354 for (i = 0; i < pThis->nRxDFetched; ++i)
7355 {
7356 if (i == pThis->iRxDCurrent)
7357 pHlp->pfnPrintf(pHlp, ">>> ");
7358 if (cDescs)
7359 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7360 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7361 &pThis->aRxDescriptors[i]);
7362 else
7363 pHlp->pfnPrintf(pHlp, "<lost>: %R[e1krxd]\n",
7364 &pThis->aRxDescriptors[i]);
7365 }
7366#endif /* E1K_WITH_RXD_CACHE */
7367
7368 cDescs = TDLEN / sizeof(E1KTXDESC);
7369 uint32_t tdh = TDH;
7370 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7371 for (i = 0; i < cDescs; ++i)
7372 {
7373 E1KTXDESC desc;
7374 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7375 &desc, sizeof(desc));
7376 if (i == tdh)
7377 pHlp->pfnPrintf(pHlp, ">>> ");
7378 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7379 }
7380#ifdef E1K_WITH_TXD_CACHE
7381 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7382 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE);
7383 if (tdh > pThis->iTxDCurrent)
7384 tdh -= pThis->iTxDCurrent;
7385 else
7386 tdh = cDescs + tdh - pThis->iTxDCurrent;
7387 for (i = 0; i < pThis->nTxDFetched; ++i)
7388 {
7389 if (i == pThis->iTxDCurrent)
7390 pHlp->pfnPrintf(pHlp, ">>> ");
7391 if (cDescs)
7392 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7393 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7394 &pThis->aTxDescriptors[i]);
7395 else
7396 pHlp->pfnPrintf(pHlp, "<lost>: %R[e1ktxd]\n",
7397 &pThis->aTxDescriptors[i]);
7398 }
7399#endif /* E1K_WITH_TXD_CACHE */
7400
7401
7402#ifdef E1K_INT_STATS
7403 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pThis->uStatIntTry);
7404 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pThis->uStatInt);
7405 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pThis->uStatIntLower);
7406 pHlp->pfnPrintf(pHlp, "ICR outside ISR : %d\n", pThis->uStatNoIntICR);
7407 pHlp->pfnPrintf(pHlp, "IMS raised ints : %d\n", pThis->uStatIntIMS);
7408 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pThis->uStatIntSkip);
7409 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pThis->uStatIntMasked);
7410 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pThis->uStatIntEarly);
7411 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pThis->uStatIntLate);
7412 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pThis->iStatIntLost);
7413 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pThis->uStatIntRx);
7414 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pThis->uStatIntTx);
7415 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pThis->uStatIntICS);
7416 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pThis->uStatIntRDTR);
7417 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pThis->uStatIntRXDMT0);
7418 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pThis->uStatIntTXQE);
7419 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pThis->uStatTxIDE);
7420 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pThis->uStatTxDelayed);
7421 pHlp->pfnPrintf(pHlp, "TX delayed expired: %d\n", pThis->uStatTxDelayExp);
7422 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pThis->uStatTxNoRS);
7423 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pThis->uStatTAD);
7424 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pThis->uStatTID);
7425 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pThis->uStatRAD);
7426 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pThis->uStatRID);
7427 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pThis->uStatDescCtx);
7428 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pThis->uStatDescDat);
7429 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pThis->uStatDescLeg);
7430 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pThis->uStatRxFrm);
7431 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pThis->uStatTxFrm);
7432 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pThis->uStatTx1514);
7433 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pThis->uStatTx2962);
7434 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pThis->uStatTx4410);
7435 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pThis->uStatTx5858);
7436 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pThis->uStatTx7306);
7437 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pThis->uStatTx8754);
7438 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pThis->uStatTx16384);
7439 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pThis->uStatTx32768);
7440 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pThis->uStatTxLarge);
7441#endif /* E1K_INT_STATS */
7442
7443 e1kCsLeave(pThis);
7444}
7445
7446
7447
7448/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
7449
7450/**
7451 * Detach notification.
7452 *
7453 * One port on the network card has been disconnected from the network.
7454 *
7455 * @param pDevIns The device instance.
7456 * @param iLUN The logical unit which is being detached.
7457 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7458 */
7459static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7460{
7461 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7462 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7463 Log(("%s e1kR3Detach:\n", pThis->szPrf));
7464 RT_NOREF(fFlags);
7465
7466 AssertLogRelReturnVoid(iLUN == 0);
7467
7468 PDMDevHlpCritSectEnter(pDevIns, &pThis->cs, VERR_SEM_BUSY);
7469
7470 /** @todo r=pritesh still need to check whether I missed cleaning
7471 * something up in this function
7472 */
7473
7474 /*
7475 * Zero some important members.
7476 */
7477 pThisCC->pDrvBase = NULL;
7478 pThisCC->pDrvR3 = NULL;
7479#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
7480 pThisR0->pDrvR0 = NIL_RTR0PTR;
7481 pThisRC->pDrvRC = NIL_RTRCPTR;
7482#endif
7483
7484 PDMDevHlpCritSectLeave(pDevIns, &pThis->cs);
7485}
7486
7487/**
7488 * Attach the Network attachment.
7489 *
7490 * One port on the network card has been connected to a network.
7491 *
7492 * @returns VBox status code.
7493 * @param pDevIns The device instance.
7494 * @param iLUN The logical unit which is being attached.
7495 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7496 *
7497 * @remarks This code path is not used during construction.
7498 */
7499static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7500{
7501 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7502 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7503 LogFlow(("%s e1kR3Attach:\n", pThis->szPrf));
7504 RT_NOREF(fFlags);
7505
7506 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7507
7508 PDMDevHlpCritSectEnter(pDevIns, &pThis->cs, VERR_SEM_BUSY);
7509
7510 /*
7511 * Attach the driver.
7512 */
7513 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThisCC->IBase, &pThisCC->pDrvBase, "Network Port");
7514 if (RT_SUCCESS(rc))
7515 {
7516 pThisCC->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMINETWORKUP);
7517 AssertMsgStmt(pThisCC->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7518 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7519 if (RT_SUCCESS(rc))
7520 {
7521#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
7522 pThisR0->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7523 pThisRC->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7524#endif
7525 }
7526 }
7527 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7528 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7529 {
7530 /* This should never happen because this function is not called
7531 * if there is no driver to attach! */
7532 Log(("%s No attached driver!\n", pThis->szPrf));
7533 }
7534
7535 /*
7536 * Temporarily set the link down if it was up so that the guest will know
7537 * that we have changed the configuration of the network card.
7538 */
7539 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7540 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
7541
7542 PDMDevHlpCritSectLeave(pDevIns, &pThis->cs);
7543 return rc;
7544}
7545
7546/**
7547 * @copydoc FNPDMDEVPOWEROFF
7548 */
7549static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7550{
7551 /* Poke thread waiting for buffer space. */
7552 e1kWakeupReceive(pDevIns, PDMDEVINS_2_DATA(pDevIns, PE1KSTATE));
7553}
7554
7555/**
7556 * @copydoc FNPDMDEVRESET
7557 */
7558static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7559{
7560 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7561 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7562#ifdef E1K_TX_DELAY
7563 e1kCancelTimer(pDevIns, pThis, pThis->hTXDTimer);
7564#endif /* E1K_TX_DELAY */
7565 e1kCancelTimer(pDevIns, pThis, pThis->hIntTimer);
7566 e1kCancelTimer(pDevIns, pThis, pThis->hLUTimer);
7567 e1kXmitFreeBuf(pThis, pThisCC);
7568 pThis->u16TxPktLen = 0;
7569 pThis->fIPcsum = false;
7570 pThis->fTCPcsum = false;
7571 pThis->fIntMaskUsed = false;
7572 pThis->fDelayInts = false;
7573 pThis->fLocked = false;
7574 pThis->u64AckedAt = 0;
7575 e1kR3HardReset(pDevIns, pThis, pThisCC);
7576}
7577
7578/**
7579 * @copydoc FNPDMDEVSUSPEND
7580 */
7581static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7582{
7583 /* Poke thread waiting for buffer space. */
7584 e1kWakeupReceive(pDevIns, PDMDEVINS_2_DATA(pDevIns, PE1KSTATE));
7585}
7586
7587/**
7588 * Device relocation callback.
7589 *
7590 * When this callback is called the device instance data, and if the
7591 * device has a GC component, is being relocated, and/or the selectors
7592 * have been changed. The device must use the chance to perform the
7593 * necessary pointer relocations and data updates.
7594 *
7595 * Before the GC code is executed the first time, this function will be
7596 * called with a 0 delta so GC pointer calculations can be done in one place.
7597 *
7598 * @param pDevIns Pointer to the device instance.
7599 * @param offDelta The relocation delta relative to the old location.
7600 *
7601 * @remark A relocation CANNOT fail.
7602 */
7603static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7604{
7605 PE1KSTATERC pThisRC = PDMINS_2_DATA_RC(pDevIns, PE1KSTATERC);
7606 if (pThisRC)
7607 pThisRC->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7608 RT_NOREF(offDelta);
7609}
7610
7611/**
7612 * Destruct a device instance.
7613 *
7614 * We need to free non-VM resources only.
7615 *
7616 * @returns VBox status code.
7617 * @param pDevIns The device instance data.
7618 * @thread EMT
7619 */
7620static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7621{
7622 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7623 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7624
7625 e1kDumpState(pThis);
7626 E1kLog(("%s Destroying instance\n", pThis->szPrf));
7627 if (PDMDevHlpCritSectIsInitialized(pDevIns, &pThis->cs))
7628 {
7629 if (pThis->hEventMoreRxDescAvail != NIL_SUPSEMEVENT)
7630 {
7631 PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventMoreRxDescAvail);
7632 RTThreadYield();
7633 PDMDevHlpSUPSemEventClose(pDevIns, pThis->hEventMoreRxDescAvail);
7634 pThis->hEventMoreRxDescAvail = NIL_SUPSEMEVENT;
7635 }
7636#ifdef E1K_WITH_TX_CS
7637 PDMDevHlpCritSectDelete(pDevIns, &pThis->csTx);
7638#endif /* E1K_WITH_TX_CS */
7639 PDMDevHlpCritSectDelete(pDevIns, &pThis->csRx);
7640 PDMDevHlpCritSectDelete(pDevIns, &pThis->cs);
7641 }
7642 return VINF_SUCCESS;
7643}
7644
7645
7646/**
7647 * Set PCI configuration space registers.
7648 *
7649 * @param pPciDev Pointer to the PCI device structure.
 * @param eChip The chip type, used to pick the PCI IDs.
7650 * @thread EMT
7651 */
7652static void e1kR3ConfigurePciDev(PPDMPCIDEV pPciDev, E1KCHIP eChip)
7653{
7654 Assert(eChip < RT_ELEMENTS(g_aChips));
7655 /* Configure PCI Device, assume 32-bit mode ******************************/
7656 PDMPciDevSetVendorId(pPciDev, g_aChips[eChip].uPCIVendorId);
7657 PDMPciDevSetDeviceId(pPciDev, g_aChips[eChip].uPCIDeviceId);
7658 PDMPciDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_aChips[eChip].uPCISubsystemVendorId);
7659 PDMPciDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_aChips[eChip].uPCISubsystemId);
7660
7661 PDMPciDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7662 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7663 PDMPciDevSetWord( pPciDev, VBOX_PCI_STATUS,
7664 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7665 /* Stepping A2 */
7666 PDMPciDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7667 /* Ethernet adapter */
7668 PDMPciDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7669 PDMPciDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7670 /* normal single function Ethernet controller */
7671 PDMPciDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7672 /* Memory Register Base Address */
7673 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7674 /* Memory Flash Base Address */
7675 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7676 /* IO Register Base Address */
7677 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7678 /* Expansion ROM Base Address */
7679 PDMPciDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7680 /* Capabilities Pointer */
7681 PDMPciDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7682 /* Interrupt Pin: INTA# */
7683 PDMPciDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7684 /* Max_Lat/Min_Gnt: very high priority and time slice */
7685 PDMPciDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7686 PDMPciDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7687
7688 /* PCI Power Management Registers ****************************************/
7689 /* Capability ID: PCI Power Management Registers */
7690 PDMPciDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7691 /* Next Item Pointer: PCI-X */
7692 PDMPciDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7693 /* Power Management Capabilities: PM disabled, DSI */
7694 PDMPciDevSetWord( pPciDev, 0xDC + 2,
7695 0x0002 | VBOX_PCI_PM_CAP_DSI);
7696 /* Power Management Control / Status Register: PM disabled */
7697 PDMPciDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7698 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7699 PDMPciDevSetByte( pPciDev, 0xDC + 6, 0x00);
7700 /* Data Register: PM disabled, always 0 */
7701 PDMPciDevSetByte( pPciDev, 0xDC + 7, 0x00);
7702
7703 /* PCI-X Configuration Registers *****************************************/
7704 /* Capability ID: PCI-X Configuration Registers */
7705 PDMPciDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7706#ifdef E1K_WITH_MSI
7707 PDMPciDevSetByte( pPciDev, 0xE4 + 1, 0x80);
7708#else
7709 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7710 PDMPciDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7711#endif
7712 /* PCI-X Command: Enable Relaxed Ordering */
7713 PDMPciDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7714 /* PCI-X Status: 32-bit, 66 MHz */
7715 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7716 PDMPciDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
7717}
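/*
 * For reference, guests discover the two capabilities configured above by walking
 * the capability list starting at the pointer written to VBOX_PCI_CAPABILITY_LIST
 * (0x34). A sketch of such a walk; cfgRead8() stands for whatever config-space
 * accessor the guest uses and is not a VBox API:
 */
#if 0
uint8_t offCap = cfgRead8(VBOX_PCI_CAPABILITY_LIST);    /* 0x34 -> 0xDC (power management) */
while (offCap != 0)
{
    uint8_t idCap   = cfgRead8(offCap);                 /* 0xDC: VBOX_PCI_CAP_ID_PM, 0xE4: VBOX_PCI_CAP_ID_PCIX */
    uint8_t offNext = cfgRead8(offCap + 1);             /* 0xDC+1 -> 0xE4, 0xE4+1 -> 0x00 (or 0x80 with E1K_WITH_MSI) */
    /* ... examine the capability body ... */
    offCap = offNext;
}
#endif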
7718
7719/**
7720 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7721 */
7722static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7723{
7724 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7725 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7726 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7727 int rc;
7728
7729 /*
7730 * Initialize the instance data (state).
7731 * Note! Caller has initialized it to ZERO already.
7732 */
7733 RTStrPrintf(pThis->szPrf, sizeof(pThis->szPrf), "E1000#%d", iInstance);
7734 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", pThis->szPrf, sizeof(E1KRXDESC)));
7735 pThis->hEventMoreRxDescAvail = NIL_SUPSEMEVENT;
7736 pThis->u16TxPktLen = 0;
7737 pThis->fIPcsum = false;
7738 pThis->fTCPcsum = false;
7739 pThis->fIntMaskUsed = false;
7740 pThis->fDelayInts = false;
7741 pThis->fLocked = false;
7742 pThis->u64AckedAt = 0;
7743 pThis->led.u32Magic = PDMLED_MAGIC;
7744 pThis->u32PktNo = 1;
7745
7746 pThisCC->pDevInsR3 = pDevIns;
7747 pThisCC->pShared = pThis;
7748
7749 /* Interfaces */
7750 pThisCC->IBase.pfnQueryInterface = e1kR3QueryInterface;
7751
7752 pThisCC->INetworkDown.pfnWaitReceiveAvail = e1kR3NetworkDown_WaitReceiveAvail;
7753 pThisCC->INetworkDown.pfnReceive = e1kR3NetworkDown_Receive;
7754 pThisCC->INetworkDown.pfnXmitPending = e1kR3NetworkDown_XmitPending;
7755
7756 pThisCC->ILeds.pfnQueryStatusLed = e1kR3QueryStatusLed;
7757
7758 pThisCC->INetworkConfig.pfnGetMac = e1kR3GetMac;
7759 pThisCC->INetworkConfig.pfnGetLinkState = e1kR3GetLinkState;
7760 pThisCC->INetworkConfig.pfnSetLinkState = e1kR3SetLinkState;
7761
7762 /*
7763 * Internal validations.
7764 */
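    /* The registers looked up by binary search (the first E1K_NUM_OF_BINARY_SEARCHABLE
       entries of g_aE1kRegMap) must be sorted by offset; assert that here rather than at lookup time. */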
7765 for (uint32_t iReg = 1; iReg < E1K_NUM_OF_BINARY_SEARCHABLE; iReg++)
7766 AssertLogRelMsgReturn( g_aE1kRegMap[iReg].offset > g_aE1kRegMap[iReg - 1].offset
7767 && g_aE1kRegMap[iReg].offset + g_aE1kRegMap[iReg].size
7768 >= g_aE1kRegMap[iReg - 1].offset + g_aE1kRegMap[iReg - 1].size,
7769 ("%s@%#xLB%#x vs %s@%#xLB%#x\n",
7770 g_aE1kRegMap[iReg].abbrev, g_aE1kRegMap[iReg].offset, g_aE1kRegMap[iReg].size,
7771 g_aE1kRegMap[iReg - 1].abbrev, g_aE1kRegMap[iReg - 1].offset, g_aE1kRegMap[iReg - 1].size),
7772 VERR_INTERNAL_ERROR_4);
7773
7774 /*
7775 * Validate configuration.
7776 */
7777 PDMDEV_VALIDATE_CONFIG_RETURN(pDevIns,
7778 "MAC|"
7779 "CableConnected|"
7780 "AdapterType|"
7781 "LineSpeed|"
7782 "ItrEnabled|"
7783 "ItrRxEnabled|"
7784 "EthernetCRC|"
7785 "GSOEnabled|"
7786 "LinkUpDelay|"
7787 "StatNo",
7788 "");
7789
7790 /** @todo LineSpeed unused! */
7791
7792 /*
7793 * Get config params
7794 */
7795 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
7796 rc = pHlp->pfnCFGMQueryBytes(pCfg, "MAC", pThis->macConfigured.au8, sizeof(pThis->macConfigured.au8));
7797 if (RT_FAILURE(rc))
7798 return PDMDEV_SET_ERROR(pDevIns, rc,
7799 N_("Configuration error: Failed to get MAC address"));
7800 rc = pHlp->pfnCFGMQueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
7801 if (RT_FAILURE(rc))
7802 return PDMDEV_SET_ERROR(pDevIns, rc,
7803 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7804 rc = pHlp->pfnCFGMQueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
7805 if (RT_FAILURE(rc))
7806 return PDMDEV_SET_ERROR(pDevIns, rc,
7807 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7808 Assert(pThis->eChip <= E1K_CHIP_82545EM);
7809
7810 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
7811 if (RT_FAILURE(rc))
7812 return PDMDEV_SET_ERROR(pDevIns, rc,
7813 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7814
7815 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
7816 if (RT_FAILURE(rc))
7817 return PDMDEV_SET_ERROR(pDevIns, rc,
7818 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
7819
7820 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "ItrEnabled", &pThis->fItrEnabled, false);
7821 if (RT_FAILURE(rc))
7822 return PDMDEV_SET_ERROR(pDevIns, rc,
7823 N_("Configuration error: Failed to get the value of 'ItrEnabled'"));
7824
7825 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "ItrRxEnabled", &pThis->fItrRxEnabled, true);
7826 if (RT_FAILURE(rc))
7827 return PDMDEV_SET_ERROR(pDevIns, rc,
7828 N_("Configuration error: Failed to get the value of 'ItrRxEnabled'"));
7829
7830 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "TidEnabled", &pThis->fTidEnabled, false);
7831 if (RT_FAILURE(rc))
7832 return PDMDEV_SET_ERROR(pDevIns, rc,
7833 N_("Configuration error: Failed to get the value of 'TidEnabled'"));
7834
7835 rc = pHlp->pfnCFGMQueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 3000); /* ms */
7836 if (RT_FAILURE(rc))
7837 return PDMDEV_SET_ERROR(pDevIns, rc,
7838 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
7839 Assert(pThis->cMsLinkUpDelay <= 300000); /* at most 5 minutes */
7840 if (pThis->cMsLinkUpDelay > 5000)
7841 LogRel(("%s: WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
7842 else if (pThis->cMsLinkUpDelay == 0)
7843 LogRel(("%s: WARNING! Link up delay is disabled!\n", pThis->szPrf));
7844
7845 uint32_t uStatNo = (uint32_t)iInstance;
7846 rc = pHlp->pfnCFGMQueryU32Def(pCfg, "StatNo", &uStatNo, (uint32_t)iInstance);
7847 if (RT_FAILURE(rc))
7848 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: Failed to get the \"StatNo\" value"));
7849
7850 LogRel(("%s: Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s Itr=%s ItrRx=%s TID=%s R0=%s RC=%s\n", pThis->szPrf,
7851 g_aChips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
7852 pThis->fEthernetCRC ? "on" : "off",
7853 pThis->fGSOEnabled ? "enabled" : "disabled",
7854 pThis->fItrEnabled ? "enabled" : "disabled",
7855 pThis->fItrRxEnabled ? "enabled" : "disabled",
7856 pThis->fTidEnabled ? "enabled" : "disabled",
7857 pDevIns->fR0Enabled ? "enabled" : "disabled",
7858 pDevIns->fRCEnabled ? "enabled" : "disabled"));
7859
7860 /*
7861 * Initialize sub-components and register everything with the VMM.
7862 */
7863
7864 /* Initialize the EEPROM. */
7865 pThisCC->eeprom.init(pThis->macConfigured);
7866
7867 /* Initialize internal PHY. */
7868 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
7869
7870 /* Initialize critical sections. We do our own locking. */
7871 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
7872 AssertRCReturn(rc, rc);
7873
7874 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "E1000#%d", iInstance);
7875 AssertRCReturn(rc, rc);
7876 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "E1000#%dRX", iInstance);
7877 AssertRCReturn(rc, rc);
7878#ifdef E1K_WITH_TX_CS
7879 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "E1000#%dTX", iInstance);
7880 AssertRCReturn(rc, rc);
7881#endif
7882
7883 /* Saved state registration. */
7884 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
7885 NULL, e1kLiveExec, NULL,
7886 e1kSavePrep, e1kSaveExec, NULL,
7887 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
7888 AssertRCReturn(rc, rc);
7889
7890 /* Set PCI config registers and register ourselves with the PCI bus. */
7891 PDMPCIDEV_ASSERT_VALID(pDevIns, pDevIns->apPciDevs[0]);
7892 e1kR3ConfigurePciDev(pDevIns->apPciDevs[0], pThis->eChip);
7893 rc = PDMDevHlpPCIRegister(pDevIns, pDevIns->apPciDevs[0]);
7894 AssertRCReturn(rc, rc);
7895
7896#ifdef E1K_WITH_MSI
7897 PDMMSIREG MsiReg;
7898 RT_ZERO(MsiReg);
7899 MsiReg.cMsiVectors = 1;
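    /* Note: the 0x80 capability offset below must match the next-capability pointer
       written at 0xE4 + 1 by e1kR3ConfigurePciDev when E1K_WITH_MSI is defined. */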
7900 MsiReg.iMsiCapOffset = 0x80;
7901 MsiReg.iMsiNextOffset = 0x0;
7902 MsiReg.fMsi64bit = false;
7903 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
7904 AssertRCReturn(rc, rc);
7905#endif
7906
7907 /*
7908 * Map our registers to memory space (region 0, see e1kR3ConfigurePciDev)
7909 * From the spec (regarding flags):
7910 * For registers that should be accessed as 32-bit double words,
7911 * partial writes (less than a 32-bit double word) are ignored.
7912 * Partial reads return all 32 bits of data regardless of the
7913 * byte enables.
7914 */
7915 rc = PDMDevHlpMmioCreateEx(pDevIns, E1K_MM_SIZE, IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD,
7916 pDevIns->apPciDevs[0], 0 /*iPciRegion*/,
7917 e1kMMIOWrite, e1kMMIORead, NULL /*pfnFill*/, NULL /*pvUser*/, "E1000", &pThis->hMmioRegion);
7918 AssertRCReturn(rc, rc);
7919 rc = PDMDevHlpPCIIORegionRegisterMmio(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, pThis->hMmioRegion, NULL);
7920 AssertRCReturn(rc, rc);
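    /*
     * A sketch of the access semantics the IOMMMIO_FLAGS_* combination above requests
     * from IOM, seen from the guest side; pvBar0 is a hypothetical mapping of BAR0 and
     * offset 0 is used purely as an example register:
     */
#if 0
    volatile uint32_t *pu32Reg = (volatile uint32_t *)((uintptr_t)pvBar0 + 0x0);
    uint32_t u32 = *pu32Reg;            /* aligned 32-bit read reaches e1kMMIORead */
    *pu32Reg = u32;                     /* aligned 32-bit write reaches e1kMMIOWrite */
    *(volatile uint16_t *)pu32Reg = 0;  /* partial write is dropped (IOMMMIO_FLAGS_WRITE_ONLY_DWORD) */
#endif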
7921
7922 /* Map our registers to IO space (region 2, see e1kR3ConfigurePciDev) */
7923 static IOMIOPORTDESC const s_aExtDescs[] =
7924 {
7925 { "IOADDR", "IOADDR", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL },
7926 { "IODATA", "IODATA", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL },
7927 { NULL, NULL, NULL, NULL }
7928 };
7929 rc = PDMDevHlpIoPortCreate(pDevIns, E1K_IOPORT_SIZE, pDevIns->apPciDevs[0], 2 /*iPciRegion*/,
7930 e1kIOPortOut, e1kIOPortIn, NULL /*pvUser*/, "E1000", s_aExtDescs, &pThis->hIoPorts);
7931 AssertRCReturn(rc, rc);
7932 rc = PDMDevHlpPCIIORegionRegisterIo(pDevIns, 2, E1K_IOPORT_SIZE, pThis->hIoPorts);
7933 AssertRCReturn(rc, rc);
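    /*
     * The port pair registered above implements the usual 8254x indirect I/O access:
     * the guest writes a register offset to IOADDR (BAR2 + 0) and then reads or writes
     * that register through IODATA (BAR2 + 4). A sketch of guest-side usage; uIoBase
     * and offReg are hypothetical values:
     */
#if 0
    ASMOutU32(uIoBase + 0 /* IOADDR */, offReg);            /* select the register        */
    uint32_t u32Val = ASMInU32(uIoBase + 4 /* IODATA */);   /* read the selected register */
    ASMOutU32(uIoBase + 4 /* IODATA */, u32Val);            /* or write it back           */
#endif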
7934
7935 /* Create transmit queue */
7936 rc = PDMDevHlpTaskCreate(pDevIns, PDMTASK_F_RZ, "E1000-Xmit", e1kR3TxTaskCallback, NULL, &pThis->hTxTask);
7937 AssertRCReturn(rc, rc);
7938
7939#ifdef E1K_TX_DELAY
7940 /* Create Transmit Delay Timer */
7941 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7942 "E1000 Transmit Delay Timer", &pThis->hTXDTimer);
7943 AssertRCReturn(rc, rc);
7944 rc = PDMDevHlpTimerSetCritSect(pDevIns, pThis->hTXDTimer, &pThis->csTx);
7945 AssertRCReturn(rc, rc);
7946#endif /* E1K_TX_DELAY */
7947
7948//#ifdef E1K_USE_TX_TIMERS
7949 if (pThis->fTidEnabled)
7950 {
7951 /* Create Transmit Interrupt Delay Timer */
7952 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxIntDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7953 "E1000 Transmit Interrupt Delay Timer", &pThis->hTIDTimer);
7954 AssertRCReturn(rc, rc);
7955
7956# ifndef E1K_NO_TAD
7957 /* Create Transmit Absolute Delay Timer */
7958 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxAbsDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7959 "E1000 Transmit Absolute Delay Timer", &pThis->hTADTimer);
7960 AssertRCReturn(rc, rc);
7961# endif /* E1K_NO_TAD */
7962 }
7963//#endif /* E1K_USE_TX_TIMERS */
7964
7965#ifdef E1K_USE_RX_TIMERS
7966 /* Create Receive Interrupt Delay Timer */
7967 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3RxIntDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7968 "E1000 Receive Interrupt Delay Timer", &pThis->hRIDTimer);
7969 AssertRCReturn(rc, rc);
7970
7971 /* Create Receive Absolute Delay Timer */
7972 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3RxAbsDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7973 "E1000 Receive Absolute Delay Timer", &pThis->hRADTimer);
7974 AssertRCReturn(rc, rc);
7975#endif /* E1K_USE_RX_TIMERS */
7976
7977 /* Create Late Interrupt Timer */
7978 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3LateIntTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7979 "E1000 Late Interrupt Timer", &pThis->hIntTimer);
7980 AssertRCReturn(rc, rc);
7981
7982 /* Create Link Up Timer */
7983 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3LinkUpTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7984 "E1000 Link Up Timer", &pThis->hLUTimer);
7985 AssertRCReturn(rc, rc);
7986
7987 /* Register the info item */
7988 char szTmp[20];
7989 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
7990 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
7991
7992 /* Status driver */
7993 PPDMIBASE pBase;
7994 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThisCC->IBase, &pBase, "Status Port");
7995 if (RT_FAILURE(rc))
7996 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
7997 pThisCC->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
7998
7999 /* Network driver */
8000 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThisCC->IBase, &pThisCC->pDrvBase, "Network Port");
8001 if (RT_SUCCESS(rc))
8002 {
8003 pThisCC->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMINETWORKUP);
8004 AssertMsgReturn(pThisCC->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
8005
8006#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
8007 pThisR0->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASER0), PDMINETWORKUP);
8008 pThisRC->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASERC), PDMINETWORKUP);
8009#endif
8010 }
8011 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
8012 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
8013 {
8014 /* No error! */
8015 E1kLog(("%s This adapter is not attached to any network!\n", pThis->szPrf));
8016 }
8017 else
8018 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
8019
8020 rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->hEventMoreRxDescAvail);
8021 AssertRCReturn(rc, rc);
8022
8023 rc = e1kInitDebugHelpers();
8024 AssertRCReturn(rc, rc);
8025
8026 e1kR3HardReset(pDevIns, pThis, pThisCC);
8027
8028 /*
8029 * Register statistics.
8030 * The /Public/ bits are official and used by session info in the GUI.
8031 */
8032 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
8033 "Amount of data received", "/Public/NetAdapter/%u/BytesReceived", uStatNo);
8034 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
8035 "Amount of data transmitted", "/Public/NetAdapter/%u/BytesTransmitted", uStatNo);
8036 PDMDevHlpSTAMRegisterF(pDevIns, &pDevIns->iInstance, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
8037 "Device instance number", "/Public/NetAdapter/%u/%s", uStatNo, pDevIns->pReg->szName);
8038
8039 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, "ReceiveBytes", STAMUNIT_BYTES, "Amount of data received");
8040 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, "TransmitBytes", STAMUNIT_BYTES, "Amount of data transmitted");
8041
8042#if defined(VBOX_WITH_STATISTICS)
8043 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, "MMIO/ReadRZ", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ");
8044 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, "MMIO/ReadR3", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3");
8045 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, "MMIO/WriteRZ", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ");
8046 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, "MMIO/WriteR3", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3");
8047 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, "EEPROM/Read", STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads");
8048 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, "EEPROM/Write", STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes");
8049 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, "IO/ReadRZ", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ");
8050 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, "IO/ReadR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3");
8051 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, "IO/WriteRZ", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ");
8052 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, "IO/WriteR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3");
8053 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, "LateInt/Timer", STAMUNIT_TICKS_PER_CALL, "Profiling late int timer");
8054 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, "LateInt/Occured", STAMUNIT_OCCURENCES, "Number of late interrupts");
8055 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, "Interrupts/Raised", STAMUNIT_OCCURENCES, "Number of raised interrupts");
8056 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, "Interrupts/Prevented", STAMUNIT_OCCURENCES, "Number of prevented interrupts");
8057 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, "Receive/Total", STAMUNIT_TICKS_PER_CALL, "Profiling receive");
8058 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, "Receive/CRC", STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming");
8059 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, "Receive/Filter", STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering");
8060 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, "Receive/Store", STAMUNIT_TICKS_PER_CALL, "Profiling receive storing");
8061 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, "RxOverflow", STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows");
8062 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatRxOverflowWakeupRZ, STAMTYPE_COUNTER, "RxOverflowWakeupRZ", STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups in RZ");
8063 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatRxOverflowWakeupR3, STAMTYPE_COUNTER, "RxOverflowWakeupR3", STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups in R3");
8064 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, "Transmit/TotalRZ", STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ");
8065 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, "Transmit/TotalR3", STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3");
8066 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, "Transmit/SendRZ", STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ");
8067 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, "Transmit/SendR3", STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3");
8068
8069 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, "TxDesc/ContexNormal", STAMUNIT_OCCURENCES, "Number of normal context descriptors");
8070 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, "TxDesc/ContextTSE", STAMUNIT_OCCURENCES, "Number of TSE context descriptors");
8071 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, "TxDesc/Data", STAMUNIT_OCCURENCES, "Number of TX data descriptors");
8072 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, "TxDesc/Legacy", STAMUNIT_OCCURENCES, "Number of TX legacy descriptors");
8073 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, "TxDesc/TSEData", STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors");
8074 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, "TxPath/Fallback", STAMUNIT_OCCURENCES, "Fallback TSE descriptor path");
8075 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, "TxPath/GSO", STAMUNIT_OCCURENCES, "GSO TSE descriptor path");
8076 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, "TxPath/Normal", STAMUNIT_OCCURENCES, "Regular descriptor path");
8077 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, "PHYAccesses", STAMUNIT_OCCURENCES, "Number of PHY accesses");
8078 for (unsigned iReg = 0; iReg < E1K_NUM_OF_REGS; iReg++)
8079 {
8080 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
8081 g_aE1kRegMap[iReg].name, "Regs/%s-Reads", g_aE1kRegMap[iReg].abbrev);
8082 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
8083 g_aE1kRegMap[iReg].name, "Regs/%s-Writes", g_aE1kRegMap[iReg].abbrev);
8084 }
8085#endif /* VBOX_WITH_STATISTICS */
8086
8087#ifdef E1K_INT_STATS
8088 PDMDevHlpSTAMRegister(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, "u64ArmedAt", STAMUNIT_NS, NULL);
8089 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, "uStatMaxTxDelay", STAMUNIT_NS, NULL);
8090 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatInt, STAMTYPE_U32, "uStatInt", STAMUNIT_NS, NULL);
8091 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, "uStatIntTry", STAMUNIT_NS, NULL);
8092 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, "uStatIntLower", STAMUNIT_NS, NULL);
8093 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatNoIntICR, STAMTYPE_U32, "uStatNoIntICR", STAMUNIT_NS, NULL);
8094 PDMDevHlpSTAMRegister(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, "iStatIntLost", STAMUNIT_NS, NULL);
8095 PDMDevHlpSTAMRegister(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, "iStatIntLostOne", STAMUNIT_NS, NULL);
8096 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntIMS, STAMTYPE_U32, "uStatIntIMS", STAMUNIT_NS, NULL);
8097 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, "uStatIntSkip", STAMUNIT_NS, NULL);
8098 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, "uStatIntLate", STAMUNIT_NS, NULL);
8099 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, "uStatIntMasked", STAMUNIT_NS, NULL);
8100 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, "uStatIntEarly", STAMUNIT_NS, NULL);
8101 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, "uStatIntRx", STAMUNIT_NS, NULL);
8102 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, "uStatIntTx", STAMUNIT_NS, NULL);
8103 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, "uStatIntICS", STAMUNIT_NS, NULL);
8104 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, "uStatIntRDTR", STAMUNIT_NS, NULL);
8105 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, "uStatIntRXDMT0", STAMUNIT_NS, NULL);
8106 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, "uStatIntTXQE", STAMUNIT_NS, NULL);
8107 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, "uStatTxNoRS", STAMUNIT_NS, NULL);
8108 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, "uStatTxIDE", STAMUNIT_NS, NULL);
8109 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, "uStatTxDelayed", STAMUNIT_NS, NULL);
8110 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, "uStatTxDelayExp", STAMUNIT_NS, NULL);
8111 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, "uStatTAD", STAMUNIT_NS, NULL);
8112 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTID, STAMTYPE_U32, "uStatTID", STAMUNIT_NS, NULL);
8113 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, "uStatRAD", STAMUNIT_NS, NULL);
8114 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatRID, STAMTYPE_U32, "uStatRID", STAMUNIT_NS, NULL);
8115 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, "uStatRxFrm", STAMUNIT_NS, NULL);
8116 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, "uStatTxFrm", STAMUNIT_NS, NULL);
8117 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, "uStatDescCtx", STAMUNIT_NS, NULL);
8118 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, "uStatDescDat", STAMUNIT_NS, NULL);
8119 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, "uStatDescLeg", STAMUNIT_NS, NULL);
8120 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, "uStatTx1514", STAMUNIT_NS, NULL);
8121 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, "uStatTx2962", STAMUNIT_NS, NULL);
8122 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, "uStatTx4410", STAMUNIT_NS, NULL);
8123 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, "uStatTx5858", STAMUNIT_NS, NULL);
8124 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, "uStatTx7306", STAMUNIT_NS, NULL);
8125 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, "uStatTx8754", STAMUNIT_NS, NULL);
8126 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, "uStatTx16384", STAMUNIT_NS, NULL);
8127 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, "uStatTx32768", STAMUNIT_NS, NULL);
8128 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, "uStatTxLarge", STAMUNIT_NS, NULL);
8129#endif /* E1K_INT_STATS */
8130
8131 return VINF_SUCCESS;
8132}
8133
8134#else /* !IN_RING3 */
8135
8136/**
8137 * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
8138 */
8139static DECLCALLBACK(int) e1kRZConstruct(PPDMDEVINS pDevIns)
8140{
8141 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
8142 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
8143 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
8144
8145 /* Initialize context specific state data: */
8146 pThisCC->CTX_SUFF(pDevIns) = pDevIns;
8147 /** @todo @bugref{9218} ring-0 driver stuff */
8148 pThisCC->CTX_SUFF(pDrv) = NULL;
8149 pThisCC->CTX_SUFF(pTxSg) = NULL;
8150
8151 /* Configure critical sections the same way: */
8152 int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
8153 AssertRCReturn(rc, rc);
8154
8155 /* Set up MMIO and I/O port callbacks for this context: */
8156 rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmioRegion, e1kMMIOWrite, e1kMMIORead, NULL /*pvUser*/);
8157 AssertRCReturn(rc, rc);
8158
8159 rc = PDMDevHlpIoPortSetUpContext(pDevIns, pThis->hIoPorts, e1kIOPortOut, e1kIOPortIn, NULL /*pvUser*/);
8160 AssertRCReturn(rc, rc);
8161
8162 return VINF_SUCCESS;
8163}
8164
8165#endif /* !IN_RING3 */
8166
8167/**
8168 * The device registration structure.
8169 */
8170const PDMDEVREG g_DeviceE1000 =
8171{
8172 /* .u32Version = */ PDM_DEVREG_VERSION,
8173 /* .uReserved0 = */ 0,
8174 /* .szName = */ "e1000",
8175 /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE,
8176 /* .fClass = */ PDM_DEVREG_CLASS_NETWORK,
8177 /* .cMaxInstances = */ ~0U,
8178 /* .uSharedVersion = */ 42,
8179 /* .cbInstanceShared = */ sizeof(E1KSTATE),
8180 /* .cbInstanceCC = */ sizeof(E1KSTATECC),
8181 /* .cbInstanceRC = */ sizeof(E1KSTATERC),
8182 /* .cMaxPciDevices = */ 1,
8183 /* .cMaxMsixVectors = */ 0,
8184 /* .pszDescription = */ "Intel PRO/1000 MT Desktop Ethernet.",
8185#if defined(IN_RING3)
8186 /* .pszRCMod = */ "VBoxDDRC.rc",
8187 /* .pszR0Mod = */ "VBoxDDR0.r0",
8188 /* .pfnConstruct = */ e1kR3Construct,
8189 /* .pfnDestruct = */ e1kR3Destruct,
8190 /* .pfnRelocate = */ e1kR3Relocate,
8191 /* .pfnMemSetup = */ NULL,
8192 /* .pfnPowerOn = */ NULL,
8193 /* .pfnReset = */ e1kR3Reset,
8194 /* .pfnSuspend = */ e1kR3Suspend,
8195 /* .pfnResume = */ NULL,
8196 /* .pfnAttach = */ e1kR3Attach,
8197 /* .pfnDetach = */ e1kR3Detach,
8198 /* .pfnQueryInterface = */ NULL,
8199 /* .pfnInitComplete = */ NULL,
8200 /* .pfnPowerOff = */ e1kR3PowerOff,
8201 /* .pfnSoftReset = */ NULL,
8202 /* .pfnReserved0 = */ NULL,
8203 /* .pfnReserved1 = */ NULL,
8204 /* .pfnReserved2 = */ NULL,
8205 /* .pfnReserved3 = */ NULL,
8206 /* .pfnReserved4 = */ NULL,
8207 /* .pfnReserved5 = */ NULL,
8208 /* .pfnReserved6 = */ NULL,
8209 /* .pfnReserved7 = */ NULL,
8210#elif defined(IN_RING0)
8211 /* .pfnEarlyConstruct = */ NULL,
8212 /* .pfnConstruct = */ e1kRZConstruct,
8213 /* .pfnDestruct = */ NULL,
8214 /* .pfnFinalDestruct = */ NULL,
8215 /* .pfnRequest = */ NULL,
8216 /* .pfnReserved0 = */ NULL,
8217 /* .pfnReserved1 = */ NULL,
8218 /* .pfnReserved2 = */ NULL,
8219 /* .pfnReserved3 = */ NULL,
8220 /* .pfnReserved4 = */ NULL,
8221 /* .pfnReserved5 = */ NULL,
8222 /* .pfnReserved6 = */ NULL,
8223 /* .pfnReserved7 = */ NULL,
8224#elif defined(IN_RC)
8225 /* .pfnConstruct = */ e1kRZConstruct,
8226 /* .pfnReserved0 = */ NULL,
8227 /* .pfnReserved1 = */ NULL,
8228 /* .pfnReserved2 = */ NULL,
8229 /* .pfnReserved3 = */ NULL,
8230 /* .pfnReserved4 = */ NULL,
8231 /* .pfnReserved5 = */ NULL,
8232 /* .pfnReserved6 = */ NULL,
8233 /* .pfnReserved7 = */ NULL,
8234#else
8235# error "Not in IN_RING3, IN_RING0 or IN_RC!"
8236#endif
8237 /* .u32VersionEnd = */ PDM_DEVREG_VERSION
8238};
8239
8240#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */