VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp@81410

Last change on this file since 81410 was 81410, checked in by vboxsync, 5 years ago

DevE1000: split up the state structure, converted timers to handles and queues to tasks. bugref:9218

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 330.9 KB
1/* $Id: DevE1000.cpp 81410 2019-10-21 13:12:17Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2019 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DEV_E1000
33#include <iprt/crc.h>
34#include <iprt/ctype.h>
35#include <iprt/net.h>
36#include <iprt/semaphore.h>
37#include <iprt/string.h>
38#include <iprt/time.h>
39#include <iprt/uuid.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/vmm/pdmnetifs.h>
42#include <VBox/vmm/pdmnetinline.h>
43#include <VBox/param.h>
44#include "VBoxDD.h"
45
46#include "DevEEPROM.h"
47#include "DevE1000Phy.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** @name E1000 Build Options
54 * @{ */
55/** @def E1K_INIT_RA0
56 * E1K_INIT_RA0 forces E1000 to set the first entry in the Receive Address
57 * filter table to the MAC address obtained from CFGM. Most guests read the
58 * MAC address from the EEPROM and write it to RA[0] explicitly, but Mac OS X
59 * seems to depend on it being already set (see @bugref{4657}).
60 */
61#define E1K_INIT_RA0
62/** @def E1K_LSC_ON_RESET
63 * E1K_LSC_ON_RESET causes E1000 to generate a Link Status Change
64 * interrupt after a hard reset. This makes the E1K_LSC_ON_SLU option unnecessary.
65 * With an unplugged cable, LSC is triggered for the 82543GC only.
66 */
67#define E1K_LSC_ON_RESET
68/** @def E1K_LSC_ON_SLU
69 * E1K_LSC_ON_SLU causes E1000 to generate Link Status Change interrupt when
70 * the guest driver brings up the link via STATUS.LU bit. Again the only guest
71 * that requires it is Mac OS X (see @bugref{4657}).
72 */
73//#define E1K_LSC_ON_SLU
74/** @def E1K_INIT_LINKUP_DELAY_US
75 * E1K_INIT_LINKUP_DELAY_US prevents the link from coming up while the driver
76 * is still initializing (see @bugref{8624}).
77 */
78#define E1K_INIT_LINKUP_DELAY_US (2000 * 1000)
79/** @def E1K_IMS_INT_DELAY_NS
80 * E1K_IMS_INT_DELAY_NS prevents interrupt storms in Windows guests on enabling
81 * interrupts (see @bugref{8624}).
82 */
83#define E1K_IMS_INT_DELAY_NS 100
84/** @def E1K_TX_DELAY
85 * E1K_TX_DELAY aims to improve the guest-to-host transfer rate for TCP streams
86 * by preventing packets from being sent immediately. It allows several packets
87 * to be sent in a batch, reducing the number of acknowledgments. Note that it
88 * effectively disables the R0 TX path, forcing sending in R3.
89 */
90//#define E1K_TX_DELAY 150
91/** @def E1K_USE_TX_TIMERS
92 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
93 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
94 * register. Enabling it showed no positive effects on existing guests so it
95 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
96 * Ethernet Controllers Software Developer’s Manual" for a more detailed
97 * explanation.
98 */
99//#define E1K_USE_TX_TIMERS
100/** @def E1K_NO_TAD
101 * E1K_NO_TAD disables one of the two timers enabled by E1K_USE_TX_TIMERS, the
102 * Transmit Absolute Delay timer. This timer sets the maximum time interval
103 * during which TX interrupts can be postponed (delayed). It has no effect
104 * if E1K_USE_TX_TIMERS is not defined.
105 */
106//#define E1K_NO_TAD
107/** @def E1K_REL_DEBUG
108 * E1K_REL_DEBUG enables debug logging of levels 1, 2 and 3 in release builds.
109 */
110//#define E1K_REL_DEBUG
111/** @def E1K_INT_STATS
112 * E1K_INT_STATS enables collection of internal statistics used for
113 * debugging of delayed interrupts, etc.
114 */
115#define E1K_INT_STATS
116/** @def E1K_WITH_MSI
117 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
118 */
119//#define E1K_WITH_MSI
120/** @def E1K_WITH_TX_CS
121 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
122 */
123#define E1K_WITH_TX_CS
124/** @def E1K_WITH_TXD_CACHE
125 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
126 * single physical memory read (or two if it wraps around the end of the TX
127 * descriptor ring). It is required for proper functioning of bandwidth
128 * resource control as it allows computing the exact sizes of packets prior
129 * to allocating their buffers (see @bugref{5582}).
130 */
131#define E1K_WITH_TXD_CACHE
132/** @def E1K_WITH_RXD_CACHE
133 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
134 * single physical memory read (or two if it wraps around the end of the RX
135 * descriptor ring). Intel's packet driver for DOS needs this option in
136 * order to work properly (see @bugref{6217}).
137 */
138#define E1K_WITH_RXD_CACHE
139/** @def E1K_WITH_PREREG_MMIO
140 * E1K_WITH_PREREG_MMIO enables a new style of MMIO registration and is
141 * currently only used for testing the related PDM, IOM and PGM code. */
142//#define E1K_WITH_PREREG_MMIO
143/** @} */
144/* End of Options ************************************************************/
145
146#ifdef E1K_WITH_TXD_CACHE
147/**
148 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
149 * in the state structure. It limits the number of descriptors loaded in one
150 * batch read. For example, a Linux guest may use up to 20 descriptors per
151 * TSE packet. The largest TSE packet seen (Windows guest) was 45 descriptors.
152 */
153# define E1K_TXD_CACHE_SIZE 64u
154#endif /* E1K_WITH_TXD_CACHE */
155
156#ifdef E1K_WITH_RXD_CACHE
157/**
158 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
159 * in the state structure. It limits the number of descriptors loaded in one
160 * batch read. For example, an XP guest adds 15 RX descriptors at a time.
161 */
162# define E1K_RXD_CACHE_SIZE 16u
163#endif /* E1K_WITH_RXD_CACHE */
164
165
166/* Little helpers ************************************************************/
167#undef htons
168#undef ntohs
169#undef htonl
170#undef ntohl
171#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
172#define ntohs(x) htons(x)
173#define htonl(x) ASMByteSwapU32(x)
174#define ntohl(x) htonl(x)
175
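/*
 * Illustrative note: the helpers above are unconditional byte swaps and thus
 * assume a little-endian host (x86/amd64). For example:
 *     htons(0x0800) == 0x0008            (IPv4 EtherType in wire order)
 *     ntohl(0x0100007F) == 0x7F000001
 */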
176#ifndef DEBUG
177# ifdef E1K_REL_DEBUG
178# define DEBUG
179# define E1kLog(a) LogRel(a)
180# define E1kLog2(a) LogRel(a)
181# define E1kLog3(a) LogRel(a)
182# define E1kLogX(x, a) LogRel(a)
183//# define E1kLog3(a) do {} while (0)
184# else
185# define E1kLog(a) do {} while (0)
186# define E1kLog2(a) do {} while (0)
187# define E1kLog3(a) do {} while (0)
188# define E1kLogX(x, a) do {} while (0)
189# endif
190#else
191# define E1kLog(a) Log(a)
192# define E1kLog2(a) Log2(a)
193# define E1kLog3(a) Log3(a)
194# define E1kLogX(x, a) LogIt(x, LOG_GROUP, a)
195//# define E1kLog(a) do {} while (0)
196//# define E1kLog2(a) do {} while (0)
197//# define E1kLog3(a) do {} while (0)
198#endif
199
200#if 0
201# define LOG_ENABLED
202# define E1kLogRel(a) LogRel(a)
203# undef Log6
204# define Log6(a) LogRel(a)
205#else
206# define E1kLogRel(a) do { } while (0)
207#endif
208
209//#undef DEBUG
210
211#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
212
213#define E1K_INC_CNT32(cnt) \
214do { \
215 if (cnt < UINT32_MAX) \
216 cnt++; \
217} while (0)
218
219#define E1K_ADD_CNT64(cntLo, cntHi, val) \
220do { \
221 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
222 uint64_t tmp = u64Cnt; \
223 u64Cnt += val; \
224 if (tmp > u64Cnt ) \
225 u64Cnt = UINT64_MAX; \
226 cntLo = (uint32_t)u64Cnt; \
227 cntHi = (uint32_t)(u64Cnt >> 32); \
228} while (0)
229
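/*
 * Usage sketch for the counter helpers above (illustrative only): 32-bit
 * statistics registers saturate at UINT32_MAX, 64-bit counters are kept as
 * LO/HI register pairs and saturate at UINT64_MAX. Accounting a received
 * frame of cbFrame bytes (a hypothetical local) could look like:
 *
 *     E1K_INC_CNT32(GPRC);                   // good packets received
 *     E1K_ADD_CNT64(GORCL, GORCH, cbFrame);  // good octets received
 */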
230#ifdef E1K_INT_STATS
231# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
232#else /* E1K_INT_STATS */
233# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
234#endif /* E1K_INT_STATS */
235
236
237/*****************************************************************************/
238
239typedef uint32_t E1KCHIP;
240#define E1K_CHIP_82540EM 0
241#define E1K_CHIP_82543GC 1
242#define E1K_CHIP_82545EM 2
243
244#ifdef IN_RING3
245/** Different E1000 chips. */
246static const struct E1kChips
247{
248 uint16_t uPCIVendorId;
249 uint16_t uPCIDeviceId;
250 uint16_t uPCISubsystemVendorId;
251 uint16_t uPCISubsystemId;
252 const char *pcszName;
253} g_aChips[] =
254{
255 /* Vendor Device SSVendor SubSys Name */
256 { 0x8086,
257 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do this right? */
258# ifdef E1K_WITH_MSI
259 0x105E,
260# else
261 0x100E,
262# endif
263 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
264 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
265 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
266};
267#endif /* IN_RING3 */
268
269
270/* The size of the register area mapped to I/O space */
271#define E1K_IOPORT_SIZE 0x8
272/* The size of the memory-mapped register area */
273#define E1K_MM_SIZE 0x20000
274
275#define E1K_MAX_TX_PKT_SIZE 16288
276#define E1K_MAX_RX_PKT_SIZE 16384
277
278/*****************************************************************************/
279
280/** Gets the specified bits from the register. */
281#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
282#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
283#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
284#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
285#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
286
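/*
 * Expansion example for the accessors above, using the RCTL buffer size field
 * (RCTL_BSIZE_MASK/SHIFT defined below; RCTL itself is a register access macro):
 *
 *     GET_BITS(RCTL, BSIZE)    => ((RCTL & RCTL_BSIZE_MASK) >> RCTL_BSIZE_SHIFT)
 *     SET_BITS(RCTL, BSIZE, 1) => RCTL = (RCTL & ~RCTL_BSIZE_MASK)
 *                                      | (1 << RCTL_BSIZE_SHIFT)   (wrapped in do/while)
 */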
287#define CTRL_SLU UINT32_C(0x00000040)
288#define CTRL_MDIO UINT32_C(0x00100000)
289#define CTRL_MDC UINT32_C(0x00200000)
290#define CTRL_MDIO_DIR UINT32_C(0x01000000)
291#define CTRL_MDC_DIR UINT32_C(0x02000000)
292#define CTRL_RESET UINT32_C(0x04000000)
293#define CTRL_VME UINT32_C(0x40000000)
294
295#define STATUS_LU UINT32_C(0x00000002)
296#define STATUS_TXOFF UINT32_C(0x00000010)
297
298#define EECD_EE_WIRES UINT32_C(0x0F)
299#define EECD_EE_REQ UINT32_C(0x40)
300#define EECD_EE_GNT UINT32_C(0x80)
301
302#define EERD_START UINT32_C(0x00000001)
303#define EERD_DONE UINT32_C(0x00000010)
304#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
305#define EERD_DATA_SHIFT 16
306#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
307#define EERD_ADDR_SHIFT 8
308
309#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
310#define MDIC_DATA_SHIFT 0
311#define MDIC_REG_MASK UINT32_C(0x001F0000)
312#define MDIC_REG_SHIFT 16
313#define MDIC_PHY_MASK UINT32_C(0x03E00000)
314#define MDIC_PHY_SHIFT 21
315#define MDIC_OP_WRITE UINT32_C(0x04000000)
316#define MDIC_OP_READ UINT32_C(0x08000000)
317#define MDIC_READY UINT32_C(0x10000000)
318#define MDIC_INT_EN UINT32_C(0x20000000)
319#define MDIC_ERROR UINT32_C(0x40000000)
320
321#define TCTL_EN UINT32_C(0x00000002)
322#define TCTL_PSP UINT32_C(0x00000008)
323
324#define RCTL_EN UINT32_C(0x00000002)
325#define RCTL_UPE UINT32_C(0x00000008)
326#define RCTL_MPE UINT32_C(0x00000010)
327#define RCTL_LPE UINT32_C(0x00000020)
328#define RCTL_LBM_MASK UINT32_C(0x000000C0)
329#define RCTL_LBM_SHIFT 6
330#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
331#define RCTL_RDMTS_SHIFT 8
332#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
333#define RCTL_MO_MASK UINT32_C(0x00003000)
334#define RCTL_MO_SHIFT 12
335#define RCTL_BAM UINT32_C(0x00008000)
336#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
337#define RCTL_BSIZE_SHIFT 16
338#define RCTL_VFE UINT32_C(0x00040000)
339#define RCTL_CFIEN UINT32_C(0x00080000)
340#define RCTL_CFI UINT32_C(0x00100000)
341#define RCTL_BSEX UINT32_C(0x02000000)
342#define RCTL_SECRC UINT32_C(0x04000000)
343
344#define ICR_TXDW UINT32_C(0x00000001)
345#define ICR_TXQE UINT32_C(0x00000002)
346#define ICR_LSC UINT32_C(0x00000004)
347#define ICR_RXDMT0 UINT32_C(0x00000010)
348#define ICR_RXT0 UINT32_C(0x00000080)
349#define ICR_TXD_LOW UINT32_C(0x00008000)
350#define RDTR_FPD UINT32_C(0x80000000)
351
352#define PBA_st ((PBAST*)(pThis->auRegs + PBA_IDX))
353typedef struct
354{
355 unsigned rxa : 7;
356 unsigned rxa_r : 9;
357 unsigned txa : 16;
358} PBAST;
359AssertCompileSize(PBAST, 4);
360
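/*
 * Access sketch (assumes a local 'pThis' of type PE1KSTATE, like the register
 * macros below): PBA_st overlays the PBA register with bit fields, so the
 * receive and transmit packet buffer allocations can be read as
 *
 *     unsigned cRxKB = PBA_st->rxa;
 *     unsigned cTxKB = PBA_st->txa;
 */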
361#define TXDCTL_WTHRESH_MASK 0x003F0000
362#define TXDCTL_WTHRESH_SHIFT 16
363#define TXDCTL_LWTHRESH_MASK 0xFE000000
364#define TXDCTL_LWTHRESH_SHIFT 25
365
366#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
367#define RXCSUM_PCSS_SHIFT 0
368
369/** @name Register access macros
370 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
371 * @{ */
372#define CTRL pThis->auRegs[CTRL_IDX]
373#define STATUS pThis->auRegs[STATUS_IDX]
374#define EECD pThis->auRegs[EECD_IDX]
375#define EERD pThis->auRegs[EERD_IDX]
376#define CTRL_EXT pThis->auRegs[CTRL_EXT_IDX]
377#define FLA pThis->auRegs[FLA_IDX]
378#define MDIC pThis->auRegs[MDIC_IDX]
379#define FCAL pThis->auRegs[FCAL_IDX]
380#define FCAH pThis->auRegs[FCAH_IDX]
381#define FCT pThis->auRegs[FCT_IDX]
382#define VET pThis->auRegs[VET_IDX]
383#define ICR pThis->auRegs[ICR_IDX]
384#define ITR pThis->auRegs[ITR_IDX]
385#define ICS pThis->auRegs[ICS_IDX]
386#define IMS pThis->auRegs[IMS_IDX]
387#define IMC pThis->auRegs[IMC_IDX]
388#define RCTL pThis->auRegs[RCTL_IDX]
389#define FCTTV pThis->auRegs[FCTTV_IDX]
390#define TXCW pThis->auRegs[TXCW_IDX]
391#define RXCW pThis->auRegs[RXCW_IDX]
392#define TCTL pThis->auRegs[TCTL_IDX]
393#define TIPG pThis->auRegs[TIPG_IDX]
394#define AIFS pThis->auRegs[AIFS_IDX]
395#define LEDCTL pThis->auRegs[LEDCTL_IDX]
396#define PBA pThis->auRegs[PBA_IDX]
397#define FCRTL pThis->auRegs[FCRTL_IDX]
398#define FCRTH pThis->auRegs[FCRTH_IDX]
399#define RDFH pThis->auRegs[RDFH_IDX]
400#define RDFT pThis->auRegs[RDFT_IDX]
401#define RDFHS pThis->auRegs[RDFHS_IDX]
402#define RDFTS pThis->auRegs[RDFTS_IDX]
403#define RDFPC pThis->auRegs[RDFPC_IDX]
404#define RDBAL pThis->auRegs[RDBAL_IDX]
405#define RDBAH pThis->auRegs[RDBAH_IDX]
406#define RDLEN pThis->auRegs[RDLEN_IDX]
407#define RDH pThis->auRegs[RDH_IDX]
408#define RDT pThis->auRegs[RDT_IDX]
409#define RDTR pThis->auRegs[RDTR_IDX]
410#define RXDCTL pThis->auRegs[RXDCTL_IDX]
411#define RADV pThis->auRegs[RADV_IDX]
412#define RSRPD pThis->auRegs[RSRPD_IDX]
413#define TXDMAC pThis->auRegs[TXDMAC_IDX]
414#define TDFH pThis->auRegs[TDFH_IDX]
415#define TDFT pThis->auRegs[TDFT_IDX]
416#define TDFHS pThis->auRegs[TDFHS_IDX]
417#define TDFTS pThis->auRegs[TDFTS_IDX]
418#define TDFPC pThis->auRegs[TDFPC_IDX]
419#define TDBAL pThis->auRegs[TDBAL_IDX]
420#define TDBAH pThis->auRegs[TDBAH_IDX]
421#define TDLEN pThis->auRegs[TDLEN_IDX]
422#define TDH pThis->auRegs[TDH_IDX]
423#define TDT pThis->auRegs[TDT_IDX]
424#define TIDV pThis->auRegs[TIDV_IDX]
425#define TXDCTL pThis->auRegs[TXDCTL_IDX]
426#define TADV pThis->auRegs[TADV_IDX]
427#define TSPMT pThis->auRegs[TSPMT_IDX]
428#define CRCERRS pThis->auRegs[CRCERRS_IDX]
429#define ALGNERRC pThis->auRegs[ALGNERRC_IDX]
430#define SYMERRS pThis->auRegs[SYMERRS_IDX]
431#define RXERRC pThis->auRegs[RXERRC_IDX]
432#define MPC pThis->auRegs[MPC_IDX]
433#define SCC pThis->auRegs[SCC_IDX]
434#define ECOL pThis->auRegs[ECOL_IDX]
435#define MCC pThis->auRegs[MCC_IDX]
436#define LATECOL pThis->auRegs[LATECOL_IDX]
437#define COLC pThis->auRegs[COLC_IDX]
438#define DC pThis->auRegs[DC_IDX]
439#define TNCRS pThis->auRegs[TNCRS_IDX]
440/* #define SEC pThis->auRegs[SEC_IDX] Conflict with sys/time.h */
441#define CEXTERR pThis->auRegs[CEXTERR_IDX]
442#define RLEC pThis->auRegs[RLEC_IDX]
443#define XONRXC pThis->auRegs[XONRXC_IDX]
444#define XONTXC pThis->auRegs[XONTXC_IDX]
445#define XOFFRXC pThis->auRegs[XOFFRXC_IDX]
446#define XOFFTXC pThis->auRegs[XOFFTXC_IDX]
447#define FCRUC pThis->auRegs[FCRUC_IDX]
448#define PRC64 pThis->auRegs[PRC64_IDX]
449#define PRC127 pThis->auRegs[PRC127_IDX]
450#define PRC255 pThis->auRegs[PRC255_IDX]
451#define PRC511 pThis->auRegs[PRC511_IDX]
452#define PRC1023 pThis->auRegs[PRC1023_IDX]
453#define PRC1522 pThis->auRegs[PRC1522_IDX]
454#define GPRC pThis->auRegs[GPRC_IDX]
455#define BPRC pThis->auRegs[BPRC_IDX]
456#define MPRC pThis->auRegs[MPRC_IDX]
457#define GPTC pThis->auRegs[GPTC_IDX]
458#define GORCL pThis->auRegs[GORCL_IDX]
459#define GORCH pThis->auRegs[GORCH_IDX]
460#define GOTCL pThis->auRegs[GOTCL_IDX]
461#define GOTCH pThis->auRegs[GOTCH_IDX]
462#define RNBC pThis->auRegs[RNBC_IDX]
463#define RUC pThis->auRegs[RUC_IDX]
464#define RFC pThis->auRegs[RFC_IDX]
465#define ROC pThis->auRegs[ROC_IDX]
466#define RJC pThis->auRegs[RJC_IDX]
467#define MGTPRC pThis->auRegs[MGTPRC_IDX]
468#define MGTPDC pThis->auRegs[MGTPDC_IDX]
469#define MGTPTC pThis->auRegs[MGTPTC_IDX]
470#define TORL pThis->auRegs[TORL_IDX]
471#define TORH pThis->auRegs[TORH_IDX]
472#define TOTL pThis->auRegs[TOTL_IDX]
473#define TOTH pThis->auRegs[TOTH_IDX]
474#define TPR pThis->auRegs[TPR_IDX]
475#define TPT pThis->auRegs[TPT_IDX]
476#define PTC64 pThis->auRegs[PTC64_IDX]
477#define PTC127 pThis->auRegs[PTC127_IDX]
478#define PTC255 pThis->auRegs[PTC255_IDX]
479#define PTC511 pThis->auRegs[PTC511_IDX]
480#define PTC1023 pThis->auRegs[PTC1023_IDX]
481#define PTC1522 pThis->auRegs[PTC1522_IDX]
482#define MPTC pThis->auRegs[MPTC_IDX]
483#define BPTC pThis->auRegs[BPTC_IDX]
484#define TSCTC pThis->auRegs[TSCTC_IDX]
485#define TSCTFC pThis->auRegs[TSCTFC_IDX]
486#define RXCSUM pThis->auRegs[RXCSUM_IDX]
487#define WUC pThis->auRegs[WUC_IDX]
488#define WUFC pThis->auRegs[WUFC_IDX]
489#define WUS pThis->auRegs[WUS_IDX]
490#define MANC pThis->auRegs[MANC_IDX]
491#define IPAV pThis->auRegs[IPAV_IDX]
492#define WUPL pThis->auRegs[WUPL_IDX]
493/** @} */
494
495/**
496 * Indices of memory-mapped registers in register table.
497 */
498typedef enum
499{
500 CTRL_IDX,
501 STATUS_IDX,
502 EECD_IDX,
503 EERD_IDX,
504 CTRL_EXT_IDX,
505 FLA_IDX,
506 MDIC_IDX,
507 FCAL_IDX,
508 FCAH_IDX,
509 FCT_IDX,
510 VET_IDX,
511 ICR_IDX,
512 ITR_IDX,
513 ICS_IDX,
514 IMS_IDX,
515 IMC_IDX,
516 RCTL_IDX,
517 FCTTV_IDX,
518 TXCW_IDX,
519 RXCW_IDX,
520 TCTL_IDX,
521 TIPG_IDX,
522 AIFS_IDX,
523 LEDCTL_IDX,
524 PBA_IDX,
525 FCRTL_IDX,
526 FCRTH_IDX,
527 RDFH_IDX,
528 RDFT_IDX,
529 RDFHS_IDX,
530 RDFTS_IDX,
531 RDFPC_IDX,
532 RDBAL_IDX,
533 RDBAH_IDX,
534 RDLEN_IDX,
535 RDH_IDX,
536 RDT_IDX,
537 RDTR_IDX,
538 RXDCTL_IDX,
539 RADV_IDX,
540 RSRPD_IDX,
541 TXDMAC_IDX,
542 TDFH_IDX,
543 TDFT_IDX,
544 TDFHS_IDX,
545 TDFTS_IDX,
546 TDFPC_IDX,
547 TDBAL_IDX,
548 TDBAH_IDX,
549 TDLEN_IDX,
550 TDH_IDX,
551 TDT_IDX,
552 TIDV_IDX,
553 TXDCTL_IDX,
554 TADV_IDX,
555 TSPMT_IDX,
556 CRCERRS_IDX,
557 ALGNERRC_IDX,
558 SYMERRS_IDX,
559 RXERRC_IDX,
560 MPC_IDX,
561 SCC_IDX,
562 ECOL_IDX,
563 MCC_IDX,
564 LATECOL_IDX,
565 COLC_IDX,
566 DC_IDX,
567 TNCRS_IDX,
568 SEC_IDX,
569 CEXTERR_IDX,
570 RLEC_IDX,
571 XONRXC_IDX,
572 XONTXC_IDX,
573 XOFFRXC_IDX,
574 XOFFTXC_IDX,
575 FCRUC_IDX,
576 PRC64_IDX,
577 PRC127_IDX,
578 PRC255_IDX,
579 PRC511_IDX,
580 PRC1023_IDX,
581 PRC1522_IDX,
582 GPRC_IDX,
583 BPRC_IDX,
584 MPRC_IDX,
585 GPTC_IDX,
586 GORCL_IDX,
587 GORCH_IDX,
588 GOTCL_IDX,
589 GOTCH_IDX,
590 RNBC_IDX,
591 RUC_IDX,
592 RFC_IDX,
593 ROC_IDX,
594 RJC_IDX,
595 MGTPRC_IDX,
596 MGTPDC_IDX,
597 MGTPTC_IDX,
598 TORL_IDX,
599 TORH_IDX,
600 TOTL_IDX,
601 TOTH_IDX,
602 TPR_IDX,
603 TPT_IDX,
604 PTC64_IDX,
605 PTC127_IDX,
606 PTC255_IDX,
607 PTC511_IDX,
608 PTC1023_IDX,
609 PTC1522_IDX,
610 MPTC_IDX,
611 BPTC_IDX,
612 TSCTC_IDX,
613 TSCTFC_IDX,
614 RXCSUM_IDX,
615 WUC_IDX,
616 WUFC_IDX,
617 WUS_IDX,
618 MANC_IDX,
619 IPAV_IDX,
620 WUPL_IDX,
621 MTA_IDX,
622 RA_IDX,
623 VFTA_IDX,
624 IP4AT_IDX,
625 IP6AT_IDX,
626 WUPM_IDX,
627 FFLT_IDX,
628 FFMT_IDX,
629 FFVT_IDX,
630 PBM_IDX,
631 RA_82542_IDX,
632 MTA_82542_IDX,
633 VFTA_82542_IDX,
634 E1K_NUM_OF_REGS
635} E1kRegIndex;
636
637#define E1K_NUM_OF_32BIT_REGS MTA_IDX
638/** The number of registers with strictly increasing offset. */
639#define E1K_NUM_OF_BINARY_SEARCHABLE (WUPL_IDX + 1)
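/*
 * Lookup sketch (illustrative, not the actual implementation): the first
 * E1K_NUM_OF_BINARY_SEARCHABLE entries of g_aE1kRegMap (defined below) have
 * strictly increasing offsets, so a register offset can be resolved with a
 * binary search over that prefix; the remaining entries (MTA, RA, VFTA, ...)
 * describe register ranges and are matched by a linear scan against their
 * offset and size.
 */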
640
641
642/**
643 * Define E1000-specific EEPROM layout.
644 */
645struct E1kEEPROM
646{
647 public:
648 EEPROM93C46 eeprom;
649
650#ifdef IN_RING3
651 /**
652 * Initialize EEPROM content.
653 *
654 * @param macAddr MAC address of E1000.
655 */
656 void init(RTMAC &macAddr)
657 {
658 eeprom.init();
659 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
660 eeprom.m_au16Data[0x04] = 0xFFFF;
661 /*
662 * bit 3 - full support for power management
663 * bit 10 - full duplex
664 */
665 eeprom.m_au16Data[0x0A] = 0x4408;
666 eeprom.m_au16Data[0x0B] = 0x001E;
667 eeprom.m_au16Data[0x0C] = 0x8086;
668 eeprom.m_au16Data[0x0D] = 0x100E;
669 eeprom.m_au16Data[0x0E] = 0x8086;
670 eeprom.m_au16Data[0x0F] = 0x3040;
671 eeprom.m_au16Data[0x21] = 0x7061;
672 eeprom.m_au16Data[0x22] = 0x280C;
673 eeprom.m_au16Data[0x23] = 0x00C8;
674 eeprom.m_au16Data[0x24] = 0x00C8;
675 eeprom.m_au16Data[0x2F] = 0x0602;
676 updateChecksum();
677 };
678
679 /**
680 * Compute the checksum as required by E1000 and store it
681 * in the last word.
682 */
683 void updateChecksum()
684 {
685 uint16_t u16Checksum = 0;
686
687 for (int i = 0; i < eeprom.SIZE-1; i++)
688 u16Checksum += eeprom.m_au16Data[i];
689 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
690 };
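    /*
     * Invariant established by updateChecksum() (illustrative check): the
     * 16-bit sum of all eeprom.SIZE words, including the checksum word
     * itself, equals 0xBABA:
     *
     *     uint16_t u16Sum = 0;
     *     for (int i = 0; i < eeprom.SIZE; i++)
     *         u16Sum += eeprom.m_au16Data[i];
     *     Assert(u16Sum == 0xBABA);
     */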
691
692 /**
693 * The first 6 bytes of the EEPROM contain the MAC address.
694 *
695 * @param pMac Where to store the MAC address of E1000.
696 */
697 void getMac(PRTMAC pMac)
698 {
699 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
700 };
701
702 uint32_t read()
703 {
704 return eeprom.read();
705 }
706
707 void write(uint32_t u32Wires)
708 {
709 eeprom.write(u32Wires);
710 }
711
712 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
713 {
714 return eeprom.readWord(u32Addr, pu16Value);
715 }
716
717 int load(PSSMHANDLE pSSM)
718 {
719 return eeprom.load(pSSM);
720 }
721
722 void save(PSSMHANDLE pSSM)
723 {
724 eeprom.save(pSSM);
725 }
726#endif /* IN_RING3 */
727};
728
729
730#define E1K_SPEC_VLAN(s) (s & 0xFFF)
731#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
732#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
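/*
 * Worked example for the VLAN "special" field macros above: for s = 0xA00A
 * (binary 101 0 000000001010),
 *     E1K_SPEC_PRI(s)  == 5    (priority)
 *     E1K_SPEC_CFI(s)  == 0    (canonical form indicator)
 *     E1K_SPEC_VLAN(s) == 10   (VLAN id)
 */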
733
734struct E1kRxDStatus
735{
736 /** @name Descriptor Status field (3.2.3.1)
737 * @{ */
738 unsigned fDD : 1; /**< Descriptor Done. */
739 unsigned fEOP : 1; /**< End of packet. */
740 unsigned fIXSM : 1; /**< Ignore checksum indication. */
741 unsigned fVP : 1; /**< VLAN, matches VET. */
742 unsigned : 1;
743 unsigned fTCPCS : 1; /**< TCP Checksum calculated on the packet. */
744 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
745 unsigned fPIF : 1; /**< Passed in-exact filter */
746 /** @} */
747 /** @name Descriptor Errors field (3.2.3.2)
748 * (Only valid when fEOP and fDD are set.)
749 * @{ */
750 unsigned fCE : 1; /**< CRC or alignment error. */
751 unsigned : 4; /**< Reserved, varies with different models... */
752 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
753 unsigned fIPE : 1; /**< IP Checksum error. */
754 unsigned fRXE : 1; /**< RX Data error. */
755 /** @} */
756 /** @name Descriptor Special field (3.2.3.3)
757 * @{ */
758 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
759 /** @} */
760};
761typedef struct E1kRxDStatus E1KRXDST;
762
763struct E1kRxDesc_st
764{
765 uint64_t u64BufAddr; /**< Address of data buffer */
766 uint16_t u16Length; /**< Length of data in buffer */
767 uint16_t u16Checksum; /**< Packet checksum */
768 E1KRXDST status;
769};
770typedef struct E1kRxDesc_st E1KRXDESC;
771AssertCompileSize(E1KRXDESC, 16);
772
773#define E1K_DTYP_LEGACY -1
774#define E1K_DTYP_CONTEXT 0
775#define E1K_DTYP_DATA 1
776
777struct E1kTDLegacy
778{
779 uint64_t u64BufAddr; /**< Address of data buffer */
780 struct TDLCmd_st
781 {
782 unsigned u16Length : 16;
783 unsigned u8CSO : 8;
784 /* CMD field : 8 */
785 unsigned fEOP : 1;
786 unsigned fIFCS : 1;
787 unsigned fIC : 1;
788 unsigned fRS : 1;
789 unsigned fRPS : 1;
790 unsigned fDEXT : 1;
791 unsigned fVLE : 1;
792 unsigned fIDE : 1;
793 } cmd;
794 struct TDLDw3_st
795 {
796 /* STA field */
797 unsigned fDD : 1;
798 unsigned fEC : 1;
799 unsigned fLC : 1;
800 unsigned fTURSV : 1;
801 /* RSV field */
802 unsigned u4RSV : 4;
803 /* CSS field */
804 unsigned u8CSS : 8;
805 /* Special field*/
806 unsigned u16Special: 16;
807 } dw3;
808};
809
810/**
811 * TCP/IP Context Transmit Descriptor, section 3.3.6.
812 */
813struct E1kTDContext
814{
815 struct CheckSum_st
816 {
817 /** TSE: Header start. !TSE: Checksum start. */
818 unsigned u8CSS : 8;
819 /** Checksum offset - where to store it. */
820 unsigned u8CSO : 8;
821 /** Checksum ending (inclusive) offset, 0 = end of packet. */
822 unsigned u16CSE : 16;
823 } ip;
824 struct CheckSum_st tu;
825 struct TDCDw2_st
826 {
827 /** TSE: The total number of payload bytes for this context. Sans header. */
828 unsigned u20PAYLEN : 20;
829 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
830 unsigned u4DTYP : 4;
831 /** TUCMD field, 8 bits
832 * @{ */
833 /** TSE: TCP (set) or UDP (clear). */
834 unsigned fTCP : 1;
835 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
836 * the IP header. Does not affect the checksumming.
837 * @remarks 82544GC/EI interprets a cleared field differently. */
838 unsigned fIP : 1;
839 /** TSE: TCP segmentation enable. When clear, the context describes checksum offloading only. */
840 unsigned fTSE : 1;
841 /** Report status (only applies to dw3.fDD for here). */
842 unsigned fRS : 1;
843 /** Reserved, MBZ. */
844 unsigned fRSV1 : 1;
845 /** Descriptor extension, must be set for this descriptor type. */
846 unsigned fDEXT : 1;
847 /** Reserved, MBZ. */
848 unsigned fRSV2 : 1;
849 /** Interrupt delay enable. */
850 unsigned fIDE : 1;
851 /** @} */
852 } dw2;
853 struct TDCDw3_st
854 {
855 /** Descriptor Done. */
856 unsigned fDD : 1;
857 /** Reserved, MBZ. */
858 unsigned u7RSV : 7;
859 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
860 unsigned u8HDRLEN : 8;
861 /** TSO: Maximum segment size. */
862 unsigned u16MSS : 16;
863 } dw3;
864};
865typedef struct E1kTDContext E1KTXCTX;
866
867/**
868 * TCP/IP Data Transmit Descriptor, section 3.3.7.
869 */
870struct E1kTDData
871{
872 uint64_t u64BufAddr; /**< Address of data buffer */
873 struct TDDCmd_st
874 {
875 /** The total length of data pointed to by this descriptor. */
876 unsigned u20DTALEN : 20;
877 /** The descriptor type - E1K_DTYP_DATA (1). */
878 unsigned u4DTYP : 4;
879 /** @name DCMD field, 8 bits (3.3.7.1).
880 * @{ */
881 /** End of packet. Note TSCTFC update. */
882 unsigned fEOP : 1;
883 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
884 unsigned fIFCS : 1;
885 /** Use the TSE context when set and the normal when clear. */
886 unsigned fTSE : 1;
887 /** Report status (dw3.STA). */
888 unsigned fRS : 1;
889 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
890 unsigned fRPS : 1;
891 /** Descriptor extension, must be set for this descriptor type. */
892 unsigned fDEXT : 1;
893 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
894 * Insert dw3.SPECIAL after ethernet header. */
895 unsigned fVLE : 1;
896 /** Interrupt delay enable. */
897 unsigned fIDE : 1;
898 /** @} */
899 } cmd;
900 struct TDDDw3_st
901 {
902 /** @name STA field (3.3.7.2)
903 * @{ */
904 unsigned fDD : 1; /**< Descriptor done. */
905 unsigned fEC : 1; /**< Excess collision. */
906 unsigned fLC : 1; /**< Late collision. */
907 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
908 unsigned fTURSV : 1;
909 /** @} */
910 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
911 /** @name POPTS (Packet Option) field (3.3.7.3)
912 * @{ */
913 unsigned fIXSM : 1; /**< Insert IP checksum. */
914 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
915 unsigned u6RSV : 6; /**< Reserved, MBZ. */
916 /** @} */
917 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
918 * Requires fEOP, fVLE and CTRL.VME to be set.
919 * @{ */
920 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
921 /** @} */
922 } dw3;
923};
924typedef struct E1kTDData E1KTXDAT;
925
926union E1kTxDesc
927{
928 struct E1kTDLegacy legacy;
929 struct E1kTDContext context;
930 struct E1kTDData data;
931};
932typedef union E1kTxDesc E1KTXDESC;
933AssertCompileSize(E1KTXDESC, 16);
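/*
 * Discrimination sketch for the union above: a descriptor with cmd.fDEXT
 * clear is a legacy descriptor; with fDEXT set, dw2.u4DTYP selects
 * E1K_DTYP_CONTEXT (0) or E1K_DTYP_DATA (1). An illustrative classifier,
 * with pDesc as a hypothetical E1KTXDESC pointer:
 *
 *     int iDType = pDesc->legacy.cmd.fDEXT
 *                ? (int)pDesc->context.dw2.u4DTYP
 *                : E1K_DTYP_LEGACY;
 */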
934
935#define RA_CTL_AS 0x0003
936#define RA_CTL_AV 0x8000
937
938union E1kRecAddr
939{
940 uint32_t au32[32];
941 struct RAArray
942 {
943 uint8_t addr[6];
944 uint16_t ctl;
945 } array[16];
946};
947typedef struct E1kRecAddr::RAArray E1KRAELEM;
948typedef union E1kRecAddr E1KRA;
949AssertCompileSize(E1KRA, 8*16);
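/*
 * Layout note: E1KRA can be viewed either as 32 raw 32-bit registers
 * (RAL0/RAH0 .. RAL15/RAH15) or as 16 address/control pairs. An entry only
 * takes part in receive filtering when its address-valid bit is set, e.g.
 * (illustrative):
 *
 *     E1KRAELEM *pRa = &pThis->aRecAddr.array[0];
 *     bool fValid = RT_BOOL(pRa->ctl & RA_CTL_AV);
 */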
950
951#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
952#define E1K_IP_DF UINT16_C(0x4000) /**< don't fragment flag */
953#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
954#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
955
956/** @todo use+extend RTNETIPV4 */
957struct E1kIpHeader
958{
959 /* type of service / version / header length */
960 uint16_t tos_ver_hl;
961 /* total length */
962 uint16_t total_len;
963 /* identification */
964 uint16_t ident;
965 /* fragment offset field */
966 uint16_t offset;
967 /* time to live / protocol*/
968 uint16_t ttl_proto;
969 /* checksum */
970 uint16_t chksum;
971 /* source IP address */
972 uint32_t src;
973 /* destination IP address */
974 uint32_t dest;
975};
976AssertCompileSize(struct E1kIpHeader, 20);
977
978#define E1K_TCP_FIN UINT16_C(0x01)
979#define E1K_TCP_SYN UINT16_C(0x02)
980#define E1K_TCP_RST UINT16_C(0x04)
981#define E1K_TCP_PSH UINT16_C(0x08)
982#define E1K_TCP_ACK UINT16_C(0x10)
983#define E1K_TCP_URG UINT16_C(0x20)
984#define E1K_TCP_ECE UINT16_C(0x40)
985#define E1K_TCP_CWR UINT16_C(0x80)
986#define E1K_TCP_FLAGS UINT16_C(0x3f)
987
988/** @todo use+extend RTNETTCP */
989struct E1kTcpHeader
990{
991 uint16_t src;
992 uint16_t dest;
993 uint32_t seqno;
994 uint32_t ackno;
995 uint16_t hdrlen_flags;
996 uint16_t wnd;
997 uint16_t chksum;
998 uint16_t urgp;
999};
1000AssertCompileSize(struct E1kTcpHeader, 20);
1001
1002
1003#ifdef E1K_WITH_TXD_CACHE
1004/** The current Saved state version. */
1005# define E1K_SAVEDSTATE_VERSION 4
1006/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
1007# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
1008#else /* !E1K_WITH_TXD_CACHE */
1009/** The current Saved state version. */
1010# define E1K_SAVEDSTATE_VERSION 3
1011#endif /* !E1K_WITH_TXD_CACHE */
1012/** Saved state version for VirtualBox 4.1 and earlier.
1013 * These did not include VLAN tag fields. */
1014#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
1015/** Saved state version for VirtualBox 3.0 and earlier.
1016 * This did not include the configuration part nor the E1kEEPROM. */
1017#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
1018
1019/**
1020 * E1000 shared device state.
1021 *
1022 * This is shared between ring-0 and ring-3.
1023 */
1024typedef struct E1KSTATE
1025{
1026 char szPrf[8]; /**< Log prefix, e.g. E1000#1. */
1027
1028 /** Handle to PCI region \#0, the MMIO region. */
1029 IOMMMIOHANDLE hMmioRegion;
1030 /** Handle to PCI region \#2, the I/O ports. */
1031 IOMIOPORTHANDLE hIoPorts;
1032
1033 /** Receive Interrupt Delay Timer. */
1034 TMTIMERHANDLE hRIDTimer;
1035 /** Receive Absolute Delay Timer. */
1036 TMTIMERHANDLE hRADTimer;
1037 /** Transmit Interrupt Delay Timer. */
1038 TMTIMERHANDLE hTIDTimer;
1039 /** Transmit Absolute Delay Timer. */
1040 TMTIMERHANDLE hTADTimer;
1041 /** Transmit Delay Timer. */
1042 TMTIMERHANDLE hTXDTimer;
1043 /** Late Interrupt Timer. */
1044 TMTIMERHANDLE hIntTimer;
1045 /** Link Up(/Restore) Timer. */
1046 TMTIMERHANDLE hLUTimer;
1047
1048 /** Transmit task. */
1049 PDMTASKHANDLE hTxTask;
1050 /** Rx wakeup signaller. */
1051 PDMTASKHANDLE hCanRxTask;
1052
1053 /** Critical section - what is it protecting? */
1054 PDMCRITSECT cs;
1055 /** RX Critical section. */
1056 PDMCRITSECT csRx;
1057#ifdef E1K_WITH_TX_CS
1058 /** TX Critical section. */
1059 PDMCRITSECT csTx;
1060#endif /* E1K_WITH_TX_CS */
1061 /** Base address of memory-mapped registers. */
1062 RTGCPHYS addrMMReg;
1063 /** MAC address obtained from the configuration. */
1064 RTMAC macConfigured;
1065 /** Base port of I/O space region. */
1066 RTIOPORT IOPortBase;
1067 /** EMT: Last time the interrupt was acknowledged. */
1068 uint64_t u64AckedAt;
1069 /** All: Used for eliminating spurious interrupts. */
1070 bool fIntRaised;
1071 /** EMT: false if the cable is disconnected by the GUI. */
1072 bool fCableConnected;
1073 /** EMT: Compute Ethernet CRC for RX packets. */
1074 bool fEthernetCRC;
1075 /** All: throttle interrupts. */
1076 bool fItrEnabled;
1077 /** All: throttle RX interrupts. */
1078 bool fItrRxEnabled;
1079 /** All: Delay TX interrupts using TIDV/TADV. */
1080 bool fTidEnabled;
1081 bool afPadding[2];
1082 /** Link up delay (in milliseconds). */
1083 uint32_t cMsLinkUpDelay;
1084
1085 /** All: Device register storage. */
1086 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1087 /** TX/RX: Status LED. */
1088 PDMLED led;
1089 /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1090 uint32_t u32PktNo;
1091
1092 /** EMT: Offset of the register to be read via IO. */
1093 uint32_t uSelectedReg;
1094 /** EMT: Multicast Table Array. */
1095 uint32_t auMTA[128];
1096 /** EMT: Receive Address registers. */
1097 E1KRA aRecAddr;
1098 /** EMT: VLAN filter table array. */
1099 uint32_t auVFTA[128];
1100 /** EMT: Receive buffer size. */
1101 uint16_t u16RxBSize;
1102 /** EMT: Locked state -- no state alteration possible. */
1103 bool fLocked;
1104 /** EMT: */
1105 bool fDelayInts;
1106 /** All: */
1107 bool fIntMaskUsed;
1108
1109 /** N/A: */
1110 bool volatile fMaybeOutOfSpace;
1111 /** EMT: Gets signalled when more RX descriptors become available. */
1112 RTSEMEVENT hEventMoreRxDescAvail;
1113#ifdef E1K_WITH_RXD_CACHE
1114 /** RX: Fetched RX descriptors. */
1115 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1116 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1117 /** RX: Actual number of fetched RX descriptors. */
1118 uint32_t nRxDFetched;
1119 /** RX: Index in cache of RX descriptor being processed. */
1120 uint32_t iRxDCurrent;
1121#endif /* E1K_WITH_RXD_CACHE */
1122
1123 /** TX: Context used for TCP segmentation packets. */
1124 E1KTXCTX contextTSE;
1125 /** TX: Context used for ordinary packets. */
1126 E1KTXCTX contextNormal;
1127#ifdef E1K_WITH_TXD_CACHE
1128 /** TX: Fetched TX descriptors. */
1129 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1130 /** TX: Actual number of fetched TX descriptors. */
1131 uint8_t nTxDFetched;
1132 /** TX: Index in cache of TX descriptor being processed. */
1133 uint8_t iTxDCurrent;
1134 /** TX: Will this frame be sent as GSO. */
1135 bool fGSO;
1136 /** Alignment padding. */
1137 bool fReserved;
1138 /** TX: Number of bytes in next packet. */
1139 uint32_t cbTxAlloc;
1140
1141#endif /* E1K_WITH_TXD_CACHE */
1142 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1143 * applicable to the current TSE mode. */
1144 PDMNETWORKGSO GsoCtx;
1145 /** Scratch space for holding the loopback / fallback scatter / gather
1146 * descriptor. */
1147 union
1148 {
1149 PDMSCATTERGATHER Sg;
1150 uint8_t padding[8 * sizeof(RTUINTPTR)];
1151 } uTxFallback;
1152 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1153 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1154 /** TX: Number of bytes assembled in TX packet buffer. */
1155 uint16_t u16TxPktLen;
1156 /** TX: When false, forces segmentation in E1000 instead of sending frames as GSO. */
1157 bool fGSOEnabled;
1158 /** TX: IP checksum has to be inserted if true. */
1159 bool fIPcsum;
1160 /** TX: TCP/UDP checksum has to be inserted if true. */
1161 bool fTCPcsum;
1162 /** TX: VLAN tag has to be inserted if true. */
1163 bool fVTag;
1164 /** TX: TCI part of VLAN tag to be inserted. */
1165 uint16_t u16VTagTCI;
1166 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1167 uint32_t u32PayRemain;
1168 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1169 uint16_t u16HdrRemain;
1170 /** TX TSE fallback: Flags from template header. */
1171 uint16_t u16SavedFlags;
1172 /** TX TSE fallback: Partial checksum from template header. */
1173 uint32_t u32SavedCsum;
1174 /** ?: Emulated controller type. */
1175 E1KCHIP eChip;
1176
1177 /** EMT: Physical interface emulation. */
1178 PHY phy;
1179
1180#if 0
1181 /** Alignment padding. */
1182 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1183#endif
1184
1185 STAMCOUNTER StatReceiveBytes;
1186 STAMCOUNTER StatTransmitBytes;
1187#if defined(VBOX_WITH_STATISTICS)
1188 STAMPROFILEADV StatMMIOReadRZ;
1189 STAMPROFILEADV StatMMIOReadR3;
1190 STAMPROFILEADV StatMMIOWriteRZ;
1191 STAMPROFILEADV StatMMIOWriteR3;
1192 STAMPROFILEADV StatEEPROMRead;
1193 STAMPROFILEADV StatEEPROMWrite;
1194 STAMPROFILEADV StatIOReadRZ;
1195 STAMPROFILEADV StatIOReadR3;
1196 STAMPROFILEADV StatIOWriteRZ;
1197 STAMPROFILEADV StatIOWriteR3;
1198 STAMPROFILEADV StatLateIntTimer;
1199 STAMCOUNTER StatLateInts;
1200 STAMCOUNTER StatIntsRaised;
1201 STAMCOUNTER StatIntsPrevented;
1202 STAMPROFILEADV StatReceive;
1203 STAMPROFILEADV StatReceiveCRC;
1204 STAMPROFILEADV StatReceiveFilter;
1205 STAMPROFILEADV StatReceiveStore;
1206 STAMPROFILEADV StatTransmitRZ;
1207 STAMPROFILEADV StatTransmitR3;
1208 STAMPROFILE StatTransmitSendRZ;
1209 STAMPROFILE StatTransmitSendR3;
1210 STAMPROFILE StatRxOverflow;
1211 STAMCOUNTER StatRxOverflowWakeup;
1212 STAMCOUNTER StatTxDescCtxNormal;
1213 STAMCOUNTER StatTxDescCtxTSE;
1214 STAMCOUNTER StatTxDescLegacy;
1215 STAMCOUNTER StatTxDescData;
1216 STAMCOUNTER StatTxDescTSEData;
1217 STAMCOUNTER StatTxPathFallback;
1218 STAMCOUNTER StatTxPathGSO;
1219 STAMCOUNTER StatTxPathRegular;
1220 STAMCOUNTER StatPHYAccesses;
1221 STAMCOUNTER aStatRegWrites[E1K_NUM_OF_REGS];
1222 STAMCOUNTER aStatRegReads[E1K_NUM_OF_REGS];
1223#endif /* VBOX_WITH_STATISTICS */
1224
1225#ifdef E1K_INT_STATS
1226 /* Internal stats */
1227 uint64_t u64ArmedAt;
1228 uint64_t uStatMaxTxDelay;
1229 uint32_t uStatInt;
1230 uint32_t uStatIntTry;
1231 uint32_t uStatIntLower;
1232 uint32_t uStatNoIntICR;
1233 int32_t iStatIntLost;
1234 int32_t iStatIntLostOne;
1235 uint32_t uStatIntIMS;
1236 uint32_t uStatIntSkip;
1237 uint32_t uStatIntLate;
1238 uint32_t uStatIntMasked;
1239 uint32_t uStatIntEarly;
1240 uint32_t uStatIntRx;
1241 uint32_t uStatIntTx;
1242 uint32_t uStatIntICS;
1243 uint32_t uStatIntRDTR;
1244 uint32_t uStatIntRXDMT0;
1245 uint32_t uStatIntTXQE;
1246 uint32_t uStatTxNoRS;
1247 uint32_t uStatTxIDE;
1248 uint32_t uStatTxDelayed;
1249 uint32_t uStatTxDelayExp;
1250 uint32_t uStatTAD;
1251 uint32_t uStatTID;
1252 uint32_t uStatRAD;
1253 uint32_t uStatRID;
1254 uint32_t uStatRxFrm;
1255 uint32_t uStatTxFrm;
1256 uint32_t uStatDescCtx;
1257 uint32_t uStatDescDat;
1258 uint32_t uStatDescLeg;
1259 uint32_t uStatTx1514;
1260 uint32_t uStatTx2962;
1261 uint32_t uStatTx4410;
1262 uint32_t uStatTx5858;
1263 uint32_t uStatTx7306;
1264 uint32_t uStatTx8754;
1265 uint32_t uStatTx16384;
1266 uint32_t uStatTx32768;
1267 uint32_t uStatTxLarge;
1268 uint32_t uStatAlign;
1269#endif /* E1K_INT_STATS */
1270} E1KSTATE;
1271/** Pointer to the E1000 device state. */
1272typedef E1KSTATE *PE1KSTATE;
1273
1274/**
1275 * E1000 ring-3 device state
1276 *
1277 * @implements PDMINETWORKDOWN
1278 * @implements PDMINETWORKCONFIG
1279 * @implements PDMILEDPORTS
1280 */
1281typedef struct E1KSTATER3
1282{
1283 PDMIBASE IBase;
1284 PDMINETWORKDOWN INetworkDown;
1285 PDMINETWORKCONFIG INetworkConfig;
1286 /** LED interface */
1287 PDMILEDPORTS ILeds;
1288 /** Attached network driver. */
1289 R3PTRTYPE(PPDMIBASE) pDrvBase;
1290 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1291
1292 /** Pointer to the shared state. */
1293 R3PTRTYPE(PE1KSTATE) pShared;
1294
1295 /** Device instance. */
1296 PPDMDEVINSR3 pDevInsR3;
1297 /** Attached network driver. */
1298 PPDMINETWORKUPR3 pDrvR3;
1299 /** The scatter / gather buffer used for the current outgoing packet. */
1300 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1301
1302 /** EMT: EEPROM emulation */
1303 E1kEEPROM eeprom;
1304} E1KSTATER3;
1305/** Pointer to the E1000 ring-3 device state. */
1306typedef E1KSTATER3 *PE1KSTATER3;
1307
1308
1309/**
1310 * E1000 ring-0 device state
1311 */
1312typedef struct E1KSTATER0
1313{
1314 /** Device instance. */
1315 PPDMDEVINSR0 pDevInsR0;
1316 /** Attached network driver. */
1317 PPDMINETWORKUPR0 pDrvR0;
1318 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1319 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1320} E1KSTATER0;
1321/** Pointer to the E1000 ring-0 device state. */
1322typedef E1KSTATER0 *PE1KSTATER0;
1323
1324
1325/**
1326 * E1000 raw-mode device state
1327 */
1328typedef struct E1KSTATERC
1329{
1330 /** Device instance. */
1331 PPDMDEVINSRC pDevInsRC;
1332 /** Attached network driver. */
1333 PPDMINETWORKUPRC pDrvRC;
1334 /** The scatter / gather buffer used for the current outgoing packet. */
1335 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1336} E1KSTATERC;
1337/** Pointer to the E1000 raw-mode device state. */
1338typedef E1KSTATERC *PE1KSTATERC;
1339
1340
1341/** @def PE1KSTATECC
1342 * Pointer to the instance data for the current context. */
1343#ifdef IN_RING3
1344typedef E1KSTATER3 E1KSTATECC;
1345typedef PE1KSTATER3 PE1KSTATECC;
1346#elif defined(IN_RING0)
1347typedef E1KSTATER0 E1KSTATECC;
1348typedef PE1KSTATER0 PE1KSTATECC;
1349#elif defined(IN_RC)
1350typedef E1KSTATERC E1KSTATECC;
1351typedef PE1KSTATERC PE1KSTATECC;
1352#else
1353# error "Not IN_RING3, IN_RING0 or IN_RC"
1354#endif
1355
1356
1357#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1358
1359/* Forward declarations ******************************************************/
1360static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread);
1361
1362/**
1363 * E1000 register read handler.
1364 */
1365typedef int (FNE1KREGREAD)(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1366/**
1367 * E1000 register write handler.
1368 */
1369typedef int (FNE1KREGWRITE)(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1370
1371static FNE1KREGREAD e1kRegReadUnimplemented;
1372static FNE1KREGWRITE e1kRegWriteUnimplemented;
1373static FNE1KREGREAD e1kRegReadAutoClear;
1374static FNE1KREGREAD e1kRegReadDefault;
1375static FNE1KREGWRITE e1kRegWriteDefault;
1376#if 0 /* unused */
1377static FNE1KREGREAD e1kRegReadCTRL;
1378#endif
1379static FNE1KREGWRITE e1kRegWriteCTRL;
1380static FNE1KREGREAD e1kRegReadEECD;
1381static FNE1KREGWRITE e1kRegWriteEECD;
1382static FNE1KREGWRITE e1kRegWriteEERD;
1383static FNE1KREGWRITE e1kRegWriteMDIC;
1384static FNE1KREGREAD e1kRegReadICR;
1385static FNE1KREGWRITE e1kRegWriteICR;
1386static FNE1KREGWRITE e1kRegWriteICS;
1387static FNE1KREGWRITE e1kRegWriteIMS;
1388static FNE1KREGWRITE e1kRegWriteIMC;
1389static FNE1KREGWRITE e1kRegWriteRCTL;
1390static FNE1KREGWRITE e1kRegWritePBA;
1391static FNE1KREGWRITE e1kRegWriteRDT;
1392static FNE1KREGWRITE e1kRegWriteRDTR;
1393static FNE1KREGWRITE e1kRegWriteTDT;
1394static FNE1KREGREAD e1kRegReadMTA;
1395static FNE1KREGWRITE e1kRegWriteMTA;
1396static FNE1KREGREAD e1kRegReadRA;
1397static FNE1KREGWRITE e1kRegWriteRA;
1398static FNE1KREGREAD e1kRegReadVFTA;
1399static FNE1KREGWRITE e1kRegWriteVFTA;
1400
1401/**
1402 * Register map table.
1403 *
1404 * Override pfnRead and pfnWrite to get register-specific behavior.
1405 */
1406static const struct E1kRegMap_st
1407{
1408 /** Register offset in the register space. */
1409 uint32_t offset;
1410 /** Size in bytes. Registers of size > 4 are in fact tables. */
1411 uint32_t size;
1412 /** Readable bits. */
1413 uint32_t readable;
1414 /** Writable bits. */
1415 uint32_t writable;
1416 /** Read callback. */
1417 FNE1KREGREAD *pfnRead;
1418 /** Write callback. */
1419 FNE1KREGWRITE *pfnWrite;
1420 /** Abbreviated name. */
1421 const char *abbrev;
1422 /** Full name. */
1423 const char *name;
1424} g_aE1kRegMap[E1K_NUM_OF_REGS] =
1425{
1426 /* offset size read mask write mask read callback write callback abbrev full name */
1427 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1428 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1429 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1430 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1431 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1432 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1433 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1434 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1435 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1436 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1437 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1438 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1439 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1440 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1441 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1442 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1443 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1444 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1445 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1446 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1447 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1448 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1449 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1450 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1451 { 0x00e00, 0x00004, 0xCFCFCFCF, 0xCFCFCFCF, e1kRegReadDefault , e1kRegWriteDefault , "LEDCTL" , "LED Control" },
1452 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1453 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1454 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1455 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1456 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1457 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1458 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1459 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1460 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1461 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1462 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1463 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1464 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1465 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1466 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1467 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1468 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1469 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1470 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1471 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1472 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1473 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1474 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1475 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1476 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1477 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1478 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1479 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1480 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1481 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1482 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1483 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1484 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1485 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1486 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1487 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1488 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1489 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1490 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1491 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1492 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1493 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1494 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1495 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1496 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1497 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1498 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1499 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1500 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1501 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1502 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1503 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1504 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1505 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1506 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1507 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1508 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1509 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1510 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1511 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1512 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1513 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1514 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1515 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1516 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1517 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1518 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1519 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1520 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1521 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1522 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1523 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1524 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1525 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1526 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1527 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1528 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1529 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1530 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1531 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1532 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1533 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1534 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1535 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1536 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1537 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1538 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1539 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1540 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1541 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1542 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1543 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1544 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1545 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1546 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1547 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1548 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1549 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1550 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1551 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1552 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1553 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1554 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1555 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1556 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1557 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1558 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1559 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
1560 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
1561 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
1562};
1563
1564#ifdef LOG_ENABLED
1565
1566/**
1567 * Convert a U32 value to a hex string. Masked-out nibbles are replaced with dots.
1568 *
1569 * @remarks The mask has half-byte (nibble, not bit) granularity (e.g. 0000000F).
1570 *
1571 * @returns The buffer.
1572 *
1573 * @param u32 The word to convert into string.
1574 * @param   mask    Selects which nibbles to convert.
1575 * @param buf Where to put the result.
1576 */
1577static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1578{
1579 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1580 {
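/* For nibble values 10..15, adding '7' instead of '0' yields 'A'..'F', since 'A' - 10 == '7' in ASCII. */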
1581 if (mask & 0xF)
1582 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1583 else
1584 *ptr = '.';
1585 }
1586 buf[8] = 0;
1587 return buf;
1588}
1589
1590/**
1591 * Returns timer name for debug purposes.
1592 *
1593 * @returns The timer name.
1594 *
1595 * @param pThis The device state structure.
1596 * @param hTimer The timer to name.
1597 */
1598DECLINLINE(const char *) e1kGetTimerName(PE1KSTATE pThis, TMTIMERHANDLE hTimer)
1599{
1600 if (hTimer == pThis->hTIDTimer)
1601 return "TID";
1602 if (hTimer == pThis->hTADTimer)
1603 return "TAD";
1604 if (hTimer == pThis->hRIDTimer)
1605 return "RID";
1606 if (hTimer == pThis->hRADTimer)
1607 return "RAD";
1608 if (hTimer == pThis->hIntTimer)
1609 return "Int";
1610 if (hTimer == pThis->hTXDTimer)
1611 return "TXD";
1612 if (hTimer == pThis->hLUTimer)
1613 return "LinkUp";
1614 return "unknown";
1615}
1616
1617#endif /* LOG_ENABLED */
1618
1619/**
1620 * Arm a timer.
1621 *
1622 * @param pDevIns The device instance.
1623 * @param pThis Pointer to the device state structure.
1624 * @param hTimer The timer to arm.
1625 * @param uExpireIn Expiration interval in microseconds.
1626 */
1627DECLINLINE(void) e1kArmTimer(PPDMDEVINS pDevIns, PE1KSTATE pThis, TMTIMERHANDLE hTimer, uint32_t uExpireIn)
1628{
1629 if (pThis->fLocked)
1630 return;
1631
1632 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1633 pThis->szPrf, e1kGetTimerName(pThis, hTimer), uExpireIn));
1634 int rc = PDMDevHlpTimerSetMicro(pDevIns, hTimer, uExpireIn);
1635 AssertRC(rc);
1636}
1637
1638#ifdef IN_RING3
1639/**
1640 * Cancel a timer.
1641 *
1642 * @param pDevIns The device instance.
1643 * @param pThis Pointer to the device state structure.
1644 * @param   hTimer      The handle of the timer to cancel.
1645 */
1646DECLINLINE(void) e1kCancelTimer(PPDMDEVINS pDevIns, PE1KSTATE pThis, TMTIMERHANDLE hTimer)
1647{
1648 E1kLog2(("%s Stopping %s timer...\n",
1649 pThis->szPrf, e1kGetTimerName(pThis, hTimer)));
1650 int rc = PDMDevHlpTimerStop(pDevIns, hTimer);
1651 if (RT_FAILURE(rc))
1652 E1kLog2(("%s e1kCancelTimer: TMTimerStop(%s) failed with %Rrc\n",
1653 pThis->szPrf, e1kGetTimerName(pThis, hTimer), rc));
1654 RT_NOREF_PV(pThis);
1655}
1656#endif /* IN_RING3 */
1657
1658#define e1kCsEnter(ps, rc) PDMCritSectEnter(&ps->cs, rc)
1659#define e1kCsLeave(ps) PDMCritSectLeave(&ps->cs)
1660
1661#define e1kCsRxEnter(ps, rc) PDMCritSectEnter(&ps->csRx, rc)
1662#define e1kCsRxLeave(ps) PDMCritSectLeave(&ps->csRx)
1663#define e1kCsRxIsOwner(ps) PDMCritSectIsOwner(&ps->csRx)
1664
1665#ifndef E1K_WITH_TX_CS
1666# define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1667# define e1kCsTxLeave(ps) do { } while (0)
1668#else /* E1K_WITH_TX_CS */
1669# define e1kCsTxEnter(ps, rc) PDMCritSectEnter(&ps->csTx, rc)
1670# define e1kCsTxLeave(ps) PDMCritSectLeave(&ps->csTx)
1671#endif /* E1K_WITH_TX_CS */
1672
1673#ifdef IN_RING3
1674
1675/**
1676 * Wakeup the RX thread.
1677 */
1678static void e1kWakeupReceive(PPDMDEVINS pDevIns)
1679{
1680 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
1681 if ( pThis->fMaybeOutOfSpace
1682 && pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
1683 {
1684 STAM_COUNTER_INC(&pThis->StatRxOverflowWakeup);
1685 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", pThis->szPrf));
1686 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
1687 }
1688}
1689
1690/**
1691 * Hardware reset. Revert all registers to initial values.
1692 *
1693 * @param pDevIns The device instance.
1694 * @param pThis The device state structure.
1695 * @param pThisCC The current context instance data.
1696 */
1697static void e1kR3HardReset(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
1698{
1699 E1kLog(("%s Hard reset triggered\n", pThis->szPrf));
1700 /* No interrupts should survive device reset, see @bugref{9556}. */
1701 if (pThis->fIntRaised)
1702 {
1703 /* Lower(0) INTA(0) */
1704 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
1705 pThis->fIntRaised = false;
1706 E1kLog(("%s e1kR3HardReset: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
1707 }
1708 memset(pThis->auRegs, 0, sizeof(pThis->auRegs));
1709 memset(pThis->aRecAddr.au32, 0, sizeof(pThis->aRecAddr.au32));
1710#ifdef E1K_INIT_RA0
1711 memcpy(pThis->aRecAddr.au32, pThis->macConfigured.au8,
1712 sizeof(pThis->macConfigured.au8));
1713 pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;
1714#endif /* E1K_INIT_RA0 */
1715 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1716 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1717 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1718 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1719 Assert(GET_BITS(RCTL, BSIZE) == 0);
1720 pThis->u16RxBSize = 2048;
1721
1722 uint16_t u16LedCtl = 0x0602; /* LED0/LINK_UP#, LED2/LINK100# */
1723 pThisCC->eeprom.readWord(0x2F, &u16LedCtl); /* Read LEDCTL defaults from EEPROM */
1724 LEDCTL = 0x07008300 | (((uint32_t)u16LedCtl & 0xCF00) << 8) | (u16LedCtl & 0xCF); /* Only LED0 and LED2 defaults come from EEPROM */
1725
1726 /* Reset promiscuous mode */
1727 if (pThisCC->pDrvR3)
1728 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, false);
1729
1730#ifdef E1K_WITH_TXD_CACHE
1731 int rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
1732 if (RT_LIKELY(rc == VINF_SUCCESS))
1733 {
1734 pThis->nTxDFetched = 0;
1735 pThis->iTxDCurrent = 0;
1736 pThis->fGSO = false;
1737 pThis->cbTxAlloc = 0;
1738 e1kCsTxLeave(pThis);
1739 }
1740#endif /* E1K_WITH_TXD_CACHE */
1741#ifdef E1K_WITH_RXD_CACHE
1742 if (RT_LIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1743 {
1744 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
1745 e1kCsRxLeave(pThis);
1746 }
1747#endif /* E1K_WITH_RXD_CACHE */
1748#ifdef E1K_LSC_ON_RESET
1749 E1kLog(("%s Will trigger LSC in %d seconds...\n",
1750 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
1751 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, pThis->cMsLinkUpDelay * 1000);
1752#endif /* E1K_LSC_ON_RESET */
1753}
1754
1755#endif /* IN_RING3 */
1756
1757/**
1758 * Compute Internet checksum.
1759 *
1760 * @remarks Refer to http://www.netfor2.com/checksum.html for short intro.
1761 *
1762 * @param   pvBuf       The buffer holding the data to checksum.
1763 * @param   cb          The number of bytes to checksum.
1766 *
1767 * @return The 1's complement of the 1's complement sum.
1768 *
1769 * @thread E1000_TX
1770 */
1771static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1772{
1773 uint32_t csum = 0;
1774 uint16_t *pu16 = (uint16_t *)pvBuf;
1775
1776 while (cb > 1)
1777 {
1778 csum += *pu16++;
1779 cb -= 2;
1780 }
1781 if (cb)
1782 csum += *(uint8_t*)pu16;
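/* Fold the carries back into the low 16 bits (end-around carry) before returning the one's complement. */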
1783 while (csum >> 16)
1784 csum = (csum >> 16) + (csum & 0xFFFF);
1785 return ~csum;
1786}
1787
1788/**
1789 * Dump a packet to debug log.
1790 *
1791 * @param pThis The device state structure.
1792 * @param cpPacket The packet.
1793 * @param cb The size of the packet.
1794 * @param pszText A string denoting direction of packet transfer.
1795 * @thread E1000_TX
1796 */
1797DECLINLINE(void) e1kPacketDump(PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *pszText)
1798{
1799#ifdef DEBUG
1800 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1801 {
1802 Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
1803 pThis->szPrf, pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cb));
1804 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1805 {
1806 Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
1807 pThis->szPrf, cpPacket+14+8, cpPacket+14+24));
1808 if (*(cpPacket+14+6) == 0x6)
1809 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1810 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1811 }
1812 else if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x800)
1813 {
1814 Log4(("%s --- IPv4: %RTnaipv4 => %RTnaipv4\n",
1815 pThis->szPrf, *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16)));
1816 if (*(cpPacket+14+6) == 0x6)
1817 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1818 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1819 }
1820 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1821 e1kCsLeave(pThis);
1822 }
1823#else
1824 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1825 {
1826 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1827 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
1828 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cpPacket+14+8, cpPacket+14+24,
1829 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1830 else
1831 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
1832 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket,
1833 *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16),
1834 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1835 e1kCsLeave(pThis);
1836 }
1837 RT_NOREF2(cb, pszText);
1838#endif
1839}
1840
1841/**
1842 * Determine the type of transmit descriptor.
1843 *
1844 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1845 *
1846 * @param pDesc Pointer to descriptor union.
1847 * @thread E1000_TX
1848 */
1849DECLINLINE(int) e1kGetDescType(E1KTXDESC *pDesc)
1850{
1851 if (pDesc->legacy.cmd.fDEXT)
1852 return pDesc->context.dw2.u4DTYP;
1853 return E1K_DTYP_LEGACY;
1854}
1855
1856
1857#ifdef E1K_WITH_RXD_CACHE
1858/**
1859 * Return the number of RX descriptors that belong to the hardware.
1860 *
1861 * @returns the number of available descriptors in RX ring.
1862 * @param pThis The device state structure.
1863 * @thread ???
1864 */
1865DECLINLINE(uint32_t) e1kGetRxLen(PE1KSTATE pThis)
1866{
1867 /*
1868 * Make sure RDT won't change during computation. EMT may modify RDT at
1869 * any moment.
1870 */
1871 uint32_t rdt = RDT;
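/* Example: a 256-descriptor ring with RDH=250 and RDT=10 gives (256 - 250) + 10 = 16 descriptors owned by hardware. */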
1872 return (RDH > rdt ? RDLEN/sizeof(E1KRXDESC) : 0) + rdt - RDH;
1873}
1874
1875DECLINLINE(unsigned) e1kRxDInCache(PE1KSTATE pThis)
1876{
1877 return pThis->nRxDFetched > pThis->iRxDCurrent ?
1878 pThis->nRxDFetched - pThis->iRxDCurrent : 0;
1879}
1880
1881DECLINLINE(unsigned) e1kRxDIsCacheEmpty(PE1KSTATE pThis)
1882{
1883 return pThis->iRxDCurrent >= pThis->nRxDFetched;
1884}
1885
1886/**
1887 * Load receive descriptors from guest memory. The caller needs to be in Rx
1888 * critical section.
1889 *
1890 * We need two physical reads in case the tail wrapped around the end of RX
1891 * descriptor ring.
1892 *
1893 * @returns the actual number of descriptors fetched.
1894 * @param pDevIns The device instance.
1895 * @param pThis The device state structure.
1896 * @thread EMT, RX
1897 */
1898DECLINLINE(unsigned) e1kRxDPrefetch(PPDMDEVINS pDevIns, PE1KSTATE pThis)
1899{
1900 /* We've already loaded pThis->nRxDFetched descriptors past RDH. */
1901 unsigned nDescsAvailable = e1kGetRxLen(pThis) - e1kRxDInCache(pThis);
1902 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pThis->nRxDFetched);
1903 unsigned nDescsTotal = RDLEN / sizeof(E1KRXDESC);
1904 Assert(nDescsTotal != 0);
1905 if (nDescsTotal == 0)
1906 return 0;
1907 unsigned nFirstNotLoaded = (RDH + e1kRxDInCache(pThis)) % nDescsTotal;
1908 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
1909 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
1910 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
1911 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
1912 nFirstNotLoaded, nDescsInSingleRead));
1913 if (nDescsToFetch == 0)
1914 return 0;
1915 E1KRXDESC* pFirstEmptyDesc = &pThis->aRxDescriptors[pThis->nRxDFetched];
1916 PDMDevHlpPhysRead(pDevIns,
1917 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
1918 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
1919 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
1920 // unsigned i, j;
1921 // for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
1922 // {
1923 // pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
1924 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
1925 // }
1926 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
1927 pThis->szPrf, nDescsInSingleRead,
1928 RDBAH, RDBAL + RDH * sizeof(E1KRXDESC),
1929 nFirstNotLoaded, RDLEN, RDH, RDT));
1930 if (nDescsToFetch > nDescsInSingleRead)
1931 {
1932 PDMDevHlpPhysRead(pDevIns,
1933 ((uint64_t)RDBAH << 32) + RDBAL,
1934 pFirstEmptyDesc + nDescsInSingleRead,
1935 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
1936 // Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
1937 // for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
1938 // {
1939 // pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
1940 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
1941 // }
1942 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
1943 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
1944 RDBAH, RDBAL));
1945 }
1946 pThis->nRxDFetched += nDescsToFetch;
1947 return nDescsToFetch;
1948}
1949
1950# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
1951/**
1952 * Dump receive descriptor to debug log.
1953 *
1954 * @param pThis The device state structure.
1955 * @param pDesc Pointer to the descriptor.
1956 * @thread E1000_RX
1957 */
1958static void e1kPrintRDesc(PE1KSTATE pThis, E1KRXDESC *pDesc)
1959{
1960 RT_NOREF2(pThis, pDesc);
1961 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", pThis->szPrf, pDesc->u16Length));
1962 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1963 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1964 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1965 pDesc->status.fPIF ? "PIF" : "pif",
1966 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1967 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1968 pDesc->status.fVP ? "VP" : "vp",
1969 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1970 pDesc->status.fEOP ? "EOP" : "eop",
1971 pDesc->status.fDD ? "DD" : "dd",
1972 pDesc->status.fRXE ? "RXE" : "rxe",
1973 pDesc->status.fIPE ? "IPE" : "ipe",
1974 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1975 pDesc->status.fCE ? "CE" : "ce",
1976 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1977 E1K_SPEC_VLAN(pDesc->status.u16Special),
1978 E1K_SPEC_PRI(pDesc->status.u16Special)));
1979}
1980# endif /* IN_RING3 */
1981#endif /* E1K_WITH_RXD_CACHE */
1982
1983/**
1984 * Dump transmit descriptor to debug log.
1985 *
1986 * @param pThis The device state structure.
1987 * @param pDesc Pointer to descriptor union.
1988 * @param pszDir A string denoting direction of descriptor transfer
1989 * @thread E1000_TX
1990 */
1991static void e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, const char *pszDir,
1992 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
1993{
1994 RT_NOREF4(pThis, pDesc, pszDir, uLevel);
1995
1996 /*
1997 * Unfortunately we cannot use our format handler here, we want R0 logging
1998 * as well.
1999 */
2000 switch (e1kGetDescType(pDesc))
2001 {
2002 case E1K_DTYP_CONTEXT:
2003 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
2004 pThis->szPrf, pszDir, pszDir));
2005 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
2006 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
2007 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
2008 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
2009 pDesc->context.dw2.fIDE ? " IDE":"",
2010 pDesc->context.dw2.fRS ? " RS" :"",
2011 pDesc->context.dw2.fTSE ? " TSE":"",
2012 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
2013 pDesc->context.dw2.fTCP ? "TCP":"UDP",
2014 pDesc->context.dw2.u20PAYLEN,
2015 pDesc->context.dw3.u8HDRLEN,
2016 pDesc->context.dw3.u16MSS,
2017 pDesc->context.dw3.fDD?"DD":""));
2018 break;
2019 case E1K_DTYP_DATA:
2020 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
2021 pThis->szPrf, pszDir, pDesc->data.cmd.u20DTALEN, pszDir));
2022 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
2023 pDesc->data.u64BufAddr,
2024 pDesc->data.cmd.u20DTALEN));
2025 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
2026 pDesc->data.cmd.fIDE ? " IDE" :"",
2027 pDesc->data.cmd.fVLE ? " VLE" :"",
2028 pDesc->data.cmd.fRPS ? " RPS" :"",
2029 pDesc->data.cmd.fRS ? " RS" :"",
2030 pDesc->data.cmd.fTSE ? " TSE" :"",
2031 pDesc->data.cmd.fIFCS? " IFCS":"",
2032 pDesc->data.cmd.fEOP ? " EOP" :"",
2033 pDesc->data.dw3.fDD ? " DD" :"",
2034 pDesc->data.dw3.fEC ? " EC" :"",
2035 pDesc->data.dw3.fLC ? " LC" :"",
2036 pDesc->data.dw3.fTXSM? " TXSM":"",
2037 pDesc->data.dw3.fIXSM? " IXSM":"",
2038 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
2039 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
2040 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
2041 break;
2042 case E1K_DTYP_LEGACY:
2043 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
2044 pThis->szPrf, pszDir, pDesc->legacy.cmd.u16Length, pszDir));
2045 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
2046 pDesc->data.u64BufAddr,
2047 pDesc->legacy.cmd.u16Length));
2048 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
2049 pDesc->legacy.cmd.fIDE ? " IDE" :"",
2050 pDesc->legacy.cmd.fVLE ? " VLE" :"",
2051 pDesc->legacy.cmd.fRPS ? " RPS" :"",
2052 pDesc->legacy.cmd.fRS ? " RS" :"",
2053 pDesc->legacy.cmd.fIC ? " IC" :"",
2054 pDesc->legacy.cmd.fIFCS? " IFCS":"",
2055 pDesc->legacy.cmd.fEOP ? " EOP" :"",
2056 pDesc->legacy.dw3.fDD ? " DD" :"",
2057 pDesc->legacy.dw3.fEC ? " EC" :"",
2058 pDesc->legacy.dw3.fLC ? " LC" :"",
2059 pDesc->legacy.cmd.u8CSO,
2060 pDesc->legacy.dw3.u8CSS,
2061 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
2062 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
2063 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
2064 break;
2065 default:
2066 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
2067 pThis->szPrf, pszDir, pszDir));
2068 break;
2069 }
2070}
2071
2072/**
2073 * Raise an interrupt later.
2074 *
2075 * @param pThis The device state structure.
2076 */
2077DECLINLINE(void) e1kPostponeInterrupt(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint64_t nsDeadline)
2078{
2079 if (!PDMDevHlpTimerIsActive(pDevIns, pThis->hIntTimer))
2080 PDMDevHlpTimerSetNano(pDevIns, pThis->hIntTimer, nsDeadline);
2081}
2082
2083/**
2084 * Raise interrupt if not masked.
2085 *
2086 * @param pThis The device state structure.
2087 */
2088static int e1kRaiseInterrupt(PPDMDEVINS pDevIns, PE1KSTATE pThis, int rcBusy, uint32_t u32IntCause = 0)
2089{
2090 int rc = e1kCsEnter(pThis, rcBusy);
2091 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2092 return rc;
2093
2094 E1K_INC_ISTAT_CNT(pThis->uStatIntTry);
2095 ICR |= u32IntCause;
2096 if (ICR & IMS)
2097 {
2098 if (pThis->fIntRaised)
2099 {
2100 E1K_INC_ISTAT_CNT(pThis->uStatIntSkip);
2101 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
2102 pThis->szPrf, ICR & IMS));
2103 }
2104 else
2105 {
2106 uint64_t tsNow = PDMDevHlpTimerGet(pDevIns, pThis->hIntTimer);
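/* ITR is in 256 ns units: ITR * 256 is the minimal interval in nanoseconds before another interrupt may be raised. */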
2107 if (!!ITR && tsNow - pThis->u64AckedAt < ITR * 256
2108 && pThis->fItrEnabled && (pThis->fItrRxEnabled || !(ICR & ICR_RXT0)))
2109 {
2110 E1K_INC_ISTAT_CNT(pThis->uStatIntEarly);
2111 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
2112 pThis->szPrf, (uint32_t)(tsNow - pThis->u64AckedAt), ITR * 256));
2113 e1kPostponeInterrupt(pDevIns, pThis, ITR * 256);
2114 }
2115 else
2116 {
2117
2118 /* Since we are delivering the interrupt now
2119 * there is no need to do it later -- stop the timer.
2120 */
2121 PDMDevHlpTimerStop(pDevIns, pThis->hIntTimer);
2122 E1K_INC_ISTAT_CNT(pThis->uStatInt);
2123 STAM_COUNTER_INC(&pThis->StatIntsRaised);
2124 /* Got at least one unmasked interrupt cause */
2125 pThis->fIntRaised = true;
2126 /* Raise(1) INTA(0) */
2127 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
2128 PDMDevHlpPCISetIrq(pDevIns, 0, 1);
2129 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
2130 pThis->szPrf, ICR & IMS));
2131 }
2132 }
2133 }
2134 else
2135 {
2136 E1K_INC_ISTAT_CNT(pThis->uStatIntMasked);
2137 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
2138 pThis->szPrf, ICR, IMS));
2139 }
2140 e1kCsLeave(pThis);
2141 return VINF_SUCCESS;
2142}
2143
2144/**
2145 * Compute the physical address of the descriptor.
2146 *
2147 * @returns the physical address of the descriptor.
2148 *
2149 * @param baseHigh High-order 32 bits of descriptor table address.
2150 * @param baseLow Low-order 32 bits of descriptor table address.
2151 * @param idxDesc The descriptor index in the table.
2152 */
2153DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
2154{
2155 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
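/* E.g. baseHigh=0, baseLow=0x10000 and idxDesc=3 yield 0x10030, as each descriptor occupies 16 bytes. */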
2156 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
2157}
2158
2159#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2160/**
2161 * Advance the head pointer of the receive descriptor queue.
2162 *
2163 * @remarks RDH always points to the next available RX descriptor.
2164 *
2165 * @param pDevIns The device instance.
2166 * @param pThis The device state structure.
2167 */
2168DECLINLINE(void) e1kAdvanceRDH(PPDMDEVINS pDevIns, PE1KSTATE pThis)
2169{
2170 Assert(e1kCsRxIsOwner(pThis));
2171 //e1kCsEnter(pThis, RT_SRC_POS);
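/* RDH counts descriptors while RDLEN is in bytes; wrap RDH to zero once it moves past the end of the ring. */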
2172 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
2173 RDH = 0;
2174#ifdef E1K_WITH_RXD_CACHE
2175 /*
2176 * We need to fetch descriptors now as the guest may advance RDT all the way
2177 * to RDH as soon as we generate RXDMT0 interrupt. This is mostly to provide
2178 * compatibility with Phar Lap ETS, see @bugref{7346}. Note that we do not
2179 * check if the receiver is enabled. It must be, otherwise we won't get here
2180 * in the first place.
2181 *
2182 * Note that we should have moved both RDH and iRxDCurrent by now.
2183 */
2184 if (e1kRxDIsCacheEmpty(pThis))
2185 {
2186 /* Cache is empty, reset it and check if we can fetch more. */
2187 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2188 E1kLog3(("%s e1kAdvanceRDH: Rx cache is empty, RDH=%x RDT=%x "
2189 "iRxDCurrent=%x nRxDFetched=%x\n",
2190 pThis->szPrf, RDH, RDT, pThis->iRxDCurrent, pThis->nRxDFetched));
2191 e1kRxDPrefetch(pDevIns, pThis);
2192 }
2193#endif /* E1K_WITH_RXD_CACHE */
2194 /*
2195 * Compute current receive queue length and fire RXDMT0 interrupt
2196 * if we are low on receive buffers
2197 */
2198 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
2199 /*
2200 * The minimum threshold is controlled by RDMTS bits of RCTL:
2201 * 00 = 1/2 of RDLEN
2202 * 01 = 1/4 of RDLEN
2203 * 10 = 1/8 of RDLEN
2204 * 11 = reserved
2205 */
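/* E.g. RDLEN = 4096 bytes (256 descriptors) with RDMTS = 01b yields a threshold of 256 / 4 = 64 descriptors. */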
2206 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
2207 if (uRQueueLen <= uMinRQThreshold)
2208 {
2209 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
2210 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
2211 pThis->szPrf, RDH, RDT, uRQueueLen, uMinRQThreshold));
2212 E1K_INC_ISTAT_CNT(pThis->uStatIntRXDMT0);
2213 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXDMT0);
2214 }
2215 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
2216 pThis->szPrf, RDH, RDT, uRQueueLen));
2217 //e1kCsLeave(pThis);
2218}
2219#endif /* IN_RING3 */
2220
2221#ifdef E1K_WITH_RXD_CACHE
2222
2223# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2224
2225/**
2226 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2227 * RX ring if the cache is empty.
2228 *
2229 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2230 * go out of sync with RDH which will cause trouble when EMT checks if the
2231 * cache is empty to do a pre-fetch, see @bugref{6217}.
2232 *
2233 * @param pDevIns The device instance.
2234 * @param pThis The device state structure.
2235 * @thread RX
2236 */
2237DECLINLINE(E1KRXDESC *) e1kRxDGet(PPDMDEVINS pDevIns, PE1KSTATE pThis)
2238{
2239 Assert(e1kCsRxIsOwner(pThis));
2240 /* Check the cache first. */
2241 if (pThis->iRxDCurrent < pThis->nRxDFetched)
2242 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2243 /* Cache is empty, reset it and check if we can fetch more. */
2244 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2245 if (e1kRxDPrefetch(pDevIns, pThis))
2246 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2247 /* Out of Rx descriptors. */
2248 return NULL;
2249}
2250
2251
2252/**
2253 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2254 * pointer. The descriptor gets written back to the RXD ring.
2255 *
2256 * @param pDevIns The device instance.
2257 * @param pThis The device state structure.
2258 * @param pDesc The descriptor being "returned" to the RX ring.
2259 * @thread RX
2260 */
2261DECLINLINE(void) e1kRxDPut(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC* pDesc)
2262{
2263 Assert(e1kCsRxIsOwner(pThis));
2264 pThis->iRxDCurrent++;
2265 // Assert(pDesc >= pThis->aRxDescriptors);
2266 // Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2267 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2268 // uint32_t rdh = RDH;
2269 // Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
2270 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2271 /*
2272 * We need to print the descriptor before advancing RDH as it may fetch new
2273 * descriptors into the cache.
2274 */
2275 e1kPrintRDesc(pThis, pDesc);
2276 e1kAdvanceRDH(pDevIns, pThis);
2277}
2278
2279/**
2280 * Store a fragment of a received packet at the specified address.
2281 *
2282 * @param pDevIns The device instance.
2283 * @param pThis The device state structure.
2284 * @param pDesc The next available RX descriptor.
2285 * @param pvBuf The fragment.
2286 * @param cb The size of the fragment.
2287 */
2288static DECLCALLBACK(void) e1kStoreRxFragment(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2289{
2290 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2291 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2292 pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2293 PDMDevHlpPCIPhysWrite(pDevIns, pDesc->u64BufAddr, pvBuf, cb);
2294 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2295 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2296}
2297
2298# endif /* IN_RING3 */
2299
2300#else /* !E1K_WITH_RXD_CACHE */
2301
2302/**
2303 * Store a fragment of a received packet that fits into the next available RX
2304 * buffer.
2305 *
2306 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2307 *
2308 * @param pDevIns The device instance.
2309 * @param pThis The device state structure.
2310 * @param pDesc The next available RX descriptor.
2311 * @param pvBuf The fragment.
2312 * @param cb The size of the fragment.
2313 */
2314static DECLCALLBACK(void) e1kStoreRxFragment(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2315{
2316 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2317 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2318 PDMDevHlpPCIPhysWrite(pDevIns, pDesc->u64BufAddr, pvBuf, cb);
2319 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2320 /* Write back the descriptor */
2321 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2322 e1kPrintRDesc(pThis, pDesc);
2323 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2324 /* Advance head */
2325 e1kAdvanceRDH(pDevIns, pThis);
2326 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
2327 if (pDesc->status.fEOP)
2328 {
2329 /* Complete packet has been stored -- it is time to let the guest know. */
2330#ifdef E1K_USE_RX_TIMERS
2331 if (RDTR)
2332 {
2333 /* Arm the timer to fire in RDTR usec (discard .024) */
2334 e1kArmTimer(pDevIns, pThis, pThis->hRIDTimer, RDTR);
2335 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2336 if (RADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hRADTimer))
2337 e1kArmTimer(pDevIns, pThis, pThis->hRADTimer, RADV);
2338 }
2339 else
2340 {
2341#endif
2342 /* 0 delay means immediate interrupt */
2343 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2344 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
2345#ifdef E1K_USE_RX_TIMERS
2346 }
2347#endif
2348 }
2349 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2350}
2351
2352#endif /* !E1K_WITH_RXD_CACHE */
2353
2354/**
2355 * Returns true if it is a broadcast packet.
2356 *
2357 * @returns true if destination address indicates broadcast.
2358 * @param pvBuf The ethernet packet.
2359 */
2360DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2361{
2362 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2363 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2364}
2365
2366/**
2367 * Returns true if it is a multicast packet.
2368 *
2369 * @remarks returns true for broadcast packets as well.
2370 * @returns true if destination address indicates multicast.
2371 * @param pvBuf The ethernet packet.
2372 */
2373DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2374{
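/* The least significant bit of the first destination address byte is the IEEE 802.3 group (I/G) bit. */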
2375 return (*(char*)pvBuf) & 1;
2376}
2377
2378#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2379/**
2380 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2381 *
2382 * @remarks We emulate checksum offloading for major packets types only.
2383 *
2384 * @returns VBox status code.
2385 * @param pThis The device state structure.
2386 * @param pFrame The available data.
2387 * @param cb Number of bytes available in the buffer.
2388 * @param   pStatus     Pointer to the RX descriptor status bit fields to update.
2389 */
2390static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2391{
2392 /** @todo
2393 * It is not safe to bypass checksum verification for packets coming
2394 * from the real wire. We are currently unable to tell where packets are
2395 * coming from, so we tell the driver to ignore our checksum flags
2396 * and do the verification in software.
2397 */
2398# if 0
2399 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2400
2401 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", pThis->szPrf, uEtherType));
2402
2403 switch (uEtherType)
2404 {
2405 case 0x800: /* IPv4 */
2406 {
2407 pStatus->fIXSM = false;
2408 pStatus->fIPCS = true;
2409 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2410 /* TCP/UDP checksum offloading works with TCP and UDP only */
2411 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2412 break;
2413 }
2414 case 0x86DD: /* IPv6 */
2415 pStatus->fIXSM = false;
2416 pStatus->fIPCS = false;
2417 pStatus->fTCPCS = true;
2418 break;
2419 default: /* ARP, VLAN, etc. */
2420 pStatus->fIXSM = true;
2421 break;
2422 }
2423# else
2424 pStatus->fIXSM = true;
2425 RT_NOREF_PV(pThis); RT_NOREF_PV(pFrame); RT_NOREF_PV(cb);
2426# endif
2427 return VINF_SUCCESS;
2428}
2429#endif /* IN_RING3 */
2430
2431/**
2432 * Pad and store received packet.
2433 *
2434 * @remarks Make sure that the packet appears to upper layer as one coming
2435 * from real Ethernet: pad it and insert FCS.
2436 *
2437 * @returns VBox status code.
2438 * @param pDevIns The device instance.
2439 * @param pThis The device state structure.
2440 * @param pvBuf The available data.
2441 * @param cb Number of bytes available in the buffer.
2442 * @param status Bit fields containing status info.
2443 */
2444static int e1kHandleRxPacket(PPDMDEVINS pDevIns, PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
2445{
2446#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2447 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2448 uint8_t *ptr = rxPacket;
2449
2450 int rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2451 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2452 return rc;
2453
2454 if (cb > 70) /* unqualified guess */
2455 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
2456
2457 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2458 Assert(cb > 16);
2459 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2460 E1kLog3(("%s Max RX packet size is %u\n", pThis->szPrf, cbMax));
2461 if (status.fVP)
2462 {
2463 /* VLAN packet -- strip VLAN tag in VLAN mode */
2464 if ((CTRL & CTRL_VME) && cb > 16)
2465 {
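/* In an 802.1Q frame bytes 12..13 carry the TPID (0x8100) and bytes 14..15 the TCI, so u16Ptr[7] is the TCI word. */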
2466 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2467 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2468 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2469 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2470 cb -= 4;
2471 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2472 pThis->szPrf, status.u16Special, cb));
2473 }
2474 else
2475 status.fVP = false; /* Set VP only if we stripped the tag */
2476 }
2477 else
2478 memcpy(rxPacket, pvBuf, cb);
2479 /* Pad short packets */
2480 if (cb < 60)
2481 {
2482 memset(rxPacket + cb, 0, 60 - cb);
2483 cb = 60;
2484 }
2485 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2486 {
2487 STAM_PROFILE_ADV_START(&pThis->StatReceiveCRC, a);
2488 /*
2489 * Add FCS if CRC stripping is not enabled. Since the value of CRC
2490 * is ignored by most drivers, we may as well save ourselves the trouble
2491 * of calculating it (see EthernetCRC CFGM parameter).
2492 */
2493 if (pThis->fEthernetCRC)
2494 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2495 cb += sizeof(uint32_t);
2496 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveCRC, a);
2497 E1kLog3(("%s Added FCS (cb=%u)\n", pThis->szPrf, cb));
2498 }
2499 /* Compute checksum of complete packet */
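/* RXCSUM.PCSS specifies the byte offset at which packet checksum computation starts. */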
2500 size_t cbCSumStart = RT_MIN(GET_BITS(RXCSUM, PCSS), cb);
2501 uint16_t checksum = e1kCSum16(rxPacket + cbCSumStart, cb - cbCSumStart);
2502 e1kRxChecksumOffload(pThis, rxPacket, cb, &status);
2503
2504 /* Update stats */
2505 E1K_INC_CNT32(GPRC);
2506 if (e1kIsBroadcast(pvBuf))
2507 E1K_INC_CNT32(BPRC);
2508 else if (e1kIsMulticast(pvBuf))
2509 E1K_INC_CNT32(MPRC);
2510 /* Update octet receive counter */
2511 E1K_ADD_CNT64(GORCL, GORCH, cb);
2512 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
2513 if (cb == 64)
2514 E1K_INC_CNT32(PRC64);
2515 else if (cb < 128)
2516 E1K_INC_CNT32(PRC127);
2517 else if (cb < 256)
2518 E1K_INC_CNT32(PRC255);
2519 else if (cb < 512)
2520 E1K_INC_CNT32(PRC511);
2521 else if (cb < 1024)
2522 E1K_INC_CNT32(PRC1023);
2523 else
2524 E1K_INC_CNT32(PRC1522);
2525
2526 E1K_INC_ISTAT_CNT(pThis->uStatRxFrm);
2527
2528# ifdef E1K_WITH_RXD_CACHE
2529 while (cb > 0)
2530 {
2531 E1KRXDESC *pDesc = e1kRxDGet(pDevIns, pThis);
2532
2533 if (pDesc == NULL)
2534 {
2535 E1kLog(("%s Out of receive buffers, dropping the packet "
2536 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2537 pThis->szPrf, cb, e1kRxDInCache(pThis), RDH, RDT));
2538 break;
2539 }
2540# else /* !E1K_WITH_RXD_CACHE */
2541 if (RDH == RDT)
2542 {
2543 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2544 pThis->szPrf));
2545 }
2546 /* Store the packet to receive buffers */
2547 while (RDH != RDT)
2548 {
2549 /* Load the descriptor pointed by head */
2550 E1KRXDESC desc, *pDesc = &desc;
2551 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
2552# endif /* !E1K_WITH_RXD_CACHE */
2553 if (pDesc->u64BufAddr)
2554 {
2555 uint16_t u16RxBufferSize = pThis->u16RxBSize; /* see @bugref{9427} */
2556
2557 /* Update descriptor */
2558 pDesc->status = status;
2559 pDesc->u16Checksum = checksum;
2560 pDesc->status.fDD = true;
2561
2562 /*
2563 * We need to leave Rx critical section here or we risk deadlocking
2564 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2565 * page or has an access handler associated with it.
2566 * Note that it is safe to leave the critical section here since
2567 * e1kRegWriteRDT() never modifies RDH. It never touches already
2568 * fetched RxD cache entries either.
2569 */
2570 if (cb > u16RxBufferSize)
2571 {
2572 pDesc->status.fEOP = false;
2573 e1kCsRxLeave(pThis);
2574 e1kStoreRxFragment(pDevIns, pThis, pDesc, ptr, u16RxBufferSize);
2575 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2576 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2577 return rc;
2578 ptr += u16RxBufferSize;
2579 cb -= u16RxBufferSize;
2580 }
2581 else
2582 {
2583 pDesc->status.fEOP = true;
2584 e1kCsRxLeave(pThis);
2585 e1kStoreRxFragment(pDevIns, pThis, pDesc, ptr, cb);
2586# ifdef E1K_WITH_RXD_CACHE
2587 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2588 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2589 return rc;
2590 cb = 0;
2591# else /* !E1K_WITH_RXD_CACHE */
2592 pThis->led.Actual.s.fReading = 0;
2593 return VINF_SUCCESS;
2594# endif /* !E1K_WITH_RXD_CACHE */
2595 }
2596 /*
2597 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2598 * is not defined.
2599 */
2600 }
2601# ifdef E1K_WITH_RXD_CACHE
2602 /* Write back the descriptor. */
2603 pDesc->status.fDD = true;
2604 e1kRxDPut(pDevIns, pThis, pDesc);
2605# else /* !E1K_WITH_RXD_CACHE */
2606 else
2607 {
2608 /* Write back the descriptor. */
2609 pDesc->status.fDD = true;
2610 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2611 e1kAdvanceRDH(pDevIns, pThis);
2612 }
2613# endif /* !E1K_WITH_RXD_CACHE */
2614 }
2615
2616 if (cb > 0)
2617 E1kLog(("%s Out of receive buffers, dropping %u bytes\n", pThis->szPrf, cb));
2618
2619 pThis->led.Actual.s.fReading = 0;
2620
2621 e1kCsRxLeave(pThis);
2622# ifdef E1K_WITH_RXD_CACHE
2623 /* Complete packet has been stored -- it is time to let the guest know. */
2624# ifdef E1K_USE_RX_TIMERS
2625 if (RDTR)
2626 {
2627 /* Arm the timer to fire in RDTR usec (discard .024) */
2628 e1kArmTimer(pDevIns, pThis, pThis->hRIDTimer, RDTR);
2629 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2630 if (RADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hRADTimer))
2631 e1kArmTimer(pDevIns, pThis, pThis->hRADTimer, RADV);
2632 }
2633 else
2634 {
2635# endif /* E1K_USE_RX_TIMERS */
2636 /* 0 delay means immediate interrupt */
2637 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2638 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
2639# ifdef E1K_USE_RX_TIMERS
2640 }
2641# endif /* E1K_USE_RX_TIMERS */
2642# endif /* E1K_WITH_RXD_CACHE */
2643
2644 return VINF_SUCCESS;
2645#else /* !IN_RING3 */
2646 RT_NOREF(pDevIns, pThis, pvBuf, cb, status);
2647 return VERR_INTERNAL_ERROR_2;
2648#endif /* !IN_RING3 */
2649}
2650
2651
2652#ifdef IN_RING3
2653/**
2654 * Bring the link up after the configured delay, 5 seconds by default.
2655 *
2656 * @param pDevIns The device instance.
2657 * @param pThis The device state structure.
2658 * @thread any
2659 */
2660DECLINLINE(void) e1kBringLinkUpDelayed(PPDMDEVINS pDevIns, PE1KSTATE pThis)
2661{
2662 E1kLog(("%s Will bring up the link in %d seconds...\n",
2663 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
2664 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, pThis->cMsLinkUpDelay * 1000);
2665}
2666
2667/**
2668 * Bring up the link immediately.
2669 *
2670 * @param pDevIns The device instance.
2671 * @param pThis The device state structure.
2672 * @param pThisCC The current context instance data.
2673 */
2674DECLINLINE(void) e1kR3LinkUp(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2675{
2676 E1kLog(("%s Link is up\n", pThis->szPrf));
2677 STATUS |= STATUS_LU;
2678 Phy::setLinkStatus(&pThis->phy, true);
2679 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2680 if (pThisCC->pDrvR3)
2681 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_UP);
2682 /* Trigger processing of pending TX descriptors (see @bugref{8942}). */
2683 PDMDevHlpTaskTrigger(pDevIns, pThis->hTxTask);
2684}
2685
2686/**
2687 * Bring down the link immediately.
2688 *
2689 * @param pDevIns The device instance.
2690 * @param pThis The device state structure.
2691 * @param pThisCC The current context instance data.
2692 */
2693DECLINLINE(void) e1kR3LinkDown(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2694{
2695 E1kLog(("%s Link is down\n", pThis->szPrf));
2696 STATUS &= ~STATUS_LU;
2697#ifdef E1K_LSC_ON_RESET
2698 Phy::setLinkStatus(&pThis->phy, false);
2699#endif /* E1K_LSC_ON_RESET */
2700 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2701 if (pThisCC->pDrvR3)
2702 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2703}
2704
2705/**
2706 * Bring down the link temporarily.
2707 *
2708 * @param pDevIns The device instance.
2709 * @param pThis The device state structure.
2710 * @param pThisCC The current context instance data.
2711 */
2712DECLINLINE(void) e1kR3LinkDownTemp(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2713{
2714 E1kLog(("%s Link is down temporarily\n", pThis->szPrf));
2715 STATUS &= ~STATUS_LU;
2716 Phy::setLinkStatus(&pThis->phy, false);
2717 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2718 /*
2719 * Notifying the associated driver that the link went down (even temporarily)
2720 * seems to be the right thing, but it was not done before. This may cause
2721 * a regression if the driver does not expect the link to go down as a result
2722 * of sending PDMNETWORKLINKSTATE_DOWN_RESUME to this device. Earlier versions
2723 * of code notified the driver that the link was up! See @bugref{7057}.
2724 */
2725 if (pThisCC->pDrvR3)
2726 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2727 e1kBringLinkUpDelayed(pDevIns, pThis);
2728}
2729#endif /* IN_RING3 */
2730
2731#if 0 /* unused */
2732/**
2733 * Read handler for Device Control register.
2734 *
2735 * Reflects the MDIO pin state from the PHY when the pin is configured as input.
2736 *
2737 * @returns VBox status code.
2738 *
2739 * @param pThis The device state structure.
2740 * @param offset Register offset in memory-mapped frame.
2741 * @param index Register index in register array.
2742 * @param mask Used to implement partial reads (8 and 16-bit).
2743 */
2744static int e1kRegReadCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2745{
2746 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2747 pThis->szPrf, (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2748 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2749 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2750 {
2751 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2752 if (Phy::readMDIO(&pThis->phy))
2753 *pu32Value = CTRL | CTRL_MDIO;
2754 else
2755 *pu32Value = CTRL & ~CTRL_MDIO;
2756 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2757 pThis->szPrf, !!(*pu32Value & CTRL_MDIO)));
2758 }
2759 else
2760 {
2761 /* MDIO pin is used for output, ignore it */
2762 *pu32Value = CTRL;
2763 }
2764 return VINF_SUCCESS;
2765}
2766#endif /* unused */
2767
2768/**
2769 * A callback used by PHY to indicate that the link needs to be updated due to
2770 * reset of PHY.
2771 *
2772 * @param pDevIns The device instance.
2773 * @thread any
2774 */
2775void e1kPhyLinkResetCallback(PPDMDEVINS pDevIns)
2776{
2777 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
2778
2779 /* Make sure we have cable connected and MAC can talk to PHY */
2780 if (pThis->fCableConnected && (CTRL & CTRL_SLU))
2781 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, E1K_INIT_LINKUP_DELAY_US);
2782}
2783
2784/**
2785 * Write handler for Device Control register.
2786 *
2787 * Handles reset.
2788 *
2789 * @param pThis The device state structure.
2790 * @param offset Register offset in memory-mapped frame.
2791 * @param index Register index in register array.
2792 * @param value The value to store.
2793 * @param mask Used to implement partial writes (8 and 16-bit).
2794 * @thread EMT
2795 */
2796static int e1kRegWriteCTRL(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2797{
2798 int rc = VINF_SUCCESS;
2799
2800 if (value & CTRL_RESET)
2801 { /* RST */
2802#ifndef IN_RING3
2803 return VINF_IOM_R3_MMIO_WRITE;
2804#else
2805 e1kR3HardReset(pDevIns, pThis, PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC));
2806#endif
2807 }
2808 else
2809 {
2810#ifdef E1K_LSC_ON_SLU
2811 /*
2812 * When the guest changes 'Set Link Up' bit from 0 to 1 we check if
2813 * the link is down and the cable is connected, and if they are we
2814 * bring the link up, see @bugref{8624}.
2815 */
2816 if ( (value & CTRL_SLU)
2817 && !(CTRL & CTRL_SLU)
2818 && pThis->fCableConnected
2819 && !(STATUS & STATUS_LU))
2820 {
2821 /* It should take about 2 seconds for the link to come up */
2822 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, E1K_INIT_LINKUP_DELAY_US);
2823 }
2824#else /* !E1K_LSC_ON_SLU */
2825 if ( (value & CTRL_SLU)
2826 && !(CTRL & CTRL_SLU)
2827 && pThis->fCableConnected
2828 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hLUTimer))
2829 {
2830 /* PXE does not use LSC interrupts, see @bugref{9113}. */
2831 STATUS |= STATUS_LU;
2832 }
2833#endif /* !E1K_LSC_ON_SLU */
2834 if ((value & CTRL_VME) != (CTRL & CTRL_VME))
2835 {
2836 E1kLog(("%s VLAN Mode %s\n", pThis->szPrf, (value & CTRL_VME) ? "Enabled" : "Disabled"));
2837 }
2838 Log7(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2839 pThis->szPrf, (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2840 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2841 if (value & CTRL_MDC)
2842 {
2843 if (value & CTRL_MDIO_DIR)
2844 {
2845 Log7(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2846 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2847 Phy::writeMDIO(&pThis->phy, !!(value & CTRL_MDIO), pDevIns);
2848 }
2849 else
2850 {
2851 if (Phy::readMDIO(&pThis->phy))
2852 value |= CTRL_MDIO;
2853 else
2854 value &= ~CTRL_MDIO;
2855 Log7(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2856 }
2857 }
2858 rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
2859 }
2860
2861 return rc;
2862}
2863
2864/**
2865 * Write handler for EEPROM/Flash Control/Data register.
2866 *
2867 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2868 *
2869 * @param pThis The device state structure.
2870 * @param offset Register offset in memory-mapped frame.
2871 * @param index Register index in register array.
2872 * @param value The value to store.
2873 * @param mask Used to implement partial writes (8 and 16-bit).
2874 * @thread EMT
2875 */
2876static int e1kRegWriteEECD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2877{
2878 RT_NOREF(pDevIns, offset, index);
2879#ifdef IN_RING3
2880 /* So far we are concerned with lower byte only */
2881 if ((EECD & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2882 {
2883 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2884 /* Note: 82543GC does not need to request EEPROM access */
2885 STAM_PROFILE_ADV_START(&pThis->StatEEPROMWrite, a);
2886 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
2887 pThisCC->eeprom.write(value & EECD_EE_WIRES);
2888 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMWrite, a);
2889 }
2890 if (value & EECD_EE_REQ)
2891 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2892 else
2893 EECD &= ~EECD_EE_GNT;
2894 //e1kRegWriteDefault(pThis, offset, index, value );
2895
2896 return VINF_SUCCESS;
2897#else /* !IN_RING3 */
2898 RT_NOREF(pThis, value);
2899 return VINF_IOM_R3_MMIO_WRITE;
2900#endif /* !IN_RING3 */
2901}
2902
2903/**
2904 * Read handler for EEPROM/Flash Control/Data register.
2905 *
2906 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2907 *
2908 * @returns VBox status code.
2909 *
2910 * @param pThis The device state structure.
2911 * @param offset Register offset in memory-mapped frame.
2912 * @param index Register index in register array.
2913 * @param mask Used to implement partial reads (8 and 16-bit).
2914 * @thread EMT
2915 */
2916static int e1kRegReadEECD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2917{
2918#ifdef IN_RING3
2919 uint32_t value;
2920 int rc = e1kRegReadDefault(pDevIns, pThis, offset, index, &value);
2921 if (RT_SUCCESS(rc))
2922 {
2923 if ((value & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2924 {
2925 /* Note: 82543GC does not need to request EEPROM access */
2926 /* Access to EEPROM granted -- get 4-wire bits to EEPROM device */
2927 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2928 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
2929 value |= pThisCC->eeprom.read();
2930 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2931 }
2932 *pu32Value = value;
2933 }
2934
2935 return rc;
2936#else /* !IN_RING3 */
2937 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(pu32Value);
2938 return VINF_IOM_R3_MMIO_READ;
2939#endif /* !IN_RING3 */
2940}
2941
2942/**
2943 * Write handler for EEPROM Read register.
2944 *
2945 * Handles EEPROM word access requests, reads EEPROM and stores the result
2946 * into DATA field.
2947 *
2948 * @param pThis The device state structure.
2949 * @param offset Register offset in memory-mapped frame.
2950 * @param index Register index in register array.
2951 * @param value The value to store.
2952 * @param mask Used to implement partial writes (8 and 16-bit).
2953 * @thread EMT
2954 */
2955static int e1kRegWriteEERD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2956{
2957#ifdef IN_RING3
2958 /* Make use of 'writable' and 'readable' masks. */
2959 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
2960 /* DONE and DATA are set only if read was triggered by START. */
2961 if (value & EERD_START)
2962 {
2963 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2964 uint16_t tmp;
2965 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
2966 if (pThisCC->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2967 SET_BITS(EERD, DATA, tmp);
2968 EERD |= EERD_DONE;
2969 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2970 }
2971
2972 return VINF_SUCCESS;
2973#else /* !IN_RING3 */
2974 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
2975 return VINF_IOM_R3_MMIO_WRITE;
2976#endif /* !IN_RING3 */
2977}
2978
2979
2980/**
2981 * Write handler for MDI Control register.
2982 *
2983 * Handles PHY read/write requests; forwards requests to internal PHY device.
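 *
 * Only the internal PHY at MDIC.PHYADD == 1 is emulated; accesses to any
 * other PHY address complete immediately with both MDIC_READY and
 * MDIC_ERROR set, so drivers that scan the MDIO bus keep probing.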
2984 *
2985 * @param pThis The device state structure.
2986 * @param offset Register offset in memory-mapped frame.
2987 * @param index Register index in register array.
2988 * @param value The value to store.
2989 * @param mask Used to implement partial writes (8 and 16-bit).
2990 * @thread EMT
2991 */
2992static int e1kRegWriteMDIC(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2993{
2994 if (value & MDIC_INT_EN)
2995 {
2996 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
2997 pThis->szPrf));
2998 }
2999 else if (value & MDIC_READY)
3000 {
3001 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
3002 pThis->szPrf));
3003 }
3004 else if (GET_BITS_V(value, MDIC, PHY) != 1)
3005 {
3006 E1kLog(("%s WARNING! Access to invalid PHY detected, phy=%d.\n",
3007 pThis->szPrf, GET_BITS_V(value, MDIC, PHY)));
3008 /*
3009 * Some drivers scan the MDIO bus for a PHY. We can work with these
3010 * drivers if we set MDIC_READY and MDIC_ERROR when there isn't a PHY
3011 * at the requested address, see @bugref{7346}.
3012 */
3013 MDIC = MDIC_READY | MDIC_ERROR;
3014 }
3015 else
3016 {
3017 /* Store the value */
3018 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3019 STAM_COUNTER_INC(&pThis->StatPHYAccesses);
3020 /* Forward op to PHY */
3021 if (value & MDIC_OP_READ)
3022 SET_BITS(MDIC, DATA, Phy::readRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), pDevIns));
3023 else
3024 Phy::writeRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK, pDevIns);
3025 /* Let software know that we are done */
3026 MDIC |= MDIC_READY;
3027 }
3028
3029 return VINF_SUCCESS;
3030}
3031
3032/**
3033 * Write handler for Interrupt Cause Read register.
3034 *
3035 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
3036 *
3037 * @param pThis The device state structure.
3038 * @param offset Register offset in memory-mapped frame.
3039 * @param index Register index in register array.
3040 * @param value The value to store.
3041 * @param mask Used to implement partial writes (8 and 16-bit).
3042 * @thread EMT
3043 */
3044static int e1kRegWriteICR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3045{
3046 ICR &= ~value;
3047
3048 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index);
3049 return VINF_SUCCESS;
3050}
3051
3052/**
3053 * Read handler for Interrupt Cause Read register.
3054 *
3055 * Reading this register acknowledges all interrupts.
3056 *
3057 * @returns VBox status code.
3058 *
3059 * @param pThis The device state structure.
3060 * @param offset Register offset in memory-mapped frame.
3061 * @param index Register index in register array.
3062 * @param mask Not used.
3063 * @thread EMT
3064 */
3065static int e1kRegReadICR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
3066{
3067 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_READ);
3068 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3069 return rc;
3070
3071 uint32_t value = 0;
3072 rc = e1kRegReadDefault(pDevIns, pThis, offset, index, &value);
3073 if (RT_SUCCESS(rc))
3074 {
3075 if (value)
3076 {
3077 if (!pThis->fIntRaised)
3078 E1K_INC_ISTAT_CNT(pThis->uStatNoIntICR);
3079 /*
3080 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
3081 * with disabled interrupts.
3082 */
3083 //if (IMS)
3084 if (1)
3085 {
3086 /*
3087 * Interrupts were enabled -- we are supposedly at the very
3088 * beginning of interrupt handler
3089 */
3090 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
3091 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", pThis->szPrf, ICR));
3092 /* Clear all pending interrupts */
3093 ICR = 0;
3094 pThis->fIntRaised = false;
3095 /* Lower(0) INTA(0) */
3096 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
3097
3098 pThis->u64AckedAt = PDMDevHlpTimerGet(pDevIns, pThis->hIntTimer);
3099 if (pThis->fIntMaskUsed)
3100 pThis->fDelayInts = true;
3101 }
3102 else
3103 {
3104 /*
3105 * Interrupts are disabled -- in Windows guests ICR read is done
3106 * just before re-enabling interrupts
3107 */
3108 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
3109 }
3110 }
3111 *pu32Value = value;
3112 }
3113 e1kCsLeave(pThis);
3114
3115 return rc;
3116}
3117
3118/**
3119 * Write handler for Interrupt Cause Set register.
3120 *
3121 * Bits corresponding to 1s in 'value' will be set in ICR register.
3122 *
3123 * @param pThis The device state structure.
3124 * @param offset Register offset in memory-mapped frame.
3125 * @param index Register index in register array.
3126 * @param value The value to store.
3127 * @param mask Used to implement partial writes (8 and 16-bit).
3128 * @thread EMT
3129 */
3130static int e1kRegWriteICS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3131{
3132 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3133 E1K_INC_ISTAT_CNT(pThis->uStatIntICS);
3134 return e1kRaiseInterrupt(pDevIns, pThis, VINF_IOM_R3_MMIO_WRITE, value & g_aE1kRegMap[ICS_IDX].writable);
3135}
3136
3137/**
3138 * Write handler for Interrupt Mask Set register.
3139 *
3140 * Will trigger pending interrupts.
3141 *
3142 * @param pThis The device state structure.
3143 * @param offset Register offset in memory-mapped frame.
3144 * @param index Register index in register array.
3145 * @param value The value to store.
3146 * @param mask Used to implement partial writes (8 and 16-bit).
3147 * @thread EMT
3148 */
3149static int e1kRegWriteIMS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3150{
3151 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3152
3153 IMS |= value;
3154 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
3155 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", pThis->szPrf));
3156 /*
3157 * We cannot raise an interrupt immediately here, as that occasionally causes an interrupt
3158 * storm in Windows guests (see @bugref{8624}, @bugref{5023}); we postpone it instead.
3159 */
3160 if ((ICR & IMS) && !pThis->fLocked)
3161 {
3162 E1K_INC_ISTAT_CNT(pThis->uStatIntIMS);
3163 e1kPostponeInterrupt(pDevIns, pThis, E1K_IMS_INT_DELAY_NS);
3164 }
3165
3166 return VINF_SUCCESS;
3167}
3168
3169/**
3170 * Write handler for Interrupt Mask Clear register.
3171 *
3172 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
3173 *
3174 * @param pThis The device state structure.
3175 * @param offset Register offset in memory-mapped frame.
3176 * @param index Register index in register array.
3177 * @param value The value to store.
3178 * @param mask Used to implement partial writes (8 and 16-bit).
3179 * @thread EMT
3180 */
3181static int e1kRegWriteIMC(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3182{
3183 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3184
3185 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3186 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3187 return rc;
3188 if (pThis->fIntRaised)
3189 {
3190 /*
3191 * Technically we should reset fIntRaised in ICR read handler, but it will cause
3192 * Windows to freeze since it may receive an interrupt while still in the very beginning
3193 * of interrupt handler.
3194 */
3195 E1K_INC_ISTAT_CNT(pThis->uStatIntLower);
3196 STAM_COUNTER_INC(&pThis->StatIntsPrevented);
3197 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
3198 /* Lower(0) INTA(0) */
3199 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
3200 pThis->fIntRaised = false;
3201 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
3202 }
3203 IMS &= ~value;
3204 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", pThis->szPrf));
3205 e1kCsLeave(pThis);
3206
3207 return VINF_SUCCESS;
3208}
3209
3210/**
3211 * Write handler for Receive Control register.
3212 *
3213 * @param pThis The device state structure.
3214 * @param offset Register offset in memory-mapped frame.
3215 * @param index Register index in register array.
3216 * @param value The value to store.
3217 * @param mask Used to implement partial writes (8 and 16-bit).
3218 * @thread EMT
3219 */
3220static int e1kRegWriteRCTL(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3221{
3222 /* Update promiscuous mode */
3223 bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
3224 if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
3225 {
3226 /* Promiscuity has changed, pass the knowledge on. */
3227#ifndef IN_RING3
3228 return VINF_IOM_R3_MMIO_WRITE;
3229#else
3230 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3231 if (pThisCC->pDrvR3)
3232 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, fBecomePromiscous);
3233#endif
3234 }
3235
3236 /* Adjust receive buffer size */
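    /*
     * RCTL.BSIZE selects 2048/1024/512/256 bytes; with RCTL.BSEX set the
     * size is multiplied by 16, giving 16384/8192/4096 (BSIZE=00 with BSEX
     * is reserved according to the spec). The result is clamped to
     * E1K_MAX_RX_PKT_SIZE below.
     */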
3237 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
3238 if (value & RCTL_BSEX)
3239 cbRxBuf *= 16;
3240 if (cbRxBuf > E1K_MAX_RX_PKT_SIZE)
3241 cbRxBuf = E1K_MAX_RX_PKT_SIZE;
3242 if (cbRxBuf != pThis->u16RxBSize)
3243 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
3244 pThis->szPrf, cbRxBuf, pThis->u16RxBSize));
3245 pThis->u16RxBSize = cbRxBuf;
3246
3247 /* Update the register */
3248 return e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3249}
3250
3251/**
3252 * Write handler for Packet Buffer Allocation register.
3253 *
3254 * TXA = 64 - RXA.
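 * The adapter has a 64 KB on-chip packet buffer; whatever is not allocated
 * to the receive part (RXA, in KB) is left for transmit.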
3255 *
3256 * @param pThis The device state structure.
3257 * @param offset Register offset in memory-mapped frame.
3258 * @param index Register index in register array.
3259 * @param value The value to store.
3260 * @param mask Used to implement partial writes (8 and 16-bit).
3261 * @thread EMT
3262 */
3263static int e1kRegWritePBA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3264{
3265 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3266 PBA_st->txa = 64 - PBA_st->rxa;
3267
3268 return VINF_SUCCESS;
3269}
3270
3271/**
3272 * Write handler for Receive Descriptor Tail register.
3273 *
3274 * @remarks Write into RDT forces switch to HC and signal to
3275 * e1kR3NetworkDown_WaitReceiveAvail().
3276 *
3277 * @returns VBox status code.
3278 *
3279 * @param pThis The device state structure.
3280 * @param offset Register offset in memory-mapped frame.
3281 * @param index Register index in register array.
3282 * @param value The value to store.
3283 * @param mask Used to implement partial writes (8 and 16-bit).
3284 * @thread EMT
3285 */
3286static int e1kRegWriteRDT(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3287{
3288#ifndef IN_RING3
3289 /* XXX */
3290// return VINF_IOM_R3_MMIO_WRITE;
3291#endif
3292 int rc = e1kCsRxEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3293 if (RT_LIKELY(rc == VINF_SUCCESS))
3294 {
3295 E1kLog(("%s e1kRegWriteRDT\n", pThis->szPrf));
3296#ifndef E1K_WITH_RXD_CACHE
3297 /*
3298 * Some drivers advance RDT too far, so that it equals RDH. This
3299 * somehow manages to work with real hardware but not with this
3300 * emulated device. We can work with these drivers if we just
3301 * write 1 less when we see a driver writing RDT equal to RDH,
3302 * see @bugref{7346}.
3303 */
3304 if (value == RDH)
3305 {
3306 if (RDH == 0)
3307 value = (RDLEN / sizeof(E1KRXDESC)) - 1;
3308 else
3309 value = RDH - 1;
3310 }
3311#endif /* !E1K_WITH_RXD_CACHE */
3312 rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3313#ifdef E1K_WITH_RXD_CACHE
3314 /*
3315 * We need to fetch descriptors now as RDT may go whole circle
3316 * before we attempt to store a received packet. For example,
3317 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
3318 * size being only 8 descriptors! Note that we fetch descriptors
3319 * only when the cache is empty to reduce the number of memory reads
3320 * in case of frequent RDT writes. Don't fetch anything when the
3321 * receiver is disabled either as RDH, RDT, RDLEN can be in some
3322 * messed up state.
3323 * Note that even though the cache may seem empty, meaning that there are
3324 * no more available descriptors in it, it may still be in use by the RX
3325 * thread, which has not yet written the last descriptor back but has
3326 * temporarily released the RX lock in order to write the packet body
3327 * to the descriptor's buffer. At this point we are still going to do the
3328 * prefetch, but it won't actually fetch anything if there are no unused slots in
3329 * our "empty" cache (nRxDFetched==E1K_RXD_CACHE_SIZE). We must not
3330 * reset the cache here even if it appears empty. It will be reset at
3331 * a later point in e1kRxDGet().
3332 */
3333 if (e1kRxDIsCacheEmpty(pThis) && (RCTL & RCTL_EN))
3334 e1kRxDPrefetch(pDevIns, pThis);
3335#endif /* E1K_WITH_RXD_CACHE */
3336 e1kCsRxLeave(pThis);
3337 if (RT_SUCCESS(rc))
3338 {
3339/** @todo bird: Use SUPSem* for this so we can signal it in ring-0 as well
3340 * without requiring any context switches. We should also check the
3341 * wait condition before bothering to queue the item as we're currently
3342 * queuing thousands of items per second here in a normal transmit
3343 * scenario. Expect performance changes when fixing this! */
3344#ifdef IN_RING3
3345 /* Signal that we have more receive descriptors available. */
3346 e1kWakeupReceive(pDevIns);
3347#else
3348 PDMDevHlpTaskTrigger(pDevIns, pThis->hCanRxTask);
3349#endif
3350 }
3351 }
3352 return rc;
3353}
3354
3355/**
3356 * Write handler for Receive Delay Timer register.
3357 *
3358 * @param pThis The device state structure.
3359 * @param offset Register offset in memory-mapped frame.
3360 * @param index Register index in register array.
3361 * @param value The value to store.
3362 * @param mask Used to implement partial writes (8 and 16-bit).
3363 * @thread EMT
3364 */
3365static int e1kRegWriteRDTR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3366{
3367 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3368 if (value & RDTR_FPD)
3369 {
3370 /* Flush requested, cancel both timers and raise interrupt */
3371#ifdef E1K_USE_RX_TIMERS
3372 e1kCancelTimer(pDevIns, pThis, pThis->hRIDTimer);
3373 e1kCancelTimer(pDevIns, pThis, pThis->hRADTimer);
3374#endif
3375 E1K_INC_ISTAT_CNT(pThis->uStatIntRDTR);
3376 return e1kRaiseInterrupt(pDevIns, pThis, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3377 }
3378
3379 return VINF_SUCCESS;
3380}
3381
3382DECLINLINE(uint32_t) e1kGetTxLen(PE1KSTATE pThis)
3383{
3384 /**
3385 * Make sure TDT won't change during computation. EMT may modify TDT at
3386 * any moment.
3387 */
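    /*
     * Example (hypothetical values): with a 16-descriptor ring, TDH = 14 and
     * TDT = 2 the tail has wrapped around, so the expression below yields
     * 16 - 14 + 2 = 4 descriptors awaiting transmission.
     */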
3388 uint32_t tdt = TDT;
3389 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
3390}
3391
3392#ifdef IN_RING3
3393
3394# ifdef E1K_TX_DELAY
3395/**
3396 * Transmit Delay Timer handler.
3397 *
3398 * @remarks We only get here when the timer expires.
3399 *
3400 * @param pDevIns Pointer to device instance structure.
3401 * @param pTimer Pointer to the timer.
3402 * @param pvUser NULL.
3403 * @thread EMT
3404 */
3405static DECLCALLBACK(void) e1kTxDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3406{
3407 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3408 Assert(PDMCritSectIsOwner(&pThis->csTx));
3409
3410 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayExp);
3411# ifdef E1K_INT_STATS
3412 uint64_t u64Elapsed = RTTimeNanoTS() - pThis->u64ArmedAt;
3413 if (u64Elapsed > pThis->uStatMaxTxDelay)
3414 pThis->uStatMaxTxDelay = u64Elapsed;
3415# endif
3416 int rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
3417 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3418}
3419# endif /* E1K_TX_DELAY */
3420
3421//# ifdef E1K_USE_TX_TIMERS
3422
3423/**
3424 * Transmit Interrupt Delay Timer handler.
3425 *
3426 * @remarks We only get here when the timer expires.
3427 *
3428 * @param pDevIns Pointer to device instance structure.
3429 * @param pTimer Pointer to the timer.
3430 * @param pvUser NULL.
3431 * @thread EMT
3432 */
3433static DECLCALLBACK(void) e1kTxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3434{
3435 RT_NOREF(pDevIns);
3436 RT_NOREF(pTimer);
3437 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3438
3439 E1K_INC_ISTAT_CNT(pThis->uStatTID);
3440 /* Cancel absolute delay timer as we have already got attention */
3441# ifndef E1K_NO_TAD
3442 e1kCancelTimer(pDevIns, pThis, pThis->hTADTimer);
3443# endif
3444 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXDW);
3445}
3446
3447/**
3448 * Transmit Absolute Delay Timer handler.
3449 *
3450 * @remarks We only get here when the timer expires.
3451 *
3452 * @param pDevIns Pointer to device instance structure.
3453 * @param pTimer Pointer to the timer.
3454 * @param pvUser NULL.
3455 * @thread EMT
3456 */
3457static DECLCALLBACK(void) e1kTxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3458{
3459 RT_NOREF(pDevIns);
3460 RT_NOREF(pTimer);
3461 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3462
3463 E1K_INC_ISTAT_CNT(pThis->uStatTAD);
3464 /* Cancel interrupt delay timer as we have already got attention */
3465 e1kCancelTimer(pDevIns, pThis, pThis->hTIDTimer);
3466 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXDW);
3467}
3468
3469//# endif /* E1K_USE_TX_TIMERS */
3470# ifdef E1K_USE_RX_TIMERS
3471
3472/**
3473 * Receive Interrupt Delay Timer handler.
3474 *
3475 * @remarks We only get here when the timer expires.
3476 *
3477 * @param pDevIns Pointer to device instance structure.
3478 * @param pTimer Pointer to the timer.
3479 * @param pvUser NULL.
3480 * @thread EMT
3481 */
3482static DECLCALLBACK(void) e1kRxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3483{
3484 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3485
3486 E1K_INC_ISTAT_CNT(pThis->uStatRID);
3487 /* Cancel absolute delay timer as we have already got attention */
3488 e1kCancelTimer(pDevIns, pThis, pThis->hRADTimer);
3489 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
3490}
3491
3492/**
3493 * Receive Absolute Delay Timer handler.
3494 *
3495 * @remarks We only get here when the timer expires.
3496 *
3497 * @param pDevIns Pointer to device instance structure.
3498 * @param pTimer Pointer to the timer.
3499 * @param pvUser NULL.
3500 * @thread EMT
3501 */
3502static DECLCALLBACK(void) e1kRxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3503{
3504 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3505
3506 E1K_INC_ISTAT_CNT(pThis->uStatRAD);
3507 /* Cancel interrupt delay timer as we have already got attention */
3508 e1kCancelTimer(pDevIns, pThis, pThis->hRIDTimer);
3509 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
3510}
3511
3512# endif /* E1K_USE_RX_TIMERS */
3513
3514/**
3515 * Late Interrupt Timer handler.
3516 *
3517 * @param pDevIns Pointer to device instance structure.
3518 * @param pTimer Pointer to the timer.
3519 * @param pvUser NULL.
3520 * @thread EMT
3521 */
3522static DECLCALLBACK(void) e1kLateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3523{
3524 RT_NOREF(pDevIns, pTimer);
3525 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3526
3527 STAM_PROFILE_ADV_START(&pThis->StatLateIntTimer, a);
3528 STAM_COUNTER_INC(&pThis->StatLateInts);
3529 E1K_INC_ISTAT_CNT(pThis->uStatIntLate);
3530# if 0
3531 if (pThis->iStatIntLost > -100)
3532 pThis->iStatIntLost--;
3533# endif
3534 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, 0);
3535 STAM_PROFILE_ADV_STOP(&pThis->StatLateIntTimer, a);
3536}
3537
3538/**
3539 * Link Up Timer handler.
3540 *
3541 * @param pDevIns Pointer to device instance structure.
3542 * @param pTimer Pointer to the timer.
3543 * @param pvUser NULL.
3544 * @thread EMT
3545 */
3546static DECLCALLBACK(void) e1kLinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3547{
3548 RT_NOREF(pTimer);
3549 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3550 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3551
3552 /*
3553 * This can happen if we set the link status to down when the link-up timer was
3554 * already armed (shortly after e1kLoadDone(), or when the cable was disconnected
3555 * and reconnected very quickly). Moreover, 82543GC triggers LSC on reset even
3556 * if the cable is unplugged (see @bugref{8942}).
3557 */
3558 if (pThis->fCableConnected)
3559 {
3560 /* 82543GC does not have an internal PHY */
3561 if (pThis->eChip == E1K_CHIP_82543GC || (CTRL & CTRL_SLU))
3562 e1kR3LinkUp(pDevIns, pThis, pThisCC);
3563 }
3564# ifdef E1K_LSC_ON_RESET
3565 else if (pThis->eChip == E1K_CHIP_82543GC)
3566 e1kR3LinkDown(pDevIns, pThis, pThisCC);
3567# endif /* E1K_LSC_ON_RESET */
3568}
3569
3570#endif /* IN_RING3 */
3571
3572/**
3573 * Sets up the GSO context according to the TSE new context descriptor.
3574 *
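 * The mapping performed below is: IPCSS gives the offset of the IP header
 * (pGso->offHdr1), TUCSS the offset of the TCP/UDP header (pGso->offHdr2),
 * HDRLEN the total size of the headers prepended to every segment, and MSS
 * the maximum payload per segment (pGso->cbMaxSeg).
 *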
3575 * @param pGso The GSO context to setup.
3576 * @param pCtx The context descriptor.
3577 */
3578DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3579{
3580 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3581
3582 /*
3583 * See if the context descriptor describes something that could be TCP or
3584 * UDP over IPv[46].
3585 */
3586 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3587 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3588 {
3589 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3590 return;
3591 }
3592 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3593 {
3594 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3595 return;
3596 }
3597 if (RT_UNLIKELY( pCtx->dw2.fTCP
3598 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3599 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3600 {
3601 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3602 return;
3603 }
3604
3605 /* The end of the TCP/UDP checksum should stop at the end of the packet or at least after the headers. */
3606 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3607 {
3608 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3609 return;
3610 }
3611
3612 /* IPv4 checksum offset. */
3613 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3614 {
3615 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3616 return;
3617 }
3618
3619 /* TCP/UDP checksum offsets. */
3620 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3621 != ( pCtx->dw2.fTCP
3622 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3623 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3624 {
3625 E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->tu.u8CSO, pCtx->tu.u8CSS, pCtx->dw2.fTCP));
3626 return;
3627 }
3628
3629 /*
3630 * Because of internal networking using a 16-bit size field for GSO context
3631 * plus frame, we have to make sure we don't exceed this.
3632 */
3633 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3634 {
3635 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3636 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3637 return;
3638 }
3639
3640 /*
3641 * We're good for now - we'll do more checks when seeing the data.
3642 * So, figure the type of offloading and setup the context.
3643 */
3644 if (pCtx->dw2.fIP)
3645 {
3646 if (pCtx->dw2.fTCP)
3647 {
3648 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3649 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3650 }
3651 else
3652 {
3653 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3654 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3655 }
3656 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3657 * this yet it seems)... */
3658 }
3659 else
3660 {
3661 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /** @todo IPv6 UFO */
3662 if (pCtx->dw2.fTCP)
3663 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3664 else
3665 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3666 }
3667 pGso->offHdr1 = pCtx->ip.u8CSS;
3668 pGso->offHdr2 = pCtx->tu.u8CSS;
3669 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3670 pGso->cbMaxSeg = pCtx->dw3.u16MSS;
3671 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3672 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3673 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3674}
3675
3676/**
3677 * Checks if we can use GSO processing for the current TSE frame.
3678 *
3679 * @param pThis The device state structure.
3680 * @param pGso The GSO context.
3681 * @param pData The first data descriptor of the frame.
3682 * @param pCtx The TSO context descriptor.
3683 */
3684DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3685{
3686 if (!pData->cmd.fTSE)
3687 {
3688 E1kLog2(("e1kCanDoGso: !TSE\n"));
3689 return false;
3690 }
3691 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3692 {
3693 E1kLog(("e1kCanDoGso: VLE\n"));
3694 return false;
3695 }
3696 if (RT_UNLIKELY(!pThis->fGSOEnabled))
3697 {
3698 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3699 return false;
3700 }
3701
3702 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3703 {
3704 case PDMNETWORKGSOTYPE_IPV4_TCP:
3705 case PDMNETWORKGSOTYPE_IPV4_UDP:
3706 if (!pData->dw3.fIXSM)
3707 {
3708 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3709 return false;
3710 }
3711 if (!pData->dw3.fTXSM)
3712 {
3713 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3714 return false;
3715 }
3716 /** @todo what more checks should we perform here? Ethernet frame type? */
3717 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3718 return true;
3719
3720 case PDMNETWORKGSOTYPE_IPV6_TCP:
3721 case PDMNETWORKGSOTYPE_IPV6_UDP:
3722 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3723 {
3724 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3725 return false;
3726 }
3727 if (!pData->dw3.fTXSM)
3728 {
3729 E1kLog(("e1kCanDoGso: !TXSM (IPv6)\n"));
3730 return false;
3731 }
3732 /** @todo what more checks should we perform here? Ethernet frame type? */
3733 E1kLog2(("e1kCanDoGso: OK, IPv6\n"));
3734 return true;
3735
3736 default:
3737 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3738 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3739 return false;
3740 }
3741}
3742
3743/**
3744 * Frees the current xmit buffer.
3745 *
3746 * @param pThis The device state structure.
3747 */
3748static void e1kXmitFreeBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC)
3749{
3750 PPDMSCATTERGATHER pSg = pThisCC->CTX_SUFF(pTxSg);
3751 if (pSg)
3752 {
3753 pThisCC->CTX_SUFF(pTxSg) = NULL;
3754
3755 if (pSg->pvAllocator != pThis)
3756 {
3757 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3758 if (pDrv)
3759 pDrv->pfnFreeBuf(pDrv, pSg);
3760 }
3761 else
3762 {
3763 /* loopback */
3764 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3765 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3766 pSg->fFlags = 0;
3767 pSg->pvAllocator = NULL;
3768 }
3769 }
3770}
3771
3772#ifndef E1K_WITH_TXD_CACHE
3773/**
3774 * Allocates an xmit buffer.
3775 *
3776 * @returns See PDMINETWORKUP::pfnAllocBuf.
3777 * @param pThis The device state structure.
3778 * @param cbMin The minimum frame size.
3779 * @param fExactSize Whether cbMin is exact or if we have to max it
3780 * out to the max MTU size.
3781 * @param fGso Whether this is a GSO frame or not.
3782 */
3783DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC, size_t cbMin, bool fExactSize, bool fGso)
3784{
3785 /* Adjust cbMin if necessary. */
3786 if (!fExactSize)
3787 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3788
3789 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3790 if (RT_UNLIKELY(pThisCC->CTX_SUFF(pTxSg)))
3791 e1kXmitFreeBuf(pThis, pThisCC);
3792 Assert(pThisCC->CTX_SUFF(pTxSg) == NULL);
3793
3794 /*
3795 * Allocate the buffer.
3796 */
3797 PPDMSCATTERGATHER pSg;
3798 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3799 {
3800 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3801 if (RT_UNLIKELY(!pDrv))
3802 return VERR_NET_DOWN;
3803 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pThis->GsoCtx : NULL, &pSg);
3804 if (RT_FAILURE(rc))
3805 {
3806 /* Suspend TX as we are out of buffers atm */
3807 STATUS |= STATUS_TXOFF;
3808 return rc;
3809 }
3810 }
3811 else
3812 {
3813 /* Create a loopback using the fallback buffer and preallocated SG. */
3814 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3815 pSg = &pThis->uTxFallback.Sg;
3816 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3817 pSg->cbUsed = 0;
3818 pSg->cbAvailable = 0;
3819 pSg->pvAllocator = pThis;
3820 pSg->pvUser = NULL; /* No GSO here. */
3821 pSg->cSegs = 1;
3822 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3823 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3824 }
3825
3826 pThisCC->CTX_SUFF(pTxSg) = pSg;
3827 return VINF_SUCCESS;
3828}
3829#else /* E1K_WITH_TXD_CACHE */
3830/**
3831 * Allocates an xmit buffer.
3832 *
3833 * @returns See PDMINETWORKUP::pfnAllocBuf.
3834 * @param pThis The device state structure.
3835 * @param pThisCC The current context instance data.
3838 * @param fGso Whether this is a GSO frame or not.
3839 */
3840DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC, bool fGso)
3841{
3842 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3843 if (RT_UNLIKELY(pThisCC->CTX_SUFF(pTxSg)))
3844 e1kXmitFreeBuf(pThis, pThisCC);
3845 Assert(pThisCC->CTX_SUFF(pTxSg) == NULL);
3846
3847 /*
3848 * Allocate the buffer.
3849 */
3850 PPDMSCATTERGATHER pSg;
3851 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3852 {
3853 if (pThis->cbTxAlloc == 0)
3854 {
3855 /* Zero packet, no need for the buffer */
3856 return VINF_SUCCESS;
3857 }
3858
3859 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3860 if (RT_UNLIKELY(!pDrv))
3861 return VERR_NET_DOWN;
3862 int rc = pDrv->pfnAllocBuf(pDrv, pThis->cbTxAlloc, fGso ? &pThis->GsoCtx : NULL, &pSg);
3863 if (RT_FAILURE(rc))
3864 {
3865 /* Suspend TX as we are out of buffers atm */
3866 STATUS |= STATUS_TXOFF;
3867 return rc;
3868 }
3869 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3870 pThis->szPrf, pThis->cbTxAlloc,
3871 pThis->fVTag ? "VLAN " : "",
3872 pThis->fGSO ? "GSO " : ""));
3873 }
3874 else
3875 {
3876 /* Create a loopback using the fallback buffer and preallocated SG. */
3877 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3878 pSg = &pThis->uTxFallback.Sg;
3879 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3880 pSg->cbUsed = 0;
3881 pSg->cbAvailable = sizeof(pThis->aTxPacketFallback);
3882 pSg->pvAllocator = pThis;
3883 pSg->pvUser = NULL; /* No GSO here. */
3884 pSg->cSegs = 1;
3885 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3886 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3887 }
3888 pThis->cbTxAlloc = 0;
3889
3890 pThisCC->CTX_SUFF(pTxSg) = pSg;
3891 return VINF_SUCCESS;
3892}
3893#endif /* E1K_WITH_TXD_CACHE */
3894
3895/**
3896 * Checks if it's a GSO buffer or not.
3897 *
3898 * @returns true / false.
3899 * @param pTxSg The scatter / gather buffer.
3900 */
3901DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3902{
3903#if 0
3904 if (!pTxSg)
3905 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
3906 if (pTxSg && !pTxSg->pvUser)
3907 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
3908#endif
3909 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3910}
3911
3912#ifndef E1K_WITH_TXD_CACHE
3913/**
3914 * Load transmit descriptor from guest memory.
3915 *
3916 * @param pDevIns The device instance.
3917 * @param pDesc Pointer to descriptor union.
3918 * @param addr Physical address in guest context.
3919 * @thread E1000_TX
3920 */
3921DECLINLINE(void) e1kLoadDesc(PPDMDEVINS pDevIns, E1KTXDESC *pDesc, RTGCPHYS addr)
3922{
3923 PDMDevHlpPhysRead(pDevIns, addr, pDesc, sizeof(E1KTXDESC));
3924}
3925#else /* E1K_WITH_TXD_CACHE */
3926/**
3927 * Load transmit descriptors from guest memory.
3928 *
3929 * We need two physical reads in case the tail wrapped around the end of TX
3930 * descriptor ring.
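 *
 * For example (hypothetical numbers): with an 8-descriptor ring, TDH = 6, an
 * empty cache and 5 descriptors to fetch, the first read covers descriptors
 * 6..7 and the second one wraps around to descriptors 0..2.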
3931 *
3932 * @returns the actual number of descriptors fetched.
3933 * @param pDevIns The device instance.
3934 * @param pThis The device state structure.
3935 * @thread E1000_TX
3936 */
3937DECLINLINE(unsigned) e1kTxDLoadMore(PPDMDEVINS pDevIns, PE1KSTATE pThis)
3938{
3939 Assert(pThis->iTxDCurrent == 0);
3940 /* We've already loaded pThis->nTxDFetched descriptors past TDH. */
3941 unsigned nDescsAvailable = e1kGetTxLen(pThis) - pThis->nTxDFetched;
3942 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pThis->nTxDFetched);
3943 unsigned nDescsTotal = TDLEN / sizeof(E1KTXDESC);
3944 unsigned nFirstNotLoaded = (TDH + pThis->nTxDFetched) % nDescsTotal;
3945 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
3946 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
3947 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
3948 nFirstNotLoaded, nDescsInSingleRead));
3949 if (nDescsToFetch == 0)
3950 return 0;
3951 E1KTXDESC* pFirstEmptyDesc = &pThis->aTxDescriptors[pThis->nTxDFetched];
3952 PDMDevHlpPhysRead(pDevIns,
3953 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3954 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
3955 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3956 pThis->szPrf, nDescsInSingleRead,
3957 TDBAH, TDBAL + TDH * sizeof(E1KTXDESC),
3958 nFirstNotLoaded, TDLEN, TDH, TDT));
3959 if (nDescsToFetch > nDescsInSingleRead)
3960 {
3961 PDMDevHlpPhysRead(pDevIns,
3962 ((uint64_t)TDBAH << 32) + TDBAL,
3963 pFirstEmptyDesc + nDescsInSingleRead,
3964 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
3965 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
3966 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
3967 TDBAH, TDBAL));
3968 }
3969 pThis->nTxDFetched += nDescsToFetch;
3970 return nDescsToFetch;
3971}
3972
3973/**
3974 * Load transmit descriptors from guest memory only if there are no loaded
3975 * descriptors.
3976 *
3977 * @returns true if there are descriptors in cache.
3978 * @param pDevIns The device instance.
3979 * @param pThis The device state structure.
3980 * @thread E1000_TX
3981 */
3982DECLINLINE(bool) e1kTxDLazyLoad(PPDMDEVINS pDevIns, PE1KSTATE pThis)
3983{
3984 if (pThis->nTxDFetched == 0)
3985 return e1kTxDLoadMore(pDevIns, pThis) != 0;
3986 return true;
3987}
3988#endif /* E1K_WITH_TXD_CACHE */
3989
3990/**
3991 * Write back transmit descriptor to guest memory.
3992 *
3993 * @param pDevIns The device instance.
3994 * @param pThis The device state structure.
3995 * @param pDesc Pointer to descriptor union.
3996 * @param addr Physical address in guest context.
3997 * @thread E1000_TX
3998 */
3999DECLINLINE(void) e1kWriteBackDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4000{
4001 /* Only the last half of the descriptor actually changes, but we write back the whole descriptor for simplicity. */
4002 e1kPrintTDesc(pThis, pDesc, "^^^");
4003 PDMDevHlpPCIPhysWrite(pDevIns, addr, pDesc, sizeof(E1KTXDESC));
4004}
4005
4006/**
4007 * Transmit complete frame.
4008 *
4009 * @remarks We skip the FCS since we're not responsible for sending anything to
4010 * a real ethernet wire.
4011 *
4012 * @param pDevIns The device instance.
4013 * @param pThis The device state structure.
4014 * @param pThisCC The current context instance data.
4015 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4016 * @thread E1000_TX
4017 */
4018static void e1kTransmitFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, bool fOnWorkerThread)
4019{
4020 PPDMSCATTERGATHER pSg = pThisCC->CTX_SUFF(pTxSg);
4021 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
4022 Assert(!pSg || pSg->cSegs == 1);
4023
4024 if (cbFrame > 70) /* unqualified guess */
4025 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
4026
4027#ifdef E1K_INT_STATS
4028 if (cbFrame <= 1514)
4029 E1K_INC_ISTAT_CNT(pThis->uStatTx1514);
4030 else if (cbFrame <= 2962)
4031 E1K_INC_ISTAT_CNT(pThis->uStatTx2962);
4032 else if (cbFrame <= 4410)
4033 E1K_INC_ISTAT_CNT(pThis->uStatTx4410);
4034 else if (cbFrame <= 5858)
4035 E1K_INC_ISTAT_CNT(pThis->uStatTx5858);
4036 else if (cbFrame <= 7306)
4037 E1K_INC_ISTAT_CNT(pThis->uStatTx7306);
4038 else if (cbFrame <= 8754)
4039 E1K_INC_ISTAT_CNT(pThis->uStatTx8754);
4040 else if (cbFrame <= 16384)
4041 E1K_INC_ISTAT_CNT(pThis->uStatTx16384);
4042 else if (cbFrame <= 32768)
4043 E1K_INC_ISTAT_CNT(pThis->uStatTx32768);
4044 else
4045 E1K_INC_ISTAT_CNT(pThis->uStatTxLarge);
4046#endif /* E1K_INT_STATS */
4047
4048 /* Add VLAN tag */
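    /*
     * The 802.1Q tag goes right after the 12 bytes of destination and source
     * MAC addresses: everything from offset 12 onwards is shifted up by 4
     * bytes, then TPID (taken from VET) and the TCI saved from the descriptor
     * are written at offset 12, growing the frame by 4 bytes.
     */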
4049 if (cbFrame > 12 && pThis->fVTag)
4050 {
4051 E1kLog3(("%s Inserting VLAN tag %08x\n",
4052 pThis->szPrf, RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16)));
4053 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
4054 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16);
4055 pSg->cbUsed += 4;
4056 cbFrame += 4;
4057 Assert(pSg->cbUsed == cbFrame);
4058 Assert(pSg->cbUsed <= pSg->cbAvailable);
4059 }
4060/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
4061 "%.*Rhxd\n"
4062 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
4063 pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
4064
4065 /* Update the stats */
4066 E1K_INC_CNT32(TPT);
4067 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
4068 E1K_INC_CNT32(GPTC);
4069 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
4070 E1K_INC_CNT32(BPTC);
4071 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
4072 E1K_INC_CNT32(MPTC);
4073 /* Update octet transmit counter */
4074 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
4075 if (pThisCC->CTX_SUFF(pDrv))
4076 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, cbFrame);
4077 if (cbFrame == 64)
4078 E1K_INC_CNT32(PTC64);
4079 else if (cbFrame < 128)
4080 E1K_INC_CNT32(PTC127);
4081 else if (cbFrame < 256)
4082 E1K_INC_CNT32(PTC255);
4083 else if (cbFrame < 512)
4084 E1K_INC_CNT32(PTC511);
4085 else if (cbFrame < 1024)
4086 E1K_INC_CNT32(PTC1023);
4087 else
4088 E1K_INC_CNT32(PTC1522);
4089
4090 E1K_INC_ISTAT_CNT(pThis->uStatTxFrm);
4091
4092 /*
4093 * Dump and send the packet.
4094 */
4095 int rc = VERR_NET_DOWN;
4096 if (pSg && pSg->pvAllocator != pThis)
4097 {
4098 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
4099
4100 pThisCC->CTX_SUFF(pTxSg) = NULL;
4101 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
4102 if (pDrv)
4103 {
4104 /* Release critical section to avoid deadlock in CanReceive */
4105 //e1kCsLeave(pThis);
4106 STAM_PROFILE_START(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4107 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
4108 STAM_PROFILE_STOP(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4109 //e1kCsEnter(pThis, RT_SRC_POS);
4110 }
4111 }
4112 else if (pSg)
4113 {
4114 Assert(pSg->aSegs[0].pvSeg == pThis->aTxPacketFallback);
4115 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
4116
4117 /** @todo do we actually need to check that we're in loopback mode here? */
4118 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
4119 {
4120 E1KRXDST status;
4121 RT_ZERO(status);
4122 status.fPIF = true;
4123 e1kHandleRxPacket(pDevIns, pThis, pSg->aSegs[0].pvSeg, cbFrame, status);
4124 rc = VINF_SUCCESS;
4125 }
4126 e1kXmitFreeBuf(pThis, pThisCC);
4127 }
4128 else
4129 rc = VERR_NET_DOWN;
4130 if (RT_FAILURE(rc))
4131 {
4132 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
4133 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
4134 }
4135
4136 pThis->led.Actual.s.fWriting = 0;
4137}
4138
4139/**
4140 * Compute and write internet checksum (e1kCSum16) at the specified offset.
4141 *
4142 * @param pThis The device state structure.
4143 * @param pPkt Pointer to the packet.
4144 * @param u16PktLen Total length of the packet.
4145 * @param cso Offset in packet to write checksum at.
4146 * @param css Offset in packet to start computing
4147 * checksum from.
4148 * @param cse Offset in packet to stop computing
4149 * checksum at.
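 *
 * @note    A cse of 0 means "to the end of the packet". For a hypothetical
 *          Ethernet + IPv4 + TCP frame (20-byte IP header) the IP pass would
 *          typically use css=14, cso=24 (ip_sum) and the TCP pass css=34,
 *          cso=50 (th_sum).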
4150 * @thread E1000_TX
4151 */
4152static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
4153{
4154 RT_NOREF1(pThis);
4155
4156 if (css >= u16PktLen)
4157 {
4158 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
4159 pThis->szPrf, css, u16PktLen));
4160 return;
4161 }
4162
4163 if (cso >= u16PktLen - 1)
4164 {
4165 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
4166 pThis->szPrf, cso, u16PktLen));
4167 return;
4168 }
4169
4170 if (cse == 0)
4171 cse = u16PktLen - 1;
4172 else if (cse < css)
4173 {
4174 E1kLog2(("%s css(%X) is greater than cse(%X), checksum is not inserted\n",
4175 pThis->szPrf, css, cse));
4176 return;
4177 }
4178
4179 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
4180 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", pThis->szPrf,
4181 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
4182 *(uint16_t*)(pPkt + cso) = u16ChkSum;
4183}
4184
4185/**
4186 * Add a part of descriptor's buffer to transmit frame.
4187 *
4188 * @remarks data.u64BufAddr is used unconditionally for both data
4189 * and legacy descriptors since it is identical to
4190 * legacy.u64BufAddr.
4191 *
4192 * @param pDevIns The device instance.
4193 * @param pThis The device state structure.
4194 * @param PhysAddr Physical address of the descriptor buffer.
4195 * @param u16Len Length of buffer to the end of segment.
4196 * @param fSend Force packet sending.
4197 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4198 * @thread E1000_TX
4199 */
4200#ifndef E1K_WITH_TXD_CACHE
4201static void e1kFallbackAddSegment(PPDMDEVINS pDevIns, PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4202{
4203 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
4204 /* TCP header being transmitted */
4205 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4206 /* IP header being transmitted */
4207 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4208
4209 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4210 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4211 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4212
4213 PDMDevHlpPhysRead(pDevIns, PhysAddr, pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4214 E1kLog3(("%s Dump of the segment:\n"
4215 "%.*Rhxd\n"
4216 "%s --- End of dump ---\n",
4217 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4218 pThis->u16TxPktLen += u16Len;
4219 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4220 pThis->szPrf, pThis->u16TxPktLen));
4221 if (pThis->u16HdrRemain > 0)
4222 {
4223 /* The header was not complete, check if it is now */
4224 if (u16Len >= pThis->u16HdrRemain)
4225 {
4226 /* The rest is payload */
4227 u16Len -= pThis->u16HdrRemain;
4228 pThis->u16HdrRemain = 0;
4229 /* Save partial checksum and flags */
4230 pThis->u32SavedCsum = pTcpHdr->chksum;
4231 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4232 /* Clear FIN and PSH flags now and set them only in the last segment */
4233 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4234 }
4235 else
4236 {
4237 /* Still not */
4238 pThis->u16HdrRemain -= u16Len;
4239 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4240 pThis->szPrf, pThis->u16HdrRemain));
4241 return;
4242 }
4243 }
4244
4245 pThis->u32PayRemain -= u16Len;
4246
4247 if (fSend)
4248 {
4249 /* Leave ethernet header intact */
4250 /* IP Total Length = payload + headers - ethernet header */
4251 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4252 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4253 pThis->szPrf, ntohs(pIpHdr->total_len)));
4254 /* Update IP Checksum */
4255 pIpHdr->chksum = 0;
4256 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4257 pThis->contextTSE.ip.u8CSO,
4258 pThis->contextTSE.ip.u8CSS,
4259 pThis->contextTSE.ip.u16CSE);
4260
4261 /* Update TCP flags */
4262 /* Restore original FIN and PSH flags for the last segment */
4263 if (pThis->u32PayRemain == 0)
4264 {
4265 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4266 E1K_INC_CNT32(TSCTC);
4267 }
4268 /* Add TCP length to partial pseudo header sum */
4269 uint32_t csum = pThis->u32SavedCsum
4270 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4271 while (csum >> 16)
4272 csum = (csum >> 16) + (csum & 0xFFFF);
4273 pTcpHdr->chksum = csum;
4274 /* Compute final checksum */
4275 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4276 pThis->contextTSE.tu.u8CSO,
4277 pThis->contextTSE.tu.u8CSS,
4278 pThis->contextTSE.tu.u16CSE);
4279
4280 /*
4281 * Transmit it. If we've used the SG already, allocate a new one before
4282 * we copy the data.
4283 */
4284 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4285 if (!pTxSg)
4286 {
4287 e1kXmitAllocBuf(pThis, pThisCC, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
4288 pTxSg = pThisCC->CTX_SUFF(pTxSg);
4289 }
4290 if (pTxSg)
4291 {
4292 Assert(pThis->u16TxPktLen <= pThisCC->CTX_SUFF(pTxSg)->cbAvailable);
4293 Assert(pTxSg->cSegs == 1);
4294 if (pTxSg->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4295 memcpy(pTxSg->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4296 pTxSg->cbUsed = pThis->u16TxPktLen;
4297 pTxSg->aSegs[0].cbSeg = pThis->u16TxPktLen;
4298 }
4299 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4300
4301 /* Update Sequence Number */
4302 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4303 - pThis->contextTSE.dw3.u8HDRLEN);
4304 /* Increment IP identification */
4305 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4306 }
4307}
4308#else /* E1K_WITH_TXD_CACHE */
4309static int e1kFallbackAddSegment(PPDMDEVINS pDevIns, PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4310{
4311 int rc = VINF_SUCCESS;
4312 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
4313 /* TCP header being transmitted */
4314 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4315 /* IP header being transmitted */
4316 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4317
4318 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4319 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4320 AssertReturn(pThis->u32PayRemain + pThis->u16HdrRemain > 0, VINF_SUCCESS);
4321
4322 if (pThis->u16TxPktLen + u16Len <= sizeof(pThis->aTxPacketFallback))
4323 PDMDevHlpPhysRead(pDevIns, PhysAddr, pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4324 else
4325 E1kLog(("%s e1kFallbackAddSegment: writing beyond aTxPacketFallback, u16TxPktLen=%d(0x%x) + u16Len=%d(0x%x) > %d\n",
4326 pThis->szPrf, pThis->u16TxPktLen, pThis->u16TxPktLen, u16Len, u16Len, sizeof(pThis->aTxPacketFallback)));
4327 E1kLog3(("%s Dump of the segment:\n"
4328 "%.*Rhxd\n"
4329 "%s --- End of dump ---\n",
4330 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4331 pThis->u16TxPktLen += u16Len;
4332 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4333 pThis->szPrf, pThis->u16TxPktLen));
4334 if (pThis->u16HdrRemain > 0)
4335 {
4336 /* The header was not complete, check if it is now */
4337 if (u16Len >= pThis->u16HdrRemain)
4338 {
4339 /* The rest is payload */
4340 u16Len -= pThis->u16HdrRemain;
4341 pThis->u16HdrRemain = 0;
4342 /* Save partial checksum and flags */
4343 pThis->u32SavedCsum = pTcpHdr->chksum;
4344 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4345 /* Clear FIN and PSH flags now and set them only in the last segment */
4346 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4347 }
4348 else
4349 {
4350 /* Still not */
4351 pThis->u16HdrRemain -= u16Len;
4352 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4353 pThis->szPrf, pThis->u16HdrRemain));
4354 return rc;
4355 }
4356 }
4357
4358 if (u16Len > pThis->u32PayRemain)
4359 pThis->u32PayRemain = 0;
4360 else
4361 pThis->u32PayRemain -= u16Len;
4362
4363 if (fSend)
4364 {
4365 /* Leave ethernet header intact */
4366 /* IP Total Length = payload + headers - ethernet header */
4367 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4368 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4369 pThis->szPrf, ntohs(pIpHdr->total_len)));
4370 /* Update IP Checksum */
4371 pIpHdr->chksum = 0;
4372 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4373 pThis->contextTSE.ip.u8CSO,
4374 pThis->contextTSE.ip.u8CSS,
4375 pThis->contextTSE.ip.u16CSE);
4376
4377 /* Update TCP flags */
4378 /* Restore original FIN and PSH flags for the last segment */
4379 if (pThis->u32PayRemain == 0)
4380 {
4381 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4382 E1K_INC_CNT32(TSCTC);
4383 }
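        /*
         * The TSE convention is that the guest pre-seeds the TCP checksum
         * field with the pseudo-header sum excluding the TCP length (saved
         * above in u32SavedCsum); the length of this particular segment is
         * folded in here before the final checksum over the TCP header and
         * payload is computed.
         */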
4384 /* Add TCP length to partial pseudo header sum */
4385 uint32_t csum = pThis->u32SavedCsum
4386 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4387 while (csum >> 16)
4388 csum = (csum >> 16) + (csum & 0xFFFF);
4389 pTcpHdr->chksum = csum;
4390 /* Compute final checksum */
4391 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4392 pThis->contextTSE.tu.u8CSO,
4393 pThis->contextTSE.tu.u8CSS,
4394 pThis->contextTSE.tu.u16CSE);
4395
4396 /*
4397 * Transmit it.
4398 */
4399 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4400 if (pTxSg)
4401 {
4402 /* Make sure the packet fits into the allocated buffer */
4403 size_t cbCopy = RT_MIN(pThis->u16TxPktLen, pThisCC->CTX_SUFF(pTxSg)->cbAvailable);
4404#ifdef DEBUG
4405 if (pThis->u16TxPktLen > pTxSg->cbAvailable)
4406 E1kLog(("%s e1kFallbackAddSegment: truncating packet, u16TxPktLen=%d(0x%x) > cbAvailable=%d(0x%x)\n",
4407 pThis->szPrf, pThis->u16TxPktLen, pThis->u16TxPktLen, pTxSg->cbAvailable, pTxSg->cbAvailable));
4408#endif /* DEBUG */
4409 Assert(pTxSg->cSegs == 1);
4410 if (pTxSg->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4411 memcpy(pTxSg->aSegs[0].pvSeg, pThis->aTxPacketFallback, cbCopy);
4412 pTxSg->cbUsed = cbCopy;
4413 pTxSg->aSegs[0].cbSeg = cbCopy;
4414 }
4415 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4416
4417 /* Update Sequence Number */
4418 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4419 - pThis->contextTSE.dw3.u8HDRLEN);
4420 /* Increment IP identification */
4421 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4422
4423 /* Allocate new buffer for the next segment. */
4424 if (pThis->u32PayRemain)
4425 {
4426 pThis->cbTxAlloc = RT_MIN(pThis->u32PayRemain,
4427 pThis->contextTSE.dw3.u16MSS)
4428 + pThis->contextTSE.dw3.u8HDRLEN
4429 + (pThis->fVTag ? 4 : 0);
4430 rc = e1kXmitAllocBuf(pThis, pThisCC, false /* fGSO */);
4431 }
4432 }
4433
4434 return rc;
4435}
4436#endif /* E1K_WITH_TXD_CACHE */
4437
4438#ifndef E1K_WITH_TXD_CACHE
4439/**
4440 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4441 * frame.
4442 *
4443 * We construct the frame in the fallback buffer first and then copy it to the SG
4444 * buffer before passing it down to the network driver code.
4445 *
4446 * @returns true if the frame should be transmitted, false if not.
4447 *
4448 * @param pDevIns The device instance.
 * @param pThis The device state structure.
4449 * @param pDesc Pointer to the descriptor to transmit.
4450 * @param cbFragment Length of descriptor's buffer.
4451 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4452 * @thread E1000_TX
4453 */
4454static bool e1kFallbackAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4455{
4456 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4457 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4458 Assert(pDesc->data.cmd.fTSE);
4459 Assert(!e1kXmitIsGsoBuf(pTxSg));
4460
4461 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4462 Assert(u16MaxPktLen != 0);
4463 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4464
4465 /*
4466 * Carve out segments.
4467 */
4468 do
4469 {
4470 /* Calculate how many bytes we have left in this TCP segment */
4471 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4472 if (cb > cbFragment)
4473 {
4474 /* This descriptor fits completely into current segment */
4475 cb = cbFragment;
4476 e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4477 }
4478 else
4479 {
4480 e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4481 /*
4482 * Rewind the packet tail pointer to the beginning of payload,
4483 * so we continue writing right beyond the header.
4484 */
4485 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4486 }
4487
4488 pDesc->data.u64BufAddr += cb;
4489 cbFragment -= cb;
4490 } while (cbFragment > 0);
4491
4492 if (pDesc->data.cmd.fEOP)
4493 {
4494 /* End of packet, next segment will contain header. */
4495 if (pThis->u32PayRemain != 0)
4496 E1K_INC_CNT32(TSCTFC);
4497 pThis->u16TxPktLen = 0;
4498 e1kXmitFreeBuf(pThis, PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC));
4499 }
4500
4501 return false;
4502}
4503#else /* E1K_WITH_TXD_CACHE */
4504/**
4505 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4506 * frame.
4507 *
4508 * We construct the frame in the fallback buffer first and then copy it to the SG
4509 * buffer before passing it down to the network driver code.
4510 *
4511 * @returns error code
4512 *
4513 * @param pDevIns The device instance.
4514 * @param pThis The device state structure.
4515 * @param pDesc Pointer to the descriptor to transmit.
4517 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4518 * @thread E1000_TX
4519 */
4520static int e1kFallbackAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, bool fOnWorkerThread)
4521{
4522#ifdef VBOX_STRICT
4523 PPDMSCATTERGATHER pTxSg = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC)->CTX_SUFF(pTxSg);
4524 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4525 Assert(pDesc->data.cmd.fTSE);
4526 Assert(!e1kXmitIsGsoBuf(pTxSg));
4527#endif
4528
4529 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4530 /* We cannot produce empty packets, ignore all TX descriptors (see @bugref{9571}) */
4531 if (u16MaxPktLen == 0)
4532 return VINF_SUCCESS;
4533
4534 /*
4535 * Carve out segments.
4536 */
4537 int rc = VINF_SUCCESS;
4538 do
4539 {
4540 /* Calculate how many bytes we have left in this TCP segment */
4541 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4542 if (cb > pDesc->data.cmd.u20DTALEN)
4543 {
4544 /* This descriptor fits completely into current segment */
4545 cb = pDesc->data.cmd.u20DTALEN;
4546 rc = e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4547 }
4548 else
4549 {
4550 rc = e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4551 /*
4552 * Rewind the packet tail pointer to the beginning of payload,
4553 * so we continue writing right beyond the header.
4554 */
4555 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4556 }
4557
4558 pDesc->data.u64BufAddr += cb;
4559 pDesc->data.cmd.u20DTALEN -= cb;
4560 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4561
4562 if (pDesc->data.cmd.fEOP)
4563 {
4564 /* End of packet, next segment will contain header. */
4565 if (pThis->u32PayRemain != 0)
4566 E1K_INC_CNT32(TSCTFC);
4567 pThis->u16TxPktLen = 0;
4568 e1kXmitFreeBuf(pThis, PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC));
4569 }
4570
4571 return VINF_SUCCESS; /// @todo consider rc;
4572}
4573#endif /* E1K_WITH_TXD_CACHE */
4574
4575
4576/**
4577 * Add descriptor's buffer to transmit frame.
4578 *
4579 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4580 * TSE frames we cannot handle as GSO.
4581 *
4582 * @returns true on success, false on failure.
4583 *
4584 * @param pDevIns The device instance.
4585 * @param pThisCC The current context instance data.
4586 * @param pThis The device state structure.
4587 * @param PhysAddr The physical address of the descriptor buffer.
4588 * @param cbFragment Length of descriptor's buffer.
4589 * @thread E1000_TX
4590 */
4591static bool e1kAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, RTGCPHYS PhysAddr, uint32_t cbFragment)
4592{
4593 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4594 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4595 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4596
4597 LogFlow(("%s e1kAddToFrame: ENTER cbFragment=%d u16TxPktLen=%d cbUsed=%d cbAvailable=%d fGSO=%s\n",
4598 pThis->szPrf, cbFragment, pThis->u16TxPktLen, pTxSg->cbUsed, pTxSg->cbAvailable,
4599 fGso ? "true" : "false"));
4600 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4601 {
4602 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4603 return false;
4604 }
4605 if (RT_UNLIKELY( cbNewPkt > pTxSg->cbAvailable ))
4606 {
4607 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
4608 return false;
4609 }
4610
4611 if (RT_LIKELY(pTxSg))
4612 {
4613 Assert(pTxSg->cSegs == 1);
4614 if (pTxSg->cbUsed != pThis->u16TxPktLen)
4615 E1kLog(("%s e1kAddToFrame: pTxSg->cbUsed=%d(0x%x) != u16TxPktLen=%d(0x%x)\n",
4616 pThis->szPrf, pTxSg->cbUsed, pTxSg->cbUsed, pThis->u16TxPktLen, pThis->u16TxPktLen));
4617
4618 PDMDevHlpPhysRead(pDevIns, PhysAddr, (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4619
4620 pTxSg->cbUsed = cbNewPkt;
4621 }
4622 pThis->u16TxPktLen = cbNewPkt;
4623
4624 return true;
4625}
4626
4627
4628/**
4629 * Write the descriptor back to guest memory and notify the guest.
4630 *
4631 * @param pThis The device state structure.
4632 * @param   pDesc       Pointer to the descriptor that has been transmitted.
4633 * @param addr Physical address of the descriptor in guest memory.
4634 * @thread E1000_TX
4635 */
4636static void e1kDescReport(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4637{
4638 /*
4639 * We fake descriptor write-back bursting. Descriptors are written back as they are
4640 * processed.
4641 */
4642 /* Let's pretend we process descriptors. Write back with DD set. */
4643 /*
4644     * Prior to r71586 we tried to accommodate the case when write-back bursts
4645 * are enabled without actually implementing bursting by writing back all
4646 * descriptors, even the ones that do not have RS set. This caused kernel
4647 * panics with Linux SMP kernels, as the e1000 driver tried to free up skb
4648 * associated with written back descriptor if it happened to be a context
4649 * descriptor since context descriptors do not have skb associated to them.
4650 * Starting from r71586 we write back only the descriptors with RS set,
4651 * which is a little bit different from what the real hardware does in
4652     * case there is a chain of data descriptors where some of them have RS set
4653     * and others do not. It is a very uncommon scenario imho.
4654 * We need to check RPS as well since some legacy drivers use it instead of
4655 * RS even with newer cards.
4656 */
4657 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4658 {
4659 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4660 e1kWriteBackDesc(pDevIns, pThis, pDesc, addr);
4661 if (pDesc->legacy.cmd.fEOP)
4662 {
4663//#ifdef E1K_USE_TX_TIMERS
4664 if (pThis->fTidEnabled && pDesc->legacy.cmd.fIDE)
4665 {
4666 E1K_INC_ISTAT_CNT(pThis->uStatTxIDE);
4667 //if (pThis->fIntRaised)
4668 //{
4669 // /* Interrupt is already pending, no need for timers */
4670 // ICR |= ICR_TXDW;
4671 //}
4672 //else {
4673            /* Arm the timer to fire in TIDV usec (discard .024) */
4674 e1kArmTimer(pDevIns, pThis, pThis->hTIDTimer, TIDV);
4675# ifndef E1K_NO_TAD
4676 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4677 E1kLog2(("%s Checking if TAD timer is running\n",
4678 pThis->szPrf));
4679 if (TADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hTADTimer))
4680 e1kArmTimer(pDevIns, pThis, pThis->hTADTimer, TADV);
4681# endif /* E1K_NO_TAD */
4682 }
4683 else
4684 {
4685 if (pThis->fTidEnabled)
4686 {
4687 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4688 pThis->szPrf));
4689 /* Cancel both timers if armed and fire immediately. */
4690# ifndef E1K_NO_TAD
4691 PDMDevHlpTimerStop(pDevIns, pThis->hTADTimer);
4692# endif
4693 PDMDevHlpTimerStop(pDevIns, pThis->hTIDTimer);
4694 }
4695//#endif /* E1K_USE_TX_TIMERS */
4696 E1K_INC_ISTAT_CNT(pThis->uStatIntTx);
4697 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXDW);
4698//#ifdef E1K_USE_TX_TIMERS
4699 }
4700//#endif /* E1K_USE_TX_TIMERS */
4701 }
4702 }
4703 else
4704 {
4705 E1K_INC_ISTAT_CNT(pThis->uStatTxNoRS);
4706 }
4707}
4708
4709#ifndef E1K_WITH_TXD_CACHE
4710
4711/**
4712 * Process Transmit Descriptor.
4713 *
4714 * E1000 supports three types of transmit descriptors:
4715 * - legacy data descriptors of older format (context-less).
4716 * - data the same as legacy but providing new offloading capabilities.
4717 * - context sets up the context for following data descriptors.
4718 *
4719 * @param pDevIns The device instance.
4720 * @param pThis The device state structure.
4721 * @param pThisCC The current context instance data.
4722 * @param pDesc Pointer to descriptor union.
4723 * @param addr Physical address of descriptor in guest memory.
4724 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4725 * @thread E1000_TX
4726 */
4727static int e1kXmitDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, E1KTXDESC *pDesc,
4728 RTGCPHYS addr, bool fOnWorkerThread)
4729{
4730 int rc = VINF_SUCCESS;
4731 uint32_t cbVTag = 0;
4732
4733 e1kPrintTDesc(pThis, pDesc, "vvv");
4734
4735//#ifdef E1K_USE_TX_TIMERS
4736 if (pThis->fTidEnabled)
4737 e1kCancelTimer(pDevIns, pThis, pThis->hTIDTimer);
4738//#endif /* E1K_USE_TX_TIMERS */
4739
4740 switch (e1kGetDescType(pDesc))
4741 {
4742 case E1K_DTYP_CONTEXT:
4743 if (pDesc->context.dw2.fTSE)
4744 {
4745 pThis->contextTSE = pDesc->context;
4746 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4747 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4748 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4749 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4750 }
4751 else
4752 {
4753 pThis->contextNormal = pDesc->context;
4754 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4755 }
4756 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4757 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4758 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4759 pDesc->context.ip.u8CSS,
4760 pDesc->context.ip.u8CSO,
4761 pDesc->context.ip.u16CSE,
4762 pDesc->context.tu.u8CSS,
4763 pDesc->context.tu.u8CSO,
4764 pDesc->context.tu.u16CSE));
4765 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4766            e1kDescReport(pDevIns, pThis, pDesc, addr);
4767 break;
4768
4769 case E1K_DTYP_DATA:
4770 {
4771 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4772 {
4773                E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4774 /** @todo Same as legacy when !TSE. See below. */
4775 break;
4776 }
4777 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4778 &pThis->StatTxDescTSEData:
4779 &pThis->StatTxDescData);
4780 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4781 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4782
4783 /*
4784 * The last descriptor of non-TSE packet must contain VLE flag.
4785             * TSE packets have VLE flag in the first descriptor. The latter
4786             * case is taken care of a bit later when cbVTag gets assigned.
4787 *
4788 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4789 */
4790 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4791 {
4792 pThis->fVTag = pDesc->data.cmd.fVLE;
4793 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4794 }
4795 /*
4796 * First fragment: Allocate new buffer and save the IXSM and TXSM
4797 * packet options as these are only valid in the first fragment.
4798 */
4799 if (pThis->u16TxPktLen == 0)
4800 {
4801 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4802 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4803 E1kLog2(("%s Saving checksum flags:%s%s; \n", pThis->szPrf,
4804 pThis->fIPcsum ? " IP" : "",
4805 pThis->fTCPcsum ? " TCP/UDP" : ""));
4806 if (pDesc->data.cmd.fTSE)
4807 {
4808 /* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
4809 pThis->fVTag = pDesc->data.cmd.fVLE;
4810 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4811 cbVTag = pThis->fVTag ? 4 : 0;
4812 }
4813 else if (pDesc->data.cmd.fEOP)
4814 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4815 else
4816 cbVTag = 4;
4817 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4818 if (e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE))
4819 rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4820 true /*fExactSize*/, true /*fGso*/);
4821 else if (pDesc->data.cmd.fTSE)
4822                    rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4823 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4824 else
4825 rc = e1kXmitAllocBuf(pThis, pThisCC, pDesc->data.cmd.u20DTALEN + cbVTag,
4826 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4827
4828 /**
4829 * @todo: Perhaps it is not that simple for GSO packets! We may
4830 * need to unwind some changes.
4831 */
4832 if (RT_FAILURE(rc))
4833 {
4834 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4835 break;
4836 }
4837                /** @todo Is there any way to indicate errors other than collisions? Like
4838 * VERR_NET_DOWN. */
4839 }
4840
4841 /*
4842 * Add the descriptor data to the frame. If the frame is complete,
4843 * transmit it and reset the u16TxPktLen field.
4844 */
4845 if (e1kXmitIsGsoBuf(pThisCC->CTX_SUFF(pTxSg)))
4846 {
4847 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4848 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4849 if (pDesc->data.cmd.fEOP)
4850 {
4851 if ( fRc
4852 && pThisCC->CTX_SUFF(pTxSg)
4853 && pThisCC->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4854 {
4855 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4856 E1K_INC_CNT32(TSCTC);
4857 }
4858 else
4859 {
4860 if (fRc)
4861 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4862 pThisCC->CTX_SUFF(pTxSg), pThisCC->CTX_SUFF(pTxSg) ? pThisCC->CTX_SUFF(pTxSg)->cbUsed : 0,
4863 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4864                        e1kXmitFreeBuf(pThis, pThisCC);
4865 E1K_INC_CNT32(TSCTFC);
4866 }
4867 pThis->u16TxPktLen = 0;
4868 }
4869 }
4870 else if (!pDesc->data.cmd.fTSE)
4871 {
4872 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4873 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4874 if (pDesc->data.cmd.fEOP)
4875 {
4876 if (fRc && pThisCC->CTX_SUFF(pTxSg))
4877 {
4878 Assert(pThisCC->CTX_SUFF(pTxSg)->cSegs == 1);
4879 if (pThis->fIPcsum)
4880 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4881 pThis->contextNormal.ip.u8CSO,
4882 pThis->contextNormal.ip.u8CSS,
4883 pThis->contextNormal.ip.u16CSE);
4884 if (pThis->fTCPcsum)
4885 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4886 pThis->contextNormal.tu.u8CSO,
4887 pThis->contextNormal.tu.u8CSS,
4888 pThis->contextNormal.tu.u16CSE);
4889 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4890 }
4891 else
4892                        e1kXmitFreeBuf(pThis, pThisCC);
4893 pThis->u16TxPktLen = 0;
4894 }
4895 }
4896 else
4897 {
4898 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4899 e1kFallbackAddToFrame(pDevIns, pThis, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4900 }
4901
4902            e1kDescReport(pDevIns, pThis, pDesc, addr);
4903 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4904 break;
4905 }
4906
4907 case E1K_DTYP_LEGACY:
4908 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4909 {
4910 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4911 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4912 break;
4913 }
4914 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4915 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4916
4917 /* First fragment: allocate new buffer. */
4918 if (pThis->u16TxPktLen == 0)
4919 {
4920 if (pDesc->legacy.cmd.fEOP)
4921 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4922 else
4923 cbVTag = 4;
4924 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4925 /** @todo reset status bits? */
4926 rc = e1kXmitAllocBuf(pThis, pThisCC, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
4927 if (RT_FAILURE(rc))
4928 {
4929 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4930 break;
4931 }
4932
4933                /** @todo Is there any way to indicate errors other than collisions? Like
4934 * VERR_NET_DOWN. */
4935 }
4936
4937 /* Add fragment to frame. */
4938 if (e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4939 {
4940 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4941
4942 /* Last fragment: Transmit and reset the packet storage counter. */
4943 if (pDesc->legacy.cmd.fEOP)
4944 {
4945 pThis->fVTag = pDesc->legacy.cmd.fVLE;
4946 pThis->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4947 /** @todo Offload processing goes here. */
4948 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4949 pThis->u16TxPktLen = 0;
4950 }
4951 }
4952 /* Last fragment + failure: free the buffer and reset the storage counter. */
4953 else if (pDesc->legacy.cmd.fEOP)
4954 {
4955                e1kXmitFreeBuf(pThis, pThisCC);
4956 pThis->u16TxPktLen = 0;
4957 }
4958
4959            e1kDescReport(pDevIns, pThis, pDesc, addr);
4960 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4961 break;
4962
4963 default:
4964 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4965 pThis->szPrf, e1kGetDescType(pDesc)));
4966 break;
4967 }
4968
4969 return rc;
4970}
4971
4972#else /* E1K_WITH_TXD_CACHE */
4973
4974/**
4975 * Process Transmit Descriptor.
4976 *
4977 * E1000 supports three types of transmit descriptors:
4978 * - legacy data descriptors of older format (context-less).
4979 * - data the same as legacy but providing new offloading capabilities.
4980 * - context sets up the context for following data descriptors.
4981 *
4982 * @param pDevIns The device instance.
4983 * @param pThis The device state structure.
4984 * @param pThisCC The current context instance data.
4985 * @param pDesc Pointer to descriptor union.
4986 * @param addr Physical address of descriptor in guest memory.
4987 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4989 * @thread E1000_TX
4990 */
4991static int e1kXmitDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, E1KTXDESC *pDesc,
4992 RTGCPHYS addr, bool fOnWorkerThread)
4993{
4994 int rc = VINF_SUCCESS;
4995
4996 e1kPrintTDesc(pThis, pDesc, "vvv");
4997
4998 if (pDesc->legacy.dw3.fDD)
4999 {
5000 E1kLog(("%s e1kXmitDesc: skipping bad descriptor ^^^\n", pThis->szPrf));
5001 e1kDescReport(pDevIns, pThis, pDesc, addr);
5002 return VINF_SUCCESS;
5003 }
5004
5005//#ifdef E1K_USE_TX_TIMERS
5006 if (pThis->fTidEnabled)
5007 PDMDevHlpTimerStop(pDevIns, pThis->hTIDTimer);
5008//#endif /* E1K_USE_TX_TIMERS */
5009
5010 switch (e1kGetDescType(pDesc))
5011 {
5012 case E1K_DTYP_CONTEXT:
5013            /* The caller has already updated the context */
5014 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
5015 e1kDescReport(pDevIns, pThis, pDesc, addr);
5016 break;
5017
5018 case E1K_DTYP_DATA:
5019 {
5020 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
5021 &pThis->StatTxDescTSEData:
5022 &pThis->StatTxDescData);
5023 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
5024 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5025 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
5026 {
5027                E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
5028 if (pDesc->data.cmd.fEOP)
5029 {
5030 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5031 pThis->u16TxPktLen = 0;
5032 }
5033 }
5034 else
5035 {
5036 /*
5037 * Add the descriptor data to the frame. If the frame is complete,
5038 * transmit it and reset the u16TxPktLen field.
5039 */
5040 if (e1kXmitIsGsoBuf(pThisCC->CTX_SUFF(pTxSg)))
5041 {
5042 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
5043 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5044 if (pDesc->data.cmd.fEOP)
5045 {
5046 if ( fRc
5047 && pThisCC->CTX_SUFF(pTxSg)
5048 && pThisCC->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
5049 {
5050 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5051 E1K_INC_CNT32(TSCTC);
5052 }
5053 else
5054 {
5055 if (fRc)
5056 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
5057 pThisCC->CTX_SUFF(pTxSg), pThisCC->CTX_SUFF(pTxSg) ? pThisCC->CTX_SUFF(pTxSg)->cbUsed : 0,
5058 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
5059 e1kXmitFreeBuf(pThis, pThisCC);
5060 E1K_INC_CNT32(TSCTFC);
5061 }
5062 pThis->u16TxPktLen = 0;
5063 }
5064 }
5065 else if (!pDesc->data.cmd.fTSE)
5066 {
5067 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
5068 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5069 if (pDesc->data.cmd.fEOP)
5070 {
5071 if (fRc && pThisCC->CTX_SUFF(pTxSg))
5072 {
5073 Assert(pThisCC->CTX_SUFF(pTxSg)->cSegs == 1);
5074 if (pThis->fIPcsum)
5075 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5076 pThis->contextNormal.ip.u8CSO,
5077 pThis->contextNormal.ip.u8CSS,
5078 pThis->contextNormal.ip.u16CSE);
5079 if (pThis->fTCPcsum)
5080 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5081 pThis->contextNormal.tu.u8CSO,
5082 pThis->contextNormal.tu.u8CSS,
5083 pThis->contextNormal.tu.u16CSE);
5084 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5085 }
5086 else
5087 e1kXmitFreeBuf(pThis, pThisCC);
5088 pThis->u16TxPktLen = 0;
5089 }
5090 }
5091 else
5092 {
5093 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
5094 rc = e1kFallbackAddToFrame(pDevIns, pThis, pDesc, fOnWorkerThread);
5095 }
5096 }
5097 e1kDescReport(pDevIns, pThis, pDesc, addr);
5098 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5099 break;
5100 }
5101
5102 case E1K_DTYP_LEGACY:
5103 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
5104 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5105 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
5106 {
5107 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
5108 }
5109 else
5110 {
5111 /* Add fragment to frame. */
5112 if (e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
5113 {
5114 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
5115
5116 /* Last fragment: Transmit and reset the packet storage counter. */
5117 if (pDesc->legacy.cmd.fEOP)
5118 {
5119 if (pDesc->legacy.cmd.fIC)
5120 {
5121 e1kInsertChecksum(pThis,
5122 (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
5123 pThis->u16TxPktLen,
5124 pDesc->legacy.cmd.u8CSO,
5125 pDesc->legacy.dw3.u8CSS,
5126 0);
5127 }
5128 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5129 pThis->u16TxPktLen = 0;
5130 }
5131 }
5132 /* Last fragment + failure: free the buffer and reset the storage counter. */
5133 else if (pDesc->legacy.cmd.fEOP)
5134 {
5135 e1kXmitFreeBuf(pThis, pThisCC);
5136 pThis->u16TxPktLen = 0;
5137 }
5138 }
5139 e1kDescReport(pDevIns, pThis, pDesc, addr);
5140 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5141 break;
5142
5143 default:
5144 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
5145 pThis->szPrf, e1kGetDescType(pDesc)));
5146 break;
5147 }
5148
5149 return rc;
5150}
5151
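/**
 * Update the cached transmit context from a context descriptor.
 *
 * For TSE contexts the MSS is clamped so that a single segment (MSS + header
 * length + VLAN tag) does not exceed E1K_MAX_TX_PKT_SIZE, the payload/header
 * byte counters are reset and the GSO context is set up. Non-TSE contexts are
 * simply stored.
 *
 * @param   pThis       The device state structure.
 * @param   pDesc       Pointer to the context descriptor.
 * @thread  E1000_TX
 */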
5152DECLINLINE(void) e1kUpdateTxContext(PE1KSTATE pThis, E1KTXDESC *pDesc)
5153{
5154 if (pDesc->context.dw2.fTSE)
5155 {
5156 pThis->contextTSE = pDesc->context;
5157 uint32_t cbMaxSegmentSize = pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + 4; /*VTAG*/
5158 if (RT_UNLIKELY(cbMaxSegmentSize > E1K_MAX_TX_PKT_SIZE))
5159 {
5160 pThis->contextTSE.dw3.u16MSS = E1K_MAX_TX_PKT_SIZE - pThis->contextTSE.dw3.u8HDRLEN - 4; /*VTAG*/
5161 LogRelMax(10, ("%s: Transmit packet is too large: %u > %u(max). Adjusted MSS to %u.\n",
5162 pThis->szPrf, cbMaxSegmentSize, E1K_MAX_TX_PKT_SIZE, pThis->contextTSE.dw3.u16MSS));
5163 }
5164 pThis->u32PayRemain = pThis->contextTSE.dw2.u20PAYLEN;
5165 pThis->u16HdrRemain = pThis->contextTSE.dw3.u8HDRLEN;
5166 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
5167 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
5168 }
5169 else
5170 {
5171 pThis->contextNormal = pDesc->context;
5172 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
5173 }
5174 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
5175 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
5176 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
5177 pDesc->context.ip.u8CSS,
5178 pDesc->context.ip.u8CSO,
5179 pDesc->context.ip.u16CSE,
5180 pDesc->context.tu.u8CSS,
5181 pDesc->context.tu.u8CSO,
5182 pDesc->context.tu.u16CSE));
5183}
5184
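/**
 * Locate a complete packet in the transmit descriptor cache.
 *
 * Walks the fetched descriptors starting at iTxDCurrent, applying context
 * descriptors and skipping empty or invalid ones, until a descriptor with EOP
 * is found. On success cbTxAlloc is set to the size of the buffer to allocate
 * (a single segment for TSE packets we cannot pass down as GSO).
 *
 * @returns true if a complete packet was located or only empty descriptors
 *          remain, false if more descriptors need to be fetched.
 * @param   pThis       The device state structure.
 * @thread  E1000_TX
 */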
5185static bool e1kLocateTxPacket(PE1KSTATE pThis)
5186{
5187 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
5188 pThis->szPrf, pThis->cbTxAlloc));
5189 /* Check if we have located the packet already. */
5190 if (pThis->cbTxAlloc)
5191 {
5192 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
5193 pThis->szPrf, pThis->cbTxAlloc));
5194 return true;
5195 }
5196
5197 bool fTSE = false;
5198 uint32_t cbPacket = 0;
5199
5200 for (int i = pThis->iTxDCurrent; i < pThis->nTxDFetched; ++i)
5201 {
5202 E1KTXDESC *pDesc = &pThis->aTxDescriptors[i];
5203 switch (e1kGetDescType(pDesc))
5204 {
5205 case E1K_DTYP_CONTEXT:
5206 if (cbPacket == 0)
5207 e1kUpdateTxContext(pThis, pDesc);
5208 else
5209 E1kLog(("%s e1kLocateTxPacket: ignoring a context descriptor in the middle of a packet, cbPacket=%d\n",
5210 pThis->szPrf, cbPacket));
5211 continue;
5212 case E1K_DTYP_LEGACY:
5213 /* Skip invalid descriptors. */
5214 if (cbPacket > 0 && (pThis->fGSO || fTSE))
5215 {
5216 E1kLog(("%s e1kLocateTxPacket: ignoring a legacy descriptor in the segmentation context, cbPacket=%d\n",
5217 pThis->szPrf, cbPacket));
5218 pDesc->legacy.dw3.fDD = true; /* Make sure it is skipped by processing */
5219 continue;
5220 }
5221 /* Skip empty descriptors. */
5222 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
5223 break;
5224 cbPacket += pDesc->legacy.cmd.u16Length;
5225 pThis->fGSO = false;
5226 break;
5227 case E1K_DTYP_DATA:
5228 /* Skip invalid descriptors. */
5229 if (cbPacket > 0 && (bool)pDesc->data.cmd.fTSE != fTSE)
5230 {
5231 E1kLog(("%s e1kLocateTxPacket: ignoring %sTSE descriptor in the %ssegmentation context, cbPacket=%d\n",
5232 pThis->szPrf, pDesc->data.cmd.fTSE ? "" : "non-", fTSE ? "" : "non-", cbPacket));
5233 pDesc->data.dw3.fDD = true; /* Make sure it is skipped by processing */
5234 continue;
5235 }
5236 /* Skip empty descriptors. */
5237 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
5238 break;
5239 if (cbPacket == 0)
5240 {
5241 /*
5242 * The first fragment: save IXSM and TXSM options
5243 * as these are only valid in the first fragment.
5244 */
5245 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
5246 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
5247 fTSE = pDesc->data.cmd.fTSE;
5248 /*
5249 * TSE descriptors have VLE bit properly set in
5250 * the first fragment.
5251 */
5252 if (fTSE)
5253 {
5254 pThis->fVTag = pDesc->data.cmd.fVLE;
5255 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5256 }
5257 pThis->fGSO = e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE);
5258 }
5259 cbPacket += pDesc->data.cmd.u20DTALEN;
5260 break;
5261 default:
5262 AssertMsgFailed(("Impossible descriptor type!"));
5263 }
5264 if (pDesc->legacy.cmd.fEOP)
5265 {
5266 /*
5267 * Non-TSE descriptors have VLE bit properly set in
5268 * the last fragment.
5269 */
5270 if (!fTSE)
5271 {
5272 pThis->fVTag = pDesc->data.cmd.fVLE;
5273 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5274 }
5275 /*
5276 * Compute the required buffer size. If we cannot do GSO but still
5277 * have to do segmentation we allocate the first segment only.
5278 */
5279 pThis->cbTxAlloc = (!fTSE || pThis->fGSO) ?
5280 cbPacket :
5281 RT_MIN(cbPacket, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN);
5282 if (pThis->fVTag)
5283 pThis->cbTxAlloc += 4;
5284 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d cbPacket=%d%s%s\n",
5285 pThis->szPrf, pThis->cbTxAlloc, cbPacket,
5286 pThis->fGSO ? " GSO" : "", fTSE ? " TSE" : ""));
5287 return true;
5288 }
5289 }
5290
5291 if (cbPacket == 0 && pThis->nTxDFetched - pThis->iTxDCurrent > 0)
5292 {
5293 /* All descriptors were empty, we need to process them as a dummy packet */
5294 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
5295 pThis->szPrf, pThis->cbTxAlloc));
5296 return true;
5297 }
5298 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d cbPacket=%d\n",
5299 pThis->szPrf, pThis->cbTxAlloc, cbPacket));
5300 return false;
5301}
5302
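/**
 * Process the descriptors of a single packet from the descriptor cache.
 *
 * Passes descriptors to e1kXmitDesc() one by one, advancing TDH and raising
 * ICR.TXD_LOW when the number of pending descriptors drops below the low
 * threshold, until the end-of-packet descriptor has been processed or an
 * error occurs.
 *
 * @returns VBox status code.
 * @param   pDevIns             The device instance.
 * @param   pThis               The device state structure.
 * @param   fOnWorkerThread     Whether we're on a worker thread or an EMT.
 * @thread  E1000_TX
 */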
5303static int e1kXmitPacket(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5304{
5305 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5306 int rc = VINF_SUCCESS;
5307
5308 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
5309 pThis->szPrf, pThis->iTxDCurrent, pThis->nTxDFetched));
5310
5311 while (pThis->iTxDCurrent < pThis->nTxDFetched)
5312 {
5313 E1KTXDESC *pDesc = &pThis->aTxDescriptors[pThis->iTxDCurrent];
5314 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5315 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
5316 rc = e1kXmitDesc(pDevIns, pThis, pThisCC, pDesc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5317 if (RT_FAILURE(rc))
5318 break;
5319 if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
5320 TDH = 0;
5321 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
5322 if (uLowThreshold != 0 && e1kGetTxLen(pThis) <= uLowThreshold)
5323 {
5324 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5325 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5326 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5327 }
5328 ++pThis->iTxDCurrent;
5329 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
5330 break;
5331 }
5332
5333 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
5334 pThis->szPrf, rc, pThis->iTxDCurrent, pThis->nTxDFetched));
5335 return rc;
5336}
5337
5338#endif /* E1K_WITH_TXD_CACHE */
5339#ifndef E1K_WITH_TXD_CACHE
5340
5341/**
5342 * Transmit pending descriptors.
5343 *
5344 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5345 *
5346 * @param pDevIns The device instance.
5347 * @param pThis The E1000 state.
5348 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5349 */
5350static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5351{
5352 int rc = VINF_SUCCESS;
5353 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5354
5355 /* Check if transmitter is enabled. */
5356 if (!(TCTL & TCTL_EN))
5357 return VINF_SUCCESS;
5358 /*
5359 * Grab the xmit lock of the driver as well as the E1K device state.
5360 */
5361 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5362 if (RT_LIKELY(rc == VINF_SUCCESS))
5363 {
5364 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5365 if (pDrv)
5366 {
5367 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5368 if (RT_FAILURE(rc))
5369 {
5370 e1kCsTxLeave(pThis);
5371 return rc;
5372 }
5373 }
5374 /*
5375 * Process all pending descriptors.
5376 * Note! Do not process descriptors in locked state
5377 */
5378 while (TDH != TDT && !pThis->fLocked)
5379 {
5380 E1KTXDESC desc;
5381 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5382 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
5383
5384 e1kLoadDesc(pDevIns, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
5385 rc = e1kXmitDesc(pDevIns, pThis, pThisCC, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5386 /* If we failed to transmit descriptor we will try it again later */
5387 if (RT_FAILURE(rc))
5388 break;
5389 if (++TDH * sizeof(desc) >= TDLEN)
5390 TDH = 0;
5391
5392 if (e1kGetTxLen(pThis) <= GET_BITS(TXDCTL, LWTHRESH)*8)
5393 {
5394 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5395 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5396 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5397 }
5398
5399 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5400 }
5401
5402 /// @todo uncomment: pThis->uStatIntTXQE++;
5403 /// @todo uncomment: e1kRaiseInterrupt(pDevIns, pThis, ICR_TXQE);
5404 /*
5405 * Release the lock.
5406 */
5407 if (pDrv)
5408 pDrv->pfnEndXmit(pDrv);
5409 e1kCsTxLeave(pThis);
5410 }
5411
5412 return rc;
5413}
5414
5415#else /* E1K_WITH_TXD_CACHE */
5416
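/**
 * Dump the guest's transmit descriptor ring and the internal descriptor cache
 * to the release log. Used for diagnostics when no complete packet can be
 * located in the cache.
 *
 * @param   pDevIns     The device instance.
 * @param   pThis       The device state structure.
 */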
5417static void e1kDumpTxDCache(PPDMDEVINS pDevIns, PE1KSTATE pThis)
5418{
5419 unsigned i, cDescs = TDLEN / sizeof(E1KTXDESC);
5420 uint32_t tdh = TDH;
5421 LogRel(("E1000: -- Transmit Descriptors (%d total) --\n", cDescs));
5422 for (i = 0; i < cDescs; ++i)
5423 {
5424 E1KTXDESC desc;
5425        PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i), &desc, sizeof(desc));
5426 if (i == tdh)
5427 LogRel(("E1000: >>> "));
5428 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5429 }
5430 LogRel(("E1000: -- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5431 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE));
5432 if (tdh > pThis->iTxDCurrent)
5433 tdh -= pThis->iTxDCurrent;
5434 else
5435 tdh = cDescs + tdh - pThis->iTxDCurrent;
5436 for (i = 0; i < pThis->nTxDFetched; ++i)
5437 {
5438 if (i == pThis->iTxDCurrent)
5439 LogRel(("E1000: >>> "));
5440 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
5441 }
5442}
5443
5444/**
5445 * Transmit pending descriptors.
5446 *
5447 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5448 *
5449 * @param pDevIns The device instance.
5450 * @param pThis The E1000 state.
5451 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5452 */
5453static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5454{
5455 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5456 int rc = VINF_SUCCESS;
5457
5458 /* Check if transmitter is enabled. */
5459 if (!(TCTL & TCTL_EN))
5460 return VINF_SUCCESS;
5461 /*
5462 * Grab the xmit lock of the driver as well as the E1K device state.
5463 */
5464 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
5465 if (pDrv)
5466 {
5467 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5468 if (RT_FAILURE(rc))
5469 return rc;
5470 }
5471
5472 /*
5473 * Process all pending descriptors.
5474 * Note! Do not process descriptors in locked state
5475 */
5476 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5477 if (RT_LIKELY(rc == VINF_SUCCESS))
5478 {
5479 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5480 /*
5481 * fIncomplete is set whenever we try to fetch additional descriptors
5482         * for an incomplete packet. If we fail to locate a complete packet on
5483         * the next iteration we need to reset the cache or we risk getting
5484         * stuck in this loop forever.
5485 */
5486 bool fIncomplete = false;
5487 while (!pThis->fLocked && e1kTxDLazyLoad(pDevIns, pThis))
5488 {
5489 while (e1kLocateTxPacket(pThis))
5490 {
5491 fIncomplete = false;
5492 /* Found a complete packet, allocate it. */
5493 rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->fGSO);
5494 /* If we're out of bandwidth we'll come back later. */
5495 if (RT_FAILURE(rc))
5496 goto out;
5497 /* Copy the packet to allocated buffer and send it. */
5498 rc = e1kXmitPacket(pDevIns, pThis, fOnWorkerThread);
5499 /* If we're out of bandwidth we'll come back later. */
5500 if (RT_FAILURE(rc))
5501 goto out;
5502 }
5503 uint8_t u8Remain = pThis->nTxDFetched - pThis->iTxDCurrent;
5504 if (RT_UNLIKELY(fIncomplete))
5505 {
5506 static bool fTxDCacheDumped = false;
5507 /*
5508 * The descriptor cache is full, but we were unable to find
5509 * a complete packet in it. Drop the cache and hope that
5510 * the guest driver can recover from network card error.
5511 */
5512 LogRel(("%s: No complete packets in%s TxD cache! "
5513 "Fetched=%d, current=%d, TX len=%d.\n",
5514 pThis->szPrf,
5515 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5516 pThis->nTxDFetched, pThis->iTxDCurrent,
5517 e1kGetTxLen(pThis)));
5518 if (!fTxDCacheDumped)
5519 {
5520 fTxDCacheDumped = true;
5521 e1kDumpTxDCache(pDevIns, pThis);
5522 }
5523 pThis->iTxDCurrent = pThis->nTxDFetched = 0;
5524 /*
5525 * Returning an error at this point means Guru in R0
5526 * (see @bugref{6428}).
5527 */
5528# ifdef IN_RING3
5529 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5530# else /* !IN_RING3 */
5531 rc = VINF_IOM_R3_MMIO_WRITE;
5532# endif /* !IN_RING3 */
5533 goto out;
5534 }
5535 if (u8Remain > 0)
5536 {
5537 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5538 "%d more are available\n",
5539 pThis->szPrf, pThis->iTxDCurrent, u8Remain,
5540 e1kGetTxLen(pThis) - u8Remain));
5541
5542 /*
5543 * A packet was partially fetched. Move incomplete packet to
5544 * the beginning of cache buffer, then load more descriptors.
5545 */
5546 memmove(pThis->aTxDescriptors,
5547 &pThis->aTxDescriptors[pThis->iTxDCurrent],
5548 u8Remain * sizeof(E1KTXDESC));
5549 pThis->iTxDCurrent = 0;
5550 pThis->nTxDFetched = u8Remain;
5551 e1kTxDLoadMore(pDevIns, pThis);
5552 fIncomplete = true;
5553 }
5554 else
5555 pThis->nTxDFetched = 0;
5556 pThis->iTxDCurrent = 0;
5557 }
5558 if (!pThis->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5559 {
5560 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5561 pThis->szPrf));
5562 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5563 }
5564out:
5565 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5566
5567 /// @todo uncomment: pThis->uStatIntTXQE++;
5568 /// @todo uncomment: e1kRaiseInterrupt(pDevIns, pThis, ICR_TXQE);
5569
5570 e1kCsTxLeave(pThis);
5571 }
5572
5573
5574 /*
5575 * Release the lock.
5576 */
5577 if (pDrv)
5578 pDrv->pfnEndXmit(pDrv);
5579 return rc;
5580}
5581
5582#endif /* E1K_WITH_TXD_CACHE */
5583#ifdef IN_RING3
5584
5585/**
5586 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5587 */
5588static DECLCALLBACK(void) e1kR3NetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5589{
5590 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
5591 PE1KSTATE pThis = pThisCC->pShared;
5592 /* Resume suspended transmission */
5593 STATUS &= ~STATUS_TXOFF;
5594 e1kXmitPending(pThisCC->pDevInsR3, pThis, true /*fOnWorkerThread*/);
5595}
5596
5597/**
5598 * @callback_method_impl{FNPDMTASKDEV,
5599 * Executes e1kXmitPending at the behest of ring-0/raw-mode.}
5600 * @note Not executed on EMT.
5601 */
5602static DECLCALLBACK(void) e1kTxTaskCallback(PPDMDEVINS pDevIns, void *pvUser)
5603{
5604 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5605 E1kLog2(("%s e1kTxTaskCallback:\n", pThis->szPrf));
5606
5607 int rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
5608 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN || rc == VERR_NET_DOWN, ("%Rrc\n", rc));
5609
5610 RT_NOREF(rc, pvUser);
5611}
5612
5613/**
5614 * @callback_method_impl{FNPDMTASKDEV, Handler for the wakeup signaller queue.}
5615 */
5616static DECLCALLBACK(void) e1kCanRxTaskCallback(PPDMDEVINS pDevIns, void *pvUser)
5617{
5618 RT_NOREF(pvUser);
5619 e1kWakeupReceive(pDevIns);
5620}
5621
5622#endif /* IN_RING3 */
5623
5624/**
5625 * Write handler for Transmit Descriptor Tail register.
5626 *
5627 * @param pThis The device state structure.
5628 * @param offset Register offset in memory-mapped frame.
5629 * @param index Register index in register array.
5630 * @param value The value to store.
5632 * @thread EMT
5633 */
5634static int e1kRegWriteTDT(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5635{
5636 int rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
5637
5638 /* All descriptors starting with head and not including tail belong to us. */
5639 /* Process them. */
5640 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5641 pThis->szPrf, TDBAL, TDBAH, TDLEN, TDH, TDT));
5642
5643 /* Ignore TDT writes when the link is down. */
5644 if (TDH != TDT && (STATUS & STATUS_LU))
5645 {
5646 Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", TDH, TDT, e1kGetTxLen(pThis)));
5647 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5648 pThis->szPrf, e1kGetTxLen(pThis)));
5649
5650 /* Transmit pending packets if possible, defer it if we cannot do it
5651 in the current context. */
5652#ifdef E1K_TX_DELAY
5653 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5654 if (RT_LIKELY(rc == VINF_SUCCESS))
5655 {
5656            if (!PDMDevHlpTimerIsActive(pDevIns, pThis->hTXDTimer))
5657 {
5658#ifdef E1K_INT_STATS
5659 pThis->u64ArmedAt = RTTimeNanoTS();
5660#endif
5661 e1kArmTimer(pDevIns, pThis, pThis->hTXDTimer, E1K_TX_DELAY);
5662 }
5663 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayed);
5664 e1kCsTxLeave(pThis);
5665 return rc;
5666 }
5667 /* We failed to enter the TX critical section -- transmit as usual. */
5668#endif /* E1K_TX_DELAY */
5669#ifndef IN_RING3
5670 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5671 if (!pThisCC->CTX_SUFF(pDrv))
5672 PDMDevHlpTaskTrigger(pDevIns, pThis->hTxTask);
5673 else
5674#endif
5675 {
5676 rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
5677 if (rc == VERR_TRY_AGAIN)
5678 rc = VINF_SUCCESS;
5679 else if (rc == VERR_SEM_BUSY)
5680 rc = VINF_IOM_R3_MMIO_WRITE;
5681 AssertRC(rc);
5682 }
5683 }
5684
5685 return rc;
5686}
5687
5688/**
5689 * Write handler for Multicast Table Array registers.
5690 *
5691 * @param pThis The device state structure.
5692 * @param offset Register offset in memory-mapped frame.
5693 * @param index Register index in register array.
5694 * @param value The value to store.
5695 * @thread EMT
5696 */
5697static int e1kRegWriteMTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5698{
5699 RT_NOREF_PV(pDevIns);
5700 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5701 pThis->auMTA[(offset - g_aE1kRegMap[index].offset) / sizeof(pThis->auMTA[0])] = value;
5702
5703 return VINF_SUCCESS;
5704}
5705
5706/**
5707 * Read handler for Multicast Table Array registers.
5708 *
5709 * @returns VBox status code.
5710 *
5711 * @param pThis The device state structure.
5712 * @param offset Register offset in memory-mapped frame.
5713 * @param index Register index in register array.
5714 * @thread EMT
5715 */
5716static int e1kRegReadMTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5717{
5718 RT_NOREF_PV(pDevIns);
5719 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5720 *pu32Value = pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])];
5721
5722 return VINF_SUCCESS;
5723}
5724
5725/**
5726 * Write handler for Receive Address registers.
5727 *
5728 * @param pThis The device state structure.
5729 * @param offset Register offset in memory-mapped frame.
5730 * @param index Register index in register array.
5731 * @param value The value to store.
5732 * @thread EMT
5733 */
5734static int e1kRegWriteRA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5735{
5736 RT_NOREF_PV(pDevIns);
5737 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5738 pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
5739
5740 return VINF_SUCCESS;
5741}
5742
5743/**
5744 * Read handler for Receive Address registers.
5745 *
5746 * @returns VBox status code.
5747 *
5748 * @param pThis The device state structure.
5749 * @param offset Register offset in memory-mapped frame.
5750 * @param index Register index in register array.
5751 * @thread EMT
5752 */
5753static int e1kRegReadRA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5754{
5755 RT_NOREF_PV(pDevIns);
5756    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5757 *pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
5758
5759 return VINF_SUCCESS;
5760}
5761
5762/**
5763 * Write handler for VLAN Filter Table Array registers.
5764 *
5765 * @param pThis The device state structure.
5766 * @param offset Register offset in memory-mapped frame.
5767 * @param index Register index in register array.
5768 * @param value The value to store.
5769 * @thread EMT
5770 */
5771static int e1kRegWriteVFTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5772{
5773 RT_NOREF_PV(pDevIns);
5774 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VINF_SUCCESS);
5775 pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])] = value;
5776
5777 return VINF_SUCCESS;
5778}
5779
5780/**
5781 * Read handler for VLAN Filter Table Array registers.
5782 *
5783 * @returns VBox status code.
5784 *
5785 * @param pThis The device state structure.
5786 * @param offset Register offset in memory-mapped frame.
5787 * @param index Register index in register array.
5788 * @thread EMT
5789 */
5790static int e1kRegReadVFTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5791{
5792 RT_NOREF_PV(pDevIns);
5793    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VERR_DEV_IO_ERROR);
5794 *pu32Value = pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])];
5795
5796 return VINF_SUCCESS;
5797}
5798
5799/**
5800 * Read handler for unimplemented registers.
5801 *
5802 * Merely reports reads from unimplemented registers.
5803 *
5804 * @returns VBox status code.
5805 *
5806 * @param pThis The device state structure.
5807 * @param offset Register offset in memory-mapped frame.
5808 * @param index Register index in register array.
5809 * @thread EMT
5810 */
5811static int e1kRegReadUnimplemented(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5812{
5813 RT_NOREF(pDevIns, pThis, offset, index);
5814 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5815 pThis->szPrf, offset, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5816 *pu32Value = 0;
5817
5818 return VINF_SUCCESS;
5819}
5820
5821/**
5822 * Default register read handler with automatic clear operation.
5823 *
5824 * Retrieves the value of register from register array in device state structure.
5825 * Then resets all bits.
5826 *
5827 * @remarks Masking and shifting of the result is done in the caller.
5829 *
5830 * @returns VBox status code.
5831 *
5832 * @param pThis The device state structure.
5833 * @param offset Register offset in memory-mapped frame.
5834 * @param index Register index in register array.
5835 * @thread EMT
5836 */
5837static int e1kRegReadAutoClear(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5838{
5839 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5840 int rc = e1kRegReadDefault(pDevIns, pThis, offset, index, pu32Value);
5841 pThis->auRegs[index] = 0;
5842
5843 return rc;
5844}
5845
5846/**
5847 * Default register read handler.
5848 *
5849 * Retrieves the value of register from register array in device state structure.
5850 * Bits corresponding to 0s in 'readable' mask will always read as 0s.
5851 *
5852 * @remarks Masking and shifting of the result is done in the caller.
5854 *
5855 * @returns VBox status code.
5856 *
5857 * @param pThis The device state structure.
5858 * @param offset Register offset in memory-mapped frame.
5859 * @param index Register index in register array.
5860 * @thread EMT
5861 */
5862static int e1kRegReadDefault(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5863{
5864 RT_NOREF_PV(pDevIns); RT_NOREF_PV(offset);
5865
5866 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5867 *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
5868
5869 return VINF_SUCCESS;
5870}
5871
5872/**
5873 * Write handler for unimplemented registers.
5874 *
5875 * Merely reports writes to unimplemented registers.
5876 *
5877 * @param pThis The device state structure.
5878 * @param offset Register offset in memory-mapped frame.
5879 * @param index Register index in register array.
5880 * @param value The value to store.
5881 * @thread EMT
5882 */
5883
5884static int e1kRegWriteUnimplemented(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5885{
5886 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
5887
5888 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
5889 pThis->szPrf, offset, value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5890
5891 return VINF_SUCCESS;
5892}
5893
5894/**
5895 * Default register write handler.
5896 *
5897 * Stores the value to the register array in device state structure. Only bits
5898 * corresponding to 1s in the 'writable' mask will be stored.
5899 *
5900 * @returns VBox status code.
5901 *
5902 * @param pThis The device state structure.
5903 * @param offset Register offset in memory-mapped frame.
5904 * @param index Register index in register array.
5905 * @param value The value to store.
5907 * @thread EMT
5908 */
5909
5910static int e1kRegWriteDefault(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5911{
5912 RT_NOREF(pDevIns, offset);
5913
5914 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5915 pThis->auRegs[index] = (value & g_aE1kRegMap[index].writable)
5916 | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
5917
5918 return VINF_SUCCESS;
5919}
5920
5921/**
5922 * Search register table for matching register.
5923 *
5924 * @returns Index in the register table or -1 if not found.
5925 *
5926 * @param offReg Register offset in memory-mapped region.
5927 * @thread EMT
5928 */
5929static int e1kRegLookup(uint32_t offReg)
5930{
5931
5932#if 0
5933 int index;
5934
5935 for (index = 0; index < E1K_NUM_OF_REGS; index++)
5936 {
5937 if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
5938 {
5939 return index;
5940 }
5941 }
5942#else
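    /*
     * The first E1K_NUM_OF_BINARY_SEARCHABLE entries of g_aE1kRegMap are
     * assumed to be sorted by offset, so they can be binary searched; the
     * remaining entries are scanned linearly afterwards.
     */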
5943 int iStart = 0;
5944 int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
5945 for (;;)
5946 {
5947 int i = (iEnd - iStart) / 2 + iStart;
5948 uint32_t offCur = g_aE1kRegMap[i].offset;
5949 if (offReg < offCur)
5950 {
5951 if (i == iStart)
5952 break;
5953 iEnd = i;
5954 }
5955 else if (offReg >= offCur + g_aE1kRegMap[i].size)
5956 {
5957 i++;
5958 if (i == iEnd)
5959 break;
5960 iStart = i;
5961 }
5962 else
5963 return i;
5964 Assert(iEnd > iStart);
5965 }
5966
5967 for (unsigned i = E1K_NUM_OF_BINARY_SEARCHABLE; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5968 if (offReg - g_aE1kRegMap[i].offset < g_aE1kRegMap[i].size)
5969 return i;
5970
5971# ifdef VBOX_STRICT
5972 for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5973 Assert(offReg - g_aE1kRegMap[i].offset >= g_aE1kRegMap[i].size);
5974# endif
5975
5976#endif
5977
5978 return -1;
5979}
5980
5981/**
5982 * Handle unaligned register read operation.
5983 *
5984 * Looks up and calls appropriate handler.
5985 *
5986 * @returns VBox status code.
5987 *
5988 * @param pDevIns The device instance.
5989 * @param pThis The device state structure.
5990 * @param offReg Register offset in memory-mapped frame.
5991 * @param pv Where to store the result.
5992 * @param cb Number of bytes to read.
5993 * @thread EMT
5994 * @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
5995 * accesses we have to take care of that ourselves.
5996 */
5997static int e1kRegReadUnaligned(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, void *pv, uint32_t cb)
5998{
5999 uint32_t u32 = 0;
6000 uint32_t shift;
6001 int rc = VINF_SUCCESS;
6002 int index = e1kRegLookup(offReg);
6003#ifdef LOG_ENABLED
6004 char buf[9];
6005#endif
6006
6007 /*
6008 * From the spec:
6009 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
6010 * double word) is ignored. Partial reads return all 32 bits of data regardless of the byte enables.
6011 */
6012
6013 /*
6014     * To be able to read bytes and short words we convert them to properly
6015 * shifted 32-bit words and masks. The idea is to keep register-specific
6016 * handlers simple. Most accesses will be 32-bit anyway.
6017 */
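    /*
     * For example, a single byte read at register offset + 2 gives cb=1,
     * mask=0x000000FF and shift=16: we read the whole 32-bit register below,
     * keep bits 23:16 via the shifted mask 0x00FF0000 and shift the result
     * back down before returning it.
     */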
6018 uint32_t mask;
6019 switch (cb)
6020 {
6021 case 4: mask = 0xFFFFFFFF; break;
6022 case 2: mask = 0x0000FFFF; break;
6023 case 1: mask = 0x000000FF; break;
6024 default:
6025 return PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "unsupported op size: offset=%#10x cb=%#10x\n", offReg, cb);
6026 }
6027 if (index != -1)
6028 {
6029 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6030 if (g_aE1kRegMap[index].readable)
6031 {
6032 /* Make the mask correspond to the bits we are about to read. */
6033 shift = (offReg - g_aE1kRegMap[index].offset) % sizeof(uint32_t) * 8;
6034 mask <<= shift;
6035 if (!mask)
6036 return PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
6037 /*
6038 * Read it. Pass the mask so the handler knows what has to be read.
6039 * Mask out irrelevant bits.
6040 */
6041 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
6042 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6043 return rc;
6044 //pThis->fDelayInts = false;
6045 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6046 //pThis->iStatIntLostOne = 0;
6047 rc = g_aE1kRegMap[index].pfnRead(pDevIns, pThis, offReg & 0xFFFFFFFC, index, &u32);
6048 u32 &= mask;
6049 //e1kCsLeave(pThis);
6050 E1kLog2(("%s At %08X read %s from %s (%s)\n",
6051 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6052 Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
6053 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6054 /* Shift back the result. */
6055 u32 >>= shift;
6056 }
6057 else
6058 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
6059 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6060 if (IOM_SUCCESS(rc))
6061 STAM_COUNTER_INC(&pThis->aStatRegReads[index]);
6062 }
6063 else
6064 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
6065 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf)));
6066
6067 memcpy(pv, &u32, cb);
6068 return rc;
6069}
6070
6071/**
6072 * Handle 4 byte aligned and sized read operation.
6073 *
6074 * Looks up and calls appropriate handler.
6075 *
6076 * @returns VBox status code.
6077 *
6078 * @param pDevIns The device instance.
6079 * @param pThis The device state structure.
6080 * @param offReg Register offset in memory-mapped frame.
6081 * @param pu32 Where to store the result.
6082 * @thread EMT
6083 */
6084static VBOXSTRICTRC e1kRegReadAlignedU32(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, uint32_t *pu32)
6085{
6086 Assert(!(offReg & 3));
6087
6088 /*
6089 * Lookup the register and check that it's readable.
6090 */
6091 VBOXSTRICTRC rc = VINF_SUCCESS;
6092 int idxReg = e1kRegLookup(offReg);
6093 if (RT_LIKELY(idxReg != -1))
6094 {
6095 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6096        if (RT_LIKELY(g_aE1kRegMap[idxReg].readable))
6097 {
6098 /*
6099 * Read it. Pass the mask so the handler knows what has to be read.
6100 * Mask out irrelevant bits.
6101 */
6102 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
6103 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
6104 // return rc;
6105 //pThis->fDelayInts = false;
6106 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6107 //pThis->iStatIntLostOne = 0;
6108 rc = g_aE1kRegMap[idxReg].pfnRead(pDevIns, pThis, offReg & 0xFFFFFFFC, idxReg, pu32);
6109 //e1kCsLeave(pThis);
6110 Log6(("%s At %08X read %08X from %s (%s)\n",
6111 pThis->szPrf, offReg, *pu32, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
6112 if (IOM_SUCCESS(rc))
6113 STAM_COUNTER_INC(&pThis->aStatRegReads[idxReg]);
6114 }
6115 else
6116 E1kLog(("%s At %08X read attempt from non-readable register %s (%s)\n",
6117 pThis->szPrf, offReg, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
6118 }
6119 else
6120 E1kLog(("%s At %08X read attempt from non-existing register\n", pThis->szPrf, offReg));
6121 return rc;
6122}
6123
6124/**
6125 * Handle 4 byte sized and aligned register write operation.
6126 *
6127 * Looks up and calls appropriate handler.
6128 *
6129 * @returns VBox status code.
6130 *
6131 * @param pDevIns The device instance.
6132 * @param pThis The device state structure.
6133 * @param offReg Register offset in memory-mapped frame.
6134 * @param u32Value The value to write.
6135 * @thread EMT
6136 */
6137static VBOXSTRICTRC e1kRegWriteAlignedU32(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
6138{
6139 VBOXSTRICTRC rc = VINF_SUCCESS;
6140 int index = e1kRegLookup(offReg);
6141 if (RT_LIKELY(index != -1))
6142 {
6143 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6144 if (RT_LIKELY(g_aE1kRegMap[index].writable))
6145 {
6146 /*
6147 * Write it. Pass the mask so the handler knows what has to be written.
6148 * Mask out irrelevant bits.
6149 */
6150 Log6(("%s At %08X write %08X to %s (%s)\n",
6151 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6152 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
6153 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
6154 // return rc;
6155 //pThis->fDelayInts = false;
6156 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6157 //pThis->iStatIntLostOne = 0;
6158 rc = g_aE1kRegMap[index].pfnWrite(pDevIns, pThis, offReg, index, u32Value);
6159 //e1kCsLeave(pThis);
6160 }
6161 else
6162 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
6163 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6164 if (IOM_SUCCESS(rc))
6165 STAM_COUNTER_INC(&pThis->aStatRegWrites[index]);
6166 }
6167 else
6168 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
6169 pThis->szPrf, offReg, u32Value));
6170 return rc;
6171}
6172
6173
6174/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
6175
6176/**
6177 * @callback_method_impl{FNIOMMMIONEWREAD}
6178 */
6179static DECLCALLBACK(VBOXSTRICTRC) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, uint32_t cb)
6180{
6181 RT_NOREF2(pvUser, cb);
6182 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6183 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6184
6185 Assert(off < E1K_MM_SIZE);
6186 Assert(cb == 4);
6187 Assert(!(off & 3));
6188
6189 VBOXSTRICTRC rcStrict = e1kRegReadAlignedU32(pDevIns, pThis, (uint32_t)off, (uint32_t *)pv);
6190
6191 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6192 return rcStrict;
6193}
6194
6195/**
6196 * @callback_method_impl{FNIOMMMIONEWWRITE}
6197 */
6198static DECLCALLBACK(VBOXSTRICTRC) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, uint32_t cb)
6199{
6200 RT_NOREF2(pvUser, cb);
6201 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6202 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6203
6204 Assert(off < E1K_MM_SIZE);
6205 Assert(cb == 4);
6206 Assert(!(off & 3));
6207
6208 VBOXSTRICTRC rcStrict = e1kRegWriteAlignedU32(pDevIns, pThis, (uint32_t)off, *(uint32_t const *)pv);
6209
6210 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6211 return rcStrict;
6212}
6213
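/*
 * Note: the legacy I/O space is a two-register window. A write to IOADDR
 * (offset 0x00) selects a device register by its MMIO offset, and a following
 * IODATA (offset 0x04) access reads or writes the selected register. A guest
 * driver would use the window roughly like this (illustrative pseudo-code
 * only, not taken from any actual driver):
 *
 *     out32(io_base + 0x00, reg_offset);  // select register via IOADDR
 *     value = in32(io_base + 0x04);       // read it back via IODATA
 */
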
6214/**
6215 * @callback_method_impl{FNIOMIOPORTNEWIN}
6216 */
6217static DECLCALLBACK(VBOXSTRICTRC) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
6218{
6219 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6220 VBOXSTRICTRC rc;
6221 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIORead), a);
6222 RT_NOREF_PV(pvUser);
6223
6224 if (RT_LIKELY(cb == 4))
6225 switch (offPort)
6226 {
6227 case 0x00: /* IOADDR */
6228 *pu32 = pThis->uSelectedReg;
6229 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6230 rc = VINF_SUCCESS;
6231 break;
6232
6233 case 0x04: /* IODATA */
6234 if (!(pThis->uSelectedReg & 3))
6235 rc = e1kRegReadAlignedU32(pDevIns, pThis, pThis->uSelectedReg, pu32);
6236 else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
6237 rc = e1kRegReadUnaligned(pDevIns, pThis, pThis->uSelectedReg, pu32, cb);
6238 if (rc == VINF_IOM_R3_MMIO_READ)
6239 rc = VINF_IOM_R3_IOPORT_READ;
6240 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6241 break;
6242
6243 default:
6244 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", pThis->szPrf, offPort));
6245 /** @todo r=bird: Check what real hardware returns here. */
6246 //rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
6247 rc = VINF_IOM_MMIO_UNUSED_00; /* used to return VINF_SUCCESS and not touch *pu32, which amounted to this. */
6248 break;
6249 }
6250 else
6251 {
6252 E1kLog(("%s e1kIOPortIn: invalid op size: offPort=%RTiop cb=%08x", pThis->szPrf, offPort, cb));
6253 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb);
6254 *pu32 = 0; /** @todo r=bird: Check what real hardware returns here. (Didn't used to set a value here, picked zero as that's what we'd end up in most cases.) */
6255 }
6256 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIORead), a);
6257 return rc;
6258}
6259
6260
6261/**
6262 * @callback_method_impl{FNIOMIOPORTNEWOUT}
6263 */
6264static DECLCALLBACK(VBOXSTRICTRC) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
6265{
6266 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6267 VBOXSTRICTRC rc;
6268 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6269 RT_NOREF_PV(pvUser);
6270
6271 E1kLog2(("%s e1kIOPortOut: offPort=%RTiop value=%08x\n", pThis->szPrf, offPort, u32));
6272 if (RT_LIKELY(cb == 4))
6273 {
6274 switch (offPort)
6275 {
6276 case 0x00: /* IOADDR */
6277 pThis->uSelectedReg = u32;
6278 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
6279 rc = VINF_SUCCESS;
6280 break;
6281
6282 case 0x04: /* IODATA */
6283 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
6284 if (RT_LIKELY(!(pThis->uSelectedReg & 3)))
6285 {
6286 rc = e1kRegWriteAlignedU32(pDevIns, pThis, pThis->uSelectedReg, u32);
6287 if (rc == VINF_IOM_R3_MMIO_WRITE)
6288 rc = VINF_IOM_R3_IOPORT_WRITE;
6289 }
6290 else
6291 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
6292 "Spec violation: misaligned offset: %#10x, ignored.\n", pThis->uSelectedReg);
6293 break;
6294
6295 default:
6296 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", pThis->szPrf, offPort));
6297 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", offPort);
6298 }
6299 }
6300 else
6301 {
6302 E1kLog(("%s e1kIOPortOut: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb));
6303 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s: invalid op size: offPort=%RTiop cb=%#x\n", pThis->szPrf, offPort, cb);
6304 }
6305
6306 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6307 return rc;
6308}
6309
6310#ifdef IN_RING3
6311
6312/**
6313 * Dump complete device state to log.
6314 *
6315 * @param pThis Pointer to device state.
6316 */
6317static void e1kDumpState(PE1KSTATE pThis)
6318{
6319 RT_NOREF(pThis);
6320 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6321 E1kLog2(("%s: %8.8s = %08x\n", pThis->szPrf, g_aE1kRegMap[i].abbrev, pThis->auRegs[i]));
6322# ifdef E1K_INT_STATS
6323 LogRel(("%s: Interrupt attempts: %d\n", pThis->szPrf, pThis->uStatIntTry));
6324 LogRel(("%s: Interrupts raised : %d\n", pThis->szPrf, pThis->uStatInt));
6325 LogRel(("%s: Interrupts lowered: %d\n", pThis->szPrf, pThis->uStatIntLower));
6326 LogRel(("%s: ICR outside ISR : %d\n", pThis->szPrf, pThis->uStatNoIntICR));
6327 LogRel(("%s: IMS raised ints : %d\n", pThis->szPrf, pThis->uStatIntIMS));
6328 LogRel(("%s: Interrupts skipped: %d\n", pThis->szPrf, pThis->uStatIntSkip));
6329 LogRel(("%s: Masked interrupts : %d\n", pThis->szPrf, pThis->uStatIntMasked));
6330 LogRel(("%s: Early interrupts : %d\n", pThis->szPrf, pThis->uStatIntEarly));
6331 LogRel(("%s: Late interrupts : %d\n", pThis->szPrf, pThis->uStatIntLate));
6332 LogRel(("%s: Lost interrupts : %d\n", pThis->szPrf, pThis->iStatIntLost));
6333 LogRel(("%s: Interrupts by RX : %d\n", pThis->szPrf, pThis->uStatIntRx));
6334 LogRel(("%s: Interrupts by TX : %d\n", pThis->szPrf, pThis->uStatIntTx));
6335 LogRel(("%s: Interrupts by ICS : %d\n", pThis->szPrf, pThis->uStatIntICS));
6336 LogRel(("%s: Interrupts by RDTR: %d\n", pThis->szPrf, pThis->uStatIntRDTR));
6337 LogRel(("%s: Interrupts by RDMT: %d\n", pThis->szPrf, pThis->uStatIntRXDMT0));
6338 LogRel(("%s: Interrupts by TXQE: %d\n", pThis->szPrf, pThis->uStatIntTXQE));
6339 LogRel(("%s: TX int delay asked: %d\n", pThis->szPrf, pThis->uStatTxIDE));
6340 LogRel(("%s: TX delayed: %d\n", pThis->szPrf, pThis->uStatTxDelayed));
6341 LogRel(("%s: TX delay expired: %d\n", pThis->szPrf, pThis->uStatTxDelayExp));
6342 LogRel(("%s: TX no report asked: %d\n", pThis->szPrf, pThis->uStatTxNoRS));
6343 LogRel(("%s: TX abs timer expd : %d\n", pThis->szPrf, pThis->uStatTAD));
6344 LogRel(("%s: TX int timer expd : %d\n", pThis->szPrf, pThis->uStatTID));
6345 LogRel(("%s: RX abs timer expd : %d\n", pThis->szPrf, pThis->uStatRAD));
6346 LogRel(("%s: RX int timer expd : %d\n", pThis->szPrf, pThis->uStatRID));
6347 LogRel(("%s: TX CTX descriptors: %d\n", pThis->szPrf, pThis->uStatDescCtx));
6348 LogRel(("%s: TX DAT descriptors: %d\n", pThis->szPrf, pThis->uStatDescDat));
6349 LogRel(("%s: TX LEG descriptors: %d\n", pThis->szPrf, pThis->uStatDescLeg));
6350 LogRel(("%s: Received frames : %d\n", pThis->szPrf, pThis->uStatRxFrm));
6351 LogRel(("%s: Transmitted frames: %d\n", pThis->szPrf, pThis->uStatTxFrm));
6352 LogRel(("%s: TX frames up to 1514: %d\n", pThis->szPrf, pThis->uStatTx1514));
6353 LogRel(("%s: TX frames up to 2962: %d\n", pThis->szPrf, pThis->uStatTx2962));
6354 LogRel(("%s: TX frames up to 4410: %d\n", pThis->szPrf, pThis->uStatTx4410));
6355 LogRel(("%s: TX frames up to 5858: %d\n", pThis->szPrf, pThis->uStatTx5858));
6356 LogRel(("%s: TX frames up to 7306: %d\n", pThis->szPrf, pThis->uStatTx7306));
6357 LogRel(("%s: TX frames up to 8754: %d\n", pThis->szPrf, pThis->uStatTx8754));
6358 LogRel(("%s: TX frames up to 16384: %d\n", pThis->szPrf, pThis->uStatTx16384));
6359 LogRel(("%s: TX frames up to 32768: %d\n", pThis->szPrf, pThis->uStatTx32768));
6360 LogRel(("%s: Larger TX frames : %d\n", pThis->szPrf, pThis->uStatTxLarge));
6361 LogRel(("%s: Max TX Delay : %lld\n", pThis->szPrf, pThis->uStatMaxTxDelay));
6362# endif /* E1K_INT_STATS */
6363}
6364
6365/**
6366 * @callback_method_impl{FNPCIIOREGIONMAP}
6367 *
6368 * @todo Can remove this one later, it's realy just here for taking down
6369 * addresses for e1kInfo(), an alignment assertion and sentimentality.
6370 */
6371static DECLCALLBACK(int) e1kR3Map(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
6372 RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
6373{
6374 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6375 E1kLog(("%s e1kR3Map: iRegion=%u GCPhysAddress=%RGp\n", pThis->szPrf, iRegion, GCPhysAddress));
6376 RT_NOREF(pPciDev, iRegion, cb);
6377 Assert(pPciDev == pDevIns->apPciDevs[0]);
6378
6379 switch (enmType)
6380 {
6381 case PCI_ADDRESS_SPACE_IO:
6382 pThis->IOPortBase = (RTIOPORT)GCPhysAddress;
6383 break;
6384
6385 case PCI_ADDRESS_SPACE_MEM:
6386 pThis->addrMMReg = GCPhysAddress;
6387 Assert(!(GCPhysAddress & 7) || GCPhysAddress == NIL_RTGCPHYS);
6388 break;
6389
6390 default:
6391 /* We should never get here */
6392 AssertMsgFailedReturn(("Invalid PCI address space param in map callback"), VERR_INTERNAL_ERROR);
6393 }
6394 return VINF_SUCCESS;
6395}
6396
6397
6398/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
6399
6400/**
6401 * Check if the device can receive data now.
6402 * This must be called before the pfnReceive() method is called.
6403 *
6404 * @returns VBox status code: VINF_SUCCESS if the device can receive, VERR_NET_NO_BUFFER_SPACE if no receive buffers are available.
6405 * @param pDevIns The device instance.
6406 * @param pThis The instance data.
6407 * @thread EMT
6408 */
6409static int e1kCanReceive(PPDMDEVINS pDevIns, PE1KSTATE pThis)
6410{
6411#ifndef E1K_WITH_RXD_CACHE
6412 size_t cb;
6413
6414 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6415 return VERR_NET_NO_BUFFER_SPACE;
6416
6417 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6418 {
6419 E1KRXDESC desc;
6420 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
6421 if (desc.status.fDD)
6422 cb = 0;
6423 else
6424 cb = pThis->u16RxBSize;
6425 }
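    /*
     * Otherwise estimate the available space from the ring occupancy: count the
     * descriptors between head (RDH) and tail (RDT), wrapping around the end of
     * the ring. RDH == RDT means the guest has provided no free buffers.
     */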
6426 else if (RDH < RDT)
6427 cb = (RDT - RDH) * pThis->u16RxBSize;
6428 else if (RDH > RDT)
6429 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pThis->u16RxBSize;
6430 else
6431 {
6432 cb = 0;
6433 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6434 }
6435 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6436 pThis->szPrf, RDH, RDT, RDLEN, pThis->u16RxBSize, cb));
6437
6438 e1kCsRxLeave(pThis);
6439 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6440#else /* E1K_WITH_RXD_CACHE */
6441 int rc = VINF_SUCCESS;
6442
6443 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6444 return VERR_NET_NO_BUFFER_SPACE;
6445
6446 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6447 {
6448 E1KRXDESC desc;
6449 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
6450 if (desc.status.fDD)
6451 rc = VERR_NET_NO_BUFFER_SPACE;
6452 }
6453 else if (e1kRxDIsCacheEmpty(pThis) && RDH == RDT)
6454 {
6455 /* Cache is empty, so is the RX ring. */
6456 rc = VERR_NET_NO_BUFFER_SPACE;
6457 }
6458 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
6459 " u16RxBSize=%d rc=%Rrc\n", pThis->szPrf,
6460 e1kRxDInCache(pThis), RDH, RDT, RDLEN, pThis->u16RxBSize, rc));
6461
6462 e1kCsRxLeave(pThis);
6463 return rc;
6464#endif /* E1K_WITH_RXD_CACHE */
6465}
6466
6467/**
6468 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6469 */
6470static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6471{
6472 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
6473 PE1KSTATE pThis = pThisCC->pShared;
6474 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6475
6476 int rc = e1kCanReceive(pDevIns, pThis);
6477
6478 if (RT_SUCCESS(rc))
6479 return VINF_SUCCESS;
6480 if (RT_UNLIKELY(cMillies == 0))
6481 return VERR_NET_NO_BUFFER_SPACE;
6482
6483 rc = VERR_INTERRUPTED;
6484 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, true);
6485 STAM_PROFILE_START(&pThis->StatRxOverflow, a);
6486 VMSTATE enmVMState;
6487 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pDevIns)) == VMSTATE_RUNNING
6488 || enmVMState == VMSTATE_RUNNING_LS))
6489 {
6490 int rc2 = e1kCanReceive(pDevIns, pThis);
6491 if (RT_SUCCESS(rc2))
6492 {
6493 rc = VINF_SUCCESS;
6494 break;
6495 }
6496 E1kLogRel(("E1000: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6497 E1kLog(("%s: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", pThis->szPrf, cMillies));
6498 RTSemEventWait(pThis->hEventMoreRxDescAvail, cMillies);
6499 }
6500 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
6501 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, false);
6502
6503 return rc;
6504}
6505
6506
6507/**
6508 * Matches the packet addresses against Receive Address table. Looks for
6509 * exact matches only.
6510 *
6511 * @returns true if address matches.
6512 * @param pThis Pointer to the state structure.
6513 * @param pvBuf The ethernet packet.
6515 * @thread EMT
6516 */
6517static bool e1kPerfectMatch(PE1KSTATE pThis, const void *pvBuf)
6518{
6519 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
6520 {
6521 E1KRAELEM* ra = pThis->aRecAddr.array + i;
6522
6523 /* Valid address? */
6524 if (ra->ctl & RA_CTL_AV)
6525 {
6526 Assert((ra->ctl & RA_CTL_AS) < 2);
6527 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6528 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6529 // pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6530 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6531 /*
6532 * Address Select:
6533 * 00b = Destination address
6534 * 01b = Source address
6535 * 10b = Reserved
6536 * 11b = Reserved
6537 * Since ethernet header is (DA, SA, len) we can use address
6538 * select as index.
6539 */
6540 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6541 ra->addr, sizeof(ra->addr)) == 0)
6542 return true;
6543 }
6544 }
6545
6546 return false;
6547}
6548
6549/**
6550 * Matches the packet addresses against Multicast Table Array.
6551 *
6552 * @remarks This is an imperfect match since it matches not an exact address
6553 * but a whole subset of addresses (those that hash to the same MTA bit).
6554 *
6555 * @returns true if address matches.
6556 * @param pThis Pointer to the state structure.
6557 * @param pvBuf The ethernet packet.
6559 * @thread EMT
6560 */
6561static bool e1kImperfectMatch(PE1KSTATE pThis, const void *pvBuf)
6562{
6563 /* Get bits 32..47 of destination address */
6564 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6565
6566 unsigned offset = GET_BITS(RCTL, MO);
6567 /*
6568 * offset means:
6569 * 00b = bits 36..47
6570 * 01b = bits 35..46
6571 * 10b = bits 34..45
6572 * 11b = bits 32..43
6573 */
6574 if (offset < 3)
6575 u16Bit = u16Bit >> (4 - offset);
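    /* The low 12 bits of the result index one of the 4096 bits in the Multicast Table Array (auMTA). */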
6576 return ASMBitTest(pThis->auMTA, u16Bit & 0xFFF);
6577}
6578
6579/**
6580 * Determines if the packet is to be delivered to upper layer.
6581 *
6582 * The following filters are supported:
6583 * - Exact Unicast/Multicast
6584 * - Promiscuous Unicast/Multicast
6585 * - Multicast
6586 * - VLAN
6587 *
6588 * @returns true if packet is intended for this node.
6589 * @param pThis Pointer to the state structure.
6590 * @param pvBuf The ethernet packet.
6591 * @param cb Number of bytes available in the packet.
6592 * @param pStatus Bit field to store status bits.
6593 * @thread EMT
6594 */
6595static bool e1kAddressFilter(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6596{
6597 Assert(cb > 14);
6598 /* Assume that we fail to pass exact filter. */
6599 pStatus->fPIF = false;
6600 pStatus->fVP = false;
6601 /* Discard oversized packets */
6602 if (cb > E1K_MAX_RX_PKT_SIZE)
6603 {
6604 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6605 pThis->szPrf, cb, E1K_MAX_RX_PKT_SIZE));
6606 E1K_INC_CNT32(ROC);
6607 return false;
6608 }
6609 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6610 {
6611 /* When long packet reception is disabled, packets longer than 1522 bytes are discarded. */
6612 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6613 pThis->szPrf, cb));
6614 E1K_INC_CNT32(ROC);
6615 return false;
6616 }
6617
6618 uint16_t *u16Ptr = (uint16_t*)pvBuf;
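    /* u16Ptr[6] is the EtherType/TPID field at byte offset 12 of the frame;
     * for 802.1Q-tagged frames u16Ptr[7] holds the tag control information (TCI). */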
6619 /* Compare TPID with VLAN Ether Type */
6620 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6621 {
6622 pStatus->fVP = true;
6623 /* Is VLAN filtering enabled? */
6624 if (RCTL & RCTL_VFE)
6625 {
6626 /* It is 802.1q packet indeed, let's filter by VID */
6627 if (RCTL & RCTL_CFIEN)
6628 {
6629 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", pThis->szPrf,
6630 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6631 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6632 !!(RCTL & RCTL_CFI)));
6633 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6634 {
6635 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6636 pThis->szPrf, E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6637 return false;
6638 }
6639 }
6640 else
6641 E1kLog3(("%s VLAN filter: VLAN=%d\n", pThis->szPrf,
6642 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6643 if (!ASMBitTest(pThis->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6644 {
6645 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6646 pThis->szPrf, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6647 return false;
6648 }
6649 }
6650 }
6651 /* Broadcast filtering */
6652 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6653 return true;
6654 E1kLog2(("%s Packet filter: not a broadcast\n", pThis->szPrf));
6655 if (e1kIsMulticast(pvBuf))
6656 {
6657 /* Is multicast promiscuous enabled? */
6658 if (RCTL & RCTL_MPE)
6659 return true;
6660 E1kLog2(("%s Packet filter: no promiscuous multicast\n", pThis->szPrf));
6661 /* Try perfect matches first */
6662 if (e1kPerfectMatch(pThis, pvBuf))
6663 {
6664 pStatus->fPIF = true;
6665 return true;
6666 }
6667 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6668 if (e1kImperfectMatch(pThis, pvBuf))
6669 return true;
6670 E1kLog2(("%s Packet filter: no imperfect match\n", pThis->szPrf));
6671 }
6672 else {
6673 /* Is unicast promiscuous enabled? */
6674 if (RCTL & RCTL_UPE)
6675 return true;
6676 E1kLog2(("%s Packet filter: no promiscuous unicast\n", pThis->szPrf));
6677 if (e1kPerfectMatch(pThis, pvBuf))
6678 {
6679 pStatus->fPIF = true;
6680 return true;
6681 }
6682 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6683 }
6684 E1kLog2(("%s Packet filter: packet discarded\n", pThis->szPrf));
6685 return false;
6686}
6687
6688/**
6689 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6690 */
6691static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6692{
6693 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
6694 PE1KSTATE pThis = pThisCC->pShared;
6695 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6696 int rc = VINF_SUCCESS;
6697
6698 /*
6699 * Drop packets if the VM is not running yet/anymore.
6700 */
6701 VMSTATE enmVMState = PDMDevHlpVMState(pDevIns);
6702 if ( enmVMState != VMSTATE_RUNNING
6703 && enmVMState != VMSTATE_RUNNING_LS)
6704 {
6705 E1kLog(("%s Dropping incoming packet as VM is not running.\n", pThis->szPrf));
6706 return VINF_SUCCESS;
6707 }
6708
6709 /* Discard incoming packets in locked state */
6710 if (!(RCTL & RCTL_EN) || pThis->fLocked || !(STATUS & STATUS_LU))
6711 {
6712 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", pThis->szPrf));
6713 return VINF_SUCCESS;
6714 }
6715
6716 STAM_PROFILE_ADV_START(&pThis->StatReceive, a);
6717
6718 //if (!e1kCsEnter(pThis, RT_SRC_POS))
6719 // return VERR_PERMISSION_DENIED;
6720
6721 e1kPacketDump(pThis, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6722
6723 /* Update stats */
6724 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
6725 {
6726 E1K_INC_CNT32(TPR);
6727 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6728 e1kCsLeave(pThis);
6729 }
6730 STAM_PROFILE_ADV_START(&pThis->StatReceiveFilter, a);
6731 E1KRXDST status;
6732 RT_ZERO(status);
6733 bool fPassed = e1kAddressFilter(pThis, pvBuf, cb, &status);
6734 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveFilter, a);
6735 if (fPassed)
6736 {
6737 rc = e1kHandleRxPacket(pDevIns, pThis, pvBuf, cb, status);
6738 }
6739 //e1kCsLeave(pThis);
6740 STAM_PROFILE_ADV_STOP(&pThis->StatReceive, a);
6741
6742 return rc;
6743}
6744
6745
6746/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
6747
6748/**
6749 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
6750 */
6751static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6752{
6753 if (iLUN == 0)
6754 {
6755 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, ILeds);
6756 *ppLed = &pThisCC->pShared->led;
6757 return VINF_SUCCESS;
6758 }
6759 return VERR_PDM_LUN_NOT_FOUND;
6760}
6761
6762
6763/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
6764
6765/**
6766 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
6767 */
6768static DECLCALLBACK(int) e1kR3GetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6769{
6770 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
6771 pThisCC->eeprom.getMac(pMac);
6772 return VINF_SUCCESS;
6773}
6774
6775/**
6776 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
6777 */
6778static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kR3GetLinkState(PPDMINETWORKCONFIG pInterface)
6779{
6780 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
6781 PE1KSTATE pThis = pThisCC->pShared;
6782 if (STATUS & STATUS_LU)
6783 return PDMNETWORKLINKSTATE_UP;
6784 return PDMNETWORKLINKSTATE_DOWN;
6785}
6786
6787/**
6788 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
6789 */
6790static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6791{
6792 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
6793 PE1KSTATE pThis = pThisCC->pShared;
6794 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6795
6796 E1kLog(("%s e1kR3SetLinkState: enmState=%d\n", pThis->szPrf, enmState));
6797 switch (enmState)
6798 {
6799 case PDMNETWORKLINKSTATE_UP:
6800 pThis->fCableConnected = true;
6801 /* If link was down, bring it up after a while. */
6802 if (!(STATUS & STATUS_LU))
6803 e1kBringLinkUpDelayed(pDevIns, pThis);
6804 break;
6805 case PDMNETWORKLINKSTATE_DOWN:
6806 pThis->fCableConnected = false;
6807 /* Always set the phy link state to down, regardless of the STATUS_LU bit.
6808 * We might have to set the link state before the driver initializes us. */
6809 Phy::setLinkStatus(&pThis->phy, false);
6810 /* If link was up, bring it down. */
6811 if (STATUS & STATUS_LU)
6812 e1kR3LinkDown(pDevIns, pThis, pThisCC);
6813 break;
6814 case PDMNETWORKLINKSTATE_DOWN_RESUME:
6815 /*
6816 * There is not much sense in bringing down the link if it has not come up yet.
6817 * If it is up though, we bring it down temporarily, then bring it up again.
6818 */
6819 if (STATUS & STATUS_LU)
6820 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
6821 break;
6822 default:
6823 ;
6824 }
6825 return VINF_SUCCESS;
6826}
6827
6828
6829/* -=-=-=-=- PDMIBASE -=-=-=-=- */
6830
6831/**
6832 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6833 */
6834static DECLCALLBACK(void *) e1kR3QueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6835{
6836 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, IBase);
6837 Assert(&pThisCC->IBase == pInterface);
6838
6839 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThisCC->IBase);
6840 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThisCC->INetworkDown);
6841 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThisCC->INetworkConfig);
6842 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThisCC->ILeds);
6843 return NULL;
6844}
6845
6846
6847/* -=-=-=-=- Saved State -=-=-=-=- */
6848
6849/**
6850 * Saves the configuration.
6851 *
6852 * @param pThis The E1K state.
6853 * @param pSSM The handle to the saved state.
6854 */
6855static void e1kSaveConfig(PE1KSTATE pThis, PSSMHANDLE pSSM)
6856{
6857 SSMR3PutMem(pSSM, &pThis->macConfigured, sizeof(pThis->macConfigured));
6858 SSMR3PutU32(pSSM, pThis->eChip);
6859}
6860
6861/**
6862 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
6863 */
6864static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
6865{
6866 RT_NOREF(uPass);
6867 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6868 e1kSaveConfig(pThis, pSSM);
6869 return VINF_SSM_DONT_CALL_AGAIN;
6870}
6871
6872/**
6873 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
6874 */
6875static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6876{
6877 RT_NOREF(pSSM);
6878 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6879
6880 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6881 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6882 return rc;
6883 e1kCsLeave(pThis);
6884 return VINF_SUCCESS;
6885#if 0
6886 /* 1) Prevent all threads from modifying the state and memory */
6887 //pThis->fLocked = true;
6888 /* 2) Cancel all timers */
6889#ifdef E1K_TX_DELAY
6890 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
6891#endif /* E1K_TX_DELAY */
6892//#ifdef E1K_USE_TX_TIMERS
6893 if (pThis->fTidEnabled)
6894 {
6895 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
6896#ifndef E1K_NO_TAD
6897 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
6898#endif /* E1K_NO_TAD */
6899 }
6900//#endif /* E1K_USE_TX_TIMERS */
6901#ifdef E1K_USE_RX_TIMERS
6902 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
6903 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
6904#endif /* E1K_USE_RX_TIMERS */
6905 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
6906 /* 3) Did I forget anything? */
6907 E1kLog(("%s Locked\n", pThis->szPrf));
6908 return VINF_SUCCESS;
6909#endif
6910}
6911
6912/**
6913 * @callback_method_impl{FNSSMDEVSAVEEXEC}
6914 */
6915static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6916{
6917 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6918 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
6919
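    /* Note: the field order below must stay in sync with the SSMR3Get* sequence in e1kLoadExec(). */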
6920 e1kSaveConfig(pThis, pSSM);
6921 pThisCC->eeprom.save(pSSM);
6922 e1kDumpState(pThis);
6923 SSMR3PutMem(pSSM, pThis->auRegs, sizeof(pThis->auRegs));
6924 SSMR3PutBool(pSSM, pThis->fIntRaised);
6925 Phy::saveState(pSSM, &pThis->phy);
6926 SSMR3PutU32(pSSM, pThis->uSelectedReg);
6927 SSMR3PutMem(pSSM, pThis->auMTA, sizeof(pThis->auMTA));
6928 SSMR3PutMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6929 SSMR3PutMem(pSSM, pThis->auVFTA, sizeof(pThis->auVFTA));
6930 SSMR3PutU64(pSSM, pThis->u64AckedAt);
6931 SSMR3PutU16(pSSM, pThis->u16RxBSize);
6932 //SSMR3PutBool(pSSM, pThis->fDelayInts);
6933 //SSMR3PutBool(pSSM, pThis->fIntMaskUsed);
6934 SSMR3PutU16(pSSM, pThis->u16TxPktLen);
6935/** @todo State wrt to the TSE buffer is incomplete, so little point in
6936 * saving this actually. */
6937 SSMR3PutMem(pSSM, pThis->aTxPacketFallback, pThis->u16TxPktLen);
6938 SSMR3PutBool(pSSM, pThis->fIPcsum);
6939 SSMR3PutBool(pSSM, pThis->fTCPcsum);
6940 SSMR3PutMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6941 SSMR3PutMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6942 SSMR3PutBool(pSSM, pThis->fVTag);
6943 SSMR3PutU16(pSSM, pThis->u16VTagTCI);
6944#ifdef E1K_WITH_TXD_CACHE
6945#if 0
6946 SSMR3PutU8(pSSM, pThis->nTxDFetched);
6947 SSMR3PutMem(pSSM, pThis->aTxDescriptors,
6948 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6949#else
6950 /*
6951 * There is no point in storing TX descriptor cache entries as we can simply
6952 * fetch them again. Moreover, normally the cache is always empty when we
6953 * save the state. Store zero entries for compatibility.
6954 */
6955 SSMR3PutU8(pSSM, 0);
6956#endif
6957#endif /* E1K_WITH_TXD_CACHE */
6958/** @todo GSO requires some more state here. */
6959 E1kLog(("%s State has been saved\n", pThis->szPrf));
6960 return VINF_SUCCESS;
6961}
6962
6963#if 0
6964/**
6965 * @callback_method_impl{FNSSMDEVSAVEDONE}
6966 */
6967static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6968{
6969 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6970
6971 /* If VM is being powered off unlocking will result in assertions in PGM */
6972 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
6973 pThis->fLocked = false;
6974 else
6975 E1kLog(("%s VM is not running -- remain locked\n", pThis->szPrf));
6976 E1kLog(("%s Unlocked\n", pThis->szPrf));
6977 return VINF_SUCCESS;
6978}
6979#endif
6980
6981/**
6982 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
6983 */
6984static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6985{
6986 RT_NOREF(pSSM);
6987 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6988
6989 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6990 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6991 return rc;
6992 e1kCsLeave(pThis);
6993 return VINF_SUCCESS;
6994}
6995
6996/**
6997 * @callback_method_impl{FNSSMDEVLOADEXEC}
6998 */
6999static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
7000{
7001 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7002 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7003 int rc;
7004
7005 if ( uVersion != E1K_SAVEDSTATE_VERSION
7006#ifdef E1K_WITH_TXD_CACHE
7007 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
7008#endif /* E1K_WITH_TXD_CACHE */
7009 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
7010 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
7011 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
7012
7013 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
7014 || uPass != SSM_PASS_FINAL)
7015 {
7016 /* config checks */
7017 RTMAC macConfigured;
7018 rc = SSMR3GetMem(pSSM, &macConfigured, sizeof(macConfigured));
7019 AssertRCReturn(rc, rc);
7020 if ( memcmp(&macConfigured, &pThis->macConfigured, sizeof(macConfigured))
7021 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
7022 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
7023
7024 E1KCHIP eChip;
7025 rc = SSMR3GetU32(pSSM, &eChip);
7026 AssertRCReturn(rc, rc);
7027 if (eChip != pThis->eChip)
7028 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
7029 }
7030
7031 if (uPass == SSM_PASS_FINAL)
7032 {
7033 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
7034 {
7035 rc = pThisCC->eeprom.load(pSSM);
7036 AssertRCReturn(rc, rc);
7037 }
7038 /* the state */
7039 SSMR3GetMem(pSSM, &pThis->auRegs, sizeof(pThis->auRegs));
7040 SSMR3GetBool(pSSM, &pThis->fIntRaised);
7041 /** @todo PHY could be made a separate device with its own versioning */
7042 Phy::loadState(pSSM, &pThis->phy);
7043 SSMR3GetU32(pSSM, &pThis->uSelectedReg);
7044 SSMR3GetMem(pSSM, &pThis->auMTA, sizeof(pThis->auMTA));
7045 SSMR3GetMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
7046 SSMR3GetMem(pSSM, &pThis->auVFTA, sizeof(pThis->auVFTA));
7047 SSMR3GetU64(pSSM, &pThis->u64AckedAt);
7048 SSMR3GetU16(pSSM, &pThis->u16RxBSize);
7049 //SSMR3GetBool(pSSM, pThis->fDelayInts);
7050 //SSMR3GetBool(pSSM, pThis->fIntMaskUsed);
7051 SSMR3GetU16(pSSM, &pThis->u16TxPktLen);
7052 if (pThis->u16TxPktLen > sizeof(pThis->aTxPacketFallback))
7053 pThis->u16TxPktLen = sizeof(pThis->aTxPacketFallback);
7054 SSMR3GetMem(pSSM, &pThis->aTxPacketFallback[0], pThis->u16TxPktLen);
7055 SSMR3GetBool(pSSM, &pThis->fIPcsum);
7056 SSMR3GetBool(pSSM, &pThis->fTCPcsum);
7057 SSMR3GetMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
7058 rc = SSMR3GetMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
7059 AssertRCReturn(rc, rc);
7060 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
7061 {
7062 SSMR3GetBool(pSSM, &pThis->fVTag);
7063 rc = SSMR3GetU16(pSSM, &pThis->u16VTagTCI);
7064 AssertRCReturn(rc, rc);
7065 }
7066 else
7067 {
7068 pThis->fVTag = false;
7069 pThis->u16VTagTCI = 0;
7070 }
7071#ifdef E1K_WITH_TXD_CACHE
7072 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
7073 {
7074 rc = SSMR3GetU8(pSSM, &pThis->nTxDFetched);
7075 AssertRCReturn(rc, rc);
7076 if (pThis->nTxDFetched)
7077 SSMR3GetMem(pSSM, pThis->aTxDescriptors,
7078 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
7079 }
7080 else
7081 pThis->nTxDFetched = 0;
7082 /*
7083 * @todo Perhaps we should not store the TXD cache as the entries can be
7084 * simply fetched again from guest memory. Or can't they?
7085 */
7086#endif /* E1K_WITH_TXD_CACHE */
7087#ifdef E1K_WITH_RXD_CACHE
7088 /*
7089 * There is no point in storing the RX descriptor cache in the saved
7090 * state, we just need to make sure it is empty.
7091 */
7092 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
7093#endif /* E1K_WITH_RXD_CACHE */
7094 /* derived state */
7095 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
7096
7097 E1kLog(("%s State has been restored\n", pThis->szPrf));
7098 e1kDumpState(pThis);
7099 }
7100 return VINF_SUCCESS;
7101}
7102
7103/**
7104 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
7105 */
7106static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7107{
7108 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7109 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7110 RT_NOREF(pSSM);
7111
7112 /* Update promiscuous mode */
7113 if (pThisCC->pDrvR3)
7114 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, !!(RCTL & (RCTL_UPE | RCTL_MPE)));
7115
7116 /*
7117 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
7118 * passed to us. We go through all this stuff if the link was up and we
7119 * wasn't teleported.
7120 */
7121 if ( (STATUS & STATUS_LU)
7122 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
7123 && pThis->cMsLinkUpDelay)
7124 {
7125 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
7126 }
7127 return VINF_SUCCESS;
7128}
7129
7130
7131
7132/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
7133
7134/**
7135 * @callback_method_impl{FNRTSTRFORMATTYPE}
7136 */
7137static DECLCALLBACK(size_t) e1kFmtRxDesc(PFNRTSTROUTPUT pfnOutput,
7138 void *pvArgOutput,
7139 const char *pszType,
7140 void const *pvValue,
7141 int cchWidth,
7142 int cchPrecision,
7143 unsigned fFlags,
7144 void *pvUser)
7145{
7146 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7147 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
7148 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
7149 if (!pDesc)
7150 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
7151
7152 size_t cbPrintf = 0;
7153 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
7154 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
7155 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
7156 pDesc->status.fPIF ? "PIF" : "pif",
7157 pDesc->status.fIPCS ? "IPCS" : "ipcs",
7158 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
7159 pDesc->status.fVP ? "VP" : "vp",
7160 pDesc->status.fIXSM ? "IXSM" : "ixsm",
7161 pDesc->status.fEOP ? "EOP" : "eop",
7162 pDesc->status.fDD ? "DD" : "dd",
7163 pDesc->status.fRXE ? "RXE" : "rxe",
7164 pDesc->status.fIPE ? "IPE" : "ipe",
7165 pDesc->status.fTCPE ? "TCPE" : "tcpe",
7166 pDesc->status.fCE ? "CE" : "ce",
7167 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
7168 E1K_SPEC_VLAN(pDesc->status.u16Special),
7169 E1K_SPEC_PRI(pDesc->status.u16Special));
7170 return cbPrintf;
7171}
7172
7173/**
7174 * @callback_method_impl{FNRTSTRFORMATTYPE}
7175 */
7176static DECLCALLBACK(size_t) e1kFmtTxDesc(PFNRTSTROUTPUT pfnOutput,
7177 void *pvArgOutput,
7178 const char *pszType,
7179 void const *pvValue,
7180 int cchWidth,
7181 int cchPrecision,
7182 unsigned fFlags,
7183 void *pvUser)
7184{
7185 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7186 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
7187 E1KTXDESC *pDesc = (E1KTXDESC*)pvValue;
7188 if (!pDesc)
7189 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
7190
7191 size_t cbPrintf = 0;
7192 switch (e1kGetDescType(pDesc))
7193 {
7194 case E1K_DTYP_CONTEXT:
7195 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
7196 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
7197 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
7198 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
7199 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
7200 pDesc->context.dw2.fIDE ? " IDE":"",
7201 pDesc->context.dw2.fRS ? " RS" :"",
7202 pDesc->context.dw2.fTSE ? " TSE":"",
7203 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
7204 pDesc->context.dw2.fTCP ? "TCP":"UDP",
7205 pDesc->context.dw2.u20PAYLEN,
7206 pDesc->context.dw3.u8HDRLEN,
7207 pDesc->context.dw3.u16MSS,
7208 pDesc->context.dw3.fDD?"DD":"");
7209 break;
7210 case E1K_DTYP_DATA:
7211 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
7212 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
7213 pDesc->data.u64BufAddr,
7214 pDesc->data.cmd.u20DTALEN,
7215 pDesc->data.cmd.fIDE ? " IDE" :"",
7216 pDesc->data.cmd.fVLE ? " VLE" :"",
7217 pDesc->data.cmd.fRPS ? " RPS" :"",
7218 pDesc->data.cmd.fRS ? " RS" :"",
7219 pDesc->data.cmd.fTSE ? " TSE" :"",
7220 pDesc->data.cmd.fIFCS? " IFCS":"",
7221 pDesc->data.cmd.fEOP ? " EOP" :"",
7222 pDesc->data.dw3.fDD ? " DD" :"",
7223 pDesc->data.dw3.fEC ? " EC" :"",
7224 pDesc->data.dw3.fLC ? " LC" :"",
7225 pDesc->data.dw3.fTXSM? " TXSM":"",
7226 pDesc->data.dw3.fIXSM? " IXSM":"",
7227 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
7228 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
7229 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
7230 break;
7231 case E1K_DTYP_LEGACY:
7232 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
7233 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
7234 pDesc->data.u64BufAddr,
7235 pDesc->legacy.cmd.u16Length,
7236 pDesc->legacy.cmd.fIDE ? " IDE" :"",
7237 pDesc->legacy.cmd.fVLE ? " VLE" :"",
7238 pDesc->legacy.cmd.fRPS ? " RPS" :"",
7239 pDesc->legacy.cmd.fRS ? " RS" :"",
7240 pDesc->legacy.cmd.fIC ? " IC" :"",
7241 pDesc->legacy.cmd.fIFCS? " IFCS":"",
7242 pDesc->legacy.cmd.fEOP ? " EOP" :"",
7243 pDesc->legacy.dw3.fDD ? " DD" :"",
7244 pDesc->legacy.dw3.fEC ? " EC" :"",
7245 pDesc->legacy.dw3.fLC ? " LC" :"",
7246 pDesc->legacy.cmd.u8CSO,
7247 pDesc->legacy.dw3.u8CSS,
7248 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
7249 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
7250 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
7251 break;
7252 default:
7253 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
7254 break;
7255 }
7256
7257 return cbPrintf;
7258}
7259
7260/** Initializes debug helpers (logging format types). */
7261static int e1kInitDebugHelpers(void)
7262{
7263 int rc = VINF_SUCCESS;
7264 static bool s_fHelpersRegistered = false;
7265 if (!s_fHelpersRegistered)
7266 {
7267 s_fHelpersRegistered = true;
7268 rc = RTStrFormatTypeRegister("e1krxd", e1kFmtRxDesc, NULL);
7269 AssertRCReturn(rc, rc);
7270 rc = RTStrFormatTypeRegister("e1ktxd", e1kFmtTxDesc, NULL);
7271 AssertRCReturn(rc, rc);
7272 }
7273 return rc;
7274}
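
/*
 * Once registered, descriptors can be logged via these custom format types,
 * e.g. (illustrative only):
 *     Log(("%R[e1krxd]\n", &rxDesc));
 * e1kInfo() below prints descriptors the same way via pHlp->pfnPrintf().
 */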
7275
7276/**
7277 * Status info callback.
7278 *
7279 * @param pDevIns The device instance.
7280 * @param pHlp The output helpers.
7281 * @param pszArgs The arguments.
7282 */
7283static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
7284{
7285 RT_NOREF(pszArgs);
7286 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7287 unsigned i;
7288 // bool fRcvRing = false;
7289 // bool fXmtRing = false;
7290
7291 /*
7292 * Parse args.
7293 if (pszArgs)
7294 {
7295 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
7296 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
7297 }
7298 */
7299
7300 /*
7301 * Show info.
7302 */
7303 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%RTiop mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7304 pDevIns->iInstance, pThis->IOPortBase, pThis->addrMMReg,
7305 &pThis->macConfigured, g_aChips[pThis->eChip].pcszName,
7306 pDevIns->fRCEnabled ? " RC" : "", pDevIns->fR0Enabled ? " R0" : "");
7307
7308 e1kCsEnter(pThis, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
7309
7310 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7311 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", g_aE1kRegMap[i].abbrev, pThis->auRegs[i]);
7312
7313 for (i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
7314 {
7315 E1KRAELEM* ra = pThis->aRecAddr.array + i;
7316 if (ra->ctl & RA_CTL_AV)
7317 {
7318 const char *pcszTmp;
7319 switch (ra->ctl & RA_CTL_AS)
7320 {
7321 case 0: pcszTmp = "DST"; break;
7322 case 1: pcszTmp = "SRC"; break;
7323 default: pcszTmp = "reserved";
7324 }
7325 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7326 }
7327 }
7328 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7329 uint32_t rdh = RDH;
7330 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7331 for (i = 0; i < cDescs; ++i)
7332 {
7333 E1KRXDESC desc;
7334 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7335 &desc, sizeof(desc));
7336 if (i == rdh)
7337 pHlp->pfnPrintf(pHlp, ">>> ");
7338 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7339 }
7340#ifdef E1K_WITH_RXD_CACHE
7341 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7342 pThis->iRxDCurrent, RDH, pThis->nRxDFetched, E1K_RXD_CACHE_SIZE);
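    /*
     * Rewind RDH by iRxDCurrent so that the first cached descriptor is shown at
     * the ring address it was fetched from (wrapping around the ring).
     */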
7343 if (rdh > pThis->iRxDCurrent)
7344 rdh -= pThis->iRxDCurrent;
7345 else
7346 rdh = cDescs + rdh - pThis->iRxDCurrent;
7347 for (i = 0; i < pThis->nRxDFetched; ++i)
7348 {
7349 if (i == pThis->iRxDCurrent)
7350 pHlp->pfnPrintf(pHlp, ">>> ");
7351 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7352 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7353 &pThis->aRxDescriptors[i]);
7354 }
7355#endif /* E1K_WITH_RXD_CACHE */
7356
7357 cDescs = TDLEN / sizeof(E1KTXDESC);
7358 uint32_t tdh = TDH;
7359 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7360 for (i = 0; i < cDescs; ++i)
7361 {
7362 E1KTXDESC desc;
7363 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7364 &desc, sizeof(desc));
7365 if (i == tdh)
7366 pHlp->pfnPrintf(pHlp, ">>> ");
7367 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7368 }
7369#ifdef E1K_WITH_TXD_CACHE
7370 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7371 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE);
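    /* Same rewind as for the RX cache above: map cache entries back to their ring addresses. */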
7372 if (tdh > pThis->iTxDCurrent)
7373 tdh -= pThis->iTxDCurrent;
7374 else
7375 tdh = cDescs + tdh - pThis->iTxDCurrent;
7376 for (i = 0; i < pThis->nTxDFetched; ++i)
7377 {
7378 if (i == pThis->iTxDCurrent)
7379 pHlp->pfnPrintf(pHlp, ">>> ");
7380 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7381 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7382 &pThis->aTxDescriptors[i]);
7383 }
7384#endif /* E1K_WITH_TXD_CACHE */
7385
7386
7387#ifdef E1K_INT_STATS
7388 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pThis->uStatIntTry);
7389 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pThis->uStatInt);
7390 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pThis->uStatIntLower);
7391 pHlp->pfnPrintf(pHlp, "ICR outside ISR : %d\n", pThis->uStatNoIntICR);
7392 pHlp->pfnPrintf(pHlp, "IMS raised ints : %d\n", pThis->uStatIntIMS);
7393 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pThis->uStatIntSkip);
7394 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pThis->uStatIntMasked);
7395 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pThis->uStatIntEarly);
7396 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pThis->uStatIntLate);
7397 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pThis->iStatIntLost);
7398 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pThis->uStatIntRx);
7399 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pThis->uStatIntTx);
7400 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pThis->uStatIntICS);
7401 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pThis->uStatIntRDTR);
7402 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pThis->uStatIntRXDMT0);
7403 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pThis->uStatIntTXQE);
7404 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pThis->uStatTxIDE);
7405 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pThis->uStatTxDelayed);
7406 pHlp->pfnPrintf(pHlp, "TX delay expired: %d\n", pThis->uStatTxDelayExp);
7407 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pThis->uStatTxNoRS);
7408 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pThis->uStatTAD);
7409 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pThis->uStatTID);
7410 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pThis->uStatRAD);
7411 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pThis->uStatRID);
7412 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pThis->uStatDescCtx);
7413 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pThis->uStatDescDat);
7414 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pThis->uStatDescLeg);
7415 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pThis->uStatRxFrm);
7416 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pThis->uStatTxFrm);
7417 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pThis->uStatTx1514);
7418 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pThis->uStatTx2962);
7419 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pThis->uStatTx4410);
7420 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pThis->uStatTx5858);
7421 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pThis->uStatTx7306);
7422 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pThis->uStatTx8754);
7423 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pThis->uStatTx16384);
7424 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pThis->uStatTx32768);
7425 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pThis->uStatTxLarge);
7426#endif /* E1K_INT_STATS */
7427
7428 e1kCsLeave(pThis);
7429}
7430
7431
7432
7433/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
7434
7435/**
7436 * Detach notification.
7437 *
7438 * One port on the network card has been disconnected from the network.
7439 *
7440 * @param pDevIns The device instance.
7441 * @param iLUN The logical unit which is being detached.
7442 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7443 */
7444static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7445{
7446 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7447 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7448 Log(("%s e1kR3Detach:\n", pThis->szPrf));
7449 RT_NOREF(fFlags);
7450
7451 AssertLogRelReturnVoid(iLUN == 0);
7452
7453 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7454
7455 /** @todo r=pritesh still need to check if I missed
7456 * cleaning something up in this function
7457 */
7458
7459 /*
7460 * Zero some important members.
7461 */
7462 pThisCC->pDrvBase = NULL;
7463 pThisCC->pDrvR3 = NULL;
7464#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
7465 pThisR0->pDrvR0 = NIL_RTR0PTR;
7466 pThisRC->pDrvRC = NIL_RTRCPTR;
7467#endif
7468
7469 PDMCritSectLeave(&pThis->cs);
7470}
7471
7472/**
7473 * Attach the Network attachment.
7474 *
7475 * One port on the network card has been connected to a network.
7476 *
7477 * @returns VBox status code.
7478 * @param pDevIns The device instance.
7479 * @param iLUN The logical unit which is being attached.
7480 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7481 *
7482 * @remarks This code path is not used during construction.
7483 */
7484static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7485{
7486 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7487 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7488 LogFlow(("%s e1kR3Attach:\n", pThis->szPrf));
7489 RT_NOREF(fFlags);
7490
7491 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7492
7493 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7494
7495 /*
7496 * Attach the driver.
7497 */
7498 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThisCC->IBase, &pThisCC->pDrvBase, "Network Port");
7499 if (RT_SUCCESS(rc))
7500 {
7501 if (rc == VINF_NAT_DNS)
7502 {
7503#ifdef RT_OS_LINUX
7504 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7505 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7506#else
7507 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7508 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7509#endif
7510 }
7511 pThisCC->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMINETWORKUP);
7512 AssertMsgStmt(pThisCC->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7513 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7514 if (RT_SUCCESS(rc))
7515 {
7516#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
7517 pThisR0->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7518 pThisRC->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7519#endif
7520 }
7521 }
7522 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7523 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7524 {
7525 /* This should never happen because this function is not called
7526 * if there is no driver to attach! */
7527 Log(("%s No attached driver!\n", pThis->szPrf));
7528 }
7529
7530 /*
7531 * Temporarily set the link down if it was up so that the guest will know
7532 * that we have changed the configuration of the network card.
7533 */
7534 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7535 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
7536
7537 PDMCritSectLeave(&pThis->cs);
7538 return rc;
7539
7540}
7541
7542/**
7543 * @copydoc FNPDMDEVPOWEROFF
7544 */
7545static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7546{
7547 /* Poke thread waiting for buffer space. */
7548 e1kWakeupReceive(pDevIns);
7549}
7550
7551/**
7552 * @copydoc FNPDMDEVRESET
7553 */
7554static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7555{
7556 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7557 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7558#ifdef E1K_TX_DELAY
7559 e1kCancelTimer(pDevIns, pThis, pThis->hTXDTimer);
7560#endif /* E1K_TX_DELAY */
7561 e1kCancelTimer(pDevIns, pThis, pThis->hIntTimer);
7562 e1kCancelTimer(pDevIns, pThis, pThis->hLUTimer);
7563 e1kXmitFreeBuf(pThis, pThisCC);
7564 pThis->u16TxPktLen = 0;
7565 pThis->fIPcsum = false;
7566 pThis->fTCPcsum = false;
7567 pThis->fIntMaskUsed = false;
7568 pThis->fDelayInts = false;
7569 pThis->fLocked = false;
7570 pThis->u64AckedAt = 0;
7571 e1kR3HardReset(pDevIns, pThis, pThisCC);
7572}
7573
7574/**
7575 * @copydoc FNPDMDEVSUSPEND
7576 */
7577static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7578{
7579 /* Poke thread waiting for buffer space. */
7580 e1kWakeupReceive(pDevIns);
7581}
7582
7583/**
7584 * Device relocation callback.
7585 *
7586 * When this callback is called the device instance data, and if the
7587 * device has a GC component, is being relocated, and/or the selectors
7588 * have been changed. The device must use the chance to perform the
7589 * necessary pointer relocations and data updates.
7590 *
7591 * Before the GC code is executed the first time, this function will be
7592 * called with a 0 delta so GC pointer calculations can be done in one place.
7593 *
7594 * @param pDevIns Pointer to the device instance.
7595 * @param offDelta The relocation delta relative to the old location.
7596 *
7597 * @remark A relocation CANNOT fail.
7598 */
7599static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7600{
7601 PE1KSTATERC pThisRC = PDMINS_2_DATA_RC(pDevIns, PE1KSTATERC);
7602 if (pThisRC)
7603 pThisRC->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7604 RT_NOREF(offDelta);
7605}
7606
7607/**
7608 * Destruct a device instance.
7609 *
7610 * We need to free non-VM resources only.
7611 *
7612 * @returns VBox status code.
7613 * @param pDevIns The device instance data.
7614 * @thread EMT
7615 */
7616static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7617{
7618 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7619 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7620
7621 e1kDumpState(pThis);
7622 E1kLog(("%s Destroying instance\n", pThis->szPrf));
7623 if (PDMCritSectIsInitialized(&pThis->cs))
7624 {
7625 if (pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
7626 {
7627 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
7628 RTSemEventDestroy(pThis->hEventMoreRxDescAvail);
7629 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7630 }
7631#ifdef E1K_WITH_TX_CS
7632 PDMR3CritSectDelete(&pThis->csTx);
7633#endif /* E1K_WITH_TX_CS */
7634 PDMR3CritSectDelete(&pThis->csRx);
7635 PDMR3CritSectDelete(&pThis->cs);
7636 }
7637 return VINF_SUCCESS;
7638}
7639
7640
7641/**
7642 * Set PCI configuration space registers.
7643 *
7644 * @param pPciDev Pointer to the PCI device structure to configure.
 * @param eChip The emulated chip type (index into g_aChips).
7645 * @thread EMT
7646 */
7647static void e1kR3ConfigurePciDev(PPDMPCIDEV pPciDev, E1KCHIP eChip)
7648{
7649 Assert(eChip < RT_ELEMENTS(g_aChips));
7650 /* Configure PCI Device, assume 32-bit mode ******************************/
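    /*
     * Capability chain layout used below: the standard header points at the PCI
     * Power Management capability at 0xDC, which points on to the PCI-X
     * capability at 0xE4 and, when E1K_WITH_MSI is defined, further to a
     * capability at 0x80 (MSI, registered elsewhere).
     */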
7651 PDMPciDevSetVendorId(pPciDev, g_aChips[eChip].uPCIVendorId);
7652 PDMPciDevSetDeviceId(pPciDev, g_aChips[eChip].uPCIDeviceId);
7653 PDMPciDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_aChips[eChip].uPCISubsystemVendorId);
7654 PDMPciDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_aChips[eChip].uPCISubsystemId);
7655
7656 PDMPciDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7657 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7658 PDMPciDevSetWord( pPciDev, VBOX_PCI_STATUS,
7659 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7660 /* Stepping A2 */
7661 PDMPciDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7662 /* Ethernet adapter */
7663 PDMPciDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7664 PDMPciDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7665 /* normal single function Ethernet controller */
7666 PDMPciDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7667 /* Memory Register Base Address */
7668 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7669 /* Memory Flash Base Address */
7670 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7671 /* IO Register Base Address */
7672 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7673 /* Expansion ROM Base Address */
7674 PDMPciDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7675 /* Capabilities Pointer */
7676 PDMPciDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7677 /* Interrupt Pin: INTA# */
7678 PDMPciDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7679 /* Max_Lat/Min_Gnt: very high priority and time slice */
7680 PDMPciDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7681 PDMPciDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7682
7683 /* PCI Power Management Registers ****************************************/
7684 /* Capability ID: PCI Power Management Registers */
7685 PDMPciDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7686 /* Next Item Pointer: PCI-X */
7687 PDMPciDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7688 /* Power Management Capabilities: PM disabled, DSI */
7689 PDMPciDevSetWord( pPciDev, 0xDC + 2,
7690 0x0002 | VBOX_PCI_PM_CAP_DSI);
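    /* Note: the low bits of this word form the PM specification version field (0x0002
       presumably indicating PCI PM 1.1); VBOX_PCI_PM_CAP_DSI flags device-specific init. */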
7691 /* Power Management Control / Status Register: PM disabled */
7692 PDMPciDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7693 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7694 PDMPciDevSetByte( pPciDev, 0xDC + 6, 0x00);
7695 /* Data Register: PM disabled, always 0 */
7696 PDMPciDevSetByte( pPciDev, 0xDC + 7, 0x00);
7697
7698 /* PCI-X Configuration Registers *****************************************/
7699 /* Capability ID: PCI-X Configuration Registers */
7700 PDMPciDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7701#ifdef E1K_WITH_MSI
7702 PDMPciDevSetByte( pPciDev, 0xE4 + 1, 0x80);
7703#else
7704 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7705 PDMPciDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7706#endif
7707 /* PCI-X Command: Enable Relaxed Ordering */
7708 PDMPciDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7709 /* PCI-X Status: 32-bit, 66MHz*/
7710 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7711 PDMPciDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
7712}
7713
7714/**
7715 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7716 */
7717static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7718{
7719 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7720 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7721 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7722 int rc;
7723
7724 /*
7725 * Initialize the instance data (state).
7726 * Note! Caller has initialized it to ZERO already.
7727 */
7728 RTStrPrintf(pThis->szPrf, sizeof(pThis->szPrf), "E1000#%d", iInstance);
7729 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", pThis->szPrf, sizeof(E1KRXDESC)));
7730 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7731 pThis->u16TxPktLen = 0;
7732 pThis->fIPcsum = false;
7733 pThis->fTCPcsum = false;
7734 pThis->fIntMaskUsed = false;
7735 pThis->fDelayInts = false;
7736 pThis->fLocked = false;
7737 pThis->u64AckedAt = 0;
7738 pThis->led.u32Magic = PDMLED_MAGIC;
7739 pThis->u32PktNo = 1;
7740
7741 pThisCC->pDevInsR3 = pDevIns;
7742 pThisCC->pShared = pThis;
7743
7744 /* Interfaces */
7745 pThisCC->IBase.pfnQueryInterface = e1kR3QueryInterface;
7746
7747 pThisCC->INetworkDown.pfnWaitReceiveAvail = e1kR3NetworkDown_WaitReceiveAvail;
7748 pThisCC->INetworkDown.pfnReceive = e1kR3NetworkDown_Receive;
7749 pThisCC->INetworkDown.pfnXmitPending = e1kR3NetworkDown_XmitPending;
7750
7751 pThisCC->ILeds.pfnQueryStatusLed = e1kR3QueryStatusLed;
7752
7753 pThisCC->INetworkConfig.pfnGetMac = e1kR3GetMac;
7754 pThisCC->INetworkConfig.pfnGetLinkState = e1kR3GetLinkState;
7755 pThisCC->INetworkConfig.pfnSetLinkState = e1kR3SetLinkState;
7756
7757 /*
7758 * Internal validations.
7759 */
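    /* The register map must be sorted by ascending offset, presumably so that the first
       E1K_NUM_OF_BINARY_SEARCHABLE entries can be binary searched; the assertion below
       checks that each entry starts after the previous one and does not end before it. */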
7760 for (uint32_t iReg = 1; iReg < E1K_NUM_OF_BINARY_SEARCHABLE; iReg++)
7761 AssertLogRelMsgReturn( g_aE1kRegMap[iReg].offset > g_aE1kRegMap[iReg - 1].offset
7762 && g_aE1kRegMap[iReg].offset + g_aE1kRegMap[iReg].size
7763 >= g_aE1kRegMap[iReg - 1].offset + g_aE1kRegMap[iReg - 1].size,
7764 ("%s@%#xLB%#x vs %s@%#xLB%#x\n",
7765 g_aE1kRegMap[iReg].abbrev, g_aE1kRegMap[iReg].offset, g_aE1kRegMap[iReg].size,
7766 g_aE1kRegMap[iReg - 1].abbrev, g_aE1kRegMap[iReg - 1].offset, g_aE1kRegMap[iReg - 1].size),
7767 VERR_INTERNAL_ERROR_4);
7768
7769 /*
7770 * Validate configuration.
7771 */
7772 PDMDEV_VALIDATE_CONFIG_RETURN(pDevIns,
7773 "MAC|"
7774 "CableConnected|"
7775 "AdapterType|"
7776 "LineSpeed|"
7777 "ItrEnabled|"
7778 "ItrRxEnabled|"
7779 "EthernetCRC|"
7780 "GSOEnabled|"
7781 "LinkUpDelay", "");
7782
7783 /** @todo LineSpeed unused! */
7784
7785 /*
7786 * Get config params
7787 */
7788 rc = CFGMR3QueryBytes(pCfg, "MAC", pThis->macConfigured.au8, sizeof(pThis->macConfigured.au8));
7789 if (RT_FAILURE(rc))
7790 return PDMDEV_SET_ERROR(pDevIns, rc,
7791 N_("Configuration error: Failed to get MAC address"));
7792 rc = CFGMR3QueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
7793 if (RT_FAILURE(rc))
7794 return PDMDEV_SET_ERROR(pDevIns, rc,
7795 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7796 rc = CFGMR3QueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
7797 if (RT_FAILURE(rc))
7798 return PDMDEV_SET_ERROR(pDevIns, rc,
7799 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7800 Assert(pThis->eChip <= E1K_CHIP_82545EM);
7801
7802 rc = CFGMR3QueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
7803 if (RT_FAILURE(rc))
7804 return PDMDEV_SET_ERROR(pDevIns, rc,
7805 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7806
7807 rc = CFGMR3QueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
7808 if (RT_FAILURE(rc))
7809 return PDMDEV_SET_ERROR(pDevIns, rc,
7810 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
7811
7812 rc = CFGMR3QueryBoolDef(pCfg, "ItrEnabled", &pThis->fItrEnabled, false);
7813 if (RT_FAILURE(rc))
7814 return PDMDEV_SET_ERROR(pDevIns, rc,
7815 N_("Configuration error: Failed to get the value of 'ItrEnabled'"));
7816
7817 rc = CFGMR3QueryBoolDef(pCfg, "ItrRxEnabled", &pThis->fItrRxEnabled, true);
7818 if (RT_FAILURE(rc))
7819 return PDMDEV_SET_ERROR(pDevIns, rc,
7820 N_("Configuration error: Failed to get the value of 'ItrRxEnabled'"));
7821
7822 rc = CFGMR3QueryBoolDef(pCfg, "TidEnabled", &pThis->fTidEnabled, false);
7823 if (RT_FAILURE(rc))
7824 return PDMDEV_SET_ERROR(pDevIns, rc,
7825 N_("Configuration error: Failed to get the value of 'TidEnabled'"));
7826
7827 rc = CFGMR3QueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 3000); /* ms */
7828 if (RT_FAILURE(rc))
7829 return PDMDEV_SET_ERROR(pDevIns, rc,
7830 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
7831 Assert(pThis->cMsLinkUpDelay <= 300000); /* at most 5 minutes */
7832 if (pThis->cMsLinkUpDelay > 5000)
7833 LogRel(("%s: WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
7834 else if (pThis->cMsLinkUpDelay == 0)
7835 LogRel(("%s: WARNING! Link up delay is disabled!\n", pThis->szPrf));
7836
7837 LogRel(("%s: Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s Itr=%s ItrRx=%s TID=%s R0=%s RC=%s\n", pThis->szPrf,
7838 g_aChips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
7839 pThis->fEthernetCRC ? "on" : "off",
7840 pThis->fGSOEnabled ? "enabled" : "disabled",
7841 pThis->fItrEnabled ? "enabled" : "disabled",
7842 pThis->fItrRxEnabled ? "enabled" : "disabled",
7843 pThis->fTidEnabled ? "enabled" : "disabled",
7844 pDevIns->fR0Enabled ? "enabled" : "disabled",
7845 pDevIns->fRCEnabled ? "enabled" : "disabled"));
7846
7847 /*
7848 * Initialize sub-components and register everything with the VMM.
7849 */
7850
7851 /* Initialize the EEPROM. */
7852 pThisCC->eeprom.init(pThis->macConfigured);
7853
7854 /* Initialize internal PHY. */
7855 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
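    /* The 82543GC variant reports a different extended PHY ID (presumably the Marvell 88E1000)
       than the later chips (presumably the 88E1011); see the PHY_EPID_* constants. */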
7856
7857 /* Initialize critical sections. We do our own locking. */
7858 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
7859 AssertRCReturn(rc, rc);
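    /* The NOP critical section disables PDM's automatic per-device locking; the device
       serializes access itself with the cs, csRx and (optionally) csTx sections below. */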
7860
7861 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "E1000#%d", iInstance);
7862 AssertRCReturn(rc, rc);
7863 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "E1000#%dRX", iInstance);
7864 AssertRCReturn(rc, rc);
7865#ifdef E1K_WITH_TX_CS
7866 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "E1000#%dTX", iInstance);
7867 AssertRCReturn(rc, rc);
7868#endif
7869
7870 /* Saved state registration. */
7871 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
7872 NULL, e1kLiveExec, NULL,
7873 e1kSavePrep, e1kSaveExec, NULL,
7874 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
7875 AssertRCReturn(rc, rc);
7876
7877 /* Set PCI config registers and register ourselves with the PCI bus. */
7878 PDMPCIDEV_ASSERT_VALID(pDevIns, pDevIns->apPciDevs[0]);
7879 e1kR3ConfigurePciDev(pDevIns->apPciDevs[0], pThis->eChip);
7880 rc = PDMDevHlpPCIRegister(pDevIns, pDevIns->apPciDevs[0]);
7881 AssertRCReturn(rc, rc);
7882
7883#ifdef E1K_WITH_MSI
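    /* Register a single 32-bit MSI vector; the capability lives at config space offset 0x80,
       matching the Next Item Pointer written by e1kR3ConfigurePciDev for this build option. */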
7884 PDMMSIREG MsiReg;
7885 RT_ZERO(MsiReg);
7886 MsiReg.cMsiVectors = 1;
7887 MsiReg.iMsiCapOffset = 0x80;
7888 MsiReg.iMsiNextOffset = 0x0;
7889 MsiReg.fMsi64bit = false;
7890 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
7891 AssertRCReturn(rc, rc);
7892#endif
7893
7894 /*
7895 * Map our registers to memory space (region 0, see e1kR3ConfigurePciDev)
7896 * From the spec (regarding flags):
7897 * For registers that should be accessed as 32-bit double words,
7898 * partial writes (less than a 32-bit double word) are ignored.
7899 * Partial reads return all 32 bits of data regardless of the
7900 * byte enables.
7901 */
7902 rc = PDMDevHlpMmioCreateEx(pDevIns, E1K_MM_SIZE, IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD,
7903 pDevIns->apPciDevs[0], 0 /*iPciRegion*/,
7904 e1kMMIOWrite, e1kMMIORead, NULL /*pfnFill*/, NULL /*pvUser*/, "E1000", &pThis->hMmioRegion);
7905 AssertRCReturn(rc, rc);
7906 rc = PDMDevHlpPCIIORegionRegisterMmio(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, pThis->hMmioRegion, e1kR3Map);
7907 AssertRCReturn(rc, rc);
7908
7909 /* Map our registers to IO space (region 2, see e1kR3ConfigurePciDev) */
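    /* The I/O BAR provides indirect register access: the guest writes a register offset to
       IOADDR (bytes 0..3 of the region) and then reads or writes the selected register
       through IODATA (bytes 4..7), as reflected by the port descriptors below. */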
7910 static IOMIOPORTDESC const s_aExtDescs[] =
7911 {
7912 { "IOADDR", "IOADDR", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL },
7913 { "IODATA", "IODATA", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL },
7914 { NULL, NULL, NULL, NULL }
7915 };
7916 rc = PDMDevHlpIoPortCreate(pDevIns, E1K_IOPORT_SIZE, pDevIns->apPciDevs[0], 2 /*iPciRegion*/,
7917 e1kIOPortOut, e1kIOPortIn, NULL /*pvUser*/, "E1000", s_aExtDescs, &pThis->hIoPorts);
7918 AssertRCReturn(rc, rc);
7919 rc = PDMDevHlpPCIIORegionRegisterIo(pDevIns, 2, E1K_IOPORT_SIZE, pThis->hIoPorts, e1kR3Map);
7920 AssertRCReturn(rc, rc);
7921
7922 /* Create the transmit task */
7923 rc = PDMDevHlpTaskCreate(pDevIns, PDMTASK_F_RZ, "E1000-Xmit", e1kTxTaskCallback, NULL, &pThis->hTxTask);
7924 AssertRCReturn(rc, rc);
7925
7926 /* Create the RX notifier signaller. */
7927 rc = PDMDevHlpTaskCreate(pDevIns, PDMTASK_F_RZ, "E1000-Rcv", e1kCanRxTaskCallback, NULL, &pThis->hCanRxTask);
7928 AssertRCReturn(rc, rc);
7929
7930#ifdef E1K_TX_DELAY
7931 /* Create Transmit Delay Timer */
7932 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7933 "E1000 Transmit Delay Timer", &pThis->hTXDTimer);
7934 AssertRCReturn(rc, rc);
7935 rc = PDMDevHlpTimerSetCritSect(pDevIns, pThis->hTXDTimer, &pThis->csTx);
7936 AssertRCReturn(rc, rc);
7937#endif /* E1K_TX_DELAY */
7938
7939//#ifdef E1K_USE_TX_TIMERS
7940 if (pThis->fTidEnabled)
7941 {
7942 /* Create Transmit Interrupt Delay Timer */
7943 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxIntDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7944 "E1000 Transmit Interrupt Delay Timer", &pThis->hTIDTimer);
7945 AssertRCReturn(rc, rc);
7946
7947# ifndef E1K_NO_TAD
7948 /* Create Transmit Absolute Delay Timer */
7949 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxAbsDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7950 "E1000 Transmit Absolute Delay Timer", &pThis->hTADTimer);
7951 AssertRCReturn(rc, rc);
7952# endif /* E1K_NO_TAD */
7953 }
7954//#endif /* E1K_USE_TX_TIMERS */
7955
7956#ifdef E1K_USE_RX_TIMERS
7957 /* Create Receive Interrupt Delay Timer */
7958 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxIntDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7959 "E1000 Receive Interrupt Delay Timer", &pThis->hRIDTimer);
7960 AssertRCReturn(rc, rc);
7961
7962 /* Create Receive Absolute Delay Timer */
7963 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxAbsDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7964 "E1000 Receive Absolute Delay Timer", &pThis->hRADTimer);
7965 AssertRCReturn(rc, rc);
7966#endif /* E1K_USE_RX_TIMERS */
7967
7968 /* Create Late Interrupt Timer */
7969 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLateIntTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7970 "E1000 Late Interrupt Timer", &pThis->hIntTimer);
7971 AssertRCReturn(rc, rc);
7972
7973 /* Create Link Up Timer */
7974 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLinkUpTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7975 "E1000 Link Up Timer", &pThis->hLUTimer);
7976 AssertRCReturn(rc, rc);
7977
7978 /* Register the info item */
7979 char szTmp[20];
7980 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
7981 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
7982
7983 /* Status driver */
7984 PPDMIBASE pBase;
7985 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThisCC->IBase, &pBase, "Status Port");
7986 if (RT_FAILURE(rc))
7987 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
7988 pThisCC->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
7989
7990 /* Network driver */
7991 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThisCC->IBase, &pThisCC->pDrvBase, "Network Port");
7992 if (RT_SUCCESS(rc))
7993 {
7994 if (rc == VINF_NAT_DNS)
7995 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7996 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning, the guest will not be able to perform nameserver lookups and will probably observe delays when trying to do so"));
7997 pThisCC->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMINETWORKUP);
7998 AssertMsgReturn(pThisCC->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
7999
8000#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
8001 pThisR0->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASER0), PDMINETWORKUP);
8002 pThisRC->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASERC), PDMINETWORKUP);
8003#endif
8004 }
8005 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
8006 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
8007 {
8008 /* No error! */
8009 E1kLog(("%s This adapter is not attached to any network!\n", pThis->szPrf));
8010 }
8011 else
8012 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
8013
8014 rc = RTSemEventCreate(&pThis->hEventMoreRxDescAvail);
8015 AssertRCReturn(rc, rc);
8016
8017 rc = e1kInitDebugHelpers();
8018 AssertRCReturn(rc, rc);
8019
8020 e1kR3HardReset(pDevIns, pThis, pThisCC);
8021
8022 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Public/Net/E1k%u/BytesReceived", iInstance);
8023 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Public/Net/E1k%u/BytesTransmitted", iInstance);
8024
8025 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
8026 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
8027
8028#if defined(VBOX_WITH_STATISTICS)
8029 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
8030 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
8031 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
8032 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
8033 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
8034 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
8035 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
8036 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
8037 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
8038 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
8039 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
8040 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occured", iInstance);
8041 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
8042 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
8043 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
8044 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
8045 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
8046 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
8047 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
8048 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflowWakeup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups", "/Devices/E1k%d/RxOverflowWakeup", iInstance);
8049 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
8050 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
8051 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
8052 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
8053
8054 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors","/Devices/E1k%d/TxDesc/ContexNormal", iInstance);
8055 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
8056 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
8057 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
8058 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
8059 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
8060 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
8061 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
8062 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
8063 for (unsigned iReg = 0; iReg < E1K_NUM_OF_REGS; iReg++)
8064 {
8065 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
8066 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Reads", iInstance, g_aE1kRegMap[iReg].abbrev);
8067 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
8068 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Writes", iInstance, g_aE1kRegMap[iReg].abbrev);
8069 }
8070#endif /* VBOX_WITH_STATISTICS */
8071
8072#ifdef E1K_INT_STATS
8073 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "u64ArmedAt", "/Devices/E1k%d/u64ArmedAt", iInstance);
8074 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatMaxTxDelay", "/Devices/E1k%d/uStatMaxTxDelay", iInstance);
8075 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatInt, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatInt", "/Devices/E1k%d/uStatInt", iInstance);
8076 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTry", "/Devices/E1k%d/uStatIntTry", iInstance);
8077 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLower", "/Devices/E1k%d/uStatIntLower", iInstance);
8078 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatNoIntICR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatNoIntICR", "/Devices/E1k%d/uStatNoIntICR", iInstance);
8079 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLost", "/Devices/E1k%d/iStatIntLost", iInstance);
8080 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLostOne", "/Devices/E1k%d/iStatIntLostOne", iInstance);
8081 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntIMS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntIMS", "/Devices/E1k%d/uStatIntIMS", iInstance);
8082 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntSkip", "/Devices/E1k%d/uStatIntSkip", iInstance);
8083 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLate", "/Devices/E1k%d/uStatIntLate", iInstance);
8084 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntMasked", "/Devices/E1k%d/uStatIntMasked", iInstance);
8085 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntEarly", "/Devices/E1k%d/uStatIntEarly", iInstance);
8086 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRx", "/Devices/E1k%d/uStatIntRx", iInstance);
8087 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTx", "/Devices/E1k%d/uStatIntTx", iInstance);
8088 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntICS", "/Devices/E1k%d/uStatIntICS", iInstance);
8089 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRDTR", "/Devices/E1k%d/uStatIntRDTR", iInstance);
8090 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRXDMT0", "/Devices/E1k%d/uStatIntRXDMT0", iInstance);
8091 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTXQE", "/Devices/E1k%d/uStatIntTXQE", iInstance);
8092 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxNoRS", "/Devices/E1k%d/uStatTxNoRS", iInstance);
8093 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxIDE", "/Devices/E1k%d/uStatTxIDE", iInstance);
8094 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayed", "/Devices/E1k%d/uStatTxDelayed", iInstance);
8095 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayExp", "/Devices/E1k%d/uStatTxDelayExp", iInstance);
8096 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTAD", "/Devices/E1k%d/uStatTAD", iInstance);
8097 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTID", "/Devices/E1k%d/uStatTID", iInstance);
8098 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRAD", "/Devices/E1k%d/uStatRAD", iInstance);
8099 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRID", "/Devices/E1k%d/uStatRID", iInstance);
8100 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRxFrm", "/Devices/E1k%d/uStatRxFrm", iInstance);
8101 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxFrm", "/Devices/E1k%d/uStatTxFrm", iInstance);
8102 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescCtx", "/Devices/E1k%d/uStatDescCtx", iInstance);
8103 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescDat", "/Devices/E1k%d/uStatDescDat", iInstance);
8104 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescLeg", "/Devices/E1k%d/uStatDescLeg", iInstance);
8105 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx1514", "/Devices/E1k%d/uStatTx1514", iInstance);
8106 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx2962", "/Devices/E1k%d/uStatTx2962", iInstance);
8107 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx4410", "/Devices/E1k%d/uStatTx4410", iInstance);
8108 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx5858", "/Devices/E1k%d/uStatTx5858", iInstance);
8109 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx7306", "/Devices/E1k%d/uStatTx7306", iInstance);
8110 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx8754", "/Devices/E1k%d/uStatTx8754", iInstance);
8111 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx16384", "/Devices/E1k%d/uStatTx16384", iInstance);
8112 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx32768", "/Devices/E1k%d/uStatTx32768", iInstance);
8113 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxLarge", "/Devices/E1k%d/uStatTxLarge", iInstance);
8114#endif /* E1K_INT_STATS */
8115
8116 return VINF_SUCCESS;
8117}
8118
8119#else /* !IN_RING3 */
8120
8121/**
8122 * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
8123 */
8124static DECLCALLBACK(int) e1kRZConstruct(PPDMDEVINS pDevIns)
8125{
8126 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
8127 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
8128
8129 pThisCC->CTX_SUFF(pDevIns) = pDevIns;
8130 /** @todo @bugref{9218} ring-0 driver stuff */
8131 pThisCC->CTX_SUFF(pDrv) = NULL;
8132 pThisCC->CTX_SUFF(pTxSg) = NULL;
8133
8134 int rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmioRegion, e1kMMIOWrite, e1kMMIORead, NULL /*pvUser*/);
8135 AssertRCReturn(rc, rc);
8136
8137 rc = PDMDevHlpIoPortSetUpContext(pDevIns, pThis->hIoPorts, e1kIOPortOut, e1kIOPortIn, NULL /*pvUser*/);
8138 AssertRCReturn(rc, rc);
8139
8140 return VINF_SUCCESS;
8141}
8142
8143#endif /* !IN_RING3 */
8144
8145/**
8146 * The device registration structure.
8147 */
8148const PDMDEVREG g_DeviceE1000 =
8149{
8150 /* .u32Version = */ PDM_DEVREG_VERSION,
8151 /* .uReserved0 = */ 0,
8152 /* .szName = */ "e1000",
8153 /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0,
8154 /* .fClass = */ PDM_DEVREG_CLASS_NETWORK,
8155 /* .cMaxInstances = */ ~0U,
8156 /* .uSharedVersion = */ 42,
8157 /* .cbInstanceShared = */ sizeof(E1KSTATE),
8158 /* .cbInstanceCC = */ sizeof(E1KSTATECC),
8159 /* .cbInstanceRC = */ sizeof(E1KSTATERC),
8160 /* .cMaxPciDevices = */ 1,
8161 /* .cMaxMsixVectors = */ 0,
8162 /* .pszDescription = */ "Intel PRO/1000 MT Desktop Ethernet.",
8163#if defined(IN_RING3)
8164 /* .pszRCMod = */ "VBoxDDRC.rc",
8165 /* .pszR0Mod = */ "VBoxDDR0.r0",
8166 /* .pfnConstruct = */ e1kR3Construct,
8167 /* .pfnDestruct = */ e1kR3Destruct,
8168 /* .pfnRelocate = */ e1kR3Relocate,
8169 /* .pfnMemSetup = */ NULL,
8170 /* .pfnPowerOn = */ NULL,
8171 /* .pfnReset = */ e1kR3Reset,
8172 /* .pfnSuspend = */ e1kR3Suspend,
8173 /* .pfnResume = */ NULL,
8174 /* .pfnAttach = */ e1kR3Attach,
8175 /* .pfnDetach = */ e1kR3Detach,
8176 /* .pfnQueryInterface = */ NULL,
8177 /* .pfnInitComplete = */ NULL,
8178 /* .pfnPowerOff = */ e1kR3PowerOff,
8179 /* .pfnSoftReset = */ NULL,
8180 /* .pfnReserved0 = */ NULL,
8181 /* .pfnReserved1 = */ NULL,
8182 /* .pfnReserved2 = */ NULL,
8183 /* .pfnReserved3 = */ NULL,
8184 /* .pfnReserved4 = */ NULL,
8185 /* .pfnReserved5 = */ NULL,
8186 /* .pfnReserved6 = */ NULL,
8187 /* .pfnReserved7 = */ NULL,
8188#elif defined(IN_RING0)
8189 /* .pfnEarlyConstruct = */ NULL,
8190 /* .pfnConstruct = */ e1kRZConstruct,
8191 /* .pfnDestruct = */ NULL,
8192 /* .pfnFinalDestruct = */ NULL,
8193 /* .pfnRequest = */ NULL,
8194 /* .pfnReserved0 = */ NULL,
8195 /* .pfnReserved1 = */ NULL,
8196 /* .pfnReserved2 = */ NULL,
8197 /* .pfnReserved3 = */ NULL,
8198 /* .pfnReserved4 = */ NULL,
8199 /* .pfnReserved5 = */ NULL,
8200 /* .pfnReserved6 = */ NULL,
8201 /* .pfnReserved7 = */ NULL,
8202#elif defined(IN_RC)
8203 /* .pfnConstruct = */ e1kRZConstruct,
8204 /* .pfnReserved0 = */ NULL,
8205 /* .pfnReserved1 = */ NULL,
8206 /* .pfnReserved2 = */ NULL,
8207 /* .pfnReserved3 = */ NULL,
8208 /* .pfnReserved4 = */ NULL,
8209 /* .pfnReserved5 = */ NULL,
8210 /* .pfnReserved6 = */ NULL,
8211 /* .pfnReserved7 = */ NULL,
8212#else
8213# error "Not in IN_RING3, IN_RING0 or IN_RC!"
8214#endif
8215 /* .u32VersionEnd = */ PDM_DEVREG_VERSION
8216};
8217
8218#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */