VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp @ 106061

Last change on this file was 106061, checked in by vboxsync: Copyright year updates by scm.

1/* $Id: DevE1000.cpp 106061 2024-09-16 14:03:52Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2024 Oracle and/or its affiliates.
18 *
19 * This file is part of VirtualBox base platform packages, as
20 * available from https://www.virtualbox.org.
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation, in version 3 of the
25 * License.
26 *
27 * This program is distributed in the hope that it will be useful, but
28 * WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
30 * General Public License for more details.
31 *
32 * You should have received a copy of the GNU General Public License
33 * along with this program; if not, see <https://www.gnu.org/licenses>.
34 *
35 * SPDX-License-Identifier: GPL-3.0-only
36 */
37
38
39/*********************************************************************************************************************************
40* Header Files *
41*********************************************************************************************************************************/
42#define LOG_GROUP LOG_GROUP_DEV_E1000
43#include <iprt/crc.h>
44#include <iprt/ctype.h>
45#include <iprt/net.h>
46#include <iprt/semaphore.h>
47#include <iprt/string.h>
48#include <iprt/time.h>
49#include <iprt/uuid.h>
50#include <VBox/vmm/pdmdev.h>
51#include <VBox/vmm/pdmnetifs.h>
52#include <VBox/vmm/pdmnetinline.h>
53#include <VBox/param.h>
54#include "VBoxDD.h"
55
56#include "DevEEPROM.h"
57#include "DevE1000Phy.h"
58
59
60/*********************************************************************************************************************************
61* Defined Constants And Macros *
62*********************************************************************************************************************************/
63/** @name E1000 Build Options
64 * @{ */
65/** @def E1K_INIT_RA0
66 * E1K_INIT_RA0 forces E1000 to set the first entry in Receive Address filter
67 * table to MAC address obtained from CFGM. Most guests read MAC address from
68 * EEPROM and write it to RA[0] explicitly, but Mac OS X seems to depend on it
69 * being already set (see @bugref{4657}).
70 */
71#define E1K_INIT_RA0
72/** @def E1K_LSC_ON_RESET
73 * E1K_LSC_ON_RESET causes e1000 to generate Link Status Change
74 * interrupt after hard reset. This makes the E1K_LSC_ON_SLU option unnecessary.
75 * With unplugged cable, LSC is triggered for 82543GC only.
76 */
77#define E1K_LSC_ON_RESET
78/** @def E1K_LSC_ON_SLU
79 * E1K_LSC_ON_SLU causes E1000 to generate Link Status Change interrupt when
80 * the guest driver brings up the link via STATUS.LU bit. Again the only guest
81 * that requires it is Mac OS X (see @bugref{4657}).
82 */
83//#define E1K_LSC_ON_SLU
84/** @def E1K_INIT_LINKUP_DELAY_US
85 * E1K_INIT_LINKUP_DELAY_US prevents the link from going up while the driver is
86 * still initializing (see @bugref{8624}).
87 */
88#define E1K_INIT_LINKUP_DELAY_US (2000 * 1000)
89/** @def E1K_IMS_INT_DELAY_NS
90 * E1K_IMS_INT_DELAY_NS prevents interrupt storms in Windows guests on enabling
91 * interrupts (see @bugref{8624}).
92 */
93#define E1K_IMS_INT_DELAY_NS 100
94/** @def E1K_TX_DELAY
95 * E1K_TX_DELAY aims to improve guest-host transfer rate for TCP streams by
96 * preventing packets from being sent immediately. It allows several packets to
97 * be sent in one batch, reducing the number of acknowledgments. Note that it
98 * effectively disables the R0 TX path, forcing transmission in R3.
99 */
100//#define E1K_TX_DELAY 150
101/** @def E1K_USE_TX_TIMERS
102 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
103 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
104 * register. Enabling it showed no positive effects on existing guests so it
105 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
106 * Ethernet Controllers Software Developer’s Manual" for more detailed
107 * explanation.
108 */
109//#define E1K_USE_TX_TIMERS
110/** @def E1K_NO_TAD
111 * E1K_NO_TAD disables one of two timers enabled by E1K_USE_TX_TIMERS, the
112 * Transmit Absolute Delay time. This timer sets the maximum time interval
113 * during which TX interrupts can be postponed (delayed). It has no effect
114 * if E1K_USE_TX_TIMERS is not defined.
115 */
116//#define E1K_NO_TAD
117/** @def E1K_REL_DEBUG
118 * E1K_REL_DEBUG enables debug logging (levels 1, 2, and 3) in release builds.
119 */
120//#define E1K_REL_DEBUG
121/** @def E1K_INT_STATS
122 * E1K_INT_STATS enables collection of internal statistics used for
123 * debugging of delayed interrupts, etc.
124 */
125#define E1K_INT_STATS
126/** @def E1K_WITH_MSI
127 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
128 */
129//#define E1K_WITH_MSI
130/** @def E1K_WITH_TX_CS
131 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
132 */
133#define E1K_WITH_TX_CS
134/** @def E1K_WITH_TXD_CACHE
135 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
136 * single physical memory read (or two if it wraps around the end of the TX
137 * descriptor ring). It is required for proper functioning of bandwidth
138 * resource control as it allows computing the exact sizes of packets prior
139 * to allocating their buffers (see @bugref{5582}).
140 */
141#define E1K_WITH_TXD_CACHE
142/** @def E1K_WITH_RXD_CACHE
143 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
144 * single physical memory read (or two if it wraps around the end of the RX
145 * descriptor ring). Intel's packet driver for DOS needs this option in
146 * order to work properly (see @bugref{6217}).
147 */
148#define E1K_WITH_RXD_CACHE
149/** @def E1K_WITH_PREREG_MMIO
150 * E1K_WITH_PREREG_MMIO enables new-style MMIO registration and is
151 * currently only used for testing the related PDM, IOM and PGM code. */
152//#define E1K_WITH_PREREG_MMIO
153/** @} */
154/* End of Options ************************************************************/
155
156#ifdef E1K_WITH_TXD_CACHE
157/**
158 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
159 * in the state structure. It limits the number of descriptors loaded in one
160 * batch read. For example, a Linux guest may use up to 20 descriptors per
161 * TSE packet. The largest TSE packet seen (from a Windows guest) used 45 descriptors.
162 */
163# define E1K_TXD_CACHE_SIZE 64u
164#endif /* E1K_WITH_TXD_CACHE */
165
166#ifdef E1K_WITH_RXD_CACHE
167/**
168 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
169 * in the state structure. It limits the number of descriptors loaded in one
170 * batch read. For example, a Windows XP guest adds 15 RX descriptors at a time.
171 */
172# define E1K_RXD_CACHE_SIZE 16u
173#endif /* E1K_WITH_RXD_CACHE */
174
175
176/* Little helpers ************************************************************/
177#undef htons
178#undef ntohs
179#undef htonl
180#undef ntohl
181#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
182#define ntohs(x) htons(x)
183#define htonl(x) ASMByteSwapU32(x)
184#define ntohl(x) htonl(x)
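/*
 * Illustrative sketch, not part of the original source: these helpers swap
 * byte order unconditionally, e.g.
 *
 *   htons(0x1234)               == 0x3412
 *   ntohl(UINT32_C(0x12345678)) == UINT32_C(0x78563412)   (via ASMByteSwapU32)
 *
 * so a network-order IPv4 EtherType (bytes 0x08 0x00) read as a 16-bit value
 * on a little-endian host comes back as ntohs(0x0008) == 0x0800.
 */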
185
186#ifndef DEBUG
187# ifdef E1K_REL_DEBUG
188# define DEBUG
189# define E1kLog(a) LogRel(a)
190# define E1kLog2(a) LogRel(a)
191# define E1kLog3(a) LogRel(a)
192# define E1kLogX(x, a) LogRel(a)
193//# define E1kLog3(a) do {} while (0)
194# else
195# define E1kLog(a) do {} while (0)
196# define E1kLog2(a) do {} while (0)
197# define E1kLog3(a) do {} while (0)
198# define E1kLogX(x, a) do {} while (0)
199# endif
200#else
201# define E1kLog(a) Log(a)
202# define E1kLog2(a) Log2(a)
203# define E1kLog3(a) Log3(a)
204# define E1kLogX(x, a) LogIt(x, LOG_GROUP, a)
205//# define E1kLog(a) do {} while (0)
206//# define E1kLog2(a) do {} while (0)
207//# define E1kLog3(a) do {} while (0)
208#endif
209
210#if 0
211# define LOG_ENABLED
212# define E1kLogRel(a) LogRel(a)
213# undef Log6
214# define Log6(a) LogRel(a)
215#else
216# define E1kLogRel(a) do { } while (0)
217#endif
218
219//#undef DEBUG
220
221#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
222
223#define E1K_INC_CNT32(cnt) \
224do { \
225 if (cnt < UINT32_MAX) \
226 cnt++; \
227} while (0)
228
229#define E1K_ADD_CNT64(cntLo, cntHi, val) \
230do { \
231 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
232 uint64_t tmp = u64Cnt; \
233 u64Cnt += val; \
234 if (tmp > u64Cnt ) \
235 u64Cnt = UINT64_MAX; \
236 cntLo = (uint32_t)u64Cnt; \
237 cntHi = (uint32_t)(u64Cnt >> 32); \
238} while (0)
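/*
 * Usage sketch, illustrative only (cbFrame is a hypothetical frame length;
 * GPRC and GORCL/GORCH are the statistics register shorthands defined below):
 *
 *   E1K_INC_CNT32(GPRC);                  // saturating 32-bit packet counter
 *   E1K_ADD_CNT64(GORCL, GORCH, cbFrame); // 64-bit octet counter kept as a
 *                                         // low/high register pair, saturating
 *                                         // at UINT64_MAX on overflow
 */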
239
240#ifdef E1K_INT_STATS
241# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
242#else /* E1K_INT_STATS */
243# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
244#endif /* E1K_INT_STATS */
245
246
247/*****************************************************************************/
248
249typedef uint32_t E1KCHIP;
250#define E1K_CHIP_82540EM 0
251#define E1K_CHIP_82543GC 1
252#define E1K_CHIP_82545EM 2
253
254#ifdef IN_RING3
255/** Different E1000 chips. */
256static const struct E1kChips
257{
258 uint16_t uPCIVendorId;
259 uint16_t uPCIDeviceId;
260 uint16_t uPCISubsystemVendorId;
261 uint16_t uPCISubsystemId;
262 const char *pcszName;
263} g_aChips[] =
264{
265 /* Vendor Device SSVendor SubSys Name */
266 { 0x8086,
267 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
268# ifdef E1K_WITH_MSI
269 0x105E,
270# else
271 0x100E,
272# endif
273 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
274 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
275 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMware Network Adapter */
276};
277#endif /* IN_RING3 */
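/*
 * Illustrative sketch (an assumption, not taken verbatim from this file): the
 * E1K_CHIP_* constants above index g_aChips, so the emulated variant can be
 * looked up as, e.g.
 *
 *   const char *pcszName = g_aChips[E1K_CHIP_82540EM].pcszName;  // "82540EM"
 *
 * with the PCI IDs for the selected chip taken from the same table entry.
 */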
278
279
280/* The size of register area mapped to I/O space */
281#define E1K_IOPORT_SIZE 0x8
282/* The size of memory-mapped register area */
283#define E1K_MM_SIZE 0x20000
284
285#define E1K_MAX_TX_PKT_SIZE 16288
286#define E1K_MAX_RX_PKT_SIZE 16384
287
288/*****************************************************************************/
289
290#ifndef VBOX_DEVICE_STRUCT_TESTCASE
291/** Gets the specified bits from the register. */
292#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
293#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
294#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
295#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
296#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
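/*
 * Illustrative expansion, not part of the original source: these macros rely
 * on the <reg>_<bits>_MASK / <reg>_<bits>_SHIFT naming convention, e.g. with
 * the RCTL_BSIZE_* definitions below
 *
 *   GET_BITS(RCTL, BSIZE)    expands to  ((RCTL & RCTL_BSIZE_MASK) >> RCTL_BSIZE_SHIFT)
 *   SET_BITS(RCTL, BSIZE, 1) clears the field and ORs in (1 << RCTL_BSIZE_SHIFT)
 */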
297
298#define CTRL_SLU UINT32_C(0x00000040)
299#define CTRL_MDIO UINT32_C(0x00100000)
300#define CTRL_MDC UINT32_C(0x00200000)
301#define CTRL_MDIO_DIR UINT32_C(0x01000000)
302#define CTRL_MDC_DIR UINT32_C(0x02000000)
303#define CTRL_RESET UINT32_C(0x04000000)
304#define CTRL_VME UINT32_C(0x40000000)
305
306#define STATUS_LU UINT32_C(0x00000002)
307#define STATUS_TXOFF UINT32_C(0x00000010)
308
309#define EECD_EE_WIRES UINT32_C(0x0F)
310#define EECD_EE_REQ UINT32_C(0x40)
311#define EECD_EE_GNT UINT32_C(0x80)
312
313#define EERD_START UINT32_C(0x00000001)
314#define EERD_DONE UINT32_C(0x00000010)
315#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
316#define EERD_DATA_SHIFT 16
317#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
318#define EERD_ADDR_SHIFT 8
319
320#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
321#define MDIC_DATA_SHIFT 0
322#define MDIC_REG_MASK UINT32_C(0x001F0000)
323#define MDIC_REG_SHIFT 16
324#define MDIC_PHY_MASK UINT32_C(0x03E00000)
325#define MDIC_PHY_SHIFT 21
326#define MDIC_OP_WRITE UINT32_C(0x04000000)
327#define MDIC_OP_READ UINT32_C(0x08000000)
328#define MDIC_READY UINT32_C(0x10000000)
329#define MDIC_INT_EN UINT32_C(0x20000000)
330#define MDIC_ERROR UINT32_C(0x40000000)
331
332#define TCTL_EN UINT32_C(0x00000002)
333#define TCTL_PSP UINT32_C(0x00000008)
334
335#define RCTL_EN UINT32_C(0x00000002)
336#define RCTL_UPE UINT32_C(0x00000008)
337#define RCTL_MPE UINT32_C(0x00000010)
338#define RCTL_LPE UINT32_C(0x00000020)
339#define RCTL_LBM_MASK UINT32_C(0x000000C0)
340#define RCTL_LBM_SHIFT 6
341#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
342#define RCTL_RDMTS_SHIFT 8
343#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
344#define RCTL_MO_MASK UINT32_C(0x00003000)
345#define RCTL_MO_SHIFT 12
346#define RCTL_BAM UINT32_C(0x00008000)
347#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
348#define RCTL_BSIZE_SHIFT 16
349#define RCTL_VFE UINT32_C(0x00040000)
350#define RCTL_CFIEN UINT32_C(0x00080000)
351#define RCTL_CFI UINT32_C(0x00100000)
352#define RCTL_BSEX UINT32_C(0x02000000)
353#define RCTL_SECRC UINT32_C(0x04000000)
354
355#define ICR_TXDW UINT32_C(0x00000001)
356#define ICR_TXQE UINT32_C(0x00000002)
357#define ICR_LSC UINT32_C(0x00000004)
358#define ICR_RXDMT0 UINT32_C(0x00000010)
359#define ICR_RXT0 UINT32_C(0x00000080)
360#define ICR_TXD_LOW UINT32_C(0x00008000)
361#define RDTR_FPD UINT32_C(0x80000000)
362
363#define PBA_st ((PBAST*)(pThis->auRegs + PBA_IDX))
364typedef struct
365{
366 unsigned rxa : 7;
367 unsigned rxa_r : 9;
368 unsigned txa : 16;
369} PBAST;
370AssertCompileSize(PBAST, 4);
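/*
 * Illustrative sketch, not part of the original source: PBA_st overlays the
 * PBA register with the bit-field view above, e.g.
 *
 *   unsigned uTxAlloc = PBA_st->txa;   // TX part of pThis->auRegs[PBA_IDX]
 *
 * avoiding manual masking and shifting.
 */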
371
372#define TXDCTL_WTHRESH_MASK 0x003F0000
373#define TXDCTL_WTHRESH_SHIFT 16
374#define TXDCTL_LWTHRESH_MASK 0xFE000000
375#define TXDCTL_LWTHRESH_SHIFT 25
376
377#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
378#define RXCSUM_PCSS_SHIFT 0
379
380/** @name Register access macros
381 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
382 * @{ */
383#define CTRL pThis->auRegs[CTRL_IDX]
384#define STATUS pThis->auRegs[STATUS_IDX]
385#define EECD pThis->auRegs[EECD_IDX]
386#define EERD pThis->auRegs[EERD_IDX]
387#define CTRL_EXT pThis->auRegs[CTRL_EXT_IDX]
388#define FLA pThis->auRegs[FLA_IDX]
389#define MDIC pThis->auRegs[MDIC_IDX]
390#define FCAL pThis->auRegs[FCAL_IDX]
391#define FCAH pThis->auRegs[FCAH_IDX]
392#define FCT pThis->auRegs[FCT_IDX]
393#define VET pThis->auRegs[VET_IDX]
394#define ICR pThis->auRegs[ICR_IDX]
395#define ITR pThis->auRegs[ITR_IDX]
396#define ICS pThis->auRegs[ICS_IDX]
397#define IMS pThis->auRegs[IMS_IDX]
398#define IMC pThis->auRegs[IMC_IDX]
399#define RCTL pThis->auRegs[RCTL_IDX]
400#define FCTTV pThis->auRegs[FCTTV_IDX]
401#define TXCW pThis->auRegs[TXCW_IDX]
402#define RXCW pThis->auRegs[RXCW_IDX]
403#define TCTL pThis->auRegs[TCTL_IDX]
404#define TIPG pThis->auRegs[TIPG_IDX]
405#define AIFS pThis->auRegs[AIFS_IDX]
406#define LEDCTL pThis->auRegs[LEDCTL_IDX]
407#define PBA pThis->auRegs[PBA_IDX]
408#define FCRTL pThis->auRegs[FCRTL_IDX]
409#define FCRTH pThis->auRegs[FCRTH_IDX]
410#define RDFH pThis->auRegs[RDFH_IDX]
411#define RDFT pThis->auRegs[RDFT_IDX]
412#define RDFHS pThis->auRegs[RDFHS_IDX]
413#define RDFTS pThis->auRegs[RDFTS_IDX]
414#define RDFPC pThis->auRegs[RDFPC_IDX]
415#define RDBAL pThis->auRegs[RDBAL_IDX]
416#define RDBAH pThis->auRegs[RDBAH_IDX]
417#define RDLEN pThis->auRegs[RDLEN_IDX]
418#define RDH pThis->auRegs[RDH_IDX]
419#define RDT pThis->auRegs[RDT_IDX]
420#define RDTR pThis->auRegs[RDTR_IDX]
421#define RXDCTL pThis->auRegs[RXDCTL_IDX]
422#define RADV pThis->auRegs[RADV_IDX]
423#define RSRPD pThis->auRegs[RSRPD_IDX]
424#define TXDMAC pThis->auRegs[TXDMAC_IDX]
425#define TDFH pThis->auRegs[TDFH_IDX]
426#define TDFT pThis->auRegs[TDFT_IDX]
427#define TDFHS pThis->auRegs[TDFHS_IDX]
428#define TDFTS pThis->auRegs[TDFTS_IDX]
429#define TDFPC pThis->auRegs[TDFPC_IDX]
430#define TDBAL pThis->auRegs[TDBAL_IDX]
431#define TDBAH pThis->auRegs[TDBAH_IDX]
432#define TDLEN pThis->auRegs[TDLEN_IDX]
433#define TDH pThis->auRegs[TDH_IDX]
434#define TDT pThis->auRegs[TDT_IDX]
435#define TIDV pThis->auRegs[TIDV_IDX]
436#define TXDCTL pThis->auRegs[TXDCTL_IDX]
437#define TADV pThis->auRegs[TADV_IDX]
438#define TSPMT pThis->auRegs[TSPMT_IDX]
439#define CRCERRS pThis->auRegs[CRCERRS_IDX]
440#define ALGNERRC pThis->auRegs[ALGNERRC_IDX]
441#define SYMERRS pThis->auRegs[SYMERRS_IDX]
442#define RXERRC pThis->auRegs[RXERRC_IDX]
443#define MPC pThis->auRegs[MPC_IDX]
444#define SCC pThis->auRegs[SCC_IDX]
445#define ECOL pThis->auRegs[ECOL_IDX]
446#define MCC pThis->auRegs[MCC_IDX]
447#define LATECOL pThis->auRegs[LATECOL_IDX]
448#define COLC pThis->auRegs[COLC_IDX]
449#define DC pThis->auRegs[DC_IDX]
450#define TNCRS pThis->auRegs[TNCRS_IDX]
451/* #define SEC pThis->auRegs[SEC_IDX] Conflict with sys/time.h */
452#define CEXTERR pThis->auRegs[CEXTERR_IDX]
453#define RLEC pThis->auRegs[RLEC_IDX]
454#define XONRXC pThis->auRegs[XONRXC_IDX]
455#define XONTXC pThis->auRegs[XONTXC_IDX]
456#define XOFFRXC pThis->auRegs[XOFFRXC_IDX]
457#define XOFFTXC pThis->auRegs[XOFFTXC_IDX]
458#define FCRUC pThis->auRegs[FCRUC_IDX]
459#define PRC64 pThis->auRegs[PRC64_IDX]
460#define PRC127 pThis->auRegs[PRC127_IDX]
461#define PRC255 pThis->auRegs[PRC255_IDX]
462#define PRC511 pThis->auRegs[PRC511_IDX]
463#define PRC1023 pThis->auRegs[PRC1023_IDX]
464#define PRC1522 pThis->auRegs[PRC1522_IDX]
465#define GPRC pThis->auRegs[GPRC_IDX]
466#define BPRC pThis->auRegs[BPRC_IDX]
467#define MPRC pThis->auRegs[MPRC_IDX]
468#define GPTC pThis->auRegs[GPTC_IDX]
469#define GORCL pThis->auRegs[GORCL_IDX]
470#define GORCH pThis->auRegs[GORCH_IDX]
471#define GOTCL pThis->auRegs[GOTCL_IDX]
472#define GOTCH pThis->auRegs[GOTCH_IDX]
473#define RNBC pThis->auRegs[RNBC_IDX]
474#define RUC pThis->auRegs[RUC_IDX]
475#define RFC pThis->auRegs[RFC_IDX]
476#define ROC pThis->auRegs[ROC_IDX]
477#define RJC pThis->auRegs[RJC_IDX]
478#define MGTPRC pThis->auRegs[MGTPRC_IDX]
479#define MGTPDC pThis->auRegs[MGTPDC_IDX]
480#define MGTPTC pThis->auRegs[MGTPTC_IDX]
481#define TORL pThis->auRegs[TORL_IDX]
482#define TORH pThis->auRegs[TORH_IDX]
483#define TOTL pThis->auRegs[TOTL_IDX]
484#define TOTH pThis->auRegs[TOTH_IDX]
485#define TPR pThis->auRegs[TPR_IDX]
486#define TPT pThis->auRegs[TPT_IDX]
487#define PTC64 pThis->auRegs[PTC64_IDX]
488#define PTC127 pThis->auRegs[PTC127_IDX]
489#define PTC255 pThis->auRegs[PTC255_IDX]
490#define PTC511 pThis->auRegs[PTC511_IDX]
491#define PTC1023 pThis->auRegs[PTC1023_IDX]
492#define PTC1522 pThis->auRegs[PTC1522_IDX]
493#define MPTC pThis->auRegs[MPTC_IDX]
494#define BPTC pThis->auRegs[BPTC_IDX]
495#define TSCTC pThis->auRegs[TSCTC_IDX]
496#define TSCTFC pThis->auRegs[TSCTFC_IDX]
497#define RXCSUM pThis->auRegs[RXCSUM_IDX]
498#define WUC pThis->auRegs[WUC_IDX]
499#define WUFC pThis->auRegs[WUFC_IDX]
500#define WUS pThis->auRegs[WUS_IDX]
501#define MANC pThis->auRegs[MANC_IDX]
502#define IPAV pThis->auRegs[IPAV_IDX]
503#define WUPL pThis->auRegs[WUPL_IDX]
504/** @} */
505#endif /* VBOX_DEVICE_STRUCT_TESTCASE */
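/*
 * Illustrative expansion, not part of the original source: with the shorthands
 * above,
 *
 *   if (RCTL & RCTL_EN) ...
 *
 * is really
 *
 *   if (pThis->auRegs[RCTL_IDX] & RCTL_EN) ...
 *
 * which is why a local variable 'pThis' of type PE1KSTATE must be in scope.
 */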
506
507/**
508 * Indices of memory-mapped registers in register table.
509 */
510typedef enum
511{
512 CTRL_IDX,
513 STATUS_IDX,
514 EECD_IDX,
515 EERD_IDX,
516 CTRL_EXT_IDX,
517 FLA_IDX,
518 MDIC_IDX,
519 FCAL_IDX,
520 FCAH_IDX,
521 FCT_IDX,
522 VET_IDX,
523 ICR_IDX,
524 ITR_IDX,
525 ICS_IDX,
526 IMS_IDX,
527 IMC_IDX,
528 RCTL_IDX,
529 FCTTV_IDX,
530 TXCW_IDX,
531 RXCW_IDX,
532 TCTL_IDX,
533 TIPG_IDX,
534 AIFS_IDX,
535 LEDCTL_IDX,
536 PBA_IDX,
537 FCRTL_IDX,
538 FCRTH_IDX,
539 RDFH_IDX,
540 RDFT_IDX,
541 RDFHS_IDX,
542 RDFTS_IDX,
543 RDFPC_IDX,
544 RDBAL_IDX,
545 RDBAH_IDX,
546 RDLEN_IDX,
547 RDH_IDX,
548 RDT_IDX,
549 RDTR_IDX,
550 RXDCTL_IDX,
551 RADV_IDX,
552 RSRPD_IDX,
553 TXDMAC_IDX,
554 TDFH_IDX,
555 TDFT_IDX,
556 TDFHS_IDX,
557 TDFTS_IDX,
558 TDFPC_IDX,
559 TDBAL_IDX,
560 TDBAH_IDX,
561 TDLEN_IDX,
562 TDH_IDX,
563 TDT_IDX,
564 TIDV_IDX,
565 TXDCTL_IDX,
566 TADV_IDX,
567 TSPMT_IDX,
568 CRCERRS_IDX,
569 ALGNERRC_IDX,
570 SYMERRS_IDX,
571 RXERRC_IDX,
572 MPC_IDX,
573 SCC_IDX,
574 ECOL_IDX,
575 MCC_IDX,
576 LATECOL_IDX,
577 COLC_IDX,
578 DC_IDX,
579 TNCRS_IDX,
580 SEC_IDX,
581 CEXTERR_IDX,
582 RLEC_IDX,
583 XONRXC_IDX,
584 XONTXC_IDX,
585 XOFFRXC_IDX,
586 XOFFTXC_IDX,
587 FCRUC_IDX,
588 PRC64_IDX,
589 PRC127_IDX,
590 PRC255_IDX,
591 PRC511_IDX,
592 PRC1023_IDX,
593 PRC1522_IDX,
594 GPRC_IDX,
595 BPRC_IDX,
596 MPRC_IDX,
597 GPTC_IDX,
598 GORCL_IDX,
599 GORCH_IDX,
600 GOTCL_IDX,
601 GOTCH_IDX,
602 RNBC_IDX,
603 RUC_IDX,
604 RFC_IDX,
605 ROC_IDX,
606 RJC_IDX,
607 MGTPRC_IDX,
608 MGTPDC_IDX,
609 MGTPTC_IDX,
610 TORL_IDX,
611 TORH_IDX,
612 TOTL_IDX,
613 TOTH_IDX,
614 TPR_IDX,
615 TPT_IDX,
616 PTC64_IDX,
617 PTC127_IDX,
618 PTC255_IDX,
619 PTC511_IDX,
620 PTC1023_IDX,
621 PTC1522_IDX,
622 MPTC_IDX,
623 BPTC_IDX,
624 TSCTC_IDX,
625 TSCTFC_IDX,
626 RXCSUM_IDX,
627 WUC_IDX,
628 WUFC_IDX,
629 WUS_IDX,
630 MANC_IDX,
631 IPAV_IDX,
632 WUPL_IDX,
633 MTA_IDX,
634 RA_IDX,
635 VFTA_IDX,
636 IP4AT_IDX,
637 IP6AT_IDX,
638 WUPM_IDX,
639 FFLT_IDX,
640 FFMT_IDX,
641 FFVT_IDX,
642 PBM_IDX,
643 RA_82542_IDX,
644 MTA_82542_IDX,
645 VFTA_82542_IDX,
646 E1K_NUM_OF_REGS
647} E1kRegIndex;
648
649#define E1K_NUM_OF_32BIT_REGS MTA_IDX
650/** The number of registers with strictly increasing offset. */
651#define E1K_NUM_OF_BINARY_SEARCHABLE (WUPL_IDX + 1)
652
653
654/**
655 * Define E1000-specific EEPROM layout.
656 */
657struct E1kEEPROM
658{
659 public:
660 EEPROM93C46 eeprom;
661
662#ifdef IN_RING3
663 /**
664 * Initialize EEPROM content.
665 *
666 * @param macAddr MAC address of E1000.
667 */
668 void init(RTMAC &macAddr)
669 {
670 eeprom.init();
671 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
672 eeprom.m_au16Data[0x04] = 0xFFFF;
673 /*
674 * bit 3 - full support for power management
675 * bit 10 - full duplex
676 */
677 eeprom.m_au16Data[0x0A] = 0x4408;
678 eeprom.m_au16Data[0x0B] = 0x001E;
679 eeprom.m_au16Data[0x0C] = 0x8086;
680 eeprom.m_au16Data[0x0D] = 0x100E;
681 eeprom.m_au16Data[0x0E] = 0x8086;
682 eeprom.m_au16Data[0x0F] = 0x3040;
683 eeprom.m_au16Data[0x21] = 0x7061;
684 eeprom.m_au16Data[0x22] = 0x280C;
685 eeprom.m_au16Data[0x23] = 0x00C8;
686 eeprom.m_au16Data[0x24] = 0x00C8;
687 eeprom.m_au16Data[0x2F] = 0x0602;
688 updateChecksum();
689 };
690
691 /**
692 * Compute the checksum as required by E1000 and store it
693 * in the last word.
694 */
695 void updateChecksum()
696 {
697 uint16_t u16Checksum = 0;
698
699 for (int i = 0; i < eeprom.SIZE-1; i++)
700 u16Checksum += eeprom.m_au16Data[i];
701 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
702 };
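    /*
     * Illustrative note, not part of the original source: the convention is
     * that all eeprom.SIZE words, including the checksum word itself, sum to
     * 0xBABA modulo 2^16:
     *
     *   sum(m_au16Data[0..SIZE-2]) + (0xBABA - sum) == 0xBABA
     */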
703
704 /**
705 * First 6 bytes of EEPROM contain MAC address.
706 *
707 * @returns MAC address of E1000.
708 */
709 void getMac(PRTMAC pMac)
710 {
711 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
712 };
713
714 uint32_t read()
715 {
716 return eeprom.read();
717 }
718
719 void write(uint32_t u32Wires)
720 {
721 eeprom.write(u32Wires);
722 }
723
724 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
725 {
726 return eeprom.readWord(u32Addr, pu16Value);
727 }
728
729 int load(PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
730 {
731 return eeprom.load(pHlp, pSSM);
732 }
733
734 void save(PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
735 {
736 eeprom.save(pHlp, pSSM);
737 }
738#endif /* IN_RING3 */
739};
740
741
742#define E1K_SPEC_VLAN(s) (s & 0xFFF)
743#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
744#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
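/*
 * Illustrative decode, not part of the original source: for a hypothetical
 * 802.1Q tag control value s = 0x6123,
 *
 *   E1K_SPEC_VLAN(s) == 0x123   (VLAN identifier, bits 0-11)
 *   E1K_SPEC_CFI(s)  == 0       (canonical format indicator, bit 12)
 *   E1K_SPEC_PRI(s)  == 3       (priority, bits 13-15)
 */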
745
746struct E1kRxDStatus
747{
748 /** @name Descriptor Status field (3.2.3.1)
749 * @{ */
750 unsigned fDD : 1; /**< Descriptor Done. */
751 unsigned fEOP : 1; /**< End of packet. */
752 unsigned fIXSM : 1; /**< Ignore checksum indication. */
753 unsigned fVP : 1; /**< VLAN, matches VET. */
754 unsigned : 1;
755 unsigned fTCPCS : 1; /**< TCP checksum calculated on the packet. */
756 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
757 unsigned fPIF : 1; /**< Passed in-exact filter */
758 /** @} */
759 /** @name Descriptor Errors field (3.2.3.2)
760 * (Only valid when fEOP and fDD are set.)
761 * @{ */
762 unsigned fCE : 1; /**< CRC or alignment error. */
763 unsigned : 4; /**< Reserved, varies with different models... */
764 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
765 unsigned fIPE : 1; /**< IP Checksum error. */
766 unsigned fRXE : 1; /**< RX Data error. */
767 /** @} */
768 /** @name Descriptor Special field (3.2.3.3)
769 * @{ */
770 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
771 /** @} */
772};
773typedef struct E1kRxDStatus E1KRXDST;
774
775struct E1kRxDesc_st
776{
777 uint64_t u64BufAddr; /**< Address of data buffer */
778 uint16_t u16Length; /**< Length of data in buffer */
779 uint16_t u16Checksum; /**< Packet checksum */
780 E1KRXDST status;
781};
782typedef struct E1kRxDesc_st E1KRXDESC;
783AssertCompileSize(E1KRXDESC, 16);
784
785#define E1K_DTYP_LEGACY -1
786#define E1K_DTYP_CONTEXT 0
787#define E1K_DTYP_DATA 1
788#define E1K_DTYP_INVALID 2
789
790struct E1kTDLegacy
791{
792 uint64_t u64BufAddr; /**< Address of data buffer */
793 struct TDLCmd_st
794 {
795 unsigned u16Length : 16;
796 unsigned u8CSO : 8;
797 /* CMD field : 8 */
798 unsigned fEOP : 1;
799 unsigned fIFCS : 1;
800 unsigned fIC : 1;
801 unsigned fRS : 1;
802 unsigned fRPS : 1;
803 unsigned fDEXT : 1;
804 unsigned fVLE : 1;
805 unsigned fIDE : 1;
806 } cmd;
807 struct TDLDw3_st
808 {
809 /* STA field */
810 unsigned fDD : 1;
811 unsigned fEC : 1;
812 unsigned fLC : 1;
813 unsigned fTURSV : 1;
814 /* RSV field */
815 unsigned u4RSV : 4;
816 /* CSS field */
817 unsigned u8CSS : 8;
818 /* Special field*/
819 unsigned u16Special: 16;
820 } dw3;
821};
822
823/**
824 * TCP/IP Context Transmit Descriptor, section 3.3.6.
825 */
826struct E1kTDContext
827{
828 struct CheckSum_st
829 {
830 /** TSE: Header start. !TSE: Checksum start. */
831 unsigned u8CSS : 8;
832 /** Checksum offset - where to store it. */
833 unsigned u8CSO : 8;
834 /** Checksum ending (inclusive) offset, 0 = end of packet. */
835 unsigned u16CSE : 16;
836 } ip;
837 struct CheckSum_st tu;
838 struct TDCDw2_st
839 {
840 /** TSE: The total number of payload bytes for this context. Sans header. */
841 unsigned u20PAYLEN : 20;
842 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
843 unsigned u4DTYP : 4;
844 /** TUCMD field, 8 bits
845 * @{ */
846 /** TSE: TCP (set) or UDP (clear). */
847 unsigned fTCP : 1;
848 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
849 * the IP header. Does not affect the checksumming.
850 * @remarks 82544GC/EI interprets a cleared field differently. */
851 unsigned fIP : 1;
852 /** TSE: TCP segmentation enable. When clear, the context only sets up checksum offloading. */
853 unsigned fTSE : 1;
854 /** Report status (only applies to dw3.fDD for here). */
855 unsigned fRS : 1;
856 /** Reserved, MBZ. */
857 unsigned fRSV1 : 1;
858 /** Descriptor extension, must be set for this descriptor type. */
859 unsigned fDEXT : 1;
860 /** Reserved, MBZ. */
861 unsigned fRSV2 : 1;
862 /** Interrupt delay enable. */
863 unsigned fIDE : 1;
864 /** @} */
865 } dw2;
866 struct TDCDw3_st
867 {
868 /** Descriptor Done. */
869 unsigned fDD : 1;
870 /** Reserved, MBZ. */
871 unsigned u7RSV : 7;
872 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
873 unsigned u8HDRLEN : 8;
874 /** TSO: Maximum segment size. */
875 unsigned u16MSS : 16;
876 } dw3;
877};
878typedef struct E1kTDContext E1KTXCTX;
879
880/**
881 * TCP/IP Data Transmit Descriptor, section 3.3.7.
882 */
883struct E1kTDData
884{
885 uint64_t u64BufAddr; /**< Address of data buffer */
886 struct TDDCmd_st
887 {
888 /** The total length of data pointed to by this descriptor. */
889 unsigned u20DTALEN : 20;
890 /** The descriptor type - E1K_DTYP_DATA (1). */
891 unsigned u4DTYP : 4;
892 /** @name DCMD field, 8 bits (3.3.7.1).
893 * @{ */
894 /** End of packet. Note TSCTFC update. */
895 unsigned fEOP : 1;
896 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
897 unsigned fIFCS : 1;
898 /** Use the TSE context when set and the normal when clear. */
899 unsigned fTSE : 1;
900 /** Report status (dw3.STA). */
901 unsigned fRS : 1;
902 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
903 unsigned fRPS : 1;
904 /** Descriptor extension, must be set for this descriptor type. */
905 unsigned fDEXT : 1;
906 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
907 * Insert dw3.SPECIAL after ethernet header. */
908 unsigned fVLE : 1;
909 /** Interrupt delay enable. */
910 unsigned fIDE : 1;
911 /** @} */
912 } cmd;
913 struct TDDDw3_st
914 {
915 /** @name STA field (3.3.7.2)
916 * @{ */
917 unsigned fDD : 1; /**< Descriptor done. */
918 unsigned fEC : 1; /**< Excess collision. */
919 unsigned fLC : 1; /**< Late collision. */
920 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
921 unsigned fTURSV : 1;
922 /** @} */
923 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
924 /** @name POPTS (Packet Option) field (3.3.7.3)
925 * @{ */
926 unsigned fIXSM : 1; /**< Insert IP checksum. */
927 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
928 unsigned u6RSV : 6; /**< Reserved, MBZ. */
929 /** @} */
930 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
931 * Requires fEOP, fVLE and CTRL.VME to be set.
932 * @{ */
933 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
934 /** @} */
935 } dw3;
936};
937typedef struct E1kTDData E1KTXDAT;
938
939union E1kTxDesc
940{
941 struct E1kTDLegacy legacy;
942 struct E1kTDContext context;
943 struct E1kTDData data;
944};
945typedef union E1kTxDesc E1KTXDESC;
946AssertCompileSize(E1KTXDESC, 16);
947
948#define RA_CTL_AS 0x0003
949#define RA_CTL_AV 0x8000
950
951union E1kRecAddr
952{
953 uint32_t au32[32];
954 struct RAArray
955 {
956 uint8_t addr[6];
957 uint16_t ctl;
958 } array[16];
959};
960typedef struct E1kRecAddr::RAArray E1KRAELEM;
961typedef union E1kRecAddr E1KRA;
962AssertCompileSize(E1KRA, 8*16);
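/*
 * Illustrative sketch (an assumption, not taken verbatim from this file): each
 * aRecAddr.array entry (see E1KSTATE below) pairs a MAC address with a control
 * word, so RA[0] could be initialized roughly as
 *
 *   memcpy(pThis->aRecAddr.array[0].addr, pThis->macConfigured.au8, 6);
 *   pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;   // mark the entry as valid
 *
 * which is what the E1K_INIT_RA0 build option (see above) is about.
 */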
963
964#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
965#define E1K_IP_DF UINT16_C(0x4000) /**< don't fragment flag */
966#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
967#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
968
969/** @todo use+extend RTNETIPV4 */
970struct E1kIpHeader
971{
972 /* type of service / version / header length */
973 uint16_t tos_ver_hl;
974 /* total length */
975 uint16_t total_len;
976 /* identification */
977 uint16_t ident;
978 /* fragment offset field */
979 uint16_t offset;
980 /* time to live / protocol*/
981 uint16_t ttl_proto;
982 /* checksum */
983 uint16_t chksum;
984 /* source IP address */
985 uint32_t src;
986 /* destination IP address */
987 uint32_t dest;
988};
989AssertCompileSize(struct E1kIpHeader, 20);
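/*
 * Illustrative sketch, not part of the original source (pIpHdr is a
 * hypothetical pointer to an E1kIpHeader in a packet buffer): a datagram is a
 * fragment if the "more fragments" bit is set or the fragment offset is
 * non-zero:
 *
 *   uint16_t uOff      = ntohs(pIpHdr->offset);
 *   bool     fFragment = (uOff & E1K_IP_MF) || (uOff & E1K_IP_OFFMASK);
 */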
990
991#define E1K_TCP_FIN UINT16_C(0x01)
992#define E1K_TCP_SYN UINT16_C(0x02)
993#define E1K_TCP_RST UINT16_C(0x04)
994#define E1K_TCP_PSH UINT16_C(0x08)
995#define E1K_TCP_ACK UINT16_C(0x10)
996#define E1K_TCP_URG UINT16_C(0x20)
997#define E1K_TCP_ECE UINT16_C(0x40)
998#define E1K_TCP_CWR UINT16_C(0x80)
999#define E1K_TCP_FLAGS UINT16_C(0x3f)
1000
1001/** @todo use+extend RTNETTCP */
1002struct E1kTcpHeader
1003{
1004 uint16_t src;
1005 uint16_t dest;
1006 uint32_t seqno;
1007 uint32_t ackno;
1008 uint16_t hdrlen_flags;
1009 uint16_t wnd;
1010 uint16_t chksum;
1011 uint16_t urgp;
1012};
1013AssertCompileSize(struct E1kTcpHeader, 20);
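/*
 * Illustrative sketch, not part of the original source (pTcpHdr is a
 * hypothetical pointer to an E1kTcpHeader): the flag bits occupy the low byte
 * of hdrlen_flags once byte-swapped, and E1K_TCP_FLAGS (0x3f) covers FIN
 * through URG, excluding ECE and CWR:
 *
 *   uint16_t fFlags    = ntohs(pTcpHdr->hdrlen_flags) & E1K_TCP_FLAGS;
 *   bool     fSynOrFin = RT_BOOL(fFlags & (E1K_TCP_SYN | E1K_TCP_FIN));
 */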
1014
1015
1016#ifdef E1K_WITH_TXD_CACHE
1017/** The current Saved state version. */
1018# define E1K_SAVEDSTATE_VERSION 4
1019/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
1020# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
1021#else /* !E1K_WITH_TXD_CACHE */
1022/** The current Saved state version. */
1023# define E1K_SAVEDSTATE_VERSION 3
1024#endif /* !E1K_WITH_TXD_CACHE */
1025/** Saved state version for VirtualBox 4.1 and earlier.
1026 * These did not include VLAN tag fields. */
1027#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
1028/** Saved state version for VirtualBox 3.0 and earlier.
1029 * This did not include the configuration part nor the E1kEEPROM. */
1030#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
1031
1032/**
1033 * E1000 shared device state.
1034 *
1035 * This is shared between ring-0 and ring-3.
1036 */
1037typedef struct E1KSTATE
1038{
1039 char szPrf[8]; /**< Log prefix, e.g. E1000#1. */
1040
1041 /** Handle to PCI region \#0, the MMIO region. */
1042 IOMIOPORTHANDLE hMmioRegion;
1043 /** Handle to PCI region \#2, the I/O ports. */
1044 IOMIOPORTHANDLE hIoPorts;
1045
1046 /** Receive Interrupt Delay Timer. */
1047 TMTIMERHANDLE hRIDTimer;
1048 /** Receive Absolute Delay Timer. */
1049 TMTIMERHANDLE hRADTimer;
1050 /** Transmit Interrupt Delay Timer. */
1051 TMTIMERHANDLE hTIDTimer;
1052 /** Transmit Absolute Delay Timer. */
1053 TMTIMERHANDLE hTADTimer;
1054 /** Transmit Delay Timer. */
1055 TMTIMERHANDLE hTXDTimer;
1056 /** Late Interrupt Timer. */
1057 TMTIMERHANDLE hIntTimer;
1058 /** Link Up(/Restore) Timer. */
1059 TMTIMERHANDLE hLUTimer;
1060
1061 /** Transmit task. */
1062 PDMTASKHANDLE hTxTask;
1063
1064 /** Critical section - what is it protecting? */
1065 PDMCRITSECT cs;
1066 /** RX Critical section. */
1067 PDMCRITSECT csRx;
1068#ifdef E1K_WITH_TX_CS
1069 /** TX Critical section. */
1070 PDMCRITSECT csTx;
1071#endif /* E1K_WITH_TX_CS */
1072 /** MAC address obtained from the configuration. */
1073 RTMAC macConfigured;
1074 uint16_t u16Padding0;
1075 /** EMT: Last time the interrupt was acknowledged. */
1076 uint64_t u64AckedAt;
1077 /** All: Used for eliminating spurious interrupts. */
1078 bool fIntRaised;
1079 /** EMT: false if the cable is disconnected by the GUI. */
1080 bool fCableConnected;
1081 /** true if the device is attached to a driver. */
1082 bool fIsAttached;
1083 /** EMT: Compute Ethernet CRC for RX packets. */
1084 bool fEthernetCRC;
1085 /** All: throttle interrupts. */
1086 bool fItrEnabled;
1087 /** All: throttle RX interrupts. */
1088 bool fItrRxEnabled;
1089 /** All: Delay TX interrupts using TIDV/TADV. */
1090 bool fTidEnabled;
1091 bool afPadding[2];
1092 /** Link up delay (in milliseconds). */
1093 uint32_t cMsLinkUpDelay;
1094
1095 /** All: Device register storage. */
1096 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1097 /** TX/RX: Status LED. */
1098 PDMLED led;
1099 /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1100 uint32_t u32PktNo;
1101
1102 /** EMT: Offset of the register to be read via IO. */
1103 uint32_t uSelectedReg;
1104 /** EMT: Multicast Table Array. */
1105 uint32_t auMTA[128];
1106 /** EMT: Receive Address registers. */
1107 E1KRA aRecAddr;
1108 /** EMT: VLAN filter table array. */
1109 uint32_t auVFTA[128];
1110 /** EMT: Receive buffer size. */
1111 uint16_t u16RxBSize;
1112 /** EMT: Locked state -- no state alteration possible. */
1113 bool fLocked;
1114 /** EMT: */
1115 bool fDelayInts;
1116 /** All: */
1117 bool fIntMaskUsed;
1118
1119 /** N/A: */
1120 bool volatile fMaybeOutOfSpace;
1121 /** EMT: Gets signalled when more RX descriptors become available. */
1122 SUPSEMEVENT hEventMoreRxDescAvail;
1123#ifdef E1K_WITH_RXD_CACHE
1124 /** RX: Fetched RX descriptors. */
1125 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1126 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1127 /** RX: Actual number of fetched RX descriptors. */
1128 uint32_t nRxDFetched;
1129 /** RX: Index in cache of RX descriptor being processed. */
1130 uint32_t iRxDCurrent;
1131#endif /* E1K_WITH_RXD_CACHE */
1132
1133 /** TX: Context used for TCP segmentation packets. */
1134 E1KTXCTX contextTSE;
1135 /** TX: Context used for ordinary packets. */
1136 E1KTXCTX contextNormal;
1137#ifdef E1K_WITH_TXD_CACHE
1138 /** TX: Fetched TX descriptors. */
1139 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1140 /** TX: Validity of TX descriptors. Set by e1kLocateTxPacket, used by e1kXmitPacket. */
1141 bool afTxDValid[E1K_TXD_CACHE_SIZE];
1142 /** TX: Actual number of fetched TX descriptors. */
1143 uint8_t nTxDFetched;
1144 /** TX: Index in cache of TX descriptor being processed. */
1145 uint8_t iTxDCurrent;
1146 /** TX: Will this frame be sent as GSO. */
1147 bool fGSO;
1148 /** Alignment padding. */
1149 bool fReserved;
1150 /** TX: Number of bytes in next packet. */
1151 uint32_t cbTxAlloc;
1152
1153#endif /* E1K_WITH_TXD_CACHE */
1154 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1155 * applicable to the current TSE mode. */
1156 PDMNETWORKGSO GsoCtx;
1157 /** Scratch space for holding the loopback / fallback scatter / gather
1158 * descriptor. */
1159 union
1160 {
1161 PDMSCATTERGATHER Sg;
1162 uint8_t padding[8 * sizeof(RTUINTPTR)];
1163 } uTxFallback;
1164 /** TX: Transmit packet buffer use for TSE fallback and loopback. */
1165 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1166 /** TX: Number of bytes assembled in TX packet buffer. */
1167 uint16_t u16TxPktLen;
1168 /** TX: When false, forces segmentation in e1000 instead of sending frames as GSO. */
1169 bool fGSOEnabled;
1170 /** TX: IP checksum has to be inserted if true. */
1171 bool fIPcsum;
1172 /** TX: TCP/UDP checksum has to be inserted if true. */
1173 bool fTCPcsum;
1174 /** TX: VLAN tag has to be inserted if true. */
1175 bool fVTag;
1176 /** TX: TCI part of VLAN tag to be inserted. */
1177 uint16_t u16VTagTCI;
1178 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1179 uint32_t u32PayRemain;
1180 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1181 uint16_t u16HdrRemain;
1182 /** TX TSE fallback: Flags from template header. */
1183 uint16_t u16SavedFlags;
1184 /** TX TSE fallback: Partial checksum from template header. */
1185 uint32_t u32SavedCsum;
1186 /** ?: Emulated controller type. */
1187 E1KCHIP eChip;
1188
1189 /** EMT: Physical interface emulation. */
1190 PHY phy;
1191
1192#if 0
1193 /** Alignment padding. */
1194 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1195#endif
1196
1197 STAMCOUNTER StatReceiveBytes;
1198 STAMCOUNTER StatTransmitBytes;
1199#if defined(VBOX_WITH_STATISTICS)
1200 STAMPROFILEADV StatMMIOReadRZ;
1201 STAMPROFILEADV StatMMIOReadR3;
1202 STAMPROFILEADV StatMMIOWriteRZ;
1203 STAMPROFILEADV StatMMIOWriteR3;
1204 STAMPROFILEADV StatEEPROMRead;
1205 STAMPROFILEADV StatEEPROMWrite;
1206 STAMPROFILEADV StatIOReadRZ;
1207 STAMPROFILEADV StatIOReadR3;
1208 STAMPROFILEADV StatIOWriteRZ;
1209 STAMPROFILEADV StatIOWriteR3;
1210 STAMPROFILEADV StatLateIntTimer;
1211 STAMCOUNTER StatLateInts;
1212 STAMCOUNTER StatIntsRaised;
1213 STAMCOUNTER StatIntsPrevented;
1214 STAMPROFILEADV StatReceive;
1215 STAMPROFILEADV StatReceiveCRC;
1216 STAMPROFILEADV StatReceiveFilter;
1217 STAMPROFILEADV StatReceiveStore;
1218 STAMPROFILEADV StatTransmitRZ;
1219 STAMPROFILEADV StatTransmitR3;
1220 STAMPROFILE StatTransmitSendRZ;
1221 STAMPROFILE StatTransmitSendR3;
1222 STAMPROFILE StatRxOverflow;
1223 STAMCOUNTER StatRxOverflowWakeupRZ;
1224 STAMCOUNTER StatRxOverflowWakeupR3;
1225 STAMCOUNTER StatTxDescCtxNormal;
1226 STAMCOUNTER StatTxDescCtxTSE;
1227 STAMCOUNTER StatTxDescLegacy;
1228 STAMCOUNTER StatTxDescData;
1229 STAMCOUNTER StatTxDescTSEData;
1230 STAMCOUNTER StatTxPathFallback;
1231 STAMCOUNTER StatTxPathGSO;
1232 STAMCOUNTER StatTxPathRegular;
1233 STAMCOUNTER StatPHYAccesses;
1234 STAMCOUNTER aStatRegWrites[E1K_NUM_OF_REGS];
1235 STAMCOUNTER aStatRegReads[E1K_NUM_OF_REGS];
1236#endif /* VBOX_WITH_STATISTICS */
1237
1238#ifdef E1K_INT_STATS
1239 /* Internal stats */
1240 uint64_t u64ArmedAt;
1241 uint64_t uStatMaxTxDelay;
1242 uint32_t uStatInt;
1243 uint32_t uStatIntTry;
1244 uint32_t uStatIntLower;
1245 uint32_t uStatNoIntICR;
1246 int32_t iStatIntLost;
1247 int32_t iStatIntLostOne;
1248 uint32_t uStatIntIMS;
1249 uint32_t uStatIntSkip;
1250 uint32_t uStatIntLate;
1251 uint32_t uStatIntMasked;
1252 uint32_t uStatIntEarly;
1253 uint32_t uStatIntRx;
1254 uint32_t uStatIntTx;
1255 uint32_t uStatIntICS;
1256 uint32_t uStatIntRDTR;
1257 uint32_t uStatIntRXDMT0;
1258 uint32_t uStatIntTXQE;
1259 uint32_t uStatTxNoRS;
1260 uint32_t uStatTxIDE;
1261 uint32_t uStatTxDelayed;
1262 uint32_t uStatTxDelayExp;
1263 uint32_t uStatTAD;
1264 uint32_t uStatTID;
1265 uint32_t uStatRAD;
1266 uint32_t uStatRID;
1267 uint32_t uStatRxFrm;
1268 uint32_t uStatTxFrm;
1269 uint32_t uStatDescCtx;
1270 uint32_t uStatDescDat;
1271 uint32_t uStatDescLeg;
1272 uint32_t uStatTx1514;
1273 uint32_t uStatTx2962;
1274 uint32_t uStatTx4410;
1275 uint32_t uStatTx5858;
1276 uint32_t uStatTx7306;
1277 uint32_t uStatTx8754;
1278 uint32_t uStatTx16384;
1279 uint32_t uStatTx32768;
1280 uint32_t uStatTxLarge;
1281 uint32_t uStatAlign;
1282#endif /* E1K_INT_STATS */
1283} E1KSTATE;
1284/** Pointer to the E1000 device state. */
1285typedef E1KSTATE *PE1KSTATE;
1286
1287/**
1288 * E1000 ring-3 device state
1289 *
1290 * @implements PDMINETWORKDOWN
1291 * @implements PDMINETWORKCONFIG
1292 * @implements PDMILEDPORTS
1293 */
1294typedef struct E1KSTATER3
1295{
1296 PDMIBASE IBase;
1297 PDMINETWORKDOWN INetworkDown;
1298 PDMINETWORKCONFIG INetworkConfig;
1299 /** LED interface */
1300 PDMILEDPORTS ILeds;
1301 /** Attached network driver. */
1302 R3PTRTYPE(PPDMIBASE) pDrvBase;
1303 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1304
1305 /** Pointer to the shared state. */
1306 R3PTRTYPE(PE1KSTATE) pShared;
1307
1308 /** Device instance. */
1309 PPDMDEVINSR3 pDevInsR3;
1310 /** Attached network driver. */
1311 PPDMINETWORKUPR3 pDrvR3;
1312 /** The scatter / gather buffer used for the current outgoing packet. */
1313 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1314
1315 /** EMT: EEPROM emulation */
1316 E1kEEPROM eeprom;
1317} E1KSTATER3;
1318/** Pointer to the E1000 ring-3 device state. */
1319typedef E1KSTATER3 *PE1KSTATER3;
1320
1321
1322/**
1323 * E1000 ring-0 device state
1324 */
1325typedef struct E1KSTATER0
1326{
1327 /** Device instance. */
1328 PPDMDEVINSR0 pDevInsR0;
1329 /** Attached network driver. */
1330 PPDMINETWORKUPR0 pDrvR0;
1331 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1332 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1333} E1KSTATER0;
1334/** Pointer to the E1000 ring-0 device state. */
1335typedef E1KSTATER0 *PE1KSTATER0;
1336
1337
1338/**
1339 * E1000 raw-mode device state
1340 */
1341typedef struct E1KSTATERC
1342{
1343 /** Device instance. */
1344 PPDMDEVINSRC pDevInsRC;
1345 /** Attached network driver. */
1346 PPDMINETWORKUPRC pDrvRC;
1347 /** The scatter / gather buffer used for the current outgoing packet. */
1348 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1349} E1KSTATERC;
1350/** Pointer to the E1000 raw-mode device state. */
1351typedef E1KSTATERC *PE1KSTATERC;
1352
1353
1354/** @def PE1KSTATECC
1355 * Pointer to the instance data for the current context. */
1356#ifdef IN_RING3
1357typedef E1KSTATER3 E1KSTATECC;
1358typedef PE1KSTATER3 PE1KSTATECC;
1359#elif defined(IN_RING0)
1360typedef E1KSTATER0 E1KSTATECC;
1361typedef PE1KSTATER0 PE1KSTATECC;
1362#elif defined(IN_RC)
1363typedef E1KSTATERC E1KSTATECC;
1364typedef PE1KSTATERC PE1KSTATECC;
1365#else
1366# error "Not IN_RING3, IN_RING0 or IN_RC"
1367#endif
1368
1369
1370#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1371
1372/* Forward declarations ******************************************************/
1373static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread);
1374
1375/**
1376 * E1000 register read handler.
1377 */
1378typedef int (FNE1KREGREAD)(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1379/**
1380 * E1000 register write handler.
1381 */
1382typedef int (FNE1KREGWRITE)(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1383
1384static FNE1KREGREAD e1kRegReadUnimplemented;
1385static FNE1KREGWRITE e1kRegWriteUnimplemented;
1386static FNE1KREGREAD e1kRegReadAutoClear;
1387static FNE1KREGREAD e1kRegReadDefault;
1388static FNE1KREGWRITE e1kRegWriteDefault;
1389#if 0 /* unused */
1390static FNE1KREGREAD e1kRegReadCTRL;
1391#endif
1392static FNE1KREGWRITE e1kRegWriteCTRL;
1393static FNE1KREGREAD e1kRegReadEECD;
1394static FNE1KREGWRITE e1kRegWriteEECD;
1395static FNE1KREGWRITE e1kRegWriteEERD;
1396static FNE1KREGWRITE e1kRegWriteMDIC;
1397static FNE1KREGREAD e1kRegReadICR;
1398static FNE1KREGWRITE e1kRegWriteICR;
1399static FNE1KREGREAD e1kRegReadICS;
1400static FNE1KREGWRITE e1kRegWriteICS;
1401static FNE1KREGWRITE e1kRegWriteIMS;
1402static FNE1KREGWRITE e1kRegWriteIMC;
1403static FNE1KREGWRITE e1kRegWriteRCTL;
1404static FNE1KREGWRITE e1kRegWritePBA;
1405static FNE1KREGWRITE e1kRegWriteRDT;
1406static FNE1KREGWRITE e1kRegWriteRDTR;
1407static FNE1KREGWRITE e1kRegWriteTDT;
1408static FNE1KREGREAD e1kRegReadMTA;
1409static FNE1KREGWRITE e1kRegWriteMTA;
1410static FNE1KREGREAD e1kRegReadRA;
1411static FNE1KREGWRITE e1kRegWriteRA;
1412static FNE1KREGREAD e1kRegReadVFTA;
1413static FNE1KREGWRITE e1kRegWriteVFTA;
1414
1415/**
1416 * Register map table.
1417 *
1418 * Override pfnRead and pfnWrite to get register-specific behavior.
1419 */
1420static const struct E1kRegMap_st
1421{
1422 /** Register offset in the register space. */
1423 uint32_t offset;
1424 /** Size in bytes. Registers of size > 4 are in fact tables. */
1425 uint32_t size;
1426 /** Readable bits. */
1427 uint32_t readable;
1428 /** Writable bits. */
1429 uint32_t writable;
1430 /** Read callback. */
1431 FNE1KREGREAD *pfnRead;
1432 /** Write callback. */
1433 FNE1KREGWRITE *pfnWrite;
1434 /** Abbreviated name. */
1435 const char *abbrev;
1436 /** Full name. */
1437 const char *name;
1438} g_aE1kRegMap[E1K_NUM_OF_REGS] =
1439{
1440 /* offset size read mask write mask read callback write callback abbrev full name */
1441 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1442 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1443 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1444 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1445 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1446 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1447 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1448 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1449 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1450 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1451 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1452 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1453 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1454 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1455 { 0x000c8, 0x00004, 0x0001F6DF, 0xFFFFFFFF, e1kRegReadICS , e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1456 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1457 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1458 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1459 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1460 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1461 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1462 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1463 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1464 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1465 { 0x00e00, 0x00004, 0xCFCFCFCF, 0xCFCFCFCF, e1kRegReadDefault , e1kRegWriteDefault , "LEDCTL" , "LED Control" },
1466 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1467 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1468 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1469 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1470 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1471 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1472 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1473 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1474 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1475 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1476 { 0x02808, 0x00004, 0x000FFF80, 0x000FFF80, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1477 { 0x02810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1478 { 0x02818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1479 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1480 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1481 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1482 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1483 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1484 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1485 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1486 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1487 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1488 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1489 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1490 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1491 { 0x03808, 0x00004, 0x000FFF80, 0x000FFF80, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1492 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1493 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1494 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1495 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1496 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1497 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1498 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1499 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1500 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1501 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1502 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1503 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1504 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1505 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1506 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1507 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1508 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1509 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1510 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1511 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1512 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1513 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1514 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1515 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1516 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1517 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1518 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1519 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1520 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1521 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1522 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1523 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1524 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1525 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1526 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1527 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1528 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1529 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1530 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1531 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1532 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1533 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1534 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1535 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1536 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1537 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1538 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1539 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1540 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1541 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1542 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1543 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1544 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1545 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1546 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1547 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1548 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1549 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1550 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1551 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1552 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1553 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1554 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1555 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1556 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1557 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1558 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1559 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1560 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1561 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1562 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1563 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1564 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1565 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1566 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1567 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1568 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1569 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1570 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1571 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1572 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1573 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
1574 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
1575 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
1576};
1577
1578#ifdef LOG_ENABLED
1579
1580/**
1581 * Convert a U32 value to a hex string. Nibbles not selected by the mask are replaced with dots.
1582 *
1583 * @remarks The mask has half-byte (not bit) granularity (e.g. 0000000F).
1584 *
1585 * @returns The buffer.
1586 *
1587 * @param u32 The word to convert into string.
1588 * @param   mask        Selects which nibbles to convert.
1589 * @param buf Where to put the result.
1590 */
1591static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1592{
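    /* Note: for nibble values 0-9 adding '0' yields '0'-'9'; for 10-15 adding
       '7' (i.e. 'A' - 10) yields 'A'-'F', e.g. 0xC + '7' = 'C'. */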
1593 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1594 {
1595 if (mask & 0xF)
1596 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1597 else
1598 *ptr = '.';
1599 }
1600 buf[8] = 0;
1601 return buf;
1602}
1603
1604/**
1605 * Returns timer name for debug purposes.
1606 *
1607 * @returns The timer name.
1608 *
1609 * @param pThis The device state structure.
1610 * @param hTimer The timer to name.
1611 */
1612DECLINLINE(const char *) e1kGetTimerName(PE1KSTATE pThis, TMTIMERHANDLE hTimer)
1613{
1614 if (hTimer == pThis->hTIDTimer)
1615 return "TID";
1616 if (hTimer == pThis->hTADTimer)
1617 return "TAD";
1618 if (hTimer == pThis->hRIDTimer)
1619 return "RID";
1620 if (hTimer == pThis->hRADTimer)
1621 return "RAD";
1622 if (hTimer == pThis->hIntTimer)
1623 return "Int";
1624 if (hTimer == pThis->hTXDTimer)
1625 return "TXD";
1626 if (hTimer == pThis->hLUTimer)
1627 return "LinkUp";
1628 return "unknown";
1629}
1630
1631#endif /* LOG_ENABLED */
1632
1633/**
1634 * Arm a timer.
1635 *
1636 * @param pDevIns The device instance.
1637 * @param pThis Pointer to the device state structure.
1638 * @param hTimer The timer to arm.
1639 * @param uExpireIn Expiration interval in microseconds.
1640 */
1641DECLINLINE(void) e1kArmTimer(PPDMDEVINS pDevIns, PE1KSTATE pThis, TMTIMERHANDLE hTimer, uint32_t uExpireIn)
1642{
1643 if (pThis->fLocked)
1644 return;
1645
1646 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1647 pThis->szPrf, e1kGetTimerName(pThis, hTimer), uExpireIn));
1648 int rc = PDMDevHlpTimerSetMicro(pDevIns, hTimer, uExpireIn);
1649 AssertRC(rc);
1650}
1651
1652#ifdef IN_RING3
1653/**
1654 * Cancel a timer.
1655 *
1656 * @param pDevIns The device instance.
1657 * @param pThis Pointer to the device state structure.
1658 * @param   hTimer      The handle of the timer to cancel.
1659 */
1660DECLINLINE(void) e1kCancelTimer(PPDMDEVINS pDevIns, PE1KSTATE pThis, TMTIMERHANDLE hTimer)
1661{
1662 E1kLog2(("%s Stopping %s timer...\n",
1663 pThis->szPrf, e1kGetTimerName(pThis, hTimer)));
1664 int rc = PDMDevHlpTimerStop(pDevIns, hTimer);
1665 if (RT_FAILURE(rc))
1666 E1kLog2(("%s e1kCancelTimer: TMTimerStop(%s) failed with %Rrc\n",
1667 pThis->szPrf, e1kGetTimerName(pThis, hTimer), rc));
1668 RT_NOREF_PV(pThis);
1669}
1670#endif /* IN_RING3 */
1671
1672
1673#define e1kCsEnter(ps, rcBusy) PDMDevHlpCritSectEnter(pDevIns, &(ps)->cs, (rcBusy))
1674#define e1kCsEnterReturn(ps, rcBusy) do { \
1675 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &(ps)->cs, (rcBusy)); \
1676 if (rcLock == VINF_SUCCESS) { /* likely */ } \
1677 else return rcLock; \
1678 } while (0)
1679#define e1kR3CsEnterAsserted(ps) do { \
1680 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &(ps)->cs, VERR_SEM_BUSY); \
1681 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &(ps)->cs, rcLock); \
1682 } while (0)
1683#define e1kCsLeave(ps) PDMDevHlpCritSectLeave(pDevIns, &(ps)->cs)
1684
1685
1686#define e1kCsRxEnter(ps, rcBusy) PDMDevHlpCritSectEnter(pDevIns, &(ps)->csRx, (rcBusy))
1687#define e1kCsRxEnterReturn(ps) do { \
1688 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &(ps)->csRx, VERR_SEM_BUSY); \
1689 AssertRCReturn(rcLock, rcLock); \
1690 } while (0)
1691#define e1kR3CsRxEnterAsserted(ps) do { \
1692 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &(ps)->csRx, VERR_SEM_BUSY); \
1693 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &(ps)->csRx, rcLock); \
1694 } while (0)
1695#define e1kCsRxLeave(ps) PDMDevHlpCritSectLeave(pDevIns, &(ps)->csRx)
1696#define e1kCsRxIsOwner(ps) PDMDevHlpCritSectIsOwner(pDevIns, &(ps)->csRx)
1697
1698
1699#ifndef E1K_WITH_TX_CS
1700# define e1kCsTxEnter(ps, rcBusy) VINF_SUCCESS
1701# define e1kR3CsTxEnterAsserted(ps) do { } while (0)
1702# define e1kCsTxLeave(ps) do { } while (0)
1703#else /* E1K_WITH_TX_CS */
1704# define e1kCsTxEnter(ps, rcBusy) PDMDevHlpCritSectEnter(pDevIns, &(ps)->csTx, (rcBusy))
1705# define e1kR3CsTxEnterAsserted(ps) do { \
1706 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &(ps)->csTx, VERR_SEM_BUSY); \
1707 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &(ps)->csTx, rcLock); \
1708 } while (0)
1709# define e1kCsTxLeave(ps) PDMDevHlpCritSectLeave(pDevIns, &(ps)->csTx)
1710# define e1kCsTxIsOwner(ps) PDMDevHlpCritSectIsOwner(pDevIns, &(ps)->csTx)
1711#endif /* E1K_WITH_TX_CS */
1712
1713
1714#ifdef E1K_WITH_TXD_CACHE
1715/*
1716 * Transmit Descriptor Register Context
1717 */
1718struct E1kTxDContext
1719{
1720 uint32_t tdlen;
1721 uint32_t tdh;
1722 uint32_t tdt;
1723 uint8_t nextPacket;
1724};
1725typedef struct E1kTxDContext E1KTXDC, *PE1KTXDC;
1726
1727DECLINLINE(bool) e1kUpdateTxDContext(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KTXDC pContext)
1728{
1729 Assert(e1kCsTxIsOwner(pThis));
1730 if (!e1kCsTxIsOwner(pThis))
1731 {
1732 memset(pContext, 0, sizeof(E1KTXDC));
1733 return false;
1734 }
1735 pContext->tdlen = TDLEN;
1736 pContext->tdh = TDH;
1737 pContext->tdt = TDT;
1738 uint32_t cTxRingSize = pContext->tdlen / sizeof(E1KTXDESC);
1739#ifdef DEBUG
1740 if (pContext->tdh >= cTxRingSize)
1741 {
1742 Log(("%s e1kUpdateTxDContext: will return false because TDH too big (%u >= %u)\n",
1743 pThis->szPrf, pContext->tdh, cTxRingSize));
1744        return false;
1745 }
1746 if (pContext->tdt >= cTxRingSize)
1747 {
1748 Log(("%s e1kUpdateTxDContext: will return false because TDT too big (%u >= %u)\n",
1749 pThis->szPrf, pContext->tdt, cTxRingSize));
1750        return false;
1751 }
1752#endif /* DEBUG */
1753 return pContext->tdh < cTxRingSize && pContext->tdt < cTxRingSize;
1754}
1755#endif /* E1K_WITH_TXD_CACHE */
1756#ifdef E1K_WITH_RXD_CACHE
1757/*
1758 * Receive Descriptor Register Context
1759 */
1760struct E1kRxDContext
1761{
1762 uint32_t rdlen;
1763 uint32_t rdh;
1764 uint32_t rdt;
1765};
1766typedef struct E1kRxDContext E1KRXDC, *PE1KRXDC;
1767
1768DECLINLINE(bool) e1kUpdateRxDContext(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KRXDC pContext, const char *pcszCallee)
1769{
1770 Assert(e1kCsRxIsOwner(pThis));
1771 if (!e1kCsRxIsOwner(pThis))
1772 return false;
1773 pContext->rdlen = RDLEN;
1774 pContext->rdh = RDH;
1775 pContext->rdt = RDT;
1776 uint32_t cRxRingSize = pContext->rdlen / sizeof(E1KRXDESC);
1777 /*
1778 * Note that the checks for RDT are a bit different. Some guests, OS/2 for
1779 * example, intend to use all descriptors in RX ring, so they point RDT
1780 * right beyond the last descriptor in the ring. While this is not
1781 * acceptable for other registers, it works out fine for RDT.
1782 */
1783#ifdef DEBUG
1784 if (pContext->rdh >= cRxRingSize)
1785 {
1786 Log(("%s e1kUpdateRxDContext: called from %s, will return false because RDH too big (%u >= %u)\n",
1787 pThis->szPrf, pcszCallee, pContext->rdh, cRxRingSize));
1788        return false;
1789 }
1790 if (pContext->rdt > cRxRingSize)
1791 {
1792 Log(("%s e1kUpdateRxDContext: called from %s, will return false because RDT too big (%u > %u)\n",
1793 pThis->szPrf, pcszCallee, pContext->rdt, cRxRingSize));
1794        return false;
1795 }
1796#else /* !DEBUG */
1797 RT_NOREF(pcszCallee);
1798#endif /* !DEBUG */
1799 return pContext->rdh < cRxRingSize && pContext->rdt <= cRxRingSize; // && (RCTL & RCTL_EN);
1800}
1801#endif /* E1K_WITH_RXD_CACHE */
1802
1803/**
1804 * Wakeup the RX thread.
1805 */
1806static void e1kWakeupReceive(PPDMDEVINS pDevIns, PE1KSTATE pThis)
1807{
1808 if ( pThis->fMaybeOutOfSpace
1809 && pThis->hEventMoreRxDescAvail != NIL_SUPSEMEVENT)
1810 {
1811 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatRxOverflowWakeup));
1812 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", pThis->szPrf));
1813 int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventMoreRxDescAvail);
1814 AssertRC(rc);
1815 }
1816}
1817
1818#ifdef IN_RING3
1819
1820/**
1821 * Hardware reset. Revert all registers to initial values.
1822 *
1823 * @param pDevIns The device instance.
1824 * @param pThis The device state structure.
1825 * @param pThisCC The current context instance data.
1826 */
1827static void e1kR3HardReset(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
1828{
1829 E1kLog(("%s Hard reset triggered\n", pThis->szPrf));
1830    /* No interrupts should survive device reset, see @bugref{9556}. */
1831 if (pThis->fIntRaised)
1832 {
1833 /* Lower(0) INTA(0) */
1834 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
1835 pThis->fIntRaised = false;
1836 E1kLog(("%s e1kR3HardReset: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
1837 }
1838 memset(pThis->auRegs, 0, sizeof(pThis->auRegs));
1839 memset(pThis->aRecAddr.au32, 0, sizeof(pThis->aRecAddr.au32));
1840# ifdef E1K_INIT_RA0
1841 memcpy(pThis->aRecAddr.au32, pThis->macConfigured.au8,
1842 sizeof(pThis->macConfigured.au8));
1843 pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;
1844# endif /* E1K_INIT_RA0 */
1845 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1846 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1847 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1848 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1849 Assert(GET_BITS(RCTL, BSIZE) == 0);
1850 pThis->u16RxBSize = 2048;
1851
1852 uint16_t u16LedCtl = 0x0602; /* LED0/LINK_UP#, LED2/LINK100# */
1853 pThisCC->eeprom.readWord(0x2F, &u16LedCtl); /* Read LEDCTL defaults from EEPROM */
1854 LEDCTL = 0x07008300 | (((uint32_t)u16LedCtl & 0xCF00) << 8) | (u16LedCtl & 0xCF); /* Only LED0 and LED2 defaults come from EEPROM */
1855
1856 /* Reset promiscuous mode */
1857 if (pThisCC->pDrvR3)
1858 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, false);
1859
1860# ifdef E1K_WITH_TXD_CACHE
1861 e1kR3CsTxEnterAsserted(pThis);
1862 pThis->nTxDFetched = 0;
1863 pThis->iTxDCurrent = 0;
1864 pThis->fGSO = false;
1865 pThis->cbTxAlloc = 0;
1866 e1kCsTxLeave(pThis);
1867# endif /* E1K_WITH_TXD_CACHE */
1868# ifdef E1K_WITH_RXD_CACHE
1869 e1kR3CsRxEnterAsserted(pThis);
1870 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
1871 e1kCsRxLeave(pThis);
1872# endif /* E1K_WITH_RXD_CACHE */
1873# ifdef E1K_LSC_ON_RESET
1874 E1kLog(("%s Will trigger LSC in %d seconds...\n",
1875 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
1876 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, pThis->cMsLinkUpDelay * 1000);
1877# endif /* E1K_LSC_ON_RESET */
1878}
1879
1880#endif /* IN_RING3 */
1881
1882/**
1883 * Compute Internet checksum.
1884 *
1885 * @remarks Refer to http://www.netfor2.com/checksum.html for short intro.
1886 *
1887 * @param   pvBuf       The buffer holding the data to checksum.
1888 * @param   cb          The size of the buffer in bytes.
1891 *
1892 * @return The 1's complement of the 1's complement sum.
1893 *
1894 * @thread E1000_TX
1895 */
1896static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1897{
1898 uint32_t csum = 0;
1899 uint16_t *pu16 = (uint16_t *)pvBuf;
1900
1901 while (cb > 1)
1902 {
1903 csum += *pu16++;
1904 cb -= 2;
1905 }
1906 if (cb)
1907 csum += *(uint8_t*)pu16;
1908 while (csum >> 16)
1909 csum = (csum >> 16) + (csum & 0xFFFF);
1910 Assert(csum < 65536);
1911 return (uint16_t)~csum;
1912}
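/*
 * Illustrative example (not from this code base): for an IPv4 header whose
 * 16-bit words, with the checksum field zeroed, are
 * 4500 0073 0000 4000 4011 0000 c0a8 0001 c0a8 00c7, the 32-bit sum is
 * 0x2479C; folding the carry gives 0x479C + 0x2 = 0x479E, and its one's
 * complement 0xB861 is the value such a header would carry in its checksum field.
 */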
1913
1914/**
1915 * Dump a packet to debug log.
1916 *
1917 * @param pDevIns The device instance.
1918 * @param pThis The device state structure.
1919 * @param cpPacket The packet.
1920 * @param cb The size of the packet.
1921 * @param pszText A string denoting direction of packet transfer.
1922 * @thread E1000_TX
1923 */
1924DECLINLINE(void) e1kPacketDump(PPDMDEVINS pDevIns, PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *pszText)
1925{
1926#ifdef DEBUG
1927 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1928 {
1929 Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
1930 pThis->szPrf, pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cb));
1931 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1932 {
1933 Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
1934 pThis->szPrf, cpPacket+14+8, cpPacket+14+24));
1935 if (*(cpPacket+14+6) == 0x6)
1936 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1937 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1938 }
1939 else if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x800)
1940 {
1941 Log4(("%s --- IPv4: %RTnaipv4 => %RTnaipv4\n",
1942 pThis->szPrf, *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16)));
1943 if (*(cpPacket+14+6) == 0x6)
1944 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1945 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1946 }
1947 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1948 e1kCsLeave(pThis);
1949 }
1950#else
1951 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1952 {
1953 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1954 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
1955 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cpPacket+14+8, cpPacket+14+24,
1956 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1957 else
1958 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
1959 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket,
1960 *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16),
1961 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1962 e1kCsLeave(pThis);
1963 }
1964 RT_NOREF2(cb, pszText);
1965#endif
1966}
1967
1968/**
1969 * Determine the type of transmit descriptor.
1970 *
1971 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1972 *
1973 * @param pDesc Pointer to descriptor union.
1974 * @thread E1000_TX
1975 */
1976DECLINLINE(int) e1kGetDescType(E1KTXDESC *pDesc)
1977{
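    /* Legacy descriptors have DEXT clear; for extended descriptors the DTYP
       field distinguishes context (0000b) from data (0001b) descriptors. */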
1978 if (pDesc->legacy.cmd.fDEXT)
1979 return pDesc->context.dw2.u4DTYP;
1980 return E1K_DTYP_LEGACY;
1981}
1982
1983
1984#ifdef E1K_WITH_RXD_CACHE
1985/**
1986 * Return the number of RX descriptors that belong to the hardware.
1987 *
1988 * @returns the number of available descriptors in RX ring.
1989 * @param pRxdc The receive descriptor register context.
1990 * @thread ???
1991 */
1992DECLINLINE(uint32_t) e1kGetRxLen(PE1KRXDC pRxdc)
1993{
1994    /*
1995 * Make sure RDT won't change during computation. EMT may modify RDT at
1996 * any moment.
1997 */
1998 uint32_t rdt = pRxdc->rdt;
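    /* Example: an 8-descriptor ring with rdh=6 and rdt=2 wraps around, giving
       8 + 2 - 6 = 4 descriptors (indices 6, 7, 0, 1) owned by the hardware. */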
1999 return (pRxdc->rdh > rdt ? pRxdc->rdlen/sizeof(E1KRXDESC) : 0) + rdt - pRxdc->rdh;
2000}
2001
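/**
 * Return the number of RX descriptors that have been fetched into the cache
 * but not yet consumed.
 */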
2002DECLINLINE(unsigned) e1kRxDInCache(PE1KSTATE pThis)
2003{
2004 return pThis->nRxDFetched > pThis->iRxDCurrent ?
2005 pThis->nRxDFetched - pThis->iRxDCurrent : 0;
2006}
2007
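/**
 * Check whether all RX descriptors fetched into the cache have been consumed.
 */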
2008DECLINLINE(unsigned) e1kRxDIsCacheEmpty(PE1KSTATE pThis)
2009{
2010 return pThis->iRxDCurrent >= pThis->nRxDFetched;
2011}
2012
2013/**
2014 * Load receive descriptors from guest memory. The caller needs to be in Rx
2015 * critical section.
2016 *
2017 * We need two physical reads in case the tail wrapped around the end of RX
2018 * descriptor ring.
2019 *
2020 * @returns the actual number of descriptors fetched.
2021 * @param pDevIns The device instance.
2022 * @param pThis The device state structure.
2023 * @thread EMT, RX
2024 */
2025DECLINLINE(unsigned) e1kRxDPrefetch(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KRXDC pRxdc)
2026{
2027 E1kLog3(("%s e1kRxDPrefetch: RDH=%x RDT=%x RDLEN=%x "
2028 "iRxDCurrent=%x nRxDFetched=%x\n",
2029 pThis->szPrf, pRxdc->rdh, pRxdc->rdt, pRxdc->rdlen, pThis->iRxDCurrent, pThis->nRxDFetched));
2030 /* We've already loaded pThis->nRxDFetched descriptors past RDH. */
2031 unsigned nDescsAvailable = e1kGetRxLen(pRxdc) - e1kRxDInCache(pThis);
2032 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pThis->nRxDFetched);
2033 unsigned nDescsTotal = pRxdc->rdlen / sizeof(E1KRXDESC);
2034 Assert(nDescsTotal != 0);
2035 if (nDescsTotal == 0)
2036 return 0;
2037 unsigned nFirstNotLoaded = (pRxdc->rdh + e1kRxDInCache(pThis)) % nDescsTotal;
2038 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
2039 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
2040 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
2041 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
2042 nFirstNotLoaded, nDescsInSingleRead));
2043 if (nDescsToFetch == 0)
2044 return 0;
2045 E1KRXDESC* pFirstEmptyDesc = &pThis->aRxDescriptors[pThis->nRxDFetched];
2046 PDMDevHlpPCIPhysRead(pDevIns,
2047 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
2048 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
2049 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
2050 // unsigned i, j;
2051 // for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
2052 // {
2053 // pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
2054 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2055 // }
2056 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
2057 pThis->szPrf, nDescsInSingleRead,
2058 RDBAH, RDBAL + pRxdc->rdh * sizeof(E1KRXDESC),
2059 nFirstNotLoaded, pRxdc->rdlen, pRxdc->rdh, pRxdc->rdt));
2060 if (nDescsToFetch > nDescsInSingleRead)
2061 {
2062 PDMDevHlpPCIPhysRead(pDevIns,
2063 ((uint64_t)RDBAH << 32) + RDBAL,
2064 pFirstEmptyDesc + nDescsInSingleRead,
2065 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
2066 // Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
2067 // for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
2068 // {
2069 // pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
2070 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2071 // }
2072 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
2073 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
2074 RDBAH, RDBAL));
2075 }
2076 pThis->nRxDFetched += nDescsToFetch;
2077 return nDescsToFetch;
2078}
2079
2080# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2081/**
2082 * Dump receive descriptor to debug log.
2083 *
2084 * @param pThis The device state structure.
2085 * @param pDesc Pointer to the descriptor.
2086 * @thread E1000_RX
2087 */
2088static void e1kPrintRDesc(PE1KSTATE pThis, E1KRXDESC *pDesc)
2089{
2090 RT_NOREF2(pThis, pDesc);
2091 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", pThis->szPrf, pDesc->u16Length));
2092 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
2093 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
2094 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
2095 pDesc->status.fPIF ? "PIF" : "pif",
2096 pDesc->status.fIPCS ? "IPCS" : "ipcs",
2097 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
2098 pDesc->status.fVP ? "VP" : "vp",
2099 pDesc->status.fIXSM ? "IXSM" : "ixsm",
2100 pDesc->status.fEOP ? "EOP" : "eop",
2101 pDesc->status.fDD ? "DD" : "dd",
2102 pDesc->status.fRXE ? "RXE" : "rxe",
2103 pDesc->status.fIPE ? "IPE" : "ipe",
2104 pDesc->status.fTCPE ? "TCPE" : "tcpe",
2105 pDesc->status.fCE ? "CE" : "ce",
2106 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
2107 E1K_SPEC_VLAN(pDesc->status.u16Special),
2108 E1K_SPEC_PRI(pDesc->status.u16Special)));
2109}
2110# endif /* IN_RING3 */
2111#endif /* E1K_WITH_RXD_CACHE */
2112
2113/**
2114 * Dump transmit descriptor to debug log.
2115 *
2116 * @param pThis The device state structure.
2117 * @param pDesc Pointer to descriptor union.
2118 * @param pszDir A string denoting direction of descriptor transfer
2119 * @thread E1000_TX
2120 */
2121static void e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, const char *pszDir,
2122 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
2123{
2124 RT_NOREF4(pThis, pDesc, pszDir, uLevel);
2125
2126 /*
2127 * Unfortunately we cannot use our format handler here, we want R0 logging
2128 * as well.
2129 */
2130 switch (e1kGetDescType(pDesc))
2131 {
2132 case E1K_DTYP_CONTEXT:
2133 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
2134 pThis->szPrf, pszDir, pszDir));
2135 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
2136 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
2137 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
2138 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
2139 pDesc->context.dw2.fIDE ? " IDE":"",
2140 pDesc->context.dw2.fRS ? " RS" :"",
2141 pDesc->context.dw2.fTSE ? " TSE":"",
2142 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
2143 pDesc->context.dw2.fTCP ? "TCP":"UDP",
2144 pDesc->context.dw2.u20PAYLEN,
2145 pDesc->context.dw3.u8HDRLEN,
2146 pDesc->context.dw3.u16MSS,
2147 pDesc->context.dw3.fDD?"DD":""));
2148 break;
2149 case E1K_DTYP_DATA:
2150 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
2151 pThis->szPrf, pszDir, pDesc->data.cmd.u20DTALEN, pszDir));
2152 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
2153 pDesc->data.u64BufAddr,
2154 pDesc->data.cmd.u20DTALEN));
2155 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
2156 pDesc->data.cmd.fIDE ? " IDE" :"",
2157 pDesc->data.cmd.fVLE ? " VLE" :"",
2158 pDesc->data.cmd.fRPS ? " RPS" :"",
2159 pDesc->data.cmd.fRS ? " RS" :"",
2160 pDesc->data.cmd.fTSE ? " TSE" :"",
2161 pDesc->data.cmd.fIFCS? " IFCS":"",
2162 pDesc->data.cmd.fEOP ? " EOP" :"",
2163 pDesc->data.dw3.fDD ? " DD" :"",
2164 pDesc->data.dw3.fEC ? " EC" :"",
2165 pDesc->data.dw3.fLC ? " LC" :"",
2166 pDesc->data.dw3.fTXSM? " TXSM":"",
2167 pDesc->data.dw3.fIXSM? " IXSM":"",
2168 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
2169 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
2170 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
2171 break;
2172 case E1K_DTYP_LEGACY:
2173 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
2174 pThis->szPrf, pszDir, pDesc->legacy.cmd.u16Length, pszDir));
2175 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
2176 pDesc->data.u64BufAddr,
2177 pDesc->legacy.cmd.u16Length));
2178 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
2179 pDesc->legacy.cmd.fIDE ? " IDE" :"",
2180 pDesc->legacy.cmd.fVLE ? " VLE" :"",
2181 pDesc->legacy.cmd.fRPS ? " RPS" :"",
2182 pDesc->legacy.cmd.fRS ? " RS" :"",
2183 pDesc->legacy.cmd.fIC ? " IC" :"",
2184 pDesc->legacy.cmd.fIFCS? " IFCS":"",
2185 pDesc->legacy.cmd.fEOP ? " EOP" :"",
2186 pDesc->legacy.dw3.fDD ? " DD" :"",
2187 pDesc->legacy.dw3.fEC ? " EC" :"",
2188 pDesc->legacy.dw3.fLC ? " LC" :"",
2189 pDesc->legacy.cmd.u8CSO,
2190 pDesc->legacy.dw3.u8CSS,
2191 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
2192 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
2193 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
2194 break;
2195 default:
2196 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
2197 pThis->szPrf, pszDir, pszDir));
2198 break;
2199 }
2200}
2201
2202/**
2203 * Raise an interrupt later.
2204 *
2205 * @param pThis The device state structure.
2206 */
2207DECLINLINE(void) e1kPostponeInterrupt(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint64_t nsDeadline)
2208{
2209 if (!PDMDevHlpTimerIsActive(pDevIns, pThis->hIntTimer))
2210 PDMDevHlpTimerSetNano(pDevIns, pThis->hIntTimer, nsDeadline);
2211}
2212
2213/**
2214 * Raise interrupt if not masked.
2215 *
2216 * @param pThis The device state structure.
2217 */
2218static int e1kRaiseInterrupt(PPDMDEVINS pDevIns, PE1KSTATE pThis, int rcBusy, uint32_t u32IntCause)
2219{
2220    /* Do NOT use e1kCsEnterReturn here as most callers don't check the
2221 status code. They'll pass a negative rcBusy. */
2222 int rc = e1kCsEnter(pThis, rcBusy);
2223 if (RT_LIKELY(rc == VINF_SUCCESS))
2224 { /* likely */ }
2225 else
2226 {
2227 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &pThis->cs, rc);
2228 return rc;
2229 }
2230
2231 E1K_INC_ISTAT_CNT(pThis->uStatIntTry);
2232 ICR |= u32IntCause;
2233 if (ICR & IMS)
2234 {
2235 if (pThis->fIntRaised)
2236 {
2237 E1K_INC_ISTAT_CNT(pThis->uStatIntSkip);
2238 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
2239 pThis->szPrf, ICR & IMS));
2240 }
2241 else
2242 {
2243 uint64_t tsNow = PDMDevHlpTimerGet(pDevIns, pThis->hIntTimer);
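            /* ITR is programmed in 256 ns increments, hence the multiplication
               by 256 below when comparing against the nanosecond clock. */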
2244 if (!!ITR && tsNow - pThis->u64AckedAt < ITR * 256
2245 && pThis->fItrEnabled && (pThis->fItrRxEnabled || !(ICR & ICR_RXT0)))
2246 {
2247 E1K_INC_ISTAT_CNT(pThis->uStatIntEarly);
2248 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
2249 pThis->szPrf, (uint32_t)(tsNow - pThis->u64AckedAt), ITR * 256));
2250 e1kPostponeInterrupt(pDevIns, pThis, ITR * 256);
2251 }
2252 else
2253 {
2254
2255 /* Since we are delivering the interrupt now
2256 * there is no need to do it later -- stop the timer.
2257 */
2258 PDMDevHlpTimerStop(pDevIns, pThis->hIntTimer);
2259 E1K_INC_ISTAT_CNT(pThis->uStatInt);
2260 STAM_COUNTER_INC(&pThis->StatIntsRaised);
2261 /* Got at least one unmasked interrupt cause */
2262 pThis->fIntRaised = true;
2263 /* Raise(1) INTA(0) */
2264 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
2265 PDMDevHlpPCISetIrq(pDevIns, 0, 1);
2266 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
2267 pThis->szPrf, ICR & IMS));
2268 }
2269 }
2270 }
2271 else
2272 {
2273 E1K_INC_ISTAT_CNT(pThis->uStatIntMasked);
2274 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
2275 pThis->szPrf, ICR, IMS));
2276 }
2277 e1kCsLeave(pThis);
2278 return VINF_SUCCESS;
2279}
2280
2281/**
2282 * Compute the physical address of the descriptor.
2283 *
2284 * @returns the physical address of the descriptor.
2285 *
2286 * @param baseHigh High-order 32 bits of descriptor table address.
2287 * @param baseLow Low-order 32 bits of descriptor table address.
2288 * @param idxDesc The descriptor index in the table.
2289 */
2290DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
2291{
2292 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
2293 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
2294}
2295
2296#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2297/**
2298 * Advance the head pointer of the receive descriptor queue.
2299 *
2300 * @remarks RDH always points to the next available RX descriptor.
2301 *
2302 * @param pDevIns The device instance.
2303 * @param pThis The device state structure.
2304 */
2305DECLINLINE(void) e1kAdvanceRDH(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KRXDC pRxdc)
2306{
2307 Assert(e1kCsRxIsOwner(pThis));
2308 //e1kR3CsEnterAsserted(pThis);
2309 if (++pRxdc->rdh * sizeof(E1KRXDESC) >= pRxdc->rdlen)
2310 pRxdc->rdh = 0;
2311 RDH = pRxdc->rdh; /* Sync the actual register and RXDC */
2312#ifdef E1K_WITH_RXD_CACHE
2313 /*
2314 * We need to fetch descriptors now as the guest may advance RDT all the way
2315 * to RDH as soon as we generate RXDMT0 interrupt. This is mostly to provide
2316     * compatibility with Phar Lap ETS, see @bugref{7346}. Note that we do not
2317 * check if the receiver is enabled. It must be, otherwise we won't get here
2318 * in the first place.
2319 *
2320 * Note that we should have moved both RDH and iRxDCurrent by now.
2321 */
2322 if (e1kRxDIsCacheEmpty(pThis))
2323 {
2324 /* Cache is empty, reset it and check if we can fetch more. */
2325 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2326 E1kLog3(("%s e1kAdvanceRDH: Rx cache is empty, RDH=%x RDT=%x "
2327 "iRxDCurrent=%x nRxDFetched=%x\n",
2328 pThis->szPrf, pRxdc->rdh, pRxdc->rdt, pThis->iRxDCurrent, pThis->nRxDFetched));
2329 e1kRxDPrefetch(pDevIns, pThis, pRxdc);
2330 }
2331#endif /* E1K_WITH_RXD_CACHE */
2332 /*
2333 * Compute current receive queue length and fire RXDMT0 interrupt
2334 * if we are low on receive buffers
2335 */
2336 uint32_t uRQueueLen = pRxdc->rdh>pRxdc->rdt ? pRxdc->rdlen/sizeof(E1KRXDESC)-pRxdc->rdh+pRxdc->rdt : pRxdc->rdt-pRxdc->rdh;
2337 /*
2338 * The minimum threshold is controlled by RDMTS bits of RCTL:
2339 * 00 = 1/2 of RDLEN
2340 * 01 = 1/4 of RDLEN
2341 * 10 = 1/8 of RDLEN
2342 * 11 = reserved
2343 */
2344 uint32_t uMinRQThreshold = pRxdc->rdlen / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
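    /* E.g. with a 256-descriptor ring and RDMTS=01b the divisor is 2 << 1 = 4,
       so the interrupt threshold is 64 descriptors. */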
2345 if (uRQueueLen <= uMinRQThreshold)
2346 {
2347 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", pRxdc->rdh, pRxdc->rdt, uRQueueLen, uMinRQThreshold));
2348 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
2349 pThis->szPrf, pRxdc->rdh, pRxdc->rdt, uRQueueLen, uMinRQThreshold));
2350 E1K_INC_ISTAT_CNT(pThis->uStatIntRXDMT0);
2351 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXDMT0);
2352 }
2353 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
2354 pThis->szPrf, pRxdc->rdh, pRxdc->rdt, uRQueueLen));
2355 //e1kCsLeave(pThis);
2356}
2357#endif /* IN_RING3 */
2358
2359#ifdef E1K_WITH_RXD_CACHE
2360
2361# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2362
2363/**
2364 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2365 * RX ring if the cache is empty.
2366 *
2367 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2368 * go out of sync with RDH which will cause trouble when EMT checks if the
2369 * cache is empty to do pre-fetch (see @bugref{6217}).
2370 *
2371 * @param pDevIns The device instance.
2372 * @param pThis The device state structure.
2373 * @thread RX
2374 */
2375DECLINLINE(E1KRXDESC *) e1kRxDGet(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KRXDC pRxdc)
2376{
2377 Assert(e1kCsRxIsOwner(pThis));
2378 /* Check the cache first. */
2379 if (pThis->iRxDCurrent < pThis->nRxDFetched)
2380 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2381 /* Cache is empty, reset it and check if we can fetch more. */
2382 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2383 if (e1kRxDPrefetch(pDevIns, pThis, pRxdc))
2384 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2385 /* Out of Rx descriptors. */
2386 return NULL;
2387}
2388
2389
2390/**
2391 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2392 * pointer. The descriptor gets written back to the RXD ring.
2393 *
2394 * @param pDevIns The device instance.
2395 * @param pThis The device state structure.
2396 * @param pDesc The descriptor being "returned" to the RX ring.
2397 * @thread RX
2398 */
2399DECLINLINE(void) e1kRxDPut(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC* pDesc, PE1KRXDC pRxdc)
2400{
2401 Assert(e1kCsRxIsOwner(pThis));
2402 pThis->iRxDCurrent++;
2403 // Assert(pDesc >= pThis->aRxDescriptors);
2404 // Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2405 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2406 // uint32_t rdh = RDH;
2407 // Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
2408 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, pRxdc->rdh), pDesc, sizeof(E1KRXDESC));
2409 /*
2410 * We need to print the descriptor before advancing RDH as it may fetch new
2411 * descriptors into the cache.
2412 */
2413 e1kPrintRDesc(pThis, pDesc);
2414 e1kAdvanceRDH(pDevIns, pThis, pRxdc);
2415}
2416
2417/**
2418 * Store a fragment of a received packet at the specified address.
2419 *
2420 * @param pDevIns The device instance.
2421 * @param pThis The device state structure.
2422 * @param pDesc The next available RX descriptor.
2423 * @param pvBuf The fragment.
2424 * @param cb The size of the fragment.
2425 */
2426static void e1kStoreRxFragment(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2427{
2428 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2429 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2430 pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2431 PDMDevHlpPCIPhysWrite(pDevIns, pDesc->u64BufAddr, pvBuf, cb);
2432 pDesc->u16Length = (uint16_t)cb;
2433 Assert(pDesc->u16Length == cb);
2434 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2435 RT_NOREF(pThis);
2436}
2437
2438# endif /* IN_RING3 */
2439
2440#else /* !E1K_WITH_RXD_CACHE */
2441
2442/**
2443 * Store a fragment of a received packet that fits into the next available RX
2444 * buffer.
2445 *
2446 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2447 *
2448 * @param pDevIns The device instance.
2449 * @param pThis The device state structure.
2450 * @param pDesc The next available RX descriptor.
2451 * @param pvBuf The fragment.
2452 * @param cb The size of the fragment.
2453 */
2454static void e1kStoreRxFragment(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2455{
2456 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2457 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2458 PDMDevHlpPCIPhysWrite(pDevIns, pDesc->u64BufAddr, pvBuf, cb);
2459 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2460 /* Write back the descriptor */
2461 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2462 e1kPrintRDesc(pThis, pDesc);
2463 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2464 /* Advance head */
2465 e1kAdvanceRDH(pDevIns, pThis);
2466 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
2467 if (pDesc->status.fEOP)
2468 {
2469 /* Complete packet has been stored -- it is time to let the guest know. */
2470#ifdef E1K_USE_RX_TIMERS
2471 if (RDTR)
2472 {
2473 /* Arm the timer to fire in RDTR usec (discard .024) */
2474 e1kArmTimer(pDevIns, pThis, pThis->hRIDTimer, RDTR);
2475 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2476            if (RADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hRADTimer))
2477                e1kArmTimer(pDevIns, pThis, pThis->hRADTimer, RADV);
2478 }
2479 else
2480 {
2481#endif
2482 /* 0 delay means immediate interrupt */
2483 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2484 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
2485#ifdef E1K_USE_RX_TIMERS
2486 }
2487#endif
2488 }
2489 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2490}
2491
2492#endif /* !E1K_WITH_RXD_CACHE */
2493
2494/**
2495 * Returns true if it is a broadcast packet.
2496 *
2497 * @returns true if destination address indicates broadcast.
2498 * @param pvBuf The ethernet packet.
2499 */
2500DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2501{
2502 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2503 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2504}
2505
2506/**
2507 * Returns true if it is a multicast packet.
2508 *
2509 * @remarks returns true for broadcast packets as well.
2510 * @returns true if destination address indicates multicast.
2511 * @param pvBuf The ethernet packet.
2512 */
2513DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2514{
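    /* The multicast (I/G) bit is the least significant bit of the first octet
       of the destination MAC address. */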
2515 return (*(char*)pvBuf) & 1;
2516}
2517
2518#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2519/**
2520 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2521 *
2522 * @remarks We emulate checksum offloading for major packet types only.
2523 *
2524 * @returns VBox status code.
2525 * @param pThis The device state structure.
2526 * @param pFrame The available data.
2527 * @param cb Number of bytes available in the buffer.
2528 * @param   pStatus     Bit fields containing status info.
2529 */
2530static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2531{
2532 /** @todo
2533 * It is not safe to bypass checksum verification for packets coming
2534     * from the real wire. We are currently unable to tell where packets are
2535     * coming from, so we tell the driver to ignore our checksum flags
2536 * and do verification in software.
2537 */
2538# if 0
2539 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2540
2541 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", pThis->szPrf, uEtherType));
2542
2543 switch (uEtherType)
2544 {
2545 case 0x800: /* IPv4 */
2546 {
2547 pStatus->fIXSM = false;
2548 pStatus->fIPCS = true;
2549 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2550 /* TCP/UDP checksum offloading works with TCP and UDP only */
2551 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2552 break;
2553 }
2554 case 0x86DD: /* IPv6 */
2555 pStatus->fIXSM = false;
2556 pStatus->fIPCS = false;
2557 pStatus->fTCPCS = true;
2558 break;
2559 default: /* ARP, VLAN, etc. */
2560 pStatus->fIXSM = true;
2561 break;
2562 }
2563# else
2564 pStatus->fIXSM = true;
2565 RT_NOREF_PV(pThis); RT_NOREF_PV(pFrame); RT_NOREF_PV(cb);
2566# endif
2567 return VINF_SUCCESS;
2568}
2569#endif /* IN_RING3 */
2570
2571/**
2572 * Pad and store received packet.
2573 *
2574 * @remarks Make sure that the packet appears to upper layer as one coming
2575 * from real Ethernet: pad it and insert FCS.
2576 *
2577 * @returns VBox status code.
2578 * @param pDevIns The device instance.
2579 * @param pThis The device state structure.
2580 * @param pvBuf The available data.
2581 * @param cb Number of bytes available in the buffer.
2582 * @param status Bit fields containing status info.
2583 */
2584static int e1kHandleRxPacket(PPDMDEVINS pDevIns, PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
2585{
2586#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2587 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2588 uint8_t *ptr = rxPacket;
2589# ifdef E1K_WITH_RXD_CACHE
2590 E1KRXDC rxdc;
2591# endif /* E1K_WITH_RXD_CACHE */
2592
2593 e1kCsRxEnterReturn(pThis);
2594# ifdef E1K_WITH_RXD_CACHE
2595 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kHandleRxPacket")))
2596 {
2597 e1kCsRxLeave(pThis);
2598 E1kLog(("%s e1kHandleRxPacket: failed to update Rx context, returning VINF_SUCCESS\n", pThis->szPrf));
2599 return VINF_SUCCESS;
2600 }
2601# endif /* E1K_WITH_RXD_CACHE */
2602
2603 if (cb > 70) /* unqualified guess */
2604 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
2605
2606 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2607 Assert(cb > 16);
2608 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2609 E1kLog3(("%s Max RX packet size is %u\n", pThis->szPrf, cbMax));
2610 if (status.fVP)
2611 {
2612 /* VLAN packet -- strip VLAN tag in VLAN mode */
2613 if ((CTRL & CTRL_VME) && cb > 16)
2614 {
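            /* An 802.1Q tag occupies bytes 12-15 of the frame: the TPID (0x8100)
               at 12-13 and the TCI at 14-15 (u16Ptr[7]). Keep the MAC addresses,
               record the TCI in the status word, and drop the 4-byte tag. */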
2615 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2616 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2617 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2618 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2619 cb -= 4;
2620 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2621 pThis->szPrf, status.u16Special, cb));
2622 }
2623 else
2624 {
2625 status.fVP = false; /* Set VP only if we stripped the tag */
2626 memcpy(rxPacket, pvBuf, cb);
2627 }
2628 }
2629 else
2630 memcpy(rxPacket, pvBuf, cb);
2631 /* Pad short packets */
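    /* The minimum Ethernet frame is 64 bytes on the wire, i.e. 60 bytes before
       the 4-byte FCS that may be appended below. */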
2632 if (cb < 60)
2633 {
2634 memset(rxPacket + cb, 0, 60 - cb);
2635 cb = 60;
2636 }
2637 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2638 {
2639 STAM_PROFILE_ADV_START(&pThis->StatReceiveCRC, a);
2640 /*
2641 * Add FCS if CRC stripping is not enabled. Since the value of CRC
2642 * is ignored by most of drivers we may as well save us the trouble
2643 * of calculating it (see EthernetCRC CFGM parameter).
2644 */
2645 if (pThis->fEthernetCRC)
2646 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2647 cb += sizeof(uint32_t);
2648 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveCRC, a);
2649 E1kLog3(("%s Added FCS (cb=%u)\n", pThis->szPrf, cb));
2650 }
2651 /* Compute checksum of complete packet */
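    /* RXCSUM.PCSS specifies the byte offset at which the packet checksum
       computation starts; it is clamped to the packet length here. */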
2652 size_t cbCSumStart = RT_MIN(GET_BITS(RXCSUM, PCSS), cb);
2653 uint16_t checksum = e1kCSum16(rxPacket + cbCSumStart, cb - cbCSumStart);
2654 e1kRxChecksumOffload(pThis, rxPacket, cb, &status);
2655
2656 /* Update stats */
2657 E1K_INC_CNT32(GPRC);
2658 if (e1kIsBroadcast(pvBuf))
2659 E1K_INC_CNT32(BPRC);
2660 else if (e1kIsMulticast(pvBuf))
2661 E1K_INC_CNT32(MPRC);
2662 /* Update octet receive counter */
2663 E1K_ADD_CNT64(GORCL, GORCH, cb);
2664 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
2665 if (cb == 64)
2666 E1K_INC_CNT32(PRC64);
2667 else if (cb < 128)
2668 E1K_INC_CNT32(PRC127);
2669 else if (cb < 256)
2670 E1K_INC_CNT32(PRC255);
2671 else if (cb < 512)
2672 E1K_INC_CNT32(PRC511);
2673 else if (cb < 1024)
2674 E1K_INC_CNT32(PRC1023);
2675 else
2676 E1K_INC_CNT32(PRC1522);
2677
2678 E1K_INC_ISTAT_CNT(pThis->uStatRxFrm);
2679
2680# ifdef E1K_WITH_RXD_CACHE
2681 while (cb > 0)
2682 {
2683 E1KRXDESC *pDesc = e1kRxDGet(pDevIns, pThis, &rxdc);
2684
2685 if (pDesc == NULL)
2686 {
2687 E1kLog(("%s Out of receive buffers, dropping the packet "
2688 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2689 pThis->szPrf, cb, e1kRxDInCache(pThis), rxdc.rdh, rxdc.rdt));
2690 break;
2691 }
2692# else /* !E1K_WITH_RXD_CACHE */
2693 if (RDH == RDT)
2694 {
2695 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2696 pThis->szPrf));
2697 }
2698 /* Store the packet to receive buffers */
2699 while (RDH != RDT)
2700 {
2701 /* Load the descriptor pointed by head */
2702 E1KRXDESC desc, *pDesc = &desc;
2703 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
2704# endif /* !E1K_WITH_RXD_CACHE */
2705 if (pDesc->u64BufAddr)
2706 {
2707 uint16_t u16RxBufferSize = pThis->u16RxBSize; /* see @bugref{9427} */
2708
2709 /* Update descriptor */
2710 pDesc->status = status;
2711 pDesc->u16Checksum = checksum;
2712 pDesc->status.fDD = true;
2713
2714 /*
2715 * We need to leave Rx critical section here or we risk deadlocking
2716 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2717 * page or has an access handler associated with it.
2718 * Note that it is safe to leave the critical section here since
2719 * e1kRegWriteRDT() never modifies RDH. It never touches already
2720 * fetched RxD cache entries either.
2721 */
2722 if (cb > u16RxBufferSize)
2723 {
2724 pDesc->status.fEOP = false;
2725 e1kCsRxLeave(pThis);
2726 e1kStoreRxFragment(pDevIns, pThis, pDesc, ptr, u16RxBufferSize);
2727 e1kCsRxEnterReturn(pThis);
2728# ifdef E1K_WITH_RXD_CACHE
2729 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kHandleRxPacket")))
2730 {
2731 e1kCsRxLeave(pThis);
2732 E1kLog(("%s e1kHandleRxPacket: failed to update Rx context, returning VINF_SUCCESS\n", pThis->szPrf));
2733 return VINF_SUCCESS;
2734 }
2735# endif /* E1K_WITH_RXD_CACHE */
2736 ptr += u16RxBufferSize;
2737 cb -= u16RxBufferSize;
2738 }
2739 else
2740 {
2741 pDesc->status.fEOP = true;
2742 e1kCsRxLeave(pThis);
2743 e1kStoreRxFragment(pDevIns, pThis, pDesc, ptr, cb);
2744# ifdef E1K_WITH_RXD_CACHE
2745 e1kCsRxEnterReturn(pThis);
2746 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kHandleRxPacket")))
2747 {
2748 e1kCsRxLeave(pThis);
2749 E1kLog(("%s e1kHandleRxPacket: failed to update Rx context, returning VINF_SUCCESS\n", pThis->szPrf));
2750 return VINF_SUCCESS;
2751 }
2752 cb = 0;
2753# else /* !E1K_WITH_RXD_CACHE */
2754 pThis->led.Actual.s.fReading = 0;
2755 return VINF_SUCCESS;
2756# endif /* !E1K_WITH_RXD_CACHE */
2757 }
2758 /*
2759 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2760 * is not defined.
2761 */
2762 }
2763# ifdef E1K_WITH_RXD_CACHE
2764 /* Write back the descriptor. */
2765 pDesc->status.fDD = true;
2766 e1kRxDPut(pDevIns, pThis, pDesc, &rxdc);
2767# else /* !E1K_WITH_RXD_CACHE */
2768 else
2769 {
2770 /* Write back the descriptor. */
2771 pDesc->status.fDD = true;
2772 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2773 e1kAdvanceRDH(pDevIns, pThis);
2774 }
2775# endif /* !E1K_WITH_RXD_CACHE */
2776 }
2777
2778 if (cb > 0)
2779        E1kLog(("%s Out of receive buffers, dropping %u bytes\n", pThis->szPrf, cb));
2780
2781 pThis->led.Actual.s.fReading = 0;
2782
2783 e1kCsRxLeave(pThis);
2784# ifdef E1K_WITH_RXD_CACHE
2785 /* Complete packet has been stored -- it is time to let the guest know. */
2786# ifdef E1K_USE_RX_TIMERS
2787 if (RDTR)
2788 {
2789 /* Arm the timer to fire in RDTR usec (discard .024) */
2790        e1kArmTimer(pDevIns, pThis, pThis->hRIDTimer, RDTR);
2791 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2792 if (RADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hRADTimer))
2793            e1kArmTimer(pDevIns, pThis, pThis->hRADTimer, RADV);
2794 }
2795 else
2796 {
2797# endif /* E1K_USE_RX_TIMERS */
2798 /* 0 delay means immediate interrupt */
2799 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2800 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
2801# ifdef E1K_USE_RX_TIMERS
2802 }
2803# endif /* E1K_USE_RX_TIMERS */
2804# endif /* E1K_WITH_RXD_CACHE */
2805
2806 return VINF_SUCCESS;
2807#else /* !IN_RING3 */
2808 RT_NOREF(pDevIns, pThis, pvBuf, cb, status);
2809 return VERR_INTERNAL_ERROR_2;
2810#endif /* !IN_RING3 */
2811}
2812
2813
2814#ifdef IN_RING3
2815/**
2816 * Bring the link up after the configured delay, 5 seconds by default.
2817 *
2818 * @param pDevIns The device instance.
2819 * @param pThis The device state structure.
2820 * @thread any
2821 */
2822DECLINLINE(void) e1kBringLinkUpDelayed(PPDMDEVINS pDevIns, PE1KSTATE pThis)
2823{
2824 E1kLog(("%s Will bring up the link in %d seconds...\n",
2825 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
2826 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, pThis->cMsLinkUpDelay * 1000);
2827}
2828
2829/**
2830 * Bring up the link immediately.
2831 *
2832 * @param pDevIns The device instance.
2833 * @param pThis The device state structure.
2834 * @param pThisCC The current context instance data.
2835 */
2836DECLINLINE(void) e1kR3LinkUp(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2837{
2838 E1kLog(("%s Link is up\n", pThis->szPrf));
2839 STATUS |= STATUS_LU;
2840 Phy::setLinkStatus(&pThis->phy, true);
2841 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2842 if (pThisCC->pDrvR3)
2843 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_UP);
2844 /* Trigger processing of pending TX descriptors (see @bugref{8942}). */
2845 PDMDevHlpTaskTrigger(pDevIns, pThis->hTxTask);
2846}
2847
2848/**
2849 * Bring down the link immediately.
2850 *
2851 * @param pDevIns The device instance.
2852 * @param pThis The device state structure.
2853 * @param pThisCC The current context instance data.
2854 */
2855DECLINLINE(void) e1kR3LinkDown(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2856{
2857 E1kLog(("%s Link is down\n", pThis->szPrf));
2858 STATUS &= ~STATUS_LU;
2859#ifdef E1K_LSC_ON_RESET
2860 Phy::setLinkStatus(&pThis->phy, false);
2861#endif /* E1K_LSC_ON_RESET */
2862 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2863 if (pThisCC->pDrvR3)
2864 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2865}
2866
2867/**
2868 * Bring down the link temporarily.
2869 *
2870 * @param pDevIns The device instance.
2871 * @param pThis The device state structure.
2872 * @param pThisCC The current context instance data.
2873 */
2874DECLINLINE(void) e1kR3LinkDownTemp(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2875{
2876 E1kLog(("%s Link is down temporarily\n", pThis->szPrf));
2877 STATUS &= ~STATUS_LU;
2878 Phy::setLinkStatus(&pThis->phy, false);
2879 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2880 /*
2881 * Notifying the associated driver that the link went down (even temporarily)
2882 * seems to be the right thing, but it was not done before. This may cause
2883 * a regression if the driver does not expect the link to go down as a result
2884 * of sending PDMNETWORKLINKSTATE_DOWN_RESUME to this device. Earlier versions
2885 * of code notified the driver that the link was up! See @bugref{7057}.
2886 */
2887 if (pThisCC->pDrvR3)
2888 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2889 e1kBringLinkUpDelayed(pDevIns, pThis);
2890}
2891#endif /* IN_RING3 */
2892
2893#if 0 /* unused */
2894/**
2895 * Read handler for Device Status register.
2896 *
2897 * Get the link status from PHY.
2898 *
2899 * @returns VBox status code.
2900 *
2901 * @param pThis The device state structure.
2902 * @param offset Register offset in memory-mapped frame.
2903 * @param index Register index in register array.
2904 * @param mask Used to implement partial reads (8 and 16-bit).
2905 */
2906static int e1kRegReadCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2907{
2908 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2909 pThis->szPrf, (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2910 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2911 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2912 {
2913 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2914 if (Phy::readMDIO(&pThis->phy))
2915 *pu32Value = CTRL | CTRL_MDIO;
2916 else
2917 *pu32Value = CTRL & ~CTRL_MDIO;
2918 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2919 pThis->szPrf, !!(*pu32Value & CTRL_MDIO)));
2920 }
2921 else
2922 {
2923 /* MDIO pin is used for output, ignore it */
2924 *pu32Value = CTRL;
2925 }
2926 return VINF_SUCCESS;
2927}
2928#endif /* unused */
2929
2930/**
2931 * A helper function to detect the link state to the other side of "the wire".
2932 *
2933 * When deciding to bring up the link we need to take into account both if the
2934 * cable is connected and if our device is actually connected to the outside
2935 * world. If no driver is attached we won't be able to allocate TX buffers,
2936 * which prevents us from processing TX descriptors and eventually results in
2937 * a "TX unit hang" in the guest.
2938 *
2939 * @returns true if the device is connected to something.
2940 *
2941 * @param pDevIns The device instance.
2942 */
2943DECLINLINE(bool) e1kIsConnected(PPDMDEVINS pDevIns)
2944{
2945 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
2946 return pThis->fCableConnected && pThis->fIsAttached;
2947}
2948
2949/**
2950 * A callback used by the PHY to indicate that the link needs to be updated
2951 * due to a PHY reset.
2952 *
2953 * @param pDevIns The device instance.
2954 * @thread any
2955 */
2956void e1kPhyLinkResetCallback(PPDMDEVINS pDevIns)
2957{
2958 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
2959
2960 /* Make sure we have cable connected and MAC can talk to PHY */
2961 if (e1kIsConnected(pDevIns) && (CTRL & CTRL_SLU))
2962 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, E1K_INIT_LINKUP_DELAY_US);
2963 else
2964 Log(("%s PHY link reset callback ignored (cable %sconnected, driver %stached, CTRL_SLU=%u)\n", pThis->szPrf,
2965 pThis->fCableConnected ? "" : "dis", pThis->fIsAttached ? "at" : "de", CTRL & CTRL_SLU ? 1 : 0));
2966}
2967
2968/**
2969 * Write handler for Device Control register.
2970 *
2971 * Handles reset.
2972 *
2973 * @param pThis The device state structure.
2974 * @param offset Register offset in memory-mapped frame.
2975 * @param index Register index in register array.
2976 * @param value The value to store.
2977 * @param mask Used to implement partial writes (8 and 16-bit).
2978 * @thread EMT
2979 */
2980static int e1kRegWriteCTRL(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2981{
2982 int rc = VINF_SUCCESS;
2983
2984 if (value & CTRL_RESET)
2985 { /* RST */
2986#ifndef IN_RING3
2987 return VINF_IOM_R3_MMIO_WRITE;
2988#else
2989 e1kR3HardReset(pDevIns, pThis, PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC));
2990#endif
2991 }
2992 else
2993 {
2994#ifdef E1K_LSC_ON_SLU
2995 /*
2996 * When the guest changes 'Set Link Up' bit from 0 to 1 we check if
2997 * the link is down and the cable is connected, and if they are we
2998 * bring the link up, see @bugref{8624}.
2999 */
3000 if ( (value & CTRL_SLU)
3001 && !(CTRL & CTRL_SLU)
3002 && pThis->fCableConnected
3003 && !(STATUS & STATUS_LU))
3004 {
3005 /* It should take about 2 seconds for the link to come up */
3006 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, E1K_INIT_LINKUP_DELAY_US);
3007 }
3008#else /* !E1K_LSC_ON_SLU */
3009 if ( (value & CTRL_SLU)
3010 && !(CTRL & CTRL_SLU)
3011 && e1kIsConnected(pDevIns)
3012 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hLUTimer))
3013 {
3014 /* PXE does not use LSC interrupts, see @bugref{9113}. */
3015 STATUS |= STATUS_LU;
3016 }
3017#endif /* !E1K_LSC_ON_SLU */
3018 if ((value & CTRL_VME) != (CTRL & CTRL_VME))
3019 {
3020 E1kLog(("%s VLAN Mode %s\n", pThis->szPrf, (value & CTRL_VME) ? "Enabled" : "Disabled"));
3021 }
3022 Log7(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
3023 pThis->szPrf, (value & CTRL_MDIO_DIR)?"OUT":"IN ",
3024 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
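        /*
         * The guest bit-bangs the MII management interface via CTRL: MDC is the
         * clock, MDIO the data line, and CTRL_MDIO_DIR selects the direction.
         * On each write with MDC high we either push the MDIO bit to the PHY or
         * sample it from the PHY (the PHY emulation presumably assembles the
         * serial management frames from these individual bits).
         */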
3025 if (value & CTRL_MDC)
3026 {
3027 if (value & CTRL_MDIO_DIR)
3028 {
3029 Log7(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
3030 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
3031 Phy::writeMDIO(&pThis->phy, !!(value & CTRL_MDIO), pDevIns);
3032 }
3033 else
3034 {
3035 if (Phy::readMDIO(&pThis->phy))
3036 value |= CTRL_MDIO;
3037 else
3038 value &= ~CTRL_MDIO;
3039 Log7(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
3040 }
3041 }
3042 rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3043 }
3044
3045 return rc;
3046}
3047
3048/**
3049 * Write handler for EEPROM/Flash Control/Data register.
3050 *
3051 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
3052 *
3053 * @param pThis The device state structure.
3054 * @param offset Register offset in memory-mapped frame.
3055 * @param index Register index in register array.
3056 * @param value The value to store.
3057 * @param mask Used to implement partial writes (8 and 16-bit).
3058 * @thread EMT
3059 */
3060static int e1kRegWriteEECD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3061{
3062 RT_NOREF(pDevIns, offset, index);
3063#ifdef IN_RING3
3064 /* So far we are concerned with lower byte only */
3065 if ((EECD & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
3066 {
3067 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
3068 /* Note: 82543GC does not need to request EEPROM access */
3069 STAM_PROFILE_ADV_START(&pThis->StatEEPROMWrite, a);
3070 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3071 pThisCC->eeprom.write(value & EECD_EE_WIRES);
3072 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMWrite, a);
3073 }
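    /* Grant EEPROM access as soon as the guest requests it; revoke the grant
     * when the request bit is cleared again. */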
3074 if (value & EECD_EE_REQ)
3075 EECD |= EECD_EE_REQ|EECD_EE_GNT;
3076 else
3077 EECD &= ~EECD_EE_GNT;
3078 //e1kRegWriteDefault(pThis, offset, index, value );
3079
3080 return VINF_SUCCESS;
3081#else /* !IN_RING3 */
3082 RT_NOREF(pThis, value);
3083 return VINF_IOM_R3_MMIO_WRITE;
3084#endif /* !IN_RING3 */
3085}
3086
3087/**
3088 * Read handler for EEPROM/Flash Control/Data register.
3089 *
3090 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
3091 *
3092 * @returns VBox status code.
3093 *
3094 * @param pThis The device state structure.
3095 * @param offset Register offset in memory-mapped frame.
3096 * @param index Register index in register array.
3097 * @param pu32Value   Where to store the value of the register.
3098 * @thread EMT
3099 */
3100static int e1kRegReadEECD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
3101{
3102#ifdef IN_RING3
3103 uint32_t value = 0; /* Get rid of false positive in parfait. */
3104 int rc = e1kRegReadDefault(pDevIns, pThis, offset, index, &value);
3105 if (RT_SUCCESS(rc))
3106 {
3107 if ((value & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
3108 {
3109 /* Note: 82543GC does not need to request EEPROM access */
3110            /* Access to EEPROM granted -- get 4-wire bits from the EEPROM device */
3111 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
3112 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3113 value |= pThisCC->eeprom.read();
3114 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
3115 }
3116 *pu32Value = value;
3117 }
3118
3119 return rc;
3120#else /* !IN_RING3 */
3121 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(pu32Value);
3122 return VINF_IOM_R3_MMIO_READ;
3123#endif /* !IN_RING3 */
3124}
3125
3126/**
3127 * Write handler for EEPROM Read register.
3128 *
3129 * Handles EEPROM word access requests, reads EEPROM and stores the result
3130 * into DATA field.
3131 *
3132 * @param pThis The device state structure.
3133 * @param offset Register offset in memory-mapped frame.
3134 * @param index Register index in register array.
3135 * @param value The value to store.
3136 * @param mask Used to implement partial writes (8 and 16-bit).
3137 * @thread EMT
3138 */
3139static int e1kRegWriteEERD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3140{
3141#ifdef IN_RING3
3142 /* Make use of 'writable' and 'readable' masks. */
3143 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3144 /* DONE and DATA are set only if read was triggered by START. */
3145 if (value & EERD_START)
3146 {
3147 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
3148 uint16_t tmp;
3149 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3150 if (pThisCC->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
3151 SET_BITS(EERD, DATA, tmp);
3152 EERD |= EERD_DONE;
3153 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
3154 }
3155
3156 return VINF_SUCCESS;
3157#else /* !IN_RING3 */
3158 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
3159 return VINF_IOM_R3_MMIO_WRITE;
3160#endif /* !IN_RING3 */
3161}
3162
3163
3164/**
3165 * Write handler for MDI Control register.
3166 *
3167 * Handles PHY read/write requests; forwards requests to internal PHY device.
3168 *
3169 * @param pThis The device state structure.
3170 * @param offset Register offset in memory-mapped frame.
3171 * @param index Register index in register array.
3172 * @param value The value to store.
3173 * @param mask Used to implement partial writes (8 and 16-bit).
3174 * @thread EMT
3175 */
3176static int e1kRegWriteMDIC(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3177{
3178 if (value & MDIC_INT_EN)
3179 {
3180 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
3181 pThis->szPrf));
3182 }
3183 else if (value & MDIC_READY)
3184 {
3185 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
3186 pThis->szPrf));
3187 }
3188 else if (GET_BITS_V(value, MDIC, PHY) != 1)
3189 {
3190 E1kLog(("%s WARNING! Access to invalid PHY detected, phy=%d.\n",
3191 pThis->szPrf, GET_BITS_V(value, MDIC, PHY)));
3192 /*
3193 * Some drivers scan the MDIO bus for a PHY. We can work with these
3194 * drivers if we set MDIC_READY and MDIC_ERROR when there isn't a PHY
3195 * at the requested address, see @bugref{7346}.
3196 */
3197 MDIC = MDIC_READY | MDIC_ERROR;
3198 }
3199 else
3200 {
3201 /* Store the value */
3202 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3203 STAM_COUNTER_INC(&pThis->StatPHYAccesses);
3204 /* Forward op to PHY */
3205 if (value & MDIC_OP_READ)
3206 SET_BITS(MDIC, DATA, Phy::readRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), pDevIns));
3207 else
3208 Phy::writeRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK, pDevIns);
3209 /* Let software know that we are done */
3210 MDIC |= MDIC_READY;
3211 }
3212
3213 return VINF_SUCCESS;
3214}
3215
3216/**
3217 * Write handler for Interrupt Cause Read register.
3218 *
3219 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
3220 *
3221 * @param pThis The device state structure.
3222 * @param offset Register offset in memory-mapped frame.
3223 * @param index Register index in register array.
3224 * @param value The value to store.
3225 * @param mask Used to implement partial writes (8 and 16-bit).
3226 * @thread EMT
3227 */
3228static int e1kRegWriteICR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3229{
3230 ICR &= ~value;
3231
3232 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index);
3233 return VINF_SUCCESS;
3234}
3235
3236/**
3237 * Read handler for Interrupt Cause Read register.
3238 *
3239 * Reading this register acknowledges all interrupts.
3240 *
3241 * @returns VBox status code.
3242 *
3243 * @param pThis The device state structure.
3244 * @param offset Register offset in memory-mapped frame.
3245 * @param index Register index in register array.
3246 * @param pu32Value   Where to store the value of the register.
3247 * @thread EMT
3248 */
3249static int e1kRegReadICR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
3250{
3251 e1kCsEnterReturn(pThis, VINF_IOM_R3_MMIO_READ);
3252
3253 uint32_t value = 0;
3254 int rc = e1kRegReadDefault(pDevIns, pThis, offset, index, &value);
3255 if (RT_SUCCESS(rc))
3256 {
3257 if (value)
3258 {
3259 if (!pThis->fIntRaised)
3260 E1K_INC_ISTAT_CNT(pThis->uStatNoIntICR);
3261 /*
3262 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
3263 * with disabled interrupts.
3264 */
3265 //if (IMS)
3266 if (1)
3267 {
3268 /*
3269 * Interrupts were enabled -- we are supposedly at the very
3270 * beginning of interrupt handler
3271 */
3272 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
3273 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", pThis->szPrf, ICR));
3274 /* Clear all pending interrupts */
3275 ICR = 0;
3276 pThis->fIntRaised = false;
3277 /* Lower(0) INTA(0) */
3278 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
3279
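                /* Remember when the guest acknowledged the interrupt; if it uses
                 * interrupt masking, flag that the next interrupt should be
                 * delayed (presumably to keep from re-entering the guest ISR too soon). */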
3280 pThis->u64AckedAt = PDMDevHlpTimerGet(pDevIns, pThis->hIntTimer);
3281 if (pThis->fIntMaskUsed)
3282 pThis->fDelayInts = true;
3283 }
3284 else
3285 {
3286 /*
3287 * Interrupts are disabled -- in windows guests ICR read is done
3288 * just before re-enabling interrupts
3289 */
3290 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
3291 }
3292 }
3293 *pu32Value = value;
3294 }
3295 e1kCsLeave(pThis);
3296
3297 return rc;
3298}
3299
3300/**
3301 * Read handler for Interrupt Cause Set register.
3302 *
3303 * VxWorks driver uses this undocumented feature of real H/W to read ICR without acknowledging interrupts.
3304 *
3305 * @returns VBox status code.
3306 *
3307 * @param pThis The device state structure.
3308 * @param offset Register offset in memory-mapped frame.
3309 * @param index Register index in register array.
3310 * @param pu32Value Where to store the value of the register.
3311 * @thread EMT
3312 */
3313static int e1kRegReadICS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
3314{
3315 RT_NOREF_PV(index);
3316 return e1kRegReadDefault(pDevIns, pThis, offset, ICR_IDX, pu32Value);
3317}
3318
3319/**
3320 * Write handler for Interrupt Cause Set register.
3321 *
3322 * Bits corresponding to 1s in 'value' will be set in ICR register.
3323 *
3324 * @param pThis The device state structure.
3325 * @param offset Register offset in memory-mapped frame.
3326 * @param index Register index in register array.
3327 * @param value The value to store.
3328 * @param mask Used to implement partial writes (8 and 16-bit).
3329 * @thread EMT
3330 */
3331static int e1kRegWriteICS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3332{
3333 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3334 E1K_INC_ISTAT_CNT(pThis->uStatIntICS);
3335 return e1kRaiseInterrupt(pDevIns, pThis, VINF_IOM_R3_MMIO_WRITE, value & g_aE1kRegMap[ICS_IDX].writable);
3336}
3337
3338/**
3339 * Write handler for Interrupt Mask Set register.
3340 *
3341 * Will trigger pending interrupts.
3342 *
3343 * @param pThis The device state structure.
3344 * @param offset Register offset in memory-mapped frame.
3345 * @param index Register index in register array.
3346 * @param value The value to store.
3347 * @param mask Used to implement partial writes (8 and 16-bit).
3348 * @thread EMT
3349 */
3350static int e1kRegWriteIMS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3351{
3352 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3353
3354 IMS |= value;
3355 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
3356 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", pThis->szPrf));
3357 /*
3358 * We cannot raise an interrupt here as it will occasionally cause an interrupt storm
3359 * in Windows guests (see @bugref{8624}, @bugref{5023}).
3360 */
3361 if ((ICR & IMS) && !pThis->fLocked)
3362 {
3363 E1K_INC_ISTAT_CNT(pThis->uStatIntIMS);
3364 e1kPostponeInterrupt(pDevIns, pThis, E1K_IMS_INT_DELAY_NS);
3365 }
3366
3367 return VINF_SUCCESS;
3368}
3369
3370/**
3371 * Write handler for Interrupt Mask Clear register.
3372 *
3373 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
3374 *
3375 * @param pThis The device state structure.
3376 * @param offset Register offset in memory-mapped frame.
3377 * @param index Register index in register array.
3378 * @param value The value to store.
3379 * @param mask Used to implement partial writes (8 and 16-bit).
3380 * @thread EMT
3381 */
3382static int e1kRegWriteIMC(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3383{
3384 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3385
3386 e1kCsEnterReturn(pThis, VINF_IOM_R3_MMIO_WRITE);
3387 if (pThis->fIntRaised)
3388 {
3389 /*
3390 * Technically we should reset fIntRaised in ICR read handler, but it will cause
3391 * Windows to freeze since it may receive an interrupt while still in the very beginning
3392 * of interrupt handler.
3393 */
3394 E1K_INC_ISTAT_CNT(pThis->uStatIntLower);
3395 STAM_COUNTER_INC(&pThis->StatIntsPrevented);
3396 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
3397 /* Lower(0) INTA(0) */
3398 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
3399 pThis->fIntRaised = false;
3400 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
3401 }
3402 IMS &= ~value;
3403 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", pThis->szPrf));
3404 e1kCsLeave(pThis);
3405
3406 return VINF_SUCCESS;
3407}
3408
3409/**
3410 * Write handler for Receive Control register.
3411 *
3412 * @param pThis The device state structure.
3413 * @param offset Register offset in memory-mapped frame.
3414 * @param index Register index in register array.
3415 * @param value The value to store.
3416 * @param mask Used to implement partial writes (8 and 16-bit).
3417 * @thread EMT
3418 */
3419static int e1kRegWriteRCTL(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3420{
3421 /* Update promiscuous mode */
3422 bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
3423 if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
3424 {
3425 /* Promiscuity has changed, pass the knowledge on. */
3426#ifndef IN_RING3
3427 return VINF_IOM_R3_MMIO_WRITE;
3428#else
3429 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3430 if (pThisCC->pDrvR3)
3431 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, fBecomePromiscous);
3432#endif
3433 }
3434
3435 /* Adjust receive buffer size */
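    /* Per the formula below, BSIZE selects 2048/1024/512/256 bytes and BSEX
     * multiplies that by 16 (e.g. BSIZE=01b gives 1024, or 16384 with BSEX);
     * the result is clamped to E1K_MAX_RX_PKT_SIZE. */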
3436 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
3437 if (value & RCTL_BSEX)
3438 cbRxBuf *= 16;
3439 if (cbRxBuf > E1K_MAX_RX_PKT_SIZE)
3440 cbRxBuf = E1K_MAX_RX_PKT_SIZE;
3441 if (cbRxBuf != pThis->u16RxBSize)
3442 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
3443 pThis->szPrf, cbRxBuf, pThis->u16RxBSize));
3444 Assert(cbRxBuf < 65536);
3445 pThis->u16RxBSize = (uint16_t)cbRxBuf;
3446
3447 /* Update the register */
3448 return e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3449}
3450
3451/**
3452 * Write handler for Packet Buffer Allocation register.
3453 *
3454 * TXA = 64 - RXA.
3455 *
3456 * @param pThis The device state structure.
3457 * @param offset Register offset in memory-mapped frame.
3458 * @param index Register index in register array.
3459 * @param value The value to store.
3460 * @param mask Used to implement partial writes (8 and 16-bit).
3461 * @thread EMT
3462 */
3463static int e1kRegWritePBA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3464{
3465 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3466 PBA_st->txa = 64 - PBA_st->rxa;
3467
3468 return VINF_SUCCESS;
3469}
3470
3471/**
3472 * Write handler for Receive Descriptor Tail register.
3473 *
3474 * @remarks A write to RDT forces a switch to HC and signals
3475 *          e1kR3NetworkDown_WaitReceiveAvail().
3476 *
3477 * @returns VBox status code.
3478 *
3479 * @param pThis The device state structure.
3480 * @param offset Register offset in memory-mapped frame.
3481 * @param index Register index in register array.
3482 * @param value The value to store.
3483 * @param mask Used to implement partial writes (8 and 16-bit).
3484 * @thread EMT
3485 */
3486static int e1kRegWriteRDT(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3487{
3488#ifndef IN_RING3
3489 /* XXX */
3490// return VINF_IOM_R3_MMIO_WRITE;
3491#endif
3492 int rc = e1kCsRxEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3493 if (RT_LIKELY(rc == VINF_SUCCESS))
3494 {
3495 E1kLog(("%s e1kRegWriteRDT\n", pThis->szPrf));
3496#ifndef E1K_WITH_RXD_CACHE
3497 /*
3498 * Some drivers advance RDT too far, so that it equals RDH. This
3499 * somehow manages to work with real hardware but not with this
3500 * emulated device. We can work with these drivers if we just
3501 * write 1 less when we see a driver writing RDT equal to RDH,
3502 * see @bugref{7346}.
3503 */
3504 if (value == RDH)
3505 {
3506 if (RDH == 0)
3507 value = (RDLEN / sizeof(E1KRXDESC)) - 1;
3508 else
3509 value = RDH - 1;
3510 }
3511#endif /* !E1K_WITH_RXD_CACHE */
3512 rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3513#ifdef E1K_WITH_RXD_CACHE
3514 E1KRXDC rxdc;
3515 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kRegWriteRDT")))
3516 {
3517 e1kCsRxLeave(pThis);
3518 E1kLog(("%s e1kRegWriteRDT: failed to update Rx context, returning VINF_SUCCESS\n", pThis->szPrf));
3519 return VINF_SUCCESS;
3520 }
3521 /*
3522         * We need to fetch descriptors now as RDT may go a full circle
3523         * before we attempt to store a received packet. For example,
3524         * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
3525         * size being only 8 descriptors! Note that we fetch descriptors
3526         * only when the cache is empty to reduce the number of memory reads
3527         * in case of frequent RDT writes. Don't fetch anything when the
3528         * receiver is disabled either, as RDH, RDT and RDLEN can be in a
3529         * messed up state then.
3530         * Note that even though the cache may seem empty, i.e. contain no
3531         * more available descriptors, it may still be in use by the RX
3532         * thread, which has not yet written the last descriptor back but has
3533         * temporarily released the RX lock in order to write the packet body
3534         * to the descriptor's buffer. At this point we are still going to
3535         * prefetch, but it won't actually fetch anything if there are no
3536         * unused slots in our "empty" cache (nRxDFetched==E1K_RXD_CACHE_SIZE).
3537         * We must not reset the cache here even if it appears empty; it will
3538         * be reset at a later point in e1kRxDGet().
3539 */
3540 if (e1kRxDIsCacheEmpty(pThis) && (RCTL & RCTL_EN))
3541 e1kRxDPrefetch(pDevIns, pThis, &rxdc);
3542#endif /* E1K_WITH_RXD_CACHE */
3543 e1kCsRxLeave(pThis);
3544 if (RT_SUCCESS(rc))
3545 {
3546 /* Signal that we have more receive descriptors available. */
3547 e1kWakeupReceive(pDevIns, pThis);
3548 }
3549 }
3550 return rc;
3551}
3552
3553/**
3554 * Write handler for Receive Delay Timer register.
3555 *
3556 * @param pThis The device state structure.
3557 * @param offset Register offset in memory-mapped frame.
3558 * @param index Register index in register array.
3559 * @param value The value to store.
3560 * @param mask Used to implement partial writes (8 and 16-bit).
3561 * @thread EMT
3562 */
3563static int e1kRegWriteRDTR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3564{
3565 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3566 if (value & RDTR_FPD)
3567 {
3568 /* Flush requested, cancel both timers and raise interrupt */
3569#ifdef E1K_USE_RX_TIMERS
3570 e1kCancelTimer(pDevIns, pThis, pThis->hRIDTimer);
3571 e1kCancelTimer(pDevIns, pThis, pThis->hRADTimer);
3572#endif
3573 E1K_INC_ISTAT_CNT(pThis->uStatIntRDTR);
3574 return e1kRaiseInterrupt(pDevIns, pThis, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3575 }
3576
3577 return VINF_SUCCESS;
3578}
3579
3580DECLINLINE(uint32_t) e1kGetTxLen(PE1KTXDC pTxdc)
3581{
3582    /*
3583 * Make sure TDT won't change during computation. EMT may modify TDT at
3584 * any moment.
3585 */
3586 uint32_t tdt = pTxdc->tdt;
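    /* Descriptors between TDH and TDT belong to the hardware; if TDH has passed
     * TDT the ring has wrapped, so add the ring size (TDLEN / sizeof(E1KTXDESC)). */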
3587 return (pTxdc->tdh > tdt ? pTxdc->tdlen/sizeof(E1KTXDESC) : 0) + tdt - pTxdc->tdh;
3588}
3589
3590#ifdef IN_RING3
3591
3592# ifdef E1K_TX_DELAY
3593/**
3594 * @callback_method_impl{FNTMTIMERDEV, Transmit Delay Timer handler.}
3595 */
3596static DECLCALLBACK(void) e1kR3TxDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3597{
3598 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3599 Assert(PDMDevHlpCritSectIsOwner(pDevIns, &pThis->csTx));
3600 RT_NOREF(hTimer);
3601
3602 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayExp);
3603# ifdef E1K_INT_STATS
3604 uint64_t u64Elapsed = RTTimeNanoTS() - pThis->u64ArmedAt;
3605 if (u64Elapsed > pThis->uStatMaxTxDelay)
3606 pThis->uStatMaxTxDelay = u64Elapsed;
3607# endif
3608 int rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
3609 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3610}
3611# endif /* E1K_TX_DELAY */
3612
3613//# ifdef E1K_USE_TX_TIMERS
3614
3615/**
3616 * @callback_method_impl{FNTMTIMERDEV, Transmit Interrupt Delay Timer handler.}
3617 */
3618static DECLCALLBACK(void) e1kR3TxIntDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3619{
3620 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3621 Assert(hTimer == pThis->hTIDTimer); RT_NOREF(hTimer);
3622
3623 E1K_INC_ISTAT_CNT(pThis->uStatTID);
3624 /* Cancel absolute delay timer as we have already got attention */
3625# ifndef E1K_NO_TAD
3626 e1kCancelTimer(pDevIns, pThis, pThis->hTADTimer);
3627# endif
3628 e1kRaiseInterrupt(pDevIns, pThis, VERR_IGNORED, ICR_TXDW);
3629}
3630
3631/**
3632 * @callback_method_impl{FNTMTIMERDEV, Transmit Absolute Delay Timer handler.}
3633 */
3634static DECLCALLBACK(void) e1kR3TxAbsDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3635{
3636 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3637 Assert(hTimer == pThis->hTADTimer); RT_NOREF(hTimer);
3638
3639 E1K_INC_ISTAT_CNT(pThis->uStatTAD);
3640 /* Cancel interrupt delay timer as we have already got attention */
3641 e1kCancelTimer(pDevIns, pThis, pThis->hTIDTimer);
3642 e1kRaiseInterrupt(pDevIns, pThis, VERR_IGNORED, ICR_TXDW);
3643}
3644
3645//# endif /* E1K_USE_TX_TIMERS */
3646# ifdef E1K_USE_RX_TIMERS
3647
3648/**
3649 * @callback_method_impl{FNTMTIMERDEV, Receive Interrupt Delay Timer handler.}
3650 */
3651static DECLCALLBACK(void) e1kR3RxIntDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3652{
3653 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3654 Assert(hTimer == pThis->hRIDTimer); RT_NOREF(hTimer);
3655
3656 E1K_INC_ISTAT_CNT(pThis->uStatRID);
3657 /* Cancel absolute delay timer as we have already got attention */
3658 e1kCancelTimer(pDevIns, pThis, pThis->hRADTimer);
3659 e1kRaiseInterrupt(pDevIns, pThis, VERR_IGNORED, ICR_RXT0);
3660}
3661
3662/**
3663 * @callback_method_impl{FNTMTIMERDEV, Receive Absolute Delay Timer handler.}
3664 */
3665static DECLCALLBACK(void) e1kR3RxAbsDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3666{
3667 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3668 Assert(hTimer == pThis->hRADTimer); RT_NOREF(hTimer);
3669
3670 E1K_INC_ISTAT_CNT(pThis->uStatRAD);
3671 /* Cancel interrupt delay timer as we have already got attention */
3672 e1kCancelTimer(pDevIns, pThis, pThis->hRIDTimer);
3673 e1kRaiseInterrupt(pDevIns, pThis, VERR_IGNORED, ICR_RXT0);
3674}
3675
3676# endif /* E1K_USE_RX_TIMERS */
3677
3678/**
3679 * @callback_method_impl{FNTMTIMERDEV, Late Interrupt Timer handler.}
3680 */
3681static DECLCALLBACK(void) e1kR3LateIntTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3682{
3683 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3684 Assert(hTimer == pThis->hIntTimer); RT_NOREF(hTimer);
3686
3687 STAM_PROFILE_ADV_START(&pThis->StatLateIntTimer, a);
3688 STAM_COUNTER_INC(&pThis->StatLateInts);
3689 E1K_INC_ISTAT_CNT(pThis->uStatIntLate);
3690# if 0
3691 if (pThis->iStatIntLost > -100)
3692 pThis->iStatIntLost--;
3693# endif
3694 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, 0);
3695 STAM_PROFILE_ADV_STOP(&pThis->StatLateIntTimer, a);
3696}
3697
3698/**
3699 * @callback_method_impl{FNTMTIMERDEV, Link Up Timer handler.}
3700 */
3701static DECLCALLBACK(void) e1kR3LinkUpTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3702{
3703 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3704 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3705 Assert(hTimer == pThis->hLUTimer); RT_NOREF(hTimer);
3706
3707 /*
3708     * This can happen if we set the link status to down while the link-up timer was
3709     * already armed (shortly after e1kR3LoadDone()), or when the cable is disconnected
3710     * and reconnected very quickly. Moreover, 82543GC triggers LSC on reset even if
3711     * the cable is unplugged (see @bugref{8942}).
3712 */
3713 if (e1kIsConnected(pDevIns))
3714 {
3715 /* 82543GC does not have an internal PHY */
3716 if (pThis->eChip == E1K_CHIP_82543GC || (CTRL & CTRL_SLU))
3717 e1kR3LinkUp(pDevIns, pThis, pThisCC);
3718 }
3719# ifdef E1K_LSC_ON_RESET
3720 else if (pThis->eChip == E1K_CHIP_82543GC)
3721 e1kR3LinkDown(pDevIns, pThis, pThisCC);
3722# endif /* E1K_LSC_ON_RESET */
3723}
3724
3725#endif /* IN_RING3 */
3726
3727/**
3728 * Sets up the GSO context according to the new TSE context descriptor.
3729 *
3730 * @param pGso The GSO context to setup.
3731 * @param pCtx The context descriptor.
3732 */
3733DECLINLINE(bool) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3734{
3735 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3736
3737 /*
3738 * See if the context descriptor describes something that could be TCP or
3739 * UDP over IPv[46].
3740 */
3741 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3742 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3743 {
3744 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3745 return false;
3746 }
3747 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3748 {
3749 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3750 return false;
3751 }
3752 if (RT_UNLIKELY( pCtx->dw2.fTCP
3753 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3754 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3755 {
3756 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3757 return false;
3758 }
3759
3760 /* The end of the TCP/UDP checksum should stop at the end of the packet or at least after the headers. */
3761 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3762 {
3763 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3764 return false;
3765 }
3766
3767 /* IPv4 checksum offset. */
3768 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3769 {
3770 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3771 return false;
3772 }
3773
3774 /* TCP/UDP checksum offsets. */
3775 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3776 != ( pCtx->dw2.fTCP
3777 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3778 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3779 {
3780        E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->tu.u8CSO, pCtx->tu.u8CSS, pCtx->dw2.fTCP));
3781 return false;
3782 }
3783
3784 /*
3785 * Because of internal networking using a 16-bit size field for GSO context
3786 * plus frame, we have to make sure we don't exceed this.
3787 */
3788 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3789 {
3790 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3791 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3792 return false;
3793 }
3794
3795 /*
3796 * We're good for now - we'll do more checks when seeing the data.
3797 * So, figure the type of offloading and setup the context.
3798 */
3799 if (pCtx->dw2.fIP)
3800 {
3801 if (pCtx->dw2.fTCP)
3802 {
3803 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3804 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3805 }
3806 else
3807 {
3808 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3809 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3810 }
3811 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3812 * this yet it seems)... */
3813 }
3814 else
3815 {
3816 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /** @todo IPv6 UFO */
3817 if (pCtx->dw2.fTCP)
3818 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3819 else
3820 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3821 }
3822 pGso->offHdr1 = pCtx->ip.u8CSS;
3823 pGso->offHdr2 = pCtx->tu.u8CSS;
3824 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3825 pGso->cbMaxSeg = pCtx->dw3.u16MSS + (pGso->u8Type == PDMNETWORKGSOTYPE_IPV4_UDP ? pGso->offHdr2 : 0);
3826 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3827 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3828 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3829 return PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5);
3830}
3831
3832/**
3833 * Checks if we can use GSO processing for the current TSE frame.
3834 *
3835 * @param pThis The device state structure.
3836 * @param pGso The GSO context.
3837 * @param pData The first data descriptor of the frame.
3838 * @param pCtx The TSO context descriptor.
3839 */
3840DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3841{
3842 if (!pData->cmd.fTSE)
3843 {
3844 E1kLog2(("e1kCanDoGso: !TSE\n"));
3845 return false;
3846 }
3847 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3848 {
3849 E1kLog(("e1kCanDoGso: VLE\n"));
3850 return false;
3851 }
3852 if (RT_UNLIKELY(!pThis->fGSOEnabled))
3853 {
3854 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3855 return false;
3856 }
3857
3858 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3859 {
3860 case PDMNETWORKGSOTYPE_IPV4_TCP:
3861 case PDMNETWORKGSOTYPE_IPV4_UDP:
3862 if (!pData->dw3.fIXSM)
3863 {
3864 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3865 return false;
3866 }
3867 if (!pData->dw3.fTXSM)
3868 {
3869 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3870 return false;
3871 }
3872 /** @todo what more check should we perform here? Ethernet frame type? */
3873 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3874 return true;
3875
3876 case PDMNETWORKGSOTYPE_IPV6_TCP:
3877 case PDMNETWORKGSOTYPE_IPV6_UDP:
3878 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3879 {
3880 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3881 return false;
3882 }
3883 if (!pData->dw3.fTXSM)
3884 {
3885                E1kLog(("e1kCanDoGso: !TXSM (IPv6)\n"));
3886 return false;
3887 }
3888 /** @todo what more check should we perform here? Ethernet frame type? */
3889            E1kLog2(("e1kCanDoGso: OK, IPv6\n"));
3890 return true;
3891
3892 default:
3893 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3894 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3895 return false;
3896 }
3897}
3898
3899/**
3900 * Frees the current xmit buffer.
3901 *
3902 * @param pThis The device state structure.
3903 */
3904static void e1kXmitFreeBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC)
3905{
3906 PPDMSCATTERGATHER pSg = pThisCC->CTX_SUFF(pTxSg);
3907 if (pSg)
3908 {
3909 pThisCC->CTX_SUFF(pTxSg) = NULL;
3910
3911 if (pSg->pvAllocator != pThis)
3912 {
3913 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3914 if (pDrv)
3915 pDrv->pfnFreeBuf(pDrv, pSg);
3916 }
3917 else
3918 {
3919 /* loopback */
3920 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3921 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3922 pSg->fFlags = 0;
3923 pSg->pvAllocator = NULL;
3924 }
3925 }
3926}
3927
3928#ifndef E1K_WITH_TXD_CACHE
3929/**
3930 * Allocates an xmit buffer.
3931 *
3932 * @returns See PDMINETWORKUP::pfnAllocBuf.
3933 * @param pThis The device state structure.
3934 * @param cbMin The minimum frame size.
3935 * @param fExactSize Whether cbMin is exact or if we have to max it
3936 * out to the max MTU size.
3937 * @param fGso Whether this is a GSO frame or not.
3938 */
3939DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC, size_t cbMin, bool fExactSize, bool fGso)
3940{
3941 /* Adjust cbMin if necessary. */
3942 if (!fExactSize)
3943 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3944
3945 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3946 if (RT_UNLIKELY(pThisCC->CTX_SUFF(pTxSg)))
3947 e1kXmitFreeBuf(pThis, pThisCC);
3948 Assert(pThisCC->CTX_SUFF(pTxSg) == NULL);
3949
3950 /*
3951 * Allocate the buffer.
3952 */
3953 PPDMSCATTERGATHER pSg;
3954 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3955 {
3956 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3957 if (RT_UNLIKELY(!pDrv))
3958 return VERR_NET_DOWN;
3959 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pThis->GsoCtx : NULL, &pSg);
3960 if (RT_FAILURE(rc))
3961 {
3962 /* Suspend TX as we are out of buffers atm */
3963 STATUS |= STATUS_TXOFF;
3964 return rc;
3965 }
3966 }
3967 else
3968 {
3969 /* Create a loopback using the fallback buffer and preallocated SG. */
3970 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3971 pSg = &pThis->uTxFallback.Sg;
3972 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3973 pSg->cbUsed = 0;
3974 pSg->cbAvailable = 0;
3975 pSg->pvAllocator = pThis;
3976 pSg->pvUser = NULL; /* No GSO here. */
3977 pSg->cSegs = 1;
3978 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3979 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3980 }
3981
3982 pThisCC->CTX_SUFF(pTxSg) = pSg;
3983 return VINF_SUCCESS;
3984}
3985#else /* E1K_WITH_TXD_CACHE */
3986/**
3987 * Allocates an xmit buffer.
3988 *
3989 * @returns See PDMINETWORKUP::pfnAllocBuf.
3990 * @param pThis The device state structure.
3991 * @param pThisCC     The current context instance data.
3992 * @param fGso        Whether this is a GSO frame or not; the buffer size to
3993 *                    allocate is taken from pThis->cbTxAlloc.
3995 */
3996DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC, bool fGso)
3997{
3998 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3999 if (RT_UNLIKELY(pThisCC->CTX_SUFF(pTxSg)))
4000 e1kXmitFreeBuf(pThis, pThisCC);
4001 Assert(pThisCC->CTX_SUFF(pTxSg) == NULL);
4002
4003 /*
4004 * Allocate the buffer.
4005 */
4006 PPDMSCATTERGATHER pSg;
4007 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
4008 {
4009 if (pThis->cbTxAlloc == 0)
4010 {
4011 /* Zero packet, no need for the buffer */
4012 return VINF_SUCCESS;
4013 }
4014 if (fGso && pThis->GsoCtx.u8Type == PDMNETWORKGSOTYPE_INVALID)
4015 {
4016 E1kLog3(("Invalid GSO context, won't allocate this packet, cb=%u %s%s\n",
4017 pThis->cbTxAlloc, pThis->fVTag ? "VLAN " : "", pThis->fGSO ? "GSO " : ""));
4018 /* No valid GSO context is available, ignore this packet. */
4019 pThis->cbTxAlloc = 0;
4020 return VINF_SUCCESS;
4021 }
4022
4023 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
4024 if (RT_UNLIKELY(!pDrv))
4025 return VERR_NET_DOWN;
4026 int rc = pDrv->pfnAllocBuf(pDrv, pThis->cbTxAlloc, fGso ? &pThis->GsoCtx : NULL, &pSg);
4027 if (RT_FAILURE(rc))
4028 {
4029 /* Suspend TX as we are out of buffers atm */
4030 STATUS |= STATUS_TXOFF;
4031 return rc;
4032 }
4033 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
4034 pThis->szPrf, pThis->cbTxAlloc,
4035 pThis->fVTag ? "VLAN " : "",
4036 pThis->fGSO ? "GSO " : ""));
4037 }
4038 else
4039 {
4040 /* Create a loopback using the fallback buffer and preallocated SG. */
4041 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
4042 pSg = &pThis->uTxFallback.Sg;
4043 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
4044 pSg->cbUsed = 0;
4045 pSg->cbAvailable = sizeof(pThis->aTxPacketFallback);
4046 pSg->pvAllocator = pThis;
4047 pSg->pvUser = NULL; /* No GSO here. */
4048 pSg->cSegs = 1;
4049 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
4050 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
4051 }
4052 pThis->cbTxAlloc = 0;
4053
4054 pThisCC->CTX_SUFF(pTxSg) = pSg;
4055 return VINF_SUCCESS;
4056}
4057#endif /* E1K_WITH_TXD_CACHE */
4058
4059/**
4060 * Checks if it's a GSO buffer or not.
4061 *
4062 * @returns true / false.
4063 * @param pTxSg The scatter / gather buffer.
4064 */
4065DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
4066{
4067#if 0
4068 if (!pTxSg)
4069 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
4070    if (pTxSg && !pTxSg->pvUser)
4071        E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
4072#endif
4073 return pTxSg && pTxSg->pvUser /* GSO indicator */;
4074}
4075
4076#ifndef E1K_WITH_TXD_CACHE
4077/**
4078 * Load transmit descriptor from guest memory.
4079 *
4080 * @param pDevIns The device instance.
4081 * @param pDesc Pointer to descriptor union.
4082 * @param addr Physical address in guest context.
4083 * @thread E1000_TX
4084 */
4085DECLINLINE(void) e1kLoadDesc(PPDMDEVINS pDevIns, E1KTXDESC *pDesc, RTGCPHYS addr)
4086{
4087 PDMDevHlpPCIPhysRead(pDevIns, addr, pDesc, sizeof(E1KTXDESC));
4088}
4089#else /* E1K_WITH_TXD_CACHE */
4090/**
4091 * Load transmit descriptors from guest memory.
4092 *
4093 * We need two physical reads in case the tail wrapped around the end of TX
4094 * descriptor ring.
4095 *
4096 * @returns the actual number of descriptors fetched.
4097 * @param pDevIns The device instance.
4098 * @param pThis The device state structure.
4099 * @thread E1000_TX
4100 */
4101DECLINLINE(unsigned) e1kTxDLoadMore(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KTXDC pTxdc)
4102{
4103 Assert(pThis->iTxDCurrent == 0);
4104 /* We've already loaded pThis->nTxDFetched descriptors past TDH. */
4105 unsigned nDescsAvailable = e1kGetTxLen(pTxdc) - pThis->nTxDFetched;
4106 /* The following two lines ensure that pThis->nTxDFetched never overflows. */
4107 AssertCompile(E1K_TXD_CACHE_SIZE < (256 * sizeof(pThis->nTxDFetched)));
4108 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pThis->nTxDFetched);
4109 unsigned nDescsTotal = pTxdc->tdlen / sizeof(E1KTXDESC);
4110 Assert(nDescsTotal != 0);
4111 if (nDescsTotal == 0)
4112 return 0;
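    /* Compute the index of the first descriptor that is not in the cache yet
     * (wrapping at the end of the ring) and how many descriptors we can fetch
     * in a single contiguous read. */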
4113 unsigned nFirstNotLoaded = (pTxdc->tdh + pThis->nTxDFetched) % nDescsTotal;
4114 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
4115 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
4116 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
4117 nFirstNotLoaded, nDescsInSingleRead));
4118 if (nDescsToFetch == 0)
4119 return 0;
4120 E1KTXDESC* pFirstEmptyDesc = &pThis->aTxDescriptors[pThis->nTxDFetched];
4121 PDMDevHlpPCIPhysRead(pDevIns,
4122 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
4123 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
4124 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
4125 pThis->szPrf, nDescsInSingleRead,
4126 TDBAH, TDBAL + pTxdc->tdh * sizeof(E1KTXDESC),
4127 nFirstNotLoaded, pTxdc->tdlen, pTxdc->tdh, pTxdc->tdt));
4128 if (nDescsToFetch > nDescsInSingleRead)
4129 {
4130 PDMDevHlpPCIPhysRead(pDevIns,
4131 ((uint64_t)TDBAH << 32) + TDBAL,
4132 pFirstEmptyDesc + nDescsInSingleRead,
4133 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
4134 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
4135 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
4136 TDBAH, TDBAL));
4137 }
4138 pThis->nTxDFetched += (uint8_t)nDescsToFetch;
4139 return nDescsToFetch;
4140}
4141
4142/**
4143 * Load transmit descriptors from guest memory only if there are no loaded
4144 * descriptors.
4145 *
4146 * @returns true if there are descriptors in cache.
4147 * @param pDevIns The device instance.
4148 * @param pThis The device state structure.
4149 * @thread E1000_TX
4150 */
4151DECLINLINE(bool) e1kTxDLazyLoad(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KTXDC pTxdc)
4152{
4153 if (pThis->nTxDFetched == 0)
4154 return e1kTxDLoadMore(pDevIns, pThis, pTxdc) != 0;
4155 return true;
4156}
4157#endif /* E1K_WITH_TXD_CACHE */
4158
4159/**
4160 * Write back transmit descriptor to guest memory.
4161 *
4162 * @param pDevIns The device instance.
4163 * @param pThis The device state structure.
4164 * @param pDesc Pointer to descriptor union.
4165 * @param addr Physical address in guest context.
4166 * @thread E1000_TX
4167 */
4168DECLINLINE(void) e1kWriteBackDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4169{
4170    /* Only the last half of the descriptor has to be written back, but we write back the whole descriptor for simplicity. */
4171 e1kPrintTDesc(pThis, pDesc, "^^^");
4172 PDMDevHlpPCIPhysWrite(pDevIns, addr, pDesc, sizeof(E1KTXDESC));
4173}
4174
4175/**
4176 * Transmit complete frame.
4177 *
4178 * @remarks We skip the FCS since we're not responsible for sending anything to
4179 * a real ethernet wire.
4180 *
4181 * @param pDevIns The device instance.
4182 * @param pThis The device state structure.
4183 * @param pThisCC The current context instance data.
4184 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4185 * @thread E1000_TX
4186 */
4187static void e1kTransmitFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, bool fOnWorkerThread)
4188{
4189 PPDMSCATTERGATHER pSg = pThisCC->CTX_SUFF(pTxSg);
4190 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
4191 Assert(!pSg || pSg->cSegs == 1);
4192
4193 if (cbFrame < 14)
4194 {
4195 Log(("%s Ignoring invalid frame (%u bytes)\n", pThis->szPrf, cbFrame));
4196 return;
4197 }
4198 if (cbFrame > 70) /* unqualified guess */
4199 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
4200
4201#ifdef E1K_INT_STATS
4202 if (cbFrame <= 1514)
4203 E1K_INC_ISTAT_CNT(pThis->uStatTx1514);
4204 else if (cbFrame <= 2962)
4205 E1K_INC_ISTAT_CNT(pThis->uStatTx2962);
4206 else if (cbFrame <= 4410)
4207 E1K_INC_ISTAT_CNT(pThis->uStatTx4410);
4208 else if (cbFrame <= 5858)
4209 E1K_INC_ISTAT_CNT(pThis->uStatTx5858);
4210 else if (cbFrame <= 7306)
4211 E1K_INC_ISTAT_CNT(pThis->uStatTx7306);
4212 else if (cbFrame <= 8754)
4213 E1K_INC_ISTAT_CNT(pThis->uStatTx8754);
4214 else if (cbFrame <= 16384)
4215 E1K_INC_ISTAT_CNT(pThis->uStatTx16384);
4216 else if (cbFrame <= 32768)
4217 E1K_INC_ISTAT_CNT(pThis->uStatTx32768);
4218 else
4219 E1K_INC_ISTAT_CNT(pThis->uStatTxLarge);
4220#endif /* E1K_INT_STATS */
4221
4222 /* Add VLAN tag */
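    /* Insert the 802.1Q tag right after the two MAC addresses (byte offset 12):
     * shift the rest of the frame by 4 bytes and store VET + TCI there. */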
4223 if (cbFrame > 12 && pThis->fVTag && pSg->cbUsed + 4 <= pSg->cbAvailable)
4224 {
4225 E1kLog3(("%s Inserting VLAN tag %08x\n",
4226 pThis->szPrf, RT_BE2H_U16((uint16_t)VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16)));
4227 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
4228 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16((uint16_t)VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16);
4229 pSg->cbUsed += 4;
4230 cbFrame += 4;
4231 Assert(pSg->cbUsed == cbFrame);
4232 Assert(pSg->cbUsed <= pSg->cbAvailable);
4233 }
4234/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
4235 "%.*Rhxd\n"
4236 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
4237 pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
4238
4239 /* Update the stats */
4240 E1K_INC_CNT32(TPT);
4241 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
4242 E1K_INC_CNT32(GPTC);
4243 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
4244 E1K_INC_CNT32(BPTC);
4245 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
4246 E1K_INC_CNT32(MPTC);
4247 /* Update octet transmit counter */
4248 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
4249 if (pThisCC->CTX_SUFF(pDrv))
4250 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, cbFrame);
4251 if (cbFrame == 64)
4252 E1K_INC_CNT32(PTC64);
4253 else if (cbFrame < 128)
4254 E1K_INC_CNT32(PTC127);
4255 else if (cbFrame < 256)
4256 E1K_INC_CNT32(PTC255);
4257 else if (cbFrame < 512)
4258 E1K_INC_CNT32(PTC511);
4259 else if (cbFrame < 1024)
4260 E1K_INC_CNT32(PTC1023);
4261 else
4262 E1K_INC_CNT32(PTC1522);
4263
4264 E1K_INC_ISTAT_CNT(pThis->uStatTxFrm);
4265
4266 /*
4267 * Dump and send the packet.
4268 */
4269 int rc = VERR_NET_DOWN;
4270 if (pSg && pSg->pvAllocator != pThis)
4271 {
4272 e1kPacketDump(pDevIns, pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
4273
4274 pThisCC->CTX_SUFF(pTxSg) = NULL;
4275 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
4276 if (pDrv)
4277 {
4278 /* Release critical section to avoid deadlock in CanReceive */
4279 //e1kCsLeave(pThis);
4280 STAM_PROFILE_START(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4281 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
4282 STAM_PROFILE_STOP(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4283 //e1kR3CsEnterAsserted(pThis);
4284 }
4285 }
4286 else if (pSg)
4287 {
4288 Assert(pSg->aSegs[0].pvSeg == pThis->aTxPacketFallback);
4289 e1kPacketDump(pDevIns, pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
4290
4291 /** @todo do we actually need to check that we're in loopback mode here? */
4292 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
4293 {
4294 E1KRXDST status;
4295 RT_ZERO(status);
4296 status.fPIF = true;
4297 e1kHandleRxPacket(pDevIns, pThis, pSg->aSegs[0].pvSeg, cbFrame, status);
4298 rc = VINF_SUCCESS;
4299 }
4300 e1kXmitFreeBuf(pThis, pThisCC);
4301 }
4302 else
4303 rc = VERR_NET_DOWN;
4304 if (RT_FAILURE(rc))
4305 {
4306 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
4307 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
4308 }
4309
4310 pThis->led.Actual.s.fWriting = 0;
4311}
4312
4313/**
4314 * Compute and write internet checksum (e1kCSum16) at the specified offset.
4315 *
4316 * @param pThis The device state structure.
4317 * @param pPkt Pointer to the packet.
4318 * @param u16PktLen Total length of the packet.
4319 * @param cso Offset in packet to write checksum at.
4320 * @param css Offset in packet to start computing
4321 * checksum from.
4322 * @param cse Offset in packet to stop computing
4323 * checksum at.
4324 * @param fUdp Replace 0 checksum with all 1s.
4325 * @thread E1000_TX
4326 */
4327static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse, bool fUdp = false)
4328{
4329 RT_NOREF1(pThis);
4330
4331 if (css >= u16PktLen)
4332 {
4333        E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
4334                 pThis->szPrf, css, u16PktLen));
4335 return;
4336 }
4337
4338 if (cso >= u16PktLen - 1)
4339 {
4340 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
4341 pThis->szPrf, cso, u16PktLen));
4342 return;
4343 }
4344
4345 if (cse == 0 || cse >= u16PktLen)
4346 cse = u16PktLen - 1;
4347 else if (cse < css)
4348 {
4349 E1kLog2(("%s css(%X) is greater than cse(%X), checksum is not inserted\n",
4350 pThis->szPrf, css, cse));
4351 return;
4352 }
4353
4354 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
4355 if (fUdp && u16ChkSum == 0)
4356 u16ChkSum = ~u16ChkSum; /* 0 means no checksum computed in case of UDP (see @bugref{9883}) */
4357 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", pThis->szPrf,
4358 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
4359 *(uint16_t*)(pPkt + cso) = u16ChkSum;
4360}
4361
4362/**
4363 * Add a part of descriptor's buffer to transmit frame.
4364 *
4365 * @remarks data.u64BufAddr is used unconditionally for both data
4366 * and legacy descriptors since it is identical to
4367 * legacy.u64BufAddr.
4368 *
4369 * @param pDevIns The device instance.
4370 * @param pThis The device state structure.
4371 * @param pDesc Pointer to the descriptor to transmit.
4372 * @param u16Len Length of buffer to the end of segment.
4373 * @param fSend Force packet sending.
4374 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4375 * @thread E1000_TX
4376 */
4377#ifndef E1K_WITH_TXD_CACHE
4378static void e1kFallbackAddSegment(PPDMDEVINS pDevIns, PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4379{
4380 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
4381 /* TCP header being transmitted */
4382 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4383 /* IP header being transmitted */
4384 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4385
4386 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4387 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4388 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4389
4390 PDMDevHlpPCIPhysRead(pDevIns, PhysAddr, pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4391 E1kLog3(("%s Dump of the segment:\n"
4392 "%.*Rhxd\n"
4393 "%s --- End of dump ---\n",
4394 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4395 pThis->u16TxPktLen += u16Len;
4396 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4397 pThis->szPrf, pThis->u16TxPktLen));
4398 if (pThis->u16HdrRemain > 0)
4399 {
4400 /* The header was not complete, check if it is now */
4401 if (u16Len >= pThis->u16HdrRemain)
4402 {
4403 /* The rest is payload */
4404 u16Len -= pThis->u16HdrRemain;
4405 pThis->u16HdrRemain = 0;
4406 /* Save partial checksum and flags */
4407 pThis->u32SavedCsum = pTcpHdr->chksum;
4408 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4409 /* Clear FIN and PSH flags now and set them only in the last segment */
4410 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4411 }
4412 else
4413 {
4414 /* Still not */
4415 pThis->u16HdrRemain -= u16Len;
4416 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4417 pThis->szPrf, pThis->u16HdrRemain));
4418 return;
4419 }
4420 }
4421
4422 pThis->u32PayRemain -= u16Len;
4423
4424 if (fSend)
4425 {
4426 /* Leave ethernet header intact */
4427 /* IP Total Length = payload + headers - ethernet header */
4428 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4429 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4430 pThis->szPrf, ntohs(pIpHdr->total_len)));
4431 /* Update IP Checksum */
4432 pIpHdr->chksum = 0;
4433 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4434 pThis->contextTSE.ip.u8CSO,
4435 pThis->contextTSE.ip.u8CSS,
4436 pThis->contextTSE.ip.u16CSE);
4437
4438 /* Update TCP flags */
4439 /* Restore original FIN and PSH flags for the last segment */
4440 if (pThis->u32PayRemain == 0)
4441 {
4442 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4443 E1K_INC_CNT32(TSCTC);
4444 }
4445 /* Add TCP length to partial pseudo header sum */
4446 uint32_t csum = pThis->u32SavedCsum
4447 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4448 while (csum >> 16)
4449 csum = (csum >> 16) + (csum & 0xFFFF);
4450 pTcpHdr->chksum = csum;
4451 /* Compute final checksum */
4452 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4453 pThis->contextTSE.tu.u8CSO,
4454 pThis->contextTSE.tu.u8CSS,
4455 pThis->contextTSE.tu.u16CSE);
4456
4457 /*
4458         * Transmit it. If we've used the SG already, allocate a new one before
4459         * we copy the data.
4460 */
4461 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4462 if (!pTxSg)
4463 {
4464 e1kXmitAllocBuf(pThis, pThisCC, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
4465 pTxSg = pThisCC->CTX_SUFF(pTxSg);
4466 }
4467 if (pTxSg)
4468 {
4469 Assert(pThis->u16TxPktLen <= pThisCC->CTX_SUFF(pTxSg)->cbAvailable);
4470 Assert(pTxSg->cSegs == 1);
4471            if (pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4472 memcpy(pTxSg->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4473 pTxSg->cbUsed = pThis->u16TxPktLen;
4474 pTxSg->aSegs[0].cbSeg = pThis->u16TxPktLen;
4475 }
4476 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4477
4478 /* Update Sequence Number */
4479 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4480 - pThis->contextTSE.dw3.u8HDRLEN);
4481 /* Increment IP identification */
4482 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4483 }
4484}
4485#else /* E1K_WITH_TXD_CACHE */
4486static int e1kFallbackAddSegment(PPDMDEVINS pDevIns, PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4487{
4488 int rc = VINF_SUCCESS;
4489 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
4490 /* TCP header being transmitted */
4491 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4492 /* IP header being transmitted */
4493 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4494
4495 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4496 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4497 AssertReturn(pThis->u32PayRemain + pThis->u16HdrRemain > 0, VINF_SUCCESS);
4498
4499 if (pThis->u16TxPktLen + u16Len <= sizeof(pThis->aTxPacketFallback))
4500 PDMDevHlpPCIPhysRead(pDevIns, PhysAddr, pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4501 else
4502 E1kLog(("%s e1kFallbackAddSegment: writing beyond aTxPacketFallback, u16TxPktLen=%d(0x%x) + u16Len=%d(0x%x) > %d\n",
4503 pThis->szPrf, pThis->u16TxPktLen, pThis->u16TxPktLen, u16Len, u16Len, sizeof(pThis->aTxPacketFallback)));
4504 E1kLog3(("%s Dump of the segment:\n"
4505 "%.*Rhxd\n"
4506 "%s --- End of dump ---\n",
4507 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4508 pThis->u16TxPktLen += u16Len;
4509 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4510 pThis->szPrf, pThis->u16TxPktLen));
4511 if (pThis->u16HdrRemain > 0)
4512 {
4513 /* The header was not complete, check if it is now */
4514 if (u16Len >= pThis->u16HdrRemain)
4515 {
4516 /* The rest is payload */
4517 u16Len -= pThis->u16HdrRemain;
4518 pThis->u16HdrRemain = 0;
4519 /* Save partial checksum and flags */
4520 pThis->u32SavedCsum = pTcpHdr->chksum;
4521 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4522 /* Clear FIN and PSH flags now and set them only in the last segment */
4523 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4524 }
4525 else
4526 {
4527             /* Still not complete */
4528 pThis->u16HdrRemain -= u16Len;
4529 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4530 pThis->szPrf, pThis->u16HdrRemain));
4531 return rc;
4532 }
4533 }
4534
4535 if (u16Len > pThis->u32PayRemain)
4536 pThis->u32PayRemain = 0;
4537 else
4538 pThis->u32PayRemain -= u16Len;
4539
4540 if (fSend)
4541 {
4542 /* Leave ethernet header intact */
4543 /* IP Total Length = payload + headers - ethernet header */
4544 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4545 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4546 pThis->szPrf, ntohs(pIpHdr->total_len)));
4547 /* Update IP Checksum */
4548 pIpHdr->chksum = 0;
4549 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4550 pThis->contextTSE.ip.u8CSO,
4551 pThis->contextTSE.ip.u8CSS,
4552 pThis->contextTSE.ip.u16CSE);
4553
4554 /* Update TCP flags */
4555 /* Restore original FIN and PSH flags for the last segment */
4556 if (pThis->u32PayRemain == 0)
4557 {
4558 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4559 E1K_INC_CNT32(TSCTC);
4560 }
4561 /* Add TCP length to partial pseudo header sum */
4562 uint32_t csum = pThis->u32SavedCsum
4563 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4564 while (csum >> 16)
4565 csum = (csum >> 16) + (csum & 0xFFFF);
4566 Assert(csum < 65536);
4567 pTcpHdr->chksum = (uint16_t)csum;
4568 /* Compute final checksum */
4569 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4570 pThis->contextTSE.tu.u8CSO,
4571 pThis->contextTSE.tu.u8CSS,
4572 pThis->contextTSE.tu.u16CSE);
4573
4574 /*
4575 * Transmit it.
4576 */
4577 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4578 if (pTxSg)
4579 {
4580 /* Make sure the packet fits into the allocated buffer */
4581 size_t cbCopy = RT_MIN(pThis->u16TxPktLen, pThisCC->CTX_SUFF(pTxSg)->cbAvailable);
4582#ifdef DEBUG
4583 if (pThis->u16TxPktLen > pTxSg->cbAvailable)
4584 E1kLog(("%s e1kFallbackAddSegment: truncating packet, u16TxPktLen=%d(0x%x) > cbAvailable=%d(0x%x)\n",
4585 pThis->szPrf, pThis->u16TxPktLen, pThis->u16TxPktLen, pTxSg->cbAvailable, pTxSg->cbAvailable));
4586#endif /* DEBUG */
4587 Assert(pTxSg->cSegs == 1);
4588 if (pTxSg->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4589 memcpy(pTxSg->aSegs[0].pvSeg, pThis->aTxPacketFallback, cbCopy);
4590 pTxSg->cbUsed = cbCopy;
4591 pTxSg->aSegs[0].cbSeg = cbCopy;
4592 }
4593 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4594
4595 /* Update Sequence Number */
4596 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4597 - pThis->contextTSE.dw3.u8HDRLEN);
4598 /* Increment IP identification */
4599 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4600
4601 /* Allocate new buffer for the next segment. */
4602 if (pThis->u32PayRemain)
4603 {
4604 pThis->cbTxAlloc = RT_MIN(pThis->u32PayRemain,
4605 pThis->contextTSE.dw3.u16MSS)
4606 + pThis->contextTSE.dw3.u8HDRLEN;
4607 /* Do not add VLAN tags to empty packets. */
4608 if (pThis->fVTag && pThis->cbTxAlloc > 0)
4609 pThis->cbTxAlloc += 4;
4610 rc = e1kXmitAllocBuf(pThis, pThisCC, false /* fGSO */);
4611 }
4612 }
4613
4614 return rc;
4615}
4616#endif /* E1K_WITH_TXD_CACHE */
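
/*
 * For reference: the pseudo-header checksum update in e1kFallbackAddSegment
 * above relies on 16-bit one's complement folding. The block below is a
 * minimal illustrative sketch of that folding step only; the helper name is
 * hypothetical and the block is not compiled.
 */
#if 0 /* illustrative sketch, not compiled */
/* Fold a 32-bit partial one's complement sum into a 16-bit checksum value,
 * as done when the TCP length is added to the saved pseudo-header sum. */
static uint16_t e1kSketchFoldCsum(uint32_t u32Sum)
{
    while (u32Sum >> 16)
        u32Sum = (u32Sum >> 16) + (u32Sum & 0xFFFF);
    return (uint16_t)u32Sum;
}
#endif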
4617
4618#ifndef E1K_WITH_TXD_CACHE
4619/**
4620 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4621 * frame.
4622 *
4623  * We construct the frame in the fallback buffer first and then copy it to the SG
4624 * buffer before passing it down to the network driver code.
4625 *
4626 * @returns true if the frame should be transmitted, false if not.
4627 *
 * @param   pDevIns            The device instance.
4628  * @param   pThis              The device state structure.
4629 * @param pDesc Pointer to the descriptor to transmit.
4630 * @param cbFragment Length of descriptor's buffer.
4631 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4632 * @thread E1000_TX
4633 */
4634 static bool e1kFallbackAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4635 {
4636     PPDMSCATTERGATHER pTxSg = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC)->CTX_SUFF(pTxSg);
4637 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4638 Assert(pDesc->data.cmd.fTSE);
4639 Assert(!e1kXmitIsGsoBuf(pTxSg));
4640
4641 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4642 Assert(u16MaxPktLen != 0);
4643 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4644
4645 /*
4646 * Carve out segments.
4647 */
4648 do
4649 {
4650 /* Calculate how many bytes we have left in this TCP segment */
4651 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4652 if (cb > cbFragment)
4653 {
4654 /* This descriptor fits completely into current segment */
4655 cb = cbFragment;
4656 e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4657 }
4658 else
4659 {
4660 e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4661 /*
4662 * Rewind the packet tail pointer to the beginning of payload,
4663 * so we continue writing right beyond the header.
4664 */
4665 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4666 }
4667
4668 pDesc->data.u64BufAddr += cb;
4669 cbFragment -= cb;
4670 } while (cbFragment > 0);
4671
4672 if (pDesc->data.cmd.fEOP)
4673 {
4674 /* End of packet, next segment will contain header. */
4675 if (pThis->u32PayRemain != 0)
4676 E1K_INC_CNT32(TSCTFC);
4677 pThis->u16TxPktLen = 0;
4678 e1kXmitFreeBuf(pThis, PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC));
4679 }
4680
4681 return false;
4682}
4683#else /* E1K_WITH_TXD_CACHE */
4684/**
4685 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4686 * frame.
4687 *
4688  * We construct the frame in the fallback buffer first and then copy it to the SG
4689 * buffer before passing it down to the network driver code.
4690 *
4691  * @returns VBox status code.
4692 *
4693 * @param pDevIns The device instance.
4694 * @param pThis The device state structure.
4695 * @param pDesc Pointer to the descriptor to transmit.
4697 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4698 * @thread E1000_TX
4699 */
4700static int e1kFallbackAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, bool fOnWorkerThread)
4701{
4702#ifdef VBOX_STRICT
4703 PPDMSCATTERGATHER pTxSg = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC)->CTX_SUFF(pTxSg);
4704 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4705 Assert(pDesc->data.cmd.fTSE);
4706 Assert(!e1kXmitIsGsoBuf(pTxSg));
4707#endif
4708
4709 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4710     /* We cannot produce empty packets, so we ignore all TX descriptors (see @bugref{9571}) */
4711 if (u16MaxPktLen == 0)
4712 return VINF_SUCCESS;
4713
4714 /*
4715 * Carve out segments.
4716 */
4717 int rc = VINF_SUCCESS;
4718 do
4719 {
4720 /* Calculate how many bytes we have left in this TCP segment */
4721 uint16_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4722 if (cb > pDesc->data.cmd.u20DTALEN)
4723 {
4724 /* This descriptor fits completely into current segment */
4725             cb = (uint16_t)pDesc->data.cmd.u20DTALEN; /* u20DTALEN at this point is guaranteed to fit into 16 bits. */
4726 rc = e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4727 }
4728 else
4729 {
4730 rc = e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4731 /*
4732 * Rewind the packet tail pointer to the beginning of payload,
4733 * so we continue writing right beyond the header.
4734 */
4735 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4736 }
4737
4738 pDesc->data.u64BufAddr += cb;
4739 pDesc->data.cmd.u20DTALEN -= cb;
4740 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4741
4742 if (pDesc->data.cmd.fEOP)
4743 {
4744 /* End of packet, next segment will contain header. */
4745 if (pThis->u32PayRemain != 0)
4746 E1K_INC_CNT32(TSCTFC);
4747 pThis->u16TxPktLen = 0;
4748 e1kXmitFreeBuf(pThis, PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC));
4749 }
4750
4751 return VINF_SUCCESS; /// @todo consider rc;
4752}
4753#endif /* E1K_WITH_TXD_CACHE */
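
/*
 * For reference: the fallback TSE path above carves the accumulated payload
 * into segments of at most MSS bytes, each preceded by a copy of the protocol
 * header. The sketch below summarizes that loop under simplified assumptions
 * (no buffer handling, checksums or DMA); the helper name is hypothetical and
 * the block is not compiled.
 */
#if 0 /* illustrative sketch, not compiled */
static void e1kSketchCarveTse(uint32_t cbPayload, uint16_t cbMss, uint8_t cbHdr)
{
    while (cbPayload > 0)
    {
        uint32_t const cbSeg = RT_MIN(cbPayload, (uint32_t)cbMss);
        /* A real implementation copies cbHdr header bytes followed by cbSeg
           payload bytes into the frame buffer and transmits the result. */
        cbPayload -= cbSeg;
    }
    RT_NOREF(cbHdr);
}
#endif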
4754
4755
4756/**
4757 * Add descriptor's buffer to transmit frame.
4758 *
4759 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4760 * TSE frames we cannot handle as GSO.
4761 *
4762 * @returns true on success, false on failure.
4763 *
4764 * @param pDevIns The device instance.
4765 * @param pThisCC The current context instance data.
4766 * @param pThis The device state structure.
4767 * @param PhysAddr The physical address of the descriptor buffer.
4768 * @param cbFragment Length of descriptor's buffer.
4769 * @thread E1000_TX
4770 */
4771static bool e1kAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, RTGCPHYS PhysAddr, uint32_t cbFragment)
4772{
4773 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4774 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4775 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4776
4777 LogFlow(("%s e1kAddToFrame: ENTER cbFragment=%d u16TxPktLen=%d cbUsed=%d cbAvailable=%d fGSO=%s\n",
4778 pThis->szPrf, cbFragment, pThis->u16TxPktLen, pTxSg->cbUsed, pTxSg->cbAvailable,
4779 fGso ? "true" : "false"));
4780 PCPDMNETWORKGSO pGso = (PCPDMNETWORKGSO)pTxSg->pvUser;
4781 if (pGso)
4782 {
4783 if (RT_UNLIKELY(pGso->cbMaxSeg == 0))
4784 {
4785 E1kLog(("%s zero-sized fragments are not allowed\n", pThis->szPrf));
4786 return false;
4787 }
4788 if (RT_UNLIKELY(pGso->u8Type == PDMNETWORKGSOTYPE_IPV4_UDP))
4789 {
4790 E1kLog(("%s UDP fragmentation is no longer supported\n", pThis->szPrf));
4791 return false;
4792 }
4793 }
4794 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4795 {
4796 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4797 return false;
4798 }
4799 if (RT_UNLIKELY( cbNewPkt > pTxSg->cbAvailable ))
4800 {
4801 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
4802 return false;
4803 }
4804
4805 if (RT_LIKELY(pTxSg))
4806 {
4807 Assert(pTxSg->cSegs == 1);
4808 if (pTxSg->cbUsed != pThis->u16TxPktLen)
4809 E1kLog(("%s e1kAddToFrame: pTxSg->cbUsed=%d(0x%x) != u16TxPktLen=%d(0x%x)\n",
4810 pThis->szPrf, pTxSg->cbUsed, pTxSg->cbUsed, pThis->u16TxPktLen, pThis->u16TxPktLen));
4811
4812 PDMDevHlpPCIPhysRead(pDevIns, PhysAddr, (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4813
4814 pTxSg->cbUsed = cbNewPkt;
4815 }
4816 pThis->u16TxPktLen = cbNewPkt;
4817
4818 return true;
4819}
4820
4821
4822/**
4823 * Write the descriptor back to guest memory and notify the guest.
4824 *
 * @param   pDevIns     The device instance.
4825  * @param   pThis       The device state structure.
4826  * @param   pDesc       Pointer to the descriptor that has been transmitted.
4827 * @param addr Physical address of the descriptor in guest memory.
4828 * @thread E1000_TX
4829 */
4830static void e1kDescReport(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4831{
4832 /*
4833 * We fake descriptor write-back bursting. Descriptors are written back as they are
4834 * processed.
4835 */
4836 /* Let's pretend we process descriptors. Write back with DD set. */
4837 /*
4838      * Prior to r71586 we tried to accommodate the case when write-back bursts
4839      * are enabled without actually implementing bursting by writing back all
4840      * descriptors, even the ones that do not have RS set. This caused kernel
4841      * panics with Linux SMP kernels, as the e1000 driver tried to free up the skb
4842      * associated with a written-back descriptor if it happened to be a context
4843      * descriptor, since context descriptors do not have an skb associated with them.
4844      * Starting from r71586 we write back only the descriptors with RS set,
4845      * which is a little bit different from what the real hardware does in
4846      * case there is a chain of data descriptors where some of them have RS set
4847      * and others do not. That is a very uncommon scenario, though.
4848 * We need to check RPS as well since some legacy drivers use it instead of
4849 * RS even with newer cards.
4850 */
4851 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4852 {
4853 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4854 e1kWriteBackDesc(pDevIns, pThis, pDesc, addr);
4855 if (pDesc->legacy.cmd.fEOP)
4856 {
4857//#ifdef E1K_USE_TX_TIMERS
4858 if (pThis->fTidEnabled && pDesc->legacy.cmd.fIDE)
4859 {
4860 E1K_INC_ISTAT_CNT(pThis->uStatTxIDE);
4861 //if (pThis->fIntRaised)
4862 //{
4863 // /* Interrupt is already pending, no need for timers */
4864 // ICR |= ICR_TXDW;
4865 //}
4866 //else {
4867                 /* Arm the timer to fire in TIDV usec (discard .024) */
4868 e1kArmTimer(pDevIns, pThis, pThis->hTIDTimer, TIDV);
4869# ifndef E1K_NO_TAD
4870 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4871 E1kLog2(("%s Checking if TAD timer is running\n",
4872 pThis->szPrf));
4873 if (TADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hTADTimer))
4874 e1kArmTimer(pDevIns, pThis, pThis->hTADTimer, TADV);
4875# endif /* E1K_NO_TAD */
4876 }
4877 else
4878 {
4879 if (pThis->fTidEnabled)
4880 {
4881 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4882 pThis->szPrf));
4883 /* Cancel both timers if armed and fire immediately. */
4884# ifndef E1K_NO_TAD
4885 PDMDevHlpTimerStop(pDevIns, pThis->hTADTimer);
4886# endif
4887 PDMDevHlpTimerStop(pDevIns, pThis->hTIDTimer);
4888 }
4889//#endif /* E1K_USE_TX_TIMERS */
4890 E1K_INC_ISTAT_CNT(pThis->uStatIntTx);
4891 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXDW);
4892//#ifdef E1K_USE_TX_TIMERS
4893 }
4894//#endif /* E1K_USE_TX_TIMERS */
4895 }
4896 }
4897 else
4898 {
4899 E1K_INC_ISTAT_CNT(pThis->uStatTxNoRS);
4900 }
4901}
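
/*
 * For reference: the write-back rule implemented in e1kDescReport above can be
 * reduced to the predicate sketched below -- the DD status bit is written back
 * only when the guest requested status reporting via the RS (or legacy RPS)
 * bit. The helper name is hypothetical and the block is not compiled.
 */
#if 0 /* illustrative sketch, not compiled */
static bool e1kSketchNeedsWriteBack(E1KTXDESC const *pDesc)
{
    return pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS;
}
#endif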
4902
4903#ifndef E1K_WITH_TXD_CACHE
4904
4905/**
4906 * Process Transmit Descriptor.
4907 *
4908 * E1000 supports three types of transmit descriptors:
4909 * - legacy data descriptors of older format (context-less).
4910 * - data the same as legacy but providing new offloading capabilities.
4911 * - context sets up the context for following data descriptors.
4912 *
4913 * @param pDevIns The device instance.
4914 * @param pThis The device state structure.
4915 * @param pThisCC The current context instance data.
4916 * @param pDesc Pointer to descriptor union.
4917 * @param addr Physical address of descriptor in guest memory.
4918 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4919 * @thread E1000_TX
4920 */
4921static int e1kXmitDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, E1KTXDESC *pDesc,
4922 RTGCPHYS addr, bool fOnWorkerThread)
4923{
4924 int rc = VINF_SUCCESS;
4925 uint32_t cbVTag = 0;
4926
4927 e1kPrintTDesc(pThis, pDesc, "vvv");
4928
4929//#ifdef E1K_USE_TX_TIMERS
4930 if (pThis->fTidEnabled)
4931 e1kCancelTimer(pDevIns, pThis, pThis->hTIDTimer);
4932//#endif /* E1K_USE_TX_TIMERS */
4933
4934 switch (e1kGetDescType(pDesc))
4935 {
4936 case E1K_DTYP_CONTEXT:
4937 if (pDesc->context.dw2.fTSE)
4938 {
4939 pThis->contextTSE = pDesc->context;
4940 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4941 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4942 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4943 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4944 }
4945 else
4946 {
4947 pThis->contextNormal = pDesc->context;
4948 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4949 }
4950 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4951 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4952 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4953 pDesc->context.ip.u8CSS,
4954 pDesc->context.ip.u8CSO,
4955 pDesc->context.ip.u16CSE,
4956 pDesc->context.tu.u8CSS,
4957 pDesc->context.tu.u8CSO,
4958 pDesc->context.tu.u16CSE));
4959 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4960             e1kDescReport(pDevIns, pThis, pDesc, addr);
4961 break;
4962
4963 case E1K_DTYP_DATA:
4964 {
4965 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4966 {
4967                 E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4968 /** @todo Same as legacy when !TSE. See below. */
4969 break;
4970 }
4971 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4972 &pThis->StatTxDescTSEData:
4973 &pThis->StatTxDescData);
4974 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4975 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4976
4977 /*
4978 * The last descriptor of non-TSE packet must contain VLE flag.
4979 * TSE packets have VLE flag in the first descriptor. The later
4980 * case is taken care of a bit later when cbVTag gets assigned.
4981 *
4982 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4983 */
4984 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4985 {
4986 pThis->fVTag = pDesc->data.cmd.fVLE;
4987 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4988 }
4989 /*
4990 * First fragment: Allocate new buffer and save the IXSM and TXSM
4991 * packet options as these are only valid in the first fragment.
4992 */
4993 if (pThis->u16TxPktLen == 0)
4994 {
4995 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4996 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4997 E1kLog2(("%s Saving checksum flags:%s%s; \n", pThis->szPrf,
4998 pThis->fIPcsum ? " IP" : "",
4999 pThis->fTCPcsum ? " TCP/UDP" : ""));
5000 if (pDesc->data.cmd.fTSE)
5001 {
5002 /* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
5003 pThis->fVTag = pDesc->data.cmd.fVLE;
5004 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5005 cbVTag = pThis->fVTag ? 4 : 0;
5006 }
5007 else if (pDesc->data.cmd.fEOP)
5008 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
5009 else
5010 cbVTag = 4;
5011 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
5012 if (e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE))
5013 rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
5014 true /*fExactSize*/, true /*fGso*/);
5015 else if (pDesc->data.cmd.fTSE)
5016                     rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
5017 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
5018 else
5019 rc = e1kXmitAllocBuf(pThis, pThisCC, pDesc->data.cmd.u20DTALEN + cbVTag,
5020 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
5021
5022 /**
5023                  * @todo Perhaps it is not that simple for GSO packets! We may
5024 * need to unwind some changes.
5025 */
5026 if (RT_FAILURE(rc))
5027 {
5028 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5029 break;
5030 }
5031             /** @todo Is there any way to indicate errors other than collisions? Like
5032 * VERR_NET_DOWN. */
5033 }
5034
5035 /*
5036 * Add the descriptor data to the frame. If the frame is complete,
5037 * transmit it and reset the u16TxPktLen field.
5038 */
5039 if (e1kXmitIsGsoBuf(pThisCC->CTX_SUFF(pTxSg)))
5040 {
5041 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
5042 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5043 if (pDesc->data.cmd.fEOP)
5044 {
5045 if ( fRc
5046 && pThisCC->CTX_SUFF(pTxSg)
5047 && pThisCC->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
5048 {
5049 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5050 E1K_INC_CNT32(TSCTC);
5051 }
5052 else
5053 {
5054 if (fRc)
5055 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
5056 pThisCC->CTX_SUFF(pTxSg), pThisCC->CTX_SUFF(pTxSg) ? pThisCC->CTX_SUFF(pTxSg)->cbUsed : 0,
5057 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
5058                     e1kXmitFreeBuf(pThis, pThisCC);
5059 E1K_INC_CNT32(TSCTFC);
5060 }
5061 pThis->u16TxPktLen = 0;
5062 }
5063 }
5064 else if (!pDesc->data.cmd.fTSE)
5065 {
5066 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
5067 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5068 if (pDesc->data.cmd.fEOP)
5069 {
5070 if (fRc && pThisCC->CTX_SUFF(pTxSg))
5071 {
5072 Assert(pThisCC->CTX_SUFF(pTxSg)->cSegs == 1);
5073 if (pThis->fIPcsum)
5074 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5075 pThis->contextNormal.ip.u8CSO,
5076 pThis->contextNormal.ip.u8CSS,
5077 pThis->contextNormal.ip.u16CSE);
5078 if (pThis->fTCPcsum)
5079 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5080 pThis->contextNormal.tu.u8CSO,
5081 pThis->contextNormal.tu.u8CSS,
5082 pThis->contextNormal.tu.u16CSE,
5083 !pThis->contextNormal.dw2.fTCP);
5084 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5085 }
5086 else
5087                     e1kXmitFreeBuf(pThis, pThisCC);
5088 pThis->u16TxPktLen = 0;
5089 }
5090 }
5091 else
5092 {
5093 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
5094 e1kFallbackAddToFrame(pDevIns, pThis, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
5095 }
5096
5097             e1kDescReport(pDevIns, pThis, pDesc, addr);
5098 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5099 break;
5100 }
5101
5102 case E1K_DTYP_LEGACY:
5103 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
5104 {
5105 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
5106 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
5107 break;
5108 }
5109 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
5110 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5111
5112 /* First fragment: allocate new buffer. */
5113 if (pThis->u16TxPktLen == 0)
5114 {
5115 if (pDesc->legacy.cmd.fEOP)
5116 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
5117 else
5118 cbVTag = 4;
5119 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
5120 /** @todo reset status bits? */
5121 rc = e1kXmitAllocBuf(pThis, pThisCC, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
5122 if (RT_FAILURE(rc))
5123 {
5124 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5125 break;
5126 }
5127
5128             /** @todo Is there any way to indicate errors other than collisions? Like
5129 * VERR_NET_DOWN. */
5130 }
5131
5132 /* Add fragment to frame. */
5133 if (e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
5134 {
5135 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
5136
5137 /* Last fragment: Transmit and reset the packet storage counter. */
5138 if (pDesc->legacy.cmd.fEOP)
5139 {
5140 pThis->fVTag = pDesc->legacy.cmd.fVLE;
5141 pThis->u16VTagTCI = pDesc->legacy.dw3.u16Special;
5142 /** @todo Offload processing goes here. */
5143 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5144 pThis->u16TxPktLen = 0;
5145 }
5146 }
5147 /* Last fragment + failure: free the buffer and reset the storage counter. */
5148 else if (pDesc->legacy.cmd.fEOP)
5149 {
5150                 e1kXmitFreeBuf(pThis, pThisCC);
5151 pThis->u16TxPktLen = 0;
5152 }
5153
5154             e1kDescReport(pDevIns, pThis, pDesc, addr);
5155 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5156 break;
5157
5158 default:
5159 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
5160 pThis->szPrf, e1kGetDescType(pDesc)));
5161 break;
5162 }
5163
5164 return rc;
5165}
5166
5167#else /* E1K_WITH_TXD_CACHE */
5168
5169/**
5170 * Process Transmit Descriptor.
5171 *
5172 * E1000 supports three types of transmit descriptors:
5173 * - legacy data descriptors of older format (context-less).
5174 * - data the same as legacy but providing new offloading capabilities.
5175 * - context sets up the context for following data descriptors.
5176 *
5177 * @param pDevIns The device instance.
5178 * @param pThis The device state structure.
5179 * @param pThisCC The current context instance data.
5180 * @param pDesc Pointer to descriptor union.
5181 * @param addr Physical address of descriptor in guest memory.
5182 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
5184 * @thread E1000_TX
5185 */
5186static int e1kXmitDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, E1KTXDESC *pDesc,
5187 RTGCPHYS addr, bool fOnWorkerThread)
5188{
5189 int rc = VINF_SUCCESS;
5190
5191 e1kPrintTDesc(pThis, pDesc, "vvv");
5192
5193//#ifdef E1K_USE_TX_TIMERS
5194 if (pThis->fTidEnabled)
5195 PDMDevHlpTimerStop(pDevIns, pThis->hTIDTimer);
5196//#endif /* E1K_USE_TX_TIMERS */
5197
5198 switch (e1kGetDescType(pDesc))
5199 {
5200 case E1K_DTYP_CONTEXT:
5201             /* The caller has already updated the context */
5202 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
5203 e1kDescReport(pDevIns, pThis, pDesc, addr);
5204 break;
5205
5206 case E1K_DTYP_DATA:
5207 {
5208 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
5209 &pThis->StatTxDescTSEData:
5210 &pThis->StatTxDescData);
5211 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
5212 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5213 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
5214 {
5215 E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
5216 if (pDesc->data.cmd.fEOP)
5217 {
5218 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5219 pThis->u16TxPktLen = 0;
5220 }
5221 }
5222 else
5223 {
5224 /*
5225 * Add the descriptor data to the frame. If the frame is complete,
5226 * transmit it and reset the u16TxPktLen field.
5227 */
5228 if (e1kXmitIsGsoBuf(pThisCC->CTX_SUFF(pTxSg)))
5229 {
5230 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
5231 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5232 if (pDesc->data.cmd.fEOP)
5233 {
5234 if ( fRc
5235 && pThisCC->CTX_SUFF(pTxSg)
5236 && pThisCC->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
5237 {
5238 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5239 E1K_INC_CNT32(TSCTC);
5240 }
5241 else
5242 {
5243 if (fRc)
5244 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
5245 pThisCC->CTX_SUFF(pTxSg), pThisCC->CTX_SUFF(pTxSg) ? pThisCC->CTX_SUFF(pTxSg)->cbUsed : 0,
5246 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
5247 e1kXmitFreeBuf(pThis, pThisCC);
5248 E1K_INC_CNT32(TSCTFC);
5249 }
5250 pThis->u16TxPktLen = 0;
5251 }
5252 }
5253 else if (!pDesc->data.cmd.fTSE)
5254 {
5255 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
5256 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5257 if (pDesc->data.cmd.fEOP)
5258 {
5259 if (fRc && pThisCC->CTX_SUFF(pTxSg))
5260 {
5261 Assert(pThisCC->CTX_SUFF(pTxSg)->cSegs == 1);
5262 if (pThis->fIPcsum)
5263 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5264 pThis->contextNormal.ip.u8CSO,
5265 pThis->contextNormal.ip.u8CSS,
5266 pThis->contextNormal.ip.u16CSE);
5267 if (pThis->fTCPcsum)
5268 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5269 pThis->contextNormal.tu.u8CSO,
5270 pThis->contextNormal.tu.u8CSS,
5271 pThis->contextNormal.tu.u16CSE,
5272 !pThis->contextNormal.dw2.fTCP);
5273 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5274 }
5275 else
5276 e1kXmitFreeBuf(pThis, pThisCC);
5277 pThis->u16TxPktLen = 0;
5278 }
5279 }
5280 else
5281 {
5282 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
5283 rc = e1kFallbackAddToFrame(pDevIns, pThis, pDesc, fOnWorkerThread);
5284 }
5285 }
5286 e1kDescReport(pDevIns, pThis, pDesc, addr);
5287 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5288 break;
5289 }
5290
5291 case E1K_DTYP_LEGACY:
5292 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
5293 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5294 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
5295 {
5296 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
5297 if (pDesc->data.cmd.fEOP)
5298 {
5299 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5300 pThis->u16TxPktLen = 0;
5301 }
5302 }
5303 else
5304 {
5305 /* Add fragment to frame. */
5306 if (e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
5307 {
5308 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
5309
5310 /* Last fragment: Transmit and reset the packet storage counter. */
5311 if (pDesc->legacy.cmd.fEOP)
5312 {
5313 if (pDesc->legacy.cmd.fIC)
5314 {
5315 e1kInsertChecksum(pThis,
5316 (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
5317 pThis->u16TxPktLen,
5318 pDesc->legacy.cmd.u8CSO,
5319 pDesc->legacy.dw3.u8CSS,
5320 0);
5321 }
5322 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5323 pThis->u16TxPktLen = 0;
5324 }
5325 }
5326 /* Last fragment + failure: free the buffer and reset the storage counter. */
5327 else if (pDesc->legacy.cmd.fEOP)
5328 {
5329 e1kXmitFreeBuf(pThis, pThisCC);
5330 pThis->u16TxPktLen = 0;
5331 }
5332 }
5333 e1kDescReport(pDevIns, pThis, pDesc, addr);
5334 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5335 break;
5336
5337 default:
5338 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
5339 pThis->szPrf, e1kGetDescType(pDesc)));
5340 break;
5341 }
5342
5343 return rc;
5344}
5345
5346DECLINLINE(bool) e1kUpdateTxContext(PE1KSTATE pThis, E1KTXDESC *pDesc)
5347{
5348 if (pDesc->context.dw2.fTSE)
5349 {
5350 if (!e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context))
5351 {
5352 pThis->contextTSE.dw2.u4DTYP = E1K_DTYP_INVALID;
5353 return false;
5354 }
5355 pThis->contextTSE = pDesc->context;
5356 uint32_t cbMaxSegmentSize = pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + 4; /*VTAG*/
5357 if (RT_UNLIKELY(cbMaxSegmentSize > E1K_MAX_TX_PKT_SIZE))
5358 {
5359 pThis->contextTSE.dw3.u16MSS = E1K_MAX_TX_PKT_SIZE - pThis->contextTSE.dw3.u8HDRLEN - 4; /*VTAG*/
5360 LogRelMax(10, ("%s: Transmit packet is too large: %u > %u(max). Adjusted MSS to %u.\n",
5361 pThis->szPrf, cbMaxSegmentSize, E1K_MAX_TX_PKT_SIZE, pThis->contextTSE.dw3.u16MSS));
5362 }
5363 pThis->u32PayRemain = pThis->contextTSE.dw2.u20PAYLEN;
5364 pThis->u16HdrRemain = pThis->contextTSE.dw3.u8HDRLEN;
5365 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
5366 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
5367 }
5368 else
5369 {
5370 pThis->contextNormal = pDesc->context;
5371 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
5372 }
5373 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
5374 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
5375 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
5376 pDesc->context.ip.u8CSS,
5377 pDesc->context.ip.u8CSO,
5378 pDesc->context.ip.u16CSE,
5379 pDesc->context.tu.u8CSS,
5380 pDesc->context.tu.u8CSO,
5381 pDesc->context.tu.u16CSE));
5382 return true; /* Consider returning false for invalid descriptors */
5383}
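
/*
 * For reference: e1kUpdateTxContext above clamps the MSS so that a single
 * segment (MSS + header + VLAN tag) never exceeds the maximum frame size the
 * device can transmit. A minimal sketch of that clamp follows; the helper
 * name is hypothetical and the block is not compiled.
 */
#if 0 /* illustrative sketch, not compiled */
static uint16_t e1kSketchClampMss(uint16_t u16Mss, uint8_t u8HdrLen, uint32_t cbMaxPkt)
{
    if ((uint32_t)u16Mss + u8HdrLen + 4 /* VLAN tag */ > cbMaxPkt)
        u16Mss = (uint16_t)(cbMaxPkt - u8HdrLen - 4);
    return u16Mss;
}
#endif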
5384
5385enum E1kPacketType
5386{
5387 E1K_PACKET_NONE = 0,
5388 E1K_PACKET_LEGACY,
5389 E1K_PACKET_NORMAL,
5390 E1K_PACKET_TSE
5391};
5392
5393 static bool e1kLocateTxPacket(PE1KSTATE pThis, PE1KTXDC pTxdc)
5394{
5395 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
5396 pThis->szPrf, pThis->cbTxAlloc));
5397 /* Check if we have located the packet already. */
5398 if (pThis->cbTxAlloc)
5399 {
5400 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
5401 pThis->szPrf, pThis->cbTxAlloc));
5402 return true;
5403 }
5404
5405 pThis->fGSO = false;
5406 pThis->fVTag = false;
5407 pThis->fIPcsum = false;
5408 pThis->fTCPcsum = false;
5409 pThis->u16TxPktLen = 0;
5410
5411 enum E1kPacketType packetType = E1K_PACKET_NONE;
5412 enum E1kPacketType expectedPacketType = E1K_PACKET_NONE;
5413 /*
5414 * Valid packets start with 1 or 0 context descriptors, followed by 1 or
5415 * more data descriptors of the same type: legacy, normal or TSE. Note
5416      * that legacy descriptors belong to neither the normal nor the segmentation
5417      * context, rendering the sequence (context_descriptor, legacy_descriptor)
5418      * invalid, but the context descriptor will still be applied and the legacy
5419      * descriptor will be treated as the beginning of the next packet.
5420 */
5421 bool fInvalidPacket = false;
5422 bool fTSE = false;
5423 uint32_t cbPacket = 0;
5424
5425 /* Since we process one packet at a time we will only mark current packet's descriptors as valid */
5426 memset(pThis->afTxDValid, 0, sizeof(pThis->afTxDValid));
5427 for (int i = pThis->iTxDCurrent; i < pThis->nTxDFetched; ++i)
5428 {
5429 E1KTXDESC *pDesc = &pThis->aTxDescriptors[i];
5430
5431 switch (e1kGetDescType(pDesc))
5432 {
5433 case E1K_DTYP_CONTEXT:
5434 /* There can be only one context per packet. Each context descriptor starts a new packet. */
5435 if (packetType != E1K_PACKET_NONE)
5436 {
5437 fInvalidPacket = true;
5438 break;
5439 }
5440 packetType = (pDesc->context.dw2.fTSE) ? E1K_PACKET_TSE : E1K_PACKET_NORMAL;
5441 if (cbPacket == 0)
5442 pThis->afTxDValid[i] = e1kUpdateTxContext(pThis, pDesc);
5443 else
5444 E1kLog(("%s e1kLocateTxPacket: ignoring a context descriptor in the middle of a packet, cbPacket=%d\n",
5445 pThis->szPrf, cbPacket));
5446 continue;
5447 case E1K_DTYP_LEGACY:
5448 if (packetType != E1K_PACKET_NONE && packetType != E1K_PACKET_LEGACY)
5449 {
5450 fInvalidPacket = true;
5451 break;
5452 }
5453 packetType = E1K_PACKET_LEGACY;
5454 /* Skip invalid descriptors. */
5455 if (cbPacket > 0 && (pThis->fGSO || fTSE))
5456 {
5457 E1kLog(("%s e1kLocateTxPacket: ignoring a legacy descriptor in the segmentation context, cbPacket=%d\n",
5458 pThis->szPrf, cbPacket));
5459 continue;
5460 }
5461 pThis->afTxDValid[i] = true; /* Passed all checks, process it */
5462
5463 /* Skip empty descriptors. */
5464 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
5465 break;
5466 cbPacket += pDesc->legacy.cmd.u16Length;
5467 pThis->fGSO = false;
5468 break;
5469 case E1K_DTYP_DATA:
5470 expectedPacketType = pDesc->data.cmd.fTSE ? E1K_PACKET_TSE : E1K_PACKET_NORMAL;
5471 if (packetType != E1K_PACKET_NONE && packetType != expectedPacketType)
5472 {
5473 fInvalidPacket = true;
5474 break;
5475 }
5476 /* Skip invalid descriptors. */
5477 if (pDesc->data.cmd.fTSE)
5478 {
5479 if (pThis->contextTSE.dw2.u4DTYP == E1K_DTYP_INVALID)
5480 {
5481 E1kLog(("%s e1kLocateTxPacket: ignoring TSE descriptor in invalid segmentation context, cbPacket=%d\n",
5482 pThis->szPrf, cbPacket));
5483 continue;
5484 }
5485 }
5486 else /* !TSE */
5487 {
5488 if (pThis->contextNormal.dw2.u4DTYP == E1K_DTYP_INVALID)
5489 {
5490 E1kLog(("%s e1kLocateTxPacket: ignoring non-TSE descriptor in invalid normal context, cbPacket=%d\n",
5491 pThis->szPrf, cbPacket));
5492 continue;
5493 }
5494 }
5495 if (cbPacket > 0 && (bool)pDesc->data.cmd.fTSE != fTSE)
5496 {
5497 E1kLog(("%s e1kLocateTxPacket: ignoring %sTSE descriptor in the %ssegmentation context, cbPacket=%d\n",
5498 pThis->szPrf, pDesc->data.cmd.fTSE ? "" : "non-", fTSE ? "" : "non-", cbPacket));
5499 continue;
5500 }
5501 pThis->afTxDValid[i] = true; /* Passed all checks, process it */
5502
5503 /* Skip empty descriptors. */
5504 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
5505 break;
5506 if (cbPacket == 0)
5507 {
5508 /*
5509 * The first fragment: save IXSM and TXSM options
5510 * as these are only valid in the first fragment.
5511 */
5512 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
5513 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
5514 fTSE = pDesc->data.cmd.fTSE;
5515 /*
5516 * TSE descriptors have VLE bit properly set in
5517 * the first fragment.
5518 */
5519 if (fTSE)
5520 {
5521 pThis->fVTag = pDesc->data.cmd.fVLE;
5522 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5523 }
5524 pThis->fGSO = e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE);
5525 }
5526 cbPacket += pDesc->data.cmd.u20DTALEN;
5527 break;
5528 default:
5529 AssertMsgFailed(("Impossible descriptor type!"));
5530 continue;
5531 }
5532 if (fInvalidPacket)
5533 {
5534 for (int index = pThis->iTxDCurrent; index < i; ++index)
5535 pThis->afTxDValid[index] = false; /* Make sure all descriptors for this packet are skipped by processing */
5536 LogFlow(("%s e1kLocateTxPacket: marked %d descriptors as invalid\n", pThis->szPrf, i - pThis->iTxDCurrent));
5537 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d cbPacket=%d%s%s\n",
5538 pThis->szPrf, pThis->cbTxAlloc, cbPacket,
5539 pThis->fGSO ? " GSO" : "", fTSE ? " TSE" : ""));
5540 pTxdc->nextPacket = i;
5541 return true;
5542 }
5543 if (pDesc->legacy.cmd.fEOP)
5544 {
5545 /*
5546 * Non-TSE descriptors have VLE bit properly set in
5547 * the last fragment.
5548 */
5549 if (!fTSE)
5550 {
5551 pThis->fVTag = pDesc->data.cmd.fVLE;
5552 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5553 }
5554 /*
5555 * Compute the required buffer size. If we cannot do GSO but still
5556 * have to do segmentation we allocate the first segment only.
5557 */
5558 pThis->cbTxAlloc = (!fTSE || pThis->fGSO) ?
5559 cbPacket :
5560 RT_MIN(cbPacket, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN);
5561 /* Do not add VLAN tags to empty packets. */
5562 if (pThis->fVTag && pThis->cbTxAlloc > 0)
5563 pThis->cbTxAlloc += 4;
5564 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d cbPacket=%d%s%s\n",
5565 pThis->szPrf, pThis->cbTxAlloc, cbPacket,
5566 pThis->fGSO ? " GSO" : "", fTSE ? " TSE" : ""));
5567 pTxdc->nextPacket = i + 1;
5568 return true;
5569 }
5570 }
5571
5572 if (cbPacket == 0 && pThis->nTxDFetched - pThis->iTxDCurrent > 0)
5573 {
5574 /* All descriptors were empty, we need to process them as a dummy packet */
5575 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
5576 pThis->szPrf, pThis->cbTxAlloc));
5577 pTxdc->nextPacket = pThis->nTxDFetched;
5578 return true;
5579 }
5580 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d cbPacket=%d\n",
5581 pThis->szPrf, pThis->cbTxAlloc, cbPacket));
5582 return false;
5583}
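
/*
 * For reference, a well-formed packet as recognized by e1kLocateTxPacket above
 * looks, schematically, like one of:
 *   [CONTEXT(TSE)]  DATA(TSE) ... DATA(TSE, EOP)    -- TSE packet
 *   [CONTEXT]       DATA      ... DATA(EOP)         -- normal packet
 *                   LEGACY    ... LEGACY(EOP)       -- legacy packet
 * Any other mix of descriptor kinds is treated as an invalid packet and its
 * descriptors are skipped, as implemented in the loop above.
 */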
5584
5585static int e1kXmitPacket(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread, PE1KTXDC pTxdc)
5586{
5587 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5588 int rc = VINF_SUCCESS;
5589
5590 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
5591 pThis->szPrf, pThis->iTxDCurrent, pThis->nTxDFetched));
5592
5593 while (pThis->iTxDCurrent < pTxdc->nextPacket && pThis->iTxDCurrent < pThis->nTxDFetched)
5594 {
5595 E1KTXDESC *pDesc = &pThis->aTxDescriptors[pThis->iTxDCurrent];
5596 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5597 pThis->szPrf, TDBAH, TDBAL + pTxdc->tdh * sizeof(E1KTXDESC), pTxdc->tdlen, pTxdc->tdh, pTxdc->tdt));
5598 if (!pThis->afTxDValid[pThis->iTxDCurrent])
5599 {
5600 e1kPrintTDesc(pThis, pDesc, "vvv");
5601 E1kLog(("%s e1kXmitDesc: skipping bad descriptor ^^^\n", pThis->szPrf));
5602 e1kDescReport(pDevIns, pThis, pDesc, e1kDescAddr(TDBAH, TDBAL, pTxdc->tdh));
5603 rc = VINF_SUCCESS;
5604 }
5605 else
5606 rc = e1kXmitDesc(pDevIns, pThis, pThisCC, pDesc, e1kDescAddr(TDBAH, TDBAL, pTxdc->tdh), fOnWorkerThread);
5607 if (RT_FAILURE(rc))
5608 break;
5609 if (++pTxdc->tdh * sizeof(E1KTXDESC) >= pTxdc->tdlen)
5610 pTxdc->tdh = 0;
5611 TDH = pTxdc->tdh; /* Sync the actual register and TXDC */
5612 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
5613 if (uLowThreshold != 0 && e1kGetTxLen(pTxdc) <= uLowThreshold)
5614 {
5615 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5616 pThis->szPrf, e1kGetTxLen(pTxdc), GET_BITS(TXDCTL, LWTHRESH)*8));
5617 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5618 }
5619 ++pThis->iTxDCurrent;
5620 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
5621 break;
5622 }
5623
5624 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
5625 pThis->szPrf, rc, pThis->iTxDCurrent, pThis->nTxDFetched));
5626 return rc;
5627}
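
/*
 * For reference: the head advance in e1kXmitPacket above follows standard
 * descriptor-ring arithmetic, sketched below with the ring length given in
 * bytes (TDLEN) as in the code. The helper name is hypothetical and the block
 * is not compiled.
 */
#if 0 /* illustrative sketch, not compiled */
static uint32_t e1kSketchAdvanceTdh(uint32_t tdh, uint32_t tdlen)
{
    if (++tdh * sizeof(E1KTXDESC) >= tdlen)
        tdh = 0;
    return tdh;
}
#endif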
5628
5629#endif /* E1K_WITH_TXD_CACHE */
5630#ifndef E1K_WITH_TXD_CACHE
5631
5632/**
5633 * Transmit pending descriptors.
5634 *
5635 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5636 *
5637 * @param pDevIns The device instance.
5638 * @param pThis The E1000 state.
5639 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5640 */
5641static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5642{
5643 int rc = VINF_SUCCESS;
5644 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5645
5646 /* Check if transmitter is enabled. */
5647 if (!(TCTL & TCTL_EN))
5648 return VINF_SUCCESS;
5649 /*
5650 * Grab the xmit lock of the driver as well as the E1K device state.
5651 */
5652 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5653 if (RT_LIKELY(rc == VINF_SUCCESS))
5654 {
5655         PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
5656 if (pDrv)
5657 {
5658 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5659 if (RT_FAILURE(rc))
5660 {
5661 e1kCsTxLeave(pThis);
5662 return rc;
5663 }
5664 }
5665 /*
5666 * Process all pending descriptors.
5667 * Note! Do not process descriptors in locked state
5668 */
5669 while (TDH != TDT && !pThis->fLocked)
5670 {
5671 E1KTXDESC desc;
5672 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5673 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
5674
5675 e1kLoadDesc(pDevIns, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
5676 rc = e1kXmitDesc(pDevIns, pThis, pThisCC, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5677 /* If we failed to transmit descriptor we will try it again later */
5678 if (RT_FAILURE(rc))
5679 break;
5680 if (++TDH * sizeof(desc) >= TDLEN)
5681 TDH = 0;
5682
5683 if (e1kGetTxLen(pThis) <= GET_BITS(TXDCTL, LWTHRESH)*8)
5684 {
5685 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5686 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5687 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5688 }
5689
5690 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5691 }
5692
5693 /// @todo uncomment: pThis->uStatIntTXQE++;
5694 /// @todo uncomment: e1kRaiseInterrupt(pDevIns, pThis, ICR_TXQE);
5695 /*
5696 * Release the lock.
5697 */
5698 if (pDrv)
5699 pDrv->pfnEndXmit(pDrv);
5700 e1kCsTxLeave(pThis);
5701 }
5702
5703 return rc;
5704}
5705
5706#else /* E1K_WITH_TXD_CACHE */
5707
5708static void e1kDumpTxDCache(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KTXDC pTxdc)
5709{
5710 unsigned i, cDescs = pTxdc->tdlen / sizeof(E1KTXDESC);
5711 uint32_t tdh = pTxdc->tdh;
5712 LogRel(("E1000: -- Transmit Descriptors (%d total) --\n", cDescs));
5713 for (i = 0; i < cDescs; ++i)
5714 {
5715 E1KTXDESC desc;
5716 PDMDevHlpPCIPhysRead(pDevIns , e1kDescAddr(TDBAH, TDBAL, i), &desc, sizeof(desc));
5717 if (i == tdh)
5718 LogRel(("E1000: >>> "));
5719 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5720 }
5721 LogRel(("E1000: -- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5722 pThis->iTxDCurrent, pTxdc->tdh, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE));
5723 if (tdh > pThis->iTxDCurrent)
5724 tdh -= pThis->iTxDCurrent;
5725 else
5726 tdh = cDescs + tdh - pThis->iTxDCurrent;
5727 for (i = 0; i < pThis->nTxDFetched; ++i)
5728 {
5729 if (i == pThis->iTxDCurrent)
5730 LogRel(("E1000: >>> "));
5731 if (cDescs)
5732 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
5733 else
5734 LogRel(("E1000: <lost>: %R[e1ktxd]\n", &pThis->aTxDescriptors[i]));
5735 }
5736}
5737
5738/**
5739 * Transmit pending descriptors.
5740 *
5741 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5742 *
5743 * @param pDevIns The device instance.
5744 * @param pThis The E1000 state.
5745 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5746 */
5747static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5748{
5749 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5750 int rc = VINF_SUCCESS;
5751
5752 /* Check if transmitter is enabled. */
5753 if (!(TCTL & TCTL_EN))
5754 return VINF_SUCCESS;
5755 /*
5756 * Grab the xmit lock of the driver as well as the E1K device state.
5757 */
5758 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
5759 if (pDrv)
5760 {
5761 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5762 if (RT_FAILURE(rc))
5763 return rc;
5764 }
5765
5766 /*
5767 * Process all pending descriptors.
5768 * Note! Do not process descriptors in locked state
5769 */
5770 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5771 if (RT_LIKELY(rc == VINF_SUCCESS && (TCTL & TCTL_EN)))
5772 {
5773 E1KTXDC txdc;
5774 bool fTxContextValid = e1kUpdateTxDContext(pDevIns, pThis, &txdc);
5775 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5776 /*
5777 * fIncomplete is set whenever we try to fetch additional descriptors
5778          * for an incomplete packet. If we fail to locate a complete packet on
5779          * the next iteration, we need to reset the cache or we risk getting
5780 * stuck in this loop forever.
5781 */
5782 bool fIncomplete = false;
5783 while (fTxContextValid && !pThis->fLocked && e1kTxDLazyLoad(pDevIns, pThis, &txdc))
5784 {
5785 while (e1kLocateTxPacket(pThis, &txdc))
5786 {
5787 Log4(("%s e1kXmitPending: Located packet at %d. Next packet at %d\n",
5788 pThis->szPrf, pThis->iTxDCurrent, txdc.nextPacket));
5789 fIncomplete = false;
5790 /* Found a complete packet, allocate it. */
5791 rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->fGSO);
5792 /* If we're out of bandwidth we'll come back later. */
5793 if (RT_FAILURE(rc))
5794 goto out;
5795 /* Copy the packet to allocated buffer and send it. */
5796 rc = e1kXmitPacket(pDevIns, pThis, fOnWorkerThread, &txdc);
5797 /* If we're out of bandwidth we'll come back later. */
5798 if (RT_FAILURE(rc))
5799 goto out;
5800 }
5801 uint8_t u8Remain = pThis->nTxDFetched - pThis->iTxDCurrent;
5802 if (RT_UNLIKELY(fIncomplete))
5803 {
5804 static bool fTxDCacheDumped = false;
5805 /*
5806 * The descriptor cache is full, but we were unable to find
5807 * a complete packet in it. Drop the cache and hope that
5808                  * the guest driver can recover from the network card error.
5809 */
5810 LogRel(("%s: No complete packets in%s TxD cache! "
5811 "Fetched=%d, current=%d, TX len=%d.\n",
5812 pThis->szPrf,
5813 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5814 pThis->nTxDFetched, pThis->iTxDCurrent,
5815 e1kGetTxLen(&txdc)));
5816 if (!fTxDCacheDumped)
5817 {
5818 fTxDCacheDumped = true;
5819 e1kDumpTxDCache(pDevIns, pThis, &txdc);
5820 }
5821 pThis->iTxDCurrent = pThis->nTxDFetched = 0;
5822 /*
5823 * Returning an error at this point means Guru in R0
5824 * (see @bugref{6428}).
5825 */
5826# ifdef IN_RING3
5827 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5828# else /* !IN_RING3 */
5829 rc = VINF_IOM_R3_MMIO_WRITE;
5830# endif /* !IN_RING3 */
5831 goto out;
5832 }
5833 if (u8Remain > 0)
5834 {
5835 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5836 "%d more are available\n",
5837 pThis->szPrf, pThis->iTxDCurrent, u8Remain,
5838 e1kGetTxLen(&txdc) - u8Remain));
5839
5840 /*
5841 * A packet was partially fetched. Move incomplete packet to
5842 * the beginning of cache buffer, then load more descriptors.
5843 */
5844 memmove(pThis->aTxDescriptors,
5845 &pThis->aTxDescriptors[pThis->iTxDCurrent],
5846 u8Remain * sizeof(E1KTXDESC));
5847 pThis->iTxDCurrent = 0;
5848 pThis->nTxDFetched = u8Remain;
5849 e1kTxDLoadMore(pDevIns, pThis, &txdc);
5850 fIncomplete = true;
5851 }
5852 else
5853 pThis->nTxDFetched = 0;
5854 pThis->iTxDCurrent = 0;
5855 }
5856 if (!pThis->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5857 {
5858 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5859 pThis->szPrf));
5860 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5861 }
5862out:
5863 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5864
5865 /// @todo uncomment: pThis->uStatIntTXQE++;
5866 /// @todo uncomment: e1kRaiseInterrupt(pDevIns, pThis, ICR_TXQE);
5867
5868 e1kCsTxLeave(pThis);
5869 }
5870
5871
5872 /*
5873 * Release the lock.
5874 */
5875 if (pDrv)
5876 pDrv->pfnEndXmit(pDrv);
5877 return rc;
5878}
5879
5880#endif /* E1K_WITH_TXD_CACHE */
5881#ifdef IN_RING3
5882
5883/**
5884 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5885 */
5886static DECLCALLBACK(void) e1kR3NetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5887{
5888 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
5889 PE1KSTATE pThis = pThisCC->pShared;
5890 /* Resume suspended transmission */
5891 STATUS &= ~STATUS_TXOFF;
5892 e1kXmitPending(pThisCC->pDevInsR3, pThis, true /*fOnWorkerThread*/);
5893}
5894
5895/**
5896 * @callback_method_impl{FNPDMTASKDEV,
5897 * Executes e1kXmitPending at the behest of ring-0/raw-mode.}
5898 * @note Not executed on EMT.
5899 */
5900static DECLCALLBACK(void) e1kR3TxTaskCallback(PPDMDEVINS pDevIns, void *pvUser)
5901{
5902 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
5903 E1kLog2(("%s e1kR3TxTaskCallback:\n", pThis->szPrf));
5904
5905 int rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
5906 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN || rc == VERR_NET_DOWN, ("%Rrc\n", rc));
5907
5908 RT_NOREF(rc, pvUser);
5909}
5910
5911#endif /* IN_RING3 */
5912
5913/**
5914 * Write handler for Transmit Descriptor Tail register.
5915 *
5916 * @param pThis The device state structure.
5917 * @param offset Register offset in memory-mapped frame.
5918 * @param index Register index in register array.
5919 * @param value The value to store.
5921 * @thread EMT
5922 */
5923static int e1kRegWriteTDT(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5924{
5925 int rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
5926
5927 /* All descriptors starting with head and not including tail belong to us. */
5928 /* Process them. */
5929 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5930 pThis->szPrf, TDBAL, TDBAH, TDLEN, TDH, TDT));
5931
5932 /* Compose a temporary TX context, breaking TX CS rule, for debugging purposes. */
5933 /* If we decide to transmit, the TX critical section will be entered later in e1kXmitPending(). */
5934 E1KTXDC txdc;
5935 txdc.tdlen = TDLEN;
5936 txdc.tdh = TDH;
5937 txdc.tdt = TDT;
5938 /* Ignore TDT writes when the link is down. */
5939 if (txdc.tdh != txdc.tdt && (STATUS & STATUS_LU))
5940 {
5941 Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", txdc.tdh, txdc.tdt, e1kGetTxLen(&txdc)));
5942 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5943 pThis->szPrf, e1kGetTxLen(&txdc)));
5944
5945 /* Transmit pending packets if possible, defer it if we cannot do it
5946 in the current context. */
5947#ifdef E1K_TX_DELAY
5948 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5949 if (RT_LIKELY(rc == VINF_SUCCESS))
5950 {
5951 if (!PDMDevInsTimerIsActive(pDevIns, pThis->hTXDTimer))
5952 {
5953# ifdef E1K_INT_STATS
5954 pThis->u64ArmedAt = RTTimeNanoTS();
5955# endif
5956 e1kArmTimer(pDevIns, pThis, pThis->hTXDTimer, E1K_TX_DELAY);
5957 }
5958 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayed);
5959 e1kCsTxLeave(pThis);
5960 return rc;
5961 }
5962 /* We failed to enter the TX critical section -- transmit as usual. */
5963#endif /* E1K_TX_DELAY */
5964#ifndef IN_RING3
5965 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5966 if (!pThisCC->CTX_SUFF(pDrv))
5967 {
5968 PDMDevHlpTaskTrigger(pDevIns, pThis->hTxTask);
5969 rc = VINF_SUCCESS;
5970 }
5971 else
5972#endif
5973 {
5974 rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
5975 if ( rc == VERR_TRY_AGAIN
5976 || rc == VERR_NET_DOWN)
5977 rc = VINF_SUCCESS;
5978#ifndef IN_RING3
5979 else if (rc == VERR_SEM_BUSY)
5980 rc = VINF_IOM_R3_MMIO_WRITE;
5981#endif
5982 AssertRC(rc);
5983 }
5984 }
5985
5986 return rc;
5987}
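
/*
 * For reference: "all descriptors starting with head and not including tail
 * belong to us" translates into the ring arithmetic sketched below, with
 * cDescs being the ring size in descriptors. This is a simplified sketch of
 * the idea only (the actual length is computed by e1kGetTxLen()); the helper
 * name is hypothetical and the block is not compiled.
 */
#if 0 /* illustrative sketch, not compiled */
static uint32_t e1kSketchTxDescsOwned(uint32_t tdh, uint32_t tdt, uint32_t cDescs)
{
    return tdt >= tdh ? tdt - tdh : cDescs - tdh + tdt;
}
#endif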
5988
5989/**
5990 * Write handler for Multicast Table Array registers.
5991 *
5992 * @param pThis The device state structure.
5993 * @param offset Register offset in memory-mapped frame.
5994 * @param index Register index in register array.
5995 * @param value The value to store.
5996 * @thread EMT
5997 */
5998static int e1kRegWriteMTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5999{
6000 RT_NOREF_PV(pDevIns);
6001 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
6002 pThis->auMTA[(offset - g_aE1kRegMap[index].offset) / sizeof(pThis->auMTA[0])] = value;
6003
6004 return VINF_SUCCESS;
6005}
6006
6007/**
6008 * Read handler for Multicast Table Array registers.
6009 *
6010 * @returns VBox status code.
6011 *
6012 * @param pThis The device state structure.
6013 * @param offset Register offset in memory-mapped frame.
6014 * @param index Register index in register array.
6015 * @thread EMT
6016 */
6017static int e1kRegReadMTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
6018{
6019 RT_NOREF_PV(pDevIns);
6020 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
6021 *pu32Value = pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])];
6022
6023 return VINF_SUCCESS;
6024}
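
/*
 * For reference: the array register handlers (MTA, RA, VFTA) above and below
 * all map a register offset to an element index the same way, sketched here.
 * The helper name is hypothetical and the block is not compiled.
 */
#if 0 /* illustrative sketch, not compiled */
static unsigned e1kSketchArrayIndex(uint32_t offReg, uint32_t offFirst, size_t cbElement)
{
    /* offFirst is the offset of the first element of the array in register
       space; each element occupies cbElement bytes. */
    return (unsigned)((offReg - offFirst) / cbElement);
}
#endif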
6025
6026/**
6027 * Write handler for Receive Address registers.
6028 *
6029 * @param pThis The device state structure.
6030 * @param offset Register offset in memory-mapped frame.
6031 * @param index Register index in register array.
6032 * @param value The value to store.
6033 * @thread EMT
6034 */
6035static int e1kRegWriteRA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
6036{
6037 RT_NOREF_PV(pDevIns);
6038 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
6039 pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
6040
6041 return VINF_SUCCESS;
6042}
6043
6044/**
6045 * Read handler for Receive Address registers.
6046 *
6047 * @returns VBox status code.
6048 *
6049 * @param pThis The device state structure.
6050 * @param offset Register offset in memory-mapped frame.
6051 * @param index Register index in register array.
6052 * @thread EMT
6053 */
6054static int e1kRegReadRA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
6055{
6056 RT_NOREF_PV(pDevIns);
6057     AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
6058 *pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
6059
6060 return VINF_SUCCESS;
6061}
6062
6063/**
6064 * Write handler for VLAN Filter Table Array registers.
6065 *
6066 * @param pThis The device state structure.
6067 * @param offset Register offset in memory-mapped frame.
6068 * @param index Register index in register array.
6069 * @param value The value to store.
6070 * @thread EMT
6071 */
6072static int e1kRegWriteVFTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
6073{
6074 RT_NOREF_PV(pDevIns);
6075 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VINF_SUCCESS);
6076 pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])] = value;
6077
6078 return VINF_SUCCESS;
6079}
6080
6081/**
6082 * Read handler for VLAN Filter Table Array registers.
6083 *
6084 * @returns VBox status code.
6085 *
6086 * @param pThis The device state structure.
6087 * @param offset Register offset in memory-mapped frame.
6088 * @param index Register index in register array.
6089 * @thread EMT
6090 */
6091static int e1kRegReadVFTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
6092{
6093 RT_NOREF_PV(pDevIns);
6094 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VERR_DEV_IO_ERROR);
6095 *pu32Value = pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])];
6096
6097 return VINF_SUCCESS;
6098}
6099
6100/**
6101 * Read handler for unimplemented registers.
6102 *
6103 * Merely reports reads from unimplemented registers.
6104 *
6105 * @returns VBox status code.
6106 *
6107 * @param pThis The device state structure.
6108 * @param offset Register offset in memory-mapped frame.
6109 * @param index Register index in register array.
6110 * @thread EMT
6111 */
6112static int e1kRegReadUnimplemented(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
6113{
6114 RT_NOREF(pDevIns, pThis, offset, index);
6115 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
6116 pThis->szPrf, offset, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6117 *pu32Value = 0;
6118
6119 return VINF_SUCCESS;
6120}
6121
6122/**
6123 * Default register read handler with automatic clear operation.
6124 *
6125 * Retrieves the value of the register from the register array in the device state structure.
6126 * Then resets all bits.
6127 *
6128 * @remarks Masking and shifting of partial accesses is done by the caller,
6129 *          so no 'mask' parameter is needed here.
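 *          In this device the auto-clear read is typically used for the
 *          statistics counters, which the 8254x specification defines as
 *          clear-on-read.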
6130 *
6131 * @returns VBox status code.
6132 *
6133 * @param pThis The device state structure.
6134 * @param offset Register offset in memory-mapped frame.
6135 * @param index Register index in register array.
6136 * @thread EMT
6137 */
6138static int e1kRegReadAutoClear(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
6139{
6140 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
6141 int rc = e1kRegReadDefault(pDevIns, pThis, offset, index, pu32Value);
6142 pThis->auRegs[index] = 0;
6143
6144 return rc;
6145}
6146
6147/**
6148 * Default register read handler.
6149 *
6150 * Retrieves the value of the register from the register array in the device state structure.
6151 * Bits corresponding to 0s in 'readable' mask will always read as 0s.
6152 *
6153 * @remarks Masking and shifting of partial accesses is done by the caller,
6154 *          so no 'mask' parameter is needed here.
6155 *
6156 * @returns VBox status code.
6157 *
6158 * @param pThis The device state structure.
6159 * @param offset Register offset in memory-mapped frame.
6160 * @param index Register index in register array.
6161 * @thread EMT
6162 */
6163static int e1kRegReadDefault(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
6164{
6165 RT_NOREF_PV(pDevIns); RT_NOREF_PV(offset);
6166
6167 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
6168 *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
6169
6170 return VINF_SUCCESS;
6171}
6172
6173/**
6174 * Write handler for unimplemented registers.
6175 *
6176 * Merely reports writes to unimplemented registers.
6177 *
6178 * @param pThis The device state structure.
6179 * @param offset Register offset in memory-mapped frame.
6180 * @param index Register index in register array.
6181 * @param value The value to store.
6182 * @thread EMT
6183 */
6185static int e1kRegWriteUnimplemented(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
6186{
6187 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
6188
6189 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
6190 pThis->szPrf, offset, value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6191
6192 return VINF_SUCCESS;
6193}
6194
6195/**
6196 * Default register write handler.
6197 *
6198 * Stores the value to the register array in the device state structure. Only
6199 * bits corresponding to 1s in the register's 'writable' mask will be stored.
6200 *
6201 * @returns VBox status code.
6202 *
6203 * @param pThis The device state structure.
6204 * @param offset Register offset in memory-mapped frame.
6205 * @param index Register index in register array.
6206 * @param value The value to store.
6208 * @thread EMT
6209 */
6211static int e1kRegWriteDefault(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
6212{
6213 RT_NOREF(pDevIns, offset);
6214
6215 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
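    /* Merge: take the writable bits from the new value and keep the read-only
       bits of the current register content unchanged. */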
6216 pThis->auRegs[index] = (value & g_aE1kRegMap[index].writable)
6217 | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
6218
6219 return VINF_SUCCESS;
6220}
6221
6222/**
6223 * Search register table for matching register.
6224 *
6225 * @returns Index in the register table or -1 if not found.
6226 *
6227 * @param offReg Register offset in memory-mapped region.
6228 * @thread EMT
6229 */
6230static int e1kRegLookup(uint32_t offReg)
6231{
6232
6233#if 0
6234 int index;
6235
6236 for (index = 0; index < E1K_NUM_OF_REGS; index++)
6237 {
6238 if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
6239 {
6240 return index;
6241 }
6242 }
6243#else
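    /* The first E1K_NUM_OF_BINARY_SEARCHABLE entries of g_aE1kRegMap are assumed
       to be sorted by offset and are binary searched here; the remaining entries
       are scanned linearly further down. */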
6244 int iStart = 0;
6245 int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
6246 for (;;)
6247 {
6248 int i = (iEnd - iStart) / 2 + iStart;
6249 uint32_t offCur = g_aE1kRegMap[i].offset;
6250 if (offReg < offCur)
6251 {
6252 if (i == iStart)
6253 break;
6254 iEnd = i;
6255 }
6256 else if (offReg >= offCur + g_aE1kRegMap[i].size)
6257 {
6258 i++;
6259 if (i == iEnd)
6260 break;
6261 iStart = i;
6262 }
6263 else
6264 return i;
6265 Assert(iEnd > iStart);
6266 }
6267
6268 for (unsigned i = E1K_NUM_OF_BINARY_SEARCHABLE; i < RT_ELEMENTS(g_aE1kRegMap); i++)
6269 if (offReg - g_aE1kRegMap[i].offset < g_aE1kRegMap[i].size)
6270 return (int)i;
6271
6272# ifdef VBOX_STRICT
6273 for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
6274 Assert(offReg - g_aE1kRegMap[i].offset >= g_aE1kRegMap[i].size);
6275# endif
6276
6277#endif
6278
6279 return -1;
6280}
6281
6282/**
6283 * Handle unaligned register read operation.
6284 *
6285 * Looks up and calls appropriate handler.
6286 *
6287 * @returns VBox status code.
6288 *
6289 * @param pDevIns The device instance.
6290 * @param pThis The device state structure.
6291 * @param offReg Register offset in memory-mapped frame.
6292 * @param pv Where to store the result.
6293 * @param cb Number of bytes to read.
6294 * @thread EMT
6295 * @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
6296 * accesses we have to take care of that ourselves.
6297 */
6298static int e1kRegReadUnaligned(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, void *pv, uint32_t cb)
6299{
6300 uint32_t u32 = 0;
6301 uint32_t shift;
6302 int rc = VINF_SUCCESS;
6303 int index = e1kRegLookup(offReg);
6304#ifdef LOG_ENABLED
6305 char buf[9];
6306#endif
6307
6308 /*
6309 * From the spec:
6310 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
6311 * double word) are ignored. Partial reads return all 32 bits of data regardless of the byte enables.
6312 */
6313
6314 /*
6315 * To be able to read bytes and short word we convert them to properly
6316 * shifted 32-bit words and masks. The idea is to keep register-specific
6317 * handlers simple. Most accesses will be 32-bit anyway.
6318 */
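    /* E.g. a 2-byte read at register offset + 2 yields shift = 16 and
       mask = 0xFFFF0000; the handler's 32-bit result is ANDed with the mask and
       shifted back down before being copied out to the caller. */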
6319 uint32_t mask;
6320 switch (cb)
6321 {
6322 case 4: mask = 0xFFFFFFFF; break;
6323 case 2: mask = 0x0000FFFF; break;
6324 case 1: mask = 0x000000FF; break;
6325 default:
6326 return PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "unsupported op size: offset=%#10x cb=%#10x\n", offReg, cb);
6327 }
6328 if (index >= 0)
6329 {
6330 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6331 if (g_aE1kRegMap[index].readable)
6332 {
6333 /* Make the mask correspond to the bits we are about to read. */
6334 shift = (offReg - g_aE1kRegMap[index].offset) % sizeof(uint32_t) * 8;
6335 mask <<= shift;
6336 if (!mask)
6337 return PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
6338 /*
6339 * Read it. Pass the mask so the handler knows what has to be read.
6340 * Mask out irrelevant bits.
6341 */
6342 //e1kCsEnterReturn(pThis, VERR_SEM_BUSY);
6343 //pThis->fDelayInts = false;
6344 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6345 //pThis->iStatIntLostOne = 0;
6346 rc = g_aE1kRegMap[index].pfnRead(pDevIns, pThis, offReg & 0xFFFFFFFC, (uint32_t)index, &u32);
6347 u32 &= mask;
6348 //e1kCsLeave(pThis);
6349 E1kLog2(("%s At %08X read %s from %s (%s)\n",
6350 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6351 Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
6352 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6353 /* Shift back the result. */
6354 u32 >>= shift;
6355 }
6356 else
6357 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
6358 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6359 if (IOM_SUCCESS(rc))
6360 STAM_COUNTER_INC(&pThis->aStatRegReads[index]);
6361 }
6362 else
6363 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
6364 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf)));
6365
6366 memcpy(pv, &u32, cb);
6367 return rc;
6368}
6369
6370/**
6371 * Handle 4 byte aligned and sized read operation.
6372 *
6373 * Looks up and calls appropriate handler.
6374 *
6375 * @returns VBox status code.
6376 *
6377 * @param pDevIns The device instance.
6378 * @param pThis The device state structure.
6379 * @param offReg Register offset in memory-mapped frame.
6380 * @param pu32 Where to store the result.
6381 * @thread EMT
6382 */
6383static VBOXSTRICTRC e1kRegReadAlignedU32(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, uint32_t *pu32)
6384{
6385 Assert(!(offReg & 3));
6386
6387 /*
6388 * Lookup the register and check that it's readable.
6389 */
6390 VBOXSTRICTRC rc = VINF_SUCCESS;
6391 int idxReg = e1kRegLookup(offReg);
6392 if (RT_LIKELY(idxReg >= 0))
6393 {
6394 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6395 if (RT_LIKELY(g_aE1kRegMap[idxReg].readable))
6396 {
6397 /*
6398 * Read it. Pass the mask so the handler knows what has to be read.
6399 * Mask out irrelevant bits.
6400 */
6401 //e1kCsEnterReturn(pThis, VERR_SEM_BUSY);
6402 //pThis->fDelayInts = false;
6403 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6404 //pThis->iStatIntLostOne = 0;
6405 rc = g_aE1kRegMap[idxReg].pfnRead(pDevIns, pThis, offReg & 0xFFFFFFFC, (uint32_t)idxReg, pu32);
6406 //e1kCsLeave(pThis);
6407 Log6(("%s At %08X read %08X from %s (%s)\n",
6408 pThis->szPrf, offReg, *pu32, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
6409 if (IOM_SUCCESS(rc))
6410 STAM_COUNTER_INC(&pThis->aStatRegReads[idxReg]);
6411 }
6412 else
6413 E1kLog(("%s At %08X read attempt from non-readable register %s (%s)\n",
6414 pThis->szPrf, offReg, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
6415 }
6416 else
6417 E1kLog(("%s At %08X read attempt from non-existing register\n", pThis->szPrf, offReg));
6418 return rc;
6419}
6420
6421/**
6422 * Handle 4 byte sized and aligned register write operation.
6423 *
6424 * Looks up and calls appropriate handler.
6425 *
6426 * @returns VBox status code.
6427 *
6428 * @param pDevIns The device instance.
6429 * @param pThis The device state structure.
6430 * @param offReg Register offset in memory-mapped frame.
6431 * @param u32Value The value to write.
6432 * @thread EMT
6433 */
6434static VBOXSTRICTRC e1kRegWriteAlignedU32(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
6435{
6436 VBOXSTRICTRC rc = VINF_SUCCESS;
6437 int index = e1kRegLookup(offReg);
6438 if (RT_LIKELY(index >= 0))
6439 {
6440 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6441 if (RT_LIKELY(g_aE1kRegMap[index].writable))
6442 {
6443 /*
6444 * Write it. Pass the mask so the handler knows what has to be written.
6445 * Mask out irrelevant bits.
6446 */
6447 Log6(("%s At %08X write %08X to %s (%s)\n",
6448 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6449 //e1kCsEnterReturn(pThis, VERR_SEM_BUSY);
6450 //pThis->fDelayInts = false;
6451 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6452 //pThis->iStatIntLostOne = 0;
6453 rc = g_aE1kRegMap[index].pfnWrite(pDevIns, pThis, offReg, (uint32_t)index, u32Value);
6454 //e1kCsLeave(pThis);
6455 }
6456 else
6457 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
6458 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6459 if (IOM_SUCCESS(rc))
6460 STAM_COUNTER_INC(&pThis->aStatRegWrites[index]);
6461 }
6462 else
6463 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
6464 pThis->szPrf, offReg, u32Value));
6465 return rc;
6466}
6467
6468
6469/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
6470
6471/**
6472 * @callback_method_impl{FNIOMMMIONEWREAD}
6473 */
6474static DECLCALLBACK(VBOXSTRICTRC) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, uint32_t cb)
6475{
6476 RT_NOREF2(pvUser, cb);
6477 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6478 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6479
6480 Assert(off < E1K_MM_SIZE);
6481 Assert(cb == 4);
6482 Assert(!(off & 3));
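    /* Aligned dword accesses are presumed to be guaranteed by IOM for this MMIO
       region (cf. the remark in e1kRegReadUnaligned), so plain asserts suffice. */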
6483
6484 VBOXSTRICTRC rcStrict = e1kRegReadAlignedU32(pDevIns, pThis, (uint32_t)off, (uint32_t *)pv);
6485
6486 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6487 return rcStrict;
6488}
6489
6490/**
6491 * @callback_method_impl{FNIOMMMIONEWWRITE}
6492 */
6493static DECLCALLBACK(VBOXSTRICTRC) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, uint32_t cb)
6494{
6495 RT_NOREF2(pvUser, cb);
6496 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6497 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6498
6499 Assert(off < E1K_MM_SIZE);
6500 Assert(cb == 4);
6501 Assert(!(off & 3));
6502
6503 VBOXSTRICTRC rcStrict = e1kRegWriteAlignedU32(pDevIns, pThis, (uint32_t)off, *(uint32_t const *)pv);
6504
6505 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6506 return rcStrict;
6507}
6508
6509/**
6510 * @callback_method_impl{FNIOMIOPORTNEWIN}
6511 */
6512static DECLCALLBACK(VBOXSTRICTRC) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
6513{
6514 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6515 VBOXSTRICTRC rc;
6516 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIORead), a);
6517 RT_NOREF_PV(pvUser);
6518
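    /* The I/O space is a two-register window: IOADDR (offset 0) latches a register
       offset, IODATA (offset 4) accesses the register selected via IOADDR. */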
6519 if (RT_LIKELY(cb == 4))
6520 switch (offPort)
6521 {
6522 case 0x00: /* IOADDR */
6523 *pu32 = pThis->uSelectedReg;
6524 Log9(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6525 rc = VINF_SUCCESS;
6526 break;
6527
6528 case 0x04: /* IODATA */
6529 if (!(pThis->uSelectedReg & 3))
6530 rc = e1kRegReadAlignedU32(pDevIns, pThis, pThis->uSelectedReg, pu32);
6531 else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
6532 rc = e1kRegReadUnaligned(pDevIns, pThis, pThis->uSelectedReg, pu32, cb);
6533 if (rc == VINF_IOM_R3_MMIO_READ)
6534 rc = VINF_IOM_R3_IOPORT_READ;
6535 Log9(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6536 break;
6537
6538 default:
6539 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", pThis->szPrf, offPort));
6540 /** @todo r=bird: Check what real hardware returns here. */
6541 //rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
6542 rc = VINF_IOM_MMIO_UNUSED_00; /* used to return VINF_SUCCESS and not touch *pu32, which amounted to this. */
6543 break;
6544 }
6545 else
6546 {
6547 E1kLog(("%s e1kIOPortIn: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb));
6548 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb);
6549 *pu32 = 0; /** @todo r=bird: Check what real hardware returns here. (Didn't used to set a value here, picked zero as that's what we'd end up in most cases.) */
6550 }
6551 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIORead), a);
6552 return rc;
6553}
6554
6555
6556/**
6557 * @callback_method_impl{FNIOMIOPORTNEWOUT}
6558 */
6559static DECLCALLBACK(VBOXSTRICTRC) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
6560{
6561 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6562 VBOXSTRICTRC rc;
6563 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6564 RT_NOREF_PV(pvUser);
6565
6566 Log9(("%s e1kIOPortOut: offPort=%RTiop value=%08x\n", pThis->szPrf, offPort, u32));
6567 if (RT_LIKELY(cb == 4))
6568 {
6569 switch (offPort)
6570 {
6571 case 0x00: /* IOADDR */
6572 pThis->uSelectedReg = u32;
6573 Log9(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
6574 rc = VINF_SUCCESS;
6575 break;
6576
6577 case 0x04: /* IODATA */
6578 Log9(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
6579 if (RT_LIKELY(!(pThis->uSelectedReg & 3)))
6580 {
6581 rc = e1kRegWriteAlignedU32(pDevIns, pThis, pThis->uSelectedReg, u32);
6582 if (rc == VINF_IOM_R3_MMIO_WRITE)
6583 rc = VINF_IOM_R3_IOPORT_WRITE;
6584 }
6585 else
6586 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
6587 "Spec violation: misaligned offset: %#10x, ignored.\n", pThis->uSelectedReg);
6588 break;
6589
6590 default:
6591 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", pThis->szPrf, offPort));
6592 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", offPort);
6593 }
6594 }
6595 else
6596 {
6597 E1kLog(("%s e1kIOPortOut: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb));
6598 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s: invalid op size: offPort=%RTiop cb=%#x\n", pThis->szPrf, offPort, cb);
6599 }
6600
6601 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6602 return rc;
6603}
6604
6605#ifdef IN_RING3
6606
6607/**
6608 * Dump complete device state to log.
6609 *
6610 * @param pThis Pointer to device state.
6611 */
6612static void e1kDumpState(PE1KSTATE pThis)
6613{
6614 RT_NOREF(pThis);
6615 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6616 E1kLog2(("%s: %8.8s = %08x\n", pThis->szPrf, g_aE1kRegMap[i].abbrev, pThis->auRegs[i]));
6617# ifdef E1K_INT_STATS
6618 LogRel(("%s: Interrupt attempts: %d\n", pThis->szPrf, pThis->uStatIntTry));
6619 LogRel(("%s: Interrupts raised : %d\n", pThis->szPrf, pThis->uStatInt));
6620 LogRel(("%s: Interrupts lowered: %d\n", pThis->szPrf, pThis->uStatIntLower));
6621 LogRel(("%s: ICR outside ISR : %d\n", pThis->szPrf, pThis->uStatNoIntICR));
6622 LogRel(("%s: IMS raised ints : %d\n", pThis->szPrf, pThis->uStatIntIMS));
6623 LogRel(("%s: Interrupts skipped: %d\n", pThis->szPrf, pThis->uStatIntSkip));
6624 LogRel(("%s: Masked interrupts : %d\n", pThis->szPrf, pThis->uStatIntMasked));
6625 LogRel(("%s: Early interrupts : %d\n", pThis->szPrf, pThis->uStatIntEarly));
6626 LogRel(("%s: Late interrupts : %d\n", pThis->szPrf, pThis->uStatIntLate));
6627 LogRel(("%s: Lost interrupts : %d\n", pThis->szPrf, pThis->iStatIntLost));
6628 LogRel(("%s: Interrupts by RX : %d\n", pThis->szPrf, pThis->uStatIntRx));
6629 LogRel(("%s: Interrupts by TX : %d\n", pThis->szPrf, pThis->uStatIntTx));
6630 LogRel(("%s: Interrupts by ICS : %d\n", pThis->szPrf, pThis->uStatIntICS));
6631 LogRel(("%s: Interrupts by RDTR: %d\n", pThis->szPrf, pThis->uStatIntRDTR));
6632 LogRel(("%s: Interrupts by RDMT: %d\n", pThis->szPrf, pThis->uStatIntRXDMT0));
6633 LogRel(("%s: Interrupts by TXQE: %d\n", pThis->szPrf, pThis->uStatIntTXQE));
6634 LogRel(("%s: TX int delay asked: %d\n", pThis->szPrf, pThis->uStatTxIDE));
6635 LogRel(("%s: TX delayed: %d\n", pThis->szPrf, pThis->uStatTxDelayed));
6636 LogRel(("%s: TX delay expired: %d\n", pThis->szPrf, pThis->uStatTxDelayExp));
6637 LogRel(("%s: TX no report asked: %d\n", pThis->szPrf, pThis->uStatTxNoRS));
6638 LogRel(("%s: TX abs timer expd : %d\n", pThis->szPrf, pThis->uStatTAD));
6639 LogRel(("%s: TX int timer expd : %d\n", pThis->szPrf, pThis->uStatTID));
6640 LogRel(("%s: RX abs timer expd : %d\n", pThis->szPrf, pThis->uStatRAD));
6641 LogRel(("%s: RX int timer expd : %d\n", pThis->szPrf, pThis->uStatRID));
6642 LogRel(("%s: TX CTX descriptors: %d\n", pThis->szPrf, pThis->uStatDescCtx));
6643 LogRel(("%s: TX DAT descriptors: %d\n", pThis->szPrf, pThis->uStatDescDat));
6644 LogRel(("%s: TX LEG descriptors: %d\n", pThis->szPrf, pThis->uStatDescLeg));
6645 LogRel(("%s: Received frames : %d\n", pThis->szPrf, pThis->uStatRxFrm));
6646 LogRel(("%s: Transmitted frames: %d\n", pThis->szPrf, pThis->uStatTxFrm));
6647 LogRel(("%s: TX frames up to 1514: %d\n", pThis->szPrf, pThis->uStatTx1514));
6648 LogRel(("%s: TX frames up to 2962: %d\n", pThis->szPrf, pThis->uStatTx2962));
6649 LogRel(("%s: TX frames up to 4410: %d\n", pThis->szPrf, pThis->uStatTx4410));
6650 LogRel(("%s: TX frames up to 5858: %d\n", pThis->szPrf, pThis->uStatTx5858));
6651 LogRel(("%s: TX frames up to 7306: %d\n", pThis->szPrf, pThis->uStatTx7306));
6652 LogRel(("%s: TX frames up to 8754: %d\n", pThis->szPrf, pThis->uStatTx8754));
6653 LogRel(("%s: TX frames up to 16384: %d\n", pThis->szPrf, pThis->uStatTx16384));
6654 LogRel(("%s: TX frames up to 32768: %d\n", pThis->szPrf, pThis->uStatTx32768));
6655 LogRel(("%s: Larger TX frames : %d\n", pThis->szPrf, pThis->uStatTxLarge));
6656 LogRel(("%s: Max TX Delay : %lld\n", pThis->szPrf, pThis->uStatMaxTxDelay));
6657# endif /* E1K_INT_STATS */
6658}
6659
6660
6661/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
6662
6663/**
6664 * Check if the device can receive data now.
6665 * This must be called before the pfnReceive() method is called.
6666 *
6667 * @returns VBox status code.
6668 * @retval VERR_NET_NO_BUFFER_SPACE if we cannot receive.
6669 * @param pDevIns The device instance.
6670 * @param pThis The instance data.
6671 * @thread EMT
6672 */
6673static int e1kR3CanReceive(PPDMDEVINS pDevIns, PE1KSTATE pThis)
6674{
6675# ifndef E1K_WITH_RXD_CACHE
6676 size_t cb;
6677
6678 e1kCsRxEnterReturn(pThis);
6679
6680 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6681 {
6682 E1KRXDESC desc;
6683 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
6684 if (desc.status.fDD)
6685 cb = 0;
6686 else
6687 cb = pThis->u16RxBSize;
6688 }
6689 else if (RDH < RDT)
6690 cb = (RDT - RDH) * pThis->u16RxBSize;
6691 else if (RDH > RDT)
6692 cb = (RDLEN / sizeof(E1KRXDESC) - RDH + RDT) * pThis->u16RxBSize;
6693 else
6694 {
6695 cb = 0;
6696 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6697 }
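    /* Free space lies between head and tail: e.g. with a 256-descriptor ring,
       RDH=250 and RDT=10 give 256 - 250 + 10 = 16 free descriptors, i.e.
       16 * u16RxBSize bytes of buffer space. */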
6698 E1kLog2(("%s e1kR3CanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6699 pThis->szPrf, RDH, RDT, RDLEN, pThis->u16RxBSize, cb));
6700
6701 e1kCsRxLeave(pThis);
6702 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6703# else /* E1K_WITH_RXD_CACHE */
6704
6705 e1kCsRxEnterReturn(pThis);
6706
6707 E1KRXDC rxdc;
6708 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kR3CanReceive")))
6709 {
6710 e1kCsRxLeave(pThis);
6711 E1kLog(("%s e1kR3CanReceive: failed to update Rx context, returning VERR_NET_NO_BUFFER_SPACE\n", pThis->szPrf));
6712 return VERR_NET_NO_BUFFER_SPACE;
6713 }
6714
6715 int rc = VINF_SUCCESS;
6716 if (RT_UNLIKELY(rxdc.rdlen == sizeof(E1KRXDESC)))
6717 {
6718 E1KRXDESC desc;
6719 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, rxdc.rdh), &desc, sizeof(desc));
6720 if (desc.status.fDD)
6721 rc = VERR_NET_NO_BUFFER_SPACE;
6722 }
6723 else if (e1kRxDIsCacheEmpty(pThis) && rxdc.rdh == rxdc.rdt)
6724 {
6725 /* Cache is empty, so is the RX ring. */
6726 rc = VERR_NET_NO_BUFFER_SPACE;
6727 }
6728 E1kLog2(("%s e1kR3CanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d rc=%Rrc\n", pThis->szPrf,
6729 e1kRxDInCache(pThis), rxdc.rdh, rxdc.rdt, rxdc.rdlen, pThis->u16RxBSize, rc));
6730
6731 e1kCsRxLeave(pThis);
6732 return rc;
6733# endif /* E1K_WITH_RXD_CACHE */
6734}
6735
6736/**
6737 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6738 */
6739static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6740{
6741 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
6742 PE1KSTATE pThis = pThisCC->pShared;
6743 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6744
6745 int rc = e1kR3CanReceive(pDevIns, pThis);
6746 if (RT_SUCCESS(rc))
6747 return VINF_SUCCESS;
6748
6749 if (RT_UNLIKELY(cMillies == 0))
6750 return VERR_NET_NO_BUFFER_SPACE;
6751
6752 rc = VERR_INTERRUPTED;
6753 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, true);
6754 STAM_PROFILE_START(&pThis->StatRxOverflow, a);
6755 VMSTATE enmVMState;
6756 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pDevIns)) == VMSTATE_RUNNING
6757 || enmVMState == VMSTATE_RUNNING_LS))
6758 {
6759 int rc2 = e1kR3CanReceive(pDevIns, pThis);
6760 if (RT_SUCCESS(rc2))
6761 {
6762 rc = VINF_SUCCESS;
6763 break;
6764 }
6765 E1kLogRel(("E1000: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6766 E1kLog(("%s: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", pThis->szPrf, cMillies));
6767 PDMDevHlpSUPSemEventWaitNoResume(pDevIns, pThis->hEventMoreRxDescAvail, cMillies);
6768 }
6769 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
6770 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, false);
6771
6772 return rc;
6773}
6774
6775
6776/**
6777 * Matches the packet addresses against Receive Address table. Looks for
6778 * exact matches only.
6779 *
6780 * @returns true if address matches.
6781 * @param pThis Pointer to the state structure.
6782 * @param pvBuf The ethernet packet.
6784 * @thread EMT
6785 */
6786static bool e1kPerfectMatch(PE1KSTATE pThis, const void *pvBuf)
6787{
6788 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
6789 {
6790 E1KRAELEM* ra = pThis->aRecAddr.array + i;
6791
6792 /* Valid address? */
6793 if (ra->ctl & RA_CTL_AV)
6794 {
6795 Assert((ra->ctl & RA_CTL_AS) < 2);
6796 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6797 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6798 // pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6799 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6800 /*
6801 * Address Select:
6802 * 00b = Destination address
6803 * 01b = Source address
6804 * 10b = Reserved
6805 * 11b = Reserved
6806 * Since ethernet header is (DA, SA, len) we can use address
6807 * select as index.
6808 */
6809 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6810 ra->addr, sizeof(ra->addr)) == 0)
6811 return true;
6812 }
6813 }
6814
6815 return false;
6816}
6817
6818/**
6819 * Matches the packet addresses against Multicast Table Array.
6820 *
6821 * @remarks This is an imperfect match since it matches not the exact address
6822 *          but a whole subset of addresses.
6823 *
6824 * @returns true if address matches.
6825 * @param pThis Pointer to the state structure.
6826 * @param pvBuf The ethernet packet.
6828 * @thread EMT
6829 */
6830static bool e1kImperfectMatch(PE1KSTATE pThis, const void *pvBuf)
6831{
6832 /* Get bits 32..47 of destination address */
6833 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6834
6835 unsigned offset = GET_BITS(RCTL, MO);
6836 /*
6837 * offset means:
6838 * 00b = bits 36..47
6839 * 01b = bits 35..46
6840 * 10b = bits 34..45
6841 * 11b = bits 32..43
6842 */
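    /* E.g. for MO=00b (offset 0) the shift by 4 leaves destination address
       bits 47..36 as the 12-bit index into the 4096-bit MTA. */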
6843 if (offset < 3)
6844 u16Bit = u16Bit >> (4 - offset);
6845 return ASMBitTest(pThis->auMTA, u16Bit & 0xFFF);
6846}
6847
6848/**
6849 * Determines if the packet is to be delivered to upper layer.
6850 *
6851 * The following filters supported:
6852 * - Exact Unicast/Multicast
6853 * - Promiscuous Unicast/Multicast
6854 * - Multicast
6855 * - VLAN
6856 *
6857 * @returns true if packet is intended for this node.
6858 * @param pThis Pointer to the state structure.
6859 * @param pvBuf The ethernet packet.
6860 * @param cb Number of bytes available in the packet.
6861 * @param pStatus Bit field to store status bits.
6862 * @thread EMT
6863 */
6864static bool e1kAddressFilter(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6865{
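    /* Filtering order: size checks, optional VLAN filtering (VFTA), broadcast
       (BAM), multicast (MPE / perfect RA match / imperfect MTA match), and
       finally unicast (UPE / perfect RA match). */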
6866 Assert(cb > 14);
6867 /* Assume that we fail to pass exact filter. */
6868 pStatus->fPIF = false;
6869 pStatus->fVP = false;
6870 /* Discard oversized packets */
6871 if (cb > E1K_MAX_RX_PKT_SIZE)
6872 {
6873 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6874 pThis->szPrf, cb, E1K_MAX_RX_PKT_SIZE));
6875 E1K_INC_CNT32(ROC);
6876 return false;
6877 }
6878 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6879 {
6880 /* When long packet reception is disabled packets over 1522 are discarded */
6881 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6882 pThis->szPrf, cb));
6883 E1K_INC_CNT32(ROC);
6884 return false;
6885 }
6886
6887 uint16_t *u16Ptr = (uint16_t*)pvBuf;
6888 /* Compare TPID with VLAN Ether Type */
6889 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6890 {
6891 pStatus->fVP = true;
6892 /* Is VLAN filtering enabled? */
6893 if (RCTL & RCTL_VFE)
6894 {
6895 /* It is 802.1q packet indeed, let's filter by VID */
6896 if (RCTL & RCTL_CFIEN)
6897 {
6898 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", pThis->szPrf,
6899 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6900 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6901 !!(RCTL & RCTL_CFI)));
6902 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6903 {
6904 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6905 pThis->szPrf, E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6906 return false;
6907 }
6908 }
6909 else
6910 E1kLog3(("%s VLAN filter: VLAN=%d\n", pThis->szPrf,
6911 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6912 if (!ASMBitTest(pThis->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6913 {
6914 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6915 pThis->szPrf, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6916 return false;
6917 }
6918 }
6919 }
6920 /* Broadcast filtering */
6921 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6922 return true;
6923 E1kLog2(("%s Packet filter: not a broadcast\n", pThis->szPrf));
6924 if (e1kIsMulticast(pvBuf))
6925 {
6926 /* Is multicast promiscuous enabled? */
6927 if (RCTL & RCTL_MPE)
6928 return true;
6929 E1kLog2(("%s Packet filter: no promiscuous multicast\n", pThis->szPrf));
6930 /* Try perfect matches first */
6931 if (e1kPerfectMatch(pThis, pvBuf))
6932 {
6933 pStatus->fPIF = true;
6934 return true;
6935 }
6936 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6937 if (e1kImperfectMatch(pThis, pvBuf))
6938 return true;
6939 E1kLog2(("%s Packet filter: no imperfect match\n", pThis->szPrf));
6940 }
6941 else {
6942 /* Is unicast promiscuous enabled? */
6943 if (RCTL & RCTL_UPE)
6944 return true;
6945 E1kLog2(("%s Packet filter: no promiscuous unicast\n", pThis->szPrf));
6946 if (e1kPerfectMatch(pThis, pvBuf))
6947 {
6948 pStatus->fPIF = true;
6949 return true;
6950 }
6951 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6952 }
6953 E1kLog2(("%s Packet filter: packet discarded\n", pThis->szPrf));
6954 return false;
6955}
6956
6957/**
6958 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6959 */
6960static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6961{
6962 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
6963 PE1KSTATE pThis = pThisCC->pShared;
6964 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6965 int rc = VINF_SUCCESS;
6966
6967 /*
6968 * Drop packets if the VM is not running yet/anymore.
6969 */
6970 VMSTATE enmVMState = PDMDevHlpVMState(pDevIns);
6971 if ( enmVMState != VMSTATE_RUNNING
6972 && enmVMState != VMSTATE_RUNNING_LS)
6973 {
6974 E1kLog(("%s Dropping incoming packet as VM is not running.\n", pThis->szPrf));
6975 return VINF_SUCCESS;
6976 }
6977
6978 /* Discard incoming packets when receiving is disabled, the device is locked, or the link is down. */
6979 if (!(RCTL & RCTL_EN) || pThis->fLocked || !(STATUS & STATUS_LU))
6980 {
6981 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", pThis->szPrf));
6982 return VINF_SUCCESS;
6983 }
6984
6985 STAM_PROFILE_ADV_START(&pThis->StatReceive, a);
6986
6987 //e1kR3CsEnterAsserted(pThis);
6988
6989 e1kPacketDump(pDevIns, pThis, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6990
6991 /* Update stats */
6992 e1kR3CsEnterAsserted(pThis);
6993 E1K_INC_CNT32(TPR);
6994 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6995 e1kCsLeave(pThis);
6996
6997 STAM_PROFILE_ADV_START(&pThis->StatReceiveFilter, a);
6998 E1KRXDST status;
6999 RT_ZERO(status);
7000 bool fPassed = e1kAddressFilter(pThis, pvBuf, cb, &status);
7001 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveFilter, a);
7002 if (fPassed)
7003 {
7004 rc = e1kHandleRxPacket(pDevIns, pThis, pvBuf, cb, status);
7005 }
7006 //e1kCsLeave(pThis);
7007 STAM_PROFILE_ADV_STOP(&pThis->StatReceive, a);
7008
7009 return rc;
7010}
7011
7012
7013/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
7014
7015/**
7016 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
7017 */
7018static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
7019{
7020 if (iLUN == 0)
7021 {
7022 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, ILeds);
7023 *ppLed = &pThisCC->pShared->led;
7024 return VINF_SUCCESS;
7025 }
7026 return VERR_PDM_LUN_NOT_FOUND;
7027}
7028
7029
7030/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
7031
7032/**
7033 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
7034 */
7035static DECLCALLBACK(int) e1kR3GetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
7036{
7037 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
7038 pThisCC->eeprom.getMac(pMac);
7039 return VINF_SUCCESS;
7040}
7041
7042/**
7043 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
7044 */
7045static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kR3GetLinkState(PPDMINETWORKCONFIG pInterface)
7046{
7047 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
7048 PE1KSTATE pThis = pThisCC->pShared;
7049 if (STATUS & STATUS_LU)
7050 return PDMNETWORKLINKSTATE_UP;
7051 return PDMNETWORKLINKSTATE_DOWN;
7052}
7053
7054/**
7055 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
7056 */
7057static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
7058{
7059 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
7060 PE1KSTATE pThis = pThisCC->pShared;
7061 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
7062
7063 E1kLog(("%s e1kR3SetLinkState: enmState=%d\n", pThis->szPrf, enmState));
7064 switch (enmState)
7065 {
7066 case PDMNETWORKLINKSTATE_UP:
7067 pThis->fCableConnected = true;
7068 /* If link was down, bring it up after a while. */
7069 if (!(STATUS & STATUS_LU))
7070 e1kBringLinkUpDelayed(pDevIns, pThis);
7071 break;
7072 case PDMNETWORKLINKSTATE_DOWN:
7073 pThis->fCableConnected = false;
7074 /* Always set the phy link state to down, regardless of the STATUS_LU bit.
7075 * We might have to set the link state before the driver initializes us. */
7076 Phy::setLinkStatus(&pThis->phy, false);
7077 /* If link was up, bring it down. */
7078 if (STATUS & STATUS_LU)
7079 e1kR3LinkDown(pDevIns, pThis, pThisCC);
7080 break;
7081 case PDMNETWORKLINKSTATE_DOWN_RESUME:
7082 /*
7083 * There is not much sense in bringing down the link if it has not come up yet.
7084 * If it is up though, we bring it down temporarily, then bring it up again.
7085 */
7086 if (STATUS & STATUS_LU)
7087 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
7088 break;
7089 default:
7090 ;
7091 }
7092 return VINF_SUCCESS;
7093}
7094
7095
7096/* -=-=-=-=- PDMIBASE -=-=-=-=- */
7097
7098/**
7099 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
7100 */
7101static DECLCALLBACK(void *) e1kR3QueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
7102{
7103 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, IBase);
7104 Assert(&pThisCC->IBase == pInterface);
7105
7106 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThisCC->IBase);
7107 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThisCC->INetworkDown);
7108 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThisCC->INetworkConfig);
7109 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThisCC->ILeds);
7110 return NULL;
7111}
7112
7113
7114/* -=-=-=-=- Saved State -=-=-=-=- */
7115
7116/**
7117 * Saves the configuration.
7118 *
7119 * @param pThis The E1K state.
7120 * @param pSSM The handle to the saved state.
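 *
 * @remarks Also called from the live-migration pass (e1kR3LiveExec); the saved
 *          values are cross-checked against the configuration in e1kR3LoadExec.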
7121 */
7122static void e1kR3SaveConfig(PCPDMDEVHLPR3 pHlp, PE1KSTATE pThis, PSSMHANDLE pSSM)
7123{
7124 pHlp->pfnSSMPutMem(pSSM, &pThis->macConfigured, sizeof(pThis->macConfigured));
7125 pHlp->pfnSSMPutU32(pSSM, pThis->eChip);
7126}
7127
7128/**
7129 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
7130 */
7131static DECLCALLBACK(int) e1kR3LiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
7132{
7133 RT_NOREF(uPass);
7134 e1kR3SaveConfig(pDevIns->pHlpR3, PDMDEVINS_2_DATA(pDevIns, PE1KSTATE), pSSM);
7135 return VINF_SSM_DONT_CALL_AGAIN;
7136}
7137
7138/**
7139 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
7140 */
7141static DECLCALLBACK(int) e1kR3SavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7142{
7143 RT_NOREF(pSSM);
7144 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7145
7146 e1kCsEnterReturn(pThis, VERR_SEM_BUSY);
7147 e1kCsLeave(pThis);
7148 return VINF_SUCCESS;
7149#if 0
7150 /* 1) Prevent all threads from modifying the state and memory */
7151 //pThis->fLocked = true;
7152 /* 2) Cancel all timers */
7153#ifdef E1K_TX_DELAY
7154 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
7155#endif /* E1K_TX_DELAY */
7156//#ifdef E1K_USE_TX_TIMERS
7157 if (pThis->fTidEnabled)
7158 {
7159 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
7160#ifndef E1K_NO_TAD
7161 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
7162#endif /* E1K_NO_TAD */
7163 }
7164//#endif /* E1K_USE_TX_TIMERS */
7165#ifdef E1K_USE_RX_TIMERS
7166 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
7167 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
7168#endif /* E1K_USE_RX_TIMERS */
7169 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
7170 /* 3) Did I forget anything? */
7171 E1kLog(("%s Locked\n", pThis->szPrf));
7172 return VINF_SUCCESS;
7173#endif
7174}
7175
7176/**
7177 * @callback_method_impl{FNSSMDEVSAVEEXEC}
7178 */
7179static DECLCALLBACK(int) e1kR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7180{
7181 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7182 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7183 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
7184
7185 e1kR3SaveConfig(pHlp, pThis, pSSM);
7186 pThisCC->eeprom.save(pHlp, pSSM);
7187 e1kDumpState(pThis);
7188 pHlp->pfnSSMPutMem(pSSM, pThis->auRegs, sizeof(pThis->auRegs));
7189 pHlp->pfnSSMPutBool(pSSM, pThis->fIntRaised);
7190 Phy::saveState(pHlp, pSSM, &pThis->phy);
7191 pHlp->pfnSSMPutU32(pSSM, pThis->uSelectedReg);
7192 pHlp->pfnSSMPutMem(pSSM, pThis->auMTA, sizeof(pThis->auMTA));
7193 pHlp->pfnSSMPutMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
7194 pHlp->pfnSSMPutMem(pSSM, pThis->auVFTA, sizeof(pThis->auVFTA));
7195 pHlp->pfnSSMPutU64(pSSM, pThis->u64AckedAt);
7196 pHlp->pfnSSMPutU16(pSSM, pThis->u16RxBSize);
7197 //pHlp->pfnSSMPutBool(pSSM, pThis->fDelayInts);
7198 //pHlp->pfnSSMPutBool(pSSM, pThis->fIntMaskUsed);
7199 pHlp->pfnSSMPutU16(pSSM, pThis->u16TxPktLen);
7200/** @todo State wrt the TSE buffer is incomplete, so little point in
7201 * saving this actually. */
7202 pHlp->pfnSSMPutMem(pSSM, pThis->aTxPacketFallback, pThis->u16TxPktLen);
7203 pHlp->pfnSSMPutBool(pSSM, pThis->fIPcsum);
7204 pHlp->pfnSSMPutBool(pSSM, pThis->fTCPcsum);
7205 pHlp->pfnSSMPutMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
7206 pHlp->pfnSSMPutMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
7207 pHlp->pfnSSMPutBool(pSSM, pThis->fVTag);
7208 pHlp->pfnSSMPutU16(pSSM, pThis->u16VTagTCI);
7209#ifdef E1K_WITH_TXD_CACHE
7210# if 0
7211 pHlp->pfnSSMPutU8(pSSM, pThis->nTxDFetched);
7212 pHlp->pfnSSMPutMem(pSSM, pThis->aTxDescriptors,
7213 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
7214# else
7215 /*
7216 * There is no point in storing TX descriptor cache entries as we can simply
7217 * fetch them again. Moreover, normally the cache is always empty when we
7218 * save the state. Store zero entries for compatibility.
7219 */
7220 pHlp->pfnSSMPutU8(pSSM, 0);
7221# endif
7222#endif /* E1K_WITH_TXD_CACHE */
7223/** @todo GSO requires some more state here. */
7224 E1kLog(("%s State has been saved\n", pThis->szPrf));
7225 return VINF_SUCCESS;
7226}
7227
7228#if 0
7229/**
7230 * @callback_method_impl{FNSSMDEVSAVEDONE}
7231 */
7232static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7233{
7234 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7235
7236 /* If VM is being powered off unlocking will result in assertions in PGM */
7237 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
7238 pThis->fLocked = false;
7239 else
7240 E1kLog(("%s VM is not running -- remain locked\n", pThis->szPrf));
7241 E1kLog(("%s Unlocked\n", pThis->szPrf));
7242 return VINF_SUCCESS;
7243}
7244#endif
7245
7246/**
7247 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
7248 */
7249static DECLCALLBACK(int) e1kR3LoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7250{
7251 RT_NOREF(pSSM);
7252 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7253
7254 e1kCsEnterReturn(pThis, VERR_SEM_BUSY);
7255 e1kCsLeave(pThis);
7256 return VINF_SUCCESS;
7257}
7258
7259/**
7260 * @callback_method_impl{FNSSMDEVLOADEXEC}
7261 */
7262static DECLCALLBACK(int) e1kR3LoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
7263{
7264 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7265 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7266 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
7267 int rc;
7268
7269 if ( uVersion != E1K_SAVEDSTATE_VERSION
7270#ifdef E1K_WITH_TXD_CACHE
7271 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
7272#endif /* E1K_WITH_TXD_CACHE */
7273 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
7274 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
7275 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
7276
7277 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
7278 || uPass != SSM_PASS_FINAL)
7279 {
7280 /* config checks */
7281 RTMAC macConfigured;
7282 rc = pHlp->pfnSSMGetMem(pSSM, &macConfigured, sizeof(macConfigured));
7283 AssertRCReturn(rc, rc);
7284 if ( memcmp(&macConfigured, &pThis->macConfigured, sizeof(macConfigured))
7285 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
7286 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
7287
7288 E1KCHIP eChip;
7289 rc = pHlp->pfnSSMGetU32(pSSM, &eChip);
7290 AssertRCReturn(rc, rc);
7291 if (eChip != pThis->eChip)
7292 return pHlp->pfnSSMSetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
7293 }
7294
7295 if (uPass == SSM_PASS_FINAL)
7296 {
7297 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
7298 {
7299 rc = pThisCC->eeprom.load(pHlp, pSSM);
7300 AssertRCReturn(rc, rc);
7301 }
7302 /* the state */
7303 pHlp->pfnSSMGetMem(pSSM, &pThis->auRegs, sizeof(pThis->auRegs));
7304 pHlp->pfnSSMGetBool(pSSM, &pThis->fIntRaised);
7305 /** @todo PHY could be made a separate device with its own versioning */
7306 Phy::loadState(pHlp, pSSM, &pThis->phy);
7307 pHlp->pfnSSMGetU32(pSSM, &pThis->uSelectedReg);
7308 pHlp->pfnSSMGetMem(pSSM, &pThis->auMTA, sizeof(pThis->auMTA));
7309 pHlp->pfnSSMGetMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
7310 pHlp->pfnSSMGetMem(pSSM, &pThis->auVFTA, sizeof(pThis->auVFTA));
7311 pHlp->pfnSSMGetU64(pSSM, &pThis->u64AckedAt);
7312 pHlp->pfnSSMGetU16(pSSM, &pThis->u16RxBSize);
7313 //pHlp->pfnSSMGetBool(pSSM, pThis->fDelayInts);
7314 //pHlp->pfnSSMGetBool(pSSM, pThis->fIntMaskUsed);
7315 rc = pHlp->pfnSSMGetU16(pSSM, &pThis->u16TxPktLen);
7316 AssertRCReturn(rc, rc);
7317 if (pThis->u16TxPktLen > sizeof(pThis->aTxPacketFallback))
7318 pThis->u16TxPktLen = sizeof(pThis->aTxPacketFallback);
7319 pHlp->pfnSSMGetMem(pSSM, &pThis->aTxPacketFallback[0], pThis->u16TxPktLen);
7320 pHlp->pfnSSMGetBool(pSSM, &pThis->fIPcsum);
7321 pHlp->pfnSSMGetBool(pSSM, &pThis->fTCPcsum);
7322 pHlp->pfnSSMGetMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
7323 rc = pHlp->pfnSSMGetMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
7324 AssertRCReturn(rc, rc);
7325 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
7326 {
7327 pHlp->pfnSSMGetBool(pSSM, &pThis->fVTag);
7328 rc = pHlp->pfnSSMGetU16(pSSM, &pThis->u16VTagTCI);
7329 AssertRCReturn(rc, rc);
7330 }
7331 else
7332 {
7333 pThis->fVTag = false;
7334 pThis->u16VTagTCI = 0;
7335 }
7336#ifdef E1K_WITH_TXD_CACHE
7337 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
7338 {
7339 rc = pHlp->pfnSSMGetU8(pSSM, &pThis->nTxDFetched);
7340 AssertRCReturn(rc, rc);
7341 if (pThis->nTxDFetched)
7342 pHlp->pfnSSMGetMem(pSSM, pThis->aTxDescriptors,
7343 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
7344 }
7345 else
7346 pThis->nTxDFetched = 0;
7347 /**
7348 * @todo Perhaps we should not store the TXD cache as the entries can simply
7349 * be fetched again from guest memory. Or can't they?
7350 */
7351#endif /* E1K_WITH_TXD_CACHE */
7352#ifdef E1K_WITH_RXD_CACHE
7353 /*
7354 * There is no point in storing the RX descriptor cache in the saved
7355 * state, we just need to make sure it is empty.
7356 */
7357 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
7358#endif /* E1K_WITH_RXD_CACHE */
7359 rc = pHlp->pfnSSMHandleGetStatus(pSSM);
7360 AssertRCReturn(rc, rc);
7361
7362 /* derived state */
7363 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
7364
7365 E1kLog(("%s State has been restored\n", pThis->szPrf));
7366 e1kDumpState(pThis);
7367 }
7368 return VINF_SUCCESS;
7369}
7370
7371/**
7372 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
7373 */
7374static DECLCALLBACK(int) e1kR3LoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7375{
7376 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7377 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7378 RT_NOREF(pSSM);
7379
7380 /* Update promiscuous mode */
7381 if (pThisCC->pDrvR3)
7382 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, !!(RCTL & (RCTL_UPE | RCTL_MPE)));
7383
7384 /*
7385 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
7386 * passed to us. We go through all this stuff if the link was up and we
7387 * weren't teleported.
7388 */
7389 if ( (STATUS & STATUS_LU)
7390 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
7391 && pThis->cMsLinkUpDelay)
7392 {
7393 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
7394 }
7395 return VINF_SUCCESS;
7396}
7397
7398
7399
7400/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
7401
7402/**
7403 * @callback_method_impl{FNRTSTRFORMATTYPE}
7404 */
7405static DECLCALLBACK(size_t) e1kR3FmtRxDesc(PFNRTSTROUTPUT pfnOutput,
7406 void *pvArgOutput,
7407 const char *pszType,
7408 void const *pvValue,
7409 int cchWidth,
7410 int cchPrecision,
7411 unsigned fFlags,
7412 void *pvUser)
7413{
7414 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7415 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
7416 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
7417 if (!pDesc)
7418 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
7419
7420 size_t cbPrintf = 0;
7421 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
7422 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
7423 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
7424 pDesc->status.fPIF ? "PIF" : "pif",
7425 pDesc->status.fIPCS ? "IPCS" : "ipcs",
7426 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
7427 pDesc->status.fVP ? "VP" : "vp",
7428 pDesc->status.fIXSM ? "IXSM" : "ixsm",
7429 pDesc->status.fEOP ? "EOP" : "eop",
7430 pDesc->status.fDD ? "DD" : "dd",
7431 pDesc->status.fRXE ? "RXE" : "rxe",
7432 pDesc->status.fIPE ? "IPE" : "ipe",
7433 pDesc->status.fTCPE ? "TCPE" : "tcpe",
7434 pDesc->status.fCE ? "CE" : "ce",
7435 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
7436 E1K_SPEC_VLAN(pDesc->status.u16Special),
7437 E1K_SPEC_PRI(pDesc->status.u16Special));
7438 return cbPrintf;
7439}
7440
7441/**
7442 * @callback_method_impl{FNRTSTRFORMATTYPE}
7443 */
7444static DECLCALLBACK(size_t) e1kR3FmtTxDesc(PFNRTSTROUTPUT pfnOutput,
7445 void *pvArgOutput,
7446 const char *pszType,
7447 void const *pvValue,
7448 int cchWidth,
7449 int cchPrecision,
7450 unsigned fFlags,
7451 void *pvUser)
7452{
7453 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7454 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
7455 E1KTXDESC *pDesc = (E1KTXDESC*)pvValue;
7456 if (!pDesc)
7457 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
7458
7459 size_t cbPrintf = 0;
7460 switch (e1kGetDescType(pDesc))
7461 {
7462 case E1K_DTYP_CONTEXT:
7463 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
7464 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
7465 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
7466 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
7467 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
7468 pDesc->context.dw2.fIDE ? " IDE":"",
7469 pDesc->context.dw2.fRS ? " RS" :"",
7470 pDesc->context.dw2.fTSE ? " TSE":"",
7471 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
7472 pDesc->context.dw2.fTCP ? "TCP":"UDP",
7473 pDesc->context.dw2.u20PAYLEN,
7474 pDesc->context.dw3.u8HDRLEN,
7475 pDesc->context.dw3.u16MSS,
7476 pDesc->context.dw3.fDD?"DD":"");
7477 break;
7478 case E1K_DTYP_DATA:
7479 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
7480 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
7481 pDesc->data.u64BufAddr,
7482 pDesc->data.cmd.u20DTALEN,
7483 pDesc->data.cmd.fIDE ? " IDE" :"",
7484 pDesc->data.cmd.fVLE ? " VLE" :"",
7485 pDesc->data.cmd.fRPS ? " RPS" :"",
7486 pDesc->data.cmd.fRS ? " RS" :"",
7487 pDesc->data.cmd.fTSE ? " TSE" :"",
7488 pDesc->data.cmd.fIFCS? " IFCS":"",
7489 pDesc->data.cmd.fEOP ? " EOP" :"",
7490 pDesc->data.dw3.fDD ? " DD" :"",
7491 pDesc->data.dw3.fEC ? " EC" :"",
7492 pDesc->data.dw3.fLC ? " LC" :"",
7493 pDesc->data.dw3.fTXSM? " TXSM":"",
7494 pDesc->data.dw3.fIXSM? " IXSM":"",
7495 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
7496 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
7497 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
7498 break;
7499 case E1K_DTYP_LEGACY:
7500 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
7501 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
7502 pDesc->data.u64BufAddr,
7503 pDesc->legacy.cmd.u16Length,
7504 pDesc->legacy.cmd.fIDE ? " IDE" :"",
7505 pDesc->legacy.cmd.fVLE ? " VLE" :"",
7506 pDesc->legacy.cmd.fRPS ? " RPS" :"",
7507 pDesc->legacy.cmd.fRS ? " RS" :"",
7508 pDesc->legacy.cmd.fIC ? " IC" :"",
7509 pDesc->legacy.cmd.fIFCS? " IFCS":"",
7510 pDesc->legacy.cmd.fEOP ? " EOP" :"",
7511 pDesc->legacy.dw3.fDD ? " DD" :"",
7512 pDesc->legacy.dw3.fEC ? " EC" :"",
7513 pDesc->legacy.dw3.fLC ? " LC" :"",
7514 pDesc->legacy.cmd.u8CSO,
7515 pDesc->legacy.dw3.u8CSS,
7516 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
7517 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
7518 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
7519 break;
7520 default:
7521 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
7522 break;
7523 }
7524
7525 return cbPrintf;
7526}
7527
7528/** Initializes debug helpers (logging format types). */
7529static int e1kR3InitDebugHelpers(void)
7530{
7531 int rc = VINF_SUCCESS;
7532 static bool s_fHelpersRegistered = false;
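    /* IPRT format types are global to the process, so register them only once
       even if several E1000 instances are instantiated. */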
7533 if (!s_fHelpersRegistered)
7534 {
7535 s_fHelpersRegistered = true;
7536 rc = RTStrFormatTypeRegister("e1krxd", e1kR3FmtRxDesc, NULL);
7537 AssertRCReturn(rc, rc);
7538 rc = RTStrFormatTypeRegister("e1ktxd", e1kR3FmtTxDesc, NULL);
7539 AssertRCReturn(rc, rc);
7540 }
7541 return rc;
7542}
7543
7544/**
7545 * Status info callback.
7546 *
7547 * @param pDevIns The device instance.
7548 * @param pHlp The output helpers.
7549 * @param pszArgs The arguments.
7550 */
7551static DECLCALLBACK(void) e1kR3Info(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
7552{
7553 RT_NOREF(pszArgs);
7554 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7555 unsigned i;
7556 // bool fRcvRing = false;
7557 // bool fXmtRing = false;
7558
7559 /*
7560 * Parse args.
7561 if (pszArgs)
7562 {
7563 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
7564 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
7565 }
7566 */
7567
7568 /*
7569 * Show info.
7570 */
7571 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%04x mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7572 pDevIns->iInstance,
7573 PDMDevHlpIoPortGetMappingAddress(pDevIns, pThis->hIoPorts),
7574 PDMDevHlpMmioGetMappingAddress(pDevIns, pThis->hMmioRegion),
7575 &pThis->macConfigured, g_aChips[pThis->eChip].pcszName,
7576 pDevIns->fRCEnabled ? " RC" : "", pDevIns->fR0Enabled ? " R0" : "");
7577
7578 e1kR3CsEnterAsserted(pThis); /* Not sure why but PCNet does it */
7579
7580 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7581 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", g_aE1kRegMap[i].abbrev, pThis->auRegs[i]);
7582
7583 for (i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
7584 {
7585 E1KRAELEM* ra = pThis->aRecAddr.array + i;
7586 if (ra->ctl & RA_CTL_AV)
7587 {
7588 const char *pcszTmp;
7589 switch (ra->ctl & RA_CTL_AS)
7590 {
7591 case 0: pcszTmp = "DST"; break;
7592 case 1: pcszTmp = "SRC"; break;
7593 default: pcszTmp = "reserved";
7594 }
7595 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7596 }
7597 }
7598 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7599 uint32_t rdh = RDH;
7600 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7601 for (i = 0; i < cDescs; ++i)
7602 {
7603 E1KRXDESC desc;
7604 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7605 &desc, sizeof(desc));
7606 if (i == rdh)
7607 pHlp->pfnPrintf(pHlp, ">>> ");
7608 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7609 }
7610#ifdef E1K_WITH_RXD_CACHE
7611 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7612 pThis->iRxDCurrent, RDH, pThis->nRxDFetched, E1K_RXD_CACHE_SIZE);
7613 if (rdh > pThis->iRxDCurrent)
7614 rdh -= pThis->iRxDCurrent;
7615 else
7616 rdh = cDescs + rdh - pThis->iRxDCurrent;
7617 for (i = 0; i < pThis->nRxDFetched; ++i)
7618 {
7619 if (i == pThis->iRxDCurrent)
7620 pHlp->pfnPrintf(pHlp, ">>> ");
7621 if (cDescs)
7622 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7623 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7624 &pThis->aRxDescriptors[i]);
7625 else
7626 pHlp->pfnPrintf(pHlp, "<lost>: %R[e1krxd]\n",
7627 &pThis->aRxDescriptors[i]);
7628 }
7629#endif /* E1K_WITH_RXD_CACHE */
7630
7631 cDescs = TDLEN / sizeof(E1KTXDESC);
7632 uint32_t tdh = TDH;
7633 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7634 for (i = 0; i < cDescs; ++i)
7635 {
7636 E1KTXDESC desc;
7637 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7638 &desc, sizeof(desc));
7639 if (i == tdh)
7640 pHlp->pfnPrintf(pHlp, ">>> ");
7641 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7642 }
7643#ifdef E1K_WITH_TXD_CACHE
7644 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7645 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE);
7646 if (tdh > pThis->iTxDCurrent)
7647 tdh -= pThis->iTxDCurrent;
7648 else
7649 tdh = cDescs + tdh - pThis->iTxDCurrent;
7650 for (i = 0; i < pThis->nTxDFetched; ++i)
7651 {
7652 if (i == pThis->iTxDCurrent)
7653 pHlp->pfnPrintf(pHlp, ">>> ");
7654 if (cDescs)
7655 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7656 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7657 &pThis->aTxDescriptors[i]);
7658 else
7659 pHlp->pfnPrintf(pHlp, "<lost>: %R[e1ktxd]\n",
7660 &pThis->aTxDescriptors[i]);
7661 }
7662#endif /* E1K_WITH_TXD_CACHE */
7663
7664
7665#ifdef E1K_INT_STATS
7666 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pThis->uStatIntTry);
7667 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pThis->uStatInt);
7668 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pThis->uStatIntLower);
7669 pHlp->pfnPrintf(pHlp, "ICR outside ISR : %d\n", pThis->uStatNoIntICR);
7670 pHlp->pfnPrintf(pHlp, "IMS raised ints : %d\n", pThis->uStatIntIMS);
7671 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pThis->uStatIntSkip);
7672 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pThis->uStatIntMasked);
7673 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pThis->uStatIntEarly);
7674 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pThis->uStatIntLate);
7675 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pThis->iStatIntLost);
7676 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pThis->uStatIntRx);
7677 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pThis->uStatIntTx);
7678 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pThis->uStatIntICS);
7679 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pThis->uStatIntRDTR);
7680 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pThis->uStatIntRXDMT0);
7681 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pThis->uStatIntTXQE);
7682 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pThis->uStatTxIDE);
7683 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pThis->uStatTxDelayed);
7684 pHlp->pfnPrintf(pHlp, "TX delayed expired: %d\n", pThis->uStatTxDelayExp);
7685 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pThis->uStatTxNoRS);
7686 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pThis->uStatTAD);
7687 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pThis->uStatTID);
7688 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pThis->uStatRAD);
7689 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pThis->uStatRID);
7690 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pThis->uStatDescCtx);
7691 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pThis->uStatDescDat);
7692 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pThis->uStatDescLeg);
7693 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pThis->uStatRxFrm);
7694 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pThis->uStatTxFrm);
7695 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pThis->uStatTx1514);
7696 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pThis->uStatTx2962);
7697 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pThis->uStatTx4410);
7698 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pThis->uStatTx5858);
7699 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pThis->uStatTx7306);
7700 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pThis->uStatTx8754);
7701 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pThis->uStatTx16384);
7702 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pThis->uStatTx32768);
7703 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pThis->uStatTxLarge);
7704#endif /* E1K_INT_STATS */
7705
7706 e1kCsLeave(pThis);
7707}
7708
7709
7710
7711/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
7712
7713/**
7714 * Detach notification.
7715 *
7716 * One port on the network card has been disconnected from the network.
7717 *
7718 * @param pDevIns The device instance.
7719 * @param iLUN The logical unit which is being detached.
7720 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7721 */
7722static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7723{
7724 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7725 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7726 Log(("%s e1kR3Detach:\n", pThis->szPrf));
7727 RT_NOREF(fFlags);
7728
7729 AssertLogRelReturnVoid(iLUN == 0);
7730
7731 e1kR3CsEnterAsserted(pThis);
7732
7733 /* Mark device as detached. */
7734 pThis->fIsAttached = false;
7735 /*
7736 * Zero some important members.
7737 */
7738 pThisCC->pDrvBase = NULL;
7739 pThisCC->pDrvR3 = NULL;
7740#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
7741 pThisR0->pDrvR0 = NIL_RTR0PTR;
7742 pThisRC->pDrvRC = NIL_RTRCPTR;
7743#endif
7744
7745 PDMDevHlpCritSectLeave(pDevIns, &pThis->cs);
7746}
7747
7748/**
7749 * Attach the network driver.
7750 *
7751 * One port on the network card has been connected to a network.
7752 *
7753 * @returns VBox status code.
7754 * @param pDevIns The device instance.
7755 * @param iLUN The logical unit which is being attached.
7756 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7757 *
7758 * @remarks This code path is not used during construction.
7759 */
7760static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7761{
7762 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7763 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7764 LogFlow(("%s e1kR3Attach:\n", pThis->szPrf));
7765 RT_NOREF(fFlags);
7766
7767 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7768
7769 e1kR3CsEnterAsserted(pThis);
7770
7771 /*
7772 * Attach the driver.
7773 */
7774 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThisCC->IBase, &pThisCC->pDrvBase, "Network Port");
7775 if (RT_SUCCESS(rc))
7776 {
7777 pThisCC->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMINETWORKUP);
7778 AssertMsgStmt(pThisCC->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7779 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7780 if (RT_SUCCESS(rc))
7781 {
7782#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
7783 pThisR0->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7784 pThisRC->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7785#endif
7786 /* Mark device as attached. */
7787 pThis->fIsAttached = true;
7788 }
7789 }
7790 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7791 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7792 {
7793 /* This should never happen because this function is not called
7794 * if there is no driver to attach! */
7795 Log(("%s No attached driver!\n", pThis->szPrf));
7796 }
7797
7798 /*
7799 * Temporarily set the link down if it was up so that the guest will know
7800 * that we have changed the configuration of the network card.
7801 */
7802 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7803 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
7804
7805 PDMDevHlpCritSectLeave(pDevIns, &pThis->cs);
7806 return rc;
7807}
7808
7809/**
7810 * @copydoc FNPDMDEVPOWEROFF
7811 */
7812static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7813{
7814 /* Poke thread waiting for buffer space. */
7815 e1kWakeupReceive(pDevIns, PDMDEVINS_2_DATA(pDevIns, PE1KSTATE));
7816}
7817
7818/**
7819 * @copydoc FNPDMDEVRESET
7820 */
7821static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7822{
7823 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7824 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
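    /* Cancel all pending timers and drop any half-built transmit buffer before
       clearing the soft state; e1kR3HardReset below restores the power-on state. */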
7825#ifdef E1K_TX_DELAY
7826 e1kCancelTimer(pDevIns, pThis, pThis->hTXDTimer);
7827#endif /* E1K_TX_DELAY */
7828 e1kCancelTimer(pDevIns, pThis, pThis->hIntTimer);
7829 e1kCancelTimer(pDevIns, pThis, pThis->hLUTimer);
7830 e1kXmitFreeBuf(pThis, pThisCC);
7831 pThis->u16TxPktLen = 0;
7832 pThis->fIPcsum = false;
7833 pThis->fTCPcsum = false;
7834 pThis->fIntMaskUsed = false;
7835 pThis->fDelayInts = false;
7836 pThis->fLocked = false;
7837 pThis->u64AckedAt = 0;
7838 e1kR3HardReset(pDevIns, pThis, pThisCC);
7839}
7840
7841/**
7842 * @copydoc FNPDMDEVSUSPEND
7843 */
7844static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7845{
7846 /* Poke thread waiting for buffer space. */
7847 e1kWakeupReceive(pDevIns, PDMDEVINS_2_DATA(pDevIns, PE1KSTATE));
7848}
7849
7850/**
7851 * Device relocation callback.
7852 *
7853 * When this callback is called, the device instance data and, if the
7854 * device has a GC component, the GC code are being relocated and/or the
7855 * selectors have been changed. The device must use this chance to perform the
7856 * necessary pointer relocations and data updates.
7857 *
7858 * Before the GC code is executed the first time, this function will be
7859 * called with a 0 delta so GC pointer calculations can be done in one place.
7860 *
7861 * @param pDevIns Pointer to the device instance.
7862 * @param offDelta The relocation delta relative to the old location.
7863 *
7864 * @remark A relocation CANNOT fail.
7865 */
7866static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7867{
7868 PE1KSTATERC pThisRC = PDMINS_2_DATA_RC(pDevIns, PE1KSTATERC);
7869 if (pThisRC)
7870 pThisRC->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7871 RT_NOREF(offDelta);
7872}
7873
7874/**
7875 * Destruct a device instance.
7876 *
7877 * We need to free non-VM resources only.
7878 *
7879 * @returns VBox status code.
7880 * @param pDevIns The device instance data.
7881 * @thread EMT
7882 */
7883static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7884{
7885 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7886 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7887
7888 e1kDumpState(pThis);
7889 E1kLog(("%s Destroying instance\n", pThis->szPrf));
7890 if (PDMDevHlpCritSectIsInitialized(pDevIns, &pThis->cs))
7891 {
7892 if (pThis->hEventMoreRxDescAvail != NIL_SUPSEMEVENT)
7893 {
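        /* Wake any thread still waiting for more RX descriptors and give it a
           chance to run (RTThreadYield) before the event handle is closed. */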
7894 PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventMoreRxDescAvail);
7895 RTThreadYield();
7896 PDMDevHlpSUPSemEventClose(pDevIns, pThis->hEventMoreRxDescAvail);
7897 pThis->hEventMoreRxDescAvail = NIL_SUPSEMEVENT;
7898 }
7899#ifdef E1K_WITH_TX_CS
7900 PDMDevHlpCritSectDelete(pDevIns, &pThis->csTx);
7901#endif /* E1K_WITH_TX_CS */
7902 PDMDevHlpCritSectDelete(pDevIns, &pThis->csRx);
7903 PDMDevHlpCritSectDelete(pDevIns, &pThis->cs);
7904 }
7905 return VINF_SUCCESS;
7906}
7907
7908
7909/**
7910 * Set PCI configuration space registers.
7911 *
7912 * @param pPciDev Pointer to the PCI device structure to configure.
 * @param eChip The emulated chip variant, selecting vendor/device/subsystem IDs.
7913 * @thread EMT
7914 */
7915static void e1kR3ConfigurePciDev(PPDMPCIDEV pPciDev, E1KCHIP eChip)
7916{
7917 Assert(eChip < RT_ELEMENTS(g_aChips));
7918 /* Configure PCI Device, assume 32-bit mode ******************************/
7919 PDMPciDevSetVendorId(pPciDev, g_aChips[eChip].uPCIVendorId);
7920 PDMPciDevSetDeviceId(pPciDev, g_aChips[eChip].uPCIDeviceId);
7921 PDMPciDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_aChips[eChip].uPCISubsystemVendorId);
7922 PDMPciDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_aChips[eChip].uPCISubsystemId);
7923
7924 PDMPciDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7925 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7926 PDMPciDevSetWord( pPciDev, VBOX_PCI_STATUS,
7927 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7928 /* Stepping A2 */
7929 PDMPciDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7930 /* Ethernet adapter */
7931 PDMPciDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7932 PDMPciDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7933 /* normal single function Ethernet controller */
7934 PDMPciDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7935 /* Memory Register Base Address */
7936 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7937 /* Memory Flash Base Address */
7938 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7939 /* IO Register Base Address */
7940 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7941 /* Expansion ROM Base Address */
7942 PDMPciDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7943 /* Capabilities Pointer */
7944 PDMPciDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
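    /* The capability list starts at 0xDC (PCI Power Management) and chains to the
       PCI-X capability at 0xE4; with E1K_WITH_MSI the PCI-X capability in turn
       points to the MSI capability at 0x80 (see the MSI registration in
       e1kR3Construct). */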
7945 /* Interrupt Pin: INTA# */
7946 PDMPciDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7947 /* Max_Lat/Min_Gnt: very high priority and time slice */
7948 PDMPciDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7949 PDMPciDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7950
7951 /* PCI Power Management Registers ****************************************/
7952 /* Capability ID: PCI Power Management Registers */
7953 PDMPciDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7954 /* Next Item Pointer: PCI-X */
7955 PDMPciDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7956 /* Power Management Capabilities: PM disabled, DSI */
7957 PDMPciDevSetWord( pPciDev, 0xDC + 2,
7958 0x0002 | VBOX_PCI_PM_CAP_DSI);
7959 /* Power Management Control / Status Register: PM disabled */
7960 PDMPciDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7961 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7962 PDMPciDevSetByte( pPciDev, 0xDC + 6, 0x00);
7963 /* Data Register: PM disabled, always 0 */
7964 PDMPciDevSetByte( pPciDev, 0xDC + 7, 0x00);
7965
7966 /* PCI-X Configuration Registers *****************************************/
7967 /* Capability ID: PCI-X Configuration Registers */
7968 PDMPciDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7969#ifdef E1K_WITH_MSI
7970 PDMPciDevSetByte( pPciDev, 0xE4 + 1, 0x80);
7971#else
7972 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7973 PDMPciDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7974#endif
7975 /* PCI-X Command: Enable Relaxed Ordering */
7976 PDMPciDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7977 /* PCI-X Status: 32-bit, 66 MHz */
7978 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7979 PDMPciDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
7980}
7981
7982/**
7983 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7984 */
7985static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7986{
7987 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7988 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7989 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7990 int rc;
7991
7992 /*
7993 * Initialize the instance data (state).
7994 * Note! Caller has initialized it to ZERO already.
7995 */
7996 RTStrPrintf(pThis->szPrf, sizeof(pThis->szPrf), "E1000#%d", iInstance);
7997 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", pThis->szPrf, sizeof(E1KRXDESC)));
7998 pThis->hEventMoreRxDescAvail = NIL_SUPSEMEVENT;
7999 pThis->u16TxPktLen = 0;
8000 pThis->fIPcsum = false;
8001 pThis->fTCPcsum = false;
8002 pThis->fIntMaskUsed = false;
8003 pThis->fDelayInts = false;
8004 pThis->fLocked = false;
8005 pThis->u64AckedAt = 0;
8006 pThis->led.u32Magic = PDMLED_MAGIC;
8007 pThis->u32PktNo = 1;
8008 pThis->fIsAttached = false;
8009
8010 pThisCC->pDevInsR3 = pDevIns;
8011 pThisCC->pShared = pThis;
8012
8013 /* Interfaces */
8014 pThisCC->IBase.pfnQueryInterface = e1kR3QueryInterface;
8015
8016 pThisCC->INetworkDown.pfnWaitReceiveAvail = e1kR3NetworkDown_WaitReceiveAvail;
8017 pThisCC->INetworkDown.pfnReceive = e1kR3NetworkDown_Receive;
8018 pThisCC->INetworkDown.pfnXmitPending = e1kR3NetworkDown_XmitPending;
8019
8020 pThisCC->ILeds.pfnQueryStatusLed = e1kR3QueryStatusLed;
8021
8022 pThisCC->INetworkConfig.pfnGetMac = e1kR3GetMac;
8023 pThisCC->INetworkConfig.pfnGetLinkState = e1kR3GetLinkState;
8024 pThisCC->INetworkConfig.pfnSetLinkState = e1kR3SetLinkState;
8025
8026 /*
8027 * Internal validations.
8028 */
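    /* Verify that the first E1K_NUM_OF_BINARY_SEARCHABLE entries of g_aE1kRegMap
       are sorted by ascending offset (with non-decreasing end offsets), which the
       binary register lookup relies on. */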
8029 for (uint32_t iReg = 1; iReg < E1K_NUM_OF_BINARY_SEARCHABLE; iReg++)
8030 AssertLogRelMsgReturn( g_aE1kRegMap[iReg].offset > g_aE1kRegMap[iReg - 1].offset
8031 && g_aE1kRegMap[iReg].offset + g_aE1kRegMap[iReg].size
8032 >= g_aE1kRegMap[iReg - 1].offset + g_aE1kRegMap[iReg - 1].size,
8033 ("%s@%#xLB%#x vs %s@%#xLB%#x\n",
8034 g_aE1kRegMap[iReg].abbrev, g_aE1kRegMap[iReg].offset, g_aE1kRegMap[iReg].size,
8035 g_aE1kRegMap[iReg - 1].abbrev, g_aE1kRegMap[iReg - 1].offset, g_aE1kRegMap[iReg - 1].size),
8036 VERR_INTERNAL_ERROR_4);
8037
8038 /*
8039 * Validate configuration.
8040 */
8041 PDMDEV_VALIDATE_CONFIG_RETURN(pDevIns,
8042 "MAC|"
8043 "CableConnected|"
8044 "AdapterType|"
8045 "LineSpeed|"
8046 "ItrEnabled|"
8047 "ItrRxEnabled|"
8048 "EthernetCRC|"
8049 "GSOEnabled|"
8050 "LinkUpDelay|"
8051 "StatNo",
8052 "");
8053
8054 /** @todo LineSpeed unused! */
8055
8056 /*
8057 * Get config params
8058 */
8059 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
8060 rc = pHlp->pfnCFGMQueryBytes(pCfg, "MAC", pThis->macConfigured.au8, sizeof(pThis->macConfigured.au8));
8061 if (RT_FAILURE(rc))
8062 return PDMDEV_SET_ERROR(pDevIns, rc,
8063 N_("Configuration error: Failed to get MAC address"));
8064 rc = pHlp->pfnCFGMQueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
8065 if (RT_FAILURE(rc))
8066 return PDMDEV_SET_ERROR(pDevIns, rc,
8067 N_("Configuration error: Failed to get the value of 'CableConnected'"));
8068 rc = pHlp->pfnCFGMQueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
8069 if (RT_FAILURE(rc))
8070 return PDMDEV_SET_ERROR(pDevIns, rc,
8071 N_("Configuration error: Failed to get the value of 'AdapterType'"));
8072 Assert(pThis->eChip <= E1K_CHIP_82545EM);
8073
8074 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
8075 if (RT_FAILURE(rc))
8076 return PDMDEV_SET_ERROR(pDevIns, rc,
8077 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
8078
8079 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
8080 if (RT_FAILURE(rc))
8081 return PDMDEV_SET_ERROR(pDevIns, rc,
8082 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
8083
8084 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "ItrEnabled", &pThis->fItrEnabled, false);
8085 if (RT_FAILURE(rc))
8086 return PDMDEV_SET_ERROR(pDevIns, rc,
8087 N_("Configuration error: Failed to get the value of 'ItrEnabled'"));
8088
8089 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "ItrRxEnabled", &pThis->fItrRxEnabled, true);
8090 if (RT_FAILURE(rc))
8091 return PDMDEV_SET_ERROR(pDevIns, rc,
8092 N_("Configuration error: Failed to get the value of 'ItrRxEnabled'"));
8093
8094 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "TidEnabled", &pThis->fTidEnabled, false);
8095 if (RT_FAILURE(rc))
8096 return PDMDEV_SET_ERROR(pDevIns, rc,
8097 N_("Configuration error: Failed to get the value of 'TidEnabled'"));
8098
8099 /*
8100 * Increased the link up delay from 3 to 5 seconds to make sure a guest notices the link loss
8101 * and updates its network configuration when the link is restored. See @bugref{10114}.
8102 */
8103 rc = pHlp->pfnCFGMQueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 5000); /* ms */
8104 if (RT_FAILURE(rc))
8105 return PDMDEV_SET_ERROR(pDevIns, rc,
8106 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
8107 Assert(pThis->cMsLinkUpDelay <= 300000); /* at most 5 minutes */
8108 if (pThis->cMsLinkUpDelay > 5000)
8109 LogRel(("%s: WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
8110 else if (pThis->cMsLinkUpDelay == 0)
8111 LogRel(("%s: WARNING! Link up delay is disabled!\n", pThis->szPrf));
8112
8113 uint32_t uStatNo = (uint32_t)iInstance;
8114 rc = pHlp->pfnCFGMQueryU32Def(pCfg, "StatNo", &uStatNo, (uint32_t)iInstance);
8115 if (RT_FAILURE(rc))
8116 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: Failed to get the \"StatNo\" value"));
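    /* StatNo selects the instance number used in the "/Public/NetAdapter/<n>/..."
       statistics paths registered below; it defaults to the device instance. */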
8117
8118 LogRel(("%s: Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s Itr=%s ItrRx=%s TID=%s R0=%s RC=%s\n", pThis->szPrf,
8119 g_aChips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
8120 pThis->fEthernetCRC ? "on" : "off",
8121 pThis->fGSOEnabled ? "enabled" : "disabled",
8122 pThis->fItrEnabled ? "enabled" : "disabled",
8123 pThis->fItrRxEnabled ? "enabled" : "disabled",
8124 pThis->fTidEnabled ? "enabled" : "disabled",
8125 pDevIns->fR0Enabled ? "enabled" : "disabled",
8126 pDevIns->fRCEnabled ? "enabled" : "disabled"));
8127
8128 /*
8129 * Initialize sub-components and register everything with the VMM.
8130 */
8131
8132 /* Initialize the EEPROM. */
8133 pThisCC->eeprom.init(pThis->macConfigured);
8134
8135 /* Initialize internal PHY. */
8136 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
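    /* The EEPROM contents are derived from the MAC address obtained from CFGM; the
       PHY identifier reported to the guest depends on the chip type (M881000 for
       the 82543GC, M881011 for the others). */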
8137
8138 /* Initialize critical sections. We do our own locking. */
8139 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
8140 AssertRCReturn(rc, rc);
8141
8142 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "E1000#%d", iInstance);
8143 AssertRCReturn(rc, rc);
8144 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "E1000#%dRX", iInstance);
8145 AssertRCReturn(rc, rc);
8146#ifdef E1K_WITH_TX_CS
8147 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "E1000#%dTX", iInstance);
8148 AssertRCReturn(rc, rc);
8149#endif
8150
8151 /* Saved state registration. */
8152 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
8153 NULL, e1kR3LiveExec, NULL,
8154 e1kR3SavePrep, e1kR3SaveExec, NULL,
8155 e1kR3LoadPrep, e1kR3LoadExec, e1kR3LoadDone);
8156 AssertRCReturn(rc, rc);
8157
8158 /* Set PCI config registers and register ourselves with the PCI bus. */
8159 PDMPCIDEV_ASSERT_VALID(pDevIns, pDevIns->apPciDevs[0]);
8160 e1kR3ConfigurePciDev(pDevIns->apPciDevs[0], pThis->eChip);
8161 rc = PDMDevHlpPCIRegister(pDevIns, pDevIns->apPciDevs[0]);
8162 AssertRCReturn(rc, rc);
8163
8164#ifdef E1K_WITH_MSI
8165 PDMMSIREG MsiReg;
8166 RT_ZERO(MsiReg);
8167 MsiReg.cMsiVectors = 1;
8168 MsiReg.iMsiCapOffset = 0x80;
8169 MsiReg.iMsiNextOffset = 0x0;
8170 MsiReg.fMsi64bit = false;
8171 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
8172 AssertRCReturn(rc, rc);
8173#endif
8174
8175 /*
8176 * Map our registers to memory space (region 0, see e1kR3ConfigurePciDev)
8177 * From the spec (regarding flags):
8178 * For registers that should be accessed as 32-bit double words,
8179 * partial writes (less than a 32-bit double word) are ignored.
8180 * Partial reads return all 32 bits of data regardless of the
8181 * byte enables.
8182 */
8183 rc = PDMDevHlpMmioCreateEx(pDevIns, E1K_MM_SIZE, IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD,
8184 pDevIns->apPciDevs[0], 0 /*iPciRegion*/,
8185 e1kMMIOWrite, e1kMMIORead, NULL /*pfnFill*/, NULL /*pvUser*/, "E1000", &pThis->hMmioRegion);
8186 AssertRCReturn(rc, rc);
8187 rc = PDMDevHlpPCIIORegionRegisterMmio(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, pThis->hMmioRegion, NULL);
8188 AssertRCReturn(rc, rc);
8189
8190 /* Map our registers to IO space (region 2, see e1kR3ConfigurePciDev) */
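    /* The I/O BAR is a small indirect window: the guest writes a register offset
       to IOADDR (BAR offset 0) and then reads or writes that register through
       IODATA (BAR offset 4). */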
8191 static IOMIOPORTDESC const s_aExtDescs[] =
8192 {
8193 { "IOADDR", "IOADDR", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL },
8194 { "IODATA", "IODATA", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL },
8195 { NULL, NULL, NULL, NULL }
8196 };
8197 rc = PDMDevHlpIoPortCreate(pDevIns, E1K_IOPORT_SIZE, pDevIns->apPciDevs[0], 2 /*iPciRegion*/,
8198 e1kIOPortOut, e1kIOPortIn, NULL /*pvUser*/, "E1000", s_aExtDescs, &pThis->hIoPorts);
8199 AssertRCReturn(rc, rc);
8200 rc = PDMDevHlpPCIIORegionRegisterIo(pDevIns, 2, E1K_IOPORT_SIZE, pThis->hIoPorts);
8201 AssertRCReturn(rc, rc);
8202
8203 /* Create transmit queue */
8204 rc = PDMDevHlpTaskCreate(pDevIns, PDMTASK_F_RZ, "E1000-Xmit", e1kR3TxTaskCallback, NULL, &pThis->hTxTask);
8205 AssertRCReturn(rc, rc);
8206
8207#ifdef E1K_TX_DELAY
8208 /* Create Transmit Delay Timer */
8209 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxDelayTimer, pThis,
8210 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Xmit Delay", &pThis->hTXDTimer);
8211 AssertRCReturn(rc, rc);
8212 rc = PDMDevHlpTimerSetCritSect(pDevIns, pThis->hTXDTimer, &pThis->csTx);
8213 AssertRCReturn(rc, rc);
8214#endif /* E1K_TX_DELAY */
8215
8216//#ifdef E1K_USE_TX_TIMERS
8217 if (pThis->fTidEnabled)
8218 {
8219 /* Create Transmit Interrupt Delay Timer */
8220 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxIntDelayTimer, pThis,
8221 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Xmit IRQ Delay", &pThis->hTIDTimer);
8222 AssertRCReturn(rc, rc);
8223
8224# ifndef E1K_NO_TAD
8225 /* Create Transmit Absolute Delay Timer */
8226 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxAbsDelayTimer, pThis,
8227 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Xmit Abs Delay", &pThis->hTADTimer);
8228 AssertRCReturn(rc, rc);
8229# endif /* E1K_NO_TAD */
8230 }
8231//#endif /* E1K_USE_TX_TIMERS */
8232
8233#ifdef E1K_USE_RX_TIMERS
8234 /* Create Receive Interrupt Delay Timer */
8235 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3RxIntDelayTimer, pThis,
8236 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Recv IRQ Delay", &pThis->hRIDTimer);
8237 AssertRCReturn(rc, rc);
8238
8239 /* Create Receive Absolute Delay Timer */
8240 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3RxAbsDelayTimer, pThis,
8241 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Recv Abs Delay", &pThis->hRADTimer);
8242 AssertRCReturn(rc, rc);
8243#endif /* E1K_USE_RX_TIMERS */
8244
8245 /* Create Late Interrupt Timer */
8246 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3LateIntTimer, pThis,
8247 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Late IRQ", &pThis->hIntTimer);
8248 AssertRCReturn(rc, rc);
8249
8250 /* Create Link Up Timer */
8251 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3LinkUpTimer, pThis,
8252 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Link Up", &pThis->hLUTimer);
8253 AssertRCReturn(rc, rc);
8254
8255 /* Register the info item */
8256 char szTmp[20];
8257 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
8258 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kR3Info);
8259
8260 /* Status driver */
8261 PPDMIBASE pBase;
8262 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThisCC->IBase, &pBase, "Status Port");
8263 if (RT_SUCCESS(rc))
8264 pThisCC->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
8265 else if (rc == VERR_PDM_NO_ATTACHED_DRIVER)
8266 rc = VINF_SUCCESS;
8267 else
8268 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
8269
8270 /* Network driver */
8271 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThisCC->IBase, &pThisCC->pDrvBase, "Network Port");
8272 if (RT_SUCCESS(rc))
8273 {
8274 pThisCC->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMINETWORKUP);
8275 AssertMsgReturn(pThisCC->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
8276
8277#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
8278 pThisR0->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASER0), PDMINETWORKUP);
8279 pThisRC->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASERC), PDMINETWORKUP);
8280#endif
8281 /* Mark device as attached. */
8282 pThis->fIsAttached = true;
8283 }
8284 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
8285 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
8286 {
8287 /* No error! */
8288 E1kLog(("%s This adapter is not attached to any network!\n", pThis->szPrf));
8289 }
8290 else
8291 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
8292
8293 rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->hEventMoreRxDescAvail);
8294 AssertRCReturn(rc, rc);
8295
8296 rc = e1kR3InitDebugHelpers();
8297 AssertRCReturn(rc, rc);
8298
8299 e1kR3HardReset(pDevIns, pThis, pThisCC);
8300
8301 /*
8302 * Register statistics.
8303 * The /Public/ bits are official and used by session info in the GUI.
8304 */
8305 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
8306 "Amount of data received", "/Public/NetAdapter/%u/BytesReceived", uStatNo);
8307 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
8308 "Amount of data transmitted", "/Public/NetAdapter/%u/BytesTransmitted", uStatNo);
8309 PDMDevHlpSTAMRegisterF(pDevIns, &pDevIns->iInstance, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
8310 "Device instance number", "/Public/NetAdapter/%u/%s", uStatNo, pDevIns->pReg->szName);
8311
8312 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, "ReceiveBytes", STAMUNIT_BYTES, "Amount of data received");
8313 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, "TransmitBytes", STAMUNIT_BYTES, "Amount of data transmitted");
8314
8315#if defined(VBOX_WITH_STATISTICS)
8316 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, "MMIO/ReadRZ", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ");
8317 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, "MMIO/ReadR3", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3");
8318 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, "MMIO/WriteRZ", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ");
8319 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, "MMIO/WriteR3", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3");
8320 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, "EEPROM/Read", STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads");
8321 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, "EEPROM/Write", STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes");
8322 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, "IO/ReadRZ", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ");
8323 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, "IO/ReadR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3");
8324 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, "IO/WriteRZ", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ");
8325 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, "IO/WriteR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3");
8326 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, "LateInt/Timer", STAMUNIT_TICKS_PER_CALL, "Profiling late int timer");
8327 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, "LateInt/Occured", STAMUNIT_OCCURENCES, "Number of late interrupts");
8328 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, "Interrupts/Raised", STAMUNIT_OCCURENCES, "Number of raised interrupts");
8329 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, "Interrupts/Prevented", STAMUNIT_OCCURENCES, "Number of prevented interrupts");
8330 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, "Receive/Total", STAMUNIT_TICKS_PER_CALL, "Profiling receive");
8331 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, "Receive/CRC", STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming");
8332 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, "Receive/Filter", STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering");
8333 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, "Receive/Store", STAMUNIT_TICKS_PER_CALL, "Profiling receive storing");
8334 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, "RxOverflow", STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows");
8335 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatRxOverflowWakeupRZ, STAMTYPE_COUNTER, "RxOverflowWakeupRZ", STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups in RZ");
8336 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatRxOverflowWakeupR3, STAMTYPE_COUNTER, "RxOverflowWakeupR3", STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups in R3");
8337 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, "Transmit/TotalRZ", STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ");
8338 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, "Transmit/TotalR3", STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3");
8339 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, "Transmit/SendRZ", STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ");
8340 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, "Transmit/SendR3", STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3");
8341
8342 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, "TxDesc/ContexNormal", STAMUNIT_OCCURENCES, "Number of normal context descriptors");
8343 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, "TxDesc/ContextTSE", STAMUNIT_OCCURENCES, "Number of TSE context descriptors");
8344 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, "TxDesc/Data", STAMUNIT_OCCURENCES, "Number of TX data descriptors");
8345 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, "TxDesc/Legacy", STAMUNIT_OCCURENCES, "Number of TX legacy descriptors");
8346 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, "TxDesc/TSEData", STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors");
8347 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, "TxPath/Fallback", STAMUNIT_OCCURENCES, "Fallback TSE descriptor path");
8348 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, "TxPath/GSO", STAMUNIT_OCCURENCES, "GSO TSE descriptor path");
8349 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, "TxPath/Normal", STAMUNIT_OCCURENCES, "Regular descriptor path");
8350 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, "PHYAccesses", STAMUNIT_OCCURENCES, "Number of PHY accesses");
8351 for (unsigned iReg = 0; iReg < E1K_NUM_OF_REGS; iReg++)
8352 {
8353 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
8354 g_aE1kRegMap[iReg].name, "Regs/%s-Reads", g_aE1kRegMap[iReg].abbrev);
8355 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
8356 g_aE1kRegMap[iReg].name, "Regs/%s-Writes", g_aE1kRegMap[iReg].abbrev);
8357 }
8358#endif /* VBOX_WITH_STATISTICS */
8359
8360#ifdef E1K_INT_STATS
8361 PDMDevHlpSTAMRegister(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, "u64ArmedAt", STAMUNIT_NS, NULL);
8362 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, "uStatMaxTxDelay", STAMUNIT_NS, NULL);
8363 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatInt, STAMTYPE_U32, "uStatInt", STAMUNIT_NS, NULL);
8364 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, "uStatIntTry", STAMUNIT_NS, NULL);
8365 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, "uStatIntLower", STAMUNIT_NS, NULL);
8366 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatNoIntICR, STAMTYPE_U32, "uStatNoIntICR", STAMUNIT_NS, NULL);
8367 PDMDevHlpSTAMRegister(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, "iStatIntLost", STAMUNIT_NS, NULL);
8368 PDMDevHlpSTAMRegister(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, "iStatIntLostOne", STAMUNIT_NS, NULL);
8369 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntIMS, STAMTYPE_U32, "uStatIntIMS", STAMUNIT_NS, NULL);
8370 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, "uStatIntSkip", STAMUNIT_NS, NULL);
8371 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, "uStatIntLate", STAMUNIT_NS, NULL);
8372 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, "uStatIntMasked", STAMUNIT_NS, NULL);
8373 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, "uStatIntEarly", STAMUNIT_NS, NULL);
8374 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, "uStatIntRx", STAMUNIT_NS, NULL);
8375 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, "uStatIntTx", STAMUNIT_NS, NULL);
8376 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, "uStatIntICS", STAMUNIT_NS, NULL);
8377 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, "uStatIntRDTR", STAMUNIT_NS, NULL);
8378 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, "uStatIntRXDMT0", STAMUNIT_NS, NULL);
8379 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, "uStatIntTXQE", STAMUNIT_NS, NULL);
8380 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, "uStatTxNoRS", STAMUNIT_NS, NULL);
8381 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, "uStatTxIDE", STAMUNIT_NS, NULL);
8382 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, "uStatTxDelayed", STAMUNIT_NS, NULL);
8383 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, "uStatTxDelayExp", STAMUNIT_NS, NULL);
8384 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, "uStatTAD", STAMUNIT_NS, NULL);
8385 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTID, STAMTYPE_U32, "uStatTID", STAMUNIT_NS, NULL);
8386 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, "uStatRAD", STAMUNIT_NS, NULL);
8387 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatRID, STAMTYPE_U32, "uStatRID", STAMUNIT_NS, NULL);
8388 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, "uStatRxFrm", STAMUNIT_NS, NULL);
8389 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, "uStatTxFrm", STAMUNIT_NS, NULL);
8390 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, "uStatDescCtx", STAMUNIT_NS, NULL);
8391 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, "uStatDescDat", STAMUNIT_NS, NULL);
8392 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, "uStatDescLeg", STAMUNIT_NS, NULL);
8393 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, "uStatTx1514", STAMUNIT_NS, NULL);
8394 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, "uStatTx2962", STAMUNIT_NS, NULL);
8395 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, "uStatTx4410", STAMUNIT_NS, NULL);
8396 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, "uStatTx5858", STAMUNIT_NS, NULL);
8397 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, "uStatTx7306", STAMUNIT_NS, NULL);
8398 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, "uStatTx8754", STAMUNIT_NS, NULL);
8399 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, "uStatTx16384", STAMUNIT_NS, NULL);
8400 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, "uStatTx32768", STAMUNIT_NS, NULL);
8401 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, "uStatTxLarge", STAMUNIT_NS, NULL);
8402#endif /* E1K_INT_STATS */
8403
8404 return VINF_SUCCESS;
8405}
8406
8407#else /* !IN_RING3 */
8408
8409/**
8410 * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
8411 */
8412static DECLCALLBACK(int) e1kRZConstruct(PPDMDEVINS pDevIns)
8413{
8414 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
8415 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
8416 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
8417
8418 /* Initialize context specific state data: */
8419 pThisCC->CTX_SUFF(pDevIns) = pDevIns;
8420 /** @todo @bugref{9218} ring-0 driver stuff */
8421 pThisCC->CTX_SUFF(pDrv) = NULL;
8422 pThisCC->CTX_SUFF(pTxSg) = NULL;
8423
8424 /* Configure critical sections the same way: */
8425 int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
8426 AssertRCReturn(rc, rc);
8427
8428 /* Set up MMIO and I/O port callbacks for this context: */
8429 rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmioRegion, e1kMMIOWrite, e1kMMIORead, NULL /*pvUser*/);
8430 AssertRCReturn(rc, rc);
8431
8432 rc = PDMDevHlpIoPortSetUpContext(pDevIns, pThis->hIoPorts, e1kIOPortOut, e1kIOPortIn, NULL /*pvUser*/);
8433 AssertRCReturn(rc, rc);
8434
8435 return VINF_SUCCESS;
8436}
8437
8438#endif /* !IN_RING3 */
8439
8440/**
8441 * The device registration structure.
8442 */
8443const PDMDEVREG g_DeviceE1000 =
8444{
8445 /* .u32Version = */ PDM_DEVREG_VERSION,
8446 /* .uReserved0 = */ 0,
8447 /* .szName = */ "e1000",
8448 /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE,
8449 /* .fClass = */ PDM_DEVREG_CLASS_NETWORK,
8450 /* .cMaxInstances = */ ~0U,
8451 /* .uSharedVersion = */ 42,
8452 /* .cbInstanceShared = */ sizeof(E1KSTATE),
8453 /* .cbInstanceCC = */ sizeof(E1KSTATECC),
8454 /* .cbInstanceRC = */ sizeof(E1KSTATERC),
8455 /* .cMaxPciDevices = */ 1,
8456 /* .cMaxMsixVectors = */ 0,
8457 /* .pszDescription = */ "Intel PRO/1000 MT Desktop Ethernet.",
8458#if defined(IN_RING3)
8459 /* .pszRCMod = */ "VBoxDDRC.rc",
8460 /* .pszR0Mod = */ "VBoxDDR0.r0",
8461 /* .pfnConstruct = */ e1kR3Construct,
8462 /* .pfnDestruct = */ e1kR3Destruct,
8463 /* .pfnRelocate = */ e1kR3Relocate,
8464 /* .pfnMemSetup = */ NULL,
8465 /* .pfnPowerOn = */ NULL,
8466 /* .pfnReset = */ e1kR3Reset,
8467 /* .pfnSuspend = */ e1kR3Suspend,
8468 /* .pfnResume = */ NULL,
8469 /* .pfnAttach = */ e1kR3Attach,
8470 /* .pfnDetach = */ e1kR3Detach,
8471 /* .pfnQueryInterface = */ NULL,
8472 /* .pfnInitComplete = */ NULL,
8473 /* .pfnPowerOff = */ e1kR3PowerOff,
8474 /* .pfnSoftReset = */ NULL,
8475 /* .pfnReserved0 = */ NULL,
8476 /* .pfnReserved1 = */ NULL,
8477 /* .pfnReserved2 = */ NULL,
8478 /* .pfnReserved3 = */ NULL,
8479 /* .pfnReserved4 = */ NULL,
8480 /* .pfnReserved5 = */ NULL,
8481 /* .pfnReserved6 = */ NULL,
8482 /* .pfnReserved7 = */ NULL,
8483#elif defined(IN_RING0)
8484 /* .pfnEarlyConstruct = */ NULL,
8485 /* .pfnConstruct = */ e1kRZConstruct,
8486 /* .pfnDestruct = */ NULL,
8487 /* .pfnFinalDestruct = */ NULL,
8488 /* .pfnRequest = */ NULL,
8489 /* .pfnReserved0 = */ NULL,
8490 /* .pfnReserved1 = */ NULL,
8491 /* .pfnReserved2 = */ NULL,
8492 /* .pfnReserved3 = */ NULL,
8493 /* .pfnReserved4 = */ NULL,
8494 /* .pfnReserved5 = */ NULL,
8495 /* .pfnReserved6 = */ NULL,
8496 /* .pfnReserved7 = */ NULL,
8497#elif defined(IN_RC)
8498 /* .pfnConstruct = */ e1kRZConstruct,
8499 /* .pfnReserved0 = */ NULL,
8500 /* .pfnReserved1 = */ NULL,
8501 /* .pfnReserved2 = */ NULL,
8502 /* .pfnReserved3 = */ NULL,
8503 /* .pfnReserved4 = */ NULL,
8504 /* .pfnReserved5 = */ NULL,
8505 /* .pfnReserved6 = */ NULL,
8506 /* .pfnReserved7 = */ NULL,
8507#else
8508# error "Not in IN_RING3, IN_RING0 or IN_RC!"
8509#endif
8510 /* .u32VersionEnd = */ PDM_DEVREG_VERSION
8511};
8512
8513#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */