VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp@78223

Last change on this file since 78223 was 78119, checked in by vboxsync, 6 years ago

Dev/E1000: (bugref:9427) RX buffer size fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 329.4 KB
1/* $Id: DevE1000.cpp 78119 2019-04-12 10:19:15Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2019 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DEV_E1000
33#include <iprt/crc.h>
34#include <iprt/ctype.h>
35#include <iprt/net.h>
36#include <iprt/semaphore.h>
37#include <iprt/string.h>
38#include <iprt/time.h>
39#include <iprt/uuid.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/vmm/pdmnetifs.h>
42#include <VBox/vmm/pdmnetinline.h>
43#include <VBox/param.h>
44#include "VBoxDD.h"
45
46#include "DevEEPROM.h"
47#include "DevE1000Phy.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** @name E1000 Build Options
54 * @{ */
55/** @def E1K_INIT_RA0
56 * E1K_INIT_RA0 forces E1000 to set the first entry in the Receive Address filter
57 * table to the MAC address obtained from CFGM. Most guests read the MAC address
58 * from the EEPROM and write it to RA[0] explicitly, but Mac OS X seems to depend
59 * on it being already set (see @bugref{4657}).
60 */
61#define E1K_INIT_RA0
62/** @def E1K_LSC_ON_RESET
63 * E1K_LSC_ON_RESET causes e1000 to generate a Link Status Change
64 * interrupt after a hard reset. This makes the E1K_LSC_ON_SLU option unnecessary.
65 * With the cable unplugged, LSC is triggered for the 82543GC only.
66 */
67#define E1K_LSC_ON_RESET
68/** @def E1K_LSC_ON_SLU
69 * E1K_LSC_ON_SLU causes E1000 to generate a Link Status Change interrupt when
70 * the guest driver brings up the link via the STATUS.LU bit. Again, the only
71 * guest that requires it is Mac OS X (see @bugref{4657}).
72 */
73//#define E1K_LSC_ON_SLU
74/** @def E1K_INIT_LINKUP_DELAY
75 * E1K_INIT_LINKUP_DELAY prevents the link from going up while the driver is
76 * still initializing (see @bugref{8624}).
77 */
78#define E1K_INIT_LINKUP_DELAY_US (2000 * 1000)
79/** @def E1K_IMS_INT_DELAY_NS
80 * E1K_IMS_INT_DELAY_NS prevents interrupt storms in Windows guests when they
81 * enable interrupts (see @bugref{8624}).
82 */
83#define E1K_IMS_INT_DELAY_NS 100
84/** @def E1K_TX_DELAY
85 * E1K_TX_DELAY aims to improve the guest-host transfer rate for TCP streams by
86 * preventing packets from being sent immediately. It allows several packets to
87 * be sent in a batch, reducing the number of acknowledgments. Note that it
88 * effectively disables the R0 TX path, forcing transmission in R3.
89 */
90//#define E1K_TX_DELAY 150
91/** @def E1K_USE_TX_TIMERS
92 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if the
93 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
94 * register. Enabling it showed no positive effects on existing guests, so it
95 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
96 * Ethernet Controllers Software Developer’s Manual" for more detailed
97 * explanation.
98 */
99//#define E1K_USE_TX_TIMERS
100/** @def E1K_NO_TAD
101 * E1K_NO_TAD disables one of the two timers enabled by E1K_USE_TX_TIMERS, the
102 * Transmit Absolute Delay timer. This timer sets the maximum time interval
103 * during which TX interrupts can be postponed (delayed). It has no effect
104 * if E1K_USE_TX_TIMERS is not defined.
105 */
106//#define E1K_NO_TAD
107/** @def E1K_REL_DEBUG
108 * E1K_REL_DEBUG enables debug logging of l1, l2, l3 in release builds.
109 */
110//#define E1K_REL_DEBUG
111/** @def E1K_INT_STATS
112 * E1K_INT_STATS enables collection of internal statistics used for
113 * debugging delayed interrupts, etc.
114 */
115#define E1K_INT_STATS
116/** @def E1K_WITH_MSI
117 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
118 */
119//#define E1K_WITH_MSI
120/** @def E1K_WITH_TX_CS
121 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
122 */
123#define E1K_WITH_TX_CS
124/** @def E1K_WITH_TXD_CACHE
125 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
126 * single physical memory read (or two if it wraps around the end of the TX
127 * descriptor ring). It is required for proper functioning of bandwidth
128 * resource control as it allows computing the exact sizes of packets prior
129 * to allocating their buffers (see @bugref{5582}).
130 */
131#define E1K_WITH_TXD_CACHE
132/** @def E1K_WITH_RXD_CACHE
133 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
134 * single physical memory read (or two if it wraps around the end of the RX
135 * descriptor ring). Intel's packet driver for DOS needs this option in
136 * order to work properly (see @bugref{6217}).
137 */
138#define E1K_WITH_RXD_CACHE
139/** @def E1K_WITH_PREREG_MMIO
140 * E1K_WITH_PREREG_MMIO enables a new-style MMIO registration and is
141 * currently only done for testing the related PDM, IOM and PGM code. */
142//#define E1K_WITH_PREREG_MMIO
143/* @} */
144/* End of Options ************************************************************/
145
146#ifdef E1K_WITH_TXD_CACHE
147/**
148 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
149 * in the state structure. It limits the number of descriptors loaded in one
150 * batch read. For example, a Linux guest may use up to 20 descriptors per
151 * TSE packet. The largest TSE packet seen (from a Windows guest) was 45 descriptors.
152 */
153# define E1K_TXD_CACHE_SIZE 64u
154#endif /* E1K_WITH_TXD_CACHE */
155
156#ifdef E1K_WITH_RXD_CACHE
157/**
158 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
159 * in the state structure. It limits the number of descriptors loaded in one
160 * batch read. For example, an XP guest adds 15 RX descriptors at a time.
161 */
162# define E1K_RXD_CACHE_SIZE 16u
163#endif /* E1K_WITH_RXD_CACHE */
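/*
 * Illustrative sketch (editor's addition, not part of the device code): when one
 * of the caches above is refilled, the number of descriptors fetched in a single
 * read is bounded by the free cache slots, by how many descriptors the guest has
 * made available, and by the distance to the end of the ring (wrapping costs a
 * second read). Roughly, with cDescAvail and cDescToWrap as placeholder names:
 *
 *    unsigned cToFetch    = RT_MIN(E1K_RXD_CACHE_SIZE - nRxDFetched, cDescAvail);
 *    unsigned cFirstChunk = RT_MIN(cToFetch, cDescToWrap); // remainder needs a 2nd read
 */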
164
165
166/* Little helpers ************************************************************/
167#undef htons
168#undef ntohs
169#undef htonl
170#undef ntohl
171#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
172#define ntohs(x) htons(x)
173#define htonl(x) ASMByteSwapU32(x)
174#define ntohl(x) htonl(x)
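/*
 * Illustrative sketch (editor's addition): the helpers above are unconditional
 * byte swaps, i.e. they only implement network/host conversion on little-endian
 * hosts; htons(0x1234) yields 0x3412. Typical use when inspecting an untagged
 * Ethernet frame (pbFrame is a placeholder name):
 *
 *    uint16_t uEtherType = ntohs(*(uint16_t const *)(pbFrame + 12));
 *    if (uEtherType == 0x0800)  // IPv4
 *        ...
 */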
175
176#ifndef DEBUG
177# ifdef E1K_REL_DEBUG
178# define DEBUG
179# define E1kLog(a) LogRel(a)
180# define E1kLog2(a) LogRel(a)
181# define E1kLog3(a) LogRel(a)
182# define E1kLogX(x, a) LogRel(a)
183//# define E1kLog3(a) do {} while (0)
184# else
185# define E1kLog(a) do {} while (0)
186# define E1kLog2(a) do {} while (0)
187# define E1kLog3(a) do {} while (0)
188# define E1kLogX(x, a) do {} while (0)
189# endif
190#else
191# define E1kLog(a) Log(a)
192# define E1kLog2(a) Log2(a)
193# define E1kLog3(a) Log3(a)
194# define E1kLogX(x, a) LogIt(x, LOG_GROUP, a)
195//# define E1kLog(a) do {} while (0)
196//# define E1kLog2(a) do {} while (0)
197//# define E1kLog3(a) do {} while (0)
198#endif
199
200#if 0
201# define LOG_ENABLED
202# define E1kLogRel(a) LogRel(a)
203# undef Log6
204# define Log6(a) LogRel(a)
205#else
206# define E1kLogRel(a) do { } while (0)
207#endif
208
209//#undef DEBUG
210
211#define STATE_TO_DEVINS(pThis) (((PE1KSTATE )pThis)->CTX_SUFF(pDevIns))
212#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
213
214#define E1K_INC_CNT32(cnt) \
215do { \
216 if (cnt < UINT32_MAX) \
217 cnt++; \
218} while (0)
219
220#define E1K_ADD_CNT64(cntLo, cntHi, val) \
221do { \
222 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
223 uint64_t tmp = u64Cnt; \
224 u64Cnt += val; \
225 if (tmp > u64Cnt ) \
226 u64Cnt = UINT64_MAX; \
227 cntLo = (uint32_t)u64Cnt; \
228 cntHi = (uint32_t)(u64Cnt >> 32); \
229} while (0)
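/*
 * Illustrative sketch (editor's addition): how the saturating helpers above might
 * be used to update statistics registers, e.g. a 32-bit packet counter and a
 * 64-bit octet counter kept as a low/high register pair (cbFrame is a
 * placeholder name). Both saturate at the maximum value instead of wrapping:
 *
 *    E1K_INC_CNT32(GPRC);                  // good packets received
 *    E1K_ADD_CNT64(GORCL, GORCH, cbFrame); // good octets received, low/high halves
 */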
230
231#ifdef E1K_INT_STATS
232# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
233#else /* E1K_INT_STATS */
234# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
235#endif /* E1K_INT_STATS */
236
237
238/*****************************************************************************/
239
240typedef uint32_t E1KCHIP;
241#define E1K_CHIP_82540EM 0
242#define E1K_CHIP_82543GC 1
243#define E1K_CHIP_82545EM 2
244
245#ifdef IN_RING3
246/** Different E1000 chips. */
247static const struct E1kChips
248{
249 uint16_t uPCIVendorId;
250 uint16_t uPCIDeviceId;
251 uint16_t uPCISubsystemVendorId;
252 uint16_t uPCISubsystemId;
253 const char *pcszName;
254} g_aChips[] =
255{
256 /* Vendor Device SSVendor SubSys Name */
257 { 0x8086,
258 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
259# ifdef E1K_WITH_MSI
260 0x105E,
261# else
262 0x100E,
263# endif
264 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
265 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
266 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMware Network Adapter */
267};
268#endif /* IN_RING3 */
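/*
 * Illustrative sketch (editor's addition): the emulated controller type kept in
 * the device state (eChip, one of the E1K_CHIP_* values above) indexes this
 * table, e.g. when filling in PCI configuration data:
 *
 *    uint16_t    uDeviceId = g_aChips[pThis->eChip].uPCIDeviceId;
 *    const char *pcszChip  = g_aChips[pThis->eChip].pcszName;
 */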
269
270
271/* The size of register area mapped to I/O space */
272#define E1K_IOPORT_SIZE 0x8
273/* The size of memory-mapped register area */
274#define E1K_MM_SIZE 0x20000
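/*
 * Illustrative sketch (editor's addition): the 8-byte I/O region is assumed to
 * follow the usual IOADDR/IODATA scheme - the guest writes a register offset to
 * base+0 and then accesses the selected register through base+4 (see the
 * uSelectedReg field in the device state). Guest-driver side, with outl/inl as
 * generic port I/O helpers (placeholder names):
 *
 *    outl(uIoBase + 0, 0x00100);          // select RCTL by its register offset
 *    uint32_t uRctl = inl(uIoBase + 4);   // read the selected register
 */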
275
276#define E1K_MAX_TX_PKT_SIZE 16288
277#define E1K_MAX_RX_PKT_SIZE 16384
278
279/*****************************************************************************/
280
281/** Gets the specified bits from the register. */
282#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
283#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
284#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
285#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
286#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
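/*
 * Illustrative sketch (editor's addition): with the RCTL bit layout defined
 * below, GET_BITS(RCTL, BSIZE) expands to
 *
 *    ((RCTL & RCTL_BSIZE_MASK) >> RCTL_BSIZE_SHIFT)
 *
 * extracting the 2-bit receive buffer size field, while SET_BITS(RCTL, BSIZE, 1)
 * clears those bits and ORs in the new value shifted into place.
 */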
287
288#define CTRL_SLU UINT32_C(0x00000040)
289#define CTRL_MDIO UINT32_C(0x00100000)
290#define CTRL_MDC UINT32_C(0x00200000)
291#define CTRL_MDIO_DIR UINT32_C(0x01000000)
292#define CTRL_MDC_DIR UINT32_C(0x02000000)
293#define CTRL_RESET UINT32_C(0x04000000)
294#define CTRL_VME UINT32_C(0x40000000)
295
296#define STATUS_LU UINT32_C(0x00000002)
297#define STATUS_TXOFF UINT32_C(0x00000010)
298
299#define EECD_EE_WIRES UINT32_C(0x0F)
300#define EECD_EE_REQ UINT32_C(0x40)
301#define EECD_EE_GNT UINT32_C(0x80)
302
303#define EERD_START UINT32_C(0x00000001)
304#define EERD_DONE UINT32_C(0x00000010)
305#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
306#define EERD_DATA_SHIFT 16
307#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
308#define EERD_ADDR_SHIFT 8
309
310#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
311#define MDIC_DATA_SHIFT 0
312#define MDIC_REG_MASK UINT32_C(0x001F0000)
313#define MDIC_REG_SHIFT 16
314#define MDIC_PHY_MASK UINT32_C(0x03E00000)
315#define MDIC_PHY_SHIFT 21
316#define MDIC_OP_WRITE UINT32_C(0x04000000)
317#define MDIC_OP_READ UINT32_C(0x08000000)
318#define MDIC_READY UINT32_C(0x10000000)
319#define MDIC_INT_EN UINT32_C(0x20000000)
320#define MDIC_ERROR UINT32_C(0x40000000)
321
322#define TCTL_EN UINT32_C(0x00000002)
323#define TCTL_PSP UINT32_C(0x00000008)
324
325#define RCTL_EN UINT32_C(0x00000002)
326#define RCTL_UPE UINT32_C(0x00000008)
327#define RCTL_MPE UINT32_C(0x00000010)
328#define RCTL_LPE UINT32_C(0x00000020)
329#define RCTL_LBM_MASK UINT32_C(0x000000C0)
330#define RCTL_LBM_SHIFT 6
331#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
332#define RCTL_RDMTS_SHIFT 8
333#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
334#define RCTL_MO_MASK UINT32_C(0x00003000)
335#define RCTL_MO_SHIFT 12
336#define RCTL_BAM UINT32_C(0x00008000)
337#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
338#define RCTL_BSIZE_SHIFT 16
339#define RCTL_VFE UINT32_C(0x00040000)
340#define RCTL_CFIEN UINT32_C(0x00080000)
341#define RCTL_CFI UINT32_C(0x00100000)
342#define RCTL_BSEX UINT32_C(0x02000000)
343#define RCTL_SECRC UINT32_C(0x04000000)
344
345#define ICR_TXDW UINT32_C(0x00000001)
346#define ICR_TXQE UINT32_C(0x00000002)
347#define ICR_LSC UINT32_C(0x00000004)
348#define ICR_RXDMT0 UINT32_C(0x00000010)
349#define ICR_RXT0 UINT32_C(0x00000080)
350#define ICR_TXD_LOW UINT32_C(0x00008000)
351#define RDTR_FPD UINT32_C(0x80000000)
352
353#define PBA_st ((PBAST*)(pThis->auRegs + PBA_IDX))
354typedef struct
355{
356 unsigned rxa : 7;
357 unsigned rxa_r : 9;
358 unsigned txa : 16;
359} PBAST;
360AssertCompileSize(PBAST, 4);
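/*
 * Illustrative sketch (editor's addition): PBA_st overlays the bit fields above
 * onto the PBA register storage, so the receive and transmit packet buffer
 * allocation fields can be accessed without manual masking and shifting:
 *
 *    unsigned uRxAlloc = PBA_st->rxa;   // receive allocation field
 *    unsigned uTxAlloc = PBA_st->txa;   // transmit allocation field
 */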
361
362#define TXDCTL_WTHRESH_MASK 0x003F0000
363#define TXDCTL_WTHRESH_SHIFT 16
364#define TXDCTL_LWTHRESH_MASK 0xFE000000
365#define TXDCTL_LWTHRESH_SHIFT 25
366
367#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
368#define RXCSUM_PCSS_SHIFT 0
369
370/** @name Register access macros
371 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
372 * @{ */
373#define CTRL pThis->auRegs[CTRL_IDX]
374#define STATUS pThis->auRegs[STATUS_IDX]
375#define EECD pThis->auRegs[EECD_IDX]
376#define EERD pThis->auRegs[EERD_IDX]
377#define CTRL_EXT pThis->auRegs[CTRL_EXT_IDX]
378#define FLA pThis->auRegs[FLA_IDX]
379#define MDIC pThis->auRegs[MDIC_IDX]
380#define FCAL pThis->auRegs[FCAL_IDX]
381#define FCAH pThis->auRegs[FCAH_IDX]
382#define FCT pThis->auRegs[FCT_IDX]
383#define VET pThis->auRegs[VET_IDX]
384#define ICR pThis->auRegs[ICR_IDX]
385#define ITR pThis->auRegs[ITR_IDX]
386#define ICS pThis->auRegs[ICS_IDX]
387#define IMS pThis->auRegs[IMS_IDX]
388#define IMC pThis->auRegs[IMC_IDX]
389#define RCTL pThis->auRegs[RCTL_IDX]
390#define FCTTV pThis->auRegs[FCTTV_IDX]
391#define TXCW pThis->auRegs[TXCW_IDX]
392#define RXCW pThis->auRegs[RXCW_IDX]
393#define TCTL pThis->auRegs[TCTL_IDX]
394#define TIPG pThis->auRegs[TIPG_IDX]
395#define AIFS pThis->auRegs[AIFS_IDX]
396#define LEDCTL pThis->auRegs[LEDCTL_IDX]
397#define PBA pThis->auRegs[PBA_IDX]
398#define FCRTL pThis->auRegs[FCRTL_IDX]
399#define FCRTH pThis->auRegs[FCRTH_IDX]
400#define RDFH pThis->auRegs[RDFH_IDX]
401#define RDFT pThis->auRegs[RDFT_IDX]
402#define RDFHS pThis->auRegs[RDFHS_IDX]
403#define RDFTS pThis->auRegs[RDFTS_IDX]
404#define RDFPC pThis->auRegs[RDFPC_IDX]
405#define RDBAL pThis->auRegs[RDBAL_IDX]
406#define RDBAH pThis->auRegs[RDBAH_IDX]
407#define RDLEN pThis->auRegs[RDLEN_IDX]
408#define RDH pThis->auRegs[RDH_IDX]
409#define RDT pThis->auRegs[RDT_IDX]
410#define RDTR pThis->auRegs[RDTR_IDX]
411#define RXDCTL pThis->auRegs[RXDCTL_IDX]
412#define RADV pThis->auRegs[RADV_IDX]
413#define RSRPD pThis->auRegs[RSRPD_IDX]
414#define TXDMAC pThis->auRegs[TXDMAC_IDX]
415#define TDFH pThis->auRegs[TDFH_IDX]
416#define TDFT pThis->auRegs[TDFT_IDX]
417#define TDFHS pThis->auRegs[TDFHS_IDX]
418#define TDFTS pThis->auRegs[TDFTS_IDX]
419#define TDFPC pThis->auRegs[TDFPC_IDX]
420#define TDBAL pThis->auRegs[TDBAL_IDX]
421#define TDBAH pThis->auRegs[TDBAH_IDX]
422#define TDLEN pThis->auRegs[TDLEN_IDX]
423#define TDH pThis->auRegs[TDH_IDX]
424#define TDT pThis->auRegs[TDT_IDX]
425#define TIDV pThis->auRegs[TIDV_IDX]
426#define TXDCTL pThis->auRegs[TXDCTL_IDX]
427#define TADV pThis->auRegs[TADV_IDX]
428#define TSPMT pThis->auRegs[TSPMT_IDX]
429#define CRCERRS pThis->auRegs[CRCERRS_IDX]
430#define ALGNERRC pThis->auRegs[ALGNERRC_IDX]
431#define SYMERRS pThis->auRegs[SYMERRS_IDX]
432#define RXERRC pThis->auRegs[RXERRC_IDX]
433#define MPC pThis->auRegs[MPC_IDX]
434#define SCC pThis->auRegs[SCC_IDX]
435#define ECOL pThis->auRegs[ECOL_IDX]
436#define MCC pThis->auRegs[MCC_IDX]
437#define LATECOL pThis->auRegs[LATECOL_IDX]
438#define COLC pThis->auRegs[COLC_IDX]
439#define DC pThis->auRegs[DC_IDX]
440#define TNCRS pThis->auRegs[TNCRS_IDX]
441/* #define SEC pThis->auRegs[SEC_IDX] Conflict with sys/time.h */
442#define CEXTERR pThis->auRegs[CEXTERR_IDX]
443#define RLEC pThis->auRegs[RLEC_IDX]
444#define XONRXC pThis->auRegs[XONRXC_IDX]
445#define XONTXC pThis->auRegs[XONTXC_IDX]
446#define XOFFRXC pThis->auRegs[XOFFRXC_IDX]
447#define XOFFTXC pThis->auRegs[XOFFTXC_IDX]
448#define FCRUC pThis->auRegs[FCRUC_IDX]
449#define PRC64 pThis->auRegs[PRC64_IDX]
450#define PRC127 pThis->auRegs[PRC127_IDX]
451#define PRC255 pThis->auRegs[PRC255_IDX]
452#define PRC511 pThis->auRegs[PRC511_IDX]
453#define PRC1023 pThis->auRegs[PRC1023_IDX]
454#define PRC1522 pThis->auRegs[PRC1522_IDX]
455#define GPRC pThis->auRegs[GPRC_IDX]
456#define BPRC pThis->auRegs[BPRC_IDX]
457#define MPRC pThis->auRegs[MPRC_IDX]
458#define GPTC pThis->auRegs[GPTC_IDX]
459#define GORCL pThis->auRegs[GORCL_IDX]
460#define GORCH pThis->auRegs[GORCH_IDX]
461#define GOTCL pThis->auRegs[GOTCL_IDX]
462#define GOTCH pThis->auRegs[GOTCH_IDX]
463#define RNBC pThis->auRegs[RNBC_IDX]
464#define RUC pThis->auRegs[RUC_IDX]
465#define RFC pThis->auRegs[RFC_IDX]
466#define ROC pThis->auRegs[ROC_IDX]
467#define RJC pThis->auRegs[RJC_IDX]
468#define MGTPRC pThis->auRegs[MGTPRC_IDX]
469#define MGTPDC pThis->auRegs[MGTPDC_IDX]
470#define MGTPTC pThis->auRegs[MGTPTC_IDX]
471#define TORL pThis->auRegs[TORL_IDX]
472#define TORH pThis->auRegs[TORH_IDX]
473#define TOTL pThis->auRegs[TOTL_IDX]
474#define TOTH pThis->auRegs[TOTH_IDX]
475#define TPR pThis->auRegs[TPR_IDX]
476#define TPT pThis->auRegs[TPT_IDX]
477#define PTC64 pThis->auRegs[PTC64_IDX]
478#define PTC127 pThis->auRegs[PTC127_IDX]
479#define PTC255 pThis->auRegs[PTC255_IDX]
480#define PTC511 pThis->auRegs[PTC511_IDX]
481#define PTC1023 pThis->auRegs[PTC1023_IDX]
482#define PTC1522 pThis->auRegs[PTC1522_IDX]
483#define MPTC pThis->auRegs[MPTC_IDX]
484#define BPTC pThis->auRegs[BPTC_IDX]
485#define TSCTC pThis->auRegs[TSCTC_IDX]
486#define TSCTFC pThis->auRegs[TSCTFC_IDX]
487#define RXCSUM pThis->auRegs[RXCSUM_IDX]
488#define WUC pThis->auRegs[WUC_IDX]
489#define WUFC pThis->auRegs[WUFC_IDX]
490#define WUS pThis->auRegs[WUS_IDX]
491#define MANC pThis->auRegs[MANC_IDX]
492#define IPAV pThis->auRegs[IPAV_IDX]
493#define WUPL pThis->auRegs[WUPL_IDX]
494/** @} */
495
496/**
497 * Indices of memory-mapped registers in register table.
498 */
499typedef enum
500{
501 CTRL_IDX,
502 STATUS_IDX,
503 EECD_IDX,
504 EERD_IDX,
505 CTRL_EXT_IDX,
506 FLA_IDX,
507 MDIC_IDX,
508 FCAL_IDX,
509 FCAH_IDX,
510 FCT_IDX,
511 VET_IDX,
512 ICR_IDX,
513 ITR_IDX,
514 ICS_IDX,
515 IMS_IDX,
516 IMC_IDX,
517 RCTL_IDX,
518 FCTTV_IDX,
519 TXCW_IDX,
520 RXCW_IDX,
521 TCTL_IDX,
522 TIPG_IDX,
523 AIFS_IDX,
524 LEDCTL_IDX,
525 PBA_IDX,
526 FCRTL_IDX,
527 FCRTH_IDX,
528 RDFH_IDX,
529 RDFT_IDX,
530 RDFHS_IDX,
531 RDFTS_IDX,
532 RDFPC_IDX,
533 RDBAL_IDX,
534 RDBAH_IDX,
535 RDLEN_IDX,
536 RDH_IDX,
537 RDT_IDX,
538 RDTR_IDX,
539 RXDCTL_IDX,
540 RADV_IDX,
541 RSRPD_IDX,
542 TXDMAC_IDX,
543 TDFH_IDX,
544 TDFT_IDX,
545 TDFHS_IDX,
546 TDFTS_IDX,
547 TDFPC_IDX,
548 TDBAL_IDX,
549 TDBAH_IDX,
550 TDLEN_IDX,
551 TDH_IDX,
552 TDT_IDX,
553 TIDV_IDX,
554 TXDCTL_IDX,
555 TADV_IDX,
556 TSPMT_IDX,
557 CRCERRS_IDX,
558 ALGNERRC_IDX,
559 SYMERRS_IDX,
560 RXERRC_IDX,
561 MPC_IDX,
562 SCC_IDX,
563 ECOL_IDX,
564 MCC_IDX,
565 LATECOL_IDX,
566 COLC_IDX,
567 DC_IDX,
568 TNCRS_IDX,
569 SEC_IDX,
570 CEXTERR_IDX,
571 RLEC_IDX,
572 XONRXC_IDX,
573 XONTXC_IDX,
574 XOFFRXC_IDX,
575 XOFFTXC_IDX,
576 FCRUC_IDX,
577 PRC64_IDX,
578 PRC127_IDX,
579 PRC255_IDX,
580 PRC511_IDX,
581 PRC1023_IDX,
582 PRC1522_IDX,
583 GPRC_IDX,
584 BPRC_IDX,
585 MPRC_IDX,
586 GPTC_IDX,
587 GORCL_IDX,
588 GORCH_IDX,
589 GOTCL_IDX,
590 GOTCH_IDX,
591 RNBC_IDX,
592 RUC_IDX,
593 RFC_IDX,
594 ROC_IDX,
595 RJC_IDX,
596 MGTPRC_IDX,
597 MGTPDC_IDX,
598 MGTPTC_IDX,
599 TORL_IDX,
600 TORH_IDX,
601 TOTL_IDX,
602 TOTH_IDX,
603 TPR_IDX,
604 TPT_IDX,
605 PTC64_IDX,
606 PTC127_IDX,
607 PTC255_IDX,
608 PTC511_IDX,
609 PTC1023_IDX,
610 PTC1522_IDX,
611 MPTC_IDX,
612 BPTC_IDX,
613 TSCTC_IDX,
614 TSCTFC_IDX,
615 RXCSUM_IDX,
616 WUC_IDX,
617 WUFC_IDX,
618 WUS_IDX,
619 MANC_IDX,
620 IPAV_IDX,
621 WUPL_IDX,
622 MTA_IDX,
623 RA_IDX,
624 VFTA_IDX,
625 IP4AT_IDX,
626 IP6AT_IDX,
627 WUPM_IDX,
628 FFLT_IDX,
629 FFMT_IDX,
630 FFVT_IDX,
631 PBM_IDX,
632 RA_82542_IDX,
633 MTA_82542_IDX,
634 VFTA_82542_IDX,
635 E1K_NUM_OF_REGS
636} E1kRegIndex;
637
638#define E1K_NUM_OF_32BIT_REGS MTA_IDX
639/** The number of registers with strictly increasing offset. */
640#define E1K_NUM_OF_BINARY_SEARCHABLE (WUPL_IDX + 1)
641
642
643/**
644 * Define E1000-specific EEPROM layout.
645 */
646struct E1kEEPROM
647{
648 public:
649 EEPROM93C46 eeprom;
650
651#ifdef IN_RING3
652 /**
653 * Initialize EEPROM content.
654 *
655 * @param macAddr MAC address of E1000.
656 */
657 void init(RTMAC &macAddr)
658 {
659 eeprom.init();
660 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
661 eeprom.m_au16Data[0x04] = 0xFFFF;
662 /*
663 * bit 3 - full support for power management
664 * bit 10 - full duplex
665 */
666 eeprom.m_au16Data[0x0A] = 0x4408;
667 eeprom.m_au16Data[0x0B] = 0x001E;
668 eeprom.m_au16Data[0x0C] = 0x8086;
669 eeprom.m_au16Data[0x0D] = 0x100E;
670 eeprom.m_au16Data[0x0E] = 0x8086;
671 eeprom.m_au16Data[0x0F] = 0x3040;
672 eeprom.m_au16Data[0x21] = 0x7061;
673 eeprom.m_au16Data[0x22] = 0x280C;
674 eeprom.m_au16Data[0x23] = 0x00C8;
675 eeprom.m_au16Data[0x24] = 0x00C8;
676 eeprom.m_au16Data[0x2F] = 0x0602;
677 updateChecksum();
678 };
679
680 /**
681 * Compute the checksum as required by E1000 and store it
682 * in the last word.
683 */
684 void updateChecksum()
685 {
686 uint16_t u16Checksum = 0;
687
688 for (int i = 0; i < eeprom.SIZE-1; i++)
689 u16Checksum += eeprom.m_au16Data[i];
690 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
691 };
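    /*
     * Illustrative sketch (editor's addition): the invariant maintained by
     * updateChecksum() is that all EEPROM words, including the checksum word,
     * sum up to 0xBABA modulo 2^16 - which is what guest drivers verify:
     *
     *    uint16_t u16Sum = 0;
     *    for (int i = 0; i < eeprom.SIZE; i++)
     *        u16Sum += eeprom.m_au16Data[i];
     *    Assert(u16Sum == 0xBABA);
     */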
692
693 /**
694 * The first 6 bytes of the EEPROM contain the MAC address.
695 *
696 * @param pMac Where to store the MAC address of E1000.
697 */
698 void getMac(PRTMAC pMac)
699 {
700 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
701 };
702
703 uint32_t read()
704 {
705 return eeprom.read();
706 }
707
708 void write(uint32_t u32Wires)
709 {
710 eeprom.write(u32Wires);
711 }
712
713 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
714 {
715 return eeprom.readWord(u32Addr, pu16Value);
716 }
717
718 int load(PSSMHANDLE pSSM)
719 {
720 return eeprom.load(pSSM);
721 }
722
723 void save(PSSMHANDLE pSSM)
724 {
725 eeprom.save(pSSM);
726 }
727#endif /* IN_RING3 */
728};
729
730
731#define E1K_SPEC_VLAN(s) (s & 0xFFF)
732#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
733#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
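/*
 * Illustrative sketch (editor's addition): the macros above decompose the 16-bit
 * "special" field of a descriptor into the VLAN TCI components, e.g. for the
 * example value 0xB00A:
 *
 *    E1K_SPEC_VLAN(0xB00A);  // 0x00A - VLAN identifier
 *    E1K_SPEC_CFI(0xB00A);   // 1     - canonical form indicator
 *    E1K_SPEC_PRI(0xB00A);   // 5     - 802.1p priority
 */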
734
735struct E1kRxDStatus
736{
737 /** @name Descriptor Status field (3.2.3.1)
738 * @{ */
739 unsigned fDD : 1; /**< Descriptor Done. */
740 unsigned fEOP : 1; /**< End of packet. */
741 unsigned fIXSM : 1; /**< Ignore checksum indication. */
742 unsigned fVP : 1; /**< VLAN, matches VET. */
743 unsigned : 1;
744 unsigned fTCPCS : 1; /**< TCP Checksum calculated on the packet. */
745 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
746 unsigned fPIF : 1; /**< Passed in-exact filter */
747 /** @} */
748 /** @name Descriptor Errors field (3.2.3.2)
749 * (Only valid when fEOP and fDD are set.)
750 * @{ */
751 unsigned fCE : 1; /**< CRC or alignment error. */
752 unsigned : 4; /**< Reserved, varies with different models... */
753 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
754 unsigned fIPE : 1; /**< IP Checksum error. */
755 unsigned fRXE : 1; /**< RX Data error. */
756 /** @} */
757 /** @name Descriptor Special field (3.2.3.3)
758 * @{ */
759 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
760 /** @} */
761};
762typedef struct E1kRxDStatus E1KRXDST;
763
764struct E1kRxDesc_st
765{
766 uint64_t u64BufAddr; /**< Address of data buffer */
767 uint16_t u16Length; /**< Length of data in buffer */
768 uint16_t u16Checksum; /**< Packet checksum */
769 E1KRXDST status;
770};
771typedef struct E1kRxDesc_st E1KRXDESC;
772AssertCompileSize(E1KRXDESC, 16);
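/*
 * Illustrative sketch (editor's addition): the RX descriptor ring lives in guest
 * memory at RDBAH:RDBAL and holds RDLEN/16 descriptors. Ignoring wrap-around,
 * the physical address of descriptor i works out to:
 *
 *    RTGCPHYS addrBase = ((uint64_t)RDBAH << 32) | RDBAL;
 *    RTGCPHYS addrDesc = addrBase + i * sizeof(E1KRXDESC);
 *
 * After storing (part of) a packet the device sets status.fDD (and fEOP on the
 * last descriptor of the packet) and writes the descriptor back.
 */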
773
774#define E1K_DTYP_LEGACY -1
775#define E1K_DTYP_CONTEXT 0
776#define E1K_DTYP_DATA 1
777
778struct E1kTDLegacy
779{
780 uint64_t u64BufAddr; /**< Address of data buffer */
781 struct TDLCmd_st
782 {
783 unsigned u16Length : 16;
784 unsigned u8CSO : 8;
785 /* CMD field : 8 */
786 unsigned fEOP : 1;
787 unsigned fIFCS : 1;
788 unsigned fIC : 1;
789 unsigned fRS : 1;
790 unsigned fRPS : 1;
791 unsigned fDEXT : 1;
792 unsigned fVLE : 1;
793 unsigned fIDE : 1;
794 } cmd;
795 struct TDLDw3_st
796 {
797 /* STA field */
798 unsigned fDD : 1;
799 unsigned fEC : 1;
800 unsigned fLC : 1;
801 unsigned fTURSV : 1;
802 /* RSV field */
803 unsigned u4RSV : 4;
804 /* CSS field */
805 unsigned u8CSS : 8;
806 /* Special field*/
807 unsigned u16Special: 16;
808 } dw3;
809};
810
811/**
812 * TCP/IP Context Transmit Descriptor, section 3.3.6.
813 */
814struct E1kTDContext
815{
816 struct CheckSum_st
817 {
818 /** TSE: Header start. !TSE: Checksum start. */
819 unsigned u8CSS : 8;
820 /** Checksum offset - where to store it. */
821 unsigned u8CSO : 8;
822 /** Checksum ending (inclusive) offset, 0 = end of packet. */
823 unsigned u16CSE : 16;
824 } ip;
825 struct CheckSum_st tu;
826 struct TDCDw2_st
827 {
828 /** TSE: The total number of payload bytes for this context. Sans header. */
829 unsigned u20PAYLEN : 20;
830 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
831 unsigned u4DTYP : 4;
832 /** TUCMD field, 8 bits
833 * @{ */
834 /** TSE: TCP (set) or UDP (clear). */
835 unsigned fTCP : 1;
836 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
837 * the IP header. Does not affect the checksumming.
838 * @remarks 82544GC/EI interprets a cleared field differently. */
839 unsigned fIP : 1;
840 /** TSE: TCP segmentation enable. When clear, the context describes checksum offload only. */
841 unsigned fTSE : 1;
842 /** Report status (only applies to dw3.fDD for here). */
843 unsigned fRS : 1;
844 /** Reserved, MBZ. */
845 unsigned fRSV1 : 1;
846 /** Descriptor extension, must be set for this descriptor type. */
847 unsigned fDEXT : 1;
848 /** Reserved, MBZ. */
849 unsigned fRSV2 : 1;
850 /** Interrupt delay enable. */
851 unsigned fIDE : 1;
852 /** @} */
853 } dw2;
854 struct TDCDw3_st
855 {
856 /** Descriptor Done. */
857 unsigned fDD : 1;
858 /** Reserved, MBZ. */
859 unsigned u7RSV : 7;
860 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
861 unsigned u8HDRLEN : 8;
862 /** TSO: Maximum segment size. */
863 unsigned u16MSS : 16;
864 } dw3;
865};
866typedef struct E1kTDContext E1KTXCTX;
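/*
 * Illustrative sketch (editor's addition): for a TSE context the device carves
 * PAYLEN bytes of payload into MSS-sized chunks and prepends a copy of the
 * HDRLEN-byte prototype header to each resulting frame, roughly (cbPayloadLeft
 * is a placeholder for the payload not yet segmented):
 *
 *    uint32_t cSegs   = (pCtx->dw2.u20PAYLEN + pCtx->dw3.u16MSS - 1) / pCtx->dw3.u16MSS;
 *    uint32_t cbFrame = pCtx->dw3.u8HDRLEN + RT_MIN(pCtx->dw3.u16MSS, cbPayloadLeft);
 */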
867
868/**
869 * TCP/IP Data Transmit Descriptor, section 3.3.7.
870 */
871struct E1kTDData
872{
873 uint64_t u64BufAddr; /**< Address of data buffer */
874 struct TDDCmd_st
875 {
876 /** The total length of data pointed to by this descriptor. */
877 unsigned u20DTALEN : 20;
878 /** The descriptor type - E1K_DTYP_DATA (1). */
879 unsigned u4DTYP : 4;
880 /** @name DCMD field, 8 bits (3.3.7.1).
881 * @{ */
882 /** End of packet. Note TSCTFC update. */
883 unsigned fEOP : 1;
884 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
885 unsigned fIFCS : 1;
886 /** Use the TSE context when set and the normal when clear. */
887 unsigned fTSE : 1;
888 /** Report status (dw3.STA). */
889 unsigned fRS : 1;
890 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
891 unsigned fRPS : 1;
892 /** Descriptor extension, must be set for this descriptor type. */
893 unsigned fDEXT : 1;
894 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
895 * Insert dw3.SPECIAL after ethernet header. */
896 unsigned fVLE : 1;
897 /** Interrupt delay enable. */
898 unsigned fIDE : 1;
899 /** @} */
900 } cmd;
901 struct TDDDw3_st
902 {
903 /** @name STA field (3.3.7.2)
904 * @{ */
905 unsigned fDD : 1; /**< Descriptor done. */
906 unsigned fEC : 1; /**< Excess collision. */
907 unsigned fLC : 1; /**< Late collision. */
908 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
909 unsigned fTURSV : 1;
910 /** @} */
911 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
912 /** @name POPTS (Packet Option) field (3.3.7.3)
913 * @{ */
914 unsigned fIXSM : 1; /**< Insert IP checksum. */
915 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
916 unsigned u6RSV : 6; /**< Reserved, MBZ. */
917 /** @} */
918 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
919 * Requires fEOP, fVLE and CTRL.VME to be set.
920 * @{ */
921 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
922 /** @} */
923 } dw3;
924};
925typedef struct E1kTDData E1KTXDAT;
926
927union E1kTxDesc
928{
929 struct E1kTDLegacy legacy;
930 struct E1kTDContext context;
931 struct E1kTDData data;
932};
933typedef union E1kTxDesc E1KTXDESC;
934AssertCompileSize(E1KTXDESC, 16);
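/*
 * Illustrative sketch (editor's addition): a fetched TX descriptor is interpreted
 * according to its DEXT/DTYP bits, which occupy the same positions in all three
 * layouts of the union, roughly:
 *
 *    int e1kGetDescTypeSketch(E1KTXDESC const *pDesc)
 *    {
 *        if (!pDesc->legacy.cmd.fDEXT)
 *            return E1K_DTYP_LEGACY;       // legacy descriptor (-1)
 *        return pDesc->context.dw2.u4DTYP; // E1K_DTYP_CONTEXT or E1K_DTYP_DATA
 *    }
 */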
935
936#define RA_CTL_AS 0x0003
937#define RA_CTL_AV 0x8000
938
939union E1kRecAddr
940{
941 uint32_t au32[32];
942 struct RAArray
943 {
944 uint8_t addr[6];
945 uint16_t ctl;
946 } array[16];
947};
948typedef struct E1kRecAddr::RAArray E1KRAELEM;
949typedef union E1kRecAddr E1KRA;
950AssertCompileSize(E1KRA, 8*16);
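/*
 * Illustrative sketch (editor's addition): a unicast destination address is
 * accepted if it matches one of the valid Receive Address entries (aRecAddr in
 * the device state declared further down), e.g.:
 *
 *    bool e1kUnicastMatchSketch(PE1KSTATE pThis, const uint8_t *pbDstAddr)
 *    {
 *        for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
 *            if (   (pThis->aRecAddr.array[i].ctl & RA_CTL_AV)
 *                && !memcmp(pThis->aRecAddr.array[i].addr, pbDstAddr, 6))
 *                return true;
 *        return false;
 *    }
 */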
951
952#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
953#define E1K_IP_DF UINT16_C(0x4000) /**< don't fragment flag */
954#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
955#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
956
957/** @todo use+extend RTNETIPV4 */
958struct E1kIpHeader
959{
960 /* type of service / version / header length */
961 uint16_t tos_ver_hl;
962 /* total length */
963 uint16_t total_len;
964 /* identification */
965 uint16_t ident;
966 /* fragment offset field */
967 uint16_t offset;
968 /* time to live / protocol */
969 uint16_t ttl_proto;
970 /* checksum */
971 uint16_t chksum;
972 /* source IP address */
973 uint32_t src;
974 /* destination IP address */
975 uint32_t dest;
976};
977AssertCompileSize(struct E1kIpHeader, 20);
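/*
 * Illustrative sketch (editor's addition): the standard Internet checksum over
 * this header is the 16-bit one's complement of the one's-complement sum of its
 * ten 16-bit words, with the chksum field treated as zero while summing:
 *
 *    uint16_t e1kIpCsumSketch(struct E1kIpHeader const *pIp)
 *    {
 *        uint32_t uSum = 0;
 *        uint16_t const *pu16 = (uint16_t const *)pIp;
 *        for (unsigned i = 0; i < sizeof(*pIp) / 2; i++)
 *            uSum += pu16[i];                        // chksum must be 0 here
 *        while (uSum >> 16)
 *            uSum = (uSum & 0xFFFF) + (uSum >> 16);  // fold carries
 *        return (uint16_t)~uSum;
 *    }
 */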
978
979#define E1K_TCP_FIN UINT16_C(0x01)
980#define E1K_TCP_SYN UINT16_C(0x02)
981#define E1K_TCP_RST UINT16_C(0x04)
982#define E1K_TCP_PSH UINT16_C(0x08)
983#define E1K_TCP_ACK UINT16_C(0x10)
984#define E1K_TCP_URG UINT16_C(0x20)
985#define E1K_TCP_ECE UINT16_C(0x40)
986#define E1K_TCP_CWR UINT16_C(0x80)
987#define E1K_TCP_FLAGS UINT16_C(0x3f)
988
989/** @todo use+extend RTNETTCP */
990struct E1kTcpHeader
991{
992 uint16_t src;
993 uint16_t dest;
994 uint32_t seqno;
995 uint32_t ackno;
996 uint16_t hdrlen_flags;
997 uint16_t wnd;
998 uint16_t chksum;
999 uint16_t urgp;
1000};
1001AssertCompileSize(struct E1kTcpHeader, 20);
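/*
 * Illustrative sketch (editor's addition): hdrlen_flags packs the TCP data
 * offset and the flag bits defined above into a single 16-bit field, so after
 * conversion from network order it could be split like this (pTcpHdr is a
 * placeholder name):
 *
 *    uint16_t u16HdrLenFlags = ntohs(pTcpHdr->hdrlen_flags);
 *    unsigned cbTcpHdr  = (u16HdrLenFlags >> 12) * 4;  // data offset in bytes
 *    bool     fSynOrFin = RT_BOOL(u16HdrLenFlags & (E1K_TCP_SYN | E1K_TCP_FIN));
 */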
1002
1003
1004#ifdef E1K_WITH_TXD_CACHE
1005/** The current Saved state version. */
1006# define E1K_SAVEDSTATE_VERSION 4
1007/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
1008# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
1009#else /* !E1K_WITH_TXD_CACHE */
1010/** The current Saved state version. */
1011# define E1K_SAVEDSTATE_VERSION 3
1012#endif /* !E1K_WITH_TXD_CACHE */
1013/** Saved state version for VirtualBox 4.1 and earlier.
1014 * These did not include VLAN tag fields. */
1015#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
1016/** Saved state version for VirtualBox 3.0 and earlier.
1017 * This did not include the configuration part nor the E1kEEPROM. */
1018#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
1019
1020/**
1021 * Device state structure.
1022 *
1023 * Holds the current state of the device.
1024 *
1025 * @implements PDMINETWORKDOWN
1026 * @implements PDMINETWORKCONFIG
1027 * @implements PDMILEDPORTS
1028 */
1029struct E1kState_st
1030{
1031 char szPrf[8]; /**< Log prefix, e.g. E1000#1. */
1032 PDMIBASE IBase;
1033 PDMINETWORKDOWN INetworkDown;
1034 PDMINETWORKCONFIG INetworkConfig;
1035 PDMILEDPORTS ILeds; /**< LED interface */
1036 R3PTRTYPE(PPDMIBASE) pDrvBase; /**< Attached network driver. */
1037 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1038
1039 PPDMDEVINSR3 pDevInsR3; /**< Device instance - R3. */
1040 R3PTRTYPE(PPDMQUEUE) pTxQueueR3; /**< Transmit queue - R3. */
1041 R3PTRTYPE(PPDMQUEUE) pCanRxQueueR3; /**< Rx wakeup signaller - R3. */
1042 PPDMINETWORKUPR3 pDrvR3; /**< Attached network driver - R3. */
1043 PTMTIMERR3 pRIDTimerR3; /**< Receive Interrupt Delay Timer - R3. */
1044 PTMTIMERR3 pRADTimerR3; /**< Receive Absolute Delay Timer - R3. */
1045 PTMTIMERR3 pTIDTimerR3; /**< Transmit Interrupt Delay Timer - R3. */
1046 PTMTIMERR3 pTADTimerR3; /**< Transmit Absolute Delay Timer - R3. */
1047 PTMTIMERR3 pTXDTimerR3; /**< Transmit Delay Timer - R3. */
1048 PTMTIMERR3 pIntTimerR3; /**< Late Interrupt Timer - R3. */
1049 PTMTIMERR3 pLUTimerR3; /**< Link Up(/Restore) Timer. */
1050 /** The scatter / gather buffer used for the current outgoing packet - R3. */
1051 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1052
1053 PPDMDEVINSR0 pDevInsR0; /**< Device instance - R0. */
1054 R0PTRTYPE(PPDMQUEUE) pTxQueueR0; /**< Transmit queue - R0. */
1055 R0PTRTYPE(PPDMQUEUE) pCanRxQueueR0; /**< Rx wakeup signaller - R0. */
1056 PPDMINETWORKUPR0 pDrvR0; /**< Attached network driver - R0. */
1057 PTMTIMERR0 pRIDTimerR0; /**< Receive Interrupt Delay Timer - R0. */
1058 PTMTIMERR0 pRADTimerR0; /**< Receive Absolute Delay Timer - R0. */
1059 PTMTIMERR0 pTIDTimerR0; /**< Transmit Interrupt Delay Timer - R0. */
1060 PTMTIMERR0 pTADTimerR0; /**< Transmit Absolute Delay Timer - R0. */
1061 PTMTIMERR0 pTXDTimerR0; /**< Transmit Delay Timer - R0. */
1062 PTMTIMERR0 pIntTimerR0; /**< Late Interrupt Timer - R0. */
1063 PTMTIMERR0 pLUTimerR0; /**< Link Up(/Restore) Timer - R0. */
1064 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1065 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1066
1067 PPDMDEVINSRC pDevInsRC; /**< Device instance - RC. */
1068 RCPTRTYPE(PPDMQUEUE) pTxQueueRC; /**< Transmit queue - RC. */
1069 RCPTRTYPE(PPDMQUEUE) pCanRxQueueRC; /**< Rx wakeup signaller - RC. */
1070 PPDMINETWORKUPRC pDrvRC; /**< Attached network driver - RC. */
1071 PTMTIMERRC pRIDTimerRC; /**< Receive Interrupt Delay Timer - RC. */
1072 PTMTIMERRC pRADTimerRC; /**< Receive Absolute Delay Timer - RC. */
1073 PTMTIMERRC pTIDTimerRC; /**< Transmit Interrupt Delay Timer - RC. */
1074 PTMTIMERRC pTADTimerRC; /**< Transmit Absolute Delay Timer - RC. */
1075 PTMTIMERRC pTXDTimerRC; /**< Transmit Delay Timer - RC. */
1076 PTMTIMERRC pIntTimerRC; /**< Late Interrupt Timer - RC. */
1077 PTMTIMERRC pLUTimerRC; /**< Link Up(/Restore) Timer - RC. */
1078 /** The scatter / gather buffer used for the current outgoing packet - RC. */
1079 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1080 RTRCPTR RCPtrAlignment;
1081
1082#if HC_ARCH_BITS != 32
1083 uint32_t Alignment1;
1084#endif
1085 PDMCRITSECT cs; /**< Critical section - what is it protecting? */
1086 PDMCRITSECT csRx; /**< RX Critical section. */
1087#ifdef E1K_WITH_TX_CS
1088 PDMCRITSECT csTx; /**< TX Critical section. */
1089#endif /* E1K_WITH_TX_CS */
1090 /** Base address of memory-mapped registers. */
1091 RTGCPHYS addrMMReg;
1092 /** MAC address obtained from the configuration. */
1093 RTMAC macConfigured;
1094 /** Base port of I/O space region. */
1095 RTIOPORT IOPortBase;
1096 /** EMT: */
1097 PDMPCIDEV pciDevice;
1098 /** EMT: Last time the interrupt was acknowledged. */
1099 uint64_t u64AckedAt;
1100 /** All: Used for eliminating spurious interrupts. */
1101 bool fIntRaised;
1102 /** EMT: false if the cable is disconnected by the GUI. */
1103 bool fCableConnected;
1104 /** EMT: */
1105 bool fR0Enabled;
1106 /** EMT: */
1107 bool fRCEnabled;
1108 /** EMT: Compute Ethernet CRC for RX packets. */
1109 bool fEthernetCRC;
1110 /** All: throttle interrupts. */
1111 bool fItrEnabled;
1112 /** All: throttle RX interrupts. */
1113 bool fItrRxEnabled;
1114 /** All: Delay TX interrupts using TIDV/TADV. */
1115 bool fTidEnabled;
1116 /** Link up delay (in milliseconds). */
1117 uint32_t cMsLinkUpDelay;
1118
1119 /** All: Device register storage. */
1120 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1121 /** TX/RX: Status LED. */
1122 PDMLED led;
1123 /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1124 uint32_t u32PktNo;
1125
1126 /** EMT: Offset of the register to be read via IO. */
1127 uint32_t uSelectedReg;
1128 /** EMT: Multicast Table Array. */
1129 uint32_t auMTA[128];
1130 /** EMT: Receive Address registers. */
1131 E1KRA aRecAddr;
1132 /** EMT: VLAN filter table array. */
1133 uint32_t auVFTA[128];
1134 /** EMT: Receive buffer size. */
1135 uint16_t u16RxBSize;
1136 /** EMT: Locked state -- no state alteration possible. */
1137 bool fLocked;
1138 /** EMT: */
1139 bool fDelayInts;
1140 /** All: */
1141 bool fIntMaskUsed;
1142
1143 /** N/A: */
1144 bool volatile fMaybeOutOfSpace;
1145 /** EMT: Gets signalled when more RX descriptors become available. */
1146 RTSEMEVENT hEventMoreRxDescAvail;
1147#ifdef E1K_WITH_RXD_CACHE
1148 /** RX: Fetched RX descriptors. */
1149 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1150 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1151 /** RX: Actual number of fetched RX descriptors. */
1152 uint32_t nRxDFetched;
1153 /** RX: Index in cache of RX descriptor being processed. */
1154 uint32_t iRxDCurrent;
1155#endif /* E1K_WITH_RXD_CACHE */
1156
1157 /** TX: Context used for TCP segmentation packets. */
1158 E1KTXCTX contextTSE;
1159 /** TX: Context used for ordinary packets. */
1160 E1KTXCTX contextNormal;
1161#ifdef E1K_WITH_TXD_CACHE
1162 /** TX: Fetched TX descriptors. */
1163 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1164 /** TX: Actual number of fetched TX descriptors. */
1165 uint8_t nTxDFetched;
1166 /** TX: Index in cache of TX descriptor being processed. */
1167 uint8_t iTxDCurrent;
1168 /** TX: Will this frame be sent as GSO. */
1169 bool fGSO;
1170 /** Alignment padding. */
1171 bool fReserved;
1172 /** TX: Number of bytes in next packet. */
1173 uint32_t cbTxAlloc;
1174
1175#endif /* E1K_WITH_TXD_CACHE */
1176 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1177 * applicable to the current TSE mode. */
1178 PDMNETWORKGSO GsoCtx;
1179 /** Scratch space for holding the loopback / fallback scatter / gather
1180 * descriptor. */
1181 union
1182 {
1183 PDMSCATTERGATHER Sg;
1184 uint8_t padding[8 * sizeof(RTUINTPTR)];
1185 } uTxFallback;
1186 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1187 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1188 /** TX: Number of bytes assembled in TX packet buffer. */
1189 uint16_t u16TxPktLen;
1190 /** TX: When false, forces segmentation in e1000 instead of sending frames as GSO. */
1191 bool fGSOEnabled;
1192 /** TX: IP checksum has to be inserted if true. */
1193 bool fIPcsum;
1194 /** TX: TCP/UDP checksum has to be inserted if true. */
1195 bool fTCPcsum;
1196 /** TX: VLAN tag has to be inserted if true. */
1197 bool fVTag;
1198 /** TX: TCI part of VLAN tag to be inserted. */
1199 uint16_t u16VTagTCI;
1200 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1201 uint32_t u32PayRemain;
1202 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1203 uint16_t u16HdrRemain;
1204 /** TX TSE fallback: Flags from template header. */
1205 uint16_t u16SavedFlags;
1206 /** TX TSE fallback: Partial checksum from template header. */
1207 uint32_t u32SavedCsum;
1208 /** ?: Emulated controller type. */
1209 E1KCHIP eChip;
1210
1211 /** EMT: EEPROM emulation */
1212 E1kEEPROM eeprom;
1213 /** EMT: Physical interface emulation. */
1214 PHY phy;
1215
1216#if 0
1217 /** Alignment padding. */
1218 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1219#endif
1220
1221 STAMCOUNTER StatReceiveBytes;
1222 STAMCOUNTER StatTransmitBytes;
1223#if defined(VBOX_WITH_STATISTICS)
1224 STAMPROFILEADV StatMMIOReadRZ;
1225 STAMPROFILEADV StatMMIOReadR3;
1226 STAMPROFILEADV StatMMIOWriteRZ;
1227 STAMPROFILEADV StatMMIOWriteR3;
1228 STAMPROFILEADV StatEEPROMRead;
1229 STAMPROFILEADV StatEEPROMWrite;
1230 STAMPROFILEADV StatIOReadRZ;
1231 STAMPROFILEADV StatIOReadR3;
1232 STAMPROFILEADV StatIOWriteRZ;
1233 STAMPROFILEADV StatIOWriteR3;
1234 STAMPROFILEADV StatLateIntTimer;
1235 STAMCOUNTER StatLateInts;
1236 STAMCOUNTER StatIntsRaised;
1237 STAMCOUNTER StatIntsPrevented;
1238 STAMPROFILEADV StatReceive;
1239 STAMPROFILEADV StatReceiveCRC;
1240 STAMPROFILEADV StatReceiveFilter;
1241 STAMPROFILEADV StatReceiveStore;
1242 STAMPROFILEADV StatTransmitRZ;
1243 STAMPROFILEADV StatTransmitR3;
1244 STAMPROFILE StatTransmitSendRZ;
1245 STAMPROFILE StatTransmitSendR3;
1246 STAMPROFILE StatRxOverflow;
1247 STAMCOUNTER StatRxOverflowWakeup;
1248 STAMCOUNTER StatTxDescCtxNormal;
1249 STAMCOUNTER StatTxDescCtxTSE;
1250 STAMCOUNTER StatTxDescLegacy;
1251 STAMCOUNTER StatTxDescData;
1252 STAMCOUNTER StatTxDescTSEData;
1253 STAMCOUNTER StatTxPathFallback;
1254 STAMCOUNTER StatTxPathGSO;
1255 STAMCOUNTER StatTxPathRegular;
1256 STAMCOUNTER StatPHYAccesses;
1257 STAMCOUNTER aStatRegWrites[E1K_NUM_OF_REGS];
1258 STAMCOUNTER aStatRegReads[E1K_NUM_OF_REGS];
1259#endif /* VBOX_WITH_STATISTICS */
1260
1261#ifdef E1K_INT_STATS
1262 /* Internal stats */
1263 uint64_t u64ArmedAt;
1264 uint64_t uStatMaxTxDelay;
1265 uint32_t uStatInt;
1266 uint32_t uStatIntTry;
1267 uint32_t uStatIntLower;
1268 uint32_t uStatNoIntICR;
1269 int32_t iStatIntLost;
1270 int32_t iStatIntLostOne;
1271 uint32_t uStatIntIMS;
1272 uint32_t uStatIntSkip;
1273 uint32_t uStatIntLate;
1274 uint32_t uStatIntMasked;
1275 uint32_t uStatIntEarly;
1276 uint32_t uStatIntRx;
1277 uint32_t uStatIntTx;
1278 uint32_t uStatIntICS;
1279 uint32_t uStatIntRDTR;
1280 uint32_t uStatIntRXDMT0;
1281 uint32_t uStatIntTXQE;
1282 uint32_t uStatTxNoRS;
1283 uint32_t uStatTxIDE;
1284 uint32_t uStatTxDelayed;
1285 uint32_t uStatTxDelayExp;
1286 uint32_t uStatTAD;
1287 uint32_t uStatTID;
1288 uint32_t uStatRAD;
1289 uint32_t uStatRID;
1290 uint32_t uStatRxFrm;
1291 uint32_t uStatTxFrm;
1292 uint32_t uStatDescCtx;
1293 uint32_t uStatDescDat;
1294 uint32_t uStatDescLeg;
1295 uint32_t uStatTx1514;
1296 uint32_t uStatTx2962;
1297 uint32_t uStatTx4410;
1298 uint32_t uStatTx5858;
1299 uint32_t uStatTx7306;
1300 uint32_t uStatTx8754;
1301 uint32_t uStatTx16384;
1302 uint32_t uStatTx32768;
1303 uint32_t uStatTxLarge;
1304 uint32_t uStatAlign;
1305#endif /* E1K_INT_STATS */
1306};
1307typedef struct E1kState_st E1KSTATE;
1308/** Pointer to the E1000 device state. */
1309typedef E1KSTATE *PE1KSTATE;
1310
1311#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1312
1313/* Forward declarations ******************************************************/
1314static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread);
1315
1316static int e1kRegReadUnimplemented (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1317static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1318static int e1kRegReadAutoClear (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1319static int e1kRegReadDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1320static int e1kRegWriteDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1321#if 0 /* unused */
1322static int e1kRegReadCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1323#endif
1324static int e1kRegWriteCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1325static int e1kRegReadEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1326static int e1kRegWriteEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1327static int e1kRegWriteEERD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1328static int e1kRegWriteMDIC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1329static int e1kRegReadICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1330static int e1kRegWriteICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1331static int e1kRegWriteICS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1332static int e1kRegWriteIMS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1333static int e1kRegWriteIMC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1334static int e1kRegWriteRCTL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1335static int e1kRegWritePBA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1336static int e1kRegWriteRDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1337static int e1kRegWriteRDTR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1338static int e1kRegWriteTDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1339static int e1kRegReadMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1340static int e1kRegWriteMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1341static int e1kRegReadRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1342static int e1kRegWriteRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1343static int e1kRegReadVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1344static int e1kRegWriteVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1345
1346/**
1347 * Register map table.
1348 *
1349 * Override pfnRead and pfnWrite to get register-specific behavior.
1350 */
1351static const struct E1kRegMap_st
1352{
1353 /** Register offset in the register space. */
1354 uint32_t offset;
1355 /** Size in bytes. Registers of size > 4 are in fact tables. */
1356 uint32_t size;
1357 /** Readable bits. */
1358 uint32_t readable;
1359 /** Writable bits. */
1360 uint32_t writable;
1361 /** Read callback. */
1362 int (*pfnRead)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1363 /** Write callback. */
1364 int (*pfnWrite)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1365 /** Abbreviated name. */
1366 const char *abbrev;
1367 /** Full name. */
1368 const char *name;
1369} g_aE1kRegMap[E1K_NUM_OF_REGS] =
1370{
1371 /* offset size read mask write mask read callback write callback abbrev full name */
1372 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1373 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1374 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1375 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1376 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1377 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1378 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1379 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1380 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1381 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1382 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1383 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1384 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1385 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1386 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1387 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1388 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1389 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1390 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1391 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1392 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1393 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1394 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1395 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1396 { 0x00e00, 0x00004, 0xCFCFCFCF, 0xCFCFCFCF, e1kRegReadDefault , e1kRegWriteDefault , "LEDCTL" , "LED Control" },
1397 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1398 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1399 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1400 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1401 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1402 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1403 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1404 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1405 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1406 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1407 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1408 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1409 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1410 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1411 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1412 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1413 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1414 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1415 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1416 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1417 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1418 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1419 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1420 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1421 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1422 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1423 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1424 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1425 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1426 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1427 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1428 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1429 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1430 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1431 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1432 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1433 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1434 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1435 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1436 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1437 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1438 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1439 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1440 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1441 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1442 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1443 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1444 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1445 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1446 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1447 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1448 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1449 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1450 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1451 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1452 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1453 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1454 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1455 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1456 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1457 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1458 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1459 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1460 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1461 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1462 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1463 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1464 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1465 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1466 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1467 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1468 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1469 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1470 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1471 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1472 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1473 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1474 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1475 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1476 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1477 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1478 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1479 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1480 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1481 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1482 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1483 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1484 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1485 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1486 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1487 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1488 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1489 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1490 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1491 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1492 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1493 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1494 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1495 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1496 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1497 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1498 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1499 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1500 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1501 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1502 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1503 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1504 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
1505 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
1506 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
1507};
1508
1509#ifdef LOG_ENABLED
1510
1511/**
1512 * Convert U32 value to hex string. Masked bytes are replaced with dots.
1513 *
1514 * @remarks The mask is applied with nibble (not bit) granularity (e.g. 000000FF selects the low byte).
1515 *
1516 * @returns The buffer.
1517 *
1518 * @param u32 The word to convert into string.
1519 * @param mask Selects which bytes to convert.
1520 * @param buf Where to put the result.
1521 */
1522static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1523{
1524 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1525 {
1526 if (mask & 0xF)
1527 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1528 else
1529 *ptr = '.';
1530 }
1531 buf[8] = 0;
1532 return buf;
1533}
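
/*
 * Illustrative sketch of what e1kU32toHex produces for a sample value and mask; the
 * sample values and the local buffer name are arbitrary, for illustration only.
 */
#if 0 /* illustrative only */
static char s_szHexBuf[9];                              /* 8 hex digits + terminator */
/* Mask 0x0000FFFF keeps the four low-order nibbles; the rest become dots. */
static char *s_pszHex = e1kU32toHex(0x12345678, 0x0000FFFF, s_szHexBuf); /* "....5678" */
#endif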
1534
1535/**
1536 * Returns timer name for debug purposes.
1537 *
1538 * @returns The timer name.
1539 *
1540 * @param pThis The device state structure.
1541 * @param pTimer The timer to get the name for.
1542 */
1543DECLINLINE(const char *) e1kGetTimerName(PE1KSTATE pThis, PTMTIMER pTimer)
1544{
1545 if (pTimer == pThis->CTX_SUFF(pTIDTimer))
1546 return "TID";
1547 if (pTimer == pThis->CTX_SUFF(pTADTimer))
1548 return "TAD";
1549 if (pTimer == pThis->CTX_SUFF(pRIDTimer))
1550 return "RID";
1551 if (pTimer == pThis->CTX_SUFF(pRADTimer))
1552 return "RAD";
1553 if (pTimer == pThis->CTX_SUFF(pIntTimer))
1554 return "Int";
1555 if (pTimer == pThis->CTX_SUFF(pTXDTimer))
1556 return "TXD";
1557 if (pTimer == pThis->CTX_SUFF(pLUTimer))
1558 return "LinkUp";
1559 return "unknown";
1560}
1561
1562#endif /* LOG_ENABLED */
1563
1564/**
1565 * Arm a timer.
1566 *
1567 * @param pThis Pointer to the device state structure.
1568 * @param pTimer Pointer to the timer.
1569 * @param uExpireIn Expiration interval in microseconds.
1570 */
1571DECLINLINE(void) e1kArmTimer(PE1KSTATE pThis, PTMTIMER pTimer, uint32_t uExpireIn)
1572{
1573 if (pThis->fLocked)
1574 return;
1575
1576 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1577 pThis->szPrf, e1kGetTimerName(pThis, pTimer), uExpireIn));
1578 TMTimerSetMicro(pTimer, uExpireIn);
1579}
1580
1581#ifdef IN_RING3
1582/**
1583 * Cancel a timer.
1584 *
1585 * @param pThis Pointer to the device state structure.
1586 * @param pTimer Pointer to the timer.
1587 */
1588DECLINLINE(void) e1kCancelTimer(PE1KSTATE pThis, PTMTIMER pTimer)
1589{
1590 E1kLog2(("%s Stopping %s timer...\n",
1591 pThis->szPrf, e1kGetTimerName(pThis, pTimer)));
1592 int rc = TMTimerStop(pTimer);
1593 if (RT_FAILURE(rc))
1594 E1kLog2(("%s e1kCancelTimer: TMTimerStop() failed with %Rrc\n",
1595 pThis->szPrf, rc));
1596 RT_NOREF1(pThis);
1597}
1598#endif /* IN_RING3 */
1599
1600#define e1kCsEnter(ps, rc) PDMCritSectEnter(&ps->cs, rc)
1601#define e1kCsLeave(ps) PDMCritSectLeave(&ps->cs)
1602
1603#define e1kCsRxEnter(ps, rc) PDMCritSectEnter(&ps->csRx, rc)
1604#define e1kCsRxLeave(ps) PDMCritSectLeave(&ps->csRx)
1605#define e1kCsRxIsOwner(ps) PDMCritSectIsOwner(&ps->csRx)
1606
1607#ifndef E1K_WITH_TX_CS
1608# define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1609# define e1kCsTxLeave(ps) do { } while (0)
1610#else /* E1K_WITH_TX_CS */
1611# define e1kCsTxEnter(ps, rc) PDMCritSectEnter(&ps->csTx, rc)
1612# define e1kCsTxLeave(ps) PDMCritSectLeave(&ps->csTx)
1613#endif /* E1K_WITH_TX_CS */
1614
1615#ifdef IN_RING3
1616
1617/**
1618 * Wakeup the RX thread.
1619 */
1620static void e1kWakeupReceive(PPDMDEVINS pDevIns)
1621{
1622 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
1623 if ( pThis->fMaybeOutOfSpace
1624 && pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
1625 {
1626 STAM_COUNTER_INC(&pThis->StatRxOverflowWakeup);
1627 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", pThis->szPrf));
1628 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
1629 }
1630}
1631
1632/**
1633 * Hardware reset. Revert all registers to initial values.
1634 *
1635 * @param pThis The device state structure.
1636 */
1637static void e1kHardReset(PE1KSTATE pThis)
1638{
1639 E1kLog(("%s Hard reset triggered\n", pThis->szPrf));
1640 memset(pThis->auRegs, 0, sizeof(pThis->auRegs));
1641 memset(pThis->aRecAddr.au32, 0, sizeof(pThis->aRecAddr.au32));
1642#ifdef E1K_INIT_RA0
1643 memcpy(pThis->aRecAddr.au32, pThis->macConfigured.au8,
1644 sizeof(pThis->macConfigured.au8));
1645 pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;
1646#endif /* E1K_INIT_RA0 */
1647 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1648 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1649 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1650 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1651 Assert(GET_BITS(RCTL, BSIZE) == 0);
1652 pThis->u16RxBSize = 2048;
1653
1654 uint16_t u16LedCtl = 0x0602; /* LED0/LINK_UP#, LED2/LINK100# */
1655 pThis->eeprom.readWord(0x2F, &u16LedCtl); /* Read LEDCTL defaults from EEPROM */
1656 LEDCTL = 0x07008300 | (((uint32_t)u16LedCtl & 0xCF00) << 8) | (u16LedCtl & 0xCF); /* Only LED0 and LED2 defaults come from EEPROM */
1657
1658 /* Reset promiscuous mode */
1659 if (pThis->pDrvR3)
1660 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, false);
1661
1662#ifdef E1K_WITH_TXD_CACHE
1663 int rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
1664 if (RT_LIKELY(rc == VINF_SUCCESS))
1665 {
1666 pThis->nTxDFetched = 0;
1667 pThis->iTxDCurrent = 0;
1668 pThis->fGSO = false;
1669 pThis->cbTxAlloc = 0;
1670 e1kCsTxLeave(pThis);
1671 }
1672#endif /* E1K_WITH_TXD_CACHE */
1673#ifdef E1K_WITH_RXD_CACHE
1674 if (RT_LIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1675 {
1676 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
1677 e1kCsRxLeave(pThis);
1678 }
1679#endif /* E1K_WITH_RXD_CACHE */
1680#ifdef E1K_LSC_ON_RESET
1681 E1kLog(("%s Will trigger LSC in %d seconds...\n",
1682 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
1683 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), pThis->cMsLinkUpDelay * 1000);
1684#endif /* E1K_LSC_ON_RESET */
1685}
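
/*
 * Illustrative sketch of the LEDCTL composition above, worked out for the default
 * EEPROM word 0x0602 (i.e. when no custom LEDCTL value is stored in the EEPROM).
 */
#if 0 /* illustrative only */
static uint16_t s_u16LedCtl = 0x0602;                               /* LED0/LINK_UP#, LED2/LINK100# */
static uint32_t s_uLedCtl   = 0x07008300
                            | (((uint32_t)s_u16LedCtl & 0xCF00) << 8)   /* LED2 field -> 0x00060000 */
                            | (s_u16LedCtl & 0xCF);                     /* LED0 field -> 0x00000002 */
/* s_uLedCtl == 0x07068302; only the LED0 and LED2 fields come from the EEPROM word. */
#endif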
1686
1687#endif /* IN_RING3 */
1688
1689/**
1690 * Compute Internet checksum.
1691 *
1692 * @remarks Refer to http://www.netfor2.com/checksum.html for a short intro.
1693 *
1694 * @param pvBuf The buffer to compute the checksum for.
1695 * @param cb The size of the buffer in bytes.
1698 *
1699 * @return The 1's complement of the 1's complement sum.
1700 *
1701 * @thread E1000_TX
1702 */
1703static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1704{
1705 uint32_t csum = 0;
1706 uint16_t *pu16 = (uint16_t *)pvBuf;
1707
1708 while (cb > 1)
1709 {
1710 csum += *pu16++;
1711 cb -= 2;
1712 }
1713 if (cb)
1714 csum += *(uint8_t*)pu16;
1715 while (csum >> 16)
1716 csum = (csum >> 16) + (csum & 0xFFFF);
1717 return ~csum;
1718}
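
/*
 * Illustrative sketch of the carry folding performed by e1kCSum16 on a tiny sample
 * buffer. The buffer contents are arbitrary; the numeric values assume a little-endian
 * host (the byte representation of the checksum is the same either way).
 */
#if 0 /* illustrative only */
static const uint8_t s_abCsumSample[4] = { 0xFF, 0xFF, 0x01, 0x00 };
/* Words 0xFFFF + 0x0001 sum to 0x10000; folding the carry gives 0x0001 and the
   one's complement of that is 0xFFFE. */
static uint16_t s_uCsum = e1kCSum16(s_abCsumSample, sizeof(s_abCsumSample)); /* == 0xFFFE */
#endif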
1719
1720/**
1721 * Dump a packet to debug log.
1722 *
1723 * @param pThis The device state structure.
1724 * @param cpPacket The packet.
1725 * @param cb The size of the packet.
1726 * @param pszText A string denoting direction of packet transfer.
1727 * @thread E1000_TX
1728 */
1729DECLINLINE(void) e1kPacketDump(PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *pszText)
1730{
1731#ifdef DEBUG
1732 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1733 {
1734 Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
1735 pThis->szPrf, pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cb));
1736 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1737 {
1738 Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
1739 pThis->szPrf, cpPacket+14+8, cpPacket+14+24));
1740 if (*(cpPacket+14+6) == 0x6)
1741 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1742 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1743 }
1744 else if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x800)
1745 {
1746 Log4(("%s --- IPv4: %RTnaipv4 => %RTnaipv4\n",
1747 pThis->szPrf, *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16)));
1748 if (*(cpPacket+14+9) == 0x6) /* IPv4 protocol field == TCP? */
1749 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1750 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1751 }
1752 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1753 e1kCsLeave(pThis);
1754 }
1755#else
1756 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1757 {
1758 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1759 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
1760 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cpPacket+14+8, cpPacket+14+24,
1761 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1762 else
1763 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
1764 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket,
1765 *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16),
1766 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1767 e1kCsLeave(pThis);
1768 }
1769 RT_NOREF2(cb, pszText);
1770#endif
1771}
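
/*
 * For reference, the fixed offsets used by e1kPacketDump above (assuming an untagged
 * Ethernet frame and, for IPv4, a header without options):
 *   bytes  0..5   destination MAC        bytes  6..11  source MAC
 *   bytes 12..13  EtherType              byte  14      start of the L3 header
 *   IPv4: protocol at 14+9, source at 14+12, destination at 14+16, TCP header at 14+20
 *   IPv6: Next Header at 14+6, source at 14+8, destination at 14+24, TCP header at 14+40
 */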
1772
1773/**
1774 * Determine the type of transmit descriptor.
1775 *
1776 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1777 *
1778 * @param pDesc Pointer to descriptor union.
1779 * @thread E1000_TX
1780 */
1781DECLINLINE(int) e1kGetDescType(E1KTXDESC *pDesc)
1782{
1783 if (pDesc->legacy.cmd.fDEXT)
1784 return pDesc->context.dw2.u4DTYP;
1785 return E1K_DTYP_LEGACY;
1786}
1787
1788
1789#ifdef E1K_WITH_RXD_CACHE
1790/**
1791 * Return the number of RX descriptors that belong to the hardware.
1792 *
1793 * @returns the number of available descriptors in RX ring.
1794 * @param pThis The device state structure.
1795 * @thread ???
1796 */
1797DECLINLINE(uint32_t) e1kGetRxLen(PE1KSTATE pThis)
1798{
1799 /**
1800 * Make sure RDT won't change during computation. EMT may modify RDT at
1801 * any moment.
1802 */
1803 uint32_t rdt = RDT;
1804 return (RDH > rdt ? RDLEN/sizeof(E1KRXDESC) : 0) + rdt - RDH;
1805}
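
/*
 * Illustrative example of e1kGetRxLen for a hypothetical 16-descriptor ring
 * (RDLEN = 16 * sizeof(E1KRXDESC)):
 *   RDH = 2,  RDT = 10  ->  0 + 10 - 2  = 8 descriptors owned by hardware
 *   RDH = 10, RDT = 2   -> 16 + 2 - 10  = 8 descriptors (tail wrapped past the ring end)
 */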
1806
1807DECLINLINE(unsigned) e1kRxDInCache(PE1KSTATE pThis)
1808{
1809 return pThis->nRxDFetched > pThis->iRxDCurrent ?
1810 pThis->nRxDFetched - pThis->iRxDCurrent : 0;
1811}
1812
1813DECLINLINE(unsigned) e1kRxDIsCacheEmpty(PE1KSTATE pThis)
1814{
1815 return pThis->iRxDCurrent >= pThis->nRxDFetched;
1816}
1817
1818/**
1819 * Load receive descriptors from guest memory. The caller needs to be in Rx
1820 * critical section.
1821 *
1822 * We need two physical reads in case the tail wrapped around the end of the
1823 * RX descriptor ring.
1824 *
1825 * @returns the actual number of descriptors fetched.
1826 * @param pThis The device state structure.
1829 * @thread EMT, RX
1830 */
1831DECLINLINE(unsigned) e1kRxDPrefetch(PE1KSTATE pThis)
1832{
1833 /* We've already loaded pThis->nRxDFetched descriptors past RDH. */
1834 unsigned nDescsAvailable = e1kGetRxLen(pThis) - e1kRxDInCache(pThis);
1835 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pThis->nRxDFetched);
1836 unsigned nDescsTotal = RDLEN / sizeof(E1KRXDESC);
1837 Assert(nDescsTotal != 0);
1838 if (nDescsTotal == 0)
1839 return 0;
1840 unsigned nFirstNotLoaded = (RDH + e1kRxDInCache(pThis)) % nDescsTotal;
1841 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
1842 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
1843 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
1844 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
1845 nFirstNotLoaded, nDescsInSingleRead));
1846 if (nDescsToFetch == 0)
1847 return 0;
1848 E1KRXDESC* pFirstEmptyDesc = &pThis->aRxDescriptors[pThis->nRxDFetched];
1849 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
1850 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
1851 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
1852 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
1853 // unsigned i, j;
1854 // for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
1855 // {
1856 // pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
1857 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
1858 // }
1859 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
1860 pThis->szPrf, nDescsInSingleRead,
1861 RDBAH, RDBAL + RDH * sizeof(E1KRXDESC),
1862 nFirstNotLoaded, RDLEN, RDH, RDT));
1863 if (nDescsToFetch > nDescsInSingleRead)
1864 {
1865 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
1866 ((uint64_t)RDBAH << 32) + RDBAL,
1867 pFirstEmptyDesc + nDescsInSingleRead,
1868 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
1869 // Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
1870 // for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
1871 // {
1872 // pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
1873 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
1874 // }
1875 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
1876 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
1877 RDBAH, RDBAL));
1878 }
1879 pThis->nRxDFetched += nDescsToFetch;
1880 return nDescsToFetch;
1881}
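
/*
 * Illustrative example of why two physical reads may be needed above. Assume a
 * hypothetical 16-descriptor ring, RDH = 14, an empty cache and 6 descriptors
 * available (with E1K_RXD_CACHE_SIZE large enough to hold them): nFirstNotLoaded = 14
 * and nDescsInSingleRead = min(6, 16 - 14) = 2, so descriptors 14..15 are fetched
 * first and the remaining 4 are fetched with a second read from the start of the ring.
 */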
1882
1883# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
1884/**
1885 * Dump receive descriptor to debug log.
1886 *
1887 * @param pThis The device state structure.
1888 * @param pDesc Pointer to the descriptor.
1889 * @thread E1000_RX
1890 */
1891static void e1kPrintRDesc(PE1KSTATE pThis, E1KRXDESC *pDesc)
1892{
1893 RT_NOREF2(pThis, pDesc);
1894 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", pThis->szPrf, pDesc->u16Length));
1895 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1896 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1897 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1898 pDesc->status.fPIF ? "PIF" : "pif",
1899 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1900 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1901 pDesc->status.fVP ? "VP" : "vp",
1902 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1903 pDesc->status.fEOP ? "EOP" : "eop",
1904 pDesc->status.fDD ? "DD" : "dd",
1905 pDesc->status.fRXE ? "RXE" : "rxe",
1906 pDesc->status.fIPE ? "IPE" : "ipe",
1907 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1908 pDesc->status.fCE ? "CE" : "ce",
1909 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1910 E1K_SPEC_VLAN(pDesc->status.u16Special),
1911 E1K_SPEC_PRI(pDesc->status.u16Special)));
1912}
1913# endif /* IN_RING3 */
1914#endif /* E1K_WITH_RXD_CACHE */
1915
1916/**
1917 * Dump transmit descriptor to debug log.
1918 *
1919 * @param pThis The device state structure.
1920 * @param pDesc Pointer to descriptor union.
1921 * @param pszDir A string denoting direction of descriptor transfer
1922 * @thread E1000_TX
1923 */
1924static void e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, const char *pszDir,
1925 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
1926{
1927 RT_NOREF4(pThis, pDesc, pszDir, uLevel);
1928
1929 /*
1930 * Unfortunately we cannot use our format handler here, we want R0 logging
1931 * as well.
1932 */
1933 switch (e1kGetDescType(pDesc))
1934 {
1935 case E1K_DTYP_CONTEXT:
1936 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
1937 pThis->szPrf, pszDir, pszDir));
1938 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
1939 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
1940 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
1941 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
1942 pDesc->context.dw2.fIDE ? " IDE":"",
1943 pDesc->context.dw2.fRS ? " RS" :"",
1944 pDesc->context.dw2.fTSE ? " TSE":"",
1945 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
1946 pDesc->context.dw2.fTCP ? "TCP":"UDP",
1947 pDesc->context.dw2.u20PAYLEN,
1948 pDesc->context.dw3.u8HDRLEN,
1949 pDesc->context.dw3.u16MSS,
1950 pDesc->context.dw3.fDD?"DD":""));
1951 break;
1952 case E1K_DTYP_DATA:
1953 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
1954 pThis->szPrf, pszDir, pDesc->data.cmd.u20DTALEN, pszDir));
1955 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1956 pDesc->data.u64BufAddr,
1957 pDesc->data.cmd.u20DTALEN));
1958 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
1959 pDesc->data.cmd.fIDE ? " IDE" :"",
1960 pDesc->data.cmd.fVLE ? " VLE" :"",
1961 pDesc->data.cmd.fRPS ? " RPS" :"",
1962 pDesc->data.cmd.fRS ? " RS" :"",
1963 pDesc->data.cmd.fTSE ? " TSE" :"",
1964 pDesc->data.cmd.fIFCS? " IFCS":"",
1965 pDesc->data.cmd.fEOP ? " EOP" :"",
1966 pDesc->data.dw3.fDD ? " DD" :"",
1967 pDesc->data.dw3.fEC ? " EC" :"",
1968 pDesc->data.dw3.fLC ? " LC" :"",
1969 pDesc->data.dw3.fTXSM? " TXSM":"",
1970 pDesc->data.dw3.fIXSM? " IXSM":"",
1971 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
1972 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
1973 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
1974 break;
1975 case E1K_DTYP_LEGACY:
1976 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
1977 pThis->szPrf, pszDir, pDesc->legacy.cmd.u16Length, pszDir));
1978 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1979 pDesc->data.u64BufAddr,
1980 pDesc->legacy.cmd.u16Length));
1981 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
1982 pDesc->legacy.cmd.fIDE ? " IDE" :"",
1983 pDesc->legacy.cmd.fVLE ? " VLE" :"",
1984 pDesc->legacy.cmd.fRPS ? " RPS" :"",
1985 pDesc->legacy.cmd.fRS ? " RS" :"",
1986 pDesc->legacy.cmd.fIC ? " IC" :"",
1987 pDesc->legacy.cmd.fIFCS? " IFCS":"",
1988 pDesc->legacy.cmd.fEOP ? " EOP" :"",
1989 pDesc->legacy.dw3.fDD ? " DD" :"",
1990 pDesc->legacy.dw3.fEC ? " EC" :"",
1991 pDesc->legacy.dw3.fLC ? " LC" :"",
1992 pDesc->legacy.cmd.u8CSO,
1993 pDesc->legacy.dw3.u8CSS,
1994 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
1995 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
1996 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
1997 break;
1998 default:
1999 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
2000 pThis->szPrf, pszDir, pszDir));
2001 break;
2002 }
2003}
2004
2005/**
2006 * Raise an interrupt later.
2007 *
2008 * @param pThis The device state structure.
 * @param uNanoseconds The delay before raising the interrupt, in nanoseconds.
2009 */
2010inline void e1kPostponeInterrupt(PE1KSTATE pThis, uint64_t uNanoseconds)
2011{
2012 if (!TMTimerIsActive(pThis->CTX_SUFF(pIntTimer)))
2013 TMTimerSetNano(pThis->CTX_SUFF(pIntTimer), uNanoseconds);
2014}
2015
2016/**
2017 * Raise interrupt if not masked.
2018 *
2019 * @param pThis The device state structure.
 * @param rcBusy Status code to return when the critical section could not be entered.
 * @param u32IntCause The interrupt cause bit(s) to set in ICR (optional, default 0).
2020 */
2021static int e1kRaiseInterrupt(PE1KSTATE pThis, int rcBusy, uint32_t u32IntCause = 0)
2022{
2023 int rc = e1kCsEnter(pThis, rcBusy);
2024 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2025 return rc;
2026
2027 E1K_INC_ISTAT_CNT(pThis->uStatIntTry);
2028 ICR |= u32IntCause;
2029 if (ICR & IMS)
2030 {
2031 if (pThis->fIntRaised)
2032 {
2033 E1K_INC_ISTAT_CNT(pThis->uStatIntSkip);
2034 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
2035 pThis->szPrf, ICR & IMS));
2036 }
2037 else
2038 {
2039 uint64_t tsNow = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
2040 if (!!ITR && tsNow - pThis->u64AckedAt < ITR * 256
2041 && pThis->fItrEnabled && (pThis->fItrRxEnabled || !(ICR & ICR_RXT0)))
2042 {
2043 E1K_INC_ISTAT_CNT(pThis->uStatIntEarly);
2044 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
2045 pThis->szPrf, (uint32_t)(tsNow - pThis->u64AckedAt), ITR * 256));
2046 e1kPostponeInterrupt(pThis, ITR * 256);
2047 }
2048 else
2049 {
2050
2051 /* Since we are delivering the interrupt now
2052 * there is no need to do it later -- stop the timer.
2053 */
2054 TMTimerStop(pThis->CTX_SUFF(pIntTimer));
2055 E1K_INC_ISTAT_CNT(pThis->uStatInt);
2056 STAM_COUNTER_INC(&pThis->StatIntsRaised);
2057 /* Got at least one unmasked interrupt cause */
2058 pThis->fIntRaised = true;
2059 /* Raise(1) INTA(0) */
2060 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
2061 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 1);
2062 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
2063 pThis->szPrf, ICR & IMS));
2064 }
2065 }
2066 }
2067 else
2068 {
2069 E1K_INC_ISTAT_CNT(pThis->uStatIntMasked);
2070 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
2071 pThis->szPrf, ICR, IMS));
2072 }
2073 e1kCsLeave(pThis);
2074 return VINF_SUCCESS;
2075}
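
/*
 * Illustrative example of the throttling arithmetic above: ITR is programmed in
 * 256 ns units, so e.g. ITR = 4000 enforces a minimum gap of 4000 * 256 ns = 1.024 ms
 * between interrupts, i.e. at most roughly 976 interrupts per second.
 */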
2076
2077/**
2078 * Compute the physical address of the descriptor.
2079 *
2080 * @returns the physical address of the descriptor.
2081 *
2082 * @param baseHigh High-order 32 bits of descriptor table address.
2083 * @param baseLow Low-order 32 bits of descriptor table address.
2084 * @param idxDesc The descriptor index in the table.
2085 */
2086DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
2087{
2088 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
2089 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
2090}
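
/*
 * Illustrative sketch of the descriptor address arithmetic above with arbitrary sample
 * values; each descriptor is 16 bytes, so index 4 sits 64 bytes past the base address.
 */
#if 0 /* illustrative only */
static RTGCPHYS s_addrDesc = e1kDescAddr(0x00000001 /*baseHigh*/, 0x00010000 /*baseLow*/, 4 /*idxDesc*/);
/* s_addrDesc == UINT64_C(0x0000000100010040) */
#endif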
2091
2092#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2093/**
2094 * Advance the head pointer of the receive descriptor queue.
2095 *
2096 * @remarks RDH always points to the next available RX descriptor.
2097 *
2098 * @param pThis The device state structure.
2099 */
2100DECLINLINE(void) e1kAdvanceRDH(PE1KSTATE pThis)
2101{
2102 Assert(e1kCsRxIsOwner(pThis));
2103 //e1kCsEnter(pThis, RT_SRC_POS);
2104 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
2105 RDH = 0;
2106#ifdef E1K_WITH_RXD_CACHE
2107 /*
2108 * We need to fetch descriptors now as the guest may advance RDT all the way
2109 * to RDH as soon as we generate RXDMT0 interrupt. This is mostly to provide
2110 * compatibility with Phar Lap ETS, see @bugref{7346}. Note that we do not
2111 * check if the receiver is enabled. It must be, otherwise we won't get here
2112 * in the first place.
2113 *
2114 * Note that we should have moved both RDH and iRxDCurrent by now.
2115 */
2116 if (e1kRxDIsCacheEmpty(pThis))
2117 {
2118 /* Cache is empty, reset it and check if we can fetch more. */
2119 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2120 E1kLog3(("%s e1kAdvanceRDH: Rx cache is empty, RDH=%x RDT=%x "
2121 "iRxDCurrent=%x nRxDFetched=%x\n",
2122 pThis->szPrf, RDH, RDT, pThis->iRxDCurrent, pThis->nRxDFetched));
2123 e1kRxDPrefetch(pThis);
2124 }
2125#endif /* E1K_WITH_RXD_CACHE */
2126 /*
2127 * Compute current receive queue length and fire RXDMT0 interrupt
2128 * if we are low on receive buffers
2129 */
2130 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
2131 /*
2132 * The minimum threshold is controlled by RDMTS bits of RCTL:
2133 * 00 = 1/2 of RDLEN
2134 * 01 = 1/4 of RDLEN
2135 * 10 = 1/8 of RDLEN
2136 * 11 = reserved
2137 */
2138 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
2139 if (uRQueueLen <= uMinRQThreshold)
2140 {
2141 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
2142 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
2143 pThis->szPrf, RDH, RDT, uRQueueLen, uMinRQThreshold));
2144 E1K_INC_ISTAT_CNT(pThis->uStatIntRXDMT0);
2145 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXDMT0);
2146 }
2147 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
2148 pThis->szPrf, RDH, RDT, uRQueueLen));
2149 //e1kCsLeave(pThis);
2150}
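
/*
 * Illustrative example of the RXDMT0 threshold arithmetic above: for a hypothetical
 * 256-descriptor ring and RCTL.RDMTS = 01b the threshold is 256 / (2 << 1) = 64,
 * i.e. ICR_RXDMT0 is raised once 64 or fewer descriptors remain available to the
 * hardware (1/4 of RDLEN, matching the table in the comment).
 */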
2151#endif /* IN_RING3 */
2152
2153#ifdef E1K_WITH_RXD_CACHE
2154
2155# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2156
2157/**
2158 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2159 * RX ring if the cache is empty.
2160 *
2161 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2162 * go out of sync with RDH which will cause trouble when EMT checks if the
2163 * cache is empty to do pre-fetch, see @bugref{6217}.
2164 *
2165 * @param pThis The device state structure.
2166 * @thread RX
2167 */
2168DECLINLINE(E1KRXDESC*) e1kRxDGet(PE1KSTATE pThis)
2169{
2170 Assert(e1kCsRxIsOwner(pThis));
2171 /* Check the cache first. */
2172 if (pThis->iRxDCurrent < pThis->nRxDFetched)
2173 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2174 /* Cache is empty, reset it and check if we can fetch more. */
2175 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2176 if (e1kRxDPrefetch(pThis))
2177 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2178 /* Out of Rx descriptors. */
2179 return NULL;
2180}
2181
2182
2183/**
2184 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2185 * pointer. The descriptor gets written back to the RXD ring.
2186 *
2187 * @param pThis The device state structure.
2188 * @param pDesc The descriptor being "returned" to the RX ring.
2189 * @thread RX
2190 */
2191DECLINLINE(void) e1kRxDPut(PE1KSTATE pThis, E1KRXDESC* pDesc)
2192{
2193 Assert(e1kCsRxIsOwner(pThis));
2194 pThis->iRxDCurrent++;
2195 // Assert(pDesc >= pThis->aRxDescriptors);
2196 // Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2197 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2198 // uint32_t rdh = RDH;
2199 // Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
2200 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2201 e1kDescAddr(RDBAH, RDBAL, RDH),
2202 pDesc, sizeof(E1KRXDESC));
2203 /*
2204 * We need to print the descriptor before advancing RDH as it may fetch new
2205 * descriptors into the cache.
2206 */
2207 e1kPrintRDesc(pThis, pDesc);
2208 e1kAdvanceRDH(pThis);
2209}
2210
2211/**
2212 * Store a fragment of a received packet at the specified address.
2213 *
2214 * @param pThis The device state structure.
2215 * @param pDesc The next available RX descriptor.
2216 * @param pvBuf The fragment.
2217 * @param cb The size of the fragment.
2218 */
2219static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2220{
2221 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2222 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2223 pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2224 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2225 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2226 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2227}
2228
2229# endif /* IN_RING3 */
2230
2231#else /* !E1K_WITH_RXD_CACHE */
2232
2233/**
2234 * Store a fragment of received packet that fits into the next available RX
2235 * buffer.
2236 *
2237 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2238 *
2239 * @param pThis The device state structure.
2240 * @param pDesc The next available RX descriptor.
2241 * @param pvBuf The fragment.
2242 * @param cb The size of the fragment.
2243 */
2244static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2245{
2246 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2247 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2248 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2249 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2250 /* Write back the descriptor */
2251 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2252 e1kPrintRDesc(pThis, pDesc);
2253 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2254 /* Advance head */
2255 e1kAdvanceRDH(pThis);
2256 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
2257 if (pDesc->status.fEOP)
2258 {
2259 /* Complete packet has been stored -- it is time to let the guest know. */
2260#ifdef E1K_USE_RX_TIMERS
2261 if (RDTR)
2262 {
2263 /* Arm the timer to fire in RDTR usec (discard .024) */
2264 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2265 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2266 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2267 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2268 }
2269 else
2270 {
2271#endif
2272 /* 0 delay means immediate interrupt */
2273 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2274 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2275#ifdef E1K_USE_RX_TIMERS
2276 }
2277#endif
2278 }
2279 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2280}
2281
2282#endif /* !E1K_WITH_RXD_CACHE */
2283
2284/**
2285 * Returns true if it is a broadcast packet.
2286 *
2287 * @returns true if destination address indicates broadcast.
2288 * @param pvBuf The ethernet packet.
2289 */
2290DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2291{
2292 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2293 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2294}
2295
2296/**
2297 * Returns true if it is a multicast packet.
2298 *
2299 * @remarks returns true for broadcast packets as well.
2300 * @returns true if destination address indicates multicast.
2301 * @param pvBuf The ethernet packet.
2302 */
2303DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2304{
2305 return (*(char*)pvBuf) & 1;
2306}
2307
2308#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2309/**
2310 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2311 *
2312 * @remarks We emulate checksum offloading for major packets types only.
2313 *
2314 * @returns VBox status code.
2315 * @param pThis The device state structure.
2316 * @param pFrame The available data.
2317 * @param cb Number of bytes available in the buffer.
2318 * @param pStatus Bit fields containing status info.
2319 */
2320static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2321{
2322 /** @todo
2323 * It is not safe to bypass checksum verification for packets coming
2324 * from the real wire. We are currently unable to tell where packets are
2325 * coming from, so we tell the driver to ignore our checksum flags
2326 * and do verification in software.
2327 */
2328# if 0
2329 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2330
2331 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", pThis->szPrf, uEtherType));
2332
2333 switch (uEtherType)
2334 {
2335 case 0x800: /* IPv4 */
2336 {
2337 pStatus->fIXSM = false;
2338 pStatus->fIPCS = true;
2339 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2340 /* TCP/UDP checksum offloading works with TCP and UDP only */
2341 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2342 break;
2343 }
2344 case 0x86DD: /* IPv6 */
2345 pStatus->fIXSM = false;
2346 pStatus->fIPCS = false;
2347 pStatus->fTCPCS = true;
2348 break;
2349 default: /* ARP, VLAN, etc. */
2350 pStatus->fIXSM = true;
2351 break;
2352 }
2353# else
2354 pStatus->fIXSM = true;
2355 RT_NOREF_PV(pThis); RT_NOREF_PV(pFrame); RT_NOREF_PV(cb);
2356# endif
2357 return VINF_SUCCESS;
2358}
2359#endif /* IN_RING3 */
2360
2361/**
2362 * Pad and store received packet.
2363 *
2364 * @remarks Make sure that the packet appears to upper layer as one coming
2365 * from real Ethernet: pad it and insert FCS.
2366 *
2367 * @returns VBox status code.
2368 * @param pThis The device state structure.
2369 * @param pvBuf The available data.
2370 * @param cb Number of bytes available in the buffer.
2371 * @param status Bit fields containing status info.
2372 */
2373static int e1kHandleRxPacket(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
2374{
2375#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2376 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2377 uint8_t *ptr = rxPacket;
2378
2379 int rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2380 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2381 return rc;
2382
2383 if (cb > 70) /* unqualified guess */
2384 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
2385
2386 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2387 Assert(cb > 16);
2388 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2389 E1kLog3(("%s Max RX packet size is %u\n", pThis->szPrf, cbMax));
2390 if (status.fVP)
2391 {
2392 /* VLAN packet -- strip VLAN tag in VLAN mode */
2393 if ((CTRL & CTRL_VME) && cb > 16)
2394 {
2395 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2396 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2397 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2398 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2399 cb -= 4;
2400 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2401 pThis->szPrf, status.u16Special, cb));
2402 }
2403 else
2404 status.fVP = false; /* Set VP only if we stripped the tag */
2405 }
2406 else
2407 memcpy(rxPacket, pvBuf, cb);
2408 /* Pad short packets */
2409 if (cb < 60)
2410 {
2411 memset(rxPacket + cb, 0, 60 - cb);
2412 cb = 60;
2413 }
2414 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2415 {
2416 STAM_PROFILE_ADV_START(&pThis->StatReceiveCRC, a);
2417 /*
2418 * Add FCS if CRC stripping is not enabled. Since the value of the CRC
2419 * is ignored by most drivers, we may as well save ourselves the trouble
2420 * of calculating it (see the EthernetCRC CFGM parameter).
2421 */
2422 if (pThis->fEthernetCRC)
2423 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2424 cb += sizeof(uint32_t);
2425 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveCRC, a);
2426 E1kLog3(("%s Added FCS (cb=%u)\n", pThis->szPrf, cb));
2427 }
2428 /* Compute checksum of complete packet */
2429 uint16_t checksum = e1kCSum16(rxPacket + GET_BITS(RXCSUM, PCSS), cb);
2430 e1kRxChecksumOffload(pThis, rxPacket, cb, &status);
2431
2432 /* Update stats */
2433 E1K_INC_CNT32(GPRC);
2434 if (e1kIsBroadcast(pvBuf))
2435 E1K_INC_CNT32(BPRC);
2436 else if (e1kIsMulticast(pvBuf))
2437 E1K_INC_CNT32(MPRC);
2438 /* Update octet receive counter */
2439 E1K_ADD_CNT64(GORCL, GORCH, cb);
2440 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
2441 if (cb == 64)
2442 E1K_INC_CNT32(PRC64);
2443 else if (cb < 128)
2444 E1K_INC_CNT32(PRC127);
2445 else if (cb < 256)
2446 E1K_INC_CNT32(PRC255);
2447 else if (cb < 512)
2448 E1K_INC_CNT32(PRC511);
2449 else if (cb < 1024)
2450 E1K_INC_CNT32(PRC1023);
2451 else
2452 E1K_INC_CNT32(PRC1522);
2453
2454 E1K_INC_ISTAT_CNT(pThis->uStatRxFrm);
2455
2456# ifdef E1K_WITH_RXD_CACHE
2457 while (cb > 0)
2458 {
2459 E1KRXDESC *pDesc = e1kRxDGet(pThis);
2460
2461 if (pDesc == NULL)
2462 {
2463 E1kLog(("%s Out of receive buffers, dropping the packet "
2464 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2465 pThis->szPrf, cb, e1kRxDInCache(pThis), RDH, RDT));
2466 break;
2467 }
2468# else /* !E1K_WITH_RXD_CACHE */
2469 if (RDH == RDT)
2470 {
2471 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2472 pThis->szPrf));
2473 }
2474 /* Store the packet to receive buffers */
2475 while (RDH != RDT)
2476 {
2477 /* Load the descriptor pointed by head */
2478 E1KRXDESC desc, *pDesc = &desc;
2479 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
2480 &desc, sizeof(desc));
2481# endif /* !E1K_WITH_RXD_CACHE */
2482 if (pDesc->u64BufAddr)
2483 {
2484 uint16_t u16RxBufferSize = pThis->u16RxBSize; /* see @bugref{9427} */
2485
2486 /* Update descriptor */
2487 pDesc->status = status;
2488 pDesc->u16Checksum = checksum;
2489 pDesc->status.fDD = true;
2490
2491 /*
2492 * We need to leave Rx critical section here or we risk deadlocking
2493 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2494 * page or has an access handler associated with it.
2495 * Note that it is safe to leave the critical section here since
2496 * e1kRegWriteRDT() never modifies RDH. It never touches already
2497 * fetched RxD cache entries either.
2498 */
2499 if (cb > u16RxBufferSize)
2500 {
2501 pDesc->status.fEOP = false;
2502 e1kCsRxLeave(pThis);
2503 e1kStoreRxFragment(pThis, pDesc, ptr, u16RxBufferSize);
2504 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2505 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2506 return rc;
2507 ptr += u16RxBufferSize;
2508 cb -= u16RxBufferSize;
2509 }
2510 else
2511 {
2512 pDesc->status.fEOP = true;
2513 e1kCsRxLeave(pThis);
2514 e1kStoreRxFragment(pThis, pDesc, ptr, cb);
2515# ifdef E1K_WITH_RXD_CACHE
2516 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2517 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2518 return rc;
2519 cb = 0;
2520# else /* !E1K_WITH_RXD_CACHE */
2521 pThis->led.Actual.s.fReading = 0;
2522 return VINF_SUCCESS;
2523# endif /* !E1K_WITH_RXD_CACHE */
2524 }
2525 /*
2526 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2527 * is not defined.
2528 */
2529 }
2530# ifdef E1K_WITH_RXD_CACHE
2531 /* Write back the descriptor. */
2532 pDesc->status.fDD = true;
2533 e1kRxDPut(pThis, pDesc);
2534# else /* !E1K_WITH_RXD_CACHE */
2535 else
2536 {
2537 /* Write back the descriptor. */
2538 pDesc->status.fDD = true;
2539 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2540 e1kDescAddr(RDBAH, RDBAL, RDH),
2541 pDesc, sizeof(E1KRXDESC));
2542 e1kAdvanceRDH(pThis);
2543 }
2544# endif /* !E1K_WITH_RXD_CACHE */
2545 }
2546
2547 if (cb > 0)
2548 E1kLog(("%s Out of receive buffers, dropping %u bytes\n", pThis->szPrf, cb));
2549
2550 pThis->led.Actual.s.fReading = 0;
2551
2552 e1kCsRxLeave(pThis);
2553# ifdef E1K_WITH_RXD_CACHE
2554 /* Complete packet has been stored -- it is time to let the guest know. */
2555# ifdef E1K_USE_RX_TIMERS
2556 if (RDTR)
2557 {
2558 /* Arm the timer to fire in RDTR usec (discard .024) */
2559 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2560 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2561 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2562 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2563 }
2564 else
2565 {
2566# endif /* E1K_USE_RX_TIMERS */
2567 /* 0 delay means immediate interrupt */
2568 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2569 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2570# ifdef E1K_USE_RX_TIMERS
2571 }
2572# endif /* E1K_USE_RX_TIMERS */
2573# endif /* E1K_WITH_RXD_CACHE */
2574
2575 return VINF_SUCCESS;
2576#else /* !IN_RING3 */
2577 RT_NOREF_PV(pThis); RT_NOREF_PV(pvBuf); RT_NOREF_PV(cb); RT_NOREF_PV(status);
2578 return VERR_INTERNAL_ERROR_2;
2579#endif /* !IN_RING3 */
2580}
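
/*
 * For reference, the 802.1Q frame layout assumed by the VLAN-stripping code in
 * e1kHandleRxPacket above:
 *   bytes  0..11  destination + source MAC (copied verbatim)
 *   bytes 12..13  TPID (0x8100)
 *   bytes 14..15  TCI (PRI/CFI/VLAN id)    -> stored in status.u16Special
 *   bytes 16..    original EtherType and payload (copied to offset 12)
 * which is why 12 bytes are copied first, u16Ptr[7] is read as the tag and the total
 * length shrinks by 4.
 */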
2581
2582
2583#ifdef IN_RING3
2584/**
2585 * Bring the link up after the configured delay, 5 seconds by default.
2586 *
2587 * @param pThis The device state structure.
2588 * @thread any
2589 */
2590DECLINLINE(void) e1kBringLinkUpDelayed(PE1KSTATE pThis)
2591{
2592 E1kLog(("%s Will bring up the link in %d seconds...\n",
2593 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
2594 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), pThis->cMsLinkUpDelay * 1000);
2595}
2596
2597/**
2598 * Bring up the link immediately.
2599 *
2600 * @param pThis The device state structure.
2601 */
2602DECLINLINE(void) e1kR3LinkUp(PE1KSTATE pThis)
2603{
2604 E1kLog(("%s Link is up\n", pThis->szPrf));
2605 STATUS |= STATUS_LU;
2606 Phy::setLinkStatus(&pThis->phy, true);
2607 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2608 if (pThis->pDrvR3)
2609 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_UP);
2610 /* Process pending TX descriptors (see @bugref{8942}) */
2611 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pTxQueue));
2612 if (RT_UNLIKELY(pItem))
2613 PDMQueueInsert(pThis->CTX_SUFF(pTxQueue), pItem);
2614}
2615
2616/**
2617 * Bring down the link immediately.
2618 *
2619 * @param pThis The device state structure.
2620 */
2621DECLINLINE(void) e1kR3LinkDown(PE1KSTATE pThis)
2622{
2623 E1kLog(("%s Link is down\n", pThis->szPrf));
2624 STATUS &= ~STATUS_LU;
2625#ifdef E1K_LSC_ON_RESET
2626 Phy::setLinkStatus(&pThis->phy, false);
2627#endif /* E1K_LSC_ON_RESET */
2628 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2629 if (pThis->pDrvR3)
2630 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2631}
2632
2633/**
2634 * Bring down the link temporarily.
2635 *
2636 * @param pThis The device state structure.
2637 */
2638DECLINLINE(void) e1kR3LinkDownTemp(PE1KSTATE pThis)
2639{
2640 E1kLog(("%s Link is down temporarily\n", pThis->szPrf));
2641 STATUS &= ~STATUS_LU;
2642 Phy::setLinkStatus(&pThis->phy, false);
2643 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2644 /*
2645 * Notifying the associated driver that the link went down (even temporarily)
2646 * seems to be the right thing, but it was not done before. This may cause
2647 * a regression if the driver does not expect the link to go down as a result
2648 * of sending PDMNETWORKLINKSTATE_DOWN_RESUME to this device. Earlier versions
2649 * of code notified the driver that the link was up! See @bugref{7057}.
2650 */
2651 if (pThis->pDrvR3)
2652 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2653 e1kBringLinkUpDelayed(pThis);
2654}
2655#endif /* IN_RING3 */
2656
2657#if 0 /* unused */
2658/**
2659 * Read handler for Device Control register.
2660 *
2661 * Reflects the MDIO pin state read from the PHY when the MDIO direction is input.
2662 *
2663 * @returns VBox status code.
2664 *
2665 * @param pThis The device state structure.
2666 * @param offset Register offset in memory-mapped frame.
2667 * @param index Register index in register array.
2668 * @param mask Used to implement partial reads (8 and 16-bit).
2669 */
2670static int e1kRegReadCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2671{
2672 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2673 pThis->szPrf, (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2674 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2675 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2676 {
2677 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2678 if (Phy::readMDIO(&pThis->phy))
2679 *pu32Value = CTRL | CTRL_MDIO;
2680 else
2681 *pu32Value = CTRL & ~CTRL_MDIO;
2682 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2683 pThis->szPrf, !!(*pu32Value & CTRL_MDIO)));
2684 }
2685 else
2686 {
2687 /* MDIO pin is used for output, ignore it */
2688 *pu32Value = CTRL;
2689 }
2690 return VINF_SUCCESS;
2691}
2692#endif /* unused */
2693
2694/**
2695 * A callback used by the PHY to indicate that the link needs to be updated
2696 * due to a PHY reset.
2697 *
2698 * @param pPhy A pointer to the phy member of the device state structure.
2699 * @thread any
2700 */
2701void e1kPhyLinkResetCallback(PPHY pPhy)
2702{
2703 /* PHY is aggregated into e1000, get pThis from pPhy. */
2704 PE1KSTATE pThis = RT_FROM_MEMBER(pPhy, E1KSTATE, phy);
2705 /* Make sure we have cable connected and MAC can talk to PHY */
2706 if (pThis->fCableConnected && (CTRL & CTRL_SLU))
2707 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), E1K_INIT_LINKUP_DELAY_US);
2708}
2709
2710/**
2711 * Write handler for Device Control register.
2712 *
2713 * Handles reset.
2714 *
2715 * @param pThis The device state structure.
2716 * @param offset Register offset in memory-mapped frame.
2717 * @param index Register index in register array.
2718 * @param value The value to store.
2719 * @param mask Used to implement partial writes (8 and 16-bit).
2720 * @thread EMT
2721 */
2722static int e1kRegWriteCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2723{
2724 int rc = VINF_SUCCESS;
2725
2726 if (value & CTRL_RESET)
2727 { /* RST */
2728#ifndef IN_RING3
2729 return VINF_IOM_R3_MMIO_WRITE;
2730#else
2731 e1kHardReset(pThis);
2732#endif
2733 }
2734 else
2735 {
2736#ifdef E1K_LSC_ON_SLU
2737 /*
2738 * When the guest changes 'Set Link Up' bit from 0 to 1 we check if
2739 * the link is down and the cable is connected, and if they are we
2740 * bring the link up, see @bugref{8624}.
2741 */
2742 if ( (value & CTRL_SLU)
2743 && !(CTRL & CTRL_SLU)
2744 && pThis->fCableConnected
2745 && !(STATUS & STATUS_LU))
2746 {
2747 /* It should take about 2 seconds for the link to come up */
2748 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), E1K_INIT_LINKUP_DELAY_US);
2749 }
2750#else /* !E1K_LSC_ON_SLU */
2751 if ( (value & CTRL_SLU)
2752 && !(CTRL & CTRL_SLU)
2753 && pThis->fCableConnected
2754 && !TMTimerIsActive(pThis->CTX_SUFF(pLUTimer)))
2755 {
2756 /* PXE does not use LSC interrupts, see @bugref{9113}. */
2757 STATUS |= STATUS_LU;
2758 }
2759#endif /* !E1K_LSC_ON_SLU */
2760 if ((value & CTRL_VME) != (CTRL & CTRL_VME))
2761 {
2762 E1kLog(("%s VLAN Mode %s\n", pThis->szPrf, (value & CTRL_VME) ? "Enabled" : "Disabled"));
2763 }
2764 Log7(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2765 pThis->szPrf, (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2766 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2767 if (value & CTRL_MDC)
2768 {
2769 if (value & CTRL_MDIO_DIR)
2770 {
2771 Log7(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2772 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2773 Phy::writeMDIO(&pThis->phy, !!(value & CTRL_MDIO));
2774 }
2775 else
2776 {
2777 if (Phy::readMDIO(&pThis->phy))
2778 value |= CTRL_MDIO;
2779 else
2780 value &= ~CTRL_MDIO;
2781 Log7(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2782 }
2783 }
2784 rc = e1kRegWriteDefault(pThis, offset, index, value);
2785 }
2786
2787 return rc;
2788}
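
/*
 * Note on the MDIO handling above: the guest bit-bangs the management interface by
 * toggling CTRL.MDC as the clock while driving or sampling CTRL.MDIO, one bit per MDC
 * pulse, following the usual IEEE 802.3 clause 22 framing (preamble, start, opcode,
 * PHY address, register address, turnaround, 16 data bits). The emulation simply
 * forwards each bit to Phy::writeMDIO / Phy::readMDIO, which are presumed to implement
 * the serial state machine that reassembles those frames.
 */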
2789
2790/**
2791 * Write handler for EEPROM/Flash Control/Data register.
2792 *
2793 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2794 *
2795 * @param pThis The device state structure.
2796 * @param offset Register offset in memory-mapped frame.
2797 * @param index Register index in register array.
2798 * @param value The value to store.
2799 * @param mask Used to implement partial writes (8 and 16-bit).
2800 * @thread EMT
2801 */
2802static int e1kRegWriteEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2803{
2804 RT_NOREF(offset, index);
2805#ifdef IN_RING3
2806 /* So far we are concerned with lower byte only */
2807 if ((EECD & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2808 {
2809 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2810 /* Note: 82543GC does not need to request EEPROM access */
2811 STAM_PROFILE_ADV_START(&pThis->StatEEPROMWrite, a);
2812 pThis->eeprom.write(value & EECD_EE_WIRES);
2813 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMWrite, a);
2814 }
2815 if (value & EECD_EE_REQ)
2816 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2817 else
2818 EECD &= ~EECD_EE_GNT;
2819 //e1kRegWriteDefault(pThis, offset, index, value );
2820
2821 return VINF_SUCCESS;
2822#else /* !IN_RING3 */
2823 RT_NOREF(pThis, value);
2824 return VINF_IOM_R3_MMIO_WRITE;
2825#endif /* !IN_RING3 */
2826}
2827
2828/**
2829 * Read handler for EEPROM/Flash Control/Data register.
2830 *
2831 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2832 *
2833 * @returns VBox status code.
2834 *
2835 * @param pThis The device state structure.
2836 * @param offset Register offset in memory-mapped frame.
2837 * @param index Register index in register array.
2838 * @param mask Used to implement partial reads (8 and 16-bit).
2839 * @thread EMT
2840 */
2841static int e1kRegReadEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2842{
2843#ifdef IN_RING3
2844 uint32_t value;
2845 int rc = e1kRegReadDefault(pThis, offset, index, &value);
2846 if (RT_SUCCESS(rc))
2847 {
2848 if ((value & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2849 {
2850 /* Note: 82543GC does not need to request EEPROM access */
2851 /* Access to EEPROM granted -- get 4-wire bits to EEPROM device */
2852 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2853 value |= pThis->eeprom.read();
2854 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2855 }
2856 *pu32Value = value;
2857 }
2858
2859 return rc;
2860#else /* !IN_RING3 */
2861 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(pu32Value);
2862 return VINF_IOM_R3_MMIO_READ;
2863#endif /* !IN_RING3 */
2864}
2865
2866/**
2867 * Write handler for EEPROM Read register.
2868 *
2869 * Handles EEPROM word access requests, reads EEPROM and stores the result
2870 * into DATA field.
2871 *
2872 * @param pThis The device state structure.
2873 * @param offset Register offset in memory-mapped frame.
2874 * @param index Register index in register array.
2875 * @param value The value to store.
2876 * @param mask Used to implement partial writes (8 and 16-bit).
2877 * @thread EMT
2878 */
2879static int e1kRegWriteEERD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2880{
2881#ifdef IN_RING3
2882 /* Make use of 'writable' and 'readable' masks. */
2883 e1kRegWriteDefault(pThis, offset, index, value);
2884 /* DONE and DATA are set only if read was triggered by START. */
2885 if (value & EERD_START)
2886 {
2887 uint16_t tmp;
2888 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2889 if (pThis->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2890 SET_BITS(EERD, DATA, tmp);
2891 EERD |= EERD_DONE;
2892 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2893 }
2894
2895 return VINF_SUCCESS;
2896#else /* !IN_RING3 */
2897 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
2898 return VINF_IOM_R3_MMIO_WRITE;
2899#endif /* !IN_RING3 */
2900}
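
/*
 * Illustrative guest-side sequence for the EERD handler above (a sketch, not
 * taken from a specific driver; the MMIO helper macros and the ADDR shift
 * name are hypothetical):
 *
 *   // request word 0x0A from the EEPROM
 *   E1K_MMIO_WRITE32(EERD, EERD_START | (0x0A << EERD_ADDR_SHIFT));
 *   do
 *       val = E1K_MMIO_READ32(EERD);
 *   while (!(val & EERD_DONE));
 *   word = GET_BITS_V(val, EERD, DATA);
 *
 * In this emulation the read completes synchronously inside the write
 * handler, so DONE and DATA are already valid on the first poll.
 */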
2901
2902
2903/**
2904 * Write handler for MDI Control register.
2905 *
2906 * Handles PHY read/write requests; forwards requests to internal PHY device.
2907 *
2908 * @param pThis The device state structure.
2909 * @param offset Register offset in memory-mapped frame.
2910 * @param index Register index in register array.
2911 * @param value The value to store.
2912 * @param mask Used to implement partial writes (8 and 16-bit).
2913 * @thread EMT
2914 */
2915static int e1kRegWriteMDIC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2916{
2917 if (value & MDIC_INT_EN)
2918 {
2919 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
2920 pThis->szPrf));
2921 }
2922 else if (value & MDIC_READY)
2923 {
2924 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
2925 pThis->szPrf));
2926 }
2927 else if (GET_BITS_V(value, MDIC, PHY) != 1)
2928 {
2929 E1kLog(("%s WARNING! Access to invalid PHY detected, phy=%d.\n",
2930 pThis->szPrf, GET_BITS_V(value, MDIC, PHY)));
2931 /*
2932 * Some drivers scan the MDIO bus for a PHY. We can work with these
2933 * drivers if we set MDIC_READY and MDIC_ERROR when there isn't a PHY
2934 * at the requested address, see @bugref{7346}.
2935 */
2936 MDIC = MDIC_READY | MDIC_ERROR;
2937 }
2938 else
2939 {
2940 /* Store the value */
2941 e1kRegWriteDefault(pThis, offset, index, value);
2942 STAM_COUNTER_INC(&pThis->StatPHYAccesses);
2943 /* Forward op to PHY */
2944 if (value & MDIC_OP_READ)
2945 SET_BITS(MDIC, DATA, Phy::readRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG)));
2946 else
2947 Phy::writeRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK);
2948 /* Let software know that we are done */
2949 MDIC |= MDIC_READY;
2950 }
2951
2952 return VINF_SUCCESS;
2953}
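
/*
 * Illustrative MDIC read cycle as served by the handler above (an informal
 * summary of the code, not of any particular guest driver):
 *
 *   1. The guest writes MDIC with OP = read, PHY = 1 (the only PHY address
 *      this device answers, see the check above), REG = the PHY register
 *      number, and READY/INT_EN clear.
 *   2. The handler forwards the request to Phy::readRegister(), places the
 *      result in the DATA field and sets MDIC_READY in the same call, so a
 *      polling guest sees the cycle complete immediately.
 *   3. A write cycle is the same with OP = write and the value in DATA,
 *      forwarded to Phy::writeRegister().
 *
 * Accesses to any other PHY address complete with MDIC_READY | MDIC_ERROR so
 * that bus-scanning drivers move on (see @bugref{7346} above).
 */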
2954
2955/**
2956 * Write handler for Interrupt Cause Read register.
2957 *
2958 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
2959 *
2960 * @param pThis The device state structure.
2961 * @param offset Register offset in memory-mapped frame.
2962 * @param index Register index in register array.
2963 * @param value The value to store.
2964 * @param mask Used to implement partial writes (8 and 16-bit).
2965 * @thread EMT
2966 */
2967static int e1kRegWriteICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2968{
2969 ICR &= ~value;
2970
2971 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index);
2972 return VINF_SUCCESS;
2973}
2974
2975/**
2976 * Read handler for Interrupt Cause Read register.
2977 *
2978 * Reading this register acknowledges all interrupts.
2979 *
2980 * @returns VBox status code.
2981 *
2982 * @param pThis The device state structure.
2983 * @param offset Register offset in memory-mapped frame.
2984 * @param index Register index in register array.
2985 * @param pu32Value Where to store the read value.
2986 * @thread EMT
2987 */
2988static int e1kRegReadICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2989{
2990 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_READ);
2991 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2992 return rc;
2993
2994 uint32_t value = 0;
2995 rc = e1kRegReadDefault(pThis, offset, index, &value);
2996 if (RT_SUCCESS(rc))
2997 {
2998 if (value)
2999 {
3000 if (!pThis->fIntRaised)
3001 E1K_INC_ISTAT_CNT(pThis->uStatNoIntICR);
3002 /*
3003 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
3004 * with disabled interrupts.
3005 */
3006 //if (IMS)
3007 if (1)
3008 {
3009 /*
3010 * Interrupts were enabled -- we are supposedly at the very
3011 * beginning of the interrupt handler.
3012 */
3013 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
3014 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", pThis->szPrf, ICR));
3015 /* Clear all pending interrupts */
3016 ICR = 0;
3017 pThis->fIntRaised = false;
3018 /* Lower(0) INTA(0) */
3019 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
3020
3021 pThis->u64AckedAt = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
3022 if (pThis->fIntMaskUsed)
3023 pThis->fDelayInts = true;
3024 }
3025 else
3026 {
3027 /*
3028 * Interrupts are disabled -- in Windows guests the ICR read is done
3029 * just before re-enabling interrupts.
3030 */
3031 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
3032 }
3033 }
3034 *pu32Value = value;
3035 }
3036 e1kCsLeave(pThis);
3037
3038 return rc;
3039}
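
/*
 * Note on the read-to-clear behaviour implemented above (informal summary):
 * a single ICR read returns all currently pending causes, clears ICR,
 * deasserts INTA and, if the guest is known to use interrupt masking
 * (fIntMaskUsed), arms the delayed-interrupt logic so that new causes raised
 * while the guest is presumably inside its ISR do not immediately retrigger
 * INTA. For example, if ICR = ICR_RXT0 | ICR_TXDW when the guest reads it,
 * that value is returned and a subsequent read returns 0 until a new cause
 * is raised.
 */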
3040
3041/**
3042 * Write handler for Interrupt Cause Set register.
3043 *
3044 * Bits corresponding to 1s in 'value' will be set in ICR register.
3045 *
3046 * @param pThis The device state structure.
3047 * @param offset Register offset in memory-mapped frame.
3048 * @param index Register index in register array.
3049 * @param value The value to store.
3050 * @param mask Used to implement partial writes (8 and 16-bit).
3051 * @thread EMT
3052 */
3053static int e1kRegWriteICS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3054{
3055 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3056 E1K_INC_ISTAT_CNT(pThis->uStatIntICS);
3057 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, value & g_aE1kRegMap[ICS_IDX].writable);
3058}
3059
3060/**
3061 * Write handler for Interrupt Mask Set register.
3062 *
3063 * Will trigger pending interrupts.
3064 *
3065 * @param pThis The device state structure.
3066 * @param offset Register offset in memory-mapped frame.
3067 * @param index Register index in register array.
3068 * @param value The value to store.
3069 * @param mask Used to implement partial writes (8 and 16-bit).
3070 * @thread EMT
3071 */
3072static int e1kRegWriteIMS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3073{
3074 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3075
3076 IMS |= value;
3077 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
3078 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", pThis->szPrf));
3079 /*
3080 * We cannot raise an interrupt here as it will occasionally cause an interrupt storm
3081 * in Windows guests (see @bugref{8624}, @bugref{5023}).
3082 */
3083 if ((ICR & IMS) && !pThis->fLocked)
3084 {
3085 E1K_INC_ISTAT_CNT(pThis->uStatIntIMS);
3086 e1kPostponeInterrupt(pThis, E1K_IMS_INT_DELAY_NS);
3087 }
3088
3089 return VINF_SUCCESS;
3090}
3091
3092/**
3093 * Write handler for Interrupt Mask Clear register.
3094 *
3095 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
3096 *
3097 * @param pThis The device state structure.
3098 * @param offset Register offset in memory-mapped frame.
3099 * @param index Register index in register array.
3100 * @param value The value to store.
3101 * @param mask Used to implement partial writes (8 and 16-bit).
3102 * @thread EMT
3103 */
3104static int e1kRegWriteIMC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3105{
3106 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3107
3108 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3109 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3110 return rc;
3111 if (pThis->fIntRaised)
3112 {
3113 /*
3114 * Technically we should reset fIntRaised in the ICR read handler, but that would cause
3115 * Windows to freeze since it may receive an interrupt while still at the very beginning
3116 * of its interrupt handler.
3117 */
3118 E1K_INC_ISTAT_CNT(pThis->uStatIntLower);
3119 STAM_COUNTER_INC(&pThis->StatIntsPrevented);
3120 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
3121 /* Lower(0) INTA(0) */
3122 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
3123 pThis->fIntRaised = false;
3124 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
3125 }
3126 IMS &= ~value;
3127 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", pThis->szPrf));
3128 e1kCsLeave(pThis);
3129
3130 return VINF_SUCCESS;
3131}
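
/*
 * Putting IMS/IMC/ICR together, a typical guest interrupt round-trip against
 * the handlers above looks roughly like this (a sketch, not a quote from any
 * driver):
 *
 *   write IMC = 0xFFFFFFFF   // mask everything; INTA is deasserted here if it was raised
 *   read  ICR                // returns pending causes and clears them
 *   ... service RX/TX ...
 *   write IMS = wanted bits  // unmask; if ICR & IMS is already non-zero the
 *                            // IMS handler schedules a postponed interrupt
 *                            // rather than raising INTA synchronously
 */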
3132
3133/**
3134 * Write handler for Receive Control register.
3135 *
3136 * @param pThis The device state structure.
3137 * @param offset Register offset in memory-mapped frame.
3138 * @param index Register index in register array.
3139 * @param value The value to store.
3140 * @param mask Used to implement partial writes (8 and 16-bit).
3141 * @thread EMT
3142 */
3143static int e1kRegWriteRCTL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3144{
3145 /* Update promiscuous mode */
3146 bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
3147 if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
3148 {
3149 /* Promiscuity has changed, pass the knowledge on. */
3150#ifndef IN_RING3
3151 return VINF_IOM_R3_MMIO_WRITE;
3152#else
3153 if (pThis->pDrvR3)
3154 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, fBecomePromiscous);
3155#endif
3156 }
3157
3158 /* Adjust receive buffer size */
3159 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
3160 if (value & RCTL_BSEX)
3161 cbRxBuf *= 16;
3162 if (cbRxBuf > E1K_MAX_RX_PKT_SIZE)
3163 cbRxBuf = E1K_MAX_RX_PKT_SIZE;
3164 if (cbRxBuf != pThis->u16RxBSize)
3165 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
3166 pThis->szPrf, cbRxBuf, pThis->u16RxBSize));
3167 pThis->u16RxBSize = cbRxBuf;
3168
3169 /* Update the register */
3170 e1kRegWriteDefault(pThis, offset, index, value);
3171
3172 return VINF_SUCCESS;
3173}
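
/*
 * Worked example of the BSIZE/BSEX arithmetic above (values follow directly
 * from the code; the cap depends on E1K_MAX_RX_PKT_SIZE):
 *
 *   BSIZE=0, BSEX=0  ->  2048 >> 0       =  2048 bytes
 *   BSIZE=3, BSEX=0  ->  2048 >> 3       =   256 bytes
 *   BSIZE=1, BSEX=1  -> (2048 >> 1) * 16 = 16384 bytes
 *
 * The result is clipped to E1K_MAX_RX_PKT_SIZE before being stored in
 * u16RxBSize.
 */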
3174
3175/**
3176 * Write handler for Packet Buffer Allocation register.
3177 *
3178 * TXA = 64 - RXA.
3179 *
3180 * @param pThis The device state structure.
3181 * @param offset Register offset in memory-mapped frame.
3182 * @param index Register index in register array.
3183 * @param value The value to store.
3184 * @param mask Used to implement partial writes (8 and 16-bit).
3185 * @thread EMT
3186 */
3187static int e1kRegWritePBA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3188{
3189 e1kRegWriteDefault(pThis, offset, index, value);
3190 PBA_st->txa = 64 - PBA_st->rxa;
3191
3192 return VINF_SUCCESS;
3193}
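
/*
 * Example of the PBA split maintained above: the allocation always adds up
 * to 64 units (kilobytes of packet buffer), so a guest write of RXA = 48
 * leaves TXA = 64 - 48 = 16. Whatever the guest writes into the TXA field is
 * overridden by this recomputation.
 */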
3194
3195/**
3196 * Write handler for Receive Descriptor Tail register.
3197 *
3198 * @remarks A write to RDT forces a switch to HC and signals
3199 * e1kR3NetworkDown_WaitReceiveAvail().
3200 *
3201 * @returns VBox status code.
3202 *
3203 * @param pThis The device state structure.
3204 * @param offset Register offset in memory-mapped frame.
3205 * @param index Register index in register array.
3206 * @param value The value to store.
3207 * @param mask Used to implement partial writes (8 and 16-bit).
3208 * @thread EMT
3209 */
3210static int e1kRegWriteRDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3211{
3212#ifndef IN_RING3
3213 /* XXX */
3214// return VINF_IOM_R3_MMIO_WRITE;
3215#endif
3216 int rc = e1kCsRxEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3217 if (RT_LIKELY(rc == VINF_SUCCESS))
3218 {
3219 E1kLog(("%s e1kRegWriteRDT\n", pThis->szPrf));
3220#ifndef E1K_WITH_RXD_CACHE
3221 /*
3222 * Some drivers advance RDT too far, so that it equals RDH. This
3223 * somehow manages to work with real hardware but not with this
3224 * emulated device. We can work with these drivers if we just
3225 * write 1 less when we see a driver writing RDT equal to RDH,
3226 * see @bugref{7346}.
3227 */
3228 if (value == RDH)
3229 {
3230 if (RDH == 0)
3231 value = (RDLEN / sizeof(E1KRXDESC)) - 1;
3232 else
3233 value = RDH - 1;
3234 }
3235#endif /* !E1K_WITH_RXD_CACHE */
3236 rc = e1kRegWriteDefault(pThis, offset, index, value);
3237#ifdef E1K_WITH_RXD_CACHE
3238 /*
3239 * We need to fetch descriptors now as RDT may go a full circle
3240 * before we attempt to store a received packet. For example,
3241 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
3242 * size being only 8 descriptors! Note that we fetch descriptors
3243 * only when the cache is empty to reduce the number of memory reads
3244 * in case of frequent RDT writes. Don't fetch anything when the
3245 * receiver is disabled either, as RDH, RDT and RDLEN can be in some
3246 * messed up state.
3247 * Note that even though the cache may seem empty, meaning that there
3248 * are no more available descriptors in it, it may still be in use by
3249 * the RX thread, which has not yet written the last descriptor back
3250 * but has temporarily released the RX lock in order to write the
3251 * packet body to the descriptor's buffer. At this point we are still
3252 * going to prefetch, but it won't actually fetch anything if there are
3253 * no unused slots in our "empty" cache (nRxDFetched==E1K_RXD_CACHE_SIZE).
3254 * We must not reset the cache here even if it appears empty. It will be
3255 * reset at a later point in e1kRxDGet().
3256 */
3257 if (e1kRxDIsCacheEmpty(pThis) && (RCTL & RCTL_EN))
3258 e1kRxDPrefetch(pThis);
3259#endif /* E1K_WITH_RXD_CACHE */
3260 e1kCsRxLeave(pThis);
3261 if (RT_SUCCESS(rc))
3262 {
3263/** @todo bird: Use SUPSem* for this so we can signal it in ring-0 as well
3264 * without requiring any context switches. We should also check the
3265 * wait condition before bothering to queue the item as we're currently
3266 * queuing thousands of items per second here in a normal transmit
3267 * scenario. Expect performance changes when fixing this! */
3268#ifdef IN_RING3
3269 /* Signal that we have more receive descriptors available. */
3270 e1kWakeupReceive(pThis->CTX_SUFF(pDevIns));
3271#else
3272 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pCanRxQueue));
3273 if (pItem)
3274 PDMQueueInsert(pThis->CTX_SUFF(pCanRxQueue), pItem);
3275#endif
3276 }
3277 }
3278 return rc;
3279}
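
/*
 * Informal picture of the RDT protocol served by the handler above: the
 * device consumes RX descriptors starting at RDH, while RDT marks the end of
 * the region it is allowed to use. The guest refills buffers and advances
 * RDT past them, so a write to RDT is effectively the "more RX buffers
 * available" doorbell -- which is why the handler prefetches descriptors
 * (with the RXD cache enabled) and wakes up anyone blocked in
 * e1kR3NetworkDown_WaitReceiveAvail().
 */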
3280
3281/**
3282 * Write handler for Receive Delay Timer register.
3283 *
3284 * @param pThis The device state structure.
3285 * @param offset Register offset in memory-mapped frame.
3286 * @param index Register index in register array.
3287 * @param value The value to store.
3288 * @param mask Used to implement partial writes (8 and 16-bit).
3289 * @thread EMT
3290 */
3291static int e1kRegWriteRDTR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3292{
3293 e1kRegWriteDefault(pThis, offset, index, value);
3294 if (value & RDTR_FPD)
3295 {
3296 /* Flush requested, cancel both timers and raise interrupt */
3297#ifdef E1K_USE_RX_TIMERS
3298 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3299 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3300#endif
3301 E1K_INC_ISTAT_CNT(pThis->uStatIntRDTR);
3302 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3303 }
3304
3305 return VINF_SUCCESS;
3306}
3307
3308DECLINLINE(uint32_t) e1kGetTxLen(PE1KSTATE pThis)
3309{
3310 /*
3311 * Make sure TDT won't change during computation. EMT may modify TDT at
3312 * any moment.
3313 */
3314 uint32_t tdt = TDT;
3315 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
3316}
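
/*
 * Worked example for the ring arithmetic above (assuming the usual 16-byte
 * TX descriptors, so TDLEN = 4096 gives 4096 / 16 = 256 descriptors):
 *
 *   TDH =  10, TDT = 250  ->   0 + 250 -  10 = 240 descriptors pending
 *   TDH = 250, TDT =  10  -> 256 +  10 - 250 =  16 descriptors pending
 *   TDH = TDT             ->   0 (nothing to transmit)
 */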
3317
3318#ifdef IN_RING3
3319
3320# ifdef E1K_TX_DELAY
3321/**
3322 * Transmit Delay Timer handler.
3323 *
3324 * @remarks We only get here when the timer expires.
3325 *
3326 * @param pDevIns Pointer to device instance structure.
3327 * @param pTimer Pointer to the timer.
3328 * @param pvUser NULL.
3329 * @thread EMT
3330 */
3331static DECLCALLBACK(void) e1kTxDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3332{
3333 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3334 Assert(PDMCritSectIsOwner(&pThis->csTx));
3335
3336 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayExp);
3337# ifdef E1K_INT_STATS
3338 uint64_t u64Elapsed = RTTimeNanoTS() - pThis->u64ArmedAt;
3339 if (u64Elapsed > pThis->uStatMaxTxDelay)
3340 pThis->uStatMaxTxDelay = u64Elapsed;
3341# endif
3342 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
3343 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3344}
3345# endif /* E1K_TX_DELAY */
3346
3347//# ifdef E1K_USE_TX_TIMERS
3348
3349/**
3350 * Transmit Interrupt Delay Timer handler.
3351 *
3352 * @remarks We only get here when the timer expires.
3353 *
3354 * @param pDevIns Pointer to device instance structure.
3355 * @param pTimer Pointer to the timer.
3356 * @param pvUser NULL.
3357 * @thread EMT
3358 */
3359static DECLCALLBACK(void) e1kTxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3360{
3361 RT_NOREF(pDevIns);
3362 RT_NOREF(pTimer);
3363 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3364
3365 E1K_INC_ISTAT_CNT(pThis->uStatTID);
3366 /* Cancel absolute delay timer as we have already got attention */
3367# ifndef E1K_NO_TAD
3368 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
3369# endif
3370 e1kRaiseInterrupt(pThis, ICR_TXDW);
3371}
3372
3373/**
3374 * Transmit Absolute Delay Timer handler.
3375 *
3376 * @remarks We only get here when the timer expires.
3377 *
3378 * @param pDevIns Pointer to device instance structure.
3379 * @param pTimer Pointer to the timer.
3380 * @param pvUser NULL.
3381 * @thread EMT
3382 */
3383static DECLCALLBACK(void) e1kTxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3384{
3385 RT_NOREF(pDevIns);
3386 RT_NOREF(pTimer);
3387 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3388
3389 E1K_INC_ISTAT_CNT(pThis->uStatTAD);
3390 /* Cancel interrupt delay timer as we have already got attention */
3391 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
3392 e1kRaiseInterrupt(pThis, ICR_TXDW);
3393}
3394
3395//# endif /* E1K_USE_TX_TIMERS */
3396# ifdef E1K_USE_RX_TIMERS
3397
3398/**
3399 * Receive Interrupt Delay Timer handler.
3400 *
3401 * @remarks We only get here when the timer expires.
3402 *
3403 * @param pDevIns Pointer to device instance structure.
3404 * @param pTimer Pointer to the timer.
3405 * @param pvUser NULL.
3406 * @thread EMT
3407 */
3408static DECLCALLBACK(void) e1kRxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3409{
3410 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3411
3412 E1K_INC_ISTAT_CNT(pThis->uStatRID);
3413 /* Cancel absolute delay timer as we have already got attention */
3414 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3415 e1kRaiseInterrupt(pThis, ICR_RXT0);
3416}
3417
3418/**
3419 * Receive Absolute Delay Timer handler.
3420 *
3421 * @remarks We only get here when the timer expires.
3422 *
3423 * @param pDevIns Pointer to device instance structure.
3424 * @param pTimer Pointer to the timer.
3425 * @param pvUser NULL.
3426 * @thread EMT
3427 */
3428static DECLCALLBACK(void) e1kRxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3429{
3430 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3431
3432 E1K_INC_ISTAT_CNT(pThis->uStatRAD);
3433 /* Cancel interrupt delay timer as we have already got attention */
3434 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3435 e1kRaiseInterrupt(pThis, ICR_RXT0);
3436}
3437
3438# endif /* E1K_USE_RX_TIMERS */
3439
3440/**
3441 * Late Interrupt Timer handler.
3442 *
3443 * @param pDevIns Pointer to device instance structure.
3444 * @param pTimer Pointer to the timer.
3445 * @param pvUser NULL.
3446 * @thread EMT
3447 */
3448static DECLCALLBACK(void) e1kLateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3449{
3450 RT_NOREF(pDevIns, pTimer);
3451 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3452
3453 STAM_PROFILE_ADV_START(&pThis->StatLateIntTimer, a);
3454 STAM_COUNTER_INC(&pThis->StatLateInts);
3455 E1K_INC_ISTAT_CNT(pThis->uStatIntLate);
3456# if 0
3457 if (pThis->iStatIntLost > -100)
3458 pThis->iStatIntLost--;
3459# endif
3460 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, 0);
3461 STAM_PROFILE_ADV_STOP(&pThis->StatLateIntTimer, a);
3462}
3463
3464/**
3465 * Link Up Timer handler.
3466 *
3467 * @param pDevIns Pointer to device instance structure.
3468 * @param pTimer Pointer to the timer.
3469 * @param pvUser NULL.
3470 * @thread EMT
3471 */
3472static DECLCALLBACK(void) e1kLinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3473{
3474 RT_NOREF(pDevIns, pTimer);
3475 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3476
3477 /*
3478 * This can happen if we set the link status to down while the link-up timer was
3479 * already armed (shortly after e1kLoadDone()), or when the cable is disconnected
3480 * and re-connected very quickly. Moreover, 82543GC triggers LSC on reset even if
3481 * the cable is unplugged (see @bugref{8942}).
3482 */
3483 if (pThis->fCableConnected)
3484 {
3485 /* 82543GC does not have an internal PHY */
3486 if (pThis->eChip == E1K_CHIP_82543GC || (CTRL & CTRL_SLU))
3487 e1kR3LinkUp(pThis);
3488 }
3489#ifdef E1K_LSC_ON_RESET
3490 else if (pThis->eChip == E1K_CHIP_82543GC)
3491 e1kR3LinkDown(pThis);
3492#endif /* E1K_LSC_ON_RESET */
3493}
3494
3495#endif /* IN_RING3 */
3496
3497/**
3498 * Sets up the GSO context according to the TSE new context descriptor.
3499 *
3500 * @param pGso The GSO context to setup.
3501 * @param pCtx The context descriptor.
3502 */
3503DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3504{
3505 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3506
3507 /*
3508 * See if the context descriptor describes something that could be TCP or
3509 * UDP over IPv[46].
3510 */
3511 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3512 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3513 {
3514 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3515 return;
3516 }
3517 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3518 {
3519 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3520 return;
3521 }
3522 if (RT_UNLIKELY( pCtx->dw2.fTCP
3523 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3524 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3525 {
3526 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3527 return;
3528 }
3529
3530 /* The TCP/UDP checksum range should end at the end of the packet, or at least past the headers. */
3531 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3532 {
3533 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3534 return;
3535 }
3536
3537 /* IPv4 checksum offset. */
3538 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3539 {
3540 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3541 return;
3542 }
3543
3544 /* TCP/UDP checksum offsets. */
3545 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3546 != ( pCtx->dw2.fTCP
3547 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3548 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3549 {
3550 E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->tu.u8CSO, pCtx->tu.u8CSS, pCtx->dw2.fTCP));
3551 return;
3552 }
3553
3554 /*
3555 * Because internal networking uses a 16-bit size field for the GSO context
3556 * plus frame, we have to make sure we don't exceed it.
3557 */
3558 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3559 {
3560 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3561 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3562 return;
3563 }
3564
3565 /*
3566 * We're good for now - we'll do more checks when seeing the data.
3567 * So, figure the type of offloading and setup the context.
3568 */
3569 if (pCtx->dw2.fIP)
3570 {
3571 if (pCtx->dw2.fTCP)
3572 {
3573 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3574 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3575 }
3576 else
3577 {
3578 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3579 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3580 }
3581 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3582 * this yet it seems)... */
3583 }
3584 else
3585 {
3586 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /** @todo IPv6 UFO */
3587 if (pCtx->dw2.fTCP)
3588 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3589 else
3590 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3591 }
3592 pGso->offHdr1 = pCtx->ip.u8CSS;
3593 pGso->offHdr2 = pCtx->tu.u8CSS;
3594 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3595 pGso->cbMaxSeg = pCtx->dw3.u16MSS;
3596 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3597 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3598 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3599}
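
/*
 * Example of a TSE context that passes the checks above, for a plain
 * TCP/IPv4 frame without VLAN (offsets are the standard header sizes, not
 * values read from a particular guest):
 *
 *   IPCSS  = 14 (IP header right after the Ethernet header)
 *   IPCSO  = 24 (14 + offset of ip_sum within the IPv4 header)
 *   TUCSS  = 34 (14 + 20, start of the TCP header)
 *   TUCSO  = 50 (34 + offset of th_sum within the TCP header)
 *   HDRLEN = 54 (14 + 20 + 20), MSS = 1460
 *
 * which yields a PDMNETWORKGSOTYPE_IPV4_TCP context with offHdr1 = 14,
 * offHdr2 = 34, cbHdrsSeg = cbHdrsTotal = 54 and cbMaxSeg = 1460.
 */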
3600
3601/**
3602 * Checks if we can use GSO processing for the current TSE frame.
3603 *
3604 * @param pThis The device state structure.
3605 * @param pGso The GSO context.
3606 * @param pData The first data descriptor of the frame.
3607 * @param pCtx The TSO context descriptor.
3608 */
3609DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3610{
3611 if (!pData->cmd.fTSE)
3612 {
3613 E1kLog2(("e1kCanDoGso: !TSE\n"));
3614 return false;
3615 }
3616 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3617 {
3618 E1kLog(("e1kCanDoGso: VLE\n"));
3619 return false;
3620 }
3621 if (RT_UNLIKELY(!pThis->fGSOEnabled))
3622 {
3623 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3624 return false;
3625 }
3626
3627 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3628 {
3629 case PDMNETWORKGSOTYPE_IPV4_TCP:
3630 case PDMNETWORKGSOTYPE_IPV4_UDP:
3631 if (!pData->dw3.fIXSM)
3632 {
3633 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3634 return false;
3635 }
3636 if (!pData->dw3.fTXSM)
3637 {
3638 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3639 return false;
3640 }
3641 /** @todo what more checks should we perform here? Ethernet frame type? */
3642 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3643 return true;
3644
3645 case PDMNETWORKGSOTYPE_IPV6_TCP:
3646 case PDMNETWORKGSOTYPE_IPV6_UDP:
3647 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3648 {
3649 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3650 return false;
3651 }
3652 if (!pData->dw3.fTXSM)
3653 {
3654 E1kLog(("e1kCanDoGso: !TXSM (IPv6)\n"));
3655 return false;
3656 }
3657 /** @todo what more checks should we perform here? Ethernet frame type? */
3658 E1kLog2(("e1kCanDoGso: OK, IPv6\n"));
3659 return true;
3660
3661 default:
3662 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3663 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3664 return false;
3665 }
3666}
3667
3668/**
3669 * Frees the current xmit buffer.
3670 *
3671 * @param pThis The device state structure.
3672 */
3673static void e1kXmitFreeBuf(PE1KSTATE pThis)
3674{
3675 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3676 if (pSg)
3677 {
3678 pThis->CTX_SUFF(pTxSg) = NULL;
3679
3680 if (pSg->pvAllocator != pThis)
3681 {
3682 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3683 if (pDrv)
3684 pDrv->pfnFreeBuf(pDrv, pSg);
3685 }
3686 else
3687 {
3688 /* loopback */
3689 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3690 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3691 pSg->fFlags = 0;
3692 pSg->pvAllocator = NULL;
3693 }
3694 }
3695}
3696
3697#ifndef E1K_WITH_TXD_CACHE
3698/**
3699 * Allocates an xmit buffer.
3700 *
3701 * @returns See PDMINETWORKUP::pfnAllocBuf.
3702 * @param pThis The device state structure.
3703 * @param cbMin The minimum frame size.
3704 * @param fExactSize Whether cbMin is exact or if we have to max it
3705 * out to the max MTU size.
3706 * @param fGso Whether this is a GSO frame or not.
3707 */
3708DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, size_t cbMin, bool fExactSize, bool fGso)
3709{
3710 /* Adjust cbMin if necessary. */
3711 if (!fExactSize)
3712 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3713
3714 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3715 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3716 e1kXmitFreeBuf(pThis);
3717 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3718
3719 /*
3720 * Allocate the buffer.
3721 */
3722 PPDMSCATTERGATHER pSg;
3723 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3724 {
3725 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3726 if (RT_UNLIKELY(!pDrv))
3727 return VERR_NET_DOWN;
3728 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pThis->GsoCtx : NULL, &pSg);
3729 if (RT_FAILURE(rc))
3730 {
3731 /* Suspend TX as we are out of buffers atm */
3732 STATUS |= STATUS_TXOFF;
3733 return rc;
3734 }
3735 }
3736 else
3737 {
3738 /* Create a loopback using the fallback buffer and preallocated SG. */
3739 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3740 pSg = &pThis->uTxFallback.Sg;
3741 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3742 pSg->cbUsed = 0;
3743 pSg->cbAvailable = 0;
3744 pSg->pvAllocator = pThis;
3745 pSg->pvUser = NULL; /* No GSO here. */
3746 pSg->cSegs = 1;
3747 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3748 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3749 }
3750
3751 pThis->CTX_SUFF(pTxSg) = pSg;
3752 return VINF_SUCCESS;
3753}
3754#else /* E1K_WITH_TXD_CACHE */
3755/**
3756 * Allocates an xmit buffer.
3757 *
3758 * @returns See PDMINETWORKUP::pfnAllocBuf.
3759 * @param pThis The device state structure.
3760 * @param cbMin The minimum frame size.
3761 * @param fExactSize Whether cbMin is exact or if we have to max it
3762 * out to the max MTU size.
3763 * @param fGso Whether this is a GSO frame or not.
3764 */
3765DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, bool fGso)
3766{
3767 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3768 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3769 e1kXmitFreeBuf(pThis);
3770 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3771
3772 /*
3773 * Allocate the buffer.
3774 */
3775 PPDMSCATTERGATHER pSg;
3776 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3777 {
3778 if (pThis->cbTxAlloc == 0)
3779 {
3780 /* Zero packet, no need for the buffer */
3781 return VINF_SUCCESS;
3782 }
3783
3784 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3785 if (RT_UNLIKELY(!pDrv))
3786 return VERR_NET_DOWN;
3787 int rc = pDrv->pfnAllocBuf(pDrv, pThis->cbTxAlloc, fGso ? &pThis->GsoCtx : NULL, &pSg);
3788 if (RT_FAILURE(rc))
3789 {
3790 /* Suspend TX as we are out of buffers atm */
3791 STATUS |= STATUS_TXOFF;
3792 return rc;
3793 }
3794 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3795 pThis->szPrf, pThis->cbTxAlloc,
3796 pThis->fVTag ? "VLAN " : "",
3797 pThis->fGSO ? "GSO " : ""));
3798 }
3799 else
3800 {
3801 /* Create a loopback using the fallback buffer and preallocated SG. */
3802 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3803 pSg = &pThis->uTxFallback.Sg;
3804 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3805 pSg->cbUsed = 0;
3806 pSg->cbAvailable = sizeof(pThis->aTxPacketFallback);
3807 pSg->pvAllocator = pThis;
3808 pSg->pvUser = NULL; /* No GSO here. */
3809 pSg->cSegs = 1;
3810 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3811 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3812 }
3813 pThis->cbTxAlloc = 0;
3814
3815 pThis->CTX_SUFF(pTxSg) = pSg;
3816 return VINF_SUCCESS;
3817}
3818#endif /* E1K_WITH_TXD_CACHE */
3819
3820/**
3821 * Checks if it's a GSO buffer or not.
3822 *
3823 * @returns true / false.
3824 * @param pTxSg The scatter / gather buffer.
3825 */
3826DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3827{
3828#if 0
3829 if (!pTxSg)
3830 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
3831 if (pTxSg && !pTxSg->pvUser)
3832 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
3833#endif
3834 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3835}
3836
3837#ifndef E1K_WITH_TXD_CACHE
3838/**
3839 * Load transmit descriptor from guest memory.
3840 *
3841 * @param pThis The device state structure.
3842 * @param pDesc Pointer to descriptor union.
3843 * @param addr Physical address in guest context.
3844 * @thread E1000_TX
3845 */
3846DECLINLINE(void) e1kLoadDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3847{
3848 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3849}
3850#else /* E1K_WITH_TXD_CACHE */
3851/**
3852 * Load transmit descriptors from guest memory.
3853 *
3854 * We may need two physical reads in case the area to fetch wraps around the end
3855 * of the TX descriptor ring.
3856 *
3857 * @returns the actual number of descriptors fetched.
3858 * @param pThis The device state structure.
3859 * @param pDesc Pointer to descriptor union.
3860 * @param addr Physical address in guest context.
3861 * @thread E1000_TX
3862 */
3863DECLINLINE(unsigned) e1kTxDLoadMore(PE1KSTATE pThis)
3864{
3865 Assert(pThis->iTxDCurrent == 0);
3866 /* We've already loaded pThis->nTxDFetched descriptors past TDH. */
3867 unsigned nDescsAvailable = e1kGetTxLen(pThis) - pThis->nTxDFetched;
3868 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pThis->nTxDFetched);
3869 unsigned nDescsTotal = TDLEN / sizeof(E1KTXDESC);
3870 unsigned nFirstNotLoaded = (TDH + pThis->nTxDFetched) % nDescsTotal;
3871 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
3872 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u "
3873 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
3874 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
3875 nFirstNotLoaded, nDescsInSingleRead));
3876 if (nDescsToFetch == 0)
3877 return 0;
3878 E1KTXDESC* pFirstEmptyDesc = &pThis->aTxDescriptors[pThis->nTxDFetched];
3879 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3880 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3881 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
3882 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3883 pThis->szPrf, nDescsInSingleRead,
3884 TDBAH, TDBAL + TDH * sizeof(E1KTXDESC),
3885 nFirstNotLoaded, TDLEN, TDH, TDT));
3886 if (nDescsToFetch > nDescsInSingleRead)
3887 {
3888 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3889 ((uint64_t)TDBAH << 32) + TDBAL,
3890 pFirstEmptyDesc + nDescsInSingleRead,
3891 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
3892 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
3893 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
3894 TDBAH, TDBAL));
3895 }
3896 pThis->nTxDFetched += nDescsToFetch;
3897 return nDescsToFetch;
3898}
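
/*
 * Wrap-around example for the two-read logic above (a small ring purely for
 * illustration, assuming E1K_TXD_CACHE_SIZE is at least 4): with TDLEN
 * describing 8 descriptors, TDH = 6, TDT = 2 and an empty cache,
 * e1kGetTxLen() reports 4 pending descriptors, so nFirstNotLoaded = 6 and
 * nDescsInSingleRead = min(4, 8 - 6) = 2. The first PDMDevHlpPhysRead()
 * fetches descriptors 6..7 and the second one fetches descriptors 0..1 from
 * the start of the ring.
 */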
3899
3900/**
3901 * Load transmit descriptors from guest memory only if there are no loaded
3902 * descriptors.
3903 *
3904 * @returns true if there are descriptors in cache.
3905 * @param pThis The device state structure.
3906 * @param pDesc Pointer to descriptor union.
3907 * @param addr Physical address in guest context.
3908 * @thread E1000_TX
3909 */
3910DECLINLINE(bool) e1kTxDLazyLoad(PE1KSTATE pThis)
3911{
3912 if (pThis->nTxDFetched == 0)
3913 return e1kTxDLoadMore(pThis) != 0;
3914 return true;
3915}
3916#endif /* E1K_WITH_TXD_CACHE */
3917
3918/**
3919 * Write back transmit descriptor to guest memory.
3920 *
3921 * @param pThis The device state structure.
3922 * @param pDesc Pointer to descriptor union.
3923 * @param addr Physical address in guest context.
3924 * @thread E1000_TX
3925 */
3926DECLINLINE(void) e1kWriteBackDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3927{
3928 /* Only the last half of the descriptor strictly has to be written back, but we write back the whole descriptor. */
3929 e1kPrintTDesc(pThis, pDesc, "^^^");
3930 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3931}
3932
3933/**
3934 * Transmit complete frame.
3935 *
3936 * @remarks We skip the FCS since we're not responsible for sending anything to
3937 * a real ethernet wire.
3938 *
3939 * @param pThis The device state structure.
3940 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3941 * @thread E1000_TX
3942 */
3943static void e1kTransmitFrame(PE1KSTATE pThis, bool fOnWorkerThread)
3944{
3945 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3946 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
3947 Assert(!pSg || pSg->cSegs == 1);
3948
3949 if (cbFrame > 70) /* unqualified guess */
3950 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
3951
3952#ifdef E1K_INT_STATS
3953 if (cbFrame <= 1514)
3954 E1K_INC_ISTAT_CNT(pThis->uStatTx1514);
3955 else if (cbFrame <= 2962)
3956 E1K_INC_ISTAT_CNT(pThis->uStatTx2962);
3957 else if (cbFrame <= 4410)
3958 E1K_INC_ISTAT_CNT(pThis->uStatTx4410);
3959 else if (cbFrame <= 5858)
3960 E1K_INC_ISTAT_CNT(pThis->uStatTx5858);
3961 else if (cbFrame <= 7306)
3962 E1K_INC_ISTAT_CNT(pThis->uStatTx7306);
3963 else if (cbFrame <= 8754)
3964 E1K_INC_ISTAT_CNT(pThis->uStatTx8754);
3965 else if (cbFrame <= 16384)
3966 E1K_INC_ISTAT_CNT(pThis->uStatTx16384);
3967 else if (cbFrame <= 32768)
3968 E1K_INC_ISTAT_CNT(pThis->uStatTx32768);
3969 else
3970 E1K_INC_ISTAT_CNT(pThis->uStatTxLarge);
3971#endif /* E1K_INT_STATS */
3972
3973 /* Add VLAN tag */
3974 if (cbFrame > 12 && pThis->fVTag)
3975 {
3976 E1kLog3(("%s Inserting VLAN tag %08x\n",
3977 pThis->szPrf, RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16)));
3978 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
3979 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16);
3980 pSg->cbUsed += 4;
3981 cbFrame += 4;
3982 Assert(pSg->cbUsed == cbFrame);
3983 Assert(pSg->cbUsed <= pSg->cbAvailable);
3984 }
3985/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
3986 "%.*Rhxd\n"
3987 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
3988 pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
3989
3990 /* Update the stats */
3991 E1K_INC_CNT32(TPT);
3992 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
3993 E1K_INC_CNT32(GPTC);
3994 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
3995 E1K_INC_CNT32(BPTC);
3996 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
3997 E1K_INC_CNT32(MPTC);
3998 /* Update octet transmit counter */
3999 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
4000 if (pThis->CTX_SUFF(pDrv))
4001 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, cbFrame);
4002 if (cbFrame == 64)
4003 E1K_INC_CNT32(PTC64);
4004 else if (cbFrame < 128)
4005 E1K_INC_CNT32(PTC127);
4006 else if (cbFrame < 256)
4007 E1K_INC_CNT32(PTC255);
4008 else if (cbFrame < 512)
4009 E1K_INC_CNT32(PTC511);
4010 else if (cbFrame < 1024)
4011 E1K_INC_CNT32(PTC1023);
4012 else
4013 E1K_INC_CNT32(PTC1522);
4014
4015 E1K_INC_ISTAT_CNT(pThis->uStatTxFrm);
4016
4017 /*
4018 * Dump and send the packet.
4019 */
4020 int rc = VERR_NET_DOWN;
4021 if (pSg && pSg->pvAllocator != pThis)
4022 {
4023 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
4024
4025 pThis->CTX_SUFF(pTxSg) = NULL;
4026 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
4027 if (pDrv)
4028 {
4029 /* Release critical section to avoid deadlock in CanReceive */
4030 //e1kCsLeave(pThis);
4031 STAM_PROFILE_START(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4032 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
4033 STAM_PROFILE_STOP(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4034 //e1kCsEnter(pThis, RT_SRC_POS);
4035 }
4036 }
4037 else if (pSg)
4038 {
4039 Assert(pSg->aSegs[0].pvSeg == pThis->aTxPacketFallback);
4040 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
4041
4042 /** @todo do we actually need to check that we're in loopback mode here? */
4043 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
4044 {
4045 E1KRXDST status;
4046 RT_ZERO(status);
4047 status.fPIF = true;
4048 e1kHandleRxPacket(pThis, pSg->aSegs[0].pvSeg, cbFrame, status);
4049 rc = VINF_SUCCESS;
4050 }
4051 e1kXmitFreeBuf(pThis);
4052 }
4053 else
4054 rc = VERR_NET_DOWN;
4055 if (RT_FAILURE(rc))
4056 {
4057 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
4058 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
4059 }
4060
4061 pThis->led.Actual.s.fWriting = 0;
4062}
4063
4064/**
4065 * Compute and write internet checksum (e1kCSum16) at the specified offset.
4066 *
4067 * @param pThis The device state structure.
4068 * @param pPkt Pointer to the packet.
4069 * @param u16PktLen Total length of the packet.
4070 * @param cso Offset in packet to write checksum at.
4071 * @param css Offset in packet to start computing
4072 * checksum from.
4073 * @param cse Offset in packet to stop computing
4074 * checksum at.
4075 * @thread E1000_TX
4076 */
4077static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
4078{
4079 RT_NOREF1(pThis);
4080
4081 if (css >= u16PktLen)
4082 {
4083 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
4084 pThis->szPrf, css, u16PktLen));
4085 return;
4086 }
4087
4088 if (cso >= u16PktLen - 1)
4089 {
4090 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
4091 pThis->szPrf, cso, u16PktLen));
4092 return;
4093 }
4094
4095 if (cse == 0)
4096 cse = u16PktLen - 1;
4097 else if (cse < css)
4098 {
4099 E1kLog2(("%s css(%X) is greater than cse(%X), checksum is not inserted\n",
4100 pThis->szPrf, css, cse));
4101 return;
4102 }
4103
4104 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
4105 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", pThis->szPrf,
4106 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
4107 *(uint16_t*)(pPkt + cso) = u16ChkSum;
4108}
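
/*
 * Example values for the checksum insertion above, for a TCP/IPv4 frame
 * without VLAN (standard header offsets, not taken from a specific guest):
 *
 *   TCP checksum:         CSS = 34, CSO = 50, CSE = 0
 *       -> e1kCSum16() runs over bytes 34..u16PktLen-1 (CSE = 0 means
 *          "to the end of the packet") and the result is stored at offset 50.
 *   IPv4 header checksum: CSS = 14, CSO = 24, CSE = 33
 *       -> computed over bytes 14..33 (the 20-byte IP header) and stored at
 *          offset 24.
 */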
4109
4110/**
4111 * Add a part of descriptor's buffer to transmit frame.
4112 *
4113 * @remarks data.u64BufAddr is used unconditionally for both data
4114 * and legacy descriptors since it is identical to
4115 * legacy.u64BufAddr.
4116 *
4117 * @param pThis The device state structure.
4118 * @param pDesc Pointer to the descriptor to transmit.
4119 * @param u16Len Length of buffer to the end of segment.
4120 * @param fSend Force packet sending.
4121 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4122 * @thread E1000_TX
4123 */
4124#ifndef E1K_WITH_TXD_CACHE
4125static void e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4126{
4127 /* TCP header being transmitted */
4128 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
4129 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4130 /* IP header being transmitted */
4131 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
4132 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4133
4134 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4135 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4136 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4137
4138 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4139 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4140 E1kLog3(("%s Dump of the segment:\n"
4141 "%.*Rhxd\n"
4142 "%s --- End of dump ---\n",
4143 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4144 pThis->u16TxPktLen += u16Len;
4145 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4146 pThis->szPrf, pThis->u16TxPktLen));
4147 if (pThis->u16HdrRemain > 0)
4148 {
4149 /* The header was not complete, check if it is now */
4150 if (u16Len >= pThis->u16HdrRemain)
4151 {
4152 /* The rest is payload */
4153 u16Len -= pThis->u16HdrRemain;
4154 pThis->u16HdrRemain = 0;
4155 /* Save partial checksum and flags */
4156 pThis->u32SavedCsum = pTcpHdr->chksum;
4157 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4158 /* Clear FIN and PSH flags now and set them only in the last segment */
4159 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4160 }
4161 else
4162 {
4163 /* Still not */
4164 pThis->u16HdrRemain -= u16Len;
4165 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4166 pThis->szPrf, pThis->u16HdrRemain));
4167 return;
4168 }
4169 }
4170
4171 pThis->u32PayRemain -= u16Len;
4172
4173 if (fSend)
4174 {
4175 /* Leave ethernet header intact */
4176 /* IP Total Length = payload + headers - ethernet header */
4177 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4178 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4179 pThis->szPrf, ntohs(pIpHdr->total_len)));
4180 /* Update IP Checksum */
4181 pIpHdr->chksum = 0;
4182 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4183 pThis->contextTSE.ip.u8CSO,
4184 pThis->contextTSE.ip.u8CSS,
4185 pThis->contextTSE.ip.u16CSE);
4186
4187 /* Update TCP flags */
4188 /* Restore original FIN and PSH flags for the last segment */
4189 if (pThis->u32PayRemain == 0)
4190 {
4191 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4192 E1K_INC_CNT32(TSCTC);
4193 }
4194 /* Add TCP length to partial pseudo header sum */
4195 uint32_t csum = pThis->u32SavedCsum
4196 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4197 while (csum >> 16)
4198 csum = (csum >> 16) + (csum & 0xFFFF);
4199 pTcpHdr->chksum = csum;
4200 /* Compute final checksum */
4201 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4202 pThis->contextTSE.tu.u8CSO,
4203 pThis->contextTSE.tu.u8CSS,
4204 pThis->contextTSE.tu.u16CSE);
4205
4206 /*
4207 * Transmit it. If we've used the SG already, allocate a new one before
4208 * we copy the data.
4209 */
4210 if (!pThis->CTX_SUFF(pTxSg))
4211 e1kXmitAllocBuf(pThis, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
4212 if (pThis->CTX_SUFF(pTxSg))
4213 {
4214 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4215 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4216 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4217 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4218 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4219 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4220 }
4221 e1kTransmitFrame(pThis, fOnWorkerThread);
4222
4223 /* Update Sequence Number */
4224 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4225 - pThis->contextTSE.dw3.u8HDRLEN);
4226 /* Increment IP identification */
4227 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4228 }
4229}
4230#else /* E1K_WITH_TXD_CACHE */
4231static int e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4232{
4233 int rc = VINF_SUCCESS;
4234 /* TCP header being transmitted */
4235 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
4236 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4237 /* IP header being transmitted */
4238 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
4239 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4240
4241 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4242 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4243 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4244
4245 if (pThis->u16TxPktLen + u16Len <= sizeof(pThis->aTxPacketFallback))
4246 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4247 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4248 else
4249 E1kLog(("%s e1kFallbackAddSegment: writing beyond aTxPacketFallback, u16TxPktLen=%d(0x%x) + u16Len=%d(0x%x) > %d\n",
4250 pThis->szPrf, pThis->u16TxPktLen, pThis->u16TxPktLen, u16Len, u16Len, sizeof(pThis->aTxPacketFallback)));
4251 E1kLog3(("%s Dump of the segment:\n"
4252 "%.*Rhxd\n"
4253 "%s --- End of dump ---\n",
4254 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4255 pThis->u16TxPktLen += u16Len;
4256 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4257 pThis->szPrf, pThis->u16TxPktLen));
4258 if (pThis->u16HdrRemain > 0)
4259 {
4260 /* The header was not complete, check if it is now */
4261 if (u16Len >= pThis->u16HdrRemain)
4262 {
4263 /* The rest is payload */
4264 u16Len -= pThis->u16HdrRemain;
4265 pThis->u16HdrRemain = 0;
4266 /* Save partial checksum and flags */
4267 pThis->u32SavedCsum = pTcpHdr->chksum;
4268 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4269 /* Clear FIN and PSH flags now and set them only in the last segment */
4270 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4271 }
4272 else
4273 {
4274 /* Still not */
4275 pThis->u16HdrRemain -= u16Len;
4276 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4277 pThis->szPrf, pThis->u16HdrRemain));
4278 return rc;
4279 }
4280 }
4281
4282 pThis->u32PayRemain -= u16Len;
4283
4284 if (fSend)
4285 {
4286 /* Leave ethernet header intact */
4287 /* IP Total Length = payload + headers - ethernet header */
4288 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4289 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4290 pThis->szPrf, ntohs(pIpHdr->total_len)));
4291 /* Update IP Checksum */
4292 pIpHdr->chksum = 0;
4293 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4294 pThis->contextTSE.ip.u8CSO,
4295 pThis->contextTSE.ip.u8CSS,
4296 pThis->contextTSE.ip.u16CSE);
4297
4298 /* Update TCP flags */
4299 /* Restore original FIN and PSH flags for the last segment */
4300 if (pThis->u32PayRemain == 0)
4301 {
4302 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4303 E1K_INC_CNT32(TSCTC);
4304 }
4305 /* Add TCP length to partial pseudo header sum */
4306 uint32_t csum = pThis->u32SavedCsum
4307 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4308 while (csum >> 16)
4309 csum = (csum >> 16) + (csum & 0xFFFF);
4310 pTcpHdr->chksum = csum;
4311 /* Compute final checksum */
4312 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4313 pThis->contextTSE.tu.u8CSO,
4314 pThis->contextTSE.tu.u8CSS,
4315 pThis->contextTSE.tu.u16CSE);
4316
4317 /*
4318 * Transmit it.
4319 */
4320 if (pThis->CTX_SUFF(pTxSg))
4321 {
4322 /* Make sure the packet fits into the allocated buffer */
4323 size_t cbCopy = RT_MIN(pThis->u16TxPktLen, pThis->CTX_SUFF(pTxSg)->cbAvailable);
4324#ifdef DEBUG
4325 if (pThis->u16TxPktLen > pThis->CTX_SUFF(pTxSg)->cbAvailable)
4326 E1kLog(("%s e1kFallbackAddSegment: truncating packet, u16TxPktLen=%d(0x%x) > cbAvailable=%d(0x%x)\n",
4327 pThis->szPrf, pThis->u16TxPktLen, pThis->u16TxPktLen,
4328 pThis->CTX_SUFF(pTxSg)->cbAvailable, pThis->CTX_SUFF(pTxSg)->cbAvailable));
4329#endif /* DEBUG */
4330 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4331 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4332 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, cbCopy);
4333 pThis->CTX_SUFF(pTxSg)->cbUsed = cbCopy;
4334 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = cbCopy;
4335 }
4336 e1kTransmitFrame(pThis, fOnWorkerThread);
4337
4338 /* Update Sequence Number */
4339 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4340 - pThis->contextTSE.dw3.u8HDRLEN);
4341 /* Increment IP identification */
4342 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4343
4344 /* Allocate new buffer for the next segment. */
4345 if (pThis->u32PayRemain)
4346 {
4347 pThis->cbTxAlloc = RT_MIN(pThis->u32PayRemain,
4348 pThis->contextTSE.dw3.u16MSS)
4349 + pThis->contextTSE.dw3.u8HDRLEN
4350 + (pThis->fVTag ? 4 : 0);
4351 rc = e1kXmitAllocBuf(pThis, false /* fGSO */);
4352 }
4353 }
4354
4355 return rc;
4356}
4357#endif /* E1K_WITH_TXD_CACHE */
4358
4359#ifndef E1K_WITH_TXD_CACHE
4360/**
4361 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4362 * frame.
4363 *
4364 * We construct the frame in the fallback buffer first and then copy it to the SG
4365 * buffer before passing it down to the network driver code.
4366 *
4367 * @returns true if the frame should be transmitted, false if not.
4368 *
4369 * @param pThis The device state structure.
4370 * @param pDesc Pointer to the descriptor to transmit.
4371 * @param cbFragment Length of descriptor's buffer.
4372 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4373 * @thread E1000_TX
4374 */
4375static bool e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4376{
4377 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4378 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4379 Assert(pDesc->data.cmd.fTSE);
4380 Assert(!e1kXmitIsGsoBuf(pTxSg));
4381
4382 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4383 Assert(u16MaxPktLen != 0);
4384 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4385
4386 /*
4387 * Carve out segments.
4388 */
4389 do
4390 {
4391 /* Calculate how many bytes we have left in this TCP segment */
4392 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4393 if (cb > cbFragment)
4394 {
4395 /* This descriptor fits completely into current segment */
4396 cb = cbFragment;
4397 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4398 }
4399 else
4400 {
4401 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4402 /*
4403 * Rewind the packet tail pointer to the beginning of payload,
4404 * so we continue writing right beyond the header.
4405 */
4406 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4407 }
4408
4409 pDesc->data.u64BufAddr += cb;
4410 cbFragment -= cb;
4411 } while (cbFragment > 0);
4412
4413 if (pDesc->data.cmd.fEOP)
4414 {
4415 /* End of packet, next segment will contain header. */
4416 if (pThis->u32PayRemain != 0)
4417 E1K_INC_CNT32(TSCTFC);
4418 pThis->u16TxPktLen = 0;
4419 e1kXmitFreeBuf(pThis);
4420 }
4421
4422 return false;
4423}
4424#else /* E1K_WITH_TXD_CACHE */
4425/**
4426 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4427 * frame.
4428 *
4429 * We construct the frame in the fallback buffer first and then copy it to the SG
4430 * buffer before passing it down to the network driver code.
4431 *
4432 * @returns VBox status code.
4433 *
4434 * @param pThis The device state structure.
4435 * @param pDesc Pointer to the descriptor to transmit.
4436 * @param cbFragment Length of descriptor's buffer.
4437 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4438 * @thread E1000_TX
4439 */
4440static int e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, bool fOnWorkerThread)
4441{
4442#ifdef VBOX_STRICT
4443 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4444 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4445 Assert(pDesc->data.cmd.fTSE);
4446 Assert(!e1kXmitIsGsoBuf(pTxSg));
4447#endif
4448
4449 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4450
4451 /*
4452 * Carve out segments.
4453 */
4454 int rc = VINF_SUCCESS;
4455 do
4456 {
4457 /* Calculate how many bytes we have left in this TCP segment */
4458 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4459 if (cb > pDesc->data.cmd.u20DTALEN)
4460 {
4461 /* This descriptor fits completely into current segment */
4462 cb = pDesc->data.cmd.u20DTALEN;
4463 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4464 }
4465 else
4466 {
4467 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4468 /*
4469 * Rewind the packet tail pointer to the beginning of payload,
4470 * so we continue writing right beyond the header.
4471 */
4472 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4473 }
4474
4475 pDesc->data.u64BufAddr += cb;
4476 pDesc->data.cmd.u20DTALEN -= cb;
4477 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4478
4479 if (pDesc->data.cmd.fEOP)
4480 {
4481 /* End of packet, next segment will contain header. */
4482 if (pThis->u32PayRemain != 0)
4483 E1K_INC_CNT32(TSCTFC);
4484 pThis->u16TxPktLen = 0;
4485 e1kXmitFreeBuf(pThis);
4486 }
4487
4488 return VINF_SUCCESS; /// @todo consider rc;
4489}
4490#endif /* E1K_WITH_TXD_CACHE */
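
/*
 * Segmentation example for the TSE fallback path above (illustrative numbers
 * only): with HDRLEN = 54 and MSS = 1460 the segment limit u16MaxPktLen is
 * 1514 bytes. A TSE packet with 4000 bytes of TCP payload is therefore sent
 * as three frames carrying 1460, 1460 and 1080 payload bytes, each prefixed
 * with a copy of the 54-byte headers; the IP total length, IP id, TCP
 * sequence number and checksums are patched per segment by
 * e1kFallbackAddSegment(), and FIN/PSH are only restored on the last one.
 */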
4491
4492
4493/**
4494 * Add descriptor's buffer to transmit frame.
4495 *
4496 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4497 * TSE frames we cannot handle as GSO.
4498 *
4499 * @returns true on success, false on failure.
4500 *
4501 * @param pThis The device state structure.
4502 * @param PhysAddr The physical address of the descriptor buffer.
4503 * @param cbFragment Length of descriptor's buffer.
4504 * @thread E1000_TX
4505 */
4506static bool e1kAddToFrame(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint32_t cbFragment)
4507{
4508 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4509 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4510 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4511
4512 LogFlow(("%s e1kAddToFrame: ENTER cbFragment=%d u16TxPktLen=%d cbUsed=%d cbAvailable=%d fGSO=%s\n",
4513 pThis->szPrf, cbFragment, pThis->u16TxPktLen, pTxSg->cbUsed, pTxSg->cbAvailable,
4514 fGso ? "true" : "false"));
4515 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4516 {
4517 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4518 return false;
4519 }
4520 if (RT_UNLIKELY( cbNewPkt > pTxSg->cbAvailable ))
4521 {
4522 E1kLog(("%s Transmit packet does not fit the buffer: %u > %u(available)\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
4523 return false;
4524 }
4525
4526 if (RT_LIKELY(pTxSg))
4527 {
4528 Assert(pTxSg->cSegs == 1);
4529 if (pTxSg->cbUsed != pThis->u16TxPktLen)
4530 E1kLog(("%s e1kAddToFrame: pTxSg->cbUsed=%d(0x%x) != u16TxPktLen=%d(0x%x)\n",
4531 pThis->szPrf, pTxSg->cbUsed, pTxSg->cbUsed, pThis->u16TxPktLen, pThis->u16TxPktLen));
4532
4533 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4534 (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4535
4536 pTxSg->cbUsed = cbNewPkt;
4537 }
4538 pThis->u16TxPktLen = cbNewPkt;
4539
4540 return true;
4541}
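/*
 * A minimal sketch of how fragments accumulate in the scatter-gather buffer
 * (assuming a non-GSO packet that the guest split into fragments of 600, 600
 * and 314 bytes):
 *   call 1: u16TxPktLen = 0    -> copy 600 bytes to aSegs[0].pvSeg + 0,    cbUsed = 600
 *   call 2: u16TxPktLen = 600  -> copy 600 bytes to aSegs[0].pvSeg + 600,  cbUsed = 1200
 *   call 3: u16TxPktLen = 1200 -> copy 314 bytes to aSegs[0].pvSeg + 1200, cbUsed = 1514
 * The two RT_UNLIKELY checks above reject packets that would exceed
 * E1K_MAX_TX_PKT_SIZE (non-GSO only) or overflow the buffer allocated by
 * e1kXmitAllocBuf() (pTxSg->cbAvailable).
 */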
4542
4543
4544/**
4545 * Write the descriptor back to guest memory and notify the guest.
4546 *
4547 * @param pThis The device state structure.
4548 * @param pDesc Pointer to the descriptor that has been transmitted.
4549 * @param addr Physical address of the descriptor in guest memory.
4550 * @thread E1000_TX
4551 */
4552static void e1kDescReport(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4553{
4554 /*
4555 * We fake descriptor write-back bursting. Descriptors are written back as they are
4556 * processed.
4557 */
4558 /* Let's pretend we process descriptors. Write back with DD set. */
4559 /*
4560 * Prior to r71586 we tried to accommodate the case when write-back bursts
4561 * are enabled without actually implementing bursting by writing back all
4562 * descriptors, even the ones that do not have RS set. This caused kernel
4563 * panics with Linux SMP kernels, as the e1000 driver tried to free up the skb
4564 * associated with a written-back descriptor if it happened to be a context
4565 * descriptor, since context descriptors have no skb associated with them.
4566 * Starting from r71586 we write back only the descriptors with RS set,
4567 * which is a little bit different from what the real hardware does in
4568 * case there is a chain of data descriptors where some of them have RS set
4569 * and others do not. That is a very uncommon scenario, imho.
4570 * We need to check RPS as well, since some legacy drivers use it instead of
4571 * RS even with newer cards.
4572 */
4573 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4574 {
4575 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4576 e1kWriteBackDesc(pThis, pDesc, addr);
4577 if (pDesc->legacy.cmd.fEOP)
4578 {
4579//#ifdef E1K_USE_TX_TIMERS
4580 if (pThis->fTidEnabled && pDesc->legacy.cmd.fIDE)
4581 {
4582 E1K_INC_ISTAT_CNT(pThis->uStatTxIDE);
4583 //if (pThis->fIntRaised)
4584 //{
4585 // /* Interrupt is already pending, no need for timers */
4586 // ICR |= ICR_TXDW;
4587 //}
4588 //else {
4589 /* Arm the timer to fire in TIDV usec (discard .024) */
4590 e1kArmTimer(pThis, pThis->CTX_SUFF(pTIDTimer), TIDV);
4591# ifndef E1K_NO_TAD
4592 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4593 E1kLog2(("%s Checking if TAD timer is running\n",
4594 pThis->szPrf));
4595 if (TADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pTADTimer)))
4596 e1kArmTimer(pThis, pThis->CTX_SUFF(pTADTimer), TADV);
4597# endif /* E1K_NO_TAD */
4598 }
4599 else
4600 {
4601 if (pThis->fTidEnabled)
4602 {
4603 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4604 pThis->szPrf));
4605 /* Cancel both timers if armed and fire immediately. */
4606# ifndef E1K_NO_TAD
4607 TMTimerStop(pThis->CTX_SUFF(pTADTimer));
4608# endif
4609 TMTimerStop(pThis->CTX_SUFF(pTIDTimer));
4610 }
4611//#endif /* E1K_USE_TX_TIMERS */
4612 E1K_INC_ISTAT_CNT(pThis->uStatIntTx);
4613 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXDW);
4614//#ifdef E1K_USE_TX_TIMERS
4615 }
4616//#endif /* E1K_USE_TX_TIMERS */
4617 }
4618 }
4619 else
4620 {
4621 E1K_INC_ISTAT_CNT(pThis->uStatTxNoRS);
4622 }
4623}
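/*
 * A sketch of the write-back and interrupt moderation logic above (assuming
 * the guest enabled interrupt delays, i.e. fTidEnabled is set, and programmed
 * TIDV/TADV):
 *   - RS and RPS clear: nothing is written back, only uStatTxNoRS is bumped;
 *   - RS set, EOP set, IDE set: the TID timer is armed for TIDV ticks and, if
 *     TADV is non-zero and the TAD timer is not running yet, the TAD timer as
 *     well; raising ICR.TXDW is thus deferred to the timer callbacks defined
 *     elsewhere in this file;
 *   - RS set, EOP set, IDE clear: both timers are cancelled and ICR.TXDW is
 *     raised immediately via e1kRaiseInterrupt().
 */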
4624
4625#ifndef E1K_WITH_TXD_CACHE
4626
4627/**
4628 * Process Transmit Descriptor.
4629 *
4630 * E1000 supports three types of transmit descriptors:
4631 * - legacy: data descriptors of the older, context-less format.
4632 * - data: like legacy but with new offloading capabilities.
4633 * - context: sets up the context for the following data descriptors.
4634 *
4635 * @param pThis The device state structure.
4636 * @param pDesc Pointer to descriptor union.
4637 * @param addr Physical address of descriptor in guest memory.
4638 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4639 * @thread E1000_TX
4640 */
4641static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr, bool fOnWorkerThread)
4642{
4643 int rc = VINF_SUCCESS;
4644 uint32_t cbVTag = 0;
4645
4646 e1kPrintTDesc(pThis, pDesc, "vvv");
4647
4648//#ifdef E1K_USE_TX_TIMERS
4649 if (pThis->fTidEnabled)
4650 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
4651//#endif /* E1K_USE_TX_TIMERS */
4652
4653 switch (e1kGetDescType(pDesc))
4654 {
4655 case E1K_DTYP_CONTEXT:
4656 if (pDesc->context.dw2.fTSE)
4657 {
4658 pThis->contextTSE = pDesc->context;
4659 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4660 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4661 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4662 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4663 }
4664 else
4665 {
4666 pThis->contextNormal = pDesc->context;
4667 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4668 }
4669 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4670 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4671 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4672 pDesc->context.ip.u8CSS,
4673 pDesc->context.ip.u8CSO,
4674 pDesc->context.ip.u16CSE,
4675 pDesc->context.tu.u8CSS,
4676 pDesc->context.tu.u8CSO,
4677 pDesc->context.tu.u16CSE));
4678 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4679 e1kDescReport(pThis, pDesc, addr);
4680 break;
4681
4682 case E1K_DTYP_DATA:
4683 {
4684 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4685 {
4686 E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4687 /** @todo Same as legacy when !TSE. See below. */
4688 break;
4689 }
4690 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4691 &pThis->StatTxDescTSEData:
4692 &pThis->StatTxDescData);
4693 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4694 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4695
4696 /*
4697 * The last descriptor of a non-TSE packet must contain the VLE flag.
4698 * TSE packets have the VLE flag in the first descriptor. The latter
4699 * case is taken care of a bit later when cbVTag gets assigned.
4700 *
4701 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4702 */
4703 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4704 {
4705 pThis->fVTag = pDesc->data.cmd.fVLE;
4706 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4707 }
4708 /*
4709 * First fragment: Allocate new buffer and save the IXSM and TXSM
4710 * packet options as these are only valid in the first fragment.
4711 */
4712 if (pThis->u16TxPktLen == 0)
4713 {
4714 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4715 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4716 E1kLog2(("%s Saving checksum flags:%s%s; \n", pThis->szPrf,
4717 pThis->fIPcsum ? " IP" : "",
4718 pThis->fTCPcsum ? " TCP/UDP" : ""));
4719 if (pDesc->data.cmd.fTSE)
4720 {
4721 /* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
4722 pThis->fVTag = pDesc->data.cmd.fVLE;
4723 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4724 cbVTag = pThis->fVTag ? 4 : 0;
4725 }
4726 else if (pDesc->data.cmd.fEOP)
4727 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4728 else
4729 cbVTag = 4;
4730 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4731 if (e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE))
4732 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4733 true /*fExactSize*/, true /*fGso*/);
4734 else if (pDesc->data.cmd.fTSE)
4735 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4736 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4737 else
4738 rc = e1kXmitAllocBuf(pThis, pDesc->data.cmd.u20DTALEN + cbVTag,
4739 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4740
4741 /**
4742 * @todo Perhaps it is not that simple for GSO packets! We may
4743 * need to unwind some changes.
4744 */
4745 if (RT_FAILURE(rc))
4746 {
4747 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4748 break;
4749 }
4750 /** @todo Is there any way to indicate errors other than collisions? Like
4751 * VERR_NET_DOWN. */
4752 }
4753
4754 /*
4755 * Add the descriptor data to the frame. If the frame is complete,
4756 * transmit it and reset the u16TxPktLen field.
4757 */
4758 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4759 {
4760 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4761 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4762 if (pDesc->data.cmd.fEOP)
4763 {
4764 if ( fRc
4765 && pThis->CTX_SUFF(pTxSg)
4766 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4767 {
4768 e1kTransmitFrame(pThis, fOnWorkerThread);
4769 E1K_INC_CNT32(TSCTC);
4770 }
4771 else
4772 {
4773 if (fRc)
4774 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4775 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4776 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4777 e1kXmitFreeBuf(pThis);
4778 E1K_INC_CNT32(TSCTFC);
4779 }
4780 pThis->u16TxPktLen = 0;
4781 }
4782 }
4783 else if (!pDesc->data.cmd.fTSE)
4784 {
4785 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4786 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4787 if (pDesc->data.cmd.fEOP)
4788 {
4789 if (fRc && pThis->CTX_SUFF(pTxSg))
4790 {
4791 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4792 if (pThis->fIPcsum)
4793 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4794 pThis->contextNormal.ip.u8CSO,
4795 pThis->contextNormal.ip.u8CSS,
4796 pThis->contextNormal.ip.u16CSE);
4797 if (pThis->fTCPcsum)
4798 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4799 pThis->contextNormal.tu.u8CSO,
4800 pThis->contextNormal.tu.u8CSS,
4801 pThis->contextNormal.tu.u16CSE);
4802 e1kTransmitFrame(pThis, fOnWorkerThread);
4803 }
4804 else
4805 e1kXmitFreeBuf(pThis);
4806 pThis->u16TxPktLen = 0;
4807 }
4808 }
4809 else
4810 {
4811 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4812 e1kFallbackAddToFrame(pThis, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4813 }
4814
4815 e1kDescReport(pThis, pDesc, addr);
4816 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4817 break;
4818 }
4819
4820 case E1K_DTYP_LEGACY:
4821 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4822 {
4823 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4824 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4825 break;
4826 }
4827 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4828 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4829
4830 /* First fragment: allocate new buffer. */
4831 if (pThis->u16TxPktLen == 0)
4832 {
4833 if (pDesc->legacy.cmd.fEOP)
4834 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4835 else
4836 cbVTag = 4;
4837 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4838 /** @todo reset status bits? */
4839 rc = e1kXmitAllocBuf(pThis, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
4840 if (RT_FAILURE(rc))
4841 {
4842 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4843 break;
4844 }
4845
4846 /** @todo Is there any way to indicate errors other than collisions? Like
4847 * VERR_NET_DOWN. */
4848 }
4849
4850 /* Add fragment to frame. */
4851 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4852 {
4853 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4854
4855 /* Last fragment: Transmit and reset the packet storage counter. */
4856 if (pDesc->legacy.cmd.fEOP)
4857 {
4858 pThis->fVTag = pDesc->legacy.cmd.fVLE;
4859 pThis->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4860 /** @todo Offload processing goes here. */
4861 e1kTransmitFrame(pThis, fOnWorkerThread);
4862 pThis->u16TxPktLen = 0;
4863 }
4864 }
4865 /* Last fragment + failure: free the buffer and reset the storage counter. */
4866 else if (pDesc->legacy.cmd.fEOP)
4867 {
4868 e1kXmitFreeBuf(pThis);
4869 pThis->u16TxPktLen = 0;
4870 }
4871
4872 e1kDescReport(pThis, pDesc, addr);
4873 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4874 break;
4875
4876 default:
4877 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4878 pThis->szPrf, e1kGetDescType(pDesc)));
4879 break;
4880 }
4881
4882 return rc;
4883}
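/*
 * A note on the cbVTag computation above: the VLE bit that decides whether a
 * 4-byte VLAN tag gets inserted lives in the first descriptor for TSE packets
 * but in the last (EOP) descriptor otherwise. For the first fragment of a
 * multi-descriptor non-TSE packet VLE is therefore not known yet, so 4 bytes
 * are reserved conservatively (cbVTag = 4); the buffer is not allocated with
 * fExactSize in that case, so the extra room simply stays unused if the final
 * descriptor turns out to have VLE clear.
 */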
4884
4885#else /* E1K_WITH_TXD_CACHE */
4886
4887/**
4888 * Process Transmit Descriptor.
4889 *
4890 * E1000 supports three types of transmit descriptors:
4891 * - legacy: data descriptors of the older, context-less format.
4892 * - data: like legacy but with new offloading capabilities.
4893 * - context: sets up the context for the following data descriptors.
4894 *
4895 * @param pThis The device state structure.
4896 * @param pDesc Pointer to descriptor union.
4897 * @param addr Physical address of descriptor in guest memory.
4898 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4899 *
4900 * @thread E1000_TX
4901 */
4902static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr,
4903 bool fOnWorkerThread)
4904{
4905 int rc = VINF_SUCCESS;
4906
4907 e1kPrintTDesc(pThis, pDesc, "vvv");
4908
4909 if (pDesc->legacy.dw3.fDD)
4910 {
4911 E1kLog(("%s e1kXmitDesc: skipping bad descriptor ^^^\n", pThis->szPrf));
4912 e1kDescReport(pThis, pDesc, addr);
4913 return VINF_SUCCESS;
4914 }
4915
4916//#ifdef E1K_USE_TX_TIMERS
4917 if (pThis->fTidEnabled)
4918 TMTimerStop(pThis->CTX_SUFF(pTIDTimer));
4919//#endif /* E1K_USE_TX_TIMERS */
4920
4921 switch (e1kGetDescType(pDesc))
4922 {
4923 case E1K_DTYP_CONTEXT:
4924 /* The caller has already updated the context. */
4925 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4926 e1kDescReport(pThis, pDesc, addr);
4927 break;
4928
4929 case E1K_DTYP_DATA:
4930 {
4931 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4932 &pThis->StatTxDescTSEData:
4933 &pThis->StatTxDescData);
4934 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4935 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4936 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4937 {
4938 E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4939 if (pDesc->data.cmd.fEOP)
4940 {
4941 e1kTransmitFrame(pThis, fOnWorkerThread);
4942 pThis->u16TxPktLen = 0;
4943 }
4944 }
4945 else
4946 {
4947 /*
4948 * Add the descriptor data to the frame. If the frame is complete,
4949 * transmit it and reset the u16TxPktLen field.
4950 */
4951 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4952 {
4953 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4954 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4955 if (pDesc->data.cmd.fEOP)
4956 {
4957 if ( fRc
4958 && pThis->CTX_SUFF(pTxSg)
4959 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4960 {
4961 e1kTransmitFrame(pThis, fOnWorkerThread);
4962 E1K_INC_CNT32(TSCTC);
4963 }
4964 else
4965 {
4966 if (fRc)
4967 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4968 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4969 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4970 e1kXmitFreeBuf(pThis);
4971 E1K_INC_CNT32(TSCTFC);
4972 }
4973 pThis->u16TxPktLen = 0;
4974 }
4975 }
4976 else if (!pDesc->data.cmd.fTSE)
4977 {
4978 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4979 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4980 if (pDesc->data.cmd.fEOP)
4981 {
4982 if (fRc && pThis->CTX_SUFF(pTxSg))
4983 {
4984 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4985 if (pThis->fIPcsum)
4986 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4987 pThis->contextNormal.ip.u8CSO,
4988 pThis->contextNormal.ip.u8CSS,
4989 pThis->contextNormal.ip.u16CSE);
4990 if (pThis->fTCPcsum)
4991 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4992 pThis->contextNormal.tu.u8CSO,
4993 pThis->contextNormal.tu.u8CSS,
4994 pThis->contextNormal.tu.u16CSE);
4995 e1kTransmitFrame(pThis, fOnWorkerThread);
4996 }
4997 else
4998 e1kXmitFreeBuf(pThis);
4999 pThis->u16TxPktLen = 0;
5000 }
5001 }
5002 else
5003 {
5004 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
5005 rc = e1kFallbackAddToFrame(pThis, pDesc, fOnWorkerThread);
5006 }
5007 }
5008 e1kDescReport(pThis, pDesc, addr);
5009 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5010 break;
5011 }
5012
5013 case E1K_DTYP_LEGACY:
5014 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
5015 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5016 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
5017 {
5018 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
5019 }
5020 else
5021 {
5022 /* Add fragment to frame. */
5023 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
5024 {
5025 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
5026
5027 /* Last fragment: Transmit and reset the packet storage counter. */
5028 if (pDesc->legacy.cmd.fEOP)
5029 {
5030 if (pDesc->legacy.cmd.fIC)
5031 {
5032 e1kInsertChecksum(pThis,
5033 (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
5034 pThis->u16TxPktLen,
5035 pDesc->legacy.cmd.u8CSO,
5036 pDesc->legacy.dw3.u8CSS,
5037 0);
5038 }
5039 e1kTransmitFrame(pThis, fOnWorkerThread);
5040 pThis->u16TxPktLen = 0;
5041 }
5042 }
5043 /* Last fragment + failure: free the buffer and reset the storage counter. */
5044 else if (pDesc->legacy.cmd.fEOP)
5045 {
5046 e1kXmitFreeBuf(pThis);
5047 pThis->u16TxPktLen = 0;
5048 }
5049 }
5050 e1kDescReport(pThis, pDesc, addr);
5051 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5052 break;
5053
5054 default:
5055 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
5056 pThis->szPrf, e1kGetDescType(pDesc)));
5057 break;
5058 }
5059
5060 return rc;
5061}
5062
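/**
 * Update the transmit context from a context descriptor.
 *
 * Stores the descriptor in either contextTSE or contextNormal and, for TSE
 * contexts, clamps the MSS, resets the payload/header counters and sets up
 * the GSO context.
 *
 * @param pThis The device state structure.
 * @param pDesc Pointer to the context descriptor.
 * @thread E1000_TX
 */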
5063DECLINLINE(void) e1kUpdateTxContext(PE1KSTATE pThis, E1KTXDESC *pDesc)
5064{
5065 if (pDesc->context.dw2.fTSE)
5066 {
5067 pThis->contextTSE = pDesc->context;
5068 uint32_t cbMaxSegmentSize = pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + 4; /*VTAG*/
5069 if (RT_UNLIKELY(cbMaxSegmentSize > E1K_MAX_TX_PKT_SIZE))
5070 {
5071 pThis->contextTSE.dw3.u16MSS = E1K_MAX_TX_PKT_SIZE - pThis->contextTSE.dw3.u8HDRLEN - 4; /*VTAG*/
5072 LogRelMax(10, ("%s: Transmit packet is too large: %u > %u(max). Adjusted MSS to %u.\n",
5073 pThis->szPrf, cbMaxSegmentSize, E1K_MAX_TX_PKT_SIZE, pThis->contextTSE.dw3.u16MSS));
5074 }
5075 pThis->u32PayRemain = pThis->contextTSE.dw2.u20PAYLEN;
5076 pThis->u16HdrRemain = pThis->contextTSE.dw3.u8HDRLEN;
5077 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
5078 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
5079 }
5080 else
5081 {
5082 pThis->contextNormal = pDesc->context;
5083 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
5084 }
5085 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
5086 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
5087 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
5088 pDesc->context.ip.u8CSS,
5089 pDesc->context.ip.u8CSO,
5090 pDesc->context.ip.u16CSE,
5091 pDesc->context.tu.u8CSS,
5092 pDesc->context.tu.u8CSO,
5093 pDesc->context.tu.u16CSE));
5094}
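/*
 * A small numeric illustration of the MSS clamping above (a sketch; the value
 * of E1K_MAX_TX_PKT_SIZE is defined earlier in this file and not repeated
 * here): with HDRLEN = 54, a guest-programmed MSS is accepted only while
 * MSS + 54 + 4 <= E1K_MAX_TX_PKT_SIZE; anything larger is clamped to
 * E1K_MAX_TX_PKT_SIZE - 54 - 4 so that every emitted segment, including a
 * possible VLAN tag, still fits into the transmit buffer.
 */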
5095
5096static bool e1kLocateTxPacket(PE1KSTATE pThis)
5097{
5098 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
5099 pThis->szPrf, pThis->cbTxAlloc));
5100 /* Check if we have located the packet already. */
5101 if (pThis->cbTxAlloc)
5102 {
5103 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
5104 pThis->szPrf, pThis->cbTxAlloc));
5105 return true;
5106 }
5107
5108 bool fTSE = false;
5109 uint32_t cbPacket = 0;
5110
5111 for (int i = pThis->iTxDCurrent; i < pThis->nTxDFetched; ++i)
5112 {
5113 E1KTXDESC *pDesc = &pThis->aTxDescriptors[i];
5114 switch (e1kGetDescType(pDesc))
5115 {
5116 case E1K_DTYP_CONTEXT:
5117 if (cbPacket == 0)
5118 e1kUpdateTxContext(pThis, pDesc);
5119 else
5120 E1kLog(("%s e1kLocateTxPacket: ignoring a context descriptor in the middle of a packet, cbPacket=%d\n",
5121 pThis->szPrf, cbPacket));
5122 continue;
5123 case E1K_DTYP_LEGACY:
5124 /* Skip invalid descriptors. */
5125 if (cbPacket > 0 && (pThis->fGSO || fTSE))
5126 {
5127 E1kLog(("%s e1kLocateTxPacket: ignoring a legacy descriptor in the segmentation context, cbPacket=%d\n",
5128 pThis->szPrf, cbPacket));
5129 pDesc->legacy.dw3.fDD = true; /* Make sure it is skipped by processing */
5130 continue;
5131 }
5132 /* Skip empty descriptors. */
5133 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
5134 break;
5135 cbPacket += pDesc->legacy.cmd.u16Length;
5136 pThis->fGSO = false;
5137 break;
5138 case E1K_DTYP_DATA:
5139 /* Skip invalid descriptors. */
5140 if (cbPacket > 0 && (bool)pDesc->data.cmd.fTSE != fTSE)
5141 {
5142 E1kLog(("%s e1kLocateTxPacket: ignoring %sTSE descriptor in the %ssegmentation context, cbPacket=%d\n",
5143 pThis->szPrf, pDesc->data.cmd.fTSE ? "" : "non-", fTSE ? "" : "non-", cbPacket));
5144 pDesc->data.dw3.fDD = true; /* Make sure it is skipped by processing */
5145 continue;
5146 }
5147 /* Skip empty descriptors. */
5148 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
5149 break;
5150 if (cbPacket == 0)
5151 {
5152 /*
5153 * The first fragment: save IXSM and TXSM options
5154 * as these are only valid in the first fragment.
5155 */
5156 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
5157 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
5158 fTSE = pDesc->data.cmd.fTSE;
5159 /*
5160 * TSE descriptors have VLE bit properly set in
5161 * the first fragment.
5162 */
5163 if (fTSE)
5164 {
5165 pThis->fVTag = pDesc->data.cmd.fVLE;
5166 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5167 }
5168 pThis->fGSO = e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE);
5169 }
5170 cbPacket += pDesc->data.cmd.u20DTALEN;
5171 break;
5172 default:
5173 AssertMsgFailed(("Impossible descriptor type!"));
5174 }
5175 if (pDesc->legacy.cmd.fEOP)
5176 {
5177 /*
5178 * Non-TSE descriptors have VLE bit properly set in
5179 * the last fragment.
5180 */
5181 if (!fTSE)
5182 {
5183 pThis->fVTag = pDesc->data.cmd.fVLE;
5184 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5185 }
5186 /*
5187 * Compute the required buffer size. If we cannot do GSO but still
5188 * have to do segmentation we allocate the first segment only.
5189 */
5190 pThis->cbTxAlloc = (!fTSE || pThis->fGSO) ?
5191 cbPacket :
5192 RT_MIN(cbPacket, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN);
5193 if (pThis->fVTag)
5194 pThis->cbTxAlloc += 4;
5195 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d cbPacket=%d%s%s\n",
5196 pThis->szPrf, pThis->cbTxAlloc, cbPacket,
5197 pThis->fGSO ? " GSO" : "", fTSE ? " TSE" : ""));
5198 return true;
5199 }
5200 }
5201
5202 if (cbPacket == 0 && pThis->nTxDFetched - pThis->iTxDCurrent > 0)
5203 {
5204 /* All descriptors were empty; we need to process them as a dummy packet. */
5205 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
5206 pThis->szPrf, pThis->cbTxAlloc));
5207 return true;
5208 }
5209 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d cbPacket=%d\n",
5210 pThis->szPrf, pThis->cbTxAlloc, cbPacket));
5211 return false;
5212}
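/*
 * A worked example of the packet location above (a sketch, assuming a TSE
 * context descriptor with MSS = 1460 and HDRLEN = 54, followed by two data
 * descriptors of 4000 and 2000 bytes with EOP set on the second one):
 *   - the context descriptor updates contextTSE via e1kUpdateTxContext();
 *   - cbPacket accumulates to 6000 over the two data descriptors;
 *   - if e1kCanDoGso() accepted the packet, cbTxAlloc = 6000 (+4 with VLE),
 *     otherwise only the first segment is allocated:
 *     cbTxAlloc = RT_MIN(6000, 1460 + 54) = 1514.
 * Returning false means no EOP descriptor has been fetched yet, so the caller
 * has to load more descriptors before a buffer can be allocated.
 */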
5213
5214static int e1kXmitPacket(PE1KSTATE pThis, bool fOnWorkerThread)
5215{
5216 int rc = VINF_SUCCESS;
5217
5218 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
5219 pThis->szPrf, pThis->iTxDCurrent, pThis->nTxDFetched));
5220
5221 while (pThis->iTxDCurrent < pThis->nTxDFetched)
5222 {
5223 E1KTXDESC *pDesc = &pThis->aTxDescriptors[pThis->iTxDCurrent];
5224 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5225 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
5226 rc = e1kXmitDesc(pThis, pDesc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5227 if (RT_FAILURE(rc))
5228 break;
5229 if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
5230 TDH = 0;
5231 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
5232 if (uLowThreshold != 0 && e1kGetTxLen(pThis) <= uLowThreshold)
5233 {
5234 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5235 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5236 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5237 }
5238 ++pThis->iTxDCurrent;
5239 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
5240 break;
5241 }
5242
5243 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
5244 pThis->szPrf, rc, pThis->iTxDCurrent, pThis->nTxDFetched));
5245 return rc;
5246}
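/*
 * Two details of the loop above, spelled out (a sketch assuming the usual
 * 16-byte E1KTXDESC entries):
 *   - head wrap-around: with TDLEN = 0x1000 (256 descriptors) and TDH = 255,
 *     "++TDH * sizeof(E1KTXDESC) >= TDLEN" evaluates 256 * 16 >= 4096 and
 *     resets TDH to 0, keeping the head inside the descriptor ring;
 *   - low-threshold interrupt: the code treats one LWTHRESH unit as 8
 *     descriptors, hence GET_BITS(TXDCTL, LWTHRESH) * 8 before the comparison
 *     against e1kGetTxLen().
 */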
5247
5248#endif /* E1K_WITH_TXD_CACHE */
5249#ifndef E1K_WITH_TXD_CACHE
5250
5251/**
5252 * Transmit pending descriptors.
5253 *
5254 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5255 *
5256 * @param pThis The E1000 state.
5257 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5258 */
5259static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5260{
5261 int rc = VINF_SUCCESS;
5262
5263 /* Check if transmitter is enabled. */
5264 if (!(TCTL & TCTL_EN))
5265 return VINF_SUCCESS;
5266 /*
5267 * Grab the xmit lock of the driver as well as the E1K device state.
5268 */
5269 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5270 if (RT_LIKELY(rc == VINF_SUCCESS))
5271 {
5272 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5273 if (pDrv)
5274 {
5275 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5276 if (RT_FAILURE(rc))
5277 {
5278 e1kCsTxLeave(pThis);
5279 return rc;
5280 }
5281 }
5282 /*
5283 * Process all pending descriptors.
5284 * Note! Do not process descriptors in locked state
5285 */
5286 while (TDH != TDT && !pThis->fLocked)
5287 {
5288 E1KTXDESC desc;
5289 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5290 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
5291
5292 e1kLoadDesc(pThis, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
5293 rc = e1kXmitDesc(pThis, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5294 /* If we failed to transmit descriptor we will try it again later */
5295 if (RT_FAILURE(rc))
5296 break;
5297 if (++TDH * sizeof(desc) >= TDLEN)
5298 TDH = 0;
5299
5300 if (e1kGetTxLen(pThis) <= GET_BITS(TXDCTL, LWTHRESH)*8)
5301 {
5302 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5303 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5304 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5305 }
5306
5307 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5308 }
5309
5310 /// @todo uncomment: pThis->uStatIntTXQE++;
5311 /// @todo uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5312 /*
5313 * Release the lock.
5314 */
5315 if (pDrv)
5316 pDrv->pfnEndXmit(pDrv);
5317 e1kCsTxLeave(pThis);
5318 }
5319
5320 return rc;
5321}
5322
5323#else /* E1K_WITH_TXD_CACHE */
5324
5325static void e1kDumpTxDCache(PE1KSTATE pThis)
5326{
5327 unsigned i, cDescs = TDLEN / sizeof(E1KTXDESC);
5328 uint32_t tdh = TDH;
5329 LogRel(("E1000: -- Transmit Descriptors (%d total) --\n", cDescs));
5330 for (i = 0; i < cDescs; ++i)
5331 {
5332 E1KTXDESC desc;
5333 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(TDBAH, TDBAL, i),
5334 &desc, sizeof(desc));
5335 if (i == tdh)
5336 LogRel(("E1000: >>> "));
5337 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5338 }
5339 LogRel(("E1000: -- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5340 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE));
5341 if (tdh > pThis->iTxDCurrent)
5342 tdh -= pThis->iTxDCurrent;
5343 else
5344 tdh = cDescs + tdh - pThis->iTxDCurrent;
5345 for (i = 0; i < pThis->nTxDFetched; ++i)
5346 {
5347 if (i == pThis->iTxDCurrent)
5348 LogRel(("E1000: >>> "));
5349 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
5350 }
5351}
5352
5353/**
5354 * Transmit pending descriptors.
5355 *
5356 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5357 *
5358 * @param pThis The E1000 state.
5359 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5360 */
5361static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5362{
5363 int rc = VINF_SUCCESS;
5364
5365 /* Check if transmitter is enabled. */
5366 if (!(TCTL & TCTL_EN))
5367 return VINF_SUCCESS;
5368 /*
5369 * Grab the xmit lock of the driver as well as the E1K device state.
5370 */
5371 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5372 if (pDrv)
5373 {
5374 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5375 if (RT_FAILURE(rc))
5376 return rc;
5377 }
5378
5379 /*
5380 * Process all pending descriptors.
5381 * Note! Do not process descriptors in locked state
5382 */
5383 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5384 if (RT_LIKELY(rc == VINF_SUCCESS))
5385 {
5386 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5387 /*
5388 * fIncomplete is set whenever we try to fetch additional descriptors
5389 * for an incomplete packet. If we fail to locate a complete packet on
5390 * the next iteration, we need to reset the cache or we risk getting
5391 * stuck in this loop forever.
5392 */
5393 bool fIncomplete = false;
5394 while (!pThis->fLocked && e1kTxDLazyLoad(pThis))
5395 {
5396 while (e1kLocateTxPacket(pThis))
5397 {
5398 fIncomplete = false;
5399 /* Found a complete packet, allocate it. */
5400 rc = e1kXmitAllocBuf(pThis, pThis->fGSO);
5401 /* If we're out of bandwidth we'll come back later. */
5402 if (RT_FAILURE(rc))
5403 goto out;
5404 /* Copy the packet to allocated buffer and send it. */
5405 rc = e1kXmitPacket(pThis, fOnWorkerThread);
5406 /* If we're out of bandwidth we'll come back later. */
5407 if (RT_FAILURE(rc))
5408 goto out;
5409 }
5410 uint8_t u8Remain = pThis->nTxDFetched - pThis->iTxDCurrent;
5411 if (RT_UNLIKELY(fIncomplete))
5412 {
5413 static bool fTxDCacheDumped = false;
5414 /*
5415 * The descriptor cache is full, but we were unable to find
5416 * a complete packet in it. Drop the cache and hope that
5417 * the guest driver can recover from the network card error.
5418 */
5419 LogRel(("%s: No complete packets in%s TxD cache! "
5420 "Fetched=%d, current=%d, TX len=%d.\n",
5421 pThis->szPrf,
5422 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5423 pThis->nTxDFetched, pThis->iTxDCurrent,
5424 e1kGetTxLen(pThis)));
5425 if (!fTxDCacheDumped)
5426 {
5427 fTxDCacheDumped = true;
5428 e1kDumpTxDCache(pThis);
5429 }
5430 pThis->iTxDCurrent = pThis->nTxDFetched = 0;
5431 /*
5432 * Returning an error at this point means Guru in R0
5433 * (see @bugref{6428}).
5434 */
5435# ifdef IN_RING3
5436 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5437# else /* !IN_RING3 */
5438 rc = VINF_IOM_R3_MMIO_WRITE;
5439# endif /* !IN_RING3 */
5440 goto out;
5441 }
5442 if (u8Remain > 0)
5443 {
5444 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5445 "%d more are available\n",
5446 pThis->szPrf, pThis->iTxDCurrent, u8Remain,
5447 e1kGetTxLen(pThis) - u8Remain));
5448
5449 /*
5450 * A packet was partially fetched. Move incomplete packet to
5451 * the beginning of cache buffer, then load more descriptors.
5452 */
5453 memmove(pThis->aTxDescriptors,
5454 &pThis->aTxDescriptors[pThis->iTxDCurrent],
5455 u8Remain * sizeof(E1KTXDESC));
5456 pThis->iTxDCurrent = 0;
5457 pThis->nTxDFetched = u8Remain;
5458 e1kTxDLoadMore(pThis);
5459 fIncomplete = true;
5460 }
5461 else
5462 pThis->nTxDFetched = 0;
5463 pThis->iTxDCurrent = 0;
5464 }
5465 if (!pThis->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5466 {
5467 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5468 pThis->szPrf));
5469 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5470 }
5471out:
5472 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5473
5474 /// @todo uncomment: pThis->uStatIntTXQE++;
5475 /// @todo uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5476
5477 e1kCsTxLeave(pThis);
5478 }
5479
5480
5481 /*
5482 * Release the lock.
5483 */
5484 if (pDrv)
5485 pDrv->pfnEndXmit(pDrv);
5486 return rc;
5487}
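/*
 * A sketch of the incomplete-packet handling above: if, say, 5 of 8 fetched
 * descriptors formed complete packets and the remaining 3 are the head of an
 * unfinished one, those 3 are moved to aTxDescriptors[0..2], nTxDFetched is
 * set to 3 and e1kTxDLoadMore() appends further descriptors behind them.
 * Should the next pass around the loop still fail to find an EOP while
 * fIncomplete is set, the cache is dropped (iTxDCurrent = nTxDFetched = 0)
 * and dumped once via e1kDumpTxDCache() (guarded by a static flag) to help
 * diagnose the misbehaving guest driver.
 */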
5488
5489#endif /* E1K_WITH_TXD_CACHE */
5490#ifdef IN_RING3
5491
5492/**
5493 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5494 */
5495static DECLCALLBACK(void) e1kR3NetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5496{
5497 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
5498 /* Resume suspended transmission */
5499 STATUS &= ~STATUS_TXOFF;
5500 e1kXmitPending(pThis, true /*fOnWorkerThread*/);
5501}
5502
5503/**
5504 * Callback for consuming from transmit queue. It gets called in R3 whenever
5505 * we enqueue something in R0/GC.
5506 *
5507 * @returns true
5508 * @param pDevIns Pointer to device instance structure.
5509 * @param pItem Pointer to the element being dequeued (not used).
5510 * @thread ???
5511 */
5512static DECLCALLBACK(bool) e1kTxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5513{
5514 NOREF(pItem);
5515 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5516 E1kLog2(("%s e1kTxQueueConsumer:\n", pThis->szPrf));
5517
5518 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/); NOREF(rc);
5519#ifndef DEBUG_andy /** @todo r=andy Happens for me a lot, mute this for me. */
5520 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
5521#endif
5522 return true;
5523}
5524
5525/**
5526 * Handler for the wakeup signaller queue.
5527 */
5528static DECLCALLBACK(bool) e1kCanRxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5529{
5530 RT_NOREF(pItem);
5531 e1kWakeupReceive(pDevIns);
5532 return true;
5533}
5534
5535#endif /* IN_RING3 */
5536
5537/**
5538 * Write handler for Transmit Descriptor Tail register.
5539 *
5540 * @param pThis The device state structure.
5541 * @param offset Register offset in memory-mapped frame.
5542 * @param index Register index in register array.
5543 * @param value The value to store.
5544 *
5545 * @thread EMT
5546 */
5547static int e1kRegWriteTDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5548{
5549 int rc = e1kRegWriteDefault(pThis, offset, index, value);
5550
5551 /* All descriptors starting with head and not including tail belong to us. */
5552 /* Process them. */
5553 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5554 pThis->szPrf, TDBAL, TDBAH, TDLEN, TDH, TDT));
5555
5556 /* Ignore TDT writes when the link is down. */
5557 if (TDH != TDT && (STATUS & STATUS_LU))
5558 {
5559 Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", TDH, TDT, e1kGetTxLen(pThis)));
5560 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5561 pThis->szPrf, e1kGetTxLen(pThis)));
5562
5563 /* Transmit pending packets if possible, defer it if we cannot do it
5564 in the current context. */
5565#ifdef E1K_TX_DELAY
5566 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5567 if (RT_LIKELY(rc == VINF_SUCCESS))
5568 {
5569 if (!TMTimerIsActive(pThis->CTX_SUFF(pTXDTimer)))
5570 {
5571#ifdef E1K_INT_STATS
5572 pThis->u64ArmedAt = RTTimeNanoTS();
5573#endif
5574 e1kArmTimer(pThis, pThis->CTX_SUFF(pTXDTimer), E1K_TX_DELAY);
5575 }
5576 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayed);
5577 e1kCsTxLeave(pThis);
5578 return rc;
5579 }
5580 /* We failed to enter the TX critical section -- transmit as usual. */
5581#endif /* E1K_TX_DELAY */
5582#ifndef IN_RING3
5583 if (!pThis->CTX_SUFF(pDrv))
5584 {
5585 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pTxQueue));
5586 if (RT_UNLIKELY(pItem))
5587 PDMQueueInsert(pThis->CTX_SUFF(pTxQueue), pItem);
5588 }
5589 else
5590#endif
5591 {
5592 rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
5593 if (rc == VERR_TRY_AGAIN)
5594 rc = VINF_SUCCESS;
5595 else if (rc == VERR_SEM_BUSY)
5596 rc = VINF_IOM_R3_MMIO_WRITE;
5597 AssertRC(rc);
5598 }
5599 }
5600
5601 return rc;
5602}
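/*
 * Typical guest interaction with the handler above (a sketch): the driver
 * fills one or more descriptors starting at the old tail, then writes the new
 * tail value to TDT; everything from TDH up to, but not including, TDT now
 * belongs to the device. TDT writes are ignored while the link is down.
 * Depending on the build and context the work is done right away
 * (e1kXmitPending), postponed via the TX delay timer (E1K_TX_DELAY), or, when
 * no driver is attached in the current R0/RC context, handed to ring-3
 * through the TX queue.
 */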
5603
5604/**
5605 * Write handler for Multicast Table Array registers.
5606 *
5607 * @param pThis The device state structure.
5608 * @param offset Register offset in memory-mapped frame.
5609 * @param index Register index in register array.
5610 * @param value The value to store.
5611 * @thread EMT
5612 */
5613static int e1kRegWriteMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5614{
5615 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5616 pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])] = value;
5617
5618 return VINF_SUCCESS;
5619}
5620
5621/**
5622 * Read handler for Multicast Table Array registers.
5623 *
5624 * @returns VBox status code.
5625 *
5626 * @param pThis The device state structure.
5627 * @param offset Register offset in memory-mapped frame.
5628 * @param index Register index in register array.
5629 * @thread EMT
5630 */
5631static int e1kRegReadMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5632{
5633 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5634 *pu32Value = pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])];
5635
5636 return VINF_SUCCESS;
5637}
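/*
 * The MTA handlers above, as well as the RA and VFTA handlers below, all use
 * the same addressing scheme: the distance from the array's base register,
 * divided by the 4-byte element size, selects the entry. For example (a
 * sketch assuming 32-bit table entries), a write at MTA base + 0x10 lands in
 * auMTA[4].
 */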
5638
5639/**
5640 * Write handler for Receive Address registers.
5641 *
5642 * @param pThis The device state structure.
5643 * @param offset Register offset in memory-mapped frame.
5644 * @param index Register index in register array.
5645 * @param value The value to store.
5646 * @thread EMT
5647 */
5648static int e1kRegWriteRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5649{
5650 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5651 pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
5652
5653 return VINF_SUCCESS;
5654}
5655
5656/**
5657 * Read handler for Receive Address registers.
5658 *
5659 * @returns VBox status code.
5660 *
5661 * @param pThis The device state structure.
5662 * @param offset Register offset in memory-mapped frame.
5663 * @param index Register index in register array.
5664 * @thread EMT
5665 */
5666static int e1kRegReadRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5667{
5668 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5669 *pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
5670
5671 return VINF_SUCCESS;
5672}
5673
5674/**
5675 * Write handler for VLAN Filter Table Array registers.
5676 *
5677 * @param pThis The device state structure.
5678 * @param offset Register offset in memory-mapped frame.
5679 * @param index Register index in register array.
5680 * @param value The value to store.
5681 * @thread EMT
5682 */
5683static int e1kRegWriteVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5684{
5685 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VINF_SUCCESS);
5686 pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])] = value;
5687
5688 return VINF_SUCCESS;
5689}
5690
5691/**
5692 * Read handler for VLAN Filter Table Array registers.
5693 *
5694 * @returns VBox status code.
5695 *
5696 * @param pThis The device state structure.
5697 * @param offset Register offset in memory-mapped frame.
5698 * @param index Register index in register array.
5699 * @thread EMT
5700 */
5701static int e1kRegReadVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5702{
5703 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VERR_DEV_IO_ERROR);
5704 *pu32Value = pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])];
5705
5706 return VINF_SUCCESS;
5707}
5708
5709/**
5710 * Read handler for unimplemented registers.
5711 *
5712 * Merely reports reads from unimplemented registers.
5713 *
5714 * @returns VBox status code.
5715 *
5716 * @param pThis The device state structure.
5717 * @param offset Register offset in memory-mapped frame.
5718 * @param index Register index in register array.
5719 * @thread EMT
5720 */
5721static int e1kRegReadUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5722{
5723 RT_NOREF3(pThis, offset, index);
5724 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5725 pThis->szPrf, offset, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5726 *pu32Value = 0;
5727
5728 return VINF_SUCCESS;
5729}
5730
5731/**
5732 * Default register read handler with automatic clear operation.
5733 *
5734 * Retrieves the value of the register from the register array in the device
5735 * state structure, then resets all of its bits.
5736 *
5737 * @remarks No mask parameter is needed here as masking and shifting are
5738 * done in the caller.
5739 *
5740 * @returns VBox status code.
5741 *
5742 * @param pThis The device state structure.
5743 * @param offset Register offset in memory-mapped frame.
5744 * @param index Register index in register array.
5745 * @thread EMT
5746 */
5747static int e1kRegReadAutoClear(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5748{
5749 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5750 int rc = e1kRegReadDefault(pThis, offset, index, pu32Value);
5751 pThis->auRegs[index] = 0;
5752
5753 return rc;
5754}
5755
5756/**
5757 * Default register read handler.
5758 *
5759 * Retrieves the value of the register from the register array in the device
5760 * state structure. Bits corresponding to 0s in the 'readable' mask always read as 0s.
5761 *
5762 * @remarks No mask parameter is needed here as masking and shifting are
5763 * done in the caller.
5764 *
5765 * @returns VBox status code.
5766 *
5767 * @param pThis The device state structure.
5768 * @param offset Register offset in memory-mapped frame.
5769 * @param index Register index in register array.
5770 * @thread EMT
5771 */
5772static int e1kRegReadDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5773{
5774 RT_NOREF_PV(offset);
5775
5776 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5777 *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
5778
5779 return VINF_SUCCESS;
5780}
5781
5782/**
5783 * Write handler for unimplemented registers.
5784 *
5785 * Merely reports writes to unimplemented registers.
5786 *
5787 * @param pThis The device state structure.
5788 * @param offset Register offset in memory-mapped frame.
5789 * @param index Register index in register array.
5790 * @param value The value to store.
5791 * @thread EMT
5792 */
5793
5794static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5795{
5796 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
5797
5798 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
5799 pThis->szPrf, offset, value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5800
5801 return VINF_SUCCESS;
5802}
5803
5804/**
5805 * Default register write handler.
5806 *
5807 * Stores the value in the register array in the device state structure. Only
5808 * bits corresponding to 1s in the 'writable' mask will be stored.
5809 *
5810 * @returns VBox status code.
5811 *
5812 * @param pThis The device state structure.
5813 * @param offset Register offset in memory-mapped frame.
5814 * @param index Register index in register array.
5815 * @param value The value to store.
5816 *
5817 * @thread EMT
5818 */
5819
5820static int e1kRegWriteDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5821{
5822 RT_NOREF_PV(offset);
5823
5824 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5825 pThis->auRegs[index] = (value & g_aE1kRegMap[index].writable)
5826 | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
5827
5828 return VINF_SUCCESS;
5829}
5830
5831/**
5832 * Search register table for matching register.
5833 *
5834 * @returns Index in the register table or -1 if not found.
5835 *
5836 * @param offReg Register offset in memory-mapped region.
5837 * @thread EMT
5838 */
5839static int e1kRegLookup(uint32_t offReg)
5840{
5841
5842#if 0
5843 int index;
5844
5845 for (index = 0; index < E1K_NUM_OF_REGS; index++)
5846 {
5847 if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
5848 {
5849 return index;
5850 }
5851 }
5852#else
5853 int iStart = 0;
5854 int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
5855 for (;;)
5856 {
5857 int i = (iEnd - iStart) / 2 + iStart;
5858 uint32_t offCur = g_aE1kRegMap[i].offset;
5859 if (offReg < offCur)
5860 {
5861 if (i == iStart)
5862 break;
5863 iEnd = i;
5864 }
5865 else if (offReg >= offCur + g_aE1kRegMap[i].size)
5866 {
5867 i++;
5868 if (i == iEnd)
5869 break;
5870 iStart = i;
5871 }
5872 else
5873 return i;
5874 Assert(iEnd > iStart);
5875 }
5876
5877 for (unsigned i = E1K_NUM_OF_BINARY_SEARCHABLE; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5878 if (offReg - g_aE1kRegMap[i].offset < g_aE1kRegMap[i].size)
5879 return i;
5880
5881# ifdef VBOX_STRICT
5882 for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5883 Assert(offReg - g_aE1kRegMap[i].offset >= g_aE1kRegMap[i].size);
5884# endif
5885
5886#endif
5887
5888 return -1;
5889}
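/*
 * How the lookup above proceeds (a sketch): the first
 * E1K_NUM_OF_BINARY_SEARCHABLE entries of g_aE1kRegMap are sorted by offset,
 * so the loop keeps a half-open window [iStart, iEnd) and compares offReg
 * against the probed entry's [offset, offset + size) range, halving the
 * window each round. Entries past that prefix, which need not be in offset
 * order, are found by the linear scan that follows; -1 means the offset maps
 * to no known register at all.
 */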
5890
5891/**
5892 * Handle unaligned register read operation.
5893 *
5894 * Looks up and calls appropriate handler.
5895 *
5896 * @returns VBox status code.
5897 *
5898 * @param pThis The device state structure.
5899 * @param offReg Register offset in memory-mapped frame.
5900 * @param pv Where to store the result.
5901 * @param cb Number of bytes to read.
5902 * @thread EMT
5903 * @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
5904 * accesses we have to take care of that ourselves.
5905 */
5906static int e1kRegReadUnaligned(PE1KSTATE pThis, uint32_t offReg, void *pv, uint32_t cb)
5907{
5908 uint32_t u32 = 0;
5909 uint32_t shift;
5910 int rc = VINF_SUCCESS;
5911 int index = e1kRegLookup(offReg);
5912#ifdef LOG_ENABLED
5913 char buf[9];
5914#endif
5915
5916 /*
5917 * From the spec:
5918 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5919 * double word) are ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5920 */
5921
5922 /*
5923 * To be able to read bytes and short word we convert them to properly
5924 * shifted 32-bit words and masks. The idea is to keep register-specific
5925 * handlers simple. Most accesses will be 32-bit anyway.
5926 */
5927 uint32_t mask;
5928 switch (cb)
5929 {
5930 case 4: mask = 0xFFFFFFFF; break;
5931 case 2: mask = 0x0000FFFF; break;
5932 case 1: mask = 0x000000FF; break;
5933 default:
5934 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
5935 "unsupported op size: offset=%#10x cb=%#10x\n", offReg, cb);
5936 }
5937 if (index != -1)
5938 {
5939 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
5940 if (g_aE1kRegMap[index].readable)
5941 {
5942 /* Make the mask correspond to the bits we are about to read. */
5943 shift = (offReg - g_aE1kRegMap[index].offset) % sizeof(uint32_t) * 8;
5944 mask <<= shift;
5945 if (!mask)
5946 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
5947 /*
5948 * Read it. Pass the mask so the handler knows what has to be read.
5949 * Mask out irrelevant bits.
5950 */
5951 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5952 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5953 return rc;
5954 //pThis->fDelayInts = false;
5955 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5956 //pThis->iStatIntLostOne = 0;
5957 rc = g_aE1kRegMap[index].pfnRead(pThis, offReg & 0xFFFFFFFC, index, &u32);
5958 u32 &= mask;
5959 //e1kCsLeave(pThis);
5960 E1kLog2(("%s At %08X read %s from %s (%s)\n",
5961 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5962 Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
5963 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5964 /* Shift back the result. */
5965 u32 >>= shift;
5966 }
5967 else
5968 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
5969 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5970 if (IOM_SUCCESS(rc))
5971 STAM_COUNTER_INC(&pThis->aStatRegReads[index]);
5972 }
5973 else
5974 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
5975 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf)));
5976
5977 memcpy(pv, &u32, cb);
5978 return rc;
5979}
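/*
 * A worked example of the mask/shift arithmetic above: a 2-byte I/O read at a
 * register's base offset + 2 yields shift = 16 and mask = 0x0000FFFF << 16 =
 * 0xFFFF0000. The register handler is invoked for the whole aligned dword,
 * the mask keeps bits 31:16 of the result, and the final "u32 >>= shift"
 * moves them into the low 16 bits that are copied back to the caller.
 */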
5980
5981/**
5982 * Handle 4 byte aligned and sized read operation.
5983 *
5984 * Looks up and calls appropriate handler.
5985 *
5986 * @returns VBox status code.
5987 *
5988 * @param pThis The device state structure.
5989 * @param offReg Register offset in memory-mapped frame.
5990 * @param pu32 Where to store the result.
5991 * @thread EMT
5992 */
5993static int e1kRegReadAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t *pu32)
5994{
5995 Assert(!(offReg & 3));
5996
5997 /*
5998 * Lookup the register and check that it's readable.
5999 */
6000 int rc = VINF_SUCCESS;
6001 int idxReg = e1kRegLookup(offReg);
6002 if (RT_LIKELY(idxReg != -1))
6003 {
6004 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6005 if (RT_LIKELY(g_aE1kRegMap[idxReg].readable))
6006 {
6007 /*
6008 * Read it. Pass the mask so the handler knows what has to be read.
6009 * Mask out irrelevant bits.
6010 */
6011 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
6012 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
6013 // return rc;
6014 //pThis->fDelayInts = false;
6015 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6016 //pThis->iStatIntLostOne = 0;
6017 rc = g_aE1kRegMap[idxReg].pfnRead(pThis, offReg & 0xFFFFFFFC, idxReg, pu32);
6018 //e1kCsLeave(pThis);
6019 Log6(("%s At %08X read %08X from %s (%s)\n",
6020 pThis->szPrf, offReg, *pu32, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
6021 if (IOM_SUCCESS(rc))
6022 STAM_COUNTER_INC(&pThis->aStatRegReads[idxReg]);
6023 }
6024 else
6025 E1kLog(("%s At %08X read attempt from non-readable register %s (%s)\n",
6026 pThis->szPrf, offReg, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
6027 }
6028 else
6029 E1kLog(("%s At %08X read attempt from non-existing register\n", pThis->szPrf, offReg));
6030 return rc;
6031}
6032
6033/**
6034 * Handle 4 byte sized and aligned register write operation.
6035 *
6036 * Looks up and calls appropriate handler.
6037 *
6038 * @returns VBox status code.
6039 *
6040 * @param pThis The device state structure.
6041 * @param offReg Register offset in memory-mapped frame.
6042 * @param u32Value The value to write.
6043 * @thread EMT
6044 */
6045static int e1kRegWriteAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
6046{
6047 int rc = VINF_SUCCESS;
6048 int index = e1kRegLookup(offReg);
6049 if (RT_LIKELY(index != -1))
6050 {
6051 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6052 if (RT_LIKELY(g_aE1kRegMap[index].writable))
6053 {
6054 /*
6055 * Write it. Pass the mask so the handler knows what has to be written.
6056 * Mask out irrelevant bits.
6057 */
6058 Log6(("%s At %08X write %08X to %s (%s)\n",
6059 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6060 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
6061 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
6062 // return rc;
6063 //pThis->fDelayInts = false;
6064 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6065 //pThis->iStatIntLostOne = 0;
6066 rc = g_aE1kRegMap[index].pfnWrite(pThis, offReg, index, u32Value);
6067 //e1kCsLeave(pThis);
6068 }
6069 else
6070 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
6071 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6072 if (IOM_SUCCESS(rc))
6073 STAM_COUNTER_INC(&pThis->aStatRegWrites[index]);
6074 }
6075 else
6076 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
6077 pThis->szPrf, offReg, u32Value));
6078 return rc;
6079}
6080
6081
6082/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
6083
6084/**
6085 * @callback_method_impl{FNIOMMMIOREAD}
6086 */
6087PDMBOTHCBDECL(int) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
6088{
6089 RT_NOREF2(pvUser, cb);
6090 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6091 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6092
6093 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
6094 Assert(offReg < E1K_MM_SIZE);
6095 Assert(cb == 4);
6096 Assert(!(GCPhysAddr & 3));
6097
6098 int rc = e1kRegReadAlignedU32(pThis, offReg, (uint32_t *)pv);
6099
6100 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6101 return rc;
6102}
6103
6104/**
6105 * @callback_method_impl{FNIOMMMIOWRITE}
6106 */
6107PDMBOTHCBDECL(int) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
6108{
6109 RT_NOREF2(pvUser, cb);
6110 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6111 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6112
6113 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
6114 Assert(offReg < E1K_MM_SIZE);
6115 Assert(cb == 4);
6116 Assert(!(GCPhysAddr & 3));
6117
6118 int rc = e1kRegWriteAlignedU32(pThis, offReg, *(uint32_t const *)pv);
6119
6120 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6121 return rc;
6122}
6123
6124/**
6125 * @callback_method_impl{FNIOMIOPORTIN}
6126 */
6127PDMBOTHCBDECL(int) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t *pu32, unsigned cb)
6128{
6129 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6130 int rc;
6131 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIORead), a);
6132 RT_NOREF_PV(pvUser);
6133
6134 uPort -= pThis->IOPortBase;
6135 if (RT_LIKELY(cb == 4))
6136 switch (uPort)
6137 {
6138 case 0x00: /* IOADDR */
6139 *pu32 = pThis->uSelectedReg;
6140 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6141 rc = VINF_SUCCESS;
6142 break;
6143
6144 case 0x04: /* IODATA */
6145 if (!(pThis->uSelectedReg & 3))
6146 rc = e1kRegReadAlignedU32(pThis, pThis->uSelectedReg, pu32);
6147 else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
6148 rc = e1kRegReadUnaligned(pThis, pThis->uSelectedReg, pu32, cb);
6149 if (rc == VINF_IOM_R3_MMIO_READ)
6150 rc = VINF_IOM_R3_IOPORT_READ;
6151 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6152 break;
6153
6154 default:
6155 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", pThis->szPrf, uPort));
6156 //rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
6157 rc = VINF_SUCCESS;
6158 }
6159 else
6160 {
6161 E1kLog(("%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb));
6162 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb);
6163 }
6164 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIORead), a);
6165 return rc;
6166}
6167
6168
6169/**
6170 * @callback_method_impl{FNIOMIOPORTOUT}
6171 */
6172PDMBOTHCBDECL(int) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t u32, unsigned cb)
6173{
6174 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6175 int rc;
6176 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6177 RT_NOREF_PV(pvUser);
6178
6179 E1kLog2(("%s e1kIOPortOut: uPort=%RTiop value=%08x\n", pThis->szPrf, uPort, u32));
6180 if (RT_LIKELY(cb == 4))
6181 {
6182 uPort -= pThis->IOPortBase;
6183 switch (uPort)
6184 {
6185 case 0x00: /* IOADDR */
6186 pThis->uSelectedReg = u32;
6187 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
6188 rc = VINF_SUCCESS;
6189 break;
6190
6191 case 0x04: /* IODATA */
6192 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
6193 if (RT_LIKELY(!(pThis->uSelectedReg & 3)))
6194 {
6195 rc = e1kRegWriteAlignedU32(pThis, pThis->uSelectedReg, u32);
6196 if (rc == VINF_IOM_R3_MMIO_WRITE)
6197 rc = VINF_IOM_R3_IOPORT_WRITE;
6198 }
6199 else
6200 rc = PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
6201 "Spec violation: misaligned offset: %#10x, ignored.\n", pThis->uSelectedReg);
6202 break;
6203
6204 default:
6205 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", pThis->szPrf, uPort));
6206 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", uPort);
6207 }
6208 }
6209 else
6210 {
6211 E1kLog(("%s e1kIOPortOut: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb));
6212 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s: invalid op size: uPort=%RTiop cb=%#x\n", pThis->szPrf, uPort, cb);
6213 }
6214
6215 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6216 return rc;
6217}
6218
6219#ifdef IN_RING3
6220
6221/**
6222 * Dump complete device state to log.
6223 *
6224 * @param pThis Pointer to device state.
6225 */
6226static void e1kDumpState(PE1KSTATE pThis)
6227{
6228 RT_NOREF(pThis);
6229 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6230 E1kLog2(("%s: %8.8s = %08x\n", pThis->szPrf, g_aE1kRegMap[i].abbrev, pThis->auRegs[i]));
6231# ifdef E1K_INT_STATS
6232 LogRel(("%s: Interrupt attempts: %d\n", pThis->szPrf, pThis->uStatIntTry));
6233 LogRel(("%s: Interrupts raised : %d\n", pThis->szPrf, pThis->uStatInt));
6234 LogRel(("%s: Interrupts lowered: %d\n", pThis->szPrf, pThis->uStatIntLower));
6235 LogRel(("%s: ICR outside ISR : %d\n", pThis->szPrf, pThis->uStatNoIntICR));
6236 LogRel(("%s: IMS raised ints : %d\n", pThis->szPrf, pThis->uStatIntIMS));
6237 LogRel(("%s: Interrupts skipped: %d\n", pThis->szPrf, pThis->uStatIntSkip));
6238 LogRel(("%s: Masked interrupts : %d\n", pThis->szPrf, pThis->uStatIntMasked));
6239 LogRel(("%s: Early interrupts : %d\n", pThis->szPrf, pThis->uStatIntEarly));
6240 LogRel(("%s: Late interrupts : %d\n", pThis->szPrf, pThis->uStatIntLate));
6241 LogRel(("%s: Lost interrupts : %d\n", pThis->szPrf, pThis->iStatIntLost));
6242 LogRel(("%s: Interrupts by RX : %d\n", pThis->szPrf, pThis->uStatIntRx));
6243 LogRel(("%s: Interrupts by TX : %d\n", pThis->szPrf, pThis->uStatIntTx));
6244 LogRel(("%s: Interrupts by ICS : %d\n", pThis->szPrf, pThis->uStatIntICS));
6245 LogRel(("%s: Interrupts by RDTR: %d\n", pThis->szPrf, pThis->uStatIntRDTR));
6246 LogRel(("%s: Interrupts by RDMT: %d\n", pThis->szPrf, pThis->uStatIntRXDMT0));
6247 LogRel(("%s: Interrupts by TXQE: %d\n", pThis->szPrf, pThis->uStatIntTXQE));
6248 LogRel(("%s: TX int delay asked: %d\n", pThis->szPrf, pThis->uStatTxIDE));
6249 LogRel(("%s: TX delayed: %d\n", pThis->szPrf, pThis->uStatTxDelayed));
6250 LogRel(("%s: TX delay expired: %d\n", pThis->szPrf, pThis->uStatTxDelayExp));
6251 LogRel(("%s: TX no report asked: %d\n", pThis->szPrf, pThis->uStatTxNoRS));
6252 LogRel(("%s: TX abs timer expd : %d\n", pThis->szPrf, pThis->uStatTAD));
6253 LogRel(("%s: TX int timer expd : %d\n", pThis->szPrf, pThis->uStatTID));
6254 LogRel(("%s: RX abs timer expd : %d\n", pThis->szPrf, pThis->uStatRAD));
6255 LogRel(("%s: RX int timer expd : %d\n", pThis->szPrf, pThis->uStatRID));
6256 LogRel(("%s: TX CTX descriptors: %d\n", pThis->szPrf, pThis->uStatDescCtx));
6257 LogRel(("%s: TX DAT descriptors: %d\n", pThis->szPrf, pThis->uStatDescDat));
6258 LogRel(("%s: TX LEG descriptors: %d\n", pThis->szPrf, pThis->uStatDescLeg));
6259 LogRel(("%s: Received frames : %d\n", pThis->szPrf, pThis->uStatRxFrm));
6260 LogRel(("%s: Transmitted frames: %d\n", pThis->szPrf, pThis->uStatTxFrm));
6261 LogRel(("%s: TX frames up to 1514: %d\n", pThis->szPrf, pThis->uStatTx1514));
6262 LogRel(("%s: TX frames up to 2962: %d\n", pThis->szPrf, pThis->uStatTx2962));
6263 LogRel(("%s: TX frames up to 4410: %d\n", pThis->szPrf, pThis->uStatTx4410));
6264 LogRel(("%s: TX frames up to 5858: %d\n", pThis->szPrf, pThis->uStatTx5858));
6265 LogRel(("%s: TX frames up to 7306: %d\n", pThis->szPrf, pThis->uStatTx7306));
6266 LogRel(("%s: TX frames up to 8754: %d\n", pThis->szPrf, pThis->uStatTx8754));
6267 LogRel(("%s: TX frames up to 16384: %d\n", pThis->szPrf, pThis->uStatTx16384));
6268 LogRel(("%s: TX frames up to 32768: %d\n", pThis->szPrf, pThis->uStatTx32768));
6269 LogRel(("%s: Larger TX frames : %d\n", pThis->szPrf, pThis->uStatTxLarge));
6270 LogRel(("%s: Max TX Delay : %lld\n", pThis->szPrf, pThis->uStatMaxTxDelay));
6271# endif /* E1K_INT_STATS */
6272}
6273
6274/**
6275 * @callback_method_impl{FNPCIIOREGIONMAP}
6276 */
6277static DECLCALLBACK(int) e1kMap(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
6278 RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
6279{
6280 RT_NOREF(pPciDev, iRegion);
6281 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE *);
6282 int rc;
6283
6284 switch (enmType)
6285 {
6286 case PCI_ADDRESS_SPACE_IO:
6287 pThis->IOPortBase = (RTIOPORT)GCPhysAddress;
6288 rc = PDMDevHlpIOPortRegister(pDevIns, pThis->IOPortBase, cb, NULL /*pvUser*/,
6289 e1kIOPortOut, e1kIOPortIn, NULL, NULL, "E1000");
6290 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6291 rc = PDMDevHlpIOPortRegisterR0(pDevIns, pThis->IOPortBase, cb, NIL_RTR0PTR /*pvUser*/,
6292 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6293 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6294 rc = PDMDevHlpIOPortRegisterRC(pDevIns, pThis->IOPortBase, cb, NIL_RTRCPTR /*pvUser*/,
6295 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6296 break;
6297
6298 case PCI_ADDRESS_SPACE_MEM:
6299 /*
6300 * From the spec:
6301 * For registers that should be accessed as 32-bit double words,
6302             * partial writes (less than a 32-bit double word) are ignored.
6303 * Partial reads return all 32 bits of data regardless of the
6304 * byte enables.
6305 */
6306#ifdef E1K_WITH_PREREG_MMIO
6307 pThis->addrMMReg = GCPhysAddress;
6308 if (GCPhysAddress == NIL_RTGCPHYS)
6309 rc = VINF_SUCCESS;
6310 else
6311 {
6312 Assert(!(GCPhysAddress & 7));
6313 rc = PDMDevHlpMMIOExMap(pDevIns, pPciDev, iRegion, GCPhysAddress);
6314 }
6315#else
6316 pThis->addrMMReg = GCPhysAddress; Assert(!(GCPhysAddress & 7));
6317 rc = PDMDevHlpMMIORegister(pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
6318 IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD,
6319 e1kMMIOWrite, e1kMMIORead, "E1000");
6320 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6321 rc = PDMDevHlpMMIORegisterR0(pDevIns, GCPhysAddress, cb, NIL_RTR0PTR /*pvUser*/,
6322 "e1kMMIOWrite", "e1kMMIORead");
6323 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6324 rc = PDMDevHlpMMIORegisterRC(pDevIns, GCPhysAddress, cb, NIL_RTRCPTR /*pvUser*/,
6325 "e1kMMIOWrite", "e1kMMIORead");
6326#endif
6327 break;
6328
6329 default:
6330 /* We should never get here */
6331 AssertMsgFailed(("Invalid PCI address space param in map callback"));
6332 rc = VERR_INTERNAL_ERROR;
6333 break;
6334 }
6335 return rc;
6336}
6337
6338
6339/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
6340
6341/**
6342 * Check if the device can receive data now.
6343 * This must be called before the pfnReceive() method is called.
6344 *
6345 * @returns VBox status code: VERR_NET_NO_BUFFER_SPACE when no receive buffers are available.
6346 * @param   pThis       Pointer to the device state structure.
6347 * @thread EMT
6348 */
6349static int e1kCanReceive(PE1KSTATE pThis)
6350{
6351#ifndef E1K_WITH_RXD_CACHE
6352 size_t cb;
6353
6354 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6355 return VERR_NET_NO_BUFFER_SPACE;
6356
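    /* Special case: a single-descriptor ring always has RDH == RDT, so the only
     * way to tell a full ring from an empty one is the descriptor's DD bit. */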
6357 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6358 {
6359 E1KRXDESC desc;
6360 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6361 &desc, sizeof(desc));
6362 if (desc.status.fDD)
6363 cb = 0;
6364 else
6365 cb = pThis->u16RxBSize;
6366 }
6367 else if (RDH < RDT)
6368 cb = (RDT - RDH) * pThis->u16RxBSize;
6369 else if (RDH > RDT)
6370 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pThis->u16RxBSize;
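        /* Example: with a ring of 8 descriptors, RDH=6 and RDT=2, the ring has
         * wrapped and 8 - 6 + 2 = 4 descriptors are still available to the device. */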
6371 else
6372 {
6373 cb = 0;
6374 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6375 }
6376 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6377 pThis->szPrf, RDH, RDT, RDLEN, pThis->u16RxBSize, cb));
6378
6379 e1kCsRxLeave(pThis);
6380 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6381#else /* E1K_WITH_RXD_CACHE */
6382 int rc = VINF_SUCCESS;
6383
6384 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6385 return VERR_NET_NO_BUFFER_SPACE;
6386
6387 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6388 {
6389 E1KRXDESC desc;
6390 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6391 &desc, sizeof(desc));
6392 if (desc.status.fDD)
6393 rc = VERR_NET_NO_BUFFER_SPACE;
6394 }
6395 else if (e1kRxDIsCacheEmpty(pThis) && RDH == RDT)
6396 {
6397 /* Cache is empty, so is the RX ring. */
6398 rc = VERR_NET_NO_BUFFER_SPACE;
6399 }
6400 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
6401 " u16RxBSize=%d rc=%Rrc\n", pThis->szPrf,
6402 e1kRxDInCache(pThis), RDH, RDT, RDLEN, pThis->u16RxBSize, rc));
6403
6404 e1kCsRxLeave(pThis);
6405 return rc;
6406#endif /* E1K_WITH_RXD_CACHE */
6407}
6408
6409/**
6410 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6411 */
6412static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6413{
6414 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6415 int rc = e1kCanReceive(pThis);
6416
6417 if (RT_SUCCESS(rc))
6418 return VINF_SUCCESS;
6419 if (RT_UNLIKELY(cMillies == 0))
6420 return VERR_NET_NO_BUFFER_SPACE;
6421
6422 rc = VERR_INTERRUPTED;
6423 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, true);
6424 STAM_PROFILE_START(&pThis->StatRxOverflow, a);
6425 VMSTATE enmVMState;
6426 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pThis->CTX_SUFF(pDevIns))) == VMSTATE_RUNNING
6427 || enmVMState == VMSTATE_RUNNING_LS))
6428 {
6429 int rc2 = e1kCanReceive(pThis);
6430 if (RT_SUCCESS(rc2))
6431 {
6432 rc = VINF_SUCCESS;
6433 break;
6434 }
6435 E1kLogRel(("E1000: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6436 E1kLog(("%s: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", pThis->szPrf, cMillies));
6437 RTSemEventWait(pThis->hEventMoreRxDescAvail, cMillies);
6438 }
6439 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
6440 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, false);
6441
6442 return rc;
6443}
6444
6445
6446/**
6447 * Matches the packet addresses against Receive Address table. Looks for
6448 * exact matches only.
6449 *
6450 * @returns true if address matches.
6451 * @param pThis Pointer to the state structure.
6452 * @param pvBuf The ethernet packet.
6453 * @param cb Number of bytes available in the packet.
6454 * @thread EMT
6455 */
6456static bool e1kPerfectMatch(PE1KSTATE pThis, const void *pvBuf)
6457{
6458 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
6459 {
6460 E1KRAELEM* ra = pThis->aRecAddr.array + i;
6461
6462 /* Valid address? */
6463 if (ra->ctl & RA_CTL_AV)
6464 {
6465 Assert((ra->ctl & RA_CTL_AS) < 2);
6466 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6467 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6468 // pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6469 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6470 /*
6471 * Address Select:
6472 * 00b = Destination address
6473 * 01b = Source address
6474 * 10b = Reserved
6475 * 11b = Reserved
6476             * Since the Ethernet header is (DA, SA, len) we can use the address
6477             * select field as an index.
6478 */
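            /* E.g. AS=00b compares bytes 0..5 of the frame (destination address),
             * AS=01b bytes 6..11 (source address), since sizeof(ra->addr) is 6. */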
6479 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6480 ra->addr, sizeof(ra->addr)) == 0)
6481 return true;
6482 }
6483 }
6484
6485 return false;
6486}
6487
6488/**
6489 * Matches the packet addresses against Multicast Table Array.
6490 *
6491 * @remarks This is an imperfect match since it matches a subset of
6492 *          addresses rather than one exact address.
6493 *
6494 * @returns true if address matches.
6495 * @param pThis Pointer to the state structure.
6496 * @param pvBuf The ethernet packet.
6497 * @param cb Number of bytes available in the packet.
6498 * @thread EMT
6499 */
6500static bool e1kImperfectMatch(PE1KSTATE pThis, const void *pvBuf)
6501{
6502 /* Get bits 32..47 of destination address */
6503 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6504
6505 unsigned offset = GET_BITS(RCTL, MO);
6506 /*
6507 * offset means:
6508 * 00b = bits 36..47
6509 * 01b = bits 35..46
6510 * 10b = bits 34..45
6511 * 11b = bits 32..43
6512 */
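    /* Worked example (MO=00b, little-endian host): for destination address
     * 01:00:5E:00:00:FB the 16-bit word read above is 0xFB00; shifting right
     * by 4 keeps bits 36..47, giving 0x0FB0, which selects one of the 4096
     * bits in the Multicast Table Array. */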
6513 if (offset < 3)
6514 u16Bit = u16Bit >> (4 - offset);
6515 return ASMBitTest(pThis->auMTA, u16Bit & 0xFFF);
6516}
6517
6518/**
6519 * Determines if the packet is to be delivered to upper layer.
6520 *
6521 * The following filters are supported:
6522 * - Exact Unicast/Multicast
6523 * - Promiscuous Unicast/Multicast
6524 * - Multicast
6525 * - VLAN
6526 *
6527 * @returns true if packet is intended for this node.
6528 * @param pThis Pointer to the state structure.
6529 * @param pvBuf The ethernet packet.
6530 * @param cb Number of bytes available in the packet.
6531 * @param pStatus Bit field to store status bits.
6532 * @thread EMT
6533 */
6534static bool e1kAddressFilter(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6535{
6536 Assert(cb > 14);
6537 /* Assume that we fail to pass exact filter. */
6538 pStatus->fPIF = false;
6539 pStatus->fVP = false;
6540 /* Discard oversized packets */
6541 if (cb > E1K_MAX_RX_PKT_SIZE)
6542 {
6543 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6544 pThis->szPrf, cb, E1K_MAX_RX_PKT_SIZE));
6545 E1K_INC_CNT32(ROC);
6546 return false;
6547 }
6548 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6549 {
6550        /* When long packet reception is disabled, packets over 1522 bytes (1518-byte max frame plus a 4-byte VLAN tag) are discarded */
6551 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6552 pThis->szPrf, cb));
6553 E1K_INC_CNT32(ROC);
6554 return false;
6555 }
6556
6557 uint16_t *u16Ptr = (uint16_t*)pvBuf;
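    /* In an 802.1Q-tagged frame, 16-bit words 6 and 7 (byte offsets 12 and 14)
     * hold the TPID and the TCI (priority/CFI/VID) respectively. */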
6558 /* Compare TPID with VLAN Ether Type */
6559 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6560 {
6561 pStatus->fVP = true;
6562 /* Is VLAN filtering enabled? */
6563 if (RCTL & RCTL_VFE)
6564 {
6565 /* It is 802.1q packet indeed, let's filter by VID */
6566 if (RCTL & RCTL_CFIEN)
6567 {
6568 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", pThis->szPrf,
6569 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6570 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6571 !!(RCTL & RCTL_CFI)));
6572 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6573 {
6574 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6575 pThis->szPrf, E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6576 return false;
6577 }
6578 }
6579 else
6580 E1kLog3(("%s VLAN filter: VLAN=%d\n", pThis->szPrf,
6581 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6582 if (!ASMBitTest(pThis->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6583 {
6584 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6585 pThis->szPrf, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6586 return false;
6587 }
6588 }
6589 }
6590 /* Broadcast filtering */
6591 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6592 return true;
6593 E1kLog2(("%s Packet filter: not a broadcast\n", pThis->szPrf));
6594 if (e1kIsMulticast(pvBuf))
6595 {
6596 /* Is multicast promiscuous enabled? */
6597 if (RCTL & RCTL_MPE)
6598 return true;
6599 E1kLog2(("%s Packet filter: no promiscuous multicast\n", pThis->szPrf));
6600 /* Try perfect matches first */
6601 if (e1kPerfectMatch(pThis, pvBuf))
6602 {
6603 pStatus->fPIF = true;
6604 return true;
6605 }
6606 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6607 if (e1kImperfectMatch(pThis, pvBuf))
6608 return true;
6609 E1kLog2(("%s Packet filter: no imperfect match\n", pThis->szPrf));
6610 }
6611 else {
6612 /* Is unicast promiscuous enabled? */
6613 if (RCTL & RCTL_UPE)
6614 return true;
6615 E1kLog2(("%s Packet filter: no promiscuous unicast\n", pThis->szPrf));
6616 if (e1kPerfectMatch(pThis, pvBuf))
6617 {
6618 pStatus->fPIF = true;
6619 return true;
6620 }
6621 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6622 }
6623 E1kLog2(("%s Packet filter: packet discarded\n", pThis->szPrf));
6624 return false;
6625}
6626
6627/**
6628 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6629 */
6630static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6631{
6632 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6633 int rc = VINF_SUCCESS;
6634
6635 /*
6636 * Drop packets if the VM is not running yet/anymore.
6637 */
6638 VMSTATE enmVMState = PDMDevHlpVMState(STATE_TO_DEVINS(pThis));
6639 if ( enmVMState != VMSTATE_RUNNING
6640 && enmVMState != VMSTATE_RUNNING_LS)
6641 {
6642 E1kLog(("%s Dropping incoming packet as VM is not running.\n", pThis->szPrf));
6643 return VINF_SUCCESS;
6644 }
6645
6646    /* Discard incoming packets when receive is disabled, the device is locked, or the link is down */
6647 if (!(RCTL & RCTL_EN) || pThis->fLocked || !(STATUS & STATUS_LU))
6648 {
6649 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", pThis->szPrf));
6650 return VINF_SUCCESS;
6651 }
6652
6653 STAM_PROFILE_ADV_START(&pThis->StatReceive, a);
6654
6655 //if (!e1kCsEnter(pThis, RT_SRC_POS))
6656 // return VERR_PERMISSION_DENIED;
6657
6658 e1kPacketDump(pThis, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6659
6660 /* Update stats */
6661 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
6662 {
6663 E1K_INC_CNT32(TPR);
6664 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6665 e1kCsLeave(pThis);
6666 }
6667 STAM_PROFILE_ADV_START(&pThis->StatReceiveFilter, a);
6668 E1KRXDST status;
6669 RT_ZERO(status);
6670 bool fPassed = e1kAddressFilter(pThis, pvBuf, cb, &status);
6671 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveFilter, a);
6672 if (fPassed)
6673 {
6674 rc = e1kHandleRxPacket(pThis, pvBuf, cb, status);
6675 }
6676 //e1kCsLeave(pThis);
6677 STAM_PROFILE_ADV_STOP(&pThis->StatReceive, a);
6678
6679 return rc;
6680}
6681
6682
6683/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
6684
6685/**
6686 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
6687 */
6688static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6689{
6690 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, ILeds);
6691 int rc = VERR_PDM_LUN_NOT_FOUND;
6692
6693 if (iLUN == 0)
6694 {
6695 *ppLed = &pThis->led;
6696 rc = VINF_SUCCESS;
6697 }
6698 return rc;
6699}
6700
6701
6702/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
6703
6704/**
6705 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
6706 */
6707static DECLCALLBACK(int) e1kR3GetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6708{
6709 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6710 pThis->eeprom.getMac(pMac);
6711 return VINF_SUCCESS;
6712}
6713
6714/**
6715 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
6716 */
6717static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kR3GetLinkState(PPDMINETWORKCONFIG pInterface)
6718{
6719 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6720 if (STATUS & STATUS_LU)
6721 return PDMNETWORKLINKSTATE_UP;
6722 return PDMNETWORKLINKSTATE_DOWN;
6723}
6724
6725/**
6726 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
6727 */
6728static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6729{
6730 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6731
6732 E1kLog(("%s e1kR3SetLinkState: enmState=%d\n", pThis->szPrf, enmState));
6733 switch (enmState)
6734 {
6735 case PDMNETWORKLINKSTATE_UP:
6736 pThis->fCableConnected = true;
6737 /* If link was down, bring it up after a while. */
6738 if (!(STATUS & STATUS_LU))
6739 e1kBringLinkUpDelayed(pThis);
6740 break;
6741 case PDMNETWORKLINKSTATE_DOWN:
6742 pThis->fCableConnected = false;
6743 /* Always set the phy link state to down, regardless of the STATUS_LU bit.
6744 * We might have to set the link state before the driver initializes us. */
6745 Phy::setLinkStatus(&pThis->phy, false);
6746 /* If link was up, bring it down. */
6747 if (STATUS & STATUS_LU)
6748 e1kR3LinkDown(pThis);
6749 break;
6750 case PDMNETWORKLINKSTATE_DOWN_RESUME:
6751 /*
6752 * There is not much sense in bringing down the link if it has not come up yet.
6753             * If it is up though, we bring it down temporarily, then bring it up again.
6754 */
6755 if (STATUS & STATUS_LU)
6756 e1kR3LinkDownTemp(pThis);
6757 break;
6758 default:
6759 ;
6760 }
6761 return VINF_SUCCESS;
6762}
6763
6764
6765/* -=-=-=-=- PDMIBASE -=-=-=-=- */
6766
6767/**
6768 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6769 */
6770static DECLCALLBACK(void *) e1kR3QueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6771{
6772 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, IBase);
6773 Assert(&pThis->IBase == pInterface);
6774
6775 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
6776 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThis->INetworkDown);
6777 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThis->INetworkConfig);
6778 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThis->ILeds);
6779 return NULL;
6780}
6781
6782
6783/* -=-=-=-=- Saved State -=-=-=-=- */
6784
6785/**
6786 * Saves the configuration.
6787 *
6788 * @param pThis The E1K state.
6789 * @param pSSM The handle to the saved state.
6790 */
6791static void e1kSaveConfig(PE1KSTATE pThis, PSSMHANDLE pSSM)
6792{
6793 SSMR3PutMem(pSSM, &pThis->macConfigured, sizeof(pThis->macConfigured));
6794 SSMR3PutU32(pSSM, pThis->eChip);
6795}
6796
6797/**
6798 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
6799 */
6800static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
6801{
6802 RT_NOREF(uPass);
6803 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6804 e1kSaveConfig(pThis, pSSM);
6805 return VINF_SSM_DONT_CALL_AGAIN;
6806}
6807
6808/**
6809 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
6810 */
6811static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6812{
6813 RT_NOREF(pSSM);
6814 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6815
6816 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6817 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6818 return rc;
6819 e1kCsLeave(pThis);
6820 return VINF_SUCCESS;
6821#if 0
6822 /* 1) Prevent all threads from modifying the state and memory */
6823 //pThis->fLocked = true;
6824 /* 2) Cancel all timers */
6825#ifdef E1K_TX_DELAY
6826 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
6827#endif /* E1K_TX_DELAY */
6828//#ifdef E1K_USE_TX_TIMERS
6829 if (pThis->fTidEnabled)
6830 {
6831 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
6832#ifndef E1K_NO_TAD
6833 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
6834#endif /* E1K_NO_TAD */
6835 }
6836//#endif /* E1K_USE_TX_TIMERS */
6837#ifdef E1K_USE_RX_TIMERS
6838 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
6839 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
6840#endif /* E1K_USE_RX_TIMERS */
6841 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
6842 /* 3) Did I forget anything? */
6843 E1kLog(("%s Locked\n", pThis->szPrf));
6844 return VINF_SUCCESS;
6845#endif
6846}
6847
6848/**
6849 * @callback_method_impl{FNSSMDEVSAVEEXEC}
6850 */
6851static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6852{
6853 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6854
6855 e1kSaveConfig(pThis, pSSM);
6856 pThis->eeprom.save(pSSM);
6857 e1kDumpState(pThis);
6858 SSMR3PutMem(pSSM, pThis->auRegs, sizeof(pThis->auRegs));
6859 SSMR3PutBool(pSSM, pThis->fIntRaised);
6860 Phy::saveState(pSSM, &pThis->phy);
6861 SSMR3PutU32(pSSM, pThis->uSelectedReg);
6862 SSMR3PutMem(pSSM, pThis->auMTA, sizeof(pThis->auMTA));
6863 SSMR3PutMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6864 SSMR3PutMem(pSSM, pThis->auVFTA, sizeof(pThis->auVFTA));
6865 SSMR3PutU64(pSSM, pThis->u64AckedAt);
6866 SSMR3PutU16(pSSM, pThis->u16RxBSize);
6867 //SSMR3PutBool(pSSM, pThis->fDelayInts);
6868 //SSMR3PutBool(pSSM, pThis->fIntMaskUsed);
6869 SSMR3PutU16(pSSM, pThis->u16TxPktLen);
6870/** @todo State wrt the TSE buffer is incomplete, so there is little point in
6871 * saving this actually. */
6872 SSMR3PutMem(pSSM, pThis->aTxPacketFallback, pThis->u16TxPktLen);
6873 SSMR3PutBool(pSSM, pThis->fIPcsum);
6874 SSMR3PutBool(pSSM, pThis->fTCPcsum);
6875 SSMR3PutMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6876 SSMR3PutMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6877 SSMR3PutBool(pSSM, pThis->fVTag);
6878 SSMR3PutU16(pSSM, pThis->u16VTagTCI);
6879#ifdef E1K_WITH_TXD_CACHE
6880#if 0
6881 SSMR3PutU8(pSSM, pThis->nTxDFetched);
6882 SSMR3PutMem(pSSM, pThis->aTxDescriptors,
6883 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6884#else
6885 /*
6886 * There is no point in storing TX descriptor cache entries as we can simply
6887     * fetch them again. Moreover, the cache is normally empty when we
6888     * save the state. Store zero entries for compatibility.
6889 */
6890 SSMR3PutU8(pSSM, 0);
6891#endif
6892#endif /* E1K_WITH_TXD_CACHE */
6893/** @todo GSO requires some more state here. */
6894 E1kLog(("%s State has been saved\n", pThis->szPrf));
6895 return VINF_SUCCESS;
6896}
6897
6898#if 0
6899/**
6900 * @callback_method_impl{FNSSMDEVSAVEDONE}
6901 */
6902static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6903{
6904 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6905
6906 /* If VM is being powered off unlocking will result in assertions in PGM */
6907 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
6908 pThis->fLocked = false;
6909 else
6910 E1kLog(("%s VM is not running -- remain locked\n", pThis->szPrf));
6911 E1kLog(("%s Unlocked\n", pThis->szPrf));
6912 return VINF_SUCCESS;
6913}
6914#endif
6915
6916/**
6917 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
6918 */
6919static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6920{
6921 RT_NOREF(pSSM);
6922 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6923
6924 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6925 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6926 return rc;
6927 e1kCsLeave(pThis);
6928 return VINF_SUCCESS;
6929}
6930
6931/**
6932 * @callback_method_impl{FNSSMDEVLOADEXEC}
6933 */
6934static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
6935{
6936 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6937 int rc;
6938
6939 if ( uVersion != E1K_SAVEDSTATE_VERSION
6940#ifdef E1K_WITH_TXD_CACHE
6941 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
6942#endif /* E1K_WITH_TXD_CACHE */
6943 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
6944 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
6945 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
6946
6947 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
6948 || uPass != SSM_PASS_FINAL)
6949 {
6950 /* config checks */
6951 RTMAC macConfigured;
6952 rc = SSMR3GetMem(pSSM, &macConfigured, sizeof(macConfigured));
6953 AssertRCReturn(rc, rc);
6954 if ( memcmp(&macConfigured, &pThis->macConfigured, sizeof(macConfigured))
6955 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
6956 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
6957
6958 E1KCHIP eChip;
6959 rc = SSMR3GetU32(pSSM, &eChip);
6960 AssertRCReturn(rc, rc);
6961 if (eChip != pThis->eChip)
6962 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
6963 }
6964
6965 if (uPass == SSM_PASS_FINAL)
6966 {
6967 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
6968 {
6969 rc = pThis->eeprom.load(pSSM);
6970 AssertRCReturn(rc, rc);
6971 }
6972 /* the state */
6973 SSMR3GetMem(pSSM, &pThis->auRegs, sizeof(pThis->auRegs));
6974 SSMR3GetBool(pSSM, &pThis->fIntRaised);
6975 /** @todo PHY could be made a separate device with its own versioning */
6976 Phy::loadState(pSSM, &pThis->phy);
6977 SSMR3GetU32(pSSM, &pThis->uSelectedReg);
6978 SSMR3GetMem(pSSM, &pThis->auMTA, sizeof(pThis->auMTA));
6979 SSMR3GetMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6980 SSMR3GetMem(pSSM, &pThis->auVFTA, sizeof(pThis->auVFTA));
6981 SSMR3GetU64(pSSM, &pThis->u64AckedAt);
6982 SSMR3GetU16(pSSM, &pThis->u16RxBSize);
6983 //SSMR3GetBool(pSSM, pThis->fDelayInts);
6984 //SSMR3GetBool(pSSM, pThis->fIntMaskUsed);
6985 SSMR3GetU16(pSSM, &pThis->u16TxPktLen);
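        /* Clamp the restored length to the fallback buffer size to guard
         * against malformed saved-state data before reading the packet body. */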
6986 if (pThis->u16TxPktLen > sizeof(pThis->aTxPacketFallback))
6987 pThis->u16TxPktLen = sizeof(pThis->aTxPacketFallback);
6988 SSMR3GetMem(pSSM, &pThis->aTxPacketFallback[0], pThis->u16TxPktLen);
6989 SSMR3GetBool(pSSM, &pThis->fIPcsum);
6990 SSMR3GetBool(pSSM, &pThis->fTCPcsum);
6991 SSMR3GetMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6992 rc = SSMR3GetMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6993 AssertRCReturn(rc, rc);
6994 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
6995 {
6996 SSMR3GetBool(pSSM, &pThis->fVTag);
6997 rc = SSMR3GetU16(pSSM, &pThis->u16VTagTCI);
6998 AssertRCReturn(rc, rc);
6999 }
7000 else
7001 {
7002 pThis->fVTag = false;
7003 pThis->u16VTagTCI = 0;
7004 }
7005#ifdef E1K_WITH_TXD_CACHE
7006 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
7007 {
7008 rc = SSMR3GetU8(pSSM, &pThis->nTxDFetched);
7009 AssertRCReturn(rc, rc);
7010 if (pThis->nTxDFetched)
7011 SSMR3GetMem(pSSM, pThis->aTxDescriptors,
7012 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
7013 }
7014 else
7015 pThis->nTxDFetched = 0;
7016 /*
7017         * @todo Perhaps we should not store the TXD cache as the entries can be
7018         * simply fetched again from the guest's memory. Or can't they?
7019 */
7020#endif /* E1K_WITH_TXD_CACHE */
7021#ifdef E1K_WITH_RXD_CACHE
7022 /*
7023 * There is no point in storing the RX descriptor cache in the saved
7024 * state, we just need to make sure it is empty.
7025 */
7026 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
7027#endif /* E1K_WITH_RXD_CACHE */
7028 /* derived state */
7029 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
7030
7031 E1kLog(("%s State has been restored\n", pThis->szPrf));
7032 e1kDumpState(pThis);
7033 }
7034 return VINF_SUCCESS;
7035}
7036
7037/**
7038 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
7039 */
7040static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7041{
7042 RT_NOREF(pSSM);
7043 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7044
7045 /* Update promiscuous mode */
7046 if (pThis->pDrvR3)
7047 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3,
7048 !!(RCTL & (RCTL_UPE | RCTL_MPE)));
7049
7050 /*
7051 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
7052     * passed to us. We go through all this stuff if the link was up and we
7053     * weren't teleported.
7054 */
7055 if ( (STATUS & STATUS_LU)
7056 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
7057 && pThis->cMsLinkUpDelay)
7058 {
7059 e1kR3LinkDownTemp(pThis);
7060 }
7061 return VINF_SUCCESS;
7062}
7063
7064
7065
7066/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
7067
7068/**
7069 * @callback_method_impl{FNRTSTRFORMATTYPE}
7070 */
7071static DECLCALLBACK(size_t) e1kFmtRxDesc(PFNRTSTROUTPUT pfnOutput,
7072 void *pvArgOutput,
7073 const char *pszType,
7074 void const *pvValue,
7075 int cchWidth,
7076 int cchPrecision,
7077 unsigned fFlags,
7078 void *pvUser)
7079{
7080 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7081 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
7082 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
7083 if (!pDesc)
7084 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
7085
7086 size_t cbPrintf = 0;
7087 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
7088 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
7089 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
7090 pDesc->status.fPIF ? "PIF" : "pif",
7091 pDesc->status.fIPCS ? "IPCS" : "ipcs",
7092 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
7093 pDesc->status.fVP ? "VP" : "vp",
7094 pDesc->status.fIXSM ? "IXSM" : "ixsm",
7095 pDesc->status.fEOP ? "EOP" : "eop",
7096 pDesc->status.fDD ? "DD" : "dd",
7097 pDesc->status.fRXE ? "RXE" : "rxe",
7098 pDesc->status.fIPE ? "IPE" : "ipe",
7099 pDesc->status.fTCPE ? "TCPE" : "tcpe",
7100 pDesc->status.fCE ? "CE" : "ce",
7101 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
7102 E1K_SPEC_VLAN(pDesc->status.u16Special),
7103 E1K_SPEC_PRI(pDesc->status.u16Special));
7104 return cbPrintf;
7105}
7106
7107/**
7108 * @callback_method_impl{FNRTSTRFORMATTYPE}
7109 */
7110static DECLCALLBACK(size_t) e1kFmtTxDesc(PFNRTSTROUTPUT pfnOutput,
7111 void *pvArgOutput,
7112 const char *pszType,
7113 void const *pvValue,
7114 int cchWidth,
7115 int cchPrecision,
7116 unsigned fFlags,
7117 void *pvUser)
7118{
7119 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7120 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
7121 E1KTXDESC *pDesc = (E1KTXDESC*)pvValue;
7122 if (!pDesc)
7123 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
7124
7125 size_t cbPrintf = 0;
7126 switch (e1kGetDescType(pDesc))
7127 {
7128 case E1K_DTYP_CONTEXT:
7129 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
7130 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
7131 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
7132 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
7133 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
7134 pDesc->context.dw2.fIDE ? " IDE":"",
7135 pDesc->context.dw2.fRS ? " RS" :"",
7136 pDesc->context.dw2.fTSE ? " TSE":"",
7137 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
7138 pDesc->context.dw2.fTCP ? "TCP":"UDP",
7139 pDesc->context.dw2.u20PAYLEN,
7140 pDesc->context.dw3.u8HDRLEN,
7141 pDesc->context.dw3.u16MSS,
7142 pDesc->context.dw3.fDD?"DD":"");
7143 break;
7144 case E1K_DTYP_DATA:
7145 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
7146 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
7147 pDesc->data.u64BufAddr,
7148 pDesc->data.cmd.u20DTALEN,
7149 pDesc->data.cmd.fIDE ? " IDE" :"",
7150 pDesc->data.cmd.fVLE ? " VLE" :"",
7151 pDesc->data.cmd.fRPS ? " RPS" :"",
7152 pDesc->data.cmd.fRS ? " RS" :"",
7153 pDesc->data.cmd.fTSE ? " TSE" :"",
7154 pDesc->data.cmd.fIFCS? " IFCS":"",
7155 pDesc->data.cmd.fEOP ? " EOP" :"",
7156 pDesc->data.dw3.fDD ? " DD" :"",
7157 pDesc->data.dw3.fEC ? " EC" :"",
7158 pDesc->data.dw3.fLC ? " LC" :"",
7159 pDesc->data.dw3.fTXSM? " TXSM":"",
7160 pDesc->data.dw3.fIXSM? " IXSM":"",
7161 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
7162 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
7163 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
7164 break;
7165 case E1K_DTYP_LEGACY:
7166 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
7167 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
7168 pDesc->data.u64BufAddr,
7169 pDesc->legacy.cmd.u16Length,
7170 pDesc->legacy.cmd.fIDE ? " IDE" :"",
7171 pDesc->legacy.cmd.fVLE ? " VLE" :"",
7172 pDesc->legacy.cmd.fRPS ? " RPS" :"",
7173 pDesc->legacy.cmd.fRS ? " RS" :"",
7174 pDesc->legacy.cmd.fIC ? " IC" :"",
7175 pDesc->legacy.cmd.fIFCS? " IFCS":"",
7176 pDesc->legacy.cmd.fEOP ? " EOP" :"",
7177 pDesc->legacy.dw3.fDD ? " DD" :"",
7178 pDesc->legacy.dw3.fEC ? " EC" :"",
7179 pDesc->legacy.dw3.fLC ? " LC" :"",
7180 pDesc->legacy.cmd.u8CSO,
7181 pDesc->legacy.dw3.u8CSS,
7182 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
7183 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
7184 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
7185 break;
7186 default:
7187 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
7188 break;
7189 }
7190
7191 return cbPrintf;
7192}
7193
7194/** Initializes debug helpers (logging format types). */
7195static int e1kInitDebugHelpers(void)
7196{
7197 int rc = VINF_SUCCESS;
7198 static bool s_fHelpersRegistered = false;
7199 if (!s_fHelpersRegistered)
7200 {
7201 s_fHelpersRegistered = true;
7202 rc = RTStrFormatTypeRegister("e1krxd", e1kFmtRxDesc, NULL);
7203 AssertRCReturn(rc, rc);
7204 rc = RTStrFormatTypeRegister("e1ktxd", e1kFmtTxDesc, NULL);
7205 AssertRCReturn(rc, rc);
7206 }
7207 return rc;
7208}
7209
7210/**
7211 * Status info callback.
7212 *
7213 * @param pDevIns The device instance.
7214 * @param pHlp The output helpers.
7215 * @param pszArgs The arguments.
7216 */
7217static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
7218{
7219 RT_NOREF(pszArgs);
7220 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7221 unsigned i;
7222 // bool fRcvRing = false;
7223 // bool fXmtRing = false;
7224
7225 /*
7226 * Parse args.
7227 if (pszArgs)
7228 {
7229 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
7230 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
7231 }
7232 */
7233
7234 /*
7235 * Show info.
7236 */
7237 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%RTiop mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7238 pDevIns->iInstance, pThis->IOPortBase, pThis->addrMMReg,
7239 &pThis->macConfigured, g_aChips[pThis->eChip].pcszName,
7240 pThis->fRCEnabled ? " GC" : "", pThis->fR0Enabled ? " R0" : "");
7241
7242 e1kCsEnter(pThis, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
7243
7244 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7245 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", g_aE1kRegMap[i].abbrev, pThis->auRegs[i]);
7246
7247 for (i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
7248 {
7249 E1KRAELEM* ra = pThis->aRecAddr.array + i;
7250 if (ra->ctl & RA_CTL_AV)
7251 {
7252 const char *pcszTmp;
7253 switch (ra->ctl & RA_CTL_AS)
7254 {
7255 case 0: pcszTmp = "DST"; break;
7256 case 1: pcszTmp = "SRC"; break;
7257 default: pcszTmp = "reserved";
7258 }
7259 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7260 }
7261 }
7262 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7263 uint32_t rdh = RDH;
7264 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7265 for (i = 0; i < cDescs; ++i)
7266 {
7267 E1KRXDESC desc;
7268 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7269 &desc, sizeof(desc));
7270 if (i == rdh)
7271 pHlp->pfnPrintf(pHlp, ">>> ");
7272 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7273 }
7274#ifdef E1K_WITH_RXD_CACHE
7275 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7276 pThis->iRxDCurrent, RDH, pThis->nRxDFetched, E1K_RXD_CACHE_SIZE);
7277 if (rdh > pThis->iRxDCurrent)
7278 rdh -= pThis->iRxDCurrent;
7279 else
7280 rdh = cDescs + rdh - pThis->iRxDCurrent;
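    /* 'rdh' now holds the ring index of the first cached descriptor, so the
     * guest physical addresses printed below line up with the cached copies. */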
7281 for (i = 0; i < pThis->nRxDFetched; ++i)
7282 {
7283 if (i == pThis->iRxDCurrent)
7284 pHlp->pfnPrintf(pHlp, ">>> ");
7285 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7286 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7287 &pThis->aRxDescriptors[i]);
7288 }
7289#endif /* E1K_WITH_RXD_CACHE */
7290
7291 cDescs = TDLEN / sizeof(E1KTXDESC);
7292 uint32_t tdh = TDH;
7293 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7294 for (i = 0; i < cDescs; ++i)
7295 {
7296 E1KTXDESC desc;
7297 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7298 &desc, sizeof(desc));
7299 if (i == tdh)
7300 pHlp->pfnPrintf(pHlp, ">>> ");
7301 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7302 }
7303#ifdef E1K_WITH_TXD_CACHE
7304 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7305 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE);
7306 if (tdh > pThis->iTxDCurrent)
7307 tdh -= pThis->iTxDCurrent;
7308 else
7309 tdh = cDescs + tdh - pThis->iTxDCurrent;
7310 for (i = 0; i < pThis->nTxDFetched; ++i)
7311 {
7312 if (i == pThis->iTxDCurrent)
7313 pHlp->pfnPrintf(pHlp, ">>> ");
7314 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7315 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7316 &pThis->aTxDescriptors[i]);
7317 }
7318#endif /* E1K_WITH_TXD_CACHE */
7319
7320
7321#ifdef E1K_INT_STATS
7322 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pThis->uStatIntTry);
7323 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pThis->uStatInt);
7324 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pThis->uStatIntLower);
7325 pHlp->pfnPrintf(pHlp, "ICR outside ISR : %d\n", pThis->uStatNoIntICR);
7326 pHlp->pfnPrintf(pHlp, "IMS raised ints : %d\n", pThis->uStatIntIMS);
7327 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pThis->uStatIntSkip);
7328 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pThis->uStatIntMasked);
7329 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pThis->uStatIntEarly);
7330 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pThis->uStatIntLate);
7331 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pThis->iStatIntLost);
7332 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pThis->uStatIntRx);
7333 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pThis->uStatIntTx);
7334 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pThis->uStatIntICS);
7335 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pThis->uStatIntRDTR);
7336 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pThis->uStatIntRXDMT0);
7337 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pThis->uStatIntTXQE);
7338 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pThis->uStatTxIDE);
7339 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pThis->uStatTxDelayed);
7340 pHlp->pfnPrintf(pHlp, "TX delayed expired: %d\n", pThis->uStatTxDelayExp);
7341 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pThis->uStatTxNoRS);
7342 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pThis->uStatTAD);
7343 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pThis->uStatTID);
7344 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pThis->uStatRAD);
7345 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pThis->uStatRID);
7346 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pThis->uStatDescCtx);
7347 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pThis->uStatDescDat);
7348 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pThis->uStatDescLeg);
7349 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pThis->uStatRxFrm);
7350 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pThis->uStatTxFrm);
7351 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pThis->uStatTx1514);
7352 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pThis->uStatTx2962);
7353 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pThis->uStatTx4410);
7354 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pThis->uStatTx5858);
7355 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pThis->uStatTx7306);
7356 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pThis->uStatTx8754);
7357 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pThis->uStatTx16384);
7358 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pThis->uStatTx32768);
7359 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pThis->uStatTxLarge);
7360#endif /* E1K_INT_STATS */
7361
7362 e1kCsLeave(pThis);
7363}
7364
7365
7366
7367/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
7368
7369/**
7370 * Detach notification.
7371 *
7372 * One port on the network card has been disconnected from the network.
7373 *
7374 * @param pDevIns The device instance.
7375 * @param iLUN The logical unit which is being detached.
7376 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7377 */
7378static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7379{
7380 RT_NOREF(fFlags);
7381 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7382 Log(("%s e1kR3Detach:\n", pThis->szPrf));
7383
7384 AssertLogRelReturnVoid(iLUN == 0);
7385
7386 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7387
7388    /** @todo r=pritesh still need to check whether I missed
7389     * cleaning up something in this function
7390 */
7391
7392 /*
7393 * Zero some important members.
7394 */
7395 pThis->pDrvBase = NULL;
7396 pThis->pDrvR3 = NULL;
7397 pThis->pDrvR0 = NIL_RTR0PTR;
7398 pThis->pDrvRC = NIL_RTRCPTR;
7399
7400 PDMCritSectLeave(&pThis->cs);
7401}
7402
7403/**
7404 * Attach the network driver.
7405 *
7406 * One port on the network card has been connected to a network.
7407 *
7408 * @returns VBox status code.
7409 * @param pDevIns The device instance.
7410 * @param iLUN The logical unit which is being attached.
7411 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7412 *
7413 * @remarks This code path is not used during construction.
7414 */
7415static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7416{
7417 RT_NOREF(fFlags);
7418 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7419 LogFlow(("%s e1kR3Attach:\n", pThis->szPrf));
7420
7421 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7422
7423 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7424
7425 /*
7426 * Attach the driver.
7427 */
7428 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7429 if (RT_SUCCESS(rc))
7430 {
7431 if (rc == VINF_NAT_DNS)
7432 {
7433#ifdef RT_OS_LINUX
7434 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7435 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7436#else
7437 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7438 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7439#endif
7440 }
7441 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7442 AssertMsgStmt(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7443 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7444 if (RT_SUCCESS(rc))
7445 {
7446 PPDMIBASER0 pBaseR0 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0);
7447 pThis->pDrvR0 = pBaseR0 ? pBaseR0->pfnQueryInterface(pBaseR0, PDMINETWORKUP_IID) : NIL_RTR0PTR;
7448
7449 PPDMIBASERC pBaseRC = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC);
7450            pThis->pDrvRC = pBaseRC ? pBaseRC->pfnQueryInterface(pBaseRC, PDMINETWORKUP_IID) : NIL_RTRCPTR;
7451 }
7452 }
7453 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7454 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7455 {
7456 /* This should never happen because this function is not called
7457 * if there is no driver to attach! */
7458 Log(("%s No attached driver!\n", pThis->szPrf));
7459 }
7460
7461 /*
7462     * Temporarily set the link down if it was up so that the guest
7463     * will know that we have changed the configuration of the
7464     * network card.
7465 */
7466 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7467 e1kR3LinkDownTemp(pThis);
7468
7469 PDMCritSectLeave(&pThis->cs);
7470 return rc;
7471
7472}
7473
7474/**
7475 * @copydoc FNPDMDEVPOWEROFF
7476 */
7477static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7478{
7479 /* Poke thread waiting for buffer space. */
7480 e1kWakeupReceive(pDevIns);
7481}
7482
7483/**
7484 * @copydoc FNPDMDEVRESET
7485 */
7486static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7487{
7488 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7489#ifdef E1K_TX_DELAY
7490 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
7491#endif /* E1K_TX_DELAY */
7492 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
7493 e1kCancelTimer(pThis, pThis->CTX_SUFF(pLUTimer));
7494 e1kXmitFreeBuf(pThis);
7495 pThis->u16TxPktLen = 0;
7496 pThis->fIPcsum = false;
7497 pThis->fTCPcsum = false;
7498 pThis->fIntMaskUsed = false;
7499 pThis->fDelayInts = false;
7500 pThis->fLocked = false;
7501 pThis->u64AckedAt = 0;
7502 e1kHardReset(pThis);
7503}
7504
7505/**
7506 * @copydoc FNPDMDEVSUSPEND
7507 */
7508static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7509{
7510 /* Poke thread waiting for buffer space. */
7511 e1kWakeupReceive(pDevIns);
7512}
7513
7514/**
7515 * Device relocation callback.
7516 *
7517 * When this callback is called, the device instance data (and the GC
7518 * component, if the device has one) is being relocated, and/or the
7519 * selectors have been changed. The device must use this chance to perform the
7520 * necessary pointer relocations and data updates.
7521 *
7522 * Before the GC code is executed the first time, this function will be
7523 * called with a 0 delta so that GC pointer calculations can be done in one place.
7524 *
7525 * @param pDevIns Pointer to the device instance.
7526 * @param offDelta The relocation delta relative to the old location.
7527 *
7528 * @remark A relocation CANNOT fail.
7529 */
7530static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7531{
7532 RT_NOREF(offDelta);
7533 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7534 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7535 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7536 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7537#ifdef E1K_USE_RX_TIMERS
7538 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7539 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7540#endif /* E1K_USE_RX_TIMERS */
7541//#ifdef E1K_USE_TX_TIMERS
7542 if (pThis->fTidEnabled)
7543 {
7544 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7545# ifndef E1K_NO_TAD
7546 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7547# endif /* E1K_NO_TAD */
7548 }
7549//#endif /* E1K_USE_TX_TIMERS */
7550#ifdef E1K_TX_DELAY
7551 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7552#endif /* E1K_TX_DELAY */
7553 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7554 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7555}
7556
7557/**
7558 * Destruct a device instance.
7559 *
7560 * We need to free non-VM resources only.
7561 *
7562 * @returns VBox status code.
7563 * @param pDevIns The device instance data.
7564 * @thread EMT
7565 */
7566static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7567{
7568 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7569 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7570
7571 e1kDumpState(pThis);
7572 E1kLog(("%s Destroying instance\n", pThis->szPrf));
7573 if (PDMCritSectIsInitialized(&pThis->cs))
7574 {
7575 if (pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
7576 {
7577 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
7578 RTSemEventDestroy(pThis->hEventMoreRxDescAvail);
7579 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7580 }
7581#ifdef E1K_WITH_TX_CS
7582 PDMR3CritSectDelete(&pThis->csTx);
7583#endif /* E1K_WITH_TX_CS */
7584 PDMR3CritSectDelete(&pThis->csRx);
7585 PDMR3CritSectDelete(&pThis->cs);
7586 }
7587 return VINF_SUCCESS;
7588}
7589
7590
7591/**
7592 * Set PCI configuration space registers.
7593 *
7594 * @param   pPciDev     Pointer to the PCI device structure to configure.
 *          @param   eChip       Chip type index into g_aChips, selecting the PCI IDs.
7595 * @thread EMT
7596 */
7597static DECLCALLBACK(void) e1kConfigurePciDev(PPDMPCIDEV pPciDev, E1KCHIP eChip)
7598{
7599 Assert(eChip < RT_ELEMENTS(g_aChips));
7600 /* Configure PCI Device, assume 32-bit mode ******************************/
7601 PCIDevSetVendorId(pPciDev, g_aChips[eChip].uPCIVendorId);
7602 PCIDevSetDeviceId(pPciDev, g_aChips[eChip].uPCIDeviceId);
7603 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_aChips[eChip].uPCISubsystemVendorId);
7604 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_aChips[eChip].uPCISubsystemId);
7605
7606 PCIDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7607 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7608 PCIDevSetWord( pPciDev, VBOX_PCI_STATUS,
7609 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7610 /* Stepping A2 */
7611 PCIDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7612 /* Ethernet adapter */
7613 PCIDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7614 PCIDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7615 /* normal single function Ethernet controller */
7616 PCIDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7617 /* Memory Register Base Address */
7618 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7619 /* Memory Flash Base Address */
7620 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7621 /* IO Register Base Address */
7622 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7623 /* Expansion ROM Base Address */
7624 PCIDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7625 /* Capabilities Pointer */
7626 PCIDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7627 /* Interrupt Pin: INTA# */
7628 PCIDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7629 /* Max_Lat/Min_Gnt: very high priority and time slice */
7630 PCIDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7631 PCIDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7632
7633 /* PCI Power Management Registers ****************************************/
7634 /* Capability ID: PCI Power Management Registers */
7635 PCIDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7636 /* Next Item Pointer: PCI-X */
7637 PCIDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7638 /* Power Management Capabilities: PM disabled, DSI */
7639 PCIDevSetWord( pPciDev, 0xDC + 2,
7640 0x0002 | VBOX_PCI_PM_CAP_DSI);
7641 /* Power Management Control / Status Register: PM disabled */
7642 PCIDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7643 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7644 PCIDevSetByte( pPciDev, 0xDC + 6, 0x00);
7645 /* Data Register: PM disabled, always 0 */
7646 PCIDevSetByte( pPciDev, 0xDC + 7, 0x00);
7647
7648 /* PCI-X Configuration Registers *****************************************/
7649 /* Capability ID: PCI-X Configuration Registers */
7650 PCIDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7651#ifdef E1K_WITH_MSI
7652 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x80);
7653#else
7654 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7655 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7656#endif
7657 /* PCI-X Command: Enable Relaxed Ordering */
7658 PCIDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7659 /* PCI-X Status: 32-bit, 66MHz*/
7660 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7661 PCIDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
7662}
7663
7664/**
7665 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7666 */
7667static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7668{
7669 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7670 int rc;
7671 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7672
7673 /*
7674 * Initialize the instance data (state).
7675 * Note! Caller has initialized it to ZERO already.
7676 */
7677 RTStrPrintf(pThis->szPrf, sizeof(pThis->szPrf), "E1000#%d", iInstance);
7678 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", pThis->szPrf, sizeof(E1KRXDESC)));
7679 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7680 pThis->pDevInsR3 = pDevIns;
7681 pThis->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
7682 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7683 pThis->u16TxPktLen = 0;
7684 pThis->fIPcsum = false;
7685 pThis->fTCPcsum = false;
7686 pThis->fIntMaskUsed = false;
7687 pThis->fDelayInts = false;
7688 pThis->fLocked = false;
7689 pThis->u64AckedAt = 0;
7690 pThis->led.u32Magic = PDMLED_MAGIC;
7691 pThis->u32PktNo = 1;
7692
7693 /* Interfaces */
7694 pThis->IBase.pfnQueryInterface = e1kR3QueryInterface;
7695
7696 pThis->INetworkDown.pfnWaitReceiveAvail = e1kR3NetworkDown_WaitReceiveAvail;
7697 pThis->INetworkDown.pfnReceive = e1kR3NetworkDown_Receive;
7698 pThis->INetworkDown.pfnXmitPending = e1kR3NetworkDown_XmitPending;
7699
7700 pThis->ILeds.pfnQueryStatusLed = e1kR3QueryStatusLed;
7701
7702 pThis->INetworkConfig.pfnGetMac = e1kR3GetMac;
7703 pThis->INetworkConfig.pfnGetLinkState = e1kR3GetLinkState;
7704 pThis->INetworkConfig.pfnSetLinkState = e1kR3SetLinkState;
7705
7706 /*
7707 * Internal validations.
7708 */
7709 for (uint32_t iReg = 1; iReg < E1K_NUM_OF_BINARY_SEARCHABLE; iReg++)
7710 AssertLogRelMsgReturn( g_aE1kRegMap[iReg].offset > g_aE1kRegMap[iReg - 1].offset
7711 && g_aE1kRegMap[iReg].offset + g_aE1kRegMap[iReg].size
7712 >= g_aE1kRegMap[iReg - 1].offset + g_aE1kRegMap[iReg - 1].size,
7713 ("%s@%#xLB%#x vs %s@%#xLB%#x\n",
7714 g_aE1kRegMap[iReg].abbrev, g_aE1kRegMap[iReg].offset, g_aE1kRegMap[iReg].size,
7715 g_aE1kRegMap[iReg - 1].abbrev, g_aE1kRegMap[iReg - 1].offset, g_aE1kRegMap[iReg - 1].size),
7716 VERR_INTERNAL_ERROR_4);
7717
7718 /*
7719 * Validate configuration.
7720 */
7721 if (!CFGMR3AreValuesValid(pCfg, "MAC\0" "CableConnected\0" "AdapterType\0"
7722 "LineSpeed\0" "GCEnabled\0" "R0Enabled\0"
7723 "ItrEnabled\0" "ItrRxEnabled\0" "TidEnabled\0"
7724 "EthernetCRC\0" "GSOEnabled\0" "LinkUpDelay\0"))
7725 return PDMDEV_SET_ERROR(pDevIns, VERR_PDM_DEVINS_UNKNOWN_CFG_VALUES,
7726 N_("Invalid configuration for E1000 device"));
7727
7728 /** @todo LineSpeed unused! */
7729
7730 /* Get config params */
7731 rc = CFGMR3QueryBytes(pCfg, "MAC", pThis->macConfigured.au8, sizeof(pThis->macConfigured.au8));
7732 if (RT_FAILURE(rc))
7733 return PDMDEV_SET_ERROR(pDevIns, rc,
7734 N_("Configuration error: Failed to get MAC address"));
7735 rc = CFGMR3QueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
7736 if (RT_FAILURE(rc))
7737 return PDMDEV_SET_ERROR(pDevIns, rc,
7738 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7739 rc = CFGMR3QueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
7740 if (RT_FAILURE(rc))
7741 return PDMDEV_SET_ERROR(pDevIns, rc,
7742 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7743 Assert(pThis->eChip <= E1K_CHIP_82545EM);
7744 rc = CFGMR3QueryBoolDef(pCfg, "GCEnabled", &pThis->fRCEnabled, true);
7745 if (RT_FAILURE(rc))
7746 return PDMDEV_SET_ERROR(pDevIns, rc,
7747 N_("Configuration error: Failed to get the value of 'GCEnabled'"));
7748
7749 rc = CFGMR3QueryBoolDef(pCfg, "R0Enabled", &pThis->fR0Enabled, true);
7750 if (RT_FAILURE(rc))
7751 return PDMDEV_SET_ERROR(pDevIns, rc,
7752 N_("Configuration error: Failed to get the value of 'R0Enabled'"));
7753
7754 rc = CFGMR3QueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
7755 if (RT_FAILURE(rc))
7756 return PDMDEV_SET_ERROR(pDevIns, rc,
7757 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7758
7759 rc = CFGMR3QueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
7760 if (RT_FAILURE(rc))
7761 return PDMDEV_SET_ERROR(pDevIns, rc,
7762 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
7763
7764 rc = CFGMR3QueryBoolDef(pCfg, "ItrEnabled", &pThis->fItrEnabled, false);
7765 if (RT_FAILURE(rc))
7766 return PDMDEV_SET_ERROR(pDevIns, rc,
7767 N_("Configuration error: Failed to get the value of 'ItrEnabled'"));
7768
7769 rc = CFGMR3QueryBoolDef(pCfg, "ItrRxEnabled", &pThis->fItrRxEnabled, true);
7770 if (RT_FAILURE(rc))
7771 return PDMDEV_SET_ERROR(pDevIns, rc,
7772 N_("Configuration error: Failed to get the value of 'ItrRxEnabled'"));
7773
7774 rc = CFGMR3QueryBoolDef(pCfg, "TidEnabled", &pThis->fTidEnabled, false);
7775 if (RT_FAILURE(rc))
7776 return PDMDEV_SET_ERROR(pDevIns, rc,
7777 N_("Configuration error: Failed to get the value of 'TidEnabled'"));
7778
7779 rc = CFGMR3QueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 3000); /* ms */
7780 if (RT_FAILURE(rc))
7781 return PDMDEV_SET_ERROR(pDevIns, rc,
7782 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
7783 Assert(pThis->cMsLinkUpDelay <= 300000); /* at most 5 minutes */
7784 if (pThis->cMsLinkUpDelay > 5000)
7785 LogRel(("%s: WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
7786 else if (pThis->cMsLinkUpDelay == 0)
7787 LogRel(("%s: WARNING! Link up delay is disabled!\n", pThis->szPrf));
7788
7789 LogRel(("%s: Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s Itr=%s ItrRx=%s TID=%s R0=%s GC=%s\n", pThis->szPrf,
7790 g_aChips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
7791 pThis->fEthernetCRC ? "on" : "off",
7792 pThis->fGSOEnabled ? "enabled" : "disabled",
7793 pThis->fItrEnabled ? "enabled" : "disabled",
7794 pThis->fItrRxEnabled ? "enabled" : "disabled",
7795 pThis->fTidEnabled ? "enabled" : "disabled",
7796 pThis->fR0Enabled ? "enabled" : "disabled",
7797 pThis->fRCEnabled ? "enabled" : "disabled"));
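    /*
     * Illustrative note: the keys queried above live under this instance's CFGM
     * configuration node (e.g. Devices/e1000/0/Config/). From the host side they
     * can typically be tweaked via extradata; the exact paths below are an
     * assumption for illustration, not taken from this file:
     *     VBoxManage setextradata "VM" "VBoxInternal/Devices/e1000/0/Config/LinkUpDelay" 0
     *     VBoxManage setextradata "VM" "VBoxInternal/Devices/e1000/0/Config/ItrEnabled"  1
     * MAC, CableConnected and AdapterType are normally filled in by Main from the
     * NIC settings.
     */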
7798
7799 /* Initialize the EEPROM. */
7800 pThis->eeprom.init(pThis->macConfigured);
7801
7802 /* Initialize internal PHY. */
7803 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
7804
7805 /* Initialize critical sections. We do our own locking. */
7806 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
7807 AssertRCReturn(rc, rc);
7808
7809 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "E1000#%d", iInstance);
7810 if (RT_FAILURE(rc))
7811 return rc;
7812 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "E1000#%dRX", iInstance);
7813 if (RT_FAILURE(rc))
7814 return rc;
7815#ifdef E1K_WITH_TX_CS
7816 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "E1000#%dTX", iInstance);
7817 if (RT_FAILURE(rc))
7818 return rc;
7819#endif /* E1K_WITH_TX_CS */
7820
7821 /* Saved state registration. */
7822 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
7823 NULL, e1kLiveExec, NULL,
7824 e1kSavePrep, e1kSaveExec, NULL,
7825 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
7826 if (RT_FAILURE(rc))
7827 return rc;
7828
7829 /* Set PCI config registers and register ourselves with the PCI bus. */
7830 e1kConfigurePciDev(&pThis->pciDevice, pThis->eChip);
7831 rc = PDMDevHlpPCIRegister(pDevIns, &pThis->pciDevice);
7832 if (RT_FAILURE(rc))
7833 return rc;
7834
7835#ifdef E1K_WITH_MSI
7836 PDMMSIREG MsiReg;
7837 RT_ZERO(MsiReg);
7838 MsiReg.cMsiVectors = 1;
7839 MsiReg.iMsiCapOffset = 0x80;
7840 MsiReg.iMsiNextOffset = 0x0;
7841 MsiReg.fMsi64bit = false;
7842 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
7843 AssertRCReturn(rc, rc);
7844#endif
7845
7846
7847 /* Map our registers to memory space (region 0, see e1kConfigurePciDev) */
7848 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, e1kMap);
7849 if (RT_FAILURE(rc))
7850 return rc;
7851#ifdef E1K_WITH_PREREG_MMIO
7852 rc = PDMDevHlpMMIOExPreRegister(pDevIns, 0, E1K_MM_SIZE, IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD, "E1000",
7853 NULL /*pvUserR3*/, e1kMMIOWrite, e1kMMIORead, NULL /*pfnFillR3*/,
7854 NIL_RTR0PTR /*pvUserR0*/, pThis->fR0Enabled ? "e1kMMIOWrite" : NULL,
7855 pThis->fR0Enabled ? "e1kMMIORead" : NULL, NULL /*pszFillR0*/,
7856 NIL_RTRCPTR /*pvUserRC*/, pThis->fRCEnabled ? "e1kMMIOWrite" : NULL,
7857 pThis->fRCEnabled ? "e1kMMIORead" : NULL, NULL /*pszFillRC*/);
7858 AssertLogRelRCReturn(rc, rc);
7859#endif
7860 /* Map our registers to IO space (region 2, see e1kConfigurePciDev) */
7861 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, E1K_IOPORT_SIZE, PCI_ADDRESS_SPACE_IO, e1kMap);
7862 if (RT_FAILURE(rc))
7863 return rc;
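    /*
     * Note (summary of the mapping above, assuming the BAR layout established in
     * e1kConfigurePciDev): region 0 is the E1K_MM_SIZE MMIO register window
     * (BASE_ADDRESS_0), region 1 would be the flash BAR (not registered here),
     * and region 2 is the E1K_IOPORT_SIZE I/O window (BASE_ADDRESS_2). Both
     * registered regions are served by the same e1kMap callback.
     */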
7864
7865 /* Create transmit queue */
7866 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7867 e1kTxQueueConsumer, true, "E1000-Xmit", &pThis->pTxQueueR3);
7868 if (RT_FAILURE(rc))
7869 return rc;
7870 pThis->pTxQueueR0 = PDMQueueR0Ptr(pThis->pTxQueueR3);
7871 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7872
7873 /* Create the RX notifier signaller. */
7874 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7875 e1kCanRxQueueConsumer, true, "E1000-Rcv", &pThis->pCanRxQueueR3);
7876 if (RT_FAILURE(rc))
7877 return rc;
7878 pThis->pCanRxQueueR0 = PDMQueueR0Ptr(pThis->pCanRxQueueR3);
7879 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
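    /*
     * A minimal sketch of how such a PDM queue is typically driven (assumption,
     * for illustration only; the real producer code lives elsewhere in this file):
     *     PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->pCanRxQueueR3);
     *     if (pItem)
     *         PDMQueueInsert(pThis->pCanRxQueueR3, pItem);
     * The consumer callback given above (e1kCanRxQueueConsumer) then runs in R3.
     */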
7880
7881#ifdef E1K_TX_DELAY
7882 /* Create Transmit Delay Timer */
7883 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxDelayTimer, pThis,
7884 TMTIMER_FLAGS_NO_CRIT_SECT,
7885 "E1000 Transmit Delay Timer", &pThis->pTXDTimerR3);
7886 if (RT_FAILURE(rc))
7887 return rc;
7888 pThis->pTXDTimerR0 = TMTimerR0Ptr(pThis->pTXDTimerR3);
7889 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7890 TMR3TimerSetCritSect(pThis->pTXDTimerR3, &pThis->csTx);
7891#endif /* E1K_TX_DELAY */
7892
7893//#ifdef E1K_USE_TX_TIMERS
7894 if (pThis->fTidEnabled)
7895 {
7896 /* Create Transmit Interrupt Delay Timer */
7897 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxIntDelayTimer, pThis,
7898 TMTIMER_FLAGS_NO_CRIT_SECT,
7899 "E1000 Transmit Interrupt Delay Timer", &pThis->pTIDTimerR3);
7900 if (RT_FAILURE(rc))
7901 return rc;
7902 pThis->pTIDTimerR0 = TMTimerR0Ptr(pThis->pTIDTimerR3);
7903 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7904
7905# ifndef E1K_NO_TAD
7906 /* Create Transmit Absolute Delay Timer */
7907 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxAbsDelayTimer, pThis,
7908 TMTIMER_FLAGS_NO_CRIT_SECT,
7909 "E1000 Transmit Absolute Delay Timer", &pThis->pTADTimerR3);
7910 if (RT_FAILURE(rc))
7911 return rc;
7912 pThis->pTADTimerR0 = TMTimerR0Ptr(pThis->pTADTimerR3);
7913 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7914# endif /* !E1K_NO_TAD */
7915 }
7916//#endif /* E1K_USE_TX_TIMERS */
7917
7918#ifdef E1K_USE_RX_TIMERS
7919 /* Create Receive Interrupt Delay Timer */
7920 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxIntDelayTimer, pThis,
7921 TMTIMER_FLAGS_NO_CRIT_SECT,
7922 "E1000 Receive Interrupt Delay Timer", &pThis->pRIDTimerR3);
7923 if (RT_FAILURE(rc))
7924 return rc;
7925 pThis->pRIDTimerR0 = TMTimerR0Ptr(pThis->pRIDTimerR3);
7926 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7927
7928 /* Create Receive Absolute Delay Timer */
7929 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxAbsDelayTimer, pThis,
7930 TMTIMER_FLAGS_NO_CRIT_SECT,
7931 "E1000 Receive Absolute Delay Timer", &pThis->pRADTimerR3);
7932 if (RT_FAILURE(rc))
7933 return rc;
7934 pThis->pRADTimerR0 = TMTimerR0Ptr(pThis->pRADTimerR3);
7935 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7936#endif /* E1K_USE_RX_TIMERS */
7937
7938 /* Create Late Interrupt Timer */
7939 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLateIntTimer, pThis,
7940 TMTIMER_FLAGS_NO_CRIT_SECT,
7941 "E1000 Late Interrupt Timer", &pThis->pIntTimerR3);
7942 if (RT_FAILURE(rc))
7943 return rc;
7944 pThis->pIntTimerR0 = TMTimerR0Ptr(pThis->pIntTimerR3);
7945 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7946
7947 /* Create Link Up Timer */
7948 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLinkUpTimer, pThis,
7949 TMTIMER_FLAGS_NO_CRIT_SECT,
7950 "E1000 Link Up Timer", &pThis->pLUTimerR3);
7951 if (RT_FAILURE(rc))
7952 return rc;
7953 pThis->pLUTimerR0 = TMTimerR0Ptr(pThis->pLUTimerR3);
7954 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7955
7956 /* Register the info item */
7957 char szTmp[20];
7958 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
7959 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
7960
7961 /* Status driver */
7962 PPDMIBASE pBase;
7963 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThis->IBase, &pBase, "Status Port");
7964 if (RT_FAILURE(rc))
7965 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
7966 pThis->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
7967
7968 /* Network driver */
7969 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7970 if (RT_SUCCESS(rc))
7971 {
7972 if (rc == VINF_NAT_DNS)
7973 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7974 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays when trying to do so"));
7975 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7976 AssertMsgReturn(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
7977
7978 pThis->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7979 pThis->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7980 }
7981 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7982 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7983 {
7984 /* No error! */
7985 E1kLog(("%s This adapter is not attached to any network!\n", pThis->szPrf));
7986 }
7987 else
7988 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
7989
7990 rc = RTSemEventCreate(&pThis->hEventMoreRxDescAvail);
7991 if (RT_FAILURE(rc))
7992 return rc;
7993
7994 rc = e1kInitDebugHelpers();
7995 if (RT_FAILURE(rc))
7996 return rc;
7997
7998 e1kHardReset(pThis);
7999
8000 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Public/Net/E1k%u/BytesReceived", iInstance);
8001 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Public/Net/E1k%u/BytesTransmitted", iInstance);
8002
8003 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
8004 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
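    /*
     * Note: StatReceiveBytes/StatTransmitBytes are intentionally registered twice,
     * once under /Public/Net/ (assumed to be consumed by other VirtualBox
     * components) and once under /Devices/. On the data paths they are typically
     * bumped roughly like this (illustration only, cbFrame is a placeholder):
     *     STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cbFrame);
     */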
8005
8006#if defined(VBOX_WITH_STATISTICS)
8007 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
8008 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
8009 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
8010 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
8011 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
8012 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
8013 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
8014 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
8015 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
8016 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
8017 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
8018 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occured", iInstance);
8019 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
8020 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
8021 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
8022 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
8023 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
8024 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
8025 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
8026 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflowWakeup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups", "/Devices/E1k%d/RxOverflowWakeup", iInstance);
8027 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
8028 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
8029 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
8030 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
8031
8032 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors","/Devices/E1k%d/TxDesc/ContexNormal", iInstance);
8033 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
8034 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
8035 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
8036 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
8037 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
8038 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
8039 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
8040 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
8041 for (unsigned iReg = 0; iReg < E1K_NUM_OF_REGS; iReg++)
8042 {
8043 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
8044 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Reads", iInstance, g_aE1kRegMap[iReg].abbrev);
8045 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
8046 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Writes", iInstance, g_aE1kRegMap[iReg].abbrev);
8047 }
8048#endif /* VBOX_WITH_STATISTICS */
8049
8050#ifdef E1K_INT_STATS
8051 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "u64ArmedAt", "/Devices/E1k%d/u64ArmedAt", iInstance);
8052 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatMaxTxDelay", "/Devices/E1k%d/uStatMaxTxDelay", iInstance);
8053 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatInt, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatInt", "/Devices/E1k%d/uStatInt", iInstance);
8054 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTry", "/Devices/E1k%d/uStatIntTry", iInstance);
8055 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLower", "/Devices/E1k%d/uStatIntLower", iInstance);
8056 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatNoIntICR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatNoIntICR", "/Devices/E1k%d/uStatNoIntICR", iInstance);
8057 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLost", "/Devices/E1k%d/iStatIntLost", iInstance);
8058 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLostOne", "/Devices/E1k%d/iStatIntLostOne", iInstance);
8059 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntIMS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntIMS", "/Devices/E1k%d/uStatIntIMS", iInstance);
8060 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntSkip", "/Devices/E1k%d/uStatIntSkip", iInstance);
8061 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLate", "/Devices/E1k%d/uStatIntLate", iInstance);
8062 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntMasked", "/Devices/E1k%d/uStatIntMasked", iInstance);
8063 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntEarly", "/Devices/E1k%d/uStatIntEarly", iInstance);
8064 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRx", "/Devices/E1k%d/uStatIntRx", iInstance);
8065 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTx", "/Devices/E1k%d/uStatIntTx", iInstance);
8066 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntICS", "/Devices/E1k%d/uStatIntICS", iInstance);
8067 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRDTR", "/Devices/E1k%d/uStatIntRDTR", iInstance);
8068 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRXDMT0", "/Devices/E1k%d/uStatIntRXDMT0", iInstance);
8069 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTXQE", "/Devices/E1k%d/uStatIntTXQE", iInstance);
8070 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxNoRS", "/Devices/E1k%d/uStatTxNoRS", iInstance);
8071 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxIDE", "/Devices/E1k%d/uStatTxIDE", iInstance);
8072 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayed", "/Devices/E1k%d/uStatTxDelayed", iInstance);
8073 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayExp", "/Devices/E1k%d/uStatTxDelayExp", iInstance);
8074 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTAD", "/Devices/E1k%d/uStatTAD", iInstance);
8075 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTID", "/Devices/E1k%d/uStatTID", iInstance);
8076 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRAD", "/Devices/E1k%d/uStatRAD", iInstance);
8077 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRID", "/Devices/E1k%d/uStatRID", iInstance);
8078 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRxFrm", "/Devices/E1k%d/uStatRxFrm", iInstance);
8079 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxFrm", "/Devices/E1k%d/uStatTxFrm", iInstance);
8080 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescCtx", "/Devices/E1k%d/uStatDescCtx", iInstance);
8081 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescDat", "/Devices/E1k%d/uStatDescDat", iInstance);
8082 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescLeg", "/Devices/E1k%d/uStatDescLeg", iInstance);
8083 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx1514", "/Devices/E1k%d/uStatTx1514", iInstance);
8084 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx2962", "/Devices/E1k%d/uStatTx2962", iInstance);
8085 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx4410", "/Devices/E1k%d/uStatTx4410", iInstance);
8086 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx5858", "/Devices/E1k%d/uStatTx5858", iInstance);
8087 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx7306", "/Devices/E1k%d/uStatTx7306", iInstance);
8088 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx8754", "/Devices/E1k%d/uStatTx8754", iInstance);
8089 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx16384", "/Devices/E1k%d/uStatTx16384", iInstance);
8090 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx32768", "/Devices/E1k%d/uStatTx32768", iInstance);
8091 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxLarge", "/Devices/E1k%d/uStatTxLarge", iInstance);
8092#endif /* E1K_INT_STATS */
8093
8094 return VINF_SUCCESS;
8095}
8096
8097/**
8098 * The device registration structure.
8099 */
8100const PDMDEVREG g_DeviceE1000 =
8101{
8102 /* Structure version. PDM_DEVREG_VERSION defines the current version. */
8103 PDM_DEVREG_VERSION,
8104 /* Device name. */
8105 "e1000",
8106 /* Name of guest context module (no path).
8107 * Only evaluated if PDM_DEVREG_FLAGS_RC is set. */
8108 "VBoxDDRC.rc",
8109 /* Name of ring-0 module (no path).
8110 * Only evaluated if PDM_DEVREG_FLAGS_R0 is set. */
8111 "VBoxDDR0.r0",
8112 /* The description of the device. The UTF-8 string pointed to shall, like this structure,
8113 * remain unchanged from registration till VM destruction. */
8114 "Intel PRO/1000 MT Desktop Ethernet.\n",
8115
8116 /* Flags, combination of the PDM_DEVREG_FLAGS_* \#defines. */
8117 PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0,
8118 /* Device class(es), combination of the PDM_DEVREG_CLASS_* \#defines. */
8119 PDM_DEVREG_CLASS_NETWORK,
8120 /* Maximum number of instances (per VM). */
8121 ~0U,
8122 /* Size of the instance data. */
8123 sizeof(E1KSTATE),
8124
8125 /* pfnConstruct */
8126 e1kR3Construct,
8127 /* pfnDestruct */
8128 e1kR3Destruct,
8129 /* pfnRelocate */
8130 e1kR3Relocate,
8131 /* pfnMemSetup */
8132 NULL,
8133 /* pfnPowerOn */
8134 NULL,
8135 /* pfnReset */
8136 e1kR3Reset,
8137 /* pfnSuspend */
8138 e1kR3Suspend,
8139 /* pfnResume */
8140 NULL,
8141 /* pfnAttach */
8142 e1kR3Attach,
8143 /* pfnDetach */
8144 e1kR3Detach,
8145 /* pfnQueryInterface */
8146 NULL,
8147 /* pfnInitComplete */
8148 NULL,
8149 /* pfnPowerOff */
8150 e1kR3PowerOff,
8151 /* pfnSoftReset */
8152 NULL,
8153
8154 /* u32VersionEnd */
8155 PDM_DEVREG_VERSION
8156};
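
/*
 * Illustrative sketch (assumption: the actual glue lives in the VBoxDD module
 * registration code, not in this file) of how g_DeviceE1000 reaches PDM:
 *
 *     extern "C" DECLEXPORT(int) VBoxDevicesRegister(PPDMDEVREGCB pCallbacks, uint32_t u32Version)
 *     {
 *         RT_NOREF(u32Version);
 *         return pCallbacks->pfnRegister(pCallbacks, &g_DeviceE1000);
 *     }
 */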
8157
8158#endif /* IN_RING3 */
8159#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */