VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp @ 64226

Last change on this file since 64226 was 64115, checked in by vboxsync, 8 years ago

PDM,IOM,PGM: Morphed the MMIO2 API into a mixed MMIO2 and pre-registered MMIO API that is able to deal with really large (<= 64GB) MMIO ranges. Limited testing, so back out at first sign of trouble.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 320.4 KB
1/* $Id: DevE1000.cpp 64115 2016-09-30 20:14:27Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2016 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DEV_E1000
33#include <iprt/crc.h>
34#include <iprt/ctype.h>
35#include <iprt/net.h>
36#include <iprt/semaphore.h>
37#include <iprt/string.h>
38#include <iprt/time.h>
39#include <iprt/uuid.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/vmm/pdmnetifs.h>
42#include <VBox/vmm/pdmnetinline.h>
43#include <VBox/param.h>
44#include "VBoxDD.h"
45
46#include "DevEEPROM.h"
47#include "DevE1000Phy.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** @name E1000 Build Options
54 * @{ */
55/** @def E1K_INIT_RA0
56 * E1K_INIT_RA0 forces E1000 to set the first entry of the Receive Address
57 * filter table to the MAC address obtained from CFGM. Most guests read the
58 * MAC address from EEPROM and write it to RA[0] explicitly, but Mac OS X seems
59 * to depend on it being already set (see @bugref{4657}).
60 */
61#define E1K_INIT_RA0
62/** @def E1K_LSC_ON_SLU
63 * E1K_LSC_ON_SLU causes E1000 to generate a Link Status Change interrupt when
64 * the guest driver brings up the link via the STATUS.LU bit. Again, the only
65 * guest that requires it is Mac OS X (see @bugref{4657}).
66 */
67#define E1K_LSC_ON_SLU
68/** @def E1K_TX_DELAY
69 * E1K_TX_DELAY aims to improve the guest-host transfer rate for TCP streams by
70 * preventing packets from being sent immediately. It allows several packets to
71 * be sent in a batch, reducing the number of acknowledgments. Note that it
72 * effectively disables the R0 TX path, forcing sending in R3.
73 */
74//#define E1K_TX_DELAY 150
75/** @def E1K_USE_TX_TIMERS
76 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
77 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
78 * register. Enabling it showed no positive effects on existing guests so it
79 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
80 * Ethernet Controllers Software Developer’s Manual" for more detailed
81 * explanation.
82 */
83//#define E1K_USE_TX_TIMERS
84/** @def E1K_NO_TAD
85 * E1K_NO_TAD disables one of two timers enabled by E1K_USE_TX_TIMERS, the
86 * Transmit Absolute Delay time. This timer sets the maximum time interval
87 * during which TX interrupts can be postponed (delayed). It has no effect
88 * if E1K_USE_TX_TIMERS is not defined.
89 */
90//#define E1K_NO_TAD
91/** @def E1K_REL_DEBUG
92 * E1K_REL_DEBUG enables debug logging (E1kLog, E1kLog2, E1kLog3) in release builds.
93 */
94//#define E1K_REL_DEBUG
95/** @def E1K_INT_STATS
96 * E1K_INT_STATS enables collection of internal statistics used for
97 * debugging delayed interrupts, etc.
98 */
99//#define E1K_INT_STATS
100/** @def E1K_WITH_MSI
101 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
102 */
103//#define E1K_WITH_MSI
104/** @def E1K_WITH_TX_CS
105 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
106 */
107#define E1K_WITH_TX_CS
108/** @def E1K_WITH_TXD_CACHE
109 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
110 * single physical memory read (or two if it wraps around the end of TX
111 * descriptor ring). It is required for proper functioning of bandwidth
112 * resource control as it allows computing the exact sizes of packets prior
113 * to allocating their buffers (see @bugref{5582}).
114 */
115#define E1K_WITH_TXD_CACHE
116/** @def E1K_WITH_RXD_CACHE
117 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
118 * single physical memory read (or two if it wraps around the end of RX
119 * descriptor ring). Intel's packet driver for DOS needs this option in
120 * order to work properly (see @bugref{6217}).
121 */
122#define E1K_WITH_RXD_CACHE
123/** @def E1K_WITH_PREREG_MMIO
124 * E1K_WITH_PREREG_MMIO enables a new-style MMIO registration and is
125 * currently only done for testing the related PDM, IOM and PGM code. */
126//#define E1K_WITH_PREREG_MMIO
127/** @} */
128/* End of Options ************************************************************/
129
130#ifdef E1K_WITH_TXD_CACHE
131/**
132 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
133 * in the state structure. It limits the number of descriptors loaded in one
134 * batch read. For example, a Linux guest may use up to 20 descriptors per
135 * TSE packet. The largest TSE packet seen (from a Windows guest) used 45 descriptors.
136 */
137# define E1K_TXD_CACHE_SIZE 64u
138#endif /* E1K_WITH_TXD_CACHE */
139
140#ifdef E1K_WITH_RXD_CACHE
141/**
142 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
143 * in the state structure. It limits the number of descriptors loaded in one
144 * batch read. For example, a Windows XP guest adds 15 RX descriptors at a time.
145 */
146# define E1K_RXD_CACHE_SIZE 16u
147#endif /* E1K_WITH_RXD_CACHE */
148
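/*
 * Editor's note: a minimal sketch (not part of the device code) of the batched
 * descriptor fetch described above -- at most two physical reads are needed,
 * the second one only when the request wraps past the end of the ring.  The
 * names cDescsInRing/iFirst/cToFetch are hypothetical.
 */
#if 0
static void e1kSketchSplitBatchFetch(uint32_t cDescsInRing, uint32_t iFirst, uint32_t cToFetch)
{
    /* First chunk: from iFirst up to the end of the descriptor ring. */
    uint32_t cFirstChunk  = RT_MIN(cToFetch, cDescsInRing - iFirst);
    /* Second chunk: only non-zero when the batch wraps around to the ring start. */
    uint32_t cSecondChunk = cToFetch - cFirstChunk;
    NOREF(cFirstChunk); NOREF(cSecondChunk);
}
#endif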
149
150/* Little helpers ************************************************************/
151#undef htons
152#undef ntohs
153#undef htonl
154#undef ntohl
155#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
156#define ntohs(x) htons(x)
157#define htonl(x) ASMByteSwapU32(x)
158#define ntohl(x) htonl(x)
159
160#ifndef DEBUG
161# ifdef E1K_REL_DEBUG
162# define DEBUG
163# define E1kLog(a) LogRel(a)
164# define E1kLog2(a) LogRel(a)
165# define E1kLog3(a) LogRel(a)
166# define E1kLogX(x, a) LogRel(a)
167//# define E1kLog3(a) do {} while (0)
168# else
169# define E1kLog(a) do {} while (0)
170# define E1kLog2(a) do {} while (0)
171# define E1kLog3(a) do {} while (0)
172# define E1kLogX(x, a) do {} while (0)
173# endif
174#else
175# define E1kLog(a) Log(a)
176# define E1kLog2(a) Log2(a)
177# define E1kLog3(a) Log3(a)
178# define E1kLogX(x, a) LogIt(x, LOG_GROUP, a)
179//# define E1kLog(a) do {} while (0)
180//# define E1kLog2(a) do {} while (0)
181//# define E1kLog3(a) do {} while (0)
182#endif
183
184#if 0
185# define LOG_ENABLED
186# define E1kLogRel(a) LogRel(a)
187# undef Log6
188# define Log6(a) LogRel(a)
189#else
190# define E1kLogRel(a) do { } while (0)
191#endif
192
193//#undef DEBUG
194
195#define STATE_TO_DEVINS(pThis) (((PE1KSTATE )pThis)->CTX_SUFF(pDevIns))
196#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
197
198#define E1K_INC_CNT32(cnt) \
199do { \
200 if (cnt < UINT32_MAX) \
201 cnt++; \
202} while (0)
203
204#define E1K_ADD_CNT64(cntLo, cntHi, val) \
205do { \
206 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
207 uint64_t tmp = u64Cnt; \
208 u64Cnt += val; \
209 if (tmp > u64Cnt ) \
210 u64Cnt = UINT64_MAX; \
211 cntLo = (uint32_t)u64Cnt; \
212 cntHi = (uint32_t)(u64Cnt >> 32); \
213} while (0)
214
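/*
 * Editor's note: an illustrative sketch (not part of the device code) showing that
 * the counter macros above saturate instead of wrapping.
 */
#if 0
static void e1kSketchCounterSaturation(void)
{
    uint32_t cntLo = UINT32_MAX, cntHi = UINT32_MAX;   /* 64-bit counter already at maximum */
    E1K_ADD_CNT64(cntLo, cntHi, 1514);                 /* stays at UINT64_MAX, does not wrap */
    E1K_INC_CNT32(cntLo);                              /* stays at UINT32_MAX                */
}
#endif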
215#ifdef E1K_INT_STATS
216# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
217#else /* E1K_INT_STATS */
218# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
219#endif /* E1K_INT_STATS */
220
221
222/*****************************************************************************/
223
224typedef uint32_t E1KCHIP;
225#define E1K_CHIP_82540EM 0
226#define E1K_CHIP_82543GC 1
227#define E1K_CHIP_82545EM 2
228
229#ifdef IN_RING3
230/** Different E1000 chips. */
231static const struct E1kChips
232{
233 uint16_t uPCIVendorId;
234 uint16_t uPCIDeviceId;
235 uint16_t uPCISubsystemVendorId;
236 uint16_t uPCISubsystemId;
237 const char *pcszName;
238} g_aChips[] =
239{
240 /* Vendor Device SSVendor SubSys Name */
241 { 0x8086,
242 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
243# ifdef E1K_WITH_MSI
244 0x105E,
245# else
246 0x100E,
247# endif
248 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
249 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
250 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMware Network Adapter */
251};
252#endif /* IN_RING3 */
253
254
255/* The size of register area mapped to I/O space */
256#define E1K_IOPORT_SIZE 0x8
257/* The size of memory-mapped register area */
258#define E1K_MM_SIZE 0x20000
259
260#define E1K_MAX_TX_PKT_SIZE 16288
261#define E1K_MAX_RX_PKT_SIZE 16384
262
263/*****************************************************************************/
264
265/** Gets the specified bits from the register. */
266#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
267#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
268#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
269#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
270#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
271
272#define CTRL_SLU UINT32_C(0x00000040)
273#define CTRL_MDIO UINT32_C(0x00100000)
274#define CTRL_MDC UINT32_C(0x00200000)
275#define CTRL_MDIO_DIR UINT32_C(0x01000000)
276#define CTRL_MDC_DIR UINT32_C(0x02000000)
277#define CTRL_RESET UINT32_C(0x04000000)
278#define CTRL_VME UINT32_C(0x40000000)
279
280#define STATUS_LU UINT32_C(0x00000002)
281#define STATUS_TXOFF UINT32_C(0x00000010)
282
283#define EECD_EE_WIRES UINT32_C(0x0F)
284#define EECD_EE_REQ UINT32_C(0x40)
285#define EECD_EE_GNT UINT32_C(0x80)
286
287#define EERD_START UINT32_C(0x00000001)
288#define EERD_DONE UINT32_C(0x00000010)
289#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
290#define EERD_DATA_SHIFT 16
291#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
292#define EERD_ADDR_SHIFT 8
293
294#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
295#define MDIC_DATA_SHIFT 0
296#define MDIC_REG_MASK UINT32_C(0x001F0000)
297#define MDIC_REG_SHIFT 16
298#define MDIC_PHY_MASK UINT32_C(0x03E00000)
299#define MDIC_PHY_SHIFT 21
300#define MDIC_OP_WRITE UINT32_C(0x04000000)
301#define MDIC_OP_READ UINT32_C(0x08000000)
302#define MDIC_READY UINT32_C(0x10000000)
303#define MDIC_INT_EN UINT32_C(0x20000000)
304#define MDIC_ERROR UINT32_C(0x40000000)
305
306#define TCTL_EN UINT32_C(0x00000002)
307#define TCTL_PSP UINT32_C(0x00000008)
308
309#define RCTL_EN UINT32_C(0x00000002)
310#define RCTL_UPE UINT32_C(0x00000008)
311#define RCTL_MPE UINT32_C(0x00000010)
312#define RCTL_LPE UINT32_C(0x00000020)
313#define RCTL_LBM_MASK UINT32_C(0x000000C0)
314#define RCTL_LBM_SHIFT 6
315#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
316#define RCTL_RDMTS_SHIFT 8
317#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
318#define RCTL_MO_MASK UINT32_C(0x00003000)
319#define RCTL_MO_SHIFT 12
320#define RCTL_BAM UINT32_C(0x00008000)
321#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
322#define RCTL_BSIZE_SHIFT 16
323#define RCTL_VFE UINT32_C(0x00040000)
324#define RCTL_CFIEN UINT32_C(0x00080000)
325#define RCTL_CFI UINT32_C(0x00100000)
326#define RCTL_BSEX UINT32_C(0x02000000)
327#define RCTL_SECRC UINT32_C(0x04000000)
328
329#define ICR_TXDW UINT32_C(0x00000001)
330#define ICR_TXQE UINT32_C(0x00000002)
331#define ICR_LSC UINT32_C(0x00000004)
332#define ICR_RXDMT0 UINT32_C(0x00000010)
333#define ICR_RXT0 UINT32_C(0x00000080)
334#define ICR_TXD_LOW UINT32_C(0x00008000)
335#define RDTR_FPD UINT32_C(0x80000000)
336
337#define PBA_st ((PBAST*)(pThis->auRegs + PBA_IDX))
338typedef struct
339{
340 unsigned rxa : 7;
341 unsigned rxa_r : 9;
342 unsigned txa : 16;
343} PBAST;
344AssertCompileSize(PBAST, 4);
345
346#define TXDCTL_WTHRESH_MASK 0x003F0000
347#define TXDCTL_WTHRESH_SHIFT 16
348#define TXDCTL_LWTHRESH_MASK 0xFE000000
349#define TXDCTL_LWTHRESH_SHIFT 25
350
351#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
352#define RXCSUM_PCSS_SHIFT 0
353
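/*
 * Editor's note: an illustrative sketch (not part of the device code) of the
 * GET_BITS/SET_BITS convention -- field accessors are formed from the register
 * name and the <REG>_<FIELD>_MASK / <REG>_<FIELD>_SHIFT constants above.
 */
#if 0
static uint32_t e1kSketchGetRxBufSizeField(uint32_t uRctl)
{
    /* Expands to ((uRctl & RCTL_BSIZE_MASK) >> RCTL_BSIZE_SHIFT). */
    return GET_BITS_V(uRctl, RCTL, BSIZE);
}
#endif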
354/** @name Register access macros
355 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
356 * @{ */
357#define CTRL pThis->auRegs[CTRL_IDX]
358#define STATUS pThis->auRegs[STATUS_IDX]
359#define EECD pThis->auRegs[EECD_IDX]
360#define EERD pThis->auRegs[EERD_IDX]
361#define CTRL_EXT pThis->auRegs[CTRL_EXT_IDX]
362#define FLA pThis->auRegs[FLA_IDX]
363#define MDIC pThis->auRegs[MDIC_IDX]
364#define FCAL pThis->auRegs[FCAL_IDX]
365#define FCAH pThis->auRegs[FCAH_IDX]
366#define FCT pThis->auRegs[FCT_IDX]
367#define VET pThis->auRegs[VET_IDX]
368#define ICR pThis->auRegs[ICR_IDX]
369#define ITR pThis->auRegs[ITR_IDX]
370#define ICS pThis->auRegs[ICS_IDX]
371#define IMS pThis->auRegs[IMS_IDX]
372#define IMC pThis->auRegs[IMC_IDX]
373#define RCTL pThis->auRegs[RCTL_IDX]
374#define FCTTV pThis->auRegs[FCTTV_IDX]
375#define TXCW pThis->auRegs[TXCW_IDX]
376#define RXCW pThis->auRegs[RXCW_IDX]
377#define TCTL pThis->auRegs[TCTL_IDX]
378#define TIPG pThis->auRegs[TIPG_IDX]
379#define AIFS pThis->auRegs[AIFS_IDX]
380#define LEDCTL pThis->auRegs[LEDCTL_IDX]
381#define PBA pThis->auRegs[PBA_IDX]
382#define FCRTL pThis->auRegs[FCRTL_IDX]
383#define FCRTH pThis->auRegs[FCRTH_IDX]
384#define RDFH pThis->auRegs[RDFH_IDX]
385#define RDFT pThis->auRegs[RDFT_IDX]
386#define RDFHS pThis->auRegs[RDFHS_IDX]
387#define RDFTS pThis->auRegs[RDFTS_IDX]
388#define RDFPC pThis->auRegs[RDFPC_IDX]
389#define RDBAL pThis->auRegs[RDBAL_IDX]
390#define RDBAH pThis->auRegs[RDBAH_IDX]
391#define RDLEN pThis->auRegs[RDLEN_IDX]
392#define RDH pThis->auRegs[RDH_IDX]
393#define RDT pThis->auRegs[RDT_IDX]
394#define RDTR pThis->auRegs[RDTR_IDX]
395#define RXDCTL pThis->auRegs[RXDCTL_IDX]
396#define RADV pThis->auRegs[RADV_IDX]
397#define RSRPD pThis->auRegs[RSRPD_IDX]
398#define TXDMAC pThis->auRegs[TXDMAC_IDX]
399#define TDFH pThis->auRegs[TDFH_IDX]
400#define TDFT pThis->auRegs[TDFT_IDX]
401#define TDFHS pThis->auRegs[TDFHS_IDX]
402#define TDFTS pThis->auRegs[TDFTS_IDX]
403#define TDFPC pThis->auRegs[TDFPC_IDX]
404#define TDBAL pThis->auRegs[TDBAL_IDX]
405#define TDBAH pThis->auRegs[TDBAH_IDX]
406#define TDLEN pThis->auRegs[TDLEN_IDX]
407#define TDH pThis->auRegs[TDH_IDX]
408#define TDT pThis->auRegs[TDT_IDX]
409#define TIDV pThis->auRegs[TIDV_IDX]
410#define TXDCTL pThis->auRegs[TXDCTL_IDX]
411#define TADV pThis->auRegs[TADV_IDX]
412#define TSPMT pThis->auRegs[TSPMT_IDX]
413#define CRCERRS pThis->auRegs[CRCERRS_IDX]
414#define ALGNERRC pThis->auRegs[ALGNERRC_IDX]
415#define SYMERRS pThis->auRegs[SYMERRS_IDX]
416#define RXERRC pThis->auRegs[RXERRC_IDX]
417#define MPC pThis->auRegs[MPC_IDX]
418#define SCC pThis->auRegs[SCC_IDX]
419#define ECOL pThis->auRegs[ECOL_IDX]
420#define MCC pThis->auRegs[MCC_IDX]
421#define LATECOL pThis->auRegs[LATECOL_IDX]
422#define COLC pThis->auRegs[COLC_IDX]
423#define DC pThis->auRegs[DC_IDX]
424#define TNCRS pThis->auRegs[TNCRS_IDX]
425/* #define SEC pThis->auRegs[SEC_IDX] Conflict with sys/time.h */
426#define CEXTERR pThis->auRegs[CEXTERR_IDX]
427#define RLEC pThis->auRegs[RLEC_IDX]
428#define XONRXC pThis->auRegs[XONRXC_IDX]
429#define XONTXC pThis->auRegs[XONTXC_IDX]
430#define XOFFRXC pThis->auRegs[XOFFRXC_IDX]
431#define XOFFTXC pThis->auRegs[XOFFTXC_IDX]
432#define FCRUC pThis->auRegs[FCRUC_IDX]
433#define PRC64 pThis->auRegs[PRC64_IDX]
434#define PRC127 pThis->auRegs[PRC127_IDX]
435#define PRC255 pThis->auRegs[PRC255_IDX]
436#define PRC511 pThis->auRegs[PRC511_IDX]
437#define PRC1023 pThis->auRegs[PRC1023_IDX]
438#define PRC1522 pThis->auRegs[PRC1522_IDX]
439#define GPRC pThis->auRegs[GPRC_IDX]
440#define BPRC pThis->auRegs[BPRC_IDX]
441#define MPRC pThis->auRegs[MPRC_IDX]
442#define GPTC pThis->auRegs[GPTC_IDX]
443#define GORCL pThis->auRegs[GORCL_IDX]
444#define GORCH pThis->auRegs[GORCH_IDX]
445#define GOTCL pThis->auRegs[GOTCL_IDX]
446#define GOTCH pThis->auRegs[GOTCH_IDX]
447#define RNBC pThis->auRegs[RNBC_IDX]
448#define RUC pThis->auRegs[RUC_IDX]
449#define RFC pThis->auRegs[RFC_IDX]
450#define ROC pThis->auRegs[ROC_IDX]
451#define RJC pThis->auRegs[RJC_IDX]
452#define MGTPRC pThis->auRegs[MGTPRC_IDX]
453#define MGTPDC pThis->auRegs[MGTPDC_IDX]
454#define MGTPTC pThis->auRegs[MGTPTC_IDX]
455#define TORL pThis->auRegs[TORL_IDX]
456#define TORH pThis->auRegs[TORH_IDX]
457#define TOTL pThis->auRegs[TOTL_IDX]
458#define TOTH pThis->auRegs[TOTH_IDX]
459#define TPR pThis->auRegs[TPR_IDX]
460#define TPT pThis->auRegs[TPT_IDX]
461#define PTC64 pThis->auRegs[PTC64_IDX]
462#define PTC127 pThis->auRegs[PTC127_IDX]
463#define PTC255 pThis->auRegs[PTC255_IDX]
464#define PTC511 pThis->auRegs[PTC511_IDX]
465#define PTC1023 pThis->auRegs[PTC1023_IDX]
466#define PTC1522 pThis->auRegs[PTC1522_IDX]
467#define MPTC pThis->auRegs[MPTC_IDX]
468#define BPTC pThis->auRegs[BPTC_IDX]
469#define TSCTC pThis->auRegs[TSCTC_IDX]
470#define TSCTFC pThis->auRegs[TSCTFC_IDX]
471#define RXCSUM pThis->auRegs[RXCSUM_IDX]
472#define WUC pThis->auRegs[WUC_IDX]
473#define WUFC pThis->auRegs[WUFC_IDX]
474#define WUS pThis->auRegs[WUS_IDX]
475#define MANC pThis->auRegs[MANC_IDX]
476#define IPAV pThis->auRegs[IPAV_IDX]
477#define WUPL pThis->auRegs[WUPL_IDX]
478/** @} */
479
480/**
481 * Indices of memory-mapped registers in register table.
482 */
483typedef enum
484{
485 CTRL_IDX,
486 STATUS_IDX,
487 EECD_IDX,
488 EERD_IDX,
489 CTRL_EXT_IDX,
490 FLA_IDX,
491 MDIC_IDX,
492 FCAL_IDX,
493 FCAH_IDX,
494 FCT_IDX,
495 VET_IDX,
496 ICR_IDX,
497 ITR_IDX,
498 ICS_IDX,
499 IMS_IDX,
500 IMC_IDX,
501 RCTL_IDX,
502 FCTTV_IDX,
503 TXCW_IDX,
504 RXCW_IDX,
505 TCTL_IDX,
506 TIPG_IDX,
507 AIFS_IDX,
508 LEDCTL_IDX,
509 PBA_IDX,
510 FCRTL_IDX,
511 FCRTH_IDX,
512 RDFH_IDX,
513 RDFT_IDX,
514 RDFHS_IDX,
515 RDFTS_IDX,
516 RDFPC_IDX,
517 RDBAL_IDX,
518 RDBAH_IDX,
519 RDLEN_IDX,
520 RDH_IDX,
521 RDT_IDX,
522 RDTR_IDX,
523 RXDCTL_IDX,
524 RADV_IDX,
525 RSRPD_IDX,
526 TXDMAC_IDX,
527 TDFH_IDX,
528 TDFT_IDX,
529 TDFHS_IDX,
530 TDFTS_IDX,
531 TDFPC_IDX,
532 TDBAL_IDX,
533 TDBAH_IDX,
534 TDLEN_IDX,
535 TDH_IDX,
536 TDT_IDX,
537 TIDV_IDX,
538 TXDCTL_IDX,
539 TADV_IDX,
540 TSPMT_IDX,
541 CRCERRS_IDX,
542 ALGNERRC_IDX,
543 SYMERRS_IDX,
544 RXERRC_IDX,
545 MPC_IDX,
546 SCC_IDX,
547 ECOL_IDX,
548 MCC_IDX,
549 LATECOL_IDX,
550 COLC_IDX,
551 DC_IDX,
552 TNCRS_IDX,
553 SEC_IDX,
554 CEXTERR_IDX,
555 RLEC_IDX,
556 XONRXC_IDX,
557 XONTXC_IDX,
558 XOFFRXC_IDX,
559 XOFFTXC_IDX,
560 FCRUC_IDX,
561 PRC64_IDX,
562 PRC127_IDX,
563 PRC255_IDX,
564 PRC511_IDX,
565 PRC1023_IDX,
566 PRC1522_IDX,
567 GPRC_IDX,
568 BPRC_IDX,
569 MPRC_IDX,
570 GPTC_IDX,
571 GORCL_IDX,
572 GORCH_IDX,
573 GOTCL_IDX,
574 GOTCH_IDX,
575 RNBC_IDX,
576 RUC_IDX,
577 RFC_IDX,
578 ROC_IDX,
579 RJC_IDX,
580 MGTPRC_IDX,
581 MGTPDC_IDX,
582 MGTPTC_IDX,
583 TORL_IDX,
584 TORH_IDX,
585 TOTL_IDX,
586 TOTH_IDX,
587 TPR_IDX,
588 TPT_IDX,
589 PTC64_IDX,
590 PTC127_IDX,
591 PTC255_IDX,
592 PTC511_IDX,
593 PTC1023_IDX,
594 PTC1522_IDX,
595 MPTC_IDX,
596 BPTC_IDX,
597 TSCTC_IDX,
598 TSCTFC_IDX,
599 RXCSUM_IDX,
600 WUC_IDX,
601 WUFC_IDX,
602 WUS_IDX,
603 MANC_IDX,
604 IPAV_IDX,
605 WUPL_IDX,
606 MTA_IDX,
607 RA_IDX,
608 VFTA_IDX,
609 IP4AT_IDX,
610 IP6AT_IDX,
611 WUPM_IDX,
612 FFLT_IDX,
613 FFMT_IDX,
614 FFVT_IDX,
615 PBM_IDX,
616 RA_82542_IDX,
617 MTA_82542_IDX,
618 VFTA_82542_IDX,
619 E1K_NUM_OF_REGS
620} E1kRegIndex;
621
622#define E1K_NUM_OF_32BIT_REGS MTA_IDX
623/** The number of registers with strictly increasing offset. */
624#define E1K_NUM_OF_BINARY_SEARCHABLE (WUPL_IDX + 1)
625
626
627/**
628 * Define E1000-specific EEPROM layout.
629 */
630struct E1kEEPROM
631{
632 public:
633 EEPROM93C46 eeprom;
634
635#ifdef IN_RING3
636 /**
637 * Initialize EEPROM content.
638 *
639 * @param macAddr MAC address of E1000.
640 */
641 void init(RTMAC &macAddr)
642 {
643 eeprom.init();
644 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
645 eeprom.m_au16Data[0x04] = 0xFFFF;
646 /*
647 * bit 3 - full support for power management
648 * bit 10 - full duplex
649 */
650 eeprom.m_au16Data[0x0A] = 0x4408;
651 eeprom.m_au16Data[0x0B] = 0x001E;
652 eeprom.m_au16Data[0x0C] = 0x8086;
653 eeprom.m_au16Data[0x0D] = 0x100E;
654 eeprom.m_au16Data[0x0E] = 0x8086;
655 eeprom.m_au16Data[0x0F] = 0x3040;
656 eeprom.m_au16Data[0x21] = 0x7061;
657 eeprom.m_au16Data[0x22] = 0x280C;
658 eeprom.m_au16Data[0x23] = 0x00C8;
659 eeprom.m_au16Data[0x24] = 0x00C8;
660 eeprom.m_au16Data[0x2F] = 0x0602;
661 updateChecksum();
662 };
663
664 /**
665 * Compute the checksum as required by E1000 and store it
666 * in the last word.
667 */
668 void updateChecksum()
669 {
670 uint16_t u16Checksum = 0;
671
672 for (int i = 0; i < eeprom.SIZE-1; i++)
673 u16Checksum += eeprom.m_au16Data[i];
674 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
675 };
676
677 /**
678 * The first 6 bytes of the EEPROM contain the MAC address.
679 *
680 * @param pMac Where to store the MAC address of the E1000.
681 */
682 void getMac(PRTMAC pMac)
683 {
684 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
685 };
686
687 uint32_t read()
688 {
689 return eeprom.read();
690 }
691
692 void write(uint32_t u32Wires)
693 {
694 eeprom.write(u32Wires);
695 }
696
697 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
698 {
699 return eeprom.readWord(u32Addr, pu16Value);
700 }
701
702 int load(PSSMHANDLE pSSM)
703 {
704 return eeprom.load(pSSM);
705 }
706
707 void save(PSSMHANDLE pSSM)
708 {
709 eeprom.save(pSSM);
710 }
711#endif /* IN_RING3 */
712};
713
714
715#define E1K_SPEC_VLAN(s) (s & 0xFFF)
716#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
717#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
718
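/*
 * Editor's note: an illustrative sketch (not part of the device code) of decoding a
 * VLAN tag control word with the macros above, e.g. 0xA07B = priority 5, CFI 0,
 * VLAN id 0x07B.
 */
#if 0
static void e1kSketchDecodeVlanTci(uint16_t u16Tci)
{
    unsigned uVlanId = E1K_SPEC_VLAN(u16Tci);   /* bits 11:0  */
    unsigned fCfi    = E1K_SPEC_CFI(u16Tci);    /* bit  12    */
    unsigned uPri    = E1K_SPEC_PRI(u16Tci);    /* bits 15:13 */
    NOREF(uVlanId); NOREF(fCfi); NOREF(uPri);
}
#endif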
719struct E1kRxDStatus
720{
721 /** @name Descriptor Status field (3.2.3.1)
722 * @{ */
723 unsigned fDD : 1; /**< Descriptor Done. */
724 unsigned fEOP : 1; /**< End of packet. */
725 unsigned fIXSM : 1; /**< Ignore checksum indication. */
726 unsigned fVP : 1; /**< VLAN, matches VET. */
727 unsigned : 1;
728 unsigned fTCPCS : 1; /**< TCP Checksum calculated on the packet. */
729 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
730 unsigned fPIF : 1; /**< Passed in-exact filter */
731 /** @} */
732 /** @name Descriptor Errors field (3.2.3.2)
733 * (Only valid when fEOP and fDD are set.)
734 * @{ */
735 unsigned fCE : 1; /**< CRC or alignment error. */
736 unsigned : 4; /**< Reserved, varies with different models... */
737 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
738 unsigned fIPE : 1; /**< IP Checksum error. */
739 unsigned fRXE : 1; /**< RX Data error. */
740 /** @} */
741 /** @name Descriptor Special field (3.2.3.3)
742 * @{ */
743 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
744 /** @} */
745};
746typedef struct E1kRxDStatus E1KRXDST;
747
748struct E1kRxDesc_st
749{
750 uint64_t u64BufAddr; /**< Address of data buffer */
751 uint16_t u16Length; /**< Length of data in buffer */
752 uint16_t u16Checksum; /**< Packet checksum */
753 E1KRXDST status;
754};
755typedef struct E1kRxDesc_st E1KRXDESC;
756AssertCompileSize(E1KRXDESC, 16);
757
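/*
 * Editor's note: an illustrative sketch (not the actual receive path) of completing an
 * RX descriptor once a frame has been stored in its buffer: the length is filled in and
 * the Descriptor Done / End Of Packet status bits are set before write-back.
 */
#if 0
static void e1kSketchCompleteRxDesc(E1KRXDESC *pDesc, uint16_t cbStored, bool fLastDescOfFrame)
{
    pDesc->u16Length   = cbStored;           /* bytes actually placed in the buffer         */
    pDesc->status.fDD  = 1;                  /* descriptor done                             */
    pDesc->status.fEOP = fLastDescOfFrame;   /* only the last descriptor of a frame has EOP */
}
#endif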
758#define E1K_DTYP_LEGACY -1
759#define E1K_DTYP_CONTEXT 0
760#define E1K_DTYP_DATA 1
761
762struct E1kTDLegacy
763{
764 uint64_t u64BufAddr; /**< Address of data buffer */
765 struct TDLCmd_st
766 {
767 unsigned u16Length : 16;
768 unsigned u8CSO : 8;
769 /* CMD field : 8 */
770 unsigned fEOP : 1;
771 unsigned fIFCS : 1;
772 unsigned fIC : 1;
773 unsigned fRS : 1;
774 unsigned fRPS : 1;
775 unsigned fDEXT : 1;
776 unsigned fVLE : 1;
777 unsigned fIDE : 1;
778 } cmd;
779 struct TDLDw3_st
780 {
781 /* STA field */
782 unsigned fDD : 1;
783 unsigned fEC : 1;
784 unsigned fLC : 1;
785 unsigned fTURSV : 1;
786 /* RSV field */
787 unsigned u4RSV : 4;
788 /* CSS field */
789 unsigned u8CSS : 8;
790 /* Special field*/
791 unsigned u16Special: 16;
792 } dw3;
793};
794
795/**
796 * TCP/IP Context Transmit Descriptor, section 3.3.6.
797 */
798struct E1kTDContext
799{
800 struct CheckSum_st
801 {
802 /** TSE: Header start. !TSE: Checksum start. */
803 unsigned u8CSS : 8;
804 /** Checksum offset - where to store it. */
805 unsigned u8CSO : 8;
806 /** Checksum ending (inclusive) offset, 0 = end of packet. */
807 unsigned u16CSE : 16;
808 } ip;
809 struct CheckSum_st tu;
810 struct TDCDw2_st
811 {
812 /** TSE: The total number of payload bytes for this context. Sans header. */
813 unsigned u20PAYLEN : 20;
814 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
815 unsigned u4DTYP : 4;
816 /** TUCMD field, 8 bits
817 * @{ */
818 /** TSE: TCP (set) or UDP (clear). */
819 unsigned fTCP : 1;
820 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
821 * the IP header. Does not affect the checksumming.
822 * @remarks 82544GC/EI interprets a cleared field differently. */
823 unsigned fIP : 1;
824 /** TSE: TCP segmentation enable. When clear, the context describes an ordinary (non-TSE) packet. */
825 unsigned fTSE : 1;
826 /** Report status (only applies to dw3.fDD here). */
827 unsigned fRS : 1;
828 /** Reserved, MBZ. */
829 unsigned fRSV1 : 1;
830 /** Descriptor extension, must be set for this descriptor type. */
831 unsigned fDEXT : 1;
832 /** Reserved, MBZ. */
833 unsigned fRSV2 : 1;
834 /** Interrupt delay enable. */
835 unsigned fIDE : 1;
836 /** @} */
837 } dw2;
838 struct TDCDw3_st
839 {
840 /** Descriptor Done. */
841 unsigned fDD : 1;
842 /** Reserved, MBZ. */
843 unsigned u7RSV : 7;
844 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
845 unsigned u8HDRLEN : 8;
846 /** TSO: Maximum segment size. */
847 unsigned u16MSS : 16;
848 } dw3;
849};
850typedef struct E1kTDContext E1KTXCTX;
851
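/*
 * Editor's note: an illustrative sketch (not the device's actual checksum code) of how
 * the CSS/CSO/CSE fields of a non-TSE context could drive TCP/UDP checksum insertion.
 * e1kSketchCsum16() is a hypothetical 16-bit ones-complement sum helper; byte order and
 * pseudo-header handling are omitted.
 */
#if 0
static void e1kSketchInsertTcpUdpChecksum(uint8_t *pbPacket, uint16_t cbPacket, E1KTXCTX const *pCtx)
{
    uint16_t uStart = pCtx->tu.u8CSS;                                   /* first byte to sum         */
    uint16_t uEnd   = pCtx->tu.u16CSE ? pCtx->tu.u16CSE : cbPacket - 1; /* last byte, 0 = end of pkt */
    uint16_t uCsum  = e1kSketchCsum16(pbPacket + uStart, uEnd - uStart + 1);
    *(uint16_t *)(pbPacket + pCtx->tu.u8CSO) = uCsum;                   /* store at the CSO offset   */
}
#endif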
852/**
853 * TCP/IP Data Transmit Descriptor, section 3.3.7.
854 */
855struct E1kTDData
856{
857 uint64_t u64BufAddr; /**< Address of data buffer */
858 struct TDDCmd_st
859 {
860 /** The total length of data pointed to by this descriptor. */
861 unsigned u20DTALEN : 20;
862 /** The descriptor type - E1K_DTYP_DATA (1). */
863 unsigned u4DTYP : 4;
864 /** @name DCMD field, 8 bits (3.3.7.1).
865 * @{ */
866 /** End of packet. Note TSCTFC update. */
867 unsigned fEOP : 1;
868 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
869 unsigned fIFCS : 1;
870 /** Use the TSE context when set and the normal when clear. */
871 unsigned fTSE : 1;
872 /** Report status (dw3.STA). */
873 unsigned fRS : 1;
874 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
875 unsigned fRPS : 1;
876 /** Descriptor extension, must be set for this descriptor type. */
877 unsigned fDEXT : 1;
878 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
879 * Insert dw3.SPECIAL after ethernet header. */
880 unsigned fVLE : 1;
881 /** Interrupt delay enable. */
882 unsigned fIDE : 1;
883 /** @} */
884 } cmd;
885 struct TDDDw3_st
886 {
887 /** @name STA field (3.3.7.2)
888 * @{ */
889 unsigned fDD : 1; /**< Descriptor done. */
890 unsigned fEC : 1; /**< Excess collision. */
891 unsigned fLC : 1; /**< Late collision. */
892 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
893 unsigned fTURSV : 1;
894 /** @} */
895 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
896 /** @name POPTS (Packet Option) field (3.3.7.3)
897 * @{ */
898 unsigned fIXSM : 1; /**< Insert IP checksum. */
899 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
900 unsigned u6RSV : 6; /**< Reserved, MBZ. */
901 /** @} */
902 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
903 * Requires fEOP, fVLE and CTRL.VME to be set.
904 * @{ */
905 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
906 /** @} */
907 } dw3;
908};
909typedef struct E1kTDData E1KTXDAT;
910
911union E1kTxDesc
912{
913 struct E1kTDLegacy legacy;
914 struct E1kTDContext context;
915 struct E1kTDData data;
916};
917typedef union E1kTxDesc E1KTXDESC;
918AssertCompileSize(E1KTXDESC, 16);
919
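/*
 * Editor's note: an illustrative sketch (not the device's actual helper) of classifying
 * a TX descriptor: legacy descriptors have DEXT clear, extended descriptors use the DTYP
 * field to distinguish context (0) from data (1) descriptors.
 */
#if 0
static int e1kSketchGetTxDescType(E1KTXDESC const *pDesc)
{
    if (!pDesc->legacy.cmd.fDEXT)
        return E1K_DTYP_LEGACY;
    return pDesc->data.cmd.u4DTYP == E1K_DTYP_DATA ? E1K_DTYP_DATA : E1K_DTYP_CONTEXT;
}
#endif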
920#define RA_CTL_AS 0x0003
921#define RA_CTL_AV 0x8000
922
923union E1kRecAddr
924{
925 uint32_t au32[32];
926 struct RAArray
927 {
928 uint8_t addr[6];
929 uint16_t ctl;
930 } array[16];
931};
932typedef struct E1kRecAddr::RAArray E1KRAELEM;
933typedef union E1kRecAddr E1KRA;
934AssertCompileSize(E1KRA, 8*16);
935
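/*
 * Editor's note: an illustrative sketch (not the device's actual filter code) of matching
 * a destination MAC against the Receive Address table above; only entries with the
 * Address Valid (AV) bit set in their control word take part in filtering.
 */
#if 0
static bool e1kSketchMatchUnicast(E1KRA const *pRa, uint8_t const *pbDstMac)
{
    for (unsigned i = 0; i < RT_ELEMENTS(pRa->array); i++)
        if (   (pRa->array[i].ctl & RA_CTL_AV)
            && memcmp(pRa->array[i].addr, pbDstMac, sizeof(pRa->array[i].addr)) == 0)
            return true;
    return false;
}
#endif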
936#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
937#define E1K_IP_DF UINT16_C(0x4000) /**< don't fragment flag */
938#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
939#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
940
941/** @todo use+extend RTNETIPV4 */
942struct E1kIpHeader
943{
944 /* type of service / version / header length */
945 uint16_t tos_ver_hl;
946 /* total length */
947 uint16_t total_len;
948 /* identification */
949 uint16_t ident;
950 /* fragment offset field */
951 uint16_t offset;
952 /* time to live / protocol*/
953 uint16_t ttl_proto;
954 /* checksum */
955 uint16_t chksum;
956 /* source IP address */
957 uint32_t src;
958 /* destination IP address */
959 uint32_t dest;
960};
961AssertCompileSize(struct E1kIpHeader, 20);
962
963#define E1K_TCP_FIN UINT16_C(0x01)
964#define E1K_TCP_SYN UINT16_C(0x02)
965#define E1K_TCP_RST UINT16_C(0x04)
966#define E1K_TCP_PSH UINT16_C(0x08)
967#define E1K_TCP_ACK UINT16_C(0x10)
968#define E1K_TCP_URG UINT16_C(0x20)
969#define E1K_TCP_ECE UINT16_C(0x40)
970#define E1K_TCP_CWR UINT16_C(0x80)
971#define E1K_TCP_FLAGS UINT16_C(0x3f)
972
973/** @todo use+extend RTNETTCP */
974struct E1kTcpHeader
975{
976 uint16_t src;
977 uint16_t dest;
978 uint32_t seqno;
979 uint32_t ackno;
980 uint16_t hdrlen_flags;
981 uint16_t wnd;
982 uint16_t chksum;
983 uint16_t urgp;
984};
985AssertCompileSize(struct E1kTcpHeader, 20);
986
987
988#ifdef E1K_WITH_TXD_CACHE
989/** The current Saved state version. */
990# define E1K_SAVEDSTATE_VERSION 4
991/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
992# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
993#else /* !E1K_WITH_TXD_CACHE */
994/** The current Saved state version. */
995# define E1K_SAVEDSTATE_VERSION 3
996#endif /* !E1K_WITH_TXD_CACHE */
997/** Saved state version for VirtualBox 4.1 and earlier.
998 * These did not include VLAN tag fields. */
999#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
1000/** Saved state version for VirtualBox 3.0 and earlier.
1001 * This did not include the configuration part nor the E1kEEPROM. */
1002#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
1003
1004/**
1005 * Device state structure.
1006 *
1007 * Holds the current state of the device.
1008 *
1009 * @implements PDMINETWORKDOWN
1010 * @implements PDMINETWORKCONFIG
1011 * @implements PDMILEDPORTS
1012 */
1013struct E1kState_st
1014{
1015 char szPrf[8]; /**< Log prefix, e.g. E1000#1. */
1016 PDMIBASE IBase;
1017 PDMINETWORKDOWN INetworkDown;
1018 PDMINETWORKCONFIG INetworkConfig;
1019 PDMILEDPORTS ILeds; /**< LED interface */
1020 R3PTRTYPE(PPDMIBASE) pDrvBase; /**< Attached network driver. */
1021 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1022
1023 PPDMDEVINSR3 pDevInsR3; /**< Device instance - R3. */
1024 R3PTRTYPE(PPDMQUEUE) pTxQueueR3; /**< Transmit queue - R3. */
1025 R3PTRTYPE(PPDMQUEUE) pCanRxQueueR3; /**< Rx wakeup signaller - R3. */
1026 PPDMINETWORKUPR3 pDrvR3; /**< Attached network driver - R3. */
1027 PTMTIMERR3 pRIDTimerR3; /**< Receive Interrupt Delay Timer - R3. */
1028 PTMTIMERR3 pRADTimerR3; /**< Receive Absolute Delay Timer - R3. */
1029 PTMTIMERR3 pTIDTimerR3; /**< Transmit Interrupt Delay Timer - R3. */
1030 PTMTIMERR3 pTADTimerR3; /**< Transmit Absolute Delay Timer - R3. */
1031 PTMTIMERR3 pTXDTimerR3; /**< Transmit Delay Timer - R3. */
1032 PTMTIMERR3 pIntTimerR3; /**< Late Interrupt Timer - R3. */
1033 PTMTIMERR3 pLUTimerR3; /**< Link Up(/Restore) Timer. */
1034 /** The scatter / gather buffer used for the current outgoing packet - R3. */
1035 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1036
1037 PPDMDEVINSR0 pDevInsR0; /**< Device instance - R0. */
1038 R0PTRTYPE(PPDMQUEUE) pTxQueueR0; /**< Transmit queue - R0. */
1039 R0PTRTYPE(PPDMQUEUE) pCanRxQueueR0; /**< Rx wakeup signaller - R0. */
1040 PPDMINETWORKUPR0 pDrvR0; /**< Attached network driver - R0. */
1041 PTMTIMERR0 pRIDTimerR0; /**< Receive Interrupt Delay Timer - R0. */
1042 PTMTIMERR0 pRADTimerR0; /**< Receive Absolute Delay Timer - R0. */
1043 PTMTIMERR0 pTIDTimerR0; /**< Transmit Interrupt Delay Timer - R0. */
1044 PTMTIMERR0 pTADTimerR0; /**< Transmit Absolute Delay Timer - R0. */
1045 PTMTIMERR0 pTXDTimerR0; /**< Transmit Delay Timer - R0. */
1046 PTMTIMERR0 pIntTimerR0; /**< Late Interrupt Timer - R0. */
1047 PTMTIMERR0 pLUTimerR0; /**< Link Up(/Restore) Timer - R0. */
1048 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1049 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1050
1051 PPDMDEVINSRC pDevInsRC; /**< Device instance - RC. */
1052 RCPTRTYPE(PPDMQUEUE) pTxQueueRC; /**< Transmit queue - RC. */
1053 RCPTRTYPE(PPDMQUEUE) pCanRxQueueRC; /**< Rx wakeup signaller - RC. */
1054 PPDMINETWORKUPRC pDrvRC; /**< Attached network driver - RC. */
1055 PTMTIMERRC pRIDTimerRC; /**< Receive Interrupt Delay Timer - RC. */
1056 PTMTIMERRC pRADTimerRC; /**< Receive Absolute Delay Timer - RC. */
1057 PTMTIMERRC pTIDTimerRC; /**< Transmit Interrupt Delay Timer - RC. */
1058 PTMTIMERRC pTADTimerRC; /**< Transmit Absolute Delay Timer - RC. */
1059 PTMTIMERRC pTXDTimerRC; /**< Transmit Delay Timer - RC. */
1060 PTMTIMERRC pIntTimerRC; /**< Late Interrupt Timer - RC. */
1061 PTMTIMERRC pLUTimerRC; /**< Link Up(/Restore) Timer - RC. */
1062 /** The scatter / gather buffer used for the current outgoing packet - RC. */
1063 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1064 RTRCPTR RCPtrAlignment;
1065
1066#if HC_ARCH_BITS != 32
1067 uint32_t Alignment1;
1068#endif
1069 PDMCRITSECT cs; /**< Critical section - what is it protecting? */
1070 PDMCRITSECT csRx; /**< RX Critical section. */
1071#ifdef E1K_WITH_TX_CS
1072 PDMCRITSECT csTx; /**< TX Critical section. */
1073#endif /* E1K_WITH_TX_CS */
1074 /** Base address of memory-mapped registers. */
1075 RTGCPHYS addrMMReg;
1076 /** MAC address obtained from the configuration. */
1077 RTMAC macConfigured;
1078 /** Base port of I/O space region. */
1079 RTIOPORT IOPortBase;
1080 /** EMT: */
1081 PCIDEVICE pciDevice;
1082 /** EMT: Last time the interrupt was acknowledged. */
1083 uint64_t u64AckedAt;
1084 /** All: Used for eliminating spurious interrupts. */
1085 bool fIntRaised;
1086 /** EMT: false if the cable is disconnected by the GUI. */
1087 bool fCableConnected;
1088 /** EMT: */
1089 bool fR0Enabled;
1090 /** EMT: */
1091 bool fRCEnabled;
1092 /** EMT: Compute Ethernet CRC for RX packets. */
1093 bool fEthernetCRC;
1094 /** All: throttle interrupts. */
1095 bool fItrEnabled;
1096 /** All: throttle RX interrupts. */
1097 bool fItrRxEnabled;
1098
1099 bool Alignment2;
1100 /** Link up delay (in milliseconds). */
1101 uint32_t cMsLinkUpDelay;
1102
1103 /** All: Device register storage. */
1104 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1105 /** TX/RX: Status LED. */
1106 PDMLED led;
1107 /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1108 uint32_t u32PktNo;
1109
1110 /** EMT: Offset of the register to be read via IO. */
1111 uint32_t uSelectedReg;
1112 /** EMT: Multicast Table Array. */
1113 uint32_t auMTA[128];
1114 /** EMT: Receive Address registers. */
1115 E1KRA aRecAddr;
1116 /** EMT: VLAN filter table array. */
1117 uint32_t auVFTA[128];
1118 /** EMT: Receive buffer size. */
1119 uint16_t u16RxBSize;
1120 /** EMT: Locked state -- no state alteration possible. */
1121 bool fLocked;
1122 /** EMT: */
1123 bool fDelayInts;
1124 /** All: */
1125 bool fIntMaskUsed;
1126
1127 /** N/A: */
1128 bool volatile fMaybeOutOfSpace;
1129 /** EMT: Gets signalled when more RX descriptors become available. */
1130 RTSEMEVENT hEventMoreRxDescAvail;
1131#ifdef E1K_WITH_RXD_CACHE
1132 /** RX: Fetched RX descriptors. */
1133 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1134 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1135 /** RX: Actual number of fetched RX descriptors. */
1136 uint32_t nRxDFetched;
1137 /** RX: Index in cache of RX descriptor being processed. */
1138 uint32_t iRxDCurrent;
1139#endif /* E1K_WITH_RXD_CACHE */
1140
1141 /** TX: Context used for TCP segmentation packets. */
1142 E1KTXCTX contextTSE;
1143 /** TX: Context used for ordinary packets. */
1144 E1KTXCTX contextNormal;
1145#ifdef E1K_WITH_TXD_CACHE
1146 /** TX: Fetched TX descriptors. */
1147 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1148 /** TX: Actual number of fetched TX descriptors. */
1149 uint8_t nTxDFetched;
1150 /** TX: Index in cache of TX descriptor being processed. */
1151 uint8_t iTxDCurrent;
1152 /** TX: Will this frame be sent as GSO. */
1153 bool fGSO;
1154 /** Alignment padding. */
1155 bool fReserved;
1156 /** TX: Number of bytes in next packet. */
1157 uint32_t cbTxAlloc;
1158
1159#endif /* E1K_WITH_TXD_CACHE */
1160 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1161 * applicable to the current TSE mode. */
1162 PDMNETWORKGSO GsoCtx;
1163 /** Scratch space for holding the loopback / fallback scatter / gather
1164 * descriptor. */
1165 union
1166 {
1167 PDMSCATTERGATHER Sg;
1168 uint8_t padding[8 * sizeof(RTUINTPTR)];
1169 } uTxFallback;
1170 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1171 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1172 /** TX: Number of bytes assembled in TX packet buffer. */
1173 uint16_t u16TxPktLen;
1174 /** TX: When false, segmentation is done by e1000 instead of sending frames as GSO. */
1175 bool fGSOEnabled;
1176 /** TX: IP checksum has to be inserted if true. */
1177 bool fIPcsum;
1178 /** TX: TCP/UDP checksum has to be inserted if true. */
1179 bool fTCPcsum;
1180 /** TX: VLAN tag has to be inserted if true. */
1181 bool fVTag;
1182 /** TX: TCI part of VLAN tag to be inserted. */
1183 uint16_t u16VTagTCI;
1184 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1185 uint32_t u32PayRemain;
1186 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1187 uint16_t u16HdrRemain;
1188 /** TX TSE fallback: Flags from template header. */
1189 uint16_t u16SavedFlags;
1190 /** TX TSE fallback: Partial checksum from template header. */
1191 uint32_t u32SavedCsum;
1192 /** ?: Emulated controller type. */
1193 E1KCHIP eChip;
1194
1195 /** EMT: EEPROM emulation */
1196 E1kEEPROM eeprom;
1197 /** EMT: Physical interface emulation. */
1198 PHY phy;
1199
1200#if 0
1201 /** Alignment padding. */
1202 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1203#endif
1204
1205 STAMCOUNTER StatReceiveBytes;
1206 STAMCOUNTER StatTransmitBytes;
1207#if defined(VBOX_WITH_STATISTICS)
1208 STAMPROFILEADV StatMMIOReadRZ;
1209 STAMPROFILEADV StatMMIOReadR3;
1210 STAMPROFILEADV StatMMIOWriteRZ;
1211 STAMPROFILEADV StatMMIOWriteR3;
1212 STAMPROFILEADV StatEEPROMRead;
1213 STAMPROFILEADV StatEEPROMWrite;
1214 STAMPROFILEADV StatIOReadRZ;
1215 STAMPROFILEADV StatIOReadR3;
1216 STAMPROFILEADV StatIOWriteRZ;
1217 STAMPROFILEADV StatIOWriteR3;
1218 STAMPROFILEADV StatLateIntTimer;
1219 STAMCOUNTER StatLateInts;
1220 STAMCOUNTER StatIntsRaised;
1221 STAMCOUNTER StatIntsPrevented;
1222 STAMPROFILEADV StatReceive;
1223 STAMPROFILEADV StatReceiveCRC;
1224 STAMPROFILEADV StatReceiveFilter;
1225 STAMPROFILEADV StatReceiveStore;
1226 STAMPROFILEADV StatTransmitRZ;
1227 STAMPROFILEADV StatTransmitR3;
1228 STAMPROFILE StatTransmitSendRZ;
1229 STAMPROFILE StatTransmitSendR3;
1230 STAMPROFILE StatRxOverflow;
1231 STAMCOUNTER StatRxOverflowWakeup;
1232 STAMCOUNTER StatTxDescCtxNormal;
1233 STAMCOUNTER StatTxDescCtxTSE;
1234 STAMCOUNTER StatTxDescLegacy;
1235 STAMCOUNTER StatTxDescData;
1236 STAMCOUNTER StatTxDescTSEData;
1237 STAMCOUNTER StatTxPathFallback;
1238 STAMCOUNTER StatTxPathGSO;
1239 STAMCOUNTER StatTxPathRegular;
1240 STAMCOUNTER StatPHYAccesses;
1241 STAMCOUNTER aStatRegWrites[E1K_NUM_OF_REGS];
1242 STAMCOUNTER aStatRegReads[E1K_NUM_OF_REGS];
1243#endif /* VBOX_WITH_STATISTICS */
1244
1245#ifdef E1K_INT_STATS
1246 /* Internal stats */
1247 uint64_t u64ArmedAt;
1248 uint64_t uStatMaxTxDelay;
1249 uint32_t uStatInt;
1250 uint32_t uStatIntTry;
1251 uint32_t uStatIntLower;
1252 uint32_t uStatIntDly;
1253 int32_t iStatIntLost;
1254 int32_t iStatIntLostOne;
1255 uint32_t uStatDisDly;
1256 uint32_t uStatIntSkip;
1257 uint32_t uStatIntLate;
1258 uint32_t uStatIntMasked;
1259 uint32_t uStatIntEarly;
1260 uint32_t uStatIntRx;
1261 uint32_t uStatIntTx;
1262 uint32_t uStatIntICS;
1263 uint32_t uStatIntRDTR;
1264 uint32_t uStatIntRXDMT0;
1265 uint32_t uStatIntTXQE;
1266 uint32_t uStatTxNoRS;
1267 uint32_t uStatTxIDE;
1268 uint32_t uStatTxDelayed;
1269 uint32_t uStatTxDelayExp;
1270 uint32_t uStatTAD;
1271 uint32_t uStatTID;
1272 uint32_t uStatRAD;
1273 uint32_t uStatRID;
1274 uint32_t uStatRxFrm;
1275 uint32_t uStatTxFrm;
1276 uint32_t uStatDescCtx;
1277 uint32_t uStatDescDat;
1278 uint32_t uStatDescLeg;
1279 uint32_t uStatTx1514;
1280 uint32_t uStatTx2962;
1281 uint32_t uStatTx4410;
1282 uint32_t uStatTx5858;
1283 uint32_t uStatTx7306;
1284 uint32_t uStatTx8754;
1285 uint32_t uStatTx16384;
1286 uint32_t uStatTx32768;
1287 uint32_t uStatTxLarge;
1288 uint32_t uStatAlign;
1289#endif /* E1K_INT_STATS */
1290};
1291typedef struct E1kState_st E1KSTATE;
1292/** Pointer to the E1000 device state. */
1293typedef E1KSTATE *PE1KSTATE;
1294
1295#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1296
1297/* Forward declarations ******************************************************/
1298static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread);
1299
1300static int e1kRegReadUnimplemented (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1301static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1302static int e1kRegReadAutoClear (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1303static int e1kRegReadDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1304static int e1kRegWriteDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1305#if 0 /* unused */
1306static int e1kRegReadCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1307#endif
1308static int e1kRegWriteCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1309static int e1kRegReadEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1310static int e1kRegWriteEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1311static int e1kRegWriteEERD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1312static int e1kRegWriteMDIC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1313static int e1kRegReadICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1314static int e1kRegWriteICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1315static int e1kRegWriteICS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1316static int e1kRegWriteIMS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1317static int e1kRegWriteIMC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1318static int e1kRegWriteRCTL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1319static int e1kRegWritePBA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1320static int e1kRegWriteRDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1321static int e1kRegWriteRDTR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1322static int e1kRegWriteTDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1323static int e1kRegReadMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1324static int e1kRegWriteMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1325static int e1kRegReadRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1326static int e1kRegWriteRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1327static int e1kRegReadVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1328static int e1kRegWriteVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1329
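/*
 * Editor's note: an illustrative sketch (not the actual MMIO handler) of how a register
 * write could be dispatched through the map declared below: find the entry covering the
 * offset and invoke its write callback.  A linear search is used here for brevity; the
 * first E1K_NUM_OF_BINARY_SEARCHABLE entries have strictly increasing offsets and could
 * be binary-searched instead.
 */
#if 0
static int e1kSketchDispatchWrite(PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
{
    for (uint32_t i = 0; i < E1K_NUM_OF_REGS; i++)
        if (   offReg >= g_aE1kRegMap[i].offset
            && offReg <  g_aE1kRegMap[i].offset + g_aE1kRegMap[i].size)
            return g_aE1kRegMap[i].pfnWrite(pThis, offReg, i, u32Value);
    return VINF_SUCCESS; /* writes to unknown registers are simply ignored in this sketch */
}
#endif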
1330/**
1331 * Register map table.
1332 *
1333 * Override pfnRead and pfnWrite to get register-specific behavior.
1334 */
1335static const struct E1kRegMap_st
1336{
1337 /** Register offset in the register space. */
1338 uint32_t offset;
1339 /** Size in bytes. Registers of size > 4 are in fact tables. */
1340 uint32_t size;
1341 /** Readable bits. */
1342 uint32_t readable;
1343 /** Writable bits. */
1344 uint32_t writable;
1345 /** Read callback. */
1346 int (*pfnRead)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1347 /** Write callback. */
1348 int (*pfnWrite)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1349 /** Abbreviated name. */
1350 const char *abbrev;
1351 /** Full name. */
1352 const char *name;
1353} g_aE1kRegMap[E1K_NUM_OF_REGS] =
1354{
1355 /* offset size read mask write mask read callback write callback abbrev full name */
1356 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1357 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1358 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1359 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1360 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1361 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1362 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1363 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1364 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1365 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1366 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1367 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1368 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1369 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1370 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1371 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1372 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1373 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1374 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1375 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1376 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1377 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1378 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1379 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1380 { 0x00e00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LEDCTL" , "LED Control" },
1381 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1382 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1383 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1384 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1385 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1386 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1387 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1388 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1389 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1390 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1391 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1392 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1393 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1394 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1395 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1396 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1397 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1398 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1399 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1400 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1401 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1402 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1403 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1404 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1405 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1406 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1407 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1408 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1409 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1410 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1411 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1412 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1413 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1414 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1415 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1416 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1417 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1418 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1419 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1420 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1421 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1422 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1423 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1424 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1425 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1426 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1427 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1428 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1429 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1430 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1431 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1432 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1433 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1434 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1435 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1436 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1437 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1438 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1439 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1440 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1441 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1442 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1443 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1444 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1445 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1446 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1447 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1448 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1449 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1450 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1451 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1452 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1453 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1454 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1455 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1456 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1457 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1458 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1459 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1460 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1461 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1462 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1463 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1464 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1465 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1466 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1467 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1468 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1469 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1470 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1471 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1472 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1473 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1474 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1475 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1476 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1477 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1478 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1479 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1480 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1481 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1482 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1483 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1484 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1485 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1486 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1487 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1488 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
1489 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
1490 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
1491};
1492
1493#ifdef LOG_ENABLED
1494
1495/**
1496 * Convert U32 value to hex string. Masked bytes are replaced with dots.
1497 *
1498 * @remarks The mask is applied with nibble (not bit) granularity (e.g. 000000FF).
1499 *
1500 * @returns The buffer.
1501 *
1502 * @param u32 The word to convert into string.
1503 * @param mask Selects which bytes to convert.
1504 * @param buf Where to put the result.
1505 */
1506static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1507{
1508 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1509 {
1510 if (mask & 0xF)
1511 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1512 else
1513 *ptr = '.';
1514 }
1515 buf[8] = 0;
1516 return buf;
1517}
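/*
 * Usage sketch (illustration only, the values below are made up):
 *   char szBuf[9];
 *   e1kU32toHex(0xDEADBEEF, 0xFF00FF00, szBuf);  // yields "DE..BE.."
 *   e1kU32toHex(0x12345678, 0x0000FFFF, szBuf);  // yields "....5678"
 */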
1518
1519/**
1520 * Returns timer name for debug purposes.
1521 *
1522 * @returns The timer name.
1523 *
1524 * @param pThis The device state structure.
1525 * @param pTimer The timer to get the name for.
1526 */
1527DECLINLINE(const char *) e1kGetTimerName(PE1KSTATE pThis, PTMTIMER pTimer)
1528{
1529 if (pTimer == pThis->CTX_SUFF(pTIDTimer))
1530 return "TID";
1531 if (pTimer == pThis->CTX_SUFF(pTADTimer))
1532 return "TAD";
1533 if (pTimer == pThis->CTX_SUFF(pRIDTimer))
1534 return "RID";
1535 if (pTimer == pThis->CTX_SUFF(pRADTimer))
1536 return "RAD";
1537 if (pTimer == pThis->CTX_SUFF(pIntTimer))
1538 return "Int";
1539 if (pTimer == pThis->CTX_SUFF(pTXDTimer))
1540 return "TXD";
1541 if (pTimer == pThis->CTX_SUFF(pLUTimer))
1542 return "LinkUp";
1543 return "unknown";
1544}
1545
1546 #endif /* LOG_ENABLED */
1547
1548/**
1549 * Arm a timer.
1550 *
1551 * @param pThis Pointer to the device state structure.
1552 * @param pTimer Pointer to the timer.
1553 * @param uExpireIn Expiration interval in microseconds.
1554 */
1555DECLINLINE(void) e1kArmTimer(PE1KSTATE pThis, PTMTIMER pTimer, uint32_t uExpireIn)
1556{
1557 if (pThis->fLocked)
1558 return;
1559
1560 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1561 pThis->szPrf, e1kGetTimerName(pThis, pTimer), uExpireIn));
1562 TMTimerSetMicro(pTimer, uExpireIn);
1563}
1564
1565#ifdef IN_RING3
1566/**
1567 * Cancel a timer.
1568 *
1569 * @param pThis Pointer to the device state structure.
1570 * @param pTimer Pointer to the timer.
1571 */
1572DECLINLINE(void) e1kCancelTimer(PE1KSTATE pThis, PTMTIMER pTimer)
1573{
1574 E1kLog2(("%s Stopping %s timer...\n",
1575 pThis->szPrf, e1kGetTimerName(pThis, pTimer)));
1576 int rc = TMTimerStop(pTimer);
1577 if (RT_FAILURE(rc))
1578 E1kLog2(("%s e1kCancelTimer: TMTimerStop() failed with %Rrc\n",
1579 pThis->szPrf, rc));
1580 RT_NOREF1(pThis);
1581}
1582#endif /* IN_RING3 */
1583
1584#define e1kCsEnter(ps, rc) PDMCritSectEnter(&ps->cs, rc)
1585#define e1kCsLeave(ps) PDMCritSectLeave(&ps->cs)
1586
1587#define e1kCsRxEnter(ps, rc) PDMCritSectEnter(&ps->csRx, rc)
1588#define e1kCsRxLeave(ps) PDMCritSectLeave(&ps->csRx)
1589#define e1kCsRxIsOwner(ps) PDMCritSectIsOwner(&ps->csRx)
1590
1591#ifndef E1K_WITH_TX_CS
1592# define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1593# define e1kCsTxLeave(ps) do { } while (0)
1594#else /* E1K_WITH_TX_CS */
1595# define e1kCsTxEnter(ps, rc) PDMCritSectEnter(&ps->csTx, rc)
1596# define e1kCsTxLeave(ps) PDMCritSectLeave(&ps->csTx)
1597#endif /* E1K_WITH_TX_CS */
1598
1599#ifdef IN_RING3
1600
1601/**
1602 * Wakeup the RX thread.
1603 */
1604static void e1kWakeupReceive(PPDMDEVINS pDevIns)
1605{
1606 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
1607 if ( pThis->fMaybeOutOfSpace
1608 && pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
1609 {
1610 STAM_COUNTER_INC(&pThis->StatRxOverflowWakeup);
1611 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", pThis->szPrf));
1612 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
1613 }
1614}
1615
1616/**
1617 * Hardware reset. Revert all registers to initial values.
1618 *
1619 * @param pThis The device state structure.
1620 */
1621static void e1kHardReset(PE1KSTATE pThis)
1622{
1623 E1kLog(("%s Hard reset triggered\n", pThis->szPrf));
1624 memset(pThis->auRegs, 0, sizeof(pThis->auRegs));
1625 memset(pThis->aRecAddr.au32, 0, sizeof(pThis->aRecAddr.au32));
1626#ifdef E1K_INIT_RA0
1627 memcpy(pThis->aRecAddr.au32, pThis->macConfigured.au8,
1628 sizeof(pThis->macConfigured.au8));
1629 pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;
1630#endif /* E1K_INIT_RA0 */
1631 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1632 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1633 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1634 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1635 Assert(GET_BITS(RCTL, BSIZE) == 0);
1636 pThis->u16RxBSize = 2048;
1637
1638 /* Reset promiscuous mode */
1639 if (pThis->pDrvR3)
1640 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, false);
1641
1642#ifdef E1K_WITH_TXD_CACHE
1643 int rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
1644 if (RT_LIKELY(rc == VINF_SUCCESS))
1645 {
1646 pThis->nTxDFetched = 0;
1647 pThis->iTxDCurrent = 0;
1648 pThis->fGSO = false;
1649 pThis->cbTxAlloc = 0;
1650 e1kCsTxLeave(pThis);
1651 }
1652#endif /* E1K_WITH_TXD_CACHE */
1653#ifdef E1K_WITH_RXD_CACHE
1654 if (RT_LIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1655 {
1656 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
1657 e1kCsRxLeave(pThis);
1658 }
1659#endif /* E1K_WITH_RXD_CACHE */
1660}
1661
1662#endif /* IN_RING3 */
1663
1664/**
1665 * Compute Internet checksum.
1666 *
1667 * @remarks Refer to http://www.netfor2.com/checksum.html for short intro.
1668 *
1669 * @param pvBuf The buffer to compute the checksum for.
1670 * @param cb The size of the buffer in bytes.
1673 *
1674 * @return The 1's complement of the 1's complement sum.
1675 *
1676 * @thread E1000_TX
1677 */
1678static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1679{
1680 uint32_t csum = 0;
1681 uint16_t *pu16 = (uint16_t *)pvBuf;
1682
1683 while (cb > 1)
1684 {
1685 csum += *pu16++;
1686 cb -= 2;
1687 }
1688 if (cb)
1689 csum += *(uint8_t*)pu16;
1690 while (csum >> 16)
1691 csum = (csum >> 16) + (csum & 0xFFFF);
1692 return ~csum;
1693}
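/*
 * Arithmetic sketch (numbers invented for illustration): summing the 16-bit
 * words 0xFFFF and 0x0002 gives 0x10001; folding the carry back in yields
 * 0x0002, and the function returns its one's complement truncated to 16 bits,
 * i.e. 0xFFFD.
 */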
1694
1695/**
1696 * Dump a packet to debug log.
1697 *
1698 * @param pThis The device state structure.
1699 * @param cpPacket The packet.
1700 * @param cb The size of the packet.
1701 * @param pszText A string denoting direction of packet transfer.
1702 * @thread E1000_TX
1703 */
1704DECLINLINE(void) e1kPacketDump(PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *pszText)
1705{
1706#ifdef DEBUG
1707 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1708 {
1709 Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
1710 pThis->szPrf, pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cb));
1711 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1712 {
1713 Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
1714 pThis->szPrf, cpPacket+14+8, cpPacket+14+24));
1715 if (*(cpPacket+14+6) == 0x6)
1716 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1717 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1718 }
1719 else if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x800)
1720 {
1721 Log4(("%s --- IPv4: %RTnaipv4 => %RTnaipv4\n",
1722 pThis->szPrf, *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16)));
1723 if (*(cpPacket+14+6) == 0x6)
1724 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1725 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1726 }
1727 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1728 e1kCsLeave(pThis);
1729 }
1730#else
1731 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1732 {
1733 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1734 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
1735 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cpPacket+14+8, cpPacket+14+24,
1736 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1737 else
1738 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
1739 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket,
1740 *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16),
1741 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1742 e1kCsLeave(pThis);
1743 }
1744 RT_NOREF2(cb, pszText);
1745#endif
1746}
1747
1748/**
1749 * Determine the type of transmit descriptor.
1750 *
1751 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1752 *
1753 * @param pDesc Pointer to descriptor union.
1754 * @thread E1000_TX
1755 */
1756DECLINLINE(int) e1kGetDescType(E1KTXDESC *pDesc)
1757{
1758 if (pDesc->legacy.cmd.fDEXT)
1759 return pDesc->context.dw2.u4DTYP;
1760 return E1K_DTYP_LEGACY;
1761}
1762
1763
1764#if defined(E1K_WITH_RXD_CACHE) && defined(IN_RING3) /* currently only used in ring-3 due to stack space requirements of the caller */
1765/**
1766 * Dump receive descriptor to debug log.
1767 *
1768 * @param pThis The device state structure.
1769 * @param pDesc Pointer to the descriptor.
1770 * @thread E1000_RX
1771 */
1772static void e1kPrintRDesc(PE1KSTATE pThis, E1KRXDESC *pDesc)
1773{
1774 RT_NOREF2(pThis, pDesc);
1775 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", pThis->szPrf, pDesc->u16Length));
1776 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1777 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1778 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1779 pDesc->status.fPIF ? "PIF" : "pif",
1780 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1781 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1782 pDesc->status.fVP ? "VP" : "vp",
1783 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1784 pDesc->status.fEOP ? "EOP" : "eop",
1785 pDesc->status.fDD ? "DD" : "dd",
1786 pDesc->status.fRXE ? "RXE" : "rxe",
1787 pDesc->status.fIPE ? "IPE" : "ipe",
1788 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1789 pDesc->status.fCE ? "CE" : "ce",
1790 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1791 E1K_SPEC_VLAN(pDesc->status.u16Special),
1792 E1K_SPEC_PRI(pDesc->status.u16Special)));
1793}
1794#endif /* E1K_WITH_RXD_CACHE && IN_RING3 */
1795
1796/**
1797 * Dump transmit descriptor to debug log.
1798 *
1799 * @param pThis The device state structure.
1800 * @param pDesc Pointer to descriptor union.
1801 * @param pszDir A string denoting direction of descriptor transfer
1802 * @thread E1000_TX
1803 */
1804static void e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, const char *pszDir,
1805 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
1806{
1807 RT_NOREF4(pThis, pDesc, pszDir, uLevel);
1808
1809 /*
1810 * Unfortunately we cannot use our format handler here because we want R0
1811 * logging as well.
1812 */
1813 switch (e1kGetDescType(pDesc))
1814 {
1815 case E1K_DTYP_CONTEXT:
1816 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
1817 pThis->szPrf, pszDir, pszDir));
1818 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
1819 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
1820 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
1821 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
1822 pDesc->context.dw2.fIDE ? " IDE":"",
1823 pDesc->context.dw2.fRS ? " RS" :"",
1824 pDesc->context.dw2.fTSE ? " TSE":"",
1825 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
1826 pDesc->context.dw2.fTCP ? "TCP":"UDP",
1827 pDesc->context.dw2.u20PAYLEN,
1828 pDesc->context.dw3.u8HDRLEN,
1829 pDesc->context.dw3.u16MSS,
1830 pDesc->context.dw3.fDD?"DD":""));
1831 break;
1832 case E1K_DTYP_DATA:
1833 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
1834 pThis->szPrf, pszDir, pDesc->data.cmd.u20DTALEN, pszDir));
1835 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1836 pDesc->data.u64BufAddr,
1837 pDesc->data.cmd.u20DTALEN));
1838 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
1839 pDesc->data.cmd.fIDE ? " IDE" :"",
1840 pDesc->data.cmd.fVLE ? " VLE" :"",
1841 pDesc->data.cmd.fRPS ? " RPS" :"",
1842 pDesc->data.cmd.fRS ? " RS" :"",
1843 pDesc->data.cmd.fTSE ? " TSE" :"",
1844 pDesc->data.cmd.fIFCS? " IFCS":"",
1845 pDesc->data.cmd.fEOP ? " EOP" :"",
1846 pDesc->data.dw3.fDD ? " DD" :"",
1847 pDesc->data.dw3.fEC ? " EC" :"",
1848 pDesc->data.dw3.fLC ? " LC" :"",
1849 pDesc->data.dw3.fTXSM? " TXSM":"",
1850 pDesc->data.dw3.fIXSM? " IXSM":"",
1851 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
1852 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
1853 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
1854 break;
1855 case E1K_DTYP_LEGACY:
1856 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
1857 pThis->szPrf, pszDir, pDesc->legacy.cmd.u16Length, pszDir));
1858 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1859 pDesc->data.u64BufAddr,
1860 pDesc->legacy.cmd.u16Length));
1861 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
1862 pDesc->legacy.cmd.fIDE ? " IDE" :"",
1863 pDesc->legacy.cmd.fVLE ? " VLE" :"",
1864 pDesc->legacy.cmd.fRPS ? " RPS" :"",
1865 pDesc->legacy.cmd.fRS ? " RS" :"",
1866 pDesc->legacy.cmd.fIC ? " IC" :"",
1867 pDesc->legacy.cmd.fIFCS? " IFCS":"",
1868 pDesc->legacy.cmd.fEOP ? " EOP" :"",
1869 pDesc->legacy.dw3.fDD ? " DD" :"",
1870 pDesc->legacy.dw3.fEC ? " EC" :"",
1871 pDesc->legacy.dw3.fLC ? " LC" :"",
1872 pDesc->legacy.cmd.u8CSO,
1873 pDesc->legacy.dw3.u8CSS,
1874 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
1875 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
1876 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
1877 break;
1878 default:
1879 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
1880 pThis->szPrf, pszDir, pszDir));
1881 break;
1882 }
1883}
1884
1885/**
1886 * Raise an interrupt later.
1887 *
1888 * @param pThis The device state structure.
 * @param uNanoseconds The delay before raising the interrupt, in nanoseconds.
1889 */
1890inline void e1kPostponeInterrupt(PE1KSTATE pThis, uint64_t uNanoseconds)
1891{
1892 if (!TMTimerIsActive(pThis->CTX_SUFF(pIntTimer)))
1893 TMTimerSetNano(pThis->CTX_SUFF(pIntTimer), uNanoseconds);
1894}
1895
1896/**
1897 * Raise interrupt if not masked.
1898 *
1899 * @param pThis The device state structure.
 * @param rcBusy Status code to return when the critical section is busy.
 * @param u32IntCause The interrupt cause bit(s) to set in ICR.
1900 */
1901static int e1kRaiseInterrupt(PE1KSTATE pThis, int rcBusy, uint32_t u32IntCause = 0)
1902{
1903 int rc = e1kCsEnter(pThis, rcBusy);
1904 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1905 return rc;
1906
1907 E1K_INC_ISTAT_CNT(pThis->uStatIntTry);
1908 ICR |= u32IntCause;
1909 if (ICR & IMS)
1910 {
1911 if (pThis->fIntRaised)
1912 {
1913 E1K_INC_ISTAT_CNT(pThis->uStatIntSkip);
1914 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
1915 pThis->szPrf, ICR & IMS));
1916 }
1917 else
1918 {
1919 uint64_t tsNow = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
1920 if (!!ITR && tsNow - pThis->u64AckedAt < ITR * 256
1921 && pThis->fItrEnabled && (pThis->fItrRxEnabled || !(ICR & ICR_RXT0)))
1922 {
1923 E1K_INC_ISTAT_CNT(pThis->uStatIntEarly);
1924 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
1925 pThis->szPrf, (uint32_t)(tsNow - pThis->u64AckedAt), ITR * 256));
1926 e1kPostponeInterrupt(pThis, ITR * 256);
1927 }
1928 else
1929 {
1930
1931 /* Since we are delivering the interrupt now
1932 * there is no need to do it later -- stop the timer.
1933 */
1934 TMTimerStop(pThis->CTX_SUFF(pIntTimer));
1935 E1K_INC_ISTAT_CNT(pThis->uStatInt);
1936 STAM_COUNTER_INC(&pThis->StatIntsRaised);
1937 /* Got at least one unmasked interrupt cause */
1938 pThis->fIntRaised = true;
1939 /* Raise(1) INTA(0) */
1940 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
1941 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 1);
1942 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
1943 pThis->szPrf, ICR & IMS));
1944 }
1945 }
1946 }
1947 else
1948 {
1949 E1K_INC_ISTAT_CNT(pThis->uStatIntMasked);
1950 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
1951 pThis->szPrf, ICR, IMS));
1952 }
1953 e1kCsLeave(pThis);
1954 return VINF_SUCCESS;
1955}
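/*
 * Throttling sketch (figures for illustration only): ITR is expressed in
 * 256 ns units, so ITR = 200 enforces at least 200 * 256 = 51200 ns (51.2 us)
 * between interrupts, i.e. roughly 19500 interrupts per second at most; an
 * interrupt arriving earlier is postponed via e1kPostponeInterrupt().
 */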
1956
1957/**
1958 * Compute the physical address of the descriptor.
1959 *
1960 * @returns the physical address of the descriptor.
1961 *
1962 * @param baseHigh High-order 32 bits of descriptor table address.
1963 * @param baseLow Low-order 32 bits of descriptor table address.
1964 * @param idxDesc The descriptor index in the table.
1965 */
1966DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
1967{
1968 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
1969 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
1970}
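/*
 * Worked example (illustrative figures): with baseHigh = 0, baseLow =
 * 0x00010000 and idxDesc = 4, the 16-byte descriptor size gives
 * 0x00010000 + 4 * 16 = 0x00010040 as the physical address.
 */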
1971
1972#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
1973/**
1974 * Advance the head pointer of the receive descriptor queue.
1975 *
1976 * @remarks RDH always points to the next available RX descriptor.
1977 *
1978 * @param pThis The device state structure.
1979 */
1980DECLINLINE(void) e1kAdvanceRDH(PE1KSTATE pThis)
1981{
1982 Assert(e1kCsRxIsOwner(pThis));
1983 //e1kCsEnter(pThis, RT_SRC_POS);
1984 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
1985 RDH = 0;
1986 /*
1987 * Compute current receive queue length and fire RXDMT0 interrupt
1988 * if we are low on receive buffers
1989 */
1990 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
1991 /*
1992 * The minimum threshold is controlled by RDMTS bits of RCTL:
1993 * 00 = 1/2 of RDLEN
1994 * 01 = 1/4 of RDLEN
1995 * 10 = 1/8 of RDLEN
1996 * 11 = reserved
1997 */
1998 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
1999 if (uRQueueLen <= uMinRQThreshold)
2000 {
2001 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
2002 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
2003 pThis->szPrf, RDH, RDT, uRQueueLen, uMinRQThreshold));
2004 E1K_INC_ISTAT_CNT(pThis->uStatIntRXDMT0);
2005 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXDMT0);
2006 }
2007 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
2008 pThis->szPrf, RDH, RDT, uRQueueLen));
2009 //e1kCsLeave(pThis);
2010}
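/*
 * Threshold sketch (figures invented): with RDLEN = 4096 the ring holds
 * 4096 / 16 = 256 descriptors; RDMTS = 01b makes the divisor 2 << 1 = 4,
 * so the RXDMT0 interrupt fires once 64 or fewer descriptors remain
 * available to the hardware.
 */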
2011#endif /* IN_RING3 */
2012
2013#ifdef E1K_WITH_RXD_CACHE
2014
2015/**
2016 * Return the number of RX descriptors that belong to the hardware.
2017 *
2018 * @returns the number of available descriptors in RX ring.
2019 * @param pThis The device state structure.
2020 * @thread ???
2021 */
2022DECLINLINE(uint32_t) e1kGetRxLen(PE1KSTATE pThis)
2023{
2024 /*
2025 * Make sure RDT won't change during computation. EMT may modify RDT at
2026 * any moment.
2027 */
2028 uint32_t rdt = RDT;
2029 return (RDH > rdt ? RDLEN/sizeof(E1KRXDESC) : 0) + rdt - RDH;
2030}
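/*
 * Wrap-around sketch (figures invented): in a 256-descriptor ring,
 * RDH = 250 and RDT = 10 give 256 + 10 - 250 = 16 descriptors still owned
 * by the hardware, while RDH = 10 and RDT = 250 give 240.
 */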
2031
2032DECLINLINE(unsigned) e1kRxDInCache(PE1KSTATE pThis)
2033{
2034 return pThis->nRxDFetched > pThis->iRxDCurrent ?
2035 pThis->nRxDFetched - pThis->iRxDCurrent : 0;
2036}
2037
2038DECLINLINE(unsigned) e1kRxDIsCacheEmpty(PE1KSTATE pThis)
2039{
2040 return pThis->iRxDCurrent >= pThis->nRxDFetched;
2041}
2042
2043/**
2044 * Load receive descriptors from guest memory. The caller must own the Rx
2045 * critical section.
2046 *
2047 * We need two physical reads in case the tail wrapped around the end of the
2048 * RX descriptor ring.
2049 *
2050 * @returns the actual number of descriptors fetched.
2051 * @param pThis The device state structure.
2054 * @thread EMT, RX
2055 */
2056DECLINLINE(unsigned) e1kRxDPrefetch(PE1KSTATE pThis)
2057{
2058 /* We've already loaded pThis->nRxDFetched descriptors past RDH. */
2059 unsigned nDescsAvailable = e1kGetRxLen(pThis) - e1kRxDInCache(pThis);
2060 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pThis->nRxDFetched);
2061 unsigned nDescsTotal = RDLEN / sizeof(E1KRXDESC);
2062 Assert(nDescsTotal != 0);
2063 if (nDescsTotal == 0)
2064 return 0;
2065 unsigned nFirstNotLoaded = (RDH + e1kRxDInCache(pThis)) % nDescsTotal;
2066 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
2067 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
2068 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
2069 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
2070 nFirstNotLoaded, nDescsInSingleRead));
2071 if (nDescsToFetch == 0)
2072 return 0;
2073 E1KRXDESC* pFirstEmptyDesc = &pThis->aRxDescriptors[pThis->nRxDFetched];
2074 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
2075 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
2076 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
2077 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
2078 // unsigned i, j;
2079 // for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
2080 // {
2081 // pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
2082 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2083 // }
2084 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
2085 pThis->szPrf, nDescsInSingleRead,
2086 RDBAH, RDBAL + RDH * sizeof(E1KRXDESC),
2087 nFirstNotLoaded, RDLEN, RDH, RDT));
2088 if (nDescsToFetch > nDescsInSingleRead)
2089 {
2090 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
2091 ((uint64_t)RDBAH << 32) + RDBAL,
2092 pFirstEmptyDesc + nDescsInSingleRead,
2093 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
2094 // Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
2095 // for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
2096 // {
2097 // pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
2098 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2099 // }
2100 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
2101 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
2102 RDBAH, RDBAL));
2103 }
2104 pThis->nRxDFetched += nDescsToFetch;
2105 return nDescsToFetch;
2106}
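/*
 * Split-read sketch (figures invented): with 256 descriptors in the ring,
 * the first unloaded descriptor at index 250 and 16 descriptors to fetch,
 * the first PDMDevHlpPhysRead() covers indices 250-255 (6 descriptors) and
 * the second one covers indices 0-9 (10 descriptors) from the ring base.
 */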
2107
2108# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2109
2110/**
2111 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2112 * RX ring if the cache is empty.
2113 *
2114 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2115 * go out of sync with RDH which will cause trouble when EMT checks if the
2116 * cache is empty to do pre-fetch, see @bugref{6217}.
2117 *
2118 * @param pThis The device state structure.
2119 * @thread RX
2120 */
2121DECLINLINE(E1KRXDESC*) e1kRxDGet(PE1KSTATE pThis)
2122{
2123 Assert(e1kCsRxIsOwner(pThis));
2124 /* Check the cache first. */
2125 if (pThis->iRxDCurrent < pThis->nRxDFetched)
2126 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2127 /* Cache is empty, reset it and check if we can fetch more. */
2128 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2129 if (e1kRxDPrefetch(pThis))
2130 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2131 /* Out of Rx descriptors. */
2132 return NULL;
2133}
2134
2135
2136/**
2137 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2138 * pointer. The descriptor gets written back to the RXD ring.
2139 *
2140 * @param pThis The device state structure.
2141 * @param pDesc The descriptor being "returned" to the RX ring.
2142 * @thread RX
2143 */
2144DECLINLINE(void) e1kRxDPut(PE1KSTATE pThis, E1KRXDESC* pDesc)
2145{
2146 Assert(e1kCsRxIsOwner(pThis));
2147 pThis->iRxDCurrent++;
2148 // Assert(pDesc >= pThis->aRxDescriptors);
2149 // Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2150 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2151 // uint32_t rdh = RDH;
2152 // Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
2153 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2154 e1kDescAddr(RDBAH, RDBAL, RDH),
2155 pDesc, sizeof(E1KRXDESC));
2156 e1kAdvanceRDH(pThis);
2157 e1kPrintRDesc(pThis, pDesc);
2158}
2159
2160/**
2161 * Store a fragment of the received packet at the specified address.
2162 *
2163 * @param pThis The device state structure.
2164 * @param pDesc The next available RX descriptor.
2165 * @param pvBuf The fragment.
2166 * @param cb The size of the fragment.
2167 */
2168static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2169{
2170 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2171 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2172 pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2173 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2174 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2175 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2176}
2177
2178# endif
2179
2180#else /* !E1K_WITH_RXD_CACHE */
2181
2182/**
2183 * Store a fragment of the received packet that fits into the next available RX
2184 * buffer.
2185 *
2186 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2187 *
2188 * @param pThis The device state structure.
2189 * @param pDesc The next available RX descriptor.
2190 * @param pvBuf The fragment.
2191 * @param cb The size of the fragment.
2192 */
2193static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2194{
2195 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2196 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2197 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2198 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2199 /* Write back the descriptor */
2200 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2201 e1kPrintRDesc(pThis, pDesc);
2202 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2203 /* Advance head */
2204 e1kAdvanceRDH(pThis);
2205 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
2206 if (pDesc->status.fEOP)
2207 {
2208 /* Complete packet has been stored -- it is time to let the guest know. */
2209#ifdef E1K_USE_RX_TIMERS
2210 if (RDTR)
2211 {
2212 /* Arm the timer to fire in RDTR usec (discard .024) */
2213 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2214 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2215 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2216 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2217 }
2218 else
2219 {
2220#endif
2221 /* 0 delay means immediate interrupt */
2222 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2223 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2224#ifdef E1K_USE_RX_TIMERS
2225 }
2226#endif
2227 }
2228 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2229}
2230
2231#endif /* !E1K_WITH_RXD_CACHE */
2232
2233/**
2234 * Returns true if it is a broadcast packet.
2235 *
2236 * @returns true if destination address indicates broadcast.
2237 * @param pvBuf The ethernet packet.
2238 */
2239DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2240{
2241 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2242 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2243}
2244
2245/**
2246 * Returns true if it is a multicast packet.
2247 *
2248 * @remarks returns true for broadcast packets as well.
2249 * @returns true if destination address indicates multicast.
2250 * @param pvBuf The ethernet packet.
2251 */
2252DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2253{
2254 return (*(char*)pvBuf) & 1;
2255}
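/*
 * Example: the I/G bit (least significant bit of the first destination
 * octet) is set for group addresses, e.g. 01:00:5E:00:00:01 (IPv4
 * multicast) and FF:FF:FF:FF:FF:FF (broadcast), but clear for unicast
 * addresses such as 52:54:00:12:34:56.
 */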
2256
2257#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2258/**
2259 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2260 *
2261 * @remarks We emulate checksum offloading for major packets types only.
2262 *
2263 * @returns VBox status code.
2264 * @param pThis The device state structure.
2265 * @param pFrame The available data.
2266 * @param cb Number of bytes available in the buffer.
2267 * @param pStatus Pointer to the descriptor status fields to update.
2268 */
2269static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2270{
2271 /** @todo
2272 * It is not safe to bypass checksum verification for packets coming
2273 * from the real wire. We are currently unable to tell where packets are
2274 * coming from, so we tell the driver to ignore our checksum flags
2275 * and do the verification in software.
2276 */
2277# if 0
2278 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2279
2280 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", pThis->szPrf, uEtherType));
2281
2282 switch (uEtherType)
2283 {
2284 case 0x800: /* IPv4 */
2285 {
2286 pStatus->fIXSM = false;
2287 pStatus->fIPCS = true;
2288 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2289 /* TCP/UDP checksum offloading works with TCP and UDP only */
2290 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2291 break;
2292 }
2293 case 0x86DD: /* IPv6 */
2294 pStatus->fIXSM = false;
2295 pStatus->fIPCS = false;
2296 pStatus->fTCPCS = true;
2297 break;
2298 default: /* ARP, VLAN, etc. */
2299 pStatus->fIXSM = true;
2300 break;
2301 }
2302# else
2303 pStatus->fIXSM = true;
2304 RT_NOREF_PV(pThis); RT_NOREF_PV(pFrame); RT_NOREF_PV(cb);
2305# endif
2306 return VINF_SUCCESS;
2307}
2308#endif /* IN_RING3 */
2309
2310/**
2311 * Pad and store received packet.
2312 *
2313 * @remarks Make sure that the packet appears to upper layer as one coming
2314 * @remarks Make sure that the packet appears to the upper layer as one coming
2315 *
2316 * @returns VBox status code.
2317 * @param pThis The device state structure.
2318 * @param pvBuf The available data.
2319 * @param cb Number of bytes available in the buffer.
2320 * @param status Bit fields containing status info.
2321 */
2322static int e1kHandleRxPacket(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
2323{
2324#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2325 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2326 uint8_t *ptr = rxPacket;
2327
2328 int rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2329 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2330 return rc;
2331
2332 if (cb > 70) /* unqualified guess */
2333 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
2334
2335 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2336 Assert(cb > 16);
2337 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2338 E1kLog3(("%s Max RX packet size is %u\n", pThis->szPrf, cbMax));
2339 if (status.fVP)
2340 {
2341 /* VLAN packet -- strip VLAN tag in VLAN mode */
2342 if ((CTRL & CTRL_VME) && cb > 16)
2343 {
2344 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2345 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2346 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2347 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2348 cb -= 4;
2349 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2350 pThis->szPrf, status.u16Special, cb));
2351 }
2352 else
2353 status.fVP = false; /* Set VP only if we stripped the tag */
2354 }
2355 else
2356 memcpy(rxPacket, pvBuf, cb);
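    /*
     * Tag layout sketch (illustrative): a tagged frame reads
     * dst[6] src[6] 0x81 0x00 TCI[2] type[2] payload..., so the stripping
     * above copies bytes 0-11, takes the TCI from bytes 14-15 (u16Ptr[7])
     * and copies the remainder starting at byte 16, shortening cb by 4.
     */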
2357 /* Pad short packets */
2358 if (cb < 60)
2359 {
2360 memset(rxPacket + cb, 0, 60 - cb);
2361 cb = 60;
2362 }
2363 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2364 {
2365 STAM_PROFILE_ADV_START(&pThis->StatReceiveCRC, a);
2366 /*
2367 * Add FCS if CRC stripping is not enabled. Since the value of CRC
2368 * is ignored by most of drivers we may as well save us the trouble
2369 * of calculating it (see EthernetCRC CFGM parameter).
2370 */
2371 if (pThis->fEthernetCRC)
2372 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2373 cb += sizeof(uint32_t);
2374 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveCRC, a);
2375 E1kLog3(("%s Added FCS (cb=%u)\n", pThis->szPrf, cb));
2376 }
2377 /* Compute checksum of complete packet */
2378 uint16_t checksum = e1kCSum16(rxPacket + GET_BITS(RXCSUM, PCSS), cb);
2379 e1kRxChecksumOffload(pThis, rxPacket, cb, &status);
2380
2381 /* Update stats */
2382 E1K_INC_CNT32(GPRC);
2383 if (e1kIsBroadcast(pvBuf))
2384 E1K_INC_CNT32(BPRC);
2385 else if (e1kIsMulticast(pvBuf))
2386 E1K_INC_CNT32(MPRC);
2387 /* Update octet receive counter */
2388 E1K_ADD_CNT64(GORCL, GORCH, cb);
2389 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
2390 if (cb == 64)
2391 E1K_INC_CNT32(PRC64);
2392 else if (cb < 128)
2393 E1K_INC_CNT32(PRC127);
2394 else if (cb < 256)
2395 E1K_INC_CNT32(PRC255);
2396 else if (cb < 512)
2397 E1K_INC_CNT32(PRC511);
2398 else if (cb < 1024)
2399 E1K_INC_CNT32(PRC1023);
2400 else
2401 E1K_INC_CNT32(PRC1522);
2402
2403 E1K_INC_ISTAT_CNT(pThis->uStatRxFrm);
2404
2405# ifdef E1K_WITH_RXD_CACHE
2406 while (cb > 0)
2407 {
2408 E1KRXDESC *pDesc = e1kRxDGet(pThis);
2409
2410 if (pDesc == NULL)
2411 {
2412 E1kLog(("%s Out of receive buffers, dropping the packet "
2413 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2414 pThis->szPrf, cb, e1kRxDInCache(pThis), RDH, RDT));
2415 break;
2416 }
2417# else /* !E1K_WITH_RXD_CACHE */
2418 if (RDH == RDT)
2419 {
2420 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2421 pThis->szPrf));
2422 }
2423 /* Store the packet to receive buffers */
2424 while (RDH != RDT)
2425 {
2426 /* Load the descriptor pointed by head */
2427 E1KRXDESC desc, *pDesc = &desc;
2428 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
2429 &desc, sizeof(desc));
2430# endif /* !E1K_WITH_RXD_CACHE */
2431 if (pDesc->u64BufAddr)
2432 {
2433 /* Update descriptor */
2434 pDesc->status = status;
2435 pDesc->u16Checksum = checksum;
2436 pDesc->status.fDD = true;
2437
2438 /*
2439 * We need to leave Rx critical section here or we risk deadlocking
2440 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2441 * page or has an access handler associated with it.
2442 * Note that it is safe to leave the critical section here since
2443 * e1kRegWriteRDT() never modifies RDH. It never touches already
2444 * fetched RxD cache entries either.
2445 */
2446 if (cb > pThis->u16RxBSize)
2447 {
2448 pDesc->status.fEOP = false;
2449 e1kCsRxLeave(pThis);
2450 e1kStoreRxFragment(pThis, pDesc, ptr, pThis->u16RxBSize);
2451 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2452 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2453 return rc;
2454 ptr += pThis->u16RxBSize;
2455 cb -= pThis->u16RxBSize;
2456 }
2457 else
2458 {
2459 pDesc->status.fEOP = true;
2460 e1kCsRxLeave(pThis);
2461 e1kStoreRxFragment(pThis, pDesc, ptr, cb);
2462# ifdef E1K_WITH_RXD_CACHE
2463 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2464 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2465 return rc;
2466 cb = 0;
2467# else /* !E1K_WITH_RXD_CACHE */
2468 pThis->led.Actual.s.fReading = 0;
2469 return VINF_SUCCESS;
2470# endif /* !E1K_WITH_RXD_CACHE */
2471 }
2472 /*
2473 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2474 * is not defined.
2475 */
2476 }
2477# ifdef E1K_WITH_RXD_CACHE
2478 /* Write back the descriptor. */
2479 pDesc->status.fDD = true;
2480 e1kRxDPut(pThis, pDesc);
2481# else /* !E1K_WITH_RXD_CACHE */
2482 else
2483 {
2484 /* Write back the descriptor. */
2485 pDesc->status.fDD = true;
2486 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2487 e1kDescAddr(RDBAH, RDBAL, RDH),
2488 pDesc, sizeof(E1KRXDESC));
2489 e1kAdvanceRDH(pThis);
2490 }
2491# endif /* !E1K_WITH_RXD_CACHE */
2492 }
2493
2494 if (cb > 0)
2495 E1kLog(("%s Out of receive buffers, dropping %u bytes\n", pThis->szPrf, cb));
2496
2497 pThis->led.Actual.s.fReading = 0;
2498
2499 e1kCsRxLeave(pThis);
2500# ifdef E1K_WITH_RXD_CACHE
2501 /* Complete packet has been stored -- it is time to let the guest know. */
2502# ifdef E1K_USE_RX_TIMERS
2503 if (RDTR)
2504 {
2505 /* Arm the timer to fire in RDTR usec (discard .024) */
2506 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2507 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2508 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2509 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2510 }
2511 else
2512 {
2513# endif /* E1K_USE_RX_TIMERS */
2514 /* 0 delay means immediate interrupt */
2515 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2516 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2517# ifdef E1K_USE_RX_TIMERS
2518 }
2519# endif /* E1K_USE_RX_TIMERS */
2520# endif /* E1K_WITH_RXD_CACHE */
2521
2522 return VINF_SUCCESS;
2523#else /* !IN_RING3 */
2524 RT_NOREF_PV(pThis); RT_NOREF_PV(pvBuf); RT_NOREF_PV(cb); RT_NOREF_PV(status);
2525 return VERR_INTERNAL_ERROR_2;
2526#endif /* !IN_RING3 */
2527}
2528
2529
2530/**
2531 * Bring the link up after the configured delay, 5 seconds by default.
2532 *
2533 * @param pThis The device state structure.
2534 * @thread any
2535 */
2536DECLINLINE(void) e1kBringLinkUpDelayed(PE1KSTATE pThis)
2537{
2538 E1kLog(("%s Will bring up the link in %d seconds...\n",
2539 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
2540 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), pThis->cMsLinkUpDelay * 1000);
2541}
2542
2543#ifdef IN_RING3
2544/**
2545 * Bring up the link immediately.
2546 *
2547 * @param pThis The device state structure.
2548 */
2549DECLINLINE(void) e1kR3LinkUp(PE1KSTATE pThis)
2550{
2551 E1kLog(("%s Link is up\n", pThis->szPrf));
2552 STATUS |= STATUS_LU;
2553 Phy::setLinkStatus(&pThis->phy, true);
2554 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2555 if (pThis->pDrvR3)
2556 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_UP);
2557}
2558
2559/**
2560 * Bring down the link immediately.
2561 *
2562 * @param pThis The device state structure.
2563 */
2564DECLINLINE(void) e1kR3LinkDown(PE1KSTATE pThis)
2565{
2566 E1kLog(("%s Link is down\n", pThis->szPrf));
2567 STATUS &= ~STATUS_LU;
2568 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2569 if (pThis->pDrvR3)
2570 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2571}
2572
2573/**
2574 * Bring down the link temporarily.
2575 *
2576 * @param pThis The device state structure.
2577 */
2578DECLINLINE(void) e1kR3LinkDownTemp(PE1KSTATE pThis)
2579{
2580 E1kLog(("%s Link is down temporarily\n", pThis->szPrf));
2581 STATUS &= ~STATUS_LU;
2582 Phy::setLinkStatus(&pThis->phy, false);
2583 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2584 /*
2585 * Notifying the associated driver that the link went down (even temporarily)
2586 * seems to be the right thing, but it was not done before. This may cause
2587 * a regression if the driver does not expect the link to go down as a result
2588 * of sending PDMNETWORKLINKSTATE_DOWN_RESUME to this device. Earlier versions
2589 * of code notified the driver that the link was up! See @bugref{7057}.
2590 */
2591 if (pThis->pDrvR3)
2592 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2593 e1kBringLinkUpDelayed(pThis);
2594}
2595#endif /* IN_RING3 */
2596
2597#if 0 /* unused */
2598/**
2599 * Read handler for Device Control register.
2600 *
2601 * Reflects the MDIO pin state read from the PHY when MDIO is configured as input.
2602 *
2603 * @returns VBox status code.
2604 *
2605 * @param pThis The device state structure.
2606 * @param offset Register offset in memory-mapped frame.
2607 * @param index Register index in register array.
2608 * @param pu32Value Where to store the read value.
2609 */
2610static int e1kRegReadCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2611{
2612 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2613 pThis->szPrf, (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2614 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2615 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2616 {
2617 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2618 if (Phy::readMDIO(&pThis->phy))
2619 *pu32Value = CTRL | CTRL_MDIO;
2620 else
2621 *pu32Value = CTRL & ~CTRL_MDIO;
2622 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2623 pThis->szPrf, !!(*pu32Value & CTRL_MDIO)));
2624 }
2625 else
2626 {
2627 /* MDIO pin is used for output, ignore it */
2628 *pu32Value = CTRL;
2629 }
2630 return VINF_SUCCESS;
2631}
2632#endif /* unused */
2633
2634/**
2635 * Write handler for Device Control register.
2636 *
2637 * Handles reset.
2638 *
2639 * @param pThis The device state structure.
2640 * @param offset Register offset in memory-mapped frame.
2641 * @param index Register index in register array.
2642 * @param value The value to store.
2643 * @param mask Used to implement partial writes (8 and 16-bit).
2644 * @thread EMT
2645 */
2646static int e1kRegWriteCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2647{
2648 int rc = VINF_SUCCESS;
2649
2650 if (value & CTRL_RESET)
2651 { /* RST */
2652#ifndef IN_RING3
2653 return VINF_IOM_R3_MMIO_WRITE;
2654#else
2655 e1kHardReset(pThis);
2656#endif
2657 }
2658 else
2659 {
2660 if ( (value & CTRL_SLU)
2661 && pThis->fCableConnected
2662 && !(STATUS & STATUS_LU))
2663 {
2664 /* The driver indicates that we should bring up the link */
2665 /* Do so in 5 seconds (by default). */
2666 e1kBringLinkUpDelayed(pThis);
2667 /*
2668 * Change the status (but not PHY status) anyway as Windows expects
2669 * it for 82543GC.
2670 */
2671 STATUS |= STATUS_LU;
2672 }
2673 if (value & CTRL_VME)
2674 {
2675 E1kLog(("%s VLAN Mode Enabled\n", pThis->szPrf));
2676 }
2677 E1kLog(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2678 pThis->szPrf, (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2679 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2680 if (value & CTRL_MDC)
2681 {
2682 if (value & CTRL_MDIO_DIR)
2683 {
2684 E1kLog(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2685 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2686 Phy::writeMDIO(&pThis->phy, !!(value & CTRL_MDIO));
2687 }
2688 else
2689 {
2690 if (Phy::readMDIO(&pThis->phy))
2691 value |= CTRL_MDIO;
2692 else
2693 value &= ~CTRL_MDIO;
2694 E1kLog(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n",
2695 pThis->szPrf, !!(value & CTRL_MDIO)));
2696 }
2697 }
2698 rc = e1kRegWriteDefault(pThis, offset, index, value);
2699 }
2700
2701 return rc;
2702}
2703
2704/**
2705 * Write handler for EEPROM/Flash Control/Data register.
2706 *
2707 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2708 *
2709 * @param pThis The device state structure.
2710 * @param offset Register offset in memory-mapped frame.
2711 * @param index Register index in register array.
2712 * @param value The value to store.
2713 * @param mask Used to implement partial writes (8 and 16-bit).
2714 * @thread EMT
2715 */
2716static int e1kRegWriteEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2717{
2718 RT_NOREF(offset, index);
2719#ifdef IN_RING3
2720 /* So far we are concerned with lower byte only */
2721 if ((EECD & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2722 {
2723 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2724 /* Note: 82543GC does not need to request EEPROM access */
2725 STAM_PROFILE_ADV_START(&pThis->StatEEPROMWrite, a);
2726 pThis->eeprom.write(value & EECD_EE_WIRES);
2727 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMWrite, a);
2728 }
2729 if (value & EECD_EE_REQ)
2730 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2731 else
2732 EECD &= ~EECD_EE_GNT;
2733 //e1kRegWriteDefault(pThis, offset, index, value );
2734
2735 return VINF_SUCCESS;
2736#else /* !IN_RING3 */
2737 RT_NOREF(pThis, value);
2738 return VINF_IOM_R3_MMIO_WRITE;
2739#endif /* !IN_RING3 */
2740}
2741
2742/**
2743 * Read handler for EEPROM/Flash Control/Data register.
2744 *
2745 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2746 *
2747 * @returns VBox status code.
2748 *
2749 * @param pThis The device state structure.
2750 * @param offset Register offset in memory-mapped frame.
2751 * @param index Register index in register array.
2752 * @param pu32Value Where to store the read value.
2753 * @thread EMT
2754 */
2755static int e1kRegReadEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2756{
2757#ifdef IN_RING3
2758 uint32_t value;
2759 int rc = e1kRegReadDefault(pThis, offset, index, &value);
2760 if (RT_SUCCESS(rc))
2761 {
2762 if ((value & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2763 {
2764 /* Note: 82543GC does not need to request EEPROM access */
2765 /* Access to EEPROM granted -- get 4-wire bits to EEPROM device */
2766 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2767 value |= pThis->eeprom.read();
2768 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2769 }
2770 *pu32Value = value;
2771 }
2772
2773 return rc;
2774#else /* !IN_RING3 */
2775 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(pu32Value);
2776 return VINF_IOM_R3_MMIO_READ;
2777#endif /* !IN_RING3 */
2778}
2779
2780/**
2781 * Write handler for EEPROM Read register.
2782 *
2783 * Handles EEPROM word access requests, reads EEPROM and stores the result
2784 * into DATA field.
2785 *
2786 * @param pThis The device state structure.
2787 * @param offset Register offset in memory-mapped frame.
2788 * @param index Register index in register array.
2789 * @param value The value to store.
2790 * @param mask Used to implement partial writes (8 and 16-bit).
2791 * @thread EMT
2792 */
2793static int e1kRegWriteEERD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2794{
2795#ifdef IN_RING3
2796 /* Make use of 'writable' and 'readable' masks. */
2797 e1kRegWriteDefault(pThis, offset, index, value);
2798 /* DONE and DATA are set only if read was triggered by START. */
2799 if (value & EERD_START)
2800 {
2801 uint16_t tmp;
2802 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2803 if (pThis->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2804 SET_BITS(EERD, DATA, tmp);
2805 EERD |= EERD_DONE;
2806 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2807 }
2808
2809 return VINF_SUCCESS;
2810#else /* !IN_RING3 */
2811 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
2812 return VINF_IOM_R3_MMIO_WRITE;
2813#endif /* !IN_RING3 */
2814}
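/*
 * Guest-side flow (informal sketch, not part of this device): the driver
 * writes EERD with EERD_START and the word address in the ADDR field, then
 * polls EERD until EERD_DONE is set and extracts the result from the DATA
 * field. In this emulation the read completes within the same register
 * write, so DONE is already set on the first poll.
 */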
2815
2816
2817/**
2818 * Write handler for MDI Control register.
2819 *
2820 * Handles PHY read/write requests; forwards requests to internal PHY device.
2821 *
2822 * @param pThis The device state structure.
2823 * @param offset Register offset in memory-mapped frame.
2824 * @param index Register index in register array.
2825 * @param value The value to store.
2826 * @param mask Used to implement partial writes (8 and 16-bit).
2827 * @thread EMT
2828 */
2829static int e1kRegWriteMDIC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2830{
2831 if (value & MDIC_INT_EN)
2832 {
2833 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
2834 pThis->szPrf));
2835 }
2836 else if (value & MDIC_READY)
2837 {
2838 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
2839 pThis->szPrf));
2840 }
2841 else if (GET_BITS_V(value, MDIC, PHY) != 1)
2842 {
2843 E1kLog(("%s WARNING! Access to invalid PHY detected, phy=%d.\n",
2844 pThis->szPrf, GET_BITS_V(value, MDIC, PHY)));
2845 /*
2846 * Some drivers scan the MDIO bus for a PHY. We can work with these
2847 * drivers if we set MDIC_READY and MDIC_ERROR when there isn't a PHY
2848 * at the requested address, see @bugref{7346}.
2849 */
2850 MDIC = MDIC_READY | MDIC_ERROR;
2851 }
2852 else
2853 {
2854 /* Store the value */
2855 e1kRegWriteDefault(pThis, offset, index, value);
2856 STAM_COUNTER_INC(&pThis->StatPHYAccesses);
2857 /* Forward op to PHY */
2858 if (value & MDIC_OP_READ)
2859 SET_BITS(MDIC, DATA, Phy::readRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG)));
2860 else
2861 Phy::writeRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK);
2862 /* Let software know that we are done */
2863 MDIC |= MDIC_READY;
2864 }
2865
2866 return VINF_SUCCESS;
2867}
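/*
 * Guest-side flow (informal sketch): the driver writes MDIC with the PHY
 * address, register number, opcode and, for writes, the data, then polls
 * MDIC until MDIC_READY is set; for reads it then takes the DATA field.
 * This emulation completes the PHY access synchronously, so READY (or
 * READY | ERROR for a non-existent PHY) is set before the handler returns.
 */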
2868
2869/**
2870 * Write handler for Interrupt Cause Read register.
2871 *
2872 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
2873 *
2874 * @param pThis The device state structure.
2875 * @param offset Register offset in memory-mapped frame.
2876 * @param index Register index in register array.
2877 * @param value The value to store.
2878 * @param mask Used to implement partial writes (8 and 16-bit).
2879 * @thread EMT
2880 */
2881static int e1kRegWriteICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2882{
2883 ICR &= ~value;
2884
2885 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index);
2886 return VINF_SUCCESS;
2887}
2888
2889/**
2890 * Read handler for Interrupt Cause Read register.
2891 *
2892 * Reading this register acknowledges all interrupts.
2893 *
2894 * @returns VBox status code.
2895 *
2896 * @param pThis The device state structure.
2897 * @param offset Register offset in memory-mapped frame.
2898 * @param index Register index in register array.
 * @param   pu32Value   Where to store the value of the register.
2900 * @thread EMT
2901 */
2902static int e1kRegReadICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2903{
2904 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_READ);
2905 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2906 return rc;
2907
2908 uint32_t value = 0;
2909 rc = e1kRegReadDefault(pThis, offset, index, &value);
2910 if (RT_SUCCESS(rc))
2911 {
2912 if (value)
2913 {
2914 /*
2915 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
2916 * with disabled interrupts.
2917 */
2918 //if (IMS)
2919 if (1)
2920 {
2921 /*
                 * Interrupts were enabled -- we are supposedly at the very
                 * beginning of the interrupt handler.
2924 */
2925 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
2926 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", pThis->szPrf, ICR));
2927 /* Clear all pending interrupts */
2928 ICR = 0;
2929 pThis->fIntRaised = false;
2930 /* Lower(0) INTA(0) */
2931 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
2932
2933 pThis->u64AckedAt = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
2934 if (pThis->fIntMaskUsed)
2935 pThis->fDelayInts = true;
2936 }
2937 else
2938 {
2939 /*
                 * Interrupts are disabled -- in Windows guests the ICR read is done
                 * just before re-enabling interrupts.
2942 */
2943 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
2944 }
2945 }
2946 *pu32Value = value;
2947 }
2948 e1kCsLeave(pThis);
2949
2950 return rc;
2951}
2952
2953/**
2954 * Write handler for Interrupt Cause Set register.
2955 *
2956 * Bits corresponding to 1s in 'value' will be set in ICR register.
2957 *
2958 * @param pThis The device state structure.
2959 * @param offset Register offset in memory-mapped frame.
2960 * @param index Register index in register array.
2961 * @param value The value to store.
2963 * @thread EMT
2964 */
2965static int e1kRegWriteICS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2966{
2967 RT_NOREF_PV(offset); RT_NOREF_PV(index);
2968 E1K_INC_ISTAT_CNT(pThis->uStatIntICS);
2969 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, value & g_aE1kRegMap[ICS_IDX].writable);
2970}
2971
2972/**
2973 * Write handler for Interrupt Mask Set register.
2974 *
2975 * Will trigger pending interrupts.
2976 *
2977 * @param pThis The device state structure.
2978 * @param offset Register offset in memory-mapped frame.
2979 * @param index Register index in register array.
2980 * @param value The value to store.
2982 * @thread EMT
2983 */
2984static int e1kRegWriteIMS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2985{
2986 RT_NOREF_PV(offset); RT_NOREF_PV(index);
2987
2988 IMS |= value;
2989 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
2990 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", pThis->szPrf));
2991 e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, 0);
2992
2993 return VINF_SUCCESS;
2994}
2995
2996/**
2997 * Write handler for Interrupt Mask Clear register.
2998 *
2999 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
3000 *
3001 * @param pThis The device state structure.
3002 * @param offset Register offset in memory-mapped frame.
3003 * @param index Register index in register array.
3004 * @param value The value to store.
3006 * @thread EMT
3007 */
3008static int e1kRegWriteIMC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3009{
3010 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3011
3012 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3013 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3014 return rc;
3015 if (pThis->fIntRaised)
3016 {
3017 /*
         * Technically we should reset fIntRaised in the ICR read handler, but that would
         * cause Windows to freeze, since it may receive an interrupt while still at the
         * very beginning of its interrupt handler.
3021 */
3022 E1K_INC_ISTAT_CNT(pThis->uStatIntLower);
3023 STAM_COUNTER_INC(&pThis->StatIntsPrevented);
3024 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
3025 /* Lower(0) INTA(0) */
3026 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
3027 pThis->fIntRaised = false;
3028 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
3029 }
3030 IMS &= ~value;
3031 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", pThis->szPrf));
3032 e1kCsLeave(pThis);
3033
3034 return VINF_SUCCESS;
3035}
3036
3037/**
3038 * Write handler for Receive Control register.
3039 *
3040 * @param pThis The device state structure.
3041 * @param offset Register offset in memory-mapped frame.
3042 * @param index Register index in register array.
3043 * @param value The value to store.
3045 * @thread EMT
3046 */
3047static int e1kRegWriteRCTL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3048{
3049 /* Update promiscuous mode */
3050 bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
3051 if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
3052 {
3053 /* Promiscuity has changed, pass the knowledge on. */
3054#ifndef IN_RING3
3055 return VINF_IOM_R3_MMIO_WRITE;
3056#else
3057 if (pThis->pDrvR3)
3058 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, fBecomePromiscous);
3059#endif
3060 }
3061
3062 /* Adjust receive buffer size */
3063 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
3064 if (value & RCTL_BSEX)
3065 cbRxBuf *= 16;
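    /* Illustrative examples derived from the bit layout (not an exhaustive list):
     * BSIZE=00b yields 2048-byte buffers, BSIZE=11b yields 256-byte buffers, and
     * BSIZE=11b with BSEX set yields 256 * 16 = 4096-byte buffers. */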
3066 if (cbRxBuf != pThis->u16RxBSize)
3067 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
3068 pThis->szPrf, cbRxBuf, pThis->u16RxBSize));
3069 pThis->u16RxBSize = cbRxBuf;
3070
3071 /* Update the register */
3072 e1kRegWriteDefault(pThis, offset, index, value);
3073
3074 return VINF_SUCCESS;
3075}
3076
3077/**
3078 * Write handler for Packet Buffer Allocation register.
3079 *
3080 * TXA = 64 - RXA.
3081 *
3082 * @param pThis The device state structure.
3083 * @param offset Register offset in memory-mapped frame.
3084 * @param index Register index in register array.
3085 * @param value The value to store.
3087 * @thread EMT
3088 */
3089static int e1kRegWritePBA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3090{
3091 e1kRegWriteDefault(pThis, offset, index, value);
3092 PBA_st->txa = 64 - PBA_st->rxa;
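    /* The packet buffer of this adapter is 64 KB in total (hence the constant above);
     * whatever is not allocated to the receive part (PBA.RXA, in KB) implicitly belongs
     * to the transmit part. */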
3093
3094 return VINF_SUCCESS;
3095}
3096
3097/**
3098 * Write handler for Receive Descriptor Tail register.
3099 *
 * @remarks A write into RDT forces a switch to HC and signals
 *          e1kR3NetworkDown_WaitReceiveAvail().
3102 *
3103 * @returns VBox status code.
3104 *
3105 * @param pThis The device state structure.
3106 * @param offset Register offset in memory-mapped frame.
3107 * @param index Register index in register array.
3108 * @param value The value to store.
3110 * @thread EMT
3111 */
3112static int e1kRegWriteRDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3113{
3114#ifndef IN_RING3
3115 /* XXX */
3116// return VINF_IOM_R3_MMIO_WRITE;
3117#endif
3118 int rc = e1kCsRxEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3119 if (RT_LIKELY(rc == VINF_SUCCESS))
3120 {
3121 E1kLog(("%s e1kRegWriteRDT\n", pThis->szPrf));
3122 /*
3123 * Some drivers advance RDT too far, so that it equals RDH. This
3124 * somehow manages to work with real hardware but not with this
3125 * emulated device. We can work with these drivers if we just
3126 * write 1 less when we see a driver writing RDT equal to RDH,
3127 * see @bugref{7346}.
3128 */
3129 if (value == RDH)
3130 {
3131 if (RDH == 0)
3132 value = (RDLEN / sizeof(E1KRXDESC)) - 1;
3133 else
3134 value = RDH - 1;
3135 }
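        /* E.g. (illustrative): with an 8-descriptor ring a write of RDT == RDH == 5 is
         * turned into RDT = 4, and a write of RDT == RDH == 0 wraps around to RDT = 7. */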
3136 rc = e1kRegWriteDefault(pThis, offset, index, value);
3137#ifdef E1K_WITH_RXD_CACHE
3138 /*
3139 * We need to fetch descriptors now as RDT may go whole circle
3140 * before we attempt to store a received packet. For example,
3141 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
3142 * size being only 8 descriptors! Note that we fetch descriptors
3143 * only when the cache is empty to reduce the number of memory reads
3144 * in case of frequent RDT writes. Don't fetch anything when the
3145 * receiver is disabled either as RDH, RDT, RDLEN can be in some
3146 * messed up state.
         * Note that even though the cache may seem empty, i.e. contain no more
         * available descriptors, it may still be in use by the RX thread, which
         * has not yet written the last descriptor back but has temporarily
         * released the RX lock in order to write the packet body to the
         * descriptor's buffer. At this point we still go ahead with the prefetch,
3152 * but it won't actually fetch anything if there are no unused slots in
3153 * our "empty" cache (nRxDFetched==E1K_RXD_CACHE_SIZE). We must not
3154 * reset the cache here even if it appears empty. It will be reset at
3155 * a later point in e1kRxDGet().
3156 */
3157 if (e1kRxDIsCacheEmpty(pThis) && (RCTL & RCTL_EN))
3158 e1kRxDPrefetch(pThis);
3159#endif /* E1K_WITH_RXD_CACHE */
3160 e1kCsRxLeave(pThis);
3161 if (RT_SUCCESS(rc))
3162 {
3163/** @todo bird: Use SUPSem* for this so we can signal it in ring-0 as well
3164 * without requiring any context switches. We should also check the
3165 * wait condition before bothering to queue the item as we're currently
3166 * queuing thousands of items per second here in a normal transmit
3167 * scenario. Expect performance changes when fixing this! */
3168#ifdef IN_RING3
3169 /* Signal that we have more receive descriptors available. */
3170 e1kWakeupReceive(pThis->CTX_SUFF(pDevIns));
3171#else
3172 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pCanRxQueue));
3173 if (pItem)
3174 PDMQueueInsert(pThis->CTX_SUFF(pCanRxQueue), pItem);
3175#endif
3176 }
3177 }
3178 return rc;
3179}
3180
3181/**
3182 * Write handler for Receive Delay Timer register.
3183 *
3184 * @param pThis The device state structure.
3185 * @param offset Register offset in memory-mapped frame.
3186 * @param index Register index in register array.
3187 * @param value The value to store.
3189 * @thread EMT
3190 */
3191static int e1kRegWriteRDTR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3192{
3193 e1kRegWriteDefault(pThis, offset, index, value);
3194 if (value & RDTR_FPD)
3195 {
3196 /* Flush requested, cancel both timers and raise interrupt */
3197#ifdef E1K_USE_RX_TIMERS
3198 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3199 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3200#endif
3201 E1K_INC_ISTAT_CNT(pThis->uStatIntRDTR);
3202 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3203 }
3204
3205 return VINF_SUCCESS;
3206}
3207
3208DECLINLINE(uint32_t) e1kGetTxLen(PE1KSTATE pThis)
3209{
    /*
     * Make sure TDT won't change during computation. EMT may modify TDT at
     * any moment.
     */
3214 uint32_t tdt = TDT;
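    /* Illustrative example (numbers made up): with a ring of 8 descriptors
     * (TDLEN/sizeof(E1KTXDESC) == 8), TDH=6 and TDT=2 the result is 8 + 2 - 6 = 4 pending
     * descriptors; without wrap-around (TDH=2, TDT=6) it is simply 6 - 2 = 4. */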
3215 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
3216}
3217
3218#ifdef IN_RING3
3219
3220# ifdef E1K_TX_DELAY
3221/**
3222 * Transmit Delay Timer handler.
3223 *
3224 * @remarks We only get here when the timer expires.
3225 *
3226 * @param pDevIns Pointer to device instance structure.
3227 * @param pTimer Pointer to the timer.
3228 * @param pvUser NULL.
3229 * @thread EMT
3230 */
3231static DECLCALLBACK(void) e1kTxDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3232{
3233 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3234 Assert(PDMCritSectIsOwner(&pThis->csTx));
3235
3236 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayExp);
3237# ifdef E1K_INT_STATS
3238 uint64_t u64Elapsed = RTTimeNanoTS() - pThis->u64ArmedAt;
3239 if (u64Elapsed > pThis->uStatMaxTxDelay)
3240 pThis->uStatMaxTxDelay = u64Elapsed;
3241# endif
3242 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
3243 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3244}
3245# endif /* E1K_TX_DELAY */
3246
3247# ifdef E1K_USE_TX_TIMERS
3248
3249/**
3250 * Transmit Interrupt Delay Timer handler.
3251 *
3252 * @remarks We only get here when the timer expires.
3253 *
3254 * @param pDevIns Pointer to device instance structure.
3255 * @param pTimer Pointer to the timer.
3256 * @param pvUser NULL.
3257 * @thread EMT
3258 */
3259static DECLCALLBACK(void) e1kTxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3260{
3261 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3262
3263 E1K_INC_ISTAT_CNT(pThis->uStatTID);
3264 /* Cancel absolute delay timer as we have already got attention */
3265# ifndef E1K_NO_TAD
3266 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
3267# endif
3268 e1kRaiseInterrupt(pThis, ICR_TXDW);
3269}
3270
3271/**
3272 * Transmit Absolute Delay Timer handler.
3273 *
3274 * @remarks We only get here when the timer expires.
3275 *
3276 * @param pDevIns Pointer to device instance structure.
3277 * @param pTimer Pointer to the timer.
3278 * @param pvUser NULL.
3279 * @thread EMT
3280 */
3281static DECLCALLBACK(void) e1kTxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3282{
3283 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3284
3285 E1K_INC_ISTAT_CNT(pThis->uStatTAD);
3286 /* Cancel interrupt delay timer as we have already got attention */
3287 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
3288 e1kRaiseInterrupt(pThis, ICR_TXDW);
3289}
3290
3291# endif /* E1K_USE_TX_TIMERS */
3292# ifdef E1K_USE_RX_TIMERS
3293
3294/**
3295 * Receive Interrupt Delay Timer handler.
3296 *
3297 * @remarks We only get here when the timer expires.
3298 *
3299 * @param pDevIns Pointer to device instance structure.
3300 * @param pTimer Pointer to the timer.
3301 * @param pvUser NULL.
3302 * @thread EMT
3303 */
3304static DECLCALLBACK(void) e1kRxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3305{
3306 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3307
3308 E1K_INC_ISTAT_CNT(pThis->uStatRID);
3309 /* Cancel absolute delay timer as we have already got attention */
3310 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3311 e1kRaiseInterrupt(pThis, ICR_RXT0);
3312}
3313
3314/**
3315 * Receive Absolute Delay Timer handler.
3316 *
3317 * @remarks We only get here when the timer expires.
3318 *
3319 * @param pDevIns Pointer to device instance structure.
3320 * @param pTimer Pointer to the timer.
3321 * @param pvUser NULL.
3322 * @thread EMT
3323 */
3324static DECLCALLBACK(void) e1kRxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3325{
3326 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3327
3328 E1K_INC_ISTAT_CNT(pThis->uStatRAD);
3329 /* Cancel interrupt delay timer as we have already got attention */
3330 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3331 e1kRaiseInterrupt(pThis, ICR_RXT0);
3332}
3333
3334# endif /* E1K_USE_RX_TIMERS */
3335
3336/**
3337 * Late Interrupt Timer handler.
3338 *
3339 * @param pDevIns Pointer to device instance structure.
3340 * @param pTimer Pointer to the timer.
3341 * @param pvUser NULL.
3342 * @thread EMT
3343 */
3344static DECLCALLBACK(void) e1kLateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3345{
3346 RT_NOREF(pDevIns, pTimer);
3347 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3348
3349 STAM_PROFILE_ADV_START(&pThis->StatLateIntTimer, a);
3350 STAM_COUNTER_INC(&pThis->StatLateInts);
3351 E1K_INC_ISTAT_CNT(pThis->uStatIntLate);
3352# if 0
3353 if (pThis->iStatIntLost > -100)
3354 pThis->iStatIntLost--;
3355# endif
3356 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, 0);
3357 STAM_PROFILE_ADV_STOP(&pThis->StatLateIntTimer, a);
3358}
3359
3360/**
3361 * Link Up Timer handler.
3362 *
3363 * @param pDevIns Pointer to device instance structure.
3364 * @param pTimer Pointer to the timer.
3365 * @param pvUser NULL.
3366 * @thread EMT
3367 */
3368static DECLCALLBACK(void) e1kLinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3369{
3370 RT_NOREF(pDevIns, pTimer);
3371 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3372
3373 /*
     * This can happen if the link status was set to down while the link-up timer was
     * already armed (shortly after e1kLoadDone(), or when the cable was disconnected
     * and reconnected very quickly).
     */
3378 if (!pThis->fCableConnected)
3379 return;
3380
3381 e1kR3LinkUp(pThis);
3382}
3383
3384#endif /* IN_RING3 */
3385
3386/**
3387 * Sets up the GSO context according to the TSE new context descriptor.
3388 *
3389 * @param pGso The GSO context to setup.
3390 * @param pCtx The context descriptor.
3391 */
3392DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3393{
3394 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3395
3396 /*
3397 * See if the context descriptor describes something that could be TCP or
3398 * UDP over IPv[46].
3399 */
3400 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3401 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3402 {
3403 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3404 return;
3405 }
3406 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3407 {
3408 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3409 return;
3410 }
3411 if (RT_UNLIKELY( pCtx->dw2.fTCP
3412 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3413 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3414 {
3415 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3416 return;
3417 }
3418
3419 /* The end of the TCP/UDP checksum should stop at the end of the packet or at least after the headers. */
3420 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3421 {
3422 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3423 return;
3424 }
3425
3426 /* IPv4 checksum offset. */
3427 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3428 {
3429 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3430 return;
3431 }
3432
3433 /* TCP/UDP checksum offsets. */
3434 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3435 != ( pCtx->dw2.fTCP
3436 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3437 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3438 {
        E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->tu.u8CSO, pCtx->tu.u8CSS, pCtx->dw2.fTCP));
3440 return;
3441 }
3442
3443 /*
3444 * Because of internal networking using a 16-bit size field for GSO context
3445 * plus frame, we have to make sure we don't exceed this.
3446 */
3447 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3448 {
3449 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3450 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3451 return;
3452 }
3453
3454 /*
3455 * We're good for now - we'll do more checks when seeing the data.
3456 * So, figure the type of offloading and setup the context.
3457 */
3458 if (pCtx->dw2.fIP)
3459 {
3460 if (pCtx->dw2.fTCP)
3461 {
3462 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3463 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3464 }
3465 else
3466 {
3467 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3468 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3469 }
3470 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3471 * this yet it seems)... */
3472 }
3473 else
3474 {
3475 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /** @todo IPv6 UFO */
3476 if (pCtx->dw2.fTCP)
3477 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3478 else
3479 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3480 }
3481 pGso->offHdr1 = pCtx->ip.u8CSS;
3482 pGso->offHdr2 = pCtx->tu.u8CSS;
3483 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3484 pGso->cbMaxSeg = pCtx->dw3.u16MSS;
3485 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3486 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3487 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3488}
3489
3490/**
3491 * Checks if we can use GSO processing for the current TSE frame.
3492 *
3493 * @param pThis The device state structure.
3494 * @param pGso The GSO context.
3495 * @param pData The first data descriptor of the frame.
3496 * @param pCtx The TSO context descriptor.
3497 */
3498DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3499{
3500 if (!pData->cmd.fTSE)
3501 {
3502 E1kLog2(("e1kCanDoGso: !TSE\n"));
3503 return false;
3504 }
3505 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3506 {
3507 E1kLog(("e1kCanDoGso: VLE\n"));
3508 return false;
3509 }
3510 if (RT_UNLIKELY(!pThis->fGSOEnabled))
3511 {
3512 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3513 return false;
3514 }
3515
3516 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3517 {
3518 case PDMNETWORKGSOTYPE_IPV4_TCP:
3519 case PDMNETWORKGSOTYPE_IPV4_UDP:
3520 if (!pData->dw3.fIXSM)
3521 {
3522 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3523 return false;
3524 }
3525 if (!pData->dw3.fTXSM)
3526 {
3527 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3528 return false;
3529 }
            /** @todo what other checks should we perform here? Ethernet frame type? */
3531 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3532 return true;
3533
3534 case PDMNETWORKGSOTYPE_IPV6_TCP:
3535 case PDMNETWORKGSOTYPE_IPV6_UDP:
3536 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3537 {
3538 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3539 return false;
3540 }
3541 if (!pData->dw3.fTXSM)
3542 {
                E1kLog(("e1kCanDoGso: !TXSM (IPv6)\n"));
3544 return false;
3545 }
            /** @todo what other checks should we perform here? Ethernet frame type? */
            E1kLog2(("e1kCanDoGso: OK, IPv6\n"));
3548 return true;
3549
3550 default:
3551 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3552 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3553 return false;
3554 }
3555}
3556
3557/**
3558 * Frees the current xmit buffer.
3559 *
3560 * @param pThis The device state structure.
3561 */
3562static void e1kXmitFreeBuf(PE1KSTATE pThis)
3563{
3564 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3565 if (pSg)
3566 {
3567 pThis->CTX_SUFF(pTxSg) = NULL;
3568
3569 if (pSg->pvAllocator != pThis)
3570 {
3571 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3572 if (pDrv)
3573 pDrv->pfnFreeBuf(pDrv, pSg);
3574 }
3575 else
3576 {
3577 /* loopback */
3578 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3579 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3580 pSg->fFlags = 0;
3581 pSg->pvAllocator = NULL;
3582 }
3583 }
3584}
3585
3586#ifndef E1K_WITH_TXD_CACHE
3587/**
3588 * Allocates an xmit buffer.
3589 *
3590 * @returns See PDMINETWORKUP::pfnAllocBuf.
3591 * @param pThis The device state structure.
3592 * @param cbMin The minimum frame size.
3593 * @param fExactSize Whether cbMin is exact or if we have to max it
3594 * out to the max MTU size.
3595 * @param fGso Whether this is a GSO frame or not.
3596 */
3597DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, size_t cbMin, bool fExactSize, bool fGso)
3598{
3599 /* Adjust cbMin if necessary. */
3600 if (!fExactSize)
3601 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3602
3603 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3604 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3605 e1kXmitFreeBuf(pThis);
3606 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3607
3608 /*
3609 * Allocate the buffer.
3610 */
3611 PPDMSCATTERGATHER pSg;
3612 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3613 {
3614 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3615 if (RT_UNLIKELY(!pDrv))
3616 return VERR_NET_DOWN;
3617 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pThis->GsoCtx : NULL, &pSg);
3618 if (RT_FAILURE(rc))
3619 {
3620 /* Suspend TX as we are out of buffers atm */
3621 STATUS |= STATUS_TXOFF;
3622 return rc;
3623 }
3624 }
3625 else
3626 {
3627 /* Create a loopback using the fallback buffer and preallocated SG. */
3628 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3629 pSg = &pThis->uTxFallback.Sg;
3630 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3631 pSg->cbUsed = 0;
3632 pSg->cbAvailable = 0;
3633 pSg->pvAllocator = pThis;
3634 pSg->pvUser = NULL; /* No GSO here. */
3635 pSg->cSegs = 1;
3636 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3637 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3638 }
3639
3640 pThis->CTX_SUFF(pTxSg) = pSg;
3641 return VINF_SUCCESS;
3642}
3643#else /* E1K_WITH_TXD_CACHE */
3644/**
3645 * Allocates an xmit buffer.
3646 *
3647 * @returns See PDMINETWORKUP::pfnAllocBuf.
3648 * @param pThis The device state structure.
3652 * @param fGso Whether this is a GSO frame or not.
3653 */
3654DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, bool fGso)
3655{
3656 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3657 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3658 e1kXmitFreeBuf(pThis);
3659 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3660
3661 /*
3662 * Allocate the buffer.
3663 */
3664 PPDMSCATTERGATHER pSg;
3665 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3666 {
3667 if (pThis->cbTxAlloc == 0)
3668 {
3669 /* Zero packet, no need for the buffer */
3670 return VINF_SUCCESS;
3671 }
3672
3673 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3674 if (RT_UNLIKELY(!pDrv))
3675 return VERR_NET_DOWN;
3676 int rc = pDrv->pfnAllocBuf(pDrv, pThis->cbTxAlloc, fGso ? &pThis->GsoCtx : NULL, &pSg);
3677 if (RT_FAILURE(rc))
3678 {
3679 /* Suspend TX as we are out of buffers atm */
3680 STATUS |= STATUS_TXOFF;
3681 return rc;
3682 }
3683 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3684 pThis->szPrf, pThis->cbTxAlloc,
3685 pThis->fVTag ? "VLAN " : "",
3686 pThis->fGSO ? "GSO " : ""));
3687 pThis->cbTxAlloc = 0;
3688 }
3689 else
3690 {
3691 /* Create a loopback using the fallback buffer and preallocated SG. */
3692 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3693 pSg = &pThis->uTxFallback.Sg;
3694 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3695 pSg->cbUsed = 0;
3696 pSg->cbAvailable = 0;
3697 pSg->pvAllocator = pThis;
3698 pSg->pvUser = NULL; /* No GSO here. */
3699 pSg->cSegs = 1;
3700 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3701 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3702 }
3703
3704 pThis->CTX_SUFF(pTxSg) = pSg;
3705 return VINF_SUCCESS;
3706}
3707#endif /* E1K_WITH_TXD_CACHE */
3708
3709/**
3710 * Checks if it's a GSO buffer or not.
3711 *
3712 * @returns true / false.
3713 * @param pTxSg The scatter / gather buffer.
3714 */
3715DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3716{
3717#if 0
3718 if (!pTxSg)
3719 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
    if (pTxSg && !pTxSg->pvUser)
3721 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
3722#endif
3723 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3724}
3725
3726#ifndef E1K_WITH_TXD_CACHE
3727/**
3728 * Load transmit descriptor from guest memory.
3729 *
3730 * @param pThis The device state structure.
3731 * @param pDesc Pointer to descriptor union.
3732 * @param addr Physical address in guest context.
3733 * @thread E1000_TX
3734 */
3735DECLINLINE(void) e1kLoadDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3736{
3737 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3738}
3739#else /* E1K_WITH_TXD_CACHE */
3740/**
3741 * Load transmit descriptors from guest memory.
3742 *
3743 * We need two physical reads in case the tail wrapped around the end of TX
3744 * descriptor ring.
3745 *
3746 * @returns the actual number of descriptors fetched.
3747 * @param pThis The device state structure.
3750 * @thread E1000_TX
3751 */
3752DECLINLINE(unsigned) e1kTxDLoadMore(PE1KSTATE pThis)
3753{
3754 Assert(pThis->iTxDCurrent == 0);
3755 /* We've already loaded pThis->nTxDFetched descriptors past TDH. */
3756 unsigned nDescsAvailable = e1kGetTxLen(pThis) - pThis->nTxDFetched;
3757 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pThis->nTxDFetched);
3758 unsigned nDescsTotal = TDLEN / sizeof(E1KTXDESC);
3759 unsigned nFirstNotLoaded = (TDH + pThis->nTxDFetched) % nDescsTotal;
3760 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
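    /* Illustrative example (numbers made up): with a 16-descriptor ring, nothing fetched
     * yet and TDH=14, fetching 6 descriptors takes two reads: the first read below covers
     * descriptors 14..15, the second one wraps around and covers descriptors 0..3. */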
3761 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u "
3762 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
3763 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
3764 nFirstNotLoaded, nDescsInSingleRead));
3765 if (nDescsToFetch == 0)
3766 return 0;
3767 E1KTXDESC* pFirstEmptyDesc = &pThis->aTxDescriptors[pThis->nTxDFetched];
3768 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3769 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3770 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
3771 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3772 pThis->szPrf, nDescsInSingleRead,
3773 TDBAH, TDBAL + TDH * sizeof(E1KTXDESC),
3774 nFirstNotLoaded, TDLEN, TDH, TDT));
3775 if (nDescsToFetch > nDescsInSingleRead)
3776 {
3777 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3778 ((uint64_t)TDBAH << 32) + TDBAL,
3779 pFirstEmptyDesc + nDescsInSingleRead,
3780 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
3781 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
3782 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
3783 TDBAH, TDBAL));
3784 }
3785 pThis->nTxDFetched += nDescsToFetch;
3786 return nDescsToFetch;
3787}
3788
3789/**
3790 * Load transmit descriptors from guest memory only if there are no loaded
3791 * descriptors.
3792 *
3793 * @returns true if there are descriptors in cache.
3794 * @param pThis The device state structure.
3797 * @thread E1000_TX
3798 */
3799DECLINLINE(bool) e1kTxDLazyLoad(PE1KSTATE pThis)
3800{
3801 if (pThis->nTxDFetched == 0)
3802 return e1kTxDLoadMore(pThis) != 0;
3803 return true;
3804}
3805#endif /* E1K_WITH_TXD_CACHE */
3806
3807/**
3808 * Write back transmit descriptor to guest memory.
3809 *
3810 * @param pThis The device state structure.
3811 * @param pDesc Pointer to descriptor union.
3812 * @param addr Physical address in guest context.
3813 * @thread E1000_TX
3814 */
3815DECLINLINE(void) e1kWriteBackDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3816{
    /* Only the last half of the descriptor would need to be written back, but we write the whole descriptor for simplicity. */
3818 e1kPrintTDesc(pThis, pDesc, "^^^");
3819 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3820}
3821
3822/**
3823 * Transmit complete frame.
3824 *
3825 * @remarks We skip the FCS since we're not responsible for sending anything to
3826 * a real ethernet wire.
3827 *
3828 * @param pThis The device state structure.
3829 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3830 * @thread E1000_TX
3831 */
3832static void e1kTransmitFrame(PE1KSTATE pThis, bool fOnWorkerThread)
3833{
3834 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3835 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
3836 Assert(!pSg || pSg->cSegs == 1);
3837
3838 if (cbFrame > 70) /* unqualified guess */
3839 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
3840
3841#ifdef E1K_INT_STATS
3842 if (cbFrame <= 1514)
3843 E1K_INC_ISTAT_CNT(pThis->uStatTx1514);
3844 else if (cbFrame <= 2962)
3845 E1K_INC_ISTAT_CNT(pThis->uStatTx2962);
3846 else if (cbFrame <= 4410)
3847 E1K_INC_ISTAT_CNT(pThis->uStatTx4410);
3848 else if (cbFrame <= 5858)
3849 E1K_INC_ISTAT_CNT(pThis->uStatTx5858);
3850 else if (cbFrame <= 7306)
3851 E1K_INC_ISTAT_CNT(pThis->uStatTx7306);
3852 else if (cbFrame <= 8754)
3853 E1K_INC_ISTAT_CNT(pThis->uStatTx8754);
3854 else if (cbFrame <= 16384)
3855 E1K_INC_ISTAT_CNT(pThis->uStatTx16384);
3856 else if (cbFrame <= 32768)
3857 E1K_INC_ISTAT_CNT(pThis->uStatTx32768);
3858 else
3859 E1K_INC_ISTAT_CNT(pThis->uStatTxLarge);
3860#endif /* E1K_INT_STATS */
3861
3862 /* Add VLAN tag */
3863 if (cbFrame > 12 && pThis->fVTag)
3864 {
3865 E1kLog3(("%s Inserting VLAN tag %08x\n",
3866 pThis->szPrf, RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16)));
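        /* Open a 4-byte gap right after the two MAC addresses (bytes 0..11) by shifting
         * the rest of the frame, then write the 802.1Q tag (TPID taken from VET plus the
         * saved TCI) at offset 12; dword index 3 below is exactly byte offset 12. */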
3867 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
3868 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16);
3869 pSg->cbUsed += 4;
3870 cbFrame += 4;
3871 Assert(pSg->cbUsed == cbFrame);
3872 Assert(pSg->cbUsed <= pSg->cbAvailable);
3873 }
3874/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
3875 "%.*Rhxd\n"
3876 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
3877 pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
3878
3879 /* Update the stats */
3880 E1K_INC_CNT32(TPT);
3881 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
3882 E1K_INC_CNT32(GPTC);
3883 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
3884 E1K_INC_CNT32(BPTC);
3885 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
3886 E1K_INC_CNT32(MPTC);
3887 /* Update octet transmit counter */
3888 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
3889 if (pThis->CTX_SUFF(pDrv))
3890 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, cbFrame);
3891 if (cbFrame == 64)
3892 E1K_INC_CNT32(PTC64);
3893 else if (cbFrame < 128)
3894 E1K_INC_CNT32(PTC127);
3895 else if (cbFrame < 256)
3896 E1K_INC_CNT32(PTC255);
3897 else if (cbFrame < 512)
3898 E1K_INC_CNT32(PTC511);
3899 else if (cbFrame < 1024)
3900 E1K_INC_CNT32(PTC1023);
3901 else
3902 E1K_INC_CNT32(PTC1522);
3903
3904 E1K_INC_ISTAT_CNT(pThis->uStatTxFrm);
3905
3906 /*
3907 * Dump and send the packet.
3908 */
3909 int rc = VERR_NET_DOWN;
3910 if (pSg && pSg->pvAllocator != pThis)
3911 {
3912 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
3913
3914 pThis->CTX_SUFF(pTxSg) = NULL;
3915 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3916 if (pDrv)
3917 {
3918 /* Release critical section to avoid deadlock in CanReceive */
3919 //e1kCsLeave(pThis);
3920 STAM_PROFILE_START(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
3921 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
3922 STAM_PROFILE_STOP(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
3923 //e1kCsEnter(pThis, RT_SRC_POS);
3924 }
3925 }
3926 else if (pSg)
3927 {
3928 Assert(pSg->aSegs[0].pvSeg == pThis->aTxPacketFallback);
3929 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
3930
3931 /** @todo do we actually need to check that we're in loopback mode here? */
3932 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
3933 {
3934 E1KRXDST status;
3935 RT_ZERO(status);
3936 status.fPIF = true;
3937 e1kHandleRxPacket(pThis, pSg->aSegs[0].pvSeg, cbFrame, status);
3938 rc = VINF_SUCCESS;
3939 }
3940 e1kXmitFreeBuf(pThis);
3941 }
3942 else
3943 rc = VERR_NET_DOWN;
3944 if (RT_FAILURE(rc))
3945 {
3946 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
3947 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
3948 }
3949
3950 pThis->led.Actual.s.fWriting = 0;
3951}
3952
3953/**
3954 * Compute and write internet checksum (e1kCSum16) at the specified offset.
3955 *
3956 * @param pThis The device state structure.
3957 * @param pPkt Pointer to the packet.
3958 * @param u16PktLen Total length of the packet.
3959 * @param cso Offset in packet to write checksum at.
3960 * @param css Offset in packet to start computing
3961 * checksum from.
3962 * @param cse Offset in packet to stop computing
3963 * checksum at.
3964 * @thread E1000_TX
3965 */
3966static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
3967{
3968 RT_NOREF1(pThis);
3969
3970 if (css >= u16PktLen)
3971 {
        E1kLog2(("%s css(%X) is not less than the packet length(%X), checksum is not inserted\n",
                 pThis->szPrf, css, u16PktLen));
3974 return;
3975 }
3976
3977 if (cso >= u16PktLen - 1)
3978 {
        E1kLog2(("%s cso(%X) leaves no room for a 16-bit checksum in a packet of length %X, checksum is not inserted\n",
                 pThis->szPrf, cso, u16PktLen));
3981 return;
3982 }
3983
3984 if (cse == 0)
3985 cse = u16PktLen - 1;
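    /* E.g. (illustrative): for a 60-byte packet with css=14 and cse=0 the checksum below
     * covers bytes 14..59, i.e. everything after the Ethernet header. */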
3986 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
3987 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", pThis->szPrf,
3988 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
3989 *(uint16_t*)(pPkt + cso) = u16ChkSum;
3990}
3991
3992/**
3993 * Add a part of descriptor's buffer to transmit frame.
3994 *
3995 * @remarks data.u64BufAddr is used unconditionally for both data
3996 * and legacy descriptors since it is identical to
3997 * legacy.u64BufAddr.
3998 *
3999 * @param pThis The device state structure.
 * @param   PhysAddr        The physical address of the descriptor buffer.
4001 * @param u16Len Length of buffer to the end of segment.
4002 * @param fSend Force packet sending.
4003 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4004 * @thread E1000_TX
4005 */
4006#ifndef E1K_WITH_TXD_CACHE
4007static void e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4008{
4009 /* TCP header being transmitted */
4010 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
4011 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4012 /* IP header being transmitted */
4013 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
4014 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4015
4016 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4017 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4018 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4019
4020 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4021 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4022 E1kLog3(("%s Dump of the segment:\n"
4023 "%.*Rhxd\n"
4024 "%s --- End of dump ---\n",
4025 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4026 pThis->u16TxPktLen += u16Len;
4027 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4028 pThis->szPrf, pThis->u16TxPktLen));
4029 if (pThis->u16HdrRemain > 0)
4030 {
4031 /* The header was not complete, check if it is now */
4032 if (u16Len >= pThis->u16HdrRemain)
4033 {
4034 /* The rest is payload */
4035 u16Len -= pThis->u16HdrRemain;
4036 pThis->u16HdrRemain = 0;
4037 /* Save partial checksum and flags */
4038 pThis->u32SavedCsum = pTcpHdr->chksum;
4039 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4040 /* Clear FIN and PSH flags now and set them only in the last segment */
4041 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4042 }
4043 else
4044 {
4045 /* Still not */
4046 pThis->u16HdrRemain -= u16Len;
4047 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4048 pThis->szPrf, pThis->u16HdrRemain));
4049 return;
4050 }
4051 }
4052
4053 pThis->u32PayRemain -= u16Len;
4054
4055 if (fSend)
4056 {
4057 /* Leave ethernet header intact */
4058 /* IP Total Length = payload + headers - ethernet header */
4059 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4060 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4061 pThis->szPrf, ntohs(pIpHdr->total_len)));
4062 /* Update IP Checksum */
4063 pIpHdr->chksum = 0;
4064 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4065 pThis->contextTSE.ip.u8CSO,
4066 pThis->contextTSE.ip.u8CSS,
4067 pThis->contextTSE.ip.u16CSE);
4068
4069 /* Update TCP flags */
4070 /* Restore original FIN and PSH flags for the last segment */
4071 if (pThis->u32PayRemain == 0)
4072 {
4073 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4074 E1K_INC_CNT32(TSCTC);
4075 }
4076 /* Add TCP length to partial pseudo header sum */
4077 uint32_t csum = pThis->u32SavedCsum
4078 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
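        /* Fold any carry back into the low 16 bits (one's-complement addition); e.g.
         * 0x1FFFE becomes 0x0001 + 0xFFFE = 0xFFFF. */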
4079 while (csum >> 16)
4080 csum = (csum >> 16) + (csum & 0xFFFF);
4081 pTcpHdr->chksum = csum;
4082 /* Compute final checksum */
4083 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4084 pThis->contextTSE.tu.u8CSO,
4085 pThis->contextTSE.tu.u8CSS,
4086 pThis->contextTSE.tu.u16CSE);
4087
4088 /*
         * Transmit it. If we've used the SG already, allocate a new one before
         * we copy the data.
4091 */
4092 if (!pThis->CTX_SUFF(pTxSg))
4093 e1kXmitAllocBuf(pThis, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
4094 if (pThis->CTX_SUFF(pTxSg))
4095 {
4096 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4097 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4098 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4099 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4100 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4101 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4102 }
4103 e1kTransmitFrame(pThis, fOnWorkerThread);
4104
4105 /* Update Sequence Number */
4106 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4107 - pThis->contextTSE.dw3.u8HDRLEN);
4108 /* Increment IP identification */
4109 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4110 }
4111}
4112#else /* E1K_WITH_TXD_CACHE */
4113static int e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4114{
4115 int rc = VINF_SUCCESS;
4116 /* TCP header being transmitted */
4117 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
4118 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4119 /* IP header being transmitted */
4120 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
4121 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4122
4123 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4124 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4125 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4126
4127 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4128 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4129 E1kLog3(("%s Dump of the segment:\n"
4130 "%.*Rhxd\n"
4131 "%s --- End of dump ---\n",
4132 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4133 pThis->u16TxPktLen += u16Len;
4134 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4135 pThis->szPrf, pThis->u16TxPktLen));
4136 if (pThis->u16HdrRemain > 0)
4137 {
4138 /* The header was not complete, check if it is now */
4139 if (u16Len >= pThis->u16HdrRemain)
4140 {
4141 /* The rest is payload */
4142 u16Len -= pThis->u16HdrRemain;
4143 pThis->u16HdrRemain = 0;
4144 /* Save partial checksum and flags */
4145 pThis->u32SavedCsum = pTcpHdr->chksum;
4146 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4147 /* Clear FIN and PSH flags now and set them only in the last segment */
4148 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4149 }
4150 else
4151 {
4152 /* Still not */
4153 pThis->u16HdrRemain -= u16Len;
4154 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4155 pThis->szPrf, pThis->u16HdrRemain));
4156 return rc;
4157 }
4158 }
4159
4160 pThis->u32PayRemain -= u16Len;
4161
4162 if (fSend)
4163 {
4164 /* Leave ethernet header intact */
4165 /* IP Total Length = payload + headers - ethernet header */
4166 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4167 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4168 pThis->szPrf, ntohs(pIpHdr->total_len)));
4169 /* Update IP Checksum */
4170 pIpHdr->chksum = 0;
4171 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4172 pThis->contextTSE.ip.u8CSO,
4173 pThis->contextTSE.ip.u8CSS,
4174 pThis->contextTSE.ip.u16CSE);
4175
4176 /* Update TCP flags */
4177 /* Restore original FIN and PSH flags for the last segment */
4178 if (pThis->u32PayRemain == 0)
4179 {
4180 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4181 E1K_INC_CNT32(TSCTC);
4182 }
4183 /* Add TCP length to partial pseudo header sum */
4184 uint32_t csum = pThis->u32SavedCsum
4185 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4186 while (csum >> 16)
4187 csum = (csum >> 16) + (csum & 0xFFFF);
4188 pTcpHdr->chksum = csum;
4189 /* Compute final checksum */
4190 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4191 pThis->contextTSE.tu.u8CSO,
4192 pThis->contextTSE.tu.u8CSS,
4193 pThis->contextTSE.tu.u16CSE);
4194
4195 /*
4196 * Transmit it.
4197 */
4198 if (pThis->CTX_SUFF(pTxSg))
4199 {
4200 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4201 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4202 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4203 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4204 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4205 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4206 }
4207 e1kTransmitFrame(pThis, fOnWorkerThread);
4208
4209 /* Update Sequence Number */
4210 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4211 - pThis->contextTSE.dw3.u8HDRLEN);
4212 /* Increment IP identification */
4213 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4214
4215 /* Allocate new buffer for the next segment. */
4216 if (pThis->u32PayRemain)
4217 {
4218 pThis->cbTxAlloc = RT_MIN(pThis->u32PayRemain,
4219 pThis->contextTSE.dw3.u16MSS)
4220 + pThis->contextTSE.dw3.u8HDRLEN
4221 + (pThis->fVTag ? 4 : 0);
4222 rc = e1kXmitAllocBuf(pThis, false /* fGSO */);
4223 }
4224 }
4225
4226 return rc;
4227}
4228#endif /* E1K_WITH_TXD_CACHE */
4229
4230#ifndef E1K_WITH_TXD_CACHE
4231/**
4232 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4233 * frame.
4234 *
 * We construct the frame in the fallback buffer first and then copy it to the SG
4236 * buffer before passing it down to the network driver code.
4237 *
4238 * @returns true if the frame should be transmitted, false if not.
4239 *
4240 * @param pThis The device state structure.
4241 * @param pDesc Pointer to the descriptor to transmit.
4242 * @param cbFragment Length of descriptor's buffer.
4243 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4244 * @thread E1000_TX
4245 */
4246static bool e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4247{
4248 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4249 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4250 Assert(pDesc->data.cmd.fTSE);
4251 Assert(!e1kXmitIsGsoBuf(pTxSg));
4252
4253 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4254 Assert(u16MaxPktLen != 0);
4255 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4256
4257 /*
4258 * Carve out segments.
4259 */
4260 do
4261 {
4262 /* Calculate how many bytes we have left in this TCP segment */
4263 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4264 if (cb > cbFragment)
4265 {
4266 /* This descriptor fits completely into current segment */
4267 cb = cbFragment;
4268 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4269 }
4270 else
4271 {
4272 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4273 /*
4274 * Rewind the packet tail pointer to the beginning of payload,
4275 * so we continue writing right beyond the header.
4276 */
4277 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4278 }
4279
4280 pDesc->data.u64BufAddr += cb;
4281 cbFragment -= cb;
4282 } while (cbFragment > 0);
4283
4284 if (pDesc->data.cmd.fEOP)
4285 {
4286 /* End of packet, next segment will contain header. */
4287 if (pThis->u32PayRemain != 0)
4288 E1K_INC_CNT32(TSCTFC);
4289 pThis->u16TxPktLen = 0;
4290 e1kXmitFreeBuf(pThis);
4291 }
4292
4293 return false;
4294}
4295#else /* E1K_WITH_TXD_CACHE */
4296/**
4297 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4298 * frame.
4299 *
 * We construct the frame in the fallback buffer first and then copy it to the SG
4301 * buffer before passing it down to the network driver code.
4302 *
4303 * @returns error code
4304 *
4305 * @param pThis The device state structure.
4306 * @param pDesc Pointer to the descriptor to transmit.
4308 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4309 * @thread E1000_TX
4310 */
4311static int e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, bool fOnWorkerThread)
4312{
4313#ifdef VBOX_STRICT
4314 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4315 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4316 Assert(pDesc->data.cmd.fTSE);
4317 Assert(!e1kXmitIsGsoBuf(pTxSg));
4318#endif
4319
4320 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4321 Assert(u16MaxPktLen != 0);
4322 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
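    /* Illustrative example (numbers made up): with HDRLEN=54 and MSS=1460 each TCP segment
     * carved out below is at most 1514 bytes; a data descriptor larger than the room left
     * in the current segment is split across several e1kFallbackAddSegment() calls. */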
4323
4324 /*
4325 * Carve out segments.
4326 */
4327 int rc;
4328 do
4329 {
4330 /* Calculate how many bytes we have left in this TCP segment */
4331 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4332 if (cb > pDesc->data.cmd.u20DTALEN)
4333 {
4334 /* This descriptor fits completely into current segment */
4335 cb = pDesc->data.cmd.u20DTALEN;
4336 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4337 }
4338 else
4339 {
4340 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4341 /*
4342 * Rewind the packet tail pointer to the beginning of payload,
4343 * so we continue writing right beyond the header.
4344 */
4345 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4346 }
4347
4348 pDesc->data.u64BufAddr += cb;
4349 pDesc->data.cmd.u20DTALEN -= cb;
4350 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4351
4352 if (pDesc->data.cmd.fEOP)
4353 {
4354 /* End of packet, next segment will contain header. */
4355 if (pThis->u32PayRemain != 0)
4356 E1K_INC_CNT32(TSCTFC);
4357 pThis->u16TxPktLen = 0;
4358 e1kXmitFreeBuf(pThis);
4359 }
4360
    return rc;
4362}
4363#endif /* E1K_WITH_TXD_CACHE */
4364
4365
4366/**
4367 * Add descriptor's buffer to transmit frame.
4368 *
4369 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4370 * TSE frames we cannot handle as GSO.
4371 *
4372 * @returns true on success, false on failure.
4373 *
4374 * @param pThis The device state structure.
4375 * @param PhysAddr The physical address of the descriptor buffer.
4376 * @param cbFragment Length of descriptor's buffer.
4377 * @thread E1000_TX
4378 */
4379static bool e1kAddToFrame(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint32_t cbFragment)
4380{
4381 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4382 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4383 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4384
4385 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4386 {
4387 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4388 return false;
4389 }
4390 if (RT_UNLIKELY( fGso && cbNewPkt > pTxSg->cbAvailable ))
4391 {
4392 E1kLog(("%s Transmit packet is too large: %u > %u(max)/GSO\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
4393 return false;
4394 }
4395
4396 if (RT_LIKELY(pTxSg))
4397 {
4398 Assert(pTxSg->cSegs == 1);
4399 Assert(pTxSg->cbUsed == pThis->u16TxPktLen);
4400
4401 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4402 (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4403
4404 pTxSg->cbUsed = cbNewPkt;
4405 }
4406 pThis->u16TxPktLen = cbNewPkt;
4407
4408 return true;
4409}
4410
4411
4412/**
4413 * Write the descriptor back to guest memory and notify the guest.
4414 *
4415 * @param pThis The device state structure.
 * @param   pDesc       Pointer to the descriptor that has been transmitted.
4417 * @param addr Physical address of the descriptor in guest memory.
4418 * @thread E1000_TX
4419 */
4420static void e1kDescReport(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4421{
4422 /*
4423 * We fake descriptor write-back bursting. Descriptors are written back as they are
4424 * processed.
4425 */
4426 /* Let's pretend we process descriptors. Write back with DD set. */
4427 /*
     * Prior to r71586 we tried to accommodate the case when write-back bursts
     * are enabled without actually implementing bursting by writing back all
     * descriptors, even the ones that do not have RS set. This caused kernel
     * panics with Linux SMP kernels, as the e1000 driver tried to free the skb
     * associated with a written-back descriptor if it happened to be a context
     * descriptor, since context descriptors have no skb associated with them.
     * Starting from r71586 we write back only the descriptors with RS set,
     * which is a little bit different from what the real hardware does in
     * case there is a chain of data descriptors where some of them have RS set
     * and others do not -- a very uncommon scenario.
4438 * We need to check RPS as well since some legacy drivers use it instead of
4439 * RS even with newer cards.
4440 */
4441 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4442 {
4443 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4444 e1kWriteBackDesc(pThis, pDesc, addr);
4445 if (pDesc->legacy.cmd.fEOP)
4446 {
4447#ifdef E1K_USE_TX_TIMERS
4448 if (pDesc->legacy.cmd.fIDE)
4449 {
4450 E1K_INC_ISTAT_CNT(pThis->uStatTxIDE);
4451 //if (pThis->fIntRaised)
4452 //{
4453 // /* Interrupt is already pending, no need for timers */
4454 // ICR |= ICR_TXDW;
4455 //}
4456 //else {
4457 /* Arm the timer to fire in TIVD usec (discard .024) */
4458 e1kArmTimer(pThis, pThis->CTX_SUFF(pTIDTimer), TIDV);
4459# ifndef E1K_NO_TAD
4460 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4461 E1kLog2(("%s Checking if TAD timer is running\n",
4462 pThis->szPrf));
4463 if (TADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pTADTimer)))
4464 e1kArmTimer(pThis, pThis->CTX_SUFF(pTADTimer), TADV);
4465# endif /* E1K_NO_TAD */
4466 }
4467 else
4468 {
4469 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4470 pThis->szPrf));
4471# ifndef E1K_NO_TAD
4472 /* Cancel both timers if armed and fire immediately. */
4473 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
4474# endif
4475#endif /* E1K_USE_TX_TIMERS */
4476 E1K_INC_ISTAT_CNT(pThis->uStatIntTx);
4477 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXDW);
4478#ifdef E1K_USE_TX_TIMERS
4479 }
4480#endif /* E1K_USE_TX_TIMERS */
4481 }
4482 }
4483 else
4484 {
4485 E1K_INC_ISTAT_CNT(pThis->uStatTxNoRS);
4486 }
4487}
4488
4489#ifndef E1K_WITH_TXD_CACHE
4490
4491/**
4492 * Process Transmit Descriptor.
4493 *
4494 * E1000 supports three types of transmit descriptors:
 * - legacy: data descriptors of the older, context-less format.
 * - data: same as legacy but provides new offloading capabilities.
 * - context: sets up the context for the following data descriptors.
4498 *
4499 * @param pThis The device state structure.
4500 * @param pDesc Pointer to descriptor union.
4501 * @param addr Physical address of descriptor in guest memory.
4502 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4503 * @thread E1000_TX
4504 */
4505static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr, bool fOnWorkerThread)
4506{
4507 int rc = VINF_SUCCESS;
4508 uint32_t cbVTag = 0;
4509
4510 e1kPrintTDesc(pThis, pDesc, "vvv");
4511
4512#ifdef E1K_USE_TX_TIMERS
4513 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
4514#endif /* E1K_USE_TX_TIMERS */
4515
4516 switch (e1kGetDescType(pDesc))
4517 {
4518 case E1K_DTYP_CONTEXT:
4519 if (pDesc->context.dw2.fTSE)
4520 {
4521 pThis->contextTSE = pDesc->context;
4522 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4523 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4524 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4525 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4526 }
4527 else
4528 {
4529 pThis->contextNormal = pDesc->context;
4530 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4531 }
4532 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4533 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4534 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4535 pDesc->context.ip.u8CSS,
4536 pDesc->context.ip.u8CSO,
4537 pDesc->context.ip.u16CSE,
4538 pDesc->context.tu.u8CSS,
4539 pDesc->context.tu.u8CSO,
4540 pDesc->context.tu.u16CSE));
4541 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4542 e1kDescReport(pThis, pDesc, addr);
4543 break;
4544
4545 case E1K_DTYP_DATA:
4546 {
4547 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4548 {
4549 E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4550 /** @todo Same as legacy when !TSE. See below. */
4551 break;
4552 }
4553 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4554 &pThis->StatTxDescTSEData:
4555 &pThis->StatTxDescData);
4556 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4557 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4558
4559 /*
4560 * The last descriptor of a non-TSE packet must contain the VLE flag.
4561 * TSE packets have the VLE flag in the first descriptor. The latter
4562 * case is taken care of a bit later, when cbVTag gets assigned.
4563 *
4564 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4565 */
4566 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4567 {
4568 pThis->fVTag = pDesc->data.cmd.fVLE;
4569 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4570 }
4571 /*
4572 * First fragment: Allocate new buffer and save the IXSM and TXSM
4573 * packet options as these are only valid in the first fragment.
4574 */
4575 if (pThis->u16TxPktLen == 0)
4576 {
4577 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4578 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4579 E1kLog2(("%s Saving checksum flags:%s%s; \n", pThis->szPrf,
4580 pThis->fIPcsum ? " IP" : "",
4581 pThis->fTCPcsum ? " TCP/UDP" : ""));
4582 if (pDesc->data.cmd.fTSE)
4583 {
4584 /* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
4585 pThis->fVTag = pDesc->data.cmd.fVLE;
4586 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4587 cbVTag = pThis->fVTag ? 4 : 0;
4588 }
4589 else if (pDesc->data.cmd.fEOP)
4590 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4591 else
4592 cbVTag = 4;
4593 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
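/*
 * Pick the buffer size for one of the three paths below: a GSO buffer
 * covering the whole payload plus header, a single TSE segment
 * (MSS + header), or a plain data buffer of DTALEN bytes; each is
 * extended by cbVTag to leave room for an optional VLAN tag.
 */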
4594 if (e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE))
4595 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4596 true /*fExactSize*/, true /*fGso*/);
4597 else if (pDesc->data.cmd.fTSE)
4598 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4599 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4600 else
4601 rc = e1kXmitAllocBuf(pThis, pDesc->data.cmd.u20DTALEN + cbVTag,
4602 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4603
4604 /**
4605 * @todo: Perhaps it is not that simple for GSO packets! We may
4606 * need to unwind some changes.
4607 */
4608 if (RT_FAILURE(rc))
4609 {
4610 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4611 break;
4612 }
4613 /** @todo Is there any way of indicating errors other than collisions? Like
4614 * VERR_NET_DOWN. */
4615 }
4616
4617 /*
4618 * Add the descriptor data to the frame. If the frame is complete,
4619 * transmit it and reset the u16TxPktLen field.
4620 */
4621 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4622 {
4623 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4624 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4625 if (pDesc->data.cmd.fEOP)
4626 {
4627 if ( fRc
4628 && pThis->CTX_SUFF(pTxSg)
4629 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4630 {
4631 e1kTransmitFrame(pThis, fOnWorkerThread);
4632 E1K_INC_CNT32(TSCTC);
4633 }
4634 else
4635 {
4636 if (fRc)
4637 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4638 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4639 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4640 e1kXmitFreeBuf(pThis);
4641 E1K_INC_CNT32(TSCTFC);
4642 }
4643 pThis->u16TxPktLen = 0;
4644 }
4645 }
4646 else if (!pDesc->data.cmd.fTSE)
4647 {
4648 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4649 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4650 if (pDesc->data.cmd.fEOP)
4651 {
4652 if (fRc && pThis->CTX_SUFF(pTxSg))
4653 {
4654 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4655 if (pThis->fIPcsum)
4656 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4657 pThis->contextNormal.ip.u8CSO,
4658 pThis->contextNormal.ip.u8CSS,
4659 pThis->contextNormal.ip.u16CSE);
4660 if (pThis->fTCPcsum)
4661 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4662 pThis->contextNormal.tu.u8CSO,
4663 pThis->contextNormal.tu.u8CSS,
4664 pThis->contextNormal.tu.u16CSE);
4665 e1kTransmitFrame(pThis, fOnWorkerThread);
4666 }
4667 else
4668 e1kXmitFreeBuf(pThis);
4669 pThis->u16TxPktLen = 0;
4670 }
4671 }
4672 else
4673 {
4674 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4675 e1kFallbackAddToFrame(pThis, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4676 }
4677
4678 e1kDescReport(pThis, pDesc, addr);
4679 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4680 break;
4681 }
4682
4683 case E1K_DTYP_LEGACY:
4684 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4685 {
4686 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4687 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4688 break;
4689 }
4690 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4691 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4692
4693 /* First fragment: allocate new buffer. */
4694 if (pThis->u16TxPktLen == 0)
4695 {
4696 if (pDesc->legacy.cmd.fEOP)
4697 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4698 else
4699 cbVTag = 4;
4700 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4701 /** @todo reset status bits? */
4702 rc = e1kXmitAllocBuf(pThis, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
4703 if (RT_FAILURE(rc))
4704 {
4705 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4706 break;
4707 }
4708
4709 /** @todo Is there any way of indicating errors other than collisions? Like
4710 * VERR_NET_DOWN. */
4711 }
4712
4713 /* Add fragment to frame. */
4714 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4715 {
4716 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4717
4718 /* Last fragment: Transmit and reset the packet storage counter. */
4719 if (pDesc->legacy.cmd.fEOP)
4720 {
4721 pThis->fVTag = pDesc->legacy.cmd.fVLE;
4722 pThis->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4723 /** @todo Offload processing goes here. */
4724 e1kTransmitFrame(pThis, fOnWorkerThread);
4725 pThis->u16TxPktLen = 0;
4726 }
4727 }
4728 /* Last fragment + failure: free the buffer and reset the storage counter. */
4729 else if (pDesc->legacy.cmd.fEOP)
4730 {
4731 e1kXmitFreeBuf(pThis);
4732 pThis->u16TxPktLen = 0;
4733 }
4734
4735 e1kDescReport(pThis, pDesc, addr);
4736 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4737 break;
4738
4739 default:
4740 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4741 pThis->szPrf, e1kGetDescType(pDesc)));
4742 break;
4743 }
4744
4745 return rc;
4746}
4747
4748#else /* E1K_WITH_TXD_CACHE */
4749
4750/**
4751 * Process Transmit Descriptor.
4752 *
4753 * E1000 supports three types of transmit descriptors:
4754 * - legacy: data descriptors in the older, context-less format.
4755 * - data: same as legacy but providing new offloading capabilities.
4756 * - context: sets up the context for the following data descriptors.
4757 *
4758 * @param pThis The device state structure.
4759 * @param pDesc Pointer to descriptor union.
4760 * @param addr Physical address of descriptor in guest memory.
4761 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4763 * @thread E1000_TX
4764 */
4765static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr,
4766 bool fOnWorkerThread)
4767{
4768 int rc = VINF_SUCCESS;
4769
4770 e1kPrintTDesc(pThis, pDesc, "vvv");
4771
4772#ifdef E1K_USE_TX_TIMERS
4773 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
4774#endif /* E1K_USE_TX_TIMERS */
4775
4776 switch (e1kGetDescType(pDesc))
4777 {
4778 case E1K_DTYP_CONTEXT:
4779 /* The caller has already updated the context. */
4780 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4781 e1kDescReport(pThis, pDesc, addr);
4782 break;
4783
4784 case E1K_DTYP_DATA:
4785 {
4786 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4787 &pThis->StatTxDescTSEData:
4788 &pThis->StatTxDescData);
4789 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4790 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4791 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4792 {
4793 E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4794 }
4795 else
4796 {
4797 /*
4798 * Add the descriptor data to the frame. If the frame is complete,
4799 * transmit it and reset the u16TxPktLen field.
4800 */
4801 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4802 {
4803 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4804 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4805 if (pDesc->data.cmd.fEOP)
4806 {
4807 if ( fRc
4808 && pThis->CTX_SUFF(pTxSg)
4809 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4810 {
4811 e1kTransmitFrame(pThis, fOnWorkerThread);
4812 E1K_INC_CNT32(TSCTC);
4813 }
4814 else
4815 {
4816 if (fRc)
4817 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4818 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4819 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4820 e1kXmitFreeBuf(pThis);
4821 E1K_INC_CNT32(TSCTFC);
4822 }
4823 pThis->u16TxPktLen = 0;
4824 }
4825 }
4826 else if (!pDesc->data.cmd.fTSE)
4827 {
4828 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4829 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4830 if (pDesc->data.cmd.fEOP)
4831 {
4832 if (fRc && pThis->CTX_SUFF(pTxSg))
4833 {
4834 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4835 if (pThis->fIPcsum)
4836 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4837 pThis->contextNormal.ip.u8CSO,
4838 pThis->contextNormal.ip.u8CSS,
4839 pThis->contextNormal.ip.u16CSE);
4840 if (pThis->fTCPcsum)
4841 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4842 pThis->contextNormal.tu.u8CSO,
4843 pThis->contextNormal.tu.u8CSS,
4844 pThis->contextNormal.tu.u16CSE);
4845 e1kTransmitFrame(pThis, fOnWorkerThread);
4846 }
4847 else
4848 e1kXmitFreeBuf(pThis);
4849 pThis->u16TxPktLen = 0;
4850 }
4851 }
4852 else
4853 {
4854 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4855 rc = e1kFallbackAddToFrame(pThis, pDesc, fOnWorkerThread);
4856 }
4857 }
4858 e1kDescReport(pThis, pDesc, addr);
4859 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4860 break;
4861 }
4862
4863 case E1K_DTYP_LEGACY:
4864 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4865 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4866 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4867 {
4868 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4869 }
4870 else
4871 {
4872 /* Add fragment to frame. */
4873 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4874 {
4875 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4876
4877 /* Last fragment: Transmit and reset the packet storage counter. */
4878 if (pDesc->legacy.cmd.fEOP)
4879 {
4880 if (pDesc->legacy.cmd.fIC)
4881 {
4882 e1kInsertChecksum(pThis,
4883 (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
4884 pThis->u16TxPktLen,
4885 pDesc->legacy.cmd.u8CSO,
4886 pDesc->legacy.dw3.u8CSS,
4887 0);
4888 }
4889 e1kTransmitFrame(pThis, fOnWorkerThread);
4890 pThis->u16TxPktLen = 0;
4891 }
4892 }
4893 /* Last fragment + failure: free the buffer and reset the storage counter. */
4894 else if (pDesc->legacy.cmd.fEOP)
4895 {
4896 e1kXmitFreeBuf(pThis);
4897 pThis->u16TxPktLen = 0;
4898 }
4899 }
4900 e1kDescReport(pThis, pDesc, addr);
4901 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4902 break;
4903
4904 default:
4905 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4906 pThis->szPrf, e1kGetDescType(pDesc)));
4907 break;
4908 }
4909
4910 return rc;
4911}
4912
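/**
 * Update the transmit context from a context descriptor.
 *
 * Stores the descriptor in either the TSE or the normal context and, for TSE
 * contexts, initializes the remaining payload/header counters and prepares
 * the GSO context.
 *
 * @param   pThis       The device state structure.
 * @param   pDesc       Pointer to the context descriptor.
 * @thread  E1000_TX
 */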
4913DECLINLINE(void) e1kUpdateTxContext(PE1KSTATE pThis, E1KTXDESC *pDesc)
4914{
4915 if (pDesc->context.dw2.fTSE)
4916 {
4917 pThis->contextTSE = pDesc->context;
4918 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4919 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4920 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4921 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4922 }
4923 else
4924 {
4925 pThis->contextNormal = pDesc->context;
4926 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4927 }
4928 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4929 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4930 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4931 pDesc->context.ip.u8CSS,
4932 pDesc->context.ip.u8CSO,
4933 pDesc->context.ip.u16CSE,
4934 pDesc->context.tu.u8CSS,
4935 pDesc->context.tu.u8CSO,
4936 pDesc->context.tu.u16CSE));
4937}
4938
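/**
 * Locate a complete packet in the transmit descriptor cache.
 *
 * Walks the cached descriptors starting at iTxDCurrent, applying context
 * descriptors, collecting per-packet options (checksum offload, VLAN tag,
 * GSO eligibility) and accumulating the packet size until a descriptor with
 * EOP set is found. On success the required buffer size is stored in
 * cbTxAlloc.
 *
 * @returns true if a complete packet (or a run of empty descriptors) was
 *          found, false if more descriptors need to be fetched.
 * @param   pThis       The device state structure.
 * @thread  E1000_TX
 */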
4939static bool e1kLocateTxPacket(PE1KSTATE pThis)
4940{
4941 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
4942 pThis->szPrf, pThis->cbTxAlloc));
4943 /* Check if we have located the packet already. */
4944 if (pThis->cbTxAlloc)
4945 {
4946 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
4947 pThis->szPrf, pThis->cbTxAlloc));
4948 return true;
4949 }
4950
4951 bool fTSE = false;
4952 uint32_t cbPacket = 0;
4953
4954 for (int i = pThis->iTxDCurrent; i < pThis->nTxDFetched; ++i)
4955 {
4956 E1KTXDESC *pDesc = &pThis->aTxDescriptors[i];
4957 switch (e1kGetDescType(pDesc))
4958 {
4959 case E1K_DTYP_CONTEXT:
4960 e1kUpdateTxContext(pThis, pDesc);
4961 continue;
4962 case E1K_DTYP_LEGACY:
4963 /* Skip empty descriptors. */
4964 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
4965 break;
4966 cbPacket += pDesc->legacy.cmd.u16Length;
4967 pThis->fGSO = false;
4968 break;
4969 case E1K_DTYP_DATA:
4970 /* Skip empty descriptors. */
4971 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
4972 break;
4973 if (cbPacket == 0)
4974 {
4975 /*
4976 * The first fragment: save IXSM and TXSM options
4977 * as these are only valid in the first fragment.
4978 */
4979 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4980 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4981 fTSE = pDesc->data.cmd.fTSE;
4982 /*
4983 * TSE descriptors have VLE bit properly set in
4984 * the first fragment.
4985 */
4986 if (fTSE)
4987 {
4988 pThis->fVTag = pDesc->data.cmd.fVLE;
4989 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4990 }
4991 pThis->fGSO = e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE);
4992 }
4993 cbPacket += pDesc->data.cmd.u20DTALEN;
4994 break;
4995 default:
4996 AssertMsgFailed(("Impossible descriptor type!"));
4997 }
4998 if (pDesc->legacy.cmd.fEOP)
4999 {
5000 /*
5001 * Non-TSE descriptors have VLE bit properly set in
5002 * the last fragment.
5003 */
5004 if (!fTSE)
5005 {
5006 pThis->fVTag = pDesc->data.cmd.fVLE;
5007 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5008 }
5009 /*
5010 * Compute the required buffer size. If we cannot do GSO but still
5011 * have to do segmentation we allocate the first segment only.
5012 */
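/*
 * E.g. (hypothetical numbers): with MSS=1460 and HDRLEN=54, a large TSE
 * packet that cannot be offloaded via GSO gets a 1514-byte buffer for the
 * first segment only, whereas a GSO-capable or non-TSE packet gets a buffer
 * for the whole cbPacket.
 */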
5013 pThis->cbTxAlloc = (!fTSE || pThis->fGSO) ?
5014 cbPacket :
5015 RT_MIN(cbPacket, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN);
5016 if (pThis->fVTag)
5017 pThis->cbTxAlloc += 4;
5018 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
5019 pThis->szPrf, pThis->cbTxAlloc));
5020 return true;
5021 }
5022 }
5023
5024 if (cbPacket == 0 && pThis->nTxDFetched - pThis->iTxDCurrent > 0)
5025 {
5026 /* All descriptors were empty; we need to process them as a dummy packet. */
5027 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
5028 pThis->szPrf, pThis->cbTxAlloc));
5029 return true;
5030 }
5031 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d\n",
5032 pThis->szPrf, pThis->cbTxAlloc));
5033 return false;
5034}
5035
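/**
 * Transmit the packet previously located in the descriptor cache.
 *
 * Feeds the cached descriptors, starting at iTxDCurrent, to e1kXmitDesc()
 * until the end-of-packet descriptor has been processed, advancing TDH with
 * wrap-around and raising ICR.TXD_LOW when the ring runs low.
 *
 * @returns VBox status code.
 * @param   pThis               The device state structure.
 * @param   fOnWorkerThread     Whether we're on a worker thread or an EMT.
 * @thread  E1000_TX
 */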
5036static int e1kXmitPacket(PE1KSTATE pThis, bool fOnWorkerThread)
5037{
5038 int rc = VINF_SUCCESS;
5039
5040 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
5041 pThis->szPrf, pThis->iTxDCurrent, pThis->nTxDFetched));
5042
5043 while (pThis->iTxDCurrent < pThis->nTxDFetched)
5044 {
5045 E1KTXDESC *pDesc = &pThis->aTxDescriptors[pThis->iTxDCurrent];
5046 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5047 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
5048 rc = e1kXmitDesc(pThis, pDesc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5049 if (RT_FAILURE(rc))
5050 break;
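/* Advance TDH, wrapping around at the end of the descriptor ring (TDLEN is the ring size in bytes). */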
5051 if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
5052 TDH = 0;
5053 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
5054 if (uLowThreshold != 0 && e1kGetTxLen(pThis) <= uLowThreshold)
5055 {
5056 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5057 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5058 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5059 }
5060 ++pThis->iTxDCurrent;
5061 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
5062 break;
5063 }
5064
5065 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
5066 pThis->szPrf, rc, pThis->iTxDCurrent, pThis->nTxDFetched));
5067 return rc;
5068}
5069
5070#endif /* E1K_WITH_TXD_CACHE */
5071#ifndef E1K_WITH_TXD_CACHE
5072
5073/**
5074 * Transmit pending descriptors.
5075 *
5076 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5077 *
5078 * @param pThis The E1000 state.
5079 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5080 */
5081static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5082{
5083 int rc = VINF_SUCCESS;
5084
5085 /* Check if transmitter is enabled. */
5086 if (!(TCTL & TCTL_EN))
5087 return VINF_SUCCESS;
5088 /*
5089 * Grab the xmit lock of the driver as well as the E1K device state.
5090 */
5091 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5092 if (RT_LIKELY(rc == VINF_SUCCESS))
5093 {
5094 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5095 if (pDrv)
5096 {
5097 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5098 if (RT_FAILURE(rc))
5099 {
5100 e1kCsTxLeave(pThis);
5101 return rc;
5102 }
5103 }
5104 /*
5105 * Process all pending descriptors.
5106 * Note! Do not process descriptors in locked state
5107 */
5108 while (TDH != TDT && !pThis->fLocked)
5109 {
5110 E1KTXDESC desc;
5111 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5112 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
5113
5114 e1kLoadDesc(pThis, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
5115 rc = e1kXmitDesc(pThis, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5116 /* If we failed to transmit descriptor we will try it again later */
5117 if (RT_FAILURE(rc))
5118 break;
5119 if (++TDH * sizeof(desc) >= TDLEN)
5120 TDH = 0;
5121
5122 if (e1kGetTxLen(pThis) <= GET_BITS(TXDCTL, LWTHRESH)*8)
5123 {
5124 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5125 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5126 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5127 }
5128
5129 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5130 }
5131
5132 /// @todo uncomment: pThis->uStatIntTXQE++;
5133 /// @todo uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5134 /*
5135 * Release the lock.
5136 */
5137 if (pDrv)
5138 pDrv->pfnEndXmit(pDrv);
5139 e1kCsTxLeave(pThis);
5140 }
5141
5142 return rc;
5143}
5144
5145#else /* E1K_WITH_TXD_CACHE */
5146
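/**
 * Dump the transmit descriptor ring and the internal descriptor cache to the
 * release log. Used when no complete packet could be located in the cache.
 *
 * @param   pThis       The device state structure.
 */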
5147static void e1kDumpTxDCache(PE1KSTATE pThis)
5148{
5149 unsigned i, cDescs = TDLEN / sizeof(E1KTXDESC);
5150 uint32_t tdh = TDH;
5151 LogRel(("-- Transmit Descriptors (%d total) --\n", cDescs));
5152 for (i = 0; i < cDescs; ++i)
5153 {
5154 E1KTXDESC desc;
5155 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(TDBAH, TDBAL, i),
5156 &desc, sizeof(desc));
5157 if (i == tdh)
5158 LogRel((">>> "));
5159 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5160 }
5161 LogRel(("-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5162 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE));
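/*
 * Map the cache back onto the ring: the entry at iTxDCurrent corresponds to
 * ring index TDH, so cache entry 0 sits iTxDCurrent slots earlier (modulo
 * the ring size).
 */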
5163 if (tdh > pThis->iTxDCurrent)
5164 tdh -= pThis->iTxDCurrent;
5165 else
5166 tdh = cDescs + tdh - pThis->iTxDCurrent;
5167 for (i = 0; i < pThis->nTxDFetched; ++i)
5168 {
5169 if (i == pThis->iTxDCurrent)
5170 LogRel((">>> "));
5171 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
5172 }
5173}
5174
5175/**
5176 * Transmit pending descriptors.
5177 *
5178 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5179 *
5180 * @param pThis The E1000 state.
5181 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5182 */
5183static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5184{
5185 int rc = VINF_SUCCESS;
5186
5187 /* Check if transmitter is enabled. */
5188 if (!(TCTL & TCTL_EN))
5189 return VINF_SUCCESS;
5190 /*
5191 * Grab the xmit lock of the driver as well as the E1K device state.
5192 */
5193 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5194 if (pDrv)
5195 {
5196 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5197 if (RT_FAILURE(rc))
5198 return rc;
5199 }
5200
5201 /*
5202 * Process all pending descriptors.
5203 * Note! Do not process descriptors in locked state
5204 */
5205 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5206 if (RT_LIKELY(rc == VINF_SUCCESS))
5207 {
5208 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5209 /*
5210 * fIncomplete is set whenever we try to fetch additional descriptors
5211 * for an incomplete packet. If we fail to locate a complete packet on
5212 * the next iteration, we need to reset the cache or we risk getting
5213 * stuck in this loop forever.
5214 */
5215 bool fIncomplete = false;
5216 while (!pThis->fLocked && e1kTxDLazyLoad(pThis))
5217 {
5218 while (e1kLocateTxPacket(pThis))
5219 {
5220 fIncomplete = false;
5221 /* Found a complete packet, allocate it. */
5222 rc = e1kXmitAllocBuf(pThis, pThis->fGSO);
5223 /* If we're out of bandwidth we'll come back later. */
5224 if (RT_FAILURE(rc))
5225 goto out;
5226 /* Copy the packet to allocated buffer and send it. */
5227 rc = e1kXmitPacket(pThis, fOnWorkerThread);
5228 /* If we're out of bandwidth we'll come back later. */
5229 if (RT_FAILURE(rc))
5230 goto out;
5231 }
5232 uint8_t u8Remain = pThis->nTxDFetched - pThis->iTxDCurrent;
5233 if (RT_UNLIKELY(fIncomplete))
5234 {
5235 static bool fTxDCacheDumped = false;
5236 /*
5237 * The descriptor cache is full, but we were unable to find
5238 * a complete packet in it. Drop the cache and hope that
5239 * the guest driver can recover from the network card error.
5240 */
5241 LogRel(("%s No complete packets in%s TxD cache! "
5242 "Fetched=%d, current=%d, TX len=%d.\n",
5243 pThis->szPrf,
5244 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5245 pThis->nTxDFetched, pThis->iTxDCurrent,
5246 e1kGetTxLen(pThis)));
5247 if (!fTxDCacheDumped)
5248 {
5249 fTxDCacheDumped = true;
5250 e1kDumpTxDCache(pThis);
5251 }
5252 pThis->iTxDCurrent = pThis->nTxDFetched = 0;
5253 /*
5254 * Returning an error at this point means Guru in R0
5255 * (see @bugref{6428}).
5256 */
5257# ifdef IN_RING3
5258 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5259# else /* !IN_RING3 */
5260 rc = VINF_IOM_R3_MMIO_WRITE;
5261# endif /* !IN_RING3 */
5262 goto out;
5263 }
5264 if (u8Remain > 0)
5265 {
5266 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5267 "%d more are available\n",
5268 pThis->szPrf, pThis->iTxDCurrent, u8Remain,
5269 e1kGetTxLen(pThis) - u8Remain));
5270
5271 /*
5272 * A packet was partially fetched. Move the incomplete packet to
5273 * the beginning of the cache buffer, then load more descriptors.
5274 */
5275 memmove(pThis->aTxDescriptors,
5276 &pThis->aTxDescriptors[pThis->iTxDCurrent],
5277 u8Remain * sizeof(E1KTXDESC));
5278 pThis->iTxDCurrent = 0;
5279 pThis->nTxDFetched = u8Remain;
5280 e1kTxDLoadMore(pThis);
5281 fIncomplete = true;
5282 }
5283 else
5284 pThis->nTxDFetched = 0;
5285 pThis->iTxDCurrent = 0;
5286 }
5287 if (!pThis->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5288 {
5289 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5290 pThis->szPrf));
5291 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5292 }
5293out:
5294 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5295
5296 /// @todo uncomment: pThis->uStatIntTXQE++;
5297 /// @todo uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5298
5299 e1kCsTxLeave(pThis);
5300 }
5301
5302
5303 /*
5304 * Release the lock.
5305 */
5306 if (pDrv)
5307 pDrv->pfnEndXmit(pDrv);
5308 return rc;
5309}
5310
5311#endif /* E1K_WITH_TXD_CACHE */
5312#ifdef IN_RING3
5313
5314/**
5315 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5316 */
5317static DECLCALLBACK(void) e1kR3NetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5318{
5319 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
5320 /* Resume suspended transmission */
5321 STATUS &= ~STATUS_TXOFF;
5322 e1kXmitPending(pThis, true /*fOnWorkerThread*/);
5323}
5324
5325/**
5326 * Callback for consuming from transmit queue. It gets called in R3 whenever
5327 * we enqueue something in R0/GC.
5328 *
5329 * @returns true
5330 * @param pDevIns Pointer to device instance structure.
5331 * @param pItem Pointer to the element being dequeued (not used).
5332 * @thread ???
5333 */
5334static DECLCALLBACK(bool) e1kTxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5335{
5336 NOREF(pItem);
5337 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5338 E1kLog2(("%s e1kTxQueueConsumer:\n", pThis->szPrf));
5339
5340 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/); NOREF(rc);
5341#ifndef DEBUG_andy /** @todo r=andy Happens for me a lot, mute this for me. */
5342 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
5343#endif
5344 return true;
5345}
5346
5347/**
5348 * Handler for the wakeup signaller queue.
5349 */
5350static DECLCALLBACK(bool) e1kCanRxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5351{
5352 RT_NOREF(pItem);
5353 e1kWakeupReceive(pDevIns);
5354 return true;
5355}
5356
5357#endif /* IN_RING3 */
5358
5359/**
5360 * Write handler for Transmit Descriptor Tail register.
5361 *
5362 * @param pThis The device state structure.
5363 * @param offset Register offset in memory-mapped frame.
5364 * @param index Register index in register array.
5365 * @param value The value to store.
5367 * @thread EMT
5368 */
5369static int e1kRegWriteTDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5370{
5371 int rc = e1kRegWriteDefault(pThis, offset, index, value);
5372
5373 /* All descriptors starting with head and not including tail belong to us. */
5374 /* Process them. */
5375 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5376 pThis->szPrf, TDBAL, TDBAH, TDLEN, TDH, TDT));
5377
5378 /* Ignore TDT writes when the link is down. */
5379 if (TDH != TDT && (STATUS & STATUS_LU))
5380 {
5381 Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", TDH, TDT, e1kGetTxLen(pThis)));
5382 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5383 pThis->szPrf, e1kGetTxLen(pThis)));
5384
5385 /* Transmit pending packets if possible, defer it if we cannot do it
5386 in the current context. */
5387#ifdef E1K_TX_DELAY
5388 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5389 if (RT_LIKELY(rc == VINF_SUCCESS))
5390 {
5391 if (!TMTimerIsActive(pThis->CTX_SUFF(pTXDTimer)))
5392 {
5393#ifdef E1K_INT_STATS
5394 pThis->u64ArmedAt = RTTimeNanoTS();
5395#endif
5396 e1kArmTimer(pThis, pThis->CTX_SUFF(pTXDTimer), E1K_TX_DELAY);
5397 }
5398 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayed);
5399 e1kCsTxLeave(pThis);
5400 return rc;
5401 }
5402 /* We failed to enter the TX critical section -- transmit as usual. */
5403#endif /* E1K_TX_DELAY */
5404#ifndef IN_RING3
5405 if (!pThis->CTX_SUFF(pDrv))
5406 {
5407 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pTxQueue));
5408 if (RT_UNLIKELY(pItem))
5409 PDMQueueInsert(pThis->CTX_SUFF(pTxQueue), pItem);
5410 }
5411 else
5412#endif
5413 {
5414 rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
5415 if (rc == VERR_TRY_AGAIN)
5416 rc = VINF_SUCCESS;
5417 else if (rc == VERR_SEM_BUSY)
5418 rc = VINF_IOM_R3_MMIO_WRITE;
5419 AssertRC(rc);
5420 }
5421 }
5422
5423 return rc;
5424}
5425
5426/**
5427 * Write handler for Multicast Table Array registers.
5428 *
5429 * @param pThis The device state structure.
5430 * @param offset Register offset in memory-mapped frame.
5431 * @param index Register index in register array.
5432 * @param value The value to store.
5433 * @thread EMT
5434 */
5435static int e1kRegWriteMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5436{
5437 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5438 pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])] = value;
5439
5440 return VINF_SUCCESS;
5441}
5442
5443/**
5444 * Read handler for Multicast Table Array registers.
5445 *
5446 * @returns VBox status code.
5447 *
5448 * @param pThis The device state structure.
5449 * @param offset Register offset in memory-mapped frame.
5450 * @param index Register index in register array.
5451 * @thread EMT
5452 */
5453static int e1kRegReadMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5454{
5455 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5456 *pu32Value = pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])];
5457
5458 return VINF_SUCCESS;
5459}
5460
5461/**
5462 * Write handler for Receive Address registers.
5463 *
5464 * @param pThis The device state structure.
5465 * @param offset Register offset in memory-mapped frame.
5466 * @param index Register index in register array.
5467 * @param value The value to store.
5468 * @thread EMT
5469 */
5470static int e1kRegWriteRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5471{
5472 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5473 pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
5474
5475 return VINF_SUCCESS;
5476}
5477
5478/**
5479 * Read handler for Receive Address registers.
5480 *
5481 * @returns VBox status code.
5482 *
5483 * @param pThis The device state structure.
5484 * @param offset Register offset in memory-mapped frame.
5485 * @param index Register index in register array.
5486 * @thread EMT
5487 */
5488static int e1kRegReadRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5489{
5490 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5491 *pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
5492
5493 return VINF_SUCCESS;
5494}
5495
5496/**
5497 * Write handler for VLAN Filter Table Array registers.
5498 *
5499 * @param pThis The device state structure.
5500 * @param offset Register offset in memory-mapped frame.
5501 * @param index Register index in register array.
5502 * @param value The value to store.
5503 * @thread EMT
5504 */
5505static int e1kRegWriteVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5506{
5507 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VINF_SUCCESS);
5508 pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])] = value;
5509
5510 return VINF_SUCCESS;
5511}
5512
5513/**
5514 * Read handler for VLAN Filter Table Array registers.
5515 *
5516 * @returns VBox status code.
5517 *
5518 * @param pThis The device state structure.
5519 * @param offset Register offset in memory-mapped frame.
5520 * @param index Register index in register array.
5521 * @thread EMT
5522 */
5523static int e1kRegReadVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5524{
5525 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VERR_DEV_IO_ERROR);
5526 *pu32Value = pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])];
5527
5528 return VINF_SUCCESS;
5529}
5530
5531/**
5532 * Read handler for unimplemented registers.
5533 *
5534 * Merely reports reads from unimplemented registers.
5535 *
5536 * @returns VBox status code.
5537 *
5538 * @param pThis The device state structure.
5539 * @param offset Register offset in memory-mapped frame.
5540 * @param index Register index in register array.
5541 * @thread EMT
5542 */
5543static int e1kRegReadUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5544{
5545 RT_NOREF3(pThis, offset, index);
5546 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5547 pThis->szPrf, offset, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5548 *pu32Value = 0;
5549
5550 return VINF_SUCCESS;
5551}
5552
5553/**
5554 * Default register read handler with automatic clear operation.
5555 *
5556 * Retrieves the value of register from register array in device state structure.
5557 * Then resets all bits.
5558 *
5559 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5560 * done in the caller.
5561 *
5562 * @returns VBox status code.
5563 *
5564 * @param pThis The device state structure.
5565 * @param offset Register offset in memory-mapped frame.
5566 * @param index Register index in register array.
5567 * @thread EMT
5568 */
5569static int e1kRegReadAutoClear(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5570{
5571 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5572 int rc = e1kRegReadDefault(pThis, offset, index, pu32Value);
5573 pThis->auRegs[index] = 0;
5574
5575 return rc;
5576}
5577
5578/**
5579 * Default register read handler.
5580 *
5581 * Retrieves the value of register from register array in device state structure.
5582 * Bits corresponding to 0s in 'readable' mask will always read as 0s.
5583 *
5584 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5585 * done in the caller.
5586 *
5587 * @returns VBox status code.
5588 *
5589 * @param pThis The device state structure.
5590 * @param offset Register offset in memory-mapped frame.
5591 * @param index Register index in register array.
5592 * @thread EMT
5593 */
5594static int e1kRegReadDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5595{
5596 RT_NOREF_PV(offset);
5597
5598 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5599 *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
5600
5601 return VINF_SUCCESS;
5602}
5603
5604/**
5605 * Write handler for unimplemented registers.
5606 *
5607 * Merely reports writes to unimplemented registers.
5608 *
5609 * @param pThis The device state structure.
5610 * @param offset Register offset in memory-mapped frame.
5611 * @param index Register index in register array.
5612 * @param value The value to store.
5613 * @thread EMT
5614 */
5616static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5617{
5618 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
5619
5620 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
5621 pThis->szPrf, offset, value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5622
5623 return VINF_SUCCESS;
5624}
5625
5626/**
5627 * Default register write handler.
5628 *
5629 * Stores the value to the register array in device state structure. Only bits
5630 * corresponding to 1s both in 'writable' and 'mask' will be stored.
5631 *
5632 * @returns VBox status code.
5633 *
5634 * @param pThis The device state structure.
5635 * @param offset Register offset in memory-mapped frame.
5636 * @param index Register index in register array.
5637 * @param value The value to store.
5639 * @thread EMT
5640 */
5642static int e1kRegWriteDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5643{
5644 RT_NOREF_PV(offset);
5645
5646 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5647 pThis->auRegs[index] = (value & g_aE1kRegMap[index].writable)
5648 | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
5649
5650 return VINF_SUCCESS;
5651}
5652
5653/**
5654 * Search register table for matching register.
5655 *
5656 * @returns Index in the register table or -1 if not found.
5657 *
5658 * @param offReg Register offset in memory-mapped region.
5659 * @thread EMT
5660 */
5661static int e1kRegLookup(uint32_t offReg)
5662{
5663
5664#if 0
5665 int index;
5666
5667 for (index = 0; index < E1K_NUM_OF_REGS; index++)
5668 {
5669 if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
5670 {
5671 return index;
5672 }
5673 }
5674#else
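/*
 * Binary search over the first E1K_NUM_OF_BINARY_SEARCHABLE entries of the
 * register map (which are sorted by offset), followed by a linear scan over
 * the remaining entries.
 */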
5675 int iStart = 0;
5676 int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
5677 for (;;)
5678 {
5679 int i = (iEnd - iStart) / 2 + iStart;
5680 uint32_t offCur = g_aE1kRegMap[i].offset;
5681 if (offReg < offCur)
5682 {
5683 if (i == iStart)
5684 break;
5685 iEnd = i;
5686 }
5687 else if (offReg >= offCur + g_aE1kRegMap[i].size)
5688 {
5689 i++;
5690 if (i == iEnd)
5691 break;
5692 iStart = i;
5693 }
5694 else
5695 return i;
5696 Assert(iEnd > iStart);
5697 }
5698
5699 for (unsigned i = E1K_NUM_OF_BINARY_SEARCHABLE; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5700 if (offReg - g_aE1kRegMap[i].offset < g_aE1kRegMap[i].size)
5701 return i;
5702
5703# ifdef VBOX_STRICT
5704 for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5705 Assert(offReg - g_aE1kRegMap[i].offset >= g_aE1kRegMap[i].size);
5706# endif
5707
5708#endif
5709
5710 return -1;
5711}
5712
5713/**
5714 * Handle unaligned register read operation.
5715 *
5716 * Looks up and calls appropriate handler.
5717 *
5718 * @returns VBox status code.
5719 *
5720 * @param pThis The device state structure.
5721 * @param offReg Register offset in memory-mapped frame.
5722 * @param pv Where to store the result.
5723 * @param cb Number of bytes to read.
5724 * @thread EMT
5725 * @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
5726 * accesses we have to take care of that ourselves.
5727 */
5728static int e1kRegReadUnaligned(PE1KSTATE pThis, uint32_t offReg, void *pv, uint32_t cb)
5729{
5730 uint32_t u32 = 0;
5731 uint32_t shift;
5732 int rc = VINF_SUCCESS;
5733 int index = e1kRegLookup(offReg);
5734#ifdef LOG_ENABLED
5735 char buf[9];
5736#endif
5737
5738 /*
5739 * From the spec:
5740 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5741 * double word) is ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5742 */
5743
5744 /*
5745 * To be able to read bytes and short word we convert them to properly
5746 * shifted 32-bit words and masks. The idea is to keep register-specific
5747 * handlers simple. Most accesses will be 32-bit anyway.
5748 */
5749 uint32_t mask;
5750 switch (cb)
5751 {
5752 case 4: mask = 0xFFFFFFFF; break;
5753 case 2: mask = 0x0000FFFF; break;
5754 case 1: mask = 0x000000FF; break;
5755 default:
5756 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
5757 "unsupported op size: offset=%#10x cb=%#10x\n", offReg, cb);
5758 }
5759 if (index != -1)
5760 {
5761 if (g_aE1kRegMap[index].readable)
5762 {
5763 /* Make the mask correspond to the bits we are about to read. */
5764 shift = (offReg - g_aE1kRegMap[index].offset) % sizeof(uint32_t) * 8;
5765 mask <<= shift;
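/* E.g. a 2-byte read at (register offset + 2) yields shift = 16 and mask = 0xFFFF0000. */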
5766 if (!mask)
5767 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
5768 /*
5769 * Read it. Pass the mask so the handler knows what has to be read.
5770 * Mask out irrelevant bits.
5771 */
5772 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5773 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5774 return rc;
5775 //pThis->fDelayInts = false;
5776 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5777 //pThis->iStatIntLostOne = 0;
5778 rc = g_aE1kRegMap[index].pfnRead(pThis, offReg & 0xFFFFFFFC, index, &u32);
5779 u32 &= mask;
5780 //e1kCsLeave(pThis);
5781 E1kLog2(("%s At %08X read %s from %s (%s)\n",
5782 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5783 Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
5784 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5785 /* Shift back the result. */
5786 u32 >>= shift;
5787 }
5788 else
5789 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
5790 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5791 if (IOM_SUCCESS(rc))
5792 STAM_COUNTER_INC(&pThis->aStatRegReads[index]);
5793 }
5794 else
5795 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
5796 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf)));
5797
5798 memcpy(pv, &u32, cb);
5799 return rc;
5800}
5801
5802/**
5803 * Handle 4 byte aligned and sized read operation.
5804 *
5805 * Looks up and calls appropriate handler.
5806 *
5807 * @returns VBox status code.
5808 *
5809 * @param pThis The device state structure.
5810 * @param offReg Register offset in memory-mapped frame.
5811 * @param pu32 Where to store the result.
5812 * @thread EMT
5813 */
5814static int e1kRegReadAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t *pu32)
5815{
5816 Assert(!(offReg & 3));
5817
5818 /*
5819 * Lookup the register and check that it's readable.
5820 */
5821 int rc = VINF_SUCCESS;
5822 int idxReg = e1kRegLookup(offReg);
5823 if (RT_LIKELY(idxReg != -1))
5824 {
5825 if (RT_UNLIKELY(g_aE1kRegMap[idxReg].readable))
5826 {
5827 /*
5828 * Read it. Pass the mask so the handler knows what has to be read.
5829 * Mask out irrelevant bits.
5830 */
5831 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5832 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
5833 // return rc;
5834 //pThis->fDelayInts = false;
5835 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5836 //pThis->iStatIntLostOne = 0;
5837 rc = g_aE1kRegMap[idxReg].pfnRead(pThis, offReg & 0xFFFFFFFC, idxReg, pu32);
5838 //e1kCsLeave(pThis);
5839 Log6(("%s At %08X read %08X from %s (%s)\n",
5840 pThis->szPrf, offReg, *pu32, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5841 if (IOM_SUCCESS(rc))
5842 STAM_COUNTER_INC(&pThis->aStatRegReads[idxReg]);
5843 }
5844 else
5845 E1kLog(("%s At %08X read attempt from non-readable register %s (%s)\n",
5846 pThis->szPrf, offReg, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5847 }
5848 else
5849 E1kLog(("%s At %08X read attempt from non-existing register\n", pThis->szPrf, offReg));
5850 return rc;
5851}
5852
5853/**
5854 * Handle 4 byte sized and aligned register write operation.
5855 *
5856 * Looks up and calls appropriate handler.
5857 *
5858 * @returns VBox status code.
5859 *
5860 * @param pThis The device state structure.
5861 * @param offReg Register offset in memory-mapped frame.
5862 * @param u32Value The value to write.
5863 * @thread EMT
5864 */
5865static int e1kRegWriteAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
5866{
5867 int rc = VINF_SUCCESS;
5868 int index = e1kRegLookup(offReg);
5869 if (RT_LIKELY(index != -1))
5870 {
5871 if (RT_LIKELY(g_aE1kRegMap[index].writable))
5872 {
5873 /*
5874 * Write it. Pass the mask so the handler knows what has to be written.
5875 * Mask out irrelevant bits.
5876 */
5877 Log6(("%s At %08X write %08X to %s (%s)\n",
5878 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5879 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5880 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
5881 // return rc;
5882 //pThis->fDelayInts = false;
5883 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5884 //pThis->iStatIntLostOne = 0;
5885 rc = g_aE1kRegMap[index].pfnWrite(pThis, offReg, index, u32Value);
5886 //e1kCsLeave(pThis);
5887 }
5888 else
5889 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
5890 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5891 if (IOM_SUCCESS(rc))
5892 STAM_COUNTER_INC(&pThis->aStatRegWrites[index]);
5893 }
5894 else
5895 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
5896 pThis->szPrf, offReg, u32Value));
5897 return rc;
5898}
5899
5900
5901/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
5902
5903/**
5904 * @callback_method_impl{FNIOMMMIOREAD}
5905 */
5906PDMBOTHCBDECL(int) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
5907{
5908 RT_NOREF2(pvUser, cb);
5909 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5910 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIORead), a);
5911
5912 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
5913 Assert(offReg < E1K_MM_SIZE);
5914 Assert(cb == 4);
5915 Assert(!(GCPhysAddr & 3));
5916
5917 int rc = e1kRegReadAlignedU32(pThis, offReg, (uint32_t *)pv);
5918
5919 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIORead), a);
5920 return rc;
5921}
5922
5923/**
5924 * @callback_method_impl{FNIOMMMIOWRITE}
5925 */
5926PDMBOTHCBDECL(int) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
5927{
5928 RT_NOREF2(pvUser, cb);
5929 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5930 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
5931
5932 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
5933 Assert(offReg < E1K_MM_SIZE);
5934 Assert(cb == 4);
5935 Assert(!(GCPhysAddr & 3));
5936
5937 int rc = e1kRegWriteAlignedU32(pThis, offReg, *(uint32_t const *)pv);
5938
5939 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
5940 return rc;
5941}
5942
5943/**
5944 * @callback_method_impl{FNIOMIOPORTIN}
5945 */
5946PDMBOTHCBDECL(int) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t *pu32, unsigned cb)
5947{
5948 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5949 int rc;
5950 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIORead), a);
5951 RT_NOREF_PV(pvUser);
5952
5953 uPort -= pThis->IOPortBase;
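/*
 * The I/O space is a two-register window: IOADDR (offset 0) selects a
 * register offset within the memory-mapped register space, IODATA (offset 4)
 * accesses the selected register. A guest access could look roughly like
 * this (hypothetical guest-side sketch):
 *     outl(io_base + 0x00, reg_offset);  // IOADDR <- register offset
 *     value = inl(io_base + 0x04);       // IODATA -> register value
 */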
5954 if (RT_LIKELY(cb == 4))
5955 switch (uPort)
5956 {
5957 case 0x00: /* IOADDR */
5958 *pu32 = pThis->uSelectedReg;
5959 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
5960 rc = VINF_SUCCESS;
5961 break;
5962
5963 case 0x04: /* IODATA */
5964 if (!(pThis->uSelectedReg & 3))
5965 rc = e1kRegReadAlignedU32(pThis, pThis->uSelectedReg, pu32);
5966 else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
5967 rc = e1kRegReadUnaligned(pThis, pThis->uSelectedReg, pu32, cb);
5968 if (rc == VINF_IOM_R3_MMIO_READ)
5969 rc = VINF_IOM_R3_IOPORT_READ;
5970 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
5971 break;
5972
5973 default:
5974 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", pThis->szPrf, uPort));
5975 //rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
5976 rc = VINF_SUCCESS;
5977 }
5978 else
5979 {
5980 E1kLog(("%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb));
5981 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb);
5982 }
5983 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIORead), a);
5984 return rc;
5985}
5986
5987
5988/**
5989 * @callback_method_impl{FNIOMIOPORTOUT}
5990 */
5991PDMBOTHCBDECL(int) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t u32, unsigned cb)
5992{
5993 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5994 int rc;
5995 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIOWrite), a);
5996 RT_NOREF_PV(pvUser);
5997
5998 E1kLog2(("%s e1kIOPortOut: uPort=%RTiop value=%08x\n", pThis->szPrf, uPort, u32));
5999 if (RT_LIKELY(cb == 4))
6000 {
6001 uPort -= pThis->IOPortBase;
6002 switch (uPort)
6003 {
6004 case 0x00: /* IOADDR */
6005 pThis->uSelectedReg = u32;
6006 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
6007 rc = VINF_SUCCESS;
6008 break;
6009
6010 case 0x04: /* IODATA */
6011 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
6012 if (RT_LIKELY(!(pThis->uSelectedReg & 3)))
6013 {
6014 rc = e1kRegWriteAlignedU32(pThis, pThis->uSelectedReg, u32);
6015 if (rc == VINF_IOM_R3_MMIO_WRITE)
6016 rc = VINF_IOM_R3_IOPORT_WRITE;
6017 }
6018 else
6019 rc = PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
6020 "Spec violation: misaligned offset: %#10x, ignored.\n", pThis->uSelectedReg);
6021 break;
6022
6023 default:
6024 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", pThis->szPrf, uPort));
6025 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", uPort);
6026 }
6027 }
6028 else
6029 {
6030 E1kLog(("%s e1kIOPortOut: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb));
6031 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s: invalid op size: uPort=%RTiop cb=%#x\n", pThis->szPrf, uPort, cb);
6032 }
6033
6034 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6035 return rc;
6036}
6037
6038#ifdef IN_RING3
6039
6040/**
6041 * Dump complete device state to log.
6042 *
6043 * @param pThis Pointer to device state.
6044 */
6045static void e1kDumpState(PE1KSTATE pThis)
6046{
6047 RT_NOREF(pThis);
6048 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6049 E1kLog2(("%s %8.8s = %08x\n", pThis->szPrf, g_aE1kRegMap[i].abbrev, pThis->auRegs[i]));
6050# ifdef E1K_INT_STATS
6051 LogRel(("%s Interrupt attempts: %d\n", pThis->szPrf, pThis->uStatIntTry));
6052 LogRel(("%s Interrupts raised : %d\n", pThis->szPrf, pThis->uStatInt));
6053 LogRel(("%s Interrupts lowered: %d\n", pThis->szPrf, pThis->uStatIntLower));
6054 LogRel(("%s Interrupts delayed: %d\n", pThis->szPrf, pThis->uStatIntDly));
6055 LogRel(("%s Disabled delayed: %d\n", pThis->szPrf, pThis->uStatDisDly));
6056 LogRel(("%s Interrupts skipped: %d\n", pThis->szPrf, pThis->uStatIntSkip));
6057 LogRel(("%s Masked interrupts : %d\n", pThis->szPrf, pThis->uStatIntMasked));
6058 LogRel(("%s Early interrupts : %d\n", pThis->szPrf, pThis->uStatIntEarly));
6059 LogRel(("%s Late interrupts : %d\n", pThis->szPrf, pThis->uStatIntLate));
6060 LogRel(("%s Lost interrupts : %d\n", pThis->szPrf, pThis->iStatIntLost));
6061 LogRel(("%s Interrupts by RX : %d\n", pThis->szPrf, pThis->uStatIntRx));
6062 LogRel(("%s Interrupts by TX : %d\n", pThis->szPrf, pThis->uStatIntTx));
6063 LogRel(("%s Interrupts by ICS : %d\n", pThis->szPrf, pThis->uStatIntICS));
6064 LogRel(("%s Interrupts by RDTR: %d\n", pThis->szPrf, pThis->uStatIntRDTR));
6065 LogRel(("%s Interrupts by RDMT: %d\n", pThis->szPrf, pThis->uStatIntRXDMT0));
6066 LogRel(("%s Interrupts by TXQE: %d\n", pThis->szPrf, pThis->uStatIntTXQE));
6067 LogRel(("%s TX int delay asked: %d\n", pThis->szPrf, pThis->uStatTxIDE));
6068 LogRel(("%s TX delayed: %d\n", pThis->szPrf, pThis->uStatTxDelayed));
6069 LogRel(("%s TX delay expired: %d\n", pThis->szPrf, pThis->uStatTxDelayExp));
6070 LogRel(("%s TX no report asked: %d\n", pThis->szPrf, pThis->uStatTxNoRS));
6071 LogRel(("%s TX abs timer expd : %d\n", pThis->szPrf, pThis->uStatTAD));
6072 LogRel(("%s TX int timer expd : %d\n", pThis->szPrf, pThis->uStatTID));
6073 LogRel(("%s RX abs timer expd : %d\n", pThis->szPrf, pThis->uStatRAD));
6074 LogRel(("%s RX int timer expd : %d\n", pThis->szPrf, pThis->uStatRID));
6075 LogRel(("%s TX CTX descriptors: %d\n", pThis->szPrf, pThis->uStatDescCtx));
6076 LogRel(("%s TX DAT descriptors: %d\n", pThis->szPrf, pThis->uStatDescDat));
6077 LogRel(("%s TX LEG descriptors: %d\n", pThis->szPrf, pThis->uStatDescLeg));
6078 LogRel(("%s Received frames : %d\n", pThis->szPrf, pThis->uStatRxFrm));
6079 LogRel(("%s Transmitted frames: %d\n", pThis->szPrf, pThis->uStatTxFrm));
6080 LogRel(("%s TX frames up to 1514: %d\n", pThis->szPrf, pThis->uStatTx1514));
6081 LogRel(("%s TX frames up to 2962: %d\n", pThis->szPrf, pThis->uStatTx2962));
6082 LogRel(("%s TX frames up to 4410: %d\n", pThis->szPrf, pThis->uStatTx4410));
6083 LogRel(("%s TX frames up to 5858: %d\n", pThis->szPrf, pThis->uStatTx5858));
6084 LogRel(("%s TX frames up to 7306: %d\n", pThis->szPrf, pThis->uStatTx7306));
6085 LogRel(("%s TX frames up to 8754: %d\n", pThis->szPrf, pThis->uStatTx8754));
6086 LogRel(("%s TX frames up to 16384: %d\n", pThis->szPrf, pThis->uStatTx16384));
6087 LogRel(("%s TX frames up to 32768: %d\n", pThis->szPrf, pThis->uStatTx32768));
6088 LogRel(("%s Larger TX frames : %d\n", pThis->szPrf, pThis->uStatTxLarge));
6089 LogRel(("%s Max TX Delay : %lld\n", pThis->szPrf, pThis->uStatMaxTxDelay));
6090# endif /* E1K_INT_STATS */
6091}
6092
6093/**
6094 * @callback_method_impl{FNPCIIOREGIONMAP}
6095 */
6096static DECLCALLBACK(int) e1kMap(PPCIDEVICE pPciDev, int iRegion, RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
6097{
6098 RT_NOREF(iRegion);
6099 PE1KSTATE pThis = PDMINS_2_DATA(pPciDev->pDevIns, E1KSTATE*);
6100 int rc;
6101
6102 switch (enmType)
6103 {
6104 case PCI_ADDRESS_SPACE_IO:
6105 pThis->IOPortBase = (RTIOPORT)GCPhysAddress;
6106 rc = PDMDevHlpIOPortRegister(pPciDev->pDevIns, pThis->IOPortBase, cb, NULL /*pvUser*/,
6107 e1kIOPortOut, e1kIOPortIn, NULL, NULL, "E1000");
6108 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6109 rc = PDMDevHlpIOPortRegisterR0(pPciDev->pDevIns, pThis->IOPortBase, cb, NIL_RTR0PTR /*pvUser*/,
6110 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6111 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6112 rc = PDMDevHlpIOPortRegisterRC(pPciDev->pDevIns, pThis->IOPortBase, cb, NIL_RTRCPTR /*pvUser*/,
6113 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6114 break;
6115
6116 case PCI_ADDRESS_SPACE_MEM:
6117 /*
6118 * From the spec:
6119 * For registers that should be accessed as 32-bit double words,
6120 * partial writes (less than a 32-bit double word) is ignored.
6121 * Partial reads return all 32 bits of data regardless of the
6122 * byte enables.
6123 */
6124#ifdef E1K_WITH_PREREG_MMIO
6125 pThis->addrMMReg = GCPhysAddress;
6126 if (GCPhysAddress == NIL_RTGCPHYS)
6127 rc = VINF_SUCCESS;
6128 else
6129 {
6130 Assert(!(GCPhysAddress & 7));
6131 rc = PDMDevHlpMMIOExMap(pPciDev->pDevIns, iRegion, GCPhysAddress);
6132 }
6133#else
6134 pThis->addrMMReg = GCPhysAddress; Assert(!(GCPhysAddress & 7));
6135 rc = PDMDevHlpMMIORegister(pPciDev->pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
6136 IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD,
6137 e1kMMIOWrite, e1kMMIORead, "E1000");
6138 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6139 rc = PDMDevHlpMMIORegisterR0(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTR0PTR /*pvUser*/,
6140 "e1kMMIOWrite", "e1kMMIORead");
6141 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6142 rc = PDMDevHlpMMIORegisterRC(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTRCPTR /*pvUser*/,
6143 "e1kMMIOWrite", "e1kMMIORead");
6144#endif
6145 break;
6146
6147 default:
6148 /* We should never get here */
6149 AssertMsgFailed(("Invalid PCI address space param in map callback"));
6150 rc = VERR_INTERNAL_ERROR;
6151 break;
6152 }
6153 return rc;
6154}
6155
6156
6157/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
6158
6159/**
6160 * Check if the device can receive data now.
6161 * This must be called before the pfnReceive() method is called.
6162 *
6163 * @returns VINF_SUCCESS if the device can receive data, VERR_NET_NO_BUFFER_SPACE otherwise.
6164 * @param pThis The device state structure.
6165 * @thread EMT
6166 */
6167static int e1kCanReceive(PE1KSTATE pThis)
6168{
6169#ifndef E1K_WITH_RXD_CACHE
6170 size_t cb;
6171
6172 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6173 return VERR_NET_NO_BUFFER_SPACE;
6174
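    /*
     * A single-descriptor ring is a special case: RDH and RDT always coincide
     * there, so the head/tail arithmetic below cannot tell "empty" from "full".
     * Instead we peek at the descriptor's DD bit: if it is still set, the guest
     * has not reclaimed the descriptor and we have no room. Otherwise the
     * available space is the number of descriptors between head and tail (with
     * wrap-around), each backed by a u16RxBSize buffer.
     */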
6175 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6176 {
6177 E1KRXDESC desc;
6178 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6179 &desc, sizeof(desc));
6180 if (desc.status.fDD)
6181 cb = 0;
6182 else
6183 cb = pThis->u16RxBSize;
6184 }
6185 else if (RDH < RDT)
6186 cb = (RDT - RDH) * pThis->u16RxBSize;
6187 else if (RDH > RDT)
6188 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pThis->u16RxBSize;
6189 else
6190 {
6191 cb = 0;
6192 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6193 }
6194 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6195 pThis->szPrf, RDH, RDT, RDLEN, pThis->u16RxBSize, cb));
6196
6197 e1kCsRxLeave(pThis);
6198 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6199#else /* E1K_WITH_RXD_CACHE */
6200 int rc = VINF_SUCCESS;
6201
6202 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6203 return VERR_NET_NO_BUFFER_SPACE;
6204
6205 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6206 {
6207 E1KRXDESC desc;
6208 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6209 &desc, sizeof(desc));
6210 if (desc.status.fDD)
6211 rc = VERR_NET_NO_BUFFER_SPACE;
6212 }
6213 else if (e1kRxDIsCacheEmpty(pThis) && RDH == RDT)
6214 {
6215 /* Cache is empty, so is the RX ring. */
6216 rc = VERR_NET_NO_BUFFER_SPACE;
6217 }
6218 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
6219 " u16RxBSize=%d rc=%Rrc\n", pThis->szPrf,
6220 e1kRxDInCache(pThis), RDH, RDT, RDLEN, pThis->u16RxBSize, rc));
6221
6222 e1kCsRxLeave(pThis);
6223 return rc;
6224#endif /* E1K_WITH_RXD_CACHE */
6225}
6226
6227/**
6228 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6229 */
6230static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6231{
6232 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6233 int rc = e1kCanReceive(pThis);
6234
6235 if (RT_SUCCESS(rc))
6236 return VINF_SUCCESS;
6237 if (RT_UNLIKELY(cMillies == 0))
6238 return VERR_NET_NO_BUFFER_SPACE;
6239
6240 rc = VERR_INTERRUPTED;
6241 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, true);
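    /* Setting fMaybeOutOfSpace asks the receive path to signal
       hEventMoreRxDescAvail as soon as the guest provides more RX descriptors,
       which is what wakes up the wait loop below. */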
6242 STAM_PROFILE_START(&pThis->StatRxOverflow, a);
6243 VMSTATE enmVMState;
6244 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pThis->CTX_SUFF(pDevIns))) == VMSTATE_RUNNING
6245 || enmVMState == VMSTATE_RUNNING_LS))
6246 {
6247 int rc2 = e1kCanReceive(pThis);
6248 if (RT_SUCCESS(rc2))
6249 {
6250 rc = VINF_SUCCESS;
6251 break;
6252 }
6253 E1kLogRel(("E1000 e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6254 E1kLog(("%s e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", pThis->szPrf, cMillies));
6255 RTSemEventWait(pThis->hEventMoreRxDescAvail, cMillies);
6256 }
6257 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
6258 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, false);
6259
6260 return rc;
6261}
6262
6263
6264/**
6265 * Matches the packet addresses against Receive Address table. Looks for
6266 * exact matches only.
6267 *
6268 * @returns true if address matches.
6269 * @param pThis Pointer to the state structure.
6270 * @param pvBuf The ethernet packet.
6272 * @thread EMT
6273 */
6274static bool e1kPerfectMatch(PE1KSTATE pThis, const void *pvBuf)
6275{
6276 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
6277 {
6278 E1KRAELEM* ra = pThis->aRecAddr.array + i;
6279
6280 /* Valid address? */
6281 if (ra->ctl & RA_CTL_AV)
6282 {
6283 Assert((ra->ctl & RA_CTL_AS) < 2);
6284 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6285 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6286 // pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6287 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6288 /*
6289 * Address Select:
6290 * 00b = Destination address
6291 * 01b = Source address
6292 * 10b = Reserved
6293 * 11b = Reserved
6294 * Since the ethernet header is (DA, SA, len) we can use the address
6295 * select value as an index.
6296 */
6297 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6298 ra->addr, sizeof(ra->addr)) == 0)
6299 return true;
6300 }
6301 }
6302
6303 return false;
6304}
6305
6306/**
6307 * Matches the packet addresses against Multicast Table Array.
6308 *
6309 * @remarks This is an imperfect match since it matches not an exact address
6310 * but a subset of addresses.
6311 *
6312 * @returns true if address matches.
6313 * @param pThis Pointer to the state structure.
6314 * @param pvBuf The ethernet packet.
6316 * @thread EMT
6317 */
6318static bool e1kImperfectMatch(PE1KSTATE pThis, const void *pvBuf)
6319{
6320 /* Get bits 32..47 of destination address */
6321 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6322
6323 unsigned offset = GET_BITS(RCTL, MO);
6324 /*
6325 * offset means:
6326 * 00b = bits 36..47
6327 * 01b = bits 35..46
6328 * 10b = bits 34..45
6329 * 11b = bits 32..43
6330 */
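    /* For example, MO=00b selects bits 36..47: shifting the 16-bit word right
       by 4 drops bits 32..35, and the 0xFFF mask below yields the 12-bit index
       into the 4096-bit Multicast Table Array. */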
6331 if (offset < 3)
6332 u16Bit = u16Bit >> (4 - offset);
6333 return ASMBitTest(pThis->auMTA, u16Bit & 0xFFF);
6334}
6335
6336/**
6337 * Determines if the packet is to be delivered to upper layer.
6338 *
6339 * The following filters are supported:
6340 * - Exact Unicast/Multicast
6341 * - Promiscuous Unicast/Multicast
6342 * - Multicast
6343 * - VLAN
6344 *
6345 * @returns true if packet is intended for this node.
6346 * @param pThis Pointer to the state structure.
6347 * @param pvBuf The ethernet packet.
6348 * @param cb Number of bytes available in the packet.
6349 * @param pStatus Bit field to store status bits.
6350 * @thread EMT
6351 */
6352static bool e1kAddressFilter(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6353{
6354 Assert(cb > 14);
6355 /* Assume that we fail to pass exact filter. */
6356 pStatus->fPIF = false;
6357 pStatus->fVP = false;
6358 /* Discard oversized packets */
6359 if (cb > E1K_MAX_RX_PKT_SIZE)
6360 {
6361 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6362 pThis->szPrf, cb, E1K_MAX_RX_PKT_SIZE));
6363 E1K_INC_CNT32(ROC);
6364 return false;
6365 }
6366 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6367 {
6368 /* When long packet reception is disabled packets over 1522 are discarded */
6369 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6370 pThis->szPrf, cb));
6371 E1K_INC_CNT32(ROC);
6372 return false;
6373 }
6374
6375 uint16_t *u16Ptr = (uint16_t*)pvBuf;
6376 /* Compare TPID with VLAN Ether Type */
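    /* u16Ptr[6] is the EtherType/TPID field at byte offset 12 of the frame;
       for 802.1q frames u16Ptr[7] is the TCI carrying PRI, CFI, and the 12-bit
       VLAN ID used by the checks below. */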
6377 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6378 {
6379 pStatus->fVP = true;
6380 /* Is VLAN filtering enabled? */
6381 if (RCTL & RCTL_VFE)
6382 {
6383 /* It is 802.1q packet indeed, let's filter by VID */
6384 if (RCTL & RCTL_CFIEN)
6385 {
6386 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", pThis->szPrf,
6387 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6388 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6389 !!(RCTL & RCTL_CFI)));
6390 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6391 {
6392 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6393 pThis->szPrf, E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6394 return false;
6395 }
6396 }
6397 else
6398 E1kLog3(("%s VLAN filter: VLAN=%d\n", pThis->szPrf,
6399 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6400 if (!ASMBitTest(pThis->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6401 {
6402 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6403 pThis->szPrf, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6404 return false;
6405 }
6406 }
6407 }
6408 /* Broadcast filtering */
6409 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6410 return true;
6411 E1kLog2(("%s Packet filter: not a broadcast\n", pThis->szPrf));
6412 if (e1kIsMulticast(pvBuf))
6413 {
6414 /* Is multicast promiscuous enabled? */
6415 if (RCTL & RCTL_MPE)
6416 return true;
6417 E1kLog2(("%s Packet filter: no promiscuous multicast\n", pThis->szPrf));
6418 /* Try perfect matches first */
6419 if (e1kPerfectMatch(pThis, pvBuf))
6420 {
6421 pStatus->fPIF = true;
6422 return true;
6423 }
6424 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6425 if (e1kImperfectMatch(pThis, pvBuf))
6426 return true;
6427 E1kLog2(("%s Packet filter: no imperfect match\n", pThis->szPrf));
6428 }
6429 else {
6430 /* Is unicast promiscuous enabled? */
6431 if (RCTL & RCTL_UPE)
6432 return true;
6433 E1kLog2(("%s Packet filter: no promiscuous unicast\n", pThis->szPrf));
6434 if (e1kPerfectMatch(pThis, pvBuf))
6435 {
6436 pStatus->fPIF = true;
6437 return true;
6438 }
6439 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6440 }
6441 E1kLog2(("%s Packet filter: packet discarded\n", pThis->szPrf));
6442 return false;
6443}
6444
6445/**
6446 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6447 */
6448static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6449{
6450 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6451 int rc = VINF_SUCCESS;
6452
6453 /*
6454 * Drop packets if the VM is not running yet/anymore.
6455 */
6456 VMSTATE enmVMState = PDMDevHlpVMState(STATE_TO_DEVINS(pThis));
6457 if ( enmVMState != VMSTATE_RUNNING
6458 && enmVMState != VMSTATE_RUNNING_LS)
6459 {
6460 E1kLog(("%s Dropping incoming packet as VM is not running.\n", pThis->szPrf));
6461 return VINF_SUCCESS;
6462 }
6463
6464 /* Discard incoming packets in locked state */
6465 if (!(RCTL & RCTL_EN) || pThis->fLocked || !(STATUS & STATUS_LU))
6466 {
6467 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", pThis->szPrf));
6468 return VINF_SUCCESS;
6469 }
6470
6471 STAM_PROFILE_ADV_START(&pThis->StatReceive, a);
6472
6473 //if (!e1kCsEnter(pThis, RT_SRC_POS))
6474 // return VERR_PERMISSION_DENIED;
6475
6476 e1kPacketDump(pThis, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6477
6478 /* Update stats */
6479 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
6480 {
6481 E1K_INC_CNT32(TPR);
6482 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6483 e1kCsLeave(pThis);
6484 }
6485 STAM_PROFILE_ADV_START(&pThis->StatReceiveFilter, a);
6486 E1KRXDST status;
6487 RT_ZERO(status);
6488 bool fPassed = e1kAddressFilter(pThis, pvBuf, cb, &status);
6489 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveFilter, a);
6490 if (fPassed)
6491 {
6492 rc = e1kHandleRxPacket(pThis, pvBuf, cb, status);
6493 }
6494 //e1kCsLeave(pThis);
6495 STAM_PROFILE_ADV_STOP(&pThis->StatReceive, a);
6496
6497 return rc;
6498}
6499
6500
6501/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
6502
6503/**
6504 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
6505 */
6506static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6507{
6508 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, ILeds);
6509 int rc = VERR_PDM_LUN_NOT_FOUND;
6510
6511 if (iLUN == 0)
6512 {
6513 *ppLed = &pThis->led;
6514 rc = VINF_SUCCESS;
6515 }
6516 return rc;
6517}
6518
6519
6520/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
6521
6522/**
6523 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
6524 */
6525static DECLCALLBACK(int) e1kR3GetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6526{
6527 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6528 pThis->eeprom.getMac(pMac);
6529 return VINF_SUCCESS;
6530}
6531
6532/**
6533 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
6534 */
6535static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kR3GetLinkState(PPDMINETWORKCONFIG pInterface)
6536{
6537 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6538 if (STATUS & STATUS_LU)
6539 return PDMNETWORKLINKSTATE_UP;
6540 return PDMNETWORKLINKSTATE_DOWN;
6541}
6542
6543/**
6544 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
6545 */
6546static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6547{
6548 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6549
6550 E1kLog(("%s e1kR3SetLinkState: enmState=%d\n", pThis->szPrf, enmState));
6551 switch (enmState)
6552 {
6553 case PDMNETWORKLINKSTATE_UP:
6554 pThis->fCableConnected = true;
6555 /* If link was down, bring it up after a while. */
6556 if (!(STATUS & STATUS_LU))
6557 e1kBringLinkUpDelayed(pThis);
6558 break;
6559 case PDMNETWORKLINKSTATE_DOWN:
6560 pThis->fCableConnected = false;
6561 /* Always set the phy link state to down, regardless of the STATUS_LU bit.
6562 * We might have to set the link state before the driver initializes us. */
6563 Phy::setLinkStatus(&pThis->phy, false);
6564 /* If link was up, bring it down. */
6565 if (STATUS & STATUS_LU)
6566 e1kR3LinkDown(pThis);
6567 break;
6568 case PDMNETWORKLINKSTATE_DOWN_RESUME:
6569 /*
6570 * There is not much sense in bringing down the link if it has not come up yet.
6571 * If it is up though, we bring it down temporarily, then bring it up again.
6572 */
6573 if (STATUS & STATUS_LU)
6574 e1kR3LinkDownTemp(pThis);
6575 break;
6576 default:
6577 ;
6578 }
6579 return VINF_SUCCESS;
6580}
6581
6582
6583/* -=-=-=-=- PDMIBASE -=-=-=-=- */
6584
6585/**
6586 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6587 */
6588static DECLCALLBACK(void *) e1kR3QueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6589{
6590 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, IBase);
6591 Assert(&pThis->IBase == pInterface);
6592
6593 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
6594 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThis->INetworkDown);
6595 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThis->INetworkConfig);
6596 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThis->ILeds);
6597 return NULL;
6598}
6599
6600
6601/* -=-=-=-=- Saved State -=-=-=-=- */
6602
6603/**
6604 * Saves the configuration.
6605 *
6606 * @param pThis The E1K state.
6607 * @param pSSM The handle to the saved state.
6608 */
6609static void e1kSaveConfig(PE1KSTATE pThis, PSSMHANDLE pSSM)
6610{
6611 SSMR3PutMem(pSSM, &pThis->macConfigured, sizeof(pThis->macConfigured));
6612 SSMR3PutU32(pSSM, pThis->eChip);
6613}
6614
6615/**
6616 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
6617 */
6618static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
6619{
6620 RT_NOREF(uPass);
6621 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6622 e1kSaveConfig(pThis, pSSM);
6623 return VINF_SSM_DONT_CALL_AGAIN;
6624}
6625
6626/**
6627 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
6628 */
6629static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6630{
6631 RT_NOREF(pSSM);
6632 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6633
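    /* Entering and leaving the critical section acts as a synchronization
       barrier: any thread currently modifying the device state under the lock
       finishes before we proceed with saving. */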
6634 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6635 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6636 return rc;
6637 e1kCsLeave(pThis);
6638 return VINF_SUCCESS;
6639#if 0
6640 /* 1) Prevent all threads from modifying the state and memory */
6641 //pThis->fLocked = true;
6642 /* 2) Cancel all timers */
6643#ifdef E1K_TX_DELAY
6644 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
6645#endif /* E1K_TX_DELAY */
6646#ifdef E1K_USE_TX_TIMERS
6647 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
6648#ifndef E1K_NO_TAD
6649 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
6650#endif /* E1K_NO_TAD */
6651#endif /* E1K_USE_TX_TIMERS */
6652#ifdef E1K_USE_RX_TIMERS
6653 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
6654 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
6655#endif /* E1K_USE_RX_TIMERS */
6656 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
6657 /* 3) Did I forget anything? */
6658 E1kLog(("%s Locked\n", pThis->szPrf));
6659 return VINF_SUCCESS;
6660#endif
6661}
6662
6663/**
6664 * @callback_method_impl{FNSSMDEVSAVEEXEC}
6665 */
6666static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6667{
6668 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6669
6670 e1kSaveConfig(pThis, pSSM);
6671 pThis->eeprom.save(pSSM);
6672 e1kDumpState(pThis);
6673 SSMR3PutMem(pSSM, pThis->auRegs, sizeof(pThis->auRegs));
6674 SSMR3PutBool(pSSM, pThis->fIntRaised);
6675 Phy::saveState(pSSM, &pThis->phy);
6676 SSMR3PutU32(pSSM, pThis->uSelectedReg);
6677 SSMR3PutMem(pSSM, pThis->auMTA, sizeof(pThis->auMTA));
6678 SSMR3PutMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6679 SSMR3PutMem(pSSM, pThis->auVFTA, sizeof(pThis->auVFTA));
6680 SSMR3PutU64(pSSM, pThis->u64AckedAt);
6681 SSMR3PutU16(pSSM, pThis->u16RxBSize);
6682 //SSMR3PutBool(pSSM, pThis->fDelayInts);
6683 //SSMR3PutBool(pSSM, pThis->fIntMaskUsed);
6684 SSMR3PutU16(pSSM, pThis->u16TxPktLen);
6685/** @todo State wrt the TSE buffer is incomplete, so little point in
6686 * saving this actually. */
6687 SSMR3PutMem(pSSM, pThis->aTxPacketFallback, pThis->u16TxPktLen);
6688 SSMR3PutBool(pSSM, pThis->fIPcsum);
6689 SSMR3PutBool(pSSM, pThis->fTCPcsum);
6690 SSMR3PutMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6691 SSMR3PutMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6692 SSMR3PutBool(pSSM, pThis->fVTag);
6693 SSMR3PutU16(pSSM, pThis->u16VTagTCI);
6694#ifdef E1K_WITH_TXD_CACHE
6695#if 0
6696 SSMR3PutU8(pSSM, pThis->nTxDFetched);
6697 SSMR3PutMem(pSSM, pThis->aTxDescriptors,
6698 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6699#else
6700 /*
6701 * There is no point in storing TX descriptor cache entries as we can simply
6702 * fetch them again. Moreover, normally the cache is always empty when we
6703 * save the state. Store zero entries for compatibility.
6704 */
6705 SSMR3PutU8(pSSM, 0);
6706#endif
6707#endif /* E1K_WITH_TXD_CACHE */
6708/** @todo GSO requires some more state here. */
6709 E1kLog(("%s State has been saved\n", pThis->szPrf));
6710 return VINF_SUCCESS;
6711}
6712
6713#if 0
6714/**
6715 * @callback_method_impl{FNSSMDEVSAVEDONE}
6716 */
6717static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6718{
6719 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6720
6721 /* If VM is being powered off unlocking will result in assertions in PGM */
6722 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
6723 pThis->fLocked = false;
6724 else
6725 E1kLog(("%s VM is not running -- remain locked\n", pThis->szPrf));
6726 E1kLog(("%s Unlocked\n", pThis->szPrf));
6727 return VINF_SUCCESS;
6728}
6729#endif
6730
6731/**
6732 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
6733 */
6734static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6735{
6736 RT_NOREF(pSSM);
6737 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6738
6739 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6740 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6741 return rc;
6742 e1kCsLeave(pThis);
6743 return VINF_SUCCESS;
6744}
6745
6746/**
6747 * @callback_method_impl{FNSSMDEVLOADEXEC}
6748 */
6749static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
6750{
6751 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6752 int rc;
6753
6754 if ( uVersion != E1K_SAVEDSTATE_VERSION
6755#ifdef E1K_WITH_TXD_CACHE
6756 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
6757#endif /* E1K_WITH_TXD_CACHE */
6758 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
6759 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
6760 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
6761
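    /*
     * The config block (MAC + chip type) written by e1kSaveConfig is present in
     * every live pass and, for saved states newer than VBox 3.0, in the final
     * pass as well; hence the condition below.
     */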
6762 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
6763 || uPass != SSM_PASS_FINAL)
6764 {
6765 /* config checks */
6766 RTMAC macConfigured;
6767 rc = SSMR3GetMem(pSSM, &macConfigured, sizeof(macConfigured));
6768 AssertRCReturn(rc, rc);
6769 if ( memcmp(&macConfigured, &pThis->macConfigured, sizeof(macConfigured))
6770 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
6771 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
6772
6773 E1KCHIP eChip;
6774 rc = SSMR3GetU32(pSSM, &eChip);
6775 AssertRCReturn(rc, rc);
6776 if (eChip != pThis->eChip)
6777 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
6778 }
6779
6780 if (uPass == SSM_PASS_FINAL)
6781 {
6782 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
6783 {
6784 rc = pThis->eeprom.load(pSSM);
6785 AssertRCReturn(rc, rc);
6786 }
6787 /* the state */
6788 SSMR3GetMem(pSSM, &pThis->auRegs, sizeof(pThis->auRegs));
6789 SSMR3GetBool(pSSM, &pThis->fIntRaised);
6790 /** @todo PHY could be made a separate device with its own versioning */
6791 Phy::loadState(pSSM, &pThis->phy);
6792 SSMR3GetU32(pSSM, &pThis->uSelectedReg);
6793 SSMR3GetMem(pSSM, &pThis->auMTA, sizeof(pThis->auMTA));
6794 SSMR3GetMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6795 SSMR3GetMem(pSSM, &pThis->auVFTA, sizeof(pThis->auVFTA));
6796 SSMR3GetU64(pSSM, &pThis->u64AckedAt);
6797 SSMR3GetU16(pSSM, &pThis->u16RxBSize);
6798 //SSMR3GetBool(pSSM, pThis->fDelayInts);
6799 //SSMR3GetBool(pSSM, pThis->fIntMaskUsed);
6800 SSMR3GetU16(pSSM, &pThis->u16TxPktLen);
6801 SSMR3GetMem(pSSM, &pThis->aTxPacketFallback[0], pThis->u16TxPktLen);
6802 SSMR3GetBool(pSSM, &pThis->fIPcsum);
6803 SSMR3GetBool(pSSM, &pThis->fTCPcsum);
6804 SSMR3GetMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6805 rc = SSMR3GetMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6806 AssertRCReturn(rc, rc);
6807 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
6808 {
6809 SSMR3GetBool(pSSM, &pThis->fVTag);
6810 rc = SSMR3GetU16(pSSM, &pThis->u16VTagTCI);
6811 AssertRCReturn(rc, rc);
6812 }
6813 else
6814 {
6815 pThis->fVTag = false;
6816 pThis->u16VTagTCI = 0;
6817 }
6818#ifdef E1K_WITH_TXD_CACHE
6819 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
6820 {
6821 rc = SSMR3GetU8(pSSM, &pThis->nTxDFetched);
6822 AssertRCReturn(rc, rc);
6823 if (pThis->nTxDFetched)
6824 SSMR3GetMem(pSSM, pThis->aTxDescriptors,
6825 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6826 }
6827 else
6828 pThis->nTxDFetched = 0;
6829 /*
6830 * @todo: Perhaps we should not store TXD cache as the entries can be
6831 * simply fetched again from guest's memory. Or can't they?
6832 */
6833#endif /* E1K_WITH_TXD_CACHE */
6834#ifdef E1K_WITH_RXD_CACHE
6835 /*
6836 * There is no point in storing the RX descriptor cache in the saved
6837 * state, we just need to make sure it is empty.
6838 */
6839 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
6840#endif /* E1K_WITH_RXD_CACHE */
6841 /* derived state */
6842 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
6843
6844 E1kLog(("%s State has been restored\n", pThis->szPrf));
6845 e1kDumpState(pThis);
6846 }
6847 return VINF_SUCCESS;
6848}
6849
6850/**
6851 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
6852 */
6853static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6854{
6855 RT_NOREF(pSSM);
6856 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6857
6858 /* Update promiscuous mode */
6859 if (pThis->pDrvR3)
6860 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3,
6861 !!(RCTL & (RCTL_UPE | RCTL_MPE)));
6862
6863 /*
6864 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
6865 * passed to us. We go through all this stuff if the link was up and we
6866 * weren't teleported.
6867 */
6868 if ( (STATUS & STATUS_LU)
6869 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
6870 && pThis->cMsLinkUpDelay)
6871 {
6872 e1kR3LinkDownTemp(pThis);
6873 }
6874 return VINF_SUCCESS;
6875}
6876
6877
6878
6879/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
6880
6881/**
6882 * @callback_method_impl{FNRTSTRFORMATTYPE}
6883 */
6884static DECLCALLBACK(size_t) e1kFmtRxDesc(PFNRTSTROUTPUT pfnOutput,
6885 void *pvArgOutput,
6886 const char *pszType,
6887 void const *pvValue,
6888 int cchWidth,
6889 int cchPrecision,
6890 unsigned fFlags,
6891 void *pvUser)
6892{
6893 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
6894 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
6895 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
6896 if (!pDesc)
6897 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
6898
6899 size_t cbPrintf = 0;
6900 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
6901 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
6902 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
6903 pDesc->status.fPIF ? "PIF" : "pif",
6904 pDesc->status.fIPCS ? "IPCS" : "ipcs",
6905 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
6906 pDesc->status.fVP ? "VP" : "vp",
6907 pDesc->status.fIXSM ? "IXSM" : "ixsm",
6908 pDesc->status.fEOP ? "EOP" : "eop",
6909 pDesc->status.fDD ? "DD" : "dd",
6910 pDesc->status.fRXE ? "RXE" : "rxe",
6911 pDesc->status.fIPE ? "IPE" : "ipe",
6912 pDesc->status.fTCPE ? "TCPE" : "tcpe",
6913 pDesc->status.fCE ? "CE" : "ce",
6914 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
6915 E1K_SPEC_VLAN(pDesc->status.u16Special),
6916 E1K_SPEC_PRI(pDesc->status.u16Special));
6917 return cbPrintf;
6918}
6919
6920/**
6921 * @callback_method_impl{FNRTSTRFORMATTYPE}
6922 */
6923static DECLCALLBACK(size_t) e1kFmtTxDesc(PFNRTSTROUTPUT pfnOutput,
6924 void *pvArgOutput,
6925 const char *pszType,
6926 void const *pvValue,
6927 int cchWidth,
6928 int cchPrecision,
6929 unsigned fFlags,
6930 void *pvUser)
6931{
6932 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
6933 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
6934 E1KTXDESC *pDesc = (E1KTXDESC*)pvValue;
6935 if (!pDesc)
6936 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
6937
6938 size_t cbPrintf = 0;
6939 switch (e1kGetDescType(pDesc))
6940 {
6941 case E1K_DTYP_CONTEXT:
6942 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
6943 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
6944 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
6945 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
6946 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
6947 pDesc->context.dw2.fIDE ? " IDE":"",
6948 pDesc->context.dw2.fRS ? " RS" :"",
6949 pDesc->context.dw2.fTSE ? " TSE":"",
6950 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
6951 pDesc->context.dw2.fTCP ? "TCP":"UDP",
6952 pDesc->context.dw2.u20PAYLEN,
6953 pDesc->context.dw3.u8HDRLEN,
6954 pDesc->context.dw3.u16MSS,
6955 pDesc->context.dw3.fDD?"DD":"");
6956 break;
6957 case E1K_DTYP_DATA:
6958 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
6959 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
6960 pDesc->data.u64BufAddr,
6961 pDesc->data.cmd.u20DTALEN,
6962 pDesc->data.cmd.fIDE ? " IDE" :"",
6963 pDesc->data.cmd.fVLE ? " VLE" :"",
6964 pDesc->data.cmd.fRPS ? " RPS" :"",
6965 pDesc->data.cmd.fRS ? " RS" :"",
6966 pDesc->data.cmd.fTSE ? " TSE" :"",
6967 pDesc->data.cmd.fIFCS? " IFCS":"",
6968 pDesc->data.cmd.fEOP ? " EOP" :"",
6969 pDesc->data.dw3.fDD ? " DD" :"",
6970 pDesc->data.dw3.fEC ? " EC" :"",
6971 pDesc->data.dw3.fLC ? " LC" :"",
6972 pDesc->data.dw3.fTXSM? " TXSM":"",
6973 pDesc->data.dw3.fIXSM? " IXSM":"",
6974 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
6975 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
6976 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
6977 break;
6978 case E1K_DTYP_LEGACY:
6979 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
6980 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
6981 pDesc->data.u64BufAddr,
6982 pDesc->legacy.cmd.u16Length,
6983 pDesc->legacy.cmd.fIDE ? " IDE" :"",
6984 pDesc->legacy.cmd.fVLE ? " VLE" :"",
6985 pDesc->legacy.cmd.fRPS ? " RPS" :"",
6986 pDesc->legacy.cmd.fRS ? " RS" :"",
6987 pDesc->legacy.cmd.fIC ? " IC" :"",
6988 pDesc->legacy.cmd.fIFCS? " IFCS":"",
6989 pDesc->legacy.cmd.fEOP ? " EOP" :"",
6990 pDesc->legacy.dw3.fDD ? " DD" :"",
6991 pDesc->legacy.dw3.fEC ? " EC" :"",
6992 pDesc->legacy.dw3.fLC ? " LC" :"",
6993 pDesc->legacy.cmd.u8CSO,
6994 pDesc->legacy.dw3.u8CSS,
6995 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
6996 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
6997 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
6998 break;
6999 default:
7000 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
7001 break;
7002 }
7003
7004 return cbPrintf;
7005}
7006
7007/** Initializes debug helpers (logging format types). */
7008static int e1kInitDebugHelpers(void)
7009{
7010 int rc = VINF_SUCCESS;
7011 static bool s_fHelpersRegistered = false;
7012 if (!s_fHelpersRegistered)
7013 {
7014 s_fHelpersRegistered = true;
7015 rc = RTStrFormatTypeRegister("e1krxd", e1kFmtRxDesc, NULL);
7016 AssertRCReturn(rc, rc);
7017 rc = RTStrFormatTypeRegister("e1ktxd", e1kFmtTxDesc, NULL);
7018 AssertRCReturn(rc, rc);
7019 }
7020 return rc;
7021}
7022
7023/**
7024 * Status info callback.
7025 *
7026 * @param pDevIns The device instance.
7027 * @param pHlp The output helpers.
7028 * @param pszArgs The arguments.
7029 */
7030static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
7031{
7032 RT_NOREF(pszArgs);
7033 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7034 unsigned i;
7035 // bool fRcvRing = false;
7036 // bool fXmtRing = false;
7037
7038 /*
7039 * Parse args.
7040 if (pszArgs)
7041 {
7042 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
7043 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
7044 }
7045 */
7046
7047 /*
7048 * Show info.
7049 */
7050 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%RTiop mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7051 pDevIns->iInstance, pThis->IOPortBase, pThis->addrMMReg,
7052 &pThis->macConfigured, g_aChips[pThis->eChip].pcszName,
7053 pThis->fRCEnabled ? " GC" : "", pThis->fR0Enabled ? " R0" : "");
7054
7055 e1kCsEnter(pThis, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
7056
7057 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7058 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", g_aE1kRegMap[i].abbrev, pThis->auRegs[i]);
7059
7060 for (i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
7061 {
7062 E1KRAELEM* ra = pThis->aRecAddr.array + i;
7063 if (ra->ctl & RA_CTL_AV)
7064 {
7065 const char *pcszTmp;
7066 switch (ra->ctl & RA_CTL_AS)
7067 {
7068 case 0: pcszTmp = "DST"; break;
7069 case 1: pcszTmp = "SRC"; break;
7070 default: pcszTmp = "reserved";
7071 }
7072 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7073 }
7074 }
7075 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7076 uint32_t rdh = RDH;
7077 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7078 for (i = 0; i < cDescs; ++i)
7079 {
7080 E1KRXDESC desc;
7081 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7082 &desc, sizeof(desc));
7083 if (i == rdh)
7084 pHlp->pfnPrintf(pHlp, ">>> ");
7085 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7086 }
7087#ifdef E1K_WITH_RXD_CACHE
7088 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7089 pThis->iRxDCurrent, RDH, pThis->nRxDFetched, E1K_RXD_CACHE_SIZE);
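    /* Work out which ring index the first cached descriptor corresponds to:
       the cache entry at iRxDCurrent matches RDH, so step RDH back by
       iRxDCurrent entries, wrapping around the ring if necessary. */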
7090 if (rdh > pThis->iRxDCurrent)
7091 rdh -= pThis->iRxDCurrent;
7092 else
7093 rdh = cDescs + rdh - pThis->iRxDCurrent;
7094 for (i = 0; i < pThis->nRxDFetched; ++i)
7095 {
7096 if (i == pThis->iRxDCurrent)
7097 pHlp->pfnPrintf(pHlp, ">>> ");
7098 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7099 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7100 &pThis->aRxDescriptors[i]);
7101 }
7102#endif /* E1K_WITH_RXD_CACHE */
7103
7104 cDescs = TDLEN / sizeof(E1KTXDESC);
7105 uint32_t tdh = TDH;
7106 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7107 for (i = 0; i < cDescs; ++i)
7108 {
7109 E1KTXDESC desc;
7110 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7111 &desc, sizeof(desc));
7112 if (i == tdh)
7113 pHlp->pfnPrintf(pHlp, ">>> ");
7114 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7115 }
7116#ifdef E1K_WITH_TXD_CACHE
7117 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7118 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE);
7119 if (tdh > pThis->iTxDCurrent)
7120 tdh -= pThis->iTxDCurrent;
7121 else
7122 tdh = cDescs + tdh - pThis->iTxDCurrent;
7123 for (i = 0; i < pThis->nTxDFetched; ++i)
7124 {
7125 if (i == pThis->iTxDCurrent)
7126 pHlp->pfnPrintf(pHlp, ">>> ");
7127 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7128 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7129 &pThis->aTxDescriptors[i]);
7130 }
7131#endif /* E1K_WITH_TXD_CACHE */
7132
7133
7134#ifdef E1K_INT_STATS
7135 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pThis->uStatIntTry);
7136 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pThis->uStatInt);
7137 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pThis->uStatIntLower);
7138 pHlp->pfnPrintf(pHlp, "Interrupts delayed: %d\n", pThis->uStatIntDly);
7139 pHlp->pfnPrintf(pHlp, "Disabled delayed: %d\n", pThis->uStatDisDly);
7140 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pThis->uStatIntSkip);
7141 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pThis->uStatIntMasked);
7142 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pThis->uStatIntEarly);
7143 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pThis->uStatIntLate);
7144 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pThis->iStatIntLost);
7145 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pThis->uStatIntRx);
7146 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pThis->uStatIntTx);
7147 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pThis->uStatIntICS);
7148 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pThis->uStatIntRDTR);
7149 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pThis->uStatIntRXDMT0);
7150 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pThis->uStatIntTXQE);
7151 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pThis->uStatTxIDE);
7152 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pThis->uStatTxDelayed);
7153 pHlp->pfnPrintf(pHlp, "TX delayed expired: %d\n", pThis->uStatTxDelayExp);
7154 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pThis->uStatTxNoRS);
7155 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pThis->uStatTAD);
7156 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pThis->uStatTID);
7157 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pThis->uStatRAD);
7158 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pThis->uStatRID);
7159 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pThis->uStatDescCtx);
7160 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pThis->uStatDescDat);
7161 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pThis->uStatDescLeg);
7162 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pThis->uStatRxFrm);
7163 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pThis->uStatTxFrm);
7164 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pThis->uStatTx1514);
7165 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pThis->uStatTx2962);
7166 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pThis->uStatTx4410);
7167 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pThis->uStatTx5858);
7168 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pThis->uStatTx7306);
7169 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pThis->uStatTx8754);
7170 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pThis->uStatTx16384);
7171 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pThis->uStatTx32768);
7172 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pThis->uStatTxLarge);
7173#endif /* E1K_INT_STATS */
7174
7175 e1kCsLeave(pThis);
7176}
7177
7178
7179
7180/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
7181
7182/**
7183 * Detach notification.
7184 *
7185 * One port on the network card has been disconnected from the network.
7186 *
7187 * @param pDevIns The device instance.
7188 * @param iLUN The logical unit which is being detached.
7189 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7190 */
7191static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7192{
7193 RT_NOREF(fFlags);
7194 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7195 Log(("%s e1kR3Detach:\n", pThis->szPrf));
7196
7197 AssertLogRelReturnVoid(iLUN == 0);
7198
7199 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7200
7201 /** @todo r=pritesh still need to check if i missed
7202 * to clean something in this function
7203 */
7204
7205 /*
7206 * Zero some important members.
7207 */
7208 pThis->pDrvBase = NULL;
7209 pThis->pDrvR3 = NULL;
7210 pThis->pDrvR0 = NIL_RTR0PTR;
7211 pThis->pDrvRC = NIL_RTRCPTR;
7212
7213 PDMCritSectLeave(&pThis->cs);
7214}
7215
7216/**
7217 * Attach the Network attachment.
7218 *
7219 * One port on the network card has been connected to a network.
7220 *
7221 * @returns VBox status code.
7222 * @param pDevIns The device instance.
7223 * @param iLUN The logical unit which is being attached.
7224 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7225 *
7226 * @remarks This code path is not used during construction.
7227 */
7228static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7229{
7230 RT_NOREF(fFlags);
7231 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7232 LogFlow(("%s e1kR3Attach:\n", pThis->szPrf));
7233
7234 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7235
7236 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7237
7238 /*
7239 * Attach the driver.
7240 */
7241 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7242 if (RT_SUCCESS(rc))
7243 {
7244 if (rc == VINF_NAT_DNS)
7245 {
7246#ifdef RT_OS_LINUX
7247 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7248 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7249#else
7250 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7251 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7252#endif
7253 }
7254 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7255 AssertMsgStmt(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7256 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7257 if (RT_SUCCESS(rc))
7258 {
7259 PPDMIBASER0 pBaseR0 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0);
7260 pThis->pDrvR0 = pBaseR0 ? pBaseR0->pfnQueryInterface(pBaseR0, PDMINETWORKUP_IID) : NIL_RTR0PTR;
7261
7262 PPDMIBASERC pBaseRC = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC);
7263 pThis->pDrvRC = pBaseRC ? pBaseRC->pfnQueryInterface(pBaseRC, PDMINETWORKUP_IID) : NIL_RTRCPTR;
7264 }
7265 }
7266 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7267 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7268 {
7269 /* This should never happen because this function is not called
7270 * if there is no driver to attach! */
7271 Log(("%s No attached driver!\n", pThis->szPrf));
7272 }
7273
7274 /*
7275 * Temporarily set the link down if it was up so that the guest
7276 * will know that we have changed the configuration of the
7277 * network card.
7278 */
7279 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7280 e1kR3LinkDownTemp(pThis);
7281
7282 PDMCritSectLeave(&pThis->cs);
7283 return rc;
7284
7285}
7286
7287/**
7288 * @copydoc FNPDMDEVPOWEROFF
7289 */
7290static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7291{
7292 /* Poke thread waiting for buffer space. */
7293 e1kWakeupReceive(pDevIns);
7294}
7295
7296/**
7297 * @copydoc FNPDMDEVRESET
7298 */
7299static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7300{
7301 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7302#ifdef E1K_TX_DELAY
7303 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
7304#endif /* E1K_TX_DELAY */
7305 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
7306 e1kCancelTimer(pThis, pThis->CTX_SUFF(pLUTimer));
7307 e1kXmitFreeBuf(pThis);
7308 pThis->u16TxPktLen = 0;
7309 pThis->fIPcsum = false;
7310 pThis->fTCPcsum = false;
7311 pThis->fIntMaskUsed = false;
7312 pThis->fDelayInts = false;
7313 pThis->fLocked = false;
7314 pThis->u64AckedAt = 0;
7315 e1kHardReset(pThis);
7316}
7317
7318/**
7319 * @copydoc FNPDMDEVSUSPEND
7320 */
7321static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7322{
7323 /* Poke thread waiting for buffer space. */
7324 e1kWakeupReceive(pDevIns);
7325}
7326
7327/**
7328 * Device relocation callback.
7329 *
7330 * When this callback is called, the device instance data and, if the
7331 * device has a GC component, its GC mappings are being relocated and/or
7332 * the selectors have been changed. The device must use this chance to
7333 * perform the necessary pointer relocations and data updates.
7334 *
7335 * Before the GC code is executed for the first time, this function will be
7336 * called with a 0 delta so that GC pointer calculations can be done in one place.
7337 *
7338 * @param pDevIns Pointer to the device instance.
7339 * @param offDelta The relocation delta relative to the old location.
7340 *
7341 * @remark A relocation CANNOT fail.
7342 */
7343static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7344{
7345 RT_NOREF(offDelta);
7346 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7347 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7348 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7349 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7350#ifdef E1K_USE_RX_TIMERS
7351 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7352 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7353#endif /* E1K_USE_RX_TIMERS */
7354#ifdef E1K_USE_TX_TIMERS
7355 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7356# ifndef E1K_NO_TAD
7357 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7358# endif /* E1K_NO_TAD */
7359#endif /* E1K_USE_TX_TIMERS */
7360#ifdef E1K_TX_DELAY
7361 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7362#endif /* E1K_TX_DELAY */
7363 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7364 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7365}
7366
7367/**
7368 * Destruct a device instance.
7369 *
7370 * We need to free non-VM resources only.
7371 *
7372 * @returns VBox status code.
7373 * @param pDevIns The device instance data.
7374 * @thread EMT
7375 */
7376static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7377{
7378 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7379 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7380
7381 e1kDumpState(pThis);
7382 E1kLog(("%s Destroying instance\n", pThis->szPrf));
7383 if (PDMCritSectIsInitialized(&pThis->cs))
7384 {
7385 if (pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
7386 {
7387 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
7388 RTSemEventDestroy(pThis->hEventMoreRxDescAvail);
7389 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7390 }
7391#ifdef E1K_WITH_TX_CS
7392 PDMR3CritSectDelete(&pThis->csTx);
7393#endif /* E1K_WITH_TX_CS */
7394 PDMR3CritSectDelete(&pThis->csRx);
7395 PDMR3CritSectDelete(&pThis->cs);
7396 }
7397 return VINF_SUCCESS;
7398}
7399
7400
7401/**
7402 * Set PCI configuration space registers.
7403 *
7404 * @param pPciDev Pointer to the PCI device structure to configure for the given chip type (eChip).
7405 * @thread EMT
7406 */
7407static DECLCALLBACK(void) e1kConfigurePciDev(PPCIDEVICE pPciDev, E1KCHIP eChip)
7408{
7409 Assert(eChip < RT_ELEMENTS(g_aChips));
7410 /* Configure PCI Device, assume 32-bit mode ******************************/
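    /* Capability chain as set up below: 0xDC (Power Management) -> 0xE4 (PCI-X)
       -> MSI at 0x80 when E1K_WITH_MSI is defined (the MSI capability itself is
       registered in e1kR3Construct). */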
7411 PCIDevSetVendorId(pPciDev, g_aChips[eChip].uPCIVendorId);
7412 PCIDevSetDeviceId(pPciDev, g_aChips[eChip].uPCIDeviceId);
7413 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_aChips[eChip].uPCISubsystemVendorId);
7414 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_aChips[eChip].uPCISubsystemId);
7415
7416 PCIDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7417 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7418 PCIDevSetWord( pPciDev, VBOX_PCI_STATUS,
7419 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7420 /* Stepping A2 */
7421 PCIDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7422 /* Ethernet adapter */
7423 PCIDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7424 PCIDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7425 /* normal single function Ethernet controller */
7426 PCIDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7427 /* Memory Register Base Address */
7428 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7429 /* Memory Flash Base Address */
7430 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7431 /* IO Register Base Address */
7432 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7433 /* Expansion ROM Base Address */
7434 PCIDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7435 /* Capabilities Pointer */
7436 PCIDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7437 /* Interrupt Pin: INTA# */
7438 PCIDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7439 /* Max_Lat/Min_Gnt: very high priority and time slice */
7440 PCIDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7441 PCIDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7442
7443 /* PCI Power Management Registers ****************************************/
7444 /* Capability ID: PCI Power Management Registers */
7445 PCIDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7446 /* Next Item Pointer: PCI-X */
7447 PCIDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7448 /* Power Management Capabilities: PM disabled, DSI */
7449 PCIDevSetWord( pPciDev, 0xDC + 2,
7450 0x0002 | VBOX_PCI_PM_CAP_DSI);
7451 /* Power Management Control / Status Register: PM disabled */
7452 PCIDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7453 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7454 PCIDevSetByte( pPciDev, 0xDC + 6, 0x00);
7455 /* Data Register: PM disabled, always 0 */
7456 PCIDevSetByte( pPciDev, 0xDC + 7, 0x00);
7457
7458 /* PCI-X Configuration Registers *****************************************/
7459 /* Capability ID: PCI-X Configuration Registers */
7460 PCIDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7461#ifdef E1K_WITH_MSI
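    /* Next Item Pointer: MSI capability at offset 0x80 (registered in e1kR3Construct) */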
7462 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x80);
7463#else
7464 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7465 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7466#endif
7467 /* PCI-X Command: Enable Relaxed Ordering */
7468 PCIDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7469 /* PCI-X Status: 32-bit, 66MHz*/
7470 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7471 PCIDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
7472}
7473
7474/**
7475 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7476 */
7477static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7478{
7479 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7480 int rc;
7481 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7482
7483 /*
7484 * Initialize the instance data (state).
7485 * Note! Caller has initialized it to ZERO already.
7486 */
7487 RTStrPrintf(pThis->szPrf, sizeof(pThis->szPrf), "E1000#%d", iInstance);
7488 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", pThis->szPrf, sizeof(E1KRXDESC)));
7489 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7490 pThis->pDevInsR3 = pDevIns;
7491 pThis->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
7492 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7493 pThis->u16TxPktLen = 0;
7494 pThis->fIPcsum = false;
7495 pThis->fTCPcsum = false;
7496 pThis->fIntMaskUsed = false;
7497 pThis->fDelayInts = false;
7498 pThis->fLocked = false;
7499 pThis->u64AckedAt = 0;
7500 pThis->led.u32Magic = PDMLED_MAGIC;
7501 pThis->u32PktNo = 1;
7502
7503 /* Interfaces */
7504 pThis->IBase.pfnQueryInterface = e1kR3QueryInterface;
7505
7506 pThis->INetworkDown.pfnWaitReceiveAvail = e1kR3NetworkDown_WaitReceiveAvail;
7507 pThis->INetworkDown.pfnReceive = e1kR3NetworkDown_Receive;
7508 pThis->INetworkDown.pfnXmitPending = e1kR3NetworkDown_XmitPending;
7509
7510 pThis->ILeds.pfnQueryStatusLed = e1kR3QueryStatusLed;
7511
7512 pThis->INetworkConfig.pfnGetMac = e1kR3GetMac;
7513 pThis->INetworkConfig.pfnGetLinkState = e1kR3GetLinkState;
7514 pThis->INetworkConfig.pfnSetLinkState = e1kR3SetLinkState;
7515
7516 /*
7517 * Internal validations.
7518 */
7519 for (uint32_t iReg = 1; iReg < E1K_NUM_OF_BINARY_SEARCHABLE; iReg++)
7520 AssertLogRelMsgReturn( g_aE1kRegMap[iReg].offset > g_aE1kRegMap[iReg - 1].offset
7521 && g_aE1kRegMap[iReg].offset + g_aE1kRegMap[iReg].size
7522 >= g_aE1kRegMap[iReg - 1].offset + g_aE1kRegMap[iReg - 1].size,
7523 ("%s@%#xLB%#x vs %s@%#xLB%#x\n",
7524 g_aE1kRegMap[iReg].abbrev, g_aE1kRegMap[iReg].offset, g_aE1kRegMap[iReg].size,
7525 g_aE1kRegMap[iReg - 1].abbrev, g_aE1kRegMap[iReg - 1].offset, g_aE1kRegMap[iReg - 1].size),
7526 VERR_INTERNAL_ERROR_4);
7527
7528 /*
7529 * Validate configuration.
7530 */
7531 if (!CFGMR3AreValuesValid(pCfg, "MAC\0" "CableConnected\0" "AdapterType\0"
7532 "LineSpeed\0" "GCEnabled\0" "R0Enabled\0"
7533 "ItrEnabled\0" "ItrRxEnabled\0"
7534 "EthernetCRC\0" "GSOEnabled\0" "LinkUpDelay\0"))
7535 return PDMDEV_SET_ERROR(pDevIns, VERR_PDM_DEVINS_UNKNOWN_CFG_VALUES,
7536 N_("Invalid configuration for E1000 device"));
7537
7538 /** @todo LineSpeed unused! */
7539
7540 pThis->fR0Enabled = true;
7541 pThis->fRCEnabled = true;
7542 pThis->fEthernetCRC = true;
7543 pThis->fGSOEnabled = true;
7544 pThis->fItrEnabled = true;
7545 pThis->fItrRxEnabled = true;
7546
7547 /* Get config params */
7548 rc = CFGMR3QueryBytes(pCfg, "MAC", pThis->macConfigured.au8, sizeof(pThis->macConfigured.au8));
7549 if (RT_FAILURE(rc))
7550 return PDMDEV_SET_ERROR(pDevIns, rc,
7551 N_("Configuration error: Failed to get MAC address"));
7552 rc = CFGMR3QueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
7553 if (RT_FAILURE(rc))
7554 return PDMDEV_SET_ERROR(pDevIns, rc,
7555 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7556 rc = CFGMR3QueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
7557 if (RT_FAILURE(rc))
7558 return PDMDEV_SET_ERROR(pDevIns, rc,
7559 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7560 Assert(pThis->eChip <= E1K_CHIP_82545EM);
7561 rc = CFGMR3QueryBoolDef(pCfg, "GCEnabled", &pThis->fRCEnabled, true);
7562 if (RT_FAILURE(rc))
7563 return PDMDEV_SET_ERROR(pDevIns, rc,
7564 N_("Configuration error: Failed to get the value of 'GCEnabled'"));
7565
7566 rc = CFGMR3QueryBoolDef(pCfg, "R0Enabled", &pThis->fR0Enabled, true);
7567 if (RT_FAILURE(rc))
7568 return PDMDEV_SET_ERROR(pDevIns, rc,
7569 N_("Configuration error: Failed to get the value of 'R0Enabled'"));
7570
7571 rc = CFGMR3QueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
7572 if (RT_FAILURE(rc))
7573 return PDMDEV_SET_ERROR(pDevIns, rc,
7574 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7575
7576 rc = CFGMR3QueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
7577 if (RT_FAILURE(rc))
7578 return PDMDEV_SET_ERROR(pDevIns, rc,
7579 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
7580
7581 rc = CFGMR3QueryBoolDef(pCfg, "ItrEnabled", &pThis->fItrEnabled, true);
7582 if (RT_FAILURE(rc))
7583 return PDMDEV_SET_ERROR(pDevIns, rc,
7584 N_("Configuration error: Failed to get the value of 'ItrEnabled'"));
7585
7586 rc = CFGMR3QueryBoolDef(pCfg, "ItrRxEnabled", &pThis->fItrRxEnabled, true);
7587 if (RT_FAILURE(rc))
7588 return PDMDEV_SET_ERROR(pDevIns, rc,
7589 N_("Configuration error: Failed to get the value of 'ItrRxEnabled'"));
7590
7591 rc = CFGMR3QueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 5000); /* ms */
7592 if (RT_FAILURE(rc))
7593 return PDMDEV_SET_ERROR(pDevIns, rc,
7594 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
7595 Assert(pThis->cMsLinkUpDelay <= 300000); /* less than 5 minutes */
7596 if (pThis->cMsLinkUpDelay > 5000)
7597 LogRel(("%s WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
7598 else if (pThis->cMsLinkUpDelay == 0)
7599 LogRel(("%s WARNING! Link up delay is disabled!\n", pThis->szPrf));
7600
7601 LogRel(("%s Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s Itr=%s ItrRx=%s R0=%s GC=%s\n", pThis->szPrf,
7602 g_aChips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
7603 pThis->fEthernetCRC ? "on" : "off",
7604 pThis->fGSOEnabled ? "enabled" : "disabled",
7605 pThis->fItrEnabled ? "enabled" : "disabled",
7606 pThis->fItrRxEnabled ? "enabled" : "disabled",
7607 pThis->fR0Enabled ? "enabled" : "disabled",
7608 pThis->fRCEnabled ? "enabled" : "disabled"));
7609
7610 /* Initialize the EEPROM. */
7611 pThis->eeprom.init(pThis->macConfigured);
7612
7613 /* Initialize internal PHY. */
7614 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
7615 Phy::setLinkStatus(&pThis->phy, pThis->fCableConnected);
7616
7617 /* Initialize critical sections. We do our own locking. */
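    /* Replacing the default device critical section with a no-op one disables
       PDM's automatic locking; the device serializes access with its own cs,
       csRx and (optionally) csTx sections created below. */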
7618 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
7619 AssertRCReturn(rc, rc);
7620
7621 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "E1000#%d", iInstance);
7622 if (RT_FAILURE(rc))
7623 return rc;
7624 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "E1000#%dRX", iInstance);
7625 if (RT_FAILURE(rc))
7626 return rc;
7627#ifdef E1K_WITH_TX_CS
7628 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "E1000#%dTX", iInstance);
7629 if (RT_FAILURE(rc))
7630 return rc;
7631#endif /* E1K_WITH_TX_CS */
7632
7633 /* Saved state registration. */
7634 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
7635 NULL, e1kLiveExec, NULL,
7636 e1kSavePrep, e1kSaveExec, NULL,
7637 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
7638 if (RT_FAILURE(rc))
7639 return rc;
7640
7641 /* Set PCI config registers and register ourselves with the PCI bus. */
7642 e1kConfigurePciDev(&pThis->pciDevice, pThis->eChip);
7643 rc = PDMDevHlpPCIRegister(pDevIns, &pThis->pciDevice);
7644 if (RT_FAILURE(rc))
7645 return rc;
7646
7647#ifdef E1K_WITH_MSI
7648 PDMMSIREG MsiReg;
7649 RT_ZERO(MsiReg);
7650 MsiReg.cMsiVectors = 1;
7651 MsiReg.iMsiCapOffset = 0x80;
7652 MsiReg.iMsiNextOffset = 0x0;
7653 MsiReg.fMsi64bit = false;
7654 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
7655 AssertRCReturn(rc, rc);
7656#endif
7657
7658
7659 /* Map our registers to memory space (region 0, see e1kConfigurePCI)*/
7660 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, e1kMap);
7661 if (RT_FAILURE(rc))
7662 return rc;
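    /* With E1K_WITH_PREREG_MMIO the MMIO handlers are registered once here and
       e1kMap merely maps the region when the guest programs BAR0; otherwise
       e1kMap registers the handlers itself each time the region is mapped. */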
7663#ifdef E1K_WITH_PREREG_MMIO
7664 rc = PDMDevHlpMMIOExPreRegister(pDevIns, 0, E1K_MM_SIZE, IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD, "E1000",
7665 NULL /*pvUserR3*/, e1kMMIOWrite, e1kMMIORead, NULL /*pfnFillR3*/,
7666 NIL_RTR0PTR /*pvUserR0*/, pThis->fR0Enabled ? "e1kMMIOWrite" : NULL,
7667 pThis->fR0Enabled ? "e1kMMIORead" : NULL, NULL /*pszFillR0*/,
7668 NIL_RTRCPTR /*pvUserRC*/, pThis->fRCEnabled ? "e1kMMIOWrite" : NULL,
7669 pThis->fRCEnabled ? "e1kMMIORead" : NULL, NULL /*pszFillRC*/);
7670 AssertLogRelRCReturn(rc, rc);
7671#endif
7672 /* Map our registers to IO space (region 2, see e1kConfigurePCI) */
7673 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, E1K_IOPORT_SIZE, PCI_ADDRESS_SPACE_IO, e1kMap);
7674 if (RT_FAILURE(rc))
7675 return rc;
7676
7677 /* Create transmit queue */
7678 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7679 e1kTxQueueConsumer, true, "E1000-Xmit", &pThis->pTxQueueR3);
7680 if (RT_FAILURE(rc))
7681 return rc;
7682 pThis->pTxQueueR0 = PDMQueueR0Ptr(pThis->pTxQueueR3);
7683 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7684
7685 /* Create the RX notifier signaller. */
7686 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7687 e1kCanRxQueueConsumer, true, "E1000-Rcv", &pThis->pCanRxQueueR3);
7688 if (RT_FAILURE(rc))
7689 return rc;
7690 pThis->pCanRxQueueR0 = PDMQueueR0Ptr(pThis->pCanRxQueueR3);
7691 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7692
7693#ifdef E1K_TX_DELAY
7694 /* Create Transmit Delay Timer */
7695 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxDelayTimer, pThis,
7696 TMTIMER_FLAGS_NO_CRIT_SECT,
7697 "E1000 Transmit Delay Timer", &pThis->pTXDTimerR3);
7698 if (RT_FAILURE(rc))
7699 return rc;
7700 pThis->pTXDTimerR0 = TMTimerR0Ptr(pThis->pTXDTimerR3);
7701 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7702 TMR3TimerSetCritSect(pThis->pTXDTimerR3, &pThis->csTx);
7703#endif /* E1K_TX_DELAY */
7704
7705#ifdef E1K_USE_TX_TIMERS
7706 /* Create Transmit Interrupt Delay Timer */
7707 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxIntDelayTimer, pThis,
7708 TMTIMER_FLAGS_NO_CRIT_SECT,
7709 "E1000 Transmit Interrupt Delay Timer", &pThis->pTIDTimerR3);
7710 if (RT_FAILURE(rc))
7711 return rc;
7712 pThis->pTIDTimerR0 = TMTimerR0Ptr(pThis->pTIDTimerR3);
7713 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7714
7715# ifndef E1K_NO_TAD
7716 /* Create Transmit Absolute Delay Timer */
7717 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxAbsDelayTimer, pThis,
7718 TMTIMER_FLAGS_NO_CRIT_SECT,
7719 "E1000 Transmit Absolute Delay Timer", &pThis->pTADTimerR3);
7720 if (RT_FAILURE(rc))
7721 return rc;
7722 pThis->pTADTimerR0 = TMTimerR0Ptr(pThis->pTADTimerR3);
7723 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7724# endif /* !E1K_NO_TAD */
7725#endif /* E1K_USE_TX_TIMERS */
7726
7727#ifdef E1K_USE_RX_TIMERS
7728 /* Create Receive Interrupt Delay Timer */
7729 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxIntDelayTimer, pThis,
7730 TMTIMER_FLAGS_NO_CRIT_SECT,
7731 "E1000 Receive Interrupt Delay Timer", &pThis->pRIDTimerR3);
7732 if (RT_FAILURE(rc))
7733 return rc;
7734 pThis->pRIDTimerR0 = TMTimerR0Ptr(pThis->pRIDTimerR3);
7735 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7736
7737 /* Create Receive Absolute Delay Timer */
7738 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxAbsDelayTimer, pThis,
7739 TMTIMER_FLAGS_NO_CRIT_SECT,
7740 "E1000 Receive Absolute Delay Timer", &pThis->pRADTimerR3);
7741 if (RT_FAILURE(rc))
7742 return rc;
7743 pThis->pRADTimerR0 = TMTimerR0Ptr(pThis->pRADTimerR3);
7744 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7745#endif /* E1K_USE_RX_TIMERS */
7746
7747 /* Create Late Interrupt Timer */
7748 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLateIntTimer, pThis,
7749 TMTIMER_FLAGS_NO_CRIT_SECT,
7750 "E1000 Late Interrupt Timer", &pThis->pIntTimerR3);
7751 if (RT_FAILURE(rc))
7752 return rc;
7753 pThis->pIntTimerR0 = TMTimerR0Ptr(pThis->pIntTimerR3);
7754 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7755
7756 /* Create Link Up Timer */
7757 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLinkUpTimer, pThis,
7758 TMTIMER_FLAGS_NO_CRIT_SECT,
7759 "E1000 Link Up Timer", &pThis->pLUTimerR3);
7760 if (RT_FAILURE(rc))
7761 return rc;
7762 pThis->pLUTimerR0 = TMTimerR0Ptr(pThis->pLUTimerR3);
7763 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7764
7765 /* Register the info item */
7766 char szTmp[20];
7767 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
7768 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
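     /* The info item can be dumped from the VM debugger console, e.g.
      * "info e1k0" for the first instance (the instance number here is just an
      * example). */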
7769
7770 /* Status driver */
7771 PPDMIBASE pBase;
7772 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThis->IBase, &pBase, "Status Port");
7773 if (RT_FAILURE(rc))
7774 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
7775 pThis->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
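     /* PDM_STATUS_LUN is the LUN reserved for the status/LED driver; the LED
      * connector queried here is what lets the frontend drive the NIC activity
      * indicator. Unlike the network LUN below, failing to attach the status
      * LUN is treated as a fatal error. */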
7776
7777 /* Network driver */
7778 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7779 if (RT_SUCCESS(rc))
7780 {
7781 if (rc == VINF_NAT_DNS)
7782 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7783 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning, the guest will not be able to perform nameserver lookups and will probably observe delays when trying to do so"));
7784 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7785 AssertMsgReturn(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
7786
7787 pThis->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7788 pThis->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7789 }
7790 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7791 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7792 {
7793 /* No error! */
7794 E1kLog(("%s This adapter is not attached to any network!\n", pThis->szPrf));
7795 }
7796 else
7797 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
7798
7799 rc = RTSemEventCreate(&pThis->hEventMoreRxDescAvail);
7800 if (RT_FAILURE(rc))
7801 return rc;
7802
7803 rc = e1kInitDebugHelpers();
7804 if (RT_FAILURE(rc))
7805 return rc;
7806
7807 e1kHardReset(pThis);
7808
7809 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Public/Net/E1k%u/BytesReceived", iInstance);
7810 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Public/Net/E1k%u/BytesTransmitted", iInstance);
7811
7812 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
7813 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
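     /* The same two byte counters are intentionally registered twice: under
      * /Public/Net/... where they are presumably picked up by the VM metrics
      * facility, and under /Devices/... next to the rest of the per-device
      * statistics. */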
7814
7815#if defined(VBOX_WITH_STATISTICS)
7816 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
7817 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
7818 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
7819 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
7820 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
7821 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
7822 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
7823 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
7824 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
7825 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
7826 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
7827 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occured", iInstance);
7828 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
7829 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
7830 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
7831 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
7832 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
7833 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
7834 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
7835 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflowWakeup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups", "/Devices/E1k%d/RxOverflowWakeup", iInstance);
7836 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
7837 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
7838 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
7839 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
7840
7841 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors","/Devices/E1k%d/TxDesc/ContexNormal", iInstance);
7842 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
7843 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
7844 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
7845 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
7846 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
7847 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
7848 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
7849 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
7850 for (unsigned iReg = 0; iReg < E1K_NUM_OF_REGS; iReg++)
7851 {
7852 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7853 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Reads", iInstance, g_aE1kRegMap[iReg].abbrev);
7854 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7855 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Writes", iInstance, g_aE1kRegMap[iReg].abbrev);
7856 }
7857#endif /* VBOX_WITH_STATISTICS */
7858
7859#ifdef E1K_INT_STATS
7860 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "u64ArmedAt", "/Devices/E1k%d/u64ArmedAt", iInstance);
7861 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatMaxTxDelay", "/Devices/E1k%d/uStatMaxTxDelay", iInstance);
7862 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatInt, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatInt", "/Devices/E1k%d/uStatInt", iInstance);
7863 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTry", "/Devices/E1k%d/uStatIntTry", iInstance);
7864 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLower", "/Devices/E1k%d/uStatIntLower", iInstance);
7865 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntDly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntDly", "/Devices/E1k%d/uStatIntDly", iInstance);
7866 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLost", "/Devices/E1k%d/iStatIntLost", iInstance);
7867 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLostOne", "/Devices/E1k%d/iStatIntLostOne", iInstance);
7868 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDisDly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDisDly", "/Devices/E1k%d/uStatDisDly", iInstance);
7869 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntSkip", "/Devices/E1k%d/uStatIntSkip", iInstance);
7870 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLate", "/Devices/E1k%d/uStatIntLate", iInstance);
7871 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntMasked", "/Devices/E1k%d/uStatIntMasked", iInstance);
7872 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntEarly", "/Devices/E1k%d/uStatIntEarly", iInstance);
7873 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRx", "/Devices/E1k%d/uStatIntRx", iInstance);
7874 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTx", "/Devices/E1k%d/uStatIntTx", iInstance);
7875 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntICS", "/Devices/E1k%d/uStatIntICS", iInstance);
7876 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRDTR", "/Devices/E1k%d/uStatIntRDTR", iInstance);
7877 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRXDMT0", "/Devices/E1k%d/uStatIntRXDMT0", iInstance);
7878 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTXQE", "/Devices/E1k%d/uStatIntTXQE", iInstance);
7879 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxNoRS", "/Devices/E1k%d/uStatTxNoRS", iInstance);
7880 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxIDE", "/Devices/E1k%d/uStatTxIDE", iInstance);
7881 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayed", "/Devices/E1k%d/uStatTxDelayed", iInstance);
7882 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayExp", "/Devices/E1k%d/uStatTxDelayExp", iInstance);
7883 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTAD", "/Devices/E1k%d/uStatTAD", iInstance);
7884 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTID", "/Devices/E1k%d/uStatTID", iInstance);
7885 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRAD", "/Devices/E1k%d/uStatRAD", iInstance);
7886 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRID", "/Devices/E1k%d/uStatRID", iInstance);
7887 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRxFrm", "/Devices/E1k%d/uStatRxFrm", iInstance);
7888 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxFrm", "/Devices/E1k%d/uStatTxFrm", iInstance);
7889 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescCtx", "/Devices/E1k%d/uStatDescCtx", iInstance);
7890 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescDat", "/Devices/E1k%d/uStatDescDat", iInstance);
7891 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescLeg", "/Devices/E1k%d/uStatDescLeg", iInstance);
7892 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx1514", "/Devices/E1k%d/uStatTx1514", iInstance);
7893 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx2962", "/Devices/E1k%d/uStatTx2962", iInstance);
7894 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx4410", "/Devices/E1k%d/uStatTx4410", iInstance);
7895 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx5858", "/Devices/E1k%d/uStatTx5858", iInstance);
7896 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx7306", "/Devices/E1k%d/uStatTx7306", iInstance);
7897 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx8754", "/Devices/E1k%d/uStatTx8754", iInstance);
7898 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx16384", "/Devices/E1k%d/uStatTx16384", iInstance);
7899 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx32768", "/Devices/E1k%d/uStatTx32768", iInstance);
7900 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxLarge", "/Devices/E1k%d/uStatTxLarge", iInstance);
7901#endif /* E1K_INT_STATS */
7902
7903 return VINF_SUCCESS;
7904}
7905
7906/**
7907 * The device registration structure.
7908 */
7909const PDMDEVREG g_DeviceE1000 =
7910{
7911 /* Structure version. PDM_DEVREG_VERSION defines the current version. */
7912 PDM_DEVREG_VERSION,
7913 /* Device name. */
7914 "e1000",
7915 /* Name of guest context module (no path).
7916 * Only evaluated if PDM_DEVREG_FLAGS_RC is set. */
7917 "VBoxDDRC.rc",
7918 /* Name of ring-0 module (no path).
7919 * Only evaluated if PDM_DEVREG_FLAGS_R0 is set. */
7920 "VBoxDDR0.r0",
7921 /* The description of the device. The UTF-8 string pointed to shall, like this structure,
7922 * remain unchanged from registration till VM destruction. */
7923 "Intel PRO/1000 MT Desktop Ethernet.\n",
7924
7925 /* Flags, combination of the PDM_DEVREG_FLAGS_* \#defines. */
7926 PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0,
7927 /* Device class(es), combination of the PDM_DEVREG_CLASS_* \#defines. */
7928 PDM_DEVREG_CLASS_NETWORK,
7929 /* Maximum number of instances (per VM). */
7930 ~0U,
7931 /* Size of the instance data. */
7932 sizeof(E1KSTATE),
7933
7934 /* pfnConstruct */
7935 e1kR3Construct,
7936 /* pfnDestruct */
7937 e1kR3Destruct,
7938 /* pfnRelocate */
7939 e1kR3Relocate,
7940 /* pfnMemSetup */
7941 NULL,
7942 /* pfnPowerOn */
7943 NULL,
7944 /* pfnReset */
7945 e1kR3Reset,
7946 /* pfnSuspend */
7947 e1kR3Suspend,
7948 /* pfnResume */
7949 NULL,
7950 /* pfnAttach */
7951 e1kR3Attach,
7952 /* pfnDetach */
7953 e1kR3Detach,
7954 /* pfnQueryInterface */
7955 NULL,
7956 /* pfnInitComplete */
7957 NULL,
7958 /* pfnPowerOff */
7959 e1kR3PowerOff,
7960 /* pfnSoftReset */
7961 NULL,
7962
7963 /* u32VersionEnd */
7964 PDM_DEVREG_VERSION
7965};
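/* A minimal sketch of how a PDMDEVREG like g_DeviceE1000 reaches PDM. The entry
 * point below lives in the module's registration file, not in DevE1000.cpp, and
 * its exact shape here is an assumption (version checking omitted):
 *
 *   extern "C" DECLEXPORT(int) VBoxDevicesRegister(PPDMDEVREGCB pCallbacks, uint32_t u32Version)
 *   {
 *       RT_NOREF(u32Version); // a real implementation validates the callback version
 *       return pCallbacks->pfnRegister(pCallbacks, &g_DeviceE1000);
 *   }
 */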
7966
7967#endif /* IN_RING3 */
7968#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */