VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp@64807

Last change on this file since 64807 was 64807, checked in by vboxsync, 8 years ago

Dev/E1000: (bugref:8624) Bring link up immediately, interrupt later (500 ms, MacOS fix).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 323.0 KB
1/* $Id: DevE1000.cpp 64807 2016-12-08 11:27:49Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2016 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DEV_E1000
33#include <iprt/crc.h>
34#include <iprt/ctype.h>
35#include <iprt/net.h>
36#include <iprt/semaphore.h>
37#include <iprt/string.h>
38#include <iprt/time.h>
39#include <iprt/uuid.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/vmm/pdmnetifs.h>
42#include <VBox/vmm/pdmnetinline.h>
43#include <VBox/param.h>
44#include "VBoxDD.h"
45
46#include "DevEEPROM.h"
47#include "DevE1000Phy.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** @name E1000 Build Options
54 * @{ */
55/** @def E1K_INIT_RA0
56 * E1K_INIT_RA0 forces E1000 to set the first entry of the Receive Address filter
57 * table to the MAC address obtained from CFGM. Most guests read the MAC address from
58 * EEPROM and write it to RA[0] explicitly, but Mac OS X seems to depend on it
59 * being already set (see @bugref{4657}).
60 */
61#define E1K_INIT_RA0
62/** @def E1K_LSC_ON_SLU
63 * E1K_LSC_ON_SLU causes E1000 to generate Link Status Change interrupt when
64 * the guest driver brings up the link via STATUS.LU bit. Again the only guest
65 * that requires it is Mac OS X (see @bugref{4657}).
66 */
67#define E1K_LSC_ON_SLU
68/** @def E1K_INIT_LINKUP_DELAY_US
69 * E1K_INIT_LINKUP_DELAY_US prevents the link from coming up while the driver is
70 * still initializing (see @bugref{8624}). The value is in microseconds.
71 */
72#define E1K_INIT_LINKUP_DELAY_US (500 * 1000)
73/** @def E1K_IMS_INT_DELAY_NS
74 * E1K_IMS_INT_DELAY_NS prevents interrupt storms in Windows guests on enabling
75 * interrupts (see @bugref{8624}).
76 */
77#define E1K_IMS_INT_DELAY_NS 100
78/** @def E1K_TX_DELAY
79 * E1K_TX_DELAY aims to improve guest-host transfer rate for TCP streams by
80 * preventing packets to be sent immediately. It allows to send several
81 * packets in a batch reducing the number of acknowledgments. Note that it
82 * effectively disables R0 TX path, forcing sending in R3.
83 */
84//#define E1K_TX_DELAY 150
85/** @def E1K_USE_TX_TIMERS
86 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
87 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
88 * register. Enabling it showed no positive effects on existing guests so it
89 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
90 * Ethernet Controllers Software Developer’s Manual" for more detailed
91 * explanation.
92 */
93//#define E1K_USE_TX_TIMERS
94/** @def E1K_NO_TAD
95 * E1K_NO_TAD disables one of two timers enabled by E1K_USE_TX_TIMERS, the
96 * Transmit Absolute Delay time. This timer sets the maximum time interval
97 * during which TX interrupts can be postponed (delayed). It has no effect
98 * if E1K_USE_TX_TIMERS is not defined.
99 */
100//#define E1K_NO_TAD
101/** @def E1K_REL_DEBUG
102 * E1K_REL_DEBUG enables debug logging (levels 1, 2, and 3) in release builds.
103 */
104//#define E1K_REL_DEBUG
105/** @def E1K_INT_STATS
106 * E1K_INT_STATS enables collection of internal statistics used for
107 * debugging of delayed interrupts, etc.
108 */
109#define E1K_INT_STATS
110/** @def E1K_WITH_MSI
111 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
112 */
113//#define E1K_WITH_MSI
114/** @def E1K_WITH_TX_CS
115 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
116 */
117#define E1K_WITH_TX_CS
118/** @def E1K_WITH_TXD_CACHE
119 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
120 * single physical memory read (or two if it wraps around the end of TX
121 * descriptor ring). It is required for proper functioning of bandwidth
122 * resource control as it allows computing the exact sizes of packets prior
123 * to allocating their buffers (see @bugref{5582}).
124 */
125#define E1K_WITH_TXD_CACHE
126/** @def E1K_WITH_RXD_CACHE
127 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
128 * single physical memory read (or two if it wraps around the end of RX
129 * descriptor ring). Intel's packet driver for DOS needs this option in
130 * order to work properly (see @bugref{6217}).
131 */
132#define E1K_WITH_RXD_CACHE
133/** @def E1K_WITH_PREREG_MMIO
134 * E1K_WITH_PREREG_MMIO enables a new-style MMIO registration and is
135 * currently only done for testing the related PDM, IOM and PGM code. */
136//#define E1K_WITH_PREREG_MMIO
137/* @} */
138/* End of Options ************************************************************/
139
140#ifdef E1K_WITH_TXD_CACHE
141/**
142 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
143 * in the state structure. It limits the number of descriptors loaded in one
144 * batch read. For example, a Linux guest may use up to 20 descriptors per
145 * TSE packet. The largest TSE packet seen (Windows guest) was 45 descriptors.
146 */
147# define E1K_TXD_CACHE_SIZE 64u
148#endif /* E1K_WITH_TXD_CACHE */
149
150#ifdef E1K_WITH_RXD_CACHE
151/**
152 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
153 * in the state structure. It limits the number of descriptors loaded in one
154 * batch read. For example, an XP guest adds 15 RX descriptors at a time.
155 */
156# define E1K_RXD_CACHE_SIZE 16u
157#endif /* E1K_WITH_RXD_CACHE */
158
159
160/* Little helpers ************************************************************/
161#undef htons
162#undef ntohs
163#undef htonl
164#undef ntohl
165#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
166#define ntohs(x) htons(x)
167#define htonl(x) ASMByteSwapU32(x)
168#define ntohl(x) htonl(x)
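/* Illustration only (not part of the original source): behaviour of the helpers
 * above on a little-endian host; swapping twice restores the original value. */
#if 0
static void e1kIllustrateByteOrderHelpers(void)
{
    uint16_t uPort = htons(0x1234);     /* 0x1234 -> 0x3412 (network order) */
    uint32_t uAddr = htonl(0xC0A80001); /* 192.168.0.1 -> 0x0100A8C0        */
    Assert(ntohs(uPort) == 0x1234);
    Assert(ntohl(uAddr) == 0xC0A80001);
}
#endif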
169
170#ifndef DEBUG
171# ifdef E1K_REL_DEBUG
172# define DEBUG
173# define E1kLog(a) LogRel(a)
174# define E1kLog2(a) LogRel(a)
175# define E1kLog3(a) LogRel(a)
176# define E1kLogX(x, a) LogRel(a)
177//# define E1kLog3(a) do {} while (0)
178# else
179# define E1kLog(a) do {} while (0)
180# define E1kLog2(a) do {} while (0)
181# define E1kLog3(a) do {} while (0)
182# define E1kLogX(x, a) do {} while (0)
183# endif
184#else
185# define E1kLog(a) Log(a)
186# define E1kLog2(a) Log2(a)
187# define E1kLog3(a) Log3(a)
188# define E1kLogX(x, a) LogIt(x, LOG_GROUP, a)
189//# define E1kLog(a) do {} while (0)
190//# define E1kLog2(a) do {} while (0)
191//# define E1kLog3(a) do {} while (0)
192#endif
193
194#if 0
195# define LOG_ENABLED
196# define E1kLogRel(a) LogRel(a)
197# undef Log6
198# define Log6(a) LogRel(a)
199#else
200# define E1kLogRel(a) do { } while (0)
201#endif
202
203//#undef DEBUG
204
205#define STATE_TO_DEVINS(pThis) (((PE1KSTATE )pThis)->CTX_SUFF(pDevIns))
206#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
207
208#define E1K_INC_CNT32(cnt) \
209do { \
210 if (cnt < UINT32_MAX) \
211 cnt++; \
212} while (0)
213
214#define E1K_ADD_CNT64(cntLo, cntHi, val) \
215do { \
216 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
217 uint64_t tmp = u64Cnt; \
218 u64Cnt += val; \
219 if (tmp > u64Cnt ) \
220 u64Cnt = UINT64_MAX; \
221 cntLo = (uint32_t)u64Cnt; \
222 cntHi = (uint32_t)(u64Cnt >> 32); \
223} while (0)
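/* Illustration only (not part of the original source): E1K_ADD_CNT64 keeps a
 * 64-bit statistic split across two 32-bit registers and saturates at
 * UINT64_MAX instead of wrapping. A hypothetical use, accounting a received
 * frame into the Good Octets Received counter pair GORCL/GORCH: */
#if 0
static void e1kIllustrateAddCnt64(PE1KSTATE pThis, uint32_t cbFrame)
{
    E1K_ADD_CNT64(GORCL, GORCH, cbFrame); /* low half in GORCL, high half in GORCH */
}
#endif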
224
225#ifdef E1K_INT_STATS
226# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
227#else /* E1K_INT_STATS */
228# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
229#endif /* E1K_INT_STATS */
230
231
232/*****************************************************************************/
233
234typedef uint32_t E1KCHIP;
235#define E1K_CHIP_82540EM 0
236#define E1K_CHIP_82543GC 1
237#define E1K_CHIP_82545EM 2
238
239#ifdef IN_RING3
240/** Different E1000 chips. */
241static const struct E1kChips
242{
243 uint16_t uPCIVendorId;
244 uint16_t uPCIDeviceId;
245 uint16_t uPCISubsystemVendorId;
246 uint16_t uPCISubsystemId;
247 const char *pcszName;
248} g_aChips[] =
249{
250 /* Vendor Device SSVendor SubSys Name */
251 { 0x8086,
252 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
253# ifdef E1K_WITH_MSI
254 0x105E,
255# else
256 0x100E,
257# endif
258 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
259 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
260 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
261};
262#endif /* IN_RING3 */
263
264
265/* The size of register area mapped to I/O space */
266#define E1K_IOPORT_SIZE 0x8
267/* The size of memory-mapped register area */
268#define E1K_MM_SIZE 0x20000
269
270#define E1K_MAX_TX_PKT_SIZE 16288
271#define E1K_MAX_RX_PKT_SIZE 16384
272
273/*****************************************************************************/
274
275/** Gets the specified bits from the register. */
276#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
277#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
278#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
279#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
280#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
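/* Illustration only (not part of the original source): with the EERD field
 * definitions below, GET_BITS(EERD, DATA) reads the DATA field of the EERD
 * register, i.e. it expands to (EERD & EERD_DATA_MASK) >> EERD_DATA_SHIFT,
 * while SET_BITS(EERD, DATA, u16Word) replaces just that field:
 *   EERD = (EERD & ~EERD_DATA_MASK) | (u16Word << EERD_DATA_SHIFT); */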
281
282#define CTRL_SLU UINT32_C(0x00000040)
283#define CTRL_MDIO UINT32_C(0x00100000)
284#define CTRL_MDC UINT32_C(0x00200000)
285#define CTRL_MDIO_DIR UINT32_C(0x01000000)
286#define CTRL_MDC_DIR UINT32_C(0x02000000)
287#define CTRL_RESET UINT32_C(0x04000000)
288#define CTRL_VME UINT32_C(0x40000000)
289
290#define STATUS_LU UINT32_C(0x00000002)
291#define STATUS_TXOFF UINT32_C(0x00000010)
292
293#define EECD_EE_WIRES UINT32_C(0x0F)
294#define EECD_EE_REQ UINT32_C(0x40)
295#define EECD_EE_GNT UINT32_C(0x80)
296
297#define EERD_START UINT32_C(0x00000001)
298#define EERD_DONE UINT32_C(0x00000010)
299#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
300#define EERD_DATA_SHIFT 16
301#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
302#define EERD_ADDR_SHIFT 8
303
304#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
305#define MDIC_DATA_SHIFT 0
306#define MDIC_REG_MASK UINT32_C(0x001F0000)
307#define MDIC_REG_SHIFT 16
308#define MDIC_PHY_MASK UINT32_C(0x03E00000)
309#define MDIC_PHY_SHIFT 21
310#define MDIC_OP_WRITE UINT32_C(0x04000000)
311#define MDIC_OP_READ UINT32_C(0x08000000)
312#define MDIC_READY UINT32_C(0x10000000)
313#define MDIC_INT_EN UINT32_C(0x20000000)
314#define MDIC_ERROR UINT32_C(0x40000000)
315
316#define TCTL_EN UINT32_C(0x00000002)
317#define TCTL_PSP UINT32_C(0x00000008)
318
319#define RCTL_EN UINT32_C(0x00000002)
320#define RCTL_UPE UINT32_C(0x00000008)
321#define RCTL_MPE UINT32_C(0x00000010)
322#define RCTL_LPE UINT32_C(0x00000020)
323#define RCTL_LBM_MASK UINT32_C(0x000000C0)
324#define RCTL_LBM_SHIFT 6
325#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
326#define RCTL_RDMTS_SHIFT 8
327#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
328#define RCTL_MO_MASK UINT32_C(0x00003000)
329#define RCTL_MO_SHIFT 12
330#define RCTL_BAM UINT32_C(0x00008000)
331#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
332#define RCTL_BSIZE_SHIFT 16
333#define RCTL_VFE UINT32_C(0x00040000)
334#define RCTL_CFIEN UINT32_C(0x00080000)
335#define RCTL_CFI UINT32_C(0x00100000)
336#define RCTL_BSEX UINT32_C(0x02000000)
337#define RCTL_SECRC UINT32_C(0x04000000)
338
339#define ICR_TXDW UINT32_C(0x00000001)
340#define ICR_TXQE UINT32_C(0x00000002)
341#define ICR_LSC UINT32_C(0x00000004)
342#define ICR_RXDMT0 UINT32_C(0x00000010)
343#define ICR_RXT0 UINT32_C(0x00000080)
344#define ICR_TXD_LOW UINT32_C(0x00008000)
345#define RDTR_FPD UINT32_C(0x80000000)
346
347#define PBA_st ((PBAST*)(pThis->auRegs + PBA_IDX))
348typedef struct
349{
350 unsigned rxa : 7;
351 unsigned rxa_r : 9;
352 unsigned txa : 16;
353} PBAST;
354AssertCompileSize(PBAST, 4);
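/* Illustration only (not part of the original source): the PBA_st macro above
 * overlays the PBAST bit-field on the 32-bit PBA register, so PBA_st->rxa and
 * PBA_st->txa read the receive and transmit buffer allocation fields of the
 * same register. */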
355
356#define TXDCTL_WTHRESH_MASK 0x003F0000
357#define TXDCTL_WTHRESH_SHIFT 16
358#define TXDCTL_LWTHRESH_MASK 0xFE000000
359#define TXDCTL_LWTHRESH_SHIFT 25
360
361#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
362#define RXCSUM_PCSS_SHIFT 0
363
364/** @name Register access macros
365 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
366 * @{ */
367#define CTRL pThis->auRegs[CTRL_IDX]
368#define STATUS pThis->auRegs[STATUS_IDX]
369#define EECD pThis->auRegs[EECD_IDX]
370#define EERD pThis->auRegs[EERD_IDX]
371#define CTRL_EXT pThis->auRegs[CTRL_EXT_IDX]
372#define FLA pThis->auRegs[FLA_IDX]
373#define MDIC pThis->auRegs[MDIC_IDX]
374#define FCAL pThis->auRegs[FCAL_IDX]
375#define FCAH pThis->auRegs[FCAH_IDX]
376#define FCT pThis->auRegs[FCT_IDX]
377#define VET pThis->auRegs[VET_IDX]
378#define ICR pThis->auRegs[ICR_IDX]
379#define ITR pThis->auRegs[ITR_IDX]
380#define ICS pThis->auRegs[ICS_IDX]
381#define IMS pThis->auRegs[IMS_IDX]
382#define IMC pThis->auRegs[IMC_IDX]
383#define RCTL pThis->auRegs[RCTL_IDX]
384#define FCTTV pThis->auRegs[FCTTV_IDX]
385#define TXCW pThis->auRegs[TXCW_IDX]
386#define RXCW pThis->auRegs[RXCW_IDX]
387#define TCTL pThis->auRegs[TCTL_IDX]
388#define TIPG pThis->auRegs[TIPG_IDX]
389#define AIFS pThis->auRegs[AIFS_IDX]
390#define LEDCTL pThis->auRegs[LEDCTL_IDX]
391#define PBA pThis->auRegs[PBA_IDX]
392#define FCRTL pThis->auRegs[FCRTL_IDX]
393#define FCRTH pThis->auRegs[FCRTH_IDX]
394#define RDFH pThis->auRegs[RDFH_IDX]
395#define RDFT pThis->auRegs[RDFT_IDX]
396#define RDFHS pThis->auRegs[RDFHS_IDX]
397#define RDFTS pThis->auRegs[RDFTS_IDX]
398#define RDFPC pThis->auRegs[RDFPC_IDX]
399#define RDBAL pThis->auRegs[RDBAL_IDX]
400#define RDBAH pThis->auRegs[RDBAH_IDX]
401#define RDLEN pThis->auRegs[RDLEN_IDX]
402#define RDH pThis->auRegs[RDH_IDX]
403#define RDT pThis->auRegs[RDT_IDX]
404#define RDTR pThis->auRegs[RDTR_IDX]
405#define RXDCTL pThis->auRegs[RXDCTL_IDX]
406#define RADV pThis->auRegs[RADV_IDX]
407#define RSRPD pThis->auRegs[RSRPD_IDX]
408#define TXDMAC pThis->auRegs[TXDMAC_IDX]
409#define TDFH pThis->auRegs[TDFH_IDX]
410#define TDFT pThis->auRegs[TDFT_IDX]
411#define TDFHS pThis->auRegs[TDFHS_IDX]
412#define TDFTS pThis->auRegs[TDFTS_IDX]
413#define TDFPC pThis->auRegs[TDFPC_IDX]
414#define TDBAL pThis->auRegs[TDBAL_IDX]
415#define TDBAH pThis->auRegs[TDBAH_IDX]
416#define TDLEN pThis->auRegs[TDLEN_IDX]
417#define TDH pThis->auRegs[TDH_IDX]
418#define TDT pThis->auRegs[TDT_IDX]
419#define TIDV pThis->auRegs[TIDV_IDX]
420#define TXDCTL pThis->auRegs[TXDCTL_IDX]
421#define TADV pThis->auRegs[TADV_IDX]
422#define TSPMT pThis->auRegs[TSPMT_IDX]
423#define CRCERRS pThis->auRegs[CRCERRS_IDX]
424#define ALGNERRC pThis->auRegs[ALGNERRC_IDX]
425#define SYMERRS pThis->auRegs[SYMERRS_IDX]
426#define RXERRC pThis->auRegs[RXERRC_IDX]
427#define MPC pThis->auRegs[MPC_IDX]
428#define SCC pThis->auRegs[SCC_IDX]
429#define ECOL pThis->auRegs[ECOL_IDX]
430#define MCC pThis->auRegs[MCC_IDX]
431#define LATECOL pThis->auRegs[LATECOL_IDX]
432#define COLC pThis->auRegs[COLC_IDX]
433#define DC pThis->auRegs[DC_IDX]
434#define TNCRS pThis->auRegs[TNCRS_IDX]
435/* #define SEC pThis->auRegs[SEC_IDX] Conflict with sys/time.h */
436#define CEXTERR pThis->auRegs[CEXTERR_IDX]
437#define RLEC pThis->auRegs[RLEC_IDX]
438#define XONRXC pThis->auRegs[XONRXC_IDX]
439#define XONTXC pThis->auRegs[XONTXC_IDX]
440#define XOFFRXC pThis->auRegs[XOFFRXC_IDX]
441#define XOFFTXC pThis->auRegs[XOFFTXC_IDX]
442#define FCRUC pThis->auRegs[FCRUC_IDX]
443#define PRC64 pThis->auRegs[PRC64_IDX]
444#define PRC127 pThis->auRegs[PRC127_IDX]
445#define PRC255 pThis->auRegs[PRC255_IDX]
446#define PRC511 pThis->auRegs[PRC511_IDX]
447#define PRC1023 pThis->auRegs[PRC1023_IDX]
448#define PRC1522 pThis->auRegs[PRC1522_IDX]
449#define GPRC pThis->auRegs[GPRC_IDX]
450#define BPRC pThis->auRegs[BPRC_IDX]
451#define MPRC pThis->auRegs[MPRC_IDX]
452#define GPTC pThis->auRegs[GPTC_IDX]
453#define GORCL pThis->auRegs[GORCL_IDX]
454#define GORCH pThis->auRegs[GORCH_IDX]
455#define GOTCL pThis->auRegs[GOTCL_IDX]
456#define GOTCH pThis->auRegs[GOTCH_IDX]
457#define RNBC pThis->auRegs[RNBC_IDX]
458#define RUC pThis->auRegs[RUC_IDX]
459#define RFC pThis->auRegs[RFC_IDX]
460#define ROC pThis->auRegs[ROC_IDX]
461#define RJC pThis->auRegs[RJC_IDX]
462#define MGTPRC pThis->auRegs[MGTPRC_IDX]
463#define MGTPDC pThis->auRegs[MGTPDC_IDX]
464#define MGTPTC pThis->auRegs[MGTPTC_IDX]
465#define TORL pThis->auRegs[TORL_IDX]
466#define TORH pThis->auRegs[TORH_IDX]
467#define TOTL pThis->auRegs[TOTL_IDX]
468#define TOTH pThis->auRegs[TOTH_IDX]
469#define TPR pThis->auRegs[TPR_IDX]
470#define TPT pThis->auRegs[TPT_IDX]
471#define PTC64 pThis->auRegs[PTC64_IDX]
472#define PTC127 pThis->auRegs[PTC127_IDX]
473#define PTC255 pThis->auRegs[PTC255_IDX]
474#define PTC511 pThis->auRegs[PTC511_IDX]
475#define PTC1023 pThis->auRegs[PTC1023_IDX]
476#define PTC1522 pThis->auRegs[PTC1522_IDX]
477#define MPTC pThis->auRegs[MPTC_IDX]
478#define BPTC pThis->auRegs[BPTC_IDX]
479#define TSCTC pThis->auRegs[TSCTC_IDX]
480#define TSCTFC pThis->auRegs[TSCTFC_IDX]
481#define RXCSUM pThis->auRegs[RXCSUM_IDX]
482#define WUC pThis->auRegs[WUC_IDX]
483#define WUFC pThis->auRegs[WUFC_IDX]
484#define WUS pThis->auRegs[WUS_IDX]
485#define MANC pThis->auRegs[MANC_IDX]
486#define IPAV pThis->auRegs[IPAV_IDX]
487#define WUPL pThis->auRegs[WUPL_IDX]
488/** @} */
489
490/**
491 * Indices of memory-mapped registers in register table.
492 */
493typedef enum
494{
495 CTRL_IDX,
496 STATUS_IDX,
497 EECD_IDX,
498 EERD_IDX,
499 CTRL_EXT_IDX,
500 FLA_IDX,
501 MDIC_IDX,
502 FCAL_IDX,
503 FCAH_IDX,
504 FCT_IDX,
505 VET_IDX,
506 ICR_IDX,
507 ITR_IDX,
508 ICS_IDX,
509 IMS_IDX,
510 IMC_IDX,
511 RCTL_IDX,
512 FCTTV_IDX,
513 TXCW_IDX,
514 RXCW_IDX,
515 TCTL_IDX,
516 TIPG_IDX,
517 AIFS_IDX,
518 LEDCTL_IDX,
519 PBA_IDX,
520 FCRTL_IDX,
521 FCRTH_IDX,
522 RDFH_IDX,
523 RDFT_IDX,
524 RDFHS_IDX,
525 RDFTS_IDX,
526 RDFPC_IDX,
527 RDBAL_IDX,
528 RDBAH_IDX,
529 RDLEN_IDX,
530 RDH_IDX,
531 RDT_IDX,
532 RDTR_IDX,
533 RXDCTL_IDX,
534 RADV_IDX,
535 RSRPD_IDX,
536 TXDMAC_IDX,
537 TDFH_IDX,
538 TDFT_IDX,
539 TDFHS_IDX,
540 TDFTS_IDX,
541 TDFPC_IDX,
542 TDBAL_IDX,
543 TDBAH_IDX,
544 TDLEN_IDX,
545 TDH_IDX,
546 TDT_IDX,
547 TIDV_IDX,
548 TXDCTL_IDX,
549 TADV_IDX,
550 TSPMT_IDX,
551 CRCERRS_IDX,
552 ALGNERRC_IDX,
553 SYMERRS_IDX,
554 RXERRC_IDX,
555 MPC_IDX,
556 SCC_IDX,
557 ECOL_IDX,
558 MCC_IDX,
559 LATECOL_IDX,
560 COLC_IDX,
561 DC_IDX,
562 TNCRS_IDX,
563 SEC_IDX,
564 CEXTERR_IDX,
565 RLEC_IDX,
566 XONRXC_IDX,
567 XONTXC_IDX,
568 XOFFRXC_IDX,
569 XOFFTXC_IDX,
570 FCRUC_IDX,
571 PRC64_IDX,
572 PRC127_IDX,
573 PRC255_IDX,
574 PRC511_IDX,
575 PRC1023_IDX,
576 PRC1522_IDX,
577 GPRC_IDX,
578 BPRC_IDX,
579 MPRC_IDX,
580 GPTC_IDX,
581 GORCL_IDX,
582 GORCH_IDX,
583 GOTCL_IDX,
584 GOTCH_IDX,
585 RNBC_IDX,
586 RUC_IDX,
587 RFC_IDX,
588 ROC_IDX,
589 RJC_IDX,
590 MGTPRC_IDX,
591 MGTPDC_IDX,
592 MGTPTC_IDX,
593 TORL_IDX,
594 TORH_IDX,
595 TOTL_IDX,
596 TOTH_IDX,
597 TPR_IDX,
598 TPT_IDX,
599 PTC64_IDX,
600 PTC127_IDX,
601 PTC255_IDX,
602 PTC511_IDX,
603 PTC1023_IDX,
604 PTC1522_IDX,
605 MPTC_IDX,
606 BPTC_IDX,
607 TSCTC_IDX,
608 TSCTFC_IDX,
609 RXCSUM_IDX,
610 WUC_IDX,
611 WUFC_IDX,
612 WUS_IDX,
613 MANC_IDX,
614 IPAV_IDX,
615 WUPL_IDX,
616 MTA_IDX,
617 RA_IDX,
618 VFTA_IDX,
619 IP4AT_IDX,
620 IP6AT_IDX,
621 WUPM_IDX,
622 FFLT_IDX,
623 FFMT_IDX,
624 FFVT_IDX,
625 PBM_IDX,
626 RA_82542_IDX,
627 MTA_82542_IDX,
628 VFTA_82542_IDX,
629 E1K_NUM_OF_REGS
630} E1kRegIndex;
631
632#define E1K_NUM_OF_32BIT_REGS MTA_IDX
633/** The number of registers with strictly increasing offset. */
634#define E1K_NUM_OF_BINARY_SEARCHABLE (WUPL_IDX + 1)
635
636
637/**
638 * Define E1000-specific EEPROM layout.
639 */
640struct E1kEEPROM
641{
642 public:
643 EEPROM93C46 eeprom;
644
645#ifdef IN_RING3
646 /**
647 * Initialize EEPROM content.
648 *
649 * @param macAddr MAC address of E1000.
650 */
651 void init(RTMAC &macAddr)
652 {
653 eeprom.init();
654 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
655 eeprom.m_au16Data[0x04] = 0xFFFF;
656 /*
657 * bit 3 - full support for power management
658 * bit 10 - full duplex
659 */
660 eeprom.m_au16Data[0x0A] = 0x4408;
661 eeprom.m_au16Data[0x0B] = 0x001E;
662 eeprom.m_au16Data[0x0C] = 0x8086;
663 eeprom.m_au16Data[0x0D] = 0x100E;
664 eeprom.m_au16Data[0x0E] = 0x8086;
665 eeprom.m_au16Data[0x0F] = 0x3040;
666 eeprom.m_au16Data[0x21] = 0x7061;
667 eeprom.m_au16Data[0x22] = 0x280C;
668 eeprom.m_au16Data[0x23] = 0x00C8;
669 eeprom.m_au16Data[0x24] = 0x00C8;
670 eeprom.m_au16Data[0x2F] = 0x0602;
671 updateChecksum();
672 };
673
674 /**
675 * Compute the checksum as required by E1000 and store it
676 * in the last word.
677 */
678 void updateChecksum()
679 {
680 uint16_t u16Checksum = 0;
681
682 for (int i = 0; i < eeprom.SIZE-1; i++)
683 u16Checksum += eeprom.m_au16Data[i];
684 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
685 };
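 /* Illustration only (not part of the original source): updateChecksum()
  * stores 0xBABA minus the sum of words 0..SIZE-2 in the last word, so the
  * 16-bit sum of all words ends up equal to 0xBABA. A hypothetical self-check: */
#if 0
 bool verifyChecksum()
 {
     uint16_t u16Sum = 0;
     for (int i = 0; i < eeprom.SIZE; i++)
         u16Sum += eeprom.m_au16Data[i];
     return u16Sum == 0xBABA;
 }
#endif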
686
687 /**
688 * The first 6 bytes of the EEPROM contain the MAC address.
689 *
690 * @param pMac Where to store the MAC address of the E1000.
691 */
692 void getMac(PRTMAC pMac)
693 {
694 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
695 };
696
697 uint32_t read()
698 {
699 return eeprom.read();
700 }
701
702 void write(uint32_t u32Wires)
703 {
704 eeprom.write(u32Wires);
705 }
706
707 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
708 {
709 return eeprom.readWord(u32Addr, pu16Value);
710 }
711
712 int load(PSSMHANDLE pSSM)
713 {
714 return eeprom.load(pSSM);
715 }
716
717 void save(PSSMHANDLE pSSM)
718 {
719 eeprom.save(pSSM);
720 }
721#endif /* IN_RING3 */
722};
723
724
725#define E1K_SPEC_VLAN(s) (s & 0xFFF)
726#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
727#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
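/* Illustration only (not part of the original source): decoding the "special"
 * field of a VLAN tag with the macros above. 0xA064 carries priority 5,
 * CFI 0 and VLAN id 0x064 (100). */
#if 0
static void e1kIllustrateVlanSpecial(void)
{
    uint16_t u16Special = 0xA064;
    Assert(E1K_SPEC_VLAN(u16Special) == 0x064); /* bits 0-11  */
    Assert(E1K_SPEC_CFI(u16Special)  == 0);     /* bit 12     */
    Assert(E1K_SPEC_PRI(u16Special)  == 5);     /* bits 13-15 */
}
#endif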
728
729struct E1kRxDStatus
730{
731 /** @name Descriptor Status field (3.2.3.1)
732 * @{ */
733 unsigned fDD : 1; /**< Descriptor Done. */
734 unsigned fEOP : 1; /**< End of packet. */
735 unsigned fIXSM : 1; /**< Ignore checksum indication. */
736 unsigned fVP : 1; /**< VLAN, matches VET. */
737 unsigned : 1;
738 unsigned fTCPCS : 1; /**< TCP Checksum calculated on the packet. */
739 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
740 unsigned fPIF : 1; /**< Passed in-exact filter */
741 /** @} */
742 /** @name Descriptor Errors field (3.2.3.2)
743 * (Only valid when fEOP and fDD are set.)
744 * @{ */
745 unsigned fCE : 1; /**< CRC or alignment error. */
746 unsigned : 4; /**< Reserved, varies with different models... */
747 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
748 unsigned fIPE : 1; /**< IP Checksum error. */
749 unsigned fRXE : 1; /**< RX Data error. */
750 /** @} */
751 /** @name Descriptor Special field (3.2.3.3)
752 * @{ */
753 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
754 /** @} */
755};
756typedef struct E1kRxDStatus E1KRXDST;
757
758struct E1kRxDesc_st
759{
760 uint64_t u64BufAddr; /**< Address of data buffer */
761 uint16_t u16Length; /**< Length of data in buffer */
762 uint16_t u16Checksum; /**< Packet checksum */
763 E1KRXDST status;
764};
765typedef struct E1kRxDesc_st E1KRXDESC;
766AssertCompileSize(E1KRXDESC, 16);
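/* Illustration only (not part of the original source): each RX descriptor is
 * 16 bytes, so descriptor i of the ring lives at the guest-physical address
 * RDBAH:RDBAL + i * sizeof(E1KRXDESC), with RDLEN giving the ring size in
 * bytes. A hypothetical helper (assumes RDLEN has been programmed): */
#if 0
static RTGCPHYS e1kIllustrateRxDescAddr(PE1KSTATE pThis, uint32_t iDesc)
{
    RTGCPHYS GCPhysBase   = ((RTGCPHYS)RDBAH << 32) | RDBAL;
    uint32_t cDescInRing  = RDLEN / sizeof(E1KRXDESC);
    return GCPhysBase + (iDesc % cDescInRing) * sizeof(E1KRXDESC);
}
#endif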
767
768#define E1K_DTYP_LEGACY -1
769#define E1K_DTYP_CONTEXT 0
770#define E1K_DTYP_DATA 1
771
772struct E1kTDLegacy
773{
774 uint64_t u64BufAddr; /**< Address of data buffer */
775 struct TDLCmd_st
776 {
777 unsigned u16Length : 16;
778 unsigned u8CSO : 8;
779 /* CMD field : 8 */
780 unsigned fEOP : 1;
781 unsigned fIFCS : 1;
782 unsigned fIC : 1;
783 unsigned fRS : 1;
784 unsigned fRPS : 1;
785 unsigned fDEXT : 1;
786 unsigned fVLE : 1;
787 unsigned fIDE : 1;
788 } cmd;
789 struct TDLDw3_st
790 {
791 /* STA field */
792 unsigned fDD : 1;
793 unsigned fEC : 1;
794 unsigned fLC : 1;
795 unsigned fTURSV : 1;
796 /* RSV field */
797 unsigned u4RSV : 4;
798 /* CSS field */
799 unsigned u8CSS : 8;
800 /* Special field*/
801 unsigned u16Special: 16;
802 } dw3;
803};
804
805/**
806 * TCP/IP Context Transmit Descriptor, section 3.3.6.
807 */
808struct E1kTDContext
809{
810 struct CheckSum_st
811 {
812 /** TSE: Header start. !TSE: Checksum start. */
813 unsigned u8CSS : 8;
814 /** Checksum offset - where to store it. */
815 unsigned u8CSO : 8;
816 /** Checksum ending (inclusive) offset, 0 = end of packet. */
817 unsigned u16CSE : 16;
818 } ip;
819 struct CheckSum_st tu;
820 struct TDCDw2_st
821 {
822 /** TSE: The total number of payload bytes for this context. Sans header. */
823 unsigned u20PAYLEN : 20;
824 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
825 unsigned u4DTYP : 4;
826 /** TUCMD field, 8 bits
827 * @{ */
828 /** TSE: TCP (set) or UDP (clear). */
829 unsigned fTCP : 1;
830 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
831 * the IP header. Does not affect the checksumming.
832 * @remarks 82544GC/EI interprets a cleared field differently. */
833 unsigned fIP : 1;
834 /** TSE: TCP segmentation enable. When clear the context describes a plain (checksum offload only) transmission. */
835 unsigned fTSE : 1;
836 /** Report status (only dw3.fDD applies here). */
837 unsigned fRS : 1;
838 /** Reserved, MBZ. */
839 unsigned fRSV1 : 1;
840 /** Descriptor extension, must be set for this descriptor type. */
841 unsigned fDEXT : 1;
842 /** Reserved, MBZ. */
843 unsigned fRSV2 : 1;
844 /** Interrupt delay enable. */
845 unsigned fIDE : 1;
846 /** @} */
847 } dw2;
848 struct TDCDw3_st
849 {
850 /** Descriptor Done. */
851 unsigned fDD : 1;
852 /** Reserved, MBZ. */
853 unsigned u7RSV : 7;
854 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
855 unsigned u8HDRLEN : 8;
856 /** TSO: Maximum segment size. */
857 unsigned u16MSS : 16;
858 } dw3;
859};
860typedef struct E1kTDContext E1KTXCTX;
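/* Illustration only (not part of the original source): for a TSE context the
 * device produces ceil(PAYLEN / MSS) frames, each consisting of the u8HDRLEN
 * prototype header plus up to MSS payload bytes (the last frame carries the
 * remainder). A hypothetical helper, assuming a non-zero MSS: */
#if 0
static uint32_t e1kIllustrateTseFrameCount(E1KTXCTX const *pContext)
{
    uint32_t cbPayload = pContext->dw2.u20PAYLEN;
    uint32_t cbMss     = pContext->dw3.u16MSS;
    return (cbPayload + cbMss - 1) / cbMss;
}
#endif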
861
862/**
863 * TCP/IP Data Transmit Descriptor, section 3.3.7.
864 */
865struct E1kTDData
866{
867 uint64_t u64BufAddr; /**< Address of data buffer */
868 struct TDDCmd_st
869 {
870 /** The total length of data pointed to by this descriptor. */
871 unsigned u20DTALEN : 20;
872 /** The descriptor type - E1K_DTYP_DATA (1). */
873 unsigned u4DTYP : 4;
874 /** @name DCMD field, 8 bits (3.3.7.1).
875 * @{ */
876 /** End of packet. Note TSCTFC update. */
877 unsigned fEOP : 1;
878 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
879 unsigned fIFCS : 1;
880 /** Use the TSE context when set and the normal when clear. */
881 unsigned fTSE : 1;
882 /** Report status (dw3.STA). */
883 unsigned fRS : 1;
884 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
885 unsigned fRPS : 1;
886 /** Descriptor extension, must be set for this descriptor type. */
887 unsigned fDEXT : 1;
888 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
889 * Insert dw3.SPECIAL after ethernet header. */
890 unsigned fVLE : 1;
891 /** Interrupt delay enable. */
892 unsigned fIDE : 1;
893 /** @} */
894 } cmd;
895 struct TDDDw3_st
896 {
897 /** @name STA field (3.3.7.2)
898 * @{ */
899 unsigned fDD : 1; /**< Descriptor done. */
900 unsigned fEC : 1; /**< Excess collision. */
901 unsigned fLC : 1; /**< Late collision. */
902 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
903 unsigned fTURSV : 1;
904 /** @} */
905 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
906 /** @name POPTS (Packet Option) field (3.3.7.3)
907 * @{ */
908 unsigned fIXSM : 1; /**< Insert IP checksum. */
909 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
910 unsigned u6RSV : 6; /**< Reserved, MBZ. */
911 /** @} */
912 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
913 * Requires fEOP, fVLE and CTRL.VME to be set.
914 * @{ */
915 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
916 /** @} */
917 } dw3;
918};
919typedef struct E1kTDData E1KTXDAT;
920
921union E1kTxDesc
922{
923 struct E1kTDLegacy legacy;
924 struct E1kTDContext context;
925 struct E1kTDData data;
926};
927typedef union E1kTxDesc E1KTXDESC;
928AssertCompileSize(E1KTXDESC, 16);
929
930#define RA_CTL_AS 0x0003
931#define RA_CTL_AV 0x8000
932
933union E1kRecAddr
934{
935 uint32_t au32[32];
936 struct RAArray
937 {
938 uint8_t addr[6];
939 uint16_t ctl;
940 } array[16];
941};
942typedef struct E1kRecAddr::RAArray E1KRAELEM;
943typedef union E1kRecAddr E1KRA;
944AssertCompileSize(E1KRA, 8*16);
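/* Illustration only (not part of the original source): a unicast filter match
 * checks every Receive Address entry whose control word has the Address Valid
 * bit set against the destination MAC. A hypothetical helper: */
#if 0
static bool e1kIllustrateRaMatch(E1KRA const *pRA, const uint8_t *pbDstMac)
{
    for (unsigned i = 0; i < RT_ELEMENTS(pRA->array); i++)
        if (   (pRA->array[i].ctl & RA_CTL_AV)
            && memcmp(pRA->array[i].addr, pbDstMac, sizeof(pRA->array[i].addr)) == 0)
            return true;
    return false;
}
#endif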
945
946#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
947#define E1K_IP_DF UINT16_C(0x4000) /**< don't fragment flag */
948#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
949#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
950
951/** @todo use+extend RTNETIPV4 */
952struct E1kIpHeader
953{
954 /* type of service / version / header length */
955 uint16_t tos_ver_hl;
956 /* total length */
957 uint16_t total_len;
958 /* identification */
959 uint16_t ident;
960 /* fragment offset field */
961 uint16_t offset;
962 /* time to live / protocol*/
963 uint16_t ttl_proto;
964 /* checksum */
965 uint16_t chksum;
966 /* source IP address */
967 uint32_t src;
968 /* destination IP address */
969 uint32_t dest;
970};
971AssertCompileSize(struct E1kIpHeader, 20);
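/* Illustration only (not part of the original source): the IPv4 header checksum
 * the device inserts on behalf of the guest is the ones' complement of the
 * ones' complement sum of the header words, with the checksum field itself
 * excluded from the sum. A hypothetical helper: */
#if 0
static uint16_t e1kIllustrateIpChecksum(struct E1kIpHeader const *pIpHdr)
{
    uint32_t u32Sum = 0;
    const uint16_t *pu16 = (const uint16_t *)pIpHdr;
    for (unsigned i = 0; i < sizeof(*pIpHdr) / sizeof(uint16_t); i++)
        if (pu16 + i != &pIpHdr->chksum) /* skip the checksum field itself */
            u32Sum += pu16[i];
    while (u32Sum >> 16)                 /* fold carries back into the low word */
        u32Sum = (u32Sum & 0xFFFF) + (u32Sum >> 16);
    return (uint16_t)~u32Sum;
}
#endif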
972
973#define E1K_TCP_FIN UINT16_C(0x01)
974#define E1K_TCP_SYN UINT16_C(0x02)
975#define E1K_TCP_RST UINT16_C(0x04)
976#define E1K_TCP_PSH UINT16_C(0x08)
977#define E1K_TCP_ACK UINT16_C(0x10)
978#define E1K_TCP_URG UINT16_C(0x20)
979#define E1K_TCP_ECE UINT16_C(0x40)
980#define E1K_TCP_CWR UINT16_C(0x80)
981#define E1K_TCP_FLAGS UINT16_C(0x3f)
982
983/** @todo use+extend RTNETTCP */
984struct E1kTcpHeader
985{
986 uint16_t src;
987 uint16_t dest;
988 uint32_t seqno;
989 uint32_t ackno;
990 uint16_t hdrlen_flags;
991 uint16_t wnd;
992 uint16_t chksum;
993 uint16_t urgp;
994};
995AssertCompileSize(struct E1kTcpHeader, 20);
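/* Illustration only (not part of the original source): once hdrlen_flags is
 * converted to host order, the data offset sits in the upper nibble and the
 * flags in the low byte, so the E1K_TCP_* masks apply directly. A hypothetical
 * helper recognizing a pure-ACK segment: */
#if 0
static bool e1kIllustrateIsPureAck(struct E1kTcpHeader const *pTcpHdr)
{
    uint16_t u16HdrLenFlags = ntohs(pTcpHdr->hdrlen_flags);
    return (u16HdrLenFlags & E1K_TCP_FLAGS) == E1K_TCP_ACK;
}
#endif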
996
997
998#ifdef E1K_WITH_TXD_CACHE
999/** The current Saved state version. */
1000# define E1K_SAVEDSTATE_VERSION 4
1001/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
1002# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
1003#else /* !E1K_WITH_TXD_CACHE */
1004/** The current Saved state version. */
1005# define E1K_SAVEDSTATE_VERSION 3
1006#endif /* !E1K_WITH_TXD_CACHE */
1007/** Saved state version for VirtualBox 4.1 and earlier.
1008 * These did not include VLAN tag fields. */
1009#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
1010/** Saved state version for VirtualBox 3.0 and earlier.
1011 * This did not include the configuration part nor the E1kEEPROM. */
1012#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
1013
1014/**
1015 * Device state structure.
1016 *
1017 * Holds the current state of the device.
1018 *
1019 * @implements PDMINETWORKDOWN
1020 * @implements PDMINETWORKCONFIG
1021 * @implements PDMILEDPORTS
1022 */
1023struct E1kState_st
1024{
1025 char szPrf[8]; /**< Log prefix, e.g. E1000#1. */
1026 PDMIBASE IBase;
1027 PDMINETWORKDOWN INetworkDown;
1028 PDMINETWORKCONFIG INetworkConfig;
1029 PDMILEDPORTS ILeds; /**< LED interface */
1030 R3PTRTYPE(PPDMIBASE) pDrvBase; /**< Attached network driver. */
1031 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1032
1033 PPDMDEVINSR3 pDevInsR3; /**< Device instance - R3. */
1034 R3PTRTYPE(PPDMQUEUE) pTxQueueR3; /**< Transmit queue - R3. */
1035 R3PTRTYPE(PPDMQUEUE) pCanRxQueueR3; /**< Rx wakeup signaller - R3. */
1036 PPDMINETWORKUPR3 pDrvR3; /**< Attached network driver - R3. */
1037 PTMTIMERR3 pRIDTimerR3; /**< Receive Interrupt Delay Timer - R3. */
1038 PTMTIMERR3 pRADTimerR3; /**< Receive Absolute Delay Timer - R3. */
1039 PTMTIMERR3 pTIDTimerR3; /**< Transmit Interrupt Delay Timer - R3. */
1040 PTMTIMERR3 pTADTimerR3; /**< Transmit Absolute Delay Timer - R3. */
1041 PTMTIMERR3 pTXDTimerR3; /**< Transmit Delay Timer - R3. */
1042 PTMTIMERR3 pIntTimerR3; /**< Late Interrupt Timer - R3. */
1043 PTMTIMERR3 pLUTimerR3; /**< Link Up(/Restore) Timer. */
1044 /** The scatter / gather buffer used for the current outgoing packet - R3. */
1045 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1046
1047 PPDMDEVINSR0 pDevInsR0; /**< Device instance - R0. */
1048 R0PTRTYPE(PPDMQUEUE) pTxQueueR0; /**< Transmit queue - R0. */
1049 R0PTRTYPE(PPDMQUEUE) pCanRxQueueR0; /**< Rx wakeup signaller - R0. */
1050 PPDMINETWORKUPR0 pDrvR0; /**< Attached network driver - R0. */
1051 PTMTIMERR0 pRIDTimerR0; /**< Receive Interrupt Delay Timer - R0. */
1052 PTMTIMERR0 pRADTimerR0; /**< Receive Absolute Delay Timer - R0. */
1053 PTMTIMERR0 pTIDTimerR0; /**< Transmit Interrupt Delay Timer - R0. */
1054 PTMTIMERR0 pTADTimerR0; /**< Transmit Absolute Delay Timer - R0. */
1055 PTMTIMERR0 pTXDTimerR0; /**< Transmit Delay Timer - R0. */
1056 PTMTIMERR0 pIntTimerR0; /**< Late Interrupt Timer - R0. */
1057 PTMTIMERR0 pLUTimerR0; /**< Link Up(/Restore) Timer - R0. */
1058 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1059 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1060
1061 PPDMDEVINSRC pDevInsRC; /**< Device instance - RC. */
1062 RCPTRTYPE(PPDMQUEUE) pTxQueueRC; /**< Transmit queue - RC. */
1063 RCPTRTYPE(PPDMQUEUE) pCanRxQueueRC; /**< Rx wakeup signaller - RC. */
1064 PPDMINETWORKUPRC pDrvRC; /**< Attached network driver - RC. */
1065 PTMTIMERRC pRIDTimerRC; /**< Receive Interrupt Delay Timer - RC. */
1066 PTMTIMERRC pRADTimerRC; /**< Receive Absolute Delay Timer - RC. */
1067 PTMTIMERRC pTIDTimerRC; /**< Transmit Interrupt Delay Timer - RC. */
1068 PTMTIMERRC pTADTimerRC; /**< Transmit Absolute Delay Timer - RC. */
1069 PTMTIMERRC pTXDTimerRC; /**< Transmit Delay Timer - RC. */
1070 PTMTIMERRC pIntTimerRC; /**< Late Interrupt Timer - RC. */
1071 PTMTIMERRC pLUTimerRC; /**< Link Up(/Restore) Timer - RC. */
1072 /** The scatter / gather buffer used for the current outgoing packet - RC. */
1073 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1074 RTRCPTR RCPtrAlignment;
1075
1076#if HC_ARCH_BITS != 32
1077 uint32_t Alignment1;
1078#endif
1079 PDMCRITSECT cs; /**< Critical section - what is it protecting? */
1080 PDMCRITSECT csRx; /**< RX Critical section. */
1081#ifdef E1K_WITH_TX_CS
1082 PDMCRITSECT csTx; /**< TX Critical section. */
1083#endif /* E1K_WITH_TX_CS */
1084 /** Base address of memory-mapped registers. */
1085 RTGCPHYS addrMMReg;
1086 /** MAC address obtained from the configuration. */
1087 RTMAC macConfigured;
1088 /** Base port of I/O space region. */
1089 RTIOPORT IOPortBase;
1090 /** EMT: */
1091 PDMPCIDEV pciDevice;
1092 /** EMT: Last time the interrupt was acknowledged. */
1093 uint64_t u64AckedAt;
1094 /** All: Used for eliminating spurious interrupts. */
1095 bool fIntRaised;
1096 /** EMT: false if the cable is disconnected by the GUI. */
1097 bool fCableConnected;
1098 /** EMT: */
1099 bool fR0Enabled;
1100 /** EMT: */
1101 bool fRCEnabled;
1102 /** EMT: Compute Ethernet CRC for RX packets. */
1103 bool fEthernetCRC;
1104 /** All: throttle interrupts. */
1105 bool fItrEnabled;
1106 /** All: throttle RX interrupts. */
1107 bool fItrRxEnabled;
1108 /** All: Delay TX interrupts using TIDV/TADV. */
1109 bool fTidEnabled;
1110 /** Link up delay (in milliseconds). */
1111 uint32_t cMsLinkUpDelay;
1112
1113 /** All: Device register storage. */
1114 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1115 /** TX/RX: Status LED. */
1116 PDMLED led;
1117 /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1118 uint32_t u32PktNo;
1119
1120 /** EMT: Offset of the register to be read via IO. */
1121 uint32_t uSelectedReg;
1122 /** EMT: Multicast Table Array. */
1123 uint32_t auMTA[128];
1124 /** EMT: Receive Address registers. */
1125 E1KRA aRecAddr;
1126 /** EMT: VLAN filter table array. */
1127 uint32_t auVFTA[128];
1128 /** EMT: Receive buffer size. */
1129 uint16_t u16RxBSize;
1130 /** EMT: Locked state -- no state alteration possible. */
1131 bool fLocked;
1132 /** EMT: */
1133 bool fDelayInts;
1134 /** All: */
1135 bool fIntMaskUsed;
1136
1137 /** N/A: */
1138 bool volatile fMaybeOutOfSpace;
1139 /** EMT: Gets signalled when more RX descriptors become available. */
1140 RTSEMEVENT hEventMoreRxDescAvail;
1141#ifdef E1K_WITH_RXD_CACHE
1142 /** RX: Fetched RX descriptors. */
1143 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1144 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1145 /** RX: Actual number of fetched RX descriptors. */
1146 uint32_t nRxDFetched;
1147 /** RX: Index in cache of RX descriptor being processed. */
1148 uint32_t iRxDCurrent;
1149#endif /* E1K_WITH_RXD_CACHE */
1150
1151 /** TX: Context used for TCP segmentation packets. */
1152 E1KTXCTX contextTSE;
1153 /** TX: Context used for ordinary packets. */
1154 E1KTXCTX contextNormal;
1155#ifdef E1K_WITH_TXD_CACHE
1156 /** TX: Fetched TX descriptors. */
1157 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1158 /** TX: Actual number of fetched TX descriptors. */
1159 uint8_t nTxDFetched;
1160 /** TX: Index in cache of TX descriptor being processed. */
1161 uint8_t iTxDCurrent;
1162 /** TX: Will this frame be sent as GSO. */
1163 bool fGSO;
1164 /** Alignment padding. */
1165 bool fReserved;
1166 /** TX: Number of bytes in next packet. */
1167 uint32_t cbTxAlloc;
1168
1169#endif /* E1K_WITH_TXD_CACHE */
1170 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1171 * applicable to the current TSE mode. */
1172 PDMNETWORKGSO GsoCtx;
1173 /** Scratch space for holding the loopback / fallback scatter / gather
1174 * descriptor. */
1175 union
1176 {
1177 PDMSCATTERGATHER Sg;
1178 uint8_t padding[8 * sizeof(RTUINTPTR)];
1179 } uTxFallback;
1180 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1181 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1182 /** TX: Number of bytes assembled in TX packet buffer. */
1183 uint16_t u16TxPktLen;
1184 /** TX: When false, forces segmentation in the E1000 instead of sending frames as GSO. */
1185 bool fGSOEnabled;
1186 /** TX: IP checksum has to be inserted if true. */
1187 bool fIPcsum;
1188 /** TX: TCP/UDP checksum has to be inserted if true. */
1189 bool fTCPcsum;
1190 /** TX: VLAN tag has to be inserted if true. */
1191 bool fVTag;
1192 /** TX: TCI part of VLAN tag to be inserted. */
1193 uint16_t u16VTagTCI;
1194 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1195 uint32_t u32PayRemain;
1196 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1197 uint16_t u16HdrRemain;
1198 /** TX TSE fallback: Flags from template header. */
1199 uint16_t u16SavedFlags;
1200 /** TX TSE fallback: Partial checksum from template header. */
1201 uint32_t u32SavedCsum;
1202 /** ?: Emulated controller type. */
1203 E1KCHIP eChip;
1204
1205 /** EMT: EEPROM emulation */
1206 E1kEEPROM eeprom;
1207 /** EMT: Physical interface emulation. */
1208 PHY phy;
1209
1210#if 0
1211 /** Alignment padding. */
1212 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1213#endif
1214
1215 STAMCOUNTER StatReceiveBytes;
1216 STAMCOUNTER StatTransmitBytes;
1217#if defined(VBOX_WITH_STATISTICS)
1218 STAMPROFILEADV StatMMIOReadRZ;
1219 STAMPROFILEADV StatMMIOReadR3;
1220 STAMPROFILEADV StatMMIOWriteRZ;
1221 STAMPROFILEADV StatMMIOWriteR3;
1222 STAMPROFILEADV StatEEPROMRead;
1223 STAMPROFILEADV StatEEPROMWrite;
1224 STAMPROFILEADV StatIOReadRZ;
1225 STAMPROFILEADV StatIOReadR3;
1226 STAMPROFILEADV StatIOWriteRZ;
1227 STAMPROFILEADV StatIOWriteR3;
1228 STAMPROFILEADV StatLateIntTimer;
1229 STAMCOUNTER StatLateInts;
1230 STAMCOUNTER StatIntsRaised;
1231 STAMCOUNTER StatIntsPrevented;
1232 STAMPROFILEADV StatReceive;
1233 STAMPROFILEADV StatReceiveCRC;
1234 STAMPROFILEADV StatReceiveFilter;
1235 STAMPROFILEADV StatReceiveStore;
1236 STAMPROFILEADV StatTransmitRZ;
1237 STAMPROFILEADV StatTransmitR3;
1238 STAMPROFILE StatTransmitSendRZ;
1239 STAMPROFILE StatTransmitSendR3;
1240 STAMPROFILE StatRxOverflow;
1241 STAMCOUNTER StatRxOverflowWakeup;
1242 STAMCOUNTER StatTxDescCtxNormal;
1243 STAMCOUNTER StatTxDescCtxTSE;
1244 STAMCOUNTER StatTxDescLegacy;
1245 STAMCOUNTER StatTxDescData;
1246 STAMCOUNTER StatTxDescTSEData;
1247 STAMCOUNTER StatTxPathFallback;
1248 STAMCOUNTER StatTxPathGSO;
1249 STAMCOUNTER StatTxPathRegular;
1250 STAMCOUNTER StatPHYAccesses;
1251 STAMCOUNTER aStatRegWrites[E1K_NUM_OF_REGS];
1252 STAMCOUNTER aStatRegReads[E1K_NUM_OF_REGS];
1253#endif /* VBOX_WITH_STATISTICS */
1254
1255#ifdef E1K_INT_STATS
1256 /* Internal stats */
1257 uint64_t u64ArmedAt;
1258 uint64_t uStatMaxTxDelay;
1259 uint32_t uStatInt;
1260 uint32_t uStatIntTry;
1261 uint32_t uStatIntLower;
1262 uint32_t uStatNoIntICR;
1263 int32_t iStatIntLost;
1264 int32_t iStatIntLostOne;
1265 uint32_t uStatIntIMS;
1266 uint32_t uStatIntSkip;
1267 uint32_t uStatIntLate;
1268 uint32_t uStatIntMasked;
1269 uint32_t uStatIntEarly;
1270 uint32_t uStatIntRx;
1271 uint32_t uStatIntTx;
1272 uint32_t uStatIntICS;
1273 uint32_t uStatIntRDTR;
1274 uint32_t uStatIntRXDMT0;
1275 uint32_t uStatIntTXQE;
1276 uint32_t uStatTxNoRS;
1277 uint32_t uStatTxIDE;
1278 uint32_t uStatTxDelayed;
1279 uint32_t uStatTxDelayExp;
1280 uint32_t uStatTAD;
1281 uint32_t uStatTID;
1282 uint32_t uStatRAD;
1283 uint32_t uStatRID;
1284 uint32_t uStatRxFrm;
1285 uint32_t uStatTxFrm;
1286 uint32_t uStatDescCtx;
1287 uint32_t uStatDescDat;
1288 uint32_t uStatDescLeg;
1289 uint32_t uStatTx1514;
1290 uint32_t uStatTx2962;
1291 uint32_t uStatTx4410;
1292 uint32_t uStatTx5858;
1293 uint32_t uStatTx7306;
1294 uint32_t uStatTx8754;
1295 uint32_t uStatTx16384;
1296 uint32_t uStatTx32768;
1297 uint32_t uStatTxLarge;
1298 uint32_t uStatAlign;
1299#endif /* E1K_INT_STATS */
1300};
1301typedef struct E1kState_st E1KSTATE;
1302/** Pointer to the E1000 device state. */
1303typedef E1KSTATE *PE1KSTATE;
1304
1305#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1306
1307/* Forward declarations ******************************************************/
1308static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread);
1309
1310static int e1kRegReadUnimplemented (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1311static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1312static int e1kRegReadAutoClear (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1313static int e1kRegReadDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1314static int e1kRegWriteDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1315#if 0 /* unused */
1316static int e1kRegReadCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1317#endif
1318static int e1kRegWriteCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1319static int e1kRegReadEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1320static int e1kRegWriteEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1321static int e1kRegWriteEERD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1322static int e1kRegWriteMDIC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1323static int e1kRegReadICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1324static int e1kRegWriteICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1325static int e1kRegWriteICS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1326static int e1kRegWriteIMS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1327static int e1kRegWriteIMC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1328static int e1kRegWriteRCTL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1329static int e1kRegWritePBA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1330static int e1kRegWriteRDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1331static int e1kRegWriteRDTR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1332static int e1kRegWriteTDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1333static int e1kRegReadMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1334static int e1kRegWriteMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1335static int e1kRegReadRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1336static int e1kRegWriteRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1337static int e1kRegReadVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1338static int e1kRegWriteVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1339
1340/**
1341 * Register map table.
1342 *
1343 * Override pfnRead and pfnWrite to get register-specific behavior.
1344 */
1345static const struct E1kRegMap_st
1346{
1347 /** Register offset in the register space. */
1348 uint32_t offset;
1349 /** Size in bytes. Registers of size > 4 are in fact tables. */
1350 uint32_t size;
1351 /** Readable bits. */
1352 uint32_t readable;
1353 /** Writable bits. */
1354 uint32_t writable;
1355 /** Read callback. */
1356 int (*pfnRead)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1357 /** Write callback. */
1358 int (*pfnWrite)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1359 /** Abbreviated name. */
1360 const char *abbrev;
1361 /** Full name. */
1362 const char *name;
1363} g_aE1kRegMap[E1K_NUM_OF_REGS] =
1364{
1365 /* offset size read mask write mask read callback write callback abbrev full name */
1366 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1367 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1368 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1369 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1370 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1371 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1372 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1373 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1374 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1375 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1376 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1377 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1378 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1379 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1380 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1381 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1382 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1383 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1384 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1385 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1386 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1387 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1388 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1389 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1390 { 0x00e00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LEDCTL" , "LED Control" },
1391 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1392 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1393 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1394 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1395 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1396 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1397 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1398 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1399 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1400 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1401 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1402 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1403 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1404 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1405 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1406 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1407 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1408 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1409 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1410 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1411 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1412 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1413 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1414 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1415 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1416 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1417 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1418 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1419 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1420 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1421 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1422 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1423 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1424 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1425 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1426 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1427 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1428 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1429 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1430 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1431 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1432 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1433 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1434 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1435 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1436 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1437 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1438 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1439 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1440 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1441 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1442 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1443 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1444 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1445 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1446 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1447 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1448 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1449 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1450 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1451 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1452 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1453 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1454 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1455 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1456 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1457 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1458 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1459 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1460 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1461 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1462 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1463 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1464 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1465 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1466 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1467 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1468 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1469 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1470 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1471 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1472 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1473 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1474 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1475 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1476 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1477 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1478 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1479 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1480 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1481 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1482 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1483 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1484 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1485 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1486 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1487 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1488 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1489 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1490 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1491 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1492 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1493 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1494 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1495 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1496 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1497 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1498 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
1499 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
1500 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
1501};
1502
1503#ifdef LOG_ENABLED
1504
1505/**
1506 * Convert U32 value to hex string. Masked bytes are replaced with dots.
1507 *
1508 * @remarks The mask has byte (not bit) granularity (e.g. 000000FF).
1509 *
1510 * @returns The buffer.
1511 *
1512 * @param u32      The value to convert into a string.
1513 * @param mask Selects which bytes to convert.
1514 * @param buf Where to put the result.
1515 */
1516static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1517{
1518 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1519 {
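        /* Convert one nibble: values 10..15 yield 'A'..'F' since '7' + 10 == 'A'. */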
1520 if (mask & 0xF)
1521 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1522 else
1523 *ptr = '.';
1524 }
1525 buf[8] = 0;
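    /* E.g. u32=0x12AB34CD with mask=0x00FF00FF produces "..AB..CD". */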
1526 return buf;
1527}
1528
1529/**
1530 * Returns timer name for debug purposes.
1531 *
1532 * @returns The timer name.
1533 *
1534 * @param pThis The device state structure.
1535 * @param pTimer The timer to get the name for.
1536 */
1537DECLINLINE(const char *) e1kGetTimerName(PE1KSTATE pThis, PTMTIMER pTimer)
1538{
1539 if (pTimer == pThis->CTX_SUFF(pTIDTimer))
1540 return "TID";
1541 if (pTimer == pThis->CTX_SUFF(pTADTimer))
1542 return "TAD";
1543 if (pTimer == pThis->CTX_SUFF(pRIDTimer))
1544 return "RID";
1545 if (pTimer == pThis->CTX_SUFF(pRADTimer))
1546 return "RAD";
1547 if (pTimer == pThis->CTX_SUFF(pIntTimer))
1548 return "Int";
1549 if (pTimer == pThis->CTX_SUFF(pTXDTimer))
1550 return "TXD";
1551 if (pTimer == pThis->CTX_SUFF(pLUTimer))
1552 return "LinkUp";
1553 return "unknown";
1554}
1555
1556#endif /* LOG_ENABLED */
1557
1558/**
1559 * Arm a timer.
1560 *
1561 * @param pThis Pointer to the device state structure.
1562 * @param pTimer Pointer to the timer.
1563 * @param uExpireIn Expiration interval in microseconds.
1564 */
1565DECLINLINE(void) e1kArmTimer(PE1KSTATE pThis, PTMTIMER pTimer, uint32_t uExpireIn)
1566{
1567 if (pThis->fLocked)
1568 return;
1569
1570 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1571 pThis->szPrf, e1kGetTimerName(pThis, pTimer), uExpireIn));
1572 TMTimerSetMicro(pTimer, uExpireIn);
1573}
1574
1575#ifdef IN_RING3
1576/**
1577 * Cancel a timer.
1578 *
1579 * @param pThis Pointer to the device state structure.
1580 * @param pTimer Pointer to the timer.
1581 */
1582DECLINLINE(void) e1kCancelTimer(PE1KSTATE pThis, PTMTIMER pTimer)
1583{
1584 E1kLog2(("%s Stopping %s timer...\n",
1585 pThis->szPrf, e1kGetTimerName(pThis, pTimer)));
1586 int rc = TMTimerStop(pTimer);
1587 if (RT_FAILURE(rc))
1588 E1kLog2(("%s e1kCancelTimer: TMTimerStop() failed with %Rrc\n",
1589 pThis->szPrf, rc));
1590 RT_NOREF1(pThis);
1591}
1592#endif /* IN_RING3 */
1593
1594#define e1kCsEnter(ps, rc) PDMCritSectEnter(&ps->cs, rc)
1595#define e1kCsLeave(ps) PDMCritSectLeave(&ps->cs)
1596
1597#define e1kCsRxEnter(ps, rc) PDMCritSectEnter(&ps->csRx, rc)
1598#define e1kCsRxLeave(ps) PDMCritSectLeave(&ps->csRx)
1599#define e1kCsRxIsOwner(ps) PDMCritSectIsOwner(&ps->csRx)
1600
1601#ifndef E1K_WITH_TX_CS
1602# define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1603# define e1kCsTxLeave(ps) do { } while (0)
1604#else /* E1K_WITH_TX_CS */
1605# define e1kCsTxEnter(ps, rc) PDMCritSectEnter(&ps->csTx, rc)
1606# define e1kCsTxLeave(ps) PDMCritSectLeave(&ps->csTx)
1607#endif /* E1K_WITH_TX_CS */
1608
1609#ifdef IN_RING3
1610
1611/**
1612 * Wakeup the RX thread.
1613 */
1614static void e1kWakeupReceive(PPDMDEVINS pDevIns)
1615{
1616 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
1617 if ( pThis->fMaybeOutOfSpace
1618 && pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
1619 {
1620 STAM_COUNTER_INC(&pThis->StatRxOverflowWakeup);
1621 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", pThis->szPrf));
1622 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
1623 }
1624}
1625
1626/**
1627 * Hardware reset. Revert all registers to initial values.
1628 *
1629 * @param pThis The device state structure.
1630 */
1631static void e1kHardReset(PE1KSTATE pThis)
1632{
1633 E1kLog(("%s Hard reset triggered\n", pThis->szPrf));
1634 memset(pThis->auRegs, 0, sizeof(pThis->auRegs));
1635 memset(pThis->aRecAddr.au32, 0, sizeof(pThis->aRecAddr.au32));
1636#ifdef E1K_INIT_RA0
1637 memcpy(pThis->aRecAddr.au32, pThis->macConfigured.au8,
1638 sizeof(pThis->macConfigured.au8));
1639 pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;
1640#endif /* E1K_INIT_RA0 */
1641 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1642 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1643 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1644 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1645 Assert(GET_BITS(RCTL, BSIZE) == 0);
1646 pThis->u16RxBSize = 2048;
1647
1648 /* Reset promiscuous mode */
1649 if (pThis->pDrvR3)
1650 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, false);
1651
1652#ifdef E1K_WITH_TXD_CACHE
1653 int rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
1654 if (RT_LIKELY(rc == VINF_SUCCESS))
1655 {
1656 pThis->nTxDFetched = 0;
1657 pThis->iTxDCurrent = 0;
1658 pThis->fGSO = false;
1659 pThis->cbTxAlloc = 0;
1660 e1kCsTxLeave(pThis);
1661 }
1662#endif /* E1K_WITH_TXD_CACHE */
1663#ifdef E1K_WITH_RXD_CACHE
1664 if (RT_LIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1665 {
1666 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
1667 e1kCsRxLeave(pThis);
1668 }
1669#endif /* E1K_WITH_RXD_CACHE */
1670}
1671
1672#endif /* IN_RING3 */
1673
1674/**
1675 * Compute the Internet checksum of a buffer.
1676 *
1677 * @remarks Refer to http://www.netfor2.com/checksum.html for a short intro.
1678 *
1679 * @param   pvBuf       The buffer to checksum.
1680 * @param   cb          The size of the buffer in bytes.
1683 *
1684 * @return The 1's complement of the 1's complement sum.
1685 *
1686 * @thread E1000_TX
1687 */
1688static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1689{
1690 uint32_t csum = 0;
1691 uint16_t *pu16 = (uint16_t *)pvBuf;
1692
1693 while (cb > 1)
1694 {
1695 csum += *pu16++;
1696 cb -= 2;
1697 }
1698 if (cb)
1699 csum += *(uint8_t*)pu16;
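    /* Fold any carry out of the upper 16 bits back into the lower 16 bits (end-around carry). */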
1700 while (csum >> 16)
1701 csum = (csum >> 16) + (csum & 0xFFFF);
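    /* E.g. two words 0x4500 and 0xB861 sum to 0xFD61, so the function returns 0x029E (= ~0xFD61). */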
1702 return ~csum;
1703}
1704
1705/**
1706 * Dump a packet to debug log.
1707 *
1708 * @param pThis The device state structure.
1709 * @param cpPacket The packet.
1710 * @param cb The size of the packet.
1711 * @param pszText A string denoting direction of packet transfer.
1712 * @thread E1000_TX
1713 */
1714DECLINLINE(void) e1kPacketDump(PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *pszText)
1715{
1716#ifdef DEBUG
1717 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1718 {
1719 Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
1720 pThis->szPrf, pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cb));
1721 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1722 {
1723 Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
1724 pThis->szPrf, cpPacket+14+8, cpPacket+14+24));
1725 if (*(cpPacket+14+6) == 0x6)
1726 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1727 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1728 }
1729 else if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x800)
1730 {
1731 Log4(("%s --- IPv4: %RTnaipv4 => %RTnaipv4\n",
1732 pThis->szPrf, *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16)));
1733 if (*(cpPacket+14+6) == 0x6)
1734 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1735 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1736 }
1737 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1738 e1kCsLeave(pThis);
1739 }
1740#else
1741 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1742 {
1743 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1744 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
1745 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cpPacket+14+8, cpPacket+14+24,
1746 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1747 else
1748 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
1749 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket,
1750 *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16),
1751 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1752 e1kCsLeave(pThis);
1753 }
1754 RT_NOREF2(cb, pszText);
1755#endif
1756}
1757
1758/**
1759 * Determine the type of transmit descriptor.
1760 *
1761 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1762 *
1763 * @param pDesc Pointer to descriptor union.
1764 * @thread E1000_TX
1765 */
1766DECLINLINE(int) e1kGetDescType(E1KTXDESC *pDesc)
1767{
1768 if (pDesc->legacy.cmd.fDEXT)
1769 return pDesc->context.dw2.u4DTYP;
1770 return E1K_DTYP_LEGACY;
1771}
1772
1773
1774#if defined(E1K_WITH_RXD_CACHE) && defined(IN_RING3) /* currently only used in ring-3 due to stack space requirements of the caller */
1775/**
1776 * Dump receive descriptor to debug log.
1777 *
1778 * @param pThis The device state structure.
1779 * @param pDesc Pointer to the descriptor.
1780 * @thread E1000_RX
1781 */
1782static void e1kPrintRDesc(PE1KSTATE pThis, E1KRXDESC *pDesc)
1783{
1784 RT_NOREF2(pThis, pDesc);
1785 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", pThis->szPrf, pDesc->u16Length));
1786 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1787 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1788 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1789 pDesc->status.fPIF ? "PIF" : "pif",
1790 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1791 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1792 pDesc->status.fVP ? "VP" : "vp",
1793 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1794 pDesc->status.fEOP ? "EOP" : "eop",
1795 pDesc->status.fDD ? "DD" : "dd",
1796 pDesc->status.fRXE ? "RXE" : "rxe",
1797 pDesc->status.fIPE ? "IPE" : "ipe",
1798 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1799 pDesc->status.fCE ? "CE" : "ce",
1800 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1801 E1K_SPEC_VLAN(pDesc->status.u16Special),
1802 E1K_SPEC_PRI(pDesc->status.u16Special)));
1803}
1804#endif /* E1K_WITH_RXD_CACHE && IN_RING3 */
1805
1806/**
1807 * Dump transmit descriptor to debug log.
1808 *
1809 * @param pThis The device state structure.
1810 * @param pDesc Pointer to descriptor union.
1811 * @param pszDir A string denoting direction of descriptor transfer
1812 * @thread E1000_TX
1813 */
1814static void e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, const char *pszDir,
1815 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
1816{
1817 RT_NOREF4(pThis, pDesc, pszDir, uLevel);
1818
1819 /*
1820 * Unfortunately we cannot use our format handler here, we want R0 logging
1821 * as well.
1822 */
1823 switch (e1kGetDescType(pDesc))
1824 {
1825 case E1K_DTYP_CONTEXT:
1826 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
1827 pThis->szPrf, pszDir, pszDir));
1828 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
1829 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
1830 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
1831 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
1832 pDesc->context.dw2.fIDE ? " IDE":"",
1833 pDesc->context.dw2.fRS ? " RS" :"",
1834 pDesc->context.dw2.fTSE ? " TSE":"",
1835 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
1836 pDesc->context.dw2.fTCP ? "TCP":"UDP",
1837 pDesc->context.dw2.u20PAYLEN,
1838 pDesc->context.dw3.u8HDRLEN,
1839 pDesc->context.dw3.u16MSS,
1840 pDesc->context.dw3.fDD?"DD":""));
1841 break;
1842 case E1K_DTYP_DATA:
1843 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
1844 pThis->szPrf, pszDir, pDesc->data.cmd.u20DTALEN, pszDir));
1845 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1846 pDesc->data.u64BufAddr,
1847 pDesc->data.cmd.u20DTALEN));
1848 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
1849 pDesc->data.cmd.fIDE ? " IDE" :"",
1850 pDesc->data.cmd.fVLE ? " VLE" :"",
1851 pDesc->data.cmd.fRPS ? " RPS" :"",
1852 pDesc->data.cmd.fRS ? " RS" :"",
1853 pDesc->data.cmd.fTSE ? " TSE" :"",
1854 pDesc->data.cmd.fIFCS? " IFCS":"",
1855 pDesc->data.cmd.fEOP ? " EOP" :"",
1856 pDesc->data.dw3.fDD ? " DD" :"",
1857 pDesc->data.dw3.fEC ? " EC" :"",
1858 pDesc->data.dw3.fLC ? " LC" :"",
1859 pDesc->data.dw3.fTXSM? " TXSM":"",
1860 pDesc->data.dw3.fIXSM? " IXSM":"",
1861 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
1862 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
1863 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
1864 break;
1865 case E1K_DTYP_LEGACY:
1866 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
1867 pThis->szPrf, pszDir, pDesc->legacy.cmd.u16Length, pszDir));
1868 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1869 pDesc->data.u64BufAddr,
1870 pDesc->legacy.cmd.u16Length));
1871 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
1872 pDesc->legacy.cmd.fIDE ? " IDE" :"",
1873 pDesc->legacy.cmd.fVLE ? " VLE" :"",
1874 pDesc->legacy.cmd.fRPS ? " RPS" :"",
1875 pDesc->legacy.cmd.fRS ? " RS" :"",
1876 pDesc->legacy.cmd.fIC ? " IC" :"",
1877 pDesc->legacy.cmd.fIFCS? " IFCS":"",
1878 pDesc->legacy.cmd.fEOP ? " EOP" :"",
1879 pDesc->legacy.dw3.fDD ? " DD" :"",
1880 pDesc->legacy.dw3.fEC ? " EC" :"",
1881 pDesc->legacy.dw3.fLC ? " LC" :"",
1882 pDesc->legacy.cmd.u8CSO,
1883 pDesc->legacy.dw3.u8CSS,
1884 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
1885 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
1886 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
1887 break;
1888 default:
1889 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
1890 pThis->szPrf, pszDir, pszDir));
1891 break;
1892 }
1893}
1894
1895/**
1896 * Raise an interrupt later.
1897 *
1898 * @param   pThis           The device state structure.
 * @param   uNanoseconds    Delay before the interrupt is raised, in nanoseconds.
1899 */
1900inline void e1kPostponeInterrupt(PE1KSTATE pThis, uint64_t uNanoseconds)
1901{
1902 if (!TMTimerIsActive(pThis->CTX_SUFF(pIntTimer)))
1903 TMTimerSetNano(pThis->CTX_SUFF(pIntTimer), uNanoseconds);
1904}
1905
1906/**
1907 * Raise an interrupt if it is not masked.
1908 *
1909 * @param   pThis           The device state structure.
 * @param   rcBusy          Status code to return when the critical section is busy.
 * @param   u32IntCause     The interrupt cause bit(s) to set in ICR.
1910 */
1911static int e1kRaiseInterrupt(PE1KSTATE pThis, int rcBusy, uint32_t u32IntCause = 0)
1912{
1913 int rc = e1kCsEnter(pThis, rcBusy);
1914 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1915 return rc;
1916
1917 E1K_INC_ISTAT_CNT(pThis->uStatIntTry);
1918 ICR |= u32IntCause;
1919 if (ICR & IMS)
1920 {
1921 if (pThis->fIntRaised)
1922 {
1923 E1K_INC_ISTAT_CNT(pThis->uStatIntSkip);
1924 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
1925 pThis->szPrf, ICR & IMS));
1926 }
1927 else
1928 {
1929 uint64_t tsNow = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
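            /* ITR is in 256 ns units: postpone the interrupt if it would fire sooner than ITR * 256 ns after the last acknowledged one. */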
1930 if (!!ITR && tsNow - pThis->u64AckedAt < ITR * 256
1931 && pThis->fItrEnabled && (pThis->fItrRxEnabled || !(ICR & ICR_RXT0)))
1932 {
1933 E1K_INC_ISTAT_CNT(pThis->uStatIntEarly);
1934 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
1935 pThis->szPrf, (uint32_t)(tsNow - pThis->u64AckedAt), ITR * 256));
1936 e1kPostponeInterrupt(pThis, ITR * 256);
1937 }
1938 else
1939 {
1940
1941 /* Since we are delivering the interrupt now
1942 * there is no need to do it later -- stop the timer.
1943 */
1944 TMTimerStop(pThis->CTX_SUFF(pIntTimer));
1945 E1K_INC_ISTAT_CNT(pThis->uStatInt);
1946 STAM_COUNTER_INC(&pThis->StatIntsRaised);
1947 /* Got at least one unmasked interrupt cause */
1948 pThis->fIntRaised = true;
1949 /* Raise(1) INTA(0) */
1950 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
1951 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 1);
1952 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
1953 pThis->szPrf, ICR & IMS));
1954 }
1955 }
1956 }
1957 else
1958 {
1959 E1K_INC_ISTAT_CNT(pThis->uStatIntMasked);
1960 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
1961 pThis->szPrf, ICR, IMS));
1962 }
1963 e1kCsLeave(pThis);
1964 return VINF_SUCCESS;
1965}
1966
1967/**
1968 * Compute the physical address of the descriptor.
1969 *
1970 * @returns the physical address of the descriptor.
1971 *
1972 * @param baseHigh High-order 32 bits of descriptor table address.
1973 * @param baseLow Low-order 32 bits of descriptor table address.
1974 * @param idxDesc The descriptor index in the table.
1975 */
1976DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
1977{
1978 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
1979 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
1980}
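/* For example, baseHigh=0x1, baseLow=0x2000 and idxDesc=4 yield 0x100002040, as each descriptor occupies 16 bytes. */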
1981
1982#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
1983/**
1984 * Advance the head pointer of the receive descriptor queue.
1985 *
1986 * @remarks RDH always points to the next available RX descriptor.
1987 *
1988 * @param pThis The device state structure.
1989 */
1990DECLINLINE(void) e1kAdvanceRDH(PE1KSTATE pThis)
1991{
1992 Assert(e1kCsRxIsOwner(pThis));
1993 //e1kCsEnter(pThis, RT_SRC_POS);
1994 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
1995 RDH = 0;
1996 /*
1997 * Compute current receive queue length and fire RXDMT0 interrupt
1998 * if we are low on receive buffers
1999 */
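    /* The ring is circular: when the head has passed the tail, add the ring size (RDLEN / sizeof(E1KRXDESC) descriptors) to the difference. */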
2000 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
2001 /*
2002 * The minimum threshold is controlled by RDMTS bits of RCTL:
2003 * 00 = 1/2 of RDLEN
2004 * 01 = 1/4 of RDLEN
2005 * 10 = 1/8 of RDLEN
2006 * 11 = reserved
2007 */
2008 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
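    /* E.g. RDMTS=01b makes the threshold one quarter of the ring: RDLEN / sizeof(E1KRXDESC) / 4. */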
2009 if (uRQueueLen <= uMinRQThreshold)
2010 {
2011 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
2012 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
2013 pThis->szPrf, RDH, RDT, uRQueueLen, uMinRQThreshold));
2014 E1K_INC_ISTAT_CNT(pThis->uStatIntRXDMT0);
2015 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXDMT0);
2016 }
2017 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
2018 pThis->szPrf, RDH, RDT, uRQueueLen));
2019 //e1kCsLeave(pThis);
2020}
2021#endif /* IN_RING3 */
2022
2023#ifdef E1K_WITH_RXD_CACHE
2024
2025/**
2026 * Return the number of RX descriptors that belong to the hardware.
2027 *
2028 * @returns the number of available descriptors in RX ring.
2029 * @param pThis The device state structure.
2030 * @thread ???
2031 */
2032DECLINLINE(uint32_t) e1kGetRxLen(PE1KSTATE pThis)
2033{
2034    /*
2035     * Take a local copy of RDT since EMT may modify the register at
2036     * any moment.
2037 */
2038 uint32_t rdt = RDT;
2039 return (RDH > rdt ? RDLEN/sizeof(E1KRXDESC) : 0) + rdt - RDH;
2040}
2041
2042DECLINLINE(unsigned) e1kRxDInCache(PE1KSTATE pThis)
2043{
2044 return pThis->nRxDFetched > pThis->iRxDCurrent ?
2045 pThis->nRxDFetched - pThis->iRxDCurrent : 0;
2046}
2047
2048DECLINLINE(unsigned) e1kRxDIsCacheEmpty(PE1KSTATE pThis)
2049{
2050 return pThis->iRxDCurrent >= pThis->nRxDFetched;
2051}
2052
2053/**
2054 * Load receive descriptors from guest memory. The caller needs to be in Rx
2055 * critical section.
2056 *
2057 * We need two physical reads in case the tail wrapped around the end of RX
2058 * descriptor ring.
2059 *
2060 * @returns the actual number of descriptors fetched.
2061 * @param pThis The device state structure.
2064 * @thread EMT, RX
2065 */
2066DECLINLINE(unsigned) e1kRxDPrefetch(PE1KSTATE pThis)
2067{
2068 /* We've already loaded pThis->nRxDFetched descriptors past RDH. */
2069 unsigned nDescsAvailable = e1kGetRxLen(pThis) - e1kRxDInCache(pThis);
2070 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pThis->nRxDFetched);
2071 unsigned nDescsTotal = RDLEN / sizeof(E1KRXDESC);
2072 Assert(nDescsTotal != 0);
2073 if (nDescsTotal == 0)
2074 return 0;
2075 unsigned nFirstNotLoaded = (RDH + e1kRxDInCache(pThis)) % nDescsTotal;
2076 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
2077 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
2078 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
2079 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
2080 nFirstNotLoaded, nDescsInSingleRead));
2081 if (nDescsToFetch == 0)
2082 return 0;
2083 E1KRXDESC* pFirstEmptyDesc = &pThis->aRxDescriptors[pThis->nRxDFetched];
2084 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
2085 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
2086 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
2087 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
2088 // unsigned i, j;
2089 // for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
2090 // {
2091 // pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
2092 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2093 // }
2094 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
2095 pThis->szPrf, nDescsInSingleRead,
2096 RDBAH, RDBAL + RDH * sizeof(E1KRXDESC),
2097 nFirstNotLoaded, RDLEN, RDH, RDT));
2098 if (nDescsToFetch > nDescsInSingleRead)
2099 {
2100 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
2101 ((uint64_t)RDBAH << 32) + RDBAL,
2102 pFirstEmptyDesc + nDescsInSingleRead,
2103 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
2104 // Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
2105 // for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
2106 // {
2107 // pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
2108 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2109 // }
2110 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
2111 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
2112 RDBAH, RDBAL));
2113 }
2114 pThis->nRxDFetched += nDescsToFetch;
2115 return nDescsToFetch;
2116}
2117
2118# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2119
2120/**
2121 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2122 * RX ring if the cache is empty.
2123 *
2124 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2125 * go out of sync with RDH which will cause trouble when EMT checks if the
2126 * cache is empty to do pre-fetch @bugref{6217}.
2127 *
2128 * @param pThis The device state structure.
2129 * @thread RX
2130 */
2131DECLINLINE(E1KRXDESC*) e1kRxDGet(PE1KSTATE pThis)
2132{
2133 Assert(e1kCsRxIsOwner(pThis));
2134 /* Check the cache first. */
2135 if (pThis->iRxDCurrent < pThis->nRxDFetched)
2136 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2137 /* Cache is empty, reset it and check if we can fetch more. */
2138 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2139 if (e1kRxDPrefetch(pThis))
2140 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2141 /* Out of Rx descriptors. */
2142 return NULL;
2143}
2144
2145
2146/**
2147 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2148 * pointer. The descriptor gets written back to the RXD ring.
2149 *
2150 * @param pThis The device state structure.
2151 * @param pDesc The descriptor being "returned" to the RX ring.
2152 * @thread RX
2153 */
2154DECLINLINE(void) e1kRxDPut(PE1KSTATE pThis, E1KRXDESC* pDesc)
2155{
2156 Assert(e1kCsRxIsOwner(pThis));
2157 pThis->iRxDCurrent++;
2158 // Assert(pDesc >= pThis->aRxDescriptors);
2159 // Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2160 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2161 // uint32_t rdh = RDH;
2162 // Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
2163 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2164 e1kDescAddr(RDBAH, RDBAL, RDH),
2165 pDesc, sizeof(E1KRXDESC));
2166 e1kAdvanceRDH(pThis);
2167 e1kPrintRDesc(pThis, pDesc);
2168}
2169
2170/**
2171 * Store a fragment of a received packet at the specified address.
2172 *
2173 * @param pThis The device state structure.
2174 * @param pDesc The next available RX descriptor.
2175 * @param pvBuf The fragment.
2176 * @param cb The size of the fragment.
2177 */
2178static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2179{
2180 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2181 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2182 pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2183 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2184 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2185 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2186}
2187
2188# endif
2189
2190#else /* !E1K_WITH_RXD_CACHE */
2191
2192/**
2193 * Store a fragment of a received packet that fits into the next available RX
2194 * buffer.
2195 *
2196 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2197 *
2198 * @param pThis The device state structure.
2199 * @param pDesc The next available RX descriptor.
2200 * @param pvBuf The fragment.
2201 * @param cb The size of the fragment.
2202 */
2203static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2204{
2205 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2206 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2207 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2208 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2209 /* Write back the descriptor */
2210 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2211 e1kPrintRDesc(pThis, pDesc);
2212 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2213 /* Advance head */
2214 e1kAdvanceRDH(pThis);
2215 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
2216 if (pDesc->status.fEOP)
2217 {
2218 /* Complete packet has been stored -- it is time to let the guest know. */
2219#ifdef E1K_USE_RX_TIMERS
2220 if (RDTR)
2221 {
2222 /* Arm the timer to fire in RDTR usec (discard .024) */
2223 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2224 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2225 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2226 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2227 }
2228 else
2229 {
2230#endif
2231 /* 0 delay means immediate interrupt */
2232 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2233 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2234#ifdef E1K_USE_RX_TIMERS
2235 }
2236#endif
2237 }
2238 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2239}
2240
2241#endif /* !E1K_WITH_RXD_CACHE */
2242
2243/**
2244 * Returns true if it is a broadcast packet.
2245 *
2246 * @returns true if destination address indicates broadcast.
2247 * @param pvBuf The ethernet packet.
2248 */
2249DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2250{
2251 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2252 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2253}
2254
2255/**
2256 * Returns true if it is a multicast packet.
2257 *
2258 * @remarks returns true for broadcast packets as well.
2259 * @returns true if destination address indicates multicast.
2260 * @param pvBuf The ethernet packet.
2261 */
2262DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2263{
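    /* The least significant bit of the first destination octet is the I/G bit; it is set for group (multicast and broadcast) addresses. */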
2264 return (*(char*)pvBuf) & 1;
2265}
2266
2267#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2268/**
2269 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2270 *
2271 * @remarks We emulate checksum offloading for major packet types only.
2272 *
2273 * @returns VBox status code.
2274 * @param pThis The device state structure.
2275 * @param pFrame The available data.
2276 * @param cb Number of bytes available in the buffer.
2277 * @param pStatus Pointer to the packet status bit fields to update.
2278 */
2279static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2280{
2281 /** @todo
2282 * It is not safe to bypass checksum verification for packets coming
2283 * from the real wire. We are currently unable to tell where packets are
2284 * coming from, so we tell the driver to ignore our checksum flags
2285 * and do verification in software.
2286 */
2287# if 0
2288 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2289
2290 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", pThis->szPrf, uEtherType));
2291
2292 switch (uEtherType)
2293 {
2294 case 0x800: /* IPv4 */
2295 {
2296 pStatus->fIXSM = false;
2297 pStatus->fIPCS = true;
2298 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2299 /* TCP/UDP checksum offloading works with TCP and UDP only */
2300 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2301 break;
2302 }
2303 case 0x86DD: /* IPv6 */
2304 pStatus->fIXSM = false;
2305 pStatus->fIPCS = false;
2306 pStatus->fTCPCS = true;
2307 break;
2308 default: /* ARP, VLAN, etc. */
2309 pStatus->fIXSM = true;
2310 break;
2311 }
2312# else
2313 pStatus->fIXSM = true;
2314 RT_NOREF_PV(pThis); RT_NOREF_PV(pFrame); RT_NOREF_PV(cb);
2315# endif
2316 return VINF_SUCCESS;
2317}
2318#endif /* IN_RING3 */
2319
2320/**
2321 * Pad and store received packet.
2322 *
2323 * @remarks Make sure that the packet appears to the upper layer as one coming
2324 * from real Ethernet hardware: pad it and insert the FCS.
2325 *
2326 * @returns VBox status code.
2327 * @param pThis The device state structure.
2328 * @param pvBuf The available data.
2329 * @param cb Number of bytes available in the buffer.
2330 * @param status Bit fields containing status info.
2331 */
2332static int e1kHandleRxPacket(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
2333{
2334#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2335 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2336 uint8_t *ptr = rxPacket;
2337
2338 int rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2339 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2340 return rc;
2341
2342 if (cb > 70) /* unqualified guess */
2343 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
2344
2345 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2346 Assert(cb > 16);
2347 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2348 E1kLog3(("%s Max RX packet size is %u\n", pThis->szPrf, cbMax));
2349 if (status.fVP)
2350 {
2351 /* VLAN packet -- strip VLAN tag in VLAN mode */
2352 if ((CTRL & CTRL_VME) && cb > 16)
2353 {
2354 uint16_t *u16Ptr = (uint16_t*)pvBuf;
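            /* Words 6 and 7 of the frame hold the 802.1Q tag: the TPID (0x8100) followed by the TCI that is saved in u16Special below. */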
2355 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2356 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2357 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2358 cb -= 4;
2359 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2360 pThis->szPrf, status.u16Special, cb));
2361 }
2362 else
2363 status.fVP = false; /* Set VP only if we stripped the tag */
2364 }
2365 else
2366 memcpy(rxPacket, pvBuf, cb);
2367 /* Pad short packets */
2368 if (cb < 60)
2369 {
2370 memset(rxPacket + cb, 0, 60 - cb);
2371 cb = 60;
2372 }
2373 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2374 {
2375 STAM_PROFILE_ADV_START(&pThis->StatReceiveCRC, a);
2376 /*
2377 * Add FCS if CRC stripping is not enabled. Since the value of CRC
2378 * is ignored by most of drivers we may as well save us the trouble
2379 * of calculating it (see EthernetCRC CFGM parameter).
2380 */
2381 if (pThis->fEthernetCRC)
2382 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2383 cb += sizeof(uint32_t);
2384 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveCRC, a);
2385 E1kLog3(("%s Added FCS (cb=%u)\n", pThis->szPrf, cb));
2386 }
2387 /* Compute checksum of complete packet */
2388 uint16_t checksum = e1kCSum16(rxPacket + GET_BITS(RXCSUM, PCSS), cb);
2389 e1kRxChecksumOffload(pThis, rxPacket, cb, &status);
2390
2391 /* Update stats */
2392 E1K_INC_CNT32(GPRC);
2393 if (e1kIsBroadcast(pvBuf))
2394 E1K_INC_CNT32(BPRC);
2395 else if (e1kIsMulticast(pvBuf))
2396 E1K_INC_CNT32(MPRC);
2397 /* Update octet receive counter */
2398 E1K_ADD_CNT64(GORCL, GORCH, cb);
2399 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
2400 if (cb == 64)
2401 E1K_INC_CNT32(PRC64);
2402 else if (cb < 128)
2403 E1K_INC_CNT32(PRC127);
2404 else if (cb < 256)
2405 E1K_INC_CNT32(PRC255);
2406 else if (cb < 512)
2407 E1K_INC_CNT32(PRC511);
2408 else if (cb < 1024)
2409 E1K_INC_CNT32(PRC1023);
2410 else
2411 E1K_INC_CNT32(PRC1522);
2412
2413 E1K_INC_ISTAT_CNT(pThis->uStatRxFrm);
2414
2415# ifdef E1K_WITH_RXD_CACHE
2416 while (cb > 0)
2417 {
2418 E1KRXDESC *pDesc = e1kRxDGet(pThis);
2419
2420 if (pDesc == NULL)
2421 {
2422 E1kLog(("%s Out of receive buffers, dropping the packet "
2423 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2424 pThis->szPrf, cb, e1kRxDInCache(pThis), RDH, RDT));
2425 break;
2426 }
2427# else /* !E1K_WITH_RXD_CACHE */
2428 if (RDH == RDT)
2429 {
2430 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2431 pThis->szPrf));
2432 }
2433 /* Store the packet to receive buffers */
2434 while (RDH != RDT)
2435 {
2436 /* Load the descriptor pointed by head */
2437 E1KRXDESC desc, *pDesc = &desc;
2438 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
2439 &desc, sizeof(desc));
2440# endif /* !E1K_WITH_RXD_CACHE */
2441 if (pDesc->u64BufAddr)
2442 {
2443 /* Update descriptor */
2444 pDesc->status = status;
2445 pDesc->u16Checksum = checksum;
2446 pDesc->status.fDD = true;
2447
2448 /*
2449 * We need to leave Rx critical section here or we risk deadlocking
2450 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2451 * page or has an access handler associated with it.
2452 * Note that it is safe to leave the critical section here since
2453 * e1kRegWriteRDT() never modifies RDH. It never touches already
2454 * fetched RxD cache entries either.
2455 */
2456 if (cb > pThis->u16RxBSize)
2457 {
2458 pDesc->status.fEOP = false;
2459 e1kCsRxLeave(pThis);
2460 e1kStoreRxFragment(pThis, pDesc, ptr, pThis->u16RxBSize);
2461 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2462 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2463 return rc;
2464 ptr += pThis->u16RxBSize;
2465 cb -= pThis->u16RxBSize;
2466 }
2467 else
2468 {
2469 pDesc->status.fEOP = true;
2470 e1kCsRxLeave(pThis);
2471 e1kStoreRxFragment(pThis, pDesc, ptr, cb);
2472# ifdef E1K_WITH_RXD_CACHE
2473 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2474 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2475 return rc;
2476 cb = 0;
2477# else /* !E1K_WITH_RXD_CACHE */
2478 pThis->led.Actual.s.fReading = 0;
2479 return VINF_SUCCESS;
2480# endif /* !E1K_WITH_RXD_CACHE */
2481 }
2482 /*
2483 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2484 * is not defined.
2485 */
2486 }
2487# ifdef E1K_WITH_RXD_CACHE
2488 /* Write back the descriptor. */
2489 pDesc->status.fDD = true;
2490 e1kRxDPut(pThis, pDesc);
2491# else /* !E1K_WITH_RXD_CACHE */
2492 else
2493 {
2494 /* Write back the descriptor. */
2495 pDesc->status.fDD = true;
2496 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2497 e1kDescAddr(RDBAH, RDBAL, RDH),
2498 pDesc, sizeof(E1KRXDESC));
2499 e1kAdvanceRDH(pThis);
2500 }
2501# endif /* !E1K_WITH_RXD_CACHE */
2502 }
2503
2504 if (cb > 0)
2505        E1kLog(("%s Out of receive buffers, dropping %u bytes\n", pThis->szPrf, cb));
2506
2507 pThis->led.Actual.s.fReading = 0;
2508
2509 e1kCsRxLeave(pThis);
2510# ifdef E1K_WITH_RXD_CACHE
2511 /* Complete packet has been stored -- it is time to let the guest know. */
2512# ifdef E1K_USE_RX_TIMERS
2513 if (RDTR)
2514 {
2515 /* Arm the timer to fire in RDTR usec (discard .024) */
2516 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2517 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2518 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2519 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2520 }
2521 else
2522 {
2523# endif /* E1K_USE_RX_TIMERS */
2524 /* 0 delay means immediate interrupt */
2525 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2526 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2527# ifdef E1K_USE_RX_TIMERS
2528 }
2529# endif /* E1K_USE_RX_TIMERS */
2530# endif /* E1K_WITH_RXD_CACHE */
2531
2532 return VINF_SUCCESS;
2533#else /* !IN_RING3 */
2534 RT_NOREF_PV(pThis); RT_NOREF_PV(pvBuf); RT_NOREF_PV(cb); RT_NOREF_PV(status);
2535 return VERR_INTERNAL_ERROR_2;
2536#endif /* !IN_RING3 */
2537}
2538
2539
2540#ifdef IN_RING3
2541/**
2542 * Bring the link up after the configured delay, 5 seconds by default.
2543 *
2544 * @param pThis The device state structure.
2545 * @thread any
2546 */
2547DECLINLINE(void) e1kBringLinkUpDelayed(PE1KSTATE pThis)
2548{
2549 E1kLog(("%s Will bring up the link in %d seconds...\n",
2550 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
2551 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), pThis->cMsLinkUpDelay * 1000);
2552}
2553
2554/**
2555 * Bring up the link immediately.
2556 *
2557 * @param pThis The device state structure.
2558 */
2559DECLINLINE(void) e1kR3LinkUp(PE1KSTATE pThis)
2560{
2561 E1kLog(("%s Link is up\n", pThis->szPrf));
2562 STATUS |= STATUS_LU;
2563 Phy::setLinkStatus(&pThis->phy, true);
2564 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2565 if (pThis->pDrvR3)
2566 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_UP);
2567}
2568
2569/**
2570 * Bring down the link immediately.
2571 *
2572 * @param pThis The device state structure.
2573 */
2574DECLINLINE(void) e1kR3LinkDown(PE1KSTATE pThis)
2575{
2576 E1kLog(("%s Link is down\n", pThis->szPrf));
2577 STATUS &= ~STATUS_LU;
2578 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2579 if (pThis->pDrvR3)
2580 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2581}
2582
2583/**
2584 * Bring down the link temporarily.
2585 *
2586 * @param pThis The device state structure.
2587 */
2588DECLINLINE(void) e1kR3LinkDownTemp(PE1KSTATE pThis)
2589{
2590 E1kLog(("%s Link is down temporarily\n", pThis->szPrf));
2591 STATUS &= ~STATUS_LU;
2592 Phy::setLinkStatus(&pThis->phy, false);
2593 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2594 /*
2595 * Notifying the associated driver that the link went down (even temporarily)
2596 * seems to be the right thing, but it was not done before. This may cause
2597 * a regression if the driver does not expect the link to go down as a result
2598 * of sending PDMNETWORKLINKSTATE_DOWN_RESUME to this device. Earlier versions
2599 * of code notified the driver that the link was up! See @bugref{7057}.
2600 */
2601 if (pThis->pDrvR3)
2602 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2603 e1kBringLinkUpDelayed(pThis);
2604}
2605#endif /* IN_RING3 */
2606
2607#if 0 /* unused */
2608/**
2609 * Read handler for Device Control register.
2610 *
2611 * Get the link status from PHY.
2612 *
2613 * @returns VBox status code.
2614 *
2615 * @param pThis The device state structure.
2616 * @param offset Register offset in memory-mapped frame.
2617 * @param index Register index in register array.
2618 * @param pu32Value Where to store the value read.
2619 */
2620static int e1kRegReadCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2621{
2622 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2623 pThis->szPrf, (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2624 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2625 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2626 {
2627 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2628 if (Phy::readMDIO(&pThis->phy))
2629 *pu32Value = CTRL | CTRL_MDIO;
2630 else
2631 *pu32Value = CTRL & ~CTRL_MDIO;
2632 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2633 pThis->szPrf, !!(*pu32Value & CTRL_MDIO)));
2634 }
2635 else
2636 {
2637 /* MDIO pin is used for output, ignore it */
2638 *pu32Value = CTRL;
2639 }
2640 return VINF_SUCCESS;
2641}
2642#endif /* unused */
2643
2644/**
2645 * Write handler for Device Control register.
2646 *
2647 * Handles reset.
2648 *
2649 * @param pThis The device state structure.
2650 * @param offset Register offset in memory-mapped frame.
2651 * @param index Register index in register array.
2652 * @param value The value to store.
2654 * @thread EMT
2655 */
2656static int e1kRegWriteCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2657{
2658 int rc = VINF_SUCCESS;
2659
2660 if (value & CTRL_RESET)
2661 { /* RST */
2662#ifndef IN_RING3
2663 return VINF_IOM_R3_MMIO_WRITE;
2664#else
2665 e1kHardReset(pThis);
2666#endif
2667 }
2668 else
2669 {
2670 /*
2671 * When the guest changes 'Set Link Up' bit from 0 to 1 we check if
2672 * the link is down and the cable is connected, and if they are we
2673 * bring the link up, see @bugref{8624}.
2674 */
2675 if ( (value & CTRL_SLU)
2676 && !(CTRL & CTRL_SLU)
2677 && pThis->fCableConnected
2678 && !(STATUS & STATUS_LU))
2679 {
2680 /*
2681 * Arch Linux guests urge us to bring up the link immediately or risk
2682 * hitting Tx unit hang detection by the driver. The interrupt must
2683 * be delayed though, to avoid interrupt storms in Windows guests.
2684 * See @bugref{8624} for details. */
2685 STATUS |= STATUS_LU;
2686 Phy::setLinkStatus(&pThis->phy, true);
2687#ifdef E1K_INIT_LINKUP_DELAY_US
2688 /* We need to deliver a link-up interrupt for macOS guests. */
2689 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), E1K_INIT_LINKUP_DELAY_US);
2690#else /* !E1K_INIT_LINKUP_DELAY_US */
2691 /*
2692 * Raising an interrupt immediately may cause an interrupt storm in
2693 * Windows guests when the guest's driver enables interrupts from
2694 * e1000 during init. Usually the driver will have interrupts
2695 * disabled when it sets CTRL_SLU, so we won't actually raise an
2696 * interrupt here, but we will do it as soon as the guest enables
2697 * interrupts via IMS. See @bugref{8624} for details.
2698 */
2699 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2700#endif /* !E1K_INIT_LINKUP_DELAY_US */
2701 }
2702 if (value & CTRL_VME)
2703 {
2704 E1kLog(("%s VLAN Mode Enabled\n", pThis->szPrf));
2705 }
2706 E1kLog(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2707 pThis->szPrf, (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2708 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2709 if (value & CTRL_MDC)
2710 {
2711 if (value & CTRL_MDIO_DIR)
2712 {
2713 E1kLog(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2714 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2715 Phy::writeMDIO(&pThis->phy, !!(value & CTRL_MDIO));
2716 }
2717 else
2718 {
2719 if (Phy::readMDIO(&pThis->phy))
2720 value |= CTRL_MDIO;
2721 else
2722 value &= ~CTRL_MDIO;
2723 E1kLog(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n",
2724 pThis->szPrf, !!(value & CTRL_MDIO)));
2725 }
2726 }
2727 rc = e1kRegWriteDefault(pThis, offset, index, value);
2728 }
2729
2730 return rc;
2731}
2732
2733/**
2734 * Write handler for EEPROM/Flash Control/Data register.
2735 *
2736 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2737 *
2738 * @param pThis The device state structure.
2739 * @param offset Register offset in memory-mapped frame.
2740 * @param index Register index in register array.
2741 * @param value The value to store.
2743 * @thread EMT
2744 */
2745static int e1kRegWriteEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2746{
2747 RT_NOREF(offset, index);
2748#ifdef IN_RING3
2749 /* So far we are concerned with lower byte only */
2750 if ((EECD & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2751 {
2752 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2753 /* Note: 82543GC does not need to request EEPROM access */
2754 STAM_PROFILE_ADV_START(&pThis->StatEEPROMWrite, a);
2755 pThis->eeprom.write(value & EECD_EE_WIRES);
2756 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMWrite, a);
2757 }
2758 if (value & EECD_EE_REQ)
2759 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2760 else
2761 EECD &= ~EECD_EE_GNT;
2762 //e1kRegWriteDefault(pThis, offset, index, value );
2763
2764 return VINF_SUCCESS;
2765#else /* !IN_RING3 */
2766 RT_NOREF(pThis, value);
2767 return VINF_IOM_R3_MMIO_WRITE;
2768#endif /* !IN_RING3 */
2769}
2770
2771/**
2772 * Read handler for EEPROM/Flash Control/Data register.
2773 *
2774 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2775 *
2776 * @returns VBox status code.
2777 *
2778 * @param pThis The device state structure.
2779 * @param offset Register offset in memory-mapped frame.
2780 * @param index Register index in register array.
2781 * @param pu32Value Where to store the value read.
2782 * @thread EMT
2783 */
2784static int e1kRegReadEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2785{
2786#ifdef IN_RING3
2787 uint32_t value;
2788 int rc = e1kRegReadDefault(pThis, offset, index, &value);
2789 if (RT_SUCCESS(rc))
2790 {
2791 if ((value & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2792 {
2793 /* Note: 82543GC does not need to request EEPROM access */
2794 /* Access to EEPROM granted -- get 4-wire bits to EEPROM device */
2795 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2796 value |= pThis->eeprom.read();
2797 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2798 }
2799 *pu32Value = value;
2800 }
2801
2802 return rc;
2803#else /* !IN_RING3 */
2804 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(pu32Value);
2805 return VINF_IOM_R3_MMIO_READ;
2806#endif /* !IN_RING3 */
2807}
2808
2809/**
2810 * Write handler for EEPROM Read register.
2811 *
2812 * Handles EEPROM word access requests, reads EEPROM and stores the result
2813 * into DATA field.
2814 *
2815 * @param pThis The device state structure.
2816 * @param offset Register offset in memory-mapped frame.
2817 * @param index Register index in register array.
2818 * @param value The value to store.
2820 * @thread EMT
2821 */
2822static int e1kRegWriteEERD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2823{
2824#ifdef IN_RING3
2825 /* Make use of 'writable' and 'readable' masks. */
2826 e1kRegWriteDefault(pThis, offset, index, value);
2827 /* DONE and DATA are set only if read was triggered by START. */
2828 if (value & EERD_START)
2829 {
2830 uint16_t tmp;
2831 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2832 if (pThis->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2833 SET_BITS(EERD, DATA, tmp);
2834 EERD |= EERD_DONE;
2835 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2836 }
2837
2838 return VINF_SUCCESS;
2839#else /* !IN_RING3 */
2840 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
2841 return VINF_IOM_R3_MMIO_WRITE;
2842#endif /* !IN_RING3 */
2843}
2844
2845
2846/**
2847 * Write handler for MDI Control register.
2848 *
2849 * Handles PHY read/write requests; forwards requests to internal PHY device.
2850 *
2851 * @param pThis The device state structure.
2852 * @param offset Register offset in memory-mapped frame.
2853 * @param index Register index in register array.
2854 * @param value The value to store.
2856 * @thread EMT
2857 */
2858static int e1kRegWriteMDIC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2859{
2860 if (value & MDIC_INT_EN)
2861 {
2862 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
2863 pThis->szPrf));
2864 }
2865 else if (value & MDIC_READY)
2866 {
2867 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
2868 pThis->szPrf));
2869 }
2870 else if (GET_BITS_V(value, MDIC, PHY) != 1)
2871 {
2872 E1kLog(("%s WARNING! Access to invalid PHY detected, phy=%d.\n",
2873 pThis->szPrf, GET_BITS_V(value, MDIC, PHY)));
2874 /*
2875 * Some drivers scan the MDIO bus for a PHY. We can work with these
2876 * drivers if we set MDIC_READY and MDIC_ERROR when there isn't a PHY
2877 * at the requested address, see @bugref{7346}.
2878 */
2879 MDIC = MDIC_READY | MDIC_ERROR;
2880 }
2881 else
2882 {
2883 /* Store the value */
2884 e1kRegWriteDefault(pThis, offset, index, value);
2885 STAM_COUNTER_INC(&pThis->StatPHYAccesses);
2886 /* Forward op to PHY */
2887 if (value & MDIC_OP_READ)
2888 SET_BITS(MDIC, DATA, Phy::readRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG)));
2889 else
2890 Phy::writeRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK);
2891 /* Let software know that we are done */
2892 MDIC |= MDIC_READY;
2893 }
2894
2895 return VINF_SUCCESS;
2896}
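/*
 * Illustrative sketch (assumption about typical guest behaviour): a driver
 * accesses a PHY register by composing the OP, PHY and REG fields (plus DATA
 * for writes) in MDIC and then polling READY, roughly:
 *
 *   write32(MDIC, MDIC_OP_READ | (phyAddr << PHY_SHIFT) | (reg << REG_SHIFT));
 *   while (!(read32(MDIC) & MDIC_READY))
 *       ;
 *   data = read32(MDIC) & MDIC_DATA_MASK;
 *
 * PHY_SHIFT/REG_SHIFT are placeholder names. Since the handler above completes
 * the operation synchronously and sets MDIC_READY before returning, the guest's
 * first poll already succeeds.
 */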
2897
2898/**
2899 * Write handler for Interrupt Cause Read register.
2900 *
2901 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
2902 *
2903 * @param pThis The device state structure.
2904 * @param offset Register offset in memory-mapped frame.
2905 * @param index Register index in register array.
2906 * @param value The value to store.
2907 * @param mask Used to implement partial writes (8 and 16-bit).
2908 * @thread EMT
2909 */
2910static int e1kRegWriteICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2911{
2912 ICR &= ~value;
2913
2914 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index);
2915 return VINF_SUCCESS;
2916}
2917
2918/**
2919 * Read handler for Interrupt Cause Read register.
2920 *
2921 * Reading this register acknowledges all interrupts.
2922 *
2923 * @returns VBox status code.
2924 *
2925 * @param pThis The device state structure.
2926 * @param offset Register offset in memory-mapped frame.
2927 * @param index Register index in register array.
2928 * @param pu32Value Where to store the read value.
2929 * @thread EMT
2930 */
2931static int e1kRegReadICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2932{
2933 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_READ);
2934 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2935 return rc;
2936
2937 uint32_t value = 0;
2938 rc = e1kRegReadDefault(pThis, offset, index, &value);
2939 if (RT_SUCCESS(rc))
2940 {
2941 if (value)
2942 {
2943 if (!pThis->fIntRaised)
2944 E1K_INC_ISTAT_CNT(pThis->uStatNoIntICR);
2945 /*
2946 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
2947 * with disabled interrupts.
2948 */
2949 //if (IMS)
2950 if (1)
2951 {
2952 /*
2953 * Interrupts were enabled -- we are supposedly at the very
2954 * beginning of the interrupt handler.
2955 */
2956 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
2957 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", pThis->szPrf, ICR));
2958 /* Clear all pending interrupts */
2959 ICR = 0;
2960 pThis->fIntRaised = false;
2961 /* Lower(0) INTA(0) */
2962 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
2963
2964 pThis->u64AckedAt = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
2965 if (pThis->fIntMaskUsed)
2966 pThis->fDelayInts = true;
2967 }
2968 else
2969 {
2970 /*
2971 * Interrupts are disabled -- in Windows guests the ICR read is done
2972 * just before re-enabling interrupts.
2973 */
2974 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
2975 }
2976 }
2977 *pu32Value = value;
2978 }
2979 e1kCsLeave(pThis);
2980
2981 return rc;
2982}
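/*
 * Editor's note on the acknowledge flow above: a non-zero ICR read deasserts
 * INTA, clears all pending interrupt causes and, when the guest is known to use
 * interrupt masking (fIntMaskUsed), records the acknowledge time and sets
 * fDelayInts so that causes raised immediately afterwards can be postponed
 * while the guest is presumably still inside its interrupt handler.
 */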
2983
2984/**
2985 * Write handler for Interrupt Cause Set register.
2986 *
2987 * Bits corresponding to 1s in 'value' will be set in ICR register.
2988 *
2989 * @param pThis The device state structure.
2990 * @param offset Register offset in memory-mapped frame.
2991 * @param index Register index in register array.
2992 * @param value The value to store.
2993 * @param mask Used to implement partial writes (8 and 16-bit).
2994 * @thread EMT
2995 */
2996static int e1kRegWriteICS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2997{
2998 RT_NOREF_PV(offset); RT_NOREF_PV(index);
2999 E1K_INC_ISTAT_CNT(pThis->uStatIntICS);
3000 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, value & g_aE1kRegMap[ICS_IDX].writable);
3001}
3002
3003/**
3004 * Write handler for Interrupt Mask Set register.
3005 *
3006 * Will trigger pending interrupts.
3007 *
3008 * @param pThis The device state structure.
3009 * @param offset Register offset in memory-mapped frame.
3010 * @param index Register index in register array.
3011 * @param value The value to store.
3012 * @param mask Used to implement partial writes (8 and 16-bit).
3013 * @thread EMT
3014 */
3015static int e1kRegWriteIMS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3016{
3017 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3018
3019 IMS |= value;
3020 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
3021 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", pThis->szPrf));
3022 /*
3023 * We cannot raise an interrupt here as it will occasionally cause an interrupt storm
3024 * in Windows guests (see @bugref{8624}, @bugref{5023}).
3025 */
3026 if ((ICR & IMS) && !pThis->fLocked)
3027 {
3028 E1K_INC_ISTAT_CNT(pThis->uStatIntIMS);
3029 e1kPostponeInterrupt(pThis, E1K_IMS_INT_DELAY_NS);
3030 }
3031
3032 return VINF_SUCCESS;
3033}
3034
3035/**
3036 * Write handler for Interrupt Mask Clear register.
3037 *
3038 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
3039 *
3040 * @param pThis The device state structure.
3041 * @param offset Register offset in memory-mapped frame.
3042 * @param index Register index in register array.
3043 * @param value The value to store.
3044 * @param mask Used to implement partial writes (8 and 16-bit).
3045 * @thread EMT
3046 */
3047static int e1kRegWriteIMC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3048{
3049 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3050
3051 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3052 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3053 return rc;
3054 if (pThis->fIntRaised)
3055 {
3056 /*
3057 * Technically we should reset fIntRaised in the ICR read handler, but that would cause
3058 * Windows to freeze, since it may receive an interrupt while still at the very beginning
3059 * of its interrupt handler.
3060 */
3061 E1K_INC_ISTAT_CNT(pThis->uStatIntLower);
3062 STAM_COUNTER_INC(&pThis->StatIntsPrevented);
3063 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
3064 /* Lower(0) INTA(0) */
3065 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
3066 pThis->fIntRaised = false;
3067 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
3068 }
3069 IMS &= ~value;
3070 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", pThis->szPrf));
3071 e1kCsLeave(pThis);
3072
3073 return VINF_SUCCESS;
3074}
3075
3076/**
3077 * Write handler for Receive Control register.
3078 *
3079 * @param pThis The device state structure.
3080 * @param offset Register offset in memory-mapped frame.
3081 * @param index Register index in register array.
3082 * @param value The value to store.
3083 * @param mask Used to implement partial writes (8 and 16-bit).
3084 * @thread EMT
3085 */
3086static int e1kRegWriteRCTL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3087{
3088 /* Update promiscuous mode */
3089 bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
3090 if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
3091 {
3092 /* Promiscuity has changed, pass the knowledge on. */
3093#ifndef IN_RING3
3094 return VINF_IOM_R3_MMIO_WRITE;
3095#else
3096 if (pThis->pDrvR3)
3097 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, fBecomePromiscous);
3098#endif
3099 }
3100
3101 /* Adjust receive buffer size */
3102 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
3103 if (value & RCTL_BSEX)
3104 cbRxBuf *= 16;
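/*
 * Editor's note: with BSEX clear, BSIZE values 00/01/10/11 select buffers of
 * 2048/1024/512/256 bytes; with BSEX set the same encodings are scaled by 16,
 * i.e. 16384/8192/4096 bytes (BSIZE=00 together with BSEX is reserved by the
 * spec).
 */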
3105 if (cbRxBuf != pThis->u16RxBSize)
3106 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
3107 pThis->szPrf, cbRxBuf, pThis->u16RxBSize));
3108 pThis->u16RxBSize = cbRxBuf;
3109
3110 /* Update the register */
3111 e1kRegWriteDefault(pThis, offset, index, value);
3112
3113 return VINF_SUCCESS;
3114}
3115
3116/**
3117 * Write handler for Packet Buffer Allocation register.
3118 *
3119 * TXA = 64 - RXA.
3120 *
3121 * @param pThis The device state structure.
3122 * @param offset Register offset in memory-mapped frame.
3123 * @param index Register index in register array.
3124 * @param value The value to store.
3125 * @param mask Used to implement partial writes (8 and 16-bit).
3126 * @thread EMT
3127 */
3128static int e1kRegWritePBA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3129{
3130 e1kRegWriteDefault(pThis, offset, index, value);
3131 PBA_st->txa = 64 - PBA_st->rxa;
3132
3133 return VINF_SUCCESS;
3134}
3135
3136/**
3137 * Write handler for Receive Descriptor Tail register.
3138 *
3139 * @remarks Write into RDT forces switch to HC and signal to
3140 * e1kR3NetworkDown_WaitReceiveAvail().
3141 *
3142 * @returns VBox status code.
3143 *
3144 * @param pThis The device state structure.
3145 * @param offset Register offset in memory-mapped frame.
3146 * @param index Register index in register array.
3147 * @param value The value to store.
3148 * @param mask Used to implement partial writes (8 and 16-bit).
3149 * @thread EMT
3150 */
3151static int e1kRegWriteRDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3152{
3153#ifndef IN_RING3
3154 /* XXX */
3155// return VINF_IOM_R3_MMIO_WRITE;
3156#endif
3157 int rc = e1kCsRxEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3158 if (RT_LIKELY(rc == VINF_SUCCESS))
3159 {
3160 E1kLog(("%s e1kRegWriteRDT\n", pThis->szPrf));
3161 /*
3162 * Some drivers advance RDT too far, so that it equals RDH. This
3163 * somehow manages to work with real hardware but not with this
3164 * emulated device. We can work with these drivers if we just
3165 * write 1 less when we see a driver writing RDT equal to RDH,
3166 * see @bugref{7346}.
3167 */
3168 if (value == RDH)
3169 {
3170 if (RDH == 0)
3171 value = (RDLEN / sizeof(E1KRXDESC)) - 1;
3172 else
3173 value = RDH - 1;
3174 }
3175 rc = e1kRegWriteDefault(pThis, offset, index, value);
3176#ifdef E1K_WITH_RXD_CACHE
3177 /*
3178 * We need to fetch descriptors now as RDT may wrap all the way around
3179 * before we attempt to store a received packet. For example,
3180 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
3181 * size being only 8 descriptors! Note that we fetch descriptors
3182 * only when the cache is empty to reduce the number of memory reads
3183 * in case of frequent RDT writes. Don't fetch anything when the
3184 * receiver is disabled either, as RDH, RDT and RDLEN can be in a
3185 * messed up state then.
3186 * Note that even though the cache may seem empty, i.e. contain no more
3187 * available descriptors, it may still be in use by the RX thread, which
3188 * has not yet written the last descriptor back but has temporarily
3189 * released the RX lock in order to write the packet body to the
3190 * descriptor's buffer. At this point we are still going to prefetch,
3191 * but it won't actually fetch anything if there are no unused slots in
3192 * our "empty" cache (nRxDFetched == E1K_RXD_CACHE_SIZE). We must not
3193 * reset the cache here even if it appears empty; it will be reset at
3194 * a later point in e1kRxDGet().
3195 */
3196 if (e1kRxDIsCacheEmpty(pThis) && (RCTL & RCTL_EN))
3197 e1kRxDPrefetch(pThis);
3198#endif /* E1K_WITH_RXD_CACHE */
3199 e1kCsRxLeave(pThis);
3200 if (RT_SUCCESS(rc))
3201 {
3202/** @todo bird: Use SUPSem* for this so we can signal it in ring-0 as well
3203 * without requiring any context switches. We should also check the
3204 * wait condition before bothering to queue the item as we're currently
3205 * queuing thousands of items per second here in a normal transmit
3206 * scenario. Expect performance changes when fixing this! */
3207#ifdef IN_RING3
3208 /* Signal that we have more receive descriptors available. */
3209 e1kWakeupReceive(pThis->CTX_SUFF(pDevIns));
3210#else
3211 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pCanRxQueue));
3212 if (pItem)
3213 PDMQueueInsert(pThis->CTX_SUFF(pCanRxQueue), pItem);
3214#endif
3215 }
3216 }
3217 return rc;
3218}
3219
3220/**
3221 * Write handler for Receive Delay Timer register.
3222 *
3223 * @param pThis The device state structure.
3224 * @param offset Register offset in memory-mapped frame.
3225 * @param index Register index in register array.
3226 * @param value The value to store.
3227 * @param mask Used to implement partial writes (8 and 16-bit).
3228 * @thread EMT
3229 */
3230static int e1kRegWriteRDTR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3231{
3232 e1kRegWriteDefault(pThis, offset, index, value);
3233 if (value & RDTR_FPD)
3234 {
3235 /* Flush requested, cancel both timers and raise interrupt */
3236#ifdef E1K_USE_RX_TIMERS
3237 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3238 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3239#endif
3240 E1K_INC_ISTAT_CNT(pThis->uStatIntRDTR);
3241 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3242 }
3243
3244 return VINF_SUCCESS;
3245}
3246
3247DECLINLINE(uint32_t) e1kGetTxLen(PE1KSTATE pThis)
3248{
3249 /**
3250 * Make sure TDT won't change during computation. EMT may modify TDT at
3251 * any moment.
3252 */
3253 uint32_t tdt = TDT;
3254 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
3255}
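/*
 * Worked example (editor's note): with an 8-descriptor ring (TDLEN/sizeof == 8),
 * TDH=6 and TDT=2 the expression yields 8 + 2 - 6 = 4 descriptors pending;
 * with TDH=2 and TDT=6 it yields 6 - 2 = 4 as well, i.e. the distance from
 * head to tail modulo the ring size.
 */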
3256
3257#ifdef IN_RING3
3258
3259# ifdef E1K_TX_DELAY
3260/**
3261 * Transmit Delay Timer handler.
3262 *
3263 * @remarks We only get here when the timer expires.
3264 *
3265 * @param pDevIns Pointer to device instance structure.
3266 * @param pTimer Pointer to the timer.
3267 * @param pvUser NULL.
3268 * @thread EMT
3269 */
3270static DECLCALLBACK(void) e1kTxDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3271{
3272 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3273 Assert(PDMCritSectIsOwner(&pThis->csTx));
3274
3275 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayExp);
3276# ifdef E1K_INT_STATS
3277 uint64_t u64Elapsed = RTTimeNanoTS() - pThis->u64ArmedAt;
3278 if (u64Elapsed > pThis->uStatMaxTxDelay)
3279 pThis->uStatMaxTxDelay = u64Elapsed;
3280# endif
3281 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
3282 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3283}
3284# endif /* E1K_TX_DELAY */
3285
3286//# ifdef E1K_USE_TX_TIMERS
3287
3288/**
3289 * Transmit Interrupt Delay Timer handler.
3290 *
3291 * @remarks We only get here when the timer expires.
3292 *
3293 * @param pDevIns Pointer to device instance structure.
3294 * @param pTimer Pointer to the timer.
3295 * @param pvUser NULL.
3296 * @thread EMT
3297 */
3298static DECLCALLBACK(void) e1kTxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3299{
3300 RT_NOREF(pDevIns);
3301 RT_NOREF(pTimer);
3302 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3303
3304 E1K_INC_ISTAT_CNT(pThis->uStatTID);
3305 /* Cancel absolute delay timer as we have already got attention */
3306# ifndef E1K_NO_TAD
3307 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
3308# endif
3309 e1kRaiseInterrupt(pThis, ICR_TXDW);
3310}
3311
3312/**
3313 * Transmit Absolute Delay Timer handler.
3314 *
3315 * @remarks We only get here when the timer expires.
3316 *
3317 * @param pDevIns Pointer to device instance structure.
3318 * @param pTimer Pointer to the timer.
3319 * @param pvUser NULL.
3320 * @thread EMT
3321 */
3322static DECLCALLBACK(void) e1kTxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3323{
3324 RT_NOREF(pDevIns);
3325 RT_NOREF(pTimer);
3326 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3327
3328 E1K_INC_ISTAT_CNT(pThis->uStatTAD);
3329 /* Cancel interrupt delay timer as we have already got attention */
3330 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
3331 e1kRaiseInterrupt(pThis, ICR_TXDW);
3332}
3333
3334//# endif /* E1K_USE_TX_TIMERS */
3335# ifdef E1K_USE_RX_TIMERS
3336
3337/**
3338 * Receive Interrupt Delay Timer handler.
3339 *
3340 * @remarks We only get here when the timer expires.
3341 *
3342 * @param pDevIns Pointer to device instance structure.
3343 * @param pTimer Pointer to the timer.
3344 * @param pvUser NULL.
3345 * @thread EMT
3346 */
3347static DECLCALLBACK(void) e1kRxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3348{
3349 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3350
3351 E1K_INC_ISTAT_CNT(pThis->uStatRID);
3352 /* Cancel absolute delay timer as we have already got attention */
3353 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3354 e1kRaiseInterrupt(pThis, ICR_RXT0);
3355}
3356
3357/**
3358 * Receive Absolute Delay Timer handler.
3359 *
3360 * @remarks We only get here when the timer expires.
3361 *
3362 * @param pDevIns Pointer to device instance structure.
3363 * @param pTimer Pointer to the timer.
3364 * @param pvUser NULL.
3365 * @thread EMT
3366 */
3367static DECLCALLBACK(void) e1kRxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3368{
3369 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3370
3371 E1K_INC_ISTAT_CNT(pThis->uStatRAD);
3372 /* Cancel interrupt delay timer as we have already got attention */
3373 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3374 e1kRaiseInterrupt(pThis, ICR_RXT0);
3375}
3376
3377# endif /* E1K_USE_RX_TIMERS */
3378
3379/**
3380 * Late Interrupt Timer handler.
3381 *
3382 * @param pDevIns Pointer to device instance structure.
3383 * @param pTimer Pointer to the timer.
3384 * @param pvUser NULL.
3385 * @thread EMT
3386 */
3387static DECLCALLBACK(void) e1kLateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3388{
3389 RT_NOREF(pDevIns, pTimer);
3390 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3391
3392 STAM_PROFILE_ADV_START(&pThis->StatLateIntTimer, a);
3393 STAM_COUNTER_INC(&pThis->StatLateInts);
3394 E1K_INC_ISTAT_CNT(pThis->uStatIntLate);
3395# if 0
3396 if (pThis->iStatIntLost > -100)
3397 pThis->iStatIntLost--;
3398# endif
3399 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, 0);
3400 STAM_PROFILE_ADV_STOP(&pThis->StatLateIntTimer, a);
3401}
3402
3403/**
3404 * Link Up Timer handler.
3405 *
3406 * @param pDevIns Pointer to device instance structure.
3407 * @param pTimer Pointer to the timer.
3408 * @param pvUser NULL.
3409 * @thread EMT
3410 */
3411static DECLCALLBACK(void) e1kLinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3412{
3413 RT_NOREF(pDevIns, pTimer);
3414 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3415
3416 /*
3417 * This can happen if we set the link status to down while the link-up timer was
3418 * already armed (shortly after e1kLoadDone()), or when the cable is disconnected
3419 * and reconnected very quickly.
3420 */
3421 if (!pThis->fCableConnected)
3422 return;
3423
3424 e1kR3LinkUp(pThis);
3425}
3426
3427#endif /* IN_RING3 */
3428
3429/**
3430 * Sets up the GSO context according to the new TSE context descriptor.
3431 *
3432 * @param pGso The GSO context to setup.
3433 * @param pCtx The context descriptor.
3434 */
3435DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3436{
3437 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3438
3439 /*
3440 * See if the context descriptor describes something that could be TCP or
3441 * UDP over IPv[46].
3442 */
3443 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3444 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3445 {
3446 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3447 return;
3448 }
3449 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3450 {
3451 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3452 return;
3453 }
3454 if (RT_UNLIKELY( pCtx->dw2.fTCP
3455 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3456 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3457 {
3458 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3459 return;
3460 }
3461
3462 /* The end of the TCP/UDP checksum should stop at the end of the packet or at least after the headers. */
3463 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3464 {
3465 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3466 return;
3467 }
3468
3469 /* IPv4 checksum offset. */
3470 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3471 {
3472 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3473 return;
3474 }
3475
3476 /* TCP/UDP checksum offsets. */
3477 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3478 != ( pCtx->dw2.fTCP
3479 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3480 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3481 {
3482 E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS, pCtx->dw2.fTCP));
3483 return;
3484 }
3485
3486 /*
3487 * Because of internal networking using a 16-bit size field for GSO context
3488 * plus frame, we have to make sure we don't exceed this.
3489 */
3490 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3491 {
3492 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3493 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3494 return;
3495 }
3496
3497 /*
3498 * We're good for now - we'll do more checks when seeing the data.
3499 * So, figure the type of offloading and setup the context.
3500 */
3501 if (pCtx->dw2.fIP)
3502 {
3503 if (pCtx->dw2.fTCP)
3504 {
3505 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3506 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3507 }
3508 else
3509 {
3510 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3511 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3512 }
3513 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3514 * this yet it seems)... */
3515 }
3516 else
3517 {
3518 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /** @todo IPv6 UFO */
3519 if (pCtx->dw2.fTCP)
3520 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3521 else
3522 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3523 }
3524 pGso->offHdr1 = pCtx->ip.u8CSS;
3525 pGso->offHdr2 = pCtx->tu.u8CSS;
3526 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3527 pGso->cbMaxSeg = pCtx->dw3.u16MSS;
3528 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3529 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3530 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3531}
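/*
 * Worked example (editor's note, values illustrative): for a TSE context
 * describing TCP over IPv4 with a 14-byte Ethernet header, a 20-byte IP header
 * and a 20-byte TCP header the guest sets IPCSS=14, TUCSS=34 and HDRLEN=54,
 * which yields offHdr1=14, offHdr2=34, cbHdrsSeg=cbHdrsTotal=54 and
 * cbMaxSeg=MSS (e.g. 1460), so each generated segment is at most 54+1460 bytes
 * before any VLAN tag.
 */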
3532
3533/**
3534 * Checks if we can use GSO processing for the current TSE frame.
3535 *
3536 * @param pThis The device state structure.
3537 * @param pGso The GSO context.
3538 * @param pData The first data descriptor of the frame.
3539 * @param pCtx The TSO context descriptor.
3540 */
3541DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3542{
3543 if (!pData->cmd.fTSE)
3544 {
3545 E1kLog2(("e1kCanDoGso: !TSE\n"));
3546 return false;
3547 }
3548 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3549 {
3550 E1kLog(("e1kCanDoGso: VLE\n"));
3551 return false;
3552 }
3553 if (RT_UNLIKELY(!pThis->fGSOEnabled))
3554 {
3555 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3556 return false;
3557 }
3558
3559 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3560 {
3561 case PDMNETWORKGSOTYPE_IPV4_TCP:
3562 case PDMNETWORKGSOTYPE_IPV4_UDP:
3563 if (!pData->dw3.fIXSM)
3564 {
3565 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3566 return false;
3567 }
3568 if (!pData->dw3.fTXSM)
3569 {
3570 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3571 return false;
3572 }
3573 /** @todo what more check should we perform here? Ethernet frame type? */
3574 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3575 return true;
3576
3577 case PDMNETWORKGSOTYPE_IPV6_TCP:
3578 case PDMNETWORKGSOTYPE_IPV6_UDP:
3579 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3580 {
3581 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3582 return false;
3583 }
3584 if (!pData->dw3.fTXSM)
3585 {
3586 E1kLog(("e1kCanDoGso: TXSM (IPv6)\n"));
3587 return false;
3588 }
3589 /** @todo what more check should we perform here? Ethernet frame type? */
3590 E1kLog2(("e1kCanDoGso: OK, IPv6\n"));
3591 return true;
3592
3593 default:
3594 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3595 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3596 return false;
3597 }
3598}
3599
3600/**
3601 * Frees the current xmit buffer.
3602 *
3603 * @param pThis The device state structure.
3604 */
3605static void e1kXmitFreeBuf(PE1KSTATE pThis)
3606{
3607 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3608 if (pSg)
3609 {
3610 pThis->CTX_SUFF(pTxSg) = NULL;
3611
3612 if (pSg->pvAllocator != pThis)
3613 {
3614 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3615 if (pDrv)
3616 pDrv->pfnFreeBuf(pDrv, pSg);
3617 }
3618 else
3619 {
3620 /* loopback */
3621 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3622 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3623 pSg->fFlags = 0;
3624 pSg->pvAllocator = NULL;
3625 }
3626 }
3627}
3628
3629#ifndef E1K_WITH_TXD_CACHE
3630/**
3631 * Allocates an xmit buffer.
3632 *
3633 * @returns See PDMINETWORKUP::pfnAllocBuf.
3634 * @param pThis The device state structure.
3635 * @param cbMin The minimum frame size.
3636 * @param fExactSize Whether cbMin is exact or if we have to max it
3637 * out to the max MTU size.
3638 * @param fGso Whether this is a GSO frame or not.
3639 */
3640DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, size_t cbMin, bool fExactSize, bool fGso)
3641{
3642 /* Adjust cbMin if necessary. */
3643 if (!fExactSize)
3644 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3645
3646 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3647 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3648 e1kXmitFreeBuf(pThis);
3649 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3650
3651 /*
3652 * Allocate the buffer.
3653 */
3654 PPDMSCATTERGATHER pSg;
3655 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3656 {
3657 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3658 if (RT_UNLIKELY(!pDrv))
3659 return VERR_NET_DOWN;
3660 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pThis->GsoCtx : NULL, &pSg);
3661 if (RT_FAILURE(rc))
3662 {
3663 /* Suspend TX as we are out of buffers atm */
3664 STATUS |= STATUS_TXOFF;
3665 return rc;
3666 }
3667 }
3668 else
3669 {
3670 /* Create a loopback using the fallback buffer and preallocated SG. */
3671 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3672 pSg = &pThis->uTxFallback.Sg;
3673 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3674 pSg->cbUsed = 0;
3675 pSg->cbAvailable = 0;
3676 pSg->pvAllocator = pThis;
3677 pSg->pvUser = NULL; /* No GSO here. */
3678 pSg->cSegs = 1;
3679 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3680 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3681 }
3682
3683 pThis->CTX_SUFF(pTxSg) = pSg;
3684 return VINF_SUCCESS;
3685}
3686#else /* E1K_WITH_TXD_CACHE */
3687/**
3688 * Allocates an xmit buffer.
3689 *
3690 * @returns See PDMINETWORKUP::pfnAllocBuf.
3691 * @param pThis The device state structure.
3692 * @param cbMin The minimum frame size.
3693 * @param fExactSize Whether cbMin is exact or if we have to max it
3694 * out to the max MTU size.
3695 * @param fGso Whether this is a GSO frame or not.
3696 */
3697DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, bool fGso)
3698{
3699 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3700 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3701 e1kXmitFreeBuf(pThis);
3702 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3703
3704 /*
3705 * Allocate the buffer.
3706 */
3707 PPDMSCATTERGATHER pSg;
3708 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3709 {
3710 if (pThis->cbTxAlloc == 0)
3711 {
3712 /* Zero packet, no need for the buffer */
3713 return VINF_SUCCESS;
3714 }
3715
3716 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3717 if (RT_UNLIKELY(!pDrv))
3718 return VERR_NET_DOWN;
3719 int rc = pDrv->pfnAllocBuf(pDrv, pThis->cbTxAlloc, fGso ? &pThis->GsoCtx : NULL, &pSg);
3720 if (RT_FAILURE(rc))
3721 {
3722 /* Suspend TX as we are out of buffers atm */
3723 STATUS |= STATUS_TXOFF;
3724 return rc;
3725 }
3726 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3727 pThis->szPrf, pThis->cbTxAlloc,
3728 pThis->fVTag ? "VLAN " : "",
3729 pThis->fGSO ? "GSO " : ""));
3730 pThis->cbTxAlloc = 0;
3731 }
3732 else
3733 {
3734 /* Create a loopback using the fallback buffer and preallocated SG. */
3735 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3736 pSg = &pThis->uTxFallback.Sg;
3737 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3738 pSg->cbUsed = 0;
3739 pSg->cbAvailable = 0;
3740 pSg->pvAllocator = pThis;
3741 pSg->pvUser = NULL; /* No GSO here. */
3742 pSg->cSegs = 1;
3743 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3744 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3745 }
3746
3747 pThis->CTX_SUFF(pTxSg) = pSg;
3748 return VINF_SUCCESS;
3749}
3750#endif /* E1K_WITH_TXD_CACHE */
3751
3752/**
3753 * Checks if it's a GSO buffer or not.
3754 *
3755 * @returns true / false.
3756 * @param pTxSg The scatter / gather buffer.
3757 */
3758DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3759{
3760#if 0
3761 if (!pTxSg)
3762 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
3763 if (pTxSg && pTxSg->pvUser)
3764 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
3765#endif
3766 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3767}
3768
3769#ifndef E1K_WITH_TXD_CACHE
3770/**
3771 * Load transmit descriptor from guest memory.
3772 *
3773 * @param pThis The device state structure.
3774 * @param pDesc Pointer to descriptor union.
3775 * @param addr Physical address in guest context.
3776 * @thread E1000_TX
3777 */
3778DECLINLINE(void) e1kLoadDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3779{
3780 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3781}
3782#else /* E1K_WITH_TXD_CACHE */
3783/**
3784 * Load transmit descriptors from guest memory.
3785 *
3786 * We need two physical reads in case the tail wrapped around the end of TX
3787 * descriptor ring.
3788 *
3789 * @returns the actual number of descriptors fetched.
3790 * @param pThis The device state structure.
3793 * @thread E1000_TX
3794 */
3795DECLINLINE(unsigned) e1kTxDLoadMore(PE1KSTATE pThis)
3796{
3797 Assert(pThis->iTxDCurrent == 0);
3798 /* We've already loaded pThis->nTxDFetched descriptors past TDH. */
3799 unsigned nDescsAvailable = e1kGetTxLen(pThis) - pThis->nTxDFetched;
3800 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pThis->nTxDFetched);
3801 unsigned nDescsTotal = TDLEN / sizeof(E1KTXDESC);
3802 unsigned nFirstNotLoaded = (TDH + pThis->nTxDFetched) % nDescsTotal;
3803 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
3804 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u "
3805 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
3806 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
3807 nFirstNotLoaded, nDescsInSingleRead));
3808 if (nDescsToFetch == 0)
3809 return 0;
3810 E1KTXDESC* pFirstEmptyDesc = &pThis->aTxDescriptors[pThis->nTxDFetched];
3811 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3812 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3813 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
3814 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3815 pThis->szPrf, nDescsInSingleRead,
3816 TDBAH, TDBAL + TDH * sizeof(E1KTXDESC),
3817 nFirstNotLoaded, TDLEN, TDH, TDT));
3818 if (nDescsToFetch > nDescsInSingleRead)
3819 {
3820 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3821 ((uint64_t)TDBAH << 32) + TDBAL,
3822 pFirstEmptyDesc + nDescsInSingleRead,
3823 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
3824 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
3825 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
3826 TDBAH, TDBAL));
3827 }
3828 pThis->nTxDFetched += nDescsToFetch;
3829 return nDescsToFetch;
3830}
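/*
 * Worked example (editor's note): with an 8-descriptor ring, TDH=6, an empty
 * cache and 4 descriptors pending, the first PDMDevHlpPhysRead above fetches
 * the 2 descriptors at indices 6..7 and the second read fetches the remaining
 * 2 at indices 0..1, so the cache ends up holding them in ring order.
 */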
3831
3832/**
3833 * Load transmit descriptors from guest memory only if there are no loaded
3834 * descriptors.
3835 *
3836 * @returns true if there are descriptors in cache.
3837 * @param pThis The device state structure.
3840 * @thread E1000_TX
3841 */
3842DECLINLINE(bool) e1kTxDLazyLoad(PE1KSTATE pThis)
3843{
3844 if (pThis->nTxDFetched == 0)
3845 return e1kTxDLoadMore(pThis) != 0;
3846 return true;
3847}
3848#endif /* E1K_WITH_TXD_CACHE */
3849
3850/**
3851 * Write back transmit descriptor to guest memory.
3852 *
3853 * @param pThis The device state structure.
3854 * @param pDesc Pointer to descriptor union.
3855 * @param addr Physical address in guest context.
3856 * @thread E1000_TX
3857 */
3858DECLINLINE(void) e1kWriteBackDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3859{
3860 /* Only the last half of the descriptor has to be written back. */
3861 e1kPrintTDesc(pThis, pDesc, "^^^");
3862 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3863}
3864
3865/**
3866 * Transmit complete frame.
3867 *
3868 * @remarks We skip the FCS since we're not responsible for sending anything to
3869 * a real ethernet wire.
3870 *
3871 * @param pThis The device state structure.
3872 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3873 * @thread E1000_TX
3874 */
3875static void e1kTransmitFrame(PE1KSTATE pThis, bool fOnWorkerThread)
3876{
3877 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3878 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
3879 Assert(!pSg || pSg->cSegs == 1);
3880
3881 if (cbFrame > 70) /* unqualified guess */
3882 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
3883
3884#ifdef E1K_INT_STATS
3885 if (cbFrame <= 1514)
3886 E1K_INC_ISTAT_CNT(pThis->uStatTx1514);
3887 else if (cbFrame <= 2962)
3888 E1K_INC_ISTAT_CNT(pThis->uStatTx2962);
3889 else if (cbFrame <= 4410)
3890 E1K_INC_ISTAT_CNT(pThis->uStatTx4410);
3891 else if (cbFrame <= 5858)
3892 E1K_INC_ISTAT_CNT(pThis->uStatTx5858);
3893 else if (cbFrame <= 7306)
3894 E1K_INC_ISTAT_CNT(pThis->uStatTx7306);
3895 else if (cbFrame <= 8754)
3896 E1K_INC_ISTAT_CNT(pThis->uStatTx8754);
3897 else if (cbFrame <= 16384)
3898 E1K_INC_ISTAT_CNT(pThis->uStatTx16384);
3899 else if (cbFrame <= 32768)
3900 E1K_INC_ISTAT_CNT(pThis->uStatTx32768);
3901 else
3902 E1K_INC_ISTAT_CNT(pThis->uStatTxLarge);
3903#endif /* E1K_INT_STATS */
3904
3905 /* Add VLAN tag */
3906 if (cbFrame > 12 && pThis->fVTag)
3907 {
3908 E1kLog3(("%s Inserting VLAN tag %08x\n",
3909 pThis->szPrf, RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16)));
3910 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
3911 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16);
3912 pSg->cbUsed += 4;
3913 cbFrame += 4;
3914 Assert(pSg->cbUsed == cbFrame);
3915 Assert(pSg->cbUsed <= pSg->cbAvailable);
3916 }
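/*
 * Editor's note on the insertion above: the 802.1Q tag goes right after the
 * two 6-byte MAC addresses, so everything from byte 12 onwards is moved up
 * by 4 bytes and the 32-bit TPID/TCI pair is written at offset 12, growing
 * the frame by 4 bytes.
 */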
3917/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
3918 "%.*Rhxd\n"
3919 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
3920 pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
3921
3922 /* Update the stats */
3923 E1K_INC_CNT32(TPT);
3924 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
3925 E1K_INC_CNT32(GPTC);
3926 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
3927 E1K_INC_CNT32(BPTC);
3928 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
3929 E1K_INC_CNT32(MPTC);
3930 /* Update octet transmit counter */
3931 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
3932 if (pThis->CTX_SUFF(pDrv))
3933 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, cbFrame);
3934 if (cbFrame == 64)
3935 E1K_INC_CNT32(PTC64);
3936 else if (cbFrame < 128)
3937 E1K_INC_CNT32(PTC127);
3938 else if (cbFrame < 256)
3939 E1K_INC_CNT32(PTC255);
3940 else if (cbFrame < 512)
3941 E1K_INC_CNT32(PTC511);
3942 else if (cbFrame < 1024)
3943 E1K_INC_CNT32(PTC1023);
3944 else
3945 E1K_INC_CNT32(PTC1522);
3946
3947 E1K_INC_ISTAT_CNT(pThis->uStatTxFrm);
3948
3949 /*
3950 * Dump and send the packet.
3951 */
3952 int rc = VERR_NET_DOWN;
3953 if (pSg && pSg->pvAllocator != pThis)
3954 {
3955 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
3956
3957 pThis->CTX_SUFF(pTxSg) = NULL;
3958 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3959 if (pDrv)
3960 {
3961 /* Release critical section to avoid deadlock in CanReceive */
3962 //e1kCsLeave(pThis);
3963 STAM_PROFILE_START(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
3964 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
3965 STAM_PROFILE_STOP(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
3966 //e1kCsEnter(pThis, RT_SRC_POS);
3967 }
3968 }
3969 else if (pSg)
3970 {
3971 Assert(pSg->aSegs[0].pvSeg == pThis->aTxPacketFallback);
3972 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
3973
3974 /** @todo do we actually need to check that we're in loopback mode here? */
3975 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
3976 {
3977 E1KRXDST status;
3978 RT_ZERO(status);
3979 status.fPIF = true;
3980 e1kHandleRxPacket(pThis, pSg->aSegs[0].pvSeg, cbFrame, status);
3981 rc = VINF_SUCCESS;
3982 }
3983 e1kXmitFreeBuf(pThis);
3984 }
3985 else
3986 rc = VERR_NET_DOWN;
3987 if (RT_FAILURE(rc))
3988 {
3989 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
3990 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
3991 }
3992
3993 pThis->led.Actual.s.fWriting = 0;
3994}
3995
3996/**
3997 * Compute and write internet checksum (e1kCSum16) at the specified offset.
3998 *
3999 * @param pThis The device state structure.
4000 * @param pPkt Pointer to the packet.
4001 * @param u16PktLen Total length of the packet.
4002 * @param cso Offset in packet to write checksum at.
4003 * @param css Offset in packet to start computing
4004 * checksum from.
4005 * @param cse Offset in packet to stop computing
4006 * checksum at.
4007 * @thread E1000_TX
4008 */
4009static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
4010{
4011 RT_NOREF1(pThis);
4012
4013 if (css >= u16PktLen)
4014 {
4015 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
4016 pThis->szPrf, css, u16PktLen));
4017 return;
4018 }
4019
4020 if (cso >= u16PktLen - 1)
4021 {
4022 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
4023 pThis->szPrf, cso, u16PktLen));
4024 return;
4025 }
4026
4027 if (cse == 0)
4028 cse = u16PktLen - 1;
4029 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
4030 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", pThis->szPrf,
4031 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
4032 *(uint16_t*)(pPkt + cso) = u16ChkSum;
4033}
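/*
 * Worked example (editor's note, offsets illustrative): for the IPv4 header
 * checksum of an Ethernet frame the context typically carries CSS=14 (start of
 * the IP header), CSO=24 (the ip_sum field) and CSE=33 (last IP header byte),
 * so the helper sums bytes 14..33 and stores the 16-bit result at offset 24.
 * CSE=0 means "checksum to the end of the packet", as used for TCP/UDP.
 */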
4034
4035/**
4036 * Add a part of descriptor's buffer to transmit frame.
4037 *
4038 * @remarks data.u64BufAddr is used unconditionally for both data
4039 * and legacy descriptors since it is identical to
4040 * legacy.u64BufAddr.
4041 *
4042 * @param pThis The device state structure.
4043 * @param PhysAddr Physical address of the descriptor's buffer part.
4044 * @param u16Len Length of buffer to the end of segment.
4045 * @param fSend Force packet sending.
4046 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4047 * @thread E1000_TX
4048 */
4049#ifndef E1K_WITH_TXD_CACHE
4050static void e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4051{
4052 /* TCP header being transmitted */
4053 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
4054 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4055 /* IP header being transmitted */
4056 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
4057 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4058
4059 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4060 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4061 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4062
4063 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4064 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4065 E1kLog3(("%s Dump of the segment:\n"
4066 "%.*Rhxd\n"
4067 "%s --- End of dump ---\n",
4068 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4069 pThis->u16TxPktLen += u16Len;
4070 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4071 pThis->szPrf, pThis->u16TxPktLen));
4072 if (pThis->u16HdrRemain > 0)
4073 {
4074 /* The header was not complete, check if it is now */
4075 if (u16Len >= pThis->u16HdrRemain)
4076 {
4077 /* The rest is payload */
4078 u16Len -= pThis->u16HdrRemain;
4079 pThis->u16HdrRemain = 0;
4080 /* Save partial checksum and flags */
4081 pThis->u32SavedCsum = pTcpHdr->chksum;
4082 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4083 /* Clear FIN and PSH flags now and set them only in the last segment */
4084 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4085 }
4086 else
4087 {
4088 /* Still not */
4089 pThis->u16HdrRemain -= u16Len;
4090 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4091 pThis->szPrf, pThis->u16HdrRemain));
4092 return;
4093 }
4094 }
4095
4096 pThis->u32PayRemain -= u16Len;
4097
4098 if (fSend)
4099 {
4100 /* Leave ethernet header intact */
4101 /* IP Total Length = payload + headers - ethernet header */
4102 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4103 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4104 pThis->szPrf, ntohs(pIpHdr->total_len)));
4105 /* Update IP Checksum */
4106 pIpHdr->chksum = 0;
4107 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4108 pThis->contextTSE.ip.u8CSO,
4109 pThis->contextTSE.ip.u8CSS,
4110 pThis->contextTSE.ip.u16CSE);
4111
4112 /* Update TCP flags */
4113 /* Restore original FIN and PSH flags for the last segment */
4114 if (pThis->u32PayRemain == 0)
4115 {
4116 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4117 E1K_INC_CNT32(TSCTC);
4118 }
4119 /* Add TCP length to partial pseudo header sum */
4120 uint32_t csum = pThis->u32SavedCsum
4121 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4122 while (csum >> 16)
4123 csum = (csum >> 16) + (csum & 0xFFFF);
4124 pTcpHdr->chksum = csum;
4125 /* Compute final checksum */
4126 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4127 pThis->contextTSE.tu.u8CSO,
4128 pThis->contextTSE.tu.u8CSS,
4129 pThis->contextTSE.tu.u16CSE);
4130
4131 /*
4132 * Transmit it. If we've used the SG already, allocate a new one before
4133 * we copy the data.
4134 */
4135 if (!pThis->CTX_SUFF(pTxSg))
4136 e1kXmitAllocBuf(pThis, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
4137 if (pThis->CTX_SUFF(pTxSg))
4138 {
4139 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4140 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4141 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4142 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4143 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4144 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4145 }
4146 e1kTransmitFrame(pThis, fOnWorkerThread);
4147
4148 /* Update Sequence Number */
4149 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4150 - pThis->contextTSE.dw3.u8HDRLEN);
4151 /* Increment IP identification */
4152 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4153 }
4154}
4155#else /* E1K_WITH_TXD_CACHE */
4156static int e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4157{
4158 int rc = VINF_SUCCESS;
4159 /* TCP header being transmitted */
4160 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
4161 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4162 /* IP header being transmitted */
4163 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
4164 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4165
4166 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4167 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4168 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4169
4170 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4171 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4172 E1kLog3(("%s Dump of the segment:\n"
4173 "%.*Rhxd\n"
4174 "%s --- End of dump ---\n",
4175 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4176 pThis->u16TxPktLen += u16Len;
4177 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4178 pThis->szPrf, pThis->u16TxPktLen));
4179 if (pThis->u16HdrRemain > 0)
4180 {
4181 /* The header was not complete, check if it is now */
4182 if (u16Len >= pThis->u16HdrRemain)
4183 {
4184 /* The rest is payload */
4185 u16Len -= pThis->u16HdrRemain;
4186 pThis->u16HdrRemain = 0;
4187 /* Save partial checksum and flags */
4188 pThis->u32SavedCsum = pTcpHdr->chksum;
4189 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4190 /* Clear FIN and PSH flags now and set them only in the last segment */
4191 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4192 }
4193 else
4194 {
4195 /* Still not */
4196 pThis->u16HdrRemain -= u16Len;
4197 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4198 pThis->szPrf, pThis->u16HdrRemain));
4199 return rc;
4200 }
4201 }
4202
4203 pThis->u32PayRemain -= u16Len;
4204
4205 if (fSend)
4206 {
4207 /* Leave ethernet header intact */
4208 /* IP Total Length = payload + headers - ethernet header */
4209 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4210 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4211 pThis->szPrf, ntohs(pIpHdr->total_len)));
4212 /* Update IP Checksum */
4213 pIpHdr->chksum = 0;
4214 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4215 pThis->contextTSE.ip.u8CSO,
4216 pThis->contextTSE.ip.u8CSS,
4217 pThis->contextTSE.ip.u16CSE);
4218
4219 /* Update TCP flags */
4220 /* Restore original FIN and PSH flags for the last segment */
4221 if (pThis->u32PayRemain == 0)
4222 {
4223 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4224 E1K_INC_CNT32(TSCTC);
4225 }
4226 /* Add TCP length to partial pseudo header sum */
4227 uint32_t csum = pThis->u32SavedCsum
4228 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4229 while (csum >> 16)
4230 csum = (csum >> 16) + (csum & 0xFFFF);
4231 pTcpHdr->chksum = csum;
4232 /* Compute final checksum */
4233 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4234 pThis->contextTSE.tu.u8CSO,
4235 pThis->contextTSE.tu.u8CSS,
4236 pThis->contextTSE.tu.u16CSE);
4237
4238 /*
4239 * Transmit it.
4240 */
4241 if (pThis->CTX_SUFF(pTxSg))
4242 {
4243 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4244 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4245 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4246 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4247 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4248 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4249 }
4250 e1kTransmitFrame(pThis, fOnWorkerThread);
4251
4252 /* Update Sequence Number */
4253 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4254 - pThis->contextTSE.dw3.u8HDRLEN);
4255 /* Increment IP identification */
4256 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4257
4258 /* Allocate new buffer for the next segment. */
4259 if (pThis->u32PayRemain)
4260 {
4261 pThis->cbTxAlloc = RT_MIN(pThis->u32PayRemain,
4262 pThis->contextTSE.dw3.u16MSS)
4263 + pThis->contextTSE.dw3.u8HDRLEN
4264 + (pThis->fVTag ? 4 : 0);
4265 rc = e1kXmitAllocBuf(pThis, false /* fGSO */);
4266 }
4267 }
4268
4269 return rc;
4270}
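/*
 * Worked example of the per-segment arithmetic above (editor's note): with
 * HDRLEN=54 and MSS=1460 each completed segment carries u16TxPktLen=1514 bytes;
 * the IP total length becomes 1514-14=1500, the TCP sequence number advances by
 * 1514-54=1460, and the IP identification is bumped by one before the headers
 * are reused for the next segment.
 */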
4271#endif /* E1K_WITH_TXD_CACHE */
4272
4273#ifndef E1K_WITH_TXD_CACHE
4274/**
4275 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4276 * frame.
4277 *
4278 * We construct the frame in the fallback buffer first and then copy it to the SG
4279 * buffer before passing it down to the network driver code.
4280 *
4281 * @returns true if the frame should be transmitted, false if not.
4282 *
4283 * @param pThis The device state structure.
4284 * @param pDesc Pointer to the descriptor to transmit.
4285 * @param cbFragment Length of descriptor's buffer.
4286 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4287 * @thread E1000_TX
4288 */
4289static bool e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4290{
4291 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4292 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4293 Assert(pDesc->data.cmd.fTSE);
4294 Assert(!e1kXmitIsGsoBuf(pTxSg));
4295
4296 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4297 Assert(u16MaxPktLen != 0);
4298 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4299
4300 /*
4301 * Carve out segments.
4302 */
4303 do
4304 {
4305 /* Calculate how many bytes we have left in this TCP segment */
4306 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4307 if (cb > cbFragment)
4308 {
4309 /* This descriptor fits completely into current segment */
4310 cb = cbFragment;
4311 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4312 }
4313 else
4314 {
4315 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4316 /*
4317 * Rewind the packet tail pointer to the beginning of payload,
4318 * so we continue writing right beyond the header.
4319 */
4320 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4321 }
4322
4323 pDesc->data.u64BufAddr += cb;
4324 cbFragment -= cb;
4325 } while (cbFragment > 0);
4326
4327 if (pDesc->data.cmd.fEOP)
4328 {
4329 /* End of packet, next segment will contain header. */
4330 if (pThis->u32PayRemain != 0)
4331 E1K_INC_CNT32(TSCTFC);
4332 pThis->u16TxPktLen = 0;
4333 e1kXmitFreeBuf(pThis);
4334 }
4335
4336 return false;
4337}
4338#else /* E1K_WITH_TXD_CACHE */
4339/**
4340 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4341 * frame.
4342 *
4343 * We construct the frame in the fallback buffer first and then copy it to the SG
4344 * buffer before passing it down to the network driver code.
4345 *
4346 * @returns error code
4347 *
4348 * @param pThis The device state structure.
4349 * @param pDesc Pointer to the descriptor to transmit.
4351 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4352 * @thread E1000_TX
4353 */
4354static int e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, bool fOnWorkerThread)
4355{
4356#ifdef VBOX_STRICT
4357 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4358 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4359 Assert(pDesc->data.cmd.fTSE);
4360 Assert(!e1kXmitIsGsoBuf(pTxSg));
4361#endif
4362
4363 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4364 Assert(u16MaxPktLen != 0);
4365 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4366
4367 /*
4368 * Carve out segments.
4369 */
4370 int rc;
4371 do
4372 {
4373 /* Calculate how many bytes we have left in this TCP segment */
4374 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4375 if (cb > pDesc->data.cmd.u20DTALEN)
4376 {
4377 /* This descriptor fits completely into current segment */
4378 cb = pDesc->data.cmd.u20DTALEN;
4379 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4380 }
4381 else
4382 {
4383 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4384 /*
4385 * Rewind the packet tail pointer to the beginning of payload,
4386 * so we continue writing right beyond the header.
4387 */
4388 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4389 }
4390
4391 pDesc->data.u64BufAddr += cb;
4392 pDesc->data.cmd.u20DTALEN -= cb;
4393 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4394
4395 if (pDesc->data.cmd.fEOP)
4396 {
4397 /* End of packet, next segment will contain header. */
4398 if (pThis->u32PayRemain != 0)
4399 E1K_INC_CNT32(TSCTFC);
4400 pThis->u16TxPktLen = 0;
4401 e1kXmitFreeBuf(pThis);
4402 }
4403
4404 return rc;
4405}
4406#endif /* E1K_WITH_TXD_CACHE */
4407
4408
4409/**
4410 * Add descriptor's buffer to transmit frame.
4411 *
4412 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4413 * TSE frames we cannot handle as GSO.
4414 *
4415 * @returns true on success, false on failure.
4416 *
4417 * @param pThis The device state structure.
4418 * @param PhysAddr The physical address of the descriptor buffer.
4419 * @param cbFragment Length of descriptor's buffer.
4420 * @thread E1000_TX
4421 */
4422static bool e1kAddToFrame(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint32_t cbFragment)
4423{
4424 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4425 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4426 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4427
4428 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4429 {
4430 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4431 return false;
4432 }
4433 if (RT_UNLIKELY( fGso && cbNewPkt > pTxSg->cbAvailable ))
4434 {
4435 E1kLog(("%s Transmit packet is too large: %u > %u(max)/GSO\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
4436 return false;
4437 }
4438
4439 if (RT_LIKELY(pTxSg))
4440 {
4441 Assert(pTxSg->cSegs == 1);
4442 Assert(pTxSg->cbUsed == pThis->u16TxPktLen);
4443
4444 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4445 (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4446
4447 pTxSg->cbUsed = cbNewPkt;
4448 }
4449 pThis->u16TxPktLen = cbNewPkt;
4450
4451 return true;
4452}
4453
4454
4455/**
4456 * Write the descriptor back to guest memory and notify the guest.
4457 *
4458 * @param pThis The device state structure.
4459 * @param pDesc Pointer to the descriptor that has been transmitted.
4460 * @param addr Physical address of the descriptor in guest memory.
4461 * @thread E1000_TX
4462 */
4463static void e1kDescReport(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4464{
4465 /*
4466 * We fake descriptor write-back bursting. Descriptors are written back as they are
4467 * processed.
4468 */
4469 /* Let's pretend we process descriptors. Write back with DD set. */
4470 /*
4471 * Prior to r71586 we tried to accommodate the case when write-back bursts
4472 * are enabled without actually implementing bursting by writing back all
4473 * descriptors, even the ones that do not have RS set. This caused kernel
4474 * panics with Linux SMP kernels, as the e1000 driver tried to free up skb
4475 * associated with written back descriptor if it happened to be a context
4476 * descriptor since context descriptors do not have skb associated to them.
4477 * Starting from r71586 we write back only the descriptors with RS set,
4478 * which is a little bit different from what the real hardware does in
4479 * case there is a chain of data descritors where some of them have RS set
4480 * and others do not. It is very uncommon scenario imho.
4481 * We need to check RPS as well since some legacy drivers use it instead of
4482 * RS even with newer cards.
4483 */
4484 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4485 {
4486 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4487 e1kWriteBackDesc(pThis, pDesc, addr);
4488 if (pDesc->legacy.cmd.fEOP)
4489 {
4490//#ifdef E1K_USE_TX_TIMERS
4491 if (pThis->fTidEnabled && pDesc->legacy.cmd.fIDE)
4492 {
4493 E1K_INC_ISTAT_CNT(pThis->uStatTxIDE);
4494 //if (pThis->fIntRaised)
4495 //{
4496 // /* Interrupt is already pending, no need for timers */
4497 // ICR |= ICR_TXDW;
4498 //}
4499 //else {
4500 /* Arm the timer to fire in TIVD usec (discard .024) */
4501 e1kArmTimer(pThis, pThis->CTX_SUFF(pTIDTimer), TIDV);
4502# ifndef E1K_NO_TAD
4503 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4504 E1kLog2(("%s Checking if TAD timer is running\n",
4505 pThis->szPrf));
4506 if (TADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pTADTimer)))
4507 e1kArmTimer(pThis, pThis->CTX_SUFF(pTADTimer), TADV);
4508# endif /* E1K_NO_TAD */
4509 }
4510 else
4511 {
4512 if (pThis->fTidEnabled)
4513 {
4514 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4515 pThis->szPrf));
4516 /* Cancel both timers if armed and fire immediately. */
4517# ifndef E1K_NO_TAD
4518 TMTimerStop(pThis->CTX_SUFF(pTADTimer));
4519# endif
4520 TMTimerStop(pThis->CTX_SUFF(pTIDTimer));
4521 }
4522//#endif /* E1K_USE_TX_TIMERS */
4523 E1K_INC_ISTAT_CNT(pThis->uStatIntTx);
4524 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXDW);
4525//#ifdef E1K_USE_TX_TIMERS
4526 }
4527//#endif /* E1K_USE_TX_TIMERS */
4528 }
4529 }
4530 else
4531 {
4532 E1K_INC_ISTAT_CNT(pThis->uStatTxNoRS);
4533 }
4534}
4535
4536#ifndef E1K_WITH_TXD_CACHE
4537
4538/**
4539 * Process Transmit Descriptor.
4540 *
4541 * E1000 supports three types of transmit descriptors:
4542 * - legacy: data descriptors in the older format (context-less).
4543 * - data: same as legacy but providing new offloading capabilities.
4544 * - context: sets up the context for the following data descriptors.
4545 *
4546 * @param pThis The device state structure.
4547 * @param pDesc Pointer to descriptor union.
4548 * @param addr Physical address of descriptor in guest memory.
4549 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4550 * @thread E1000_TX
4551 */
4552static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr, bool fOnWorkerThread)
4553{
4554 int rc = VINF_SUCCESS;
4555 uint32_t cbVTag = 0;
4556
4557 e1kPrintTDesc(pThis, pDesc, "vvv");
4558
4559//#ifdef E1K_USE_TX_TIMERS
4560 if (pThis->fTidEnabled)
4561 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
4562//#endif /* E1K_USE_TX_TIMERS */
4563
4564 switch (e1kGetDescType(pDesc))
4565 {
4566 case E1K_DTYP_CONTEXT:
4567 if (pDesc->context.dw2.fTSE)
4568 {
4569 pThis->contextTSE = pDesc->context;
4570 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4571 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4572 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4573 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4574 }
4575 else
4576 {
4577 pThis->contextNormal = pDesc->context;
4578 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4579 }
4580 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4581 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4582 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4583 pDesc->context.ip.u8CSS,
4584 pDesc->context.ip.u8CSO,
4585 pDesc->context.ip.u16CSE,
4586 pDesc->context.tu.u8CSS,
4587 pDesc->context.tu.u8CSO,
4588 pDesc->context.tu.u16CSE));
4589 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4590 e1kDescReport(pThis, pDesc, addr);
4591 break;
4592
4593 case E1K_DTYP_DATA:
4594 {
4595 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4596 {
4597 E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4598 /** @todo Same as legacy when !TSE. See below. */
4599 break;
4600 }
4601 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4602 &pThis->StatTxDescTSEData:
4603 &pThis->StatTxDescData);
4604 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4605 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4606
4607 /*
4608 * The last descriptor of a non-TSE packet must contain the VLE flag.
4609 * TSE packets have the VLE flag in the first descriptor. The latter
4610 * case is taken care of a bit later, when cbVTag gets assigned.
4611 *
4612 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4613 */
4614 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4615 {
4616 pThis->fVTag = pDesc->data.cmd.fVLE;
4617 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4618 }
4619 /*
4620 * First fragment: Allocate new buffer and save the IXSM and TXSM
4621 * packet options as these are only valid in the first fragment.
4622 */
4623 if (pThis->u16TxPktLen == 0)
4624 {
4625 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4626 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4627 E1kLog2(("%s Saving checksum flags:%s%s; \n", pThis->szPrf,
4628 pThis->fIPcsum ? " IP" : "",
4629 pThis->fTCPcsum ? " TCP/UDP" : ""));
4630 if (pDesc->data.cmd.fTSE)
4631 {
4632 /* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
4633 pThis->fVTag = pDesc->data.cmd.fVLE;
4634 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4635 cbVTag = pThis->fVTag ? 4 : 0;
4636 }
4637 else if (pDesc->data.cmd.fEOP)
4638 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4639 else
4640 cbVTag = 4;
4641 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4642 if (e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE))
4643 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4644 true /*fExactSize*/, true /*fGso*/);
4645 else if (pDesc->data.cmd.fTSE)
4646 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4647 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4648 else
4649 rc = e1kXmitAllocBuf(pThis, pDesc->data.cmd.u20DTALEN + cbVTag,
4650 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4651
4652 /**
4653 * @todo Perhaps it is not that simple for GSO packets! We may
4654 * need to unwind some changes.
4655 */
4656 if (RT_FAILURE(rc))
4657 {
4658 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4659 break;
4660 }
4661 /** @todo Is there any way of indicating errors other than collisions? Like
4662 * VERR_NET_DOWN. */
4663 }
4664
4665 /*
4666 * Add the descriptor data to the frame. If the frame is complete,
4667 * transmit it and reset the u16TxPktLen field.
4668 */
4669 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4670 {
4671 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4672 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4673 if (pDesc->data.cmd.fEOP)
4674 {
4675 if ( fRc
4676 && pThis->CTX_SUFF(pTxSg)
4677 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4678 {
4679 e1kTransmitFrame(pThis, fOnWorkerThread);
4680 E1K_INC_CNT32(TSCTC);
4681 }
4682 else
4683 {
4684 if (fRc)
4685 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4686 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4687 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4688 e1kXmitFreeBuf(pThis);
4689 E1K_INC_CNT32(TSCTFC);
4690 }
4691 pThis->u16TxPktLen = 0;
4692 }
4693 }
4694 else if (!pDesc->data.cmd.fTSE)
4695 {
4696 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4697 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4698 if (pDesc->data.cmd.fEOP)
4699 {
4700 if (fRc && pThis->CTX_SUFF(pTxSg))
4701 {
4702 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4703 if (pThis->fIPcsum)
4704 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4705 pThis->contextNormal.ip.u8CSO,
4706 pThis->contextNormal.ip.u8CSS,
4707 pThis->contextNormal.ip.u16CSE);
4708 if (pThis->fTCPcsum)
4709 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4710 pThis->contextNormal.tu.u8CSO,
4711 pThis->contextNormal.tu.u8CSS,
4712 pThis->contextNormal.tu.u16CSE);
4713 e1kTransmitFrame(pThis, fOnWorkerThread);
4714 }
4715 else
4716 e1kXmitFreeBuf(pThis);
4717 pThis->u16TxPktLen = 0;
4718 }
4719 }
4720 else
4721 {
4722 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4723 e1kFallbackAddToFrame(pThis, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4724 }
4725
4726 e1kDescReport(pThis, pDesc, addr);
4727 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4728 break;
4729 }
4730
4731 case E1K_DTYP_LEGACY:
4732 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4733 {
4734 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4735 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4736 break;
4737 }
4738 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4739 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4740
4741 /* First fragment: allocate new buffer. */
4742 if (pThis->u16TxPktLen == 0)
4743 {
4744 if (pDesc->legacy.cmd.fEOP)
4745 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4746 else
4747 cbVTag = 4;
4748 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4749 /** @todo reset status bits? */
4750 rc = e1kXmitAllocBuf(pThis, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
4751 if (RT_FAILURE(rc))
4752 {
4753 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4754 break;
4755 }
4756
4757 /** @todo Is there any way of indicating errors other than collisions? Like
4758 * VERR_NET_DOWN. */
4759 }
4760
4761 /* Add fragment to frame. */
4762 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4763 {
4764 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4765
4766 /* Last fragment: Transmit and reset the packet storage counter. */
4767 if (pDesc->legacy.cmd.fEOP)
4768 {
4769 pThis->fVTag = pDesc->legacy.cmd.fVLE;
4770 pThis->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4771 /** @todo Offload processing goes here. */
4772 e1kTransmitFrame(pThis, fOnWorkerThread);
4773 pThis->u16TxPktLen = 0;
4774 }
4775 }
4776 /* Last fragment + failure: free the buffer and reset the storage counter. */
4777 else if (pDesc->legacy.cmd.fEOP)
4778 {
4779 e1kXmitFreeBuf(pThis);
4780 pThis->u16TxPktLen = 0;
4781 }
4782
4783 e1kDescReport(pThis, pDesc, addr);
4784 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4785 break;
4786
4787 default:
4788 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4789 pThis->szPrf, e1kGetDescType(pDesc)));
4790 break;
4791 }
4792
4793 return rc;
4794}
4795
4796#else /* E1K_WITH_TXD_CACHE */
4797
4798/**
4799 * Process Transmit Descriptor.
4800 *
4801 * E1000 supports three types of transmit descriptors:
4802 * - legacy: data descriptors in the older format (context-less).
4803 * - data: same as legacy but providing new offloading capabilities.
4804 * - context: sets up the context for the following data descriptors.
4805 *
4806 * @param pThis The device state structure.
4807 * @param pDesc Pointer to descriptor union.
4808 * @param addr Physical address of descriptor in guest memory.
4809 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4811 * @thread E1000_TX
4812 */
4813static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr,
4814 bool fOnWorkerThread)
4815{
4816 int rc = VINF_SUCCESS;
4817
4818 e1kPrintTDesc(pThis, pDesc, "vvv");
4819
4820//#ifdef E1K_USE_TX_TIMERS
4821 if (pThis->fTidEnabled)
4822 TMTimerStop(pThis->CTX_SUFF(pTIDTimer));
4823//#endif /* E1K_USE_TX_TIMERS */
4824
4825 switch (e1kGetDescType(pDesc))
4826 {
4827 case E1K_DTYP_CONTEXT:
4828 /* The caller has already updated the context. */
4829 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4830 e1kDescReport(pThis, pDesc, addr);
4831 break;
4832
4833 case E1K_DTYP_DATA:
4834 {
4835 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4836 &pThis->StatTxDescTSEData:
4837 &pThis->StatTxDescData);
4838 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4839 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4840 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4841 {
4842 E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4843 }
4844 else
4845 {
4846 /*
4847 * Add the descriptor data to the frame. If the frame is complete,
4848 * transmit it and reset the u16TxPktLen field.
4849 */
4850 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4851 {
4852 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4853 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4854 if (pDesc->data.cmd.fEOP)
4855 {
4856 if ( fRc
4857 && pThis->CTX_SUFF(pTxSg)
4858 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4859 {
4860 e1kTransmitFrame(pThis, fOnWorkerThread);
4861 E1K_INC_CNT32(TSCTC);
4862 }
4863 else
4864 {
4865 if (fRc)
4866 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4867 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4868 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4869 e1kXmitFreeBuf(pThis);
4870 E1K_INC_CNT32(TSCTFC);
4871 }
4872 pThis->u16TxPktLen = 0;
4873 }
4874 }
4875 else if (!pDesc->data.cmd.fTSE)
4876 {
4877 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4878 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4879 if (pDesc->data.cmd.fEOP)
4880 {
4881 if (fRc && pThis->CTX_SUFF(pTxSg))
4882 {
4883 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4884 if (pThis->fIPcsum)
4885 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4886 pThis->contextNormal.ip.u8CSO,
4887 pThis->contextNormal.ip.u8CSS,
4888 pThis->contextNormal.ip.u16CSE);
4889 if (pThis->fTCPcsum)
4890 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4891 pThis->contextNormal.tu.u8CSO,
4892 pThis->contextNormal.tu.u8CSS,
4893 pThis->contextNormal.tu.u16CSE);
4894 e1kTransmitFrame(pThis, fOnWorkerThread);
4895 }
4896 else
4897 e1kXmitFreeBuf(pThis);
4898 pThis->u16TxPktLen = 0;
4899 }
4900 }
4901 else
4902 {
4903 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4904 rc = e1kFallbackAddToFrame(pThis, pDesc, fOnWorkerThread);
4905 }
4906 }
4907 e1kDescReport(pThis, pDesc, addr);
4908 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4909 break;
4910 }
4911
4912 case E1K_DTYP_LEGACY:
4913 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4914 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4915 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4916 {
4917 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4918 }
4919 else
4920 {
4921 /* Add fragment to frame. */
4922 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4923 {
4924 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4925
4926 /* Last fragment: Transmit and reset the packet storage counter. */
4927 if (pDesc->legacy.cmd.fEOP)
4928 {
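    /* The IC bit in a legacy descriptor requests checksum insertion; the CSO and
     * CSS offsets come from the descriptor itself, not from a context descriptor. */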
4929 if (pDesc->legacy.cmd.fIC)
4930 {
4931 e1kInsertChecksum(pThis,
4932 (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
4933 pThis->u16TxPktLen,
4934 pDesc->legacy.cmd.u8CSO,
4935 pDesc->legacy.dw3.u8CSS,
4936 0);
4937 }
4938 e1kTransmitFrame(pThis, fOnWorkerThread);
4939 pThis->u16TxPktLen = 0;
4940 }
4941 }
4942 /* Last fragment + failure: free the buffer and reset the storage counter. */
4943 else if (pDesc->legacy.cmd.fEOP)
4944 {
4945 e1kXmitFreeBuf(pThis);
4946 pThis->u16TxPktLen = 0;
4947 }
4948 }
4949 e1kDescReport(pThis, pDesc, addr);
4950 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4951 break;
4952
4953 default:
4954 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4955 pThis->szPrf, e1kGetDescType(pDesc)));
4956 break;
4957 }
4958
4959 return rc;
4960}
4961
4962DECLINLINE(void) e1kUpdateTxContext(PE1KSTATE pThis, E1KTXDESC *pDesc)
4963{
4964 if (pDesc->context.dw2.fTSE)
4965 {
4966 pThis->contextTSE = pDesc->context;
4967 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4968 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4969 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4970 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4971 }
4972 else
4973 {
4974 pThis->contextNormal = pDesc->context;
4975 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4976 }
4977 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4978 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4979 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4980 pDesc->context.ip.u8CSS,
4981 pDesc->context.ip.u8CSO,
4982 pDesc->context.ip.u16CSE,
4983 pDesc->context.tu.u8CSS,
4984 pDesc->context.tu.u8CSO,
4985 pDesc->context.tu.u16CSE));
4986}
4987
4988static bool e1kLocateTxPacket(PE1KSTATE pThis)
4989{
4990 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
4991 pThis->szPrf, pThis->cbTxAlloc));
4992 /* Check if we have located the packet already. */
4993 if (pThis->cbTxAlloc)
4994 {
4995 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
4996 pThis->szPrf, pThis->cbTxAlloc));
4997 return true;
4998 }
4999
5000 bool fTSE = false;
5001 uint32_t cbPacket = 0;
5002
5003 for (int i = pThis->iTxDCurrent; i < pThis->nTxDFetched; ++i)
5004 {
5005 E1KTXDESC *pDesc = &pThis->aTxDescriptors[i];
5006 switch (e1kGetDescType(pDesc))
5007 {
5008 case E1K_DTYP_CONTEXT:
5009 e1kUpdateTxContext(pThis, pDesc);
5010 continue;
5011 case E1K_DTYP_LEGACY:
5012 /* Skip empty descriptors. */
5013 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
5014 break;
5015 cbPacket += pDesc->legacy.cmd.u16Length;
5016 pThis->fGSO = false;
5017 break;
5018 case E1K_DTYP_DATA:
5019 /* Skip empty descriptors. */
5020 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
5021 break;
5022 if (cbPacket == 0)
5023 {
5024 /*
5025 * The first fragment: save IXSM and TXSM options
5026 * as these are only valid in the first fragment.
5027 */
5028 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
5029 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
5030 fTSE = pDesc->data.cmd.fTSE;
5031 /*
5032 * TSE descriptors have VLE bit properly set in
5033 * the first fragment.
5034 */
5035 if (fTSE)
5036 {
5037 pThis->fVTag = pDesc->data.cmd.fVLE;
5038 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5039 }
5040 pThis->fGSO = e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE);
5041 }
5042 cbPacket += pDesc->data.cmd.u20DTALEN;
5043 break;
5044 default:
5045 AssertMsgFailed(("Impossible descriptor type!"));
5046 }
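    /* The EOP bit sits in the same position in legacy and data descriptors, so it
     * is safe to test it via the legacy view whatever the descriptor type is. */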
5047 if (pDesc->legacy.cmd.fEOP)
5048 {
5049 /*
5050 * Non-TSE descriptors have VLE bit properly set in
5051 * the last fragment.
5052 */
5053 if (!fTSE)
5054 {
5055 pThis->fVTag = pDesc->data.cmd.fVLE;
5056 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5057 }
5058 /*
5059 * Compute the required buffer size. If we cannot do GSO but still
5060 * have to do segmentation we allocate the first segment only.
5061 */
5062 pThis->cbTxAlloc = (!fTSE || pThis->fGSO) ?
5063 cbPacket :
5064 RT_MIN(cbPacket, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN);
5065 if (pThis->fVTag)
5066 pThis->cbTxAlloc += 4;
5067 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
5068 pThis->szPrf, pThis->cbTxAlloc));
5069 return true;
5070 }
5071 }
5072
5073 if (cbPacket == 0 && pThis->nTxDFetched - pThis->iTxDCurrent > 0)
5074 {
5075 /* All descriptors were empty; we need to process them as a dummy packet. */
5076 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
5077 pThis->szPrf, pThis->cbTxAlloc));
5078 return true;
5079 }
5080 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d\n",
5081 pThis->szPrf, pThis->cbTxAlloc));
5082 return false;
5083}
5084
5085static int e1kXmitPacket(PE1KSTATE pThis, bool fOnWorkerThread)
5086{
5087 int rc = VINF_SUCCESS;
5088
5089 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
5090 pThis->szPrf, pThis->iTxDCurrent, pThis->nTxDFetched));
5091
5092 while (pThis->iTxDCurrent < pThis->nTxDFetched)
5093 {
5094 E1KTXDESC *pDesc = &pThis->aTxDescriptors[pThis->iTxDCurrent];
5095 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5096 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
5097 rc = e1kXmitDesc(pThis, pDesc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5098 if (RT_FAILURE(rc))
5099 break;
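    /* Advance the head pointer, wrapping around once it passes the end of the ring. */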
5100 if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
5101 TDH = 0;
5102 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
5103 if (uLowThreshold != 0 && e1kGetTxLen(pThis) <= uLowThreshold)
5104 {
5105 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5106 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5107 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5108 }
5109 ++pThis->iTxDCurrent;
5110 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
5111 break;
5112 }
5113
5114 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
5115 pThis->szPrf, rc, pThis->iTxDCurrent, pThis->nTxDFetched));
5116 return rc;
5117}
5118
5119#endif /* E1K_WITH_TXD_CACHE */
5120#ifndef E1K_WITH_TXD_CACHE
5121
5122/**
5123 * Transmit pending descriptors.
5124 *
5125 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5126 *
5127 * @param pThis The E1000 state.
5128 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5129 */
5130static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5131{
5132 int rc = VINF_SUCCESS;
5133
5134 /* Check if transmitter is enabled. */
5135 if (!(TCTL & TCTL_EN))
5136 return VINF_SUCCESS;
5137 /*
5138 * Grab the xmit lock of the driver as well as the E1K device state.
5139 */
5140 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5141 if (RT_LIKELY(rc == VINF_SUCCESS))
5142 {
5143 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5144 if (pDrv)
5145 {
5146 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5147 if (RT_FAILURE(rc))
5148 {
5149 e1kCsTxLeave(pThis);
5150 return rc;
5151 }
5152 }
5153 /*
5154 * Process all pending descriptors.
5155 * Note! Do not process descriptors in locked state
5156 */
5157 while (TDH != TDT && !pThis->fLocked)
5158 {
5159 E1KTXDESC desc;
5160 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5161 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
5162
5163 e1kLoadDesc(pThis, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
5164 rc = e1kXmitDesc(pThis, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5165 /* If we failed to transmit descriptor we will try it again later */
5166 if (RT_FAILURE(rc))
5167 break;
5168 if (++TDH * sizeof(desc) >= TDLEN)
5169 TDH = 0;
5170
5171 if (e1kGetTxLen(pThis) <= GET_BITS(TXDCTL, LWTHRESH)*8)
5172 {
5173 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5174 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5175 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5176 }
5177
5178 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5179 }
5180
5181 /// @todo uncomment: pThis->uStatIntTXQE++;
5182 /// @todo uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5183 /*
5184 * Release the lock.
5185 */
5186 if (pDrv)
5187 pDrv->pfnEndXmit(pDrv);
5188 e1kCsTxLeave(pThis);
5189 }
5190
5191 return rc;
5192}
5193
5194#else /* E1K_WITH_TXD_CACHE */
5195
5196static void e1kDumpTxDCache(PE1KSTATE pThis)
5197{
5198 unsigned i, cDescs = TDLEN / sizeof(E1KTXDESC);
5199 uint32_t tdh = TDH;
5200 LogRel(("-- Transmit Descriptors (%d total) --\n", cDescs));
5201 for (i = 0; i < cDescs; ++i)
5202 {
5203 E1KTXDESC desc;
5204 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(TDBAH, TDBAL, i),
5205 &desc, sizeof(desc));
5206 if (i == tdh)
5207 LogRel((">>> "));
5208 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5209 }
5210 LogRel(("-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5211 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE));
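    /* Step TDH back by the number of cache entries already consumed (wrapping around
     * the ring) to recover the ring index of the first descriptor held in the cache. */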
5212 if (tdh > pThis->iTxDCurrent)
5213 tdh -= pThis->iTxDCurrent;
5214 else
5215 tdh = cDescs + tdh - pThis->iTxDCurrent;
5216 for (i = 0; i < pThis->nTxDFetched; ++i)
5217 {
5218 if (i == pThis->iTxDCurrent)
5219 LogRel((">>> "));
5220 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
5221 }
5222}
5223
5224/**
5225 * Transmit pending descriptors.
5226 *
5227 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5228 *
5229 * @param pThis The E1000 state.
5230 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5231 */
5232static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5233{
5234 int rc = VINF_SUCCESS;
5235
5236 /* Check if transmitter is enabled. */
5237 if (!(TCTL & TCTL_EN))
5238 return VINF_SUCCESS;
5239 /*
5240 * Grab the xmit lock of the driver as well as the E1K device state.
5241 */
5242 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5243 if (pDrv)
5244 {
5245 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5246 if (RT_FAILURE(rc))
5247 return rc;
5248 }
5249
5250 /*
5251 * Process all pending descriptors.
5252 * Note! Do not process descriptors in locked state
5253 */
5254 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5255 if (RT_LIKELY(rc == VINF_SUCCESS))
5256 {
5257 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5258 /*
5259 * fIncomplete is set whenever we try to fetch additional descriptors
5260 * for an incomplete packet. If we fail to locate a complete packet on
5261 * the next iteration, we need to reset the cache or we risk getting
5262 * stuck in this loop forever.
5263 */
5264 bool fIncomplete = false;
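    /* Keep refilling the descriptor cache and assembling/transmitting packets for as
     * long as there are descriptors to process and the transmitter is not locked. */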
5265 while (!pThis->fLocked && e1kTxDLazyLoad(pThis))
5266 {
5267 while (e1kLocateTxPacket(pThis))
5268 {
5269 fIncomplete = false;
5270 /* Found a complete packet, allocate it. */
5271 rc = e1kXmitAllocBuf(pThis, pThis->fGSO);
5272 /* If we're out of bandwidth we'll come back later. */
5273 if (RT_FAILURE(rc))
5274 goto out;
5275 /* Copy the packet to allocated buffer and send it. */
5276 rc = e1kXmitPacket(pThis, fOnWorkerThread);
5277 /* If we're out of bandwidth we'll come back later. */
5278 if (RT_FAILURE(rc))
5279 goto out;
5280 }
5281 uint8_t u8Remain = pThis->nTxDFetched - pThis->iTxDCurrent;
5282 if (RT_UNLIKELY(fIncomplete))
5283 {
5284 static bool fTxDCacheDumped = false;
5285 /*
5286 * The descriptor cache is full, but we were unable to find
5287 * a complete packet in it. Drop the cache and hope that
5288 * the guest driver can recover from the network card error.
5289 */
5290 LogRel(("%s No complete packets in%s TxD cache! "
5291 "Fetched=%d, current=%d, TX len=%d.\n",
5292 pThis->szPrf,
5293 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5294 pThis->nTxDFetched, pThis->iTxDCurrent,
5295 e1kGetTxLen(pThis)));
5296 if (!fTxDCacheDumped)
5297 {
5298 fTxDCacheDumped = true;
5299 e1kDumpTxDCache(pThis);
5300 }
5301 pThis->iTxDCurrent = pThis->nTxDFetched = 0;
5302 /*
5303 * Returning an error at this point means Guru in R0
5304 * (see @bugref{6428}).
5305 */
5306# ifdef IN_RING3
5307 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5308# else /* !IN_RING3 */
5309 rc = VINF_IOM_R3_MMIO_WRITE;
5310# endif /* !IN_RING3 */
5311 goto out;
5312 }
5313 if (u8Remain > 0)
5314 {
5315 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5316 "%d more are available\n",
5317 pThis->szPrf, pThis->iTxDCurrent, u8Remain,
5318 e1kGetTxLen(pThis) - u8Remain));
5319
5320 /*
5321 * A packet was partially fetched. Move incomplete packet to
5322 * the beginning of cache buffer, then load more descriptors.
5323 */
5324 memmove(pThis->aTxDescriptors,
5325 &pThis->aTxDescriptors[pThis->iTxDCurrent],
5326 u8Remain * sizeof(E1KTXDESC));
5327 pThis->iTxDCurrent = 0;
5328 pThis->nTxDFetched = u8Remain;
5329 e1kTxDLoadMore(pThis);
5330 fIncomplete = true;
5331 }
5332 else
5333 pThis->nTxDFetched = 0;
5334 pThis->iTxDCurrent = 0;
5335 }
5336 if (!pThis->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5337 {
5338 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5339 pThis->szPrf));
5340 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5341 }
5342out:
5343 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5344
5345 /// @todo uncomment: pThis->uStatIntTXQE++;
5346 /// @todo uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5347
5348 e1kCsTxLeave(pThis);
5349 }
5350
5351
5352 /*
5353 * Release the lock.
5354 */
5355 if (pDrv)
5356 pDrv->pfnEndXmit(pDrv);
5357 return rc;
5358}
5359
5360#endif /* E1K_WITH_TXD_CACHE */
5361#ifdef IN_RING3
5362
5363/**
5364 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5365 */
5366static DECLCALLBACK(void) e1kR3NetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5367{
5368 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
5369 /* Resume suspended transmission */
5370 STATUS &= ~STATUS_TXOFF;
5371 e1kXmitPending(pThis, true /*fOnWorkerThread*/);
5372}
5373
5374/**
5375 * Callback for consuming from transmit queue. It gets called in R3 whenever
5376 * we enqueue something in R0/GC.
5377 *
5378 * @returns true
5379 * @param pDevIns Pointer to device instance structure.
5380 * @param pItem Pointer to the element being dequeued (not used).
5381 * @thread ???
5382 */
5383static DECLCALLBACK(bool) e1kTxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5384{
5385 NOREF(pItem);
5386 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5387 E1kLog2(("%s e1kTxQueueConsumer:\n", pThis->szPrf));
5388
5389 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/); NOREF(rc);
5390#ifndef DEBUG_andy /** @todo r=andy Happens for me a lot, mute this for me. */
5391 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
5392#endif
5393 return true;
5394}
5395
5396/**
5397 * Handler for the wakeup signaller queue.
5398 */
5399static DECLCALLBACK(bool) e1kCanRxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5400{
5401 RT_NOREF(pItem);
5402 e1kWakeupReceive(pDevIns);
5403 return true;
5404}
5405
5406#endif /* IN_RING3 */
5407
5408/**
5409 * Write handler for Transmit Descriptor Tail register.
5410 *
5411 * @param pThis The device state structure.
5412 * @param offset Register offset in memory-mapped frame.
5413 * @param index Register index in register array.
5414 * @param value The value to store.
5416 * @thread EMT
5417 */
5418static int e1kRegWriteTDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5419{
5420 int rc = e1kRegWriteDefault(pThis, offset, index, value);
5421
5422 /* All descriptors starting with head and not including tail belong to us. */
5423 /* Process them. */
5424 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5425 pThis->szPrf, TDBAL, TDBAH, TDLEN, TDH, TDT));
5426
5427 /* Ignore TDT writes when the link is down. */
5428 if (TDH != TDT && (STATUS & STATUS_LU))
5429 {
5430 Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", TDH, TDT, e1kGetTxLen(pThis)));
5431 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5432 pThis->szPrf, e1kGetTxLen(pThis)));
5433
5434 /* Transmit pending packets if possible, defer it if we cannot do it
5435 in the current context. */
5436#ifdef E1K_TX_DELAY
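    /* With TX delay enabled we do not transmit right away; we arm the TX delay timer
     * (if it is not armed already) and return, and the timer callback is expected to
     * kick off the actual transmission later. */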
5437 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5438 if (RT_LIKELY(rc == VINF_SUCCESS))
5439 {
5440 if (!TMTimerIsActive(pThis->CTX_SUFF(pTXDTimer)))
5441 {
5442#ifdef E1K_INT_STATS
5443 pThis->u64ArmedAt = RTTimeNanoTS();
5444#endif
5445 e1kArmTimer(pThis, pThis->CTX_SUFF(pTXDTimer), E1K_TX_DELAY);
5446 }
5447 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayed);
5448 e1kCsTxLeave(pThis);
5449 return rc;
5450 }
5451 /* We failed to enter the TX critical section -- transmit as usual. */
5452#endif /* E1K_TX_DELAY */
5453#ifndef IN_RING3
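    /* When there is no driver available in this context (R0/RC), defer the work to
     * ring-3 by pushing an item onto the TX queue; its consumer does the transmit. */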
5454 if (!pThis->CTX_SUFF(pDrv))
5455 {
5456 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pTxQueue));
5457 if (RT_UNLIKELY(pItem))
5458 PDMQueueInsert(pThis->CTX_SUFF(pTxQueue), pItem);
5459 }
5460 else
5461#endif
5462 {
5463 rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
5464 if (rc == VERR_TRY_AGAIN)
5465 rc = VINF_SUCCESS;
5466 else if (rc == VERR_SEM_BUSY)
5467 rc = VINF_IOM_R3_MMIO_WRITE;
5468 AssertRC(rc);
5469 }
5470 }
5471
5472 return rc;
5473}
5474
5475/**
5476 * Write handler for Multicast Table Array registers.
5477 *
5478 * @param pThis The device state structure.
5479 * @param offset Register offset in memory-mapped frame.
5480 * @param index Register index in register array.
5481 * @param value The value to store.
5482 * @thread EMT
5483 */
5484static int e1kRegWriteMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5485{
5486 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5487 pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])] = value;
5488
5489 return VINF_SUCCESS;
5490}
5491
5492/**
5493 * Read handler for Multicast Table Array registers.
5494 *
5495 * @returns VBox status code.
5496 *
5497 * @param pThis The device state structure.
5498 * @param offset Register offset in memory-mapped frame.
5499 * @param index Register index in register array.
5500 * @thread EMT
5501 */
5502static int e1kRegReadMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5503{
5504 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5505 *pu32Value = pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])];
5506
5507 return VINF_SUCCESS;
5508}
5509
5510/**
5511 * Write handler for Receive Address registers.
5512 *
5513 * @param pThis The device state structure.
5514 * @param offset Register offset in memory-mapped frame.
5515 * @param index Register index in register array.
5516 * @param value The value to store.
5517 * @thread EMT
5518 */
5519static int e1kRegWriteRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5520{
5521 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5522 pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
5523
5524 return VINF_SUCCESS;
5525}
5526
5527/**
5528 * Read handler for Receive Address registers.
5529 *
5530 * @returns VBox status code.
5531 *
5532 * @param pThis The device state structure.
5533 * @param offset Register offset in memory-mapped frame.
5534 * @param index Register index in register array.
5535 * @thread EMT
5536 */
5537static int e1kRegReadRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5538{
5539 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5540 *pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
5541
5542 return VINF_SUCCESS;
5543}
5544
5545/**
5546 * Write handler for VLAN Filter Table Array registers.
5547 *
5548 * @param pThis The device state structure.
5549 * @param offset Register offset in memory-mapped frame.
5550 * @param index Register index in register array.
5551 * @param value The value to store.
5552 * @thread EMT
5553 */
5554static int e1kRegWriteVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5555{
5556 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VINF_SUCCESS);
5557 pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])] = value;
5558
5559 return VINF_SUCCESS;
5560}
5561
5562/**
5563 * Read handler for VLAN Filter Table Array registers.
5564 *
5565 * @returns VBox status code.
5566 *
5567 * @param pThis The device state structure.
5568 * @param offset Register offset in memory-mapped frame.
5569 * @param index Register index in register array.
5570 * @thread EMT
5571 */
5572static int e1kRegReadVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5573{
5574 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VERR_DEV_IO_ERROR);
5575 *pu32Value = pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])];
5576
5577 return VINF_SUCCESS;
5578}
5579
5580/**
5581 * Read handler for unimplemented registers.
5582 *
5583 * Merely reports reads from unimplemented registers.
5584 *
5585 * @returns VBox status code.
5586 *
5587 * @param pThis The device state structure.
5588 * @param offset Register offset in memory-mapped frame.
5589 * @param index Register index in register array.
5590 * @thread EMT
5591 */
5592static int e1kRegReadUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5593{
5594 RT_NOREF3(pThis, offset, index);
5595 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5596 pThis->szPrf, offset, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5597 *pu32Value = 0;
5598
5599 return VINF_SUCCESS;
5600}
5601
5602/**
5603 * Default register read handler with automatic clear operation.
5604 *
5605 * Retrieves the value of register from register array in device state structure.
5606 * Then resets all bits.
5607 *
5608 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5609 * done in the caller.
5610 *
5611 * @returns VBox status code.
5612 *
5613 * @param pThis The device state structure.
5614 * @param offset Register offset in memory-mapped frame.
5615 * @param index Register index in register array.
5616 * @thread EMT
5617 */
5618static int e1kRegReadAutoClear(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5619{
5620 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5621 int rc = e1kRegReadDefault(pThis, offset, index, pu32Value);
5622 pThis->auRegs[index] = 0;
5623
5624 return rc;
5625}
5626
5627/**
5628 * Default register read handler.
5629 *
5630 * Retrieves the value of register from register array in device state structure.
5631 * Bits corresponding to 0s in 'readable' mask will always read as 0s.
5632 *
5633 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5634 * done in the caller.
5635 *
5636 * @returns VBox status code.
5637 *
5638 * @param pThis The device state structure.
5639 * @param offset Register offset in memory-mapped frame.
5640 * @param index Register index in register array.
5641 * @thread EMT
5642 */
5643static int e1kRegReadDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5644{
5645 RT_NOREF_PV(offset);
5646
5647 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5648 *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
5649
5650 return VINF_SUCCESS;
5651}
5652
5653/**
5654 * Write handler for unimplemented registers.
5655 *
5656 * Merely reports writes to unimplemented registers.
5657 *
5658 * @param pThis The device state structure.
5659 * @param offset Register offset in memory-mapped frame.
5660 * @param index Register index in register array.
5661 * @param value The value to store.
5662 * @thread EMT
5663 */
5665static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5666{
5667 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
5668
5669 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
5670 pThis->szPrf, offset, value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5671
5672 return VINF_SUCCESS;
5673}
5674
5675/**
5676 * Default register write handler.
5677 *
5678 * Stores the value to the register array in device state structure. Only bits
5679 * corresponding to 1s both in 'writable' and 'mask' will be stored.
5680 *
5681 * @returns VBox status code.
5682 *
5683 * @param pThis The device state structure.
5684 * @param offset Register offset in memory-mapped frame.
5685 * @param index Register index in register array.
5686 * @param value The value to store.
5688 * @thread EMT
5689 */
5690
5692static int e1kRegWriteDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5693 RT_NOREF_PV(offset);
5694
5695 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5696 pThis->auRegs[index] = (value & g_aE1kRegMap[index].writable)
5697 | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
5698
5699 return VINF_SUCCESS;
5700}
5701
5702/**
5703 * Search register table for matching register.
5704 *
5705 * @returns Index in the register table or -1 if not found.
5706 *
5707 * @param offReg Register offset in memory-mapped region.
5708 * @thread EMT
5709 */
5710static int e1kRegLookup(uint32_t offReg)
5711{
5712
5713#if 0
5714 int index;
5715
5716 for (index = 0; index < E1K_NUM_OF_REGS; index++)
5717 {
5718 if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
5719 {
5720 return index;
5721 }
5722 }
5723#else
5724 int iStart = 0;
5725 int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
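    /* Binary search among the first E1K_NUM_OF_BINARY_SEARCHABLE map entries, which
     * are expected to be sorted by offset; the rest are scanned linearly below. */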
5726 for (;;)
5727 {
5728 int i = (iEnd - iStart) / 2 + iStart;
5729 uint32_t offCur = g_aE1kRegMap[i].offset;
5730 if (offReg < offCur)
5731 {
5732 if (i == iStart)
5733 break;
5734 iEnd = i;
5735 }
5736 else if (offReg >= offCur + g_aE1kRegMap[i].size)
5737 {
5738 i++;
5739 if (i == iEnd)
5740 break;
5741 iStart = i;
5742 }
5743 else
5744 return i;
5745 Assert(iEnd > iStart);
5746 }
5747
5748 for (unsigned i = E1K_NUM_OF_BINARY_SEARCHABLE; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5749 if (offReg - g_aE1kRegMap[i].offset < g_aE1kRegMap[i].size)
5750 return i;
5751
5752# ifdef VBOX_STRICT
5753 for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5754 Assert(offReg - g_aE1kRegMap[i].offset >= g_aE1kRegMap[i].size);
5755# endif
5756
5757#endif
5758
5759 return -1;
5760}
5761
5762/**
5763 * Handle unaligned register read operation.
5764 *
5765 * Looks up and calls appropriate handler.
5766 *
5767 * @returns VBox status code.
5768 *
5769 * @param pThis The device state structure.
5770 * @param offReg Register offset in memory-mapped frame.
5771 * @param pv Where to store the result.
5772 * @param cb Number of bytes to read.
5773 * @thread EMT
5774 * @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
5775 * accesses we have to take care of that ourselves.
5776 */
5777static int e1kRegReadUnaligned(PE1KSTATE pThis, uint32_t offReg, void *pv, uint32_t cb)
5778{
5779 uint32_t u32 = 0;
5780 uint32_t shift;
5781 int rc = VINF_SUCCESS;
5782 int index = e1kRegLookup(offReg);
5783#ifdef LOG_ENABLED
5784 char buf[9];
5785#endif
5786
5787 /*
5788 * From the spec:
5789 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5790 * double word) is ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5791 */
5792
5793 /*
5794 * To be able to read bytes and short word we convert them to properly
5795 * shifted 32-bit words and masks. The idea is to keep register-specific
5796 * handlers simple. Most accesses will be 32-bit anyway.
5797 */
5798 uint32_t mask;
5799 switch (cb)
5800 {
5801 case 4: mask = 0xFFFFFFFF; break;
5802 case 2: mask = 0x0000FFFF; break;
5803 case 1: mask = 0x000000FF; break;
5804 default:
5805 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
5806 "unsupported op size: offset=%#10x cb=%#10x\n", offReg, cb);
5807 }
5808 if (index != -1)
5809 {
5810 if (g_aE1kRegMap[index].readable)
5811 {
5812 /* Make the mask correspond to the bits we are about to read. */
5813 shift = (offReg - g_aE1kRegMap[index].offset) % sizeof(uint32_t) * 8;
5814 mask <<= shift;
5815 if (!mask)
5816 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
5817 /*
5818 * Read it. Pass the mask so the handler knows what has to be read.
5819 * Mask out irrelevant bits.
5820 */
5821 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5822 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5823 return rc;
5824 //pThis->fDelayInts = false;
5825 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5826 //pThis->iStatIntLostOne = 0;
5827 rc = g_aE1kRegMap[index].pfnRead(pThis, offReg & 0xFFFFFFFC, index, &u32);
5828 u32 &= mask;
5829 //e1kCsLeave(pThis);
5830 E1kLog2(("%s At %08X read %s from %s (%s)\n",
5831 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5832 Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
5833 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5834 /* Shift back the result. */
5835 u32 >>= shift;
5836 }
5837 else
5838 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
5839 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5840 if (IOM_SUCCESS(rc))
5841 STAM_COUNTER_INC(&pThis->aStatRegReads[index]);
5842 }
5843 else
5844 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
5845 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf)));
5846
5847 memcpy(pv, &u32, cb);
5848 return rc;
5849}
5850
5851/**
5852 * Handle 4 byte aligned and sized read operation.
5853 *
5854 * Looks up and calls appropriate handler.
5855 *
5856 * @returns VBox status code.
5857 *
5858 * @param pThis The device state structure.
5859 * @param offReg Register offset in memory-mapped frame.
5860 * @param pu32 Where to store the result.
5861 * @thread EMT
5862 */
5863static int e1kRegReadAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t *pu32)
5864{
5865 Assert(!(offReg & 3));
5866
5867 /*
5868 * Lookup the register and check that it's readable.
5869 */
5870 int rc = VINF_SUCCESS;
5871 int idxReg = e1kRegLookup(offReg);
5872 if (RT_LIKELY(idxReg != -1))
5873 {
5874 if (RT_LIKELY(g_aE1kRegMap[idxReg].readable))
5875 {
5876 /*
5877 * Read it. Pass the mask so the handler knows what has to be read.
5878 * Mask out irrelevant bits.
5879 */
5880 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5881 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
5882 // return rc;
5883 //pThis->fDelayInts = false;
5884 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5885 //pThis->iStatIntLostOne = 0;
5886 rc = g_aE1kRegMap[idxReg].pfnRead(pThis, offReg & 0xFFFFFFFC, idxReg, pu32);
5887 //e1kCsLeave(pThis);
5888 Log6(("%s At %08X read %08X from %s (%s)\n",
5889 pThis->szPrf, offReg, *pu32, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5890 if (IOM_SUCCESS(rc))
5891 STAM_COUNTER_INC(&pThis->aStatRegReads[idxReg]);
5892 }
5893 else
5894 E1kLog(("%s At %08X read attempt from non-readable register %s (%s)\n",
5895 pThis->szPrf, offReg, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5896 }
5897 else
5898 E1kLog(("%s At %08X read attempt from non-existing register\n", pThis->szPrf, offReg));
5899 return rc;
5900}
5901
5902/**
5903 * Handle 4 byte sized and aligned register write operation.
5904 *
5905 * Looks up and calls appropriate handler.
5906 *
5907 * @returns VBox status code.
5908 *
5909 * @param pThis The device state structure.
5910 * @param offReg Register offset in memory-mapped frame.
5911 * @param u32Value The value to write.
5912 * @thread EMT
5913 */
5914static int e1kRegWriteAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
5915{
5916 int rc = VINF_SUCCESS;
5917 int index = e1kRegLookup(offReg);
5918 if (RT_LIKELY(index != -1))
5919 {
5920 if (RT_LIKELY(g_aE1kRegMap[index].writable))
5921 {
5922 /*
5923 * Write it. Pass the mask so the handler knows what has to be written.
5924 * Mask out irrelevant bits.
5925 */
5926 Log6(("%s At %08X write %08X to %s (%s)\n",
5927 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5928 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5929 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
5930 // return rc;
5931 //pThis->fDelayInts = false;
5932 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5933 //pThis->iStatIntLostOne = 0;
5934 rc = g_aE1kRegMap[index].pfnWrite(pThis, offReg, index, u32Value);
5935 //e1kCsLeave(pThis);
5936 }
5937 else
5938 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
5939 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5940 if (IOM_SUCCESS(rc))
5941 STAM_COUNTER_INC(&pThis->aStatRegWrites[index]);
5942 }
5943 else
5944 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
5945 pThis->szPrf, offReg, u32Value));
5946 return rc;
5947}
5948
5949
5950/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
5951
5952/**
5953 * @callback_method_impl{FNIOMMMIOREAD}
5954 */
5955PDMBOTHCBDECL(int) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
5956{
5957 RT_NOREF2(pvUser, cb);
5958 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5959 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIORead), a);
5960
5961 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
5962 Assert(offReg < E1K_MM_SIZE);
5963 Assert(cb == 4);
5964 Assert(!(GCPhysAddr & 3));
5965
5966 int rc = e1kRegReadAlignedU32(pThis, offReg, (uint32_t *)pv);
5967
5968 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIORead), a);
5969 return rc;
5970}
5971
5972/**
5973 * @callback_method_impl{FNIOMMMIOWRITE}
5974 */
5975PDMBOTHCBDECL(int) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
5976{
5977 RT_NOREF2(pvUser, cb);
5978 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5979 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
5980
5981 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
5982 Assert(offReg < E1K_MM_SIZE);
5983 Assert(cb == 4);
5984 Assert(!(GCPhysAddr & 3));
5985
5986 int rc = e1kRegWriteAlignedU32(pThis, offReg, *(uint32_t const *)pv);
5987
5988 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
5989 return rc;
5990}
5991
5992/**
5993 * @callback_method_impl{FNIOMIOPORTIN}
5994 */
5995PDMBOTHCBDECL(int) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t *pu32, unsigned cb)
5996{
5997 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5998 int rc;
5999 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIORead), a);
6000 RT_NOREF_PV(pvUser);
6001
6002 uPort -= pThis->IOPortBase;
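    /* The I/O space exposes just two 32-bit registers: IOADDR (offset 0) selects a
     * device register and IODATA (offset 4) reads or writes the selected register. */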
6003 if (RT_LIKELY(cb == 4))
6004 switch (uPort)
6005 {
6006 case 0x00: /* IOADDR */
6007 *pu32 = pThis->uSelectedReg;
6008 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6009 rc = VINF_SUCCESS;
6010 break;
6011
6012 case 0x04: /* IODATA */
6013 if (!(pThis->uSelectedReg & 3))
6014 rc = e1kRegReadAlignedU32(pThis, pThis->uSelectedReg, pu32);
6015 else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
6016 rc = e1kRegReadUnaligned(pThis, pThis->uSelectedReg, pu32, cb);
6017 if (rc == VINF_IOM_R3_MMIO_READ)
6018 rc = VINF_IOM_R3_IOPORT_READ;
6019 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6020 break;
6021
6022 default:
6023 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", pThis->szPrf, uPort));
6024 //rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
6025 rc = VINF_SUCCESS;
6026 }
6027 else
6028 {
6029 E1kLog(("%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb));
6030 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb);
6031 }
6032 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIORead), a);
6033 return rc;
6034}
6035
6036
6037/**
6038 * @callback_method_impl{FNIOMIOPORTOUT}
6039 */
6040PDMBOTHCBDECL(int) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t u32, unsigned cb)
6041{
6042 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6043 int rc;
6044 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6045 RT_NOREF_PV(pvUser);
6046
6047 E1kLog2(("%s e1kIOPortOut: uPort=%RTiop value=%08x\n", pThis->szPrf, uPort, u32));
6048 if (RT_LIKELY(cb == 4))
6049 {
6050 uPort -= pThis->IOPortBase;
6051 switch (uPort)
6052 {
6053 case 0x00: /* IOADDR */
6054 pThis->uSelectedReg = u32;
6055 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
6056 rc = VINF_SUCCESS;
6057 break;
6058
6059 case 0x04: /* IODATA */
6060 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
6061 if (RT_LIKELY(!(pThis->uSelectedReg & 3)))
6062 {
6063 rc = e1kRegWriteAlignedU32(pThis, pThis->uSelectedReg, u32);
6064 if (rc == VINF_IOM_R3_MMIO_WRITE)
6065 rc = VINF_IOM_R3_IOPORT_WRITE;
6066 }
6067 else
6068 rc = PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
6069 "Spec violation: misaligned offset: %#10x, ignored.\n", pThis->uSelectedReg);
6070 break;
6071
6072 default:
6073 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", pThis->szPrf, uPort));
6074 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", uPort);
6075 }
6076 }
6077 else
6078 {
6079 E1kLog(("%s e1kIOPortOut: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb));
6080 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s: invalid op size: uPort=%RTiop cb=%#x\n", pThis->szPrf, uPort, cb);
6081 }
6082
6083 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6084 return rc;
6085}
6086
6087#ifdef IN_RING3
6088
6089/**
6090 * Dump complete device state to log.
6091 *
6092 * @param pThis Pointer to device state.
6093 */
6094static void e1kDumpState(PE1KSTATE pThis)
6095{
6096 RT_NOREF(pThis);
6097 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6098 E1kLog2(("%s %8.8s = %08x\n", pThis->szPrf, g_aE1kRegMap[i].abbrev, pThis->auRegs[i]));
6099# ifdef E1K_INT_STATS
6100 LogRel(("%s Interrupt attempts: %d\n", pThis->szPrf, pThis->uStatIntTry));
6101 LogRel(("%s Interrupts raised : %d\n", pThis->szPrf, pThis->uStatInt));
6102 LogRel(("%s Interrupts lowered: %d\n", pThis->szPrf, pThis->uStatIntLower));
6103 LogRel(("%s ICR outside ISR : %d\n", pThis->szPrf, pThis->uStatNoIntICR));
6104 LogRel(("%s IMS raised ints : %d\n", pThis->szPrf, pThis->uStatIntIMS));
6105 LogRel(("%s Interrupts skipped: %d\n", pThis->szPrf, pThis->uStatIntSkip));
6106 LogRel(("%s Masked interrupts : %d\n", pThis->szPrf, pThis->uStatIntMasked));
6107 LogRel(("%s Early interrupts : %d\n", pThis->szPrf, pThis->uStatIntEarly));
6108 LogRel(("%s Late interrupts : %d\n", pThis->szPrf, pThis->uStatIntLate));
6109 LogRel(("%s Lost interrupts : %d\n", pThis->szPrf, pThis->iStatIntLost));
6110 LogRel(("%s Interrupts by RX : %d\n", pThis->szPrf, pThis->uStatIntRx));
6111 LogRel(("%s Interrupts by TX : %d\n", pThis->szPrf, pThis->uStatIntTx));
6112 LogRel(("%s Interrupts by ICS : %d\n", pThis->szPrf, pThis->uStatIntICS));
6113 LogRel(("%s Interrupts by RDTR: %d\n", pThis->szPrf, pThis->uStatIntRDTR));
6114 LogRel(("%s Interrupts by RDMT: %d\n", pThis->szPrf, pThis->uStatIntRXDMT0));
6115 LogRel(("%s Interrupts by TXQE: %d\n", pThis->szPrf, pThis->uStatIntTXQE));
6116 LogRel(("%s TX int delay asked: %d\n", pThis->szPrf, pThis->uStatTxIDE));
6117 LogRel(("%s TX delayed: %d\n", pThis->szPrf, pThis->uStatTxDelayed));
6118 LogRel(("%s TX delay expired: %d\n", pThis->szPrf, pThis->uStatTxDelayExp));
6119 LogRel(("%s TX no report asked: %d\n", pThis->szPrf, pThis->uStatTxNoRS));
6120 LogRel(("%s TX abs timer expd : %d\n", pThis->szPrf, pThis->uStatTAD));
6121 LogRel(("%s TX int timer expd : %d\n", pThis->szPrf, pThis->uStatTID));
6122 LogRel(("%s RX abs timer expd : %d\n", pThis->szPrf, pThis->uStatRAD));
6123 LogRel(("%s RX int timer expd : %d\n", pThis->szPrf, pThis->uStatRID));
6124 LogRel(("%s TX CTX descriptors: %d\n", pThis->szPrf, pThis->uStatDescCtx));
6125 LogRel(("%s TX DAT descriptors: %d\n", pThis->szPrf, pThis->uStatDescDat));
6126 LogRel(("%s TX LEG descriptors: %d\n", pThis->szPrf, pThis->uStatDescLeg));
6127 LogRel(("%s Received frames : %d\n", pThis->szPrf, pThis->uStatRxFrm));
6128 LogRel(("%s Transmitted frames: %d\n", pThis->szPrf, pThis->uStatTxFrm));
6129 LogRel(("%s TX frames up to 1514: %d\n", pThis->szPrf, pThis->uStatTx1514));
6130 LogRel(("%s TX frames up to 2962: %d\n", pThis->szPrf, pThis->uStatTx2962));
6131 LogRel(("%s TX frames up to 4410: %d\n", pThis->szPrf, pThis->uStatTx4410));
6132 LogRel(("%s TX frames up to 5858: %d\n", pThis->szPrf, pThis->uStatTx5858));
6133 LogRel(("%s TX frames up to 7306: %d\n", pThis->szPrf, pThis->uStatTx7306));
6134 LogRel(("%s TX frames up to 8754: %d\n", pThis->szPrf, pThis->uStatTx8754));
6135 LogRel(("%s TX frames up to 16384: %d\n", pThis->szPrf, pThis->uStatTx16384));
6136 LogRel(("%s TX frames up to 32768: %d\n", pThis->szPrf, pThis->uStatTx32768));
6137 LogRel(("%s Larger TX frames : %d\n", pThis->szPrf, pThis->uStatTxLarge));
6138 LogRel(("%s Max TX Delay : %lld\n", pThis->szPrf, pThis->uStatMaxTxDelay));
6139# endif /* E1K_INT_STATS */
6140}
6141
6142/**
6143 * @callback_method_impl{FNPCIIOREGIONMAP}
6144 */
6145static DECLCALLBACK(int) e1kMap(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
6146 RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
6147{
6148 RT_NOREF(pPciDev, iRegion);
6149 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE *);
6150 int rc;
6151
6152 switch (enmType)
6153 {
6154 case PCI_ADDRESS_SPACE_IO:
6155 pThis->IOPortBase = (RTIOPORT)GCPhysAddress;
6156 rc = PDMDevHlpIOPortRegister(pDevIns, pThis->IOPortBase, cb, NULL /*pvUser*/,
6157 e1kIOPortOut, e1kIOPortIn, NULL, NULL, "E1000");
6158 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6159 rc = PDMDevHlpIOPortRegisterR0(pDevIns, pThis->IOPortBase, cb, NIL_RTR0PTR /*pvUser*/,
6160 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6161 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6162 rc = PDMDevHlpIOPortRegisterRC(pDevIns, pThis->IOPortBase, cb, NIL_RTRCPTR /*pvUser*/,
6163 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6164 break;
6165
6166 case PCI_ADDRESS_SPACE_MEM:
6167 /*
6168 * From the spec:
6169 * For registers that should be accessed as 32-bit double words,
6170      *   partial writes (less than a 32-bit double word) are ignored.
6171 * Partial reads return all 32 bits of data regardless of the
6172 * byte enables.
6173 */
6174#ifdef E1K_WITH_PREREG_MMIO
6175 pThis->addrMMReg = GCPhysAddress;
6176 if (GCPhysAddress == NIL_RTGCPHYS)
6177 rc = VINF_SUCCESS;
6178 else
6179 {
6180 Assert(!(GCPhysAddress & 7));
6181 rc = PDMDevHlpMMIOExMap(pDevIns, pPciDev, iRegion, GCPhysAddress);
6182 }
6183#else
6184 pThis->addrMMReg = GCPhysAddress; Assert(!(GCPhysAddress & 7));
6185 rc = PDMDevHlpMMIORegister(pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
6186 IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD,
6187 e1kMMIOWrite, e1kMMIORead, "E1000");
6188 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6189 rc = PDMDevHlpMMIORegisterR0(pDevIns, GCPhysAddress, cb, NIL_RTR0PTR /*pvUser*/,
6190 "e1kMMIOWrite", "e1kMMIORead");
6191 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6192 rc = PDMDevHlpMMIORegisterRC(pDevIns, GCPhysAddress, cb, NIL_RTRCPTR /*pvUser*/,
6193 "e1kMMIOWrite", "e1kMMIORead");
6194#endif
6195 break;
6196
6197 default:
6198 /* We should never get here */
6199 AssertMsgFailed(("Invalid PCI address space param in map callback"));
6200 rc = VERR_INTERNAL_ERROR;
6201 break;
6202 }
6203 return rc;
6204}
6205
6206
6207/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
6208
6209/**
6210 * Check if the device can receive data now.
6211  * This must be called before the pfnReceive() method is called.
6212  *
6213  * @returns VBox status code; VINF_SUCCESS if the device can receive, VERR_NET_NO_BUFFER_SPACE otherwise.
6214  * @param   pThis               Pointer to the device state structure.
6215 * @thread EMT
6216 */
6217static int e1kCanReceive(PE1KSTATE pThis)
6218{
6219#ifndef E1K_WITH_RXD_CACHE
6220 size_t cb;
6221
6222 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6223 return VERR_NET_NO_BUFFER_SPACE;
6224
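    /* Estimate the free buffer space from the RX descriptor ring: descriptors from RDH up to
     * RDT belong to the device; handle ring wrap-around and the degenerate case of a
     * single-descriptor ring (RDLEN == sizeof(E1KRXDESC)) separately. */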
6225 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6226 {
6227 E1KRXDESC desc;
6228 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6229 &desc, sizeof(desc));
6230 if (desc.status.fDD)
6231 cb = 0;
6232 else
6233 cb = pThis->u16RxBSize;
6234 }
6235 else if (RDH < RDT)
6236 cb = (RDT - RDH) * pThis->u16RxBSize;
6237 else if (RDH > RDT)
6238 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pThis->u16RxBSize;
6239 else
6240 {
6241 cb = 0;
6242 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6243 }
6244 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6245 pThis->szPrf, RDH, RDT, RDLEN, pThis->u16RxBSize, cb));
6246
6247 e1kCsRxLeave(pThis);
6248 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6249#else /* E1K_WITH_RXD_CACHE */
6250 int rc = VINF_SUCCESS;
6251
6252 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6253 return VERR_NET_NO_BUFFER_SPACE;
6254
6255 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6256 {
6257 E1KRXDESC desc;
6258 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6259 &desc, sizeof(desc));
6260 if (desc.status.fDD)
6261 rc = VERR_NET_NO_BUFFER_SPACE;
6262 }
6263 else if (e1kRxDIsCacheEmpty(pThis) && RDH == RDT)
6264 {
6265 /* Cache is empty, so is the RX ring. */
6266 rc = VERR_NET_NO_BUFFER_SPACE;
6267 }
6268 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
6269 " u16RxBSize=%d rc=%Rrc\n", pThis->szPrf,
6270 e1kRxDInCache(pThis), RDH, RDT, RDLEN, pThis->u16RxBSize, rc));
6271
6272 e1kCsRxLeave(pThis);
6273 return rc;
6274#endif /* E1K_WITH_RXD_CACHE */
6275}
6276
6277/**
6278 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6279 */
6280static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6281{
6282 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6283 int rc = e1kCanReceive(pThis);
6284
6285 if (RT_SUCCESS(rc))
6286 return VINF_SUCCESS;
6287 if (RT_UNLIKELY(cMillies == 0))
6288 return VERR_NET_NO_BUFFER_SPACE;
6289
6290 rc = VERR_INTERRUPTED;
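    /* Let the receive path know we are waiting for buffer space so that it signals
     * hEventMoreRxDescAvail when more RX descriptors may have become available
     * (e.g. when the guest advances RDT). */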
6291 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, true);
6292 STAM_PROFILE_START(&pThis->StatRxOverflow, a);
6293 VMSTATE enmVMState;
6294 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pThis->CTX_SUFF(pDevIns))) == VMSTATE_RUNNING
6295 || enmVMState == VMSTATE_RUNNING_LS))
6296 {
6297 int rc2 = e1kCanReceive(pThis);
6298 if (RT_SUCCESS(rc2))
6299 {
6300 rc = VINF_SUCCESS;
6301 break;
6302 }
6303 E1kLogRel(("E1000 e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6304 E1kLog(("%s e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", pThis->szPrf, cMillies));
6305 RTSemEventWait(pThis->hEventMoreRxDescAvail, cMillies);
6306 }
6307 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
6308 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, false);
6309
6310 return rc;
6311}
6312
6313
6314/**
6315  * Matches the packet addresses against the Receive Address table. Looks for
6316  * exact matches only.
6317  *
6318  * @returns true if address matches.
6319  * @param   pThis          Pointer to the state structure.
6320  * @param   pvBuf          The ethernet packet.
6321  *
6322 * @thread EMT
6323 */
6324static bool e1kPerfectMatch(PE1KSTATE pThis, const void *pvBuf)
6325{
6326 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
6327 {
6328 E1KRAELEM* ra = pThis->aRecAddr.array + i;
6329
6330 /* Valid address? */
6331 if (ra->ctl & RA_CTL_AV)
6332 {
6333 Assert((ra->ctl & RA_CTL_AS) < 2);
6334 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6335 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6336 // pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6337 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6338 /*
6339 * Address Select:
6340 * 00b = Destination address
6341 * 01b = Source address
6342 * 10b = Reserved
6343 * 11b = Reserved
6344 * Since ethernet header is (DA, SA, len) we can use address
6345 * select as index.
6346 */
6347 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6348 ra->addr, sizeof(ra->addr)) == 0)
6349 return true;
6350 }
6351 }
6352
6353 return false;
6354}
6355
6356/**
6357  * Matches the packet addresses against the Multicast Table Array.
6358  *
6359  * @remarks This is an imperfect match since it matches a subset of addresses
6360  *          rather than one exact address.
6361  *
6362  * @returns true if address matches.
6363  * @param   pThis          Pointer to the state structure.
6364  * @param   pvBuf          The ethernet packet.
6365  *
6366 * @thread EMT
6367 */
6368static bool e1kImperfectMatch(PE1KSTATE pThis, const void *pvBuf)
6369{
6370 /* Get bits 32..47 of destination address */
6371 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6372
6373 unsigned offset = GET_BITS(RCTL, MO);
6374 /*
6375 * offset means:
6376 * 00b = bits 36..47
6377 * 01b = bits 35..46
6378 * 10b = bits 34..45
6379 * 11b = bits 32..43
6380 */
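    /* Shift so that the 12 bits selected by RCTL.MO end up in the low 12 bits of u16Bit,
     * which then index the 4096-bit Multicast Table Array. */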
6381 if (offset < 3)
6382 u16Bit = u16Bit >> (4 - offset);
6383 return ASMBitTest(pThis->auMTA, u16Bit & 0xFFF);
6384}
6385
6386/**
6387  * Determines if the packet is to be delivered to the upper layer.
6388  *
6389  * The following filters are supported:
6390 * - Exact Unicast/Multicast
6391 * - Promiscuous Unicast/Multicast
6392 * - Multicast
6393 * - VLAN
6394 *
6395 * @returns true if packet is intended for this node.
6396 * @param pThis Pointer to the state structure.
6397 * @param pvBuf The ethernet packet.
6398 * @param cb Number of bytes available in the packet.
6399 * @param pStatus Bit field to store status bits.
6400 * @thread EMT
6401 */
6402static bool e1kAddressFilter(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6403{
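    /* The filter needs at least the 14-byte Ethernet header (DA + SA + EtherType) to look at. */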
6404 Assert(cb > 14);
6405 /* Assume that we fail to pass exact filter. */
6406 pStatus->fPIF = false;
6407 pStatus->fVP = false;
6408 /* Discard oversized packets */
6409 if (cb > E1K_MAX_RX_PKT_SIZE)
6410 {
6411 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6412 pThis->szPrf, cb, E1K_MAX_RX_PKT_SIZE));
6413 E1K_INC_CNT32(ROC);
6414 return false;
6415 }
6416 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6417 {
6418         /* When long packet reception is disabled, packets over 1522 bytes are discarded */
6419 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6420 pThis->szPrf, cb));
6421 E1K_INC_CNT32(ROC);
6422 return false;
6423 }
6424
6425 uint16_t *u16Ptr = (uint16_t*)pvBuf;
6426 /* Compare TPID with VLAN Ether Type */
6427 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6428 {
6429 pStatus->fVP = true;
6430 /* Is VLAN filtering enabled? */
6431 if (RCTL & RCTL_VFE)
6432 {
6433 /* It is 802.1q packet indeed, let's filter by VID */
6434 if (RCTL & RCTL_CFIEN)
6435 {
6436 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", pThis->szPrf,
6437 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6438 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6439 !!(RCTL & RCTL_CFI)));
6440 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6441 {
6442 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6443 pThis->szPrf, E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6444 return false;
6445 }
6446 }
6447 else
6448 E1kLog3(("%s VLAN filter: VLAN=%d\n", pThis->szPrf,
6449 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6450 if (!ASMBitTest(pThis->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6451 {
6452 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6453 pThis->szPrf, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6454 return false;
6455 }
6456 }
6457 }
6458 /* Broadcast filtering */
6459 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6460 return true;
6461 E1kLog2(("%s Packet filter: not a broadcast\n", pThis->szPrf));
6462 if (e1kIsMulticast(pvBuf))
6463 {
6464 /* Is multicast promiscuous enabled? */
6465 if (RCTL & RCTL_MPE)
6466 return true;
6467 E1kLog2(("%s Packet filter: no promiscuous multicast\n", pThis->szPrf));
6468 /* Try perfect matches first */
6469 if (e1kPerfectMatch(pThis, pvBuf))
6470 {
6471 pStatus->fPIF = true;
6472 return true;
6473 }
6474 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6475 if (e1kImperfectMatch(pThis, pvBuf))
6476 return true;
6477 E1kLog2(("%s Packet filter: no imperfect match\n", pThis->szPrf));
6478 }
6479 else {
6480 /* Is unicast promiscuous enabled? */
6481 if (RCTL & RCTL_UPE)
6482 return true;
6483 E1kLog2(("%s Packet filter: no promiscuous unicast\n", pThis->szPrf));
6484 if (e1kPerfectMatch(pThis, pvBuf))
6485 {
6486 pStatus->fPIF = true;
6487 return true;
6488 }
6489 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6490 }
6491 E1kLog2(("%s Packet filter: packet discarded\n", pThis->szPrf));
6492 return false;
6493}
6494
6495/**
6496 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6497 */
6498static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6499{
6500 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6501 int rc = VINF_SUCCESS;
6502
6503 /*
6504 * Drop packets if the VM is not running yet/anymore.
6505 */
6506 VMSTATE enmVMState = PDMDevHlpVMState(STATE_TO_DEVINS(pThis));
6507 if ( enmVMState != VMSTATE_RUNNING
6508 && enmVMState != VMSTATE_RUNNING_LS)
6509 {
6510 E1kLog(("%s Dropping incoming packet as VM is not running.\n", pThis->szPrf));
6511 return VINF_SUCCESS;
6512 }
6513
6514 /* Discard incoming packets in locked state */
6515 if (!(RCTL & RCTL_EN) || pThis->fLocked || !(STATUS & STATUS_LU))
6516 {
6517 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", pThis->szPrf));
6518 return VINF_SUCCESS;
6519 }
6520
6521 STAM_PROFILE_ADV_START(&pThis->StatReceive, a);
6522
6523 //if (!e1kCsEnter(pThis, RT_SRC_POS))
6524 // return VERR_PERMISSION_DENIED;
6525
6526 e1kPacketDump(pThis, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6527
6528 /* Update stats */
6529 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
6530 {
6531 E1K_INC_CNT32(TPR);
6532 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6533 e1kCsLeave(pThis);
6534 }
6535 STAM_PROFILE_ADV_START(&pThis->StatReceiveFilter, a);
6536 E1KRXDST status;
6537 RT_ZERO(status);
6538 bool fPassed = e1kAddressFilter(pThis, pvBuf, cb, &status);
6539 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveFilter, a);
6540 if (fPassed)
6541 {
6542 rc = e1kHandleRxPacket(pThis, pvBuf, cb, status);
6543 }
6544 //e1kCsLeave(pThis);
6545 STAM_PROFILE_ADV_STOP(&pThis->StatReceive, a);
6546
6547 return rc;
6548}
6549
6550
6551/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
6552
6553/**
6554 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
6555 */
6556static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6557{
6558 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, ILeds);
6559 int rc = VERR_PDM_LUN_NOT_FOUND;
6560
6561 if (iLUN == 0)
6562 {
6563 *ppLed = &pThis->led;
6564 rc = VINF_SUCCESS;
6565 }
6566 return rc;
6567}
6568
6569
6570/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
6571
6572/**
6573 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
6574 */
6575static DECLCALLBACK(int) e1kR3GetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6576{
6577 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6578 pThis->eeprom.getMac(pMac);
6579 return VINF_SUCCESS;
6580}
6581
6582/**
6583 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
6584 */
6585static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kR3GetLinkState(PPDMINETWORKCONFIG pInterface)
6586{
6587 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6588 if (STATUS & STATUS_LU)
6589 return PDMNETWORKLINKSTATE_UP;
6590 return PDMNETWORKLINKSTATE_DOWN;
6591}
6592
6593/**
6594 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
6595 */
6596static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6597{
6598 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6599
6600 E1kLog(("%s e1kR3SetLinkState: enmState=%d\n", pThis->szPrf, enmState));
6601 switch (enmState)
6602 {
6603 case PDMNETWORKLINKSTATE_UP:
6604 pThis->fCableConnected = true;
6605 /* If link was down, bring it up after a while. */
6606 if (!(STATUS & STATUS_LU))
6607 e1kBringLinkUpDelayed(pThis);
6608 break;
6609 case PDMNETWORKLINKSTATE_DOWN:
6610 pThis->fCableConnected = false;
6611 /* Always set the phy link state to down, regardless of the STATUS_LU bit.
6612 * We might have to set the link state before the driver initializes us. */
6613 Phy::setLinkStatus(&pThis->phy, false);
6614 /* If link was up, bring it down. */
6615 if (STATUS & STATUS_LU)
6616 e1kR3LinkDown(pThis);
6617 break;
6618 case PDMNETWORKLINKSTATE_DOWN_RESUME:
6619 /*
6620 * There is not much sense in bringing down the link if it has not come up yet.
6621              * If it is up though, we bring it down temporarily, then bring it up again.
6622 */
6623 if (STATUS & STATUS_LU)
6624 e1kR3LinkDownTemp(pThis);
6625 break;
6626 default:
6627 ;
6628 }
6629 return VINF_SUCCESS;
6630}
6631
6632
6633/* -=-=-=-=- PDMIBASE -=-=-=-=- */
6634
6635/**
6636 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6637 */
6638static DECLCALLBACK(void *) e1kR3QueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6639{
6640 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, IBase);
6641 Assert(&pThis->IBase == pInterface);
6642
6643 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
6644 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThis->INetworkDown);
6645 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThis->INetworkConfig);
6646 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThis->ILeds);
6647 return NULL;
6648}
6649
6650
6651/* -=-=-=-=- Saved State -=-=-=-=- */
6652
6653/**
6654 * Saves the configuration.
6655 *
6656 * @param pThis The E1K state.
6657 * @param pSSM The handle to the saved state.
6658 */
6659static void e1kSaveConfig(PE1KSTATE pThis, PSSMHANDLE pSSM)
6660{
6661 SSMR3PutMem(pSSM, &pThis->macConfigured, sizeof(pThis->macConfigured));
6662 SSMR3PutU32(pSSM, pThis->eChip);
6663}
6664
6665/**
6666 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
6667 */
6668static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
6669{
6670 RT_NOREF(uPass);
6671 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6672 e1kSaveConfig(pThis, pSSM);
6673 return VINF_SSM_DONT_CALL_AGAIN;
6674}
6675
6676/**
6677 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
6678 */
6679static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6680{
6681 RT_NOREF(pSSM);
6682 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6683
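    /* Enter and immediately leave the device critical section to make sure no other
     * thread is in the middle of modifying the state we are about to save. */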
6684 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6685 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6686 return rc;
6687 e1kCsLeave(pThis);
6688 return VINF_SUCCESS;
6689#if 0
6690 /* 1) Prevent all threads from modifying the state and memory */
6691 //pThis->fLocked = true;
6692 /* 2) Cancel all timers */
6693#ifdef E1K_TX_DELAY
6694 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
6695#endif /* E1K_TX_DELAY */
6696//#ifdef E1K_USE_TX_TIMERS
6697 if (pThis->fTidEnabled)
6698 {
6699 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
6700#ifndef E1K_NO_TAD
6701 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
6702#endif /* E1K_NO_TAD */
6703 }
6704//#endif /* E1K_USE_TX_TIMERS */
6705#ifdef E1K_USE_RX_TIMERS
6706 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
6707 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
6708#endif /* E1K_USE_RX_TIMERS */
6709 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
6710 /* 3) Did I forget anything? */
6711 E1kLog(("%s Locked\n", pThis->szPrf));
6712 return VINF_SUCCESS;
6713#endif
6714}
6715
6716/**
6717 * @callback_method_impl{FNSSMDEVSAVEEXEC}
6718 */
6719static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6720{
6721 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6722
6723 e1kSaveConfig(pThis, pSSM);
6724 pThis->eeprom.save(pSSM);
6725 e1kDumpState(pThis);
6726 SSMR3PutMem(pSSM, pThis->auRegs, sizeof(pThis->auRegs));
6727 SSMR3PutBool(pSSM, pThis->fIntRaised);
6728 Phy::saveState(pSSM, &pThis->phy);
6729 SSMR3PutU32(pSSM, pThis->uSelectedReg);
6730 SSMR3PutMem(pSSM, pThis->auMTA, sizeof(pThis->auMTA));
6731 SSMR3PutMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6732 SSMR3PutMem(pSSM, pThis->auVFTA, sizeof(pThis->auVFTA));
6733 SSMR3PutU64(pSSM, pThis->u64AckedAt);
6734 SSMR3PutU16(pSSM, pThis->u16RxBSize);
6735 //SSMR3PutBool(pSSM, pThis->fDelayInts);
6736 //SSMR3PutBool(pSSM, pThis->fIntMaskUsed);
6737 SSMR3PutU16(pSSM, pThis->u16TxPktLen);
6738 /** @todo State wrt the TSE buffer is incomplete, so there is little point in
6739 * saving this actually. */
6740 SSMR3PutMem(pSSM, pThis->aTxPacketFallback, pThis->u16TxPktLen);
6741 SSMR3PutBool(pSSM, pThis->fIPcsum);
6742 SSMR3PutBool(pSSM, pThis->fTCPcsum);
6743 SSMR3PutMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6744 SSMR3PutMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6745 SSMR3PutBool(pSSM, pThis->fVTag);
6746 SSMR3PutU16(pSSM, pThis->u16VTagTCI);
6747#ifdef E1K_WITH_TXD_CACHE
6748#if 0
6749 SSMR3PutU8(pSSM, pThis->nTxDFetched);
6750 SSMR3PutMem(pSSM, pThis->aTxDescriptors,
6751 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6752#else
6753 /*
6754 * There is no point in storing TX descriptor cache entries as we can simply
6755      * fetch them again. Moreover, the cache is normally empty when we
6756      * save the state. Store zero entries for compatibility.
6757 */
6758 SSMR3PutU8(pSSM, 0);
6759#endif
6760#endif /* E1K_WITH_TXD_CACHE */
6761/** @todo GSO requires some more state here. */
6762 E1kLog(("%s State has been saved\n", pThis->szPrf));
6763 return VINF_SUCCESS;
6764}
6765
6766#if 0
6767/**
6768 * @callback_method_impl{FNSSMDEVSAVEDONE}
6769 */
6770static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6771{
6772 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6773
6774 /* If VM is being powered off unlocking will result in assertions in PGM */
6775 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
6776 pThis->fLocked = false;
6777 else
6778 E1kLog(("%s VM is not running -- remain locked\n", pThis->szPrf));
6779 E1kLog(("%s Unlocked\n", pThis->szPrf));
6780 return VINF_SUCCESS;
6781}
6782#endif
6783
6784/**
6785 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
6786 */
6787static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6788{
6789 RT_NOREF(pSSM);
6790 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6791
6792 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6793 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6794 return rc;
6795 e1kCsLeave(pThis);
6796 return VINF_SUCCESS;
6797}
6798
6799/**
6800 * @callback_method_impl{FNSSMDEVLOADEXEC}
6801 */
6802static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
6803{
6804 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6805 int rc;
6806
6807 if ( uVersion != E1K_SAVEDSTATE_VERSION
6808#ifdef E1K_WITH_TXD_CACHE
6809 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
6810#endif /* E1K_WITH_TXD_CACHE */
6811 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
6812 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
6813 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
6814
6815 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
6816 || uPass != SSM_PASS_FINAL)
6817 {
6818 /* config checks */
6819 RTMAC macConfigured;
6820 rc = SSMR3GetMem(pSSM, &macConfigured, sizeof(macConfigured));
6821 AssertRCReturn(rc, rc);
6822 if ( memcmp(&macConfigured, &pThis->macConfigured, sizeof(macConfigured))
6823 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
6824 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
6825
6826 E1KCHIP eChip;
6827 rc = SSMR3GetU32(pSSM, &eChip);
6828 AssertRCReturn(rc, rc);
6829 if (eChip != pThis->eChip)
6830 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
6831 }
6832
6833 if (uPass == SSM_PASS_FINAL)
6834 {
6835 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
6836 {
6837 rc = pThis->eeprom.load(pSSM);
6838 AssertRCReturn(rc, rc);
6839 }
6840 /* the state */
6841 SSMR3GetMem(pSSM, &pThis->auRegs, sizeof(pThis->auRegs));
6842 SSMR3GetBool(pSSM, &pThis->fIntRaised);
6843 /** @todo PHY could be made a separate device with its own versioning */
6844 Phy::loadState(pSSM, &pThis->phy);
6845 SSMR3GetU32(pSSM, &pThis->uSelectedReg);
6846 SSMR3GetMem(pSSM, &pThis->auMTA, sizeof(pThis->auMTA));
6847 SSMR3GetMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6848 SSMR3GetMem(pSSM, &pThis->auVFTA, sizeof(pThis->auVFTA));
6849 SSMR3GetU64(pSSM, &pThis->u64AckedAt);
6850 SSMR3GetU16(pSSM, &pThis->u16RxBSize);
6851 //SSMR3GetBool(pSSM, pThis->fDelayInts);
6852 //SSMR3GetBool(pSSM, pThis->fIntMaskUsed);
6853 SSMR3GetU16(pSSM, &pThis->u16TxPktLen);
6854 SSMR3GetMem(pSSM, &pThis->aTxPacketFallback[0], pThis->u16TxPktLen);
6855 SSMR3GetBool(pSSM, &pThis->fIPcsum);
6856 SSMR3GetBool(pSSM, &pThis->fTCPcsum);
6857 SSMR3GetMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6858 rc = SSMR3GetMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6859 AssertRCReturn(rc, rc);
6860 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
6861 {
6862 SSMR3GetBool(pSSM, &pThis->fVTag);
6863 rc = SSMR3GetU16(pSSM, &pThis->u16VTagTCI);
6864 AssertRCReturn(rc, rc);
6865 }
6866 else
6867 {
6868 pThis->fVTag = false;
6869 pThis->u16VTagTCI = 0;
6870 }
6871#ifdef E1K_WITH_TXD_CACHE
6872 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
6873 {
6874 rc = SSMR3GetU8(pSSM, &pThis->nTxDFetched);
6875 AssertRCReturn(rc, rc);
6876 if (pThis->nTxDFetched)
6877 SSMR3GetMem(pSSM, pThis->aTxDescriptors,
6878 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6879 }
6880 else
6881 pThis->nTxDFetched = 0;
6882 /*
6883          * @todo Perhaps we should not store the TXD cache as the entries can be
6884          * simply fetched again from the guest's memory. Or can't they?
6885 */
6886#endif /* E1K_WITH_TXD_CACHE */
6887#ifdef E1K_WITH_RXD_CACHE
6888 /*
6889 * There is no point in storing the RX descriptor cache in the saved
6890      * state; we just need to make sure it is empty.
6891 */
6892 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
6893#endif /* E1K_WITH_RXD_CACHE */
6894 /* derived state */
6895 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
6896
6897 E1kLog(("%s State has been restored\n", pThis->szPrf));
6898 e1kDumpState(pThis);
6899 }
6900 return VINF_SUCCESS;
6901}
6902
6903/**
6904 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
6905 */
6906static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6907{
6908 RT_NOREF(pSSM);
6909 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6910
6911 /* Update promiscuous mode */
6912 if (pThis->pDrvR3)
6913 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3,
6914 !!(RCTL & (RCTL_UPE | RCTL_MPE)));
6915
6916 /*
6917 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
6918      * passed to us. We go through all this stuff if the link was up and we
6919      * weren't teleported.
6920 */
6921 if ( (STATUS & STATUS_LU)
6922 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
6923 && pThis->cMsLinkUpDelay)
6924 {
6925 e1kR3LinkDownTemp(pThis);
6926 }
6927 return VINF_SUCCESS;
6928}
6929
6930
6931
6932/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
6933
6934/**
6935 * @callback_method_impl{FNRTSTRFORMATTYPE}
6936 */
6937static DECLCALLBACK(size_t) e1kFmtRxDesc(PFNRTSTROUTPUT pfnOutput,
6938 void *pvArgOutput,
6939 const char *pszType,
6940 void const *pvValue,
6941 int cchWidth,
6942 int cchPrecision,
6943 unsigned fFlags,
6944 void *pvUser)
6945{
6946 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
6947 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
6948 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
6949 if (!pDesc)
6950 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
6951
6952 size_t cbPrintf = 0;
6953 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
6954 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
6955 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
6956 pDesc->status.fPIF ? "PIF" : "pif",
6957 pDesc->status.fIPCS ? "IPCS" : "ipcs",
6958 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
6959 pDesc->status.fVP ? "VP" : "vp",
6960 pDesc->status.fIXSM ? "IXSM" : "ixsm",
6961 pDesc->status.fEOP ? "EOP" : "eop",
6962 pDesc->status.fDD ? "DD" : "dd",
6963 pDesc->status.fRXE ? "RXE" : "rxe",
6964 pDesc->status.fIPE ? "IPE" : "ipe",
6965 pDesc->status.fTCPE ? "TCPE" : "tcpe",
6966 pDesc->status.fCE ? "CE" : "ce",
6967 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
6968 E1K_SPEC_VLAN(pDesc->status.u16Special),
6969 E1K_SPEC_PRI(pDesc->status.u16Special));
6970 return cbPrintf;
6971}
6972
6973/**
6974 * @callback_method_impl{FNRTSTRFORMATTYPE}
6975 */
6976static DECLCALLBACK(size_t) e1kFmtTxDesc(PFNRTSTROUTPUT pfnOutput,
6977 void *pvArgOutput,
6978 const char *pszType,
6979 void const *pvValue,
6980 int cchWidth,
6981 int cchPrecision,
6982 unsigned fFlags,
6983 void *pvUser)
6984{
6985 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
6986 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
6987 E1KTXDESC *pDesc = (E1KTXDESC*)pvValue;
6988 if (!pDesc)
6989 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
6990
6991 size_t cbPrintf = 0;
6992 switch (e1kGetDescType(pDesc))
6993 {
6994 case E1K_DTYP_CONTEXT:
6995 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
6996 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
6997 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
6998 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
6999 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
7000 pDesc->context.dw2.fIDE ? " IDE":"",
7001 pDesc->context.dw2.fRS ? " RS" :"",
7002 pDesc->context.dw2.fTSE ? " TSE":"",
7003 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
7004 pDesc->context.dw2.fTCP ? "TCP":"UDP",
7005 pDesc->context.dw2.u20PAYLEN,
7006 pDesc->context.dw3.u8HDRLEN,
7007 pDesc->context.dw3.u16MSS,
7008 pDesc->context.dw3.fDD?"DD":"");
7009 break;
7010 case E1K_DTYP_DATA:
7011 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
7012 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
7013 pDesc->data.u64BufAddr,
7014 pDesc->data.cmd.u20DTALEN,
7015 pDesc->data.cmd.fIDE ? " IDE" :"",
7016 pDesc->data.cmd.fVLE ? " VLE" :"",
7017 pDesc->data.cmd.fRPS ? " RPS" :"",
7018 pDesc->data.cmd.fRS ? " RS" :"",
7019 pDesc->data.cmd.fTSE ? " TSE" :"",
7020 pDesc->data.cmd.fIFCS? " IFCS":"",
7021 pDesc->data.cmd.fEOP ? " EOP" :"",
7022 pDesc->data.dw3.fDD ? " DD" :"",
7023 pDesc->data.dw3.fEC ? " EC" :"",
7024 pDesc->data.dw3.fLC ? " LC" :"",
7025 pDesc->data.dw3.fTXSM? " TXSM":"",
7026 pDesc->data.dw3.fIXSM? " IXSM":"",
7027 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
7028 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
7029 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
7030 break;
7031 case E1K_DTYP_LEGACY:
7032 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
7033 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
7034 pDesc->data.u64BufAddr,
7035 pDesc->legacy.cmd.u16Length,
7036 pDesc->legacy.cmd.fIDE ? " IDE" :"",
7037 pDesc->legacy.cmd.fVLE ? " VLE" :"",
7038 pDesc->legacy.cmd.fRPS ? " RPS" :"",
7039 pDesc->legacy.cmd.fRS ? " RS" :"",
7040 pDesc->legacy.cmd.fIC ? " IC" :"",
7041 pDesc->legacy.cmd.fIFCS? " IFCS":"",
7042 pDesc->legacy.cmd.fEOP ? " EOP" :"",
7043 pDesc->legacy.dw3.fDD ? " DD" :"",
7044 pDesc->legacy.dw3.fEC ? " EC" :"",
7045 pDesc->legacy.dw3.fLC ? " LC" :"",
7046 pDesc->legacy.cmd.u8CSO,
7047 pDesc->legacy.dw3.u8CSS,
7048 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
7049 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
7050 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
7051 break;
7052 default:
7053 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
7054 break;
7055 }
7056
7057 return cbPrintf;
7058}
7059
7060/** Initializes debug helpers (logging format types). */
7061static int e1kInitDebugHelpers(void)
7062{
7063 int rc = VINF_SUCCESS;
7064 static bool s_fHelpersRegistered = false;
7065 if (!s_fHelpersRegistered)
7066 {
7067 s_fHelpersRegistered = true;
7068 rc = RTStrFormatTypeRegister("e1krxd", e1kFmtRxDesc, NULL);
7069 AssertRCReturn(rc, rc);
7070 rc = RTStrFormatTypeRegister("e1ktxd", e1kFmtTxDesc, NULL);
7071 AssertRCReturn(rc, rc);
7072 }
7073 return rc;
7074}
7075
7076/**
7077 * Status info callback.
7078 *
7079 * @param pDevIns The device instance.
7080 * @param pHlp The output helpers.
7081 * @param pszArgs The arguments.
7082 */
7083static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
7084{
7085 RT_NOREF(pszArgs);
7086 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7087 unsigned i;
7088 // bool fRcvRing = false;
7089 // bool fXmtRing = false;
7090
7091 /*
7092 * Parse args.
7093 if (pszArgs)
7094 {
7095 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
7096 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
7097 }
7098 */
7099
7100 /*
7101 * Show info.
7102 */
7103 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%RTiop mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7104 pDevIns->iInstance, pThis->IOPortBase, pThis->addrMMReg,
7105 &pThis->macConfigured, g_aChips[pThis->eChip].pcszName,
7106 pThis->fRCEnabled ? " GC" : "", pThis->fR0Enabled ? " R0" : "");
7107
7108 e1kCsEnter(pThis, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
7109
7110 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7111 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", g_aE1kRegMap[i].abbrev, pThis->auRegs[i]);
7112
7113 for (i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
7114 {
7115 E1KRAELEM* ra = pThis->aRecAddr.array + i;
7116 if (ra->ctl & RA_CTL_AV)
7117 {
7118 const char *pcszTmp;
7119 switch (ra->ctl & RA_CTL_AS)
7120 {
7121 case 0: pcszTmp = "DST"; break;
7122 case 1: pcszTmp = "SRC"; break;
7123 default: pcszTmp = "reserved";
7124 }
7125 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7126 }
7127 }
7128 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7129 uint32_t rdh = RDH;
7130 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7131 for (i = 0; i < cDescs; ++i)
7132 {
7133 E1KRXDESC desc;
7134 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7135 &desc, sizeof(desc));
7136 if (i == rdh)
7137 pHlp->pfnPrintf(pHlp, ">>> ");
7138 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7139 }
7140#ifdef E1K_WITH_RXD_CACHE
7141 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7142 pThis->iRxDCurrent, RDH, pThis->nRxDFetched, E1K_RXD_CACHE_SIZE);
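    /* Convert cache positions back to ring indices so each cached descriptor can be shown
     * with its guest-physical address; the first cached entry is iRxDCurrent slots before RDH. */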
7143 if (rdh > pThis->iRxDCurrent)
7144 rdh -= pThis->iRxDCurrent;
7145 else
7146 rdh = cDescs + rdh - pThis->iRxDCurrent;
7147 for (i = 0; i < pThis->nRxDFetched; ++i)
7148 {
7149 if (i == pThis->iRxDCurrent)
7150 pHlp->pfnPrintf(pHlp, ">>> ");
7151 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7152 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7153 &pThis->aRxDescriptors[i]);
7154 }
7155#endif /* E1K_WITH_RXD_CACHE */
7156
7157 cDescs = TDLEN / sizeof(E1KTXDESC);
7158 uint32_t tdh = TDH;
7159 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7160 for (i = 0; i < cDescs; ++i)
7161 {
7162 E1KTXDESC desc;
7163 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7164 &desc, sizeof(desc));
7165 if (i == tdh)
7166 pHlp->pfnPrintf(pHlp, ">>> ");
7167 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7168 }
7169#ifdef E1K_WITH_TXD_CACHE
7170 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7171 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE);
7172 if (tdh > pThis->iTxDCurrent)
7173 tdh -= pThis->iTxDCurrent;
7174 else
7175 tdh = cDescs + tdh - pThis->iTxDCurrent;
7176 for (i = 0; i < pThis->nTxDFetched; ++i)
7177 {
7178 if (i == pThis->iTxDCurrent)
7179 pHlp->pfnPrintf(pHlp, ">>> ");
7180 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7181 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7182 &pThis->aTxDescriptors[i]);
7183 }
7184#endif /* E1K_WITH_TXD_CACHE */
7185
7186
7187#ifdef E1K_INT_STATS
7188 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pThis->uStatIntTry);
7189 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pThis->uStatInt);
7190 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pThis->uStatIntLower);
7191 pHlp->pfnPrintf(pHlp, "ICR outside ISR : %d\n", pThis->uStatNoIntICR);
7192 pHlp->pfnPrintf(pHlp, "IMS raised ints : %d\n", pThis->uStatIntIMS);
7193 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pThis->uStatIntSkip);
7194 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pThis->uStatIntMasked);
7195 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pThis->uStatIntEarly);
7196 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pThis->uStatIntLate);
7197 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pThis->iStatIntLost);
7198 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pThis->uStatIntRx);
7199 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pThis->uStatIntTx);
7200 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pThis->uStatIntICS);
7201 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pThis->uStatIntRDTR);
7202 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pThis->uStatIntRXDMT0);
7203 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pThis->uStatIntTXQE);
7204 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pThis->uStatTxIDE);
7205 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pThis->uStatTxDelayed);
7206     pHlp->pfnPrintf(pHlp, "TX delay expired: %d\n", pThis->uStatTxDelayExp);
7207 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pThis->uStatTxNoRS);
7208 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pThis->uStatTAD);
7209 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pThis->uStatTID);
7210 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pThis->uStatRAD);
7211 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pThis->uStatRID);
7212 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pThis->uStatDescCtx);
7213 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pThis->uStatDescDat);
7214 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pThis->uStatDescLeg);
7215 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pThis->uStatRxFrm);
7216 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pThis->uStatTxFrm);
7217 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pThis->uStatTx1514);
7218 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pThis->uStatTx2962);
7219 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pThis->uStatTx4410);
7220 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pThis->uStatTx5858);
7221 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pThis->uStatTx7306);
7222 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pThis->uStatTx8754);
7223 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pThis->uStatTx16384);
7224 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pThis->uStatTx32768);
7225 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pThis->uStatTxLarge);
7226#endif /* E1K_INT_STATS */
7227
7228 e1kCsLeave(pThis);
7229}
7230
7231
7232
7233/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
7234
7235/**
7236 * Detach notification.
7237 *
7238 * One port on the network card has been disconnected from the network.
7239 *
7240 * @param pDevIns The device instance.
7241 * @param iLUN The logical unit which is being detached.
7242 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7243 */
7244static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7245{
7246 RT_NOREF(fFlags);
7247 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7248 Log(("%s e1kR3Detach:\n", pThis->szPrf));
7249
7250 AssertLogRelReturnVoid(iLUN == 0);
7251
7252 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7253
7254     /** @todo r=pritesh still need to check if I missed
7255      * cleaning something up in this function
7256 */
7257
7258 /*
7259 * Zero some important members.
7260 */
7261 pThis->pDrvBase = NULL;
7262 pThis->pDrvR3 = NULL;
7263 pThis->pDrvR0 = NIL_RTR0PTR;
7264 pThis->pDrvRC = NIL_RTRCPTR;
7265
7266 PDMCritSectLeave(&pThis->cs);
7267}
7268
7269/**
7270 * Attach the Network attachment.
7271 *
7272 * One port on the network card has been connected to a network.
7273 *
7274 * @returns VBox status code.
7275 * @param pDevIns The device instance.
7276 * @param iLUN The logical unit which is being attached.
7277 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7278 *
7279 * @remarks This code path is not used during construction.
7280 */
7281static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7282{
7283 RT_NOREF(fFlags);
7284 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7285 LogFlow(("%s e1kR3Attach:\n", pThis->szPrf));
7286
7287 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7288
7289 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7290
7291 /*
7292 * Attach the driver.
7293 */
7294 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7295 if (RT_SUCCESS(rc))
7296 {
7297 if (rc == VINF_NAT_DNS)
7298 {
7299#ifdef RT_OS_LINUX
7300 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7301 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7302#else
7303 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7304 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7305#endif
7306 }
7307 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7308 AssertMsgStmt(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7309 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7310 if (RT_SUCCESS(rc))
7311 {
7312 PPDMIBASER0 pBaseR0 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0);
7313 pThis->pDrvR0 = pBaseR0 ? pBaseR0->pfnQueryInterface(pBaseR0, PDMINETWORKUP_IID) : NIL_RTR0PTR;
7314
7315 PPDMIBASERC pBaseRC = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC);
7316             pThis->pDrvRC = pBaseRC ? pBaseRC->pfnQueryInterface(pBaseRC, PDMINETWORKUP_IID) : NIL_RTRCPTR;
7317 }
7318 }
7319 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7320 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7321 {
7322 /* This should never happen because this function is not called
7323 * if there is no driver to attach! */
7324 Log(("%s No attached driver!\n", pThis->szPrf));
7325 }
7326
7327 /*
7328      * Temporarily set the link down if it was up so that the guest
7329      * will know that we have changed the configuration of the
7330      * network card.
7331 */
7332 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7333 e1kR3LinkDownTemp(pThis);
7334
7335 PDMCritSectLeave(&pThis->cs);
7336 return rc;
7337
7338}
7339
7340/**
7341 * @copydoc FNPDMDEVPOWEROFF
7342 */
7343static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7344{
7345 /* Poke thread waiting for buffer space. */
7346 e1kWakeupReceive(pDevIns);
7347}
7348
7349/**
7350 * @copydoc FNPDMDEVRESET
7351 */
7352static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7353{
7354 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7355#ifdef E1K_TX_DELAY
7356 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
7357#endif /* E1K_TX_DELAY */
7358 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
7359 e1kCancelTimer(pThis, pThis->CTX_SUFF(pLUTimer));
7360 e1kXmitFreeBuf(pThis);
7361 pThis->u16TxPktLen = 0;
7362 pThis->fIPcsum = false;
7363 pThis->fTCPcsum = false;
7364 pThis->fIntMaskUsed = false;
7365 pThis->fDelayInts = false;
7366 pThis->fLocked = false;
7367 pThis->u64AckedAt = 0;
7368 e1kHardReset(pThis);
7369}
7370
7371/**
7372 * @copydoc FNPDMDEVSUSPEND
7373 */
7374static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7375{
7376 /* Poke thread waiting for buffer space. */
7377 e1kWakeupReceive(pDevIns);
7378}
7379
7380/**
7381 * Device relocation callback.
7382 *
7383  * When this callback is called the device instance data and, if the
7384  * device has a GC component, its GC pointers are being relocated, and/or the
7385  * selectors have been changed. The device must use this chance to perform
7386  * the necessary pointer relocations and data updates.
7387  *
7388  * Before the GC code is executed the first time, this function will be
7389  * called with a 0 delta so GC pointer calculations can be done in one place.
7390 *
7391 * @param pDevIns Pointer to the device instance.
7392 * @param offDelta The relocation delta relative to the old location.
7393 *
7394 * @remark A relocation CANNOT fail.
7395 */
7396static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7397{
7398 RT_NOREF(offDelta);
7399 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7400 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7401 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7402 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7403#ifdef E1K_USE_RX_TIMERS
7404 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7405 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7406#endif /* E1K_USE_RX_TIMERS */
7407//#ifdef E1K_USE_TX_TIMERS
7408 if (pThis->fTidEnabled)
7409 {
7410 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7411# ifndef E1K_NO_TAD
7412 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7413# endif /* E1K_NO_TAD */
7414 }
7415//#endif /* E1K_USE_TX_TIMERS */
7416#ifdef E1K_TX_DELAY
7417 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7418#endif /* E1K_TX_DELAY */
7419 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7420 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7421}
7422
7423/**
7424 * Destruct a device instance.
7425 *
7426 * We need to free non-VM resources only.
7427 *
7428 * @returns VBox status code.
7429 * @param pDevIns The device instance data.
7430 * @thread EMT
7431 */
7432static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7433{
7434 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7435 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7436
7437 e1kDumpState(pThis);
7438 E1kLog(("%s Destroying instance\n", pThis->szPrf));
7439 if (PDMCritSectIsInitialized(&pThis->cs))
7440 {
7441 if (pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
7442 {
7443 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
7444 RTSemEventDestroy(pThis->hEventMoreRxDescAvail);
7445 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7446 }
7447#ifdef E1K_WITH_TX_CS
7448 PDMR3CritSectDelete(&pThis->csTx);
7449#endif /* E1K_WITH_TX_CS */
7450 PDMR3CritSectDelete(&pThis->csRx);
7451 PDMR3CritSectDelete(&pThis->cs);
7452 }
7453 return VINF_SUCCESS;
7454}
7455
7456
7457/**
7458 * Set PCI configuration space registers.
7459 *
7460  * @param   pPciDev     Pointer to the PCI device structure.
7461 * @thread EMT
7462 */
7463static DECLCALLBACK(void) e1kConfigurePciDev(PPDMPCIDEV pPciDev, E1KCHIP eChip)
7464{
7465 Assert(eChip < RT_ELEMENTS(g_aChips));
7466 /* Configure PCI Device, assume 32-bit mode ******************************/
7467 PCIDevSetVendorId(pPciDev, g_aChips[eChip].uPCIVendorId);
7468 PCIDevSetDeviceId(pPciDev, g_aChips[eChip].uPCIDeviceId);
7469 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_aChips[eChip].uPCISubsystemVendorId);
7470 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_aChips[eChip].uPCISubsystemId);
7471
7472 PCIDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7473 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7474 PCIDevSetWord( pPciDev, VBOX_PCI_STATUS,
7475 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7476 /* Stepping A2 */
7477 PCIDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7478 /* Ethernet adapter */
7479 PCIDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7480 PCIDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7481 /* normal single function Ethernet controller */
7482 PCIDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7483 /* Memory Register Base Address */
7484 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7485 /* Memory Flash Base Address */
7486 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7487 /* IO Register Base Address */
7488 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7489 /* Expansion ROM Base Address */
7490 PCIDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7491 /* Capabilities Pointer */
7492 PCIDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7493 /* Interrupt Pin: INTA# */
7494 PCIDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7495 /* Max_Lat/Min_Gnt: very high priority and time slice */
7496 PCIDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7497 PCIDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7498
7499 /* PCI Power Management Registers ****************************************/
7500 /* Capability ID: PCI Power Management Registers */
7501 PCIDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7502 /* Next Item Pointer: PCI-X */
7503 PCIDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7504 /* Power Management Capabilities: PM disabled, DSI */
7505 PCIDevSetWord( pPciDev, 0xDC + 2,
7506 0x0002 | VBOX_PCI_PM_CAP_DSI);
7507 /* Power Management Control / Status Register: PM disabled */
7508 PCIDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7509 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7510 PCIDevSetByte( pPciDev, 0xDC + 6, 0x00);
7511 /* Data Register: PM disabled, always 0 */
7512 PCIDevSetByte( pPciDev, 0xDC + 7, 0x00);
7513
7514 /* PCI-X Configuration Registers *****************************************/
7515 /* Capability ID: PCI-X Configuration Registers */
7516 PCIDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7517#ifdef E1K_WITH_MSI
7518 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x80);
7519#else
7520 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7521 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7522#endif
7523 /* PCI-X Command: Enable Relaxed Ordering */
7524 PCIDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7525     /* PCI-X Status: 32-bit, 66MHz */
7526 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7527 PCIDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
7528}
7529
7530/**
7531 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7532 */
7533static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7534{
7535 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7536 int rc;
7537 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7538
7539 /*
7540 * Initialize the instance data (state).
7541 * Note! Caller has initialized it to ZERO already.
7542 */
7543 RTStrPrintf(pThis->szPrf, sizeof(pThis->szPrf), "E1000#%d", iInstance);
7544 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", pThis->szPrf, sizeof(E1KRXDESC)));
7545 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7546 pThis->pDevInsR3 = pDevIns;
7547 pThis->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
7548 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7549 pThis->u16TxPktLen = 0;
7550 pThis->fIPcsum = false;
7551 pThis->fTCPcsum = false;
7552 pThis->fIntMaskUsed = false;
7553 pThis->fDelayInts = false;
7554 pThis->fLocked = false;
7555 pThis->u64AckedAt = 0;
7556 pThis->led.u32Magic = PDMLED_MAGIC;
7557 pThis->u32PktNo = 1;
7558
7559 /* Interfaces */
7560 pThis->IBase.pfnQueryInterface = e1kR3QueryInterface;
7561
7562 pThis->INetworkDown.pfnWaitReceiveAvail = e1kR3NetworkDown_WaitReceiveAvail;
7563 pThis->INetworkDown.pfnReceive = e1kR3NetworkDown_Receive;
7564 pThis->INetworkDown.pfnXmitPending = e1kR3NetworkDown_XmitPending;
7565
7566 pThis->ILeds.pfnQueryStatusLed = e1kR3QueryStatusLed;
7567
7568 pThis->INetworkConfig.pfnGetMac = e1kR3GetMac;
7569 pThis->INetworkConfig.pfnGetLinkState = e1kR3GetLinkState;
7570 pThis->INetworkConfig.pfnSetLinkState = e1kR3SetLinkState;
7571
7572 /*
7573 * Internal validations.
7574 */
7575 for (uint32_t iReg = 1; iReg < E1K_NUM_OF_BINARY_SEARCHABLE; iReg++)
7576 AssertLogRelMsgReturn( g_aE1kRegMap[iReg].offset > g_aE1kRegMap[iReg - 1].offset
7577 && g_aE1kRegMap[iReg].offset + g_aE1kRegMap[iReg].size
7578 >= g_aE1kRegMap[iReg - 1].offset + g_aE1kRegMap[iReg - 1].size,
7579 ("%s@%#xLB%#x vs %s@%#xLB%#x\n",
7580 g_aE1kRegMap[iReg].abbrev, g_aE1kRegMap[iReg].offset, g_aE1kRegMap[iReg].size,
7581 g_aE1kRegMap[iReg - 1].abbrev, g_aE1kRegMap[iReg - 1].offset, g_aE1kRegMap[iReg - 1].size),
7582 VERR_INTERNAL_ERROR_4);
7583
7584 /*
7585 * Validate configuration.
7586 */
7587 if (!CFGMR3AreValuesValid(pCfg, "MAC\0" "CableConnected\0" "AdapterType\0"
7588 "LineSpeed\0" "GCEnabled\0" "R0Enabled\0"
7589 "ItrEnabled\0" "ItrRxEnabled\0"
7590 "EthernetCRC\0" "GSOEnabled\0" "LinkUpDelay\0"))
7591 return PDMDEV_SET_ERROR(pDevIns, VERR_PDM_DEVINS_UNKNOWN_CFG_VALUES,
7592 N_("Invalid configuration for E1000 device"));
7593
7594 /** @todo LineSpeed unused! */
7595
7596 /* Get config params */
7597 rc = CFGMR3QueryBytes(pCfg, "MAC", pThis->macConfigured.au8, sizeof(pThis->macConfigured.au8));
7598 if (RT_FAILURE(rc))
7599 return PDMDEV_SET_ERROR(pDevIns, rc,
7600 N_("Configuration error: Failed to get MAC address"));
7601 rc = CFGMR3QueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
7602 if (RT_FAILURE(rc))
7603 return PDMDEV_SET_ERROR(pDevIns, rc,
7604 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7605 rc = CFGMR3QueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
7606 if (RT_FAILURE(rc))
7607 return PDMDEV_SET_ERROR(pDevIns, rc,
7608 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7609 Assert(pThis->eChip <= E1K_CHIP_82545EM);
7610 rc = CFGMR3QueryBoolDef(pCfg, "GCEnabled", &pThis->fRCEnabled, true);
7611 if (RT_FAILURE(rc))
7612 return PDMDEV_SET_ERROR(pDevIns, rc,
7613 N_("Configuration error: Failed to get the value of 'GCEnabled'"));
7614
7615 rc = CFGMR3QueryBoolDef(pCfg, "R0Enabled", &pThis->fR0Enabled, true);
7616 if (RT_FAILURE(rc))
7617 return PDMDEV_SET_ERROR(pDevIns, rc,
7618 N_("Configuration error: Failed to get the value of 'R0Enabled'"));
7619
7620 rc = CFGMR3QueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
7621 if (RT_FAILURE(rc))
7622 return PDMDEV_SET_ERROR(pDevIns, rc,
7623 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7624
7625 rc = CFGMR3QueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
7626 if (RT_FAILURE(rc))
7627 return PDMDEV_SET_ERROR(pDevIns, rc,
7628 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
7629
7630 rc = CFGMR3QueryBoolDef(pCfg, "ItrEnabled", &pThis->fItrEnabled, false);
7631 if (RT_FAILURE(rc))
7632 return PDMDEV_SET_ERROR(pDevIns, rc,
7633 N_("Configuration error: Failed to get the value of 'ItrEnabled'"));
7634
7635 rc = CFGMR3QueryBoolDef(pCfg, "ItrRxEnabled", &pThis->fItrRxEnabled, true);
7636 if (RT_FAILURE(rc))
7637 return PDMDEV_SET_ERROR(pDevIns, rc,
7638 N_("Configuration error: Failed to get the value of 'ItrRxEnabled'"));
7639
7640 rc = CFGMR3QueryBoolDef(pCfg, "TidEnabled", &pThis->fTidEnabled, false);
7641 if (RT_FAILURE(rc))
7642 return PDMDEV_SET_ERROR(pDevIns, rc,
7643 N_("Configuration error: Failed to get the value of 'TidEnabled'"));
7644
7645 rc = CFGMR3QueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 5000); /* ms */
7646 if (RT_FAILURE(rc))
7647 return PDMDEV_SET_ERROR(pDevIns, rc,
7648 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
7649 Assert(pThis->cMsLinkUpDelay <= 300000); /* less than 5 minutes */
7650 if (pThis->cMsLinkUpDelay > 5000)
7651 LogRel(("%s WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
7652 else if (pThis->cMsLinkUpDelay == 0)
7653 LogRel(("%s WARNING! Link up delay is disabled!\n", pThis->szPrf));
7654
7655 LogRel(("%s Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s Itr=%s ItrRx=%s TID=%s R0=%s GC=%s\n", pThis->szPrf,
7656 g_aChips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
7657 pThis->fEthernetCRC ? "on" : "off",
7658 pThis->fGSOEnabled ? "enabled" : "disabled",
7659 pThis->fItrEnabled ? "enabled" : "disabled",
7660 pThis->fItrRxEnabled ? "enabled" : "disabled",
7661 pThis->fTidEnabled ? "enabled" : "disabled",
7662 pThis->fR0Enabled ? "enabled" : "disabled",
7663 pThis->fRCEnabled ? "enabled" : "disabled"));
7664
7665 /* Initialize the EEPROM. */
7666 pThis->eeprom.init(pThis->macConfigured);
7667
7668 /* Initialize internal PHY. */
7669 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
7670 Phy::setLinkStatus(&pThis->phy, pThis->fCableConnected);
7671
7672 /* Initialize critical sections. We do our own locking. */
7673 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
7674 AssertRCReturn(rc, rc);
7675
7676 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "E1000#%d", iInstance);
7677 if (RT_FAILURE(rc))
7678 return rc;
7679 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "E1000#%dRX", iInstance);
7680 if (RT_FAILURE(rc))
7681 return rc;
7682#ifdef E1K_WITH_TX_CS
7683 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "E1000#%dTX", iInstance);
7684 if (RT_FAILURE(rc))
7685 return rc;
7686#endif /* E1K_WITH_TX_CS */
7687
7688 /* Saved state registration. */
7689 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
7690 NULL, e1kLiveExec, NULL,
7691 e1kSavePrep, e1kSaveExec, NULL,
7692 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
7693 if (RT_FAILURE(rc))
7694 return rc;
7695
7696 /* Set PCI config registers and register ourselves with the PCI bus. */
7697 e1kConfigurePciDev(&pThis->pciDevice, pThis->eChip);
7698 rc = PDMDevHlpPCIRegister(pDevIns, &pThis->pciDevice);
7699 if (RT_FAILURE(rc))
7700 return rc;
7701
7702#ifdef E1K_WITH_MSI
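 /* Advertise a single MSI vector, placing the MSI capability at offset 0x80 in PCI config space. */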
7703 PDMMSIREG MsiReg;
7704 RT_ZERO(MsiReg);
7705 MsiReg.cMsiVectors = 1;
7706 MsiReg.iMsiCapOffset = 0x80;
7707 MsiReg.iMsiNextOffset = 0x0;
7708 MsiReg.fMsi64bit = false;
7709 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
7710 AssertRCReturn(rc, rc);
7711#endif
7712
7713
7714 /* Map our registers to memory space (region 0, see e1kConfigurePciDev) */
7715 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, e1kMap);
7716 if (RT_FAILURE(rc))
7717 return rc;
7718#ifdef E1K_WITH_PREREG_MMIO
7719 rc = PDMDevHlpMMIOExPreRegister(pDevIns, 0, E1K_MM_SIZE, IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD, "E1000",
7720 NULL /*pvUserR3*/, e1kMMIOWrite, e1kMMIORead, NULL /*pfnFillR3*/,
7721 NIL_RTR0PTR /*pvUserR0*/, pThis->fR0Enabled ? "e1kMMIOWrite" : NULL,
7722 pThis->fR0Enabled ? "e1kMMIORead" : NULL, NULL /*pszFillR0*/,
7723 NIL_RTRCPTR /*pvUserRC*/, pThis->fRCEnabled ? "e1kMMIOWrite" : NULL,
7724 pThis->fRCEnabled ? "e1kMMIORead" : NULL, NULL /*pszFillRC*/);
7725 AssertLogRelRCReturn(rc, rc);
7726#endif
7727 /* Map our registers to IO space (region 2, see e1kConfigurePciDev) */
7728 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, E1K_IOPORT_SIZE, PCI_ADDRESS_SPACE_IO, e1kMap);
7729 if (RT_FAILURE(rc))
7730 return rc;
7731
7732 /* Create transmit queue */
7733 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7734 e1kTxQueueConsumer, true, "E1000-Xmit", &pThis->pTxQueueR3);
7735 if (RT_FAILURE(rc))
7736 return rc;
7737 pThis->pTxQueueR0 = PDMQueueR0Ptr(pThis->pTxQueueR3);
7738 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7739
7740 /* Create the RX notifier signaller. */
7741 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7742 e1kCanRxQueueConsumer, true, "E1000-Rcv", &pThis->pCanRxQueueR3);
7743 if (RT_FAILURE(rc))
7744 return rc;
7745 pThis->pCanRxQueueR0 = PDMQueueR0Ptr(pThis->pCanRxQueueR3);
7746 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7747
7748#ifdef E1K_TX_DELAY
7749 /* Create Transmit Delay Timer */
7750 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxDelayTimer, pThis,
7751 TMTIMER_FLAGS_NO_CRIT_SECT,
7752 "E1000 Transmit Delay Timer", &pThis->pTXDTimerR3);
7753 if (RT_FAILURE(rc))
7754 return rc;
7755 pThis->pTXDTimerR0 = TMTimerR0Ptr(pThis->pTXDTimerR3);
7756 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7757 TMR3TimerSetCritSect(pThis->pTXDTimerR3, &pThis->csTx);
7758#endif /* E1K_TX_DELAY */
7759
7760//#ifdef E1K_USE_TX_TIMERS
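 /* Transmit interrupt-delay timers are controlled at run time via the TidEnabled setting rather than the E1K_USE_TX_TIMERS build option. */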
7761 if (pThis->fTidEnabled)
7762 {
7763 /* Create Transmit Interrupt Delay Timer */
7764 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxIntDelayTimer, pThis,
7765 TMTIMER_FLAGS_NO_CRIT_SECT,
7766 "E1000 Transmit Interrupt Delay Timer", &pThis->pTIDTimerR3);
7767 if (RT_FAILURE(rc))
7768 return rc;
7769 pThis->pTIDTimerR0 = TMTimerR0Ptr(pThis->pTIDTimerR3);
7770 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7771
7772# ifndef E1K_NO_TAD
7773 /* Create Transmit Absolute Delay Timer */
7774 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxAbsDelayTimer, pThis,
7775 TMTIMER_FLAGS_NO_CRIT_SECT,
7776 "E1000 Transmit Absolute Delay Timer", &pThis->pTADTimerR3);
7777 if (RT_FAILURE(rc))
7778 return rc;
7779 pThis->pTADTimerR0 = TMTimerR0Ptr(pThis->pTADTimerR3);
7780 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7781# endif /* E1K_NO_TAD */
7782 }
7783//#endif /* E1K_USE_TX_TIMERS */
7784
7785#ifdef E1K_USE_RX_TIMERS
7786 /* Create Receive Interrupt Delay Timer */
7787 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxIntDelayTimer, pThis,
7788 TMTIMER_FLAGS_NO_CRIT_SECT,
7789 "E1000 Receive Interrupt Delay Timer", &pThis->pRIDTimerR3);
7790 if (RT_FAILURE(rc))
7791 return rc;
7792 pThis->pRIDTimerR0 = TMTimerR0Ptr(pThis->pRIDTimerR3);
7793 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7794
7795 /* Create Receive Absolute Delay Timer */
7796 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxAbsDelayTimer, pThis,
7797 TMTIMER_FLAGS_NO_CRIT_SECT,
7798 "E1000 Receive Absolute Delay Timer", &pThis->pRADTimerR3);
7799 if (RT_FAILURE(rc))
7800 return rc;
7801 pThis->pRADTimerR0 = TMTimerR0Ptr(pThis->pRADTimerR3);
7802 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7803#endif /* E1K_USE_RX_TIMERS */
7804
7805 /* Create Late Interrupt Timer */
7806 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLateIntTimer, pThis,
7807 TMTIMER_FLAGS_NO_CRIT_SECT,
7808 "E1000 Late Interrupt Timer", &pThis->pIntTimerR3);
7809 if (RT_FAILURE(rc))
7810 return rc;
7811 pThis->pIntTimerR0 = TMTimerR0Ptr(pThis->pIntTimerR3);
7812 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7813
7814 /* Create Link Up Timer */
7815 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLinkUpTimer, pThis,
7816 TMTIMER_FLAGS_NO_CRIT_SECT,
7817 "E1000 Link Up Timer", &pThis->pLUTimerR3);
7818 if (RT_FAILURE(rc))
7819 return rc;
7820 pThis->pLUTimerR0 = TMTimerR0Ptr(pThis->pLUTimerR3);
7821 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7822
7823 /* Register the info item */
7824 char szTmp[20];
7825 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
7826 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
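 /* The handler is reachable from the VM debugger as 'info e1k<instance>'. */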
7827
7828 /* Status driver */
7829 PPDMIBASE pBase;
7830 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThis->IBase, &pBase, "Status Port");
7831 if (RT_FAILURE(rc))
7832 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
7833 pThis->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
7834
7835 /* Network driver */
7836 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7837 if (RT_SUCCESS(rc))
7838 {
7839 if (rc == VINF_NAT_DNS)
7840 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7841 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7842 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7843 AssertMsgReturn(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
7844
7845 pThis->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7846 pThis->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7847 }
7848 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7849 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7850 {
7851 /* No error! */
7852 E1kLog(("%s This adapter is not attached to any network!\n", pThis->szPrf));
7853 }
7854 else
7855 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
7856
7857 rc = RTSemEventCreate(&pThis->hEventMoreRxDescAvail);
7858 if (RT_FAILURE(rc))
7859 return rc;
7860
7861 rc = e1kInitDebugHelpers();
7862 if (RT_FAILURE(rc))
7863 return rc;
7864
7865 e1kHardReset(pThis);
7866
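 /* Expose the RX/TX byte counters twice: under the public network statistics tree and under the per-device tree. */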
7867 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Public/Net/E1k%u/BytesReceived", iInstance);
7868 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Public/Net/E1k%u/BytesTransmitted", iInstance);
7869
7870 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
7871 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
7872
7873#if defined(VBOX_WITH_STATISTICS)
7874 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
7875 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
7876 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
7877 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
7878 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
7879 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
7880 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
7881 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
7882 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
7883 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
7884 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
7885 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occured", iInstance);
7886 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
7887 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
7888 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
7889 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
7890 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
7891 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
7892 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
7893 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflowWakeup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups", "/Devices/E1k%d/RxOverflowWakeup", iInstance);
7894 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
7895 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
7896 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
7897 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
7898
7899 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors","/Devices/E1k%d/TxDesc/ContexNormal", iInstance);
7900 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
7901 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
7902 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
7903 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
7904 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
7905 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
7906 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
7907 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
7908 for (unsigned iReg = 0; iReg < E1K_NUM_OF_REGS; iReg++)
7909 {
7910 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7911 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Reads", iInstance, g_aE1kRegMap[iReg].abbrev);
7912 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7913 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Writes", iInstance, g_aE1kRegMap[iReg].abbrev);
7914 }
7915#endif /* VBOX_WITH_STATISTICS */
7916
7917#ifdef E1K_INT_STATS
7918 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "u64ArmedAt", "/Devices/E1k%d/u64ArmedAt", iInstance);
7919 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatMaxTxDelay", "/Devices/E1k%d/uStatMaxTxDelay", iInstance);
7920 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatInt, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatInt", "/Devices/E1k%d/uStatInt", iInstance);
7921 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTry", "/Devices/E1k%d/uStatIntTry", iInstance);
7922 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLower", "/Devices/E1k%d/uStatIntLower", iInstance);
7923 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatNoIntICR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatNoIntICR", "/Devices/E1k%d/uStatNoIntICR", iInstance);
7924 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLost", "/Devices/E1k%d/iStatIntLost", iInstance);
7925 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLostOne", "/Devices/E1k%d/iStatIntLostOne", iInstance);
7926 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntIMS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntIMS", "/Devices/E1k%d/uStatIntIMS", iInstance);
7927 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntSkip", "/Devices/E1k%d/uStatIntSkip", iInstance);
7928 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLate", "/Devices/E1k%d/uStatIntLate", iInstance);
7929 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntMasked", "/Devices/E1k%d/uStatIntMasked", iInstance);
7930 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntEarly", "/Devices/E1k%d/uStatIntEarly", iInstance);
7931 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRx", "/Devices/E1k%d/uStatIntRx", iInstance);
7932 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTx", "/Devices/E1k%d/uStatIntTx", iInstance);
7933 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntICS", "/Devices/E1k%d/uStatIntICS", iInstance);
7934 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRDTR", "/Devices/E1k%d/uStatIntRDTR", iInstance);
7935 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRXDMT0", "/Devices/E1k%d/uStatIntRXDMT0", iInstance);
7936 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTXQE", "/Devices/E1k%d/uStatIntTXQE", iInstance);
7937 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxNoRS", "/Devices/E1k%d/uStatTxNoRS", iInstance);
7938 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxIDE", "/Devices/E1k%d/uStatTxIDE", iInstance);
7939 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayed", "/Devices/E1k%d/uStatTxDelayed", iInstance);
7940 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayExp", "/Devices/E1k%d/uStatTxDelayExp", iInstance);
7941 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTAD", "/Devices/E1k%d/uStatTAD", iInstance);
7942 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTID", "/Devices/E1k%d/uStatTID", iInstance);
7943 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRAD", "/Devices/E1k%d/uStatRAD", iInstance);
7944 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRID", "/Devices/E1k%d/uStatRID", iInstance);
7945 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRxFrm", "/Devices/E1k%d/uStatRxFrm", iInstance);
7946 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxFrm", "/Devices/E1k%d/uStatTxFrm", iInstance);
7947 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescCtx", "/Devices/E1k%d/uStatDescCtx", iInstance);
7948 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescDat", "/Devices/E1k%d/uStatDescDat", iInstance);
7949 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescLeg", "/Devices/E1k%d/uStatDescLeg", iInstance);
7950 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx1514", "/Devices/E1k%d/uStatTx1514", iInstance);
7951 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx2962", "/Devices/E1k%d/uStatTx2962", iInstance);
7952 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx4410", "/Devices/E1k%d/uStatTx4410", iInstance);
7953 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx5858", "/Devices/E1k%d/uStatTx5858", iInstance);
7954 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx7306", "/Devices/E1k%d/uStatTx7306", iInstance);
7955 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx8754", "/Devices/E1k%d/uStatTx8754", iInstance);
7956 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx16384", "/Devices/E1k%d/uStatTx16384", iInstance);
7957 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx32768", "/Devices/E1k%d/uStatTx32768", iInstance);
7958 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxLarge", "/Devices/E1k%d/uStatTxLarge", iInstance);
7959#endif /* E1K_INT_STATS */
7960
7961 return VINF_SUCCESS;
7962}
7963
7964/**
7965 * The device registration structure.
7966 */
7967const PDMDEVREG g_DeviceE1000 =
7968{
7969 /* Structure version. PDM_DEVREG_VERSION defines the current version. */
7970 PDM_DEVREG_VERSION,
7971 /* Device name. */
7972 "e1000",
7973 /* Name of guest context module (no path).
7974 * Only evaluated if PDM_DEVREG_FLAGS_RC is set. */
7975 "VBoxDDRC.rc",
7976 /* Name of ring-0 module (no path).
7977 * Only evaluated if PDM_DEVREG_FLAGS_R0 is set. */
7978 "VBoxDDR0.r0",
7979 /* The description of the device. The UTF-8 string pointed to shall, like this structure,
7980 * remain unchanged from registration till VM destruction. */
7981 "Intel PRO/1000 MT Desktop Ethernet.\n",
7982
7983 /* Flags, combination of the PDM_DEVREG_FLAGS_* \#defines. */
7984 PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0,
7985 /* Device class(es), combination of the PDM_DEVREG_CLASS_* \#defines. */
7986 PDM_DEVREG_CLASS_NETWORK,
7987 /* Maximum number of instances (per VM). */
7988 ~0U,
7989 /* Size of the instance data. */
7990 sizeof(E1KSTATE),
7991
7992 /* pfnConstruct */
7993 e1kR3Construct,
7994 /* pfnDestruct */
7995 e1kR3Destruct,
7996 /* pfnRelocate */
7997 e1kR3Relocate,
7998 /* pfnMemSetup */
7999 NULL,
8000 /* pfnPowerOn */
8001 NULL,
8002 /* pfnReset */
8003 e1kR3Reset,
8004 /* pfnSuspend */
8005 e1kR3Suspend,
8006 /* pfnResume */
8007 NULL,
8008 /* pfnAttach */
8009 e1kR3Attach,
8010 /* pfnDetach */
8011 e1kR3Detach,
8012 /* pfnQueryInterface */
8013 NULL,
8014 /* pfnInitComplete */
8015 NULL,
8016 /* pfnPowerOff */
8017 e1kR3PowerOff,
8018 /* pfnSoftReset */
8019 NULL,
8020
8021 /* u32VersionEnd */
8022 PDM_DEVREG_VERSION
8023};
8024
8025#endif /* IN_RING3 */
8026#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */