VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp@ 88529

Last change on this file: r88529, checked in by vboxsync, 4 years ago

Dev/E1000: Use a dedicated array instead of DD bit to store TX descriptor validity.

1/* $Id: DevE1000.cpp 88529 2021-04-15 12:01:54Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2020 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DEV_E1000
33#include <iprt/crc.h>
34#include <iprt/ctype.h>
35#include <iprt/net.h>
36#include <iprt/semaphore.h>
37#include <iprt/string.h>
38#include <iprt/time.h>
39#include <iprt/uuid.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/vmm/pdmnetifs.h>
42#include <VBox/vmm/pdmnetinline.h>
43#include <VBox/param.h>
44#include "VBoxDD.h"
45
46#include "DevEEPROM.h"
47#include "DevE1000Phy.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** @name E1000 Build Options
54 * @{ */
55/** @def E1K_INIT_RA0
56 * E1K_INIT_RA0 forces E1000 to set the first entry in the Receive Address
57 * filter table to the MAC address obtained from CFGM. Most guests read the MAC
58 * address from EEPROM and write it to RA[0] explicitly, but Mac OS X seems to
59 * depend on it being already set (see @bugref{4657}).
60 */
61#define E1K_INIT_RA0
62/** @def E1K_LSC_ON_RESET
63 * E1K_LSC_ON_RESET causes E1000 to generate a Link Status Change interrupt
64 * after a hard reset. This makes the E1K_LSC_ON_SLU option unnecessary.
65 * With the cable unplugged, LSC is triggered for 82543GC only.
66 */
67#define E1K_LSC_ON_RESET
68/** @def E1K_LSC_ON_SLU
69 * E1K_LSC_ON_SLU causes E1000 to generate a Link Status Change interrupt when
70 * the guest driver brings up the link via the STATUS.LU bit. Again, the only
71 * guest that requires it is Mac OS X (see @bugref{4657}).
72 */
73//#define E1K_LSC_ON_SLU
74/** @def E1K_INIT_LINKUP_DELAY
75 * E1K_INIT_LINKUP_DELAY prevents the link from coming up while the driver is
76 * still initializing (see @bugref{8624}).
77 */
78#define E1K_INIT_LINKUP_DELAY_US (2000 * 1000)
79/** @def E1K_IMS_INT_DELAY_NS
80 * E1K_IMS_INT_DELAY_NS prevents interrupt storms in Windows guests on enabling
81 * interrupts (see @bugref{8624}).
82 */
83#define E1K_IMS_INT_DELAY_NS 100
84/** @def E1K_TX_DELAY
85 * E1K_TX_DELAY aims to improve the guest-to-host transfer rate for TCP streams
86 * by not sending packets immediately. It allows several packets to be sent in
87 * one batch, reducing the number of acknowledgments. Note that it effectively
88 * disables the R0 TX path, forcing all sending to happen in R3.
89 */
90//#define E1K_TX_DELAY 150
91/** @def E1K_USE_TX_TIMERS
92 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
93 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
94 * register. Enabling it showed no positive effects on existing guests so it
95 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
96 * Ethernet Controllers Software Developer’s Manual" for more detailed
97 * explanation.
98 */
99//#define E1K_USE_TX_TIMERS
100/** @def E1K_NO_TAD
101 * E1K_NO_TAD disables one of the two timers enabled by E1K_USE_TX_TIMERS, the
102 * Transmit Absolute Delay timer. This timer sets the maximum time interval
103 * during which TX interrupts can be postponed (delayed). It has no effect
104 * if E1K_USE_TX_TIMERS is not defined.
105 */
106//#define E1K_NO_TAD
107/** @def E1K_REL_DEBUG
108 * E1K_REL_DEBUG enables debug logging at levels 1, 2 and 3 in release builds.
109 */
110//#define E1K_REL_DEBUG
111/** @def E1K_INT_STATS
112 * E1K_INT_STATS enables collection of internal statistics used for
113 * debugging of delayed interrupts, etc.
114 */
115#define E1K_INT_STATS
116/** @def E1K_WITH_MSI
117 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
118 */
119//#define E1K_WITH_MSI
120/** @def E1K_WITH_TX_CS
121 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
122 */
123#define E1K_WITH_TX_CS
124/** @def E1K_WITH_TXD_CACHE
125 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
126 * single physical memory read (or two if it wraps around the end of TX
127 * descriptor ring). It is required for proper functioning of bandwidth
128 * resource control, as it allows exact packet sizes to be computed prior
129 * to allocating their buffers (see @bugref{5582}).
130 */
131#define E1K_WITH_TXD_CACHE
132/** @def E1K_WITH_RXD_CACHE
133 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
134 * single physical memory read (or two if it wraps around the end of RX
135 * descriptor ring). Intel's packet driver for DOS needs this option in
136 * order to work properly (see @bugref{6217}).
137 */
138#define E1K_WITH_RXD_CACHE
139/** @def E1K_WITH_PREREG_MMIO
140 * E1K_WITH_PREREG_MMIO enables new-style MMIO registration and is currently
141 * only done for testing the related PDM, IOM and PGM code. */
142//#define E1K_WITH_PREREG_MMIO
143/* @} */
144/* End of Options ************************************************************/
145
146#ifdef E1K_WITH_TXD_CACHE
147/**
148 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
149 * in the state structure. It limits the number of descriptors loaded in one
150 * batch read. For example, a Linux guest may use up to 20 descriptors per
151 * TSE packet. The largest TSE packet seen (Windows guest) was 45 descriptors.
152 */
153# define E1K_TXD_CACHE_SIZE 64u
154#endif /* E1K_WITH_TXD_CACHE */
155
156#ifdef E1K_WITH_RXD_CACHE
157/**
158 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
159 * in the state structure. It limits the number of descriptors loaded in one
160 * batch read. For example, an XP guest adds 15 RX descriptors at a time.
161 */
162# define E1K_RXD_CACHE_SIZE 16u
163#endif /* E1K_WITH_RXD_CACHE */
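
/*
 * Illustrative sketch, not part of the original sources: with a descriptor
 * cache of the sizes defined above, the number of descriptors that can be
 * pulled in with the first contiguous physical read is limited by the
 * distance from the current head to the end of the ring; the wrapped
 * remainder needs a second read. All names here are hypothetical.
 */
static inline uint32_t e1kSketchContiguousFetch(uint32_t iHead, uint32_t iTail,
                                                uint32_t cDescInRing, uint32_t cDescCache)
{
    /* Descriptors available between head and tail (ring arithmetic). */
    uint32_t cAvail = iTail >= iHead ? iTail - iHead : cDescInRing - iHead + iTail;
    /* Never fetch more than the cache can hold. */
    if (cAvail > cDescCache)
        cAvail = cDescCache;
    /* Clip to the end of the ring; the caller issues a second read for the rest. */
    uint32_t cToEnd = cDescInRing - iHead;
    return cAvail < cToEnd ? cAvail : cToEnd;
}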
164
165
166/* Little helpers ************************************************************/
167#undef htons
168#undef ntohs
169#undef htonl
170#undef ntohl
171#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
172#define ntohs(x) htons(x)
173#define htonl(x) ASMByteSwapU32(x)
174#define ntohl(x) htonl(x)
175
176#ifndef DEBUG
177# ifdef E1K_REL_DEBUG
178# define DEBUG
179# define E1kLog(a) LogRel(a)
180# define E1kLog2(a) LogRel(a)
181# define E1kLog3(a) LogRel(a)
182# define E1kLogX(x, a) LogRel(a)
183//# define E1kLog3(a) do {} while (0)
184# else
185# define E1kLog(a) do {} while (0)
186# define E1kLog2(a) do {} while (0)
187# define E1kLog3(a) do {} while (0)
188# define E1kLogX(x, a) do {} while (0)
189# endif
190#else
191# define E1kLog(a) Log(a)
192# define E1kLog2(a) Log2(a)
193# define E1kLog3(a) Log3(a)
194# define E1kLogX(x, a) LogIt(x, LOG_GROUP, a)
195//# define E1kLog(a) do {} while (0)
196//# define E1kLog2(a) do {} while (0)
197//# define E1kLog3(a) do {} while (0)
198#endif
199
200#if 0
201# define LOG_ENABLED
202# define E1kLogRel(a) LogRel(a)
203# undef Log6
204# define Log6(a) LogRel(a)
205#else
206# define E1kLogRel(a) do { } while (0)
207#endif
208
209//#undef DEBUG
210
211#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
212
213#define E1K_INC_CNT32(cnt) \
214do { \
215 if (cnt < UINT32_MAX) \
216 cnt++; \
217} while (0)
218
219#define E1K_ADD_CNT64(cntLo, cntHi, val) \
220do { \
221 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
222 uint64_t tmp = u64Cnt; \
223 u64Cnt += val; \
224 if (tmp > u64Cnt ) \
225 u64Cnt = UINT64_MAX; \
226 cntLo = (uint32_t)u64Cnt; \
227 cntHi = (uint32_t)(u64Cnt >> 32); \
228} while (0)
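
/*
 * Usage sketch for the counter helpers above (illustrative, not from the
 * original sources): 32-bit statistics counters saturate at UINT32_MAX,
 * 64-bit ones are kept as a low/high register pair and saturate at
 * UINT64_MAX. The function and parameter names are hypothetical.
 */
static inline void e1kSketchCountGoodRxFrame(uint32_t &cPackets, uint32_t &cOctetsLo,
                                             uint32_t &cOctetsHi, uint32_t cbFrame)
{
    E1K_INC_CNT32(cPackets);                      /* e.g. a packet count register */
    E1K_ADD_CNT64(cOctetsLo, cOctetsHi, cbFrame); /* e.g. an octet count register pair */
}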
229
230#ifdef E1K_INT_STATS
231# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
232#else /* E1K_INT_STATS */
233# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
234#endif /* E1K_INT_STATS */
235
236
237/*****************************************************************************/
238
239typedef uint32_t E1KCHIP;
240#define E1K_CHIP_82540EM 0
241#define E1K_CHIP_82543GC 1
242#define E1K_CHIP_82545EM 2
243
244#ifdef IN_RING3
245/** Different E1000 chips. */
246static const struct E1kChips
247{
248 uint16_t uPCIVendorId;
249 uint16_t uPCIDeviceId;
250 uint16_t uPCISubsystemVendorId;
251 uint16_t uPCISubsystemId;
252 const char *pcszName;
253} g_aChips[] =
254{
255 /* Vendor Device SSVendor SubSys Name */
256 { 0x8086,
257 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
258# ifdef E1K_WITH_MSI
259 0x105E,
260# else
261 0x100E,
262# endif
263 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
264 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
265 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
266};
267#endif /* IN_RING3 */
268
269
270/* The size of register area mapped to I/O space */
271#define E1K_IOPORT_SIZE 0x8
272/* The size of memory-mapped register area */
273#define E1K_MM_SIZE 0x20000
274
275#define E1K_MAX_TX_PKT_SIZE 16288
276#define E1K_MAX_RX_PKT_SIZE 16384
277
278/*****************************************************************************/
279
280/** Gets the specified bits from the register. */
281#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
282#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
283#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
284#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
285#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
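
/*
 * Worked expansion of the accessors above (illustrative): for a register FOO
 * with FOO_BAR_MASK and FOO_BAR_SHIFT defined, the macros paste the names
 * together. Using the RCTL.BSIZE field defined further below:
 *
 *   GET_BITS(RCTL, BSIZE)      -> (RCTL & RCTL_BSIZE_MASK) >> RCTL_BSIZE_SHIFT
 *   SET_BITS(RCTL, BSIZE, 0x3) -> RCTL = (RCTL & ~RCTL_BSIZE_MASK)
 *                                        | (0x3 << RCTL_BSIZE_SHIFT)
 *
 * where RCTL itself expands to pThis->auRegs[RCTL_IDX] via the register
 * access macros defined later in this file.
 */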
286
287#define CTRL_SLU UINT32_C(0x00000040)
288#define CTRL_MDIO UINT32_C(0x00100000)
289#define CTRL_MDC UINT32_C(0x00200000)
290#define CTRL_MDIO_DIR UINT32_C(0x01000000)
291#define CTRL_MDC_DIR UINT32_C(0x02000000)
292#define CTRL_RESET UINT32_C(0x04000000)
293#define CTRL_VME UINT32_C(0x40000000)
294
295#define STATUS_LU UINT32_C(0x00000002)
296#define STATUS_TXOFF UINT32_C(0x00000010)
297
298#define EECD_EE_WIRES UINT32_C(0x0F)
299#define EECD_EE_REQ UINT32_C(0x40)
300#define EECD_EE_GNT UINT32_C(0x80)
301
302#define EERD_START UINT32_C(0x00000001)
303#define EERD_DONE UINT32_C(0x00000010)
304#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
305#define EERD_DATA_SHIFT 16
306#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
307#define EERD_ADDR_SHIFT 8
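
/*
 * Illustrative guest-side view of EERD (a sketch, not device code): the
 * driver writes the word address together with EERD_START, polls for
 * EERD_DONE and then extracts the 16-bit word from the DATA field. The
 * helper names are hypothetical.
 */
static inline uint32_t e1kSketchEerdReadCmd(uint32_t uWordAddr)
{
    return EERD_START | ((uWordAddr << EERD_ADDR_SHIFT) & EERD_ADDR_MASK);
}

static inline uint16_t e1kSketchEerdData(uint32_t uEerd)
{
    return (uint16_t)((uEerd & EERD_DATA_MASK) >> EERD_DATA_SHIFT);
}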
308
309#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
310#define MDIC_DATA_SHIFT 0
311#define MDIC_REG_MASK UINT32_C(0x001F0000)
312#define MDIC_REG_SHIFT 16
313#define MDIC_PHY_MASK UINT32_C(0x03E00000)
314#define MDIC_PHY_SHIFT 21
315#define MDIC_OP_WRITE UINT32_C(0x04000000)
316#define MDIC_OP_READ UINT32_C(0x08000000)
317#define MDIC_READY UINT32_C(0x10000000)
318#define MDIC_INT_EN UINT32_C(0x20000000)
319#define MDIC_ERROR UINT32_C(0x40000000)
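
/*
 * Illustrative sketch (hypothetical helper, not from the original sources):
 * a PHY register read via MDIC combines the READ opcode with the PHY and
 * register addresses; the driver then polls MDIC_READY and takes the low
 * 16 data bits, checking MDIC_ERROR for failure.
 */
static inline uint32_t e1kSketchMdicReadCmd(uint32_t uPhyAddr, uint32_t uPhyReg)
{
    return MDIC_OP_READ
         | ((uPhyAddr << MDIC_PHY_SHIFT) & MDIC_PHY_MASK)
         | ((uPhyReg  << MDIC_REG_SHIFT) & MDIC_REG_MASK);
}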
320
321#define TCTL_EN UINT32_C(0x00000002)
322#define TCTL_PSP UINT32_C(0x00000008)
323
324#define RCTL_EN UINT32_C(0x00000002)
325#define RCTL_UPE UINT32_C(0x00000008)
326#define RCTL_MPE UINT32_C(0x00000010)
327#define RCTL_LPE UINT32_C(0x00000020)
328#define RCTL_LBM_MASK UINT32_C(0x000000C0)
329#define RCTL_LBM_SHIFT 6
330#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
331#define RCTL_RDMTS_SHIFT 8
332#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
333#define RCTL_MO_MASK UINT32_C(0x00003000)
334#define RCTL_MO_SHIFT 12
335#define RCTL_BAM UINT32_C(0x00008000)
336#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
337#define RCTL_BSIZE_SHIFT 16
338#define RCTL_VFE UINT32_C(0x00040000)
339#define RCTL_CFIEN UINT32_C(0x00080000)
340#define RCTL_CFI UINT32_C(0x00100000)
341#define RCTL_BSEX UINT32_C(0x02000000)
342#define RCTL_SECRC UINT32_C(0x04000000)
343
344#define ICR_TXDW UINT32_C(0x00000001)
345#define ICR_TXQE UINT32_C(0x00000002)
346#define ICR_LSC UINT32_C(0x00000004)
347#define ICR_RXDMT0 UINT32_C(0x00000010)
348#define ICR_RXT0 UINT32_C(0x00000080)
349#define ICR_TXD_LOW UINT32_C(0x00008000)
350#define RDTR_FPD UINT32_C(0x80000000)
351
352#define PBA_st ((PBAST*)(pThis->auRegs + PBA_IDX))
353typedef struct
354{
355 unsigned rxa : 7;
356 unsigned rxa_r : 9;
357 unsigned txa : 16;
358} PBAST;
359AssertCompileSize(PBAST, 4);
360
361#define TXDCTL_WTHRESH_MASK 0x003F0000
362#define TXDCTL_WTHRESH_SHIFT 16
363#define TXDCTL_LWTHRESH_MASK 0xFE000000
364#define TXDCTL_LWTHRESH_SHIFT 25
365
366#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
367#define RXCSUM_PCSS_SHIFT 0
368
369/** @name Register access macros
370 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
371 * @{ */
372#define CTRL pThis->auRegs[CTRL_IDX]
373#define STATUS pThis->auRegs[STATUS_IDX]
374#define EECD pThis->auRegs[EECD_IDX]
375#define EERD pThis->auRegs[EERD_IDX]
376#define CTRL_EXT pThis->auRegs[CTRL_EXT_IDX]
377#define FLA pThis->auRegs[FLA_IDX]
378#define MDIC pThis->auRegs[MDIC_IDX]
379#define FCAL pThis->auRegs[FCAL_IDX]
380#define FCAH pThis->auRegs[FCAH_IDX]
381#define FCT pThis->auRegs[FCT_IDX]
382#define VET pThis->auRegs[VET_IDX]
383#define ICR pThis->auRegs[ICR_IDX]
384#define ITR pThis->auRegs[ITR_IDX]
385#define ICS pThis->auRegs[ICS_IDX]
386#define IMS pThis->auRegs[IMS_IDX]
387#define IMC pThis->auRegs[IMC_IDX]
388#define RCTL pThis->auRegs[RCTL_IDX]
389#define FCTTV pThis->auRegs[FCTTV_IDX]
390#define TXCW pThis->auRegs[TXCW_IDX]
391#define RXCW pThis->auRegs[RXCW_IDX]
392#define TCTL pThis->auRegs[TCTL_IDX]
393#define TIPG pThis->auRegs[TIPG_IDX]
394#define AIFS pThis->auRegs[AIFS_IDX]
395#define LEDCTL pThis->auRegs[LEDCTL_IDX]
396#define PBA pThis->auRegs[PBA_IDX]
397#define FCRTL pThis->auRegs[FCRTL_IDX]
398#define FCRTH pThis->auRegs[FCRTH_IDX]
399#define RDFH pThis->auRegs[RDFH_IDX]
400#define RDFT pThis->auRegs[RDFT_IDX]
401#define RDFHS pThis->auRegs[RDFHS_IDX]
402#define RDFTS pThis->auRegs[RDFTS_IDX]
403#define RDFPC pThis->auRegs[RDFPC_IDX]
404#define RDBAL pThis->auRegs[RDBAL_IDX]
405#define RDBAH pThis->auRegs[RDBAH_IDX]
406#define RDLEN pThis->auRegs[RDLEN_IDX]
407#define RDH pThis->auRegs[RDH_IDX]
408#define RDT pThis->auRegs[RDT_IDX]
409#define RDTR pThis->auRegs[RDTR_IDX]
410#define RXDCTL pThis->auRegs[RXDCTL_IDX]
411#define RADV pThis->auRegs[RADV_IDX]
412#define RSRPD pThis->auRegs[RSRPD_IDX]
413#define TXDMAC pThis->auRegs[TXDMAC_IDX]
414#define TDFH pThis->auRegs[TDFH_IDX]
415#define TDFT pThis->auRegs[TDFT_IDX]
416#define TDFHS pThis->auRegs[TDFHS_IDX]
417#define TDFTS pThis->auRegs[TDFTS_IDX]
418#define TDFPC pThis->auRegs[TDFPC_IDX]
419#define TDBAL pThis->auRegs[TDBAL_IDX]
420#define TDBAH pThis->auRegs[TDBAH_IDX]
421#define TDLEN pThis->auRegs[TDLEN_IDX]
422#define TDH pThis->auRegs[TDH_IDX]
423#define TDT pThis->auRegs[TDT_IDX]
424#define TIDV pThis->auRegs[TIDV_IDX]
425#define TXDCTL pThis->auRegs[TXDCTL_IDX]
426#define TADV pThis->auRegs[TADV_IDX]
427#define TSPMT pThis->auRegs[TSPMT_IDX]
428#define CRCERRS pThis->auRegs[CRCERRS_IDX]
429#define ALGNERRC pThis->auRegs[ALGNERRC_IDX]
430#define SYMERRS pThis->auRegs[SYMERRS_IDX]
431#define RXERRC pThis->auRegs[RXERRC_IDX]
432#define MPC pThis->auRegs[MPC_IDX]
433#define SCC pThis->auRegs[SCC_IDX]
434#define ECOL pThis->auRegs[ECOL_IDX]
435#define MCC pThis->auRegs[MCC_IDX]
436#define LATECOL pThis->auRegs[LATECOL_IDX]
437#define COLC pThis->auRegs[COLC_IDX]
438#define DC pThis->auRegs[DC_IDX]
439#define TNCRS pThis->auRegs[TNCRS_IDX]
440/* #define SEC pThis->auRegs[SEC_IDX] Conflict with sys/time.h */
441#define CEXTERR pThis->auRegs[CEXTERR_IDX]
442#define RLEC pThis->auRegs[RLEC_IDX]
443#define XONRXC pThis->auRegs[XONRXC_IDX]
444#define XONTXC pThis->auRegs[XONTXC_IDX]
445#define XOFFRXC pThis->auRegs[XOFFRXC_IDX]
446#define XOFFTXC pThis->auRegs[XOFFTXC_IDX]
447#define FCRUC pThis->auRegs[FCRUC_IDX]
448#define PRC64 pThis->auRegs[PRC64_IDX]
449#define PRC127 pThis->auRegs[PRC127_IDX]
450#define PRC255 pThis->auRegs[PRC255_IDX]
451#define PRC511 pThis->auRegs[PRC511_IDX]
452#define PRC1023 pThis->auRegs[PRC1023_IDX]
453#define PRC1522 pThis->auRegs[PRC1522_IDX]
454#define GPRC pThis->auRegs[GPRC_IDX]
455#define BPRC pThis->auRegs[BPRC_IDX]
456#define MPRC pThis->auRegs[MPRC_IDX]
457#define GPTC pThis->auRegs[GPTC_IDX]
458#define GORCL pThis->auRegs[GORCL_IDX]
459#define GORCH pThis->auRegs[GORCH_IDX]
460#define GOTCL pThis->auRegs[GOTCL_IDX]
461#define GOTCH pThis->auRegs[GOTCH_IDX]
462#define RNBC pThis->auRegs[RNBC_IDX]
463#define RUC pThis->auRegs[RUC_IDX]
464#define RFC pThis->auRegs[RFC_IDX]
465#define ROC pThis->auRegs[ROC_IDX]
466#define RJC pThis->auRegs[RJC_IDX]
467#define MGTPRC pThis->auRegs[MGTPRC_IDX]
468#define MGTPDC pThis->auRegs[MGTPDC_IDX]
469#define MGTPTC pThis->auRegs[MGTPTC_IDX]
470#define TORL pThis->auRegs[TORL_IDX]
471#define TORH pThis->auRegs[TORH_IDX]
472#define TOTL pThis->auRegs[TOTL_IDX]
473#define TOTH pThis->auRegs[TOTH_IDX]
474#define TPR pThis->auRegs[TPR_IDX]
475#define TPT pThis->auRegs[TPT_IDX]
476#define PTC64 pThis->auRegs[PTC64_IDX]
477#define PTC127 pThis->auRegs[PTC127_IDX]
478#define PTC255 pThis->auRegs[PTC255_IDX]
479#define PTC511 pThis->auRegs[PTC511_IDX]
480#define PTC1023 pThis->auRegs[PTC1023_IDX]
481#define PTC1522 pThis->auRegs[PTC1522_IDX]
482#define MPTC pThis->auRegs[MPTC_IDX]
483#define BPTC pThis->auRegs[BPTC_IDX]
484#define TSCTC pThis->auRegs[TSCTC_IDX]
485#define TSCTFC pThis->auRegs[TSCTFC_IDX]
486#define RXCSUM pThis->auRegs[RXCSUM_IDX]
487#define WUC pThis->auRegs[WUC_IDX]
488#define WUFC pThis->auRegs[WUFC_IDX]
489#define WUS pThis->auRegs[WUS_IDX]
490#define MANC pThis->auRegs[MANC_IDX]
491#define IPAV pThis->auRegs[IPAV_IDX]
492#define WUPL pThis->auRegs[WUPL_IDX]
493/** @} */
494
495/**
496 * Indices of memory-mapped registers in register table.
497 */
498typedef enum
499{
500 CTRL_IDX,
501 STATUS_IDX,
502 EECD_IDX,
503 EERD_IDX,
504 CTRL_EXT_IDX,
505 FLA_IDX,
506 MDIC_IDX,
507 FCAL_IDX,
508 FCAH_IDX,
509 FCT_IDX,
510 VET_IDX,
511 ICR_IDX,
512 ITR_IDX,
513 ICS_IDX,
514 IMS_IDX,
515 IMC_IDX,
516 RCTL_IDX,
517 FCTTV_IDX,
518 TXCW_IDX,
519 RXCW_IDX,
520 TCTL_IDX,
521 TIPG_IDX,
522 AIFS_IDX,
523 LEDCTL_IDX,
524 PBA_IDX,
525 FCRTL_IDX,
526 FCRTH_IDX,
527 RDFH_IDX,
528 RDFT_IDX,
529 RDFHS_IDX,
530 RDFTS_IDX,
531 RDFPC_IDX,
532 RDBAL_IDX,
533 RDBAH_IDX,
534 RDLEN_IDX,
535 RDH_IDX,
536 RDT_IDX,
537 RDTR_IDX,
538 RXDCTL_IDX,
539 RADV_IDX,
540 RSRPD_IDX,
541 TXDMAC_IDX,
542 TDFH_IDX,
543 TDFT_IDX,
544 TDFHS_IDX,
545 TDFTS_IDX,
546 TDFPC_IDX,
547 TDBAL_IDX,
548 TDBAH_IDX,
549 TDLEN_IDX,
550 TDH_IDX,
551 TDT_IDX,
552 TIDV_IDX,
553 TXDCTL_IDX,
554 TADV_IDX,
555 TSPMT_IDX,
556 CRCERRS_IDX,
557 ALGNERRC_IDX,
558 SYMERRS_IDX,
559 RXERRC_IDX,
560 MPC_IDX,
561 SCC_IDX,
562 ECOL_IDX,
563 MCC_IDX,
564 LATECOL_IDX,
565 COLC_IDX,
566 DC_IDX,
567 TNCRS_IDX,
568 SEC_IDX,
569 CEXTERR_IDX,
570 RLEC_IDX,
571 XONRXC_IDX,
572 XONTXC_IDX,
573 XOFFRXC_IDX,
574 XOFFTXC_IDX,
575 FCRUC_IDX,
576 PRC64_IDX,
577 PRC127_IDX,
578 PRC255_IDX,
579 PRC511_IDX,
580 PRC1023_IDX,
581 PRC1522_IDX,
582 GPRC_IDX,
583 BPRC_IDX,
584 MPRC_IDX,
585 GPTC_IDX,
586 GORCL_IDX,
587 GORCH_IDX,
588 GOTCL_IDX,
589 GOTCH_IDX,
590 RNBC_IDX,
591 RUC_IDX,
592 RFC_IDX,
593 ROC_IDX,
594 RJC_IDX,
595 MGTPRC_IDX,
596 MGTPDC_IDX,
597 MGTPTC_IDX,
598 TORL_IDX,
599 TORH_IDX,
600 TOTL_IDX,
601 TOTH_IDX,
602 TPR_IDX,
603 TPT_IDX,
604 PTC64_IDX,
605 PTC127_IDX,
606 PTC255_IDX,
607 PTC511_IDX,
608 PTC1023_IDX,
609 PTC1522_IDX,
610 MPTC_IDX,
611 BPTC_IDX,
612 TSCTC_IDX,
613 TSCTFC_IDX,
614 RXCSUM_IDX,
615 WUC_IDX,
616 WUFC_IDX,
617 WUS_IDX,
618 MANC_IDX,
619 IPAV_IDX,
620 WUPL_IDX,
621 MTA_IDX,
622 RA_IDX,
623 VFTA_IDX,
624 IP4AT_IDX,
625 IP6AT_IDX,
626 WUPM_IDX,
627 FFLT_IDX,
628 FFMT_IDX,
629 FFVT_IDX,
630 PBM_IDX,
631 RA_82542_IDX,
632 MTA_82542_IDX,
633 VFTA_82542_IDX,
634 E1K_NUM_OF_REGS
635} E1kRegIndex;
636
637#define E1K_NUM_OF_32BIT_REGS MTA_IDX
638/** The number of registers with strictly increasing offset. */
639#define E1K_NUM_OF_BINARY_SEARCHABLE (WUPL_IDX + 1)
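
/*
 * Illustrative sketch (not part of the original sources): because the first
 * E1K_NUM_OF_BINARY_SEARCHABLE entries of the register map are sorted by
 * strictly increasing offset, an MMIO offset can be resolved to a register
 * index with a plain binary search. Shown here over a bare offset array;
 * the helper name is hypothetical.
 */
static inline int e1kSketchLookupRegByOffset(const uint32_t *paOffsets, uint32_t cRegs, uint32_t offReg)
{
    uint32_t iLo = 0, iHi = cRegs;
    while (iLo < iHi)
    {
        uint32_t iMid = (iLo + iHi) / 2;
        if (paOffsets[iMid] == offReg)
            return (int)iMid;
        if (paOffsets[iMid] < offReg)
            iLo = iMid + 1;
        else
            iHi = iMid;
    }
    return -1; /* not among the binary-searchable registers */
}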
640
641
642/**
643 * Define E1000-specific EEPROM layout.
644 */
645struct E1kEEPROM
646{
647 public:
648 EEPROM93C46 eeprom;
649
650#ifdef IN_RING3
651 /**
652 * Initialize EEPROM content.
653 *
654 * @param macAddr MAC address of E1000.
655 */
656 void init(RTMAC &macAddr)
657 {
658 eeprom.init();
659 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
660 eeprom.m_au16Data[0x04] = 0xFFFF;
661 /*
662 * bit 3 - full support for power management
663 * bit 10 - full duplex
664 */
665 eeprom.m_au16Data[0x0A] = 0x4408;
666 eeprom.m_au16Data[0x0B] = 0x001E;
667 eeprom.m_au16Data[0x0C] = 0x8086;
668 eeprom.m_au16Data[0x0D] = 0x100E;
669 eeprom.m_au16Data[0x0E] = 0x8086;
670 eeprom.m_au16Data[0x0F] = 0x3040;
671 eeprom.m_au16Data[0x21] = 0x7061;
672 eeprom.m_au16Data[0x22] = 0x280C;
673 eeprom.m_au16Data[0x23] = 0x00C8;
674 eeprom.m_au16Data[0x24] = 0x00C8;
675 eeprom.m_au16Data[0x2F] = 0x0602;
676 updateChecksum();
677 };
678
679 /**
680 * Compute the checksum as required by E1000 and store it
681 * in the last word.
682 */
683 void updateChecksum()
684 {
685 uint16_t u16Checksum = 0;
686
687 for (int i = 0; i < eeprom.SIZE-1; i++)
688 u16Checksum += eeprom.m_au16Data[i];
689 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
690 };
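
    /**
     * Illustrative counterpart of updateChecksum() above (a sketch, not part
     * of the original sources): the sum of all EEPROM words, including the
     * stored checksum word, must come out to 0xBABA for the image to be valid.
     */
    bool sketchIsChecksumValid()
    {
        uint16_t u16Sum = 0;

        for (int i = 0; i < eeprom.SIZE; i++)
            u16Sum += eeprom.m_au16Data[i];
        return u16Sum == 0xBABA;
    };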
691
692 /**
693 * First 6 bytes of EEPROM contain MAC address.
694 *
695 * @returns MAC address of E1000.
696 */
697 void getMac(PRTMAC pMac)
698 {
699 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
700 };
701
702 uint32_t read()
703 {
704 return eeprom.read();
705 }
706
707 void write(uint32_t u32Wires)
708 {
709 eeprom.write(u32Wires);
710 }
711
712 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
713 {
714 return eeprom.readWord(u32Addr, pu16Value);
715 }
716
717 int load(PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
718 {
719 return eeprom.load(pHlp, pSSM);
720 }
721
722 void save(PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
723 {
724 eeprom.save(pHlp, pSSM);
725 }
726#endif /* IN_RING3 */
727};
728
729
730#define E1K_SPEC_VLAN(s) (s & 0xFFF)
731#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
732#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
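
/*
 * Worked example for the macros above (illustrative): an 802.1Q TCI value of
 * 0x6123 decomposes as
 *   E1K_SPEC_VLAN(0x6123) = 0x123  (VLAN ID 291)
 *   E1K_SPEC_CFI(0x6123)  = 0      (canonical format)
 *   E1K_SPEC_PRI(0x6123)  = 3      (priority)
 */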
733
734struct E1kRxDStatus
735{
736 /** @name Descriptor Status field (3.2.3.1)
737 * @{ */
738 unsigned fDD : 1; /**< Descriptor Done. */
739 unsigned fEOP : 1; /**< End of packet. */
740 unsigned fIXSM : 1; /**< Ignore checksum indication. */
741 unsigned fVP : 1; /**< VLAN, matches VET. */
742 unsigned : 1;
743 unsigned fTCPCS : 1; /**< TCP checksum calculated on the packet. */
744 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
745 unsigned fPIF : 1; /**< Passed in-exact filter */
746 /** @} */
747 /** @name Descriptor Errors field (3.2.3.2)
748 * (Only valid when fEOP and fDD are set.)
749 * @{ */
750 unsigned fCE : 1; /**< CRC or alignment error. */
751 unsigned : 4; /**< Reserved, varies with different models... */
752 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
753 unsigned fIPE : 1; /**< IP Checksum error. */
754 unsigned fRXE : 1; /**< RX Data error. */
755 /** @} */
756 /** @name Descriptor Special field (3.2.3.3)
757 * @{ */
758 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
759 /** @} */
760};
761typedef struct E1kRxDStatus E1KRXDST;
762
763struct E1kRxDesc_st
764{
765 uint64_t u64BufAddr; /**< Address of data buffer */
766 uint16_t u16Length; /**< Length of data in buffer */
767 uint16_t u16Checksum; /**< Packet checksum */
768 E1KRXDST status;
769};
770typedef struct E1kRxDesc_st E1KRXDESC;
771AssertCompileSize(E1KRXDESC, 16);
772
773#define E1K_DTYP_LEGACY -1
774#define E1K_DTYP_CONTEXT 0
775#define E1K_DTYP_DATA 1
776
777struct E1kTDLegacy
778{
779 uint64_t u64BufAddr; /**< Address of data buffer */
780 struct TDLCmd_st
781 {
782 unsigned u16Length : 16;
783 unsigned u8CSO : 8;
784 /* CMD field : 8 */
785 unsigned fEOP : 1;
786 unsigned fIFCS : 1;
787 unsigned fIC : 1;
788 unsigned fRS : 1;
789 unsigned fRPS : 1;
790 unsigned fDEXT : 1;
791 unsigned fVLE : 1;
792 unsigned fIDE : 1;
793 } cmd;
794 struct TDLDw3_st
795 {
796 /* STA field */
797 unsigned fDD : 1;
798 unsigned fEC : 1;
799 unsigned fLC : 1;
800 unsigned fTURSV : 1;
801 /* RSV field */
802 unsigned u4RSV : 4;
803 /* CSS field */
804 unsigned u8CSS : 8;
805 /* Special field*/
806 unsigned u16Special: 16;
807 } dw3;
808};
809
810/**
811 * TCP/IP Context Transmit Descriptor, section 3.3.6.
812 */
813struct E1kTDContext
814{
815 struct CheckSum_st
816 {
817 /** TSE: Header start. !TSE: Checksum start. */
818 unsigned u8CSS : 8;
819 /** Checksum offset - where to store it. */
820 unsigned u8CSO : 8;
821 /** Checksum ending (inclusive) offset, 0 = end of packet. */
822 unsigned u16CSE : 16;
823 } ip;
824 struct CheckSum_st tu;
825 struct TDCDw2_st
826 {
827 /** TSE: The total number of payload bytes for this context. Sans header. */
828 unsigned u20PAYLEN : 20;
829 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
830 unsigned u4DTYP : 4;
831 /** TUCMD field, 8 bits
832 * @{ */
833 /** TSE: TCP (set) or UDP (clear). */
834 unsigned fTCP : 1;
835 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
836 * the IP header. Does not affect the checksumming.
837 * @remarks 82544GC/EI interprets a cleared field differently. */
838 unsigned fIP : 1;
839 /** TSE: TCP segmentation enable. When clear, the context describes an ordinary (checksum offload only) packet. */
840 unsigned fTSE : 1;
841 /** Report status (only dw3.fDD applies here). */
842 unsigned fRS : 1;
843 /** Reserved, MBZ. */
844 unsigned fRSV1 : 1;
845 /** Descriptor extension, must be set for this descriptor type. */
846 unsigned fDEXT : 1;
847 /** Reserved, MBZ. */
848 unsigned fRSV2 : 1;
849 /** Interrupt delay enable. */
850 unsigned fIDE : 1;
851 /** @} */
852 } dw2;
853 struct TDCDw3_st
854 {
855 /** Descriptor Done. */
856 unsigned fDD : 1;
857 /** Reserved, MBZ. */
858 unsigned u7RSV : 7;
859 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
860 unsigned u8HDRLEN : 8;
861 /** TSO: Maximum segment size. */
862 unsigned u16MSS : 16;
863 } dw3;
864};
865typedef struct E1kTDContext E1KTXCTX;
866
867/**
868 * TCP/IP Data Transmit Descriptor, section 3.3.7.
869 */
870struct E1kTDData
871{
872 uint64_t u64BufAddr; /**< Address of data buffer */
873 struct TDDCmd_st
874 {
875 /** The total length of data pointed to by this descriptor. */
876 unsigned u20DTALEN : 20;
877 /** The descriptor type - E1K_DTYP_DATA (1). */
878 unsigned u4DTYP : 4;
879 /** @name DCMD field, 8 bits (3.3.7.1).
880 * @{ */
881 /** End of packet. Note TSCTFC update. */
882 unsigned fEOP : 1;
883 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
884 unsigned fIFCS : 1;
885 /** Use the TSE context when set and the normal when clear. */
886 unsigned fTSE : 1;
887 /** Report status (dw3.STA). */
888 unsigned fRS : 1;
889 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
890 unsigned fRPS : 1;
891 /** Descriptor extension, must be set for this descriptor type. */
892 unsigned fDEXT : 1;
893 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
894 * Insert dw3.SPECIAL after ethernet header. */
895 unsigned fVLE : 1;
896 /** Interrupt delay enable. */
897 unsigned fIDE : 1;
898 /** @} */
899 } cmd;
900 struct TDDDw3_st
901 {
902 /** @name STA field (3.3.7.2)
903 * @{ */
904 unsigned fDD : 1; /**< Descriptor done. */
905 unsigned fEC : 1; /**< Excess collision. */
906 unsigned fLC : 1; /**< Late collision. */
907 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
908 unsigned fTURSV : 1;
909 /** @} */
910 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
911 /** @name POPTS (Packet Option) field (3.3.7.3)
912 * @{ */
913 unsigned fIXSM : 1; /**< Insert IP checksum. */
914 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
915 unsigned u6RSV : 6; /**< Reserved, MBZ. */
916 /** @} */
917 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
918 * Requires fEOP, fVLE and CTRL.VME to be set.
919 * @{ */
920 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
921 /** @} */
922 } dw3;
923};
924typedef struct E1kTDData E1KTXDAT;
925
926union E1kTxDesc
927{
928 struct E1kTDLegacy legacy;
929 struct E1kTDContext context;
930 struct E1kTDData data;
931};
932typedef union E1kTxDesc E1KTXDESC;
933AssertCompileSize(E1KTXDESC, 16);
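
/*
 * Illustrative sketch (not necessarily how the device code does it): the
 * three layouts in the union above are told apart by the DEXT bit, which sits
 * at the same position in all of them, and -- for extended descriptors -- by
 * the DTYP field. The helper name is hypothetical.
 */
static inline int e1kSketchGetDescType(const E1KTXDESC *pDesc)
{
    if (!pDesc->legacy.cmd.fDEXT)
        return E1K_DTYP_LEGACY;             /* DEXT clear: legacy descriptor */
    if (pDesc->context.dw2.u4DTYP == E1K_DTYP_CONTEXT)
        return E1K_DTYP_CONTEXT;            /* DTYP 0: TCP/IP context descriptor */
    return E1K_DTYP_DATA;                   /* DTYP 1: TCP/IP data descriptor */
}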
934
935#define RA_CTL_AS 0x0003
936#define RA_CTL_AV 0x8000
937
938union E1kRecAddr
939{
940 uint32_t au32[32];
941 struct RAArray
942 {
943 uint8_t addr[6];
944 uint16_t ctl;
945 } array[16];
946};
947typedef struct E1kRecAddr::RAArray E1KRAELEM;
948typedef union E1kRecAddr E1KRA;
949AssertCompileSize(E1KRA, 8*16);
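
/*
 * Illustrative sketch (not the device's actual filter code): a destination
 * MAC matches the Receive Address Table if any entry marked valid via
 * RA_CTL_AV carries the same six address bytes. The helper name is
 * hypothetical.
 */
static inline bool e1kSketchRaMatch(const E1KRA *pRa, const uint8_t abDstMac[6])
{
    for (unsigned i = 0; i < RT_ELEMENTS(pRa->array); i++)
        if (   (pRa->array[i].ctl & RA_CTL_AV)
            && memcmp(pRa->array[i].addr, abDstMac, 6) == 0)
            return true;
    return false;
}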
950
951#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
952#define E1K_IP_DF UINT16_C(0x4000) /**< dont fragment flag */
953#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
954#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
955
956/** @todo use+extend RTNETIPV4 */
957struct E1kIpHeader
958{
959 /* type of service / version / header length */
960 uint16_t tos_ver_hl;
961 /* total length */
962 uint16_t total_len;
963 /* identification */
964 uint16_t ident;
965 /* fragment offset field */
966 uint16_t offset;
967 /* time to live / protocol*/
968 uint16_t ttl_proto;
969 /* checksum */
970 uint16_t chksum;
971 /* source IP address */
972 uint32_t src;
973 /* destination IP address */
974 uint32_t dest;
975};
976AssertCompileSize(struct E1kIpHeader, 20);
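
/*
 * Illustrative helper (a sketch, not part of the original sources): the
 * standard Internet checksum over the fixed 20-byte IP header above, i.e.
 * the one's-complement sum of its 16-bit words with the chksum field itself
 * treated as zero. The helper name is hypothetical.
 */
static inline uint16_t e1kSketchIpHeaderChecksum(const struct E1kIpHeader *pIpHdr)
{
    const uint16_t *pu16 = (const uint16_t *)pIpHdr;
    uint32_t u32Sum = 0;
    for (unsigned i = 0; i < sizeof(*pIpHdr) / sizeof(uint16_t); i++)
        if (&pu16[i] != &pIpHdr->chksum)    /* skip the checksum field itself */
            u32Sum += pu16[i];
    while (u32Sum >> 16)                    /* fold carries back into the low word */
        u32Sum = (u32Sum & 0xffff) + (u32Sum >> 16);
    return (uint16_t)~u32Sum;
}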
977
978#define E1K_TCP_FIN UINT16_C(0x01)
979#define E1K_TCP_SYN UINT16_C(0x02)
980#define E1K_TCP_RST UINT16_C(0x04)
981#define E1K_TCP_PSH UINT16_C(0x08)
982#define E1K_TCP_ACK UINT16_C(0x10)
983#define E1K_TCP_URG UINT16_C(0x20)
984#define E1K_TCP_ECE UINT16_C(0x40)
985#define E1K_TCP_CWR UINT16_C(0x80)
986#define E1K_TCP_FLAGS UINT16_C(0x3f)
987
988/** @todo use+extend RTNETTCP */
989struct E1kTcpHeader
990{
991 uint16_t src;
992 uint16_t dest;
993 uint32_t seqno;
994 uint32_t ackno;
995 uint16_t hdrlen_flags;
996 uint16_t wnd;
997 uint16_t chksum;
998 uint16_t urgp;
999};
1000AssertCompileSize(struct E1kTcpHeader, 20);
1001
1002
1003#ifdef E1K_WITH_TXD_CACHE
1004/** The current Saved state version. */
1005# define E1K_SAVEDSTATE_VERSION 4
1006/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
1007# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
1008#else /* !E1K_WITH_TXD_CACHE */
1009/** The current Saved state version. */
1010# define E1K_SAVEDSTATE_VERSION 3
1011#endif /* !E1K_WITH_TXD_CACHE */
1012/** Saved state version for VirtualBox 4.1 and earlier.
1013 * These did not include VLAN tag fields. */
1014#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
1015/** Saved state version for VirtualBox 3.0 and earlier.
1016 * This did not include the configuration part nor the E1kEEPROM. */
1017#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
1018
1019/**
1020 * E1000 shared device state.
1021 *
1022 * This is shared between ring-0 and ring-3.
1023 */
1024typedef struct E1KSTATE
1025{
1026 char szPrf[8]; /**< Log prefix, e.g. E1000#1. */
1027
1028 /** Handle to PCI region \#0, the MMIO region. */
1029 IOMMMIOHANDLE hMmioRegion;
1030 /** Handle to PCI region \#2, the I/O ports. */
1031 IOMIOPORTHANDLE hIoPorts;
1032
1033 /** Receive Interrupt Delay Timer. */
1034 TMTIMERHANDLE hRIDTimer;
1035 /** Receive Absolute Delay Timer. */
1036 TMTIMERHANDLE hRADTimer;
1037 /** Transmit Interrupt Delay Timer. */
1038 TMTIMERHANDLE hTIDTimer;
1039 /** Transmit Absolute Delay Timer. */
1040 TMTIMERHANDLE hTADTimer;
1041 /** Transmit Delay Timer. */
1042 TMTIMERHANDLE hTXDTimer;
1043 /** Late Interrupt Timer. */
1044 TMTIMERHANDLE hIntTimer;
1045 /** Link Up(/Restore) Timer. */
1046 TMTIMERHANDLE hLUTimer;
1047
1048 /** Transmit task. */
1049 PDMTASKHANDLE hTxTask;
1050
1051 /** Critical section - what is it protecting? */
1052 PDMCRITSECT cs;
1053 /** RX Critical section. */
1054 PDMCRITSECT csRx;
1055#ifdef E1K_WITH_TX_CS
1056 /** TX Critical section. */
1057 PDMCRITSECT csTx;
1058#endif /* E1K_WITH_TX_CS */
1059 /** MAC address obtained from the configuration. */
1060 RTMAC macConfigured;
1061 uint16_t u16Padding0;
1062 /** EMT: Last time the interrupt was acknowledged. */
1063 uint64_t u64AckedAt;
1064 /** All: Used for eliminating spurious interrupts. */
1065 bool fIntRaised;
1066 /** EMT: false if the cable is disconnected by the GUI. */
1067 bool fCableConnected;
1068 /** EMT: Compute Ethernet CRC for RX packets. */
1069 bool fEthernetCRC;
1070 /** All: throttle interrupts. */
1071 bool fItrEnabled;
1072 /** All: throttle RX interrupts. */
1073 bool fItrRxEnabled;
1074 /** All: Delay TX interrupts using TIDV/TADV. */
1075 bool fTidEnabled;
1076 bool afPadding[2];
1077 /** Link up delay (in milliseconds). */
1078 uint32_t cMsLinkUpDelay;
1079
1080 /** All: Device register storage. */
1081 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1082 /** TX/RX: Status LED. */
1083 PDMLED led;
1084 /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1085 uint32_t u32PktNo;
1086
1087 /** EMT: Offset of the register to be read via IO. */
1088 uint32_t uSelectedReg;
1089 /** EMT: Multicast Table Array. */
1090 uint32_t auMTA[128];
1091 /** EMT: Receive Address registers. */
1092 E1KRA aRecAddr;
1093 /** EMT: VLAN filter table array. */
1094 uint32_t auVFTA[128];
1095 /** EMT: Receive buffer size. */
1096 uint16_t u16RxBSize;
1097 /** EMT: Locked state -- no state alteration possible. */
1098 bool fLocked;
1099 /** EMT: */
1100 bool fDelayInts;
1101 /** All: */
1102 bool fIntMaskUsed;
1103
1104 /** N/A: */
1105 bool volatile fMaybeOutOfSpace;
1106 /** EMT: Gets signalled when more RX descriptors become available. */
1107 SUPSEMEVENT hEventMoreRxDescAvail;
1108#ifdef E1K_WITH_RXD_CACHE
1109 /** RX: Fetched RX descriptors. */
1110 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1111 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1112 /** RX: Actual number of fetched RX descriptors. */
1113 uint32_t nRxDFetched;
1114 /** RX: Index in cache of RX descriptor being processed. */
1115 uint32_t iRxDCurrent;
1116#endif /* E1K_WITH_RXD_CACHE */
1117
1118 /** TX: Context used for TCP segmentation packets. */
1119 E1KTXCTX contextTSE;
1120 /** TX: Context used for ordinary packets. */
1121 E1KTXCTX contextNormal;
1122#ifdef E1K_WITH_TXD_CACHE
1123 /** TX: Fetched TX descriptors. */
1124 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1125 /** TX: Validity of TX descriptors. Set by e1kLocateTxPacket, used by e1kXmitPacket. */
1126 bool afTxDValid[E1K_TXD_CACHE_SIZE];
1127 /** TX: Actual number of fetched TX descriptors. */
1128 uint8_t nTxDFetched;
1129 /** TX: Index in cache of TX descriptor being processed. */
1130 uint8_t iTxDCurrent;
1131 /** TX: Will this frame be sent as GSO. */
1132 bool fGSO;
1133 /** Alignment padding. */
1134 bool fReserved;
1135 /** TX: Number of bytes in next packet. */
1136 uint32_t cbTxAlloc;
1137
1138#endif /* E1K_WITH_TXD_CACHE */
1139 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1140 * applicable to the current TSE mode. */
1141 PDMNETWORKGSO GsoCtx;
1142 /** Scratch space for holding the loopback / fallback scatter / gather
1143 * descriptor. */
1144 union
1145 {
1146 PDMSCATTERGATHER Sg;
1147 uint8_t padding[8 * sizeof(RTUINTPTR)];
1148 } uTxFallback;
1149 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1150 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1151 /** TX: Number of bytes assembled in TX packet buffer. */
1152 uint16_t u16TxPktLen;
1153 /** TX: When false, forces segmentation in E1000 instead of sending frames as GSO. */
1154 bool fGSOEnabled;
1155 /** TX: IP checksum has to be inserted if true. */
1156 bool fIPcsum;
1157 /** TX: TCP/UDP checksum has to be inserted if true. */
1158 bool fTCPcsum;
1159 /** TX: VLAN tag has to be inserted if true. */
1160 bool fVTag;
1161 /** TX: TCI part of VLAN tag to be inserted. */
1162 uint16_t u16VTagTCI;
1163 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1164 uint32_t u32PayRemain;
1165 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1166 uint16_t u16HdrRemain;
1167 /** TX TSE fallback: Flags from template header. */
1168 uint16_t u16SavedFlags;
1169 /** TX TSE fallback: Partial checksum from template header. */
1170 uint32_t u32SavedCsum;
1171 /** ?: Emulated controller type. */
1172 E1KCHIP eChip;
1173
1174 /** EMT: Physical interface emulation. */
1175 PHY phy;
1176
1177#if 0
1178 /** Alignment padding. */
1179 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1180#endif
1181
1182 STAMCOUNTER StatReceiveBytes;
1183 STAMCOUNTER StatTransmitBytes;
1184#if defined(VBOX_WITH_STATISTICS)
1185 STAMPROFILEADV StatMMIOReadRZ;
1186 STAMPROFILEADV StatMMIOReadR3;
1187 STAMPROFILEADV StatMMIOWriteRZ;
1188 STAMPROFILEADV StatMMIOWriteR3;
1189 STAMPROFILEADV StatEEPROMRead;
1190 STAMPROFILEADV StatEEPROMWrite;
1191 STAMPROFILEADV StatIOReadRZ;
1192 STAMPROFILEADV StatIOReadR3;
1193 STAMPROFILEADV StatIOWriteRZ;
1194 STAMPROFILEADV StatIOWriteR3;
1195 STAMPROFILEADV StatLateIntTimer;
1196 STAMCOUNTER StatLateInts;
1197 STAMCOUNTER StatIntsRaised;
1198 STAMCOUNTER StatIntsPrevented;
1199 STAMPROFILEADV StatReceive;
1200 STAMPROFILEADV StatReceiveCRC;
1201 STAMPROFILEADV StatReceiveFilter;
1202 STAMPROFILEADV StatReceiveStore;
1203 STAMPROFILEADV StatTransmitRZ;
1204 STAMPROFILEADV StatTransmitR3;
1205 STAMPROFILE StatTransmitSendRZ;
1206 STAMPROFILE StatTransmitSendR3;
1207 STAMPROFILE StatRxOverflow;
1208 STAMCOUNTER StatRxOverflowWakeupRZ;
1209 STAMCOUNTER StatRxOverflowWakeupR3;
1210 STAMCOUNTER StatTxDescCtxNormal;
1211 STAMCOUNTER StatTxDescCtxTSE;
1212 STAMCOUNTER StatTxDescLegacy;
1213 STAMCOUNTER StatTxDescData;
1214 STAMCOUNTER StatTxDescTSEData;
1215 STAMCOUNTER StatTxPathFallback;
1216 STAMCOUNTER StatTxPathGSO;
1217 STAMCOUNTER StatTxPathRegular;
1218 STAMCOUNTER StatPHYAccesses;
1219 STAMCOUNTER aStatRegWrites[E1K_NUM_OF_REGS];
1220 STAMCOUNTER aStatRegReads[E1K_NUM_OF_REGS];
1221#endif /* VBOX_WITH_STATISTICS */
1222
1223#ifdef E1K_INT_STATS
1224 /* Internal stats */
1225 uint64_t u64ArmedAt;
1226 uint64_t uStatMaxTxDelay;
1227 uint32_t uStatInt;
1228 uint32_t uStatIntTry;
1229 uint32_t uStatIntLower;
1230 uint32_t uStatNoIntICR;
1231 int32_t iStatIntLost;
1232 int32_t iStatIntLostOne;
1233 uint32_t uStatIntIMS;
1234 uint32_t uStatIntSkip;
1235 uint32_t uStatIntLate;
1236 uint32_t uStatIntMasked;
1237 uint32_t uStatIntEarly;
1238 uint32_t uStatIntRx;
1239 uint32_t uStatIntTx;
1240 uint32_t uStatIntICS;
1241 uint32_t uStatIntRDTR;
1242 uint32_t uStatIntRXDMT0;
1243 uint32_t uStatIntTXQE;
1244 uint32_t uStatTxNoRS;
1245 uint32_t uStatTxIDE;
1246 uint32_t uStatTxDelayed;
1247 uint32_t uStatTxDelayExp;
1248 uint32_t uStatTAD;
1249 uint32_t uStatTID;
1250 uint32_t uStatRAD;
1251 uint32_t uStatRID;
1252 uint32_t uStatRxFrm;
1253 uint32_t uStatTxFrm;
1254 uint32_t uStatDescCtx;
1255 uint32_t uStatDescDat;
1256 uint32_t uStatDescLeg;
1257 uint32_t uStatTx1514;
1258 uint32_t uStatTx2962;
1259 uint32_t uStatTx4410;
1260 uint32_t uStatTx5858;
1261 uint32_t uStatTx7306;
1262 uint32_t uStatTx8754;
1263 uint32_t uStatTx16384;
1264 uint32_t uStatTx32768;
1265 uint32_t uStatTxLarge;
1266 uint32_t uStatAlign;
1267#endif /* E1K_INT_STATS */
1268} E1KSTATE;
1269/** Pointer to the E1000 device state. */
1270typedef E1KSTATE *PE1KSTATE;
1271
1272/**
1273 * E1000 ring-3 device state
1274 *
1275 * @implements PDMINETWORKDOWN
1276 * @implements PDMINETWORKCONFIG
1277 * @implements PDMILEDPORTS
1278 */
1279typedef struct E1KSTATER3
1280{
1281 PDMIBASE IBase;
1282 PDMINETWORKDOWN INetworkDown;
1283 PDMINETWORKCONFIG INetworkConfig;
1284 /** LED interface */
1285 PDMILEDPORTS ILeds;
1286 /** Attached network driver. */
1287 R3PTRTYPE(PPDMIBASE) pDrvBase;
1288 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1289
1290 /** Pointer to the shared state. */
1291 R3PTRTYPE(PE1KSTATE) pShared;
1292
1293 /** Device instance. */
1294 PPDMDEVINSR3 pDevInsR3;
1295 /** Attached network driver. */
1296 PPDMINETWORKUPR3 pDrvR3;
1297 /** The scatter / gather buffer used for the current outgoing packet. */
1298 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1299
1300 /** EMT: EEPROM emulation */
1301 E1kEEPROM eeprom;
1302} E1KSTATER3;
1303/** Pointer to the E1000 ring-3 device state. */
1304typedef E1KSTATER3 *PE1KSTATER3;
1305
1306
1307/**
1308 * E1000 ring-0 device state
1309 */
1310typedef struct E1KSTATER0
1311{
1312 /** Device instance. */
1313 PPDMDEVINSR0 pDevInsR0;
1314 /** Attached network driver. */
1315 PPDMINETWORKUPR0 pDrvR0;
1316 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1317 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1318} E1KSTATER0;
1319/** Pointer to the E1000 ring-0 device state. */
1320typedef E1KSTATER0 *PE1KSTATER0;
1321
1322
1323/**
1324 * E1000 raw-mode device state
1325 */
1326typedef struct E1KSTATERC
1327{
1328 /** Device instance. */
1329 PPDMDEVINSRC pDevInsRC;
1330 /** Attached network driver. */
1331 PPDMINETWORKUPRC pDrvRC;
1332 /** The scatter / gather buffer used for the current outgoing packet. */
1333 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1334} E1KSTATERC;
1335/** Pointer to the E1000 raw-mode device state. */
1336typedef E1KSTATERC *PE1KSTATERC;
1337
1338
1339/** @def PE1KSTATECC
1340 * Pointer to the instance data for the current context. */
1341#ifdef IN_RING3
1342typedef E1KSTATER3 E1KSTATECC;
1343typedef PE1KSTATER3 PE1KSTATECC;
1344#elif defined(IN_RING0)
1345typedef E1KSTATER0 E1KSTATECC;
1346typedef PE1KSTATER0 PE1KSTATECC;
1347#elif defined(IN_RC)
1348typedef E1KSTATERC E1KSTATECC;
1349typedef PE1KSTATERC PE1KSTATECC;
1350#else
1351# error "Not IN_RING3, IN_RING0 or IN_RC"
1352#endif
1353
1354
1355#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1356
1357/* Forward declarations ******************************************************/
1358static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread);
1359
1360/**
1361 * E1000 register read handler.
1362 */
1363typedef int (FNE1KREGREAD)(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1364/**
1365 * E1000 register write handler.
1366 */
1367typedef int (FNE1KREGWRITE)(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1368
1369static FNE1KREGREAD e1kRegReadUnimplemented;
1370static FNE1KREGWRITE e1kRegWriteUnimplemented;
1371static FNE1KREGREAD e1kRegReadAutoClear;
1372static FNE1KREGREAD e1kRegReadDefault;
1373static FNE1KREGWRITE e1kRegWriteDefault;
1374#if 0 /* unused */
1375static FNE1KREGREAD e1kRegReadCTRL;
1376#endif
1377static FNE1KREGWRITE e1kRegWriteCTRL;
1378static FNE1KREGREAD e1kRegReadEECD;
1379static FNE1KREGWRITE e1kRegWriteEECD;
1380static FNE1KREGWRITE e1kRegWriteEERD;
1381static FNE1KREGWRITE e1kRegWriteMDIC;
1382static FNE1KREGREAD e1kRegReadICR;
1383static FNE1KREGWRITE e1kRegWriteICR;
1384static FNE1KREGREAD e1kRegReadICS;
1385static FNE1KREGWRITE e1kRegWriteICS;
1386static FNE1KREGWRITE e1kRegWriteIMS;
1387static FNE1KREGWRITE e1kRegWriteIMC;
1388static FNE1KREGWRITE e1kRegWriteRCTL;
1389static FNE1KREGWRITE e1kRegWritePBA;
1390static FNE1KREGWRITE e1kRegWriteRDT;
1391static FNE1KREGWRITE e1kRegWriteRDTR;
1392static FNE1KREGWRITE e1kRegWriteTDT;
1393static FNE1KREGREAD e1kRegReadMTA;
1394static FNE1KREGWRITE e1kRegWriteMTA;
1395static FNE1KREGREAD e1kRegReadRA;
1396static FNE1KREGWRITE e1kRegWriteRA;
1397static FNE1KREGREAD e1kRegReadVFTA;
1398static FNE1KREGWRITE e1kRegWriteVFTA;
1399
1400/**
1401 * Register map table.
1402 *
1403 * Override pfnRead and pfnWrite to get register-specific behavior.
1404 */
1405static const struct E1kRegMap_st
1406{
1407 /** Register offset in the register space. */
1408 uint32_t offset;
1409 /** Size in bytes. Registers of size > 4 are in fact tables. */
1410 uint32_t size;
1411 /** Readable bits. */
1412 uint32_t readable;
1413 /** Writable bits. */
1414 uint32_t writable;
1415 /** Read callback. */
1416 FNE1KREGREAD *pfnRead;
1417 /** Write callback. */
1418 FNE1KREGWRITE *pfnWrite;
1419 /** Abbreviated name. */
1420 const char *abbrev;
1421 /** Full name. */
1422 const char *name;
1423} g_aE1kRegMap[E1K_NUM_OF_REGS] =
1424{
1425 /* offset size read mask write mask read callback write callback abbrev full name */
1426 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1427 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1428 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1429 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1430 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1431 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1432 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1433 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1434 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1435 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1436 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1437 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1438 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1439 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1440 { 0x000c8, 0x00004, 0x0001F6DF, 0xFFFFFFFF, e1kRegReadICS , e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1441 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1442 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1443 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1444 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1445 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1446 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1447 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1448 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1449 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1450 { 0x00e00, 0x00004, 0xCFCFCFCF, 0xCFCFCFCF, e1kRegReadDefault , e1kRegWriteDefault , "LEDCTL" , "LED Control" },
1451 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1452 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1453 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1454 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1455 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1456 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1457 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1458 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1459 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1460 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1461 { 0x02808, 0x00004, 0x000FFF80, 0x000FFF80, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1462 { 0x02810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1463 { 0x02818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1464 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1465 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1466 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1467 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1468 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1469 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1470 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1471 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1472 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1473 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1474 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1475 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1476 { 0x03808, 0x00004, 0x000FFF80, 0x000FFF80, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1477 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1478 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1479 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1480 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1481 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1482 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1483 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1484 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1485 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1486 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1487 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1488 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1489 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1490 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1491 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1492 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1493 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1494 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1495 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1496 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1497 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1498 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1499 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1500 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1501 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1502 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1503 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1504 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1505 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1506 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1507 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1508 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1509 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1510 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1511 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1512 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1513 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1514 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1515 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1516 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1517 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1518 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1519 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1520 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1521 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1522 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1523 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1524 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1525 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1526 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1527 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1528 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1529 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1530 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1531 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1532 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1533 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1534 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1535 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1536 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1537 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1538 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1539 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1540 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1541 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1542 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1543 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1544 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1545 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1546 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1547 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1548 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1549 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1550 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1551 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1552 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1553 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1554 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1555 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1556 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1557 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1558 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
1559 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
1560 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
1561};
1562
1563#ifdef LOG_ENABLED
1564
1565/**
1566 * Convert U32 value to hex string. Nibbles not selected by the mask are replaced with dots.
1567 *
1568 * @remarks The mask has half-byte (nibble), not bit, granularity (e.g. 0000000F).
1569 *
1570 * @returns The buffer.
1571 *
1572 * @param u32 The word to convert into string.
1573 * @param mask Selects which nibbles to convert.
1574 * @param buf Where to put the result.
1575 */
1576static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1577{
1578 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1579 {
1580 if (mask & 0xF)
1581 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1582 else
1583 *ptr = '.';
1584 }
1585 buf[8] = 0;
1586 return buf;
1587}
1588
1589/**
1590 * Returns timer name for debug purposes.
1591 *
1592 * @returns The timer name.
1593 *
1594 * @param pThis The device state structure.
1595 * @param hTimer The timer to name.
1596 */
1597DECLINLINE(const char *) e1kGetTimerName(PE1KSTATE pThis, TMTIMERHANDLE hTimer)
1598{
1599 if (hTimer == pThis->hTIDTimer)
1600 return "TID";
1601 if (hTimer == pThis->hTADTimer)
1602 return "TAD";
1603 if (hTimer == pThis->hRIDTimer)
1604 return "RID";
1605 if (hTimer == pThis->hRADTimer)
1606 return "RAD";
1607 if (hTimer == pThis->hIntTimer)
1608 return "Int";
1609 if (hTimer == pThis->hTXDTimer)
1610 return "TXD";
1611 if (hTimer == pThis->hLUTimer)
1612 return "LinkUp";
1613 return "unknown";
1614}
1615
1616#endif /* LOG_ENABLED */
1617
1618/**
1619 * Arm a timer.
1620 *
1621 * @param pDevIns The device instance.
1622 * @param pThis Pointer to the device state structure.
1623 * @param hTimer The timer to arm.
1624 * @param uExpireIn Expiration interval in microseconds.
1625 */
1626DECLINLINE(void) e1kArmTimer(PPDMDEVINS pDevIns, PE1KSTATE pThis, TMTIMERHANDLE hTimer, uint32_t uExpireIn)
1627{
1628 if (pThis->fLocked)
1629 return;
1630
1631 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1632 pThis->szPrf, e1kGetTimerName(pThis, hTimer), uExpireIn));
1633 int rc = PDMDevHlpTimerSetMicro(pDevIns, hTimer, uExpireIn);
1634 AssertRC(rc);
1635}
1636
1637#ifdef IN_RING3
1638/**
1639 * Cancel a timer.
1640 *
1641 * @param pDevIns The device instance.
1642 * @param pThis Pointer to the device state structure.
1643 * @param hTimer The handle of the timer to cancel.
1644 */
1645DECLINLINE(void) e1kCancelTimer(PPDMDEVINS pDevIns, PE1KSTATE pThis, TMTIMERHANDLE hTimer)
1646{
1647 E1kLog2(("%s Stopping %s timer...\n",
1648 pThis->szPrf, e1kGetTimerName(pThis, hTimer)));
1649 int rc = PDMDevHlpTimerStop(pDevIns, hTimer);
1650 if (RT_FAILURE(rc))
1651 E1kLog2(("%s e1kCancelTimer: TMTimerStop(%s) failed with %Rrc\n",
1652 pThis->szPrf, e1kGetTimerName(pThis, hTimer), rc));
1653 RT_NOREF_PV(pThis);
1654}
1655#endif /* IN_RING3 */
1656
1657#define e1kCsEnter(ps, rc) PDMDevHlpCritSectEnter(pDevIns, &ps->cs, rc)
1658#define e1kCsLeave(ps) PDMDevHlpCritSectLeave(pDevIns, &ps->cs)
1659
1660#define e1kCsRxEnter(ps, rc) PDMDevHlpCritSectEnter(pDevIns, &ps->csRx, rc)
1661#define e1kCsRxLeave(ps) PDMDevHlpCritSectLeave(pDevIns, &ps->csRx)
1662#define e1kCsRxIsOwner(ps) PDMDevHlpCritSectIsOwner(pDevIns, &ps->csRx)
1663
1664#ifndef E1K_WITH_TX_CS
1665# define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1666# define e1kCsTxLeave(ps) do { } while (0)
1667#else /* E1K_WITH_TX_CS */
1668# define e1kCsTxEnter(ps, rc) PDMDevHlpCritSectEnter(pDevIns, &ps->csTx, rc)
1669# define e1kCsTxLeave(ps) PDMDevHlpCritSectLeave(pDevIns, &ps->csTx)
1670# define e1kCsTxIsOwner(ps) PDMDevHlpCritSectIsOwner(pDevIns, &ps->csTx)
1671#endif /* E1K_WITH_TX_CS */
1672
1673
1674#ifdef E1K_WITH_TXD_CACHE
1675/*
1676 * Transmit Descriptor Register Context
1677 */
1678struct E1kTxDContext
1679{
1680 uint32_t tdlen;
1681 uint32_t tdh;
1682 uint32_t tdt;
1683};
1684typedef struct E1kTxDContext E1KTXDC, *PE1KTXDC;
1685
1686DECLINLINE(bool) e1kUpdateTxDContext(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KTXDC pContext)
1687{
1688 Assert(e1kCsTxIsOwner(pThis));
1689 if (!e1kCsTxIsOwner(pThis))
1690 {
1691 memset(pContext, 0, sizeof(E1KTXDC));
1692 return false;
1693 }
1694 pContext->tdlen = TDLEN;
1695 pContext->tdh = TDH;
1696 pContext->tdt = TDT;
1697 uint32_t cTxRingSize = pContext->tdlen / sizeof(E1KTXDESC);
1698#ifdef DEBUG
1699 if (pContext->tdh >= cTxRingSize)
1700 {
1701 Log(("%s e1kUpdateTxDContext: will return false because TDH too big (%u >= %u)\n",
1702 pThis->szPrf, pContext->tdh, cTxRingSize));
1703 return false;
1704 }
1705 if (pContext->tdt >= cTxRingSize)
1706 {
1707 Log(("%s e1kUpdateTxDContext: will return false because TDT too big (%u >= %u)\n",
1708 pThis->szPrf, pContext->tdt, cTxRingSize));
1709 return false;
1710 }
1711#endif /* DEBUG */
1712 return pContext->tdh < cTxRingSize && pContext->tdt < cTxRingSize;
1713}
1714#endif /* E1K_WITH_TXD_CACHE */
1715#ifdef E1K_WITH_RXD_CACHE
1716/*
1717 * Receive Descriptor Register Context
1718 */
1719struct E1kRxDContext
1720{
1721 uint32_t rdlen;
1722 uint32_t rdh;
1723 uint32_t rdt;
1724};
1725typedef struct E1kRxDContext E1KRXDC, *PE1KRXDC;
1726
1727DECLINLINE(bool) e1kUpdateRxDContext(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KRXDC pContext, const char *pcszCallee)
1728{
1729 Assert(e1kCsRxIsOwner(pThis));
1730 if (!e1kCsRxIsOwner(pThis))
1731 return false;
1732 pContext->rdlen = RDLEN;
1733 pContext->rdh = RDH;
1734 pContext->rdt = RDT;
1735 uint32_t cRxRingSize = pContext->rdlen / sizeof(E1KRXDESC);
1736 /*
1737 * Note that the checks for RDT are a bit different. Some guests, OS/2 for
1738 * example, intend to use all descriptors in RX ring, so they point RDT
1739 * right beyond the last descriptor in the ring. While this is not
1740 * acceptable for other registers, it works out fine for RDT.
1741 */
1742#ifdef DEBUG
1743 if (pContext->rdh >= cRxRingSize)
1744 {
1745 Log(("%s e1kUpdateRxDContext: called from %s, will return false because RDH too big (%u >= %u)\n",
1746 pThis->szPrf, pcszCallee, pContext->rdh, cRxRingSize));
1747 return false;
1748 }
1749 if (pContext->rdt > cRxRingSize)
1750 {
1751 Log(("%s e1kUpdateRxDContext: called from %s, will return false because RDT too big (%u > %u)\n",
1752 pThis->szPrf, pcszCallee, pContext->rdt, cRxRingSize));
1753 return false;
1754 }
1755#else /* !DEBUG */
1756 RT_NOREF(pcszCallee);
1757#endif /* !DEBUG */
1758 return pContext->rdh < cRxRingSize && pContext->rdt <= cRxRingSize; // && (RCTL & RCTL_EN);
1759}
1760#endif /* E1K_WITH_RXD_CACHE */
1761
1762/**
1763 * Wakeup the RX thread.
1764 */
1765static void e1kWakeupReceive(PPDMDEVINS pDevIns, PE1KSTATE pThis)
1766{
1767 if ( pThis->fMaybeOutOfSpace
1768 && pThis->hEventMoreRxDescAvail != NIL_SUPSEMEVENT)
1769 {
1770 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatRxOverflowWakeup));
1771 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", pThis->szPrf));
1772 int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventMoreRxDescAvail);
1773 AssertRC(rc);
1774 }
1775}
1776
1777#ifdef IN_RING3
1778
1779/**
1780 * Hardware reset. Revert all registers to initial values.
1781 *
1782 * @param pDevIns The device instance.
1783 * @param pThis The device state structure.
1784 * @param pThisCC The current context instance data.
1785 */
1786static void e1kR3HardReset(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
1787{
1788 E1kLog(("%s Hard reset triggered\n", pThis->szPrf));
1789 /* No interrupts should survive device reset, see @bugref{9556}. */
1790 if (pThis->fIntRaised)
1791 {
1792 /* Lower(0) INTA(0) */
1793 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
1794 pThis->fIntRaised = false;
1795 E1kLog(("%s e1kR3HardReset: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
1796 }
1797 memset(pThis->auRegs, 0, sizeof(pThis->auRegs));
1798 memset(pThis->aRecAddr.au32, 0, sizeof(pThis->aRecAddr.au32));
1799#ifdef E1K_INIT_RA0
1800 memcpy(pThis->aRecAddr.au32, pThis->macConfigured.au8,
1801 sizeof(pThis->macConfigured.au8));
1802 pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;
1803#endif /* E1K_INIT_RA0 */
1804 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1805 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1806 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1807 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1808 Assert(GET_BITS(RCTL, BSIZE) == 0);
1809 pThis->u16RxBSize = 2048;
1810
1811 uint16_t u16LedCtl = 0x0602; /* LED0/LINK_UP#, LED2/LINK100# */
1812 pThisCC->eeprom.readWord(0x2F, &u16LedCtl); /* Read LEDCTL defaults from EEPROM */
1813 LEDCTL = 0x07008300 | (((uint32_t)u16LedCtl & 0xCF00) << 8) | (u16LedCtl & 0xCF); /* Only LED0 and LED2 defaults come from EEPROM */
1814
1815 /* Reset promiscuous mode */
1816 if (pThisCC->pDrvR3)
1817 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, false);
1818
1819#ifdef E1K_WITH_TXD_CACHE
1820 int rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
1821 if (RT_LIKELY(rc == VINF_SUCCESS))
1822 {
1823 pThis->nTxDFetched = 0;
1824 pThis->iTxDCurrent = 0;
1825 pThis->fGSO = false;
1826 pThis->cbTxAlloc = 0;
1827 e1kCsTxLeave(pThis);
1828 }
1829#endif /* E1K_WITH_TXD_CACHE */
1830#ifdef E1K_WITH_RXD_CACHE
1831 if (RT_LIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1832 {
1833 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
1834 e1kCsRxLeave(pThis);
1835 }
1836#endif /* E1K_WITH_RXD_CACHE */
1837#ifdef E1K_LSC_ON_RESET
1838 E1kLog(("%s Will trigger LSC in %d seconds...\n",
1839 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
1840 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, pThis->cMsLinkUpDelay * 1000);
1841#endif /* E1K_LSC_ON_RESET */
1842}
1843
1844#endif /* IN_RING3 */
1845
1846/**
1847 * Compute Internet checksum.
1848 *
1849 * @remarks Refer to http://www.netfor2.com/checksum.html for short intro.
1850 *
1851 * @param pvBuf The buffer holding the data to checksum.
1852 * @param cb The number of bytes to checksum.
1855 *
1856 * @return The 1's complement of the 1's complement sum.
1857 *
1858 * @thread E1000_TX
1859 */
1860static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1861{
1862 uint32_t csum = 0;
1863 uint16_t *pu16 = (uint16_t *)pvBuf;
1864
1865 while (cb > 1)
1866 {
1867 csum += *pu16++;
1868 cb -= 2;
1869 }
1870 if (cb)
1871 csum += *(uint8_t*)pu16;
1872 while (csum >> 16)
1873 csum = (csum >> 16) + (csum & 0xFFFF);
1874 Assert(csum < 65536);
1875 return (uint16_t)~csum;
1876}
1877
1878/**
1879 * Dump a packet to debug log.
1880 *
1881 * @param pDevIns The device instance.
1882 * @param pThis The device state structure.
1883 * @param cpPacket The packet.
1884 * @param cb The size of the packet.
1885 * @param pszText A string denoting direction of packet transfer.
1886 * @thread E1000_TX
1887 */
1888DECLINLINE(void) e1kPacketDump(PPDMDEVINS pDevIns, PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *pszText)
1889{
1890#ifdef DEBUG
1891 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1892 {
1893 Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
1894 pThis->szPrf, pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cb));
1895 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1896 {
1897 Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
1898 pThis->szPrf, cpPacket+14+8, cpPacket+14+24));
1899 if (*(cpPacket+14+6) == 0x6)
1900 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1901 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1902 }
1903 else if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x800)
1904 {
1905 Log4(("%s --- IPv4: %RTnaipv4 => %RTnaipv4\n",
1906 pThis->szPrf, *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16)));
1907 if (*(cpPacket+14+6) == 0x6)
1908 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1909 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1910 }
1911 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1912 e1kCsLeave(pThis);
1913 }
1914#else
1915 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1916 {
1917 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1918 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
1919 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cpPacket+14+8, cpPacket+14+24,
1920 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1921 else
1922 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
1923 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket,
1924 *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16),
1925 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1926 e1kCsLeave(pThis);
1927 }
1928 RT_NOREF2(cb, pszText);
1929#endif
1930}
1931
1932/**
1933 * Determine the type of transmit descriptor.
1934 *
1935 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1936 *
1937 * @param pDesc Pointer to descriptor union.
1938 * @thread E1000_TX
1939 */
1940DECLINLINE(int) e1kGetDescType(E1KTXDESC *pDesc)
1941{
1942 if (pDesc->legacy.cmd.fDEXT)
1943 return pDesc->context.dw2.u4DTYP;
1944 return E1K_DTYP_LEGACY;
1945}
1946
1947
1948#ifdef E1K_WITH_RXD_CACHE
1949/**
1950 * Return the number of RX descriptors that belong to the hardware.
1951 *
1952 * @returns the number of available descriptors in RX ring.
1953 * @param pRxdc The receive descriptor register context.
1954 * @thread ???
1955 */
1956DECLINLINE(uint32_t) e1kGetRxLen(PE1KRXDC pRxdc)
1957{
1958 /*
1959 * Make sure RDT won't change during computation. EMT may modify RDT at
1960 * any moment.
1961 */
1962 uint32_t rdt = pRxdc->rdt;
1963 return (pRxdc->rdh > rdt ? pRxdc->rdlen/sizeof(E1KRXDESC) : 0) + rdt - pRxdc->rdh;
1964}
1965
1966DECLINLINE(unsigned) e1kRxDInCache(PE1KSTATE pThis)
1967{
1968 return pThis->nRxDFetched > pThis->iRxDCurrent ?
1969 pThis->nRxDFetched - pThis->iRxDCurrent : 0;
1970}
1971
1972DECLINLINE(unsigned) e1kRxDIsCacheEmpty(PE1KSTATE pThis)
1973{
1974 return pThis->iRxDCurrent >= pThis->nRxDFetched;
1975}
1976
1977/**
1978 * Load receive descriptors from guest memory. The caller needs to be in Rx
1979 * critical section.
1980 *
1981 * We need two physical reads in case the tail wrapped around the end of the
1982 * RX descriptor ring.
1983 *
1984 * @returns the actual number of descriptors fetched.
1985 * @param pDevIns The device instance.
1986 * @param pThis The device state structure.
1987 * @thread EMT, RX
1988 */
1989DECLINLINE(unsigned) e1kRxDPrefetch(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KRXDC pRxdc)
1990{
1991 E1kLog3(("%s e1kRxDPrefetch: RDH=%x RDT=%x RDLEN=%x "
1992 "iRxDCurrent=%x nRxDFetched=%x\n",
1993 pThis->szPrf, pRxdc->rdh, pRxdc->rdt, pRxdc->rdlen, pThis->iRxDCurrent, pThis->nRxDFetched));
1994 /* We've already loaded pThis->nRxDFetched descriptors past RDH. */
1995 unsigned nDescsAvailable = e1kGetRxLen(pRxdc) - e1kRxDInCache(pThis);
1996 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pThis->nRxDFetched);
1997 unsigned nDescsTotal = pRxdc->rdlen / sizeof(E1KRXDESC);
1998 Assert(nDescsTotal != 0);
1999 if (nDescsTotal == 0)
2000 return 0;
2001 unsigned nFirstNotLoaded = (pRxdc->rdh + e1kRxDInCache(pThis)) % nDescsTotal;
2002 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
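 /*
  * Example (illustrative): with an 8-descriptor ring, an empty cache, RDH = 6
  * and RDT = 2 there are 4 descriptors to fetch; nFirstNotLoaded is 6 and
  * nDescsInSingleRead is 2, so descriptors 6..7 come from the first read below
  * and descriptors 0..1 from the wrap-around read further down.
  */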
2003 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
2004 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
2005 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
2006 nFirstNotLoaded, nDescsInSingleRead));
2007 if (nDescsToFetch == 0)
2008 return 0;
2009 E1KRXDESC* pFirstEmptyDesc = &pThis->aRxDescriptors[pThis->nRxDFetched];
2010 PDMDevHlpPCIPhysRead(pDevIns,
2011 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
2012 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
2013 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
2014 // unsigned i, j;
2015 // for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
2016 // {
2017 // pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
2018 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2019 // }
2020 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
2021 pThis->szPrf, nDescsInSingleRead,
2022 RDBAH, RDBAL + pRxdc->rdh * sizeof(E1KRXDESC),
2023 nFirstNotLoaded, pRxdc->rdlen, pRxdc->rdh, pRxdc->rdt));
2024 if (nDescsToFetch > nDescsInSingleRead)
2025 {
2026 PDMDevHlpPCIPhysRead(pDevIns,
2027 ((uint64_t)RDBAH << 32) + RDBAL,
2028 pFirstEmptyDesc + nDescsInSingleRead,
2029 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
2030 // Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
2031 // for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
2032 // {
2033 // pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
2034 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2035 // }
2036 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
2037 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
2038 RDBAH, RDBAL));
2039 }
2040 pThis->nRxDFetched += nDescsToFetch;
2041 return nDescsToFetch;
2042}
2043
2044# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2045/**
2046 * Dump receive descriptor to debug log.
2047 *
2048 * @param pThis The device state structure.
2049 * @param pDesc Pointer to the descriptor.
2050 * @thread E1000_RX
2051 */
2052static void e1kPrintRDesc(PE1KSTATE pThis, E1KRXDESC *pDesc)
2053{
2054 RT_NOREF2(pThis, pDesc);
2055 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", pThis->szPrf, pDesc->u16Length));
2056 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
2057 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
2058 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
2059 pDesc->status.fPIF ? "PIF" : "pif",
2060 pDesc->status.fIPCS ? "IPCS" : "ipcs",
2061 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
2062 pDesc->status.fVP ? "VP" : "vp",
2063 pDesc->status.fIXSM ? "IXSM" : "ixsm",
2064 pDesc->status.fEOP ? "EOP" : "eop",
2065 pDesc->status.fDD ? "DD" : "dd",
2066 pDesc->status.fRXE ? "RXE" : "rxe",
2067 pDesc->status.fIPE ? "IPE" : "ipe",
2068 pDesc->status.fTCPE ? "TCPE" : "tcpe",
2069 pDesc->status.fCE ? "CE" : "ce",
2070 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
2071 E1K_SPEC_VLAN(pDesc->status.u16Special),
2072 E1K_SPEC_PRI(pDesc->status.u16Special)));
2073}
2074# endif /* IN_RING3 */
2075#endif /* E1K_WITH_RXD_CACHE */
2076
2077/**
2078 * Dump transmit descriptor to debug log.
2079 *
2080 * @param pThis The device state structure.
2081 * @param pDesc Pointer to descriptor union.
2082 * @param pszDir A string denoting direction of descriptor transfer
2083 * @thread E1000_TX
2084 */
2085static void e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, const char *pszDir,
2086 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
2087{
2088 RT_NOREF4(pThis, pDesc, pszDir, uLevel);
2089
2090 /*
2091 * Unfortunately we cannot use our format handler here because we want R0
2092 * logging as well.
2093 */
2094 switch (e1kGetDescType(pDesc))
2095 {
2096 case E1K_DTYP_CONTEXT:
2097 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
2098 pThis->szPrf, pszDir, pszDir));
2099 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
2100 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
2101 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
2102 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
2103 pDesc->context.dw2.fIDE ? " IDE":"",
2104 pDesc->context.dw2.fRS ? " RS" :"",
2105 pDesc->context.dw2.fTSE ? " TSE":"",
2106 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
2107 pDesc->context.dw2.fTCP ? "TCP":"UDP",
2108 pDesc->context.dw2.u20PAYLEN,
2109 pDesc->context.dw3.u8HDRLEN,
2110 pDesc->context.dw3.u16MSS,
2111 pDesc->context.dw3.fDD?"DD":""));
2112 break;
2113 case E1K_DTYP_DATA:
2114 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
2115 pThis->szPrf, pszDir, pDesc->data.cmd.u20DTALEN, pszDir));
2116 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
2117 pDesc->data.u64BufAddr,
2118 pDesc->data.cmd.u20DTALEN));
2119 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
2120 pDesc->data.cmd.fIDE ? " IDE" :"",
2121 pDesc->data.cmd.fVLE ? " VLE" :"",
2122 pDesc->data.cmd.fRPS ? " RPS" :"",
2123 pDesc->data.cmd.fRS ? " RS" :"",
2124 pDesc->data.cmd.fTSE ? " TSE" :"",
2125 pDesc->data.cmd.fIFCS? " IFCS":"",
2126 pDesc->data.cmd.fEOP ? " EOP" :"",
2127 pDesc->data.dw3.fDD ? " DD" :"",
2128 pDesc->data.dw3.fEC ? " EC" :"",
2129 pDesc->data.dw3.fLC ? " LC" :"",
2130 pDesc->data.dw3.fTXSM? " TXSM":"",
2131 pDesc->data.dw3.fIXSM? " IXSM":"",
2132 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
2133 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
2134 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
2135 break;
2136 case E1K_DTYP_LEGACY:
2137 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
2138 pThis->szPrf, pszDir, pDesc->legacy.cmd.u16Length, pszDir));
2139 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
2140 pDesc->data.u64BufAddr,
2141 pDesc->legacy.cmd.u16Length));
2142 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
2143 pDesc->legacy.cmd.fIDE ? " IDE" :"",
2144 pDesc->legacy.cmd.fVLE ? " VLE" :"",
2145 pDesc->legacy.cmd.fRPS ? " RPS" :"",
2146 pDesc->legacy.cmd.fRS ? " RS" :"",
2147 pDesc->legacy.cmd.fIC ? " IC" :"",
2148 pDesc->legacy.cmd.fIFCS? " IFCS":"",
2149 pDesc->legacy.cmd.fEOP ? " EOP" :"",
2150 pDesc->legacy.dw3.fDD ? " DD" :"",
2151 pDesc->legacy.dw3.fEC ? " EC" :"",
2152 pDesc->legacy.dw3.fLC ? " LC" :"",
2153 pDesc->legacy.cmd.u8CSO,
2154 pDesc->legacy.dw3.u8CSS,
2155 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
2156 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
2157 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
2158 break;
2159 default:
2160 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
2161 pThis->szPrf, pszDir, pszDir));
2162 break;
2163 }
2164}
2165
2166/**
2167 * Raise an interrupt later.
2168 *
2169 * @param pThis The device state structure.
2170 */
2171DECLINLINE(void) e1kPostponeInterrupt(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint64_t nsDeadline)
2172{
2173 if (!PDMDevHlpTimerIsActive(pDevIns, pThis->hIntTimer))
2174 PDMDevHlpTimerSetNano(pDevIns, pThis->hIntTimer, nsDeadline);
2175}
2176
2177/**
2178 * Raise interrupt if not masked.
2179 *
2180 * @param pThis The device state structure.
2181 */
2182static int e1kRaiseInterrupt(PPDMDEVINS pDevIns, PE1KSTATE pThis, int rcBusy, uint32_t u32IntCause)
2183{
2184 int rc = e1kCsEnter(pThis, rcBusy);
2185 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2186 return rc;
2187
2188 E1K_INC_ISTAT_CNT(pThis->uStatIntTry);
2189 ICR |= u32IntCause;
2190 if (ICR & IMS)
2191 {
2192 if (pThis->fIntRaised)
2193 {
2194 E1K_INC_ISTAT_CNT(pThis->uStatIntSkip);
2195 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
2196 pThis->szPrf, ICR & IMS));
2197 }
2198 else
2199 {
2200 uint64_t tsNow = PDMDevHlpTimerGet(pDevIns, pThis->hIntTimer);
2201 if (!!ITR && tsNow - pThis->u64AckedAt < ITR * 256
2202 && pThis->fItrEnabled && (pThis->fItrRxEnabled || !(ICR & ICR_RXT0)))
2203 {
2204 E1K_INC_ISTAT_CNT(pThis->uStatIntEarly);
2205 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
2206 pThis->szPrf, (uint32_t)(tsNow - pThis->u64AckedAt), ITR * 256));
2207 e1kPostponeInterrupt(pDevIns, pThis, ITR * 256);
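 /*
  * Illustrative arithmetic: ITR counts in 256 ns units, so a guest-programmed
  * value of 200 enforces a minimum interrupt interval of 200 * 256 ns = 51.2 us,
  * i.e. roughly 19500 interrupts per second at most.
  */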
2208 }
2209 else
2210 {
2211
2212 /* Since we are delivering the interrupt now
2213 * there is no need to do it later -- stop the timer.
2214 */
2215 PDMDevHlpTimerStop(pDevIns, pThis->hIntTimer);
2216 E1K_INC_ISTAT_CNT(pThis->uStatInt);
2217 STAM_COUNTER_INC(&pThis->StatIntsRaised);
2218 /* Got at least one unmasked interrupt cause */
2219 pThis->fIntRaised = true;
2220 /* Raise(1) INTA(0) */
2221 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
2222 PDMDevHlpPCISetIrq(pDevIns, 0, 1);
2223 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
2224 pThis->szPrf, ICR & IMS));
2225 }
2226 }
2227 }
2228 else
2229 {
2230 E1K_INC_ISTAT_CNT(pThis->uStatIntMasked);
2231 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
2232 pThis->szPrf, ICR, IMS));
2233 }
2234 e1kCsLeave(pThis);
2235 return VINF_SUCCESS;
2236}
2237
2238/**
2239 * Compute the physical address of the descriptor.
2240 *
2241 * @returns the physical address of the descriptor.
2242 *
2243 * @param baseHigh High-order 32 bits of descriptor table address.
2244 * @param baseLow Low-order 32 bits of descriptor table address.
2245 * @param idxDesc The descriptor index in the table.
2246 */
2247DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
2248{
2249 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
2250 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
2251}
2252
2253#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2254/**
2255 * Advance the head pointer of the receive descriptor queue.
2256 *
2257 * @remarks RDH always points to the next available RX descriptor.
2258 *
2259 * @param pDevIns The device instance.
2260 * @param pThis The device state structure.
2261 */
2262DECLINLINE(void) e1kAdvanceRDH(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KRXDC pRxdc)
2263{
2264 Assert(e1kCsRxIsOwner(pThis));
2265 //e1kCsEnter(pThis, RT_SRC_POS);
2266 if (++pRxdc->rdh * sizeof(E1KRXDESC) >= pRxdc->rdlen)
2267 pRxdc->rdh = 0;
2268 RDH = pRxdc->rdh; /* Sync the actual register and RXDC */
2269#ifdef E1K_WITH_RXD_CACHE
2270 /*
2271 * We need to fetch descriptors now as the guest may advance RDT all the way
2272 * to RDH as soon as we generate the RXDMT0 interrupt. This is mostly to provide
2273 * compatibility with Phar Lap ETS, see @bugref{7346}. Note that we do not
2274 * check if the receiver is enabled. It must be, otherwise we won't get here
2275 * in the first place.
2276 *
2277 * Note that we should have moved both RDH and iRxDCurrent by now.
2278 */
2279 if (e1kRxDIsCacheEmpty(pThis))
2280 {
2281 /* Cache is empty, reset it and check if we can fetch more. */
2282 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2283 E1kLog3(("%s e1kAdvanceRDH: Rx cache is empty, RDH=%x RDT=%x "
2284 "iRxDCurrent=%x nRxDFetched=%x\n",
2285 pThis->szPrf, pRxdc->rdh, pRxdc->rdt, pThis->iRxDCurrent, pThis->nRxDFetched));
2286 e1kRxDPrefetch(pDevIns, pThis, pRxdc);
2287 }
2288#endif /* E1K_WITH_RXD_CACHE */
2289 /*
2290 * Compute current receive queue length and fire RXDMT0 interrupt
2291 * if we are low on receive buffers
2292 */
2293 uint32_t uRQueueLen = pRxdc->rdh>pRxdc->rdt ? pRxdc->rdlen/sizeof(E1KRXDESC)-pRxdc->rdh+pRxdc->rdt : pRxdc->rdt-pRxdc->rdh;
2294 /*
2295 * The minimum threshold is controlled by RDMTS bits of RCTL:
2296 * 00 = 1/2 of RDLEN
2297 * 01 = 1/4 of RDLEN
2298 * 10 = 1/8 of RDLEN
2299 * 11 = reserved
2300 */
2301 uint32_t uMinRQThreshold = pRxdc->rdlen / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
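 /*
  * Example (illustrative): for a 256-descriptor ring with RCTL.RDMTS = 01b the
  * threshold is 256 / (2 << 1) = 64, i.e. RXDMT0 is raised once 64 or fewer
  * descriptors remain available to the hardware.
  */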
2302 if (uRQueueLen <= uMinRQThreshold)
2303 {
2304 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", pRxdc->rdh, pRxdc->rdt, uRQueueLen, uMinRQThreshold));
2305 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
2306 pThis->szPrf, pRxdc->rdh, pRxdc->rdt, uRQueueLen, uMinRQThreshold));
2307 E1K_INC_ISTAT_CNT(pThis->uStatIntRXDMT0);
2308 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXDMT0);
2309 }
2310 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
2311 pThis->szPrf, pRxdc->rdh, pRxdc->rdt, uRQueueLen));
2312 //e1kCsLeave(pThis);
2313}
2314#endif /* IN_RING3 */
2315
2316#ifdef E1K_WITH_RXD_CACHE
2317
2318# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2319
2320/**
2321 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2322 * RX ring if the cache is empty.
2323 *
2324 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2325 * go out of sync with RDH, which will cause trouble when EMT checks if the
2326 * cache is empty to do pre-fetch (see @bugref{6217}).
2327 *
2328 * @param pDevIns The device instance.
2329 * @param pThis The device state structure.
2330 * @thread RX
2331 */
2332DECLINLINE(E1KRXDESC *) e1kRxDGet(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KRXDC pRxdc)
2333{
2334 Assert(e1kCsRxIsOwner(pThis));
2335 /* Check the cache first. */
2336 if (pThis->iRxDCurrent < pThis->nRxDFetched)
2337 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2338 /* Cache is empty, reset it and check if we can fetch more. */
2339 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2340 if (e1kRxDPrefetch(pDevIns, pThis, pRxdc))
2341 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2342 /* Out of Rx descriptors. */
2343 return NULL;
2344}
2345
2346
2347/**
2348 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2349 * pointer. The descriptor gets written back to the RXD ring.
2350 *
2351 * @param pDevIns The device instance.
2352 * @param pThis The device state structure.
2353 * @param pDesc The descriptor being "returned" to the RX ring.
2354 * @thread RX
2355 */
2356DECLINLINE(void) e1kRxDPut(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC* pDesc, PE1KRXDC pRxdc)
2357{
2358 Assert(e1kCsRxIsOwner(pThis));
2359 pThis->iRxDCurrent++;
2360 // Assert(pDesc >= pThis->aRxDescriptors);
2361 // Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2362 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2363 // uint32_t rdh = RDH;
2364 // Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
2365 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, pRxdc->rdh), pDesc, sizeof(E1KRXDESC));
2366 /*
2367 * We need to print the descriptor before advancing RDH as it may fetch new
2368 * descriptors into the cache.
2369 */
2370 e1kPrintRDesc(pThis, pDesc);
2371 e1kAdvanceRDH(pDevIns, pThis, pRxdc);
2372}
2373
2374/**
2375 * Store a fragment of the received packet at the specified address.
2376 *
2377 * @param pDevIns The device instance.
2378 * @param pThis The device state structure.
2379 * @param pDesc The next available RX descriptor.
2380 * @param pvBuf The fragment.
2381 * @param cb The size of the fragment.
2382 */
2383static void e1kStoreRxFragment(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2384{
2385 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2386 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2387 pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2388 PDMDevHlpPCIPhysWrite(pDevIns, pDesc->u64BufAddr, pvBuf, cb);
2389 pDesc->u16Length = (uint16_t)cb;
2390 Assert(pDesc->u16Length == cb);
2391 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2392 RT_NOREF(pThis);
2393}
2394
2395# endif /* IN_RING3 */
2396
2397#else /* !E1K_WITH_RXD_CACHE */
2398
2399/**
2400 * Store a fragment of received packet that fits into the next available RX
2401 * buffer.
2402 *
2403 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2404 *
2405 * @param pDevIns The device instance.
2406 * @param pThis The device state structure.
2407 * @param pDesc The next available RX descriptor.
2408 * @param pvBuf The fragment.
2409 * @param cb The size of the fragment.
2410 */
2411static void e1kStoreRxFragment(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2412{
2413 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2414 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2415 PDMDevHlpPCIPhysWrite(pDevIns, pDesc->u64BufAddr, pvBuf, cb);
2416 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2417 /* Write back the descriptor */
2418 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2419 e1kPrintRDesc(pThis, pDesc);
2420 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2421 /* Advance head */
2422 e1kAdvanceRDH(pDevIns, pThis);
2423 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
2424 if (pDesc->status.fEOP)
2425 {
2426 /* Complete packet has been stored -- it is time to let the guest know. */
2427#ifdef E1K_USE_RX_TIMERS
2428 if (RDTR)
2429 {
2430 /* Arm the timer to fire in RDTR usec (discard .024) */
2431 e1kArmTimer(pDevIns, pThis, pThis->hRIDTimer, RDTR);
2432 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2433 if (RADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hRADTimer))
2434 e1kArmTimer(pDevIns, pThis, pThis->hRADTimer, RADV);
2435 }
2436 else
2437 {
2438#endif
2439 /* 0 delay means immediate interrupt */
2440 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2441 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
2442#ifdef E1K_USE_RX_TIMERS
2443 }
2444#endif
2445 }
2446 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2447}
2448
2449#endif /* !E1K_WITH_RXD_CACHE */
2450
2451/**
2452 * Returns true if it is a broadcast packet.
2453 *
2454 * @returns true if destination address indicates broadcast.
2455 * @param pvBuf The ethernet packet.
2456 */
2457DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2458{
2459 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2460 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2461}
2462
2463/**
2464 * Returns true if it is a multicast packet.
2465 *
2466 * @remarks returns true for broadcast packets as well.
2467 * @returns true if destination address indicates multicast.
2468 * @param pvBuf The ethernet packet.
2469 */
2470DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2471{
2472 return (*(char*)pvBuf) & 1;
2473}
2474
2475#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2476/**
2477 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2478 *
2479 * @remarks We emulate checksum offloading for major packet types only.
2480 *
2481 * @returns VBox status code.
2482 * @param pThis The device state structure.
2483 * @param pFrame The available data.
2484 * @param cb Number of bytes available in the buffer.
2485 * @param status Bit fields containing status info.
2486 */
2487static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2488{
2489 /** @todo
2490 * It is not safe to bypass checksum verification for packets coming
2491 * from the real wire. We are currently unable to tell where packets are
2492 * coming from, so we tell the driver to ignore our checksum flags
2493 * and do the verification in software.
2494 */
2495# if 0
2496 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2497
2498 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", pThis->szPrf, uEtherType));
2499
2500 switch (uEtherType)
2501 {
2502 case 0x800: /* IPv4 */
2503 {
2504 pStatus->fIXSM = false;
2505 pStatus->fIPCS = true;
2506 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2507 /* TCP/UDP checksum offloading works with TCP and UDP only */
2508 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2509 break;
2510 }
2511 case 0x86DD: /* IPv6 */
2512 pStatus->fIXSM = false;
2513 pStatus->fIPCS = false;
2514 pStatus->fTCPCS = true;
2515 break;
2516 default: /* ARP, VLAN, etc. */
2517 pStatus->fIXSM = true;
2518 break;
2519 }
2520# else
2521 pStatus->fIXSM = true;
2522 RT_NOREF_PV(pThis); RT_NOREF_PV(pFrame); RT_NOREF_PV(cb);
2523# endif
2524 return VINF_SUCCESS;
2525}
2526#endif /* IN_RING3 */
2527
2528/**
2529 * Pad and store received packet.
2530 *
2531 * @remarks Make sure that the packet appears to upper layer as one coming
2532 * from real Ethernet: pad it and insert FCS.
2533 *
2534 * @returns VBox status code.
2535 * @param pDevIns The device instance.
2536 * @param pThis The device state structure.
2537 * @param pvBuf The available data.
2538 * @param cb Number of bytes available in the buffer.
2539 * @param status Bit fields containing status info.
2540 */
2541static int e1kHandleRxPacket(PPDMDEVINS pDevIns, PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
2542{
2543#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2544 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2545 uint8_t *ptr = rxPacket;
2546# ifdef E1K_WITH_RXD_CACHE
2547 E1KRXDC rxdc;
2548# endif /* E1K_WITH_RXD_CACHE */
2549
2550 int rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2551 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2552 return rc;
2553# ifdef E1K_WITH_RXD_CACHE
2554 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kHandleRxPacket")))
2555 {
2556 e1kCsRxLeave(pThis);
2557 E1kLog(("%s e1kHandleRxPacket: failed to update Rx context, returning VINF_SUCCESS\n", pThis->szPrf));
2558 return VINF_SUCCESS;
2559 }
2560# endif /* E1K_WITH_RXD_CACHE */
2561
2562 if (cb > 70) /* unqualified guess */
2563 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
2564
2565 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2566 Assert(cb > 16);
2567 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2568 E1kLog3(("%s Max RX packet size is %u\n", pThis->szPrf, cbMax));
2569 if (status.fVP)
2570 {
2571 /* VLAN packet -- strip VLAN tag in VLAN mode */
2572 if ((CTRL & CTRL_VME) && cb > 16)
2573 {
2574 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2575 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2576 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2577 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2578 cb -= 4;
2579 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2580 pThis->szPrf, status.u16Special, cb));
2581 }
2582 else
2583 {
2584 status.fVP = false; /* Set VP only if we stripped the tag */
2585 memcpy(rxPacket, pvBuf, cb);
2586 }
2587 }
2588 else
2589 memcpy(rxPacket, pvBuf, cb);
2590 /* Pad short packets */
2591 if (cb < 60)
2592 {
2593 memset(rxPacket + cb, 0, 60 - cb);
2594 cb = 60;
2595 }
2596 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2597 {
2598 STAM_PROFILE_ADV_START(&pThis->StatReceiveCRC, a);
2599 /*
2600 * Add FCS if CRC stripping is not enabled. Since the value of the CRC
2601 * is ignored by most drivers, we may as well save ourselves the trouble
2602 * of calculating it (see the EthernetCRC CFGM parameter).
2603 */
2604 if (pThis->fEthernetCRC)
2605 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2606 cb += sizeof(uint32_t);
2607 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveCRC, a);
2608 E1kLog3(("%s Added FCS (cb=%u)\n", pThis->szPrf, cb));
2609 }
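 /*
  * Example (illustrative): a 42-byte ARP request is first padded with zeros to
  * the 60-byte minimum above; with CRC stripping disabled a 4-byte FCS is then
  * appended, so the guest sees a 64-byte frame and PRC64 gets incremented.
  */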
2610 /* Compute checksum of complete packet */
2611 size_t cbCSumStart = RT_MIN(GET_BITS(RXCSUM, PCSS), cb);
2612 uint16_t checksum = e1kCSum16(rxPacket + cbCSumStart, cb - cbCSumStart);
2613 e1kRxChecksumOffload(pThis, rxPacket, cb, &status);
2614
2615 /* Update stats */
2616 E1K_INC_CNT32(GPRC);
2617 if (e1kIsBroadcast(pvBuf))
2618 E1K_INC_CNT32(BPRC);
2619 else if (e1kIsMulticast(pvBuf))
2620 E1K_INC_CNT32(MPRC);
2621 /* Update octet receive counter */
2622 E1K_ADD_CNT64(GORCL, GORCH, cb);
2623 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
2624 if (cb == 64)
2625 E1K_INC_CNT32(PRC64);
2626 else if (cb < 128)
2627 E1K_INC_CNT32(PRC127);
2628 else if (cb < 256)
2629 E1K_INC_CNT32(PRC255);
2630 else if (cb < 512)
2631 E1K_INC_CNT32(PRC511);
2632 else if (cb < 1024)
2633 E1K_INC_CNT32(PRC1023);
2634 else
2635 E1K_INC_CNT32(PRC1522);
2636
2637 E1K_INC_ISTAT_CNT(pThis->uStatRxFrm);
2638
2639# ifdef E1K_WITH_RXD_CACHE
2640 while (cb > 0)
2641 {
2642 E1KRXDESC *pDesc = e1kRxDGet(pDevIns, pThis, &rxdc);
2643
2644 if (pDesc == NULL)
2645 {
2646 E1kLog(("%s Out of receive buffers, dropping the packet "
2647 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2648 pThis->szPrf, cb, e1kRxDInCache(pThis), rxdc.rdh, rxdc.rdt));
2649 break;
2650 }
2651# else /* !E1K_WITH_RXD_CACHE */
2652 if (RDH == RDT)
2653 {
2654 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2655 pThis->szPrf));
2656 }
2657 /* Store the packet to receive buffers */
2658 while (RDH != RDT)
2659 {
2660 /* Load the descriptor pointed by head */
2661 E1KRXDESC desc, *pDesc = &desc;
2662 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
2663# endif /* !E1K_WITH_RXD_CACHE */
2664 if (pDesc->u64BufAddr)
2665 {
2666 uint16_t u16RxBufferSize = pThis->u16RxBSize; /* see @bugref{9427} */
2667
2668 /* Update descriptor */
2669 pDesc->status = status;
2670 pDesc->u16Checksum = checksum;
2671 pDesc->status.fDD = true;
2672
2673 /*
2674 * We need to leave Rx critical section here or we risk deadlocking
2675 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2676 * page or has an access handler associated with it.
2677 * Note that it is safe to leave the critical section here since
2678 * e1kRegWriteRDT() never modifies RDH. It never touches already
2679 * fetched RxD cache entries either.
2680 */
2681 if (cb > u16RxBufferSize)
2682 {
2683 pDesc->status.fEOP = false;
2684 e1kCsRxLeave(pThis);
2685 e1kStoreRxFragment(pDevIns, pThis, pDesc, ptr, u16RxBufferSize);
2686 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2687 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2688 return rc;
2689# ifdef E1K_WITH_RXD_CACHE
2690 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kHandleRxPacket")))
2691 {
2692 e1kCsRxLeave(pThis);
2693 E1kLog(("%s e1kHandleRxPacket: failed to update Rx context, returning VINF_SUCCESS\n", pThis->szPrf));
2694 return VINF_SUCCESS;
2695 }
2696# endif /* E1K_WITH_RXD_CACHE */
2697 ptr += u16RxBufferSize;
2698 cb -= u16RxBufferSize;
2699 }
2700 else
2701 {
2702 pDesc->status.fEOP = true;
2703 e1kCsRxLeave(pThis);
2704 e1kStoreRxFragment(pDevIns, pThis, pDesc, ptr, cb);
2705# ifdef E1K_WITH_RXD_CACHE
2706 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2707 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2708 return rc;
2709 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kHandleRxPacket")))
2710 {
2711 e1kCsRxLeave(pThis);
2712 E1kLog(("%s e1kHandleRxPacket: failed to update Rx context, returning VINF_SUCCESS\n", pThis->szPrf));
2713 return VINF_SUCCESS;
2714 }
2715 cb = 0;
2716# else /* !E1K_WITH_RXD_CACHE */
2717 pThis->led.Actual.s.fReading = 0;
2718 return VINF_SUCCESS;
2719# endif /* !E1K_WITH_RXD_CACHE */
2720 }
2721 /*
2722 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2723 * is not defined.
2724 */
2725 }
2726# ifdef E1K_WITH_RXD_CACHE
2727 /* Write back the descriptor. */
2728 pDesc->status.fDD = true;
2729 e1kRxDPut(pDevIns, pThis, pDesc, &rxdc);
2730# else /* !E1K_WITH_RXD_CACHE */
2731 else
2732 {
2733 /* Write back the descriptor. */
2734 pDesc->status.fDD = true;
2735 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2736 e1kAdvanceRDH(pDevIns, pThis);
2737 }
2738# endif /* !E1K_WITH_RXD_CACHE */
2739 }
2740
2741 if (cb > 0)
2742 E1kLog(("%s Out of receive buffers, dropping %u bytes\n", pThis->szPrf, cb));
2743
2744 pThis->led.Actual.s.fReading = 0;
2745
2746 e1kCsRxLeave(pThis);
2747# ifdef E1K_WITH_RXD_CACHE
2748 /* Complete packet has been stored -- it is time to let the guest know. */
2749# ifdef E1K_USE_RX_TIMERS
2750 if (RDTR)
2751 {
2752 /* Arm the timer to fire in RDTR usec (discard .024) */
2753 e1kArmTimer(pDevIns, pThis, pThis->hRIDTimer, RDTR);
2754 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2755 if (RADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hRADTimer))
2756 e1kArmTimer(pDevIns, pThis, pThis->hRADTimer, RADV);
2757 }
2758 else
2759 {
2760# endif /* E1K_USE_RX_TIMERS */
2761 /* 0 delay means immediate interrupt */
2762 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2763 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
2764# ifdef E1K_USE_RX_TIMERS
2765 }
2766# endif /* E1K_USE_RX_TIMERS */
2767# endif /* E1K_WITH_RXD_CACHE */
2768
2769 return VINF_SUCCESS;
2770#else /* !IN_RING3 */
2771 RT_NOREF(pDevIns, pThis, pvBuf, cb, status);
2772 return VERR_INTERNAL_ERROR_2;
2773#endif /* !IN_RING3 */
2774}
2775
2776
2777#ifdef IN_RING3
2778/**
2779 * Bring the link up after the configured delay, 5 seconds by default.
2780 *
2781 * @param pDevIns The device instance.
2782 * @param pThis The device state structure.
2783 * @thread any
2784 */
2785DECLINLINE(void) e1kBringLinkUpDelayed(PPDMDEVINS pDevIns, PE1KSTATE pThis)
2786{
2787 E1kLog(("%s Will bring up the link in %d seconds...\n",
2788 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
2789 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, pThis->cMsLinkUpDelay * 1000);
2790}
2791
2792/**
2793 * Bring up the link immediately.
2794 *
2795 * @param pDevIns The device instance.
2796 * @param pThis The device state structure.
2797 * @param pThisCC The current context instance data.
2798 */
2799DECLINLINE(void) e1kR3LinkUp(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2800{
2801 E1kLog(("%s Link is up\n", pThis->szPrf));
2802 STATUS |= STATUS_LU;
2803 Phy::setLinkStatus(&pThis->phy, true);
2804 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2805 if (pThisCC->pDrvR3)
2806 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_UP);
2807 /* Trigger processing of pending TX descriptors (see @bugref{8942}). */
2808 PDMDevHlpTaskTrigger(pDevIns, pThis->hTxTask);
2809}
2810
2811/**
2812 * Bring down the link immediately.
2813 *
2814 * @param pDevIns The device instance.
2815 * @param pThis The device state structure.
2816 * @param pThisCC The current context instance data.
2817 */
2818DECLINLINE(void) e1kR3LinkDown(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2819{
2820 E1kLog(("%s Link is down\n", pThis->szPrf));
2821 STATUS &= ~STATUS_LU;
2822#ifdef E1K_LSC_ON_RESET
2823 Phy::setLinkStatus(&pThis->phy, false);
2824#endif /* E1K_LSC_ON_RESET */
2825 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2826 if (pThisCC->pDrvR3)
2827 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2828}
2829
2830/**
2831 * Bring down the link temporarily.
2832 *
2833 * @param pDevIns The device instance.
2834 * @param pThis The device state structure.
2835 * @param pThisCC The current context instance data.
2836 */
2837DECLINLINE(void) e1kR3LinkDownTemp(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2838{
2839 E1kLog(("%s Link is down temporarily\n", pThis->szPrf));
2840 STATUS &= ~STATUS_LU;
2841 Phy::setLinkStatus(&pThis->phy, false);
2842 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2843 /*
2844 * Notifying the associated driver that the link went down (even temporarily)
2845 * seems to be the right thing, but it was not done before. This may cause
2846 * a regression if the driver does not expect the link to go down as a result
2847 * of sending PDMNETWORKLINKSTATE_DOWN_RESUME to this device. Earlier versions
2848 * of the code notified the driver that the link was up! See @bugref{7057}.
2849 */
2850 if (pThisCC->pDrvR3)
2851 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2852 e1kBringLinkUpDelayed(pDevIns, pThis);
2853}
2854#endif /* IN_RING3 */
2855
2856#if 0 /* unused */
2857/**
2858 * Read handler for Device Status register.
2859 *
2860 * Get the link status from PHY.
2861 *
2862 * @returns VBox status code.
2863 *
2864 * @param pThis The device state structure.
2865 * @param offset Register offset in memory-mapped frame.
2866 * @param index Register index in register array.
2867 * @param pu32Value Where to store the register value read.
2868 */
2869static int e1kRegReadCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2870{
2871 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2872 pThis->szPrf, (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2873 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2874 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2875 {
2876 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2877 if (Phy::readMDIO(&pThis->phy))
2878 *pu32Value = CTRL | CTRL_MDIO;
2879 else
2880 *pu32Value = CTRL & ~CTRL_MDIO;
2881 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2882 pThis->szPrf, !!(*pu32Value & CTRL_MDIO)));
2883 }
2884 else
2885 {
2886 /* MDIO pin is used for output, ignore it */
2887 *pu32Value = CTRL;
2888 }
2889 return VINF_SUCCESS;
2890}
2891#endif /* unused */
2892
2893/**
2894 * A helper function to detect the link state to the other side of "the wire".
2895 *
2896 * When deciding to bring up the link we need to take into account both if the
2897 * cable is connected and if our device is actually connected to the outside
2898 * world. If no driver is attached we won't be able to allocate TX buffers,
2899 * which will prevent us from processing TX descriptors and will result in a
2900 * "TX unit hang" in the guest.
2901 *
2902 * @returns true if the device is connected to something.
2903 *
2904 * @param pDevIns The device instance.
2905 */
2906DECLINLINE(bool) e1kIsConnected(PPDMDEVINS pDevIns)
2907{
2908 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
2909 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
2910 return pThis->fCableConnected && pThisCC->CTX_SUFF(pDrv);
2911}
2912
2913/**
2914 * A callback used by PHY to indicate that the link needs to be updated due to
2915 * reset of PHY.
2916 *
2917 * @param pDevIns The device instance.
2918 * @thread any
2919 */
2920void e1kPhyLinkResetCallback(PPDMDEVINS pDevIns)
2921{
2922 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
2923
2924 /* Make sure we have cable connected and MAC can talk to PHY */
2925 if (e1kIsConnected(pDevIns) && (CTRL & CTRL_SLU))
2926 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, E1K_INIT_LINKUP_DELAY_US);
2927}
2928
2929/**
2930 * Write handler for Device Control register.
2931 *
2932 * Handles reset.
2933 *
2934 * @param pThis The device state structure.
2935 * @param offset Register offset in memory-mapped frame.
2936 * @param index Register index in register array.
2937 * @param value The value to store.
2938 * @param mask Used to implement partial writes (8 and 16-bit).
2939 * @thread EMT
2940 */
2941static int e1kRegWriteCTRL(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2942{
2943 int rc = VINF_SUCCESS;
2944
2945 if (value & CTRL_RESET)
2946 { /* RST */
2947#ifndef IN_RING3
2948 return VINF_IOM_R3_MMIO_WRITE;
2949#else
2950 e1kR3HardReset(pDevIns, pThis, PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC));
2951#endif
2952 }
2953 else
2954 {
2955#ifdef E1K_LSC_ON_SLU
2956 /*
2957 * When the guest changes the 'Set Link Up' bit from 0 to 1 we check whether
2958 * the link is down and the cable is connected, and if so we bring the link
2959 * up, see @bugref{8624}.
2960 */
2961 if ( (value & CTRL_SLU)
2962 && !(CTRL & CTRL_SLU)
2963 && pThis->fCableConnected
2964 && !(STATUS & STATUS_LU))
2965 {
2966 /* It should take about 2 seconds for the link to come up */
2967 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, E1K_INIT_LINKUP_DELAY_US);
2968 }
2969#else /* !E1K_LSC_ON_SLU */
2970 if ( (value & CTRL_SLU)
2971 && !(CTRL & CTRL_SLU)
2972 && e1kIsConnected(pDevIns)
2973 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hLUTimer))
2974 {
2975 /* PXE does not use LSC interrupts, see @bugref{9113}. */
2976 STATUS |= STATUS_LU;
2977 }
2978#endif /* !E1K_LSC_ON_SLU */
2979 if ((value & CTRL_VME) != (CTRL & CTRL_VME))
2980 {
2981 E1kLog(("%s VLAN Mode %s\n", pThis->szPrf, (value & CTRL_VME) ? "Enabled" : "Disabled"));
2982 }
2983 Log7(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2984 pThis->szPrf, (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2985 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2986 if (value & CTRL_MDC)
2987 {
2988 if (value & CTRL_MDIO_DIR)
2989 {
2990 Log7(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2991 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2992 Phy::writeMDIO(&pThis->phy, !!(value & CTRL_MDIO), pDevIns);
2993 }
2994 else
2995 {
2996 if (Phy::readMDIO(&pThis->phy))
2997 value |= CTRL_MDIO;
2998 else
2999 value &= ~CTRL_MDIO;
3000 Log7(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
3001 }
3002 }
3003 rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3004 }
3005
3006 return rc;
3007}
3008
3009/**
3010 * Write handler for EEPROM/Flash Control/Data register.
3011 *
3012 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
3013 *
3014 * @param pThis The device state structure.
3015 * @param offset Register offset in memory-mapped frame.
3016 * @param index Register index in register array.
3017 * @param value The value to store.
3018 * @param mask Used to implement partial writes (8 and 16-bit).
3019 * @thread EMT
3020 */
3021static int e1kRegWriteEECD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3022{
3023 RT_NOREF(pDevIns, offset, index);
3024#ifdef IN_RING3
3025 /* So far we are only concerned with the lower byte */
3026 if ((EECD & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
3027 {
3028 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
3029 /* Note: 82543GC does not need to request EEPROM access */
3030 STAM_PROFILE_ADV_START(&pThis->StatEEPROMWrite, a);
3031 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3032 pThisCC->eeprom.write(value & EECD_EE_WIRES);
3033 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMWrite, a);
3034 }
3035 if (value & EECD_EE_REQ)
3036 EECD |= EECD_EE_REQ|EECD_EE_GNT;
3037 else
3038 EECD &= ~EECD_EE_GNT;
3039 //e1kRegWriteDefault(pThis, offset, index, value );
3040
3041 return VINF_SUCCESS;
3042#else /* !IN_RING3 */
3043 RT_NOREF(pThis, value);
3044 return VINF_IOM_R3_MMIO_WRITE;
3045#endif /* !IN_RING3 */
3046}
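
/*
 * Illustrative sketch (not part of the device code): the EEPROM access
 * arbitration performed by e1kRegWriteEECD above, reduced to a pure function
 * over the register value. The helper name is hypothetical; EECD_EE_REQ and
 * EECD_EE_GNT are the same bit masks the handler uses. The emulation grants
 * access immediately whenever software requests it and revokes the grant as
 * soon as the request bit is cleared.
 */
#if 0 /* illustrative only */
static uint32_t exampleEecdArbitrate(uint32_t uEecdOld, uint32_t uValueWritten)
{
    uint32_t uEecd = uEecdOld;
    if (uValueWritten & EECD_EE_REQ)
        uEecd |= EECD_EE_REQ | EECD_EE_GNT;   /* Request -> immediate grant. */
    else
        uEecd &= ~EECD_EE_GNT;                /* Request dropped -> grant revoked. */
    return uEecd;
}
#endif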
3047
3048/**
3049 * Read handler for EEPROM/Flash Control/Data register.
3050 *
3051 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
3052 *
3053 * @returns VBox status code.
3054 *
3055 * @param pThis The device state structure.
3056 * @param offset Register offset in memory-mapped frame.
3057 * @param index Register index in register array.
3058 * @param pu32Value Where to store the value of the register.
3059 * @thread EMT
3060 */
3061static int e1kRegReadEECD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
3062{
3063#ifdef IN_RING3
3064 uint32_t value = 0; /* Get rid of false positive in parfait. */
3065 int rc = e1kRegReadDefault(pDevIns, pThis, offset, index, &value);
3066 if (RT_SUCCESS(rc))
3067 {
3068 if ((value & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
3069 {
3070 /* Note: 82543GC does not need to request EEPROM access */
3071 /* Access to EEPROM granted -- get 4-wire bits from the EEPROM device */
3072 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
3073 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3074 value |= pThisCC->eeprom.read();
3075 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
3076 }
3077 *pu32Value = value;
3078 }
3079
3080 return rc;
3081#else /* !IN_RING3 */
3082 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(pu32Value);
3083 return VINF_IOM_R3_MMIO_READ;
3084#endif /* !IN_RING3 */
3085}
3086
3087/**
3088 * Write handler for EEPROM Read register.
3089 *
3090 * Handles EEPROM word access requests, reads EEPROM and stores the result
3091 * into DATA field.
3092 *
3093 * @param pThis The device state structure.
3094 * @param offset Register offset in memory-mapped frame.
3095 * @param index Register index in register array.
3096 * @param value The value to store.
3097 * @param mask Used to implement partial writes (8 and 16-bit).
3098 * @thread EMT
3099 */
3100static int e1kRegWriteEERD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3101{
3102#ifdef IN_RING3
3103 /* Make use of 'writable' and 'readable' masks. */
3104 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3105 /* DONE and DATA are set only if read was triggered by START. */
3106 if (value & EERD_START)
3107 {
3108 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
3109 uint16_t tmp;
3110 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3111 if (pThisCC->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
3112 SET_BITS(EERD, DATA, tmp);
3113 EERD |= EERD_DONE;
3114 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
3115 }
3116
3117 return VINF_SUCCESS;
3118#else /* !IN_RING3 */
3119 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
3120 return VINF_IOM_R3_MMIO_WRITE;
3121#endif /* !IN_RING3 */
3122}
3123
3124
3125/**
3126 * Write handler for MDI Control register.
3127 *
3128 * Handles PHY read/write requests; forwards requests to internal PHY device.
3129 *
3130 * @param pThis The device state structure.
3131 * @param offset Register offset in memory-mapped frame.
3132 * @param index Register index in register array.
3133 * @param value The value to store.
3134 * @param mask Used to implement partial writes (8 and 16-bit).
3135 * @thread EMT
3136 */
3137static int e1kRegWriteMDIC(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3138{
3139 if (value & MDIC_INT_EN)
3140 {
3141 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
3142 pThis->szPrf));
3143 }
3144 else if (value & MDIC_READY)
3145 {
3146 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
3147 pThis->szPrf));
3148 }
3149 else if (GET_BITS_V(value, MDIC, PHY) != 1)
3150 {
3151 E1kLog(("%s WARNING! Access to invalid PHY detected, phy=%d.\n",
3152 pThis->szPrf, GET_BITS_V(value, MDIC, PHY)));
3153 /*
3154 * Some drivers scan the MDIO bus for a PHY. We can work with these
3155 * drivers if we set MDIC_READY and MDIC_ERROR when there isn't a PHY
3156 * at the requested address, see @bugref{7346}.
3157 */
3158 MDIC = MDIC_READY | MDIC_ERROR;
3159 }
3160 else
3161 {
3162 /* Store the value */
3163 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3164 STAM_COUNTER_INC(&pThis->StatPHYAccesses);
3165 /* Forward op to PHY */
3166 if (value & MDIC_OP_READ)
3167 SET_BITS(MDIC, DATA, Phy::readRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), pDevIns));
3168 else
3169 Phy::writeRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK, pDevIns);
3170 /* Let software know that we are done */
3171 MDIC |= MDIC_READY;
3172 }
3173
3174 return VINF_SUCCESS;
3175}
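
/*
 * Illustrative sketch (not part of the device code): how a single MDIC write
 * is decoded into a PHY management operation by e1kRegWriteMDIC above. The
 * helper and the result structure are hypothetical; the field accessors are
 * the same GET_BITS_V/MDIC_* macros used by the handler.
 */
#if 0 /* illustrative only */
typedef struct E1KMDICOPEXAMPLE
{
    unsigned uPhyAddr;  /* PHY address; only address 1 has a PHY behind it here. */
    unsigned uRegAddr;  /* PHY register index. */
    bool     fRead;     /* true = read the register, false = write it. */
    uint16_t u16Data;   /* Data to write (ignored for reads). */
} E1KMDICOPEXAMPLE;

static E1KMDICOPEXAMPLE exampleDecodeMdic(uint32_t value)
{
    E1KMDICOPEXAMPLE Op;
    Op.uPhyAddr = GET_BITS_V(value, MDIC, PHY);
    Op.uRegAddr = GET_BITS_V(value, MDIC, REG);
    Op.fRead    = RT_BOOL(value & MDIC_OP_READ);
    Op.u16Data  = (uint16_t)(value & MDIC_DATA_MASK);
    return Op;
}
#endif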
3176
3177/**
3178 * Write handler for Interrupt Cause Read register.
3179 *
3180 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
3181 *
3182 * @param pThis The device state structure.
3183 * @param offset Register offset in memory-mapped frame.
3184 * @param index Register index in register array.
3185 * @param value The value to store.
3186 * @param mask Used to implement partial writes (8 and 16-bit).
3187 * @thread EMT
3188 */
3189static int e1kRegWriteICR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3190{
3191 ICR &= ~value;
3192
3193 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index);
3194 return VINF_SUCCESS;
3195}
3196
3197/**
3198 * Read handler for Interrupt Cause Read register.
3199 *
3200 * Reading this register acknowledges all interrupts.
3201 *
3202 * @returns VBox status code.
3203 *
3204 * @param pThis The device state structure.
3205 * @param offset Register offset in memory-mapped frame.
3206 * @param index Register index in register array.
3207 * @param pu32Value Where to store the value of the register.
3208 * @thread EMT
3209 */
3210static int e1kRegReadICR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
3211{
3212 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_READ);
3213 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3214 return rc;
3215
3216 uint32_t value = 0;
3217 rc = e1kRegReadDefault(pDevIns, pThis, offset, index, &value);
3218 if (RT_SUCCESS(rc))
3219 {
3220 if (value)
3221 {
3222 if (!pThis->fIntRaised)
3223 E1K_INC_ISTAT_CNT(pThis->uStatNoIntICR);
3224 /*
3225 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
3226 * with disabled interrupts.
3227 */
3228 //if (IMS)
3229 if (1)
3230 {
3231 /*
3232 * Interrupts were enabled -- we are supposedly at the very
3233 * beginning of the interrupt handler
3234 */
3235 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
3236 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", pThis->szPrf, ICR));
3237 /* Clear all pending interrupts */
3238 ICR = 0;
3239 pThis->fIntRaised = false;
3240 /* Lower(0) INTA(0) */
3241 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
3242
3243 pThis->u64AckedAt = PDMDevHlpTimerGet(pDevIns, pThis->hIntTimer);
3244 if (pThis->fIntMaskUsed)
3245 pThis->fDelayInts = true;
3246 }
3247 else
3248 {
3249 /*
3250 * Interrupts are disabled -- in Windows guests the ICR read is done
3251 * just before re-enabling interrupts.
3252 */
3253 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
3254 }
3255 }
3256 *pu32Value = value;
3257 }
3258 e1kCsLeave(pThis);
3259
3260 return rc;
3261}
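
/*
 * Illustrative sketch (not part of the device code): the driver-side pattern
 * the read-to-clear handler above is written for. The helper is hypothetical
 * and the MMIO pointer stands in for whatever access method the guest driver
 * uses; the cause bits are the same ICR_* masks used elsewhere in this file.
 */
#if 0 /* illustrative only */
static void exampleGuestIsr(volatile uint32_t *pIcrMmio)
{
    /* A single read returns all pending cause bits and acknowledges them, so
       the ISR must act on every bit it sees; a second read would normally
       return 0 unless new events arrived in the meantime. */
    uint32_t fCauses = *pIcrMmio;
    if (!fCauses)
        return; /* Not our interrupt (shared INTx line). */
    if (fCauses & ICR_LSC)
    {
        /* ... handle link status change ... */
    }
    if (fCauses & ICR_RXT0)
    {
        /* ... process the receive ring ... */
    }
    if (fCauses & ICR_TXDW)
    {
        /* ... reclaim completed transmit descriptors ... */
    }
}
#endif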
3262
3263/**
3264 * Read handler for Interrupt Cause Set register.
3265 *
3266 * The VxWorks driver uses this undocumented feature of real hardware to read ICR without acknowledging interrupts.
3267 *
3268 * @returns VBox status code.
3269 *
3270 * @param pThis The device state structure.
3271 * @param offset Register offset in memory-mapped frame.
3272 * @param index Register index in register array.
3273 * @param pu32Value Where to store the value of the register.
3274 * @thread EMT
3275 */
3276static int e1kRegReadICS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
3277{
3278 RT_NOREF_PV(index);
3279 return e1kRegReadDefault(pDevIns, pThis, offset, ICR_IDX, pu32Value);
3280}
3281
3282/**
3283 * Write handler for Interrupt Cause Set register.
3284 *
3285 * Bits corresponding to 1s in 'value' will be set in ICR register.
3286 *
3287 * @param pThis The device state structure.
3288 * @param offset Register offset in memory-mapped frame.
3289 * @param index Register index in register array.
3290 * @param value The value to store.
3291 * @param mask Used to implement partial writes (8 and 16-bit).
3292 * @thread EMT
3293 */
3294static int e1kRegWriteICS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3295{
3296 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3297 E1K_INC_ISTAT_CNT(pThis->uStatIntICS);
3298 return e1kRaiseInterrupt(pDevIns, pThis, VINF_IOM_R3_MMIO_WRITE, value & g_aE1kRegMap[ICS_IDX].writable);
3299}
3300
3301/**
3302 * Write handler for Interrupt Mask Set register.
3303 *
3304 * Will trigger pending interrupts.
3305 *
3306 * @param pThis The device state structure.
3307 * @param offset Register offset in memory-mapped frame.
3308 * @param index Register index in register array.
3309 * @param value The value to store.
3310 * @param mask Used to implement partial writes (8 and 16-bit).
3311 * @thread EMT
3312 */
3313static int e1kRegWriteIMS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3314{
3315 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3316
3317 IMS |= value;
3318 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
3319 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", pThis->szPrf));
3320 /*
3321 * We cannot raise an interrupt here as it will occasionally cause an interrupt storm
3322 * in Windows guests (see @bugref{8624}, @bugref{5023}).
3323 */
3324 if ((ICR & IMS) && !pThis->fLocked)
3325 {
3326 E1K_INC_ISTAT_CNT(pThis->uStatIntIMS);
3327 e1kPostponeInterrupt(pDevIns, pThis, E1K_IMS_INT_DELAY_NS);
3328 }
3329
3330 return VINF_SUCCESS;
3331}
3332
3333/**
3334 * Write handler for Interrupt Mask Clear register.
3335 *
3336 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
3337 *
3338 * @param pThis The device state structure.
3339 * @param offset Register offset in memory-mapped frame.
3340 * @param index Register index in register array.
3341 * @param value The value to store.
3342 * @param mask Used to implement partial writes (8 and 16-bit).
3343 * @thread EMT
3344 */
3345static int e1kRegWriteIMC(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3346{
3347 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3348
3349 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3350 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3351 return rc;
3352 if (pThis->fIntRaised)
3353 {
3354 /*
3355 * Technically we should reset fIntRaised in the ICR read handler, but that would
3356 * cause Windows to freeze since it may receive an interrupt while still at the very
3357 * beginning of its interrupt handler.
3358 */
3359 E1K_INC_ISTAT_CNT(pThis->uStatIntLower);
3360 STAM_COUNTER_INC(&pThis->StatIntsPrevented);
3361 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
3362 /* Lower(0) INTA(0) */
3363 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
3364 pThis->fIntRaised = false;
3365 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
3366 }
3367 IMS &= ~value;
3368 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", pThis->szPrf));
3369 e1kCsLeave(pThis);
3370
3371 return VINF_SUCCESS;
3372}
3373
3374/**
3375 * Write handler for Receive Control register.
3376 *
3377 * @param pThis The device state structure.
3378 * @param offset Register offset in memory-mapped frame.
3379 * @param index Register index in register array.
3380 * @param value The value to store.
3381 * @param mask Used to implement partial writes (8 and 16-bit).
3382 * @thread EMT
3383 */
3384static int e1kRegWriteRCTL(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3385{
3386 /* Update promiscuous mode */
3387 bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
3388 if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
3389 {
3390 /* Promiscuity has changed, pass the knowledge on. */
3391#ifndef IN_RING3
3392 return VINF_IOM_R3_MMIO_WRITE;
3393#else
3394 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3395 if (pThisCC->pDrvR3)
3396 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, fBecomePromiscous);
3397#endif
3398 }
3399
3400 /* Adjust receive buffer size */
3401 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
3402 if (value & RCTL_BSEX)
3403 cbRxBuf *= 16;
3404 if (cbRxBuf > E1K_MAX_RX_PKT_SIZE)
3405 cbRxBuf = E1K_MAX_RX_PKT_SIZE;
3406 if (cbRxBuf != pThis->u16RxBSize)
3407 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
3408 pThis->szPrf, cbRxBuf, pThis->u16RxBSize));
3409 Assert(cbRxBuf < 65536);
3410 pThis->u16RxBSize = (uint16_t)cbRxBuf;
3411
3412 /* Update the register */
3413 return e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3414}
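
/*
 * Illustrative sketch (not part of the device code): the receive buffer size
 * computation used by e1kRegWriteRCTL above, as a standalone helper over the
 * already-extracted RCTL fields. The helper name is hypothetical; cbMax
 * corresponds to E1K_MAX_RX_PKT_SIZE.
 */
#if 0 /* illustrative only */
static unsigned exampleRxBufSize(unsigned uBsize, bool fBsex, unsigned cbMax)
{
    /* BSIZE selects 2048 >> BSIZE bytes (2048/1024/512/256); BSEX multiplies
       the selected size by 16 for the extended buffer sizes. */
    unsigned cbRxBuf = 2048 >> uBsize;
    if (fBsex)
        cbRxBuf *= 16;
    /* Clamp to the largest packet the emulation can store, as the handler does. */
    return cbRxBuf > cbMax ? cbMax : cbRxBuf;
}
#endif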
3415
3416/**
3417 * Write handler for Packet Buffer Allocation register.
3418 *
3419 * TXA = 64 - RXA.
3420 *
3421 * @param pThis The device state structure.
3422 * @param offset Register offset in memory-mapped frame.
3423 * @param index Register index in register array.
3424 * @param value The value to store.
3425 * @param mask Used to implement partial writes (8 and 16-bit).
3426 * @thread EMT
3427 */
3428static int e1kRegWritePBA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3429{
3430 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3431 PBA_st->txa = 64 - PBA_st->rxa;
3432
3433 return VINF_SUCCESS;
3434}
3435
3436/**
3437 * Write handler for Receive Descriptor Tail register.
3438 *
3439 * @remarks Write into RDT forces switch to HC and signal to
3440 * e1kR3NetworkDown_WaitReceiveAvail().
3441 *
3442 * @returns VBox status code.
3443 *
3444 * @param pThis The device state structure.
3445 * @param offset Register offset in memory-mapped frame.
3446 * @param index Register index in register array.
3447 * @param value The value to store.
3448 * @param mask Used to implement partial writes (8 and 16-bit).
3449 * @thread EMT
3450 */
3451static int e1kRegWriteRDT(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3452{
3453#ifndef IN_RING3
3454 /* XXX */
3455// return VINF_IOM_R3_MMIO_WRITE;
3456#endif
3457 int rc = e1kCsRxEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3458 if (RT_LIKELY(rc == VINF_SUCCESS))
3459 {
3460 E1kLog(("%s e1kRegWriteRDT\n", pThis->szPrf));
3461#ifndef E1K_WITH_RXD_CACHE
3462 /*
3463 * Some drivers advance RDT too far, so that it equals RDH. This
3464 * somehow manages to work with real hardware but not with this
3465 * emulated device. We can work with these drivers if we just
3466 * write 1 less when we see a driver writing RDT equal to RDH,
3467 * see @bugref{7346}.
3468 */
3469 if (value == RDH)
3470 {
3471 if (RDH == 0)
3472 value = (RDLEN / sizeof(E1KRXDESC)) - 1;
3473 else
3474 value = RDH - 1;
3475 }
3476#endif /* !E1K_WITH_RXD_CACHE */
3477 rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3478#ifdef E1K_WITH_RXD_CACHE
3479 E1KRXDC rxdc;
3480 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kRegWriteRDT")))
3481 {
3482 e1kCsRxLeave(pThis);
3483 E1kLog(("%s e1kRegWriteRDT: failed to update Rx context, returning VINF_SUCCESS\n", pThis->szPrf));
3484 return VINF_SUCCESS;
3485 }
3486 /*
3487 * We need to fetch descriptors now as RDT may go whole circle
3488 * before we attempt to store a received packet. For example,
3489 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
3490 * size being only 8 descriptors! Note that we fetch descriptors
3491 * only when the cache is empty to reduce the number of memory reads
3492 * in case of frequent RDT writes. Don't fetch anything when the
3493 * receiver is disabled either as RDH, RDT, RDLEN can be in some
3494 * messed up state.
3495 * Note that despite the cache may seem empty, meaning that there are
3496 * no more available descriptors in it, it may still be used by RX
3497 * thread which has not yet written the last descriptor back but has
3498 * temporarily released the RX lock in order to write the packet body
3499 * to descriptor's buffer. At this point we still going to do prefetch
3500 * but it won't actually fetch anything if there are no unused slots in
3501 * our "empty" cache (nRxDFetched==E1K_RXD_CACHE_SIZE). We must not
3502 * reset the cache here even if it appears empty. It will be reset at
3503 * a later point in e1kRxDGet().
3504 */
3505 if (e1kRxDIsCacheEmpty(pThis) && (RCTL & RCTL_EN))
3506 e1kRxDPrefetch(pDevIns, pThis, &rxdc);
3507#endif /* E1K_WITH_RXD_CACHE */
3508 e1kCsRxLeave(pThis);
3509 if (RT_SUCCESS(rc))
3510 {
3511 /* Signal that we have more receive descriptors available. */
3512 e1kWakeupReceive(pDevIns, pThis);
3513 }
3514 }
3515 return rc;
3516}
3517
3518/**
3519 * Write handler for Receive Delay Timer register.
3520 *
3521 * @param pThis The device state structure.
3522 * @param offset Register offset in memory-mapped frame.
3523 * @param index Register index in register array.
3524 * @param value The value to store.
3525 * @param mask Used to implement partial writes (8 and 16-bit).
3526 * @thread EMT
3527 */
3528static int e1kRegWriteRDTR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3529{
3530 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3531 if (value & RDTR_FPD)
3532 {
3533 /* Flush requested, cancel both timers and raise interrupt */
3534#ifdef E1K_USE_RX_TIMERS
3535 e1kCancelTimer(pDevIns, pThis, pThis->hRIDTimer);
3536 e1kCancelTimer(pDevIns, pThis, pThis->hRADTimer);
3537#endif
3538 E1K_INC_ISTAT_CNT(pThis->uStatIntRDTR);
3539 return e1kRaiseInterrupt(pDevIns, pThis, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3540 }
3541
3542 return VINF_SUCCESS;
3543}
3544
3545DECLINLINE(uint32_t) e1kGetTxLen(PE1KTXDC pTxdc)
3546{
3547 /**
3548 * Make sure TDT won't change during computation. EMT may modify TDT at
3549 * any moment.
3550 */
3551 uint32_t tdt = pTxdc->tdt;
3552 return (pTxdc->tdh > tdt ? pTxdc->tdlen/sizeof(E1KTXDESC) : 0) + tdt - pTxdc->tdh;
3553}
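
/*
 * Illustrative sketch (not part of the device code): the ring arithmetic of
 * e1kGetTxLen above written out over plain integers. The helper name is
 * hypothetical; cDescsTotal corresponds to TDLEN / sizeof(E1KTXDESC).
 */
#if 0 /* illustrative only */
static uint32_t exampleTxRingOccupancy(uint32_t tdh, uint32_t tdt, uint32_t cDescsTotal)
{
    /* Descriptors currently owned by hardware: from TDH up to (but not
       including) TDT, wrapping around the end of the ring when TDH > TDT. */
    if (tdh > tdt)
        return cDescsTotal - tdh + tdt; /* Wrapped: the tail is behind the head. */
    return tdt - tdh;                   /* Linear: the tail is ahead of the head. */
}
#endif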
3554
3555#ifdef IN_RING3
3556
3557# ifdef E1K_TX_DELAY
3558/**
3559 * @callback_method_impl{FNTMTIMERDEV, Transmit Delay Timer handler.}
3560 */
3561static DECLCALLBACK(void) e1kR3TxDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3562{
3563 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3564 Assert(PDMCritSectIsOwner(&pThis->csTx));
3565 RT_NOREF(hTimer);
3566
3567 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayExp);
3568# ifdef E1K_INT_STATS
3569 uint64_t u64Elapsed = RTTimeNanoTS() - pThis->u64ArmedAt;
3570 if (u64Elapsed > pThis->uStatMaxTxDelay)
3571 pThis->uStatMaxTxDelay = u64Elapsed;
3572# endif
3573 int rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
3574 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3575}
3576# endif /* E1K_TX_DELAY */
3577
3578//# ifdef E1K_USE_TX_TIMERS
3579
3580/**
3581 * @callback_method_impl{FNTMTIMERDEV, Transmit Interrupt Delay Timer handler.}
3582 */
3583static DECLCALLBACK(void) e1kR3TxIntDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3584{
3585 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3586 Assert(hTimer == pThis->hTIDTimer); RT_NOREF(hTimer);
3587
3588 E1K_INC_ISTAT_CNT(pThis->uStatTID);
3589 /* Cancel absolute delay timer as we have already got attention */
3590# ifndef E1K_NO_TAD
3591 e1kCancelTimer(pDevIns, pThis, pThis->hTADTimer);
3592# endif
3593 e1kRaiseInterrupt(pDevIns, pThis, VERR_IGNORED, ICR_TXDW);
3594}
3595
3596/**
3597 * @callback_method_impl{FNTMTIMERDEV, Transmit Absolute Delay Timer handler.}
3598 */
3599static DECLCALLBACK(void) e1kR3TxAbsDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3600{
3601 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3602 Assert(hTimer == pThis->hTADTimer); RT_NOREF(hTimer);
3603
3604 E1K_INC_ISTAT_CNT(pThis->uStatTAD);
3605 /* Cancel interrupt delay timer as we have already got attention */
3606 e1kCancelTimer(pDevIns, pThis, pThis->hTIDTimer);
3607 e1kRaiseInterrupt(pDevIns, pThis, VERR_IGNORED, ICR_TXDW);
3608}
3609
3610//# endif /* E1K_USE_TX_TIMERS */
3611# ifdef E1K_USE_RX_TIMERS
3612
3613/**
3614 * @callback_method_impl{FNTMTIMERDEV, Receive Interrupt Delay Timer handler.}
3615 */
3616static DECLCALLBACK(void) e1kR3RxIntDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3617{
3618 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3619 Assert(hTimer == pThis->hRIDTimer); RT_NOREF(hTimer);
3620
3621 E1K_INC_ISTAT_CNT(pThis->uStatRID);
3622 /* Cancel absolute delay timer as we have already got attention */
3623 e1kCancelTimer(pDevIns, pThis, pThis->hRADTimer);
3624 e1kRaiseInterrupt(pDevIns, pThis, VERR_IGNORED, ICR_RXT0);
3625}
3626
3627/**
3628 * @callback_method_impl{FNTMTIMERDEV, Receive Absolute Delay Timer handler.}
3629 */
3630static DECLCALLBACK(void) e1kR3RxAbsDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3631{
3632 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3633 Assert(hTimer == pThis->hRADTimer); RT_NOREF(hTimer);
3634
3635 E1K_INC_ISTAT_CNT(pThis->uStatRAD);
3636 /* Cancel interrupt delay timer as we have already got attention */
3637 e1kCancelTimer(pDevIns, pThis, pThis->hRIDTimer);
3638 e1kRaiseInterrupt(pDevIns, pThis, VERR_IGNORED, ICR_RXT0);
3639}
3640
3641# endif /* E1K_USE_RX_TIMERS */
3642
3643/**
3644 * @callback_method_impl{FNTMTIMERDEV, Late Interrupt Timer handler.}
3645 */
3646static DECLCALLBACK(void) e1kR3LateIntTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3647{
3648 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3649 Assert(hTimer == pThis->hIntTimer); RT_NOREF(hTimer);
3651
3652 STAM_PROFILE_ADV_START(&pThis->StatLateIntTimer, a);
3653 STAM_COUNTER_INC(&pThis->StatLateInts);
3654 E1K_INC_ISTAT_CNT(pThis->uStatIntLate);
3655# if 0
3656 if (pThis->iStatIntLost > -100)
3657 pThis->iStatIntLost--;
3658# endif
3659 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, 0);
3660 STAM_PROFILE_ADV_STOP(&pThis->StatLateIntTimer, a);
3661}
3662
3663/**
3664 * @callback_method_impl{FNTMTIMERDEV, Link Up Timer handler.}
3665 */
3666static DECLCALLBACK(void) e1kR3LinkUpTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3667{
3668 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3669 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3670 Assert(hTimer == pThis->hLUTimer); RT_NOREF(hTimer);
3671
3672 /*
3673 * We can get here with the cable disconnected: e.g. if we set the link status
3674 * to down while the link-up timer was already armed (shortly after e1kLoadDone()),
3675 * or if the cable is disconnected and then reconnected very quickly. Moreover,
3676 * 82543GC triggers LSC on reset even if the cable is unplugged (see @bugref{8942}).
3677 */
3678 if (e1kIsConnected(pDevIns))
3679 {
3680 /* 82543GC does not have an internal PHY */
3681 if (pThis->eChip == E1K_CHIP_82543GC || (CTRL & CTRL_SLU))
3682 e1kR3LinkUp(pDevIns, pThis, pThisCC);
3683 }
3684# ifdef E1K_LSC_ON_RESET
3685 else if (pThis->eChip == E1K_CHIP_82543GC)
3686 e1kR3LinkDown(pDevIns, pThis, pThisCC);
3687# endif /* E1K_LSC_ON_RESET */
3688}
3689
3690#endif /* IN_RING3 */
3691
3692/**
3693 * Sets up the GSO context according to the new TSE context descriptor.
3694 *
3695 * @param pGso The GSO context to setup.
3696 * @param pCtx The context descriptor.
3697 */
3698DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3699{
3700 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3701
3702 /*
3703 * See if the context descriptor describes something that could be TCP or
3704 * UDP over IPv[46].
3705 */
3706 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3707 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3708 {
3709 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3710 return;
3711 }
3712 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3713 {
3714 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3715 return;
3716 }
3717 if (RT_UNLIKELY( pCtx->dw2.fTCP
3718 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3719 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3720 {
3721 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3722 return;
3723 }
3724
3725 /* The TCP/UDP checksum range should end at the end of the packet, or at least after the headers. */
3726 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3727 {
3728 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3729 return;
3730 }
3731
3732 /* IPv4 checksum offset. */
3733 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3734 {
3735 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3736 return;
3737 }
3738
3739 /* TCP/UDP checksum offsets. */
3740 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3741 != ( pCtx->dw2.fTCP
3742 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3743 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3744 {
3745 E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS, pCtx->dw2.fTCP));
3746 return;
3747 }
3748
3749 /*
3750 * Because internal networking uses a 16-bit size field for the GSO context
3751 * plus frame, we have to make sure we don't exceed it.
3752 */
3753 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3754 {
3755 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3756 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3757 return;
3758 }
3759
3760 /*
3761 * We're good for now - we'll do more checks when seeing the data.
3762 * So, figure the type of offloading and setup the context.
3763 */
3764 if (pCtx->dw2.fIP)
3765 {
3766 if (pCtx->dw2.fTCP)
3767 {
3768 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3769 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3770 }
3771 else
3772 {
3773 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3774 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3775 }
3776 /** @todo Detect IPv4-IPv6 tunneling (need a test setup since Linux doesn't
3777 * seem to do this yet)... */
3778 }
3779 else
3780 {
3781 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /** @todo IPv6 UFO */
3782 if (pCtx->dw2.fTCP)
3783 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3784 else
3785 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3786 }
3787 pGso->offHdr1 = pCtx->ip.u8CSS;
3788 pGso->offHdr2 = pCtx->tu.u8CSS;
3789 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3790 pGso->cbMaxSeg = pCtx->dw3.u16MSS + (pGso->u8Type == PDMNETWORKGSOTYPE_IPV4_UDP ? pGso->offHdr2 : 0);
3791 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3792 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3793 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3794}
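
/*
 * Illustrative sketch (not part of the device code): how the MSS and header
 * length from the context descriptor determine what a TSE transmission turns
 * into on the wire. The helper names are hypothetical; the arithmetic only
 * restates what the GSO parameters set up above imply.
 */
#if 0 /* illustrative only */
static uint32_t exampleGsoSegmentCount(uint32_t cbPayload, uint32_t cbMss)
{
    /* The payload is cut into MSS-sized chunks; the last chunk may be short. */
    return cbMss != 0 ? (cbPayload + cbMss - 1) / cbMss : 0;
}

static uint64_t exampleGsoWireBytes(uint32_t cbPayload, uint32_t cbMss, uint32_t cbHdrs)
{
    /* Every chunk is sent with its own copy of the protocol headers. */
    return (uint64_t)cbPayload + (uint64_t)exampleGsoSegmentCount(cbPayload, cbMss) * cbHdrs;
}
#endif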
3795
3796/**
3797 * Checks if we can use GSO processing for the current TSE frame.
3798 *
3799 * @param pThis The device state structure.
3800 * @param pGso The GSO context.
3801 * @param pData The first data descriptor of the frame.
3802 * @param pCtx The TSO context descriptor.
3803 */
3804DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3805{
3806 if (!pData->cmd.fTSE)
3807 {
3808 E1kLog2(("e1kCanDoGso: !TSE\n"));
3809 return false;
3810 }
3811 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3812 {
3813 E1kLog(("e1kCanDoGso: VLE\n"));
3814 return false;
3815 }
3816 if (RT_UNLIKELY(!pThis->fGSOEnabled))
3817 {
3818 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3819 return false;
3820 }
3821
3822 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3823 {
3824 case PDMNETWORKGSOTYPE_IPV4_TCP:
3825 case PDMNETWORKGSOTYPE_IPV4_UDP:
3826 if (!pData->dw3.fIXSM)
3827 {
3828 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3829 return false;
3830 }
3831 if (!pData->dw3.fTXSM)
3832 {
3833 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3834 return false;
3835 }
3836 /** @todo what more checks should we perform here? Ethernet frame type? */
3837 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3838 return true;
3839
3840 case PDMNETWORKGSOTYPE_IPV6_TCP:
3841 case PDMNETWORKGSOTYPE_IPV6_UDP:
3842 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3843 {
3844 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3845 return false;
3846 }
3847 if (!pData->dw3.fTXSM)
3848 {
3849 E1kLog(("e1kCanDoGso: TXSM (IPv6)\n"));
3850 return false;
3851 }
3852 /** @todo what more check should we perform here? Ethernet frame type? */
3853 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3854 return true;
3855
3856 default:
3857 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3858 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3859 return false;
3860 }
3861}
3862
3863/**
3864 * Frees the current xmit buffer.
3865 *
3866 * @param pThis The device state structure.
3867 */
3868static void e1kXmitFreeBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC)
3869{
3870 PPDMSCATTERGATHER pSg = pThisCC->CTX_SUFF(pTxSg);
3871 if (pSg)
3872 {
3873 pThisCC->CTX_SUFF(pTxSg) = NULL;
3874
3875 if (pSg->pvAllocator != pThis)
3876 {
3877 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3878 if (pDrv)
3879 pDrv->pfnFreeBuf(pDrv, pSg);
3880 }
3881 else
3882 {
3883 /* loopback */
3884 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3885 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3886 pSg->fFlags = 0;
3887 pSg->pvAllocator = NULL;
3888 }
3889 }
3890}
3891
3892#ifndef E1K_WITH_TXD_CACHE
3893/**
3894 * Allocates an xmit buffer.
3895 *
3896 * @returns See PDMINETWORKUP::pfnAllocBuf.
3897 * @param pThis The device state structure.
3898 * @param cbMin The minimum frame size.
3899 * @param fExactSize Whether cbMin is exact or if we have to max it
3900 * out to the max MTU size.
3901 * @param fGso Whether this is a GSO frame or not.
3902 */
3903DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC, size_t cbMin, bool fExactSize, bool fGso)
3904{
3905 /* Adjust cbMin if necessary. */
3906 if (!fExactSize)
3907 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3908
3909 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3910 if (RT_UNLIKELY(pThisCC->CTX_SUFF(pTxSg)))
3911 e1kXmitFreeBuf(pThis, pThisCC);
3912 Assert(pThisCC->CTX_SUFF(pTxSg) == NULL);
3913
3914 /*
3915 * Allocate the buffer.
3916 */
3917 PPDMSCATTERGATHER pSg;
3918 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3919 {
3920 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3921 if (RT_UNLIKELY(!pDrv))
3922 return VERR_NET_DOWN;
3923 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pThis->GsoCtx : NULL, &pSg);
3924 if (RT_FAILURE(rc))
3925 {
3926 /* Suspend TX as we are out of buffers atm */
3927 STATUS |= STATUS_TXOFF;
3928 return rc;
3929 }
3930 }
3931 else
3932 {
3933 /* Create a loopback using the fallback buffer and preallocated SG. */
3934 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3935 pSg = &pThis->uTxFallback.Sg;
3936 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3937 pSg->cbUsed = 0;
3938 pSg->cbAvailable = 0;
3939 pSg->pvAllocator = pThis;
3940 pSg->pvUser = NULL; /* No GSO here. */
3941 pSg->cSegs = 1;
3942 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3943 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3944 }
3945
3946 pThisCC->CTX_SUFF(pTxSg) = pSg;
3947 return VINF_SUCCESS;
3948}
3949#else /* E1K_WITH_TXD_CACHE */
3950/**
3951 * Allocates an xmit buffer.
3952 *
3953 * @returns See PDMINETWORKUP::pfnAllocBuf.
3954 * @param pThis The device state structure.
3955 * @param pThisCC The current context instance data.
3956 * @param fGso Whether this is a GSO frame or not.
3959 */
3960DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC, bool fGso)
3961{
3962 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3963 if (RT_UNLIKELY(pThisCC->CTX_SUFF(pTxSg)))
3964 e1kXmitFreeBuf(pThis, pThisCC);
3965 Assert(pThisCC->CTX_SUFF(pTxSg) == NULL);
3966
3967 /*
3968 * Allocate the buffer.
3969 */
3970 PPDMSCATTERGATHER pSg;
3971 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3972 {
3973 if (pThis->cbTxAlloc == 0)
3974 {
3975 /* Zero packet, no need for the buffer */
3976 return VINF_SUCCESS;
3977 }
3978 if (fGso && pThis->GsoCtx.u8Type == PDMNETWORKGSOTYPE_INVALID)
3979 {
3980 E1kLog3(("Invalid GSO context, won't allocate this packet, cb=%u %s%s\n",
3981 pThis->cbTxAlloc, pThis->fVTag ? "VLAN " : "", pThis->fGSO ? "GSO " : ""));
3982 /* No valid GSO context is available, ignore this packet. */
3983 pThis->cbTxAlloc = 0;
3984 return VINF_SUCCESS;
3985 }
3986
3987 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3988 if (RT_UNLIKELY(!pDrv))
3989 return VERR_NET_DOWN;
3990 int rc = pDrv->pfnAllocBuf(pDrv, pThis->cbTxAlloc, fGso ? &pThis->GsoCtx : NULL, &pSg);
3991 if (RT_FAILURE(rc))
3992 {
3993 /* Suspend TX as we are out of buffers atm */
3994 STATUS |= STATUS_TXOFF;
3995 return rc;
3996 }
3997 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3998 pThis->szPrf, pThis->cbTxAlloc,
3999 pThis->fVTag ? "VLAN " : "",
4000 pThis->fGSO ? "GSO " : ""));
4001 }
4002 else
4003 {
4004 /* Create a loopback using the fallback buffer and preallocated SG. */
4005 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
4006 pSg = &pThis->uTxFallback.Sg;
4007 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
4008 pSg->cbUsed = 0;
4009 pSg->cbAvailable = sizeof(pThis->aTxPacketFallback);
4010 pSg->pvAllocator = pThis;
4011 pSg->pvUser = NULL; /* No GSO here. */
4012 pSg->cSegs = 1;
4013 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
4014 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
4015 }
4016 pThis->cbTxAlloc = 0;
4017
4018 pThisCC->CTX_SUFF(pTxSg) = pSg;
4019 return VINF_SUCCESS;
4020}
4021#endif /* E1K_WITH_TXD_CACHE */
4022
4023/**
4024 * Checks if it's a GSO buffer or not.
4025 *
4026 * @returns true / false.
4027 * @param pTxSg The scatter / gather buffer.
4028 */
4029DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
4030{
4031#if 0
4032 if (!pTxSg)
4033 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
4034 if (pTxSg && !pTxSg->pvUser)
4035 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
4036#endif
4037 return pTxSg && pTxSg->pvUser /* GSO indicator */;
4038}
4039
4040#ifndef E1K_WITH_TXD_CACHE
4041/**
4042 * Load transmit descriptor from guest memory.
4043 *
4044 * @param pDevIns The device instance.
4045 * @param pDesc Pointer to descriptor union.
4046 * @param addr Physical address in guest context.
4047 * @thread E1000_TX
4048 */
4049DECLINLINE(void) e1kLoadDesc(PPDMDEVINS pDevIns, E1KTXDESC *pDesc, RTGCPHYS addr)
4050{
4051 PDMDevHlpPCIPhysRead(pDevIns, addr, pDesc, sizeof(E1KTXDESC));
4052}
4053#else /* E1K_WITH_TXD_CACHE */
4054/**
4055 * Load transmit descriptors from guest memory.
4056 *
4057 * We need two physical reads in case the range of descriptors to fetch wraps
4058 * around the end of the TX descriptor ring.
4059 *
4060 * @returns the actual number of descriptors fetched.
4061 * @param pDevIns The device instance.
4062 * @param pThis The device state structure.
4063 * @thread E1000_TX
4064 */
4065DECLINLINE(unsigned) e1kTxDLoadMore(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KTXDC pTxdc)
4066{
4067 Assert(pThis->iTxDCurrent == 0);
4068 /* We've already loaded pThis->nTxDFetched descriptors past TDH. */
4069 unsigned nDescsAvailable = e1kGetTxLen(pTxdc) - pThis->nTxDFetched;
4070 /* The following two lines ensure that pThis->nTxDFetched never overflows. */
4071 AssertCompile(E1K_TXD_CACHE_SIZE < (256 * sizeof(pThis->nTxDFetched)));
4072 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pThis->nTxDFetched);
4073 unsigned nDescsTotal = pTxdc->tdlen / sizeof(E1KTXDESC);
4074 Assert(nDescsTotal != 0);
4075 if (nDescsTotal == 0)
4076 return 0;
4077 unsigned nFirstNotLoaded = (pTxdc->tdh + pThis->nTxDFetched) % nDescsTotal;
4078 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
4079 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
4080 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
4081 nFirstNotLoaded, nDescsInSingleRead));
4082 if (nDescsToFetch == 0)
4083 return 0;
4084 E1KTXDESC* pFirstEmptyDesc = &pThis->aTxDescriptors[pThis->nTxDFetched];
4085 PDMDevHlpPCIPhysRead(pDevIns,
4086 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
4087 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
4088 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
4089 pThis->szPrf, nDescsInSingleRead,
4090 TDBAH, TDBAL + pTxdc->tdh * sizeof(E1KTXDESC),
4091 nFirstNotLoaded, pTxdc->tdlen, pTxdc->tdh, pTxdc->tdt));
4092 if (nDescsToFetch > nDescsInSingleRead)
4093 {
4094 PDMDevHlpPCIPhysRead(pDevIns,
4095 ((uint64_t)TDBAH << 32) + TDBAL,
4096 pFirstEmptyDesc + nDescsInSingleRead,
4097 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
4098 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
4099 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
4100 TDBAH, TDBAL));
4101 }
4102 pThis->nTxDFetched += (uint8_t)nDescsToFetch;
4103 return nDescsToFetch;
4104}
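
/*
 * Illustrative sketch (not part of the device code): the wrap-around split
 * that e1kTxDLoadMore above performs when fetching descriptors. The helper
 * name is hypothetical; it only computes how many descriptors go into the
 * first (contiguous) read and how many are left for the second read that
 * starts again at the ring base.
 */
#if 0 /* illustrative only */
static void exampleSplitRingFetch(unsigned iFirst, unsigned cToFetch, unsigned cTotal,
                                  unsigned *pcFirstRead, unsigned *pcSecondRead)
{
    /* A contiguous guest-physical read can only run up to the end of the ring;
       whatever is left wraps around and is fetched from the ring base. */
    *pcFirstRead  = RT_MIN(cToFetch, cTotal - iFirst);
    *pcSecondRead = cToFetch - *pcFirstRead;
}
#endif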
4105
4106/**
4107 * Load transmit descriptors from guest memory only if there are no loaded
4108 * descriptors.
4109 *
4110 * @returns true if there are descriptors in cache.
4111 * @param pDevIns The device instance.
4112 * @param pThis The device state structure.
4113 * @thread E1000_TX
4114 */
4115DECLINLINE(bool) e1kTxDLazyLoad(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KTXDC pTxdc)
4116{
4117 if (pThis->nTxDFetched == 0)
4118 return e1kTxDLoadMore(pDevIns, pThis, pTxdc) != 0;
4119 return true;
4120}
4121#endif /* E1K_WITH_TXD_CACHE */
4122
4123/**
4124 * Write back transmit descriptor to guest memory.
4125 *
4126 * @param pDevIns The device instance.
4127 * @param pThis The device state structure.
4128 * @param pDesc Pointer to descriptor union.
4129 * @param addr Physical address in guest context.
4130 * @thread E1000_TX
4131 */
4132DECLINLINE(void) e1kWriteBackDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4133{
4134 /* Only the last half of the descriptor has to be written back. */
4135 e1kPrintTDesc(pThis, pDesc, "^^^");
4136 PDMDevHlpPCIPhysWrite(pDevIns, addr, pDesc, sizeof(E1KTXDESC));
4137}
4138
4139/**
4140 * Transmit complete frame.
4141 *
4142 * @remarks We skip the FCS since we're not responsible for sending anything to
4143 * a real ethernet wire.
4144 *
4145 * @param pDevIns The device instance.
4146 * @param pThis The device state structure.
4147 * @param pThisCC The current context instance data.
4148 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4149 * @thread E1000_TX
4150 */
4151static void e1kTransmitFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, bool fOnWorkerThread)
4152{
4153 PPDMSCATTERGATHER pSg = pThisCC->CTX_SUFF(pTxSg);
4154 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
4155 Assert(!pSg || pSg->cSegs == 1);
4156
4157 if (cbFrame > 70) /* unqualified guess */
4158 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
4159
4160#ifdef E1K_INT_STATS
4161 if (cbFrame <= 1514)
4162 E1K_INC_ISTAT_CNT(pThis->uStatTx1514);
4163 else if (cbFrame <= 2962)
4164 E1K_INC_ISTAT_CNT(pThis->uStatTx2962);
4165 else if (cbFrame <= 4410)
4166 E1K_INC_ISTAT_CNT(pThis->uStatTx4410);
4167 else if (cbFrame <= 5858)
4168 E1K_INC_ISTAT_CNT(pThis->uStatTx5858);
4169 else if (cbFrame <= 7306)
4170 E1K_INC_ISTAT_CNT(pThis->uStatTx7306);
4171 else if (cbFrame <= 8754)
4172 E1K_INC_ISTAT_CNT(pThis->uStatTx8754);
4173 else if (cbFrame <= 16384)
4174 E1K_INC_ISTAT_CNT(pThis->uStatTx16384);
4175 else if (cbFrame <= 32768)
4176 E1K_INC_ISTAT_CNT(pThis->uStatTx32768);
4177 else
4178 E1K_INC_ISTAT_CNT(pThis->uStatTxLarge);
4179#endif /* E1K_INT_STATS */
4180
4181 /* Add VLAN tag */
4182 if (cbFrame > 12 && pThis->fVTag)
4183 {
4184 E1kLog3(("%s Inserting VLAN tag %08x\n",
4185 pThis->szPrf, RT_BE2H_U16((uint16_t)VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16)));
4186 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
4187 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16((uint16_t)VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16);
4188 pSg->cbUsed += 4;
4189 cbFrame += 4;
4190 Assert(pSg->cbUsed == cbFrame);
4191 Assert(pSg->cbUsed <= pSg->cbAvailable);
4192 }
4193/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
4194 "%.*Rhxd\n"
4195 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
4196 pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
4197
4198 /* Update the stats */
4199 E1K_INC_CNT32(TPT);
4200 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
4201 E1K_INC_CNT32(GPTC);
4202 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
4203 E1K_INC_CNT32(BPTC);
4204 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
4205 E1K_INC_CNT32(MPTC);
4206 /* Update octet transmit counter */
4207 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
4208 if (pThisCC->CTX_SUFF(pDrv))
4209 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, cbFrame);
4210 if (cbFrame == 64)
4211 E1K_INC_CNT32(PTC64);
4212 else if (cbFrame < 128)
4213 E1K_INC_CNT32(PTC127);
4214 else if (cbFrame < 256)
4215 E1K_INC_CNT32(PTC255);
4216 else if (cbFrame < 512)
4217 E1K_INC_CNT32(PTC511);
4218 else if (cbFrame < 1024)
4219 E1K_INC_CNT32(PTC1023);
4220 else
4221 E1K_INC_CNT32(PTC1522);
4222
4223 E1K_INC_ISTAT_CNT(pThis->uStatTxFrm);
4224
4225 /*
4226 * Dump and send the packet.
4227 */
4228 int rc = VERR_NET_DOWN;
4229 if (pSg && pSg->pvAllocator != pThis)
4230 {
4231 e1kPacketDump(pDevIns, pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
4232
4233 pThisCC->CTX_SUFF(pTxSg) = NULL;
4234 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
4235 if (pDrv)
4236 {
4237 /* Release critical section to avoid deadlock in CanReceive */
4238 //e1kCsLeave(pThis);
4239 STAM_PROFILE_START(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4240 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
4241 STAM_PROFILE_STOP(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4242 //e1kCsEnter(pThis, RT_SRC_POS);
4243 }
4244 }
4245 else if (pSg)
4246 {
4247 Assert(pSg->aSegs[0].pvSeg == pThis->aTxPacketFallback);
4248 e1kPacketDump(pDevIns, pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
4249
4250 /** @todo do we actually need to check that we're in loopback mode here? */
4251 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
4252 {
4253 E1KRXDST status;
4254 RT_ZERO(status);
4255 status.fPIF = true;
4256 e1kHandleRxPacket(pDevIns, pThis, pSg->aSegs[0].pvSeg, cbFrame, status);
4257 rc = VINF_SUCCESS;
4258 }
4259 e1kXmitFreeBuf(pThis, pThisCC);
4260 }
4261 else
4262 rc = VERR_NET_DOWN;
4263 if (RT_FAILURE(rc))
4264 {
4265 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
4266 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
4267 }
4268
4269 pThis->led.Actual.s.fWriting = 0;
4270}
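
/*
 * Illustrative sketch (not part of the device code): the byte layout produced
 * by the VLAN tag insertion in e1kTransmitFrame above. The helper name is
 * hypothetical; the device derives the TPID from VET and the TCI from the
 * transmit descriptor. The caller must guarantee 4 spare bytes past cbFrame.
 */
#if 0 /* illustrative only */
static void exampleInsertVlanTag(uint8_t *pbFrame, size_t cbFrame, uint16_t u16Tpid, uint16_t u16Tci)
{
    /* An 802.1Q tag sits between the source MAC address (bytes 6..11) and the
       EtherType: shift everything from offset 12 up by 4 bytes and write the
       TPID followed by the TCI, both in network byte order. */
    memmove(pbFrame + 16, pbFrame + 12, cbFrame - 12);
    pbFrame[12] = (uint8_t)(u16Tpid >> 8);
    pbFrame[13] = (uint8_t)(u16Tpid & 0xff);
    pbFrame[14] = (uint8_t)(u16Tci >> 8);
    pbFrame[15] = (uint8_t)(u16Tci & 0xff);
}
#endif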
4271
4272/**
4273 * Compute and write internet checksum (e1kCSum16) at the specified offset.
4274 *
4275 * @param pThis The device state structure.
4276 * @param pPkt Pointer to the packet.
4277 * @param u16PktLen Total length of the packet.
4278 * @param cso Offset in packet to write checksum at.
4279 * @param css Offset in packet to start computing
4280 * checksum from.
4281 * @param cse Offset in packet to stop computing
4282 * checksum at.
4283 * @param fUdp Replace 0 checksum with all 1s.
4284 * @thread E1000_TX
4285 */
4286static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse, bool fUdp = false)
4287{
4288 RT_NOREF1(pThis);
4289
4290 if (css >= u16PktLen)
4291 {
4292 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
4293 pThis->szPrf, css, u16PktLen));
4294 return;
4295 }
4296
4297 if (cso >= u16PktLen - 1)
4298 {
4299 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
4300 pThis->szPrf, cso, u16PktLen));
4301 return;
4302 }
4303
4304 if (cse == 0 || cse >= u16PktLen)
4305 cse = u16PktLen - 1;
4306 else if (cse < css)
4307 {
4308 E1kLog2(("%s css(%X) is greater than cse(%X), checksum is not inserted\n",
4309 pThis->szPrf, css, cse));
4310 return;
4311 }
4312
4313 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
4314 if (fUdp && u16ChkSum == 0)
4315 u16ChkSum = ~u16ChkSum; /* 0 means no checksum computed in case of UDP (see @bugref{9883}) */
4316 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", pThis->szPrf,
4317 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
4318 *(uint16_t*)(pPkt + cso) = u16ChkSum;
4319}
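
/*
 * Illustrative sketch (not part of the device code): a textbook 16-bit one's
 * complement (Internet) checksum of the kind e1kCSum16 is expected to compute
 * over the css..cse range above. The helper name is hypothetical and it works
 * on bytes in network order, so it sketches the algorithm rather than being a
 * drop-in replica of e1kCSum16.
 */
#if 0 /* illustrative only */
static uint16_t exampleInetChecksum(const uint8_t *pb, size_t cb)
{
    uint32_t uSum = 0;
    /* Sum the data as 16-bit big-endian words. */
    while (cb > 1)
    {
        uSum += (uint32_t)((pb[0] << 8) | pb[1]);
        pb   += 2;
        cb   -= 2;
    }
    if (cb) /* An odd trailing byte is padded with a zero byte. */
        uSum += (uint32_t)(pb[0] << 8);
    /* Fold the carries back into the low 16 bits. */
    while (uSum >> 16)
        uSum = (uSum >> 16) + (uSum & 0xffff);
    return (uint16_t)~uSum;
}
#endif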
4320
4321/**
4322 * Add a part of descriptor's buffer to transmit frame.
4323 *
4324 * @remarks data.u64BufAddr is used unconditionally for both data
4325 * and legacy descriptors since it is identical to
4326 * legacy.u64BufAddr.
4327 *
4328 * @param pDevIns The device instance.
4329 * @param pThis The device state structure.
4330 * @param PhysAddr Physical address of the descriptor's data buffer.
4331 * @param u16Len Length of the buffer segment to add.
4332 * @param fSend Force packet sending.
4333 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4334 * @thread E1000_TX
4335 */
4336#ifndef E1K_WITH_TXD_CACHE
4337static void e1kFallbackAddSegment(PPDMDEVINS pDevIns, PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4338{
4339 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
4340 /* TCP header being transmitted */
4341 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4342 /* IP header being transmitted */
4343 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4344
4345 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4346 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4347 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4348
4349 PDMDevHlpPCIPhysRead(pDevIns, PhysAddr, pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4350 E1kLog3(("%s Dump of the segment:\n"
4351 "%.*Rhxd\n"
4352 "%s --- End of dump ---\n",
4353 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4354 pThis->u16TxPktLen += u16Len;
4355 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4356 pThis->szPrf, pThis->u16TxPktLen));
4357 if (pThis->u16HdrRemain > 0)
4358 {
4359 /* The header was not complete, check if it is now */
4360 if (u16Len >= pThis->u16HdrRemain)
4361 {
4362 /* The rest is payload */
4363 u16Len -= pThis->u16HdrRemain;
4364 pThis->u16HdrRemain = 0;
4365 /* Save partial checksum and flags */
4366 pThis->u32SavedCsum = pTcpHdr->chksum;
4367 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4368 /* Clear FIN and PSH flags now and set them only in the last segment */
4369 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4370 }
4371 else
4372 {
4373 /* Still not */
4374 pThis->u16HdrRemain -= u16Len;
4375 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4376 pThis->szPrf, pThis->u16HdrRemain));
4377 return;
4378 }
4379 }
4380
4381 pThis->u32PayRemain -= u16Len;
4382
4383 if (fSend)
4384 {
4385 /* Leave ethernet header intact */
4386 /* IP Total Length = payload + headers - ethernet header */
4387 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4388 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4389 pThis->szPrf, ntohs(pIpHdr->total_len)));
4390 /* Update IP Checksum */
4391 pIpHdr->chksum = 0;
4392 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4393 pThis->contextTSE.ip.u8CSO,
4394 pThis->contextTSE.ip.u8CSS,
4395 pThis->contextTSE.ip.u16CSE);
4396
4397 /* Update TCP flags */
4398 /* Restore original FIN and PSH flags for the last segment */
4399 if (pThis->u32PayRemain == 0)
4400 {
4401 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4402 E1K_INC_CNT32(TSCTC);
4403 }
4404 /* Add TCP length to partial pseudo header sum */
4405 uint32_t csum = pThis->u32SavedCsum
4406 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4407 while (csum >> 16)
4408 csum = (csum >> 16) + (csum & 0xFFFF);
4409 pTcpHdr->chksum = csum;
4410 /* Compute final checksum */
4411 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4412 pThis->contextTSE.tu.u8CSO,
4413 pThis->contextTSE.tu.u8CSS,
4414 pThis->contextTSE.tu.u16CSE);
4415
4416 /*
4417 * Transmit it. If we've used the SG already, allocate a new one before
4418 * we copy the data.
4419 */
4420 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4421 if (!pTxSg)
4422 {
4423 e1kXmitAllocBuf(pThis, pThisCC, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
4424 pTxSg = pThisCC->CTX_SUFF(pTxSg);
4425 }
4426 if (pTxSg)
4427 {
4428 Assert(pThis->u16TxPktLen <= pThisCC->CTX_SUFF(pTxSg)->cbAvailable);
4429 Assert(pTxSg->cSegs == 1);
4430 if (pTxSg->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4431 memcpy(pTxSg->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4432 pTxSg->cbUsed = pThis->u16TxPktLen;
4433 pTxSg->aSegs[0].cbSeg = pThis->u16TxPktLen;
4434 }
4435 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4436
4437 /* Update Sequence Number */
4438 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4439 - pThis->contextTSE.dw3.u8HDRLEN);
4440 /* Increment IP identification */
4441 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4442 }
4443}
4444#else /* E1K_WITH_TXD_CACHE */
4445static int e1kFallbackAddSegment(PPDMDEVINS pDevIns, PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4446{
4447 int rc = VINF_SUCCESS;
4448 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
4449 /* TCP header being transmitted */
4450 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4451 /* IP header being transmitted */
4452 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4453
4454 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4455 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4456 AssertReturn(pThis->u32PayRemain + pThis->u16HdrRemain > 0, VINF_SUCCESS);
4457
4458 if (pThis->u16TxPktLen + u16Len <= sizeof(pThis->aTxPacketFallback))
4459 PDMDevHlpPCIPhysRead(pDevIns, PhysAddr, pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4460 else
4461 E1kLog(("%s e1kFallbackAddSegment: writing beyond aTxPacketFallback, u16TxPktLen=%d(0x%x) + u16Len=%d(0x%x) > %d\n",
4462 pThis->szPrf, pThis->u16TxPktLen, pThis->u16TxPktLen, u16Len, u16Len, sizeof(pThis->aTxPacketFallback)));
4463 E1kLog3(("%s Dump of the segment:\n"
4464 "%.*Rhxd\n"
4465 "%s --- End of dump ---\n",
4466 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4467 pThis->u16TxPktLen += u16Len;
4468 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4469 pThis->szPrf, pThis->u16TxPktLen));
4470 if (pThis->u16HdrRemain > 0)
4471 {
4472 /* The header was not complete, check if it is now */
4473 if (u16Len >= pThis->u16HdrRemain)
4474 {
4475 /* The rest is payload */
4476 u16Len -= pThis->u16HdrRemain;
4477 pThis->u16HdrRemain = 0;
4478 /* Save partial checksum and flags */
4479 pThis->u32SavedCsum = pTcpHdr->chksum;
4480 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4481 /* Clear FIN and PSH flags now and set them only in the last segment */
4482 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4483 }
4484 else
4485 {
4486 /* Still not */
4487 pThis->u16HdrRemain -= u16Len;
4488 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4489 pThis->szPrf, pThis->u16HdrRemain));
4490 return rc;
4491 }
4492 }
4493
4494 if (u16Len > pThis->u32PayRemain)
4495 pThis->u32PayRemain = 0;
4496 else
4497 pThis->u32PayRemain -= u16Len;
4498
4499 if (fSend)
4500 {
4501 /* Leave ethernet header intact */
4502 /* IP Total Length = payload + headers - ethernet header */
4503 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4504 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4505 pThis->szPrf, ntohs(pIpHdr->total_len)));
4506 /* Update IP Checksum */
4507 pIpHdr->chksum = 0;
4508 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4509 pThis->contextTSE.ip.u8CSO,
4510 pThis->contextTSE.ip.u8CSS,
4511 pThis->contextTSE.ip.u16CSE);
4512
4513 /* Update TCP flags */
4514 /* Restore original FIN and PSH flags for the last segment */
4515 if (pThis->u32PayRemain == 0)
4516 {
4517 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4518 E1K_INC_CNT32(TSCTC);
4519 }
4520 /* Add TCP length to partial pseudo header sum */
4521 uint32_t csum = pThis->u32SavedCsum
4522 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4523 while (csum >> 16)
4524 csum = (csum >> 16) + (csum & 0xFFFF);
4525 Assert(csum < 65536);
4526 pTcpHdr->chksum = (uint16_t)csum;
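            /* Worked example of the fold above (illustrative numbers, not from the spec):
             * if the saved partial sum is 0xFFFE and the length word added to it is 0x0010,
             * the 32-bit sum is 0x1000E; one folding pass gives 0x000E + 0x0001 = 0x000F,
             * which fits into 16 bits, so 0x000F is stored as the partial TCP checksum. */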
4527 /* Compute final checksum */
4528 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4529 pThis->contextTSE.tu.u8CSO,
4530 pThis->contextTSE.tu.u8CSS,
4531 pThis->contextTSE.tu.u16CSE);
4532
4533 /*
4534 * Transmit it.
4535 */
4536 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4537 if (pTxSg)
4538 {
4539 /* Make sure the packet fits into the allocated buffer */
4540 size_t cbCopy = RT_MIN(pThis->u16TxPktLen, pThisCC->CTX_SUFF(pTxSg)->cbAvailable);
4541#ifdef DEBUG
4542 if (pThis->u16TxPktLen > pTxSg->cbAvailable)
4543 E1kLog(("%s e1kFallbackAddSegment: truncating packet, u16TxPktLen=%d(0x%x) > cbAvailable=%d(0x%x)\n",
4544 pThis->szPrf, pThis->u16TxPktLen, pThis->u16TxPktLen, pTxSg->cbAvailable, pTxSg->cbAvailable));
4545#endif /* DEBUG */
4546 Assert(pTxSg->cSegs == 1);
4547 if (pTxSg->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4548 memcpy(pTxSg->aSegs[0].pvSeg, pThis->aTxPacketFallback, cbCopy);
4549 pTxSg->cbUsed = cbCopy;
4550 pTxSg->aSegs[0].cbSeg = cbCopy;
4551 }
4552 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4553
4554 /* Update Sequence Number */
4555 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4556 - pThis->contextTSE.dw3.u8HDRLEN);
4557 /* Increment IP identification */
4558 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
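            /* Illustrative example (typical, not mandated, values): with HDRLEN = 54 and a
             * 1514-byte segment just sent, the next segment's TCP sequence number advances
             * by 1514 - 54 = 1460 payload bytes and the IP identification grows by one. */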
4559
4560 /* Allocate new buffer for the next segment. */
4561 if (pThis->u32PayRemain)
4562 {
4563 pThis->cbTxAlloc = RT_MIN(pThis->u32PayRemain,
4564 pThis->contextTSE.dw3.u16MSS)
4565 + pThis->contextTSE.dw3.u8HDRLEN;
4566 /* Do not add VLAN tags to empty packets. */
4567 if (pThis->fVTag && pThis->cbTxAlloc > 0)
4568 pThis->cbTxAlloc += 4;
4569 rc = e1kXmitAllocBuf(pThis, pThisCC, false /* fGSO */);
4570 }
4571 }
4572
4573 return rc;
4574}
4575#endif /* E1K_WITH_TXD_CACHE */
4576
4577#ifndef E1K_WITH_TXD_CACHE
4578/**
4579 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4580 * frame.
4581 *
4582 * We construct the frame in the fallback buffer first and then copy it to the SG
4583 * buffer before passing it down to the network driver code.
4584 *
4585 * @returns true if the frame should be transmitted, false if not.
4586 *
4587 * @param pThis The device state structure.
4588 * @param pDesc Pointer to the descriptor to transmit.
4589 * @param cbFragment Length of descriptor's buffer.
4590 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4591 * @thread E1000_TX
4592 */
4593static bool e1kFallbackAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4594{
4595    PPDMSCATTERGATHER pTxSg = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC)->CTX_SUFF(pTxSg);
4596 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4597 Assert(pDesc->data.cmd.fTSE);
4598 Assert(!e1kXmitIsGsoBuf(pTxSg));
4599
4600 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4601 Assert(u16MaxPktLen != 0);
4602 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4603
4604 /*
4605 * Carve out segments.
4606 */
4607 do
4608 {
4609 /* Calculate how many bytes we have left in this TCP segment */
4610 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4611 if (cb > cbFragment)
4612 {
4613 /* This descriptor fits completely into current segment */
4614 cb = cbFragment;
4615 e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4616 }
4617 else
4618 {
4619 e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4620 /*
4621 * Rewind the packet tail pointer to the beginning of payload,
4622 * so we continue writing right beyond the header.
4623 */
4624 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4625 }
4626
4627 pDesc->data.u64BufAddr += cb;
4628 cbFragment -= cb;
4629 } while (cbFragment > 0);
4630
4631 if (pDesc->data.cmd.fEOP)
4632 {
4633 /* End of packet, next segment will contain header. */
4634 if (pThis->u32PayRemain != 0)
4635 E1K_INC_CNT32(TSCTFC);
4636 pThis->u16TxPktLen = 0;
4637 e1kXmitFreeBuf(pThis, PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC));
4638 }
4639
4640 return false;
4641}
4642#else /* E1K_WITH_TXD_CACHE */
4643/**
4644 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4645 * frame.
4646 *
4647 * We construct the frame in the fallback buffer first and then copy it to the SG
4648 * buffer before passing it down to the network driver code.
4649 *
4650 * @returns error code
4651 *
4652 * @param pDevIns The device instance.
4653 * @param pThis The device state structure.
4654 * @param pDesc Pointer to the descriptor to transmit.
4655 * @param cbFragment Length of descriptor's buffer.
4656 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4657 * @thread E1000_TX
4658 */
4659static int e1kFallbackAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, bool fOnWorkerThread)
4660{
4661#ifdef VBOX_STRICT
4662 PPDMSCATTERGATHER pTxSg = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC)->CTX_SUFF(pTxSg);
4663 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4664 Assert(pDesc->data.cmd.fTSE);
4665 Assert(!e1kXmitIsGsoBuf(pTxSg));
4666#endif
4667
4668 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4669 /* We cannot produce empty packets, ignore all TX descriptors (see @bugref{9571}) */
4670 if (u16MaxPktLen == 0)
4671 return VINF_SUCCESS;
4672
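    /* Sketch of the segmentation below (purely illustrative values): with MSS = 1460 and
     * HDRLEN = 54 the maximum packet length is 1514, so the loop accumulates descriptor
     * data until a 1514-byte segment is full (or EOP is reached) and hands each segment
     * to e1kFallbackAddSegment() for checksumming and transmission. */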
4673 /*
4674 * Carve out segments.
4675 */
4676 int rc = VINF_SUCCESS;
4677 do
4678 {
4679 /* Calculate how many bytes we have left in this TCP segment */
4680 uint16_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4681 if (cb > pDesc->data.cmd.u20DTALEN)
4682 {
4683 /* This descriptor fits completely into current segment */
4684            cb = (uint16_t)pDesc->data.cmd.u20DTALEN; /* u20DTALEN at this point is guaranteed to fit into 16 bits. */
4685 rc = e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4686 }
4687 else
4688 {
4689 rc = e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4690 /*
4691 * Rewind the packet tail pointer to the beginning of payload,
4692 * so we continue writing right beyond the header.
4693 */
4694 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4695 }
4696
4697 pDesc->data.u64BufAddr += cb;
4698 pDesc->data.cmd.u20DTALEN -= cb;
4699 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4700
4701 if (pDesc->data.cmd.fEOP)
4702 {
4703 /* End of packet, next segment will contain header. */
4704 if (pThis->u32PayRemain != 0)
4705 E1K_INC_CNT32(TSCTFC);
4706 pThis->u16TxPktLen = 0;
4707 e1kXmitFreeBuf(pThis, PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC));
4708 }
4709
4710 return VINF_SUCCESS; /// @todo consider rc;
4711}
4712#endif /* E1K_WITH_TXD_CACHE */
4713
4714
4715/**
4716 * Add descriptor's buffer to transmit frame.
4717 *
4718 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4719 * TSE frames we cannot handle as GSO.
4720 *
4721 * @returns true on success, false on failure.
4722 *
4723 * @param pDevIns The device instance.
4724 * @param   pThis           The device state structure.
4725 * @param   pThisCC         The current context instance data.
4726 * @param PhysAddr The physical address of the descriptor buffer.
4727 * @param cbFragment Length of descriptor's buffer.
4728 * @thread E1000_TX
4729 */
4730static bool e1kAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, RTGCPHYS PhysAddr, uint32_t cbFragment)
4731{
4732 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4733 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4734 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4735
4736 LogFlow(("%s e1kAddToFrame: ENTER cbFragment=%d u16TxPktLen=%d cbUsed=%d cbAvailable=%d fGSO=%s\n",
4737 pThis->szPrf, cbFragment, pThis->u16TxPktLen, pTxSg->cbUsed, pTxSg->cbAvailable,
4738 fGso ? "true" : "false"));
4739 PCPDMNETWORKGSO pGso = (PCPDMNETWORKGSO)pTxSg->pvUser;
4740 if (pGso)
4741 {
4742 if (RT_UNLIKELY(pGso->cbMaxSeg == 0))
4743 {
4744 E1kLog(("%s zero-sized fragments are not allowed\n", pThis->szPrf));
4745 return false;
4746 }
4747 if (RT_UNLIKELY(pGso->u8Type == PDMNETWORKGSOTYPE_IPV4_UDP))
4748 {
4749 E1kLog(("%s UDP fragmentation is no longer supported\n", pThis->szPrf));
4750 return false;
4751 }
4752 }
4753 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4754 {
4755 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4756 return false;
4757 }
4758 if (RT_UNLIKELY( cbNewPkt > pTxSg->cbAvailable ))
4759 {
4760        E1kLog(("%s Transmit packet is too large for the allocated buffer: %u > %u(available)\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
4761 return false;
4762 }
4763
4764 if (RT_LIKELY(pTxSg))
4765 {
4766 Assert(pTxSg->cSegs == 1);
4767 if (pTxSg->cbUsed != pThis->u16TxPktLen)
4768 E1kLog(("%s e1kAddToFrame: pTxSg->cbUsed=%d(0x%x) != u16TxPktLen=%d(0x%x)\n",
4769 pThis->szPrf, pTxSg->cbUsed, pTxSg->cbUsed, pThis->u16TxPktLen, pThis->u16TxPktLen));
4770
4771 PDMDevHlpPCIPhysRead(pDevIns, PhysAddr, (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4772
4773 pTxSg->cbUsed = cbNewPkt;
4774 }
4775 pThis->u16TxPktLen = cbNewPkt;
4776
4777 return true;
4778}
4779
4780
4781/**
4782 * Write the descriptor back to guest memory and notify the guest.
4783 *
4784 * @param pThis The device state structure.
4785 * @param   pDesc       Pointer to the descriptor that has been transmitted.
4786 * @param addr Physical address of the descriptor in guest memory.
4787 * @thread E1000_TX
4788 */
4789static void e1kDescReport(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4790{
4791 /*
4792 * We fake descriptor write-back bursting. Descriptors are written back as they are
4793 * processed.
4794 */
4795 /* Let's pretend we process descriptors. Write back with DD set. */
4796 /*
4797      * Prior to r71586 we tried to accommodate the case when write-back bursts
4798 * are enabled without actually implementing bursting by writing back all
4799 * descriptors, even the ones that do not have RS set. This caused kernel
4800 * panics with Linux SMP kernels, as the e1000 driver tried to free up skb
4801 * associated with written back descriptor if it happened to be a context
4802 * descriptor since context descriptors do not have skb associated to them.
4803 * Starting from r71586 we write back only the descriptors with RS set,
4804 * which is a little bit different from what the real hardware does in
4805      * case there is a chain of data descriptors where some of them have RS set
4806      * and others do not. It is a very uncommon scenario, imho.
4807 * We need to check RPS as well since some legacy drivers use it instead of
4808 * RS even with newer cards.
4809 */
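    /* Example of the rule above (illustrative): if the guest queues four data descriptors
     * and sets RS only on the last one, DD is written back for that last descriptor only;
     * the first three are left untouched in guest memory. */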
4810 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4811 {
4812 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4813 e1kWriteBackDesc(pDevIns, pThis, pDesc, addr);
4814 if (pDesc->legacy.cmd.fEOP)
4815 {
4816//#ifdef E1K_USE_TX_TIMERS
4817 if (pThis->fTidEnabled && pDesc->legacy.cmd.fIDE)
4818 {
4819 E1K_INC_ISTAT_CNT(pThis->uStatTxIDE);
4820 //if (pThis->fIntRaised)
4821 //{
4822 // /* Interrupt is already pending, no need for timers */
4823 // ICR |= ICR_TXDW;
4824 //}
4825 //else {
4826                /* Arm the timer to fire in TIDV usec (discard .024) */
4827 e1kArmTimer(pDevIns, pThis, pThis->hTIDTimer, TIDV);
4828# ifndef E1K_NO_TAD
4829 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4830 E1kLog2(("%s Checking if TAD timer is running\n",
4831 pThis->szPrf));
4832 if (TADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hTADTimer))
4833 e1kArmTimer(pDevIns, pThis, pThis->hTADTimer, TADV);
4834# endif /* E1K_NO_TAD */
4835 }
4836 else
4837 {
4838 if (pThis->fTidEnabled)
4839 {
4840 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4841 pThis->szPrf));
4842 /* Cancel both timers if armed and fire immediately. */
4843# ifndef E1K_NO_TAD
4844 PDMDevHlpTimerStop(pDevIns, pThis->hTADTimer);
4845# endif
4846 PDMDevHlpTimerStop(pDevIns, pThis->hTIDTimer);
4847 }
4848//#endif /* E1K_USE_TX_TIMERS */
4849 E1K_INC_ISTAT_CNT(pThis->uStatIntTx);
4850 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXDW);
4851//#ifdef E1K_USE_TX_TIMERS
4852 }
4853//#endif /* E1K_USE_TX_TIMERS */
4854 }
4855 }
4856 else
4857 {
4858 E1K_INC_ISTAT_CNT(pThis->uStatTxNoRS);
4859 }
4860}
4861
4862#ifndef E1K_WITH_TXD_CACHE
4863
4864/**
4865 * Process Transmit Descriptor.
4866 *
4867 * E1000 supports three types of transmit descriptors:
4868 * - legacy data descriptors of older format (context-less).
4869 * - data the same as legacy but providing new offloading capabilities.
4870 * - context sets up the context for following data descriptors.
4871 *
4872 * @param pDevIns The device instance.
4873 * @param pThis The device state structure.
4874 * @param pThisCC The current context instance data.
4875 * @param pDesc Pointer to descriptor union.
4876 * @param addr Physical address of descriptor in guest memory.
4877 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4878 * @thread E1000_TX
4879 */
4880static int e1kXmitDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, E1KTXDESC *pDesc,
4881 RTGCPHYS addr, bool fOnWorkerThread)
4882{
4883 int rc = VINF_SUCCESS;
4884 uint32_t cbVTag = 0;
4885
4886 e1kPrintTDesc(pThis, pDesc, "vvv");
4887
4888//#ifdef E1K_USE_TX_TIMERS
4889 if (pThis->fTidEnabled)
4890 e1kCancelTimer(pDevIns, pThis, pThis->hTIDTimer);
4891//#endif /* E1K_USE_TX_TIMERS */
4892
4893 switch (e1kGetDescType(pDesc))
4894 {
4895 case E1K_DTYP_CONTEXT:
4896 if (pDesc->context.dw2.fTSE)
4897 {
4898 pThis->contextTSE = pDesc->context;
4899 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4900 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4901 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4902 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4903 }
4904 else
4905 {
4906 pThis->contextNormal = pDesc->context;
4907 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4908 }
4909 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4910 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4911 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4912 pDesc->context.ip.u8CSS,
4913 pDesc->context.ip.u8CSO,
4914 pDesc->context.ip.u16CSE,
4915 pDesc->context.tu.u8CSS,
4916 pDesc->context.tu.u8CSO,
4917 pDesc->context.tu.u16CSE));
4918 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4919            e1kDescReport(pDevIns, pThis, pDesc, addr);
4920 break;
4921
4922 case E1K_DTYP_DATA:
4923 {
4924 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4925 {
4926                E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4927 /** @todo Same as legacy when !TSE. See below. */
4928 break;
4929 }
4930 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4931 &pThis->StatTxDescTSEData:
4932 &pThis->StatTxDescData);
4933 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4934 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4935
4936 /*
4937 * The last descriptor of non-TSE packet must contain VLE flag.
4938             * TSE packets have VLE flag in the first descriptor. The latter
4939             * case is taken care of a bit later, when cbVTag gets assigned.
4940 *
4941 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4942 */
4943 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4944 {
4945 pThis->fVTag = pDesc->data.cmd.fVLE;
4946 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4947 }
4948 /*
4949 * First fragment: Allocate new buffer and save the IXSM and TXSM
4950 * packet options as these are only valid in the first fragment.
4951 */
4952 if (pThis->u16TxPktLen == 0)
4953 {
4954 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4955 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4956 E1kLog2(("%s Saving checksum flags:%s%s; \n", pThis->szPrf,
4957 pThis->fIPcsum ? " IP" : "",
4958 pThis->fTCPcsum ? " TCP/UDP" : ""));
4959 if (pDesc->data.cmd.fTSE)
4960 {
4961 /* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
4962 pThis->fVTag = pDesc->data.cmd.fVLE;
4963 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4964 cbVTag = pThis->fVTag ? 4 : 0;
4965 }
4966 else if (pDesc->data.cmd.fEOP)
4967 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4968 else
4969 cbVTag = 4;
4970 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4971 if (e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE))
4972 rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4973 true /*fExactSize*/, true /*fGso*/);
4974 else if (pDesc->data.cmd.fTSE)
4975                    rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4976 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4977 else
4978 rc = e1kXmitAllocBuf(pThis, pThisCC, pDesc->data.cmd.u20DTALEN + cbVTag,
4979 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4980
4981 /**
4982                 * @todo Perhaps it is not that simple for GSO packets! We may
4983 * need to unwind some changes.
4984 */
4985 if (RT_FAILURE(rc))
4986 {
4987 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4988 break;
4989 }
4990            /** @todo Is there any way to indicate errors other than collisions? Like
4991 * VERR_NET_DOWN. */
4992 }
4993
4994 /*
4995 * Add the descriptor data to the frame. If the frame is complete,
4996 * transmit it and reset the u16TxPktLen field.
4997 */
4998 if (e1kXmitIsGsoBuf(pThisCC->CTX_SUFF(pTxSg)))
4999 {
5000 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
5001 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5002 if (pDesc->data.cmd.fEOP)
5003 {
5004 if ( fRc
5005 && pThisCC->CTX_SUFF(pTxSg)
5006 && pThisCC->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
5007 {
5008 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5009 E1K_INC_CNT32(TSCTC);
5010 }
5011 else
5012 {
5013 if (fRc)
5014 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
5015 pThisCC->CTX_SUFF(pTxSg), pThisCC->CTX_SUFF(pTxSg) ? pThisCC->CTX_SUFF(pTxSg)->cbUsed : 0,
5016 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
5017                        e1kXmitFreeBuf(pThis, pThisCC);
5018 E1K_INC_CNT32(TSCTFC);
5019 }
5020 pThis->u16TxPktLen = 0;
5021 }
5022 }
5023 else if (!pDesc->data.cmd.fTSE)
5024 {
5025 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
5026 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5027 if (pDesc->data.cmd.fEOP)
5028 {
5029 if (fRc && pThisCC->CTX_SUFF(pTxSg))
5030 {
5031 Assert(pThisCC->CTX_SUFF(pTxSg)->cSegs == 1);
5032 if (pThis->fIPcsum)
5033 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5034 pThis->contextNormal.ip.u8CSO,
5035 pThis->contextNormal.ip.u8CSS,
5036 pThis->contextNormal.ip.u16CSE);
5037 if (pThis->fTCPcsum)
5038 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5039 pThis->contextNormal.tu.u8CSO,
5040 pThis->contextNormal.tu.u8CSS,
5041 pThis->contextNormal.tu.u16CSE,
5042 !pThis->contextNormal.dw2.fTCP);
5043 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5044 }
5045 else
5046                    e1kXmitFreeBuf(pThis, pThisCC);
5047 pThis->u16TxPktLen = 0;
5048 }
5049 }
5050 else
5051 {
5052 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
5053 e1kFallbackAddToFrame(pDevIns, pThis, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
5054 }
5055
5056            e1kDescReport(pDevIns, pThis, pDesc, addr);
5057 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5058 break;
5059 }
5060
5061 case E1K_DTYP_LEGACY:
5062 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
5063 {
5064 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
5065 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
5066 break;
5067 }
5068 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
5069 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5070
5071 /* First fragment: allocate new buffer. */
5072 if (pThis->u16TxPktLen == 0)
5073 {
5074 if (pDesc->legacy.cmd.fEOP)
5075 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
5076 else
5077 cbVTag = 4;
5078 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
5079 /** @todo reset status bits? */
5080 rc = e1kXmitAllocBuf(pThis, pThisCC, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
5081 if (RT_FAILURE(rc))
5082 {
5083 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5084 break;
5085 }
5086
5087            /** @todo Is there any way to indicate errors other than collisions? Like
5088 * VERR_NET_DOWN. */
5089 }
5090
5091 /* Add fragment to frame. */
5092 if (e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
5093 {
5094 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
5095
5096 /* Last fragment: Transmit and reset the packet storage counter. */
5097 if (pDesc->legacy.cmd.fEOP)
5098 {
5099 pThis->fVTag = pDesc->legacy.cmd.fVLE;
5100 pThis->u16VTagTCI = pDesc->legacy.dw3.u16Special;
5101 /** @todo Offload processing goes here. */
5102 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5103 pThis->u16TxPktLen = 0;
5104 }
5105 }
5106 /* Last fragment + failure: free the buffer and reset the storage counter. */
5107 else if (pDesc->legacy.cmd.fEOP)
5108 {
5109                e1kXmitFreeBuf(pThis, pThisCC);
5110 pThis->u16TxPktLen = 0;
5111 }
5112
5113            e1kDescReport(pDevIns, pThis, pDesc, addr);
5114 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5115 break;
5116
5117 default:
5118 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
5119 pThis->szPrf, e1kGetDescType(pDesc)));
5120 break;
5121 }
5122
5123 return rc;
5124}
5125
5126#else /* E1K_WITH_TXD_CACHE */
5127
5128/**
5129 * Process Transmit Descriptor.
5130 *
5131 * E1000 supports three types of transmit descriptors:
5132 * - legacy data descriptors of older format (context-less).
5133 * - data the same as legacy but providing new offloading capabilities.
5134 * - context sets up the context for following data descriptors.
5135 *
5136 * @param pDevIns The device instance.
5137 * @param pThis The device state structure.
5138 * @param pThisCC The current context instance data.
5139 * @param pDesc Pointer to descriptor union.
5140 * @param addr Physical address of descriptor in guest memory.
5141 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
5143 * @thread E1000_TX
5144 */
5145static int e1kXmitDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, E1KTXDESC *pDesc,
5146 RTGCPHYS addr, bool fOnWorkerThread)
5147{
5148 int rc = VINF_SUCCESS;
5149
5150 e1kPrintTDesc(pThis, pDesc, "vvv");
5151
5152//#ifdef E1K_USE_TX_TIMERS
5153 if (pThis->fTidEnabled)
5154 PDMDevHlpTimerStop(pDevIns, pThis->hTIDTimer);
5155//#endif /* E1K_USE_TX_TIMERS */
5156
5157 switch (e1kGetDescType(pDesc))
5158 {
5159 case E1K_DTYP_CONTEXT:
5160            /* The caller has already updated the context. */
5161 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
5162 e1kDescReport(pDevIns, pThis, pDesc, addr);
5163 break;
5164
5165 case E1K_DTYP_DATA:
5166 {
5167 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
5168 &pThis->StatTxDescTSEData:
5169 &pThis->StatTxDescData);
5170 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
5171 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5172 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
5173 {
5174 E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
5175 if (pDesc->data.cmd.fEOP)
5176 {
5177 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5178 pThis->u16TxPktLen = 0;
5179 }
5180 }
5181 else
5182 {
5183 /*
5184 * Add the descriptor data to the frame. If the frame is complete,
5185 * transmit it and reset the u16TxPktLen field.
5186 */
5187 if (e1kXmitIsGsoBuf(pThisCC->CTX_SUFF(pTxSg)))
5188 {
5189 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
5190 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5191 if (pDesc->data.cmd.fEOP)
5192 {
5193 if ( fRc
5194 && pThisCC->CTX_SUFF(pTxSg)
5195 && pThisCC->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
5196 {
5197 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5198 E1K_INC_CNT32(TSCTC);
5199 }
5200 else
5201 {
5202 if (fRc)
5203 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
5204 pThisCC->CTX_SUFF(pTxSg), pThisCC->CTX_SUFF(pTxSg) ? pThisCC->CTX_SUFF(pTxSg)->cbUsed : 0,
5205 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
5206 e1kXmitFreeBuf(pThis, pThisCC);
5207 E1K_INC_CNT32(TSCTFC);
5208 }
5209 pThis->u16TxPktLen = 0;
5210 }
5211 }
5212 else if (!pDesc->data.cmd.fTSE)
5213 {
5214 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
5215 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5216 if (pDesc->data.cmd.fEOP)
5217 {
5218 if (fRc && pThisCC->CTX_SUFF(pTxSg))
5219 {
5220 Assert(pThisCC->CTX_SUFF(pTxSg)->cSegs == 1);
5221 if (pThis->fIPcsum)
5222 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5223 pThis->contextNormal.ip.u8CSO,
5224 pThis->contextNormal.ip.u8CSS,
5225 pThis->contextNormal.ip.u16CSE);
5226 if (pThis->fTCPcsum)
5227 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5228 pThis->contextNormal.tu.u8CSO,
5229 pThis->contextNormal.tu.u8CSS,
5230 pThis->contextNormal.tu.u16CSE,
5231 !pThis->contextNormal.dw2.fTCP);
5232 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5233 }
5234 else
5235 e1kXmitFreeBuf(pThis, pThisCC);
5236 pThis->u16TxPktLen = 0;
5237 }
5238 }
5239 else
5240 {
5241 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
5242 rc = e1kFallbackAddToFrame(pDevIns, pThis, pDesc, fOnWorkerThread);
5243 }
5244 }
5245 e1kDescReport(pDevIns, pThis, pDesc, addr);
5246 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5247 break;
5248 }
5249
5250 case E1K_DTYP_LEGACY:
5251 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
5252 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5253 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
5254 {
5255 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
5256 }
5257 else
5258 {
5259 /* Add fragment to frame. */
5260 if (e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
5261 {
5262 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
5263
5264 /* Last fragment: Transmit and reset the packet storage counter. */
5265 if (pDesc->legacy.cmd.fEOP)
5266 {
5267 if (pDesc->legacy.cmd.fIC)
5268 {
5269 e1kInsertChecksum(pThis,
5270 (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
5271 pThis->u16TxPktLen,
5272 pDesc->legacy.cmd.u8CSO,
5273 pDesc->legacy.dw3.u8CSS,
5274 0);
5275 }
5276 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5277 pThis->u16TxPktLen = 0;
5278 }
5279 }
5280 /* Last fragment + failure: free the buffer and reset the storage counter. */
5281 else if (pDesc->legacy.cmd.fEOP)
5282 {
5283 e1kXmitFreeBuf(pThis, pThisCC);
5284 pThis->u16TxPktLen = 0;
5285 }
5286 }
5287 e1kDescReport(pDevIns, pThis, pDesc, addr);
5288 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5289 break;
5290
5291 default:
5292 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
5293 pThis->szPrf, e1kGetDescType(pDesc)));
5294 break;
5295 }
5296
5297 return rc;
5298}
5299
5300DECLINLINE(bool) e1kUpdateTxContext(PE1KSTATE pThis, E1KTXDESC *pDesc)
5301{
5302 if (pDesc->context.dw2.fTSE)
5303 {
5304 pThis->contextTSE = pDesc->context;
5305 uint32_t cbMaxSegmentSize = pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + 4; /*VTAG*/
5306 if (RT_UNLIKELY(cbMaxSegmentSize > E1K_MAX_TX_PKT_SIZE))
5307 {
5308 pThis->contextTSE.dw3.u16MSS = E1K_MAX_TX_PKT_SIZE - pThis->contextTSE.dw3.u8HDRLEN - 4; /*VTAG*/
5309 LogRelMax(10, ("%s: Transmit packet is too large: %u > %u(max). Adjusted MSS to %u.\n",
5310 pThis->szPrf, cbMaxSegmentSize, E1K_MAX_TX_PKT_SIZE, pThis->contextTSE.dw3.u16MSS));
5311 }
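        /* Illustrative example: with HDRLEN = 54, any MSS larger than
         * E1K_MAX_TX_PKT_SIZE - 58 is clamped here so that a full segment including the
         * header and a VLAN tag never exceeds E1K_MAX_TX_PKT_SIZE. */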
5312 pThis->u32PayRemain = pThis->contextTSE.dw2.u20PAYLEN;
5313 pThis->u16HdrRemain = pThis->contextTSE.dw3.u8HDRLEN;
5314 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
5315 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
5316 }
5317 else
5318 {
5319 pThis->contextNormal = pDesc->context;
5320 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
5321 }
5322 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
5323 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
5324 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
5325 pDesc->context.ip.u8CSS,
5326 pDesc->context.ip.u8CSO,
5327 pDesc->context.ip.u16CSE,
5328 pDesc->context.tu.u8CSS,
5329 pDesc->context.tu.u8CSO,
5330 pDesc->context.tu.u16CSE));
5331 return true; /* TODO: Consider returning false for invalid descriptors */
5332}
5333
5334static bool e1kLocateTxPacket(PE1KSTATE pThis)
5335{
5336 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
5337 pThis->szPrf, pThis->cbTxAlloc));
5338 /* Check if we have located the packet already. */
5339 if (pThis->cbTxAlloc)
5340 {
5341 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
5342 pThis->szPrf, pThis->cbTxAlloc));
5343 return true;
5344 }
5345
5346 bool fTSE = false;
5347 uint32_t cbPacket = 0;
5348
5349    /* Since we process one packet at a time, we will only mark the current packet's descriptors as valid. */
5350 memset(pThis->afTxDValid, 0, sizeof(pThis->afTxDValid));
5351 for (int i = pThis->iTxDCurrent; i < pThis->nTxDFetched; ++i)
5352 {
5353 E1KTXDESC *pDesc = &pThis->aTxDescriptors[i];
5354 /* Assume the descriptor valid until proven otherwise. */
5355 pThis->afTxDValid[i] = true;
5356 switch (e1kGetDescType(pDesc))
5357 {
5358 case E1K_DTYP_CONTEXT:
5359 if (cbPacket == 0)
5360 pThis->afTxDValid[i] = e1kUpdateTxContext(pThis, pDesc);
5361 else
5362 E1kLog(("%s e1kLocateTxPacket: ignoring a context descriptor in the middle of a packet, cbPacket=%d\n",
5363 pThis->szPrf, cbPacket));
5364 continue;
5365 case E1K_DTYP_LEGACY:
5366 /* Skip invalid descriptors. */
5367 if (cbPacket > 0 && (pThis->fGSO || fTSE))
5368 {
5369 E1kLog(("%s e1kLocateTxPacket: ignoring a legacy descriptor in the segmentation context, cbPacket=%d\n",
5370 pThis->szPrf, cbPacket));
5371 pThis->afTxDValid[i] = false; /* Make sure it is skipped by processing */
5372 continue;
5373 }
5374 /* Skip empty descriptors. */
5375 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
5376 break;
5377 cbPacket += pDesc->legacy.cmd.u16Length;
5378 pThis->fGSO = false;
5379 break;
5380 case E1K_DTYP_DATA:
5381 /* Skip invalid descriptors. */
5382 if (cbPacket > 0 && (bool)pDesc->data.cmd.fTSE != fTSE)
5383 {
5384 E1kLog(("%s e1kLocateTxPacket: ignoring %sTSE descriptor in the %ssegmentation context, cbPacket=%d\n",
5385 pThis->szPrf, pDesc->data.cmd.fTSE ? "" : "non-", fTSE ? "" : "non-", cbPacket));
5386 pThis->afTxDValid[i] = false; /* Make sure it is skipped by processing */
5387 continue;
5388 }
5389 /* Skip empty descriptors. */
5390 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
5391 break;
5392 if (cbPacket == 0)
5393 {
5394 /*
5395 * The first fragment: save IXSM and TXSM options
5396 * as these are only valid in the first fragment.
5397 */
5398 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
5399 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
5400 fTSE = pDesc->data.cmd.fTSE;
5401 /*
5402 * TSE descriptors have VLE bit properly set in
5403 * the first fragment.
5404 */
5405 if (fTSE)
5406 {
5407 pThis->fVTag = pDesc->data.cmd.fVLE;
5408 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5409 }
5410 pThis->fGSO = e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE);
5411 }
5412 cbPacket += pDesc->data.cmd.u20DTALEN;
5413 break;
5414 default:
5415 AssertMsgFailed(("Impossible descriptor type!"));
5416 continue;
5417 }
5418 if (pDesc->legacy.cmd.fEOP)
5419 {
5420 /*
5421 * Non-TSE descriptors have VLE bit properly set in
5422 * the last fragment.
5423 */
5424 if (!fTSE)
5425 {
5426 pThis->fVTag = pDesc->data.cmd.fVLE;
5427 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5428 }
5429 /*
5430 * Compute the required buffer size. If we cannot do GSO but still
5431 * have to do segmentation we allocate the first segment only.
5432 */
5433 pThis->cbTxAlloc = (!fTSE || pThis->fGSO) ?
5434 cbPacket :
5435 RT_MIN(cbPacket, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN);
5436 /* Do not add VLAN tags to empty packets. */
5437 if (pThis->fVTag && pThis->cbTxAlloc > 0)
5438 pThis->cbTxAlloc += 4;
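            /* Example with made-up but typical numbers: a TSE packet with cbPacket = 4054,
             * HDRLEN = 54 and MSS = 1460 that cannot go out as GSO gets cbTxAlloc =
             * min(4054, 1460 + 54) = 1514, i.e. only the first segment is allocated here
             * (1518 with a VLAN tag); a GSO-capable packet gets the full 4054 (+4). */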
5439 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d cbPacket=%d%s%s\n",
5440 pThis->szPrf, pThis->cbTxAlloc, cbPacket,
5441 pThis->fGSO ? " GSO" : "", fTSE ? " TSE" : ""));
5442 return true;
5443 }
5444 }
5445
5446 if (cbPacket == 0 && pThis->nTxDFetched - pThis->iTxDCurrent > 0)
5447 {
5448 /* All descriptors were empty, we need to process them as a dummy packet */
5449 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
5450 pThis->szPrf, pThis->cbTxAlloc));
5451 return true;
5452 }
5453 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d cbPacket=%d\n",
5454 pThis->szPrf, pThis->cbTxAlloc, cbPacket));
5455 return false;
5456}
5457
5458static int e1kXmitPacket(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread, PE1KTXDC pTxdc)
5459{
5460 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5461 int rc = VINF_SUCCESS;
5462
5463 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
5464 pThis->szPrf, pThis->iTxDCurrent, pThis->nTxDFetched));
5465
5466 while (pThis->iTxDCurrent < pThis->nTxDFetched)
5467 {
5468 E1KTXDESC *pDesc = &pThis->aTxDescriptors[pThis->iTxDCurrent];
5469 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5470 pThis->szPrf, TDBAH, TDBAL + pTxdc->tdh * sizeof(E1KTXDESC), pTxdc->tdlen, pTxdc->tdh, pTxdc->tdt));
5471 if (!pThis->afTxDValid[pThis->iTxDCurrent])
5472 {
5473 e1kPrintTDesc(pThis, pDesc, "vvv");
5474 E1kLog(("%s e1kXmitDesc: skipping bad descriptor ^^^\n", pThis->szPrf));
5475 e1kDescReport(pDevIns, pThis, pDesc, e1kDescAddr(TDBAH, TDBAL, pTxdc->tdh));
5476 rc = VINF_SUCCESS;
5477 }
5478 else
5479 rc = e1kXmitDesc(pDevIns, pThis, pThisCC, pDesc, e1kDescAddr(TDBAH, TDBAL, pTxdc->tdh), fOnWorkerThread);
5480 if (RT_FAILURE(rc))
5481 break;
5482 if (++pTxdc->tdh * sizeof(E1KTXDESC) >= pTxdc->tdlen)
5483 pTxdc->tdh = 0;
5484 TDH = pTxdc->tdh; /* Sync the actual register and TXDC */
5485 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
5486 if (uLowThreshold != 0 && e1kGetTxLen(pTxdc) <= uLowThreshold)
5487 {
5488 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5489 pThis->szPrf, e1kGetTxLen(pTxdc), GET_BITS(TXDCTL, LWTHRESH)*8));
5490 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5491 }
5492 ++pThis->iTxDCurrent;
5493 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
5494 break;
5495 }
5496
5497 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
5498 pThis->szPrf, rc, pThis->iTxDCurrent, pThis->nTxDFetched));
5499 return rc;
5500}
5501
5502#endif /* E1K_WITH_TXD_CACHE */
5503#ifndef E1K_WITH_TXD_CACHE
5504
5505/**
5506 * Transmit pending descriptors.
5507 *
5508 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5509 *
5510 * @param pDevIns The device instance.
5511 * @param pThis The E1000 state.
5512 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5513 */
5514static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5515{
5516 int rc = VINF_SUCCESS;
5517 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5518
5519 /* Check if transmitter is enabled. */
5520 if (!(TCTL & TCTL_EN))
5521 return VINF_SUCCESS;
5522 /*
5523 * Grab the xmit lock of the driver as well as the E1K device state.
5524 */
5525 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5526 if (RT_LIKELY(rc == VINF_SUCCESS))
5527 {
5528 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5529 if (pDrv)
5530 {
5531 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5532 if (RT_FAILURE(rc))
5533 {
5534 e1kCsTxLeave(pThis);
5535 return rc;
5536 }
5537 }
5538 /*
5539 * Process all pending descriptors.
5540 * Note! Do not process descriptors in locked state
5541 */
5542 while (TDH != TDT && !pThis->fLocked)
5543 {
5544 E1KTXDESC desc;
5545 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5546 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
5547
5548 e1kLoadDesc(pDevIns, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
5549 rc = e1kXmitDesc(pDevIns, pThis, pThisCC, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5550 /* If we failed to transmit descriptor we will try it again later */
5551 if (RT_FAILURE(rc))
5552 break;
5553 if (++TDH * sizeof(desc) >= TDLEN)
5554 TDH = 0;
5555
5556 if (e1kGetTxLen(pThis) <= GET_BITS(TXDCTL, LWTHRESH)*8)
5557 {
5558 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5559 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5560 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5561 }
5562
5563 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5564 }
5565
5566 /// @todo uncomment: pThis->uStatIntTXQE++;
5567 /// @todo uncomment: e1kRaiseInterrupt(pDevIns, pThis, ICR_TXQE);
5568 /*
5569 * Release the lock.
5570 */
5571 if (pDrv)
5572 pDrv->pfnEndXmit(pDrv);
5573 e1kCsTxLeave(pThis);
5574 }
5575
5576 return rc;
5577}
5578
5579#else /* E1K_WITH_TXD_CACHE */
5580
5581static void e1kDumpTxDCache(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KTXDC pTxdc)
5582{
5583 unsigned i, cDescs = pTxdc->tdlen / sizeof(E1KTXDESC);
5584 uint32_t tdh = pTxdc->tdh;
5585 LogRel(("E1000: -- Transmit Descriptors (%d total) --\n", cDescs));
5586 for (i = 0; i < cDescs; ++i)
5587 {
5588 E1KTXDESC desc;
5589 PDMDevHlpPCIPhysRead(pDevIns , e1kDescAddr(TDBAH, TDBAL, i), &desc, sizeof(desc));
5590 if (i == tdh)
5591 LogRel(("E1000: >>> "));
5592 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5593 }
5594 LogRel(("E1000: -- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5595 pThis->iTxDCurrent, pTxdc->tdh, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE));
5596 if (tdh > pThis->iTxDCurrent)
5597 tdh -= pThis->iTxDCurrent;
5598 else
5599 tdh = cDescs + tdh - pThis->iTxDCurrent;
5600 for (i = 0; i < pThis->nTxDFetched; ++i)
5601 {
5602 if (i == pThis->iTxDCurrent)
5603 LogRel(("E1000: >>> "));
5604 if (cDescs)
5605 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
5606 else
5607 LogRel(("E1000: <lost>: %R[e1ktxd]\n", &pThis->aTxDescriptors[i]));
5608 }
5609}
5610
5611/**
5612 * Transmit pending descriptors.
5613 *
5614 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5615 *
5616 * @param pDevIns The device instance.
5617 * @param pThis The E1000 state.
5618 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5619 */
5620static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5621{
5622 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5623 int rc = VINF_SUCCESS;
5624
5625 /* Check if transmitter is enabled. */
5626 if (!(TCTL & TCTL_EN))
5627 return VINF_SUCCESS;
5628 /*
5629 * Grab the xmit lock of the driver as well as the E1K device state.
5630 */
5631 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
5632 if (pDrv)
5633 {
5634 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5635 if (RT_FAILURE(rc))
5636 return rc;
5637 }
5638
5639 /*
5640 * Process all pending descriptors.
5641 * Note! Do not process descriptors in locked state
5642 */
5643 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5644 if (RT_LIKELY(rc == VINF_SUCCESS && (TCTL & TCTL_EN)))
5645 {
5646 E1KTXDC txdc;
5647 bool fTxContextValid = e1kUpdateTxDContext(pDevIns, pThis, &txdc);
5648 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5649 /*
5650 * fIncomplete is set whenever we try to fetch additional descriptors
5651         * for an incomplete packet. If we fail to locate a complete packet on
5652         * the next iteration, we need to reset the cache or we risk getting
5653         * stuck in this loop forever.
5654 */
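        /* For instance, a packet spread over more descriptors than the cache holds is
         * fetched in two rounds: the first round leaves u8Remain descriptors of the
         * partial packet, which are moved to the start of the cache before more are
         * loaded; only if the next round still finds no EOP is the cache dropped and an
         * error reported. */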
5655 bool fIncomplete = false;
5656 while (fTxContextValid && !pThis->fLocked && e1kTxDLazyLoad(pDevIns, pThis, &txdc))
5657 {
5658 while (e1kLocateTxPacket(pThis))
5659 {
5660 fIncomplete = false;
5661 /* Found a complete packet, allocate it. */
5662 rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->fGSO);
5663 /* If we're out of bandwidth we'll come back later. */
5664 if (RT_FAILURE(rc))
5665 goto out;
5666 /* Copy the packet to allocated buffer and send it. */
5667 rc = e1kXmitPacket(pDevIns, pThis, fOnWorkerThread, &txdc);
5668 /* If we're out of bandwidth we'll come back later. */
5669 if (RT_FAILURE(rc))
5670 goto out;
5671 }
5672 uint8_t u8Remain = pThis->nTxDFetched - pThis->iTxDCurrent;
5673 if (RT_UNLIKELY(fIncomplete))
5674 {
5675 static bool fTxDCacheDumped = false;
5676 /*
5677 * The descriptor cache is full, but we were unable to find
5678             * a complete packet in it. Drop the cache and hope that
5679             * the guest driver can recover from the network card error.
5680 */
5681 LogRel(("%s: No complete packets in%s TxD cache! "
5682 "Fetched=%d, current=%d, TX len=%d.\n",
5683 pThis->szPrf,
5684 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5685 pThis->nTxDFetched, pThis->iTxDCurrent,
5686 e1kGetTxLen(&txdc)));
5687 if (!fTxDCacheDumped)
5688 {
5689 fTxDCacheDumped = true;
5690 e1kDumpTxDCache(pDevIns, pThis, &txdc);
5691 }
5692 pThis->iTxDCurrent = pThis->nTxDFetched = 0;
5693 /*
5694 * Returning an error at this point means Guru in R0
5695 * (see @bugref{6428}).
5696 */
5697# ifdef IN_RING3
5698 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5699# else /* !IN_RING3 */
5700 rc = VINF_IOM_R3_MMIO_WRITE;
5701# endif /* !IN_RING3 */
5702 goto out;
5703 }
5704 if (u8Remain > 0)
5705 {
5706 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5707 "%d more are available\n",
5708 pThis->szPrf, pThis->iTxDCurrent, u8Remain,
5709 e1kGetTxLen(&txdc) - u8Remain));
5710
5711 /*
5712 * A packet was partially fetched. Move incomplete packet to
5713 * the beginning of cache buffer, then load more descriptors.
5714 */
5715 memmove(pThis->aTxDescriptors,
5716 &pThis->aTxDescriptors[pThis->iTxDCurrent],
5717 u8Remain * sizeof(E1KTXDESC));
5718 pThis->iTxDCurrent = 0;
5719 pThis->nTxDFetched = u8Remain;
5720 e1kTxDLoadMore(pDevIns, pThis, &txdc);
5721 fIncomplete = true;
5722 }
5723 else
5724 pThis->nTxDFetched = 0;
5725 pThis->iTxDCurrent = 0;
5726 }
5727 if (!pThis->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5728 {
5729 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5730 pThis->szPrf));
5731 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5732 }
5733out:
5734 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5735
5736 /// @todo uncomment: pThis->uStatIntTXQE++;
5737 /// @todo uncomment: e1kRaiseInterrupt(pDevIns, pThis, ICR_TXQE);
5738
5739 e1kCsTxLeave(pThis);
5740 }
5741
5742
5743 /*
5744 * Release the lock.
5745 */
5746 if (pDrv)
5747 pDrv->pfnEndXmit(pDrv);
5748 return rc;
5749}
5750
5751#endif /* E1K_WITH_TXD_CACHE */
5752#ifdef IN_RING3
5753
5754/**
5755 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5756 */
5757static DECLCALLBACK(void) e1kR3NetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5758{
5759 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
5760 PE1KSTATE pThis = pThisCC->pShared;
5761 /* Resume suspended transmission */
5762 STATUS &= ~STATUS_TXOFF;
5763 e1kXmitPending(pThisCC->pDevInsR3, pThis, true /*fOnWorkerThread*/);
5764}
5765
5766/**
5767 * @callback_method_impl{FNPDMTASKDEV,
5768 * Executes e1kXmitPending at the behest of ring-0/raw-mode.}
5769 * @note Not executed on EMT.
5770 */
5771static DECLCALLBACK(void) e1kR3TxTaskCallback(PPDMDEVINS pDevIns, void *pvUser)
5772{
5773 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
5774 E1kLog2(("%s e1kR3TxTaskCallback:\n", pThis->szPrf));
5775
5776 int rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
5777 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN || rc == VERR_NET_DOWN, ("%Rrc\n", rc));
5778
5779 RT_NOREF(rc, pvUser);
5780}
5781
5782#endif /* IN_RING3 */
5783
5784/**
5785 * Write handler for Transmit Descriptor Tail register.
5786 *
5787 * @param pThis The device state structure.
5788 * @param offset Register offset in memory-mapped frame.
5789 * @param index Register index in register array.
5790 * @param value The value to store.
5792 * @thread EMT
5793 */
5794static int e1kRegWriteTDT(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5795{
5796 int rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
5797
5798 /* All descriptors starting with head and not including tail belong to us. */
5799 /* Process them. */
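    /* Ring-ownership sketch (illustrative): with a 16-descriptor ring and TDH = 3, a guest
     * write of TDT = 7 hands descriptors 3..6 to the device; a write of TDT = 1 wraps and
     * hands over 3..15 plus 0. This distance is what e1kGetTxLen() reports below. */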
5800 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5801 pThis->szPrf, TDBAL, TDBAH, TDLEN, TDH, TDT));
5802
5803 /* Compose a temporary TX context, breaking TX CS rule, for debugging purposes. */
5804 /* If we decide to transmit, the TX critical section will be entered later in e1kXmitPending(). */
5805 E1KTXDC txdc;
5806 txdc.tdlen = TDLEN;
5807 txdc.tdh = TDH;
5808 txdc.tdt = TDT;
5809 /* Ignore TDT writes when the link is down. */
5810 if (txdc.tdh != txdc.tdt && (STATUS & STATUS_LU))
5811 {
5812 Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", txdc.tdh, txdc.tdt, e1kGetTxLen(&txdc)));
5813 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5814 pThis->szPrf, e1kGetTxLen(&txdc)));
5815
5816 /* Transmit pending packets if possible, defer it if we cannot do it
5817 in the current context. */
5818#ifdef E1K_TX_DELAY
5819 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5820 if (RT_LIKELY(rc == VINF_SUCCESS))
5821 {
5822 if (!PDMDevInsTimerIsActive(pDevIns, pThis->hTXDTimer))
5823 {
5824# ifdef E1K_INT_STATS
5825 pThis->u64ArmedAt = RTTimeNanoTS();
5826# endif
5827 e1kArmTimer(pDevIns, pThis, pThis->hTXDTimer, E1K_TX_DELAY);
5828 }
5829 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayed);
5830 e1kCsTxLeave(pThis);
5831 return rc;
5832 }
5833 /* We failed to enter the TX critical section -- transmit as usual. */
5834#endif /* E1K_TX_DELAY */
5835#ifndef IN_RING3
5836 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5837 if (!pThisCC->CTX_SUFF(pDrv))
5838 {
5839 PDMDevHlpTaskTrigger(pDevIns, pThis->hTxTask);
5840 rc = VINF_SUCCESS;
5841 }
5842 else
5843#endif
5844 {
5845 rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
5846 if (rc == VERR_TRY_AGAIN)
5847 rc = VINF_SUCCESS;
5848#ifndef IN_RING3
5849 else if (rc == VERR_SEM_BUSY)
5850 rc = VINF_IOM_R3_MMIO_WRITE;
5851#endif
5852 AssertRC(rc);
5853 }
5854 }
5855
5856 return rc;
5857}
5858
5859/**
5860 * Write handler for Multicast Table Array registers.
5861 *
5862 * @param pThis The device state structure.
5863 * @param offset Register offset in memory-mapped frame.
5864 * @param index Register index in register array.
5865 * @param value The value to store.
5866 * @thread EMT
5867 */
5868static int e1kRegWriteMTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5869{
5870 RT_NOREF_PV(pDevIns);
5871 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5872 pThis->auMTA[(offset - g_aE1kRegMap[index].offset) / sizeof(pThis->auMTA[0])] = value;
5873
5874 return VINF_SUCCESS;
5875}
5876
5877/**
5878 * Read handler for Multicast Table Array registers.
5879 *
5880 * @returns VBox status code.
5881 *
5882 * @param pThis The device state structure.
5883 * @param offset Register offset in memory-mapped frame.
5884 * @param index Register index in register array.
5885 * @thread EMT
5886 */
5887static int e1kRegReadMTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5888{
5889 RT_NOREF_PV(pDevIns);
5890 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5891 *pu32Value = pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])];
5892
5893 return VINF_SUCCESS;
5894}
5895
5896/**
5897 * Write handler for Receive Address registers.
5898 *
5899 * @param pThis The device state structure.
5900 * @param offset Register offset in memory-mapped frame.
5901 * @param index Register index in register array.
5902 * @param value The value to store.
5903 * @thread EMT
5904 */
5905static int e1kRegWriteRA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5906{
5907 RT_NOREF_PV(pDevIns);
5908 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5909 pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
5910
5911 return VINF_SUCCESS;
5912}
5913
5914/**
5915 * Read handler for Receive Address registers.
5916 *
5917 * @returns VBox status code.
5918 *
5919 * @param pThis The device state structure.
5920 * @param offset Register offset in memory-mapped frame.
5921 * @param index Register index in register array.
5922 * @thread EMT
5923 */
5924static int e1kRegReadRA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5925{
5926 RT_NOREF_PV(pDevIns);
5927    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5928 *pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
5929
5930 return VINF_SUCCESS;
5931}
5932
5933/**
5934 * Write handler for VLAN Filter Table Array registers.
5935 *
5936 * @param pThis The device state structure.
5937 * @param offset Register offset in memory-mapped frame.
5938 * @param index Register index in register array.
5939 * @param value The value to store.
5940 * @thread EMT
5941 */
5942static int e1kRegWriteVFTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5943{
5944 RT_NOREF_PV(pDevIns);
5945 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VINF_SUCCESS);
5946 pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])] = value;
5947
5948 return VINF_SUCCESS;
5949}
5950
5951/**
5952 * Read handler for VLAN Filter Table Array registers.
5953 *
5954 * @returns VBox status code.
5955 *
5956 * @param pThis The device state structure.
5957 * @param offset Register offset in memory-mapped frame.
5958 * @param index Register index in register array.
5959 * @thread EMT
5960 */
5961static int e1kRegReadVFTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5962{
5963 RT_NOREF_PV(pDevIns);
5964    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VERR_DEV_IO_ERROR);
5965 *pu32Value = pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])];
5966
5967 return VINF_SUCCESS;
5968}
5969
5970/**
5971 * Read handler for unimplemented registers.
5972 *
5973 * Merely reports reads from unimplemented registers.
5974 *
5975 * @returns VBox status code.
5976 *
5977 * @param pThis The device state structure.
5978 * @param offset Register offset in memory-mapped frame.
5979 * @param index Register index in register array.
5980 * @thread EMT
5981 */
5982static int e1kRegReadUnimplemented(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5983{
5984 RT_NOREF(pDevIns, pThis, offset, index);
5985 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5986 pThis->szPrf, offset, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5987 *pu32Value = 0;
5988
5989 return VINF_SUCCESS;
5990}
5991
5992/**
5993 * Default register read handler with automatic clear operation.
5994 *
5995 * Retrieves the value of register from register array in device state structure.
5996 * Then resets all bits.
5997 *
5998 * @remarks Masking and shifting for partial (8 and 16-bit) reads is done by the
5999 *          caller, so this handler always deals with the full 32-bit value.
6000 *
6001 * @returns VBox status code.
6002 *
6003 * @param pThis The device state structure.
6004 * @param offset Register offset in memory-mapped frame.
6005 * @param index Register index in register array.
6006 * @thread EMT
6007 */
6008static int e1kRegReadAutoClear(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
6009{
6010 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
6011 int rc = e1kRegReadDefault(pDevIns, pThis, offset, index, pu32Value);
6012 pThis->auRegs[index] = 0;
6013
6014 return rc;
6015}
6016
6017/**
6018 * Default register read handler.
6019 *
6020 * Retrieves the value of register from register array in device state structure.
6021 * Bits corresponding to 0s in 'readable' mask will always read as 0s.
6022 *
6023 * @remarks Masking and shifting for partial (8 and 16-bit) reads is done by the
6024 *          caller, so this handler always deals with the full 32-bit value.
6025 *
6026 * @returns VBox status code.
6027 *
6028 * @param pThis The device state structure.
6029 * @param offset Register offset in memory-mapped frame.
6030 * @param index Register index in register array.
6031 * @thread EMT
6032 */
6033static int e1kRegReadDefault(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
6034{
6035 RT_NOREF_PV(pDevIns); RT_NOREF_PV(offset);
6036
6037 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
6038 *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
6039
6040 return VINF_SUCCESS;
6041}
6042
6043/**
6044 * Write handler for unimplemented registers.
6045 *
6046 * Merely reports writes to unimplemented registers.
6047 *
6048 * @param pThis The device state structure.
6049 * @param offset Register offset in memory-mapped frame.
6050 * @param index Register index in register array.
6051 * @param value The value to store.
6052 * @thread EMT
6053 */
6055static int e1kRegWriteUnimplemented(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
6056{
6057 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
6058
6059 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
6060 pThis->szPrf, offset, value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6061
6062 return VINF_SUCCESS;
6063}
6064
6065/**
6066 * Default register write handler.
6067 *
6068 * Stores the value to the register array in device state structure. Only bits
6069 * corresponding to 1s in the register's 'writable' mask will be stored.
6070 *
6071 * @returns VBox status code.
6072 *
6073 * @param pThis The device state structure.
6074 * @param offset Register offset in memory-mapped frame.
6075 * @param index Register index in register array.
6076 * @param value The value to store.
6078 * @thread EMT
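 * @remarks For instance (illustrative values), a register whose 'writable' mask is
 *          0x000000FF keeps its upper 24 bits across a 32-bit guest write: writing
 *          0x12345678 stores 0x78 and leaves bits 8-31 unchanged.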
6079 */
6081static int e1kRegWriteDefault(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
6082{
6083 RT_NOREF(pDevIns, offset);
6084
6085 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
6086 pThis->auRegs[index] = (value & g_aE1kRegMap[index].writable)
6087 | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
6088
6089 return VINF_SUCCESS;
6090}
6091
6092/**
6093 * Search register table for matching register.
6094 *
6095 * @returns Index in the register table or -1 if not found.
6096 *
6097 * @param offReg Register offset in memory-mapped region.
6098 * @thread EMT
6099 */
6100static int e1kRegLookup(uint32_t offReg)
6101{
6102
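    /*
     * Note: the first E1K_NUM_OF_BINARY_SEARCHABLE entries of g_aE1kRegMap
     * are expected to be sorted by offset, so they can be binary searched
     * below; the remaining entries are scanned linearly afterwards.
     */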
6103#if 0
6104 int index;
6105
6106 for (index = 0; index < E1K_NUM_OF_REGS; index++)
6107 {
6108 if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
6109 {
6110 return index;
6111 }
6112 }
6113#else
6114 int iStart = 0;
6115 int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
6116 for (;;)
6117 {
6118 int i = (iEnd - iStart) / 2 + iStart;
6119 uint32_t offCur = g_aE1kRegMap[i].offset;
6120 if (offReg < offCur)
6121 {
6122 if (i == iStart)
6123 break;
6124 iEnd = i;
6125 }
6126 else if (offReg >= offCur + g_aE1kRegMap[i].size)
6127 {
6128 i++;
6129 if (i == iEnd)
6130 break;
6131 iStart = i;
6132 }
6133 else
6134 return i;
6135 Assert(iEnd > iStart);
6136 }
6137
6138 for (unsigned i = E1K_NUM_OF_BINARY_SEARCHABLE; i < RT_ELEMENTS(g_aE1kRegMap); i++)
6139 if (offReg - g_aE1kRegMap[i].offset < g_aE1kRegMap[i].size)
6140 return (int)i;
6141
6142# ifdef VBOX_STRICT
6143 for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
6144 Assert(offReg - g_aE1kRegMap[i].offset >= g_aE1kRegMap[i].size);
6145# endif
6146
6147#endif
6148
6149 return -1;
6150}
6151
6152/**
6153 * Handle unaligned register read operation.
6154 *
6155 * Looks up and calls appropriate handler.
6156 *
6157 * @returns VBox status code.
6158 *
6159 * @param pDevIns The device instance.
6160 * @param pThis The device state structure.
6161 * @param offReg Register offset in memory-mapped frame.
6162 * @param pv Where to store the result.
6163 * @param cb Number of bytes to read.
6164 * @thread EMT
6165 * @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
6166 * accesses we have to take care of that ourselves.
6167 */
6168static int e1kRegReadUnaligned(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, void *pv, uint32_t cb)
6169{
6170 uint32_t u32 = 0;
6171 uint32_t shift;
6172 int rc = VINF_SUCCESS;
6173 int index = e1kRegLookup(offReg);
6174#ifdef LOG_ENABLED
6175 char buf[9];
6176#endif
6177
6178 /*
6179 * From the spec:
6180 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
6181 * double word) is ignored. Partial reads return all 32 bits of data regardless of the byte enables.
6182 */
6183
6184 /*
6185 * To be able to read bytes and short words we convert them to properly
6186 * shifted 32-bit words and masks. The idea is to keep register-specific
6187 * handlers simple. Most accesses will be 32-bit anyway.
6188 */
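    /*
     * Worked example (illustration only): a 2-byte read at <register offset>
     * + 2 starts with mask 0x0000FFFF; the shift computed below is 16, so the
     * mask becomes 0xFFFF0000, the handler is invoked for the whole 32-bit
     * register, the upper half is kept, and the result is shifted back down
     * into the low 16 bits.
     */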
6189 uint32_t mask;
6190 switch (cb)
6191 {
6192 case 4: mask = 0xFFFFFFFF; break;
6193 case 2: mask = 0x0000FFFF; break;
6194 case 1: mask = 0x000000FF; break;
6195 default:
6196 return PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "unsupported op size: offset=%#10x cb=%#10x\n", offReg, cb);
6197 }
6198 if (index >= 0)
6199 {
6200 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6201 if (g_aE1kRegMap[index].readable)
6202 {
6203 /* Make the mask correspond to the bits we are about to read. */
6204 shift = (offReg - g_aE1kRegMap[index].offset) % sizeof(uint32_t) * 8;
6205 mask <<= shift;
6206 if (!mask)
6207 return PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
6208 /*
6209 * Read it. Pass the mask so the handler knows what has to be read.
6210 * Mask out irrelevant bits.
6211 */
6212 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
6213 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6214 return rc;
6215 //pThis->fDelayInts = false;
6216 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6217 //pThis->iStatIntLostOne = 0;
6218 rc = g_aE1kRegMap[index].pfnRead(pDevIns, pThis, offReg & 0xFFFFFFFC, (uint32_t)index, &u32);
6219 u32 &= mask;
6220 //e1kCsLeave(pThis);
6221 E1kLog2(("%s At %08X read %s from %s (%s)\n",
6222 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6223 Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
6224 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6225 /* Shift back the result. */
6226 u32 >>= shift;
6227 }
6228 else
6229 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
6230 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6231 if (IOM_SUCCESS(rc))
6232 STAM_COUNTER_INC(&pThis->aStatRegReads[index]);
6233 }
6234 else
6235 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
6236 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf)));
6237
6238 memcpy(pv, &u32, cb);
6239 return rc;
6240}
6241
6242/**
6243 * Handle 4 byte aligned and sized read operation.
6244 *
6245 * Looks up and calls appropriate handler.
6246 *
6247 * @returns VBox status code.
6248 *
6249 * @param pDevIns The device instance.
6250 * @param pThis The device state structure.
6251 * @param offReg Register offset in memory-mapped frame.
6252 * @param pu32 Where to store the result.
6253 * @thread EMT
6254 */
6255static VBOXSTRICTRC e1kRegReadAlignedU32(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, uint32_t *pu32)
6256{
6257 Assert(!(offReg & 3));
6258
6259 /*
6260 * Lookup the register and check that it's readable.
6261 */
6262 VBOXSTRICTRC rc = VINF_SUCCESS;
6263 int idxReg = e1kRegLookup(offReg);
6264 if (RT_LIKELY(idxReg >= 0))
6265 {
6266 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6267 if (RT_LIKELY(g_aE1kRegMap[idxReg].readable))
6268 {
6269 /*
6270 * Read it. Pass the mask so the handler knows what has to be read.
6271 * Mask out irrelevant bits.
6272 */
6273 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
6274 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
6275 // return rc;
6276 //pThis->fDelayInts = false;
6277 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6278 //pThis->iStatIntLostOne = 0;
6279 rc = g_aE1kRegMap[idxReg].pfnRead(pDevIns, pThis, offReg & 0xFFFFFFFC, (uint32_t)idxReg, pu32);
6280 //e1kCsLeave(pThis);
6281 Log6(("%s At %08X read %08X from %s (%s)\n",
6282 pThis->szPrf, offReg, *pu32, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
6283 if (IOM_SUCCESS(rc))
6284 STAM_COUNTER_INC(&pThis->aStatRegReads[idxReg]);
6285 }
6286 else
6287 E1kLog(("%s At %08X read attempt from non-readable register %s (%s)\n",
6288 pThis->szPrf, offReg, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
6289 }
6290 else
6291 E1kLog(("%s At %08X read attempt from non-existing register\n", pThis->szPrf, offReg));
6292 return rc;
6293}
6294
6295/**
6296 * Handle 4 byte sized and aligned register write operation.
6297 *
6298 * Looks up and calls appropriate handler.
6299 *
6300 * @returns VBox status code.
6301 *
6302 * @param pDevIns The device instance.
6303 * @param pThis The device state structure.
6304 * @param offReg Register offset in memory-mapped frame.
6305 * @param u32Value The value to write.
6306 * @thread EMT
6307 */
6308static VBOXSTRICTRC e1kRegWriteAlignedU32(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
6309{
6310 VBOXSTRICTRC rc = VINF_SUCCESS;
6311 int index = e1kRegLookup(offReg);
6312 if (RT_LIKELY(index >= 0))
6313 {
6314 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6315 if (RT_LIKELY(g_aE1kRegMap[index].writable))
6316 {
6317 /*
6318 * Write it. Pass the mask so the handler knows what has to be written.
6319 * Mask out irrelevant bits.
6320 */
6321 Log6(("%s At %08X write %08X to %s (%s)\n",
6322 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6323 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
6324 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
6325 // return rc;
6326 //pThis->fDelayInts = false;
6327 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6328 //pThis->iStatIntLostOne = 0;
6329 rc = g_aE1kRegMap[index].pfnWrite(pDevIns, pThis, offReg, (uint32_t)index, u32Value);
6330 //e1kCsLeave(pThis);
6331 }
6332 else
6333 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
6334 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6335 if (IOM_SUCCESS(rc))
6336 STAM_COUNTER_INC(&pThis->aStatRegWrites[index]);
6337 }
6338 else
6339 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
6340 pThis->szPrf, offReg, u32Value));
6341 return rc;
6342}
6343
6344
6345/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
6346
6347/**
6348 * @callback_method_impl{FNIOMMMIONEWREAD}
6349 */
6350static DECLCALLBACK(VBOXSTRICTRC) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, uint32_t cb)
6351{
6352 RT_NOREF2(pvUser, cb);
6353 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6354 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6355
6356 Assert(off < E1K_MM_SIZE);
6357 Assert(cb == 4);
6358 Assert(!(off & 3));
6359
6360 VBOXSTRICTRC rcStrict = e1kRegReadAlignedU32(pDevIns, pThis, (uint32_t)off, (uint32_t *)pv);
6361
6362 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6363 return rcStrict;
6364}
6365
6366/**
6367 * @callback_method_impl{FNIOMMMIONEWWRITE}
6368 */
6369static DECLCALLBACK(VBOXSTRICTRC) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, uint32_t cb)
6370{
6371 RT_NOREF2(pvUser, cb);
6372 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6373 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6374
6375 Assert(off < E1K_MM_SIZE);
6376 Assert(cb == 4);
6377 Assert(!(off & 3));
6378
6379 VBOXSTRICTRC rcStrict = e1kRegWriteAlignedU32(pDevIns, pThis, (uint32_t)off, *(uint32_t const *)pv);
6380
6381 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6382 return rcStrict;
6383}
6384
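/*
 * Note on the I/O space interface: unlike MMIO, where every register is
 * directly addressable, the I/O BAR exposes only two 32-bit ports.  The guest
 * first writes a register offset to IOADDR (BAR offset 0x00) and then reads
 * or writes the selected register through IODATA (BAR offset 0x04);
 * e1kIOPortIn and e1kIOPortOut below implement that window on top of the
 * register access helpers above.
 */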
6385/**
6386 * @callback_method_impl{FNIOMIOPORTNEWIN}
6387 */
6388static DECLCALLBACK(VBOXSTRICTRC) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
6389{
6390 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6391 VBOXSTRICTRC rc;
6392 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIORead), a);
6393 RT_NOREF_PV(pvUser);
6394
6395 if (RT_LIKELY(cb == 4))
6396 switch (offPort)
6397 {
6398 case 0x00: /* IOADDR */
6399 *pu32 = pThis->uSelectedReg;
6400 Log9(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6401 rc = VINF_SUCCESS;
6402 break;
6403
6404 case 0x04: /* IODATA */
6405 if (!(pThis->uSelectedReg & 3))
6406 rc = e1kRegReadAlignedU32(pDevIns, pThis, pThis->uSelectedReg, pu32);
6407 else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
6408 rc = e1kRegReadUnaligned(pDevIns, pThis, pThis->uSelectedReg, pu32, cb);
6409 if (rc == VINF_IOM_R3_MMIO_READ)
6410 rc = VINF_IOM_R3_IOPORT_READ;
6411 Log9(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6412 break;
6413
6414 default:
6415 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", pThis->szPrf, offPort));
6416 /** @todo r=bird: Check what real hardware returns here. */
6417 //rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
6418 rc = VINF_IOM_MMIO_UNUSED_00; /* used to return VINF_SUCCESS and not touch *pu32, which amounted to this. */
6419 break;
6420 }
6421 else
6422 {
6423 E1kLog(("%s e1kIOPortIn: invalid op size: offPort=%RTiop cb=%08x", pThis->szPrf, offPort, cb));
6424 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb);
6425 *pu32 = 0; /** @todo r=bird: Check what real hardware returns here. (Didn't used to set a value here, picked zero as that's what we'd end up in most cases.) */
6426 }
6427 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIORead), a);
6428 return rc;
6429}
6430
6431
6432/**
6433 * @callback_method_impl{FNIOMIOPORTNEWOUT}
6434 */
6435static DECLCALLBACK(VBOXSTRICTRC) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
6436{
6437 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6438 VBOXSTRICTRC rc;
6439 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6440 RT_NOREF_PV(pvUser);
6441
6442 Log9(("%s e1kIOPortOut: offPort=%RTiop value=%08x\n", pThis->szPrf, offPort, u32));
6443 if (RT_LIKELY(cb == 4))
6444 {
6445 switch (offPort)
6446 {
6447 case 0x00: /* IOADDR */
6448 pThis->uSelectedReg = u32;
6449 Log9(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
6450 rc = VINF_SUCCESS;
6451 break;
6452
6453 case 0x04: /* IODATA */
6454 Log9(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
6455 if (RT_LIKELY(!(pThis->uSelectedReg & 3)))
6456 {
6457 rc = e1kRegWriteAlignedU32(pDevIns, pThis, pThis->uSelectedReg, u32);
6458 if (rc == VINF_IOM_R3_MMIO_WRITE)
6459 rc = VINF_IOM_R3_IOPORT_WRITE;
6460 }
6461 else
6462 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
6463 "Spec violation: misaligned offset: %#10x, ignored.\n", pThis->uSelectedReg);
6464 break;
6465
6466 default:
6467 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", pThis->szPrf, offPort));
6468 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", offPort);
6469 }
6470 }
6471 else
6472 {
6473 E1kLog(("%s e1kIOPortOut: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb));
6474 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s: invalid op size: offPort=%RTiop cb=%#x\n", pThis->szPrf, offPort, cb);
6475 }
6476
6477 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6478 return rc;
6479}
6480
6481#ifdef IN_RING3
6482
6483/**
6484 * Dump complete device state to log.
6485 *
6486 * @param pThis Pointer to device state.
6487 */
6488static void e1kDumpState(PE1KSTATE pThis)
6489{
6490 RT_NOREF(pThis);
6491 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6492 E1kLog2(("%s: %8.8s = %08x\n", pThis->szPrf, g_aE1kRegMap[i].abbrev, pThis->auRegs[i]));
6493# ifdef E1K_INT_STATS
6494 LogRel(("%s: Interrupt attempts: %d\n", pThis->szPrf, pThis->uStatIntTry));
6495 LogRel(("%s: Interrupts raised : %d\n", pThis->szPrf, pThis->uStatInt));
6496 LogRel(("%s: Interrupts lowered: %d\n", pThis->szPrf, pThis->uStatIntLower));
6497 LogRel(("%s: ICR outside ISR : %d\n", pThis->szPrf, pThis->uStatNoIntICR));
6498 LogRel(("%s: IMS raised ints : %d\n", pThis->szPrf, pThis->uStatIntIMS));
6499 LogRel(("%s: Interrupts skipped: %d\n", pThis->szPrf, pThis->uStatIntSkip));
6500 LogRel(("%s: Masked interrupts : %d\n", pThis->szPrf, pThis->uStatIntMasked));
6501 LogRel(("%s: Early interrupts : %d\n", pThis->szPrf, pThis->uStatIntEarly));
6502 LogRel(("%s: Late interrupts : %d\n", pThis->szPrf, pThis->uStatIntLate));
6503 LogRel(("%s: Lost interrupts : %d\n", pThis->szPrf, pThis->iStatIntLost));
6504 LogRel(("%s: Interrupts by RX : %d\n", pThis->szPrf, pThis->uStatIntRx));
6505 LogRel(("%s: Interrupts by TX : %d\n", pThis->szPrf, pThis->uStatIntTx));
6506 LogRel(("%s: Interrupts by ICS : %d\n", pThis->szPrf, pThis->uStatIntICS));
6507 LogRel(("%s: Interrupts by RDTR: %d\n", pThis->szPrf, pThis->uStatIntRDTR));
6508 LogRel(("%s: Interrupts by RDMT: %d\n", pThis->szPrf, pThis->uStatIntRXDMT0));
6509 LogRel(("%s: Interrupts by TXQE: %d\n", pThis->szPrf, pThis->uStatIntTXQE));
6510 LogRel(("%s: TX int delay asked: %d\n", pThis->szPrf, pThis->uStatTxIDE));
6511 LogRel(("%s: TX delayed: %d\n", pThis->szPrf, pThis->uStatTxDelayed));
6512 LogRel(("%s: TX delay expired: %d\n", pThis->szPrf, pThis->uStatTxDelayExp));
6513 LogRel(("%s: TX no report asked: %d\n", pThis->szPrf, pThis->uStatTxNoRS));
6514 LogRel(("%s: TX abs timer expd : %d\n", pThis->szPrf, pThis->uStatTAD));
6515 LogRel(("%s: TX int timer expd : %d\n", pThis->szPrf, pThis->uStatTID));
6516 LogRel(("%s: RX abs timer expd : %d\n", pThis->szPrf, pThis->uStatRAD));
6517 LogRel(("%s: RX int timer expd : %d\n", pThis->szPrf, pThis->uStatRID));
6518 LogRel(("%s: TX CTX descriptors: %d\n", pThis->szPrf, pThis->uStatDescCtx));
6519 LogRel(("%s: TX DAT descriptors: %d\n", pThis->szPrf, pThis->uStatDescDat));
6520 LogRel(("%s: TX LEG descriptors: %d\n", pThis->szPrf, pThis->uStatDescLeg));
6521 LogRel(("%s: Received frames : %d\n", pThis->szPrf, pThis->uStatRxFrm));
6522 LogRel(("%s: Transmitted frames: %d\n", pThis->szPrf, pThis->uStatTxFrm));
6523 LogRel(("%s: TX frames up to 1514: %d\n", pThis->szPrf, pThis->uStatTx1514));
6524 LogRel(("%s: TX frames up to 2962: %d\n", pThis->szPrf, pThis->uStatTx2962));
6525 LogRel(("%s: TX frames up to 4410: %d\n", pThis->szPrf, pThis->uStatTx4410));
6526 LogRel(("%s: TX frames up to 5858: %d\n", pThis->szPrf, pThis->uStatTx5858));
6527 LogRel(("%s: TX frames up to 7306: %d\n", pThis->szPrf, pThis->uStatTx7306));
6528 LogRel(("%s: TX frames up to 8754: %d\n", pThis->szPrf, pThis->uStatTx8754));
6529 LogRel(("%s: TX frames up to 16384: %d\n", pThis->szPrf, pThis->uStatTx16384));
6530 LogRel(("%s: TX frames up to 32768: %d\n", pThis->szPrf, pThis->uStatTx32768));
6531 LogRel(("%s: Larger TX frames : %d\n", pThis->szPrf, pThis->uStatTxLarge));
6532 LogRel(("%s: Max TX Delay : %lld\n", pThis->szPrf, pThis->uStatMaxTxDelay));
6533# endif /* E1K_INT_STATS */
6534}
6535
6536
6537/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
6538
6539/**
6540 * Check if the device can receive data now.
6541 * This must be called before the pfnReceive() method is called.
6542 *
6543 * @returns VBox status code: VINF_SUCCESS if receiving is possible, VERR_NET_NO_BUFFER_SPACE if not.
6544 * @param pDevIns The device instance.
6545 * @param pThis The instance data.
6546 * @thread EMT
6547 */
6548static int e1kCanReceive(PPDMDEVINS pDevIns, PE1KSTATE pThis)
6549{
6550#ifndef E1K_WITH_RXD_CACHE
6551 size_t cb;
6552
6553 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6554 return VERR_NET_NO_BUFFER_SPACE;
6555
6556 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6557 {
6558 E1KRXDESC desc;
6559 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
6560 if (desc.status.fDD)
6561 cb = 0;
6562 else
6563 cb = pThis->u16RxBSize;
6564 }
6565 else if (RDH < RDT)
6566 cb = (RDT - RDH) * pThis->u16RxBSize;
6567 else if (RDH > RDT)
6568 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pThis->u16RxBSize;
6569 else
6570 {
6571 cb = 0;
6572 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6573 }
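    /*
     * Worked example (illustration only): with a 16-descriptor ring
     * (RDLEN == 16 * sizeof(E1KRXDESC)), RDH = 14 and RDT = 2 the head has
     * wrapped past the tail, so the branch above yields
     * cb = (16 - 14 + 2) * u16RxBSize, i.e. room for four buffers.
     */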
6574 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6575 pThis->szPrf, RDH, RDT, RDLEN, pThis->u16RxBSize, cb));
6576
6577 e1kCsRxLeave(pThis);
6578 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6579#else /* E1K_WITH_RXD_CACHE */
6580 int rc = VINF_SUCCESS;
6581
6582 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6583 return VERR_NET_NO_BUFFER_SPACE;
6584 E1KRXDC rxdc;
6585 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kCanReceive")))
6586 {
6587 e1kCsRxLeave(pThis);
6588 E1kLog(("%s e1kCanReceive: failed to update Rx context, returning VERR_NET_NO_BUFFER_SPACE\n", pThis->szPrf));
6589 return VERR_NET_NO_BUFFER_SPACE;
6590 }
6591
6592 if (RT_UNLIKELY(rxdc.rdlen == sizeof(E1KRXDESC)))
6593 {
6594 E1KRXDESC desc;
6595 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, rxdc.rdh), &desc, sizeof(desc));
6596 if (desc.status.fDD)
6597 rc = VERR_NET_NO_BUFFER_SPACE;
6598 }
6599 else if (e1kRxDIsCacheEmpty(pThis) && rxdc.rdh == rxdc.rdt)
6600 {
6601 /* Cache is empty, so is the RX ring. */
6602 rc = VERR_NET_NO_BUFFER_SPACE;
6603 }
6604 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
6605 " u16RxBSize=%d rc=%Rrc\n", pThis->szPrf,
6606 e1kRxDInCache(pThis), rxdc.rdh, rxdc.rdt, rxdc.rdlen, pThis->u16RxBSize, rc));
6607
6608 e1kCsRxLeave(pThis);
6609 return rc;
6610#endif /* E1K_WITH_RXD_CACHE */
6611}
6612
6613/**
6614 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6615 */
6616static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6617{
6618 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
6619 PE1KSTATE pThis = pThisCC->pShared;
6620 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6621
6622 int rc = e1kCanReceive(pDevIns, pThis);
6623
6624 if (RT_SUCCESS(rc))
6625 return VINF_SUCCESS;
6626 if (RT_UNLIKELY(cMillies == 0))
6627 return VERR_NET_NO_BUFFER_SPACE;
6628
6629 rc = VERR_INTERRUPTED;
6630 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, true);
6631 STAM_PROFILE_START(&pThis->StatRxOverflow, a);
6632 VMSTATE enmVMState;
6633 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pDevIns)) == VMSTATE_RUNNING
6634 || enmVMState == VMSTATE_RUNNING_LS))
6635 {
6636 int rc2 = e1kCanReceive(pDevIns, pThis);
6637 if (RT_SUCCESS(rc2))
6638 {
6639 rc = VINF_SUCCESS;
6640 break;
6641 }
6642 E1kLogRel(("E1000: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6643 E1kLog(("%s: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", pThis->szPrf, cMillies));
6644 PDMDevHlpSUPSemEventWaitNoResume(pDevIns, pThis->hEventMoreRxDescAvail, cMillies);
6645 }
6646 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
6647 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, false);
6648
6649 return rc;
6650}
6651
6652
6653/**
6654 * Matches the packet addresses against Receive Address table. Looks for
6655 * exact matches only.
6656 *
6657 * @returns true if address matches.
6658 * @param pThis Pointer to the state structure.
6659 * @param pvBuf The ethernet packet.
6661 * @thread EMT
6662 */
6663static bool e1kPerfectMatch(PE1KSTATE pThis, const void *pvBuf)
6664{
6665 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
6666 {
6667 E1KRAELEM* ra = pThis->aRecAddr.array + i;
6668
6669 /* Valid address? */
6670 if (ra->ctl & RA_CTL_AV)
6671 {
6672 Assert((ra->ctl & RA_CTL_AS) < 2);
6673 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6674 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6675 // pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6676 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6677 /*
6678 * Address Select:
6679 * 00b = Destination address
6680 * 01b = Source address
6681 * 10b = Reserved
6682 * 11b = Reserved
6683 * Since ethernet header is (DA, SA, len) we can use address
6684 * select as index.
6685 */
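            /*
             * Worked example (illustration only): with AS = 00b the memcmp
             * below starts at pvBuf + 0 and compares the destination MAC
             * (bytes 0..5); with AS = 01b it starts at pvBuf + 6 and compares
             * the source MAC (bytes 6..11).
             */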
6686 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6687 ra->addr, sizeof(ra->addr)) == 0)
6688 return true;
6689 }
6690 }
6691
6692 return false;
6693}
6694
6695/**
6696 * Matches the packet addresses against Multicast Table Array.
6697 *
6698 * @remarks This is an imperfect match since it matches a subset of addresses
6699 * rather than one exact address.
6700 *
6701 * @returns true if address matches.
6702 * @param pThis Pointer to the state structure.
6703 * @param pvBuf The ethernet packet.
6705 * @thread EMT
6706 */
6707static bool e1kImperfectMatch(PE1KSTATE pThis, const void *pvBuf)
6708{
6709 /* Get bits 32..47 of destination address */
6710 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6711
6712 unsigned offset = GET_BITS(RCTL, MO);
6713 /*
6714 * offset means:
6715 * 00b = bits 36..47
6716 * 01b = bits 35..46
6717 * 10b = bits 34..45
6718 * 11b = bits 32..43
6719 */
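    /*
     * Worked example (illustration only): u16Bit holds bytes 4..5 of the
     * destination MAC.  For MO = 00b the code keeps the upper 12 of those 16
     * bits (u16Bit >> 4); for MO = 11b it keeps the lower 12 bits unshifted.
     * The resulting 12-bit value indexes the 4096-bit Multicast Table Array.
     */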
6720 if (offset < 3)
6721 u16Bit = u16Bit >> (4 - offset);
6722 return ASMBitTest(pThis->auMTA, u16Bit & 0xFFF);
6723}
6724
6725/**
6726 * Determines if the packet is to be delivered to upper layer.
6727 *
6728 * The following filters are supported:
6729 * - Exact Unicast/Multicast
6730 * - Promiscuous Unicast/Multicast
6731 * - Multicast
6732 * - VLAN
6733 *
6734 * @returns true if packet is intended for this node.
6735 * @param pThis Pointer to the state structure.
6736 * @param pvBuf The ethernet packet.
6737 * @param cb Number of bytes available in the packet.
6738 * @param pStatus Bit field to store status bits.
6739 * @thread EMT
6740 */
6741static bool e1kAddressFilter(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6742{
6743 Assert(cb > 14);
6744 /* Assume that we fail to pass exact filter. */
6745 pStatus->fPIF = false;
6746 pStatus->fVP = false;
6747 /* Discard oversized packets */
6748 if (cb > E1K_MAX_RX_PKT_SIZE)
6749 {
6750 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6751 pThis->szPrf, cb, E1K_MAX_RX_PKT_SIZE));
6752 E1K_INC_CNT32(ROC);
6753 return false;
6754 }
6755 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6756 {
6757 /* When long packet reception is disabled packets over 1522 are discarded */
6758 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6759 pThis->szPrf, cb));
6760 E1K_INC_CNT32(ROC);
6761 return false;
6762 }
6763
6764 uint16_t *u16Ptr = (uint16_t*)pvBuf;
6765 /* Compare TPID with VLAN Ether Type */
6766 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6767 {
6768 pStatus->fVP = true;
6769 /* Is VLAN filtering enabled? */
6770 if (RCTL & RCTL_VFE)
6771 {
6772 /* It is 802.1q packet indeed, let's filter by VID */
6773 if (RCTL & RCTL_CFIEN)
6774 {
6775 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", pThis->szPrf,
6776 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6777 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6778 !!(RCTL & RCTL_CFI)));
6779 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6780 {
6781 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6782 pThis->szPrf, E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6783 return false;
6784 }
6785 }
6786 else
6787 E1kLog3(("%s VLAN filter: VLAN=%d\n", pThis->szPrf,
6788 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6789 if (!ASMBitTest(pThis->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6790 {
6791 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6792 pThis->szPrf, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6793 return false;
6794 }
6795 }
6796 }
6797 /* Broadcast filtering */
6798 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6799 return true;
6800 E1kLog2(("%s Packet filter: not a broadcast\n", pThis->szPrf));
6801 if (e1kIsMulticast(pvBuf))
6802 {
6803 /* Is multicast promiscuous enabled? */
6804 if (RCTL & RCTL_MPE)
6805 return true;
6806 E1kLog2(("%s Packet filter: no promiscuous multicast\n", pThis->szPrf));
6807 /* Try perfect matches first */
6808 if (e1kPerfectMatch(pThis, pvBuf))
6809 {
6810 pStatus->fPIF = true;
6811 return true;
6812 }
6813 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6814 if (e1kImperfectMatch(pThis, pvBuf))
6815 return true;
6816 E1kLog2(("%s Packet filter: no imperfect match\n", pThis->szPrf));
6817 }
6818 else {
6819 /* Is unicast promiscuous enabled? */
6820 if (RCTL & RCTL_UPE)
6821 return true;
6822 E1kLog2(("%s Packet filter: no promiscuous unicast\n", pThis->szPrf));
6823 if (e1kPerfectMatch(pThis, pvBuf))
6824 {
6825 pStatus->fPIF = true;
6826 return true;
6827 }
6828 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6829 }
6830 E1kLog2(("%s Packet filter: packet discarded\n", pThis->szPrf));
6831 return false;
6832}
6833
6834/**
6835 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6836 */
6837static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6838{
6839 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
6840 PE1KSTATE pThis = pThisCC->pShared;
6841 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6842 int rc = VINF_SUCCESS;
6843
6844 /*
6845 * Drop packets if the VM is not running yet/anymore.
6846 */
6847 VMSTATE enmVMState = PDMDevHlpVMState(pDevIns);
6848 if ( enmVMState != VMSTATE_RUNNING
6849 && enmVMState != VMSTATE_RUNNING_LS)
6850 {
6851 E1kLog(("%s Dropping incoming packet as VM is not running.\n", pThis->szPrf));
6852 return VINF_SUCCESS;
6853 }
6854
6855 /* Discard incoming packets if receive is disabled, the device is locked, or the link is down. */
6856 if (!(RCTL & RCTL_EN) || pThis->fLocked || !(STATUS & STATUS_LU))
6857 {
6858 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", pThis->szPrf));
6859 return VINF_SUCCESS;
6860 }
6861
6862 STAM_PROFILE_ADV_START(&pThis->StatReceive, a);
6863
6864 //if (!e1kCsEnter(pThis, RT_SRC_POS))
6865 // return VERR_PERMISSION_DENIED;
6866
6867 e1kPacketDump(pDevIns, pThis, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6868
6869 /* Update stats */
6870 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
6871 {
6872 E1K_INC_CNT32(TPR);
6873 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6874 e1kCsLeave(pThis);
6875 }
6876 STAM_PROFILE_ADV_START(&pThis->StatReceiveFilter, a);
6877 E1KRXDST status;
6878 RT_ZERO(status);
6879 bool fPassed = e1kAddressFilter(pThis, pvBuf, cb, &status);
6880 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveFilter, a);
6881 if (fPassed)
6882 {
6883 rc = e1kHandleRxPacket(pDevIns, pThis, pvBuf, cb, status);
6884 }
6885 //e1kCsLeave(pThis);
6886 STAM_PROFILE_ADV_STOP(&pThis->StatReceive, a);
6887
6888 return rc;
6889}
6890
6891
6892/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
6893
6894/**
6895 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
6896 */
6897static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6898{
6899 if (iLUN == 0)
6900 {
6901 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, ILeds);
6902 *ppLed = &pThisCC->pShared->led;
6903 return VINF_SUCCESS;
6904 }
6905 return VERR_PDM_LUN_NOT_FOUND;
6906}
6907
6908
6909/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
6910
6911/**
6912 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
6913 */
6914static DECLCALLBACK(int) e1kR3GetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6915{
6916 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
6917 pThisCC->eeprom.getMac(pMac);
6918 return VINF_SUCCESS;
6919}
6920
6921/**
6922 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
6923 */
6924static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kR3GetLinkState(PPDMINETWORKCONFIG pInterface)
6925{
6926 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
6927 PE1KSTATE pThis = pThisCC->pShared;
6928 if (STATUS & STATUS_LU)
6929 return PDMNETWORKLINKSTATE_UP;
6930 return PDMNETWORKLINKSTATE_DOWN;
6931}
6932
6933/**
6934 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
6935 */
6936static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6937{
6938 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
6939 PE1KSTATE pThis = pThisCC->pShared;
6940 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6941
6942 E1kLog(("%s e1kR3SetLinkState: enmState=%d\n", pThis->szPrf, enmState));
6943 switch (enmState)
6944 {
6945 case PDMNETWORKLINKSTATE_UP:
6946 pThis->fCableConnected = true;
6947 /* If link was down, bring it up after a while. */
6948 if (!(STATUS & STATUS_LU))
6949 e1kBringLinkUpDelayed(pDevIns, pThis);
6950 break;
6951 case PDMNETWORKLINKSTATE_DOWN:
6952 pThis->fCableConnected = false;
6953 /* Always set the phy link state to down, regardless of the STATUS_LU bit.
6954 * We might have to set the link state before the driver initializes us. */
6955 Phy::setLinkStatus(&pThis->phy, false);
6956 /* If link was up, bring it down. */
6957 if (STATUS & STATUS_LU)
6958 e1kR3LinkDown(pDevIns, pThis, pThisCC);
6959 break;
6960 case PDMNETWORKLINKSTATE_DOWN_RESUME:
6961 /*
6962 * There is not much sense in bringing down the link if it has not come up yet.
6963 * If it is up though, we bring it down temporarily, then bring it up again.
6964 */
6965 if (STATUS & STATUS_LU)
6966 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
6967 break;
6968 default:
6969 ;
6970 }
6971 return VINF_SUCCESS;
6972}
6973
6974
6975/* -=-=-=-=- PDMIBASE -=-=-=-=- */
6976
6977/**
6978 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6979 */
6980static DECLCALLBACK(void *) e1kR3QueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6981{
6982 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, IBase);
6983 Assert(&pThisCC->IBase == pInterface);
6984
6985 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThisCC->IBase);
6986 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThisCC->INetworkDown);
6987 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThisCC->INetworkConfig);
6988 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThisCC->ILeds);
6989 return NULL;
6990}
6991
6992
6993/* -=-=-=-=- Saved State -=-=-=-=- */
6994
6995/**
6996 * Saves the configuration.
6997 *
6998 * @param pThis The E1K state.
6999 * @param pSSM The handle to the saved state.
7000 */
7001static void e1kSaveConfig(PCPDMDEVHLPR3 pHlp, PE1KSTATE pThis, PSSMHANDLE pSSM)
7002{
7003 pHlp->pfnSSMPutMem(pSSM, &pThis->macConfigured, sizeof(pThis->macConfigured));
7004 pHlp->pfnSSMPutU32(pSSM, pThis->eChip);
7005}
7006
7007/**
7008 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
7009 */
7010static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
7011{
7012 RT_NOREF(uPass);
7013 e1kSaveConfig(pDevIns->pHlpR3, PDMDEVINS_2_DATA(pDevIns, PE1KSTATE), pSSM);
7014 return VINF_SSM_DONT_CALL_AGAIN;
7015}
7016
7017/**
7018 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
7019 */
7020static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7021{
7022 RT_NOREF(pSSM);
7023 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7024
7025 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
7026 if (RT_UNLIKELY(rc != VINF_SUCCESS))
7027 return rc;
7028 e1kCsLeave(pThis);
7029 return VINF_SUCCESS;
7030#if 0
7031 /* 1) Prevent all threads from modifying the state and memory */
7032 //pThis->fLocked = true;
7033 /* 2) Cancel all timers */
7034#ifdef E1K_TX_DELAY
7035 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
7036#endif /* E1K_TX_DELAY */
7037//#ifdef E1K_USE_TX_TIMERS
7038 if (pThis->fTidEnabled)
7039 {
7040 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
7041#ifndef E1K_NO_TAD
7042 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
7043#endif /* E1K_NO_TAD */
7044 }
7045//#endif /* E1K_USE_TX_TIMERS */
7046#ifdef E1K_USE_RX_TIMERS
7047 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
7048 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
7049#endif /* E1K_USE_RX_TIMERS */
7050 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
7051 /* 3) Did I forget anything? */
7052 E1kLog(("%s Locked\n", pThis->szPrf));
7053 return VINF_SUCCESS;
7054#endif
7055}
7056
7057/**
7058 * @callback_method_impl{FNSSMDEVSAVEEXEC}
7059 */
7060static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7061{
7062 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7063 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7064 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
7065
7066 e1kSaveConfig(pHlp, pThis, pSSM);
7067 pThisCC->eeprom.save(pHlp, pSSM);
7068 e1kDumpState(pThis);
7069 pHlp->pfnSSMPutMem(pSSM, pThis->auRegs, sizeof(pThis->auRegs));
7070 pHlp->pfnSSMPutBool(pSSM, pThis->fIntRaised);
7071 Phy::saveState(pHlp, pSSM, &pThis->phy);
7072 pHlp->pfnSSMPutU32(pSSM, pThis->uSelectedReg);
7073 pHlp->pfnSSMPutMem(pSSM, pThis->auMTA, sizeof(pThis->auMTA));
7074 pHlp->pfnSSMPutMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
7075 pHlp->pfnSSMPutMem(pSSM, pThis->auVFTA, sizeof(pThis->auVFTA));
7076 pHlp->pfnSSMPutU64(pSSM, pThis->u64AckedAt);
7077 pHlp->pfnSSMPutU16(pSSM, pThis->u16RxBSize);
7078 //pHlp->pfnSSMPutBool(pSSM, pThis->fDelayInts);
7079 //pHlp->pfnSSMPutBool(pSSM, pThis->fIntMaskUsed);
7080 pHlp->pfnSSMPutU16(pSSM, pThis->u16TxPktLen);
7081/** @todo State wrt the TSE buffer is incomplete, so little point in
7082 * saving this actually. */
7083 pHlp->pfnSSMPutMem(pSSM, pThis->aTxPacketFallback, pThis->u16TxPktLen);
7084 pHlp->pfnSSMPutBool(pSSM, pThis->fIPcsum);
7085 pHlp->pfnSSMPutBool(pSSM, pThis->fTCPcsum);
7086 pHlp->pfnSSMPutMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
7087 pHlp->pfnSSMPutMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
7088 pHlp->pfnSSMPutBool(pSSM, pThis->fVTag);
7089 pHlp->pfnSSMPutU16(pSSM, pThis->u16VTagTCI);
7090#ifdef E1K_WITH_TXD_CACHE
7091# if 0
7092 pHlp->pfnSSMPutU8(pSSM, pThis->nTxDFetched);
7093 pHlp->pfnSSMPutMem(pSSM, pThis->aTxDescriptors,
7094 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
7095# else
7096 /*
7097 * There is no point in storing TX descriptor cache entries as we can simply
7098 * fetch them again. Moreover, normally the cache is always empty when we
7099 * save the state. Store zero entries for compatibility.
7100 */
7101 pHlp->pfnSSMPutU8(pSSM, 0);
7102# endif
7103#endif /* E1K_WITH_TXD_CACHE */
7104/** @todo GSO requires some more state here. */
7105 E1kLog(("%s State has been saved\n", pThis->szPrf));
7106 return VINF_SUCCESS;
7107}
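/*
 * Note: the SSM stream is strictly positional, so the sequence of pfnSSMPut*
 * calls above must stay in lockstep with the pfnSSMGet* calls in e1kLoadExec()
 * below; adding a field means touching both and, as a rule, bumping the saved
 * state version.
 */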
7108
7109#if 0
7110/**
7111 * @callback_method_impl{FNSSMDEVSAVEDONE}
7112 */
7113static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7114{
7115 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7116
7117 /* If VM is being powered off unlocking will result in assertions in PGM */
7118 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
7119 pThis->fLocked = false;
7120 else
7121 E1kLog(("%s VM is not running -- remain locked\n", pThis->szPrf));
7122 E1kLog(("%s Unlocked\n", pThis->szPrf));
7123 return VINF_SUCCESS;
7124}
7125#endif
7126
7127/**
7128 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
7129 */
7130static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7131{
7132 RT_NOREF(pSSM);
7133 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7134
7135 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
7136 if (RT_UNLIKELY(rc != VINF_SUCCESS))
7137 return rc;
7138 e1kCsLeave(pThis);
7139 return VINF_SUCCESS;
7140}
7141
7142/**
7143 * @callback_method_impl{FNSSMDEVLOADEXEC}
7144 */
7145static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
7146{
7147 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7148 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7149 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
7150 int rc;
7151
7152 if ( uVersion != E1K_SAVEDSTATE_VERSION
7153#ifdef E1K_WITH_TXD_CACHE
7154 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
7155#endif /* E1K_WITH_TXD_CACHE */
7156 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
7157 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
7158 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
7159
7160 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
7161 || uPass != SSM_PASS_FINAL)
7162 {
7163 /* config checks */
7164 RTMAC macConfigured;
7165 rc = pHlp->pfnSSMGetMem(pSSM, &macConfigured, sizeof(macConfigured));
7166 AssertRCReturn(rc, rc);
7167 if ( memcmp(&macConfigured, &pThis->macConfigured, sizeof(macConfigured))
7168 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
7169 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
7170
7171 E1KCHIP eChip;
7172 rc = pHlp->pfnSSMGetU32(pSSM, &eChip);
7173 AssertRCReturn(rc, rc);
7174 if (eChip != pThis->eChip)
7175 return pHlp->pfnSSMSetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
7176 }
7177
7178 if (uPass == SSM_PASS_FINAL)
7179 {
7180 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
7181 {
7182 rc = pThisCC->eeprom.load(pHlp, pSSM);
7183 AssertRCReturn(rc, rc);
7184 }
7185 /* the state */
7186 pHlp->pfnSSMGetMem(pSSM, &pThis->auRegs, sizeof(pThis->auRegs));
7187 pHlp->pfnSSMGetBool(pSSM, &pThis->fIntRaised);
7188 /** @todo PHY could be made a separate device with its own versioning */
7189 Phy::loadState(pHlp, pSSM, &pThis->phy);
7190 pHlp->pfnSSMGetU32(pSSM, &pThis->uSelectedReg);
7191 pHlp->pfnSSMGetMem(pSSM, &pThis->auMTA, sizeof(pThis->auMTA));
7192 pHlp->pfnSSMGetMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
7193 pHlp->pfnSSMGetMem(pSSM, &pThis->auVFTA, sizeof(pThis->auVFTA));
7194 pHlp->pfnSSMGetU64(pSSM, &pThis->u64AckedAt);
7195 pHlp->pfnSSMGetU16(pSSM, &pThis->u16RxBSize);
7196 //pHlp->pfnSSMGetBool(pSSM, pThis->fDelayInts);
7197 //pHlp->pfnSSMGetBool(pSSM, pThis->fIntMaskUsed);
7198 rc = pHlp->pfnSSMGetU16(pSSM, &pThis->u16TxPktLen);
7199 AssertRCReturn(rc, rc);
7200 if (pThis->u16TxPktLen > sizeof(pThis->aTxPacketFallback))
7201 pThis->u16TxPktLen = sizeof(pThis->aTxPacketFallback);
7202 pHlp->pfnSSMGetMem(pSSM, &pThis->aTxPacketFallback[0], pThis->u16TxPktLen);
7203 pHlp->pfnSSMGetBool(pSSM, &pThis->fIPcsum);
7204 pHlp->pfnSSMGetBool(pSSM, &pThis->fTCPcsum);
7205 pHlp->pfnSSMGetMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
7206 rc = pHlp->pfnSSMGetMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
7207 AssertRCReturn(rc, rc);
7208 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
7209 {
7210 pHlp->pfnSSMGetBool(pSSM, &pThis->fVTag);
7211 rc = pHlp->pfnSSMGetU16(pSSM, &pThis->u16VTagTCI);
7212 AssertRCReturn(rc, rc);
7213 }
7214 else
7215 {
7216 pThis->fVTag = false;
7217 pThis->u16VTagTCI = 0;
7218 }
7219#ifdef E1K_WITH_TXD_CACHE
7220 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
7221 {
7222 rc = pHlp->pfnSSMGetU8(pSSM, &pThis->nTxDFetched);
7223 AssertRCReturn(rc, rc);
7224 if (pThis->nTxDFetched)
7225 pHlp->pfnSSMGetMem(pSSM, pThis->aTxDescriptors,
7226 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
7227 }
7228 else
7229 pThis->nTxDFetched = 0;
7230 /**
7231 * @todo Perhaps we should not store TXD cache as the entries can be
7232 * simply fetched again from guest's memory. Or can't they?
7233 */
7234#endif /* E1K_WITH_TXD_CACHE */
7235#ifdef E1K_WITH_RXD_CACHE
7236 /*
7237 * There is no point in storing the RX descriptor cache in the saved
7238 * state, we just need to make sure it is empty.
7239 */
7240 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
7241#endif /* E1K_WITH_RXD_CACHE */
7242 rc = pHlp->pfnSSMHandleGetStatus(pSSM);
7243 AssertRCReturn(rc, rc);
7244
7245 /* derived state */
7246 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
7247
7248 E1kLog(("%s State has been restored\n", pThis->szPrf));
7249 e1kDumpState(pThis);
7250 }
7251 return VINF_SUCCESS;
7252}
7253
7254/**
7255 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
7256 */
7257static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7258{
7259 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7260 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7261 RT_NOREF(pSSM);
7262
7263 /* Update promiscuous mode */
7264 if (pThisCC->pDrvR3)
7265 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, !!(RCTL & (RCTL_UPE | RCTL_MPE)));
7266
7267 /*
7268 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
7269 * passed to us. We go through all this stuff if the link was up and we
7270 * weren't teleported.
7271 */
7272 if ( (STATUS & STATUS_LU)
7273 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
7274 && pThis->cMsLinkUpDelay)
7275 {
7276 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
7277 }
7278 return VINF_SUCCESS;
7279}
7280
7281
7282
7283/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
7284
7285/**
7286 * @callback_method_impl{FNRTSTRFORMATTYPE}
7287 */
7288static DECLCALLBACK(size_t) e1kFmtRxDesc(PFNRTSTROUTPUT pfnOutput,
7289 void *pvArgOutput,
7290 const char *pszType,
7291 void const *pvValue,
7292 int cchWidth,
7293 int cchPrecision,
7294 unsigned fFlags,
7295 void *pvUser)
7296{
7297 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7298 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
7299 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
7300 if (!pDesc)
7301 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
7302
7303 size_t cbPrintf = 0;
7304 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
7305 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
7306 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
7307 pDesc->status.fPIF ? "PIF" : "pif",
7308 pDesc->status.fIPCS ? "IPCS" : "ipcs",
7309 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
7310 pDesc->status.fVP ? "VP" : "vp",
7311 pDesc->status.fIXSM ? "IXSM" : "ixsm",
7312 pDesc->status.fEOP ? "EOP" : "eop",
7313 pDesc->status.fDD ? "DD" : "dd",
7314 pDesc->status.fRXE ? "RXE" : "rxe",
7315 pDesc->status.fIPE ? "IPE" : "ipe",
7316 pDesc->status.fTCPE ? "TCPE" : "tcpe",
7317 pDesc->status.fCE ? "CE" : "ce",
7318 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
7319 E1K_SPEC_VLAN(pDesc->status.u16Special),
7320 E1K_SPEC_PRI(pDesc->status.u16Special));
7321 return cbPrintf;
7322}
7323
7324/**
7325 * @callback_method_impl{FNRTSTRFORMATTYPE}
7326 */
7327static DECLCALLBACK(size_t) e1kFmtTxDesc(PFNRTSTROUTPUT pfnOutput,
7328 void *pvArgOutput,
7329 const char *pszType,
7330 void const *pvValue,
7331 int cchWidth,
7332 int cchPrecision,
7333 unsigned fFlags,
7334 void *pvUser)
7335{
7336 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7337 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
7338 E1KTXDESC *pDesc = (E1KTXDESC*)pvValue;
7339 if (!pDesc)
7340 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
7341
7342 size_t cbPrintf = 0;
7343 switch (e1kGetDescType(pDesc))
7344 {
7345 case E1K_DTYP_CONTEXT:
7346 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
7347 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
7348 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
7349 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
7350 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
7351 pDesc->context.dw2.fIDE ? " IDE":"",
7352 pDesc->context.dw2.fRS ? " RS" :"",
7353 pDesc->context.dw2.fTSE ? " TSE":"",
7354 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
7355 pDesc->context.dw2.fTCP ? "TCP":"UDP",
7356 pDesc->context.dw2.u20PAYLEN,
7357 pDesc->context.dw3.u8HDRLEN,
7358 pDesc->context.dw3.u16MSS,
7359 pDesc->context.dw3.fDD?"DD":"");
7360 break;
7361 case E1K_DTYP_DATA:
7362 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
7363 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
7364 pDesc->data.u64BufAddr,
7365 pDesc->data.cmd.u20DTALEN,
7366 pDesc->data.cmd.fIDE ? " IDE" :"",
7367 pDesc->data.cmd.fVLE ? " VLE" :"",
7368 pDesc->data.cmd.fRPS ? " RPS" :"",
7369 pDesc->data.cmd.fRS ? " RS" :"",
7370 pDesc->data.cmd.fTSE ? " TSE" :"",
7371 pDesc->data.cmd.fIFCS? " IFCS":"",
7372 pDesc->data.cmd.fEOP ? " EOP" :"",
7373 pDesc->data.dw3.fDD ? " DD" :"",
7374 pDesc->data.dw3.fEC ? " EC" :"",
7375 pDesc->data.dw3.fLC ? " LC" :"",
7376 pDesc->data.dw3.fTXSM? " TXSM":"",
7377 pDesc->data.dw3.fIXSM? " IXSM":"",
7378 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
7379 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
7380 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
7381 break;
7382 case E1K_DTYP_LEGACY:
7383 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
7384 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
7385 pDesc->data.u64BufAddr,
7386 pDesc->legacy.cmd.u16Length,
7387 pDesc->legacy.cmd.fIDE ? " IDE" :"",
7388 pDesc->legacy.cmd.fVLE ? " VLE" :"",
7389 pDesc->legacy.cmd.fRPS ? " RPS" :"",
7390 pDesc->legacy.cmd.fRS ? " RS" :"",
7391 pDesc->legacy.cmd.fIC ? " IC" :"",
7392 pDesc->legacy.cmd.fIFCS? " IFCS":"",
7393 pDesc->legacy.cmd.fEOP ? " EOP" :"",
7394 pDesc->legacy.dw3.fDD ? " DD" :"",
7395 pDesc->legacy.dw3.fEC ? " EC" :"",
7396 pDesc->legacy.dw3.fLC ? " LC" :"",
7397 pDesc->legacy.cmd.u8CSO,
7398 pDesc->legacy.dw3.u8CSS,
7399 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
7400 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
7401 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
7402 break;
7403 default:
7404 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
7405 break;
7406 }
7407
7408 return cbPrintf;
7409}
7410
7411/** Initializes debug helpers (logging format types). */
7412static int e1kInitDebugHelpers(void)
7413{
7414 int rc = VINF_SUCCESS;
7415 static bool s_fHelpersRegistered = false;
7416 if (!s_fHelpersRegistered)
7417 {
7418 s_fHelpersRegistered = true;
7419 rc = RTStrFormatTypeRegister("e1krxd", e1kFmtRxDesc, NULL);
7420 AssertRCReturn(rc, rc);
7421 rc = RTStrFormatTypeRegister("e1ktxd", e1kFmtTxDesc, NULL);
7422 AssertRCReturn(rc, rc);
7423 }
7424 return rc;
7425}
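/*
 * Note: once e1kInitDebugHelpers() has run, descriptors can be dumped with
 * the custom format specifiers registered above, e.g.
 *     pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", GCPhysDesc, &desc);
 * (GCPhysDesc and desc stand for any descriptor address and contents), as
 * e1kInfo() below does for both descriptor rings.
 */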
7426
7427/**
7428 * Status info callback.
7429 *
7430 * @param pDevIns The device instance.
7431 * @param pHlp The output helpers.
7432 * @param pszArgs The arguments.
7433 */
7434static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
7435{
7436 RT_NOREF(pszArgs);
7437 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7438 unsigned i;
7439 // bool fRcvRing = false;
7440 // bool fXmtRing = false;
7441
7442 /*
7443 * Parse args.
7444 if (pszArgs)
7445 {
7446 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
7447 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
7448 }
7449 */
7450
7451 /*
7452 * Show info.
7453 */
7454 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%04x mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7455 pDevIns->iInstance,
7456 PDMDevHlpIoPortGetMappingAddress(pDevIns, pThis->hIoPorts),
7457 PDMDevHlpMmioGetMappingAddress(pDevIns, pThis->hMmioRegion),
7458 &pThis->macConfigured, g_aChips[pThis->eChip].pcszName,
7459 pDevIns->fRCEnabled ? " RC" : "", pDevIns->fR0Enabled ? " R0" : "");
7460
7461 e1kCsEnter(pThis, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
7462
7463 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7464 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", g_aE1kRegMap[i].abbrev, pThis->auRegs[i]);
7465
7466 for (i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
7467 {
7468 E1KRAELEM* ra = pThis->aRecAddr.array + i;
7469 if (ra->ctl & RA_CTL_AV)
7470 {
7471 const char *pcszTmp;
7472 switch (ra->ctl & RA_CTL_AS)
7473 {
7474 case 0: pcszTmp = "DST"; break;
7475 case 1: pcszTmp = "SRC"; break;
7476 default: pcszTmp = "reserved";
7477 }
7478 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7479 }
7480 }
7481 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7482 uint32_t rdh = RDH;
7483 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7484 for (i = 0; i < cDescs; ++i)
7485 {
7486 E1KRXDESC desc;
7487 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7488 &desc, sizeof(desc));
7489 if (i == rdh)
7490 pHlp->pfnPrintf(pHlp, ">>> ");
7491 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7492 }
7493#ifdef E1K_WITH_RXD_CACHE
7494 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7495 pThis->iRxDCurrent, RDH, pThis->nRxDFetched, E1K_RXD_CACHE_SIZE);
7496 if (rdh > pThis->iRxDCurrent)
7497 rdh -= pThis->iRxDCurrent;
7498 else
7499 rdh = cDescs + rdh - pThis->iRxDCurrent;
7500 for (i = 0; i < pThis->nRxDFetched; ++i)
7501 {
7502 if (i == pThis->iRxDCurrent)
7503 pHlp->pfnPrintf(pHlp, ">>> ");
7504 if (cDescs)
7505 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7506 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7507 &pThis->aRxDescriptors[i]);
7508 else
7509 pHlp->pfnPrintf(pHlp, "<lost>: %R[e1krxd]\n",
7510 &pThis->aRxDescriptors[i]);
7511 }
7512#endif /* E1K_WITH_RXD_CACHE */
7513
7514 cDescs = TDLEN / sizeof(E1KTXDESC);
7515 uint32_t tdh = TDH;
7516 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7517 for (i = 0; i < cDescs; ++i)
7518 {
7519 E1KTXDESC desc;
7520 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7521 &desc, sizeof(desc));
7522 if (i == tdh)
7523 pHlp->pfnPrintf(pHlp, ">>> ");
7524 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7525 }
7526#ifdef E1K_WITH_TXD_CACHE
7527 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7528 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE);
7529 if (tdh > pThis->iTxDCurrent)
7530 tdh -= pThis->iTxDCurrent;
7531 else
7532 tdh = cDescs + tdh - pThis->iTxDCurrent;
7533 for (i = 0; i < pThis->nTxDFetched; ++i)
7534 {
7535 if (i == pThis->iTxDCurrent)
7536 pHlp->pfnPrintf(pHlp, ">>> ");
7537 if (cDescs)
7538 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7539 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7540 &pThis->aTxDescriptors[i]);
7541 else
7542 pHlp->pfnPrintf(pHlp, "<lost>: %R[e1ktxd]\n",
7543 &pThis->aTxDescriptors[i]);
7544 }
7545#endif /* E1K_WITH_TXD_CACHE */
7546
7547
7548#ifdef E1K_INT_STATS
7549 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pThis->uStatIntTry);
7550 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pThis->uStatInt);
7551 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pThis->uStatIntLower);
7552 pHlp->pfnPrintf(pHlp, "ICR outside ISR : %d\n", pThis->uStatNoIntICR);
7553 pHlp->pfnPrintf(pHlp, "IMS raised ints : %d\n", pThis->uStatIntIMS);
7554 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pThis->uStatIntSkip);
7555 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pThis->uStatIntMasked);
7556 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pThis->uStatIntEarly);
7557 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pThis->uStatIntLate);
7558 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pThis->iStatIntLost);
7559 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pThis->uStatIntRx);
7560 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pThis->uStatIntTx);
7561 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pThis->uStatIntICS);
7562 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pThis->uStatIntRDTR);
7563 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pThis->uStatIntRXDMT0);
7564 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pThis->uStatIntTXQE);
7565 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pThis->uStatTxIDE);
7566 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pThis->uStatTxDelayed);
7567 pHlp->pfnPrintf(pHlp, "TX delay expired : %d\n", pThis->uStatTxDelayExp);
7568 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pThis->uStatTxNoRS);
7569 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pThis->uStatTAD);
7570 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pThis->uStatTID);
7571 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pThis->uStatRAD);
7572 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pThis->uStatRID);
7573 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pThis->uStatDescCtx);
7574 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pThis->uStatDescDat);
7575 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pThis->uStatDescLeg);
7576 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pThis->uStatRxFrm);
7577 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pThis->uStatTxFrm);
7578 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pThis->uStatTx1514);
7579 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pThis->uStatTx2962);
7580 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pThis->uStatTx4410);
7581 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pThis->uStatTx5858);
7582 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pThis->uStatTx7306);
7583 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pThis->uStatTx8754);
7584 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pThis->uStatTx16384);
7585 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pThis->uStatTx32768);
7586 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pThis->uStatTxLarge);
7587#endif /* E1K_INT_STATS */
7588
7589 e1kCsLeave(pThis);
7590}
7591
7592
7593
7594/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
7595
7596/**
7597 * Detach notification.
7598 *
7599 * One port on the network card has been disconnected from the network.
7600 *
7601 * @param pDevIns The device instance.
7602 * @param iLUN The logical unit which is being detached.
7603 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7604 */
7605static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7606{
7607 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7608 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7609 Log(("%s e1kR3Detach:\n", pThis->szPrf));
7610 RT_NOREF(fFlags);
7611
7612 AssertLogRelReturnVoid(iLUN == 0);
7613
7614 PDMDevHlpCritSectEnter(pDevIns, &pThis->cs, VERR_SEM_BUSY);
7615
7616 /** @todo r=pritesh still need to check if i missed
7617 * to clean something in this function
7618 */
7619
7620 /*
7621 * Zero some important members.
7622 */
7623 pThisCC->pDrvBase = NULL;
7624 pThisCC->pDrvR3 = NULL;
7625#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
7626 pThisR0->pDrvR0 = NIL_RTR0PTR;
7627 pThisRC->pDrvRC = NIL_RTRCPTR;
7628#endif
7629
7630 PDMDevHlpCritSectLeave(pDevIns, &pThis->cs);
7631}
7632
7633/**
7634 * Attach notification.
7635 *
7636 * One port on the network card has been connected to a network.
7637 *
7638 * @returns VBox status code.
7639 * @param pDevIns The device instance.
7640 * @param iLUN The logical unit which is being attached.
7641 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7642 *
7643 * @remarks This code path is not used during construction.
7644 */
7645static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7646{
7647 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7648 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7649 LogFlow(("%s e1kR3Attach:\n", pThis->szPrf));
7650 RT_NOREF(fFlags);
7651
7652 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7653
7654 PDMDevHlpCritSectEnter(pDevIns, &pThis->cs, VERR_SEM_BUSY);
7655
7656 /*
7657 * Attach the driver.
7658 */
7659 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThisCC->IBase, &pThisCC->pDrvBase, "Network Port");
7660 if (RT_SUCCESS(rc))
7661 {
7662 pThisCC->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMINETWORKUP);
7663 AssertMsgStmt(pThisCC->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7664 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7665 if (RT_SUCCESS(rc))
7666 {
7667#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
7668 pThisR0->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7669 pThisRC->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7670#endif
7671 }
7672 }
7673 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7674 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7675 {
7676 /* This should never happen because this function is not called
7677 * if there is no driver to attach! */
7678 Log(("%s No attached driver!\n", pThis->szPrf));
7679 }
7680
7681 /*
7682 * Temporarily set the link down if it was up so that the guest will know
7683 * that we have changed the configuration of the network card.
7684 */
7685 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7686 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
7687
7688 PDMDevHlpCritSectLeave(pDevIns, &pThis->cs);
7689 return rc;
7690}
7691
7692/**
7693 * @copydoc FNPDMDEVPOWEROFF
7694 */
7695static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7696{
7697 /* Poke thread waiting for buffer space. */
7698 e1kWakeupReceive(pDevIns, PDMDEVINS_2_DATA(pDevIns, PE1KSTATE));
7699}
7700
7701/**
7702 * @copydoc FNPDMDEVRESET
7703 */
7704static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7705{
7706 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7707 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7708#ifdef E1K_TX_DELAY
7709 e1kCancelTimer(pDevIns, pThis, pThis->hTXDTimer);
7710#endif /* E1K_TX_DELAY */
7711 e1kCancelTimer(pDevIns, pThis, pThis->hIntTimer);
7712 e1kCancelTimer(pDevIns, pThis, pThis->hLUTimer);
7713 e1kXmitFreeBuf(pThis, pThisCC);
7714 pThis->u16TxPktLen = 0;
7715 pThis->fIPcsum = false;
7716 pThis->fTCPcsum = false;
7717 pThis->fIntMaskUsed = false;
7718 pThis->fDelayInts = false;
7719 pThis->fLocked = false;
7720 pThis->u64AckedAt = 0;
7721 e1kR3HardReset(pDevIns, pThis, pThisCC);
7722}
7723
7724/**
7725 * @copydoc FNPDMDEVSUSPEND
7726 */
7727static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7728{
7729 /* Poke thread waiting for buffer space. */
7730 e1kWakeupReceive(pDevIns, PDMDEVINS_2_DATA(pDevIns, PE1KSTATE));
7731}
7732
7733/**
7734 * Device relocation callback.
7735 *
7736 * When this callback is called, the device instance data (and the GC
7737 * component, if the device has one) is being relocated and/or the selectors
7738 * have been changed. The device must use this chance to perform the
7739 * necessary pointer relocations and data updates.
7740 *
7741 * Before the GC code is executed the first time, this function will be
7742 * called with a 0 delta so GC pointer calculations can be done in one place.
7743 *
7744 * @param pDevIns Pointer to the device instance.
7745 * @param offDelta The relocation delta relative to the old location.
7746 *
7747 * @remark A relocation CANNOT fail.
7748 */
7749static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7750{
7751 PE1KSTATERC pThisRC = PDMINS_2_DATA_RC(pDevIns, PE1KSTATERC);
7752 if (pThisRC)
7753 pThisRC->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7754 RT_NOREF(offDelta);
7755}
7756
7757/**
7758 * Destruct a device instance.
7759 *
7760 * We need to free non-VM resources only.
7761 *
7762 * @returns VBox status code.
7763 * @param pDevIns The device instance data.
7764 * @thread EMT
7765 */
7766static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7767{
7768 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7769 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7770
7771 e1kDumpState(pThis);
7772 E1kLog(("%s Destroying instance\n", pThis->szPrf));
7773 if (PDMDevHlpCritSectIsInitialized(pDevIns, &pThis->cs))
7774 {
7775 if (pThis->hEventMoreRxDescAvail != NIL_SUPSEMEVENT)
7776 {
7777 PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventMoreRxDescAvail);
7778 RTThreadYield();
7779 PDMDevHlpSUPSemEventClose(pDevIns, pThis->hEventMoreRxDescAvail);
7780 pThis->hEventMoreRxDescAvail = NIL_SUPSEMEVENT;
7781 }
7782#ifdef E1K_WITH_TX_CS
7783 PDMDevHlpCritSectDelete(pDevIns, &pThis->csTx);
7784#endif /* E1K_WITH_TX_CS */
7785 PDMDevHlpCritSectDelete(pDevIns, &pThis->csRx);
7786 PDMDevHlpCritSectDelete(pDevIns, &pThis->cs);
7787 }
7788 return VINF_SUCCESS;
7789}
7790
7791
7792/**
7793 * Set PCI configuration space registers.
7794 *
7795 * @param pPciDev Pointer to the PCI device structure.
 * @param eChip The emulated adapter variant (index into g_aChips).
7796 * @thread EMT
7797 */
7798static void e1kR3ConfigurePciDev(PPDMPCIDEV pPciDev, E1KCHIP eChip)
7799{
7800 Assert(eChip < RT_ELEMENTS(g_aChips));
7801 /* Configure PCI Device, assume 32-bit mode ******************************/
7802 PDMPciDevSetVendorId(pPciDev, g_aChips[eChip].uPCIVendorId);
7803 PDMPciDevSetDeviceId(pPciDev, g_aChips[eChip].uPCIDeviceId);
7804 PDMPciDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_aChips[eChip].uPCISubsystemVendorId);
7805 PDMPciDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_aChips[eChip].uPCISubsystemId);
7806
7807 PDMPciDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7808 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7809 PDMPciDevSetWord( pPciDev, VBOX_PCI_STATUS,
7810 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7811 /* Stepping A2 */
7812 PDMPciDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7813 /* Ethernet adapter */
7814 PDMPciDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7815 PDMPciDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7816 /* normal single function Ethernet controller */
7817 PDMPciDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7818 /* Memory Register Base Address */
7819 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7820 /* Memory Flash Base Address */
7821 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7822 /* IO Register Base Address */
7823 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7824 /* Expansion ROM Base Address */
7825 PDMPciDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7826 /* Capabilities Pointer */
7827 PDMPciDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7828 /* Interrupt Pin: INTA# */
7829 PDMPciDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7830 /* Max_Lat/Min_Gnt: very high priority and time slice */
7831 PDMPciDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7832 PDMPciDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7833
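    /* Capability list layout: the Capabilities Pointer (0xDC) leads to the PCI Power
     * Management capability at 0xDC, which chains to PCI-X at 0xE4; with E1K_WITH_MSI
     * the PCI-X capability in turn points to the MSI capability at 0x80. */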
7834 /* PCI Power Management Registers ****************************************/
7835 /* Capability ID: PCI Power Management Registers */
7836 PDMPciDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7837 /* Next Item Pointer: PCI-X */
7838 PDMPciDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7839 /* Power Management Capabilities: PM disabled, DSI */
7840 PDMPciDevSetWord( pPciDev, 0xDC + 2,
7841 0x0002 | VBOX_PCI_PM_CAP_DSI);
7842 /* Power Management Control / Status Register: PM disabled */
7843 PDMPciDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7844 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7845 PDMPciDevSetByte( pPciDev, 0xDC + 6, 0x00);
7846 /* Data Register: PM disabled, always 0 */
7847 PDMPciDevSetByte( pPciDev, 0xDC + 7, 0x00);
7848
7849 /* PCI-X Configuration Registers *****************************************/
7850 /* Capability ID: PCI-X Configuration Registers */
7851 PDMPciDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7852#ifdef E1K_WITH_MSI
7853 PDMPciDevSetByte( pPciDev, 0xE4 + 1, 0x80);
7854#else
7855 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7856 PDMPciDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7857#endif
7858 /* PCI-X Command: Enable Relaxed Ordering */
7859 PDMPciDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7860 /* PCI-X Status: 32-bit, 66MHz */
7861 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7862 PDMPciDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
7863}
7864
7865/**
7866 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7867 */
7868static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7869{
7870 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7871 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7872 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7873 int rc;
7874
7875 /*
7876 * Initialize the instance data (state).
7877 * Note! Caller has initialized it to ZERO already.
7878 */
7879 RTStrPrintf(pThis->szPrf, sizeof(pThis->szPrf), "E1000#%d", iInstance);
7880 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", pThis->szPrf, sizeof(E1KRXDESC)));
7881 pThis->hEventMoreRxDescAvail = NIL_SUPSEMEVENT;
7882 pThis->u16TxPktLen = 0;
7883 pThis->fIPcsum = false;
7884 pThis->fTCPcsum = false;
7885 pThis->fIntMaskUsed = false;
7886 pThis->fDelayInts = false;
7887 pThis->fLocked = false;
7888 pThis->u64AckedAt = 0;
7889 pThis->led.u32Magic = PDMLED_MAGIC;
7890 pThis->u32PktNo = 1;
7891
7892 pThisCC->pDevInsR3 = pDevIns;
7893 pThisCC->pShared = pThis;
7894
7895 /* Interfaces */
7896 pThisCC->IBase.pfnQueryInterface = e1kR3QueryInterface;
7897
7898 pThisCC->INetworkDown.pfnWaitReceiveAvail = e1kR3NetworkDown_WaitReceiveAvail;
7899 pThisCC->INetworkDown.pfnReceive = e1kR3NetworkDown_Receive;
7900 pThisCC->INetworkDown.pfnXmitPending = e1kR3NetworkDown_XmitPending;
7901
7902 pThisCC->ILeds.pfnQueryStatusLed = e1kR3QueryStatusLed;
7903
7904 pThisCC->INetworkConfig.pfnGetMac = e1kR3GetMac;
7905 pThisCC->INetworkConfig.pfnGetLinkState = e1kR3GetLinkState;
7906 pThisCC->INetworkConfig.pfnSetLinkState = e1kR3SetLinkState;
7907
7908 /*
7909 * Internal validations.
7910 */
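    /* The binary-searchable part of the register map must be ordered by increasing
     * offset (with non-decreasing end offsets), otherwise lookups by offset break. */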
7911 for (uint32_t iReg = 1; iReg < E1K_NUM_OF_BINARY_SEARCHABLE; iReg++)
7912 AssertLogRelMsgReturn( g_aE1kRegMap[iReg].offset > g_aE1kRegMap[iReg - 1].offset
7913 && g_aE1kRegMap[iReg].offset + g_aE1kRegMap[iReg].size
7914 >= g_aE1kRegMap[iReg - 1].offset + g_aE1kRegMap[iReg - 1].size,
7915 ("%s@%#xLB%#x vs %s@%#xLB%#x\n",
7916 g_aE1kRegMap[iReg].abbrev, g_aE1kRegMap[iReg].offset, g_aE1kRegMap[iReg].size,
7917 g_aE1kRegMap[iReg - 1].abbrev, g_aE1kRegMap[iReg - 1].offset, g_aE1kRegMap[iReg - 1].size),
7918 VERR_INTERNAL_ERROR_4);
7919
7920 /*
7921 * Validate configuration.
7922 */
7923 PDMDEV_VALIDATE_CONFIG_RETURN(pDevIns,
7924 "MAC|"
7925 "CableConnected|"
7926 "AdapterType|"
7927 "LineSpeed|"
7928 "ItrEnabled|"
7929 "ItrRxEnabled|"
7930 "EthernetCRC|"
7931 "GSOEnabled|"
7932 "LinkUpDelay|"
7933 "StatNo",
7934 "");
7935
7936 /** @todo LineSpeed unused! */
7937
7938 /*
7939 * Get config params
7940 */
7941 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
7942 rc = pHlp->pfnCFGMQueryBytes(pCfg, "MAC", pThis->macConfigured.au8, sizeof(pThis->macConfigured.au8));
7943 if (RT_FAILURE(rc))
7944 return PDMDEV_SET_ERROR(pDevIns, rc,
7945 N_("Configuration error: Failed to get MAC address"));
7946 rc = pHlp->pfnCFGMQueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
7947 if (RT_FAILURE(rc))
7948 return PDMDEV_SET_ERROR(pDevIns, rc,
7949 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7950 rc = pHlp->pfnCFGMQueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
7951 if (RT_FAILURE(rc))
7952 return PDMDEV_SET_ERROR(pDevIns, rc,
7953 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7954 Assert(pThis->eChip <= E1K_CHIP_82545EM);
7955
7956 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
7957 if (RT_FAILURE(rc))
7958 return PDMDEV_SET_ERROR(pDevIns, rc,
7959 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7960
7961 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
7962 if (RT_FAILURE(rc))
7963 return PDMDEV_SET_ERROR(pDevIns, rc,
7964 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
7965
7966 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "ItrEnabled", &pThis->fItrEnabled, false);
7967 if (RT_FAILURE(rc))
7968 return PDMDEV_SET_ERROR(pDevIns, rc,
7969 N_("Configuration error: Failed to get the value of 'ItrEnabled'"));
7970
7971 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "ItrRxEnabled", &pThis->fItrRxEnabled, true);
7972 if (RT_FAILURE(rc))
7973 return PDMDEV_SET_ERROR(pDevIns, rc,
7974 N_("Configuration error: Failed to get the value of 'ItrRxEnabled'"));
7975
7976 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "TidEnabled", &pThis->fTidEnabled, false);
7977 if (RT_FAILURE(rc))
7978 return PDMDEV_SET_ERROR(pDevIns, rc,
7979 N_("Configuration error: Failed to get the value of 'TidEnabled'"));
7980
7981 rc = pHlp->pfnCFGMQueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 3000); /* ms */
7982 if (RT_FAILURE(rc))
7983 return PDMDEV_SET_ERROR(pDevIns, rc,
7984 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
7985 Assert(pThis->cMsLinkUpDelay <= 300000); /* at most 5 minutes */
7986 if (pThis->cMsLinkUpDelay > 5000)
7987 LogRel(("%s: WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
7988 else if (pThis->cMsLinkUpDelay == 0)
7989 LogRel(("%s: WARNING! Link up delay is disabled!\n", pThis->szPrf));
7990
7991 uint32_t uStatNo = (uint32_t)iInstance;
7992 rc = pHlp->pfnCFGMQueryU32Def(pCfg, "StatNo", &uStatNo, (uint32_t)iInstance);
7993 if (RT_FAILURE(rc))
7994 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: Failed to get the \"StatNo\" value"));
7995
7996 LogRel(("%s: Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s Itr=%s ItrRx=%s TID=%s R0=%s RC=%s\n", pThis->szPrf,
7997 g_aChips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
7998 pThis->fEthernetCRC ? "on" : "off",
7999 pThis->fGSOEnabled ? "enabled" : "disabled",
8000 pThis->fItrEnabled ? "enabled" : "disabled",
8001 pThis->fItrRxEnabled ? "enabled" : "disabled",
8002 pThis->fTidEnabled ? "enabled" : "disabled",
8003 pDevIns->fR0Enabled ? "enabled" : "disabled",
8004 pDevIns->fRCEnabled ? "enabled" : "disabled"));
8005
8006 /*
8007 * Initialize sub-components and register everything with the VMM.
8008 */
8009
8010 /* Initialize the EEPROM. */
8011 pThisCC->eeprom.init(pThis->macConfigured);
8012
8013 /* Initialize internal PHY. */
8014 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
8015
8016 /* Initialize critical sections. We do our own locking. */
8017 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
8018 AssertRCReturn(rc, rc);
8019
8020 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "E1000#%d", iInstance);
8021 AssertRCReturn(rc, rc);
8022 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "E1000#%dRX", iInstance);
8023 AssertRCReturn(rc, rc);
8024#ifdef E1K_WITH_TX_CS
8025 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "E1000#%dTX", iInstance);
8026 AssertRCReturn(rc, rc);
8027#endif
8028
8029 /* Saved state registration. */
8030 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
8031 NULL, e1kLiveExec, NULL,
8032 e1kSavePrep, e1kSaveExec, NULL,
8033 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
8034 AssertRCReturn(rc, rc);
8035
8036 /* Set PCI config registers and register ourselves with the PCI bus. */
8037 PDMPCIDEV_ASSERT_VALID(pDevIns, pDevIns->apPciDevs[0]);
8038 e1kR3ConfigurePciDev(pDevIns->apPciDevs[0], pThis->eChip);
8039 rc = PDMDevHlpPCIRegister(pDevIns, pDevIns->apPciDevs[0]);
8040 AssertRCReturn(rc, rc);
8041
8042#ifdef E1K_WITH_MSI
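    /* Note: the MSI capability lives at offset 0x80, matching the Next Item Pointer
     * written by e1kR3ConfigurePciDev when E1K_WITH_MSI is defined. */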
8043 PDMMSIREG MsiReg;
8044 RT_ZERO(MsiReg);
8045 MsiReg.cMsiVectors = 1;
8046 MsiReg.iMsiCapOffset = 0x80;
8047 MsiReg.iMsiNextOffset = 0x0;
8048 MsiReg.fMsi64bit = false;
8049 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
8050 AssertRCReturn(rc, rc);
8051#endif
8052
8053 /*
8054 * Map our registers to memory space (region 0, see e1kR3ConfigurePciDev)
8055 * From the spec (regarding flags):
8056 * For registers that should be accessed as 32-bit double words,
8057 * partial writes (less than a 32-bit double word) are ignored.
8058 * Partial reads return all 32 bits of data regardless of the
8059 * byte enables.
8060 */
8061 rc = PDMDevHlpMmioCreateEx(pDevIns, E1K_MM_SIZE, IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD,
8062 pDevIns->apPciDevs[0], 0 /*iPciRegion*/,
8063 e1kMMIOWrite, e1kMMIORead, NULL /*pfnFill*/, NULL /*pvUser*/, "E1000", &pThis->hMmioRegion);
8064 AssertRCReturn(rc, rc);
8065 rc = PDMDevHlpPCIIORegionRegisterMmio(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, pThis->hMmioRegion, NULL);
8066 AssertRCReturn(rc, rc);
8067
8068 /* Map our registers to IO space (region 2, see e1kR3ConfigurePciDev) */
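    /* Per-port descriptions: IOADDR covers the first four ports of the I/O BAR,
     * IODATA the following four. */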
8069 static IOMIOPORTDESC const s_aExtDescs[] =
8070 {
8071 { "IOADDR", "IOADDR", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL },
8072 { "IODATA", "IODATA", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL },
8073 { NULL, NULL, NULL, NULL }
8074 };
8075 rc = PDMDevHlpIoPortCreate(pDevIns, E1K_IOPORT_SIZE, pDevIns->apPciDevs[0], 2 /*iPciRegion*/,
8076 e1kIOPortOut, e1kIOPortIn, NULL /*pvUser*/, "E1000", s_aExtDescs, &pThis->hIoPorts);
8077 AssertRCReturn(rc, rc);
8078 rc = PDMDevHlpPCIIORegionRegisterIo(pDevIns, 2, E1K_IOPORT_SIZE, pThis->hIoPorts);
8079 AssertRCReturn(rc, rc);
8080
8081 /* Create transmit queue */
8082 rc = PDMDevHlpTaskCreate(pDevIns, PDMTASK_F_RZ, "E1000-Xmit", e1kR3TxTaskCallback, NULL, &pThis->hTxTask);
8083 AssertRCReturn(rc, rc);
8084
8085#ifdef E1K_TX_DELAY
8086 /* Create Transmit Delay Timer */
8087 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxDelayTimer, pThis,
8088 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Xmit Delay", &pThis->hTXDTimer);
8089 AssertRCReturn(rc, rc);
8090 rc = PDMDevHlpTimerSetCritSect(pDevIns, pThis->hTXDTimer, &pThis->csTx);
8091 AssertRCReturn(rc, rc);
8092#endif /* E1K_TX_DELAY */
8093
8094//#ifdef E1K_USE_TX_TIMERS
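    /* The TX interrupt delay timers are now gated by the run-time 'TidEnabled'
     * setting rather than the E1K_USE_TX_TIMERS compile-time switch kept above
     * for reference. */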
8095 if (pThis->fTidEnabled)
8096 {
8097 /* Create Transmit Interrupt Delay Timer */
8098 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxIntDelayTimer, pThis,
8099 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Xmit IRQ Delay", &pThis->hTIDTimer);
8100 AssertRCReturn(rc, rc);
8101
8102# ifndef E1K_NO_TAD
8103 /* Create Transmit Absolute Delay Timer */
8104 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxAbsDelayTimer, pThis,
8105 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Xmit Abs Delay", &pThis->hTADTimer);
8106 AssertRCReturn(rc, rc);
8107# endif /* E1K_NO_TAD */
8108 }
8109//#endif /* E1K_USE_TX_TIMERS */
8110
8111#ifdef E1K_USE_RX_TIMERS
8112 /* Create Receive Interrupt Delay Timer */
8113 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3RxIntDelayTimer, pThis,
8114 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Recv IRQ Delay", &pThis->hRIDTimer);
8115 AssertRCReturn(rc, rc);
8116
8117 /* Create Receive Absolute Delay Timer */
8118 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3RxAbsDelayTimer, pThis,
8119 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Recv Abs Delay", &pThis->hRADTimer);
8120 AssertRCReturn(rc, rc);
8121#endif /* E1K_USE_RX_TIMERS */
8122
8123 /* Create Late Interrupt Timer */
8124 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3LateIntTimer, pThis,
8125 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Late IRQ", &pThis->hIntTimer);
8126 AssertRCReturn(rc, rc);
8127
8128 /* Create Link Up Timer */
8129 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3LinkUpTimer, pThis,
8130 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Link Up", &pThis->hLUTimer);
8131 AssertRCReturn(rc, rc);
8132
8133 /* Register the info item */
8134 char szTmp[20];
8135 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
8136 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
8137
8138 /* Status driver */
8139 PPDMIBASE pBase;
8140 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThisCC->IBase, &pBase, "Status Port");
8141 if (RT_FAILURE(rc))
8142 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
8143 pThisCC->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
8144
8145 /* Network driver */
8146 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThisCC->IBase, &pThisCC->pDrvBase, "Network Port");
8147 if (RT_SUCCESS(rc))
8148 {
8149 pThisCC->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMINETWORKUP);
8150 AssertMsgReturn(pThisCC->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
8151
8152#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
8153 pThisR0->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASER0), PDMINETWORKUP);
8154 pThisRC->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASERC), PDMINETWORKUP);
8155#endif
8156 }
8157 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
8158 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
8159 {
8160 /* No error! */
8161 E1kLog(("%s This adapter is not attached to any network!\n", pThis->szPrf));
8162 }
8163 else
8164 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
8165
8166 rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->hEventMoreRxDescAvail);
8167 AssertRCReturn(rc, rc);
8168
8169 rc = e1kInitDebugHelpers();
8170 AssertRCReturn(rc, rc);
8171
8172 e1kR3HardReset(pDevIns, pThis, pThisCC);
8173
8174 /*
8175 * Register statistics.
8176 * The /Public/ bits are official and used by session info in the GUI.
8177 */
8178 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
8179 "Amount of data received", "/Public/NetAdapter/%u/BytesReceived", uStatNo);
8180 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
8181 "Amount of data transmitted", "/Public/NetAdapter/%u/BytesTransmitted", uStatNo);
8182 PDMDevHlpSTAMRegisterF(pDevIns, &pDevIns->iInstance, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
8183 "Device instance number", "/Public/NetAdapter/%u/%s", uStatNo, pDevIns->pReg->szName);
8184
8185 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, "ReceiveBytes", STAMUNIT_BYTES, "Amount of data received");
8186 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, "TransmitBytes", STAMUNIT_BYTES, "Amount of data transmitted");
8187
8188#if defined(VBOX_WITH_STATISTICS)
8189 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, "MMIO/ReadRZ", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ");
8190 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, "MMIO/ReadR3", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3");
8191 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, "MMIO/WriteRZ", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ");
8192 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, "MMIO/WriteR3", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3");
8193 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, "EEPROM/Read", STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads");
8194 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, "EEPROM/Write", STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes");
8195 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, "IO/ReadRZ", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ");
8196 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, "IO/ReadR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3");
8197 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, "IO/WriteRZ", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ");
8198 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, "IO/WriteR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3");
8199 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, "LateInt/Timer", STAMUNIT_TICKS_PER_CALL, "Profiling late int timer");
8200 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, "LateInt/Occured", STAMUNIT_OCCURENCES, "Number of late interrupts");
8201 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, "Interrupts/Raised", STAMUNIT_OCCURENCES, "Number of raised interrupts");
8202 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, "Interrupts/Prevented", STAMUNIT_OCCURENCES, "Number of prevented interrupts");
8203 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, "Receive/Total", STAMUNIT_TICKS_PER_CALL, "Profiling receive");
8204 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, "Receive/CRC", STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming");
8205 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, "Receive/Filter", STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering");
8206 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, "Receive/Store", STAMUNIT_TICKS_PER_CALL, "Profiling receive storing");
8207 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, "RxOverflow", STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows");
8208 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatRxOverflowWakeupRZ, STAMTYPE_COUNTER, "RxOverflowWakeupRZ", STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups in RZ");
8209 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatRxOverflowWakeupR3, STAMTYPE_COUNTER, "RxOverflowWakeupR3", STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups in R3");
8210 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, "Transmit/TotalRZ", STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ");
8211 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, "Transmit/TotalR3", STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3");
8212 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, "Transmit/SendRZ", STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ");
8213 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, "Transmit/SendR3", STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3");
8214
8215 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, "TxDesc/ContexNormal", STAMUNIT_OCCURENCES, "Number of normal context descriptors");
8216 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, "TxDesc/ContextTSE", STAMUNIT_OCCURENCES, "Number of TSE context descriptors");
8217 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, "TxDesc/Data", STAMUNIT_OCCURENCES, "Number of TX data descriptors");
8218 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, "TxDesc/Legacy", STAMUNIT_OCCURENCES, "Number of TX legacy descriptors");
8219 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, "TxDesc/TSEData", STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors");
8220 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, "TxPath/Fallback", STAMUNIT_OCCURENCES, "Fallback TSE descriptor path");
8221 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, "TxPath/GSO", STAMUNIT_OCCURENCES, "GSO TSE descriptor path");
8222 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, "TxPath/Normal", STAMUNIT_OCCURENCES, "Regular descriptor path");
8223 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, "PHYAccesses", STAMUNIT_OCCURENCES, "Number of PHY accesses");
8224 for (unsigned iReg = 0; iReg < E1K_NUM_OF_REGS; iReg++)
8225 {
8226 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
8227 g_aE1kRegMap[iReg].name, "Regs/%s-Reads", g_aE1kRegMap[iReg].abbrev);
8228 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
8229 g_aE1kRegMap[iReg].name, "Regs/%s-Writes", g_aE1kRegMap[iReg].abbrev);
8230 }
8231#endif /* VBOX_WITH_STATISTICS */
8232
8233#ifdef E1K_INT_STATS
8234 PDMDevHlpSTAMRegister(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, "u64ArmedAt", STAMUNIT_NS, NULL);
8235 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, "uStatMaxTxDelay", STAMUNIT_NS, NULL);
8236 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatInt, STAMTYPE_U32, "uStatInt", STAMUNIT_NS, NULL);
8237 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, "uStatIntTry", STAMUNIT_NS, NULL);
8238 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, "uStatIntLower", STAMUNIT_NS, NULL);
8239 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatNoIntICR, STAMTYPE_U32, "uStatNoIntICR", STAMUNIT_NS, NULL);
8240 PDMDevHlpSTAMRegister(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, "iStatIntLost", STAMUNIT_NS, NULL);
8241 PDMDevHlpSTAMRegister(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, "iStatIntLostOne", STAMUNIT_NS, NULL);
8242 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntIMS, STAMTYPE_U32, "uStatIntIMS", STAMUNIT_NS, NULL);
8243 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, "uStatIntSkip", STAMUNIT_NS, NULL);
8244 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, "uStatIntLate", STAMUNIT_NS, NULL);
8245 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, "uStatIntMasked", STAMUNIT_NS, NULL);
8246 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, "uStatIntEarly", STAMUNIT_NS, NULL);
8247 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, "uStatIntRx", STAMUNIT_NS, NULL);
8248 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, "uStatIntTx", STAMUNIT_NS, NULL);
8249 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, "uStatIntICS", STAMUNIT_NS, NULL);
8250 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, "uStatIntRDTR", STAMUNIT_NS, NULL);
8251 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, "uStatIntRXDMT0", STAMUNIT_NS, NULL);
8252 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, "uStatIntTXQE", STAMUNIT_NS, NULL);
8253 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, "uStatTxNoRS", STAMUNIT_NS, NULL);
8254 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, "uStatTxIDE", STAMUNIT_NS, NULL);
8255 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, "uStatTxDelayed", STAMUNIT_NS, NULL);
8256 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, "uStatTxDelayExp", STAMUNIT_NS, NULL);
8257 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, "uStatTAD", STAMUNIT_NS, NULL);
8258 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTID, STAMTYPE_U32, "uStatTID", STAMUNIT_NS, NULL);
8259 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, "uStatRAD", STAMUNIT_NS, NULL);
8260 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatRID, STAMTYPE_U32, "uStatRID", STAMUNIT_NS, NULL);
8261 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, "uStatRxFrm", STAMUNIT_NS, NULL);
8262 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, "uStatTxFrm", STAMUNIT_NS, NULL);
8263 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, "uStatDescCtx", STAMUNIT_NS, NULL);
8264 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, "uStatDescDat", STAMUNIT_NS, NULL);
8265 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, "uStatDescLeg", STAMUNIT_NS, NULL);
8266 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, "uStatTx1514", STAMUNIT_NS, NULL);
8267 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, "uStatTx2962", STAMUNIT_NS, NULL);
8268 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, "uStatTx4410", STAMUNIT_NS, NULL);
8269 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, "uStatTx5858", STAMUNIT_NS, NULL);
8270 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, "uStatTx7306", STAMUNIT_NS, NULL);
8271 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, "uStatTx8754", STAMUNIT_NS, NULL);
8272 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, "uStatTx16384", STAMUNIT_NS, NULL);
8273 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, "uStatTx32768", STAMUNIT_NS, NULL);
8274 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, "uStatTxLarge", STAMUNIT_NS, NULL);
8275#endif /* E1K_INT_STATS */
8276
8277 return VINF_SUCCESS;
8278}
8279
8280#else /* !IN_RING3 */
8281
8282/**
8283 * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
8284 */
8285static DECLCALLBACK(int) e1kRZConstruct(PPDMDEVINS pDevIns)
8286{
8287 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
8288 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
8289 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
8290
8291 /* Initialize context specific state data: */
8292 pThisCC->CTX_SUFF(pDevIns) = pDevIns;
8293 /** @todo @bugref{9218} ring-0 driver stuff */
8294 pThisCC->CTX_SUFF(pDrv) = NULL;
8295 pThisCC->CTX_SUFF(pTxSg) = NULL;
8296
8297 /* Configure critical sections the same way: */
8298 int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
8299 AssertRCReturn(rc, rc);
8300
8301 /* Set up MMIO and I/O port callbacks for this context: */
8302 rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmioRegion, e1kMMIOWrite, e1kMMIORead, NULL /*pvUser*/);
8303 AssertRCReturn(rc, rc);
8304
8305 rc = PDMDevHlpIoPortSetUpContext(pDevIns, pThis->hIoPorts, e1kIOPortOut, e1kIOPortIn, NULL /*pvUser*/);
8306 AssertRCReturn(rc, rc);
8307
8308 return VINF_SUCCESS;
8309}
8310
8311#endif /* !IN_RING3 */
8312
8313/**
8314 * The device registration structure.
8315 */
8316const PDMDEVREG g_DeviceE1000 =
8317{
/* .u32Version = */ PDM_DEVREG_VERSION,
8319 /* .uReserved0 = */ 0,
8320 /* .szName = */ "e1000",
8321 /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE,
8322 /* .fClass = */ PDM_DEVREG_CLASS_NETWORK,
8323 /* .cMaxInstances = */ ~0U,
8324 /* .uSharedVersion = */ 42,
8325 /* .cbInstanceShared = */ sizeof(E1KSTATE),
8326 /* .cbInstanceCC = */ sizeof(E1KSTATECC),
8327 /* .cbInstanceRC = */ sizeof(E1KSTATERC),
8328 /* .cMaxPciDevices = */ 1,
8329 /* .cMaxMsixVectors = */ 0,
8330 /* .pszDescription = */ "Intel PRO/1000 MT Desktop Ethernet.",
8331#if defined(IN_RING3)
8332 /* .pszRCMod = */ "VBoxDDRC.rc",
8333 /* .pszR0Mod = */ "VBoxDDR0.r0",
8334 /* .pfnConstruct = */ e1kR3Construct,
8335 /* .pfnDestruct = */ e1kR3Destruct,
8336 /* .pfnRelocate = */ e1kR3Relocate,
8337 /* .pfnMemSetup = */ NULL,
8338 /* .pfnPowerOn = */ NULL,
8339 /* .pfnReset = */ e1kR3Reset,
8340 /* .pfnSuspend = */ e1kR3Suspend,
8341 /* .pfnResume = */ NULL,
8342 /* .pfnAttach = */ e1kR3Attach,
/* .pfnDetach = */ e1kR3Detach,
8344 /* .pfnQueryInterface = */ NULL,
8345 /* .pfnInitComplete = */ NULL,
8346 /* .pfnPowerOff = */ e1kR3PowerOff,
8347 /* .pfnSoftReset = */ NULL,
8348 /* .pfnReserved0 = */ NULL,
8349 /* .pfnReserved1 = */ NULL,
8350 /* .pfnReserved2 = */ NULL,
8351 /* .pfnReserved3 = */ NULL,
8352 /* .pfnReserved4 = */ NULL,
8353 /* .pfnReserved5 = */ NULL,
8354 /* .pfnReserved6 = */ NULL,
8355 /* .pfnReserved7 = */ NULL,
8356#elif defined(IN_RING0)
8357 /* .pfnEarlyConstruct = */ NULL,
8358 /* .pfnConstruct = */ e1kRZConstruct,
8359 /* .pfnDestruct = */ NULL,
8360 /* .pfnFinalDestruct = */ NULL,
8361 /* .pfnRequest = */ NULL,
8362 /* .pfnReserved0 = */ NULL,
8363 /* .pfnReserved1 = */ NULL,
8364 /* .pfnReserved2 = */ NULL,
8365 /* .pfnReserved3 = */ NULL,
8366 /* .pfnReserved4 = */ NULL,
8367 /* .pfnReserved5 = */ NULL,
8368 /* .pfnReserved6 = */ NULL,
8369 /* .pfnReserved7 = */ NULL,
8370#elif defined(IN_RC)
8371 /* .pfnConstruct = */ e1kRZConstruct,
8372 /* .pfnReserved0 = */ NULL,
8373 /* .pfnReserved1 = */ NULL,
8374 /* .pfnReserved2 = */ NULL,
8375 /* .pfnReserved3 = */ NULL,
8376 /* .pfnReserved4 = */ NULL,
8377 /* .pfnReserved5 = */ NULL,
8378 /* .pfnReserved6 = */ NULL,
8379 /* .pfnReserved7 = */ NULL,
8380#else
8381# error "Not in IN_RING3, IN_RING0 or IN_RC!"
8382#endif
8383 /* .u32VersionEnd = */ PDM_DEVREG_VERSION
8384};
8385
8386#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */