VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp@ 64402

Last change on this file since 64402 was 64390, checked in by vboxsync, 8 years ago

PDMPCIDEV: Dropped pDevIns as it isn't needed any longer.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 321.1 KB
1/* $Id: DevE1000.cpp 64390 2016-10-24 14:19:51Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2016 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DEV_E1000
33#include <iprt/crc.h>
34#include <iprt/ctype.h>
35#include <iprt/net.h>
36#include <iprt/semaphore.h>
37#include <iprt/string.h>
38#include <iprt/time.h>
39#include <iprt/uuid.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/vmm/pdmnetifs.h>
42#include <VBox/vmm/pdmnetinline.h>
43#include <VBox/param.h>
44#include "VBoxDD.h"
45
46#include "DevEEPROM.h"
47#include "DevE1000Phy.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** @name E1000 Build Options
54 * @{ */
55/** @def E1K_INIT_RA0
56 * E1K_INIT_RA0 forces E1000 to set the first entry in the Receive Address filter
57 * table to the MAC address obtained from CFGM. Most guests read the MAC address from
58 * EEPROM and write it to RA[0] explicitly, but Mac OS X seems to depend on it
59 * being already set (see @bugref{4657}).
60 */
61#define E1K_INIT_RA0
62/** @def E1K_LSC_ON_SLU
63 * E1K_LSC_ON_SLU causes E1000 to generate a Link Status Change interrupt when
64 * the guest driver brings up the link via the STATUS.LU bit. Again, the only guest
65 * that requires it is Mac OS X (see @bugref{4657}).
66 */
67#define E1K_LSC_ON_SLU
68/** @def E1K_TX_DELAY
69 * E1K_TX_DELAY aims to improve guest-host transfer rate for TCP streams by
70 * preventing packets from being sent immediately. It allows sending several
71 * packets in a batch, reducing the number of acknowledgments. Note that it
72 * effectively disables the R0 TX path, forcing sending in R3.
73 */
74//#define E1K_TX_DELAY 150
75/** @def E1K_USE_TX_TIMERS
76 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
77 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
78 * register. Enabling it showed no positive effects on existing guests so it
79 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
80 * Ethernet Controllers Software Developer’s Manual" for more detailed
81 * explanation.
82 */
83//#define E1K_USE_TX_TIMERS
84/** @def E1K_NO_TAD
85 * E1K_NO_TAD disables one of two timers enabled by E1K_USE_TX_TIMERS, the
86 * Transmit Absolute Delay time. This timer sets the maximum time interval
87 * during which TX interrupts can be postponed (delayed). It has no effect
88 * if E1K_USE_TX_TIMERS is not defined.
89 */
90//#define E1K_NO_TAD
91/** @def E1K_REL_DEBUG
92 * E1K_REL_DEBUG enables debug logging (levels 1, 2, and 3) in release builds.
93 */
94//#define E1K_REL_DEBUG
95/** @def E1K_INT_STATS
96 * E1K_INT_STATS enables collection of internal statistics used for
97 * debugging of delayed interrupts, etc.
98 */
99//#define E1K_INT_STATS
100/** @def E1K_WITH_MSI
101 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
102 */
103//#define E1K_WITH_MSI
104/** @def E1K_WITH_TX_CS
105 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
106 */
107#define E1K_WITH_TX_CS
108/** @def E1K_WITH_TXD_CACHE
109 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
110 * single physical memory read (or two if it wraps around the end of TX
111 * descriptor ring). It is required for proper functioning of bandwidth
112 * resource control as it allows computing the exact sizes of packets prior
113 * to allocating their buffers (see @bugref{5582}).
114 */
115#define E1K_WITH_TXD_CACHE
116/** @def E1K_WITH_RXD_CACHE
117 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
118 * single physical memory read (or two if it wraps around the end of RX
119 * descriptor ring). Intel's packet driver for DOS needs this option in
120 * order to work properly (see @bugref{6217}).
121 */
122#define E1K_WITH_RXD_CACHE
123/** @def E1K_WITH_PREREG_MMIO
124 * E1K_WITH_PREREG_MMIO enables a new-style MMIO registration and is
125 * currently only done for testing the related PDM, IOM and PGM code. */
126//#define E1K_WITH_PREREG_MMIO
127/* @} */
128/* End of Options ************************************************************/
129
130#ifdef E1K_WITH_TXD_CACHE
131/**
132 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
133 * in the state structure. It limits the number of descriptors loaded in one
134 * batch read. For example, a Linux guest may use up to 20 descriptors per
135 * TSE packet. The largest TSE packet seen (Windows guest) was 45 descriptors.
136 */
137# define E1K_TXD_CACHE_SIZE 64u
138#endif /* E1K_WITH_TXD_CACHE */
139
140#ifdef E1K_WITH_RXD_CACHE
141/**
142 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
143 * in the state structure. It limits the number of descriptors loaded in one
144 * batch read. For example, an XP guest adds 15 RX descriptors at a time.
145 */
146# define E1K_RXD_CACHE_SIZE 16u
147#endif /* E1K_WITH_RXD_CACHE */
148
149
150/* Little helpers ************************************************************/
151#undef htons
152#undef ntohs
153#undef htonl
154#undef ntohl
155#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
156#define ntohs(x) htons(x)
157#define htonl(x) ASMByteSwapU32(x)
158#define ntohl(x) htonl(x)
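#if 0 /* Illustration only, not part of the original source: the helpers above are
       * unconditional byte swaps, so a 16-bit EtherType can be moved between host and
       * network representation without pulling in platform headers. */
    uint16_t uEtherTypeNet  = htons(0x8100);        /* 0x0081 - VLAN tag type in wire order */
    uint16_t uEtherTypeHost = ntohs(uEtherTypeNet); /* back to 0x8100 */
#endif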
159
160#ifndef DEBUG
161# ifdef E1K_REL_DEBUG
162# define DEBUG
163# define E1kLog(a) LogRel(a)
164# define E1kLog2(a) LogRel(a)
165# define E1kLog3(a) LogRel(a)
166# define E1kLogX(x, a) LogRel(a)
167//# define E1kLog3(a) do {} while (0)
168# else
169# define E1kLog(a) do {} while (0)
170# define E1kLog2(a) do {} while (0)
171# define E1kLog3(a) do {} while (0)
172# define E1kLogX(x, a) do {} while (0)
173# endif
174#else
175# define E1kLog(a) Log(a)
176# define E1kLog2(a) Log2(a)
177# define E1kLog3(a) Log3(a)
178# define E1kLogX(x, a) LogIt(x, LOG_GROUP, a)
179//# define E1kLog(a) do {} while (0)
180//# define E1kLog2(a) do {} while (0)
181//# define E1kLog3(a) do {} while (0)
182#endif
183
184#if 0
185# define LOG_ENABLED
186# define E1kLogRel(a) LogRel(a)
187# undef Log6
188# define Log6(a) LogRel(a)
189#else
190# define E1kLogRel(a) do { } while (0)
191#endif
192
193//#undef DEBUG
194
195#define STATE_TO_DEVINS(pThis) (((PE1KSTATE )pThis)->CTX_SUFF(pDevIns))
196#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
197
198#define E1K_INC_CNT32(cnt) \
199do { \
200 if (cnt < UINT32_MAX) \
201 cnt++; \
202} while (0)
203
204#define E1K_ADD_CNT64(cntLo, cntHi, val) \
205do { \
206 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
207 uint64_t tmp = u64Cnt; \
208 u64Cnt += val; \
209 if (tmp > u64Cnt ) \
210 u64Cnt = UINT64_MAX; \
211 cntLo = (uint32_t)u64Cnt; \
212 cntHi = (uint32_t)(u64Cnt >> 32); \
213} while (0)
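#if 0 /* Illustration only, not part of the original source: statistics kept as 32-bit
       * register pairs (e.g. GORCL/GORCH further down) are updated with the saturating
       * helper above; the carry propagates into the high word. */
    uint32_t cntLo = UINT32_C(0xFFFFFFF0), cntHi = 0;
    E1K_ADD_CNT64(cntLo, cntHi, 0x20); /* cntLo = 0x00000010, cntHi = 0x00000001 */
#endif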
214
215#ifdef E1K_INT_STATS
216# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
217#else /* E1K_INT_STATS */
218# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
219#endif /* E1K_INT_STATS */
220
221
222/*****************************************************************************/
223
224typedef uint32_t E1KCHIP;
225#define E1K_CHIP_82540EM 0
226#define E1K_CHIP_82543GC 1
227#define E1K_CHIP_82545EM 2
228
229#ifdef IN_RING3
230/** Different E1000 chips. */
231static const struct E1kChips
232{
233 uint16_t uPCIVendorId;
234 uint16_t uPCIDeviceId;
235 uint16_t uPCISubsystemVendorId;
236 uint16_t uPCISubsystemId;
237 const char *pcszName;
238} g_aChips[] =
239{
240 /* Vendor Device SSVendor SubSys Name */
241 { 0x8086,
242 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
243# ifdef E1K_WITH_MSI
244 0x105E,
245# else
246 0x100E,
247# endif
248 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
249 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
250 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMware Network Adapter */
251};
252#endif /* IN_RING3 */
253
254
255/* The size of register area mapped to I/O space */
256#define E1K_IOPORT_SIZE 0x8
257/* The size of memory-mapped register area */
258#define E1K_MM_SIZE 0x20000
259
260#define E1K_MAX_TX_PKT_SIZE 16288
261#define E1K_MAX_RX_PKT_SIZE 16384
262
263/*****************************************************************************/
264
265/** Gets the specified bits from the register. */
266#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
267#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
268#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
269#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
270#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
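#if 0 /* Illustration only, not part of the original source (assumes a guest-written
       * value 'u32Value' and the MDIC_* masks/shifts defined below): the helpers above
       * extract and update individual register fields without hand-written masking. */
    uint32_t uPhyReg = GET_BITS_V(u32Value, MDIC, REG); /* (u32Value & MDIC_REG_MASK) >> MDIC_REG_SHIFT */
    SET_BITS_V(u32Value, MDIC, DATA, 0x1234);           /* replaces only the DATA field of u32Value */
#endif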
271
272#define CTRL_SLU UINT32_C(0x00000040)
273#define CTRL_MDIO UINT32_C(0x00100000)
274#define CTRL_MDC UINT32_C(0x00200000)
275#define CTRL_MDIO_DIR UINT32_C(0x01000000)
276#define CTRL_MDC_DIR UINT32_C(0x02000000)
277#define CTRL_RESET UINT32_C(0x04000000)
278#define CTRL_VME UINT32_C(0x40000000)
279
280#define STATUS_LU UINT32_C(0x00000002)
281#define STATUS_TXOFF UINT32_C(0x00000010)
282
283#define EECD_EE_WIRES UINT32_C(0x0F)
284#define EECD_EE_REQ UINT32_C(0x40)
285#define EECD_EE_GNT UINT32_C(0x80)
286
287#define EERD_START UINT32_C(0x00000001)
288#define EERD_DONE UINT32_C(0x00000010)
289#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
290#define EERD_DATA_SHIFT 16
291#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
292#define EERD_ADDR_SHIFT 8
293
294#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
295#define MDIC_DATA_SHIFT 0
296#define MDIC_REG_MASK UINT32_C(0x001F0000)
297#define MDIC_REG_SHIFT 16
298#define MDIC_PHY_MASK UINT32_C(0x03E00000)
299#define MDIC_PHY_SHIFT 21
300#define MDIC_OP_WRITE UINT32_C(0x04000000)
301#define MDIC_OP_READ UINT32_C(0x08000000)
302#define MDIC_READY UINT32_C(0x10000000)
303#define MDIC_INT_EN UINT32_C(0x20000000)
304#define MDIC_ERROR UINT32_C(0x40000000)
305
306#define TCTL_EN UINT32_C(0x00000002)
307#define TCTL_PSP UINT32_C(0x00000008)
308
309#define RCTL_EN UINT32_C(0x00000002)
310#define RCTL_UPE UINT32_C(0x00000008)
311#define RCTL_MPE UINT32_C(0x00000010)
312#define RCTL_LPE UINT32_C(0x00000020)
313#define RCTL_LBM_MASK UINT32_C(0x000000C0)
314#define RCTL_LBM_SHIFT 6
315#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
316#define RCTL_RDMTS_SHIFT 8
317#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
318#define RCTL_MO_MASK UINT32_C(0x00003000)
319#define RCTL_MO_SHIFT 12
320#define RCTL_BAM UINT32_C(0x00008000)
321#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
322#define RCTL_BSIZE_SHIFT 16
323#define RCTL_VFE UINT32_C(0x00040000)
324#define RCTL_CFIEN UINT32_C(0x00080000)
325#define RCTL_CFI UINT32_C(0x00100000)
326#define RCTL_BSEX UINT32_C(0x02000000)
327#define RCTL_SECRC UINT32_C(0x04000000)
328
329#define ICR_TXDW UINT32_C(0x00000001)
330#define ICR_TXQE UINT32_C(0x00000002)
331#define ICR_LSC UINT32_C(0x00000004)
332#define ICR_RXDMT0 UINT32_C(0x00000010)
333#define ICR_RXT0 UINT32_C(0x00000080)
334#define ICR_TXD_LOW UINT32_C(0x00008000)
335#define RDTR_FPD UINT32_C(0x80000000)
336
337#define PBA_st ((PBAST*)(pThis->auRegs + PBA_IDX))
338typedef struct
339{
340 unsigned rxa : 7;
341 unsigned rxa_r : 9;
342 unsigned txa : 16;
343} PBAST;
344AssertCompileSize(PBAST, 4);
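#if 0 /* Illustration only, not part of the original source (assumes a local 'pThis'):
       * the overlay above lets the code read the RX/TX packet buffer allocation fields
       * straight out of the PBA register. */
    unsigned cRxAlloc = PBA_st->rxa;
    unsigned cTxAlloc = PBA_st->txa;
#endif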
345
346#define TXDCTL_WTHRESH_MASK 0x003F0000
347#define TXDCTL_WTHRESH_SHIFT 16
348#define TXDCTL_LWTHRESH_MASK 0xFE000000
349#define TXDCTL_LWTHRESH_SHIFT 25
350
351#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
352#define RXCSUM_PCSS_SHIFT 0
353
354/** @name Register access macros
355 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
356 * @{ */
357#define CTRL pThis->auRegs[CTRL_IDX]
358#define STATUS pThis->auRegs[STATUS_IDX]
359#define EECD pThis->auRegs[EECD_IDX]
360#define EERD pThis->auRegs[EERD_IDX]
361#define CTRL_EXT pThis->auRegs[CTRL_EXT_IDX]
362#define FLA pThis->auRegs[FLA_IDX]
363#define MDIC pThis->auRegs[MDIC_IDX]
364#define FCAL pThis->auRegs[FCAL_IDX]
365#define FCAH pThis->auRegs[FCAH_IDX]
366#define FCT pThis->auRegs[FCT_IDX]
367#define VET pThis->auRegs[VET_IDX]
368#define ICR pThis->auRegs[ICR_IDX]
369#define ITR pThis->auRegs[ITR_IDX]
370#define ICS pThis->auRegs[ICS_IDX]
371#define IMS pThis->auRegs[IMS_IDX]
372#define IMC pThis->auRegs[IMC_IDX]
373#define RCTL pThis->auRegs[RCTL_IDX]
374#define FCTTV pThis->auRegs[FCTTV_IDX]
375#define TXCW pThis->auRegs[TXCW_IDX]
376#define RXCW pThis->auRegs[RXCW_IDX]
377#define TCTL pThis->auRegs[TCTL_IDX]
378#define TIPG pThis->auRegs[TIPG_IDX]
379#define AIFS pThis->auRegs[AIFS_IDX]
380#define LEDCTL pThis->auRegs[LEDCTL_IDX]
381#define PBA pThis->auRegs[PBA_IDX]
382#define FCRTL pThis->auRegs[FCRTL_IDX]
383#define FCRTH pThis->auRegs[FCRTH_IDX]
384#define RDFH pThis->auRegs[RDFH_IDX]
385#define RDFT pThis->auRegs[RDFT_IDX]
386#define RDFHS pThis->auRegs[RDFHS_IDX]
387#define RDFTS pThis->auRegs[RDFTS_IDX]
388#define RDFPC pThis->auRegs[RDFPC_IDX]
389#define RDBAL pThis->auRegs[RDBAL_IDX]
390#define RDBAH pThis->auRegs[RDBAH_IDX]
391#define RDLEN pThis->auRegs[RDLEN_IDX]
392#define RDH pThis->auRegs[RDH_IDX]
393#define RDT pThis->auRegs[RDT_IDX]
394#define RDTR pThis->auRegs[RDTR_IDX]
395#define RXDCTL pThis->auRegs[RXDCTL_IDX]
396#define RADV pThis->auRegs[RADV_IDX]
397#define RSRPD pThis->auRegs[RSRPD_IDX]
398#define TXDMAC pThis->auRegs[TXDMAC_IDX]
399#define TDFH pThis->auRegs[TDFH_IDX]
400#define TDFT pThis->auRegs[TDFT_IDX]
401#define TDFHS pThis->auRegs[TDFHS_IDX]
402#define TDFTS pThis->auRegs[TDFTS_IDX]
403#define TDFPC pThis->auRegs[TDFPC_IDX]
404#define TDBAL pThis->auRegs[TDBAL_IDX]
405#define TDBAH pThis->auRegs[TDBAH_IDX]
406#define TDLEN pThis->auRegs[TDLEN_IDX]
407#define TDH pThis->auRegs[TDH_IDX]
408#define TDT pThis->auRegs[TDT_IDX]
409#define TIDV pThis->auRegs[TIDV_IDX]
410#define TXDCTL pThis->auRegs[TXDCTL_IDX]
411#define TADV pThis->auRegs[TADV_IDX]
412#define TSPMT pThis->auRegs[TSPMT_IDX]
413#define CRCERRS pThis->auRegs[CRCERRS_IDX]
414#define ALGNERRC pThis->auRegs[ALGNERRC_IDX]
415#define SYMERRS pThis->auRegs[SYMERRS_IDX]
416#define RXERRC pThis->auRegs[RXERRC_IDX]
417#define MPC pThis->auRegs[MPC_IDX]
418#define SCC pThis->auRegs[SCC_IDX]
419#define ECOL pThis->auRegs[ECOL_IDX]
420#define MCC pThis->auRegs[MCC_IDX]
421#define LATECOL pThis->auRegs[LATECOL_IDX]
422#define COLC pThis->auRegs[COLC_IDX]
423#define DC pThis->auRegs[DC_IDX]
424#define TNCRS pThis->auRegs[TNCRS_IDX]
425/* #define SEC pThis->auRegs[SEC_IDX] Conflict with sys/time.h */
426#define CEXTERR pThis->auRegs[CEXTERR_IDX]
427#define RLEC pThis->auRegs[RLEC_IDX]
428#define XONRXC pThis->auRegs[XONRXC_IDX]
429#define XONTXC pThis->auRegs[XONTXC_IDX]
430#define XOFFRXC pThis->auRegs[XOFFRXC_IDX]
431#define XOFFTXC pThis->auRegs[XOFFTXC_IDX]
432#define FCRUC pThis->auRegs[FCRUC_IDX]
433#define PRC64 pThis->auRegs[PRC64_IDX]
434#define PRC127 pThis->auRegs[PRC127_IDX]
435#define PRC255 pThis->auRegs[PRC255_IDX]
436#define PRC511 pThis->auRegs[PRC511_IDX]
437#define PRC1023 pThis->auRegs[PRC1023_IDX]
438#define PRC1522 pThis->auRegs[PRC1522_IDX]
439#define GPRC pThis->auRegs[GPRC_IDX]
440#define BPRC pThis->auRegs[BPRC_IDX]
441#define MPRC pThis->auRegs[MPRC_IDX]
442#define GPTC pThis->auRegs[GPTC_IDX]
443#define GORCL pThis->auRegs[GORCL_IDX]
444#define GORCH pThis->auRegs[GORCH_IDX]
445#define GOTCL pThis->auRegs[GOTCL_IDX]
446#define GOTCH pThis->auRegs[GOTCH_IDX]
447#define RNBC pThis->auRegs[RNBC_IDX]
448#define RUC pThis->auRegs[RUC_IDX]
449#define RFC pThis->auRegs[RFC_IDX]
450#define ROC pThis->auRegs[ROC_IDX]
451#define RJC pThis->auRegs[RJC_IDX]
452#define MGTPRC pThis->auRegs[MGTPRC_IDX]
453#define MGTPDC pThis->auRegs[MGTPDC_IDX]
454#define MGTPTC pThis->auRegs[MGTPTC_IDX]
455#define TORL pThis->auRegs[TORL_IDX]
456#define TORH pThis->auRegs[TORH_IDX]
457#define TOTL pThis->auRegs[TOTL_IDX]
458#define TOTH pThis->auRegs[TOTH_IDX]
459#define TPR pThis->auRegs[TPR_IDX]
460#define TPT pThis->auRegs[TPT_IDX]
461#define PTC64 pThis->auRegs[PTC64_IDX]
462#define PTC127 pThis->auRegs[PTC127_IDX]
463#define PTC255 pThis->auRegs[PTC255_IDX]
464#define PTC511 pThis->auRegs[PTC511_IDX]
465#define PTC1023 pThis->auRegs[PTC1023_IDX]
466#define PTC1522 pThis->auRegs[PTC1522_IDX]
467#define MPTC pThis->auRegs[MPTC_IDX]
468#define BPTC pThis->auRegs[BPTC_IDX]
469#define TSCTC pThis->auRegs[TSCTC_IDX]
470#define TSCTFC pThis->auRegs[TSCTFC_IDX]
471#define RXCSUM pThis->auRegs[RXCSUM_IDX]
472#define WUC pThis->auRegs[WUC_IDX]
473#define WUFC pThis->auRegs[WUFC_IDX]
474#define WUS pThis->auRegs[WUS_IDX]
475#define MANC pThis->auRegs[MANC_IDX]
476#define IPAV pThis->auRegs[IPAV_IDX]
477#define WUPL pThis->auRegs[WUPL_IDX]
478/** @} */
479
480/**
481 * Indices of memory-mapped registers in register table.
482 */
483typedef enum
484{
485 CTRL_IDX,
486 STATUS_IDX,
487 EECD_IDX,
488 EERD_IDX,
489 CTRL_EXT_IDX,
490 FLA_IDX,
491 MDIC_IDX,
492 FCAL_IDX,
493 FCAH_IDX,
494 FCT_IDX,
495 VET_IDX,
496 ICR_IDX,
497 ITR_IDX,
498 ICS_IDX,
499 IMS_IDX,
500 IMC_IDX,
501 RCTL_IDX,
502 FCTTV_IDX,
503 TXCW_IDX,
504 RXCW_IDX,
505 TCTL_IDX,
506 TIPG_IDX,
507 AIFS_IDX,
508 LEDCTL_IDX,
509 PBA_IDX,
510 FCRTL_IDX,
511 FCRTH_IDX,
512 RDFH_IDX,
513 RDFT_IDX,
514 RDFHS_IDX,
515 RDFTS_IDX,
516 RDFPC_IDX,
517 RDBAL_IDX,
518 RDBAH_IDX,
519 RDLEN_IDX,
520 RDH_IDX,
521 RDT_IDX,
522 RDTR_IDX,
523 RXDCTL_IDX,
524 RADV_IDX,
525 RSRPD_IDX,
526 TXDMAC_IDX,
527 TDFH_IDX,
528 TDFT_IDX,
529 TDFHS_IDX,
530 TDFTS_IDX,
531 TDFPC_IDX,
532 TDBAL_IDX,
533 TDBAH_IDX,
534 TDLEN_IDX,
535 TDH_IDX,
536 TDT_IDX,
537 TIDV_IDX,
538 TXDCTL_IDX,
539 TADV_IDX,
540 TSPMT_IDX,
541 CRCERRS_IDX,
542 ALGNERRC_IDX,
543 SYMERRS_IDX,
544 RXERRC_IDX,
545 MPC_IDX,
546 SCC_IDX,
547 ECOL_IDX,
548 MCC_IDX,
549 LATECOL_IDX,
550 COLC_IDX,
551 DC_IDX,
552 TNCRS_IDX,
553 SEC_IDX,
554 CEXTERR_IDX,
555 RLEC_IDX,
556 XONRXC_IDX,
557 XONTXC_IDX,
558 XOFFRXC_IDX,
559 XOFFTXC_IDX,
560 FCRUC_IDX,
561 PRC64_IDX,
562 PRC127_IDX,
563 PRC255_IDX,
564 PRC511_IDX,
565 PRC1023_IDX,
566 PRC1522_IDX,
567 GPRC_IDX,
568 BPRC_IDX,
569 MPRC_IDX,
570 GPTC_IDX,
571 GORCL_IDX,
572 GORCH_IDX,
573 GOTCL_IDX,
574 GOTCH_IDX,
575 RNBC_IDX,
576 RUC_IDX,
577 RFC_IDX,
578 ROC_IDX,
579 RJC_IDX,
580 MGTPRC_IDX,
581 MGTPDC_IDX,
582 MGTPTC_IDX,
583 TORL_IDX,
584 TORH_IDX,
585 TOTL_IDX,
586 TOTH_IDX,
587 TPR_IDX,
588 TPT_IDX,
589 PTC64_IDX,
590 PTC127_IDX,
591 PTC255_IDX,
592 PTC511_IDX,
593 PTC1023_IDX,
594 PTC1522_IDX,
595 MPTC_IDX,
596 BPTC_IDX,
597 TSCTC_IDX,
598 TSCTFC_IDX,
599 RXCSUM_IDX,
600 WUC_IDX,
601 WUFC_IDX,
602 WUS_IDX,
603 MANC_IDX,
604 IPAV_IDX,
605 WUPL_IDX,
606 MTA_IDX,
607 RA_IDX,
608 VFTA_IDX,
609 IP4AT_IDX,
610 IP6AT_IDX,
611 WUPM_IDX,
612 FFLT_IDX,
613 FFMT_IDX,
614 FFVT_IDX,
615 PBM_IDX,
616 RA_82542_IDX,
617 MTA_82542_IDX,
618 VFTA_82542_IDX,
619 E1K_NUM_OF_REGS
620} E1kRegIndex;
621
622#define E1K_NUM_OF_32BIT_REGS MTA_IDX
623/** The number of registers with strictly increasing offset. */
624#define E1K_NUM_OF_BINARY_SEARCHABLE (WUPL_IDX + 1)
625
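#if 0 /* Illustration only, not part of the original source: the first
       * E1K_NUM_OF_BINARY_SEARCHABLE entries of g_aE1kRegMap (defined further down) have
       * strictly increasing offsets, so a register offset 'offReg' (hypothetical variable)
       * can be mapped to its table index with a plain lower-bound search. */
    int iLeft = 0, iRight = E1K_NUM_OF_BINARY_SEARCHABLE;
    while (iLeft < iRight)
    {
        int iMid = (iLeft + iRight) / 2;
        if (g_aE1kRegMap[iMid].offset + g_aE1kRegMap[iMid].size <= offReg)
            iLeft = iMid + 1;
        else
            iRight = iMid;
    }
    /* iLeft is now the first entry whose range ends above offReg; it is a hit if
       g_aE1kRegMap[iLeft].offset <= offReg. */
#endif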
626
627/**
628 * Define E1000-specific EEPROM layout.
629 */
630struct E1kEEPROM
631{
632 public:
633 EEPROM93C46 eeprom;
634
635#ifdef IN_RING3
636 /**
637 * Initialize EEPROM content.
638 *
639 * @param macAddr MAC address of E1000.
640 */
641 void init(RTMAC &macAddr)
642 {
643 eeprom.init();
644 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
645 eeprom.m_au16Data[0x04] = 0xFFFF;
646 /*
647 * bit 3 - full support for power management
648 * bit 10 - full duplex
649 */
650 eeprom.m_au16Data[0x0A] = 0x4408;
651 eeprom.m_au16Data[0x0B] = 0x001E;
652 eeprom.m_au16Data[0x0C] = 0x8086;
653 eeprom.m_au16Data[0x0D] = 0x100E;
654 eeprom.m_au16Data[0x0E] = 0x8086;
655 eeprom.m_au16Data[0x0F] = 0x3040;
656 eeprom.m_au16Data[0x21] = 0x7061;
657 eeprom.m_au16Data[0x22] = 0x280C;
658 eeprom.m_au16Data[0x23] = 0x00C8;
659 eeprom.m_au16Data[0x24] = 0x00C8;
660 eeprom.m_au16Data[0x2F] = 0x0602;
661 updateChecksum();
662 };
663
664 /**
665 * Compute the checksum as required by E1000 and store it
666 * in the last word.
667 */
668 void updateChecksum()
669 {
670 uint16_t u16Checksum = 0;
671
672 for (int i = 0; i < eeprom.SIZE-1; i++)
673 u16Checksum += eeprom.m_au16Data[i];
674 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
675 };
676
677 /**
678 * First 6 bytes of EEPROM contain MAC address.
679 *
680 * @param pMac Where to store the MAC address of E1000.
681 */
682 void getMac(PRTMAC pMac)
683 {
684 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
685 };
686
687 uint32_t read()
688 {
689 return eeprom.read();
690 }
691
692 void write(uint32_t u32Wires)
693 {
694 eeprom.write(u32Wires);
695 }
696
697 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
698 {
699 return eeprom.readWord(u32Addr, pu16Value);
700 }
701
702 int load(PSSMHANDLE pSSM)
703 {
704 return eeprom.load(pSSM);
705 }
706
707 void save(PSSMHANDLE pSSM)
708 {
709 eeprom.save(pSSM);
710 }
711#endif /* IN_RING3 */
712};
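#if 0 /* Illustration only, not part of the original source (assumes 'eeprom' is the
       * EEPROM93C46 member as in the structure above): the convention enforced by
       * E1kEEPROM::updateChecksum() is that the 16-bit sum of all EEPROM words,
       * including the checksum word itself, equals 0xBABA. */
    uint16_t u16Sum = 0;
    for (int i = 0; i < eeprom.SIZE; i++)
        u16Sum += eeprom.m_au16Data[i];
    Assert(u16Sum == 0xBABA);
#endif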
713
714
715#define E1K_SPEC_VLAN(s) (s & 0xFFF)
716#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
717#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
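#if 0 /* Illustration only, not part of the original source: decoding the 16-bit
       * 'special' field of a VLAN-tagged descriptor with the helpers above. */
    uint16_t u16Special = 0xA00A;                 /* PRI=5, CFI=0, VLAN ID=10 */
    unsigned uVlanId = E1K_SPEC_VLAN(u16Special); /* 10 */
    unsigned fCfi    = E1K_SPEC_CFI(u16Special);  /* 0  */
    unsigned uPri    = E1K_SPEC_PRI(u16Special);  /* 5  */
#endif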
718
719struct E1kRxDStatus
720{
721 /** @name Descriptor Status field (3.2.3.1)
722 * @{ */
723 unsigned fDD : 1; /**< Descriptor Done. */
724 unsigned fEOP : 1; /**< End of packet. */
725 unsigned fIXSM : 1; /**< Ignore checksum indication. */
726 unsigned fVP : 1; /**< VLAN, matches VET. */
727 unsigned : 1;
728 unsigned fTCPCS : 1; /**< TCP Checksum calculated on the packet. */
729 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
730 unsigned fPIF : 1; /**< Passed in-exact filter */
731 /** @} */
732 /** @name Descriptor Errors field (3.2.3.2)
733 * (Only valid when fEOP and fDD are set.)
734 * @{ */
735 unsigned fCE : 1; /**< CRC or alignment error. */
736 unsigned : 4; /**< Reserved, varies with different models... */
737 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
738 unsigned fIPE : 1; /**< IP Checksum error. */
739 unsigned fRXE : 1; /**< RX Data error. */
740 /** @} */
741 /** @name Descriptor Special field (3.2.3.3)
742 * @{ */
743 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
744 /** @} */
745};
746typedef struct E1kRxDStatus E1KRXDST;
747
748struct E1kRxDesc_st
749{
750 uint64_t u64BufAddr; /**< Address of data buffer */
751 uint16_t u16Length; /**< Length of data in buffer */
752 uint16_t u16Checksum; /**< Packet checksum */
753 E1KRXDST status;
754};
755typedef struct E1kRxDesc_st E1KRXDESC;
756AssertCompileSize(E1KRXDESC, 16);
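#if 0 /* Illustration only, not part of the original source ('aRxRing' and 'iHead' are
       * hypothetical): after write-back, a completed packet buffer is recognized by the
       * DD and EOP status bits of the descriptor. */
    E1KRXDESC const *pDesc = &aRxRing[iHead];
    if (pDesc->status.fDD && pDesc->status.fEOP)
    {
        /* pDesc->u16Length bytes of the last buffer of a packet are available. */
    }
#endif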
757
758#define E1K_DTYP_LEGACY -1
759#define E1K_DTYP_CONTEXT 0
760#define E1K_DTYP_DATA 1
761
762struct E1kTDLegacy
763{
764 uint64_t u64BufAddr; /**< Address of data buffer */
765 struct TDLCmd_st
766 {
767 unsigned u16Length : 16;
768 unsigned u8CSO : 8;
769 /* CMD field : 8 */
770 unsigned fEOP : 1;
771 unsigned fIFCS : 1;
772 unsigned fIC : 1;
773 unsigned fRS : 1;
774 unsigned fRPS : 1;
775 unsigned fDEXT : 1;
776 unsigned fVLE : 1;
777 unsigned fIDE : 1;
778 } cmd;
779 struct TDLDw3_st
780 {
781 /* STA field */
782 unsigned fDD : 1;
783 unsigned fEC : 1;
784 unsigned fLC : 1;
785 unsigned fTURSV : 1;
786 /* RSV field */
787 unsigned u4RSV : 4;
788 /* CSS field */
789 unsigned u8CSS : 8;
790 /* Special field*/
791 unsigned u16Special: 16;
792 } dw3;
793};
794
795/**
796 * TCP/IP Context Transmit Descriptor, section 3.3.6.
797 */
798struct E1kTDContext
799{
800 struct CheckSum_st
801 {
802 /** TSE: Header start. !TSE: Checksum start. */
803 unsigned u8CSS : 8;
804 /** Checksum offset - where to store it. */
805 unsigned u8CSO : 8;
806 /** Checksum ending (inclusive) offset, 0 = end of packet. */
807 unsigned u16CSE : 16;
808 } ip;
809 struct CheckSum_st tu;
810 struct TDCDw2_st
811 {
812 /** TSE: The total number of payload bytes for this context. Sans header. */
813 unsigned u20PAYLEN : 20;
814 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
815 unsigned u4DTYP : 4;
816 /** TUCMD field, 8 bits
817 * @{ */
818 /** TSE: TCP (set) or UDP (clear). */
819 unsigned fTCP : 1;
820 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
821 * the IP header. Does not affect the checksumming.
822 * @remarks 82544GC/EI interprets a cleared field differently. */
823 unsigned fIP : 1;
824 /** TSE: TCP segmentation enable. When clear, the context only describes checksum offloading. */
825 unsigned fTSE : 1;
826 /** Report status (only dw3.fDD applies here). */
827 unsigned fRS : 1;
828 /** Reserved, MBZ. */
829 unsigned fRSV1 : 1;
830 /** Descriptor extension, must be set for this descriptor type. */
831 unsigned fDEXT : 1;
832 /** Reserved, MBZ. */
833 unsigned fRSV2 : 1;
834 /** Interrupt delay enable. */
835 unsigned fIDE : 1;
836 /** @} */
837 } dw2;
838 struct TDCDw3_st
839 {
840 /** Descriptor Done. */
841 unsigned fDD : 1;
842 /** Reserved, MBZ. */
843 unsigned u7RSV : 7;
844 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
845 unsigned u8HDRLEN : 8;
846 /** TSO: Maximum segment size. */
847 unsigned u16MSS : 16;
848 } dw3;
849};
850typedef struct E1kTDContext E1KTXCTX;
851
852/**
853 * TCP/IP Data Transmit Descriptor, section 3.3.7.
854 */
855struct E1kTDData
856{
857 uint64_t u64BufAddr; /**< Address of data buffer */
858 struct TDDCmd_st
859 {
860 /** The total length of data pointed to by this descriptor. */
861 unsigned u20DTALEN : 20;
862 /** The descriptor type - E1K_DTYP_DATA (1). */
863 unsigned u4DTYP : 4;
864 /** @name DCMD field, 8 bits (3.3.7.1).
865 * @{ */
866 /** End of packet. Note TSCTFC update. */
867 unsigned fEOP : 1;
868 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
869 unsigned fIFCS : 1;
870 /** Use the TSE context when set and the normal when clear. */
871 unsigned fTSE : 1;
872 /** Report status (dw3.STA). */
873 unsigned fRS : 1;
874 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
875 unsigned fRPS : 1;
876 /** Descriptor extension, must be set for this descriptor type. */
877 unsigned fDEXT : 1;
878 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
879 * Insert dw3.SPECIAL after ethernet header. */
880 unsigned fVLE : 1;
881 /** Interrupt delay enable. */
882 unsigned fIDE : 1;
883 /** @} */
884 } cmd;
885 struct TDDDw3_st
886 {
887 /** @name STA field (3.3.7.2)
888 * @{ */
889 unsigned fDD : 1; /**< Descriptor done. */
890 unsigned fEC : 1; /**< Excess collision. */
891 unsigned fLC : 1; /**< Late collision. */
892 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
893 unsigned fTURSV : 1;
894 /** @} */
895 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
896 /** @name POPTS (Packet Option) field (3.3.7.3)
897 * @{ */
898 unsigned fIXSM : 1; /**< Insert IP checksum. */
899 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
900 unsigned u6RSV : 6; /**< Reserved, MBZ. */
901 /** @} */
902 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
903 * Requires fEOP, fVLE and CTRL.VME to be set.
904 * @{ */
905 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
906 /** @} */
907 } dw3;
908};
909typedef struct E1kTDData E1KTXDAT;
910
911union E1kTxDesc
912{
913 struct E1kTDLegacy legacy;
914 struct E1kTDContext context;
915 struct E1kTDData data;
916};
917typedef union E1kTxDesc E1KTXDESC;
918AssertCompileSize(E1KTXDESC, 16);
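#if 0 /* Illustration only, not part of the original source: a fetched TX descriptor can
       * be classified with the DEXT bit and the DTYP field, matching the E1K_DTYP_*
       * values above; DEXT occupies the same bit position in all three layouts. */
static int e1kExampleGetDescType(E1KTXDESC const *pDesc)
{
    if (!pDesc->legacy.cmd.fDEXT)
        return E1K_DTYP_LEGACY;       /* legacy descriptor, DEXT clear */
    return pDesc->context.dw2.u4DTYP; /* E1K_DTYP_CONTEXT or E1K_DTYP_DATA */
}
#endif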
919
920#define RA_CTL_AS 0x0003
921#define RA_CTL_AV 0x8000
922
923union E1kRecAddr
924{
925 uint32_t au32[32];
926 struct RAArray
927 {
928 uint8_t addr[6];
929 uint16_t ctl;
930 } array[16];
931};
932typedef struct E1kRecAddr::RAArray E1KRAELEM;
933typedef union E1kRecAddr E1KRA;
934AssertCompileSize(E1KRA, 8*16);
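#if 0 /* Illustration only, not part of the original source (assumes a local 'pThis' and
       * a destination MAC 'pDstMac'): a unicast frame passes the receive address filter
       * if it matches an entry whose Address Valid bit is set. */
    E1KRAELEM const *pRa = &pThis->aRecAddr.array[0];
    if (   (pRa->ctl & RA_CTL_AV)
        && memcmp(pRa->addr, pDstMac, sizeof(pRa->addr)) == 0)
    {
        /* Accept the frame. */
    }
#endif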
935
936#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
937#define E1K_IP_DF UINT16_C(0x4000) /**< don't fragment flag */
938#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
939#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
940
941/** @todo use+extend RTNETIPV4 */
942struct E1kIpHeader
943{
944 /* type of service / version / header length */
945 uint16_t tos_ver_hl;
946 /* total length */
947 uint16_t total_len;
948 /* identification */
949 uint16_t ident;
950 /* fragment offset field */
951 uint16_t offset;
952 /* time to live / protocol */
953 uint16_t ttl_proto;
954 /* checksum */
955 uint16_t chksum;
956 /* source IP address */
957 uint32_t src;
958 /* destination IP address */
959 uint32_t dest;
960};
961AssertCompileSize(struct E1kIpHeader, 20);
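#if 0 /* Illustration only, not part of the original source ('hdr' is a hypothetical
       * E1kIpHeader read straight from a frame, i.e. in network byte order): a datagram
       * is a fragment if MF is set or the fragment offset is non-zero. */
    uint16_t uOffField = ntohs(hdr.offset);
    bool fFragment = (uOffField & E1K_IP_MF) || (uOffField & E1K_IP_OFFMASK);
#endif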
962
963#define E1K_TCP_FIN UINT16_C(0x01)
964#define E1K_TCP_SYN UINT16_C(0x02)
965#define E1K_TCP_RST UINT16_C(0x04)
966#define E1K_TCP_PSH UINT16_C(0x08)
967#define E1K_TCP_ACK UINT16_C(0x10)
968#define E1K_TCP_URG UINT16_C(0x20)
969#define E1K_TCP_ECE UINT16_C(0x40)
970#define E1K_TCP_CWR UINT16_C(0x80)
971#define E1K_TCP_FLAGS UINT16_C(0x3f)
972
973/** @todo use+extend RTNETTCP */
974struct E1kTcpHeader
975{
976 uint16_t src;
977 uint16_t dest;
978 uint32_t seqno;
979 uint32_t ackno;
980 uint16_t hdrlen_flags;
981 uint16_t wnd;
982 uint16_t chksum;
983 uint16_t urgp;
984};
985AssertCompileSize(struct E1kTcpHeader, 20);
986
987
988#ifdef E1K_WITH_TXD_CACHE
989/** The current Saved state version. */
990# define E1K_SAVEDSTATE_VERSION 4
991/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
992# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
993#else /* !E1K_WITH_TXD_CACHE */
994/** The current Saved state version. */
995# define E1K_SAVEDSTATE_VERSION 3
996#endif /* !E1K_WITH_TXD_CACHE */
997/** Saved state version for VirtualBox 4.1 and earlier.
998 * These did not include VLAN tag fields. */
999#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
1000/** Saved state version for VirtualBox 3.0 and earlier.
1001 * This did not include the configuration part nor the E1kEEPROM. */
1002#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
1003
1004/**
1005 * Device state structure.
1006 *
1007 * Holds the current state of the device.
1008 *
1009 * @implements PDMINETWORKDOWN
1010 * @implements PDMINETWORKCONFIG
1011 * @implements PDMILEDPORTS
1012 */
1013struct E1kState_st
1014{
1015 char szPrf[8]; /**< Log prefix, e.g. E1000#1. */
1016 PDMIBASE IBase;
1017 PDMINETWORKDOWN INetworkDown;
1018 PDMINETWORKCONFIG INetworkConfig;
1019 PDMILEDPORTS ILeds; /**< LED interface */
1020 R3PTRTYPE(PPDMIBASE) pDrvBase; /**< Attached network driver. */
1021 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1022
1023 PPDMDEVINSR3 pDevInsR3; /**< Device instance - R3. */
1024 R3PTRTYPE(PPDMQUEUE) pTxQueueR3; /**< Transmit queue - R3. */
1025 R3PTRTYPE(PPDMQUEUE) pCanRxQueueR3; /**< Rx wakeup signaller - R3. */
1026 PPDMINETWORKUPR3 pDrvR3; /**< Attached network driver - R3. */
1027 PTMTIMERR3 pRIDTimerR3; /**< Receive Interrupt Delay Timer - R3. */
1028 PTMTIMERR3 pRADTimerR3; /**< Receive Absolute Delay Timer - R3. */
1029 PTMTIMERR3 pTIDTimerR3; /**< Transmit Interrupt Delay Timer - R3. */
1030 PTMTIMERR3 pTADTimerR3; /**< Transmit Absolute Delay Timer - R3. */
1031 PTMTIMERR3 pTXDTimerR3; /**< Transmit Delay Timer - R3. */
1032 PTMTIMERR3 pIntTimerR3; /**< Late Interrupt Timer - R3. */
1033 PTMTIMERR3 pLUTimerR3; /**< Link Up(/Restore) Timer. */
1034 /** The scatter / gather buffer used for the current outgoing packet - R3. */
1035 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1036
1037 PPDMDEVINSR0 pDevInsR0; /**< Device instance - R0. */
1038 R0PTRTYPE(PPDMQUEUE) pTxQueueR0; /**< Transmit queue - R0. */
1039 R0PTRTYPE(PPDMQUEUE) pCanRxQueueR0; /**< Rx wakeup signaller - R0. */
1040 PPDMINETWORKUPR0 pDrvR0; /**< Attached network driver - R0. */
1041 PTMTIMERR0 pRIDTimerR0; /**< Receive Interrupt Delay Timer - R0. */
1042 PTMTIMERR0 pRADTimerR0; /**< Receive Absolute Delay Timer - R0. */
1043 PTMTIMERR0 pTIDTimerR0; /**< Transmit Interrupt Delay Timer - R0. */
1044 PTMTIMERR0 pTADTimerR0; /**< Transmit Absolute Delay Timer - R0. */
1045 PTMTIMERR0 pTXDTimerR0; /**< Transmit Delay Timer - R0. */
1046 PTMTIMERR0 pIntTimerR0; /**< Late Interrupt Timer - R0. */
1047 PTMTIMERR0 pLUTimerR0; /**< Link Up(/Restore) Timer - R0. */
1048 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1049 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1050
1051 PPDMDEVINSRC pDevInsRC; /**< Device instance - RC. */
1052 RCPTRTYPE(PPDMQUEUE) pTxQueueRC; /**< Transmit queue - RC. */
1053 RCPTRTYPE(PPDMQUEUE) pCanRxQueueRC; /**< Rx wakeup signaller - RC. */
1054 PPDMINETWORKUPRC pDrvRC; /**< Attached network driver - RC. */
1055 PTMTIMERRC pRIDTimerRC; /**< Receive Interrupt Delay Timer - RC. */
1056 PTMTIMERRC pRADTimerRC; /**< Receive Absolute Delay Timer - RC. */
1057 PTMTIMERRC pTIDTimerRC; /**< Transmit Interrupt Delay Timer - RC. */
1058 PTMTIMERRC pTADTimerRC; /**< Transmit Absolute Delay Timer - RC. */
1059 PTMTIMERRC pTXDTimerRC; /**< Transmit Delay Timer - RC. */
1060 PTMTIMERRC pIntTimerRC; /**< Late Interrupt Timer - RC. */
1061 PTMTIMERRC pLUTimerRC; /**< Link Up(/Restore) Timer - RC. */
1062 /** The scatter / gather buffer used for the current outgoing packet - RC. */
1063 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1064 RTRCPTR RCPtrAlignment;
1065
1066#if HC_ARCH_BITS != 32
1067 uint32_t Alignment1;
1068#endif
1069 PDMCRITSECT cs; /**< Critical section - what is it protecting? */
1070 PDMCRITSECT csRx; /**< RX Critical section. */
1071#ifdef E1K_WITH_TX_CS
1072 PDMCRITSECT csTx; /**< TX Critical section. */
1073#endif /* E1K_WITH_TX_CS */
1074 /** Base address of memory-mapped registers. */
1075 RTGCPHYS addrMMReg;
1076 /** MAC address obtained from the configuration. */
1077 RTMAC macConfigured;
1078 /** Base port of I/O space region. */
1079 RTIOPORT IOPortBase;
1080 /** EMT: */
1081 PDMPCIDEV pciDevice;
1082 /** EMT: Last time the interrupt was acknowledged. */
1083 uint64_t u64AckedAt;
1084 /** All: Used for eliminating spurious interrupts. */
1085 bool fIntRaised;
1086 /** EMT: false if the cable is disconnected by the GUI. */
1087 bool fCableConnected;
1088 /** EMT: */
1089 bool fR0Enabled;
1090 /** EMT: */
1091 bool fRCEnabled;
1092 /** EMT: Compute Ethernet CRC for RX packets. */
1093 bool fEthernetCRC;
1094 /** All: throttle interrupts. */
1095 bool fItrEnabled;
1096 /** All: throttle RX interrupts. */
1097 bool fItrRxEnabled;
1098 /** All: Delay TX interrupts using TIDV/TADV. */
1099 bool fTidEnabled;
1100 /** Link up delay (in milliseconds). */
1101 uint32_t cMsLinkUpDelay;
1102
1103 /** All: Device register storage. */
1104 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1105 /** TX/RX: Status LED. */
1106 PDMLED led;
1107 /** TX/RX: Number of the packet being sent/received, to show in the debug log. */
1108 uint32_t u32PktNo;
1109
1110 /** EMT: Offset of the register to be read via IO. */
1111 uint32_t uSelectedReg;
1112 /** EMT: Multicast Table Array. */
1113 uint32_t auMTA[128];
1114 /** EMT: Receive Address registers. */
1115 E1KRA aRecAddr;
1116 /** EMT: VLAN filter table array. */
1117 uint32_t auVFTA[128];
1118 /** EMT: Receive buffer size. */
1119 uint16_t u16RxBSize;
1120 /** EMT: Locked state -- no state alteration possible. */
1121 bool fLocked;
1122 /** EMT: */
1123 bool fDelayInts;
1124 /** All: */
1125 bool fIntMaskUsed;
1126
1127 /** N/A: */
1128 bool volatile fMaybeOutOfSpace;
1129 /** EMT: Gets signalled when more RX descriptors become available. */
1130 RTSEMEVENT hEventMoreRxDescAvail;
1131#ifdef E1K_WITH_RXD_CACHE
1132 /** RX: Fetched RX descriptors. */
1133 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1134 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1135 /** RX: Actual number of fetched RX descriptors. */
1136 uint32_t nRxDFetched;
1137 /** RX: Index in cache of RX descriptor being processed. */
1138 uint32_t iRxDCurrent;
1139#endif /* E1K_WITH_RXD_CACHE */
1140
1141 /** TX: Context used for TCP segmentation packets. */
1142 E1KTXCTX contextTSE;
1143 /** TX: Context used for ordinary packets. */
1144 E1KTXCTX contextNormal;
1145#ifdef E1K_WITH_TXD_CACHE
1146 /** TX: Fetched TX descriptors. */
1147 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1148 /** TX: Actual number of fetched TX descriptors. */
1149 uint8_t nTxDFetched;
1150 /** TX: Index in cache of TX descriptor being processed. */
1151 uint8_t iTxDCurrent;
1152 /** TX: Will this frame be sent as GSO. */
1153 bool fGSO;
1154 /** Alignment padding. */
1155 bool fReserved;
1156 /** TX: Number of bytes in next packet. */
1157 uint32_t cbTxAlloc;
1158
1159#endif /* E1K_WITH_TXD_CACHE */
1160 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1161 * applicable to the current TSE mode. */
1162 PDMNETWORKGSO GsoCtx;
1163 /** Scratch space for holding the loopback / fallback scatter / gather
1164 * descriptor. */
1165 union
1166 {
1167 PDMSCATTERGATHER Sg;
1168 uint8_t padding[8 * sizeof(RTUINTPTR)];
1169 } uTxFallback;
1170 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1171 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1172 /** TX: Number of bytes assembled in TX packet buffer. */
1173 uint16_t u16TxPktLen;
1174 /** TX: False will force segmentation in e1000 instead of sending frames as GSO. */
1175 bool fGSOEnabled;
1176 /** TX: IP checksum has to be inserted if true. */
1177 bool fIPcsum;
1178 /** TX: TCP/UDP checksum has to be inserted if true. */
1179 bool fTCPcsum;
1180 /** TX: VLAN tag has to be inserted if true. */
1181 bool fVTag;
1182 /** TX: TCI part of VLAN tag to be inserted. */
1183 uint16_t u16VTagTCI;
1184 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1185 uint32_t u32PayRemain;
1186 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1187 uint16_t u16HdrRemain;
1188 /** TX TSE fallback: Flags from template header. */
1189 uint16_t u16SavedFlags;
1190 /** TX TSE fallback: Partial checksum from template header. */
1191 uint32_t u32SavedCsum;
1192 /** ?: Emulated controller type. */
1193 E1KCHIP eChip;
1194
1195 /** EMT: EEPROM emulation */
1196 E1kEEPROM eeprom;
1197 /** EMT: Physical interface emulation. */
1198 PHY phy;
1199
1200#if 0
1201 /** Alignment padding. */
1202 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1203#endif
1204
1205 STAMCOUNTER StatReceiveBytes;
1206 STAMCOUNTER StatTransmitBytes;
1207#if defined(VBOX_WITH_STATISTICS)
1208 STAMPROFILEADV StatMMIOReadRZ;
1209 STAMPROFILEADV StatMMIOReadR3;
1210 STAMPROFILEADV StatMMIOWriteRZ;
1211 STAMPROFILEADV StatMMIOWriteR3;
1212 STAMPROFILEADV StatEEPROMRead;
1213 STAMPROFILEADV StatEEPROMWrite;
1214 STAMPROFILEADV StatIOReadRZ;
1215 STAMPROFILEADV StatIOReadR3;
1216 STAMPROFILEADV StatIOWriteRZ;
1217 STAMPROFILEADV StatIOWriteR3;
1218 STAMPROFILEADV StatLateIntTimer;
1219 STAMCOUNTER StatLateInts;
1220 STAMCOUNTER StatIntsRaised;
1221 STAMCOUNTER StatIntsPrevented;
1222 STAMPROFILEADV StatReceive;
1223 STAMPROFILEADV StatReceiveCRC;
1224 STAMPROFILEADV StatReceiveFilter;
1225 STAMPROFILEADV StatReceiveStore;
1226 STAMPROFILEADV StatTransmitRZ;
1227 STAMPROFILEADV StatTransmitR3;
1228 STAMPROFILE StatTransmitSendRZ;
1229 STAMPROFILE StatTransmitSendR3;
1230 STAMPROFILE StatRxOverflow;
1231 STAMCOUNTER StatRxOverflowWakeup;
1232 STAMCOUNTER StatTxDescCtxNormal;
1233 STAMCOUNTER StatTxDescCtxTSE;
1234 STAMCOUNTER StatTxDescLegacy;
1235 STAMCOUNTER StatTxDescData;
1236 STAMCOUNTER StatTxDescTSEData;
1237 STAMCOUNTER StatTxPathFallback;
1238 STAMCOUNTER StatTxPathGSO;
1239 STAMCOUNTER StatTxPathRegular;
1240 STAMCOUNTER StatPHYAccesses;
1241 STAMCOUNTER aStatRegWrites[E1K_NUM_OF_REGS];
1242 STAMCOUNTER aStatRegReads[E1K_NUM_OF_REGS];
1243#endif /* VBOX_WITH_STATISTICS */
1244
1245#ifdef E1K_INT_STATS
1246 /* Internal stats */
1247 uint64_t u64ArmedAt;
1248 uint64_t uStatMaxTxDelay;
1249 uint32_t uStatInt;
1250 uint32_t uStatIntTry;
1251 uint32_t uStatIntLower;
1252 uint32_t uStatIntDly;
1253 int32_t iStatIntLost;
1254 int32_t iStatIntLostOne;
1255 uint32_t uStatDisDly;
1256 uint32_t uStatIntSkip;
1257 uint32_t uStatIntLate;
1258 uint32_t uStatIntMasked;
1259 uint32_t uStatIntEarly;
1260 uint32_t uStatIntRx;
1261 uint32_t uStatIntTx;
1262 uint32_t uStatIntICS;
1263 uint32_t uStatIntRDTR;
1264 uint32_t uStatIntRXDMT0;
1265 uint32_t uStatIntTXQE;
1266 uint32_t uStatTxNoRS;
1267 uint32_t uStatTxIDE;
1268 uint32_t uStatTxDelayed;
1269 uint32_t uStatTxDelayExp;
1270 uint32_t uStatTAD;
1271 uint32_t uStatTID;
1272 uint32_t uStatRAD;
1273 uint32_t uStatRID;
1274 uint32_t uStatRxFrm;
1275 uint32_t uStatTxFrm;
1276 uint32_t uStatDescCtx;
1277 uint32_t uStatDescDat;
1278 uint32_t uStatDescLeg;
1279 uint32_t uStatTx1514;
1280 uint32_t uStatTx2962;
1281 uint32_t uStatTx4410;
1282 uint32_t uStatTx5858;
1283 uint32_t uStatTx7306;
1284 uint32_t uStatTx8754;
1285 uint32_t uStatTx16384;
1286 uint32_t uStatTx32768;
1287 uint32_t uStatTxLarge;
1288 uint32_t uStatAlign;
1289#endif /* E1K_INT_STATS */
1290};
1291typedef struct E1kState_st E1KSTATE;
1292/** Pointer to the E1000 device state. */
1293typedef E1KSTATE *PE1KSTATE;
1294
1295#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1296
1297/* Forward declarations ******************************************************/
1298static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread);
1299
1300static int e1kRegReadUnimplemented (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1301static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1302static int e1kRegReadAutoClear (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1303static int e1kRegReadDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1304static int e1kRegWriteDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1305#if 0 /* unused */
1306static int e1kRegReadCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1307#endif
1308static int e1kRegWriteCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1309static int e1kRegReadEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1310static int e1kRegWriteEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1311static int e1kRegWriteEERD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1312static int e1kRegWriteMDIC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1313static int e1kRegReadICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1314static int e1kRegWriteICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1315static int e1kRegWriteICS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1316static int e1kRegWriteIMS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1317static int e1kRegWriteIMC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1318static int e1kRegWriteRCTL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1319static int e1kRegWritePBA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1320static int e1kRegWriteRDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1321static int e1kRegWriteRDTR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1322static int e1kRegWriteTDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1323static int e1kRegReadMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1324static int e1kRegWriteMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1325static int e1kRegReadRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1326static int e1kRegWriteRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1327static int e1kRegReadVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1328static int e1kRegWriteVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1329
1330/**
1331 * Register map table.
1332 *
1333 * Override pfnRead and pfnWrite to get register-specific behavior.
1334 */
1335static const struct E1kRegMap_st
1336{
1337 /** Register offset in the register space. */
1338 uint32_t offset;
1339 /** Size in bytes. Registers of size > 4 are in fact tables. */
1340 uint32_t size;
1341 /** Readable bits. */
1342 uint32_t readable;
1343 /** Writable bits. */
1344 uint32_t writable;
1345 /** Read callback. */
1346 int (*pfnRead)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1347 /** Write callback. */
1348 int (*pfnWrite)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1349 /** Abbreviated name. */
1350 const char *abbrev;
1351 /** Full name. */
1352 const char *name;
1353} g_aE1kRegMap[E1K_NUM_OF_REGS] =
1354{
1355 /* offset size read mask write mask read callback write callback abbrev full name */
1356 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1357 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1358 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1359 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1360 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1361 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1362 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1363 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1364 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1365 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1366 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1367 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1368 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1369 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1370 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1371 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1372 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1373 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1374 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1375 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1376 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1377 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1378 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1379 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1380 { 0x00e00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LEDCTL" , "LED Control" },
1381 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1382 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1383 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1384 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1385 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1386 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1387 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1388 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1389 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1390 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1391 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1392 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1393 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1394 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1395 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1396 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1397 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1398 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1399 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1400 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1401 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1402 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1403 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1404 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1405 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1406 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1407 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1408 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1409 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1410 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1411 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1412 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1413 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1414 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1415 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1416 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1417 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1418 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1419 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1420 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1421 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1422 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1423 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1424 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1425 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1426 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1427 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1428 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1429 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1430 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1431 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1432 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1433 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1434 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1435 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1436 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1437 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1438 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1439 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1440 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1441 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1442 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1443 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1444 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1445 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1446 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1447 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1448 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1449 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1450 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1451 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1452 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1453 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1454 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1455 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1456 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1457 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1458 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1459 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1460 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1461 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1462 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1463 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1464 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1465 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1466 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1467 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1468 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1469 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1470 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1471 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1472 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1473 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1474 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1475 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1476 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1477 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1478 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1479 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1480 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1481 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1482 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1483 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1484 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1485 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1486 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1487 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1488 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
1489 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
1490 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
1491};
1492
1493#ifdef LOG_ENABLED
1494
1495/**
1496 * Convert a U32 value to a hex string. Digits not selected by the mask are replaced with dots.
1497 *
1498 * @remarks The mask is applied per hex digit (nibble), not per bit; e.g. a mask
 *          of 000000FF keeps only the last two digits.
1499 *
1500 * @returns The buffer.
1501 *
1502 * @param u32 The word to convert into string.
1503 * @param mask Selects which digits to convert.
1504 * @param buf Where to put the result.
1505 */
1506static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1507{
1508 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1509 {
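        /* '0' + digit gives '0'..'9'; for digits above 9, '7' + digit gives 'A'..'F', since '7' == 'A' - 10. */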
1510 if (mask & 0xF)
1511 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1512 else
1513 *ptr = '.';
1514 }
1515 buf[8] = 0;
1516 return buf;
1517}
1518
1519/**
1520 * Returns timer name for debug purposes.
1521 *
1522 * @returns The timer name.
1523 *
1524 * @param pThis The device state structure.
1525 * @param pTimer The timer to get the name for.
1526 */
1527DECLINLINE(const char *) e1kGetTimerName(PE1KSTATE pThis, PTMTIMER pTimer)
1528{
1529 if (pTimer == pThis->CTX_SUFF(pTIDTimer))
1530 return "TID";
1531 if (pTimer == pThis->CTX_SUFF(pTADTimer))
1532 return "TAD";
1533 if (pTimer == pThis->CTX_SUFF(pRIDTimer))
1534 return "RID";
1535 if (pTimer == pThis->CTX_SUFF(pRADTimer))
1536 return "RAD";
1537 if (pTimer == pThis->CTX_SUFF(pIntTimer))
1538 return "Int";
1539 if (pTimer == pThis->CTX_SUFF(pTXDTimer))
1540 return "TXD";
1541 if (pTimer == pThis->CTX_SUFF(pLUTimer))
1542 return "LinkUp";
1543 return "unknown";
1544}
1545
1546 #endif /* LOG_ENABLED */
1547
1548/**
1549 * Arm a timer.
1550 *
1551 * @param pThis Pointer to the device state structure.
1552 * @param pTimer Pointer to the timer.
1553 * @param uExpireIn Expiration interval in microseconds.
1554 */
1555DECLINLINE(void) e1kArmTimer(PE1KSTATE pThis, PTMTIMER pTimer, uint32_t uExpireIn)
1556{
1557 if (pThis->fLocked)
1558 return;
1559
1560 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1561 pThis->szPrf, e1kGetTimerName(pThis, pTimer), uExpireIn));
1562 TMTimerSetMicro(pTimer, uExpireIn);
1563}
1564
1565#ifdef IN_RING3
1566/**
1567 * Cancel a timer.
1568 *
1569 * @param pThis Pointer to the device state structure.
1570 * @param pTimer Pointer to the timer.
1571 */
1572DECLINLINE(void) e1kCancelTimer(PE1KSTATE pThis, PTMTIMER pTimer)
1573{
1574 E1kLog2(("%s Stopping %s timer...\n",
1575 pThis->szPrf, e1kGetTimerName(pThis, pTimer)));
1576 int rc = TMTimerStop(pTimer);
1577 if (RT_FAILURE(rc))
1578 E1kLog2(("%s e1kCancelTimer: TMTimerStop() failed with %Rrc\n",
1579 pThis->szPrf, rc));
1580 RT_NOREF1(pThis);
1581}
1582#endif /* IN_RING3 */
1583
1584#define e1kCsEnter(ps, rc) PDMCritSectEnter(&ps->cs, rc)
1585#define e1kCsLeave(ps) PDMCritSectLeave(&ps->cs)
1586
1587#define e1kCsRxEnter(ps, rc) PDMCritSectEnter(&ps->csRx, rc)
1588#define e1kCsRxLeave(ps) PDMCritSectLeave(&ps->csRx)
1589#define e1kCsRxIsOwner(ps) PDMCritSectIsOwner(&ps->csRx)
1590
1591#ifndef E1K_WITH_TX_CS
1592# define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1593# define e1kCsTxLeave(ps) do { } while (0)
1594#else /* E1K_WITH_TX_CS */
1595# define e1kCsTxEnter(ps, rc) PDMCritSectEnter(&ps->csTx, rc)
1596# define e1kCsTxLeave(ps) PDMCritSectLeave(&ps->csTx)
1597#endif /* E1K_WITH_TX_CS */
1598
1599#ifdef IN_RING3
1600
1601/**
1602 * Wakeup the RX thread.
1603 */
1604static void e1kWakeupReceive(PPDMDEVINS pDevIns)
1605{
1606 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
1607 if ( pThis->fMaybeOutOfSpace
1608 && pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
1609 {
1610 STAM_COUNTER_INC(&pThis->StatRxOverflowWakeup);
1611 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", pThis->szPrf));
1612 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
1613 }
1614}
1615
1616/**
1617 * Hardware reset. Revert all registers to initial values.
1618 *
1619 * @param pThis The device state structure.
1620 */
1621static void e1kHardReset(PE1KSTATE pThis)
1622{
1623 E1kLog(("%s Hard reset triggered\n", pThis->szPrf));
1624 memset(pThis->auRegs, 0, sizeof(pThis->auRegs));
1625 memset(pThis->aRecAddr.au32, 0, sizeof(pThis->aRecAddr.au32));
1626#ifdef E1K_INIT_RA0
1627 memcpy(pThis->aRecAddr.au32, pThis->macConfigured.au8,
1628 sizeof(pThis->macConfigured.au8));
1629 pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;
1630#endif /* E1K_INIT_RA0 */
1631 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1632 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1633 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1634 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1635 Assert(GET_BITS(RCTL, BSIZE) == 0);
1636 pThis->u16RxBSize = 2048;
1637
1638 /* Reset promiscuous mode */
1639 if (pThis->pDrvR3)
1640 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, false);
1641
1642#ifdef E1K_WITH_TXD_CACHE
1643 int rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
1644 if (RT_LIKELY(rc == VINF_SUCCESS))
1645 {
1646 pThis->nTxDFetched = 0;
1647 pThis->iTxDCurrent = 0;
1648 pThis->fGSO = false;
1649 pThis->cbTxAlloc = 0;
1650 e1kCsTxLeave(pThis);
1651 }
1652#endif /* E1K_WITH_TXD_CACHE */
1653#ifdef E1K_WITH_RXD_CACHE
1654 if (RT_LIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1655 {
1656 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
1657 e1kCsRxLeave(pThis);
1658 }
1659#endif /* E1K_WITH_RXD_CACHE */
1660}
1661
1662#endif /* IN_RING3 */
1663
1664/**
1665 * Compute Internet checksum.
1666 *
1667 * @remarks Refer to http://www.netfor2.com/checksum.html for a short intro.
1668 *
1669 * @param pvBuf The buffer to checksum.
1670 * @param cb The number of bytes to checksum.
1673 *
1674 * @return The 1's complement of the 1's complement sum.
1675 *
1676 * @thread E1000_TX
1677 */
1678static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1679{
1680 uint32_t csum = 0;
1681 uint16_t *pu16 = (uint16_t *)pvBuf;
1682
1683 while (cb > 1)
1684 {
1685 csum += *pu16++;
1686 cb -= 2;
1687 }
1688 if (cb)
1689 csum += *(uint8_t*)pu16;
1690 while (csum >> 16)
1691 csum = (csum >> 16) + (csum & 0xFFFF);
1692 return ~csum;
1693}
1694
1695/**
1696 * Dump a packet to debug log.
1697 *
1698 * @param pThis The device state structure.
1699 * @param cpPacket The packet.
1700 * @param cb The size of the packet.
1701 * @param pszText A string denoting direction of packet transfer.
1702 * @thread E1000_TX
1703 */
1704DECLINLINE(void) e1kPacketDump(PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *pszText)
1705{
1706#ifdef DEBUG
1707 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1708 {
1709 Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
1710 pThis->szPrf, pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cb));
1711 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1712 {
1713 Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
1714 pThis->szPrf, cpPacket+14+8, cpPacket+14+24));
1715 if (*(cpPacket+14+6) == 0x6)
1716 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1717 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1718 }
1719 else if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x800)
1720 {
1721 Log4(("%s --- IPv4: %RTnaipv4 => %RTnaipv4\n",
1722 pThis->szPrf, *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16)));
1723 if (*(cpPacket+14+6) == 0x6)
1724 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1725 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1726 }
1727 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1728 e1kCsLeave(pThis);
1729 }
1730#else
1731 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1732 {
1733 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1734 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
1735 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cpPacket+14+8, cpPacket+14+24,
1736 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1737 else
1738 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
1739 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket,
1740 *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16),
1741 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1742 e1kCsLeave(pThis);
1743 }
1744 RT_NOREF2(cb, pszText);
1745#endif
1746}
1747
1748/**
1749 * Determine the type of transmit descriptor.
1750 *
1751 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1752 *
1753 * @param pDesc Pointer to descriptor union.
1754 * @thread E1000_TX
1755 */
1756DECLINLINE(int) e1kGetDescType(E1KTXDESC *pDesc)
1757{
1758 if (pDesc->legacy.cmd.fDEXT)
1759 return pDesc->context.dw2.u4DTYP;
1760 return E1K_DTYP_LEGACY;
1761}
1762
1763
1764#if defined(E1K_WITH_RXD_CACHE) && defined(IN_RING3) /* currently only used in ring-3 due to stack space requirements of the caller */
1765/**
1766 * Dump receive descriptor to debug log.
1767 *
1768 * @param pThis The device state structure.
1769 * @param pDesc Pointer to the descriptor.
1770 * @thread E1000_RX
1771 */
1772static void e1kPrintRDesc(PE1KSTATE pThis, E1KRXDESC *pDesc)
1773{
1774 RT_NOREF2(pThis, pDesc);
1775 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", pThis->szPrf, pDesc->u16Length));
1776 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1777 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1778 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1779 pDesc->status.fPIF ? "PIF" : "pif",
1780 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1781 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1782 pDesc->status.fVP ? "VP" : "vp",
1783 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1784 pDesc->status.fEOP ? "EOP" : "eop",
1785 pDesc->status.fDD ? "DD" : "dd",
1786 pDesc->status.fRXE ? "RXE" : "rxe",
1787 pDesc->status.fIPE ? "IPE" : "ipe",
1788 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1789 pDesc->status.fCE ? "CE" : "ce",
1790 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1791 E1K_SPEC_VLAN(pDesc->status.u16Special),
1792 E1K_SPEC_PRI(pDesc->status.u16Special)));
1793}
1794#endif /* E1K_WITH_RXD_CACHE && IN_RING3 */
1795
1796/**
1797 * Dump transmit descriptor to debug log.
1798 *
1799 * @param pThis The device state structure.
1800 * @param pDesc Pointer to descriptor union.
1801 * @param pszDir A string denoting direction of descriptor transfer
1802 * @thread E1000_TX
1803 */
1804static void e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, const char *pszDir,
1805 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
1806{
1807 RT_NOREF4(pThis, pDesc, pszDir, uLevel);
1808
1809 /*
1810 * Unfortunately we cannot use our format handler here, as we want R0 logging
1811 * as well.
1812 */
1813 switch (e1kGetDescType(pDesc))
1814 {
1815 case E1K_DTYP_CONTEXT:
1816 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
1817 pThis->szPrf, pszDir, pszDir));
1818 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
1819 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
1820 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
1821 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
1822 pDesc->context.dw2.fIDE ? " IDE":"",
1823 pDesc->context.dw2.fRS ? " RS" :"",
1824 pDesc->context.dw2.fTSE ? " TSE":"",
1825 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
1826 pDesc->context.dw2.fTCP ? "TCP":"UDP",
1827 pDesc->context.dw2.u20PAYLEN,
1828 pDesc->context.dw3.u8HDRLEN,
1829 pDesc->context.dw3.u16MSS,
1830 pDesc->context.dw3.fDD?"DD":""));
1831 break;
1832 case E1K_DTYP_DATA:
1833 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
1834 pThis->szPrf, pszDir, pDesc->data.cmd.u20DTALEN, pszDir));
1835 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1836 pDesc->data.u64BufAddr,
1837 pDesc->data.cmd.u20DTALEN));
1838 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
1839 pDesc->data.cmd.fIDE ? " IDE" :"",
1840 pDesc->data.cmd.fVLE ? " VLE" :"",
1841 pDesc->data.cmd.fRPS ? " RPS" :"",
1842 pDesc->data.cmd.fRS ? " RS" :"",
1843 pDesc->data.cmd.fTSE ? " TSE" :"",
1844 pDesc->data.cmd.fIFCS? " IFCS":"",
1845 pDesc->data.cmd.fEOP ? " EOP" :"",
1846 pDesc->data.dw3.fDD ? " DD" :"",
1847 pDesc->data.dw3.fEC ? " EC" :"",
1848 pDesc->data.dw3.fLC ? " LC" :"",
1849 pDesc->data.dw3.fTXSM? " TXSM":"",
1850 pDesc->data.dw3.fIXSM? " IXSM":"",
1851 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
1852 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
1853 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
1854 break;
1855 case E1K_DTYP_LEGACY:
1856 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
1857 pThis->szPrf, pszDir, pDesc->legacy.cmd.u16Length, pszDir));
1858 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1859 pDesc->data.u64BufAddr,
1860 pDesc->legacy.cmd.u16Length));
1861 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
1862 pDesc->legacy.cmd.fIDE ? " IDE" :"",
1863 pDesc->legacy.cmd.fVLE ? " VLE" :"",
1864 pDesc->legacy.cmd.fRPS ? " RPS" :"",
1865 pDesc->legacy.cmd.fRS ? " RS" :"",
1866 pDesc->legacy.cmd.fIC ? " IC" :"",
1867 pDesc->legacy.cmd.fIFCS? " IFCS":"",
1868 pDesc->legacy.cmd.fEOP ? " EOP" :"",
1869 pDesc->legacy.dw3.fDD ? " DD" :"",
1870 pDesc->legacy.dw3.fEC ? " EC" :"",
1871 pDesc->legacy.dw3.fLC ? " LC" :"",
1872 pDesc->legacy.cmd.u8CSO,
1873 pDesc->legacy.dw3.u8CSS,
1874 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
1875 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
1876 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
1877 break;
1878 default:
1879 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
1880 pThis->szPrf, pszDir, pszDir));
1881 break;
1882 }
1883}
1884
1885/**
1886 * Raise an interrupt later.
1887 *
1888 * @param pThis The device state structure.
 * @param uNanoseconds The delay before raising the interrupt, in nanoseconds.
1889 */
1890inline void e1kPostponeInterrupt(PE1KSTATE pThis, uint64_t uNanoseconds)
1891{
1892 if (!TMTimerIsActive(pThis->CTX_SUFF(pIntTimer)))
1893 TMTimerSetNano(pThis->CTX_SUFF(pIntTimer), uNanoseconds);
1894}
1895
1896/**
1897 * Raise interrupt if not masked.
1898 *
1899 * @returns VBox status code.
 * @param pThis The device state structure.
 * @param rcBusy Status code to return when the critical section is busy.
 * @param u32IntCause Interrupt cause bit(s) to set in ICR.
1900 */
1901static int e1kRaiseInterrupt(PE1KSTATE pThis, int rcBusy, uint32_t u32IntCause = 0)
1902{
1903 int rc = e1kCsEnter(pThis, rcBusy);
1904 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1905 return rc;
1906
1907 E1K_INC_ISTAT_CNT(pThis->uStatIntTry);
1908 ICR |= u32IntCause;
1909 if (ICR & IMS)
1910 {
1911 if (pThis->fIntRaised)
1912 {
1913 E1K_INC_ISTAT_CNT(pThis->uStatIntSkip);
1914 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
1915 pThis->szPrf, ICR & IMS));
1916 }
1917 else
1918 {
1919 uint64_t tsNow = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
1920 if (!!ITR && tsNow - pThis->u64AckedAt < ITR * 256
1921 && pThis->fItrEnabled && (pThis->fItrRxEnabled || !(ICR & ICR_RXT0)))
1922 {
1923 E1K_INC_ISTAT_CNT(pThis->uStatIntEarly);
1924 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
1925 pThis->szPrf, (uint32_t)(tsNow - pThis->u64AckedAt), ITR * 256));
1926 e1kPostponeInterrupt(pThis, ITR * 256);
1927 }
1928 else
1929 {
1930
1931 /* Since we are delivering the interrupt now
1932 * there is no need to do it later -- stop the timer.
1933 */
1934 TMTimerStop(pThis->CTX_SUFF(pIntTimer));
1935 E1K_INC_ISTAT_CNT(pThis->uStatInt);
1936 STAM_COUNTER_INC(&pThis->StatIntsRaised);
1937 /* Got at least one unmasked interrupt cause */
1938 pThis->fIntRaised = true;
1939 /* Raise(1) INTA(0) */
1940 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
1941 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 1);
1942 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
1943 pThis->szPrf, ICR & IMS));
1944 }
1945 }
1946 }
1947 else
1948 {
1949 E1K_INC_ISTAT_CNT(pThis->uStatIntMasked);
1950 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
1951 pThis->szPrf, ICR, IMS));
1952 }
1953 e1kCsLeave(pThis);
1954 return VINF_SUCCESS;
1955}
1956
1957/**
1958 * Compute the physical address of the descriptor.
1959 *
1960 * @returns the physical address of the descriptor.
1961 *
1962 * @param baseHigh High-order 32 bits of descriptor table address.
1963 * @param baseLow Low-order 32 bits of descriptor table address.
1964 * @param idxDesc The descriptor index in the table.
1965 */
1966DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
1967{
1968 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
1969 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
1970}
1971
1972#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
1973/**
1974 * Advance the head pointer of the receive descriptor queue.
1975 *
1976 * @remarks RDH always points to the next available RX descriptor.
1977 *
1978 * @param pThis The device state structure.
1979 */
1980DECLINLINE(void) e1kAdvanceRDH(PE1KSTATE pThis)
1981{
1982 Assert(e1kCsRxIsOwner(pThis));
1983 //e1kCsEnter(pThis, RT_SRC_POS);
1984 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
1985 RDH = 0;
1986 /*
1987 * Compute current receive queue length and fire RXDMT0 interrupt
1988 * if we are low on receive buffers
1989 */
1990 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
1991 /*
1992 * The minimum threshold is controlled by RDMTS bits of RCTL:
1993 * 00 = 1/2 of RDLEN
1994 * 01 = 1/4 of RDLEN
1995 * 10 = 1/8 of RDLEN
1996 * 11 = reserved
1997 */
1998 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
1999 if (uRQueueLen <= uMinRQThreshold)
2000 {
2001 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
2002 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
2003 pThis->szPrf, RDH, RDT, uRQueueLen, uMinRQThreshold));
2004 E1K_INC_ISTAT_CNT(pThis->uStatIntRXDMT0);
2005 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXDMT0);
2006 }
2007 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
2008 pThis->szPrf, RDH, RDT, uRQueueLen));
2009 //e1kCsLeave(pThis);
2010}
2011#endif /* IN_RING3 */
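/*
 * Worked example (illustrative): the RXDMT0 threshold computed in e1kAdvanceRDH()
 * is RDLEN / sizeof(E1KRXDESC) / (2 << RDMTS). With RDLEN = 4096 (256 descriptors
 * of 16 bytes) and RCTL.RDMTS = 01b that is 256 / 4 = 64, i.e. the interrupt is
 * raised once 64 or fewer free RX descriptors remain.
 */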
2012
2013#ifdef E1K_WITH_RXD_CACHE
2014
2015/**
2016 * Return the number of RX descriptors that belong to the hardware.
2017 *
2018 * @returns the number of available descriptors in the RX ring.
2019 * @param pThis The device state structure.
2020 * @thread ???
2021 */
2022DECLINLINE(uint32_t) e1kGetRxLen(PE1KSTATE pThis)
2023{
2024 /*
2025 * Make sure RDT won't change during computation. EMT may modify RDT at
2026 * any moment.
2027 */
2028 uint32_t rdt = RDT;
2029 return (RDH > rdt ? RDLEN/sizeof(E1KRXDESC) : 0) + rdt - RDH;
2030}
2031
2032DECLINLINE(unsigned) e1kRxDInCache(PE1KSTATE pThis)
2033{
2034 return pThis->nRxDFetched > pThis->iRxDCurrent ?
2035 pThis->nRxDFetched - pThis->iRxDCurrent : 0;
2036}
2037
2038DECLINLINE(unsigned) e1kRxDIsCacheEmpty(PE1KSTATE pThis)
2039{
2040 return pThis->iRxDCurrent >= pThis->nRxDFetched;
2041}
2042
2043/**
2044 * Load receive descriptors from guest memory. The caller must own the Rx
2045 * critical section.
2046 *
2047 * We need two physical reads in case the tail has wrapped around the end of
2048 * the RX descriptor ring.
2049 *
2050 * @returns the actual number of descriptors fetched.
2051 * @param pThis The device state structure.
2054 * @thread EMT, RX
2055 */
2056DECLINLINE(unsigned) e1kRxDPrefetch(PE1KSTATE pThis)
2057{
2058 /* We've already loaded pThis->nRxDFetched descriptors past RDH. */
2059 unsigned nDescsAvailable = e1kGetRxLen(pThis) - e1kRxDInCache(pThis);
2060 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pThis->nRxDFetched);
2061 unsigned nDescsTotal = RDLEN / sizeof(E1KRXDESC);
2062 Assert(nDescsTotal != 0);
2063 if (nDescsTotal == 0)
2064 return 0;
2065 unsigned nFirstNotLoaded = (RDH + e1kRxDInCache(pThis)) % nDescsTotal;
2066 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
2067 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
2068 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
2069 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
2070 nFirstNotLoaded, nDescsInSingleRead));
2071 if (nDescsToFetch == 0)
2072 return 0;
2073 E1KRXDESC* pFirstEmptyDesc = &pThis->aRxDescriptors[pThis->nRxDFetched];
2074 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
2075 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
2076 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
2077 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
2078 // unsigned i, j;
2079 // for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
2080 // {
2081 // pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
2082 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2083 // }
2084 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
2085 pThis->szPrf, nDescsInSingleRead,
2086 RDBAH, RDBAL + RDH * sizeof(E1KRXDESC),
2087 nFirstNotLoaded, RDLEN, RDH, RDT));
2088 if (nDescsToFetch > nDescsInSingleRead)
2089 {
2090 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
2091 ((uint64_t)RDBAH << 32) + RDBAL,
2092 pFirstEmptyDesc + nDescsInSingleRead,
2093 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
2094 // Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
2095 // for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
2096 // {
2097 // pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
2098 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2099 // }
2100 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
2101 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
2102 RDBAH, RDBAL));
2103 }
2104 pThis->nRxDFetched += nDescsToFetch;
2105 return nDescsToFetch;
2106}
2107
2108# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2109
2110/**
2111 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2112 * RX ring if the cache is empty.
2113 *
2114 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2115 * go out of sync with RDH which will cause trouble when EMT checks if the
2116 * cache is empty to do pre-fetch (see @bugref{6217}).
2117 *
2118 * @returns Pointer to the next available RX descriptor, or NULL if we are out
 *          of RX descriptors.
 * @param pThis The device state structure.
2119 * @thread RX
2120 */
2121DECLINLINE(E1KRXDESC*) e1kRxDGet(PE1KSTATE pThis)
2122{
2123 Assert(e1kCsRxIsOwner(pThis));
2124 /* Check the cache first. */
2125 if (pThis->iRxDCurrent < pThis->nRxDFetched)
2126 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2127 /* Cache is empty, reset it and check if we can fetch more. */
2128 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2129 if (e1kRxDPrefetch(pThis))
2130 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2131 /* Out of Rx descriptors. */
2132 return NULL;
2133}
2134
2135
2136/**
2137 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2138 * pointer. The descriptor gets written back to the RXD ring.
2139 *
2140 * @param pThis The device state structure.
2141 * @param pDesc The descriptor being "returned" to the RX ring.
2142 * @thread RX
2143 */
2144DECLINLINE(void) e1kRxDPut(PE1KSTATE pThis, E1KRXDESC* pDesc)
2145{
2146 Assert(e1kCsRxIsOwner(pThis));
2147 pThis->iRxDCurrent++;
2148 // Assert(pDesc >= pThis->aRxDescriptors);
2149 // Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2150 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2151 // uint32_t rdh = RDH;
2152 // Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
2153 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2154 e1kDescAddr(RDBAH, RDBAL, RDH),
2155 pDesc, sizeof(E1KRXDESC));
2156 e1kAdvanceRDH(pThis);
2157 e1kPrintRDesc(pThis, pDesc);
2158}
2159
2160/**
2161 * Store a fragment of a received packet at the specified address.
2162 *
2163 * @param pThis The device state structure.
2164 * @param pDesc The next available RX descriptor.
2165 * @param pvBuf The fragment.
2166 * @param cb The size of the fragment.
2167 */
2168static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2169{
2170 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2171 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2172 pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2173 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2174 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2175 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2176}
2177
2178# endif
2179
2180#else /* !E1K_WITH_RXD_CACHE */
2181
2182/**
2183 * Store a fragment of a received packet that fits into the next available RX
2184 * buffer.
2185 *
2186 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2187 *
2188 * @param pThis The device state structure.
2189 * @param pDesc The next available RX descriptor.
2190 * @param pvBuf The fragment.
2191 * @param cb The size of the fragment.
2192 */
2193static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2194{
2195 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2196 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2197 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2198 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2199 /* Write back the descriptor */
2200 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2201 e1kPrintRDesc(pThis, pDesc);
2202 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2203 /* Advance head */
2204 e1kAdvanceRDH(pThis);
2205 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
2206 if (pDesc->status.fEOP)
2207 {
2208 /* Complete packet has been stored -- it is time to let the guest know. */
2209#ifdef E1K_USE_RX_TIMERS
2210 if (RDTR)
2211 {
2212 /* Arm the timer to fire in RDTR usec (discard .024) */
2213 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2214 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2215 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2216 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2217 }
2218 else
2219 {
2220#endif
2221 /* 0 delay means immediate interrupt */
2222 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2223 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2224#ifdef E1K_USE_RX_TIMERS
2225 }
2226#endif
2227 }
2228 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2229}
2230
2231#endif /* !E1K_WITH_RXD_CACHE */
2232
2233/**
2234 * Returns true if it is a broadcast packet.
2235 *
2236 * @returns true if destination address indicates broadcast.
2237 * @param pvBuf The ethernet packet.
2238 */
2239DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2240{
2241 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2242 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2243}
2244
2245/**
2246 * Returns true if it is a multicast packet.
2247 *
2248 * @remarks returns true for broadcast packets as well.
2249 * @returns true if destination address indicates multicast.
2250 * @param pvBuf The ethernet packet.
2251 */
2252DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2253{
2254 return (*(char*)pvBuf) & 1;
2255}
2256
2257#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2258/**
2259 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2260 *
2261 * @remarks We emulate checksum offloading for major packet types only.
2262 *
2263 * @returns VBox status code.
2264 * @param pThis The device state structure.
2265 * @param pFrame The available data.
2266 * @param cb Number of bytes available in the buffer.
2267 * @param pStatus The receive descriptor status fields to update.
2268 */
2269static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2270{
2271 /** @todo
2272 * It is not safe to bypass checksum verification for packets coming
2273 * from the real wire. We are currently unable to tell where packets are
2274 * coming from, so we tell the driver to ignore our checksum flags
2275 * and do the verification in software.
2276 */
2277# if 0
2278 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2279
2280 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", pThis->szPrf, uEtherType));
2281
2282 switch (uEtherType)
2283 {
2284 case 0x800: /* IPv4 */
2285 {
2286 pStatus->fIXSM = false;
2287 pStatus->fIPCS = true;
2288 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2289 /* TCP/UDP checksum offloading works with TCP and UDP only */
2290 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2291 break;
2292 }
2293 case 0x86DD: /* IPv6 */
2294 pStatus->fIXSM = false;
2295 pStatus->fIPCS = false;
2296 pStatus->fTCPCS = true;
2297 break;
2298 default: /* ARP, VLAN, etc. */
2299 pStatus->fIXSM = true;
2300 break;
2301 }
2302# else
2303 pStatus->fIXSM = true;
2304 RT_NOREF_PV(pThis); RT_NOREF_PV(pFrame); RT_NOREF_PV(cb);
2305# endif
2306 return VINF_SUCCESS;
2307}
2308#endif /* IN_RING3 */
2309
2310/**
2311 * Pad and store received packet.
2312 *
2313 * @remarks Make sure that the packet appears to the upper layer as one coming
2314 * from a real Ethernet adapter: pad it and insert the FCS.
2315 *
2316 * @returns VBox status code.
2317 * @param pThis The device state structure.
2318 * @param pvBuf The available data.
2319 * @param cb Number of bytes available in the buffer.
2320 * @param status Bit fields containing status info.
2321 */
2322static int e1kHandleRxPacket(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
2323{
2324#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2325 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2326 uint8_t *ptr = rxPacket;
2327
2328 int rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2329 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2330 return rc;
2331
2332 if (cb > 70) /* unqualified guess */
2333 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
2334
2335 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2336 Assert(cb > 16);
2337 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2338 E1kLog3(("%s Max RX packet size is %u\n", pThis->szPrf, cbMax));
2339 if (status.fVP)
2340 {
2341 /* VLAN packet -- strip VLAN tag in VLAN mode */
2342 if ((CTRL & CTRL_VME) && cb > 16)
2343 {
2344 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2345 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2346 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2347 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2348 cb -= 4;
2349 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2350 pThis->szPrf, status.u16Special, cb));
2351 }
2352 else
2353 status.fVP = false; /* Set VP only if we stripped the tag */
2354 }
2355 else
2356 memcpy(rxPacket, pvBuf, cb);
2357 /* Pad short packets */
2358 if (cb < 60)
2359 {
2360 memset(rxPacket + cb, 0, 60 - cb);
2361 cb = 60;
2362 }
2363 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2364 {
2365 STAM_PROFILE_ADV_START(&pThis->StatReceiveCRC, a);
2366 /*
2367 * Add FCS if CRC stripping is not enabled. Since the value of CRC
2368 * is ignored by most of drivers we may as well save us the trouble
2369 * of calculating it (see EthernetCRC CFGM parameter).
2370 */
2371 if (pThis->fEthernetCRC)
2372 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2373 cb += sizeof(uint32_t);
2374 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveCRC, a);
2375 E1kLog3(("%s Added FCS (cb=%u)\n", pThis->szPrf, cb));
2376 }
2377 /* Compute checksum of complete packet */
2378 uint16_t checksum = e1kCSum16(rxPacket + GET_BITS(RXCSUM, PCSS), cb);
2379 e1kRxChecksumOffload(pThis, rxPacket, cb, &status);
2380
2381 /* Update stats */
2382 E1K_INC_CNT32(GPRC);
2383 if (e1kIsBroadcast(pvBuf))
2384 E1K_INC_CNT32(BPRC);
2385 else if (e1kIsMulticast(pvBuf))
2386 E1K_INC_CNT32(MPRC);
2387 /* Update octet receive counter */
2388 E1K_ADD_CNT64(GORCL, GORCH, cb);
2389 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
2390 if (cb == 64)
2391 E1K_INC_CNT32(PRC64);
2392 else if (cb < 128)
2393 E1K_INC_CNT32(PRC127);
2394 else if (cb < 256)
2395 E1K_INC_CNT32(PRC255);
2396 else if (cb < 512)
2397 E1K_INC_CNT32(PRC511);
2398 else if (cb < 1024)
2399 E1K_INC_CNT32(PRC1023);
2400 else
2401 E1K_INC_CNT32(PRC1522);
2402
2403 E1K_INC_ISTAT_CNT(pThis->uStatRxFrm);
2404
2405# ifdef E1K_WITH_RXD_CACHE
2406 while (cb > 0)
2407 {
2408 E1KRXDESC *pDesc = e1kRxDGet(pThis);
2409
2410 if (pDesc == NULL)
2411 {
2412 E1kLog(("%s Out of receive buffers, dropping the packet "
2413 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2414 pThis->szPrf, cb, e1kRxDInCache(pThis), RDH, RDT));
2415 break;
2416 }
2417# else /* !E1K_WITH_RXD_CACHE */
2418 if (RDH == RDT)
2419 {
2420 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2421 pThis->szPrf));
2422 }
2423 /* Store the packet to receive buffers */
2424 while (RDH != RDT)
2425 {
2426 /* Load the descriptor pointed by head */
2427 E1KRXDESC desc, *pDesc = &desc;
2428 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
2429 &desc, sizeof(desc));
2430# endif /* !E1K_WITH_RXD_CACHE */
2431 if (pDesc->u64BufAddr)
2432 {
2433 /* Update descriptor */
2434 pDesc->status = status;
2435 pDesc->u16Checksum = checksum;
2436 pDesc->status.fDD = true;
2437
2438 /*
2439 * We need to leave Rx critical section here or we risk deadlocking
2440 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2441 * page or has an access handler associated with it.
2442 * Note that it is safe to leave the critical section here since
2443 * e1kRegWriteRDT() never modifies RDH. It never touches already
2444 * fetched RxD cache entries either.
2445 */
2446 if (cb > pThis->u16RxBSize)
2447 {
2448 pDesc->status.fEOP = false;
2449 e1kCsRxLeave(pThis);
2450 e1kStoreRxFragment(pThis, pDesc, ptr, pThis->u16RxBSize);
2451 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2452 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2453 return rc;
2454 ptr += pThis->u16RxBSize;
2455 cb -= pThis->u16RxBSize;
2456 }
2457 else
2458 {
2459 pDesc->status.fEOP = true;
2460 e1kCsRxLeave(pThis);
2461 e1kStoreRxFragment(pThis, pDesc, ptr, cb);
2462# ifdef E1K_WITH_RXD_CACHE
2463 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2464 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2465 return rc;
2466 cb = 0;
2467# else /* !E1K_WITH_RXD_CACHE */
2468 pThis->led.Actual.s.fReading = 0;
2469 return VINF_SUCCESS;
2470# endif /* !E1K_WITH_RXD_CACHE */
2471 }
2472 /*
2473 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2474 * is not defined.
2475 */
2476 }
2477# ifdef E1K_WITH_RXD_CACHE
2478 /* Write back the descriptor. */
2479 pDesc->status.fDD = true;
2480 e1kRxDPut(pThis, pDesc);
2481# else /* !E1K_WITH_RXD_CACHE */
2482 else
2483 {
2484 /* Write back the descriptor. */
2485 pDesc->status.fDD = true;
2486 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2487 e1kDescAddr(RDBAH, RDBAL, RDH),
2488 pDesc, sizeof(E1KRXDESC));
2489 e1kAdvanceRDH(pThis);
2490 }
2491# endif /* !E1K_WITH_RXD_CACHE */
2492 }
2493
2494 if (cb > 0)
2495 E1kLog(("%s Out of receive buffers, dropping %u bytes\n", pThis->szPrf, cb));
2496
2497 pThis->led.Actual.s.fReading = 0;
2498
2499 e1kCsRxLeave(pThis);
2500# ifdef E1K_WITH_RXD_CACHE
2501 /* Complete packet has been stored -- it is time to let the guest know. */
2502# ifdef E1K_USE_RX_TIMERS
2503 if (RDTR)
2504 {
2505 /* Arm the timer to fire in RDTR usec (discard .024) */
2506 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2507 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2508 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2509 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2510 }
2511 else
2512 {
2513# endif /* E1K_USE_RX_TIMERS */
2514 /* 0 delay means immediate interrupt */
2515 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2516 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2517# ifdef E1K_USE_RX_TIMERS
2518 }
2519# endif /* E1K_USE_RX_TIMERS */
2520# endif /* E1K_WITH_RXD_CACHE */
2521
2522 return VINF_SUCCESS;
2523#else /* !IN_RING3 */
2524 RT_NOREF_PV(pThis); RT_NOREF_PV(pvBuf); RT_NOREF_PV(cb); RT_NOREF_PV(status);
2525 return VERR_INTERNAL_ERROR_2;
2526#endif /* !IN_RING3 */
2527}
2528
2529
2530/**
2531 * Bring the link up after the configured delay, 5 seconds by default.
2532 *
2533 * @param pThis The device state structure.
2534 * @thread any
2535 */
2536DECLINLINE(void) e1kBringLinkUpDelayed(PE1KSTATE pThis)
2537{
2538 E1kLog(("%s Will bring up the link in %d seconds...\n",
2539 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
2540 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), pThis->cMsLinkUpDelay * 1000);
2541}
2542
2543#ifdef IN_RING3
2544/**
2545 * Bring up the link immediately.
2546 *
2547 * @param pThis The device state structure.
2548 */
2549DECLINLINE(void) e1kR3LinkUp(PE1KSTATE pThis)
2550{
2551 E1kLog(("%s Link is up\n", pThis->szPrf));
2552 STATUS |= STATUS_LU;
2553 Phy::setLinkStatus(&pThis->phy, true);
2554 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2555 if (pThis->pDrvR3)
2556 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_UP);
2557}
2558
2559/**
2560 * Bring down the link immediately.
2561 *
2562 * @param pThis The device state structure.
2563 */
2564DECLINLINE(void) e1kR3LinkDown(PE1KSTATE pThis)
2565{
2566 E1kLog(("%s Link is down\n", pThis->szPrf));
2567 STATUS &= ~STATUS_LU;
2568 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2569 if (pThis->pDrvR3)
2570 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2571}
2572
2573/**
2574 * Bring down the link temporarily.
2575 *
2576 * @param pThis The device state structure.
2577 */
2578DECLINLINE(void) e1kR3LinkDownTemp(PE1KSTATE pThis)
2579{
2580 E1kLog(("%s Link is down temporarily\n", pThis->szPrf));
2581 STATUS &= ~STATUS_LU;
2582 Phy::setLinkStatus(&pThis->phy, false);
2583 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2584 /*
2585 * Notifying the associated driver that the link went down (even temporarily)
2586 * seems to be the right thing, but it was not done before. This may cause
2587 * a regression if the driver does not expect the link to go down as a result
2588 * of sending PDMNETWORKLINKSTATE_DOWN_RESUME to this device. Earlier versions
2589 * of code notified the driver that the link was up! See @bugref{7057}.
2590 */
2591 if (pThis->pDrvR3)
2592 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2593 e1kBringLinkUpDelayed(pThis);
2594}
2595#endif /* IN_RING3 */
2596
2597#if 0 /* unused */
2598/**
2599 * Read handler for Device Status register.
2600 *
2601 * Get the link status from PHY.
2602 *
2603 * @returns VBox status code.
2604 *
2605 * @param pThis The device state structure.
2606 * @param offset Register offset in memory-mapped frame.
2607 * @param index Register index in register array.
2608 * @param pu32Value Where to store the read value.
2609 */
2610static int e1kRegReadCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2611{
2612 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2613 pThis->szPrf, (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2614 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2615 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2616 {
2617 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2618 if (Phy::readMDIO(&pThis->phy))
2619 *pu32Value = CTRL | CTRL_MDIO;
2620 else
2621 *pu32Value = CTRL & ~CTRL_MDIO;
2622 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2623 pThis->szPrf, !!(*pu32Value & CTRL_MDIO)));
2624 }
2625 else
2626 {
2627 /* MDIO pin is used for output, ignore it */
2628 *pu32Value = CTRL;
2629 }
2630 return VINF_SUCCESS;
2631}
2632#endif /* unused */
2633
2634/**
2635 * Write handler for Device Control register.
2636 *
2637 * Handles reset.
2638 *
2639 * @param pThis The device state structure.
2640 * @param offset Register offset in memory-mapped frame.
2641 * @param index Register index in register array.
2642 * @param value The value to store.
2643 * @param mask Used to implement partial writes (8 and 16-bit).
2644 * @thread EMT
2645 */
2646static int e1kRegWriteCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2647{
2648 int rc = VINF_SUCCESS;
2649
2650 if (value & CTRL_RESET)
2651 { /* RST */
2652#ifndef IN_RING3
2653 return VINF_IOM_R3_MMIO_WRITE;
2654#else
2655 e1kHardReset(pThis);
2656#endif
2657 }
2658 else
2659 {
2660 if ( (value & CTRL_SLU)
2661 && pThis->fCableConnected
2662 && !(STATUS & STATUS_LU))
2663 {
2664 /* The driver indicates that we should bring up the link */
2665 /* Do so in 5 seconds (by default). */
2666 e1kBringLinkUpDelayed(pThis);
2667 /*
2668 * Change the status (but not PHY status) anyway as Windows expects
2669 * it for 82543GC.
2670 */
2671 STATUS |= STATUS_LU;
2672 }
2673 if (value & CTRL_VME)
2674 {
2675 E1kLog(("%s VLAN Mode Enabled\n", pThis->szPrf));
2676 }
2677 E1kLog(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2678 pThis->szPrf, (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2679 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2680 if (value & CTRL_MDC)
2681 {
2682 if (value & CTRL_MDIO_DIR)
2683 {
2684 E1kLog(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2685 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2686 Phy::writeMDIO(&pThis->phy, !!(value & CTRL_MDIO));
2687 }
2688 else
2689 {
2690 if (Phy::readMDIO(&pThis->phy))
2691 value |= CTRL_MDIO;
2692 else
2693 value &= ~CTRL_MDIO;
2694 E1kLog(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n",
2695 pThis->szPrf, !!(value & CTRL_MDIO)));
2696 }
2697 }
2698 rc = e1kRegWriteDefault(pThis, offset, index, value);
2699 }
2700
2701 return rc;
2702}
2703
2704/**
2705 * Write handler for EEPROM/Flash Control/Data register.
2706 *
2707 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2708 *
2709 * @param pThis The device state structure.
2710 * @param offset Register offset in memory-mapped frame.
2711 * @param index Register index in register array.
2712 * @param value The value to store.
2713 * @param mask Used to implement partial writes (8 and 16-bit).
2714 * @thread EMT
2715 */
2716static int e1kRegWriteEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2717{
2718 RT_NOREF(offset, index);
2719#ifdef IN_RING3
2720 /* So far we are concerned with lower byte only */
2721 if ((EECD & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2722 {
2723 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2724 /* Note: 82543GC does not need to request EEPROM access */
2725 STAM_PROFILE_ADV_START(&pThis->StatEEPROMWrite, a);
2726 pThis->eeprom.write(value & EECD_EE_WIRES);
2727 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMWrite, a);
2728 }
2729 if (value & EECD_EE_REQ)
2730 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2731 else
2732 EECD &= ~EECD_EE_GNT;
2733 //e1kRegWriteDefault(pThis, offset, index, value );
2734
2735 return VINF_SUCCESS;
2736#else /* !IN_RING3 */
2737 RT_NOREF(pThis, value);
2738 return VINF_IOM_R3_MMIO_WRITE;
2739#endif /* !IN_RING3 */
2740}
2741
2742/**
2743 * Read handler for EEPROM/Flash Control/Data register.
2744 *
2745 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2746 *
2747 * @returns VBox status code.
2748 *
2749 * @param pThis The device state structure.
2750 * @param offset Register offset in memory-mapped frame.
2751 * @param index Register index in register array.
2752 * @param pu32Value Where to store the read value.
2753 * @thread EMT
2754 */
2755static int e1kRegReadEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2756{
2757#ifdef IN_RING3
2758 uint32_t value;
2759 int rc = e1kRegReadDefault(pThis, offset, index, &value);
2760 if (RT_SUCCESS(rc))
2761 {
2762 if ((value & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2763 {
2764 /* Note: 82543GC does not need to request EEPROM access */
2765 /* Access to EEPROM granted -- get 4-wire bits to EEPROM device */
2766 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2767 value |= pThis->eeprom.read();
2768 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2769 }
2770 *pu32Value = value;
2771 }
2772
2773 return rc;
2774#else /* !IN_RING3 */
2775 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(pu32Value);
2776 return VINF_IOM_R3_MMIO_READ;
2777#endif /* !IN_RING3 */
2778}
2779
2780/**
2781 * Write handler for EEPROM Read register.
2782 *
2783 * Handles EEPROM word access requests, reads EEPROM and stores the result
2784 * into DATA field.
2785 *
2786 * @param pThis The device state structure.
2787 * @param offset Register offset in memory-mapped frame.
2788 * @param index Register index in register array.
2789 * @param value The value to store.
2790 * @param mask Used to implement partial writes (8 and 16-bit).
2791 * @thread EMT
2792 */
2793static int e1kRegWriteEERD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2794{
2795#ifdef IN_RING3
2796 /* Make use of 'writable' and 'readable' masks. */
2797 e1kRegWriteDefault(pThis, offset, index, value);
2798 /* DONE and DATA are set only if read was triggered by START. */
2799 if (value & EERD_START)
2800 {
2801 uint16_t tmp;
2802 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2803 if (pThis->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2804 SET_BITS(EERD, DATA, tmp);
2805 EERD |= EERD_DONE;
2806 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2807 }
2808
2809 return VINF_SUCCESS;
2810#else /* !IN_RING3 */
2811 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
2812 return VINF_IOM_R3_MMIO_WRITE;
2813#endif /* !IN_RING3 */
2814}
2815
2816
2817/**
2818 * Write handler for MDI Control register.
2819 *
2820 * Handles PHY read/write requests; forwards requests to internal PHY device.
2821 *
2822 * @param pThis The device state structure.
2823 * @param offset Register offset in memory-mapped frame.
2824 * @param index Register index in register array.
2825 * @param value The value to store.
2826 * @param mask Used to implement partial writes (8 and 16-bit).
2827 * @thread EMT
2828 */
2829static int e1kRegWriteMDIC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2830{
2831 if (value & MDIC_INT_EN)
2832 {
2833 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
2834 pThis->szPrf));
2835 }
2836 else if (value & MDIC_READY)
2837 {
2838 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
2839 pThis->szPrf));
2840 }
2841 else if (GET_BITS_V(value, MDIC, PHY) != 1)
2842 {
2843 E1kLog(("%s WARNING! Access to invalid PHY detected, phy=%d.\n",
2844 pThis->szPrf, GET_BITS_V(value, MDIC, PHY)));
2845 /*
2846 * Some drivers scan the MDIO bus for a PHY. We can work with these
2847 * drivers if we set MDIC_READY and MDIC_ERROR when there isn't a PHY
2848 * at the requested address, see @bugref{7346}.
2849 */
2850 MDIC = MDIC_READY | MDIC_ERROR;
2851 }
2852 else
2853 {
2854 /* Store the value */
2855 e1kRegWriteDefault(pThis, offset, index, value);
2856 STAM_COUNTER_INC(&pThis->StatPHYAccesses);
2857 /* Forward op to PHY */
2858 if (value & MDIC_OP_READ)
2859 SET_BITS(MDIC, DATA, Phy::readRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG)));
2860 else
2861 Phy::writeRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK);
2862 /* Let software know that we are done */
2863 MDIC |= MDIC_READY;
2864 }
2865
2866 return VINF_SUCCESS;
2867}
2868
2869/**
2870 * Write handler for Interrupt Cause Read register.
2871 *
2872 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
2873 *
2874 * @param pThis The device state structure.
2875 * @param offset Register offset in memory-mapped frame.
2876 * @param index Register index in register array.
2877 * @param value The value to store.
2878 * @param mask Used to implement partial writes (8 and 16-bit).
2879 * @thread EMT
2880 */
2881static int e1kRegWriteICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2882{
2883 ICR &= ~value;
2884
2885 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index);
2886 return VINF_SUCCESS;
2887}
2888
2889/**
2890 * Read handler for Interrupt Cause Read register.
2891 *
2892 * Reading this register acknowledges all interrupts.
2893 *
2894 * @returns VBox status code.
2895 *
2896 * @param pThis The device state structure.
2897 * @param offset Register offset in memory-mapped frame.
2898 * @param index Register index in register array.
2899 * @param pu32Value Where to store the read value.
2900 * @thread EMT
2901 */
2902static int e1kRegReadICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2903{
2904 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_READ);
2905 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2906 return rc;
2907
2908 uint32_t value = 0;
2909 rc = e1kRegReadDefault(pThis, offset, index, &value);
2910 if (RT_SUCCESS(rc))
2911 {
2912 if (value)
2913 {
2914 /*
2915 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
2916 * with disabled interrupts.
2917 */
2918 //if (IMS)
2919 if (1)
2920 {
2921 /*
2922 * Interrupts were enabled -- we are supposedly at the very
2923 * beginning of interrupt handler
2924 */
2925 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
2926 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", pThis->szPrf, ICR));
2927 /* Clear all pending interrupts */
2928 ICR = 0;
2929 pThis->fIntRaised = false;
2930 /* Lower(0) INTA(0) */
2931 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
2932
2933 pThis->u64AckedAt = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
2934 if (pThis->fIntMaskUsed)
2935 pThis->fDelayInts = true;
2936 }
2937 else
2938 {
2939 /*
2940 * Interrupts are disabled -- in windows guests ICR read is done
2941 * just before re-enabling interrupts
2942 */
2943 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
2944 }
2945 }
2946 *pu32Value = value;
2947 }
2948 e1kCsLeave(pThis);
2949
2950 return rc;
2951}
2952
2953/**
2954 * Write handler for Interrupt Cause Set register.
2955 *
2956 * Bits corresponding to 1s in 'value' will be set in ICR register.
2957 *
2958 * @param pThis The device state structure.
2959 * @param offset Register offset in memory-mapped frame.
2960 * @param index Register index in register array.
2961 * @param value The value to store.
2962 * @param mask Used to implement partial writes (8 and 16-bit).
2963 * @thread EMT
2964 */
2965static int e1kRegWriteICS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2966{
2967 RT_NOREF_PV(offset); RT_NOREF_PV(index);
2968 E1K_INC_ISTAT_CNT(pThis->uStatIntICS);
2969 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, value & g_aE1kRegMap[ICS_IDX].writable);
2970}
2971
2972/**
2973 * Write handler for Interrupt Mask Set register.
2974 *
2975 * Will trigger pending interrupts.
2976 *
2977 * @param pThis The device state structure.
2978 * @param offset Register offset in memory-mapped frame.
2979 * @param index Register index in register array.
2980 * @param value The value to store.
2981 * @param mask Used to implement partial writes (8 and 16-bit).
2982 * @thread EMT
2983 */
2984static int e1kRegWriteIMS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2985{
2986 RT_NOREF_PV(offset); RT_NOREF_PV(index);
2987
2988 IMS |= value;
2989 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
2990 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", pThis->szPrf));
2991 e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, 0);
2992
2993 return VINF_SUCCESS;
2994}
2995
2996/**
2997 * Write handler for Interrupt Mask Clear register.
2998 *
2999 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
3000 *
3001 * @param pThis The device state structure.
3002 * @param offset Register offset in memory-mapped frame.
3003 * @param index Register index in register array.
3004 * @param value The value to store.
3005 * @param mask Used to implement partial writes (8 and 16-bit).
3006 * @thread EMT
3007 */
3008static int e1kRegWriteIMC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3009{
3010 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3011
3012 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3013 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3014 return rc;
3015 if (pThis->fIntRaised)
3016 {
3017 /*
3018 * Technically we should reset fIntRaised in the ICR read handler, but that would cause
3019 * Windows to freeze since it may receive an interrupt while still at the very beginning
3020 * of the interrupt handler.
3021 */
3022 E1K_INC_ISTAT_CNT(pThis->uStatIntLower);
3023 STAM_COUNTER_INC(&pThis->StatIntsPrevented);
3024 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
3025 /* Lower(0) INTA(0) */
3026 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
3027 pThis->fIntRaised = false;
3028 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
3029 }
3030 IMS &= ~value;
3031 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", pThis->szPrf));
3032 e1kCsLeave(pThis);
3033
3034 return VINF_SUCCESS;
3035}
3036
3037/**
3038 * Write handler for Receive Control register.
3039 *
3040 * @param pThis The device state structure.
3041 * @param offset Register offset in memory-mapped frame.
3042 * @param index Register index in register array.
3043 * @param value The value to store.
3044 * @param mask Used to implement partial writes (8 and 16-bit).
3045 * @thread EMT
3046 */
3047static int e1kRegWriteRCTL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3048{
3049 /* Update promiscuous mode */
3050 bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
3051 if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
3052 {
3053 /* Promiscuity has changed, pass the knowledge on. */
3054#ifndef IN_RING3
3055 return VINF_IOM_R3_MMIO_WRITE;
3056#else
3057 if (pThis->pDrvR3)
3058 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, fBecomePromiscous);
3059#endif
3060 }
3061
3062 /* Adjust receive buffer size */
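/* Per the formula below (illustrative values): with BSEX=0 the buffer is
 * 2048/1024/512/256 bytes for BSIZE=0..3; with BSEX=1 the result is
 * multiplied by 16, i.e. 16384/8192/4096 bytes for BSIZE=1..3. */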
3063 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
3064 if (value & RCTL_BSEX)
3065 cbRxBuf *= 16;
3066 if (cbRxBuf != pThis->u16RxBSize)
3067 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
3068 pThis->szPrf, cbRxBuf, pThis->u16RxBSize));
3069 pThis->u16RxBSize = cbRxBuf;
3070
3071 /* Update the register */
3072 e1kRegWriteDefault(pThis, offset, index, value);
3073
3074 return VINF_SUCCESS;
3075}
3076
3077/**
3078 * Write handler for Packet Buffer Allocation register.
3079 *
3080 * TXA = 64 - RXA.
3081 *
3082 * @param pThis The device state structure.
3083 * @param offset Register offset in memory-mapped frame.
3084 * @param index Register index in register array.
3085 * @param value The value to store.
3086 * @param mask Used to implement partial writes (8 and 16-bit).
3087 * @thread EMT
3088 */
3089static int e1kRegWritePBA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3090{
3091 e1kRegWriteDefault(pThis, offset, index, value);
3092 PBA_st->txa = 64 - PBA_st->rxa;
3093
3094 return VINF_SUCCESS;
3095}
3096
3097/**
3098 * Write handler for Receive Descriptor Tail register.
3099 *
3100 * @remarks Write into RDT forces switch to HC and signal to
3101 * e1kR3NetworkDown_WaitReceiveAvail().
3102 *
3103 * @returns VBox status code.
3104 *
3105 * @param pThis The device state structure.
3106 * @param offset Register offset in memory-mapped frame.
3107 * @param index Register index in register array.
3108 * @param value The value to store.
3109 * @param mask Used to implement partial writes (8 and 16-bit).
3110 * @thread EMT
3111 */
3112static int e1kRegWriteRDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3113{
3114#ifndef IN_RING3
3115 /* XXX */
3116// return VINF_IOM_R3_MMIO_WRITE;
3117#endif
3118 int rc = e1kCsRxEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3119 if (RT_LIKELY(rc == VINF_SUCCESS))
3120 {
3121 E1kLog(("%s e1kRegWriteRDT\n", pThis->szPrf));
3122 /*
3123 * Some drivers advance RDT too far, so that it equals RDH. This
3124 * somehow manages to work with real hardware but not with this
3125 * emulated device. We can work with these drivers if we just
3126 * write 1 less when we see a driver writing RDT equal to RDH,
3127 * see @bugref{7346}.
3128 */
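/* Illustration (hypothetical ring): with 8 descriptors and RDH == 3, a guest
 * writing RDT = 3 would make the ring appear empty, so we store 2 instead;
 * if RDH == 0, we wrap around and store the last index
 * (RDLEN / sizeof(E1KRXDESC) - 1). */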
3129 if (value == RDH)
3130 {
3131 if (RDH == 0)
3132 value = (RDLEN / sizeof(E1KRXDESC)) - 1;
3133 else
3134 value = RDH - 1;
3135 }
3136 rc = e1kRegWriteDefault(pThis, offset, index, value);
3137#ifdef E1K_WITH_RXD_CACHE
3138 /*
3139 * We need to fetch descriptors now as RDT may go a whole circle
3140 * before we attempt to store a received packet. For example,
3141 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
3142 * size being only 8 descriptors! Note that we fetch descriptors
3143 * only when the cache is empty to reduce the number of memory reads
3144 * in case of frequent RDT writes. Don't fetch anything when the
3145 * receiver is disabled either as RDH, RDT, RDLEN can be in some
3146 * messed up state.
3147 * Note that even though the cache may seem empty, meaning that there are
3148 * no more available descriptors in it, it may still be in use by the RX
3149 * thread, which has not yet written the last descriptor back but has
3150 * temporarily released the RX lock in order to write the packet body
3151 * to the descriptor's buffer. At this point we are still going to prefetch,
3152 * but it won't actually fetch anything if there are no unused slots in
3153 * our "empty" cache (nRxDFetched==E1K_RXD_CACHE_SIZE). We must not
3154 * reset the cache here even if it appears empty. It will be reset at
3155 * a later point in e1kRxDGet().
3156 */
3157 if (e1kRxDIsCacheEmpty(pThis) && (RCTL & RCTL_EN))
3158 e1kRxDPrefetch(pThis);
3159#endif /* E1K_WITH_RXD_CACHE */
3160 e1kCsRxLeave(pThis);
3161 if (RT_SUCCESS(rc))
3162 {
3163/** @todo bird: Use SUPSem* for this so we can signal it in ring-0 as well
3164 * without requiring any context switches. We should also check the
3165 * wait condition before bothering to queue the item as we're currently
3166 * queuing thousands of items per second here in a normal transmit
3167 * scenario. Expect performance changes when fixing this! */
3168#ifdef IN_RING3
3169 /* Signal that we have more receive descriptors available. */
3170 e1kWakeupReceive(pThis->CTX_SUFF(pDevIns));
3171#else
3172 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pCanRxQueue));
3173 if (pItem)
3174 PDMQueueInsert(pThis->CTX_SUFF(pCanRxQueue), pItem);
3175#endif
3176 }
3177 }
3178 return rc;
3179}
3180
3181/**
3182 * Write handler for Receive Delay Timer register.
3183 *
3184 * @param pThis The device state structure.
3185 * @param offset Register offset in memory-mapped frame.
3186 * @param index Register index in register array.
3187 * @param value The value to store.
3188 * @param mask Used to implement partial writes (8 and 16-bit).
3189 * @thread EMT
3190 */
3191static int e1kRegWriteRDTR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3192{
3193 e1kRegWriteDefault(pThis, offset, index, value);
3194 if (value & RDTR_FPD)
3195 {
3196 /* Flush requested, cancel both timers and raise interrupt */
3197#ifdef E1K_USE_RX_TIMERS
3198 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3199 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3200#endif
3201 E1K_INC_ISTAT_CNT(pThis->uStatIntRDTR);
3202 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3203 }
3204
3205 return VINF_SUCCESS;
3206}
3207
3208DECLINLINE(uint32_t) e1kGetTxLen(PE1KSTATE pThis)
3209{
3210 /*
3211 * Make sure TDT won't change during computation. EMT may modify TDT at
3212 * any moment.
3213 */
3214 uint32_t tdt = TDT;
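/* Illustration (hypothetical values): with a ring of 8 descriptors, TDH=6 and
 * TDT=2 the ring has wrapped, so the result is 8 + 2 - 6 = 4 pending
 * descriptors (indices 6, 7, 0, 1); with TDH=2, TDT=6 it is simply 6 - 2 = 4. */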
3215 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
3216}
3217
3218#ifdef IN_RING3
3219
3220# ifdef E1K_TX_DELAY
3221/**
3222 * Transmit Delay Timer handler.
3223 *
3224 * @remarks We only get here when the timer expires.
3225 *
3226 * @param pDevIns Pointer to device instance structure.
3227 * @param pTimer Pointer to the timer.
3228 * @param pvUser NULL.
3229 * @thread EMT
3230 */
3231static DECLCALLBACK(void) e1kTxDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3232{
3233 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3234 Assert(PDMCritSectIsOwner(&pThis->csTx));
3235
3236 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayExp);
3237# ifdef E1K_INT_STATS
3238 uint64_t u64Elapsed = RTTimeNanoTS() - pThis->u64ArmedAt;
3239 if (u64Elapsed > pThis->uStatMaxTxDelay)
3240 pThis->uStatMaxTxDelay = u64Elapsed;
3241# endif
3242 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
3243 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3244}
3245# endif /* E1K_TX_DELAY */
3246
3247//# ifdef E1K_USE_TX_TIMERS
3248
3249/**
3250 * Transmit Interrupt Delay Timer handler.
3251 *
3252 * @remarks We only get here when the timer expires.
3253 *
3254 * @param pDevIns Pointer to device instance structure.
3255 * @param pTimer Pointer to the timer.
3256 * @param pvUser NULL.
3257 * @thread EMT
3258 */
3259static DECLCALLBACK(void) e1kTxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3260{
3261 RT_NOREF(pDevIns);
3262 RT_NOREF(pTimer);
3263 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3264
3265 E1K_INC_ISTAT_CNT(pThis->uStatTID);
3266 /* Cancel absolute delay timer as we have already got attention */
3267# ifndef E1K_NO_TAD
3268 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
3269# endif
3270 e1kRaiseInterrupt(pThis, ICR_TXDW);
3271}
3272
3273/**
3274 * Transmit Absolute Delay Timer handler.
3275 *
3276 * @remarks We only get here when the timer expires.
3277 *
3278 * @param pDevIns Pointer to device instance structure.
3279 * @param pTimer Pointer to the timer.
3280 * @param pvUser NULL.
3281 * @thread EMT
3282 */
3283static DECLCALLBACK(void) e1kTxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3284{
3285 RT_NOREF(pDevIns);
3286 RT_NOREF(pTimer);
3287 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3288
3289 E1K_INC_ISTAT_CNT(pThis->uStatTAD);
3290 /* Cancel interrupt delay timer as we have already got attention */
3291 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
3292 e1kRaiseInterrupt(pThis, ICR_TXDW);
3293}
3294
3295//# endif /* E1K_USE_TX_TIMERS */
3296# ifdef E1K_USE_RX_TIMERS
3297
3298/**
3299 * Receive Interrupt Delay Timer handler.
3300 *
3301 * @remarks We only get here when the timer expires.
3302 *
3303 * @param pDevIns Pointer to device instance structure.
3304 * @param pTimer Pointer to the timer.
3305 * @param pvUser NULL.
3306 * @thread EMT
3307 */
3308static DECLCALLBACK(void) e1kRxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3309{
3310 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3311
3312 E1K_INC_ISTAT_CNT(pThis->uStatRID);
3313 /* Cancel absolute delay timer as we have already got attention */
3314 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3315 e1kRaiseInterrupt(pThis, ICR_RXT0);
3316}
3317
3318/**
3319 * Receive Absolute Delay Timer handler.
3320 *
3321 * @remarks We only get here when the timer expires.
3322 *
3323 * @param pDevIns Pointer to device instance structure.
3324 * @param pTimer Pointer to the timer.
3325 * @param pvUser NULL.
3326 * @thread EMT
3327 */
3328static DECLCALLBACK(void) e1kRxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3329{
3330 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3331
3332 E1K_INC_ISTAT_CNT(pThis->uStatRAD);
3333 /* Cancel interrupt delay timer as we have already got attention */
3334 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3335 e1kRaiseInterrupt(pThis, ICR_RXT0);
3336}
3337
3338# endif /* E1K_USE_RX_TIMERS */
3339
3340/**
3341 * Late Interrupt Timer handler.
3342 *
3343 * @param pDevIns Pointer to device instance structure.
3344 * @param pTimer Pointer to the timer.
3345 * @param pvUser NULL.
3346 * @thread EMT
3347 */
3348static DECLCALLBACK(void) e1kLateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3349{
3350 RT_NOREF(pDevIns, pTimer);
3351 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3352
3353 STAM_PROFILE_ADV_START(&pThis->StatLateIntTimer, a);
3354 STAM_COUNTER_INC(&pThis->StatLateInts);
3355 E1K_INC_ISTAT_CNT(pThis->uStatIntLate);
3356# if 0
3357 if (pThis->iStatIntLost > -100)
3358 pThis->iStatIntLost--;
3359# endif
3360 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, 0);
3361 STAM_PROFILE_ADV_STOP(&pThis->StatLateIntTimer, a);
3362}
3363
3364/**
3365 * Link Up Timer handler.
3366 *
3367 * @param pDevIns Pointer to device instance structure.
3368 * @param pTimer Pointer to the timer.
3369 * @param pvUser NULL.
3370 * @thread EMT
3371 */
3372static DECLCALLBACK(void) e1kLinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3373{
3374 RT_NOREF(pDevIns, pTimer);
3375 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3376
3377 /*
3378 * This can happen if we set the link status to down when the link-up timer is
3379 * already armed (shortly after e1kLoadDone(), or when the cable is disconnected
3380 * and then reconnected very quickly).
3381 */
3382 if (!pThis->fCableConnected)
3383 return;
3384
3385 e1kR3LinkUp(pThis);
3386}
3387
3388#endif /* IN_RING3 */
3389
3390/**
3391 * Sets up the GSO context according to the new TSE context descriptor.
3392 *
3393 * @param pGso The GSO context to set up.
3394 * @param pCtx The context descriptor.
3395 */
3396DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3397{
3398 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3399
3400 /*
3401 * See if the context descriptor describes something that could be TCP or
3402 * UDP over IPv[46].
3403 */
3404 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
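/* Assumed layout of a TSE frame (offsets from the start of the frame):
 *   0 ......... IPCSS ........ TUCSS ........ HDRLEN ............ end
 *   | Ethernet  | IP header    | TCP/UDP hdr  | payload (PAYLEN)  |
 */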
3405 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3406 {
3407 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3408 return;
3409 }
3410 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3411 {
3412 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3413 return;
3414 }
3415 if (RT_UNLIKELY( pCtx->dw2.fTCP
3416 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3417 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3418 {
3419 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3420 return;
3421 }
3422
3423 /* The end of the TCP/UDP checksum should stop at the end of the packet or at least after the headers. */
3424 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3425 {
3426 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3427 return;
3428 }
3429
3430 /* IPv4 checksum offset. */
3431 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3432 {
3433 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3434 return;
3435 }
3436
3437 /* TCP/UDP checksum offsets. */
3438 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3439 != ( pCtx->dw2.fTCP
3440 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3441 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3442 {
3443 E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS, pCtx->dw2.fTCP));
3444 return;
3445 }
3446
3447 /*
3448 * Because of internal networking using a 16-bit size field for GSO context
3449 * plus frame, we have to make sure we don't exceed this.
3450 */
3451 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3452 {
3453 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3454 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3455 return;
3456 }
3457
3458 /*
3459 * We're good for now - we'll do more checks when seeing the data.
3460 * So, figure the type of offloading and setup the context.
3461 */
3462 if (pCtx->dw2.fIP)
3463 {
3464 if (pCtx->dw2.fTCP)
3465 {
3466 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3467 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3468 }
3469 else
3470 {
3471 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3472 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3473 }
3474 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3475 * this yet it seems)... */
3476 }
3477 else
3478 {
3479 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /** @todo IPv6 UFO */
3480 if (pCtx->dw2.fTCP)
3481 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3482 else
3483 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3484 }
3485 pGso->offHdr1 = pCtx->ip.u8CSS;
3486 pGso->offHdr2 = pCtx->tu.u8CSS;
3487 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3488 pGso->cbMaxSeg = pCtx->dw3.u16MSS;
3489 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3490 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3491 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3492}
3493
3494/**
3495 * Checks if we can use GSO processing for the current TSE frame.
3496 *
3497 * @param pThis The device state structure.
3498 * @param pGso The GSO context.
3499 * @param pData The first data descriptor of the frame.
3500 * @param pCtx The TSO context descriptor.
3501 */
3502DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3503{
3504 if (!pData->cmd.fTSE)
3505 {
3506 E1kLog2(("e1kCanDoGso: !TSE\n"));
3507 return false;
3508 }
3509 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3510 {
3511 E1kLog(("e1kCanDoGso: VLE\n"));
3512 return false;
3513 }
3514 if (RT_UNLIKELY(!pThis->fGSOEnabled))
3515 {
3516 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3517 return false;
3518 }
3519
3520 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3521 {
3522 case PDMNETWORKGSOTYPE_IPV4_TCP:
3523 case PDMNETWORKGSOTYPE_IPV4_UDP:
3524 if (!pData->dw3.fIXSM)
3525 {
3526 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3527 return false;
3528 }
3529 if (!pData->dw3.fTXSM)
3530 {
3531 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3532 return false;
3533 }
3534 /** @todo what more check should we perform here? Ethernet frame type? */
3535 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3536 return true;
3537
3538 case PDMNETWORKGSOTYPE_IPV6_TCP:
3539 case PDMNETWORKGSOTYPE_IPV6_UDP:
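/* IPv6 has no header checksum, so a request to insert one (IXSM with a
 * non-zero IPCSO) cannot be honored by the GSO path. */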
3540 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3541 {
3542 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3543 return false;
3544 }
3545 if (!pData->dw3.fTXSM)
3546 {
3547 E1kLog(("e1kCanDoGso: TXSM (IPv6)\n"));
3548 return false;
3549 }
3550 /** @todo what more check should we perform here? Ethernet frame type? */
3551 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3552 return true;
3553
3554 default:
3555 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3556 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3557 return false;
3558 }
3559}
3560
3561/**
3562 * Frees the current xmit buffer.
3563 *
3564 * @param pThis The device state structure.
3565 */
3566static void e1kXmitFreeBuf(PE1KSTATE pThis)
3567{
3568 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3569 if (pSg)
3570 {
3571 pThis->CTX_SUFF(pTxSg) = NULL;
3572
3573 if (pSg->pvAllocator != pThis)
3574 {
3575 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3576 if (pDrv)
3577 pDrv->pfnFreeBuf(pDrv, pSg);
3578 }
3579 else
3580 {
3581 /* loopback */
3582 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3583 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3584 pSg->fFlags = 0;
3585 pSg->pvAllocator = NULL;
3586 }
3587 }
3588}
3589
3590#ifndef E1K_WITH_TXD_CACHE
3591/**
3592 * Allocates an xmit buffer.
3593 *
3594 * @returns See PDMINETWORKUP::pfnAllocBuf.
3595 * @param pThis The device state structure.
3596 * @param cbMin The minimum frame size.
3597 * @param fExactSize Whether cbMin is exact or if we have to max it
3598 * out to the max MTU size.
3599 * @param fGso Whether this is a GSO frame or not.
3600 */
3601DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, size_t cbMin, bool fExactSize, bool fGso)
3602{
3603 /* Adjust cbMin if necessary. */
3604 if (!fExactSize)
3605 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3606
3607 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3608 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3609 e1kXmitFreeBuf(pThis);
3610 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3611
3612 /*
3613 * Allocate the buffer.
3614 */
3615 PPDMSCATTERGATHER pSg;
3616 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3617 {
3618 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3619 if (RT_UNLIKELY(!pDrv))
3620 return VERR_NET_DOWN;
3621 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pThis->GsoCtx : NULL, &pSg);
3622 if (RT_FAILURE(rc))
3623 {
3624 /* Suspend TX as we are out of buffers atm */
3625 STATUS |= STATUS_TXOFF;
3626 return rc;
3627 }
3628 }
3629 else
3630 {
3631 /* Create a loopback using the fallback buffer and preallocated SG. */
3632 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3633 pSg = &pThis->uTxFallback.Sg;
3634 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3635 pSg->cbUsed = 0;
3636 pSg->cbAvailable = 0;
3637 pSg->pvAllocator = pThis;
3638 pSg->pvUser = NULL; /* No GSO here. */
3639 pSg->cSegs = 1;
3640 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3641 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3642 }
3643
3644 pThis->CTX_SUFF(pTxSg) = pSg;
3645 return VINF_SUCCESS;
3646}
3647#else /* E1K_WITH_TXD_CACHE */
3648/**
3649 * Allocates an xmit buffer.
3650 *
3651 * @returns See PDMINETWORKUP::pfnAllocBuf.
3652 * @param pThis The device state structure.
3653 * @param cbMin The minimum frame size.
3654 * @param fExactSize Whether cbMin is exact or if we have to max it
3655 * out to the max MTU size.
3656 * @param fGso Whether this is a GSO frame or not.
3657 */
3658DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, bool fGso)
3659{
3660 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3661 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3662 e1kXmitFreeBuf(pThis);
3663 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3664
3665 /*
3666 * Allocate the buffer.
3667 */
3668 PPDMSCATTERGATHER pSg;
3669 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3670 {
3671 if (pThis->cbTxAlloc == 0)
3672 {
3673 /* Zero packet, no need for the buffer */
3674 return VINF_SUCCESS;
3675 }
3676
3677 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3678 if (RT_UNLIKELY(!pDrv))
3679 return VERR_NET_DOWN;
3680 int rc = pDrv->pfnAllocBuf(pDrv, pThis->cbTxAlloc, fGso ? &pThis->GsoCtx : NULL, &pSg);
3681 if (RT_FAILURE(rc))
3682 {
3683 /* Suspend TX as we are out of buffers atm */
3684 STATUS |= STATUS_TXOFF;
3685 return rc;
3686 }
3687 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3688 pThis->szPrf, pThis->cbTxAlloc,
3689 pThis->fVTag ? "VLAN " : "",
3690 pThis->fGSO ? "GSO " : ""));
3691 pThis->cbTxAlloc = 0;
3692 }
3693 else
3694 {
3695 /* Create a loopback using the fallback buffer and preallocated SG. */
3696 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3697 pSg = &pThis->uTxFallback.Sg;
3698 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3699 pSg->cbUsed = 0;
3700 pSg->cbAvailable = 0;
3701 pSg->pvAllocator = pThis;
3702 pSg->pvUser = NULL; /* No GSO here. */
3703 pSg->cSegs = 1;
3704 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3705 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3706 }
3707
3708 pThis->CTX_SUFF(pTxSg) = pSg;
3709 return VINF_SUCCESS;
3710}
3711#endif /* E1K_WITH_TXD_CACHE */
3712
3713/**
3714 * Checks if it's a GSO buffer or not.
3715 *
3716 * @returns true / false.
3717 * @param pTxSg The scatter / gather buffer.
3718 */
3719DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3720{
3721#if 0
3722 if (!pTxSg)
3723 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
3724 if (pTxSg && !pTxSg->pvUser)
3725 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
3726#endif
3727 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3728}
3729
3730#ifndef E1K_WITH_TXD_CACHE
3731/**
3732 * Load transmit descriptor from guest memory.
3733 *
3734 * @param pThis The device state structure.
3735 * @param pDesc Pointer to descriptor union.
3736 * @param addr Physical address in guest context.
3737 * @thread E1000_TX
3738 */
3739DECLINLINE(void) e1kLoadDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3740{
3741 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3742}
3743#else /* E1K_WITH_TXD_CACHE */
3744/**
3745 * Load transmit descriptors from guest memory.
3746 *
3747 * We need two physical reads in case the tail wrapped around the end of TX
3748 * descriptor ring.
3749 *
3750 * @returns the actual number of descriptors fetched.
3751 * @param pThis The device state structure.
3752 * @param pDesc Pointer to descriptor union.
3753 * @param addr Physical address in guest context.
3754 * @thread E1000_TX
3755 */
3756DECLINLINE(unsigned) e1kTxDLoadMore(PE1KSTATE pThis)
3757{
3758 Assert(pThis->iTxDCurrent == 0);
3759 /* We've already loaded pThis->nTxDFetched descriptors past TDH. */
3760 unsigned nDescsAvailable = e1kGetTxLen(pThis) - pThis->nTxDFetched;
3761 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pThis->nTxDFetched);
3762 unsigned nDescsTotal = TDLEN / sizeof(E1KTXDESC);
3763 unsigned nFirstNotLoaded = (TDH + pThis->nTxDFetched) % nDescsTotal;
3764 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
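/* Illustration (hypothetical values, assuming the cache has room): ring of 8
 * descriptors, TDH=6, TDT=2, 1 descriptor already fetched => 3 are available;
 * the first unloaded index is (6+1)%8=7, so the first read fetches 1
 * descriptor at index 7 and the second read below fetches the remaining 2
 * from the start of the ring. */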
3765 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u "
3766 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
3767 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
3768 nFirstNotLoaded, nDescsInSingleRead));
3769 if (nDescsToFetch == 0)
3770 return 0;
3771 E1KTXDESC* pFirstEmptyDesc = &pThis->aTxDescriptors[pThis->nTxDFetched];
3772 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3773 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3774 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
3775 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3776 pThis->szPrf, nDescsInSingleRead,
3777 TDBAH, TDBAL + TDH * sizeof(E1KTXDESC),
3778 nFirstNotLoaded, TDLEN, TDH, TDT));
3779 if (nDescsToFetch > nDescsInSingleRead)
3780 {
3781 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3782 ((uint64_t)TDBAH << 32) + TDBAL,
3783 pFirstEmptyDesc + nDescsInSingleRead,
3784 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
3785 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
3786 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
3787 TDBAH, TDBAL));
3788 }
3789 pThis->nTxDFetched += nDescsToFetch;
3790 return nDescsToFetch;
3791}
3792
3793/**
3794 * Load transmit descriptors from guest memory only if there are no loaded
3795 * descriptors.
3796 *
3797 * @returns true if there are descriptors in cache.
3798 * @param pThis The device state structure.
3799 * @param pDesc Pointer to descriptor union.
3800 * @param addr Physical address in guest context.
3801 * @thread E1000_TX
3802 */
3803DECLINLINE(bool) e1kTxDLazyLoad(PE1KSTATE pThis)
3804{
3805 if (pThis->nTxDFetched == 0)
3806 return e1kTxDLoadMore(pThis) != 0;
3807 return true;
3808}
3809#endif /* E1K_WITH_TXD_CACHE */
3810
3811/**
3812 * Write back transmit descriptor to guest memory.
3813 *
3814 * @param pThis The device state structure.
3815 * @param pDesc Pointer to descriptor union.
3816 * @param addr Physical address in guest context.
3817 * @thread E1000_TX
3818 */
3819DECLINLINE(void) e1kWriteBackDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3820{
3821 /* Only the last half of the descriptor has to be written back. */
3822 e1kPrintTDesc(pThis, pDesc, "^^^");
3823 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3824}
3825
3826/**
3827 * Transmit complete frame.
3828 *
3829 * @remarks We skip the FCS since we're not responsible for sending anything to
3830 * a real ethernet wire.
3831 *
3832 * @param pThis The device state structure.
3833 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3834 * @thread E1000_TX
3835 */
3836static void e1kTransmitFrame(PE1KSTATE pThis, bool fOnWorkerThread)
3837{
3838 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3839 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
3840 Assert(!pSg || pSg->cSegs == 1);
3841
3842 if (cbFrame > 70) /* unqualified guess */
3843 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
3844
3845#ifdef E1K_INT_STATS
3846 if (cbFrame <= 1514)
3847 E1K_INC_ISTAT_CNT(pThis->uStatTx1514);
3848 else if (cbFrame <= 2962)
3849 E1K_INC_ISTAT_CNT(pThis->uStatTx2962);
3850 else if (cbFrame <= 4410)
3851 E1K_INC_ISTAT_CNT(pThis->uStatTx4410);
3852 else if (cbFrame <= 5858)
3853 E1K_INC_ISTAT_CNT(pThis->uStatTx5858);
3854 else if (cbFrame <= 7306)
3855 E1K_INC_ISTAT_CNT(pThis->uStatTx7306);
3856 else if (cbFrame <= 8754)
3857 E1K_INC_ISTAT_CNT(pThis->uStatTx8754);
3858 else if (cbFrame <= 16384)
3859 E1K_INC_ISTAT_CNT(pThis->uStatTx16384);
3860 else if (cbFrame <= 32768)
3861 E1K_INC_ISTAT_CNT(pThis->uStatTx32768);
3862 else
3863 E1K_INC_ISTAT_CNT(pThis->uStatTxLarge);
3864#endif /* E1K_INT_STATS */
3865
3866 /* Add VLAN tag */
3867 if (cbFrame > 12 && pThis->fVTag)
3868 {
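/* The 4-byte 802.1Q tag (TPID taken from VET, followed by the TCI) is
 * inserted at byte offset 12, right after the destination and source MAC
 * addresses; the original EtherType and payload are shifted up by 4 bytes. */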
3869 E1kLog3(("%s Inserting VLAN tag %08x\n",
3870 pThis->szPrf, RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16)));
3871 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
3872 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16);
3873 pSg->cbUsed += 4;
3874 cbFrame += 4;
3875 Assert(pSg->cbUsed == cbFrame);
3876 Assert(pSg->cbUsed <= pSg->cbAvailable);
3877 }
3878/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
3879 "%.*Rhxd\n"
3880 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
3881 pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
3882
3883 /* Update the stats */
3884 E1K_INC_CNT32(TPT);
3885 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
3886 E1K_INC_CNT32(GPTC);
3887 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
3888 E1K_INC_CNT32(BPTC);
3889 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
3890 E1K_INC_CNT32(MPTC);
3891 /* Update octet transmit counter */
3892 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
3893 if (pThis->CTX_SUFF(pDrv))
3894 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, cbFrame);
3895 if (cbFrame == 64)
3896 E1K_INC_CNT32(PTC64);
3897 else if (cbFrame < 128)
3898 E1K_INC_CNT32(PTC127);
3899 else if (cbFrame < 256)
3900 E1K_INC_CNT32(PTC255);
3901 else if (cbFrame < 512)
3902 E1K_INC_CNT32(PTC511);
3903 else if (cbFrame < 1024)
3904 E1K_INC_CNT32(PTC1023);
3905 else
3906 E1K_INC_CNT32(PTC1522);
3907
3908 E1K_INC_ISTAT_CNT(pThis->uStatTxFrm);
3909
3910 /*
3911 * Dump and send the packet.
3912 */
3913 int rc = VERR_NET_DOWN;
3914 if (pSg && pSg->pvAllocator != pThis)
3915 {
3916 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
3917
3918 pThis->CTX_SUFF(pTxSg) = NULL;
3919 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3920 if (pDrv)
3921 {
3922 /* Release critical section to avoid deadlock in CanReceive */
3923 //e1kCsLeave(pThis);
3924 STAM_PROFILE_START(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
3925 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
3926 STAM_PROFILE_STOP(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
3927 //e1kCsEnter(pThis, RT_SRC_POS);
3928 }
3929 }
3930 else if (pSg)
3931 {
3932 Assert(pSg->aSegs[0].pvSeg == pThis->aTxPacketFallback);
3933 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
3934
3935 /** @todo do we actually need to check that we're in loopback mode here? */
3936 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
3937 {
3938 E1KRXDST status;
3939 RT_ZERO(status);
3940 status.fPIF = true;
3941 e1kHandleRxPacket(pThis, pSg->aSegs[0].pvSeg, cbFrame, status);
3942 rc = VINF_SUCCESS;
3943 }
3944 e1kXmitFreeBuf(pThis);
3945 }
3946 else
3947 rc = VERR_NET_DOWN;
3948 if (RT_FAILURE(rc))
3949 {
3950 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
3951 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
3952 }
3953
3954 pThis->led.Actual.s.fWriting = 0;
3955}
3956
3957/**
3958 * Compute and write internet checksum (e1kCSum16) at the specified offset.
3959 *
3960 * @param pThis The device state structure.
3961 * @param pPkt Pointer to the packet.
3962 * @param u16PktLen Total length of the packet.
3963 * @param cso Offset in packet to write checksum at.
3964 * @param css Offset in packet to start computing
3965 * checksum from.
3966 * @param cse Offset in packet to stop computing
3967 * checksum at.
3968 * @thread E1000_TX
3969 */
3970static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
3971{
3972 RT_NOREF1(pThis);
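/* Illustration (hypothetical offsets): for an IPv4 header checksum right
 * after a 14-byte Ethernet header, css=14, cso=24 (14 + 10, the offset of
 * the checksum field) and cse=33 (last byte of a 20-byte IP header);
 * cse=0 means "checksum to the end of the packet". */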
3973
3974 if (css >= u16PktLen)
3975 {
3976 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
3977 pThis->szPrf, css, u16PktLen));
3978 return;
3979 }
3980
3981 if (cso >= u16PktLen - 1)
3982 {
3983 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
3984 pThis->szPrf, cso, u16PktLen));
3985 return;
3986 }
3987
3988 if (cse == 0)
3989 cse = u16PktLen - 1;
3990 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
3991 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", pThis->szPrf,
3992 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
3993 *(uint16_t*)(pPkt + cso) = u16ChkSum;
3994}
3995
3996/**
3997 * Add a part of descriptor's buffer to transmit frame.
3998 *
3999 * @remarks data.u64BufAddr is used unconditionally for both data
4000 * and legacy descriptors since it is identical to
4001 * legacy.u64BufAddr.
4002 *
4003 * @param pThis The device state structure.
4004 * @param pDesc Pointer to the descriptor to transmit.
4005 * @param u16Len Length of buffer to the end of segment.
4006 * @param fSend Force packet sending.
4007 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4008 * @thread E1000_TX
4009 */
4010#ifndef E1K_WITH_TXD_CACHE
4011static void e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4012{
4013 /* TCP header being transmitted */
4014 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
4015 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4016 /* IP header being transmitted */
4017 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
4018 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4019
4020 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4021 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4022 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4023
4024 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4025 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4026 E1kLog3(("%s Dump of the segment:\n"
4027 "%.*Rhxd\n"
4028 "%s --- End of dump ---\n",
4029 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4030 pThis->u16TxPktLen += u16Len;
4031 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4032 pThis->szPrf, pThis->u16TxPktLen));
4033 if (pThis->u16HdrRemain > 0)
4034 {
4035 /* The header was not complete, check if it is now */
4036 if (u16Len >= pThis->u16HdrRemain)
4037 {
4038 /* The rest is payload */
4039 u16Len -= pThis->u16HdrRemain;
4040 pThis->u16HdrRemain = 0;
4041 /* Save partial checksum and flags */
4042 pThis->u32SavedCsum = pTcpHdr->chksum;
4043 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4044 /* Clear FIN and PSH flags now and set them only in the last segment */
4045 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4046 }
4047 else
4048 {
4049 /* Still not */
4050 pThis->u16HdrRemain -= u16Len;
4051 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4052 pThis->szPrf, pThis->u16HdrRemain));
4053 return;
4054 }
4055 }
4056
4057 pThis->u32PayRemain -= u16Len;
4058
4059 if (fSend)
4060 {
4061 /* Leave ethernet header intact */
4062 /* IP Total Length = payload + headers - ethernet header */
4063 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4064 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4065 pThis->szPrf, ntohs(pIpHdr->total_len)));
4066 /* Update IP Checksum */
4067 pIpHdr->chksum = 0;
4068 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4069 pThis->contextTSE.ip.u8CSO,
4070 pThis->contextTSE.ip.u8CSS,
4071 pThis->contextTSE.ip.u16CSE);
4072
4073 /* Update TCP flags */
4074 /* Restore original FIN and PSH flags for the last segment */
4075 if (pThis->u32PayRemain == 0)
4076 {
4077 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4078 E1K_INC_CNT32(TSCTC);
4079 }
4080 /* Add TCP length to partial pseudo header sum */
4081 uint32_t csum = pThis->u32SavedCsum
4082 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
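/* Fold the carries back into the low 16 bits (one's-complement addition),
 * e.g. 0x1FFFE -> 0x0001 + 0xFFFE = 0xFFFF. */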
4083 while (csum >> 16)
4084 csum = (csum >> 16) + (csum & 0xFFFF);
4085 pTcpHdr->chksum = csum;
4086 /* Compute final checksum */
4087 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4088 pThis->contextTSE.tu.u8CSO,
4089 pThis->contextTSE.tu.u8CSS,
4090 pThis->contextTSE.tu.u16CSE);
4091
4092 /*
4093 * Transmit it. If we've used the SG already, allocate a new one before
4094 * we copy the data.
4095 */
4096 if (!pThis->CTX_SUFF(pTxSg))
4097 e1kXmitAllocBuf(pThis, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
4098 if (pThis->CTX_SUFF(pTxSg))
4099 {
4100 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4101 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4102 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4103 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4104 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4105 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4106 }
4107 e1kTransmitFrame(pThis, fOnWorkerThread);
4108
4109 /* Update Sequence Number */
4110 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4111 - pThis->contextTSE.dw3.u8HDRLEN);
4112 /* Increment IP identification */
4113 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4114 }
4115}
4116#else /* E1K_WITH_TXD_CACHE */
4117static int e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4118{
4119 int rc = VINF_SUCCESS;
4120 /* TCP header being transmitted */
4121 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
4122 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4123 /* IP header being transmitted */
4124 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
4125 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4126
4127 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4128 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4129 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4130
4131 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4132 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4133 E1kLog3(("%s Dump of the segment:\n"
4134 "%.*Rhxd\n"
4135 "%s --- End of dump ---\n",
4136 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4137 pThis->u16TxPktLen += u16Len;
4138 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4139 pThis->szPrf, pThis->u16TxPktLen));
4140 if (pThis->u16HdrRemain > 0)
4141 {
4142 /* The header was not complete, check if it is now */
4143 if (u16Len >= pThis->u16HdrRemain)
4144 {
4145 /* The rest is payload */
4146 u16Len -= pThis->u16HdrRemain;
4147 pThis->u16HdrRemain = 0;
4148 /* Save partial checksum and flags */
4149 pThis->u32SavedCsum = pTcpHdr->chksum;
4150 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4151 /* Clear FIN and PSH flags now and set them only in the last segment */
4152 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4153 }
4154 else
4155 {
4156 /* Still not */
4157 pThis->u16HdrRemain -= u16Len;
4158 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4159 pThis->szPrf, pThis->u16HdrRemain));
4160 return rc;
4161 }
4162 }
4163
4164 pThis->u32PayRemain -= u16Len;
4165
4166 if (fSend)
4167 {
4168 /* Leave ethernet header intact */
4169 /* IP Total Length = payload + headers - ethernet header */
4170 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4171 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4172 pThis->szPrf, ntohs(pIpHdr->total_len)));
4173 /* Update IP Checksum */
4174 pIpHdr->chksum = 0;
4175 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4176 pThis->contextTSE.ip.u8CSO,
4177 pThis->contextTSE.ip.u8CSS,
4178 pThis->contextTSE.ip.u16CSE);
4179
4180 /* Update TCP flags */
4181 /* Restore original FIN and PSH flags for the last segment */
4182 if (pThis->u32PayRemain == 0)
4183 {
4184 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4185 E1K_INC_CNT32(TSCTC);
4186 }
4187 /* Add TCP length to partial pseudo header sum */
4188 uint32_t csum = pThis->u32SavedCsum
4189 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4190 while (csum >> 16)
4191 csum = (csum >> 16) + (csum & 0xFFFF);
4192 pTcpHdr->chksum = csum;
4193 /* Compute final checksum */
4194 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4195 pThis->contextTSE.tu.u8CSO,
4196 pThis->contextTSE.tu.u8CSS,
4197 pThis->contextTSE.tu.u16CSE);
4198
4199 /*
4200 * Transmit it.
4201 */
4202 if (pThis->CTX_SUFF(pTxSg))
4203 {
4204 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4205 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4206 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4207 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4208 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4209 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4210 }
4211 e1kTransmitFrame(pThis, fOnWorkerThread);
4212
4213 /* Update Sequence Number */
4214 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4215 - pThis->contextTSE.dw3.u8HDRLEN);
4216 /* Increment IP identification */
4217 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4218
4219 /* Allocate new buffer for the next segment. */
4220 if (pThis->u32PayRemain)
4221 {
4222 pThis->cbTxAlloc = RT_MIN(pThis->u32PayRemain,
4223 pThis->contextTSE.dw3.u16MSS)
4224 + pThis->contextTSE.dw3.u8HDRLEN
4225 + (pThis->fVTag ? 4 : 0);
4226 rc = e1kXmitAllocBuf(pThis, false /* fGSO */);
4227 }
4228 }
4229
4230 return rc;
4231}
4232#endif /* E1K_WITH_TXD_CACHE */
4233
4234#ifndef E1K_WITH_TXD_CACHE
4235/**
4236 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4237 * frame.
4238 *
4239 * We construct the frame in the fallback buffer first and then copy it to the SG
4240 * buffer before passing it down to the network driver code.
4241 *
4242 * @returns true if the frame should be transmitted, false if not.
4243 *
4244 * @param pThis The device state structure.
4245 * @param pDesc Pointer to the descriptor to transmit.
4246 * @param cbFragment Length of descriptor's buffer.
4247 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4248 * @thread E1000_TX
4249 */
4250static bool e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4251{
4252 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4253 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4254 Assert(pDesc->data.cmd.fTSE);
4255 Assert(!e1kXmitIsGsoBuf(pTxSg));
4256
4257 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4258 Assert(u16MaxPktLen != 0);
4259 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4260
4261 /*
4262 * Carve out segments.
4263 */
4264 do
4265 {
4266 /* Calculate how many bytes we have left in this TCP segment */
4267 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4268 if (cb > cbFragment)
4269 {
4270 /* This descriptor fits completely into current segment */
4271 cb = cbFragment;
4272 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4273 }
4274 else
4275 {
4276 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4277 /*
4278 * Rewind the packet tail pointer to the beginning of payload,
4279 * so we continue writing right beyond the header.
4280 */
4281 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4282 }
4283
4284 pDesc->data.u64BufAddr += cb;
4285 cbFragment -= cb;
4286 } while (cbFragment > 0);
4287
4288 if (pDesc->data.cmd.fEOP)
4289 {
4290 /* End of packet, next segment will contain header. */
4291 if (pThis->u32PayRemain != 0)
4292 E1K_INC_CNT32(TSCTFC);
4293 pThis->u16TxPktLen = 0;
4294 e1kXmitFreeBuf(pThis);
4295 }
4296
4297 return false;
4298}
4299#else /* E1K_WITH_TXD_CACHE */
4300/**
4301 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4302 * frame.
4303 *
4304 * We construct the frame in the fallback buffer first and then copy it to the SG
4305 * buffer before passing it down to the network driver code.
4306 *
4307 * @returns error code
4308 *
4309 * @param pThis The device state structure.
4310 * @param pDesc Pointer to the descriptor to transmit.
4311 * @param cbFragment Length of descriptor's buffer.
4312 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4313 * @thread E1000_TX
4314 */
4315static int e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, bool fOnWorkerThread)
4316{
4317#ifdef VBOX_STRICT
4318 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4319 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4320 Assert(pDesc->data.cmd.fTSE);
4321 Assert(!e1kXmitIsGsoBuf(pTxSg));
4322#endif
4323
4324 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4325 Assert(u16MaxPktLen != 0);
4326 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4327
4328 /*
4329 * Carve out segments.
4330 */
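/* Illustration (hypothetical TSE context): with HDRLEN=54 and MSS=1460 each
 * segment may carry at most 1514 bytes; once a segment fills up it is sent
 * and u16TxPktLen is rewound to 54 so the next segment starts right after
 * the (already buffered) headers. */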
4331 int rc;
4332 do
4333 {
4334 /* Calculate how many bytes we have left in this TCP segment */
4335 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4336 if (cb > pDesc->data.cmd.u20DTALEN)
4337 {
4338 /* This descriptor fits completely into current segment */
4339 cb = pDesc->data.cmd.u20DTALEN;
4340 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4341 }
4342 else
4343 {
4344 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4345 /*
4346 * Rewind the packet tail pointer to the beginning of payload,
4347 * so we continue writing right beyond the header.
4348 */
4349 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4350 }
4351
4352 pDesc->data.u64BufAddr += cb;
4353 pDesc->data.cmd.u20DTALEN -= cb;
4354 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4355
4356 if (pDesc->data.cmd.fEOP)
4357 {
4358 /* End of packet, next segment will contain header. */
4359 if (pThis->u32PayRemain != 0)
4360 E1K_INC_CNT32(TSCTFC);
4361 pThis->u16TxPktLen = 0;
4362 e1kXmitFreeBuf(pThis);
4363 }
4364
4365 return rc;
4366}
4367#endif /* E1K_WITH_TXD_CACHE */
4368
4369
4370/**
4371 * Add descriptor's buffer to transmit frame.
4372 *
4373 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4374 * TSE frames we cannot handle as GSO.
4375 *
4376 * @returns true on success, false on failure.
4377 *
4378 * @param pThis The device state structure.
4379 * @param PhysAddr The physical address of the descriptor buffer.
4380 * @param cbFragment Length of descriptor's buffer.
4381 * @thread E1000_TX
4382 */
4383static bool e1kAddToFrame(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint32_t cbFragment)
4384{
4385 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4386 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4387 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4388
4389 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4390 {
4391 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4392 return false;
4393 }
4394 if (RT_UNLIKELY( fGso && cbNewPkt > pTxSg->cbAvailable ))
4395 {
4396 E1kLog(("%s Transmit packet is too large: %u > %u(max)/GSO\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
4397 return false;
4398 }
4399
4400 if (RT_LIKELY(pTxSg))
4401 {
4402 Assert(pTxSg->cSegs == 1);
4403 Assert(pTxSg->cbUsed == pThis->u16TxPktLen);
4404
4405 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4406 (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4407
4408 pTxSg->cbUsed = cbNewPkt;
4409 }
4410 pThis->u16TxPktLen = cbNewPkt;
4411
4412 return true;
4413}
4414
4415
4416/**
4417 * Write the descriptor back to guest memory and notify the guest.
4418 *
4419 * @param pThis The device state structure.
4420 * @param pDesc Pointer to the descriptor have been transmitted.
4421 * @param addr Physical address of the descriptor in guest memory.
4422 * @thread E1000_TX
4423 */
4424static void e1kDescReport(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4425{
4426 /*
4427 * We fake descriptor write-back bursting. Descriptors are written back as they are
4428 * processed.
4429 */
4430 /* Let's pretend we process descriptors. Write back with DD set. */
4431 /*
4432 * Prior to r71586 we tried to accommodate the case when write-back bursts
4433 * are enabled without actually implementing bursting by writing back all
4434 * descriptors, even the ones that do not have RS set. This caused kernel
4435 * panics with Linux SMP kernels, as the e1000 driver tried to free up skb
4436 * associated with written back descriptor if it happened to be a context
4437 * descriptor since context descriptors do not have skb associated to them.
4438 * Starting from r71586 we write back only the descriptors with RS set,
4439 * which is a little bit different from what the real hardware does in
4440 * case there is a chain of data descriptors where some of them have RS set
4441 * and others do not. It is a very uncommon scenario, imho.
4442 * We need to check RPS as well since some legacy drivers use it instead of
4443 * RS even with newer cards.
4444 */
4445 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4446 {
4447 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4448 e1kWriteBackDesc(pThis, pDesc, addr);
4449 if (pDesc->legacy.cmd.fEOP)
4450 {
4451//#ifdef E1K_USE_TX_TIMERS
4452 if (pThis->fTidEnabled && pDesc->legacy.cmd.fIDE)
4453 {
4454 E1K_INC_ISTAT_CNT(pThis->uStatTxIDE);
4455 //if (pThis->fIntRaised)
4456 //{
4457 // /* Interrupt is already pending, no need for timers */
4458 // ICR |= ICR_TXDW;
4459 //}
4460 //else {
4461 /* Arm the timer to fire in TIDV usec (discard .024) */
4462 e1kArmTimer(pThis, pThis->CTX_SUFF(pTIDTimer), TIDV);
4463# ifndef E1K_NO_TAD
4464 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4465 E1kLog2(("%s Checking if TAD timer is running\n",
4466 pThis->szPrf));
4467 if (TADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pTADTimer)))
4468 e1kArmTimer(pThis, pThis->CTX_SUFF(pTADTimer), TADV);
4469# endif /* E1K_NO_TAD */
4470 }
4471 else
4472 {
4473 if (pThis->fTidEnabled)
4474 {
4475 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4476 pThis->szPrf));
4477 /* Cancel both timers if armed and fire immediately. */
4478# ifndef E1K_NO_TAD
4479 TMTimerStop(pThis->CTX_SUFF(pTADTimer));
4480# endif
4481 TMTimerStop(pThis->CTX_SUFF(pTIDTimer));
4482 }
4483//#endif /* E1K_USE_TX_TIMERS */
4484 E1K_INC_ISTAT_CNT(pThis->uStatIntTx);
4485 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXDW);
4486//#ifdef E1K_USE_TX_TIMERS
4487 }
4488//#endif /* E1K_USE_TX_TIMERS */
4489 }
4490 }
4491 else
4492 {
4493 E1K_INC_ISTAT_CNT(pThis->uStatTxNoRS);
4494 }
4495}
4496
4497#ifndef E1K_WITH_TXD_CACHE
4498
4499/**
4500 * Process Transmit Descriptor.
4501 *
4502 * E1000 supports three types of transmit descriptors:
4503 * - legacy data descriptors of older format (context-less).
4504 * - data the same as legacy but providing new offloading capabilities.
4505 * - context sets up the context for following data descriptors.
4506 *
4507 * @param pThis The device state structure.
4508 * @param pDesc Pointer to descriptor union.
4509 * @param addr Physical address of descriptor in guest memory.
4510 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4511 * @thread E1000_TX
4512 */
4513static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr, bool fOnWorkerThread)
4514{
4515 int rc = VINF_SUCCESS;
4516 uint32_t cbVTag = 0;
4517
4518 e1kPrintTDesc(pThis, pDesc, "vvv");
4519
4520//#ifdef E1K_USE_TX_TIMERS
4521 if (pThis->fTidEnabled)
4522 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
4523//#endif /* E1K_USE_TX_TIMERS */
4524
4525 switch (e1kGetDescType(pDesc))
4526 {
4527 case E1K_DTYP_CONTEXT:
4528 if (pDesc->context.dw2.fTSE)
4529 {
4530 pThis->contextTSE = pDesc->context;
4531 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4532 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4533 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4534 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4535 }
4536 else
4537 {
4538 pThis->contextNormal = pDesc->context;
4539 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4540 }
4541 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4542 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4543 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4544 pDesc->context.ip.u8CSS,
4545 pDesc->context.ip.u8CSO,
4546 pDesc->context.ip.u16CSE,
4547 pDesc->context.tu.u8CSS,
4548 pDesc->context.tu.u8CSO,
4549 pDesc->context.tu.u16CSE));
4550 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4551 e1kDescReport(pThis, pDesc, addr);
4552 break;
4553
4554 case E1K_DTYP_DATA:
4555 {
4556 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4557 {
4558                 E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4559 /** @todo Same as legacy when !TSE. See below. */
4560 break;
4561 }
4562 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4563 &pThis->StatTxDescTSEData:
4564 &pThis->StatTxDescData);
4565 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4566 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4567
4568 /*
4569 * The last descriptor of non-TSE packet must contain VLE flag.
4570              * TSE packets have the VLE flag in the first descriptor. The latter
4571              * case is taken care of a bit later when cbVTag gets assigned.
4572 *
4573 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4574 */
4575 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4576 {
4577 pThis->fVTag = pDesc->data.cmd.fVLE;
4578 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4579 }
4580 /*
4581 * First fragment: Allocate new buffer and save the IXSM and TXSM
4582 * packet options as these are only valid in the first fragment.
4583 */
4584 if (pThis->u16TxPktLen == 0)
4585 {
4586 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4587 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4588 E1kLog2(("%s Saving checksum flags:%s%s; \n", pThis->szPrf,
4589 pThis->fIPcsum ? " IP" : "",
4590 pThis->fTCPcsum ? " TCP/UDP" : ""));
4591 if (pDesc->data.cmd.fTSE)
4592 {
4593 /* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
4594 pThis->fVTag = pDesc->data.cmd.fVLE;
4595 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4596 cbVTag = pThis->fVTag ? 4 : 0;
4597 }
4598 else if (pDesc->data.cmd.fEOP)
4599 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4600 else
4601 cbVTag = 4;
4602 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4603 if (e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE))
4604 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4605 true /*fExactSize*/, true /*fGso*/);
4606 else if (pDesc->data.cmd.fTSE)
4607 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4608 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4609 else
4610 rc = e1kXmitAllocBuf(pThis, pDesc->data.cmd.u20DTALEN + cbVTag,
4611 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4612
4613 /**
4614 * @todo: Perhaps it is not that simple for GSO packets! We may
4615 * need to unwind some changes.
4616 */
4617 if (RT_FAILURE(rc))
4618 {
4619 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4620 break;
4621 }
4622             /** @todo Is there any way to indicate errors other than collisions? Like
4623              *        VERR_NET_DOWN. */
4624 }
4625
4626 /*
4627 * Add the descriptor data to the frame. If the frame is complete,
4628 * transmit it and reset the u16TxPktLen field.
4629 */
4630 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4631 {
4632 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4633 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4634 if (pDesc->data.cmd.fEOP)
4635 {
4636 if ( fRc
4637 && pThis->CTX_SUFF(pTxSg)
4638 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4639 {
4640 e1kTransmitFrame(pThis, fOnWorkerThread);
4641 E1K_INC_CNT32(TSCTC);
4642 }
4643 else
4644 {
4645 if (fRc)
4646 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4647 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4648 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4649 e1kXmitFreeBuf(pThis);
4650 E1K_INC_CNT32(TSCTFC);
4651 }
4652 pThis->u16TxPktLen = 0;
4653 }
4654 }
4655 else if (!pDesc->data.cmd.fTSE)
4656 {
4657 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4658 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4659 if (pDesc->data.cmd.fEOP)
4660 {
4661 if (fRc && pThis->CTX_SUFF(pTxSg))
4662 {
4663 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4664 if (pThis->fIPcsum)
4665 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4666 pThis->contextNormal.ip.u8CSO,
4667 pThis->contextNormal.ip.u8CSS,
4668 pThis->contextNormal.ip.u16CSE);
4669 if (pThis->fTCPcsum)
4670 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4671 pThis->contextNormal.tu.u8CSO,
4672 pThis->contextNormal.tu.u8CSS,
4673 pThis->contextNormal.tu.u16CSE);
4674 e1kTransmitFrame(pThis, fOnWorkerThread);
4675 }
4676 else
4677 e1kXmitFreeBuf(pThis);
4678 pThis->u16TxPktLen = 0;
4679 }
4680 }
4681 else
4682 {
4683 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4684 e1kFallbackAddToFrame(pThis, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4685 }
4686
4687 e1kDescReport(pThis, pDesc, addr);
4688 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4689 break;
4690 }
4691
4692 case E1K_DTYP_LEGACY:
4693 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4694 {
4695 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4696 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4697 break;
4698 }
4699 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4700 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4701
4702 /* First fragment: allocate new buffer. */
4703 if (pThis->u16TxPktLen == 0)
4704 {
4705 if (pDesc->legacy.cmd.fEOP)
4706 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4707 else
4708 cbVTag = 4;
4709 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4710 /** @todo reset status bits? */
4711 rc = e1kXmitAllocBuf(pThis, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
4712 if (RT_FAILURE(rc))
4713 {
4714 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4715 break;
4716 }
4717
4718             /** @todo Is there any way to indicate errors other than collisions? Like
4719              *        VERR_NET_DOWN. */
4720 }
4721
4722 /* Add fragment to frame. */
4723 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4724 {
4725 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4726
4727 /* Last fragment: Transmit and reset the packet storage counter. */
4728 if (pDesc->legacy.cmd.fEOP)
4729 {
4730 pThis->fVTag = pDesc->legacy.cmd.fVLE;
4731 pThis->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4732 /** @todo Offload processing goes here. */
4733 e1kTransmitFrame(pThis, fOnWorkerThread);
4734 pThis->u16TxPktLen = 0;
4735 }
4736 }
4737 /* Last fragment + failure: free the buffer and reset the storage counter. */
4738 else if (pDesc->legacy.cmd.fEOP)
4739 {
4740 e1kXmitFreeBuf(pThis);
4741 pThis->u16TxPktLen = 0;
4742 }
4743
4744 e1kDescReport(pThis, pDesc, addr);
4745 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4746 break;
4747
4748 default:
4749 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4750 pThis->szPrf, e1kGetDescType(pDesc)));
4751 break;
4752 }
4753
4754 return rc;
4755}
4756
4757#else /* E1K_WITH_TXD_CACHE */
4758
4759/**
4760 * Process Transmit Descriptor.
4761 *
4762 * E1000 supports three types of transmit descriptors:
4763 * - legacy: data descriptors in the older, context-less format.
4764 * - data: same as legacy but with new offloading capabilities.
4765 * - context: sets up the context for the following data descriptors.
4766 *
4767 * @param pThis The device state structure.
4768 * @param pDesc Pointer to descriptor union.
4769 * @param addr Physical address of descriptor in guest memory.
4770 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4771 *
4772 * @thread E1000_TX
4773 */
4774static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr,
4775 bool fOnWorkerThread)
4776{
4777 int rc = VINF_SUCCESS;
4778
4779 e1kPrintTDesc(pThis, pDesc, "vvv");
4780
4781//#ifdef E1K_USE_TX_TIMERS
4782 if (pThis->fTidEnabled)
4783 TMTimerStop(pThis->CTX_SUFF(pTIDTimer));
4784//#endif /* E1K_USE_TX_TIMERS */
4785
4786 switch (e1kGetDescType(pDesc))
4787 {
4788 case E1K_DTYP_CONTEXT:
4789             /* The caller has already updated the context. */
4790 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4791 e1kDescReport(pThis, pDesc, addr);
4792 break;
4793
4794 case E1K_DTYP_DATA:
4795 {
4796 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4797 &pThis->StatTxDescTSEData:
4798 &pThis->StatTxDescData);
4799 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4800 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4801 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4802 {
4803                 E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4804 }
4805 else
4806 {
4807 /*
4808 * Add the descriptor data to the frame. If the frame is complete,
4809 * transmit it and reset the u16TxPktLen field.
4810 */
4811 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4812 {
4813 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4814 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4815 if (pDesc->data.cmd.fEOP)
4816 {
4817 if ( fRc
4818 && pThis->CTX_SUFF(pTxSg)
4819 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4820 {
4821 e1kTransmitFrame(pThis, fOnWorkerThread);
4822 E1K_INC_CNT32(TSCTC);
4823 }
4824 else
4825 {
4826 if (fRc)
4827 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4828 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4829 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4830 e1kXmitFreeBuf(pThis);
4831 E1K_INC_CNT32(TSCTFC);
4832 }
4833 pThis->u16TxPktLen = 0;
4834 }
4835 }
4836 else if (!pDesc->data.cmd.fTSE)
4837 {
4838 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4839 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4840 if (pDesc->data.cmd.fEOP)
4841 {
4842 if (fRc && pThis->CTX_SUFF(pTxSg))
4843 {
4844 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4845 if (pThis->fIPcsum)
4846 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4847 pThis->contextNormal.ip.u8CSO,
4848 pThis->contextNormal.ip.u8CSS,
4849 pThis->contextNormal.ip.u16CSE);
4850 if (pThis->fTCPcsum)
4851 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4852 pThis->contextNormal.tu.u8CSO,
4853 pThis->contextNormal.tu.u8CSS,
4854 pThis->contextNormal.tu.u16CSE);
4855 e1kTransmitFrame(pThis, fOnWorkerThread);
4856 }
4857 else
4858 e1kXmitFreeBuf(pThis);
4859 pThis->u16TxPktLen = 0;
4860 }
4861 }
4862 else
4863 {
4864 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4865 rc = e1kFallbackAddToFrame(pThis, pDesc, fOnWorkerThread);
4866 }
4867 }
4868 e1kDescReport(pThis, pDesc, addr);
4869 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4870 break;
4871 }
4872
4873 case E1K_DTYP_LEGACY:
4874 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4875 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4876 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4877 {
4878 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4879 }
4880 else
4881 {
4882 /* Add fragment to frame. */
4883 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4884 {
4885 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4886
4887 /* Last fragment: Transmit and reset the packet storage counter. */
4888 if (pDesc->legacy.cmd.fEOP)
4889 {
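                        /* The IC (Insert Checksum) bit tells us to patch a checksum into
                           the assembled frame before it is transmitted. */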
4890 if (pDesc->legacy.cmd.fIC)
4891 {
4892 e1kInsertChecksum(pThis,
4893 (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
4894 pThis->u16TxPktLen,
4895 pDesc->legacy.cmd.u8CSO,
4896 pDesc->legacy.dw3.u8CSS,
4897 0);
4898 }
4899 e1kTransmitFrame(pThis, fOnWorkerThread);
4900 pThis->u16TxPktLen = 0;
4901 }
4902 }
4903 /* Last fragment + failure: free the buffer and reset the storage counter. */
4904 else if (pDesc->legacy.cmd.fEOP)
4905 {
4906 e1kXmitFreeBuf(pThis);
4907 pThis->u16TxPktLen = 0;
4908 }
4909 }
4910 e1kDescReport(pThis, pDesc, addr);
4911 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4912 break;
4913
4914 default:
4915 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4916 pThis->szPrf, e1kGetDescType(pDesc)));
4917 break;
4918 }
4919
4920 return rc;
4921}
4922
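/**
 * Update the transmit context from a context descriptor.
 *
 * Depending on the TSE bit the descriptor is stored as either the TSE or the
 * normal (checksum offload) context. For TSE contexts the remaining payload
 * and header byte counters are (re)initialized and the GSO context is set up
 * as well.
 *
 * @param   pThis       The device state structure.
 * @param   pDesc       Pointer to the context descriptor.
 * @thread  E1000_TX
 */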
4923DECLINLINE(void) e1kUpdateTxContext(PE1KSTATE pThis, E1KTXDESC *pDesc)
4924{
4925 if (pDesc->context.dw2.fTSE)
4926 {
4927 pThis->contextTSE = pDesc->context;
4928 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4929 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4930 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4931 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4932 }
4933 else
4934 {
4935 pThis->contextNormal = pDesc->context;
4936 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4937 }
4938 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4939 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4940 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4941 pDesc->context.ip.u8CSS,
4942 pDesc->context.ip.u8CSO,
4943 pDesc->context.ip.u16CSE,
4944 pDesc->context.tu.u8CSS,
4945 pDesc->context.tu.u8CSO,
4946 pDesc->context.tu.u16CSE));
4947}
4948
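/**
 * Locate a complete packet in the transmit descriptor cache.
 *
 * Walks the fetched descriptors starting at iTxDCurrent: context descriptors
 * update the transmit context, while data and legacy descriptors contribute
 * to the packet length until a descriptor with EOP is found. On success
 * cbTxAlloc is set to the buffer size to allocate (only the first segment if
 * we have to do segmentation in software and cannot use GSO), including the
 * VLAN tag if one is to be inserted.
 *
 * @returns true if a complete packet (or a run of empty descriptors to be
 *          treated as a dummy packet) was located, false if more descriptors
 *          need to be fetched.
 *
 * @param   pThis       The device state structure.
 * @thread  E1000_TX
 */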
4949static bool e1kLocateTxPacket(PE1KSTATE pThis)
4950{
4951 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
4952 pThis->szPrf, pThis->cbTxAlloc));
4953 /* Check if we have located the packet already. */
4954 if (pThis->cbTxAlloc)
4955 {
4956 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
4957 pThis->szPrf, pThis->cbTxAlloc));
4958 return true;
4959 }
4960
4961 bool fTSE = false;
4962 uint32_t cbPacket = 0;
4963
4964 for (int i = pThis->iTxDCurrent; i < pThis->nTxDFetched; ++i)
4965 {
4966 E1KTXDESC *pDesc = &pThis->aTxDescriptors[i];
4967 switch (e1kGetDescType(pDesc))
4968 {
4969 case E1K_DTYP_CONTEXT:
4970 e1kUpdateTxContext(pThis, pDesc);
4971 continue;
4972 case E1K_DTYP_LEGACY:
4973 /* Skip empty descriptors. */
4974 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
4975 break;
4976 cbPacket += pDesc->legacy.cmd.u16Length;
4977 pThis->fGSO = false;
4978 break;
4979 case E1K_DTYP_DATA:
4980 /* Skip empty descriptors. */
4981 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
4982 break;
4983 if (cbPacket == 0)
4984 {
4985 /*
4986 * The first fragment: save IXSM and TXSM options
4987 * as these are only valid in the first fragment.
4988 */
4989 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4990 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4991 fTSE = pDesc->data.cmd.fTSE;
4992 /*
4993 * TSE descriptors have VLE bit properly set in
4994 * the first fragment.
4995 */
4996 if (fTSE)
4997 {
4998 pThis->fVTag = pDesc->data.cmd.fVLE;
4999 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5000 }
5001 pThis->fGSO = e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE);
5002 }
5003 cbPacket += pDesc->data.cmd.u20DTALEN;
5004 break;
5005 default:
5006 AssertMsgFailed(("Impossible descriptor type!"));
5007 }
5008 if (pDesc->legacy.cmd.fEOP)
5009 {
5010 /*
5011 * Non-TSE descriptors have VLE bit properly set in
5012 * the last fragment.
5013 */
5014 if (!fTSE)
5015 {
5016 pThis->fVTag = pDesc->data.cmd.fVLE;
5017 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5018 }
5019 /*
5020 * Compute the required buffer size. If we cannot do GSO but still
5021 * have to do segmentation we allocate the first segment only.
5022 */
5023 pThis->cbTxAlloc = (!fTSE || pThis->fGSO) ?
5024 cbPacket :
5025 RT_MIN(cbPacket, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN);
5026 if (pThis->fVTag)
5027 pThis->cbTxAlloc += 4;
5028 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
5029 pThis->szPrf, pThis->cbTxAlloc));
5030 return true;
5031 }
5032 }
5033
5034 if (cbPacket == 0 && pThis->nTxDFetched - pThis->iTxDCurrent > 0)
5035 {
5036 /* All descriptors were empty, we need to process them as a dummy packet */
5037 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
5038 pThis->szPrf, pThis->cbTxAlloc));
5039 return true;
5040 }
5041 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d\n",
5042 pThis->szPrf, pThis->cbTxAlloc));
5043 return false;
5044}
5045
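/**
 * Transmit the packet currently held in the transmit descriptor cache.
 *
 * Processes descriptors starting at iTxDCurrent until the first data or
 * legacy descriptor with EOP set (or until all fetched descriptors are
 * consumed), advancing TDH with wrap-around and raising ICR.TXD_LOW when the
 * number of unprocessed descriptors drops to or below the threshold derived
 * from TXDCTL.LWTHRESH (if non-zero).
 *
 * @returns VBox status code.
 *
 * @param   pThis             The device state structure.
 * @param   fOnWorkerThread   Whether we're on a worker thread or an EMT.
 * @thread  E1000_TX
 */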
5046static int e1kXmitPacket(PE1KSTATE pThis, bool fOnWorkerThread)
5047{
5048 int rc = VINF_SUCCESS;
5049
5050 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
5051 pThis->szPrf, pThis->iTxDCurrent, pThis->nTxDFetched));
5052
5053 while (pThis->iTxDCurrent < pThis->nTxDFetched)
5054 {
5055 E1KTXDESC *pDesc = &pThis->aTxDescriptors[pThis->iTxDCurrent];
5056 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5057 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
5058 rc = e1kXmitDesc(pThis, pDesc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5059 if (RT_FAILURE(rc))
5060 break;
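        /* Advance the head pointer, wrapping around at the end of the descriptor ring. */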
5061 if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
5062 TDH = 0;
5063 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
5064 if (uLowThreshold != 0 && e1kGetTxLen(pThis) <= uLowThreshold)
5065 {
5066 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5067 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5068 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5069 }
5070 ++pThis->iTxDCurrent;
5071 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
5072 break;
5073 }
5074
5075 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
5076 pThis->szPrf, rc, pThis->iTxDCurrent, pThis->nTxDFetched));
5077 return rc;
5078}
5079
5080#endif /* E1K_WITH_TXD_CACHE */
5081#ifndef E1K_WITH_TXD_CACHE
5082
5083/**
5084 * Transmit pending descriptors.
5085 *
5086 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5087 *
5088 * @param pThis The E1000 state.
5089 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5090 */
5091static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5092{
5093 int rc = VINF_SUCCESS;
5094
5095 /* Check if transmitter is enabled. */
5096 if (!(TCTL & TCTL_EN))
5097 return VINF_SUCCESS;
5098 /*
5099 * Grab the xmit lock of the driver as well as the E1K device state.
5100 */
5101 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5102 if (RT_LIKELY(rc == VINF_SUCCESS))
5103 {
5104 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5105 if (pDrv)
5106 {
5107 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5108 if (RT_FAILURE(rc))
5109 {
5110 e1kCsTxLeave(pThis);
5111 return rc;
5112 }
5113 }
5114 /*
5115 * Process all pending descriptors.
5116 * Note! Do not process descriptors in locked state
5117 */
5118 while (TDH != TDT && !pThis->fLocked)
5119 {
5120 E1KTXDESC desc;
5121 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5122 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
5123
5124 e1kLoadDesc(pThis, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
5125 rc = e1kXmitDesc(pThis, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5126 /* If we failed to transmit descriptor we will try it again later */
5127 if (RT_FAILURE(rc))
5128 break;
5129 if (++TDH * sizeof(desc) >= TDLEN)
5130 TDH = 0;
5131
5132 if (e1kGetTxLen(pThis) <= GET_BITS(TXDCTL, LWTHRESH)*8)
5133 {
5134 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5135 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5136 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5137 }
5138
5139 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5140 }
5141
5142 /// @todo uncomment: pThis->uStatIntTXQE++;
5143 /// @todo uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5144 /*
5145 * Release the lock.
5146 */
5147 if (pDrv)
5148 pDrv->pfnEndXmit(pDrv);
5149 e1kCsTxLeave(pThis);
5150 }
5151
5152 return rc;
5153}
5154
5155#else /* E1K_WITH_TXD_CACHE */
5156
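/**
 * Dump the transmit descriptor ring and the descriptor cache to the release log.
 *
 * Used as a diagnostic aid by e1kXmitPending() when no complete packet can be
 * located in the descriptor cache.
 *
 * @param   pThis       The device state structure.
 */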
5157static void e1kDumpTxDCache(PE1KSTATE pThis)
5158{
5159 unsigned i, cDescs = TDLEN / sizeof(E1KTXDESC);
5160 uint32_t tdh = TDH;
5161 LogRel(("-- Transmit Descriptors (%d total) --\n", cDescs));
5162 for (i = 0; i < cDescs; ++i)
5163 {
5164 E1KTXDESC desc;
5165 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(TDBAH, TDBAL, i),
5166 &desc, sizeof(desc));
5167 if (i == tdh)
5168 LogRel((">>> "));
5169 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5170 }
5171 LogRel(("-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5172 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE));
5173 if (tdh > pThis->iTxDCurrent)
5174 tdh -= pThis->iTxDCurrent;
5175 else
5176 tdh = cDescs + tdh - pThis->iTxDCurrent;
5177 for (i = 0; i < pThis->nTxDFetched; ++i)
5178 {
5179 if (i == pThis->iTxDCurrent)
5180 LogRel((">>> "));
5181 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
5182 }
5183}
5184
5185/**
5186 * Transmit pending descriptors.
5187 *
5188 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5189 *
5190 * @param pThis The E1000 state.
5191 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5192 */
5193static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5194{
5195 int rc = VINF_SUCCESS;
5196
5197 /* Check if transmitter is enabled. */
5198 if (!(TCTL & TCTL_EN))
5199 return VINF_SUCCESS;
5200 /*
5201 * Grab the xmit lock of the driver as well as the E1K device state.
5202 */
5203 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5204 if (pDrv)
5205 {
5206 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5207 if (RT_FAILURE(rc))
5208 return rc;
5209 }
5210
5211 /*
5212 * Process all pending descriptors.
5213 * Note! Do not process descriptors in locked state
5214 */
5215 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5216 if (RT_LIKELY(rc == VINF_SUCCESS))
5217 {
5218 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5219 /*
5220 * fIncomplete is set whenever we try to fetch additional descriptors
5221          * for an incomplete packet. If we fail to locate a complete packet on
5222          * the next iteration, we need to reset the cache or we risk getting
5223          * stuck in this loop forever.
5224 */
5225 bool fIncomplete = false;
5226 while (!pThis->fLocked && e1kTxDLazyLoad(pThis))
5227 {
5228 while (e1kLocateTxPacket(pThis))
5229 {
5230 fIncomplete = false;
5231 /* Found a complete packet, allocate it. */
5232 rc = e1kXmitAllocBuf(pThis, pThis->fGSO);
5233 /* If we're out of bandwidth we'll come back later. */
5234 if (RT_FAILURE(rc))
5235 goto out;
5236 /* Copy the packet to allocated buffer and send it. */
5237 rc = e1kXmitPacket(pThis, fOnWorkerThread);
5238 /* If we're out of bandwidth we'll come back later. */
5239 if (RT_FAILURE(rc))
5240 goto out;
5241 }
5242 uint8_t u8Remain = pThis->nTxDFetched - pThis->iTxDCurrent;
5243 if (RT_UNLIKELY(fIncomplete))
5244 {
5245 static bool fTxDCacheDumped = false;
5246 /*
5247 * The descriptor cache is full, but we were unable to find
5248 * a complete packet in it. Drop the cache and hope that
5249 * the guest driver can recover from network card error.
5250                  * the guest driver can recover from the network card error.
5251 LogRel(("%s No complete packets in%s TxD cache! "
5252 "Fetched=%d, current=%d, TX len=%d.\n",
5253 pThis->szPrf,
5254 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5255 pThis->nTxDFetched, pThis->iTxDCurrent,
5256 e1kGetTxLen(pThis)));
5257 if (!fTxDCacheDumped)
5258 {
5259 fTxDCacheDumped = true;
5260 e1kDumpTxDCache(pThis);
5261 }
5262 pThis->iTxDCurrent = pThis->nTxDFetched = 0;
5263 /*
5264 * Returning an error at this point means Guru in R0
5265 * (see @bugref{6428}).
5266 */
5267# ifdef IN_RING3
5268 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5269# else /* !IN_RING3 */
5270 rc = VINF_IOM_R3_MMIO_WRITE;
5271# endif /* !IN_RING3 */
5272 goto out;
5273 }
5274 if (u8Remain > 0)
5275 {
5276 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5277 "%d more are available\n",
5278 pThis->szPrf, pThis->iTxDCurrent, u8Remain,
5279 e1kGetTxLen(pThis) - u8Remain));
5280
5281 /*
5282 * A packet was partially fetched. Move incomplete packet to
5283 * the beginning of cache buffer, then load more descriptors.
5284 */
5285 memmove(pThis->aTxDescriptors,
5286 &pThis->aTxDescriptors[pThis->iTxDCurrent],
5287 u8Remain * sizeof(E1KTXDESC));
5288 pThis->iTxDCurrent = 0;
5289 pThis->nTxDFetched = u8Remain;
5290 e1kTxDLoadMore(pThis);
5291 fIncomplete = true;
5292 }
5293 else
5294 pThis->nTxDFetched = 0;
5295 pThis->iTxDCurrent = 0;
5296 }
5297 if (!pThis->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5298 {
5299 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5300 pThis->szPrf));
5301 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5302 }
5303out:
5304 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5305
5306 /// @todo uncomment: pThis->uStatIntTXQE++;
5307 /// @todo uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5308
5309 e1kCsTxLeave(pThis);
5310 }
5311
5312
5313 /*
5314 * Release the lock.
5315 */
5316 if (pDrv)
5317 pDrv->pfnEndXmit(pDrv);
5318 return rc;
5319}
5320
5321#endif /* E1K_WITH_TXD_CACHE */
5322#ifdef IN_RING3
5323
5324/**
5325 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5326 */
5327static DECLCALLBACK(void) e1kR3NetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5328{
5329 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
5330 /* Resume suspended transmission */
5331 STATUS &= ~STATUS_TXOFF;
5332 e1kXmitPending(pThis, true /*fOnWorkerThread*/);
5333}
5334
5335/**
5336 * Callback for consuming from transmit queue. It gets called in R3 whenever
5337 * we enqueue something in R0/GC.
5338 *
5339 * @returns true
5340 * @param pDevIns Pointer to device instance structure.
5341 * @param pItem Pointer to the element being dequeued (not used).
5342 * @thread ???
5343 */
5344static DECLCALLBACK(bool) e1kTxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5345{
5346 NOREF(pItem);
5347 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5348 E1kLog2(("%s e1kTxQueueConsumer:\n", pThis->szPrf));
5349
5350 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/); NOREF(rc);
5351#ifndef DEBUG_andy /** @todo r=andy Happens for me a lot, mute this for me. */
5352 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
5353#endif
5354 return true;
5355}
5356
5357/**
5358 * Handler for the wakeup signaller queue.
5359 */
5360static DECLCALLBACK(bool) e1kCanRxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5361{
5362 RT_NOREF(pItem);
5363 e1kWakeupReceive(pDevIns);
5364 return true;
5365}
5366
5367#endif /* IN_RING3 */
5368
5369/**
5370 * Write handler for Transmit Descriptor Tail register.
5371 *
5372 * @param pThis The device state structure.
5373 * @param offset Register offset in memory-mapped frame.
5374 * @param index Register index in register array.
5375 * @param value The value to store.
5376 *
5377 * @thread EMT
5378 */
5379static int e1kRegWriteTDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5380{
5381 int rc = e1kRegWriteDefault(pThis, offset, index, value);
5382
5383 /* All descriptors starting with head and not including tail belong to us. */
5384 /* Process them. */
5385 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5386 pThis->szPrf, TDBAL, TDBAH, TDLEN, TDH, TDT));
5387
5388 /* Ignore TDT writes when the link is down. */
5389 if (TDH != TDT && (STATUS & STATUS_LU))
5390 {
5391 Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", TDH, TDT, e1kGetTxLen(pThis)));
5392 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5393 pThis->szPrf, e1kGetTxLen(pThis)));
5394
5395 /* Transmit pending packets if possible, defer it if we cannot do it
5396 in the current context. */
5397#ifdef E1K_TX_DELAY
5398 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5399 if (RT_LIKELY(rc == VINF_SUCCESS))
5400 {
5401 if (!TMTimerIsActive(pThis->CTX_SUFF(pTXDTimer)))
5402 {
5403#ifdef E1K_INT_STATS
5404 pThis->u64ArmedAt = RTTimeNanoTS();
5405#endif
5406 e1kArmTimer(pThis, pThis->CTX_SUFF(pTXDTimer), E1K_TX_DELAY);
5407 }
5408 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayed);
5409 e1kCsTxLeave(pThis);
5410 return rc;
5411 }
5412 /* We failed to enter the TX critical section -- transmit as usual. */
5413#endif /* E1K_TX_DELAY */
5414#ifndef IN_RING3
5415 if (!pThis->CTX_SUFF(pDrv))
5416 {
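            /* No attached driver in this context: queue an item so the transmit
               gets picked up by e1kTxQueueConsumer() in ring-3. */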
5417 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pTxQueue));
5418 if (RT_UNLIKELY(pItem))
5419 PDMQueueInsert(pThis->CTX_SUFF(pTxQueue), pItem);
5420 }
5421 else
5422#endif
5423 {
5424 rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
5425 if (rc == VERR_TRY_AGAIN)
5426 rc = VINF_SUCCESS;
5427 else if (rc == VERR_SEM_BUSY)
5428 rc = VINF_IOM_R3_MMIO_WRITE;
5429 AssertRC(rc);
5430 }
5431 }
5432
5433 return rc;
5434}
5435
5436/**
5437 * Write handler for Multicast Table Array registers.
5438 *
5439 * @param pThis The device state structure.
5440 * @param offset Register offset in memory-mapped frame.
5441 * @param index Register index in register array.
5442 * @param value The value to store.
5443 * @thread EMT
5444 */
5445static int e1kRegWriteMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5446{
5447 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5448 pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])] = value;
5449
5450 return VINF_SUCCESS;
5451}
5452
5453/**
5454 * Read handler for Multicast Table Array registers.
5455 *
5456 * @returns VBox status code.
5457 *
5458 * @param pThis The device state structure.
5459 * @param offset Register offset in memory-mapped frame.
5460 * @param index Register index in register array.
5461 * @thread EMT
5462 */
5463static int e1kRegReadMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5464{
5465 AssertReturn(offset - g_aE1kRegMap[index].offset< sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5466 *pu32Value = pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])];
5467
5468 return VINF_SUCCESS;
5469}
5470
5471/**
5472 * Write handler for Receive Address registers.
5473 *
5474 * @param pThis The device state structure.
5475 * @param offset Register offset in memory-mapped frame.
5476 * @param index Register index in register array.
5477 * @param value The value to store.
5478 * @thread EMT
5479 */
5480static int e1kRegWriteRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5481{
5482 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5483 pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
5484
5485 return VINF_SUCCESS;
5486}
5487
5488/**
5489 * Read handler for Receive Address registers.
5490 *
5491 * @returns VBox status code.
5492 *
5493 * @param pThis The device state structure.
5494 * @param offset Register offset in memory-mapped frame.
5495 * @param index Register index in register array.
5496 * @thread EMT
5497 */
5498static int e1kRegReadRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5499{
5500 AssertReturn(offset - g_aE1kRegMap[index].offset< sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5501 *pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
5502
5503 return VINF_SUCCESS;
5504}
5505
5506/**
5507 * Write handler for VLAN Filter Table Array registers.
5508 *
5509 * @param pThis The device state structure.
5510 * @param offset Register offset in memory-mapped frame.
5511 * @param index Register index in register array.
5512 * @param value The value to store.
5513 * @thread EMT
5514 */
5515static int e1kRegWriteVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5516{
5517 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VINF_SUCCESS);
5518 pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])] = value;
5519
5520 return VINF_SUCCESS;
5521}
5522
5523/**
5524 * Read handler for VLAN Filter Table Array registers.
5525 *
5526 * @returns VBox status code.
5527 *
5528 * @param pThis The device state structure.
5529 * @param offset Register offset in memory-mapped frame.
5530 * @param index Register index in register array.
5531 * @thread EMT
5532 */
5533static int e1kRegReadVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5534{
5535 AssertReturn(offset - g_aE1kRegMap[index].offset< sizeof(pThis->auVFTA), VERR_DEV_IO_ERROR);
5536 *pu32Value = pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])];
5537
5538 return VINF_SUCCESS;
5539}
5540
5541/**
5542 * Read handler for unimplemented registers.
5543 *
5544 * Merely reports reads from unimplemented registers.
5545 *
5546 * @returns VBox status code.
5547 *
5548 * @param pThis The device state structure.
5549 * @param offset Register offset in memory-mapped frame.
5550 * @param index Register index in register array.
5551 * @thread EMT
5552 */
5553static int e1kRegReadUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5554{
5555 RT_NOREF3(pThis, offset, index);
5556 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5557 pThis->szPrf, offset, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5558 *pu32Value = 0;
5559
5560 return VINF_SUCCESS;
5561}
5562
5563/**
5564 * Default register read handler with automatic clear operation.
5565 *
5566 * Retrieves the value of the register from the register array in the device
5567 * state structure, then clears all of its bits.
5568 *
5569 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5570 * done in the caller.
5571 *
5572 * @returns VBox status code.
5573 *
5574 * @param pThis The device state structure.
5575 * @param offset Register offset in memory-mapped frame.
5576 * @param index Register index in register array.
5577 * @thread EMT
5578 */
5579static int e1kRegReadAutoClear(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5580{
5581 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5582 int rc = e1kRegReadDefault(pThis, offset, index, pu32Value);
5583 pThis->auRegs[index] = 0;
5584
5585 return rc;
5586}
5587
5588/**
5589 * Default register read handler.
5590 *
5591 * Retrieves the value of the register from the register array in the device
5592 * state structure. Bits corresponding to 0s in the 'readable' mask always read as 0s.
5593 *
5594 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5595 * done in the caller.
5596 *
5597 * @returns VBox status code.
5598 *
5599 * @param pThis The device state structure.
5600 * @param offset Register offset in memory-mapped frame.
5601 * @param index Register index in register array.
5602 * @thread EMT
5603 */
5604static int e1kRegReadDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5605{
5606 RT_NOREF_PV(offset);
5607
5608 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5609 *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
5610
5611 return VINF_SUCCESS;
5612}
5613
5614/**
5615 * Write handler for unimplemented registers.
5616 *
5617 * Merely reports writes to unimplemented registers.
5618 *
5619 * @param pThis The device state structure.
5620 * @param offset Register offset in memory-mapped frame.
5621 * @param index Register index in register array.
5622 * @param value The value to store.
5623 * @thread EMT
5624 */
5625
5626 static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5627{
5628 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
5629
5630 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
5631 pThis->szPrf, offset, value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5632
5633 return VINF_SUCCESS;
5634}
5635
5636/**
5637 * Default register write handler.
5638 *
5639 * Stores the value in the register array in the device state structure. Only
5640 * bits corresponding to 1s in the 'writable' mask are stored.
5641 *
5642 * @returns VBox status code.
5643 *
5644 * @param pThis The device state structure.
5645 * @param offset Register offset in memory-mapped frame.
5646 * @param index Register index in register array.
5647 * @param value The value to store.
5648 *
5649 * @thread EMT
5650 */
5651
5652static int e1kRegWriteDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5653{
5654 RT_NOREF_PV(offset);
5655
5656 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5657 pThis->auRegs[index] = (value & g_aE1kRegMap[index].writable)
5658 | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
5659
5660 return VINF_SUCCESS;
5661}
5662
5663/**
5664 * Search register table for matching register.
5665 *
5666 * @returns Index in the register table or -1 if not found.
5667 *
5668 * @param offReg Register offset in memory-mapped region.
5669 * @thread EMT
5670 */
5671static int e1kRegLookup(uint32_t offReg)
5672{
5673
5674#if 0
5675 int index;
5676
5677 for (index = 0; index < E1K_NUM_OF_REGS; index++)
5678 {
5679 if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
5680 {
5681 return index;
5682 }
5683 }
5684#else
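    /*
     * The first E1K_NUM_OF_BINARY_SEARCHABLE entries of g_aE1kRegMap are laid out
     * so they can be binary searched by offset; the remaining entries are covered
     * by the linear scan further down.
     */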
5685 int iStart = 0;
5686 int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
5687 for (;;)
5688 {
5689 int i = (iEnd - iStart) / 2 + iStart;
5690 uint32_t offCur = g_aE1kRegMap[i].offset;
5691 if (offReg < offCur)
5692 {
5693 if (i == iStart)
5694 break;
5695 iEnd = i;
5696 }
5697 else if (offReg >= offCur + g_aE1kRegMap[i].size)
5698 {
5699 i++;
5700 if (i == iEnd)
5701 break;
5702 iStart = i;
5703 }
5704 else
5705 return i;
5706 Assert(iEnd > iStart);
5707 }
5708
5709 for (unsigned i = E1K_NUM_OF_BINARY_SEARCHABLE; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5710 if (offReg - g_aE1kRegMap[i].offset < g_aE1kRegMap[i].size)
5711 return i;
5712
5713# ifdef VBOX_STRICT
5714 for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5715 Assert(offReg - g_aE1kRegMap[i].offset >= g_aE1kRegMap[i].size);
5716# endif
5717
5718#endif
5719
5720 return -1;
5721}
5722
5723/**
5724 * Handle unaligned register read operation.
5725 *
5726 * Looks up and calls appropriate handler.
5727 *
5728 * @returns VBox status code.
5729 *
5730 * @param pThis The device state structure.
5731 * @param offReg Register offset in memory-mapped frame.
5732 * @param pv Where to store the result.
5733 * @param cb Number of bytes to read.
5734 * @thread EMT
5735 * @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
5736 * accesses we have to take care of that ourselves.
5737 */
5738static int e1kRegReadUnaligned(PE1KSTATE pThis, uint32_t offReg, void *pv, uint32_t cb)
5739{
5740 uint32_t u32 = 0;
5741 uint32_t shift;
5742 int rc = VINF_SUCCESS;
5743 int index = e1kRegLookup(offReg);
5744#ifdef LOG_ENABLED
5745 char buf[9];
5746#endif
5747
5748 /*
5749 * From the spec:
5750 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5751 * double word) is ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5752 */
5753
5754 /*
5755 * To be able to read bytes and short word we convert them to properly
5756 * shifted 32-bit words and masks. The idea is to keep register-specific
5757 * handlers simple. Most accesses will be 32-bit anyway.
5758 */
5759 uint32_t mask;
5760 switch (cb)
5761 {
5762 case 4: mask = 0xFFFFFFFF; break;
5763 case 2: mask = 0x0000FFFF; break;
5764 case 1: mask = 0x000000FF; break;
5765 default:
5766 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
5767 "unsupported op size: offset=%#10x cb=%#10x\n", offReg, cb);
5768 }
5769 if (index != -1)
5770 {
5771 if (g_aE1kRegMap[index].readable)
5772 {
5773 /* Make the mask correspond to the bits we are about to read. */
5774 shift = (offReg - g_aE1kRegMap[index].offset) % sizeof(uint32_t) * 8;
5775 mask <<= shift;
5776 if (!mask)
5777 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
5778 /*
5779 * Read it. Pass the mask so the handler knows what has to be read.
5780 * Mask out irrelevant bits.
5781 */
5782 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5783 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5784 return rc;
5785 //pThis->fDelayInts = false;
5786 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5787 //pThis->iStatIntLostOne = 0;
5788 rc = g_aE1kRegMap[index].pfnRead(pThis, offReg & 0xFFFFFFFC, index, &u32);
5789 u32 &= mask;
5790 //e1kCsLeave(pThis);
5791 E1kLog2(("%s At %08X read %s from %s (%s)\n",
5792 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5793 Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
5794 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5795 /* Shift back the result. */
5796 u32 >>= shift;
5797 }
5798 else
5799 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
5800 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5801 if (IOM_SUCCESS(rc))
5802 STAM_COUNTER_INC(&pThis->aStatRegReads[index]);
5803 }
5804 else
5805 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
5806 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf)));
5807
5808 memcpy(pv, &u32, cb);
5809 return rc;
5810}
5811
5812/**
5813 * Handle 4 byte aligned and sized read operation.
5814 *
5815 * Looks up and calls appropriate handler.
5816 *
5817 * @returns VBox status code.
5818 *
5819 * @param pThis The device state structure.
5820 * @param offReg Register offset in memory-mapped frame.
5821 * @param pu32 Where to store the result.
5822 * @thread EMT
5823 */
5824static int e1kRegReadAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t *pu32)
5825{
5826 Assert(!(offReg & 3));
5827
5828 /*
5829 * Lookup the register and check that it's readable.
5830 */
5831 int rc = VINF_SUCCESS;
5832 int idxReg = e1kRegLookup(offReg);
5833 if (RT_LIKELY(idxReg != -1))
5834 {
5835 if (RT_UNLIKELY(g_aE1kRegMap[idxReg].readable))
5836 {
5837 /*
5838 * Read it. Pass the mask so the handler knows what has to be read.
5839 * Mask out irrelevant bits.
5840 */
5841 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5842 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
5843 // return rc;
5844 //pThis->fDelayInts = false;
5845 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5846 //pThis->iStatIntLostOne = 0;
5847 rc = g_aE1kRegMap[idxReg].pfnRead(pThis, offReg & 0xFFFFFFFC, idxReg, pu32);
5848 //e1kCsLeave(pThis);
5849 Log6(("%s At %08X read %08X from %s (%s)\n",
5850 pThis->szPrf, offReg, *pu32, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5851 if (IOM_SUCCESS(rc))
5852 STAM_COUNTER_INC(&pThis->aStatRegReads[idxReg]);
5853 }
5854 else
5855 E1kLog(("%s At %08X read attempt from non-readable register %s (%s)\n",
5856 pThis->szPrf, offReg, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5857 }
5858 else
5859 E1kLog(("%s At %08X read attempt from non-existing register\n", pThis->szPrf, offReg));
5860 return rc;
5861}
5862
5863/**
5864 * Handle 4 byte sized and aligned register write operation.
5865 *
5866 * Looks up and calls appropriate handler.
5867 *
5868 * @returns VBox status code.
5869 *
5870 * @param pThis The device state structure.
5871 * @param offReg Register offset in memory-mapped frame.
5872 * @param u32Value The value to write.
5873 * @thread EMT
5874 */
5875static int e1kRegWriteAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
5876{
5877 int rc = VINF_SUCCESS;
5878 int index = e1kRegLookup(offReg);
5879 if (RT_LIKELY(index != -1))
5880 {
5881 if (RT_LIKELY(g_aE1kRegMap[index].writable))
5882 {
5883 /*
5884 * Write it. Pass the mask so the handler knows what has to be written.
5885 * Mask out irrelevant bits.
5886 */
5887 Log6(("%s At %08X write %08X to %s (%s)\n",
5888 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5889 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5890 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
5891 // return rc;
5892 //pThis->fDelayInts = false;
5893 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5894 //pThis->iStatIntLostOne = 0;
5895 rc = g_aE1kRegMap[index].pfnWrite(pThis, offReg, index, u32Value);
5896 //e1kCsLeave(pThis);
5897 }
5898 else
5899 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
5900 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5901 if (IOM_SUCCESS(rc))
5902 STAM_COUNTER_INC(&pThis->aStatRegWrites[index]);
5903 }
5904 else
5905 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
5906 pThis->szPrf, offReg, u32Value));
5907 return rc;
5908}
5909
5910
5911/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
5912
5913/**
5914 * @callback_method_impl{FNIOMMMIOREAD}
5915 */
5916PDMBOTHCBDECL(int) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
5917{
5918 RT_NOREF2(pvUser, cb);
5919 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5920 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIORead), a);
5921
5922 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
5923 Assert(offReg < E1K_MM_SIZE);
5924 Assert(cb == 4);
5925 Assert(!(GCPhysAddr & 3));
5926
5927 int rc = e1kRegReadAlignedU32(pThis, offReg, (uint32_t *)pv);
5928
5929 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIORead), a);
5930 return rc;
5931}
5932
5933/**
5934 * @callback_method_impl{FNIOMMMIOWRITE}
5935 */
5936PDMBOTHCBDECL(int) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
5937{
5938 RT_NOREF2(pvUser, cb);
5939 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5940 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
5941
5942 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
5943 Assert(offReg < E1K_MM_SIZE);
5944 Assert(cb == 4);
5945 Assert(!(GCPhysAddr & 3));
5946
5947 int rc = e1kRegWriteAlignedU32(pThis, offReg, *(uint32_t const *)pv);
5948
5949 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
5950 return rc;
5951}
5952
5953/**
5954 * @callback_method_impl{FNIOMIOPORTIN}
5955 */
5956PDMBOTHCBDECL(int) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t *pu32, unsigned cb)
5957{
5958 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5959 int rc;
5960 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIORead), a);
5961 RT_NOREF_PV(pvUser);
5962
5963 uPort -= pThis->IOPortBase;
5964 if (RT_LIKELY(cb == 4))
5965 switch (uPort)
5966 {
5967 case 0x00: /* IOADDR */
5968 *pu32 = pThis->uSelectedReg;
5969 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
5970 rc = VINF_SUCCESS;
5971 break;
5972
5973 case 0x04: /* IODATA */
5974 if (!(pThis->uSelectedReg & 3))
5975 rc = e1kRegReadAlignedU32(pThis, pThis->uSelectedReg, pu32);
5976 else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
5977 rc = e1kRegReadUnaligned(pThis, pThis->uSelectedReg, pu32, cb);
5978 if (rc == VINF_IOM_R3_MMIO_READ)
5979 rc = VINF_IOM_R3_IOPORT_READ;
5980 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
5981 break;
5982
5983 default:
5984 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", pThis->szPrf, uPort));
5985 //rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
5986 rc = VINF_SUCCESS;
5987 }
5988 else
5989 {
5990 E1kLog(("%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x", pThis->szPrf, uPort, cb));
5991 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb);
5992 }
5993 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIORead), a);
5994 return rc;
5995}
5996
5997
5998/**
5999 * @callback_method_impl{FNIOMIOPORTOUT}
6000 */
6001PDMBOTHCBDECL(int) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t u32, unsigned cb)
6002{
6003 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6004 int rc;
6005 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6006 RT_NOREF_PV(pvUser);
6007
6008 E1kLog2(("%s e1kIOPortOut: uPort=%RTiop value=%08x\n", pThis->szPrf, uPort, u32));
6009 if (RT_LIKELY(cb == 4))
6010 {
6011 uPort -= pThis->IOPortBase;
6012 switch (uPort)
6013 {
6014 case 0x00: /* IOADDR */
6015 pThis->uSelectedReg = u32;
6016 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
6017 rc = VINF_SUCCESS;
6018 break;
6019
6020 case 0x04: /* IODATA */
6021 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
6022 if (RT_LIKELY(!(pThis->uSelectedReg & 3)))
6023 {
6024 rc = e1kRegWriteAlignedU32(pThis, pThis->uSelectedReg, u32);
6025 if (rc == VINF_IOM_R3_MMIO_WRITE)
6026 rc = VINF_IOM_R3_IOPORT_WRITE;
6027 }
6028 else
6029 rc = PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
6030 "Spec violation: misaligned offset: %#10x, ignored.\n", pThis->uSelectedReg);
6031 break;
6032
6033 default:
6034 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", pThis->szPrf, uPort));
6035 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", uPort);
6036 }
6037 }
6038 else
6039 {
6040 E1kLog(("%s e1kIOPortOut: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb));
6041 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s: invalid op size: uPort=%RTiop cb=%#x\n", pThis->szPrf, uPort, cb);
6042 }
6043
6044 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6045 return rc;
6046}
6047
6048#ifdef IN_RING3
6049
6050/**
6051 * Dump complete device state to log.
6052 *
6053 * @param pThis Pointer to device state.
6054 */
6055static void e1kDumpState(PE1KSTATE pThis)
6056{
6057 RT_NOREF(pThis);
6058 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6059 E1kLog2(("%s %8.8s = %08x\n", pThis->szPrf, g_aE1kRegMap[i].abbrev, pThis->auRegs[i]));
6060# ifdef E1K_INT_STATS
6061 LogRel(("%s Interrupt attempts: %d\n", pThis->szPrf, pThis->uStatIntTry));
6062 LogRel(("%s Interrupts raised : %d\n", pThis->szPrf, pThis->uStatInt));
6063 LogRel(("%s Interrupts lowered: %d\n", pThis->szPrf, pThis->uStatIntLower));
6064 LogRel(("%s Interrupts delayed: %d\n", pThis->szPrf, pThis->uStatIntDly));
6065 LogRel(("%s Disabled delayed: %d\n", pThis->szPrf, pThis->uStatDisDly));
6066 LogRel(("%s Interrupts skipped: %d\n", pThis->szPrf, pThis->uStatIntSkip));
6067 LogRel(("%s Masked interrupts : %d\n", pThis->szPrf, pThis->uStatIntMasked));
6068 LogRel(("%s Early interrupts : %d\n", pThis->szPrf, pThis->uStatIntEarly));
6069 LogRel(("%s Late interrupts : %d\n", pThis->szPrf, pThis->uStatIntLate));
6070 LogRel(("%s Lost interrupts : %d\n", pThis->szPrf, pThis->iStatIntLost));
6071 LogRel(("%s Interrupts by RX : %d\n", pThis->szPrf, pThis->uStatIntRx));
6072 LogRel(("%s Interrupts by TX : %d\n", pThis->szPrf, pThis->uStatIntTx));
6073 LogRel(("%s Interrupts by ICS : %d\n", pThis->szPrf, pThis->uStatIntICS));
6074 LogRel(("%s Interrupts by RDTR: %d\n", pThis->szPrf, pThis->uStatIntRDTR));
6075 LogRel(("%s Interrupts by RDMT: %d\n", pThis->szPrf, pThis->uStatIntRXDMT0));
6076 LogRel(("%s Interrupts by TXQE: %d\n", pThis->szPrf, pThis->uStatIntTXQE));
6077 LogRel(("%s TX int delay asked: %d\n", pThis->szPrf, pThis->uStatTxIDE));
6078 LogRel(("%s TX delayed: %d\n", pThis->szPrf, pThis->uStatTxDelayed));
6079 LogRel(("%s TX delay expired: %d\n", pThis->szPrf, pThis->uStatTxDelayExp));
6080 LogRel(("%s TX no report asked: %d\n", pThis->szPrf, pThis->uStatTxNoRS));
6081 LogRel(("%s TX abs timer expd : %d\n", pThis->szPrf, pThis->uStatTAD));
6082 LogRel(("%s TX int timer expd : %d\n", pThis->szPrf, pThis->uStatTID));
6083 LogRel(("%s RX abs timer expd : %d\n", pThis->szPrf, pThis->uStatRAD));
6084 LogRel(("%s RX int timer expd : %d\n", pThis->szPrf, pThis->uStatRID));
6085 LogRel(("%s TX CTX descriptors: %d\n", pThis->szPrf, pThis->uStatDescCtx));
6086 LogRel(("%s TX DAT descriptors: %d\n", pThis->szPrf, pThis->uStatDescDat));
6087 LogRel(("%s TX LEG descriptors: %d\n", pThis->szPrf, pThis->uStatDescLeg));
6088 LogRel(("%s Received frames : %d\n", pThis->szPrf, pThis->uStatRxFrm));
6089 LogRel(("%s Transmitted frames: %d\n", pThis->szPrf, pThis->uStatTxFrm));
6090 LogRel(("%s TX frames up to 1514: %d\n", pThis->szPrf, pThis->uStatTx1514));
6091 LogRel(("%s TX frames up to 2962: %d\n", pThis->szPrf, pThis->uStatTx2962));
6092 LogRel(("%s TX frames up to 4410: %d\n", pThis->szPrf, pThis->uStatTx4410));
6093 LogRel(("%s TX frames up to 5858: %d\n", pThis->szPrf, pThis->uStatTx5858));
6094 LogRel(("%s TX frames up to 7306: %d\n", pThis->szPrf, pThis->uStatTx7306));
6095 LogRel(("%s TX frames up to 8754: %d\n", pThis->szPrf, pThis->uStatTx8754));
6096 LogRel(("%s TX frames up to 16384: %d\n", pThis->szPrf, pThis->uStatTx16384));
6097 LogRel(("%s TX frames up to 32768: %d\n", pThis->szPrf, pThis->uStatTx32768));
6098 LogRel(("%s Larger TX frames : %d\n", pThis->szPrf, pThis->uStatTxLarge));
6099 LogRel(("%s Max TX Delay : %lld\n", pThis->szPrf, pThis->uStatMaxTxDelay));
6100# endif /* E1K_INT_STATS */
6101}
6102
6103/**
6104 * @callback_method_impl{FNPCIIOREGIONMAP}
6105 */
6106static DECLCALLBACK(int) e1kMap(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
6107 RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
6108{
6109 RT_NOREF(pPciDev, iRegion);
6110 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE *);
6111 int rc;
6112
6113 switch (enmType)
6114 {
6115 case PCI_ADDRESS_SPACE_IO:
6116 pThis->IOPortBase = (RTIOPORT)GCPhysAddress;
6117 rc = PDMDevHlpIOPortRegister(pDevIns, pThis->IOPortBase, cb, NULL /*pvUser*/,
6118 e1kIOPortOut, e1kIOPortIn, NULL, NULL, "E1000");
6119 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6120 rc = PDMDevHlpIOPortRegisterR0(pDevIns, pThis->IOPortBase, cb, NIL_RTR0PTR /*pvUser*/,
6121 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6122 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6123 rc = PDMDevHlpIOPortRegisterRC(pDevIns, pThis->IOPortBase, cb, NIL_RTRCPTR /*pvUser*/,
6124 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6125 break;
6126
6127 case PCI_ADDRESS_SPACE_MEM:
6128 /*
6129 * From the spec:
6130 * For registers that should be accessed as 32-bit double words,
6131 * partial writes (less than a 32-bit double word) is ignored.
6132 * Partial reads return all 32 bits of data regardless of the
6133 * byte enables.
6134 */
6135#ifdef E1K_WITH_PREREG_MMIO
6136 pThis->addrMMReg = GCPhysAddress;
6137 if (GCPhysAddress == NIL_RTGCPHYS)
6138 rc = VINF_SUCCESS;
6139 else
6140 {
6141 Assert(!(GCPhysAddress & 7));
6142 rc = PDMDevHlpMMIOExMap(pDevIns, pPciDev, iRegion, GCPhysAddress);
6143 }
6144#else
6145 pThis->addrMMReg = GCPhysAddress; Assert(!(GCPhysAddress & 7));
6146 rc = PDMDevHlpMMIORegister(pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
6147 IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD,
6148 e1kMMIOWrite, e1kMMIORead, "E1000");
6149 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6150 rc = PDMDevHlpMMIORegisterR0(pDevIns, GCPhysAddress, cb, NIL_RTR0PTR /*pvUser*/,
6151 "e1kMMIOWrite", "e1kMMIORead");
6152 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6153 rc = PDMDevHlpMMIORegisterRC(pDevIns, GCPhysAddress, cb, NIL_RTRCPTR /*pvUser*/,
6154 "e1kMMIOWrite", "e1kMMIORead");
6155#endif
6156 break;
6157
6158 default:
6159 /* We should never get here */
6160 AssertMsgFailed(("Invalid PCI address space param in map callback"));
6161 rc = VERR_INTERNAL_ERROR;
6162 break;
6163 }
6164 return rc;
6165}
6166
6167
6168/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
6169
6170/**
6171 * Check if the device can receive data now.
6172 * This must be called before the pfnReceive() method is called.
6173 *
6174 * @returns VINF_SUCCESS if the device can receive, VERR_NET_NO_BUFFER_SPACE otherwise.
6175 * @param pThis Pointer to the state structure.
6176 * @thread EMT
6177 */
6178static int e1kCanReceive(PE1KSTATE pThis)
6179{
6180#ifndef E1K_WITH_RXD_CACHE
6181 size_t cb;
6182
6183 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6184 return VERR_NET_NO_BUFFER_SPACE;
6185
6186 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6187 {
6188 E1KRXDESC desc;
6189 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6190 &desc, sizeof(desc));
6191 if (desc.status.fDD)
6192 cb = 0;
6193 else
6194 cb = pThis->u16RxBSize;
6195 }
6196 else if (RDH < RDT)
6197 cb = (RDT - RDH) * pThis->u16RxBSize;
6198 else if (RDH > RDT)
6199 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pThis->u16RxBSize;
6200 else
6201 {
6202 cb = 0;
6203 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6204 }
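    /* Illustrative example: with 16 descriptors in the ring, RDH=14 and RDT=2, the
     * wrap-around branch above yields 16 - 14 + 2 = 4 free descriptors, i.e.
     * 4 * u16RxBSize bytes available for reception. */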
6205 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6206 pThis->szPrf, RDH, RDT, RDLEN, pThis->u16RxBSize, cb));
6207
6208 e1kCsRxLeave(pThis);
6209 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6210#else /* E1K_WITH_RXD_CACHE */
6211 int rc = VINF_SUCCESS;
6212
6213 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6214 return VERR_NET_NO_BUFFER_SPACE;
6215
6216 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6217 {
6218 E1KRXDESC desc;
6219 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6220 &desc, sizeof(desc));
6221 if (desc.status.fDD)
6222 rc = VERR_NET_NO_BUFFER_SPACE;
6223 }
6224 else if (e1kRxDIsCacheEmpty(pThis) && RDH == RDT)
6225 {
6226 /* Cache is empty, so is the RX ring. */
6227 rc = VERR_NET_NO_BUFFER_SPACE;
6228 }
6229 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
6230 " u16RxBSize=%d rc=%Rrc\n", pThis->szPrf,
6231 e1kRxDInCache(pThis), RDH, RDT, RDLEN, pThis->u16RxBSize, rc));
6232
6233 e1kCsRxLeave(pThis);
6234 return rc;
6235#endif /* E1K_WITH_RXD_CACHE */
6236}
6237
6238/**
6239 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6240 */
6241static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6242{
6243 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6244 int rc = e1kCanReceive(pThis);
6245
6246 if (RT_SUCCESS(rc))
6247 return VINF_SUCCESS;
6248 if (RT_UNLIKELY(cMillies == 0))
6249 return VERR_NET_NO_BUFFER_SPACE;
6250
6251 rc = VERR_INTERRUPTED;
6252 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, true);
6253 STAM_PROFILE_START(&pThis->StatRxOverflow, a);
6254 VMSTATE enmVMState;
6255 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pThis->CTX_SUFF(pDevIns))) == VMSTATE_RUNNING
6256 || enmVMState == VMSTATE_RUNNING_LS))
6257 {
6258 int rc2 = e1kCanReceive(pThis);
6259 if (RT_SUCCESS(rc2))
6260 {
6261 rc = VINF_SUCCESS;
6262 break;
6263 }
6264 E1kLogRel(("E1000 e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6265 E1kLog(("%s e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", pThis->szPrf, cMillies));
6266 RTSemEventWait(pThis->hEventMoreRxDescAvail, cMillies);
6267 }
6268 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
6269 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, false);
6270
6271 return rc;
6272}
6273
6274
6275/**
6276 * Matches the packet addresses against the Receive Address table. Looks for
6277 * exact matches only.
6278 *
6279 * @returns true if the address matches.
6280 * @param pThis Pointer to the state structure.
6281 * @param pvBuf The ethernet packet.
6283 * @thread EMT
6284 */
6285static bool e1kPerfectMatch(PE1KSTATE pThis, const void *pvBuf)
6286{
6287 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
6288 {
6289 E1KRAELEM* ra = pThis->aRecAddr.array + i;
6290
6291 /* Valid address? */
6292 if (ra->ctl & RA_CTL_AV)
6293 {
6294 Assert((ra->ctl & RA_CTL_AS) < 2);
6295 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6296 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6297 // pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6298 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6299 /*
6300 * Address Select:
6301 * 00b = Destination address
6302 * 01b = Source address
6303 * 10b = Reserved
6304 * 11b = Reserved
6305 * Since ethernet header is (DA, SA, len) we can use address
6306 * select as index.
6307 */
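            /* For example, with AS=01b the comparison below starts at byte 6 of the
             * frame, i.e. at the source address field. */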
6308 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6309 ra->addr, sizeof(ra->addr)) == 0)
6310 return true;
6311 }
6312 }
6313
6314 return false;
6315}
6316
6317/**
6318 * Matches the packet addresses against Multicast Table Array.
6319 *
6320 * @remarks This is an imperfect match since it matches a subset of addresses
6321 * rather than an exact address.
6322 *
6323 * @returns true if the address matches.
6324 * @param pThis Pointer to the state structure.
6325 * @param pvBuf The ethernet packet.
6327 * @thread EMT
6328 */
6329static bool e1kImperfectMatch(PE1KSTATE pThis, const void *pvBuf)
6330{
6331 /* Get bits 32..47 of destination address */
6332 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6333
6334 unsigned offset = GET_BITS(RCTL, MO);
6335 /*
6336 * offset means:
6337 * 00b = bits 36..47
6338 * 01b = bits 35..46
6339 * 10b = bits 34..45
6340 * 11b = bits 32..43
6341 */
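    /* Worked example: with MO=00b the hash index is bits 36..47 of the destination
     * address, so u16Bit (bits 32..47) is shifted right by 4; with MO=11b no shift
     * is needed and bits 32..43 are used directly. */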
6342 if (offset < 3)
6343 u16Bit = u16Bit >> (4 - offset);
6344 return ASMBitTest(pThis->auMTA, u16Bit & 0xFFF);
6345}
6346
6347/**
6348 * Determines if the packet is to be delivered to upper layer.
6349 *
6350 * The following filters are supported:
6351 * - Exact Unicast/Multicast
6352 * - Promiscuous Unicast/Multicast
6353 * - Multicast
6354 * - VLAN
6355 *
6356 * @returns true if packet is intended for this node.
6357 * @param pThis Pointer to the state structure.
6358 * @param pvBuf The ethernet packet.
6359 * @param cb Number of bytes available in the packet.
6360 * @param pStatus Bit field to store status bits.
6361 * @thread EMT
6362 */
6363static bool e1kAddressFilter(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6364{
6365 Assert(cb > 14);
6366 /* Assume that we fail to pass exact filter. */
6367 pStatus->fPIF = false;
6368 pStatus->fVP = false;
6369 /* Discard oversized packets */
6370 if (cb > E1K_MAX_RX_PKT_SIZE)
6371 {
6372 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6373 pThis->szPrf, cb, E1K_MAX_RX_PKT_SIZE));
6374 E1K_INC_CNT32(ROC);
6375 return false;
6376 }
6377 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6378 {
6379 /* When long packet reception is disabled packets over 1522 are discarded */
6380 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6381 pThis->szPrf, cb));
6382 E1K_INC_CNT32(ROC);
6383 return false;
6384 }
6385
6386 uint16_t *u16Ptr = (uint16_t*)pvBuf;
6387 /* Compare TPID with VLAN Ether Type */
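    /* u16Ptr[6] covers frame bytes 12-13, the EtherType/TPID field that follows the
     * destination and source MAC addresses; for 802.1Q frames the guest driver
     * normally programs VET to the standard TPID 0x8100. */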
6388 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6389 {
6390 pStatus->fVP = true;
6391 /* Is VLAN filtering enabled? */
6392 if (RCTL & RCTL_VFE)
6393 {
6394 /* It is 802.1q packet indeed, let's filter by VID */
6395 if (RCTL & RCTL_CFIEN)
6396 {
6397 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", pThis->szPrf,
6398 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6399 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6400 !!(RCTL & RCTL_CFI)));
6401 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6402 {
6403 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6404 pThis->szPrf, E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6405 return false;
6406 }
6407 }
6408 else
6409 E1kLog3(("%s VLAN filter: VLAN=%d\n", pThis->szPrf,
6410 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6411 if (!ASMBitTest(pThis->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6412 {
6413 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6414 pThis->szPrf, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6415 return false;
6416 }
6417 }
6418 }
6419 /* Broadcast filtering */
6420 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6421 return true;
6422 E1kLog2(("%s Packet filter: not a broadcast\n", pThis->szPrf));
6423 if (e1kIsMulticast(pvBuf))
6424 {
6425 /* Is multicast promiscuous enabled? */
6426 if (RCTL & RCTL_MPE)
6427 return true;
6428 E1kLog2(("%s Packet filter: no promiscuous multicast\n", pThis->szPrf));
6429 /* Try perfect matches first */
6430 if (e1kPerfectMatch(pThis, pvBuf))
6431 {
6432 pStatus->fPIF = true;
6433 return true;
6434 }
6435 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6436 if (e1kImperfectMatch(pThis, pvBuf))
6437 return true;
6438 E1kLog2(("%s Packet filter: no imperfect match\n", pThis->szPrf));
6439 }
6440 else {
6441 /* Is unicast promiscuous enabled? */
6442 if (RCTL & RCTL_UPE)
6443 return true;
6444 E1kLog2(("%s Packet filter: no promiscuous unicast\n", pThis->szPrf));
6445 if (e1kPerfectMatch(pThis, pvBuf))
6446 {
6447 pStatus->fPIF = true;
6448 return true;
6449 }
6450 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6451 }
6452 E1kLog2(("%s Packet filter: packet discarded\n", pThis->szPrf));
6453 return false;
6454}
6455
6456/**
6457 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6458 */
6459static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6460{
6461 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6462 int rc = VINF_SUCCESS;
6463
6464 /*
6465 * Drop packets if the VM is not running yet/anymore.
6466 */
6467 VMSTATE enmVMState = PDMDevHlpVMState(STATE_TO_DEVINS(pThis));
6468 if ( enmVMState != VMSTATE_RUNNING
6469 && enmVMState != VMSTATE_RUNNING_LS)
6470 {
6471 E1kLog(("%s Dropping incoming packet as VM is not running.\n", pThis->szPrf));
6472 return VINF_SUCCESS;
6473 }
6474
6475 /* Discard incoming packets in locked state */
6476 if (!(RCTL & RCTL_EN) || pThis->fLocked || !(STATUS & STATUS_LU))
6477 {
6478 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", pThis->szPrf));
6479 return VINF_SUCCESS;
6480 }
6481
6482 STAM_PROFILE_ADV_START(&pThis->StatReceive, a);
6483
6484 //if (!e1kCsEnter(pThis, RT_SRC_POS))
6485 // return VERR_PERMISSION_DENIED;
6486
6487 e1kPacketDump(pThis, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6488
6489 /* Update stats */
6490 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
6491 {
6492 E1K_INC_CNT32(TPR);
6493 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6494 e1kCsLeave(pThis);
6495 }
6496 STAM_PROFILE_ADV_START(&pThis->StatReceiveFilter, a);
6497 E1KRXDST status;
6498 RT_ZERO(status);
6499 bool fPassed = e1kAddressFilter(pThis, pvBuf, cb, &status);
6500 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveFilter, a);
6501 if (fPassed)
6502 {
6503 rc = e1kHandleRxPacket(pThis, pvBuf, cb, status);
6504 }
6505 //e1kCsLeave(pThis);
6506 STAM_PROFILE_ADV_STOP(&pThis->StatReceive, a);
6507
6508 return rc;
6509}
6510
6511
6512/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
6513
6514/**
6515 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
6516 */
6517static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6518{
6519 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, ILeds);
6520 int rc = VERR_PDM_LUN_NOT_FOUND;
6521
6522 if (iLUN == 0)
6523 {
6524 *ppLed = &pThis->led;
6525 rc = VINF_SUCCESS;
6526 }
6527 return rc;
6528}
6529
6530
6531/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
6532
6533/**
6534 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
6535 */
6536static DECLCALLBACK(int) e1kR3GetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6537{
6538 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6539 pThis->eeprom.getMac(pMac);
6540 return VINF_SUCCESS;
6541}
6542
6543/**
6544 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
6545 */
6546static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kR3GetLinkState(PPDMINETWORKCONFIG pInterface)
6547{
6548 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6549 if (STATUS & STATUS_LU)
6550 return PDMNETWORKLINKSTATE_UP;
6551 return PDMNETWORKLINKSTATE_DOWN;
6552}
6553
6554/**
6555 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
6556 */
6557static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6558{
6559 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6560
6561 E1kLog(("%s e1kR3SetLinkState: enmState=%d\n", pThis->szPrf, enmState));
6562 switch (enmState)
6563 {
6564 case PDMNETWORKLINKSTATE_UP:
6565 pThis->fCableConnected = true;
6566 /* If link was down, bring it up after a while. */
6567 if (!(STATUS & STATUS_LU))
6568 e1kBringLinkUpDelayed(pThis);
6569 break;
6570 case PDMNETWORKLINKSTATE_DOWN:
6571 pThis->fCableConnected = false;
6572 /* Always set the phy link state to down, regardless of the STATUS_LU bit.
6573 * We might have to set the link state before the driver initializes us. */
6574 Phy::setLinkStatus(&pThis->phy, false);
6575 /* If link was up, bring it down. */
6576 if (STATUS & STATUS_LU)
6577 e1kR3LinkDown(pThis);
6578 break;
6579 case PDMNETWORKLINKSTATE_DOWN_RESUME:
6580 /*
6581 * There is not much sense in bringing down the link if it has not come up yet.
6582 * If it is up though, we bring it down temporarily, then bring it up again.
6583 */
6584 if (STATUS & STATUS_LU)
6585 e1kR3LinkDownTemp(pThis);
6586 break;
6587 default:
6588 ;
6589 }
6590 return VINF_SUCCESS;
6591}
6592
6593
6594/* -=-=-=-=- PDMIBASE -=-=-=-=- */
6595
6596/**
6597 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6598 */
6599static DECLCALLBACK(void *) e1kR3QueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6600{
6601 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, IBase);
6602 Assert(&pThis->IBase == pInterface);
6603
6604 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
6605 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThis->INetworkDown);
6606 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThis->INetworkConfig);
6607 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThis->ILeds);
6608 return NULL;
6609}
6610
6611
6612/* -=-=-=-=- Saved State -=-=-=-=- */
6613
6614/**
6615 * Saves the configuration.
6616 *
6617 * @param pThis The E1K state.
6618 * @param pSSM The handle to the saved state.
6619 */
6620static void e1kSaveConfig(PE1KSTATE pThis, PSSMHANDLE pSSM)
6621{
6622 SSMR3PutMem(pSSM, &pThis->macConfigured, sizeof(pThis->macConfigured));
6623 SSMR3PutU32(pSSM, pThis->eChip);
6624}
6625
6626/**
6627 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
6628 */
6629static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
6630{
6631 RT_NOREF(uPass);
6632 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6633 e1kSaveConfig(pThis, pSSM);
6634 return VINF_SSM_DONT_CALL_AGAIN;
6635}
6636
6637/**
6638 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
6639 */
6640static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6641{
6642 RT_NOREF(pSSM);
6643 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6644
6645 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6646 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6647 return rc;
6648 e1kCsLeave(pThis);
6649 return VINF_SUCCESS;
6650#if 0
6651 /* 1) Prevent all threads from modifying the state and memory */
6652 //pThis->fLocked = true;
6653 /* 2) Cancel all timers */
6654#ifdef E1K_TX_DELAY
6655 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
6656#endif /* E1K_TX_DELAY */
6657//#ifdef E1K_USE_TX_TIMERS
6658 if (pThis->fTidEnabled)
6659 {
6660 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
6661#ifndef E1K_NO_TAD
6662 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
6663#endif /* E1K_NO_TAD */
6664 }
6665//#endif /* E1K_USE_TX_TIMERS */
6666#ifdef E1K_USE_RX_TIMERS
6667 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
6668 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
6669#endif /* E1K_USE_RX_TIMERS */
6670 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
6671 /* 3) Did I forget anything? */
6672 E1kLog(("%s Locked\n", pThis->szPrf));
6673 return VINF_SUCCESS;
6674#endif
6675}
6676
6677/**
6678 * @callback_method_impl{FNSSMDEVSAVEEXEC}
6679 */
6680static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6681{
6682 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6683
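    /* Note: the order and types of the fields written below must mirror the reads in
     * e1kLoadExec(), since the saved-state data is a plain sequential stream. */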
6684 e1kSaveConfig(pThis, pSSM);
6685 pThis->eeprom.save(pSSM);
6686 e1kDumpState(pThis);
6687 SSMR3PutMem(pSSM, pThis->auRegs, sizeof(pThis->auRegs));
6688 SSMR3PutBool(pSSM, pThis->fIntRaised);
6689 Phy::saveState(pSSM, &pThis->phy);
6690 SSMR3PutU32(pSSM, pThis->uSelectedReg);
6691 SSMR3PutMem(pSSM, pThis->auMTA, sizeof(pThis->auMTA));
6692 SSMR3PutMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6693 SSMR3PutMem(pSSM, pThis->auVFTA, sizeof(pThis->auVFTA));
6694 SSMR3PutU64(pSSM, pThis->u64AckedAt);
6695 SSMR3PutU16(pSSM, pThis->u16RxBSize);
6696 //SSMR3PutBool(pSSM, pThis->fDelayInts);
6697 //SSMR3PutBool(pSSM, pThis->fIntMaskUsed);
6698 SSMR3PutU16(pSSM, pThis->u16TxPktLen);
6699/** @todo State wrt to the TSE buffer is incomplete, so little point in
6700 * saving this actually. */
6701 SSMR3PutMem(pSSM, pThis->aTxPacketFallback, pThis->u16TxPktLen);
6702 SSMR3PutBool(pSSM, pThis->fIPcsum);
6703 SSMR3PutBool(pSSM, pThis->fTCPcsum);
6704 SSMR3PutMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6705 SSMR3PutMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6706 SSMR3PutBool(pSSM, pThis->fVTag);
6707 SSMR3PutU16(pSSM, pThis->u16VTagTCI);
6708#ifdef E1K_WITH_TXD_CACHE
6709#if 0
6710 SSMR3PutU8(pSSM, pThis->nTxDFetched);
6711 SSMR3PutMem(pSSM, pThis->aTxDescriptors,
6712 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6713#else
6714 /*
6715 * There is no point in storing TX descriptor cache entries as we can simply
6716 * fetch them again. Moreover, normally the cache is always empty when we
6717 * save the state. Store zero entries for compatibility.
6718 */
6719 SSMR3PutU8(pSSM, 0);
6720#endif
6721#endif /* E1K_WITH_TXD_CACHE */
6722/** @todo GSO requires some more state here. */
6723 E1kLog(("%s State has been saved\n", pThis->szPrf));
6724 return VINF_SUCCESS;
6725}
6726
6727#if 0
6728/**
6729 * @callback_method_impl{FNSSMDEVSAVEDONE}
6730 */
6731static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6732{
6733 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6734
6735 /* If VM is being powered off unlocking will result in assertions in PGM */
6736 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
6737 pThis->fLocked = false;
6738 else
6739 E1kLog(("%s VM is not running -- remain locked\n", pThis->szPrf));
6740 E1kLog(("%s Unlocked\n", pThis->szPrf));
6741 return VINF_SUCCESS;
6742}
6743#endif
6744
6745/**
6746 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
6747 */
6748static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6749{
6750 RT_NOREF(pSSM);
6751 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6752
6753 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6754 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6755 return rc;
6756 e1kCsLeave(pThis);
6757 return VINF_SUCCESS;
6758}
6759
6760/**
6761 * @callback_method_impl{FNSSMDEVLOADEXEC}
6762 */
6763static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
6764{
6765 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6766 int rc;
6767
6768 if ( uVersion != E1K_SAVEDSTATE_VERSION
6769#ifdef E1K_WITH_TXD_CACHE
6770 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
6771#endif /* E1K_WITH_TXD_CACHE */
6772 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
6773 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
6774 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
6775
6776 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
6777 || uPass != SSM_PASS_FINAL)
6778 {
6779 /* config checks */
6780 RTMAC macConfigured;
6781 rc = SSMR3GetMem(pSSM, &macConfigured, sizeof(macConfigured));
6782 AssertRCReturn(rc, rc);
6783 if ( memcmp(&macConfigured, &pThis->macConfigured, sizeof(macConfigured))
6784 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
6785 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
6786
6787 E1KCHIP eChip;
6788 rc = SSMR3GetU32(pSSM, &eChip);
6789 AssertRCReturn(rc, rc);
6790 if (eChip != pThis->eChip)
6791 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
6792 }
6793
6794 if (uPass == SSM_PASS_FINAL)
6795 {
6796 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
6797 {
6798 rc = pThis->eeprom.load(pSSM);
6799 AssertRCReturn(rc, rc);
6800 }
6801 /* the state */
6802 SSMR3GetMem(pSSM, &pThis->auRegs, sizeof(pThis->auRegs));
6803 SSMR3GetBool(pSSM, &pThis->fIntRaised);
6804 /** @todo PHY could be made a separate device with its own versioning */
6805 Phy::loadState(pSSM, &pThis->phy);
6806 SSMR3GetU32(pSSM, &pThis->uSelectedReg);
6807 SSMR3GetMem(pSSM, &pThis->auMTA, sizeof(pThis->auMTA));
6808 SSMR3GetMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6809 SSMR3GetMem(pSSM, &pThis->auVFTA, sizeof(pThis->auVFTA));
6810 SSMR3GetU64(pSSM, &pThis->u64AckedAt);
6811 SSMR3GetU16(pSSM, &pThis->u16RxBSize);
6812 //SSMR3GetBool(pSSM, pThis->fDelayInts);
6813 //SSMR3GetBool(pSSM, pThis->fIntMaskUsed);
6814 SSMR3GetU16(pSSM, &pThis->u16TxPktLen);
6815 SSMR3GetMem(pSSM, &pThis->aTxPacketFallback[0], pThis->u16TxPktLen);
6816 SSMR3GetBool(pSSM, &pThis->fIPcsum);
6817 SSMR3GetBool(pSSM, &pThis->fTCPcsum);
6818 SSMR3GetMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6819 rc = SSMR3GetMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6820 AssertRCReturn(rc, rc);
6821 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
6822 {
6823 SSMR3GetBool(pSSM, &pThis->fVTag);
6824 rc = SSMR3GetU16(pSSM, &pThis->u16VTagTCI);
6825 AssertRCReturn(rc, rc);
6826 }
6827 else
6828 {
6829 pThis->fVTag = false;
6830 pThis->u16VTagTCI = 0;
6831 }
6832#ifdef E1K_WITH_TXD_CACHE
6833 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
6834 {
6835 rc = SSMR3GetU8(pSSM, &pThis->nTxDFetched);
6836 AssertRCReturn(rc, rc);
6837 if (pThis->nTxDFetched)
6838 SSMR3GetMem(pSSM, pThis->aTxDescriptors,
6839 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6840 }
6841 else
6842 pThis->nTxDFetched = 0;
6843 /*
6844 * @todo: Perhaps we should not store TXD cache as the entries can be
6845 * simply fetched again from guest's memory. Or can't they?
6846 */
6847#endif /* E1K_WITH_TXD_CACHE */
6848#ifdef E1K_WITH_RXD_CACHE
6849 /*
6850 * There is no point in storing the RX descriptor cache in the saved
6851 * state, we just need to make sure it is empty.
6852 */
6853 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
6854#endif /* E1K_WITH_RXD_CACHE */
6855 /* derived state */
6856 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
6857
6858 E1kLog(("%s State has been restored\n", pThis->szPrf));
6859 e1kDumpState(pThis);
6860 }
6861 return VINF_SUCCESS;
6862}
6863
6864/**
6865 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
6866 */
6867static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6868{
6869 RT_NOREF(pSSM);
6870 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6871
6872 /* Update promiscuous mode */
6873 if (pThis->pDrvR3)
6874 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3,
6875 !!(RCTL & (RCTL_UPE | RCTL_MPE)));
6876
6877 /*
6878 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
6879 * passed to us. We go through all this stuff if the link was up and we
6880 * weren't teleported.
6881 */
6882 if ( (STATUS & STATUS_LU)
6883 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
6884 && pThis->cMsLinkUpDelay)
6885 {
6886 e1kR3LinkDownTemp(pThis);
6887 }
6888 return VINF_SUCCESS;
6889}
6890
6891
6892
6893/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
6894
6895/**
6896 * @callback_method_impl{FNRTSTRFORMATTYPE}
6897 */
6898static DECLCALLBACK(size_t) e1kFmtRxDesc(PFNRTSTROUTPUT pfnOutput,
6899 void *pvArgOutput,
6900 const char *pszType,
6901 void const *pvValue,
6902 int cchWidth,
6903 int cchPrecision,
6904 unsigned fFlags,
6905 void *pvUser)
6906{
6907 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
6908 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
6909 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
6910 if (!pDesc)
6911 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
6912
6913 size_t cbPrintf = 0;
6914 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
6915 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
6916 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
6917 pDesc->status.fPIF ? "PIF" : "pif",
6918 pDesc->status.fIPCS ? "IPCS" : "ipcs",
6919 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
6920 pDesc->status.fVP ? "VP" : "vp",
6921 pDesc->status.fIXSM ? "IXSM" : "ixsm",
6922 pDesc->status.fEOP ? "EOP" : "eop",
6923 pDesc->status.fDD ? "DD" : "dd",
6924 pDesc->status.fRXE ? "RXE" : "rxe",
6925 pDesc->status.fIPE ? "IPE" : "ipe",
6926 pDesc->status.fTCPE ? "TCPE" : "tcpe",
6927 pDesc->status.fCE ? "CE" : "ce",
6928 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
6929 E1K_SPEC_VLAN(pDesc->status.u16Special),
6930 E1K_SPEC_PRI(pDesc->status.u16Special));
6931 return cbPrintf;
6932}
6933
6934/**
6935 * @callback_method_impl{FNRTSTRFORMATTYPE}
6936 */
6937static DECLCALLBACK(size_t) e1kFmtTxDesc(PFNRTSTROUTPUT pfnOutput,
6938 void *pvArgOutput,
6939 const char *pszType,
6940 void const *pvValue,
6941 int cchWidth,
6942 int cchPrecision,
6943 unsigned fFlags,
6944 void *pvUser)
6945{
6946 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
6947 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
6948 E1KTXDESC *pDesc = (E1KTXDESC*)pvValue;
6949 if (!pDesc)
6950 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
6951
6952 size_t cbPrintf = 0;
6953 switch (e1kGetDescType(pDesc))
6954 {
6955 case E1K_DTYP_CONTEXT:
6956 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
6957 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
6958 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
6959 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
6960 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
6961 pDesc->context.dw2.fIDE ? " IDE":"",
6962 pDesc->context.dw2.fRS ? " RS" :"",
6963 pDesc->context.dw2.fTSE ? " TSE":"",
6964 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
6965 pDesc->context.dw2.fTCP ? "TCP":"UDP",
6966 pDesc->context.dw2.u20PAYLEN,
6967 pDesc->context.dw3.u8HDRLEN,
6968 pDesc->context.dw3.u16MSS,
6969 pDesc->context.dw3.fDD?"DD":"");
6970 break;
6971 case E1K_DTYP_DATA:
6972 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
6973 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
6974 pDesc->data.u64BufAddr,
6975 pDesc->data.cmd.u20DTALEN,
6976 pDesc->data.cmd.fIDE ? " IDE" :"",
6977 pDesc->data.cmd.fVLE ? " VLE" :"",
6978 pDesc->data.cmd.fRPS ? " RPS" :"",
6979 pDesc->data.cmd.fRS ? " RS" :"",
6980 pDesc->data.cmd.fTSE ? " TSE" :"",
6981 pDesc->data.cmd.fIFCS? " IFCS":"",
6982 pDesc->data.cmd.fEOP ? " EOP" :"",
6983 pDesc->data.dw3.fDD ? " DD" :"",
6984 pDesc->data.dw3.fEC ? " EC" :"",
6985 pDesc->data.dw3.fLC ? " LC" :"",
6986 pDesc->data.dw3.fTXSM? " TXSM":"",
6987 pDesc->data.dw3.fIXSM? " IXSM":"",
6988 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
6989 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
6990 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
6991 break;
6992 case E1K_DTYP_LEGACY:
6993 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
6994 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
6995 pDesc->data.u64BufAddr,
6996 pDesc->legacy.cmd.u16Length,
6997 pDesc->legacy.cmd.fIDE ? " IDE" :"",
6998 pDesc->legacy.cmd.fVLE ? " VLE" :"",
6999 pDesc->legacy.cmd.fRPS ? " RPS" :"",
7000 pDesc->legacy.cmd.fRS ? " RS" :"",
7001 pDesc->legacy.cmd.fIC ? " IC" :"",
7002 pDesc->legacy.cmd.fIFCS? " IFCS":"",
7003 pDesc->legacy.cmd.fEOP ? " EOP" :"",
7004 pDesc->legacy.dw3.fDD ? " DD" :"",
7005 pDesc->legacy.dw3.fEC ? " EC" :"",
7006 pDesc->legacy.dw3.fLC ? " LC" :"",
7007 pDesc->legacy.cmd.u8CSO,
7008 pDesc->legacy.dw3.u8CSS,
7009 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
7010 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
7011 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
7012 break;
7013 default:
7014 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
7015 break;
7016 }
7017
7018 return cbPrintf;
7019}
7020
7021/** Initializes debug helpers (logging format types). */
7022static int e1kInitDebugHelpers(void)
7023{
7024 int rc = VINF_SUCCESS;
7025 static bool s_fHelpersRegistered = false;
7026 if (!s_fHelpersRegistered)
7027 {
7028 s_fHelpersRegistered = true;
7029 rc = RTStrFormatTypeRegister("e1krxd", e1kFmtRxDesc, NULL);
7030 AssertRCReturn(rc, rc);
7031 rc = RTStrFormatTypeRegister("e1ktxd", e1kFmtTxDesc, NULL);
7032 AssertRCReturn(rc, rc);
7033 }
7034 return rc;
7035}
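/* Once registered, these custom types can be used like any other format specifier,
 * e.g. pHlp->pfnPrintf(pHlp, "%R[e1krxd]\n", &desc) as done in e1kInfo() below. */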
7036
7037/**
7038 * Status info callback.
7039 *
7040 * @param pDevIns The device instance.
7041 * @param pHlp The output helpers.
7042 * @param pszArgs The arguments.
7043 */
7044static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
7045{
7046 RT_NOREF(pszArgs);
7047 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7048 unsigned i;
7049 // bool fRcvRing = false;
7050 // bool fXmtRing = false;
7051
7052 /*
7053 * Parse args.
7054 if (pszArgs)
7055 {
7056 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
7057 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
7058 }
7059 */
7060
7061 /*
7062 * Show info.
7063 */
7064 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%RTiop mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7065 pDevIns->iInstance, pThis->IOPortBase, pThis->addrMMReg,
7066 &pThis->macConfigured, g_aChips[pThis->eChip].pcszName,
7067 pThis->fRCEnabled ? " GC" : "", pThis->fR0Enabled ? " R0" : "");
7068
7069 e1kCsEnter(pThis, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
7070
7071 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7072 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", g_aE1kRegMap[i].abbrev, pThis->auRegs[i]);
7073
7074 for (i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
7075 {
7076 E1KRAELEM* ra = pThis->aRecAddr.array + i;
7077 if (ra->ctl & RA_CTL_AV)
7078 {
7079 const char *pcszTmp;
7080 switch (ra->ctl & RA_CTL_AS)
7081 {
7082 case 0: pcszTmp = "DST"; break;
7083 case 1: pcszTmp = "SRC"; break;
7084 default: pcszTmp = "reserved";
7085 }
7086 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7087 }
7088 }
7089 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7090 uint32_t rdh = RDH;
7091 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7092 for (i = 0; i < cDescs; ++i)
7093 {
7094 E1KRXDESC desc;
7095 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7096 &desc, sizeof(desc));
7097 if (i == rdh)
7098 pHlp->pfnPrintf(pHlp, ">>> ");
7099 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7100 }
7101#ifdef E1K_WITH_RXD_CACHE
7102 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7103 pThis->iRxDCurrent, RDH, pThis->nRxDFetched, E1K_RXD_CACHE_SIZE);
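    /* Translate the cache position back to a ring index: the first fetched (cached)
     * descriptor corresponds to ring slot (RDH - iRxDCurrent) mod cDescs. */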
7104 if (rdh > pThis->iRxDCurrent)
7105 rdh -= pThis->iRxDCurrent;
7106 else
7107 rdh = cDescs + rdh - pThis->iRxDCurrent;
7108 for (i = 0; i < pThis->nRxDFetched; ++i)
7109 {
7110 if (i == pThis->iRxDCurrent)
7111 pHlp->pfnPrintf(pHlp, ">>> ");
7112 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7113 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7114 &pThis->aRxDescriptors[i]);
7115 }
7116#endif /* E1K_WITH_RXD_CACHE */
7117
7118 cDescs = TDLEN / sizeof(E1KTXDESC);
7119 uint32_t tdh = TDH;
7120 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7121 for (i = 0; i < cDescs; ++i)
7122 {
7123 E1KTXDESC desc;
7124 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7125 &desc, sizeof(desc));
7126 if (i == tdh)
7127 pHlp->pfnPrintf(pHlp, ">>> ");
7128 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7129 }
7130#ifdef E1K_WITH_TXD_CACHE
7131 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7132 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE);
7133 if (tdh > pThis->iTxDCurrent)
7134 tdh -= pThis->iTxDCurrent;
7135 else
7136 tdh = cDescs + tdh - pThis->iTxDCurrent;
7137 for (i = 0; i < pThis->nTxDFetched; ++i)
7138 {
7139 if (i == pThis->iTxDCurrent)
7140 pHlp->pfnPrintf(pHlp, ">>> ");
7141 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7142 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7143 &pThis->aTxDescriptors[i]);
7144 }
7145#endif /* E1K_WITH_TXD_CACHE */
7146
7147
7148#ifdef E1K_INT_STATS
7149 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pThis->uStatIntTry);
7150 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pThis->uStatInt);
7151 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pThis->uStatIntLower);
7152 pHlp->pfnPrintf(pHlp, "Interrupts delayed: %d\n", pThis->uStatIntDly);
7153 pHlp->pfnPrintf(pHlp, "Disabled delayed: %d\n", pThis->uStatDisDly);
7154 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pThis->uStatIntSkip);
7155 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pThis->uStatIntMasked);
7156 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pThis->uStatIntEarly);
7157 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pThis->uStatIntLate);
7158 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pThis->iStatIntLost);
7159 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pThis->uStatIntRx);
7160 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pThis->uStatIntTx);
7161 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pThis->uStatIntICS);
7162 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pThis->uStatIntRDTR);
7163 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pThis->uStatIntRXDMT0);
7164 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pThis->uStatIntTXQE);
7165 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pThis->uStatTxIDE);
7166 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pThis->uStatTxDelayed);
7167 pHlp->pfnPrintf(pHlp, "TX delayed expired: %d\n", pThis->uStatTxDelayExp);
7168 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pThis->uStatTxNoRS);
7169 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pThis->uStatTAD);
7170 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pThis->uStatTID);
7171 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pThis->uStatRAD);
7172 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pThis->uStatRID);
7173 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pThis->uStatDescCtx);
7174 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pThis->uStatDescDat);
7175 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pThis->uStatDescLeg);
7176 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pThis->uStatRxFrm);
7177 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pThis->uStatTxFrm);
7178 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pThis->uStatTx1514);
7179 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pThis->uStatTx2962);
7180 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pThis->uStatTx4410);
7181 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pThis->uStatTx5858);
7182 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pThis->uStatTx7306);
7183 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pThis->uStatTx8754);
7184 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pThis->uStatTx16384);
7185 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pThis->uStatTx32768);
7186 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pThis->uStatTxLarge);
7187#endif /* E1K_INT_STATS */
7188
7189 e1kCsLeave(pThis);
7190}
7191
7192
7193
7194/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
7195
7196/**
7197 * Detach notification.
7198 *
7199 * One port on the network card has been disconnected from the network.
7200 *
7201 * @param pDevIns The device instance.
7202 * @param iLUN The logical unit which is being detached.
7203 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7204 */
7205static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7206{
7207 RT_NOREF(fFlags);
7208 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7209 Log(("%s e1kR3Detach:\n", pThis->szPrf));
7210
7211 AssertLogRelReturnVoid(iLUN == 0);
7212
7213 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7214
7215 /** @todo r=pritesh still need to check whether I missed
7216 * cleaning something up in this function
7217 */
7218
7219 /*
7220 * Zero some important members.
7221 */
7222 pThis->pDrvBase = NULL;
7223 pThis->pDrvR3 = NULL;
7224 pThis->pDrvR0 = NIL_RTR0PTR;
7225 pThis->pDrvRC = NIL_RTRCPTR;
7226
7227 PDMCritSectLeave(&pThis->cs);
7228}
7229
7230/**
7231 * Attach the Network attachment.
7232 *
7233 * One port on the network card has been connected to a network.
7234 *
7235 * @returns VBox status code.
7236 * @param pDevIns The device instance.
7237 * @param iLUN The logical unit which is being attached.
7238 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7239 *
7240 * @remarks This code path is not used during construction.
7241 */
7242static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7243{
7244 RT_NOREF(fFlags);
7245 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7246 LogFlow(("%s e1kR3Attach:\n", pThis->szPrf));
7247
7248 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7249
7250 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7251
7252 /*
7253 * Attach the driver.
7254 */
7255 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7256 if (RT_SUCCESS(rc))
7257 {
7258 if (rc == VINF_NAT_DNS)
7259 {
7260#ifdef RT_OS_LINUX
7261 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7262 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7263#else
7264 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7265 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7266#endif
7267 }
7268 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7269 AssertMsgStmt(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7270 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7271 if (RT_SUCCESS(rc))
7272 {
7273 PPDMIBASER0 pBaseR0 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0);
7274 pThis->pDrvR0 = pBaseR0 ? pBaseR0->pfnQueryInterface(pBaseR0, PDMINETWORKUP_IID) : NIL_RTR0PTR;
7275
7276 PPDMIBASERC pBaseRC = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC);
7277 pThis->pDrvRC = pBaseRC ? pBaseRC->pfnQueryInterface(pBaseRC, PDMINETWORKUP_IID) : NIL_RTRCPTR;
7278 }
7279 }
7280 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7281 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7282 {
7283 /* This should never happen because this function is not called
7284 * if there is no driver to attach! */
7285 Log(("%s No attached driver!\n", pThis->szPrf));
7286 }
7287
7288 /*
7289 * Temporarily set the link down if it was up so that the guest
7290 * will know that we have changed the configuration of the
7291 * network card.
7292 */
7293 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7294 e1kR3LinkDownTemp(pThis);
7295
7296 PDMCritSectLeave(&pThis->cs);
7297 return rc;
7298
7299}
7300
7301/**
7302 * @copydoc FNPDMDEVPOWEROFF
7303 */
7304static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7305{
7306 /* Poke thread waiting for buffer space. */
7307 e1kWakeupReceive(pDevIns);
7308}
7309
7310/**
7311 * @copydoc FNPDMDEVRESET
7312 */
7313static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7314{
7315 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7316#ifdef E1K_TX_DELAY
7317 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
7318#endif /* E1K_TX_DELAY */
7319 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
7320 e1kCancelTimer(pThis, pThis->CTX_SUFF(pLUTimer));
7321 e1kXmitFreeBuf(pThis);
7322 pThis->u16TxPktLen = 0;
7323 pThis->fIPcsum = false;
7324 pThis->fTCPcsum = false;
7325 pThis->fIntMaskUsed = false;
7326 pThis->fDelayInts = false;
7327 pThis->fLocked = false;
7328 pThis->u64AckedAt = 0;
7329 e1kHardReset(pThis);
7330}
7331
7332/**
7333 * @copydoc FNPDMDEVSUSPEND
7334 */
7335static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7336{
7337 /* Poke thread waiting for buffer space. */
7338 e1kWakeupReceive(pDevIns);
7339}
7340
7341/**
7342 * Device relocation callback.
7343 *
7344 * When this callback is called, the device instance data (and the GC
7345 * component, if the device has one) is being relocated, and/or the selectors
7346 * have been changed. The device must use this chance to perform the
7347 * necessary pointer relocations and data updates.
7348 *
7349 * Before the GC code is executed the first time, this function will be
7350 * called with a 0 delta so GC pointer calculations can be done in one place.
7351 *
7352 * @param pDevIns Pointer to the device instance.
7353 * @param offDelta The relocation delta relative to the old location.
7354 *
7355 * @remark A relocation CANNOT fail.
7356 */
7357static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7358{
7359 RT_NOREF(offDelta);
7360 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7361 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7362 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7363 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7364#ifdef E1K_USE_RX_TIMERS
7365 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7366 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7367#endif /* E1K_USE_RX_TIMERS */
7368//#ifdef E1K_USE_TX_TIMERS
7369 if (pThis->fTidEnabled)
7370 {
7371 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7372# ifndef E1K_NO_TAD
7373 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7374# endif /* E1K_NO_TAD */
7375 }
7376//#endif /* E1K_USE_TX_TIMERS */
7377#ifdef E1K_TX_DELAY
7378 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7379#endif /* E1K_TX_DELAY */
7380 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7381 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7382}
7383
7384/**
7385 * Destruct a device instance.
7386 *
7387 * We need to free non-VM resources only.
7388 *
7389 * @returns VBox status code.
7390 * @param pDevIns The device instance data.
7391 * @thread EMT
7392 */
7393static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7394{
7395 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7396 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7397
7398 e1kDumpState(pThis);
7399 E1kLog(("%s Destroying instance\n", pThis->szPrf));
7400 if (PDMCritSectIsInitialized(&pThis->cs))
7401 {
7402 if (pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
7403 {
7404 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
7405 RTSemEventDestroy(pThis->hEventMoreRxDescAvail);
7406 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7407 }
7408#ifdef E1K_WITH_TX_CS
7409 PDMR3CritSectDelete(&pThis->csTx);
7410#endif /* E1K_WITH_TX_CS */
7411 PDMR3CritSectDelete(&pThis->csRx);
7412 PDMR3CritSectDelete(&pThis->cs);
7413 }
7414 return VINF_SUCCESS;
7415}
7416
7417
7418/**
7419 * Set PCI configuration space registers.
7420 *
7421 * @param pPciDev Pointer to the PCI device structure.
 * @param eChip The emulated chip variant (selects the PCI IDs).
7422 * @thread EMT
7423 */
7424static DECLCALLBACK(void) e1kConfigurePciDev(PPDMPCIDEV pPciDev, E1KCHIP eChip)
7425{
7426 Assert(eChip < RT_ELEMENTS(g_aChips));
7427 /* Configure PCI Device, assume 32-bit mode ******************************/
7428 PCIDevSetVendorId(pPciDev, g_aChips[eChip].uPCIVendorId);
7429 PCIDevSetDeviceId(pPciDev, g_aChips[eChip].uPCIDeviceId);
7430 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_aChips[eChip].uPCISubsystemVendorId);
7431 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_aChips[eChip].uPCISubsystemId);
7432
7433 PCIDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7434 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7435 PCIDevSetWord( pPciDev, VBOX_PCI_STATUS,
7436 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7437 /* Stepping A2 */
7438 PCIDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7439 /* Ethernet adapter */
7440 PCIDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7441 PCIDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7442 /* normal single function Ethernet controller */
7443 PCIDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7444 /* Memory Register Base Address */
7445 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7446 /* Memory Flash Base Address */
7447 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7448 /* IO Register Base Address */
7449 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
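    /* Note: bit 0 set in a BAR marks it as an I/O space BAR; the actual base address is
     * assigned by the guest BIOS/OS and handed back to us via e1kMap(). */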
7450 /* Expansion ROM Base Address */
7451 PCIDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7452 /* Capabilities Pointer */
7453 PCIDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7454 /* Interrupt Pin: INTA# */
7455 PCIDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7456 /* Max_Lat/Min_Gnt: very high priority and time slice */
7457 PCIDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7458 PCIDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7459
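    /* Capability chain as configured below: VBOX_PCI_CAPABILITY_LIST (0x34) points to the
     * PM capability at 0xDC, which links to the PCI-X capability at 0xE4, which either
     * terminates the chain or links to the MSI capability at 0x80 when E1K_WITH_MSI is set. */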
7460 /* PCI Power Management Registers ****************************************/
7461 /* Capability ID: PCI Power Management Registers */
7462 PCIDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7463 /* Next Item Pointer: PCI-X */
7464 PCIDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7465 /* Power Management Capabilities: PM disabled, DSI */
7466 PCIDevSetWord( pPciDev, 0xDC + 2,
7467 0x0002 | VBOX_PCI_PM_CAP_DSI);
7468 /* Power Management Control / Status Register: PM disabled */
7469 PCIDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7470 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7471 PCIDevSetByte( pPciDev, 0xDC + 6, 0x00);
7472 /* Data Register: PM disabled, always 0 */
7473 PCIDevSetByte( pPciDev, 0xDC + 7, 0x00);
7474
7475 /* PCI-X Configuration Registers *****************************************/
7476 /* Capability ID: PCI-X Configuration Registers */
7477 PCIDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7478#ifdef E1K_WITH_MSI
7479 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x80);
7480#else
7481 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7482 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7483#endif
7484 /* PCI-X Command: Enable Relaxed Ordering */
7485 PCIDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7486 /* PCI-X Status: 32-bit, 66MHz*/
7487 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7488 PCIDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
7489}
7490
7491/**
7492 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7493 */
7494static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7495{
7496 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7497 int rc;
7498 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7499
7500 /*
7501 * Initialize the instance data (state).
7502 * Note! Caller has initialized it to ZERO already.
7503 */
7504 RTStrPrintf(pThis->szPrf, sizeof(pThis->szPrf), "E1000#%d", iInstance);
7505 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", pThis->szPrf, sizeof(E1KRXDESC)));
7506 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7507 pThis->pDevInsR3 = pDevIns;
7508 pThis->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
7509 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7510 pThis->u16TxPktLen = 0;
7511 pThis->fIPcsum = false;
7512 pThis->fTCPcsum = false;
7513 pThis->fIntMaskUsed = false;
7514 pThis->fDelayInts = false;
7515 pThis->fLocked = false;
7516 pThis->u64AckedAt = 0;
7517 pThis->led.u32Magic = PDMLED_MAGIC;
7518 pThis->u32PktNo = 1;
7519
7520 /* Interfaces */
7521 pThis->IBase.pfnQueryInterface = e1kR3QueryInterface;
7522
7523 pThis->INetworkDown.pfnWaitReceiveAvail = e1kR3NetworkDown_WaitReceiveAvail;
7524 pThis->INetworkDown.pfnReceive = e1kR3NetworkDown_Receive;
7525 pThis->INetworkDown.pfnXmitPending = e1kR3NetworkDown_XmitPending;
7526
7527 pThis->ILeds.pfnQueryStatusLed = e1kR3QueryStatusLed;
7528
7529 pThis->INetworkConfig.pfnGetMac = e1kR3GetMac;
7530 pThis->INetworkConfig.pfnGetLinkState = e1kR3GetLinkState;
7531 pThis->INetworkConfig.pfnSetLinkState = e1kR3SetLinkState;
7532
7533 /*
7534 * Internal validations.
7535 */
7536 for (uint32_t iReg = 1; iReg < E1K_NUM_OF_BINARY_SEARCHABLE; iReg++)
7537 AssertLogRelMsgReturn( g_aE1kRegMap[iReg].offset > g_aE1kRegMap[iReg - 1].offset
7538 && g_aE1kRegMap[iReg].offset + g_aE1kRegMap[iReg].size
7539 >= g_aE1kRegMap[iReg - 1].offset + g_aE1kRegMap[iReg - 1].size,
7540 ("%s@%#xLB%#x vs %s@%#xLB%#x\n",
7541 g_aE1kRegMap[iReg].abbrev, g_aE1kRegMap[iReg].offset, g_aE1kRegMap[iReg].size,
7542 g_aE1kRegMap[iReg - 1].abbrev, g_aE1kRegMap[iReg - 1].offset, g_aE1kRegMap[iReg - 1].size),
7543 VERR_INTERNAL_ERROR_4);
7544
7545 /*
7546 * Validate configuration.
7547 */
7548 if (!CFGMR3AreValuesValid(pCfg, "MAC\0" "CableConnected\0" "AdapterType\0"
7549 "LineSpeed\0" "GCEnabled\0" "R0Enabled\0"
7550 "ItrEnabled\0" "ItrRxEnabled\0" "TidEnabled\0"
7551 "EthernetCRC\0" "GSOEnabled\0" "LinkUpDelay\0"))
7552 return PDMDEV_SET_ERROR(pDevIns, VERR_PDM_DEVINS_UNKNOWN_CFG_VALUES,
7553 N_("Invalid configuration for E1000 device"));
7554
7555 /** @todo LineSpeed unused! */
7556
7557 /* Get config params */
7558 rc = CFGMR3QueryBytes(pCfg, "MAC", pThis->macConfigured.au8, sizeof(pThis->macConfigured.au8));
7559 if (RT_FAILURE(rc))
7560 return PDMDEV_SET_ERROR(pDevIns, rc,
7561 N_("Configuration error: Failed to get MAC address"));
7562 rc = CFGMR3QueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
7563 if (RT_FAILURE(rc))
7564 return PDMDEV_SET_ERROR(pDevIns, rc,
7565 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7566 rc = CFGMR3QueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
7567 if (RT_FAILURE(rc))
7568 return PDMDEV_SET_ERROR(pDevIns, rc,
7569 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7570 Assert(pThis->eChip <= E1K_CHIP_82545EM);
7571 rc = CFGMR3QueryBoolDef(pCfg, "GCEnabled", &pThis->fRCEnabled, true);
7572 if (RT_FAILURE(rc))
7573 return PDMDEV_SET_ERROR(pDevIns, rc,
7574 N_("Configuration error: Failed to get the value of 'GCEnabled'"));
7575
7576 rc = CFGMR3QueryBoolDef(pCfg, "R0Enabled", &pThis->fR0Enabled, true);
7577 if (RT_FAILURE(rc))
7578 return PDMDEV_SET_ERROR(pDevIns, rc,
7579 N_("Configuration error: Failed to get the value of 'R0Enabled'"));
7580
7581 rc = CFGMR3QueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
7582 if (RT_FAILURE(rc))
7583 return PDMDEV_SET_ERROR(pDevIns, rc,
7584 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7585
7586 rc = CFGMR3QueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
7587 if (RT_FAILURE(rc))
7588 return PDMDEV_SET_ERROR(pDevIns, rc,
7589 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
7590
7591 rc = CFGMR3QueryBoolDef(pCfg, "ItrEnabled", &pThis->fItrEnabled, false);
7592 if (RT_FAILURE(rc))
7593 return PDMDEV_SET_ERROR(pDevIns, rc,
7594 N_("Configuration error: Failed to get the value of 'ItrEnabled'"));
7595
7596 rc = CFGMR3QueryBoolDef(pCfg, "ItrRxEnabled", &pThis->fItrRxEnabled, true);
7597 if (RT_FAILURE(rc))
7598 return PDMDEV_SET_ERROR(pDevIns, rc,
7599 N_("Configuration error: Failed to get the value of 'ItrRxEnabled'"));
7600
7601 rc = CFGMR3QueryBoolDef(pCfg, "TidEnabled", &pThis->fTidEnabled, false);
7602 if (RT_FAILURE(rc))
7603 return PDMDEV_SET_ERROR(pDevIns, rc,
7604 N_("Configuration error: Failed to get the value of 'TidEnabled'"));
7605
7606 rc = CFGMR3QueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 5000); /* ms */
7607 if (RT_FAILURE(rc))
7608 return PDMDEV_SET_ERROR(pDevIns, rc,
7609 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
7610 Assert(pThis->cMsLinkUpDelay <= 300000); /* at most 5 minutes */
7611 if (pThis->cMsLinkUpDelay > 5000)
7612 LogRel(("%s WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
7613 else if (pThis->cMsLinkUpDelay == 0)
7614 LogRel(("%s WARNING! Link up delay is disabled!\n", pThis->szPrf));
7615
7616 LogRel(("%s Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s Itr=%s ItrRx=%s TID=%s R0=%s GC=%s\n", pThis->szPrf,
7617 g_aChips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
7618 pThis->fEthernetCRC ? "on" : "off",
7619 pThis->fGSOEnabled ? "enabled" : "disabled",
7620 pThis->fItrEnabled ? "enabled" : "disabled",
7621 pThis->fItrRxEnabled ? "enabled" : "disabled",
7622 pThis->fTidEnabled ? "enabled" : "disabled",
7623 pThis->fR0Enabled ? "enabled" : "disabled",
7624 pThis->fRCEnabled ? "enabled" : "disabled"));
7625
7626 /* Initialize the EEPROM. */
7627 pThis->eeprom.init(pThis->macConfigured);
7628
7629 /* Initialize internal PHY. */
7630 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
7631 Phy::setLinkStatus(&pThis->phy, pThis->fCableConnected);
7632
7633 /* Initialize critical sections. We do our own locking. */
7634 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
7635 AssertRCReturn(rc, rc);
7636
7637 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "E1000#%d", iInstance);
7638 if (RT_FAILURE(rc))
7639 return rc;
7640 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "E1000#%dRX", iInstance);
7641 if (RT_FAILURE(rc))
7642 return rc;
7643#ifdef E1K_WITH_TX_CS
7644 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "E1000#%dTX", iInstance);
7645 if (RT_FAILURE(rc))
7646 return rc;
7647#endif /* E1K_WITH_TX_CS */
7648
7649 /* Saved state registration. */
7650 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
7651 NULL, e1kLiveExec, NULL,
7652 e1kSavePrep, e1kSaveExec, NULL,
7653 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
7654 if (RT_FAILURE(rc))
7655 return rc;
7656
7657 /* Set PCI config registers and register ourselves with the PCI bus. */
7658 e1kConfigurePciDev(&pThis->pciDevice, pThis->eChip);
7659 rc = PDMDevHlpPCIRegister(pDevIns, &pThis->pciDevice);
7660 if (RT_FAILURE(rc))
7661 return rc;
7662
7663#ifdef E1K_WITH_MSI
7664 PDMMSIREG MsiReg;
7665 RT_ZERO(MsiReg);
7666 MsiReg.cMsiVectors = 1;
7667 MsiReg.iMsiCapOffset = 0x80;
7668 MsiReg.iMsiNextOffset = 0x0;
7669 MsiReg.fMsi64bit = false;
7670 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
7671 AssertRCReturn(rc, rc);
7672#endif
7673
7674
7675 /* Map our registers to memory space (region 0, see e1kConfigurePciDev) */
7676 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, e1kMap);
7677 if (RT_FAILURE(rc))
7678 return rc;
7679#ifdef E1K_WITH_PREREG_MMIO
7680 rc = PDMDevHlpMMIOExPreRegister(pDevIns, 0, E1K_MM_SIZE, IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD, "E1000",
7681 NULL /*pvUserR3*/, e1kMMIOWrite, e1kMMIORead, NULL /*pfnFillR3*/,
7682 NIL_RTR0PTR /*pvUserR0*/, pThis->fR0Enabled ? "e1kMMIOWrite" : NULL,
7683 pThis->fR0Enabled ? "e1kMMIORead" : NULL, NULL /*pszFillR0*/,
7684 NIL_RTRCPTR /*pvUserRC*/, pThis->fRCEnabled ? "e1kMMIOWrite" : NULL,
7685 pThis->fRCEnabled ? "e1kMMIORead" : NULL, NULL /*pszFillRC*/);
7686 AssertLogRelRCReturn(rc, rc);
7687#endif
7688 /* Map our registers to IO space (region 2, see e1kConfigurePciDev) */
7689 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, E1K_IOPORT_SIZE, PCI_ADDRESS_SPACE_IO, e1kMap);
7690 if (RT_FAILURE(rc))
7691 return rc;
7692
7693 /* Create transmit queue */
7694 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7695 e1kTxQueueConsumer, true, "E1000-Xmit", &pThis->pTxQueueR3);
7696 if (RT_FAILURE(rc))
7697 return rc;
7698 pThis->pTxQueueR0 = PDMQueueR0Ptr(pThis->pTxQueueR3);
7699 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7700
7701 /* Create the RX notifier signaller. */
7702 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7703 e1kCanRxQueueConsumer, true, "E1000-Rcv", &pThis->pCanRxQueueR3);
7704 if (RT_FAILURE(rc))
7705 return rc;
7706 pThis->pCanRxQueueR0 = PDMQueueR0Ptr(pThis->pCanRxQueueR3);
7707 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7708
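/* All delay/interrupt timers below run off the virtual clock and are created with TMTIMER_FLAGS_NO_CRIT_SECT; where serialization is needed the critical section is attached explicitly (see the transmit delay timer). */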
7709#ifdef E1K_TX_DELAY
7710 /* Create Transmit Delay Timer */
7711 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxDelayTimer, pThis,
7712 TMTIMER_FLAGS_NO_CRIT_SECT,
7713 "E1000 Transmit Delay Timer", &pThis->pTXDTimerR3);
7714 if (RT_FAILURE(rc))
7715 return rc;
7716 pThis->pTXDTimerR0 = TMTimerR0Ptr(pThis->pTXDTimerR3);
7717 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7718 TMR3TimerSetCritSect(pThis->pTXDTimerR3, &pThis->csTx);
7719#endif /* E1K_TX_DELAY */
7720
7721//#ifdef E1K_USE_TX_TIMERS
7722 if (pThis->fTidEnabled)
7723 {
7724 /* Create Transmit Interrupt Delay Timer */
7725 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxIntDelayTimer, pThis,
7726 TMTIMER_FLAGS_NO_CRIT_SECT,
7727 "E1000 Transmit Interrupt Delay Timer", &pThis->pTIDTimerR3);
7728 if (RT_FAILURE(rc))
7729 return rc;
7730 pThis->pTIDTimerR0 = TMTimerR0Ptr(pThis->pTIDTimerR3);
7731 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7732
7733# ifndef E1K_NO_TAD
7734 /* Create Transmit Absolute Delay Timer */
7735 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxAbsDelayTimer, pThis,
7736 TMTIMER_FLAGS_NO_CRIT_SECT,
7737 "E1000 Transmit Absolute Delay Timer", &pThis->pTADTimerR3);
7738 if (RT_FAILURE(rc))
7739 return rc;
7740 pThis->pTADTimerR0 = TMTimerR0Ptr(pThis->pTADTimerR3);
7741 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7742 # endif /* !E1K_NO_TAD */
7743 }
7744//#endif /* E1K_USE_TX_TIMERS */
7745
7746#ifdef E1K_USE_RX_TIMERS
7747 /* Create Receive Interrupt Delay Timer */
7748 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxIntDelayTimer, pThis,
7749 TMTIMER_FLAGS_NO_CRIT_SECT,
7750 "E1000 Receive Interrupt Delay Timer", &pThis->pRIDTimerR3);
7751 if (RT_FAILURE(rc))
7752 return rc;
7753 pThis->pRIDTimerR0 = TMTimerR0Ptr(pThis->pRIDTimerR3);
7754 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7755
7756 /* Create Receive Absolute Delay Timer */
7757 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxAbsDelayTimer, pThis,
7758 TMTIMER_FLAGS_NO_CRIT_SECT,
7759 "E1000 Receive Absolute Delay Timer", &pThis->pRADTimerR3);
7760 if (RT_FAILURE(rc))
7761 return rc;
7762 pThis->pRADTimerR0 = TMTimerR0Ptr(pThis->pRADTimerR3);
7763 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7764#endif /* E1K_USE_RX_TIMERS */
7765
7766 /* Create Late Interrupt Timer */
7767 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLateIntTimer, pThis,
7768 TMTIMER_FLAGS_NO_CRIT_SECT,
7769 "E1000 Late Interrupt Timer", &pThis->pIntTimerR3);
7770 if (RT_FAILURE(rc))
7771 return rc;
7772 pThis->pIntTimerR0 = TMTimerR0Ptr(pThis->pIntTimerR3);
7773 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7774
7775 /* Create Link Up Timer */
7776 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLinkUpTimer, pThis,
7777 TMTIMER_FLAGS_NO_CRIT_SECT,
7778 "E1000 Link Up Timer", &pThis->pLUTimerR3);
7779 if (RT_FAILURE(rc))
7780 return rc;
7781 pThis->pLUTimerR0 = TMTimerR0Ptr(pThis->pLUTimerR3);
7782 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7783
7784 /* Register the info item */
7785 char szTmp[20];
7786 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
7787 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
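/* The registered info item lets the device state be dumped via the debugger info facility (e.g. the 'info e1k<N>' command for instance N). */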
7788
7789 /* Status driver */
7790 PPDMIBASE pBase;
7791 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThis->IBase, &pBase, "Status Port");
7792 if (RT_FAILURE(rc))
7793 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
7794 pThis->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
7795
7796 /* Network driver */
7797 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7798 if (RT_SUCCESS(rc))
7799 {
7800 if (rc == VINF_NAT_DNS)
7801 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7802 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and will probably observe delays when trying to do so"));
7803 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7804 AssertMsgReturn(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
7805
7806 pThis->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7807 pThis->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7808 }
7809 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7810 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7811 {
7812 /* No error! */
7813 E1kLog(("%s This adapter is not attached to any network!\n", pThis->szPrf));
7814 }
7815 else
7816 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
7817
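/* Event semaphore used to block a delivering network thread until the guest makes receive buffers available again; it is signalled from the receive wake-up path. */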
7818 rc = RTSemEventCreate(&pThis->hEventMoreRxDescAvail);
7819 if (RT_FAILURE(rc))
7820 return rc;
7821
7822 rc = e1kInitDebugHelpers();
7823 if (RT_FAILURE(rc))
7824 return rc;
7825
7826 e1kHardReset(pThis);
7827
7828 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Public/Net/E1k%u/BytesReceived", iInstance);
7829 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Public/Net/E1k%u/BytesTransmitted", iInstance);
7830
7831 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
7832 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
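/* Note that the byte counters above are exported under two names: once in the /Public/Net/ tree and once in the per-device /Devices/E1k<N>/ tree. */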
7833
7834#if defined(VBOX_WITH_STATISTICS)
7835 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
7836 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
7837 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
7838 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
7839 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
7840 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
7841 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
7842 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
7843 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
7844 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
7845 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
7846 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occured", iInstance);
7847 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
7848 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
7849 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
7850 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
7851 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
7852 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
7853 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
7854 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflowWakeup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups", "/Devices/E1k%d/RxOverflowWakeup", iInstance);
7855 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
7856 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
7857 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
7858 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
7859
7860 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors","/Devices/E1k%d/TxDesc/ContexNormal", iInstance);
7861 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
7862 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
7863 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
7864 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
7865 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
7866 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
7867 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
7868 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
7869 for (unsigned iReg = 0; iReg < E1K_NUM_OF_REGS; iReg++)
7870 {
7871 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7872 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Reads", iInstance, g_aE1kRegMap[iReg].abbrev);
7873 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7874 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Writes", iInstance, g_aE1kRegMap[iReg].abbrev);
7875 }
7876#endif /* VBOX_WITH_STATISTICS */
7877
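/* Raw internal E1K_INT_STATS counters, exposed as plain values; they are all registered with STAMUNIT_NS even though most of them are simple event counts. */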
7878#ifdef E1K_INT_STATS
7879 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "u64ArmedAt", "/Devices/E1k%d/u64ArmedAt", iInstance);
7880 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatMaxTxDelay", "/Devices/E1k%d/uStatMaxTxDelay", iInstance);
7881 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatInt, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatInt", "/Devices/E1k%d/uStatInt", iInstance);
7882 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTry", "/Devices/E1k%d/uStatIntTry", iInstance);
7883 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLower", "/Devices/E1k%d/uStatIntLower", iInstance);
7884 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntDly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntDly", "/Devices/E1k%d/uStatIntDly", iInstance);
7885 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLost", "/Devices/E1k%d/iStatIntLost", iInstance);
7886 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLostOne", "/Devices/E1k%d/iStatIntLostOne", iInstance);
7887 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDisDly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDisDly", "/Devices/E1k%d/uStatDisDly", iInstance);
7888 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntSkip", "/Devices/E1k%d/uStatIntSkip", iInstance);
7889 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLate", "/Devices/E1k%d/uStatIntLate", iInstance);
7890 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntMasked", "/Devices/E1k%d/uStatIntMasked", iInstance);
7891 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntEarly", "/Devices/E1k%d/uStatIntEarly", iInstance);
7892 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRx", "/Devices/E1k%d/uStatIntRx", iInstance);
7893 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTx", "/Devices/E1k%d/uStatIntTx", iInstance);
7894 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntICS", "/Devices/E1k%d/uStatIntICS", iInstance);
7895 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRDTR", "/Devices/E1k%d/uStatIntRDTR", iInstance);
7896 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRXDMT0", "/Devices/E1k%d/uStatIntRXDMT0", iInstance);
7897 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTXQE", "/Devices/E1k%d/uStatIntTXQE", iInstance);
7898 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxNoRS", "/Devices/E1k%d/uStatTxNoRS", iInstance);
7899 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxIDE", "/Devices/E1k%d/uStatTxIDE", iInstance);
7900 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayed", "/Devices/E1k%d/uStatTxDelayed", iInstance);
7901 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayExp", "/Devices/E1k%d/uStatTxDelayExp", iInstance);
7902 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTAD", "/Devices/E1k%d/uStatTAD", iInstance);
7903 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTID", "/Devices/E1k%d/uStatTID", iInstance);
7904 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRAD", "/Devices/E1k%d/uStatRAD", iInstance);
7905 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRID", "/Devices/E1k%d/uStatRID", iInstance);
7906 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRxFrm", "/Devices/E1k%d/uStatRxFrm", iInstance);
7907 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxFrm", "/Devices/E1k%d/uStatTxFrm", iInstance);
7908 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescCtx", "/Devices/E1k%d/uStatDescCtx", iInstance);
7909 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescDat", "/Devices/E1k%d/uStatDescDat", iInstance);
7910 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescLeg", "/Devices/E1k%d/uStatDescLeg", iInstance);
7911 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx1514", "/Devices/E1k%d/uStatTx1514", iInstance);
7912 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx2962", "/Devices/E1k%d/uStatTx2962", iInstance);
7913 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx4410", "/Devices/E1k%d/uStatTx4410", iInstance);
7914 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx5858", "/Devices/E1k%d/uStatTx5858", iInstance);
7915 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx7306", "/Devices/E1k%d/uStatTx7306", iInstance);
7916 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx8754", "/Devices/E1k%d/uStatTx8754", iInstance);
7917 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx16384", "/Devices/E1k%d/uStatTx16384", iInstance);
7918 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx32768", "/Devices/E1k%d/uStatTx32768", iInstance);
7919 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxLarge", "/Devices/E1k%d/uStatTxLarge", iInstance);
7920#endif /* E1K_INT_STATS */
7921
7922 return VINF_SUCCESS;
7923}
7924
7925/**
7926 * The device registration structure.
7927 */
7928const PDMDEVREG g_DeviceE1000 =
7929{
7930 /* Structure version. PDM_DEVREG_VERSION defines the current version. */
7931 PDM_DEVREG_VERSION,
7932 /* Device name. */
7933 "e1000",
7934 /* Name of guest context module (no path).
7935 * Only evaluated if PDM_DEVREG_FLAGS_RC is set. */
7936 "VBoxDDRC.rc",
7937 /* Name of ring-0 module (no path).
7938 * Only evaluated if PDM_DEVREG_FLAGS_R0 is set. */
7939 "VBoxDDR0.r0",
7940 /* The description of the device. The UTF-8 string pointed to shall, like this structure,
7941 * remain unchanged from registration till VM destruction. */
7942 "Intel PRO/1000 MT Desktop Ethernet.\n",
7943
7944 /* Flags, combination of the PDM_DEVREG_FLAGS_* \#defines. */
7945 PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0,
7946 /* Device class(es), combination of the PDM_DEVREG_CLASS_* \#defines. */
7947 PDM_DEVREG_CLASS_NETWORK,
7948 /* Maximum number of instances (per VM). */
7949 ~0U,
7950 /* Size of the instance data. */
7951 sizeof(E1KSTATE),
7952
7953 /* pfnConstruct */
7954 e1kR3Construct,
7955 /* pfnDestruct */
7956 e1kR3Destruct,
7957 /* pfnRelocate */
7958 e1kR3Relocate,
7959 /* pfnMemSetup */
7960 NULL,
7961 /* pfnPowerOn */
7962 NULL,
7963 /* pfnReset */
7964 e1kR3Reset,
7965 /* pfnSuspend */
7966 e1kR3Suspend,
7967 /* pfnResume */
7968 NULL,
7969 /* pfnAttach */
7970 e1kR3Attach,
7971 /* pfnDetach */
7972 e1kR3Detach,
7973 /* pfnQueryInterface */
7974 NULL,
7975 /* pfnInitComplete */
7976 NULL,
7977 /* pfnPowerOff */
7978 e1kR3PowerOff,
7979 /* pfnSoftReset */
7980 NULL,
7981
7982 /* u32VersionEnd */
7983 PDM_DEVREG_VERSION
7984};
7985
7986#endif /* IN_RING3 */
7987#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */