VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp@53426

Last change on this file since 53426 was 51150, checked in by vboxsync, 11 years ago

e1000: Phar Lap ETS support (#7346)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 317.1 KB
1/* $Id: DevE1000.cpp 51150 2014-04-28 07:38:24Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2013 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#define LOG_GROUP LOG_GROUP_DEV_E1000
32#include <iprt/crc.h>
33#include <iprt/ctype.h>
34#include <iprt/net.h>
35#include <iprt/semaphore.h>
36#include <iprt/string.h>
37#include <iprt/time.h>
38#include <iprt/uuid.h>
39#include <VBox/vmm/pdmdev.h>
40#include <VBox/vmm/pdmnetifs.h>
41#include <VBox/vmm/pdmnetinline.h>
42#include <VBox/param.h>
43#include "VBoxDD.h"
44
45#include "DevEEPROM.h"
46#include "DevE1000Phy.h"
47
48
49/* Options *******************************************************************/
50/** @def E1K_INIT_RA0
51 * E1K_INIT_RA0 forces E1000 to set the first entry of the Receive Address filter
52 * table to the MAC address obtained from CFGM. Most guests read the MAC address
53 * from EEPROM and write it to RA[0] explicitly, but Mac OS X seems to depend on it
54 * being already set (see @bugref{4657}).
55 */
56#define E1K_INIT_RA0
57/** @def E1K_LSC_ON_SLU
58 * E1K_LSC_ON_SLU causes E1000 to generate a Link Status Change interrupt when
59 * the guest driver brings up the link via the STATUS.LU bit. Again, the only guest
60 * that requires it is Mac OS X (see @bugref{4657}).
61 */
62#define E1K_LSC_ON_SLU
63/** @def E1K_ITR_ENABLED
64 * E1K_ITR_ENABLED reduces the number of interrupts generated by E1000 if the
65 * guest driver requests it by writing a non-zero value to the Interrupt
66 * Throttling Register (see section 13.4.18 in "8254x Family of Gigabit
67 * Ethernet Controllers Software Developer’s Manual").
68 */
69//#define E1K_ITR_ENABLED
70/** @def E1K_TX_DELAY
71 * E1K_TX_DELAY aims to improve the guest-to-host transfer rate for TCP streams by
72 * preventing packets from being sent immediately. It allows several packets to be
73 * sent in one batch, reducing the number of acknowledgments. Note that it
74 * effectively disables the R0 TX path, forcing all transmission to happen in R3.
75 */
76//#define E1K_TX_DELAY 150
77/** @def E1K_USE_TX_TIMERS
78 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
79 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
80 * register. Enabling it showed no positive effects on existing guests, so it
81 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
82 * Ethernet Controllers Software Developer’s Manual" for more detailed
83 * explanation.
84 */
85//#define E1K_USE_TX_TIMERS
86/** @def E1K_NO_TAD
87 * E1K_NO_TAD disables one of the two timers enabled by E1K_USE_TX_TIMERS, the
88 * Transmit Absolute Delay timer. This timer sets the maximum time interval
89 * during which TX interrupts can be postponed (delayed). It has no effect
90 * if E1K_USE_TX_TIMERS is not defined.
91 */
92//#define E1K_NO_TAD
93/** @def E1K_REL_DEBUG
94 * E1K_REL_DEBUG enables debug logging at levels 1, 2, and 3 in release builds.
95 */
96//#define E1K_REL_DEBUG
97/** @def E1K_INT_STATS
98 * E1K_INT_STATS enables collection of internal statistics used for
99 * debugging of delayed interrupts, etc.
100 */
101//#define E1K_INT_STATS
102/** @def E1K_WITH_MSI
103 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
104 */
105//#define E1K_WITH_MSI
106/** @def E1K_WITH_TX_CS
107 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
108 */
109#define E1K_WITH_TX_CS
110/** @def E1K_WITH_TXD_CACHE
111 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
112 * single physical memory read (or two if it wraps around the end of TX
113 * descriptor ring). It is required for proper functioning of bandwidth
114 * resource control as it allows computing the exact sizes of packets prior
115 * to allocating their buffers (see @bugref{5582}).
116 */
117#define E1K_WITH_TXD_CACHE
118/** @def E1K_WITH_RXD_CACHE
119 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
120 * single physical memory read (or two if it wraps around the end of RX
121 * descriptor ring). Intel's packet driver for DOS needs this option in
122 * order to work properly (see @bugref{6217}).
123 */
124#define E1K_WITH_RXD_CACHE
125/* End of Options ************************************************************/
126
127#ifdef E1K_WITH_TXD_CACHE
128/**
129 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
130 * in the state structure. It limits the number of descriptors loaded in one
131 * batch read. For example, a Linux guest may use up to 20 descriptors per
132 * TSE packet. The largest TSE packet seen (from a Windows guest) took 45 descriptors.
133 */
134# define E1K_TXD_CACHE_SIZE 64u
135#endif /* E1K_WITH_TXD_CACHE */
136
137#ifdef E1K_WITH_RXD_CACHE
138/**
139 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
140 * in the state structure. It limits the number of descriptors loaded in one
141 * batch read. For example, an XP guest adds 15 RX descriptors at a time.
142 */
143# define E1K_RXD_CACHE_SIZE 16u
144#endif /* E1K_WITH_RXD_CACHE */
145
146
147/* Little helpers ************************************************************/
148#undef htons
149#undef ntohs
150#undef htonl
151#undef ntohl
152#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
153#define ntohs(x) htons(x)
154#define htonl(x) ASMByteSwapU32(x)
155#define ntohl(x) htonl(x)
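/* Illustrative sketch (not part of the original file): on the little-endian
 * hosts VirtualBox targets, the helpers above swap to and from network byte
 * order, e.g. when reading the big-endian EtherType field of a raw frame. */
#if 0
static uint16_t e1kSketchGetEtherType(const uint8_t *pbFrame)
{
    uint16_t u16Type;
    memcpy(&u16Type, pbFrame + 12, sizeof(u16Type)); /* copy to avoid unaligned access */
    return ntohs(u16Type);                           /* network (big-endian) -> host order */
}
#endif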
156
157#ifndef DEBUG
158# ifdef E1K_REL_DEBUG
159# define DEBUG
160# define E1kLog(a) LogRel(a)
161# define E1kLog2(a) LogRel(a)
162# define E1kLog3(a) LogRel(a)
163# define E1kLogX(x, a) LogRel(a)
164//# define E1kLog3(a) do {} while (0)
165# else
166# define E1kLog(a) do {} while (0)
167# define E1kLog2(a) do {} while (0)
168# define E1kLog3(a) do {} while (0)
169# define E1kLogX(x, a) do {} while (0)
170# endif
171#else
172# define E1kLog(a) Log(a)
173# define E1kLog2(a) Log2(a)
174# define E1kLog3(a) Log3(a)
175# define E1kLogX(x, a) LogIt(LOG_INSTANCE, x, LOG_GROUP, a)
176//# define E1kLog(a) do {} while (0)
177//# define E1kLog2(a) do {} while (0)
178//# define E1kLog3(a) do {} while (0)
179#endif
180
181#if 0
182# define E1kLogRel(a) LogRel(a)
183#else
184# define E1kLogRel(a) do { } while (0)
185#endif
186
187//#undef DEBUG
188
189#define STATE_TO_DEVINS(pThis) (((PE1KSTATE )pThis)->CTX_SUFF(pDevIns))
190#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
191
192#define E1K_INC_CNT32(cnt) \
193do { \
194 if (cnt < UINT32_MAX) \
195 cnt++; \
196} while (0)
197
198#define E1K_ADD_CNT64(cntLo, cntHi, val) \
199do { \
200 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
201 uint64_t tmp = u64Cnt; \
202 u64Cnt += val; \
203 if (tmp > u64Cnt ) \
204 u64Cnt = UINT64_MAX; \
205 cntLo = (uint32_t)u64Cnt; \
206 cntHi = (uint32_t)(u64Cnt >> 32); \
207} while (0)
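/* Illustrative sketch, not lifted verbatim from the code below and assuming a
 * local pThis plus a hypothetical local cbFrame: a received frame would be
 * accumulated into the 64-bit Good Octets Received counter, kept as the
 * GORCL/GORCH register pair defined further down, saturating on overflow. */
#if 0
E1K_ADD_CNT64(GORCL, GORCH, cbFrame);  /* saturating add across the lo/hi 32-bit halves */
E1K_INC_CNT32(GPRC);                   /* 32-bit packet counter, clamps at UINT32_MAX */
#endif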
208
209#ifdef E1K_INT_STATS
210# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
211#else /* E1K_INT_STATS */
212# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
213#endif /* E1K_INT_STATS */
214
215
216/*****************************************************************************/
217
218typedef uint32_t E1KCHIP;
219#define E1K_CHIP_82540EM 0
220#define E1K_CHIP_82543GC 1
221#define E1K_CHIP_82545EM 2
222
223/** Different E1000 chips. */
224static const struct E1kChips
225{
226 uint16_t uPCIVendorId;
227 uint16_t uPCIDeviceId;
228 uint16_t uPCISubsystemVendorId;
229 uint16_t uPCISubsystemId;
230 const char *pcszName;
231} g_Chips[] =
232{
233 /* Vendor Device SSVendor SubSys Name */
234 { 0x8086,
235 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
236#ifdef E1K_WITH_MSI
237 0x105E,
238#else
239 0x100E,
240#endif
241 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
242 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
243 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
244};
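/* Illustrative sketch, not verbatim from the constructor further down: the
 * configured chip type (pThis->eChip, one of the E1K_CHIP_* indices above)
 * selects a row of g_Chips whose IDs end up in the emulated PCI config space. */
#if 0
PCIDevSetVendorId(&pThis->pciDevice, g_Chips[pThis->eChip].uPCIVendorId);
PCIDevSetDeviceId(&pThis->pciDevice, g_Chips[pThis->eChip].uPCIDeviceId);
#endif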
245
246
247/* The size of register area mapped to I/O space */
248#define E1K_IOPORT_SIZE 0x8
249/* The size of memory-mapped register area */
250#define E1K_MM_SIZE 0x20000
251
252#define E1K_MAX_TX_PKT_SIZE 16288
253#define E1K_MAX_RX_PKT_SIZE 16384
254
255/*****************************************************************************/
256
257/** Gets the specified bits from the register. */
258#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
259#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
260#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
261#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
262#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
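/* Illustrative sketch, assuming a local pThis like the register access macros
 * below: these helpers rely on the <REG>_<FIELD>_MASK / <REG>_<FIELD>_SHIFT
 * naming convention, e.g. for the buffer size field of the RCTL register. */
#if 0
uint32_t uBufSizeBits = GET_BITS(RCTL, BSIZE); /* (RCTL & RCTL_BSIZE_MASK) >> RCTL_BSIZE_SHIFT */
SET_BITS(RCTL, BSIZE, 1);                      /* rewrite only the BSIZE bits, keep the rest */
#endif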
263
264#define CTRL_SLU UINT32_C(0x00000040)
265#define CTRL_MDIO UINT32_C(0x00100000)
266#define CTRL_MDC UINT32_C(0x00200000)
267#define CTRL_MDIO_DIR UINT32_C(0x01000000)
268#define CTRL_MDC_DIR UINT32_C(0x02000000)
269#define CTRL_RESET UINT32_C(0x04000000)
270#define CTRL_VME UINT32_C(0x40000000)
271
272#define STATUS_LU UINT32_C(0x00000002)
273#define STATUS_TXOFF UINT32_C(0x00000010)
274
275#define EECD_EE_WIRES UINT32_C(0x0F)
276#define EECD_EE_REQ UINT32_C(0x40)
277#define EECD_EE_GNT UINT32_C(0x80)
278
279#define EERD_START UINT32_C(0x00000001)
280#define EERD_DONE UINT32_C(0x00000010)
281#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
282#define EERD_DATA_SHIFT 16
283#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
284#define EERD_ADDR_SHIFT 8
285
286#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
287#define MDIC_DATA_SHIFT 0
288#define MDIC_REG_MASK UINT32_C(0x001F0000)
289#define MDIC_REG_SHIFT 16
290#define MDIC_PHY_MASK UINT32_C(0x03E00000)
291#define MDIC_PHY_SHIFT 21
292#define MDIC_OP_WRITE UINT32_C(0x04000000)
293#define MDIC_OP_READ UINT32_C(0x08000000)
294#define MDIC_READY UINT32_C(0x10000000)
295#define MDIC_INT_EN UINT32_C(0x20000000)
296#define MDIC_ERROR UINT32_C(0x40000000)
297
298#define TCTL_EN UINT32_C(0x00000002)
299#define TCTL_PSP UINT32_C(0x00000008)
300
301#define RCTL_EN UINT32_C(0x00000002)
302#define RCTL_UPE UINT32_C(0x00000008)
303#define RCTL_MPE UINT32_C(0x00000010)
304#define RCTL_LPE UINT32_C(0x00000020)
305#define RCTL_LBM_MASK UINT32_C(0x000000C0)
306#define RCTL_LBM_SHIFT 6
307#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
308#define RCTL_RDMTS_SHIFT 8
309#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
310#define RCTL_MO_MASK UINT32_C(0x00003000)
311#define RCTL_MO_SHIFT 12
312#define RCTL_BAM UINT32_C(0x00008000)
313#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
314#define RCTL_BSIZE_SHIFT 16
315#define RCTL_VFE UINT32_C(0x00040000)
316#define RCTL_CFIEN UINT32_C(0x00080000)
317#define RCTL_CFI UINT32_C(0x00100000)
318#define RCTL_BSEX UINT32_C(0x02000000)
319#define RCTL_SECRC UINT32_C(0x04000000)
320
321#define ICR_TXDW UINT32_C(0x00000001)
322#define ICR_TXQE UINT32_C(0x00000002)
323#define ICR_LSC UINT32_C(0x00000004)
324#define ICR_RXDMT0 UINT32_C(0x00000010)
325#define ICR_RXT0 UINT32_C(0x00000080)
326#define ICR_TXD_LOW UINT32_C(0x00008000)
327#define RDTR_FPD UINT32_C(0x80000000)
328
329#define PBA_st ((PBAST*)(pThis->auRegs + PBA_IDX))
330typedef struct
331{
332 unsigned rxa : 7;
333 unsigned rxa_r : 9;
334 unsigned txa : 16;
335} PBAST;
336AssertCompileSize(PBAST, 4);
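/* Illustrative sketch, assuming a local pThis: PBA_st overlays the bit-field
 * struct above on the PBA register so the RX/TX buffer allocation fields can
 * be read by name instead of by manual shifting and masking. */
#if 0
E1kLog(("%s PBA: rxa=%u txa=%u\n", pThis->szPrf, PBA_st->rxa, PBA_st->txa));
#endif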
337
338#define TXDCTL_WTHRESH_MASK 0x003F0000
339#define TXDCTL_WTHRESH_SHIFT 16
340#define TXDCTL_LWTHRESH_MASK 0xFE000000
341#define TXDCTL_LWTHRESH_SHIFT 25
342
343#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
344#define RXCSUM_PCSS_SHIFT 0
345
346/** @name Register access macros
347 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
348 * @{ */
349#define CTRL pThis->auRegs[CTRL_IDX]
350#define STATUS pThis->auRegs[STATUS_IDX]
351#define EECD pThis->auRegs[EECD_IDX]
352#define EERD pThis->auRegs[EERD_IDX]
353#define CTRL_EXT pThis->auRegs[CTRL_EXT_IDX]
354#define FLA pThis->auRegs[FLA_IDX]
355#define MDIC pThis->auRegs[MDIC_IDX]
356#define FCAL pThis->auRegs[FCAL_IDX]
357#define FCAH pThis->auRegs[FCAH_IDX]
358#define FCT pThis->auRegs[FCT_IDX]
359#define VET pThis->auRegs[VET_IDX]
360#define ICR pThis->auRegs[ICR_IDX]
361#define ITR pThis->auRegs[ITR_IDX]
362#define ICS pThis->auRegs[ICS_IDX]
363#define IMS pThis->auRegs[IMS_IDX]
364#define IMC pThis->auRegs[IMC_IDX]
365#define RCTL pThis->auRegs[RCTL_IDX]
366#define FCTTV pThis->auRegs[FCTTV_IDX]
367#define TXCW pThis->auRegs[TXCW_IDX]
368#define RXCW pThis->auRegs[RXCW_IDX]
369#define TCTL pThis->auRegs[TCTL_IDX]
370#define TIPG pThis->auRegs[TIPG_IDX]
371#define AIFS pThis->auRegs[AIFS_IDX]
372#define LEDCTL pThis->auRegs[LEDCTL_IDX]
373#define PBA pThis->auRegs[PBA_IDX]
374#define FCRTL pThis->auRegs[FCRTL_IDX]
375#define FCRTH pThis->auRegs[FCRTH_IDX]
376#define RDFH pThis->auRegs[RDFH_IDX]
377#define RDFT pThis->auRegs[RDFT_IDX]
378#define RDFHS pThis->auRegs[RDFHS_IDX]
379#define RDFTS pThis->auRegs[RDFTS_IDX]
380#define RDFPC pThis->auRegs[RDFPC_IDX]
381#define RDBAL pThis->auRegs[RDBAL_IDX]
382#define RDBAH pThis->auRegs[RDBAH_IDX]
383#define RDLEN pThis->auRegs[RDLEN_IDX]
384#define RDH pThis->auRegs[RDH_IDX]
385#define RDT pThis->auRegs[RDT_IDX]
386#define RDTR pThis->auRegs[RDTR_IDX]
387#define RXDCTL pThis->auRegs[RXDCTL_IDX]
388#define RADV pThis->auRegs[RADV_IDX]
389#define RSRPD pThis->auRegs[RSRPD_IDX]
390#define TXDMAC pThis->auRegs[TXDMAC_IDX]
391#define TDFH pThis->auRegs[TDFH_IDX]
392#define TDFT pThis->auRegs[TDFT_IDX]
393#define TDFHS pThis->auRegs[TDFHS_IDX]
394#define TDFTS pThis->auRegs[TDFTS_IDX]
395#define TDFPC pThis->auRegs[TDFPC_IDX]
396#define TDBAL pThis->auRegs[TDBAL_IDX]
397#define TDBAH pThis->auRegs[TDBAH_IDX]
398#define TDLEN pThis->auRegs[TDLEN_IDX]
399#define TDH pThis->auRegs[TDH_IDX]
400#define TDT pThis->auRegs[TDT_IDX]
401#define TIDV pThis->auRegs[TIDV_IDX]
402#define TXDCTL pThis->auRegs[TXDCTL_IDX]
403#define TADV pThis->auRegs[TADV_IDX]
404#define TSPMT pThis->auRegs[TSPMT_IDX]
405#define CRCERRS pThis->auRegs[CRCERRS_IDX]
406#define ALGNERRC pThis->auRegs[ALGNERRC_IDX]
407#define SYMERRS pThis->auRegs[SYMERRS_IDX]
408#define RXERRC pThis->auRegs[RXERRC_IDX]
409#define MPC pThis->auRegs[MPC_IDX]
410#define SCC pThis->auRegs[SCC_IDX]
411#define ECOL pThis->auRegs[ECOL_IDX]
412#define MCC pThis->auRegs[MCC_IDX]
413#define LATECOL pThis->auRegs[LATECOL_IDX]
414#define COLC pThis->auRegs[COLC_IDX]
415#define DC pThis->auRegs[DC_IDX]
416#define TNCRS pThis->auRegs[TNCRS_IDX]
417/* #define SEC pThis->auRegs[SEC_IDX] Conflict with sys/time.h */
418#define CEXTERR pThis->auRegs[CEXTERR_IDX]
419#define RLEC pThis->auRegs[RLEC_IDX]
420#define XONRXC pThis->auRegs[XONRXC_IDX]
421#define XONTXC pThis->auRegs[XONTXC_IDX]
422#define XOFFRXC pThis->auRegs[XOFFRXC_IDX]
423#define XOFFTXC pThis->auRegs[XOFFTXC_IDX]
424#define FCRUC pThis->auRegs[FCRUC_IDX]
425#define PRC64 pThis->auRegs[PRC64_IDX]
426#define PRC127 pThis->auRegs[PRC127_IDX]
427#define PRC255 pThis->auRegs[PRC255_IDX]
428#define PRC511 pThis->auRegs[PRC511_IDX]
429#define PRC1023 pThis->auRegs[PRC1023_IDX]
430#define PRC1522 pThis->auRegs[PRC1522_IDX]
431#define GPRC pThis->auRegs[GPRC_IDX]
432#define BPRC pThis->auRegs[BPRC_IDX]
433#define MPRC pThis->auRegs[MPRC_IDX]
434#define GPTC pThis->auRegs[GPTC_IDX]
435#define GORCL pThis->auRegs[GORCL_IDX]
436#define GORCH pThis->auRegs[GORCH_IDX]
437#define GOTCL pThis->auRegs[GOTCL_IDX]
438#define GOTCH pThis->auRegs[GOTCH_IDX]
439#define RNBC pThis->auRegs[RNBC_IDX]
440#define RUC pThis->auRegs[RUC_IDX]
441#define RFC pThis->auRegs[RFC_IDX]
442#define ROC pThis->auRegs[ROC_IDX]
443#define RJC pThis->auRegs[RJC_IDX]
444#define MGTPRC pThis->auRegs[MGTPRC_IDX]
445#define MGTPDC pThis->auRegs[MGTPDC_IDX]
446#define MGTPTC pThis->auRegs[MGTPTC_IDX]
447#define TORL pThis->auRegs[TORL_IDX]
448#define TORH pThis->auRegs[TORH_IDX]
449#define TOTL pThis->auRegs[TOTL_IDX]
450#define TOTH pThis->auRegs[TOTH_IDX]
451#define TPR pThis->auRegs[TPR_IDX]
452#define TPT pThis->auRegs[TPT_IDX]
453#define PTC64 pThis->auRegs[PTC64_IDX]
454#define PTC127 pThis->auRegs[PTC127_IDX]
455#define PTC255 pThis->auRegs[PTC255_IDX]
456#define PTC511 pThis->auRegs[PTC511_IDX]
457#define PTC1023 pThis->auRegs[PTC1023_IDX]
458#define PTC1522 pThis->auRegs[PTC1522_IDX]
459#define MPTC pThis->auRegs[MPTC_IDX]
460#define BPTC pThis->auRegs[BPTC_IDX]
461#define TSCTC pThis->auRegs[TSCTC_IDX]
462#define TSCTFC pThis->auRegs[TSCTFC_IDX]
463#define RXCSUM pThis->auRegs[RXCSUM_IDX]
464#define WUC pThis->auRegs[WUC_IDX]
465#define WUFC pThis->auRegs[WUFC_IDX]
466#define WUS pThis->auRegs[WUS_IDX]
467#define MANC pThis->auRegs[MANC_IDX]
468#define IPAV pThis->auRegs[IPAV_IDX]
469#define WUPL pThis->auRegs[WUPL_IDX]
470/** @} */
471
472/**
473 * Indices of memory-mapped registers in register table.
474 */
475typedef enum
476{
477 CTRL_IDX,
478 STATUS_IDX,
479 EECD_IDX,
480 EERD_IDX,
481 CTRL_EXT_IDX,
482 FLA_IDX,
483 MDIC_IDX,
484 FCAL_IDX,
485 FCAH_IDX,
486 FCT_IDX,
487 VET_IDX,
488 ICR_IDX,
489 ITR_IDX,
490 ICS_IDX,
491 IMS_IDX,
492 IMC_IDX,
493 RCTL_IDX,
494 FCTTV_IDX,
495 TXCW_IDX,
496 RXCW_IDX,
497 TCTL_IDX,
498 TIPG_IDX,
499 AIFS_IDX,
500 LEDCTL_IDX,
501 PBA_IDX,
502 FCRTL_IDX,
503 FCRTH_IDX,
504 RDFH_IDX,
505 RDFT_IDX,
506 RDFHS_IDX,
507 RDFTS_IDX,
508 RDFPC_IDX,
509 RDBAL_IDX,
510 RDBAH_IDX,
511 RDLEN_IDX,
512 RDH_IDX,
513 RDT_IDX,
514 RDTR_IDX,
515 RXDCTL_IDX,
516 RADV_IDX,
517 RSRPD_IDX,
518 TXDMAC_IDX,
519 TDFH_IDX,
520 TDFT_IDX,
521 TDFHS_IDX,
522 TDFTS_IDX,
523 TDFPC_IDX,
524 TDBAL_IDX,
525 TDBAH_IDX,
526 TDLEN_IDX,
527 TDH_IDX,
528 TDT_IDX,
529 TIDV_IDX,
530 TXDCTL_IDX,
531 TADV_IDX,
532 TSPMT_IDX,
533 CRCERRS_IDX,
534 ALGNERRC_IDX,
535 SYMERRS_IDX,
536 RXERRC_IDX,
537 MPC_IDX,
538 SCC_IDX,
539 ECOL_IDX,
540 MCC_IDX,
541 LATECOL_IDX,
542 COLC_IDX,
543 DC_IDX,
544 TNCRS_IDX,
545 SEC_IDX,
546 CEXTERR_IDX,
547 RLEC_IDX,
548 XONRXC_IDX,
549 XONTXC_IDX,
550 XOFFRXC_IDX,
551 XOFFTXC_IDX,
552 FCRUC_IDX,
553 PRC64_IDX,
554 PRC127_IDX,
555 PRC255_IDX,
556 PRC511_IDX,
557 PRC1023_IDX,
558 PRC1522_IDX,
559 GPRC_IDX,
560 BPRC_IDX,
561 MPRC_IDX,
562 GPTC_IDX,
563 GORCL_IDX,
564 GORCH_IDX,
565 GOTCL_IDX,
566 GOTCH_IDX,
567 RNBC_IDX,
568 RUC_IDX,
569 RFC_IDX,
570 ROC_IDX,
571 RJC_IDX,
572 MGTPRC_IDX,
573 MGTPDC_IDX,
574 MGTPTC_IDX,
575 TORL_IDX,
576 TORH_IDX,
577 TOTL_IDX,
578 TOTH_IDX,
579 TPR_IDX,
580 TPT_IDX,
581 PTC64_IDX,
582 PTC127_IDX,
583 PTC255_IDX,
584 PTC511_IDX,
585 PTC1023_IDX,
586 PTC1522_IDX,
587 MPTC_IDX,
588 BPTC_IDX,
589 TSCTC_IDX,
590 TSCTFC_IDX,
591 RXCSUM_IDX,
592 WUC_IDX,
593 WUFC_IDX,
594 WUS_IDX,
595 MANC_IDX,
596 IPAV_IDX,
597 WUPL_IDX,
598 MTA_IDX,
599 RA_IDX,
600 VFTA_IDX,
601 IP4AT_IDX,
602 IP6AT_IDX,
603 WUPM_IDX,
604 FFLT_IDX,
605 FFMT_IDX,
606 FFVT_IDX,
607 PBM_IDX,
608 RA_82542_IDX,
609 MTA_82542_IDX,
610 VFTA_82542_IDX,
611 E1K_NUM_OF_REGS
612} E1kRegIndex;
613
614#define E1K_NUM_OF_32BIT_REGS MTA_IDX
615/** The number of registers with strictly increasing offset. */
616#define E1K_NUM_OF_BINARY_SEARCHABLE (WUPL_IDX + 1)
617
618
619/**
620 * Define E1000-specific EEPROM layout.
621 */
622struct E1kEEPROM
623{
624 public:
625 EEPROM93C46 eeprom;
626
627#ifdef IN_RING3
628 /**
629 * Initialize EEPROM content.
630 *
631 * @param macAddr MAC address of E1000.
632 */
633 void init(RTMAC &macAddr)
634 {
635 eeprom.init();
636 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
637 eeprom.m_au16Data[0x04] = 0xFFFF;
638 /*
639 * bit 3 - full support for power management
640 * bit 10 - full duplex
641 */
642 eeprom.m_au16Data[0x0A] = 0x4408;
643 eeprom.m_au16Data[0x0B] = 0x001E;
644 eeprom.m_au16Data[0x0C] = 0x8086;
645 eeprom.m_au16Data[0x0D] = 0x100E;
646 eeprom.m_au16Data[0x0E] = 0x8086;
647 eeprom.m_au16Data[0x0F] = 0x3040;
648 eeprom.m_au16Data[0x21] = 0x7061;
649 eeprom.m_au16Data[0x22] = 0x280C;
650 eeprom.m_au16Data[0x23] = 0x00C8;
651 eeprom.m_au16Data[0x24] = 0x00C8;
652 eeprom.m_au16Data[0x2F] = 0x0602;
653 updateChecksum();
654 };
655
656 /**
657 * Compute the checksum as required by E1000 and store it
658 * in the last word.
659 */
660 void updateChecksum()
661 {
662 uint16_t u16Checksum = 0;
663
664 for (int i = 0; i < eeprom.SIZE-1; i++)
665 u16Checksum += eeprom.m_au16Data[i];
666 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
667 };
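    /* Illustrative sketch, not part of the original interface: the checksum word
     * written by updateChecksum() makes the 16-bit sum of all EEPROM words equal
     * the 0xBABA signature, which is what guest drivers check when validating
     * the EEPROM contents. */
#if 0
    bool isChecksumValid()
    {
        uint16_t u16Sum = 0;
        for (int i = 0; i < eeprom.SIZE; i++)
            u16Sum += eeprom.m_au16Data[i];
        return u16Sum == 0xBABA;
    }
#endif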
668
669 /**
670 * The first 6 bytes of the EEPROM contain the MAC address.
671 *
672 * @param pMac Where to store the MAC address of E1000.
673 */
674 void getMac(PRTMAC pMac)
675 {
676 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
677 };
678
679 uint32_t read()
680 {
681 return eeprom.read();
682 }
683
684 void write(uint32_t u32Wires)
685 {
686 eeprom.write(u32Wires);
687 }
688
689 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
690 {
691 return eeprom.readWord(u32Addr, pu16Value);
692 }
693
694 int load(PSSMHANDLE pSSM)
695 {
696 return eeprom.load(pSSM);
697 }
698
699 void save(PSSMHANDLE pSSM)
700 {
701 eeprom.save(pSSM);
702 }
703#endif /* IN_RING3 */
704};
705
706
707#define E1K_SPEC_VLAN(s) (s & 0xFFF)
708#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
709#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
710
711struct E1kRxDStatus
712{
713 /** @name Descriptor Status field (3.2.3.1)
714 * @{ */
715 unsigned fDD : 1; /**< Descriptor Done. */
716 unsigned fEOP : 1; /**< End of packet. */
717 unsigned fIXSM : 1; /**< Ignore checksum indication. */
718 unsigned fVP : 1; /**< VLAN, matches VET. */
719 unsigned : 1;
720 unsigned fTCPCS : 1; /**< TCP checksum calculated on the packet. */
721 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
722 unsigned fPIF : 1; /**< Passed in-exact filter */
723 /** @} */
724 /** @name Descriptor Errors field (3.2.3.2)
725 * (Only valid when fEOP and fDD are set.)
726 * @{ */
727 unsigned fCE : 1; /**< CRC or alignment error. */
728 unsigned : 4; /**< Reserved, varies with different models... */
729 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
730 unsigned fIPE : 1; /**< IP Checksum error. */
731 unsigned fRXE : 1; /**< RX Data error. */
732 /** @} */
733 /** @name Descriptor Special field (3.2.3.3)
734 * @{ */
735 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
736 /** @} */
737};
738typedef struct E1kRxDStatus E1KRXDST;
739
740struct E1kRxDesc_st
741{
742 uint64_t u64BufAddr; /**< Address of data buffer */
743 uint16_t u16Length; /**< Length of data in buffer */
744 uint16_t u16Checksum; /**< Packet checksum */
745 E1KRXDST status;
746};
747typedef struct E1kRxDesc_st E1KRXDESC;
748AssertCompileSize(E1KRXDESC, 16);
749
750#define E1K_DTYP_LEGACY -1
751#define E1K_DTYP_CONTEXT 0
752#define E1K_DTYP_DATA 1
753
754struct E1kTDLegacy
755{
756 uint64_t u64BufAddr; /**< Address of data buffer */
757 struct TDLCmd_st
758 {
759 unsigned u16Length : 16;
760 unsigned u8CSO : 8;
761 /* CMD field : 8 */
762 unsigned fEOP : 1;
763 unsigned fIFCS : 1;
764 unsigned fIC : 1;
765 unsigned fRS : 1;
766 unsigned fRPS : 1;
767 unsigned fDEXT : 1;
768 unsigned fVLE : 1;
769 unsigned fIDE : 1;
770 } cmd;
771 struct TDLDw3_st
772 {
773 /* STA field */
774 unsigned fDD : 1;
775 unsigned fEC : 1;
776 unsigned fLC : 1;
777 unsigned fTURSV : 1;
778 /* RSV field */
779 unsigned u4RSV : 4;
780 /* CSS field */
781 unsigned u8CSS : 8;
782 /* Special field*/
783 unsigned u16Special: 16;
784 } dw3;
785};
786
787/**
788 * TCP/IP Context Transmit Descriptor, section 3.3.6.
789 */
790struct E1kTDContext
791{
792 struct CheckSum_st
793 {
794 /** TSE: Header start. !TSE: Checksum start. */
795 unsigned u8CSS : 8;
796 /** Checksum offset - where to store it. */
797 unsigned u8CSO : 8;
798 /** Checksum ending (inclusive) offset, 0 = end of packet. */
799 unsigned u16CSE : 16;
800 } ip;
801 struct CheckSum_st tu;
802 struct TDCDw2_st
803 {
804 /** TSE: The total number of payload bytes for this context. Sans header. */
805 unsigned u20PAYLEN : 20;
806 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
807 unsigned u4DTYP : 4;
808 /** TUCMD field, 8 bits
809 * @{ */
810 /** TSE: TCP (set) or UDP (clear). */
811 unsigned fTCP : 1;
812 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
813 * the IP header. Does not affect the checksumming.
814 * @remarks 82544GC/EI interprets a cleared field differently. */
815 unsigned fIP : 1;
816 /** TSE: TCP segmentation enable. When clear, the context only describes checksum offloading. */
817 unsigned fTSE : 1;
818 /** Report status (only applies to dw3.fDD here). */
819 unsigned fRS : 1;
820 /** Reserved, MBZ. */
821 unsigned fRSV1 : 1;
822 /** Descriptor extension, must be set for this descriptor type. */
823 unsigned fDEXT : 1;
824 /** Reserved, MBZ. */
825 unsigned fRSV2 : 1;
826 /** Interrupt delay enable. */
827 unsigned fIDE : 1;
828 /** @} */
829 } dw2;
830 struct TDCDw3_st
831 {
832 /** Descriptor Done. */
833 unsigned fDD : 1;
834 /** Reserved, MBZ. */
835 unsigned u7RSV : 7;
836 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
837 unsigned u8HDRLEN : 8;
838 /** TSO: Maximum segment size. */
839 unsigned u16MSS : 16;
840 } dw3;
841};
842typedef struct E1kTDContext E1KTXCTX;
843
844/**
845 * TCP/IP Data Transmit Descriptor, section 3.3.7.
846 */
847struct E1kTDData
848{
849 uint64_t u64BufAddr; /**< Address of data buffer */
850 struct TDDCmd_st
851 {
852 /** The total length of data pointed to by this descriptor. */
853 unsigned u20DTALEN : 20;
854 /** The descriptor type - E1K_DTYP_DATA (1). */
855 unsigned u4DTYP : 4;
856 /** @name DCMD field, 8 bits (3.3.7.1).
857 * @{ */
858 /** End of packet. Note TSCTFC update. */
859 unsigned fEOP : 1;
860 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
861 unsigned fIFCS : 1;
862 /** Use the TSE context when set and the normal context when clear. */
863 unsigned fTSE : 1;
864 /** Report status (dw3.STA). */
865 unsigned fRS : 1;
866 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
867 unsigned fRPS : 1;
868 /** Descriptor extension, must be set for this descriptor type. */
869 unsigned fDEXT : 1;
870 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
871 * Insert dw3.SPECIAL after ethernet header. */
872 unsigned fVLE : 1;
873 /** Interrupt delay enable. */
874 unsigned fIDE : 1;
875 /** @} */
876 } cmd;
877 struct TDDDw3_st
878 {
879 /** @name STA field (3.3.7.2)
880 * @{ */
881 unsigned fDD : 1; /**< Descriptor done. */
882 unsigned fEC : 1; /**< Excess collision. */
883 unsigned fLC : 1; /**< Late collision. */
884 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
885 unsigned fTURSV : 1;
886 /** @} */
887 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
888 /** @name POPTS (Packet Option) field (3.3.7.3)
889 * @{ */
890 unsigned fIXSM : 1; /**< Insert IP checksum. */
891 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
892 unsigned u6RSV : 6; /**< Reserved, MBZ. */
893 /** @} */
894 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
895 * Requires fEOP, fVLE and CTRL.VME to be set.
896 * @{ */
897 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
898 /** @} */
899 } dw3;
900};
901typedef struct E1kTDData E1KTXDAT;
902
903union E1kTxDesc
904{
905 struct E1kTDLegacy legacy;
906 struct E1kTDContext context;
907 struct E1kTDData data;
908};
909typedef union E1kTxDesc E1KTXDESC;
910AssertCompileSize(E1KTXDESC, 16);
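/* Illustrative sketch of how the three layouts are distinguished (the actual
 * logic lives further down in the file): legacy descriptors have DEXT clear,
 * while extended descriptors carry their type in the shared DTYP field. */
#if 0
static int e1kSketchGetDescType(E1KTXDESC const *pDesc)
{
    if (!pDesc->legacy.cmd.fDEXT)
        return E1K_DTYP_LEGACY;
    return pDesc->context.dw2.u4DTYP;  /* E1K_DTYP_CONTEXT (0) or E1K_DTYP_DATA (1) */
}
#endif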
911
912#define RA_CTL_AS 0x0003
913#define RA_CTL_AV 0x8000
914
915union E1kRecAddr
916{
917 uint32_t au32[32];
918 struct RAArray
919 {
920 uint8_t addr[6];
921 uint16_t ctl;
922 } array[16];
923};
924typedef struct E1kRecAddr::RAArray E1KRAELEM;
925typedef union E1kRecAddr E1KRA;
926AssertCompileSize(E1KRA, 8*16);
927
928#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
929#define E1K_IP_DF UINT16_C(0x4000) /**< don't fragment flag */
930#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
931#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
932
933/** @todo use+extend RTNETIPV4 */
934struct E1kIpHeader
935{
936 /* type of service / version / header length */
937 uint16_t tos_ver_hl;
938 /* total length */
939 uint16_t total_len;
940 /* identification */
941 uint16_t ident;
942 /* fragment offset field */
943 uint16_t offset;
944 /* time to live / protocol */
945 uint16_t ttl_proto;
946 /* checksum */
947 uint16_t chksum;
948 /* source IP address */
949 uint32_t src;
950 /* destination IP address */
951 uint32_t dest;
952};
953AssertCompileSize(struct E1kIpHeader, 20);
954
955#define E1K_TCP_FIN UINT16_C(0x01)
956#define E1K_TCP_SYN UINT16_C(0x02)
957#define E1K_TCP_RST UINT16_C(0x04)
958#define E1K_TCP_PSH UINT16_C(0x08)
959#define E1K_TCP_ACK UINT16_C(0x10)
960#define E1K_TCP_URG UINT16_C(0x20)
961#define E1K_TCP_ECE UINT16_C(0x40)
962#define E1K_TCP_CWR UINT16_C(0x80)
963#define E1K_TCP_FLAGS UINT16_C(0x3f)
964
965/** @todo use+extend RTNETTCP */
966struct E1kTcpHeader
967{
968 uint16_t src;
969 uint16_t dest;
970 uint32_t seqno;
971 uint32_t ackno;
972 uint16_t hdrlen_flags;
973 uint16_t wnd;
974 uint16_t chksum;
975 uint16_t urgp;
976};
977AssertCompileSize(struct E1kTcpHeader, 20);
978
979
980#ifdef E1K_WITH_TXD_CACHE
981/** The current Saved state version. */
982# define E1K_SAVEDSTATE_VERSION 4
983/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
984# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
985#else /* !E1K_WITH_TXD_CACHE */
986/** The current Saved state version. */
987# define E1K_SAVEDSTATE_VERSION 3
988#endif /* !E1K_WITH_TXD_CACHE */
989/** Saved state version for VirtualBox 4.1 and earlier.
990 * These did not include VLAN tag fields. */
991#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
992/** Saved state version for VirtualBox 3.0 and earlier.
993 * This did not include the configuration part nor the E1kEEPROM. */
994#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
995
996/**
997 * Device state structure.
998 *
999 * Holds the current state of the device.
1000 *
1001 * @implements PDMINETWORKDOWN
1002 * @implements PDMINETWORKCONFIG
1003 * @implements PDMILEDPORTS
1004 */
1005struct E1kState_st
1006{
1007 char szPrf[8]; /**< Log prefix, e.g. E1000#1. */
1008 PDMIBASE IBase;
1009 PDMINETWORKDOWN INetworkDown;
1010 PDMINETWORKCONFIG INetworkConfig;
1011 PDMILEDPORTS ILeds; /**< LED interface */
1012 R3PTRTYPE(PPDMIBASE) pDrvBase; /**< Attached network driver. */
1013 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1014
1015 PPDMDEVINSR3 pDevInsR3; /**< Device instance - R3. */
1016 R3PTRTYPE(PPDMQUEUE) pTxQueueR3; /**< Transmit queue - R3. */
1017 R3PTRTYPE(PPDMQUEUE) pCanRxQueueR3; /**< Rx wakeup signaller - R3. */
1018 PPDMINETWORKUPR3 pDrvR3; /**< Attached network driver - R3. */
1019 PTMTIMERR3 pRIDTimerR3; /**< Receive Interrupt Delay Timer - R3. */
1020 PTMTIMERR3 pRADTimerR3; /**< Receive Absolute Delay Timer - R3. */
1021 PTMTIMERR3 pTIDTimerR3; /**< Transmit Interrupt Delay Timer - R3. */
1022 PTMTIMERR3 pTADTimerR3; /**< Transmit Absolute Delay Timer - R3. */
1023 PTMTIMERR3 pTXDTimerR3; /**< Transmit Delay Timer - R3. */
1024 PTMTIMERR3 pIntTimerR3; /**< Late Interrupt Timer - R3. */
1025 PTMTIMERR3 pLUTimerR3; /**< Link Up(/Restore) Timer. */
1026 /** The scatter / gather buffer used for the current outgoing packet - R3. */
1027 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1028
1029 PPDMDEVINSR0 pDevInsR0; /**< Device instance - R0. */
1030 R0PTRTYPE(PPDMQUEUE) pTxQueueR0; /**< Transmit queue - R0. */
1031 R0PTRTYPE(PPDMQUEUE) pCanRxQueueR0; /**< Rx wakeup signaller - R0. */
1032 PPDMINETWORKUPR0 pDrvR0; /**< Attached network driver - R0. */
1033 PTMTIMERR0 pRIDTimerR0; /**< Receive Interrupt Delay Timer - R0. */
1034 PTMTIMERR0 pRADTimerR0; /**< Receive Absolute Delay Timer - R0. */
1035 PTMTIMERR0 pTIDTimerR0; /**< Transmit Interrupt Delay Timer - R0. */
1036 PTMTIMERR0 pTADTimerR0; /**< Transmit Absolute Delay Timer - R0. */
1037 PTMTIMERR0 pTXDTimerR0; /**< Transmit Delay Timer - R0. */
1038 PTMTIMERR0 pIntTimerR0; /**< Late Interrupt Timer - R0. */
1039 PTMTIMERR0 pLUTimerR0; /**< Link Up(/Restore) Timer - R0. */
1040 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1041 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1042
1043 PPDMDEVINSRC pDevInsRC; /**< Device instance - RC. */
1044 RCPTRTYPE(PPDMQUEUE) pTxQueueRC; /**< Transmit queue - RC. */
1045 RCPTRTYPE(PPDMQUEUE) pCanRxQueueRC; /**< Rx wakeup signaller - RC. */
1046 PPDMINETWORKUPRC pDrvRC; /**< Attached network driver - RC. */
1047 PTMTIMERRC pRIDTimerRC; /**< Receive Interrupt Delay Timer - RC. */
1048 PTMTIMERRC pRADTimerRC; /**< Receive Absolute Delay Timer - RC. */
1049 PTMTIMERRC pTIDTimerRC; /**< Transmit Interrupt Delay Timer - RC. */
1050 PTMTIMERRC pTADTimerRC; /**< Transmit Absolute Delay Timer - RC. */
1051 PTMTIMERRC pTXDTimerRC; /**< Transmit Delay Timer - RC. */
1052 PTMTIMERRC pIntTimerRC; /**< Late Interrupt Timer - RC. */
1053 PTMTIMERRC pLUTimerRC; /**< Link Up(/Restore) Timer - RC. */
1054 /** The scatter / gather buffer used for the current outgoing packet - RC. */
1055 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1056 RTRCPTR RCPtrAlignment;
1057
1058#if HC_ARCH_BITS != 32
1059 uint32_t Alignment1;
1060#endif
1061 PDMCRITSECT cs; /**< Critical section - what is it protecting? */
1062 PDMCRITSECT csRx; /**< RX Critical section. */
1063#ifdef E1K_WITH_TX_CS
1064 PDMCRITSECT csTx; /**< TX Critical section. */
1065#endif /* E1K_WITH_TX_CS */
1066 /** Base address of memory-mapped registers. */
1067 RTGCPHYS addrMMReg;
1068 /** MAC address obtained from the configuration. */
1069 RTMAC macConfigured;
1070 /** Base port of I/O space region. */
1071 RTIOPORT IOPortBase;
1072 /** EMT: */
1073 PCIDEVICE pciDevice;
1074 /** EMT: Last time the interrupt was acknowledged. */
1075 uint64_t u64AckedAt;
1076 /** All: Used for eliminating spurious interrupts. */
1077 bool fIntRaised;
1078 /** EMT: false if the cable is disconnected by the GUI. */
1079 bool fCableConnected;
1080 /** EMT: */
1081 bool fR0Enabled;
1082 /** EMT: */
1083 bool fRCEnabled;
1084 /** EMT: Compute Ethernet CRC for RX packets. */
1085 bool fEthernetCRC;
1086
1087 bool Alignment2[3];
1088 /** Link up delay (in milliseconds). */
1089 uint32_t cMsLinkUpDelay;
1090
1091 /** All: Device register storage. */
1092 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1093 /** TX/RX: Status LED. */
1094 PDMLED led;
1095 /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1096 uint32_t u32PktNo;
1097
1098 /** EMT: Offset of the register to be read via IO. */
1099 uint32_t uSelectedReg;
1100 /** EMT: Multicast Table Array. */
1101 uint32_t auMTA[128];
1102 /** EMT: Receive Address registers. */
1103 E1KRA aRecAddr;
1104 /** EMT: VLAN filter table array. */
1105 uint32_t auVFTA[128];
1106 /** EMT: Receive buffer size. */
1107 uint16_t u16RxBSize;
1108 /** EMT: Locked state -- no state alteration possible. */
1109 bool fLocked;
1110 /** EMT: */
1111 bool fDelayInts;
1112 /** All: */
1113 bool fIntMaskUsed;
1114
1115 /** N/A: */
1116 bool volatile fMaybeOutOfSpace;
1117 /** EMT: Gets signalled when more RX descriptors become available. */
1118 RTSEMEVENT hEventMoreRxDescAvail;
1119#ifdef E1K_WITH_RXD_CACHE
1120 /** RX: Fetched RX descriptors. */
1121 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1122 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1123 /** RX: Actual number of fetched RX descriptors. */
1124 uint32_t nRxDFetched;
1125 /** RX: Index in cache of RX descriptor being processed. */
1126 uint32_t iRxDCurrent;
1127#endif /* E1K_WITH_RXD_CACHE */
1128
1129 /** TX: Context used for TCP segmentation packets. */
1130 E1KTXCTX contextTSE;
1131 /** TX: Context used for ordinary packets. */
1132 E1KTXCTX contextNormal;
1133#ifdef E1K_WITH_TXD_CACHE
1134 /** TX: Fetched TX descriptors. */
1135 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1136 /** TX: Actual number of fetched TX descriptors. */
1137 uint8_t nTxDFetched;
1138 /** TX: Index in cache of TX descriptor being processed. */
1139 uint8_t iTxDCurrent;
1140 /** TX: Will this frame be sent as GSO. */
1141 bool fGSO;
1142 /** Alignment padding. */
1143 bool fReserved;
1144 /** TX: Number of bytes in next packet. */
1145 uint32_t cbTxAlloc;
1146
1147#endif /* E1K_WITH_TXD_CACHE */
1148 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1149 * applicable to the current TSE mode. */
1150 PDMNETWORKGSO GsoCtx;
1151 /** Scratch space for holding the loopback / fallback scatter / gather
1152 * descriptor. */
1153 union
1154 {
1155 PDMSCATTERGATHER Sg;
1156 uint8_t padding[8 * sizeof(RTUINTPTR)];
1157 } uTxFallback;
1158 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1159 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1160 /** TX: Number of bytes assembled in TX packet buffer. */
1161 uint16_t u16TxPktLen;
1162 /** TX: When false, forces segmentation in e1000 instead of sending frames as GSO. */
1163 bool fGSOEnabled;
1164 /** TX: IP checksum has to be inserted if true. */
1165 bool fIPcsum;
1166 /** TX: TCP/UDP checksum has to be inserted if true. */
1167 bool fTCPcsum;
1168 /** TX: VLAN tag has to be inserted if true. */
1169 bool fVTag;
1170 /** TX: TCI part of VLAN tag to be inserted. */
1171 uint16_t u16VTagTCI;
1172 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1173 uint32_t u32PayRemain;
1174 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1175 uint16_t u16HdrRemain;
1176 /** TX TSE fallback: Flags from template header. */
1177 uint16_t u16SavedFlags;
1178 /** TX TSE fallback: Partial checksum from template header. */
1179 uint32_t u32SavedCsum;
1180 /** ?: Emulated controller type. */
1181 E1KCHIP eChip;
1182
1183 /** EMT: EEPROM emulation */
1184 E1kEEPROM eeprom;
1185 /** EMT: Physical interface emulation. */
1186 PHY phy;
1187
1188#if 0
1189 /** Alignment padding. */
1190 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1191#endif
1192
1193 STAMCOUNTER StatReceiveBytes;
1194 STAMCOUNTER StatTransmitBytes;
1195#if defined(VBOX_WITH_STATISTICS)
1196 STAMPROFILEADV StatMMIOReadRZ;
1197 STAMPROFILEADV StatMMIOReadR3;
1198 STAMPROFILEADV StatMMIOWriteRZ;
1199 STAMPROFILEADV StatMMIOWriteR3;
1200 STAMPROFILEADV StatEEPROMRead;
1201 STAMPROFILEADV StatEEPROMWrite;
1202 STAMPROFILEADV StatIOReadRZ;
1203 STAMPROFILEADV StatIOReadR3;
1204 STAMPROFILEADV StatIOWriteRZ;
1205 STAMPROFILEADV StatIOWriteR3;
1206 STAMPROFILEADV StatLateIntTimer;
1207 STAMCOUNTER StatLateInts;
1208 STAMCOUNTER StatIntsRaised;
1209 STAMCOUNTER StatIntsPrevented;
1210 STAMPROFILEADV StatReceive;
1211 STAMPROFILEADV StatReceiveCRC;
1212 STAMPROFILEADV StatReceiveFilter;
1213 STAMPROFILEADV StatReceiveStore;
1214 STAMPROFILEADV StatTransmitRZ;
1215 STAMPROFILEADV StatTransmitR3;
1216 STAMPROFILE StatTransmitSendRZ;
1217 STAMPROFILE StatTransmitSendR3;
1218 STAMPROFILE StatRxOverflow;
1219 STAMCOUNTER StatRxOverflowWakeup;
1220 STAMCOUNTER StatTxDescCtxNormal;
1221 STAMCOUNTER StatTxDescCtxTSE;
1222 STAMCOUNTER StatTxDescLegacy;
1223 STAMCOUNTER StatTxDescData;
1224 STAMCOUNTER StatTxDescTSEData;
1225 STAMCOUNTER StatTxPathFallback;
1226 STAMCOUNTER StatTxPathGSO;
1227 STAMCOUNTER StatTxPathRegular;
1228 STAMCOUNTER StatPHYAccesses;
1229 STAMCOUNTER aStatRegWrites[E1K_NUM_OF_REGS];
1230 STAMCOUNTER aStatRegReads[E1K_NUM_OF_REGS];
1231#endif /* VBOX_WITH_STATISTICS */
1232
1233#ifdef E1K_INT_STATS
1234 /* Internal stats */
1235 uint64_t u64ArmedAt;
1236 uint64_t uStatMaxTxDelay;
1237 uint32_t uStatInt;
1238 uint32_t uStatIntTry;
1239 uint32_t uStatIntLower;
1240 uint32_t uStatIntDly;
1241 int32_t iStatIntLost;
1242 int32_t iStatIntLostOne;
1243 uint32_t uStatDisDly;
1244 uint32_t uStatIntSkip;
1245 uint32_t uStatIntLate;
1246 uint32_t uStatIntMasked;
1247 uint32_t uStatIntEarly;
1248 uint32_t uStatIntRx;
1249 uint32_t uStatIntTx;
1250 uint32_t uStatIntICS;
1251 uint32_t uStatIntRDTR;
1252 uint32_t uStatIntRXDMT0;
1253 uint32_t uStatIntTXQE;
1254 uint32_t uStatTxNoRS;
1255 uint32_t uStatTxIDE;
1256 uint32_t uStatTxDelayed;
1257 uint32_t uStatTxDelayExp;
1258 uint32_t uStatTAD;
1259 uint32_t uStatTID;
1260 uint32_t uStatRAD;
1261 uint32_t uStatRID;
1262 uint32_t uStatRxFrm;
1263 uint32_t uStatTxFrm;
1264 uint32_t uStatDescCtx;
1265 uint32_t uStatDescDat;
1266 uint32_t uStatDescLeg;
1267 uint32_t uStatTx1514;
1268 uint32_t uStatTx2962;
1269 uint32_t uStatTx4410;
1270 uint32_t uStatTx5858;
1271 uint32_t uStatTx7306;
1272 uint32_t uStatTx8754;
1273 uint32_t uStatTx16384;
1274 uint32_t uStatTx32768;
1275 uint32_t uStatTxLarge;
1276 uint32_t uStatAlign;
1277#endif /* E1K_INT_STATS */
1278};
1279typedef struct E1kState_st E1KSTATE;
1280/** Pointer to the E1000 device state. */
1281typedef E1KSTATE *PE1KSTATE;
1282
1283#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1284
1285/* Forward declarations ******************************************************/
1286static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread);
1287
1288static int e1kRegReadUnimplemented (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1289static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1290static int e1kRegReadAutoClear (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1291static int e1kRegReadDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1292static int e1kRegWriteDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1293#if 0 /* unused */
1294static int e1kRegReadCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1295#endif
1296static int e1kRegWriteCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1297static int e1kRegReadEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1298static int e1kRegWriteEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1299static int e1kRegWriteEERD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1300static int e1kRegWriteMDIC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1301static int e1kRegReadICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1302static int e1kRegWriteICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1303static int e1kRegWriteICS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1304static int e1kRegWriteIMS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1305static int e1kRegWriteIMC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1306static int e1kRegWriteRCTL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1307static int e1kRegWritePBA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1308static int e1kRegWriteRDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1309static int e1kRegWriteRDTR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1310static int e1kRegWriteTDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1311static int e1kRegReadMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1312static int e1kRegWriteMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1313static int e1kRegReadRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1314static int e1kRegWriteRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1315static int e1kRegReadVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1316static int e1kRegWriteVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1317
1318/**
1319 * Register map table.
1320 *
1321 * Override pfnRead and pfnWrite to get register-specific behavior.
1322 */
1323static const struct E1kRegMap_st
1324{
1325 /** Register offset in the register space. */
1326 uint32_t offset;
1327 /** Size in bytes. Registers of size > 4 are in fact tables. */
1328 uint32_t size;
1329 /** Readable bits. */
1330 uint32_t readable;
1331 /** Writable bits. */
1332 uint32_t writable;
1333 /** Read callback. */
1334 int (*pfnRead)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1335 /** Write callback. */
1336 int (*pfnWrite)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1337 /** Abbreviated name. */
1338 const char *abbrev;
1339 /** Full name. */
1340 const char *name;
1341} g_aE1kRegMap[E1K_NUM_OF_REGS] =
1342{
1343 /* offset size read mask write mask read callback write callback abbrev full name */
1344 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1345 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1346 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1347 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1348 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1349 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1350 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1351 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1352 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1353 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1354 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1355 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1356 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1357 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1358 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1359 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1360 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1361 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1362 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1363 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1364 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1365 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1366 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1367 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1368 { 0x00e00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LEDCTL" , "LED Control" },
1369 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1370 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1371 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1372 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1373 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1374 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1375 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1376 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1377 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1378 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1379 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1380 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1381 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1382 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1383 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1384 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1385 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1386 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1387 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1388 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1389 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1390 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1391 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1392 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1393 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1394 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1395 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1396 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1397 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1398 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1399 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1400 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1401 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1402 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1403 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1404 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1405 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1406 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1407 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1408 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1409 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1410 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1411 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1412 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1413 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1414 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1415 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1416 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1417 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1418 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1419 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1420 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1421 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1422 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1423 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1424 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1425 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1426 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1427 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1428 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1429 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1430 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1431 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1432 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1433 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1434 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1435 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1436 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1437 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1438 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1439 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1440 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1441 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1442 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1443 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1444 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1445 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1446 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1447 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1448 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1449 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1450 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1451 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1452 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1453 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1454 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1455 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1456 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1457 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1458 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1459 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1460 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1461 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1462 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1463 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1464 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1465 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1466 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1467 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1468 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1469 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1470 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1471 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1472 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1473 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1474 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1475 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1476 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
1477 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
1478 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
1479};
1480
1481#ifdef DEBUG
1482
1483/**
1484 * Convert U32 value to hex string. Masked bytes are replaced with dots.
1485 *
1486 * @remarks The mask has byte (not bit) granularity (e.g. 000000FF).
1487 *
1488 * @returns The buffer.
1489 *
1490 * @param u32 The word to convert into string.
1491 * @param mask Selects which bytes to convert.
1492 * @param buf Where to put the result.
1493 */
1494static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1495{
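    /* Illustrative example: e1kU32toHex(0x12345678, 0x0000FFFF, buf) yields "....5678";
     * each hex digit whose corresponding mask nibble is zero is printed as a dot. */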
1496 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1497 {
1498 if (mask & 0xF)
1499 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1500 else
1501 *ptr = '.';
1502 }
1503 buf[8] = 0;
1504 return buf;
1505}
1506
1507/**
1508 * Returns timer name for debug purposes.
1509 *
1510 * @returns The timer name.
1511 *
1512 * @param pThis The device state structure.
1513 * @param pTimer The timer to get the name for.
1514 */
1515DECLINLINE(const char *) e1kGetTimerName(PE1KSTATE pThis, PTMTIMER pTimer)
1516{
1517 if (pTimer == pThis->CTX_SUFF(pTIDTimer))
1518 return "TID";
1519 if (pTimer == pThis->CTX_SUFF(pTADTimer))
1520 return "TAD";
1521 if (pTimer == pThis->CTX_SUFF(pRIDTimer))
1522 return "RID";
1523 if (pTimer == pThis->CTX_SUFF(pRADTimer))
1524 return "RAD";
1525 if (pTimer == pThis->CTX_SUFF(pIntTimer))
1526 return "Int";
1527 if (pTimer == pThis->CTX_SUFF(pTXDTimer))
1528 return "TXD";
1529 if (pTimer == pThis->CTX_SUFF(pLUTimer))
1530 return "LinkUp";
1531 return "unknown";
1532}
1533
1534#endif /* DEBUG */
1535
1536/**
1537 * Arm a timer.
1538 *
1539 * @param pThis Pointer to the device state structure.
1540 * @param pTimer Pointer to the timer.
1541 * @param uExpireIn Expiration interval in microseconds.
1542 */
1543DECLINLINE(void) e1kArmTimer(PE1KSTATE pThis, PTMTIMER pTimer, uint32_t uExpireIn)
1544{
1545 if (pThis->fLocked)
1546 return;
1547
1548 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1549 pThis->szPrf, e1kGetTimerName(pThis, pTimer), uExpireIn));
1550 TMTimerSetMicro(pTimer, uExpireIn);
1551}
1552
1553/**
1554 * Cancel a timer.
1555 *
1556 * @param pThis Pointer to the device state structure.
1557 * @param pTimer Pointer to the timer.
1558 */
1559DECLINLINE(void) e1kCancelTimer(PE1KSTATE pThis, PTMTIMER pTimer)
1560{
1561 E1kLog2(("%s Stopping %s timer...\n",
1562 pThis->szPrf, e1kGetTimerName(pThis, pTimer)));
1563 int rc = TMTimerStop(pTimer);
1564 if (RT_FAILURE(rc))
1565 {
1566 E1kLog2(("%s e1kCancelTimer: TMTimerStop() failed with %Rrc\n",
1567 pThis->szPrf, rc));
1568 }
1569}
1570
1571#define e1kCsEnter(ps, rc) PDMCritSectEnter(&ps->cs, rc)
1572#define e1kCsLeave(ps) PDMCritSectLeave(&ps->cs)
1573
1574#define e1kCsRxEnter(ps, rc) PDMCritSectEnter(&ps->csRx, rc)
1575#define e1kCsRxLeave(ps) PDMCritSectLeave(&ps->csRx)
1576#define e1kCsRxIsOwner(ps) PDMCritSectIsOwner(&ps->csRx)
1577
1578#ifndef E1K_WITH_TX_CS
1579# define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1580# define e1kCsTxLeave(ps) do { } while (0)
1581#else /* E1K_WITH_TX_CS */
1582# define e1kCsTxEnter(ps, rc) PDMCritSectEnter(&ps->csTx, rc)
1583# define e1kCsTxLeave(ps) PDMCritSectLeave(&ps->csTx)
1584#endif /* E1K_WITH_TX_CS */
1585
1586#ifdef IN_RING3
1587
1588/**
1589 * Wake up the RX thread.
1590 */
1591static void e1kWakeupReceive(PPDMDEVINS pDevIns)
1592{
1593 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
1594 if ( pThis->fMaybeOutOfSpace
1595 && pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
1596 {
1597 STAM_COUNTER_INC(&pThis->StatRxOverflowWakeup);
1598 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", pThis->szPrf));
1599 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
1600 }
1601}
1602
1603/**
1604 * Hardware reset. Revert all registers to initial values.
1605 *
1606 * @param pThis The device state structure.
1607 */
1608static void e1kHardReset(PE1KSTATE pThis)
1609{
1610 E1kLog(("%s Hard reset triggered\n", pThis->szPrf));
1611 memset(pThis->auRegs, 0, sizeof(pThis->auRegs));
1612 memset(pThis->aRecAddr.au32, 0, sizeof(pThis->aRecAddr.au32));
1613#ifdef E1K_INIT_RA0
1614 memcpy(pThis->aRecAddr.au32, pThis->macConfigured.au8,
1615 sizeof(pThis->macConfigured.au8));
1616 pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;
1617#endif /* E1K_INIT_RA0 */
1618 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1619 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1620 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1621 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1622 Assert(GET_BITS(RCTL, BSIZE) == 0);
1623 pThis->u16RxBSize = 2048;
1624
1625 /* Reset promiscuous mode */
1626 if (pThis->pDrvR3)
1627 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, false);
1628
1629#ifdef E1K_WITH_TXD_CACHE
1630 int rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
1631 if (RT_LIKELY(rc == VINF_SUCCESS))
1632 {
1633 pThis->nTxDFetched = 0;
1634 pThis->iTxDCurrent = 0;
1635 pThis->fGSO = false;
1636 pThis->cbTxAlloc = 0;
1637 e1kCsTxLeave(pThis);
1638 }
1639#endif /* E1K_WITH_TXD_CACHE */
1640#ifdef E1K_WITH_RXD_CACHE
1641 if (RT_LIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1642 {
1643 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
1644 e1kCsRxLeave(pThis);
1645 }
1646#endif /* E1K_WITH_RXD_CACHE */
1647}
1648
1649#endif /* IN_RING3 */
1650
1651/**
1652 * Compute Internet checksum.
1653 *
1654 * @remarks Refer to http://www.netfor2.com/checksum.html for short intro.
1655 *
1656 * @param   pvBuf       The buffer containing the packet data.
1657 * @param   cb          The size of the packet in bytes.
1660 *
1661 * @return The 1's complement of the 1's complement sum.
1662 *
1663 * @thread E1000_TX
1664 */
1665static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1666{
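    /* Illustrative example: for a buffer holding the 16-bit words 0x1234 and 0x4321
     * the sum is 0x5555 and the function returns ~0x5555 = 0xAAAA; any carry out of
     * bit 15 is folded back into the sum by the loop at the end. */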
1667 uint32_t csum = 0;
1668 uint16_t *pu16 = (uint16_t *)pvBuf;
1669
1670 while (cb > 1)
1671 {
1672 csum += *pu16++;
1673 cb -= 2;
1674 }
1675 if (cb)
1676 csum += *(uint8_t*)pu16;
1677 while (csum >> 16)
1678 csum = (csum >> 16) + (csum & 0xFFFF);
1679 return ~csum;
1680}
1681
1682/**
1683 * Dump a packet to debug log.
1684 *
1685 * @param pThis The device state structure.
1686 * @param cpPacket The packet.
1687 * @param cb The size of the packet.
1688 * @param cszText A string denoting direction of packet transfer.
1689 * @thread E1000_TX
1690 */
1691DECLINLINE(void) e1kPacketDump(PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *cszText)
1692{
1693#ifdef DEBUG
1694 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1695 {
1696 Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
1697 pThis->szPrf, cszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cb));
1698 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1699 {
1700 Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
1701 pThis->szPrf, cpPacket+14+8, cpPacket+14+24));
1702 if (*(cpPacket+14+6) == 0x6)
1703 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1704 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1705 }
1706 else if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x800)
1707 {
1708 Log4(("%s --- IPv4: %RTnaipv4 => %RTnaipv4\n",
1709 pThis->szPrf, *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16)));
1710 if (*(cpPacket+14+6) == 0x6)
1711 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1712 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1713 }
1714 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1715 e1kCsLeave(pThis);
1716 }
1717#else
1718 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1719 {
1720 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1721 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
1722 cszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cpPacket+14+8, cpPacket+14+24,
1723 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1724 else
1725 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
1726 cszText, ++pThis->u32PktNo, cpPacket+6, cpPacket,
1727 *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16),
1728 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1729 e1kCsLeave(pThis);
1730 }
1731#endif
1732}
1733
1734/**
1735 * Determine the type of transmit descriptor.
1736 *
1737 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1738 *
1739 * @param pDesc Pointer to descriptor union.
1740 * @thread E1000_TX
1741 */
1742DECLINLINE(int) e1kGetDescType(E1KTXDESC* pDesc)
1743{
1744 if (pDesc->legacy.cmd.fDEXT)
1745 return pDesc->context.dw2.u4DTYP;
1746 return E1K_DTYP_LEGACY;
1747}
1748
1749/**
1750 * Dump receive descriptor to debug log.
1751 *
1752 * @param pThis The device state structure.
1753 * @param pDesc Pointer to the descriptor.
1754 * @thread E1000_RX
1755 */
1756static void e1kPrintRDesc(PE1KSTATE pThis, E1KRXDESC* pDesc)
1757{
1758 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", pThis->szPrf, pDesc->u16Length));
1759 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1760 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1761 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1762 pDesc->status.fPIF ? "PIF" : "pif",
1763 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1764 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1765 pDesc->status.fVP ? "VP" : "vp",
1766 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1767 pDesc->status.fEOP ? "EOP" : "eop",
1768 pDesc->status.fDD ? "DD" : "dd",
1769 pDesc->status.fRXE ? "RXE" : "rxe",
1770 pDesc->status.fIPE ? "IPE" : "ipe",
1771 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1772 pDesc->status.fCE ? "CE" : "ce",
1773 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1774 E1K_SPEC_VLAN(pDesc->status.u16Special),
1775 E1K_SPEC_PRI(pDesc->status.u16Special)));
1776}
1777
1778/**
1779 * Dump transmit descriptor to debug log.
1780 *
1781 * @param pThis The device state structure.
1782 * @param pDesc Pointer to descriptor union.
1783 * @param cszDir A string denoting direction of descriptor transfer
1784 * @thread E1000_TX
1785 */
1786static void e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC* pDesc, const char* cszDir,
1787 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
1788{
1789 /*
1790 * Unfortunately we cannot use our format handler here, we want R0 logging
1791 * as well.
1792 */
1793 switch (e1kGetDescType(pDesc))
1794 {
1795 case E1K_DTYP_CONTEXT:
1796 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
1797 pThis->szPrf, cszDir, cszDir));
1798 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
1799 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
1800 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
1801 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
1802 pDesc->context.dw2.fIDE ? " IDE":"",
1803 pDesc->context.dw2.fRS ? " RS" :"",
1804 pDesc->context.dw2.fTSE ? " TSE":"",
1805 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
1806 pDesc->context.dw2.fTCP ? "TCP":"UDP",
1807 pDesc->context.dw2.u20PAYLEN,
1808 pDesc->context.dw3.u8HDRLEN,
1809 pDesc->context.dw3.u16MSS,
1810 pDesc->context.dw3.fDD?"DD":""));
1811 break;
1812 case E1K_DTYP_DATA:
1813 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
1814 pThis->szPrf, cszDir, pDesc->data.cmd.u20DTALEN, cszDir));
1815 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1816 pDesc->data.u64BufAddr,
1817 pDesc->data.cmd.u20DTALEN));
1818 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
1819 pDesc->data.cmd.fIDE ? " IDE" :"",
1820 pDesc->data.cmd.fVLE ? " VLE" :"",
1821 pDesc->data.cmd.fRPS ? " RPS" :"",
1822 pDesc->data.cmd.fRS ? " RS" :"",
1823 pDesc->data.cmd.fTSE ? " TSE" :"",
1824 pDesc->data.cmd.fIFCS? " IFCS":"",
1825 pDesc->data.cmd.fEOP ? " EOP" :"",
1826 pDesc->data.dw3.fDD ? " DD" :"",
1827 pDesc->data.dw3.fEC ? " EC" :"",
1828 pDesc->data.dw3.fLC ? " LC" :"",
1829 pDesc->data.dw3.fTXSM? " TXSM":"",
1830 pDesc->data.dw3.fIXSM? " IXSM":"",
1831 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
1832 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
1833 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
1834 break;
1835 case E1K_DTYP_LEGACY:
1836 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
1837 pThis->szPrf, cszDir, pDesc->legacy.cmd.u16Length, cszDir));
1838 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1839 pDesc->data.u64BufAddr,
1840 pDesc->legacy.cmd.u16Length));
1841 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
1842 pDesc->legacy.cmd.fIDE ? " IDE" :"",
1843 pDesc->legacy.cmd.fVLE ? " VLE" :"",
1844 pDesc->legacy.cmd.fRPS ? " RPS" :"",
1845 pDesc->legacy.cmd.fRS ? " RS" :"",
1846 pDesc->legacy.cmd.fIC ? " IC" :"",
1847 pDesc->legacy.cmd.fIFCS? " IFCS":"",
1848 pDesc->legacy.cmd.fEOP ? " EOP" :"",
1849 pDesc->legacy.dw3.fDD ? " DD" :"",
1850 pDesc->legacy.dw3.fEC ? " EC" :"",
1851 pDesc->legacy.dw3.fLC ? " LC" :"",
1852 pDesc->legacy.cmd.u8CSO,
1853 pDesc->legacy.dw3.u8CSS,
1854 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
1855 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
1856 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
1857 break;
1858 default:
1859 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
1860 pThis->szPrf, cszDir, cszDir));
1861 break;
1862 }
1863}
1864
1865/**
1866 * Raise interrupt if not masked.
1867 *
1868 * @returns VBox status code.
 * @param   pThis           The device state structure.
 * @param   rcBusy          Status code to return when the critical section is busy.
 * @param   u32IntCause     Additional ICR bits to set before checking the mask (defaults to 0).
1869 */
1870static int e1kRaiseInterrupt(PE1KSTATE pThis, int rcBusy, uint32_t u32IntCause = 0)
1871{
1872 int rc = e1kCsEnter(pThis, rcBusy);
1873 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1874 return rc;
1875
1876 E1K_INC_ISTAT_CNT(pThis->uStatIntTry);
1877 ICR |= u32IntCause;
1878 if (ICR & IMS)
1879 {
1880#if 0
1881 if (pThis->fDelayInts)
1882 {
1883 E1K_INC_ISTAT_CNT(pThis->uStatIntDly);
1884 pThis->iStatIntLostOne = 1;
1885 E1kLog2(("%s e1kRaiseInterrupt: Delayed. ICR=%08x\n",
1886 pThis->szPrf, ICR));
1887#define E1K_LOST_IRQ_THRSLD 20
1888//#define E1K_LOST_IRQ_THRSLD 200000000
1889 if (pThis->iStatIntLost >= E1K_LOST_IRQ_THRSLD)
1890 {
1891 E1kLog2(("%s WARNING! Disabling delayed interrupt logic: delayed=%d, delivered=%d\n",
1892 pThis->szPrf, pThis->uStatIntDly, pThis->uStatIntLate));
1893 pThis->fIntMaskUsed = false;
1894 pThis->uStatDisDly++;
1895 }
1896 }
1897 else
1898#endif
1899 if (pThis->fIntRaised)
1900 {
1901 E1K_INC_ISTAT_CNT(pThis->uStatIntSkip);
1902 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
1903 pThis->szPrf, ICR & IMS));
1904 }
1905 else
1906 {
1907#ifdef E1K_ITR_ENABLED
1908 uint64_t tstamp = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
1909             /* interrupts/sec = 1 / (256 * 1e-9 * ITR), i.e. each ITR unit is 256 ns */
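            /* E.g. ITR = 4000 enforces a minimum interval of 4000 * 256 ns = 1.024 ms,
             * i.e. at most ~976 interrupts per second (illustrative example). */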
1910 E1kLog2(("%s e1kRaiseInterrupt: tstamp - pThis->u64AckedAt = %d, ITR * 256 = %d\n",
1911 pThis->szPrf, (uint32_t)(tstamp - pThis->u64AckedAt), ITR * 256));
1912 //if (!!ITR && pThis->fIntMaskUsed && tstamp - pThis->u64AckedAt < ITR * 256)
1913 if (!!ITR && tstamp - pThis->u64AckedAt < ITR * 256 && !(ICR & ICR_RXT0))
1914 {
1915 E1K_INC_ISTAT_CNT(pThis->uStatIntEarly);
1916 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
1917 pThis->szPrf, (uint32_t)(tstamp - pThis->u64AckedAt), ITR * 256));
1918 }
1919 else
1920#endif
1921 {
1922
1923 /* Since we are delivering the interrupt now
1924 * there is no need to do it later -- stop the timer.
1925 */
1926 TMTimerStop(pThis->CTX_SUFF(pIntTimer));
1927 E1K_INC_ISTAT_CNT(pThis->uStatInt);
1928 STAM_COUNTER_INC(&pThis->StatIntsRaised);
1929 /* Got at least one unmasked interrupt cause */
1930 pThis->fIntRaised = true;
1931 /* Raise(1) INTA(0) */
1932 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
1933 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 1);
1934 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
1935 pThis->szPrf, ICR & IMS));
1936 }
1937 }
1938 }
1939 else
1940 {
1941 E1K_INC_ISTAT_CNT(pThis->uStatIntMasked);
1942 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
1943 pThis->szPrf, ICR, IMS));
1944 }
1945 e1kCsLeave(pThis);
1946 return VINF_SUCCESS;
1947}
1948
1949/**
1950 * Compute the physical address of the descriptor.
1951 *
1952 * @returns the physical address of the descriptor.
1953 *
1954 * @param baseHigh High-order 32 bits of descriptor table address.
1955 * @param baseLow Low-order 32 bits of descriptor table address.
1956 * @param idxDesc The descriptor index in the table.
1957 */
1958DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
1959{
1960 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
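    /* Descriptors are 16 bytes each, so e.g. baseHigh=0, baseLow=0x10000, idxDesc=3
     * yields physical address 0x10030 (illustrative example). */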
1961 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
1962}
1963
1964/**
1965 * Advance the head pointer of the receive descriptor queue.
1966 *
1967 * @remarks RDH always points to the next available RX descriptor.
1968 *
1969 * @param pThis The device state structure.
1970 */
1971DECLINLINE(void) e1kAdvanceRDH(PE1KSTATE pThis)
1972{
1973 Assert(e1kCsRxIsOwner(pThis));
1974 //e1kCsEnter(pThis, RT_SRC_POS);
1975 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
1976 RDH = 0;
1977 /*
1978 * Compute current receive queue length and fire RXDMT0 interrupt
1979 * if we are low on receive buffers
1980 */
1981 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
1982 /*
1983 * The minimum threshold is controlled by RDMTS bits of RCTL:
1984 * 00 = 1/2 of RDLEN
1985 * 01 = 1/4 of RDLEN
1986 * 10 = 1/8 of RDLEN
1987 * 11 = reserved
1988 */
1989 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
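    /* Example: for a ring of 256 descriptors and RCTL.RDMTS = 01b (1/4) the
     * threshold is 256 / 4 = 64 descriptors. */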
1990 if (uRQueueLen <= uMinRQThreshold)
1991 {
1992 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
1993 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
1994 pThis->szPrf, RDH, RDT, uRQueueLen, uMinRQThreshold));
1995 E1K_INC_ISTAT_CNT(pThis->uStatIntRXDMT0);
1996 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXDMT0);
1997 }
1998 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
1999 pThis->szPrf, RDH, RDT, uRQueueLen));
2000 //e1kCsLeave(pThis);
2001}
2002
2003#ifdef E1K_WITH_RXD_CACHE
2004/**
2005 * Return the number of RX descriptors that belong to the hardware.
2006 *
2007 * @returns the number of available descriptors in the RX ring.
2008 * @param pThis The device state structure.
2009 * @thread ???
2010 */
2011DECLINLINE(uint32_t) e1kGetRxLen(PE1KSTATE pThis)
2012{
2013    /*
2014 * Make sure RDT won't change during computation. EMT may modify RDT at
2015 * any moment.
2016 */
2017 uint32_t rdt = RDT;
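    /* E.g. with 256 descriptors in the ring, RDH=10 and RDT=5 the hardware owns
     * (256 - 10) + 5 = 251 descriptors; with RDH=5 and RDT=10 it owns 5. */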
2018 return (RDH > rdt ? RDLEN/sizeof(E1KRXDESC) : 0) + rdt - RDH;
2019}
2020
2021DECLINLINE(unsigned) e1kRxDInCache(PE1KSTATE pThis)
2022{
2023 return pThis->nRxDFetched > pThis->iRxDCurrent ?
2024 pThis->nRxDFetched - pThis->iRxDCurrent : 0;
2025}
2026
2027DECLINLINE(unsigned) e1kRxDIsCacheEmpty(PE1KSTATE pThis)
2028{
2029 return pThis->iRxDCurrent >= pThis->nRxDFetched;
2030}
2031
2032/**
2033 * Load receive descriptors from guest memory. The caller needs to be in Rx
2034 * critical section.
2035 *
2036 * We need two physical reads in case the tail wrapped around the end of the
2037 * RX descriptor ring.
2038 *
2039 * @returns the actual number of descriptors fetched.
2040 * @param   pThis       The device state structure.
2043 * @thread EMT, RX
2044 */
2045DECLINLINE(unsigned) e1kRxDPrefetch(PE1KSTATE pThis)
2046{
2047 /* We've already loaded pThis->nRxDFetched descriptors past RDH. */
2048 unsigned nDescsAvailable = e1kGetRxLen(pThis) - e1kRxDInCache(pThis);
2049 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pThis->nRxDFetched);
2050 unsigned nDescsTotal = RDLEN / sizeof(E1KRXDESC);
2051 Assert(nDescsTotal != 0);
2052 if (nDescsTotal == 0)
2053 return 0;
2054 unsigned nFirstNotLoaded = (RDH + e1kRxDInCache(pThis)) % nDescsTotal;
2055 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
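    /* Worked example (illustrative): with a 16-descriptor ring, RDH=14, RDT=4 and an
     * empty cache there are 6 descriptors to fetch; the first read below loads entries
     * 14-15 and the wrap-around read loads entries 0-3, cache capacity permitting. */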
2056 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
2057 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
2058 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
2059 nFirstNotLoaded, nDescsInSingleRead));
2060 if (nDescsToFetch == 0)
2061 return 0;
2062 E1KRXDESC* pFirstEmptyDesc = &pThis->aRxDescriptors[pThis->nRxDFetched];
2063 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
2064 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
2065 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
2066 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
2067 // unsigned i, j;
2068 // for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
2069 // {
2070 // pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
2071 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2072 // }
2073 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
2074 pThis->szPrf, nDescsInSingleRead,
2075 RDBAH, RDBAL + RDH * sizeof(E1KRXDESC),
2076 nFirstNotLoaded, RDLEN, RDH, RDT));
2077 if (nDescsToFetch > nDescsInSingleRead)
2078 {
2079 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
2080 ((uint64_t)RDBAH << 32) + RDBAL,
2081 pFirstEmptyDesc + nDescsInSingleRead,
2082 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
2083 // Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
2084 // for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
2085 // {
2086 // pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
2087 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2088 // }
2089 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
2090 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
2091 RDBAH, RDBAL));
2092 }
2093 pThis->nRxDFetched += nDescsToFetch;
2094 return nDescsToFetch;
2095}
2096
2097/**
2098 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2099 * RX ring if the cache is empty.
2100 *
2101 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2102 * go out of sync with RDH which will cause trouble when EMT checks if the
2103 * cache is empty to do pre-fetch (see @bugref{6217}).
2104 *
2105 * @param pThis The device state structure.
2106 * @thread RX
2107 */
2108DECLINLINE(E1KRXDESC*) e1kRxDGet(PE1KSTATE pThis)
2109{
2110 Assert(e1kCsRxIsOwner(pThis));
2111 /* Check the cache first. */
2112 if (pThis->iRxDCurrent < pThis->nRxDFetched)
2113 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2114 /* Cache is empty, reset it and check if we can fetch more. */
2115 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2116 if (e1kRxDPrefetch(pThis))
2117 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2118 /* Out of Rx descriptors. */
2119 return NULL;
2120}
2121
2122/**
2123 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2124 * pointer. The descriptor gets written back to the RXD ring.
2125 *
2126 * @param pThis The device state structure.
2127 * @param pDesc The descriptor being "returned" to the RX ring.
2128 * @thread RX
2129 */
2130DECLINLINE(void) e1kRxDPut(PE1KSTATE pThis, E1KRXDESC* pDesc)
2131{
2132 Assert(e1kCsRxIsOwner(pThis));
2133 pThis->iRxDCurrent++;
2134 // Assert(pDesc >= pThis->aRxDescriptors);
2135 // Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2136 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2137 // uint32_t rdh = RDH;
2138 // Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
2139 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2140 e1kDescAddr(RDBAH, RDBAL, RDH),
2141 pDesc, sizeof(E1KRXDESC));
2142 e1kAdvanceRDH(pThis);
2143 e1kPrintRDesc(pThis, pDesc);
2144}
2145
2146/**
2147 * Store a fragment of a received packet at the specified address.
2148 *
2149 * @param pThis The device state structure.
2150 * @param pDesc The next available RX descriptor.
2151 * @param pvBuf The fragment.
2152 * @param cb The size of the fragment.
2153 */
2154static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2155{
2156 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2157 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2158 pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2159 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2160 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2161 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2162}
2163
2164#else /* !E1K_WITH_RXD_CACHE */
2165
2166/**
2167 * Store a fragment of received packet that fits into the next available RX
2168 * buffer.
2169 *
2170 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2171 *
2172 * @param pThis The device state structure.
2173 * @param pDesc The next available RX descriptor.
2174 * @param pvBuf The fragment.
2175 * @param cb The size of the fragment.
2176 */
2177static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2178{
2179 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2180 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2181 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2182 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2183 /* Write back the descriptor */
2184 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2185 e1kPrintRDesc(pThis, pDesc);
2186 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2187 /* Advance head */
2188 e1kAdvanceRDH(pThis);
2189 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
2190 if (pDesc->status.fEOP)
2191 {
2192 /* Complete packet has been stored -- it is time to let the guest know. */
2193#ifdef E1K_USE_RX_TIMERS
2194 if (RDTR)
2195 {
2196 /* Arm the timer to fire in RDTR usec (discard .024) */
2197 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2198 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2199 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2200 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2201 }
2202 else
2203 {
2204#endif
2205 /* 0 delay means immediate interrupt */
2206 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2207 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2208#ifdef E1K_USE_RX_TIMERS
2209 }
2210#endif
2211 }
2212 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2213}
2214#endif /* !E1K_WITH_RXD_CACHE */
2215
2216/**
2217 * Returns true if it is a broadcast packet.
2218 *
2219 * @returns true if destination address indicates broadcast.
2220 * @param pvBuf The ethernet packet.
2221 */
2222DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2223{
2224 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2225 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2226}
2227
2228/**
2229 * Returns true if it is a multicast packet.
2230 *
2231 * @remarks returns true for broadcast packets as well.
2232 * @returns true if destination address indicates multicast.
2233 * @param pvBuf The ethernet packet.
2234 */
2235DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2236{
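    /* The least significant bit of the first address octet is the I/G (multicast) bit,
     * e.g. 01:00:5E:xx:xx:xx and FF:FF:FF:FF:FF:FF both have it set. */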
2237 return (*(char*)pvBuf) & 1;
2238}
2239
2240/**
2241 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2242 *
2243 * @remarks We emulate checksum offloading for major packets types only.
2244 *
2245 * @returns VBox status code.
2246 * @param pThis The device state structure.
2247 * @param pFrame The available data.
2248 * @param cb Number of bytes available in the buffer.
2249 * @param   pStatus     Pointer to the descriptor status bit fields to update.
2250 */
2251static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2252{
2253 /** @todo
2254 * It is not safe to bypass checksum verification for packets coming
2255      * from the real wire. We are currently unable to tell where packets are
2256      * coming from, so we tell the driver to ignore our checksum flags
2257      * and do the verification in software.
2258 */
2259#if 0
2260 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2261
2262 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", pThis->szPrf, uEtherType));
2263
2264 switch (uEtherType)
2265 {
2266 case 0x800: /* IPv4 */
2267 {
2268 pStatus->fIXSM = false;
2269 pStatus->fIPCS = true;
2270 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2271 /* TCP/UDP checksum offloading works with TCP and UDP only */
2272 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2273 break;
2274 }
2275 case 0x86DD: /* IPv6 */
2276 pStatus->fIXSM = false;
2277 pStatus->fIPCS = false;
2278 pStatus->fTCPCS = true;
2279 break;
2280 default: /* ARP, VLAN, etc. */
2281 pStatus->fIXSM = true;
2282 break;
2283 }
2284#else
2285 pStatus->fIXSM = true;
2286#endif
2287 return VINF_SUCCESS;
2288}
2289
2290/**
2291 * Pad and store received packet.
2292 *
2293 * @remarks Make sure that the packet appears to the upper layer as one coming
2294 *          from real Ethernet: pad it and insert the FCS.
2295 *
2296 * @returns VBox status code.
2297 * @param pThis The device state structure.
2298 * @param pvBuf The available data.
2299 * @param cb Number of bytes available in the buffer.
2300 * @param status Bit fields containing status info.
2301 */
2302static int e1kHandleRxPacket(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
2303{
2304#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2305 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2306 uint8_t *ptr = rxPacket;
2307
2308 int rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2309 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2310 return rc;
2311
2312 if (cb > 70) /* unqualified guess */
2313 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
2314
2315 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2316 Assert(cb > 16);
2317 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2318 E1kLog3(("%s Max RX packet size is %u\n", pThis->szPrf, cbMax));
2319 if (status.fVP)
2320 {
2321 /* VLAN packet -- strip VLAN tag in VLAN mode */
2322 if ((CTRL & CTRL_VME) && cb > 16)
2323 {
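            /* A tagged frame is dst(6) src(6) TPID=0x8100(2) TCI(2) type/len(2) ...,
             * so u16Ptr[7] below is the TCI and the 4 tag bytes at offsets 12-15
             * are dropped from the copy. */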
2324 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2325 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2326 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2327 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2328 cb -= 4;
2329 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2330 pThis->szPrf, status.u16Special, cb));
2331 }
2332 else
2333 status.fVP = false; /* Set VP only if we stripped the tag */
2334 }
2335 else
2336 memcpy(rxPacket, pvBuf, cb);
2337 /* Pad short packets */
2338 if (cb < 60)
2339 {
2340 memset(rxPacket + cb, 0, 60 - cb);
2341 cb = 60;
2342 }
2343 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2344 {
2345 STAM_PROFILE_ADV_START(&pThis->StatReceiveCRC, a);
2346 /*
2347              * Add the FCS if CRC stripping is not enabled. Since the value of the
2348              * CRC is ignored by most drivers we may as well save ourselves the
2349              * trouble of calculating it (see the EthernetCRC CFGM parameter).
2350 */
2351 if (pThis->fEthernetCRC)
2352 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2353 cb += sizeof(uint32_t);
2354 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveCRC, a);
2355 E1kLog3(("%s Added FCS (cb=%u)\n", pThis->szPrf, cb));
2356 }
2357 /* Compute checksum of complete packet */
2358 uint16_t checksum = e1kCSum16(rxPacket + GET_BITS(RXCSUM, PCSS), cb);
2359 e1kRxChecksumOffload(pThis, rxPacket, cb, &status);
2360
2361 /* Update stats */
2362 E1K_INC_CNT32(GPRC);
2363 if (e1kIsBroadcast(pvBuf))
2364 E1K_INC_CNT32(BPRC);
2365 else if (e1kIsMulticast(pvBuf))
2366 E1K_INC_CNT32(MPRC);
2367 /* Update octet receive counter */
2368 E1K_ADD_CNT64(GORCL, GORCH, cb);
2369 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
2370 if (cb == 64)
2371 E1K_INC_CNT32(PRC64);
2372 else if (cb < 128)
2373 E1K_INC_CNT32(PRC127);
2374 else if (cb < 256)
2375 E1K_INC_CNT32(PRC255);
2376 else if (cb < 512)
2377 E1K_INC_CNT32(PRC511);
2378 else if (cb < 1024)
2379 E1K_INC_CNT32(PRC1023);
2380 else
2381 E1K_INC_CNT32(PRC1522);
2382
2383 E1K_INC_ISTAT_CNT(pThis->uStatRxFrm);
2384
2385#ifdef E1K_WITH_RXD_CACHE
2386 while (cb > 0)
2387 {
2388 E1KRXDESC *pDesc = e1kRxDGet(pThis);
2389
2390 if (pDesc == NULL)
2391 {
2392 E1kLog(("%s Out of receive buffers, dropping the packet "
2393 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2394 pThis->szPrf, cb, e1kRxDInCache(pThis), RDH, RDT));
2395 break;
2396 }
2397#else /* !E1K_WITH_RXD_CACHE */
2398 if (RDH == RDT)
2399 {
2400 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2401 pThis->szPrf));
2402 }
2403 /* Store the packet to receive buffers */
2404 while (RDH != RDT)
2405 {
2406 /* Load the descriptor pointed by head */
2407 E1KRXDESC desc, *pDesc = &desc;
2408 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
2409 &desc, sizeof(desc));
2410#endif /* !E1K_WITH_RXD_CACHE */
2411 if (pDesc->u64BufAddr)
2412 {
2413 /* Update descriptor */
2414 pDesc->status = status;
2415 pDesc->u16Checksum = checksum;
2416 pDesc->status.fDD = true;
2417
2418 /*
2419 * We need to leave Rx critical section here or we risk deadlocking
2420 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2421 * page or has an access handler associated with it.
2422 * Note that it is safe to leave the critical section here since
2423 * e1kRegWriteRDT() never modifies RDH. It never touches already
2424 * fetched RxD cache entries either.
2425 */
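            /* Illustrative example: with the default 2048-byte buffers a 3000-byte
             * frame is stored as a 2048-byte fragment with EOP clear followed by a
             * 952-byte fragment with EOP set. */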
2426 if (cb > pThis->u16RxBSize)
2427 {
2428 pDesc->status.fEOP = false;
2429 e1kCsRxLeave(pThis);
2430 e1kStoreRxFragment(pThis, pDesc, ptr, pThis->u16RxBSize);
2431 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2432 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2433 return rc;
2434 ptr += pThis->u16RxBSize;
2435 cb -= pThis->u16RxBSize;
2436 }
2437 else
2438 {
2439 pDesc->status.fEOP = true;
2440 e1kCsRxLeave(pThis);
2441 e1kStoreRxFragment(pThis, pDesc, ptr, cb);
2442#ifdef E1K_WITH_RXD_CACHE
2443 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2444 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2445 return rc;
2446 cb = 0;
2447#else /* !E1K_WITH_RXD_CACHE */
2448 pThis->led.Actual.s.fReading = 0;
2449 return VINF_SUCCESS;
2450#endif /* !E1K_WITH_RXD_CACHE */
2451 }
2452 /*
2453 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2454 * is not defined.
2455 */
2456 }
2457#ifdef E1K_WITH_RXD_CACHE
2458 /* Write back the descriptor. */
2459 pDesc->status.fDD = true;
2460 e1kRxDPut(pThis, pDesc);
2461#else /* !E1K_WITH_RXD_CACHE */
2462 else
2463 {
2464 /* Write back the descriptor. */
2465 pDesc->status.fDD = true;
2466 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2467 e1kDescAddr(RDBAH, RDBAL, RDH),
2468 pDesc, sizeof(E1KRXDESC));
2469 e1kAdvanceRDH(pThis);
2470 }
2471#endif /* !E1K_WITH_RXD_CACHE */
2472 }
2473
2474 if (cb > 0)
2475         E1kLog(("%s Out of receive buffers, dropping %u bytes\n", pThis->szPrf, cb));
2476
2477 pThis->led.Actual.s.fReading = 0;
2478
2479 e1kCsRxLeave(pThis);
2480#ifdef E1K_WITH_RXD_CACHE
2481 /* Complete packet has been stored -- it is time to let the guest know. */
2482# ifdef E1K_USE_RX_TIMERS
2483 if (RDTR)
2484 {
2485 /* Arm the timer to fire in RDTR usec (discard .024) */
2486 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2487 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2488 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2489 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2490 }
2491 else
2492 {
2493# endif /* E1K_USE_RX_TIMERS */
2494 /* 0 delay means immediate interrupt */
2495 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2496 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2497# ifdef E1K_USE_RX_TIMERS
2498 }
2499# endif /* E1K_USE_RX_TIMERS */
2500#endif /* E1K_WITH_RXD_CACHE */
2501
2502 return VINF_SUCCESS;
2503#else
2504 return VERR_INTERNAL_ERROR_2;
2505#endif
2506}
2507
2508
2509/**
2510 * Bring the link up after the configured delay, 5 seconds by default.
2511 *
2512 * @param pThis The device state structure.
2513 * @thread any
2514 */
2515DECLINLINE(void) e1kBringLinkUpDelayed(PE1KSTATE pThis)
2516{
2517 E1kLog(("%s Will bring up the link in %d seconds...\n",
2518 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
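    /* Note: cMsLinkUpDelay is in milliseconds while e1kArmTimer() expects microseconds,
     * hence the multiplication by 1000. */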
2519 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), pThis->cMsLinkUpDelay * 1000);
2520}
2521
2522#ifdef IN_RING3
2523/**
2524 * Bring up the link immediately.
2525 *
2526 * @param pThis The device state structure.
2527 */
2528DECLINLINE(void) e1kR3LinkUp(PE1KSTATE pThis)
2529{
2530 E1kLog(("%s Link is up\n", pThis->szPrf));
2531 STATUS |= STATUS_LU;
2532 Phy::setLinkStatus(&pThis->phy, true);
2533 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2534 if (pThis->pDrvR3)
2535 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_UP);
2536}
2537
2538/**
2539 * Bring down the link immediately.
2540 *
2541 * @param pThis The device state structure.
2542 */
2543DECLINLINE(void) e1kR3LinkDown(PE1KSTATE pThis)
2544{
2545 E1kLog(("%s Link is down\n", pThis->szPrf));
2546 STATUS &= ~STATUS_LU;
2547 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2548 if (pThis->pDrvR3)
2549 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2550}
2551
2552/**
2553 * Bring down the link temporarily.
2554 *
2555 * @param pThis The device state structure.
2556 */
2557DECLINLINE(void) e1kR3LinkDownTemp(PE1KSTATE pThis)
2558{
2559 E1kLog(("%s Link is down temporarily\n", pThis->szPrf));
2560 STATUS &= ~STATUS_LU;
2561 Phy::setLinkStatus(&pThis->phy, false);
2562 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2563 /*
2564 * Notifying the associated driver that the link went down (even temporarily)
2565 * seems to be the right thing, but it was not done before. This may cause
2566 * a regression if the driver does not expect the link to go down as a result
2567 * of sending PDMNETWORKLINKSTATE_DOWN_RESUME to this device. Earlier versions
2568 * of code notified the driver that the link was up! See @bugref{7057}.
2569 */
2570 if (pThis->pDrvR3)
2571 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2572 e1kBringLinkUpDelayed(pThis);
2573}
2574#endif /* IN_RING3 */
2575
2576#if 0 /* unused */
2577/**
2578 * Read handler for Device Status register.
2579 *
2580 * Get the link status from PHY.
2581 *
2582 * @returns VBox status code.
2583 *
2584 * @param pThis The device state structure.
2585 * @param offset Register offset in memory-mapped frame.
2586 * @param index Register index in register array.
2587 * @param   pu32Value   Where to store the register value.
2588 */
2589static int e1kRegReadCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2590{
2591 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2592 pThis->szPrf, (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2593 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2594 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2595 {
2596 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2597 if (Phy::readMDIO(&pThis->phy))
2598 *pu32Value = CTRL | CTRL_MDIO;
2599 else
2600 *pu32Value = CTRL & ~CTRL_MDIO;
2601 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2602 pThis->szPrf, !!(*pu32Value & CTRL_MDIO)));
2603 }
2604 else
2605 {
2606 /* MDIO pin is used for output, ignore it */
2607 *pu32Value = CTRL;
2608 }
2609 return VINF_SUCCESS;
2610}
2611#endif /* unused */
2612
2613/**
2614 * Write handler for Device Control register.
2615 *
2616 * Handles reset.
2617 *
2618 * @param pThis The device state structure.
2619 * @param offset Register offset in memory-mapped frame.
2620 * @param index Register index in register array.
2621 * @param value The value to store.
2623 * @thread EMT
2624 */
2625static int e1kRegWriteCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2626{
2627 int rc = VINF_SUCCESS;
2628
2629 if (value & CTRL_RESET)
2630 { /* RST */
2631#ifndef IN_RING3
2632 return VINF_IOM_R3_IOPORT_WRITE;
2633#else
2634 e1kHardReset(pThis);
2635#endif
2636 }
2637 else
2638 {
2639 if ( (value & CTRL_SLU)
2640 && pThis->fCableConnected
2641 && !(STATUS & STATUS_LU))
2642 {
2643 /* The driver indicates that we should bring up the link */
2644 /* Do so in 5 seconds (by default). */
2645 e1kBringLinkUpDelayed(pThis);
2646 /*
2647 * Change the status (but not PHY status) anyway as Windows expects
2648 * it for 82543GC.
2649 */
2650 STATUS |= STATUS_LU;
2651 }
2652 if (value & CTRL_VME)
2653 {
2654 E1kLog(("%s VLAN Mode Enabled\n", pThis->szPrf));
2655 }
2656 E1kLog(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2657 pThis->szPrf, (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2658 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2659 if (value & CTRL_MDC)
2660 {
2661 if (value & CTRL_MDIO_DIR)
2662 {
2663 E1kLog(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2664 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2665 Phy::writeMDIO(&pThis->phy, !!(value & CTRL_MDIO));
2666 }
2667 else
2668 {
2669 if (Phy::readMDIO(&pThis->phy))
2670 value |= CTRL_MDIO;
2671 else
2672 value &= ~CTRL_MDIO;
2673 E1kLog(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n",
2674 pThis->szPrf, !!(value & CTRL_MDIO)));
2675 }
2676 }
2677 rc = e1kRegWriteDefault(pThis, offset, index, value);
2678 }
2679
2680 return rc;
2681}
2682
2683/**
2684 * Write handler for EEPROM/Flash Control/Data register.
2685 *
2686 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2687 *
2688 * @param pThis The device state structure.
2689 * @param offset Register offset in memory-mapped frame.
2690 * @param index Register index in register array.
2691 * @param value The value to store.
2693 * @thread EMT
2694 */
2695static int e1kRegWriteEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2696{
2697#ifdef IN_RING3
2698 /* So far we are concerned with lower byte only */
2699 if ((EECD & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2700 {
2701 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2702 /* Note: 82543GC does not need to request EEPROM access */
2703 STAM_PROFILE_ADV_START(&pThis->StatEEPROMWrite, a);
2704 pThis->eeprom.write(value & EECD_EE_WIRES);
2705 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMWrite, a);
2706 }
2707 if (value & EECD_EE_REQ)
2708 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2709 else
2710 EECD &= ~EECD_EE_GNT;
2711 //e1kRegWriteDefault(pThis, offset, index, value );
2712
2713 return VINF_SUCCESS;
2714#else /* !IN_RING3 */
2715 return VINF_IOM_R3_MMIO_WRITE;
2716#endif /* !IN_RING3 */
2717}
2718
2719/**
2720 * Read handler for EEPROM/Flash Control/Data register.
2721 *
2722 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2723 *
2724 * @returns VBox status code.
2725 *
2726 * @param pThis The device state structure.
2727 * @param offset Register offset in memory-mapped frame.
2728 * @param index Register index in register array.
2729 * @param   pu32Value   Where to store the register value.
2730 * @thread EMT
2731 */
2732static int e1kRegReadEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2733{
2734#ifdef IN_RING3
2735 uint32_t value;
2736 int rc = e1kRegReadDefault(pThis, offset, index, &value);
2737 if (RT_SUCCESS(rc))
2738 {
2739 if ((value & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2740 {
2741 /* Note: 82543GC does not need to request EEPROM access */
2742 /* Access to EEPROM granted -- get 4-wire bits to EEPROM device */
2743 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2744 value |= pThis->eeprom.read();
2745 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2746 }
2747 *pu32Value = value;
2748 }
2749
2750 return rc;
2751#else /* !IN_RING3 */
2752 return VINF_IOM_R3_MMIO_READ;
2753#endif /* !IN_RING3 */
2754}
2755
2756/**
2757 * Write handler for EEPROM Read register.
2758 *
2759 * Handles EEPROM word access requests, reads EEPROM and stores the result
2760 * into DATA field.
2761 *
2762 * @param pThis The device state structure.
2763 * @param offset Register offset in memory-mapped frame.
2764 * @param index Register index in register array.
2765 * @param value The value to store.
2767 * @thread EMT
2768 */
2769static int e1kRegWriteEERD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2770{
2771#ifdef IN_RING3
2772 /* Make use of 'writable' and 'readable' masks. */
2773 e1kRegWriteDefault(pThis, offset, index, value);
2774 /* DONE and DATA are set only if read was triggered by START. */
2775 if (value & EERD_START)
2776 {
2777 uint16_t tmp;
2778 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2779 if (pThis->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2780 SET_BITS(EERD, DATA, tmp);
2781 EERD |= EERD_DONE;
2782 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2783 }
2784
2785 return VINF_SUCCESS;
2786#else /* !IN_RING3 */
2787 return VINF_IOM_R3_MMIO_WRITE;
2788#endif /* !IN_RING3 */
2789}
2790
2791
2792/**
2793 * Write handler for MDI Control register.
2794 *
2795 * Handles PHY read/write requests; forwards requests to internal PHY device.
2796 *
2797 * @param pThis The device state structure.
2798 * @param offset Register offset in memory-mapped frame.
2799 * @param index Register index in register array.
2800 * @param value The value to store.
2802 * @thread EMT
2803 */
2804static int e1kRegWriteMDIC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2805{
2806 if (value & MDIC_INT_EN)
2807 {
2808 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
2809 pThis->szPrf));
2810 }
2811 else if (value & MDIC_READY)
2812 {
2813 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
2814 pThis->szPrf));
2815 }
2816 else if (GET_BITS_V(value, MDIC, PHY) != 1)
2817 {
2818 E1kLog(("%s WARNING! Access to invalid PHY detected, phy=%d.\n",
2819 pThis->szPrf, GET_BITS_V(value, MDIC, PHY)));
2820 /*
2821 * Some drivers scan the MDIO bus for a PHY. We can work with these
2822 * drivers if we set MDIC_READY and MDIC_ERROR when there isn't a PHY
2823 * at the requested address, see @bugref{7346}.
2824 */
2825 MDIC = MDIC_READY | MDIC_ERROR;
2826 }
2827 else
2828 {
2829 /* Store the value */
2830 e1kRegWriteDefault(pThis, offset, index, value);
2831 STAM_COUNTER_INC(&pThis->StatPHYAccesses);
2832 /* Forward op to PHY */
2833 if (value & MDIC_OP_READ)
2834 SET_BITS(MDIC, DATA, Phy::readRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG)));
2835 else
2836 Phy::writeRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK);
2837 /* Let software know that we are done */
2838 MDIC |= MDIC_READY;
2839 }
2840
2841 return VINF_SUCCESS;
2842}
2843
2844/**
2845 * Write handler for Interrupt Cause Read register.
2846 *
2847 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
2848 *
2849 * @param pThis The device state structure.
2850 * @param offset Register offset in memory-mapped frame.
2851 * @param index Register index in register array.
2852 * @param value The value to store.
2854 * @thread EMT
2855 */
2856static int e1kRegWriteICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2857{
2858 ICR &= ~value;
2859
2860 return VINF_SUCCESS;
2861}
2862
2863/**
2864 * Read handler for Interrupt Cause Read register.
2865 *
2866 * Reading this register acknowledges all interrupts.
2867 *
2868 * @returns VBox status code.
2869 *
2870 * @param pThis The device state structure.
2871 * @param offset Register offset in memory-mapped frame.
2872 * @param index Register index in register array.
2873 * @param   pu32Value   Where to store the register value.
2874 * @thread EMT
2875 */
2876static int e1kRegReadICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2877{
2878 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_READ);
2879 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2880 return rc;
2881
2882 uint32_t value = 0;
2883 rc = e1kRegReadDefault(pThis, offset, index, &value);
2884 if (RT_SUCCESS(rc))
2885 {
2886 if (value)
2887 {
2888 /*
2889 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
2890 * with disabled interrupts.
2891 */
2892 //if (IMS)
2893 if (1)
2894 {
2895 /*
2896 * Interrupts were enabled -- we are supposedly at the very
2897                      * beginning of the interrupt handler.
2898 */
2899 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
2900 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", pThis->szPrf, ICR));
2901 /* Clear all pending interrupts */
2902 ICR = 0;
2903 pThis->fIntRaised = false;
2904 /* Lower(0) INTA(0) */
2905 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
2906
2907 pThis->u64AckedAt = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
2908 if (pThis->fIntMaskUsed)
2909 pThis->fDelayInts = true;
2910 }
2911 else
2912 {
2913 /*
2914 * Interrupts are disabled -- in Windows guests the ICR read is done
2915 * just before re-enabling interrupts
2916 */
2917 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
2918 }
2919 }
2920 *pu32Value = value;
2921 }
2922 e1kCsLeave(pThis);
2923
2924 return rc;
2925}
2926
2927/**
2928 * Write handler for Interrupt Cause Set register.
2929 *
2930 * Bits corresponding to 1s in 'value' will be set in ICR register.
2931 *
2932 * @param pThis The device state structure.
2933 * @param offset Register offset in memory-mapped frame.
2934 * @param index Register index in register array.
2935 * @param value The value to store.
2936 * @param mask Used to implement partial writes (8 and 16-bit).
2937 * @thread EMT
2938 */
2939static int e1kRegWriteICS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2940{
2941 E1K_INC_ISTAT_CNT(pThis->uStatIntICS);
2942 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, value & g_aE1kRegMap[ICS_IDX].writable);
2943}
2944
2945/**
2946 * Write handler for Interrupt Mask Set register.
2947 *
2948 * Will trigger pending interrupts.
2949 *
2950 * @param pThis The device state structure.
2951 * @param offset Register offset in memory-mapped frame.
2952 * @param index Register index in register array.
2953 * @param value The value to store.
2954 * @param mask Used to implement partial writes (8 and 16-bit).
2955 * @thread EMT
2956 */
2957static int e1kRegWriteIMS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2958{
2959 IMS |= value;
2960 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
2961 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", pThis->szPrf));
2962 /* Mask changes, we need to raise pending interrupts. */
2963 if ((ICR & IMS) && !pThis->fLocked)
2964 {
2965 E1kLog2(("%s e1kRegWriteIMS: IRQ pending (%08x), arming late int timer...\n",
2966 pThis->szPrf, ICR));
2967 /* Raising an interrupt immediately causes win7 to hang upon NIC reconfiguration, see @bugref{5023}. */
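        /* Note: ITR is programmed in units of 256 ns, hence the ITR * 256 conversion to nanoseconds below. */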
2968 TMTimerSet(pThis->CTX_SUFF(pIntTimer), TMTimerFromNano(pThis->CTX_SUFF(pIntTimer), ITR * 256) +
2969 TMTimerGet(pThis->CTX_SUFF(pIntTimer)));
2970 }
2971
2972 return VINF_SUCCESS;
2973}
2974
2975/**
2976 * Write handler for Interrupt Mask Clear register.
2977 *
2978 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
2979 *
2980 * @param pThis The device state structure.
2981 * @param offset Register offset in memory-mapped frame.
2982 * @param index Register index in register array.
2983 * @param value The value to store.
2984 * @param mask Used to implement partial writes (8 and 16-bit).
2985 * @thread EMT
2986 */
2987static int e1kRegWriteIMC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2988{
2989 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
2990 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2991 return rc;
2992 if (pThis->fIntRaised)
2993 {
2994 /*
2995 * Technically we should reset fIntRaised in the ICR read handler, but doing so causes
2996 * Windows to freeze since it may receive an interrupt while still in the very beginning
2997 * of its interrupt handler.
2998 */
2999 E1K_INC_ISTAT_CNT(pThis->uStatIntLower);
3000 STAM_COUNTER_INC(&pThis->StatIntsPrevented);
3001 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
3002 /* Lower(0) INTA(0) */
3003 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
3004 pThis->fIntRaised = false;
3005 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
3006 }
3007 IMS &= ~value;
3008 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", pThis->szPrf));
3009 e1kCsLeave(pThis);
3010
3011 return VINF_SUCCESS;
3012}
3013
3014/**
3015 * Write handler for Receive Control register.
3016 *
3017 * @param pThis The device state structure.
3018 * @param offset Register offset in memory-mapped frame.
3019 * @param index Register index in register array.
3020 * @param value The value to store.
3021 * @param mask Used to implement partial writes (8 and 16-bit).
3022 * @thread EMT
3023 */
3024static int e1kRegWriteRCTL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3025{
3026 /* Update promiscuous mode */
3027 bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
3028 if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
3029 {
3030 /* Promiscuity has changed, pass the knowledge on. */
3031#ifndef IN_RING3
3032 return VINF_IOM_R3_IOPORT_WRITE;
3033#else
3034 if (pThis->pDrvR3)
3035 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, fBecomePromiscous);
3036#endif
3037 }
3038
3039 /* Adjust receive buffer size */
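    /*
     * RCTL.BSIZE selects the buffer size as 2048 >> BSIZE (2048, 1024, 512 or
     * 256 bytes); with RCTL.BSEX set the result is multiplied by 16, e.g.
     * (assumed values) BSIZE=11b together with BSEX yields a 4096-byte buffer.
     */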
3040 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
3041 if (value & RCTL_BSEX)
3042 cbRxBuf *= 16;
3043 if (cbRxBuf != pThis->u16RxBSize)
3044 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
3045 pThis->szPrf, cbRxBuf, pThis->u16RxBSize));
3046 pThis->u16RxBSize = cbRxBuf;
3047
3048 /* Update the register */
3049 e1kRegWriteDefault(pThis, offset, index, value);
3050
3051 return VINF_SUCCESS;
3052}
3053
3054/**
3055 * Write handler for Packet Buffer Allocation register.
3056 *
3057 * TXA = 64 - RXA.
3058 *
3059 * @param pThis The device state structure.
3060 * @param offset Register offset in memory-mapped frame.
3061 * @param index Register index in register array.
3062 * @param value The value to store.
3063 * @param mask Used to implement partial writes (8 and 16-bit).
3064 * @thread EMT
3065 */
3066static int e1kRegWritePBA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3067{
3068 e1kRegWriteDefault(pThis, offset, index, value);
3069 PBA_st->txa = 64 - PBA_st->rxa;
3070
3071 return VINF_SUCCESS;
3072}
3073
3074/**
3075 * Write handler for Receive Descriptor Tail register.
3076 *
3077 * @remarks Write into RDT forces switch to HC and signal to
3078 * e1kR3NetworkDown_WaitReceiveAvail().
3079 *
3080 * @returns VBox status code.
3081 *
3082 * @param pThis The device state structure.
3083 * @param offset Register offset in memory-mapped frame.
3084 * @param index Register index in register array.
3085 * @param value The value to store.
3086 * @param mask Used to implement partial writes (8 and 16-bit).
3087 * @thread EMT
3088 */
3089static int e1kRegWriteRDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3090{
3091#ifndef IN_RING3
3092 /* XXX */
3093// return VINF_IOM_R3_MMIO_WRITE;
3094#endif
3095 int rc = e1kCsRxEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3096 if (RT_LIKELY(rc == VINF_SUCCESS))
3097 {
3098 E1kLog(("%s e1kRegWriteRDT\n", pThis->szPrf));
3099 /*
3100 * Some drivers advance RDT too far, so that it equals RDH. This
3101 * somehow manages to work with real hardware but not with this
3102 * emulated device. We can work with these drivers if we just
3103 * write 1 less when we see a driver writing RDT equal to RDH,
3104 * see @bugref{7346}.
3105 */
3106 if (value == RDH)
3107 {
3108 if (RDH == 0)
3109 value = (RDLEN / sizeof(E1KRXDESC)) - 1;
3110 else
3111 value = RDH - 1;
3112 }
3113 rc = e1kRegWriteDefault(pThis, offset, index, value);
3114#ifdef E1K_WITH_RXD_CACHE
3115 /*
3116 * We need to fetch descriptors now as RDT may go a full circle
3117 * before we attempt to store a received packet. For example,
3118 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
3119 * size being only 8 descriptors! Note that we fetch descriptors
3120 * only when the cache is empty to reduce the number of memory reads
3121 * in case of frequent RDT writes. Don't fetch anything when the
3122 * receiver is disabled either as RDH, RDT, RDLEN can be in some
3123 * messed up state.
3124 * Note that even though the cache may seem empty, meaning that there are
3125 * no more available descriptors in it, it may still be in use by the RX
3126 * thread, which has not yet written the last descriptor back but has
3127 * temporarily released the RX lock in order to write the packet body
3128 * to the descriptor's buffer. At this point we are still going to prefetch,
3129 * but it won't actually fetch anything if there are no unused slots in
3130 * our "empty" cache (nRxDFetched==E1K_RXD_CACHE_SIZE). We must not
3131 * reset the cache here even if it appears empty. It will be reset at
3132 * a later point in e1kRxDGet().
3133 */
3134 if (e1kRxDIsCacheEmpty(pThis) && (RCTL & RCTL_EN))
3135 e1kRxDPrefetch(pThis);
3136#endif /* E1K_WITH_RXD_CACHE */
3137 e1kCsRxLeave(pThis);
3138 if (RT_SUCCESS(rc))
3139 {
3140/** @todo bird: Use SUPSem* for this so we can signal it in ring-0 as well
3141 * without requiring any context switches. We should also check the
3142 * wait condition before bothering to queue the item as we're currently
3143 * queuing thousands of items per second here in a normal transmit
3144 * scenario. Expect performance changes when fixing this! */
3145#ifdef IN_RING3
3146 /* Signal that we have more receive descriptors available. */
3147 e1kWakeupReceive(pThis->CTX_SUFF(pDevIns));
3148#else
3149 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pCanRxQueue));
3150 if (pItem)
3151 PDMQueueInsert(pThis->CTX_SUFF(pCanRxQueue), pItem);
3152#endif
3153 }
3154 }
3155 return rc;
3156}
3157
3158/**
3159 * Write handler for Receive Delay Timer register.
3160 *
3161 * @param pThis The device state structure.
3162 * @param offset Register offset in memory-mapped frame.
3163 * @param index Register index in register array.
3164 * @param value The value to store.
3165 * @param mask Used to implement partial writes (8 and 16-bit).
3166 * @thread EMT
3167 */
3168static int e1kRegWriteRDTR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3169{
3170 e1kRegWriteDefault(pThis, offset, index, value);
3171 if (value & RDTR_FPD)
3172 {
3173 /* Flush requested, cancel both timers and raise interrupt */
3174#ifdef E1K_USE_RX_TIMERS
3175 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3176 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3177#endif
3178 E1K_INC_ISTAT_CNT(pThis->uStatIntRDTR);
3179 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3180 }
3181
3182 return VINF_SUCCESS;
3183}
3184
3185DECLINLINE(uint32_t) e1kGetTxLen(PE1KSTATE pThis)
3186{
3187 /**
3188 * Make sure TDT won't change during computation. EMT may modify TDT at
3189 * any moment.
3190 */
3191 uint32_t tdt = TDT;
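    /*
     * Worked example (assumed values): with a 16-descriptor ring
     * (TDLEN / sizeof(E1KTXDESC) == 16), TDH=14 and TDT=3 the tail has
     * wrapped, so the expression below yields 16 + 3 - 14 = 5 pending
     * descriptors.
     */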
3192 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
3193}
3194
3195#ifdef IN_RING3
3196#ifdef E1K_TX_DELAY
3197
3198/**
3199 * Transmit Delay Timer handler.
3200 *
3201 * @remarks We only get here when the timer expires.
3202 *
3203 * @param pDevIns Pointer to device instance structure.
3204 * @param pTimer Pointer to the timer.
3205 * @param pvUser NULL.
3206 * @thread EMT
3207 */
3208static DECLCALLBACK(void) e1kTxDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3209{
3210 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3211 Assert(PDMCritSectIsOwner(&pThis->csTx));
3212
3213 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayExp);
3214#ifdef E1K_INT_STATS
3215 uint64_t u64Elapsed = RTTimeNanoTS() - pThis->u64ArmedAt;
3216 if (u64Elapsed > pThis->uStatMaxTxDelay)
3217 pThis->uStatMaxTxDelay = u64Elapsed;
3218#endif
3219 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
3220 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3221}
3222#endif /* E1K_TX_DELAY */
3223
3224#ifdef E1K_USE_TX_TIMERS
3225
3226/**
3227 * Transmit Interrupt Delay Timer handler.
3228 *
3229 * @remarks We only get here when the timer expires.
3230 *
3231 * @param pDevIns Pointer to device instance structure.
3232 * @param pTimer Pointer to the timer.
3233 * @param pvUser NULL.
3234 * @thread EMT
3235 */
3236static DECLCALLBACK(void) e1kTxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3237{
3238 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3239
3240 E1K_INC_ISTAT_CNT(pThis->uStatTID);
3241 /* Cancel absolute delay timer as we have already got attention */
3242#ifndef E1K_NO_TAD
3243 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
3244#endif /* E1K_NO_TAD */
3245 e1kRaiseInterrupt(pThis, ICR_TXDW);
3246}
3247
3248/**
3249 * Transmit Absolute Delay Timer handler.
3250 *
3251 * @remarks We only get here when the timer expires.
3252 *
3253 * @param pDevIns Pointer to device instance structure.
3254 * @param pTimer Pointer to the timer.
3255 * @param pvUser NULL.
3256 * @thread EMT
3257 */
3258static DECLCALLBACK(void) e1kTxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3259{
3260 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3261
3262 E1K_INC_ISTAT_CNT(pThis->uStatTAD);
3263 /* Cancel interrupt delay timer as we have already got attention */
3264 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
3265 e1kRaiseInterrupt(pThis, ICR_TXDW);
3266}
3267
3268#endif /* E1K_USE_TX_TIMERS */
3269#ifdef E1K_USE_RX_TIMERS
3270
3271/**
3272 * Receive Interrupt Delay Timer handler.
3273 *
3274 * @remarks We only get here when the timer expires.
3275 *
3276 * @param pDevIns Pointer to device instance structure.
3277 * @param pTimer Pointer to the timer.
3278 * @param pvUser NULL.
3279 * @thread EMT
3280 */
3281static DECLCALLBACK(void) e1kRxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3282{
3283 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3284
3285 E1K_INC_ISTAT_CNT(pThis->uStatRID);
3286 /* Cancel absolute delay timer as we have already got attention */
3287 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3288 e1kRaiseInterrupt(pThis, ICR_RXT0);
3289}
3290
3291/**
3292 * Receive Absolute Delay Timer handler.
3293 *
3294 * @remarks We only get here when the timer expires.
3295 *
3296 * @param pDevIns Pointer to device instance structure.
3297 * @param pTimer Pointer to the timer.
3298 * @param pvUser NULL.
3299 * @thread EMT
3300 */
3301static DECLCALLBACK(void) e1kRxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3302{
3303 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3304
3305 E1K_INC_ISTAT_CNT(pThis->uStatRAD);
3306 /* Cancel interrupt delay timer as we have already got attention */
3307 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3308 e1kRaiseInterrupt(pThis, ICR_RXT0);
3309}
3310
3311#endif /* E1K_USE_RX_TIMERS */
3312
3313/**
3314 * Late Interrupt Timer handler.
3315 *
3316 * @param pDevIns Pointer to device instance structure.
3317 * @param pTimer Pointer to the timer.
3318 * @param pvUser NULL.
3319 * @thread EMT
3320 */
3321static DECLCALLBACK(void) e1kLateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3322{
3323 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3324
3325 STAM_PROFILE_ADV_START(&pThis->StatLateIntTimer, a);
3326 STAM_COUNTER_INC(&pThis->StatLateInts);
3327 E1K_INC_ISTAT_CNT(pThis->uStatIntLate);
3328#if 0
3329 if (pThis->iStatIntLost > -100)
3330 pThis->iStatIntLost--;
3331#endif
3332 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, 0);
3333 STAM_PROFILE_ADV_STOP(&pThis->StatLateIntTimer, a);
3334}
3335
3336/**
3337 * Link Up Timer handler.
3338 *
3339 * @param pDevIns Pointer to device instance structure.
3340 * @param pTimer Pointer to the timer.
3341 * @param pvUser NULL.
3342 * @thread EMT
3343 */
3344static DECLCALLBACK(void) e1kLinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3345{
3346 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3347
3348 /*
3349 * This can happen if we set the link status to down while the link-up timer was
3350 * already armed (shortly after e1kLoadDone()), or when the cable was disconnected
3351 * and reconnected very quickly.
3352 */
3353 if (!pThis->fCableConnected)
3354 return;
3355
3356 e1kR3LinkUp(pThis);
3357}
3358
3359#endif /* IN_RING3 */
3360
3361/**
3362 * Sets up the GSO context according to the TSE new context descriptor.
3363 *
3364 * @param pGso The GSO context to setup.
3365 * @param pCtx The context descriptor.
3366 */
3367DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3368{
3369 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3370
3371 /*
3372 * See if the context descriptor describes something that could be TCP or
3373 * UDP over IPv[46].
3374 */
3375 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
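    /*
     * For a typical (assumed) untagged IPv4/TCP frame the guest would program
     * IPCSS=14 (end of the Ethernet header), TUCSS=34 (14 plus a 20-byte IPv4
     * header) and HDRLEN=54 (34 plus a 20-byte TCP header); the sanity checks
     * below verify exactly this kind of layout.
     */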
3376 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3377 {
3378 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3379 return;
3380 }
3381 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3382 {
3383 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3384 return;
3385 }
3386 if (RT_UNLIKELY( pCtx->dw2.fTCP
3387 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3388 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3389 {
3390 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3391 return;
3392 }
3393
3394 /* The end of the TCP/UDP checksum should stop at the end of the packet or at least after the headers. */
3395 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3396 {
3397 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3398 return;
3399 }
3400
3401 /* IPv4 checksum offset. */
3402 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3403 {
3404 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3405 return;
3406 }
3407
3408 /* TCP/UDP checksum offsets. */
3409 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3410 != ( pCtx->dw2.fTCP
3411 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3412 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3413 {
3414        E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->tu.u8CSO, pCtx->tu.u8CSS, pCtx->dw2.fTCP));
3415 return;
3416 }
3417
3418 /*
3419 * Because of internal networking using a 16-bit size field for GSO context
3420 * plus frame, we have to make sure we don't exceed this.
3421 */
3422 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3423 {
3424 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3425 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3426 return;
3427 }
3428
3429 /*
3430 * We're good for now - we'll do more checks when seeing the data.
3431 * So, figure the type of offloading and setup the context.
3432 */
3433 if (pCtx->dw2.fIP)
3434 {
3435 if (pCtx->dw2.fTCP)
3436 {
3437 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3438 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3439 }
3440 else
3441 {
3442 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3443 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3444 }
3445 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3446 * this yet it seems)... */
3447 }
3448 else
3449 {
3450 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /* @todo IPv6 UFO */
3451 if (pCtx->dw2.fTCP)
3452 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3453 else
3454 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3455 }
3456 pGso->offHdr1 = pCtx->ip.u8CSS;
3457 pGso->offHdr2 = pCtx->tu.u8CSS;
3458 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3459 pGso->cbMaxSeg = pCtx->dw3.u16MSS;
3460 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3461 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3462 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3463}
3464
3465/**
3466 * Checks if we can use GSO processing for the current TSE frame.
3467 *
3468 * @param pThis The device state structure.
3469 * @param pGso The GSO context.
3470 * @param pData The first data descriptor of the frame.
3471 * @param pCtx The TSO context descriptor.
3472 */
3473DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3474{
3475 if (!pData->cmd.fTSE)
3476 {
3477 E1kLog2(("e1kCanDoGso: !TSE\n"));
3478 return false;
3479 }
3480 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3481 {
3482 E1kLog(("e1kCanDoGso: VLE\n"));
3483 return false;
3484 }
3485 if (RT_UNLIKELY(!pThis->fGSOEnabled))
3486 {
3487 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3488 return false;
3489 }
3490
3491 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3492 {
3493 case PDMNETWORKGSOTYPE_IPV4_TCP:
3494 case PDMNETWORKGSOTYPE_IPV4_UDP:
3495 if (!pData->dw3.fIXSM)
3496 {
3497 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3498 return false;
3499 }
3500 if (!pData->dw3.fTXSM)
3501 {
3502 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3503 return false;
3504 }
3505 /** @todo what more check should we perform here? Ethernet frame type? */
3506 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3507 return true;
3508
3509 case PDMNETWORKGSOTYPE_IPV6_TCP:
3510 case PDMNETWORKGSOTYPE_IPV6_UDP:
3511 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3512 {
3513 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3514 return false;
3515 }
3516 if (!pData->dw3.fTXSM)
3517 {
3518                E1kLog(("e1kCanDoGso: !TXSM (IPv6)\n"));
3519 return false;
3520 }
3521 /** @todo what more check should we perform here? Ethernet frame type? */
3522            E1kLog2(("e1kCanDoGso: OK, IPv6\n"));
3523 return true;
3524
3525 default:
3526 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3527 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3528 return false;
3529 }
3530}
3531
3532/**
3533 * Frees the current xmit buffer.
3534 *
3535 * @param pThis The device state structure.
3536 */
3537static void e1kXmitFreeBuf(PE1KSTATE pThis)
3538{
3539 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3540 if (pSg)
3541 {
3542 pThis->CTX_SUFF(pTxSg) = NULL;
3543
3544 if (pSg->pvAllocator != pThis)
3545 {
3546 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3547 if (pDrv)
3548 pDrv->pfnFreeBuf(pDrv, pSg);
3549 }
3550 else
3551 {
3552 /* loopback */
3553 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3554 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3555 pSg->fFlags = 0;
3556 pSg->pvAllocator = NULL;
3557 }
3558 }
3559}
3560
3561#ifndef E1K_WITH_TXD_CACHE
3562/**
3563 * Allocates an xmit buffer.
3564 *
3565 * @returns See PDMINETWORKUP::pfnAllocBuf.
3566 * @param pThis The device state structure.
3567 * @param cbMin The minimum frame size.
3568 * @param fExactSize Whether cbMin is exact or if we have to max it
3569 * out to the max MTU size.
3570 * @param fGso Whether this is a GSO frame or not.
3571 */
3572DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, size_t cbMin, bool fExactSize, bool fGso)
3573{
3574 /* Adjust cbMin if necessary. */
3575 if (!fExactSize)
3576 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3577
3578 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3579 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3580 e1kXmitFreeBuf(pThis);
3581 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3582
3583 /*
3584 * Allocate the buffer.
3585 */
3586 PPDMSCATTERGATHER pSg;
3587 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3588 {
3589 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3590 if (RT_UNLIKELY(!pDrv))
3591 return VERR_NET_DOWN;
3592 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pThis->GsoCtx : NULL, &pSg);
3593 if (RT_FAILURE(rc))
3594 {
3595 /* Suspend TX as we are out of buffers atm */
3596 STATUS |= STATUS_TXOFF;
3597 return rc;
3598 }
3599 }
3600 else
3601 {
3602 /* Create a loopback using the fallback buffer and preallocated SG. */
3603 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3604 pSg = &pThis->uTxFallback.Sg;
3605 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3606 pSg->cbUsed = 0;
3607 pSg->cbAvailable = 0;
3608 pSg->pvAllocator = pThis;
3609 pSg->pvUser = NULL; /* No GSO here. */
3610 pSg->cSegs = 1;
3611 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3612 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3613 }
3614
3615 pThis->CTX_SUFF(pTxSg) = pSg;
3616 return VINF_SUCCESS;
3617}
3618#else /* E1K_WITH_TXD_CACHE */
3619/**
3620 * Allocates an xmit buffer.
3621 *
3622 * @returns See PDMINETWORKUP::pfnAllocBuf.
3623 * @param pThis The device state structure.
3624 * @param cbMin The minimum frame size.
3625 * @param fExactSize Whether cbMin is exact or if we have to max it
3626 * out to the max MTU size.
3627 * @param fGso Whether this is a GSO frame or not.
3628 */
3629DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, bool fGso)
3630{
3631 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3632 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3633 e1kXmitFreeBuf(pThis);
3634 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3635
3636 /*
3637 * Allocate the buffer.
3638 */
3639 PPDMSCATTERGATHER pSg;
3640 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3641 {
3642 if (pThis->cbTxAlloc == 0)
3643 {
3644 /* Zero packet, no need for the buffer */
3645 return VINF_SUCCESS;
3646 }
3647
3648 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3649 if (RT_UNLIKELY(!pDrv))
3650 return VERR_NET_DOWN;
3651 int rc = pDrv->pfnAllocBuf(pDrv, pThis->cbTxAlloc, fGso ? &pThis->GsoCtx : NULL, &pSg);
3652 if (RT_FAILURE(rc))
3653 {
3654 /* Suspend TX as we are out of buffers atm */
3655 STATUS |= STATUS_TXOFF;
3656 return rc;
3657 }
3658 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3659 pThis->szPrf, pThis->cbTxAlloc,
3660 pThis->fVTag ? "VLAN " : "",
3661 pThis->fGSO ? "GSO " : ""));
3662 pThis->cbTxAlloc = 0;
3663 }
3664 else
3665 {
3666 /* Create a loopback using the fallback buffer and preallocated SG. */
3667 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3668 pSg = &pThis->uTxFallback.Sg;
3669 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3670 pSg->cbUsed = 0;
3671 pSg->cbAvailable = 0;
3672 pSg->pvAllocator = pThis;
3673 pSg->pvUser = NULL; /* No GSO here. */
3674 pSg->cSegs = 1;
3675 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3676 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3677 }
3678
3679 pThis->CTX_SUFF(pTxSg) = pSg;
3680 return VINF_SUCCESS;
3681}
3682#endif /* E1K_WITH_TXD_CACHE */
3683
3684/**
3685 * Checks if it's a GSO buffer or not.
3686 *
3687 * @returns true / false.
3688 * @param pTxSg The scatter / gather buffer.
3689 */
3690DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3691{
3692#if 0
3693 if (!pTxSg)
3694 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
3695    if (pTxSg && !pTxSg->pvUser)
3696 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
3697#endif
3698 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3699}
3700
3701#ifndef E1K_WITH_TXD_CACHE
3702/**
3703 * Load transmit descriptor from guest memory.
3704 *
3705 * @param pThis The device state structure.
3706 * @param pDesc Pointer to descriptor union.
3707 * @param addr Physical address in guest context.
3708 * @thread E1000_TX
3709 */
3710DECLINLINE(void) e1kLoadDesc(PE1KSTATE pThis, E1KTXDESC* pDesc, RTGCPHYS addr)
3711{
3712 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3713}
3714#else /* E1K_WITH_TXD_CACHE */
3715/**
3716 * Load transmit descriptors from guest memory.
3717 *
3718 * We need two physical reads in case the tail wrapped around the end of TX
3719 * descriptor ring.
3720 *
3721 * @returns the actual number of descriptors fetched.
3722 * @param pThis The device state structure.
3723 * @param pDesc Pointer to descriptor union.
3724 * @param addr Physical address in guest context.
3725 * @thread E1000_TX
3726 */
3727DECLINLINE(unsigned) e1kTxDLoadMore(PE1KSTATE pThis)
3728{
3729 Assert(pThis->iTxDCurrent == 0);
3730 /* We've already loaded pThis->nTxDFetched descriptors past TDH. */
3731 unsigned nDescsAvailable = e1kGetTxLen(pThis) - pThis->nTxDFetched;
3732 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pThis->nTxDFetched);
3733 unsigned nDescsTotal = TDLEN / sizeof(E1KTXDESC);
3734 unsigned nFirstNotLoaded = (TDH + pThis->nTxDFetched) % nDescsTotal;
3735 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
3736 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u "
3737 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
3738 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
3739 nFirstNotLoaded, nDescsInSingleRead));
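    /*
     * Example (assumed values): with a 16-descriptor ring, TDH=14, TDT=4 and
     * an empty cache, 6 descriptors are pending. Assuming the cache can hold
     * them, the first read below fetches the 2 descriptors at indices 14-15
     * and the second read wraps around to fetch the remaining 4 starting at
     * the ring base.
     */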
3740 if (nDescsToFetch == 0)
3741 return 0;
3742 E1KTXDESC* pFirstEmptyDesc = &pThis->aTxDescriptors[pThis->nTxDFetched];
3743 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3744 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3745 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
3746 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3747 pThis->szPrf, nDescsInSingleRead,
3748 TDBAH, TDBAL + TDH * sizeof(E1KTXDESC),
3749 nFirstNotLoaded, TDLEN, TDH, TDT));
3750 if (nDescsToFetch > nDescsInSingleRead)
3751 {
3752 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3753 ((uint64_t)TDBAH << 32) + TDBAL,
3754 pFirstEmptyDesc + nDescsInSingleRead,
3755 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
3756 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
3757 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
3758 TDBAH, TDBAL));
3759 }
3760 pThis->nTxDFetched += nDescsToFetch;
3761 return nDescsToFetch;
3762}
3763
3764/**
3765 * Load transmit descriptors from guest memory only if there are no loaded
3766 * descriptors.
3767 *
3768 * @returns true if there are descriptors in cache.
3769 * @param pThis The device state structure.
3770 * @param pDesc Pointer to descriptor union.
3771 * @param addr Physical address in guest context.
3772 * @thread E1000_TX
3773 */
3774DECLINLINE(bool) e1kTxDLazyLoad(PE1KSTATE pThis)
3775{
3776 if (pThis->nTxDFetched == 0)
3777 return e1kTxDLoadMore(pThis) != 0;
3778 return true;
3779}
3780#endif /* E1K_WITH_TXD_CACHE */
3781
3782/**
3783 * Write back transmit descriptor to guest memory.
3784 *
3785 * @param pThis The device state structure.
3786 * @param pDesc Pointer to descriptor union.
3787 * @param addr Physical address in guest context.
3788 * @thread E1000_TX
3789 */
3790DECLINLINE(void) e1kWriteBackDesc(PE1KSTATE pThis, E1KTXDESC* pDesc, RTGCPHYS addr)
3791{
3792    /* Strictly speaking only the last half of the descriptor needs to be written back; the call below writes back the whole descriptor. */
3793 e1kPrintTDesc(pThis, pDesc, "^^^");
3794 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3795}
3796
3797/**
3798 * Transmit complete frame.
3799 *
3800 * @remarks We skip the FCS since we're not responsible for sending anything to
3801 * a real ethernet wire.
3802 *
3803 * @param pThis The device state structure.
3804 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3805 * @thread E1000_TX
3806 */
3807static void e1kTransmitFrame(PE1KSTATE pThis, bool fOnWorkerThread)
3808{
3809 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3810 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
3811 Assert(!pSg || pSg->cSegs == 1);
3812
3813 if (cbFrame > 70) /* unqualified guess */
3814 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
3815
3816#ifdef E1K_INT_STATS
3817 if (cbFrame <= 1514)
3818 E1K_INC_ISTAT_CNT(pThis->uStatTx1514);
3819 else if (cbFrame <= 2962)
3820 E1K_INC_ISTAT_CNT(pThis->uStatTx2962);
3821 else if (cbFrame <= 4410)
3822 E1K_INC_ISTAT_CNT(pThis->uStatTx4410);
3823 else if (cbFrame <= 5858)
3824 E1K_INC_ISTAT_CNT(pThis->uStatTx5858);
3825 else if (cbFrame <= 7306)
3826 E1K_INC_ISTAT_CNT(pThis->uStatTx7306);
3827 else if (cbFrame <= 8754)
3828 E1K_INC_ISTAT_CNT(pThis->uStatTx8754);
3829 else if (cbFrame <= 16384)
3830 E1K_INC_ISTAT_CNT(pThis->uStatTx16384);
3831 else if (cbFrame <= 32768)
3832 E1K_INC_ISTAT_CNT(pThis->uStatTx32768);
3833 else
3834 E1K_INC_ISTAT_CNT(pThis->uStatTxLarge);
3835#endif /* E1K_INT_STATS */
3836
3837 /* Add VLAN tag */
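    /*
     * The 4-byte 802.1Q tag belongs right after the two 6-byte MAC addresses:
     * the memmove() below shifts everything from the EtherType onwards by
     * 4 bytes and the tag (VET followed by the TCI) is written at offset 12.
     */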
3838 if (cbFrame > 12 && pThis->fVTag)
3839 {
3840 E1kLog3(("%s Inserting VLAN tag %08x\n",
3841 pThis->szPrf, RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16)));
3842 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
3843 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16);
3844 pSg->cbUsed += 4;
3845 cbFrame += 4;
3846 Assert(pSg->cbUsed == cbFrame);
3847 Assert(pSg->cbUsed <= pSg->cbAvailable);
3848 }
3849/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
3850 "%.*Rhxd\n"
3851 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
3852 pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
3853
3854 /* Update the stats */
3855 E1K_INC_CNT32(TPT);
3856 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
3857 E1K_INC_CNT32(GPTC);
3858 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
3859 E1K_INC_CNT32(BPTC);
3860 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
3861 E1K_INC_CNT32(MPTC);
3862 /* Update octet transmit counter */
3863 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
3864 if (pThis->CTX_SUFF(pDrv))
3865 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, cbFrame);
3866 if (cbFrame == 64)
3867 E1K_INC_CNT32(PTC64);
3868 else if (cbFrame < 128)
3869 E1K_INC_CNT32(PTC127);
3870 else if (cbFrame < 256)
3871 E1K_INC_CNT32(PTC255);
3872 else if (cbFrame < 512)
3873 E1K_INC_CNT32(PTC511);
3874 else if (cbFrame < 1024)
3875 E1K_INC_CNT32(PTC1023);
3876 else
3877 E1K_INC_CNT32(PTC1522);
3878
3879 E1K_INC_ISTAT_CNT(pThis->uStatTxFrm);
3880
3881 /*
3882 * Dump and send the packet.
3883 */
3884 int rc = VERR_NET_DOWN;
3885 if (pSg && pSg->pvAllocator != pThis)
3886 {
3887 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
3888
3889 pThis->CTX_SUFF(pTxSg) = NULL;
3890 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3891 if (pDrv)
3892 {
3893 /* Release critical section to avoid deadlock in CanReceive */
3894 //e1kCsLeave(pThis);
3895 STAM_PROFILE_START(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
3896 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
3897 STAM_PROFILE_STOP(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
3898 //e1kCsEnter(pThis, RT_SRC_POS);
3899 }
3900 }
3901 else if (pSg)
3902 {
3903 Assert(pSg->aSegs[0].pvSeg == pThis->aTxPacketFallback);
3904 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
3905
3906 /** @todo do we actually need to check that we're in loopback mode here? */
3907 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
3908 {
3909 E1KRXDST status;
3910 RT_ZERO(status);
3911 status.fPIF = true;
3912 e1kHandleRxPacket(pThis, pSg->aSegs[0].pvSeg, cbFrame, status);
3913 rc = VINF_SUCCESS;
3914 }
3915 e1kXmitFreeBuf(pThis);
3916 }
3917 else
3918 rc = VERR_NET_DOWN;
3919 if (RT_FAILURE(rc))
3920 {
3921 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
3922 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
3923 }
3924
3925 pThis->led.Actual.s.fWriting = 0;
3926}
3927
3928/**
3929 * Compute and write internet checksum (e1kCSum16) at the specified offset.
3930 *
3931 * @param pThis The device state structure.
3932 * @param pPkt Pointer to the packet.
3933 * @param u16PktLen Total length of the packet.
3934 * @param cso Offset in packet to write checksum at.
3935 * @param css Offset in packet to start computing
3936 * checksum from.
3937 * @param cse Offset in packet to stop computing
3938 * checksum at.
3939 * @thread E1000_TX
3940 */
3941static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
3942{
3943 if (css >= u16PktLen)
3944 {
3945 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
3946                 pThis->szPrf, css, u16PktLen));
3947 return;
3948 }
3949
3950 if (cso >= u16PktLen - 1)
3951 {
3952 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
3953 pThis->szPrf, cso, u16PktLen));
3954 return;
3955 }
3956
3957 if (cse == 0)
3958 cse = u16PktLen - 1;
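    /*
     * Worked example (assumed values): for a 60-byte frame with css=34, cso=50
     * and cse=0, cse is adjusted to 59 above, the Internet checksum is computed
     * over bytes 34..59 and the 16-bit result is stored at offset 50.
     */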
3959 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
3960 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", pThis->szPrf,
3961 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
3962 *(uint16_t*)(pPkt + cso) = u16ChkSum;
3963}
3964
3965/**
3966 * Add a part of descriptor's buffer to transmit frame.
3967 *
3968 * @remarks data.u64BufAddr is used unconditionally for both data
3969 * and legacy descriptors since it is identical to
3970 * legacy.u64BufAddr.
3971 *
3972 * @param pThis The device state structure.
3973 * @param pDesc Pointer to the descriptor to transmit.
3974 * @param u16Len Length of buffer to the end of segment.
3975 * @param fSend Force packet sending.
3976 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3977 * @thread E1000_TX
3978 */
3979#ifndef E1K_WITH_TXD_CACHE
3980static void e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
3981{
3982 /* TCP header being transmitted */
3983 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
3984 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
3985 /* IP header being transmitted */
3986 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
3987 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
3988
3989 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
3990 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
3991 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
3992
3993 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
3994 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
3995 E1kLog3(("%s Dump of the segment:\n"
3996 "%.*Rhxd\n"
3997 "%s --- End of dump ---\n",
3998 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
3999 pThis->u16TxPktLen += u16Len;
4000 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4001 pThis->szPrf, pThis->u16TxPktLen));
4002 if (pThis->u16HdrRemain > 0)
4003 {
4004 /* The header was not complete, check if it is now */
4005 if (u16Len >= pThis->u16HdrRemain)
4006 {
4007 /* The rest is payload */
4008 u16Len -= pThis->u16HdrRemain;
4009 pThis->u16HdrRemain = 0;
4010 /* Save partial checksum and flags */
4011 pThis->u32SavedCsum = pTcpHdr->chksum;
4012 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4013 /* Clear FIN and PSH flags now and set them only in the last segment */
4014 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4015 }
4016 else
4017 {
4018 /* Still not */
4019 pThis->u16HdrRemain -= u16Len;
4020 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4021 pThis->szPrf, pThis->u16HdrRemain));
4022 return;
4023 }
4024 }
4025
4026 pThis->u32PayRemain -= u16Len;
4027
4028 if (fSend)
4029 {
4030 /* Leave ethernet header intact */
4031 /* IP Total Length = payload + headers - ethernet header */
4032 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4033 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4034 pThis->szPrf, ntohs(pIpHdr->total_len)));
4035 /* Update IP Checksum */
4036 pIpHdr->chksum = 0;
4037 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4038 pThis->contextTSE.ip.u8CSO,
4039 pThis->contextTSE.ip.u8CSS,
4040 pThis->contextTSE.ip.u16CSE);
4041
4042 /* Update TCP flags */
4043 /* Restore original FIN and PSH flags for the last segment */
4044 if (pThis->u32PayRemain == 0)
4045 {
4046 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4047 E1K_INC_CNT32(TSCTC);
4048 }
4049 /* Add TCP length to partial pseudo header sum */
4050 uint32_t csum = pThis->u32SavedCsum
4051 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
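        /* Fold any carries back into the low 16 bits (one's complement addition). */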
4052 while (csum >> 16)
4053 csum = (csum >> 16) + (csum & 0xFFFF);
4054 pTcpHdr->chksum = csum;
4055 /* Compute final checksum */
4056 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4057 pThis->contextTSE.tu.u8CSO,
4058 pThis->contextTSE.tu.u8CSS,
4059 pThis->contextTSE.tu.u16CSE);
4060
4061 /*
4062         * Transmit it. If we've used the SG already, allocate a new one before
4063         * we copy the data into it.
4064 */
4065 if (!pThis->CTX_SUFF(pTxSg))
4066 e1kXmitAllocBuf(pThis, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
4067 if (pThis->CTX_SUFF(pTxSg))
4068 {
4069 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4070 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4071 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4072 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4073 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4074 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4075 }
4076 e1kTransmitFrame(pThis, fOnWorkerThread);
4077
4078 /* Update Sequence Number */
4079 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4080 - pThis->contextTSE.dw3.u8HDRLEN);
4081 /* Increment IP identification */
4082 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4083 }
4084}
4085#else /* E1K_WITH_TXD_CACHE */
4086static int e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4087{
4088 int rc = VINF_SUCCESS;
4089 /* TCP header being transmitted */
4090 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
4091 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4092 /* IP header being transmitted */
4093 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
4094 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4095
4096 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4097 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4098 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4099
4100 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4101 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4102 E1kLog3(("%s Dump of the segment:\n"
4103 "%.*Rhxd\n"
4104 "%s --- End of dump ---\n",
4105 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4106 pThis->u16TxPktLen += u16Len;
4107 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4108 pThis->szPrf, pThis->u16TxPktLen));
4109 if (pThis->u16HdrRemain > 0)
4110 {
4111 /* The header was not complete, check if it is now */
4112 if (u16Len >= pThis->u16HdrRemain)
4113 {
4114 /* The rest is payload */
4115 u16Len -= pThis->u16HdrRemain;
4116 pThis->u16HdrRemain = 0;
4117 /* Save partial checksum and flags */
4118 pThis->u32SavedCsum = pTcpHdr->chksum;
4119 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4120 /* Clear FIN and PSH flags now and set them only in the last segment */
4121 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4122 }
4123 else
4124 {
4125 /* Still not */
4126 pThis->u16HdrRemain -= u16Len;
4127 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4128 pThis->szPrf, pThis->u16HdrRemain));
4129 return rc;
4130 }
4131 }
4132
4133 pThis->u32PayRemain -= u16Len;
4134
4135 if (fSend)
4136 {
4137 /* Leave ethernet header intact */
4138 /* IP Total Length = payload + headers - ethernet header */
4139 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4140 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4141 pThis->szPrf, ntohs(pIpHdr->total_len)));
4142 /* Update IP Checksum */
4143 pIpHdr->chksum = 0;
4144 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4145 pThis->contextTSE.ip.u8CSO,
4146 pThis->contextTSE.ip.u8CSS,
4147 pThis->contextTSE.ip.u16CSE);
4148
4149 /* Update TCP flags */
4150 /* Restore original FIN and PSH flags for the last segment */
4151 if (pThis->u32PayRemain == 0)
4152 {
4153 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4154 E1K_INC_CNT32(TSCTC);
4155 }
4156 /* Add TCP length to partial pseudo header sum */
4157 uint32_t csum = pThis->u32SavedCsum
4158 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
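        /* Fold any carries back into the low 16 bits (one's complement addition). */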
4159 while (csum >> 16)
4160 csum = (csum >> 16) + (csum & 0xFFFF);
4161 pTcpHdr->chksum = csum;
4162 /* Compute final checksum */
4163 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4164 pThis->contextTSE.tu.u8CSO,
4165 pThis->contextTSE.tu.u8CSS,
4166 pThis->contextTSE.tu.u16CSE);
4167
4168 /*
4169 * Transmit it.
4170 */
4171 if (pThis->CTX_SUFF(pTxSg))
4172 {
4173 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4174 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4175 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4176 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4177 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4178 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4179 }
4180 e1kTransmitFrame(pThis, fOnWorkerThread);
4181
4182 /* Update Sequence Number */
4183 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4184 - pThis->contextTSE.dw3.u8HDRLEN);
4185 /* Increment IP identification */
4186 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4187
4188 /* Allocate new buffer for the next segment. */
4189 if (pThis->u32PayRemain)
4190 {
4191 pThis->cbTxAlloc = RT_MIN(pThis->u32PayRemain,
4192 pThis->contextTSE.dw3.u16MSS)
4193 + pThis->contextTSE.dw3.u8HDRLEN
4194 + (pThis->fVTag ? 4 : 0);
4195 rc = e1kXmitAllocBuf(pThis, false /* fGSO */);
4196 }
4197 }
4198
4199 return rc;
4200}
4201#endif /* E1K_WITH_TXD_CACHE */
4202
4203#ifndef E1K_WITH_TXD_CACHE
4204/**
4205 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4206 * frame.
4207 *
4208 * We construct the frame in the fallback buffer first and then copy it to the SG
4209 * buffer before passing it down to the network driver code.
4210 *
4211 * @returns true if the frame should be transmitted, false if not.
4212 *
4213 * @param pThis The device state structure.
4214 * @param pDesc Pointer to the descriptor to transmit.
4215 * @param cbFragment Length of descriptor's buffer.
4216 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4217 * @thread E1000_TX
4218 */
4219static bool e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC* pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4220{
4221 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4222 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4223 Assert(pDesc->data.cmd.fTSE);
4224 Assert(!e1kXmitIsGsoBuf(pTxSg));
4225
4226 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4227 Assert(u16MaxPktLen != 0);
4228 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
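    /*
     * Example (assumed values): with HDRLEN=54 and MSS=1460 the loop below
     * carves the TSE payload into standard 1514-byte Ethernet frames; the
     * final frame simply carries whatever payload remains.
     */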
4229
4230 /*
4231 * Carve out segments.
4232 */
4233 do
4234 {
4235 /* Calculate how many bytes we have left in this TCP segment */
4236 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4237 if (cb > cbFragment)
4238 {
4239 /* This descriptor fits completely into current segment */
4240 cb = cbFragment;
4241 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4242 }
4243 else
4244 {
4245 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4246 /*
4247 * Rewind the packet tail pointer to the beginning of payload,
4248 * so we continue writing right beyond the header.
4249 */
4250 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4251 }
4252
4253 pDesc->data.u64BufAddr += cb;
4254 cbFragment -= cb;
4255 } while (cbFragment > 0);
4256
4257 if (pDesc->data.cmd.fEOP)
4258 {
4259 /* End of packet, next segment will contain header. */
4260 if (pThis->u32PayRemain != 0)
4261 E1K_INC_CNT32(TSCTFC);
4262 pThis->u16TxPktLen = 0;
4263 e1kXmitFreeBuf(pThis);
4264 }
4265
4266 return false;
4267}
4268#else /* E1K_WITH_TXD_CACHE */
4269/**
4270 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4271 * frame.
4272 *
4273 * We construct the frame in the fallback buffer first and then copy it to the SG
4274 * buffer before passing it down to the network driver code.
4275 *
4276 * @returns error code
4277 *
4278 * @param pThis The device state structure.
4279 * @param pDesc Pointer to the descriptor to transmit.
4280 * @param cbFragment Length of descriptor's buffer.
4281 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4282 * @thread E1000_TX
4283 */
4284static int e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC* pDesc, bool fOnWorkerThread)
4285{
4286 int rc = VINF_SUCCESS;
4287 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4288 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4289 Assert(pDesc->data.cmd.fTSE);
4290 Assert(!e1kXmitIsGsoBuf(pTxSg));
4291
4292 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4293 Assert(u16MaxPktLen != 0);
4294 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
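    /*
     * Example (assumed values): with HDRLEN=54 and MSS=1460 the loop below
     * carves the TSE payload into standard 1514-byte Ethernet frames; the
     * final frame simply carries whatever payload remains.
     */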
4295
4296 /*
4297 * Carve out segments.
4298 */
4299 do
4300 {
4301 /* Calculate how many bytes we have left in this TCP segment */
4302 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4303 if (cb > pDesc->data.cmd.u20DTALEN)
4304 {
4305 /* This descriptor fits completely into current segment */
4306 cb = pDesc->data.cmd.u20DTALEN;
4307 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4308 }
4309 else
4310 {
4311 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4312 /*
4313 * Rewind the packet tail pointer to the beginning of payload,
4314 * so we continue writing right beyond the header.
4315 */
4316 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4317 }
4318
4319 pDesc->data.u64BufAddr += cb;
4320 pDesc->data.cmd.u20DTALEN -= cb;
4321 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4322
4323 if (pDesc->data.cmd.fEOP)
4324 {
4325 /* End of packet, next segment will contain header. */
4326 if (pThis->u32PayRemain != 0)
4327 E1K_INC_CNT32(TSCTFC);
4328 pThis->u16TxPktLen = 0;
4329 e1kXmitFreeBuf(pThis);
4330 }
4331
4332    return rc;
4333}
4334#endif /* E1K_WITH_TXD_CACHE */
4335
4336
4337/**
4338 * Add descriptor's buffer to transmit frame.
4339 *
4340 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4341 * TSE frames we cannot handle as GSO.
4342 *
4343 * @returns true on success, false on failure.
4344 *
4345 * @param pThis The device state structure.
4346 * @param PhysAddr The physical address of the descriptor buffer.
4347 * @param cbFragment Length of descriptor's buffer.
4348 * @thread E1000_TX
4349 */
4350static bool e1kAddToFrame(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint32_t cbFragment)
4351{
4352 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4353 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4354 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4355
4356 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4357 {
4358 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4359 return false;
4360 }
4361 if (RT_UNLIKELY( fGso && cbNewPkt > pTxSg->cbAvailable ))
4362 {
4363 E1kLog(("%s Transmit packet is too large: %u > %u(max)/GSO\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
4364 return false;
4365 }
4366
4367 if (RT_LIKELY(pTxSg))
4368 {
4369 Assert(pTxSg->cSegs == 1);
4370 Assert(pTxSg->cbUsed == pThis->u16TxPktLen);
4371
4372 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4373 (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4374
4375 pTxSg->cbUsed = cbNewPkt;
4376 }
4377 pThis->u16TxPktLen = cbNewPkt;
4378
4379 return true;
4380}
4381
4382
4383/**
4384 * Write the descriptor back to guest memory and notify the guest.
4385 *
4386 * @param pThis The device state structure.
4387 * @param pDesc Pointer to the descriptor have been transmitted.
4388 * @param addr Physical address of the descriptor in guest memory.
4389 * @thread E1000_TX
4390 */
4391static void e1kDescReport(PE1KSTATE pThis, E1KTXDESC* pDesc, RTGCPHYS addr)
4392{
4393 /*
4394 * We fake descriptor write-back bursting. Descriptors are written back as they are
4395 * processed.
4396 */
4397 /* Let's pretend we process descriptors. Write back with DD set. */
4398 /*
4399     * Prior to r71586 we tried to accommodate the case when write-back bursts
4400 * are enabled without actually implementing bursting by writing back all
4401 * descriptors, even the ones that do not have RS set. This caused kernel
4402 * panics with Linux SMP kernels, as the e1000 driver tried to free up skb
4403 * associated with written back descriptor if it happened to be a context
4404 * descriptor since context descriptors do not have skb associated to them.
4405 * Starting from r71586 we write back only the descriptors with RS set,
4406 * which is a little bit different from what the real hardware does in
4407     * case there is a chain of data descriptors where some of them have RS set
4408     * and others do not. It is a very uncommon scenario, imho.
4409 * We need to check RPS as well since some legacy drivers use it instead of
4410 * RS even with newer cards.
4411 */
4412 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4413 {
4414 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4415 e1kWriteBackDesc(pThis, pDesc, addr);
4416 if (pDesc->legacy.cmd.fEOP)
4417 {
4418#ifdef E1K_USE_TX_TIMERS
4419 if (pDesc->legacy.cmd.fIDE)
4420 {
4421 E1K_INC_ISTAT_CNT(pThis->uStatTxIDE);
4422 //if (pThis->fIntRaised)
4423 //{
4424 // /* Interrupt is already pending, no need for timers */
4425 // ICR |= ICR_TXDW;
4426 //}
4427 //else {
4428                /* Arm the timer to fire in TIDV usec (discard .024) */
4429 e1kArmTimer(pThis, pThis->CTX_SUFF(pTIDTimer), TIDV);
4430# ifndef E1K_NO_TAD
4431 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4432 E1kLog2(("%s Checking if TAD timer is running\n",
4433 pThis->szPrf));
4434 if (TADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pTADTimer)))
4435 e1kArmTimer(pThis, pThis->CTX_SUFF(pTADTimer), TADV);
4436# endif /* E1K_NO_TAD */
4437 }
4438 else
4439 {
4440 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4441 pThis->szPrf));
4442# ifndef E1K_NO_TAD
4443 /* Cancel both timers if armed and fire immediately. */
4444 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
4445# endif /* E1K_NO_TAD */
4446#endif /* E1K_USE_TX_TIMERS */
4447 E1K_INC_ISTAT_CNT(pThis->uStatIntTx);
4448 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXDW);
4449#ifdef E1K_USE_TX_TIMERS
4450 }
4451#endif /* E1K_USE_TX_TIMERS */
4452 }
4453 }
4454 else
4455 {
4456 E1K_INC_ISTAT_CNT(pThis->uStatTxNoRS);
4457 }
4458}
4459
4460#ifndef E1K_WITH_TXD_CACHE
4461
4462/**
4463 * Process Transmit Descriptor.
4464 *
4465 * E1000 supports three types of transmit descriptors:
4466 * - legacy data descriptors of older format (context-less).
4467 * - data the same as legacy but providing new offloading capabilities.
4468 * - context sets up the context for following data descriptors.
4469 *
4470 * @param pThis The device state structure.
4471 * @param pDesc Pointer to descriptor union.
4472 * @param addr Physical address of descriptor in guest memory.
4473 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4474 * @thread E1000_TX
4475 */
4476static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC* pDesc, RTGCPHYS addr, bool fOnWorkerThread)
4477{
4478 int rc = VINF_SUCCESS;
4479 uint32_t cbVTag = 0;
4480
4481 e1kPrintTDesc(pThis, pDesc, "vvv");
4482
4483#ifdef E1K_USE_TX_TIMERS
4484 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
4485#endif /* E1K_USE_TX_TIMERS */
4486
4487 switch (e1kGetDescType(pDesc))
4488 {
4489 case E1K_DTYP_CONTEXT:
4490 if (pDesc->context.dw2.fTSE)
4491 {
4492 pThis->contextTSE = pDesc->context;
4493 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4494 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4495 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4496 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4497 }
4498 else
4499 {
4500 pThis->contextNormal = pDesc->context;
4501 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4502 }
4503 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4504 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4505 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4506 pDesc->context.ip.u8CSS,
4507 pDesc->context.ip.u8CSO,
4508 pDesc->context.ip.u16CSE,
4509 pDesc->context.tu.u8CSS,
4510 pDesc->context.tu.u8CSO,
4511 pDesc->context.tu.u16CSE));
4512 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4513 e1kDescReport(pThis, pDesc, addr);
4514 break;
4515
4516 case E1K_DTYP_DATA:
4517 {
4518 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4519 {
4520 E1kLog2(("% Empty data descriptor, skipped.\n", pThis->szPrf));
4521 /** @todo Same as legacy when !TSE. See below. */
4522 break;
4523 }
4524 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4525 &pThis->StatTxDescTSEData:
4526 &pThis->StatTxDescData);
4527 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4528 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4529
4530 /*
4531             * The last descriptor of a non-TSE packet must contain the VLE flag.
4532             * TSE packets have the VLE flag in the first descriptor. The latter
4533             * case is taken care of a bit later when cbVTag gets assigned.
4534 *
4535 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4536 */
4537 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4538 {
4539 pThis->fVTag = pDesc->data.cmd.fVLE;
4540 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4541 }
4542 /*
4543 * First fragment: Allocate new buffer and save the IXSM and TXSM
4544 * packet options as these are only valid in the first fragment.
4545 */
4546 if (pThis->u16TxPktLen == 0)
4547 {
4548 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4549 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4550 E1kLog2(("%s Saving checksum flags:%s%s; \n", pThis->szPrf,
4551 pThis->fIPcsum ? " IP" : "",
4552 pThis->fTCPcsum ? " TCP/UDP" : ""));
4553 if (pDesc->data.cmd.fTSE)
4554 {
4555 /* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
4556 pThis->fVTag = pDesc->data.cmd.fVLE;
4557 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4558 cbVTag = pThis->fVTag ? 4 : 0;
4559 }
4560 else if (pDesc->data.cmd.fEOP)
4561 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4562 else
4563 cbVTag = 4;
4564 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4565 if (e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE))
4566 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4567 true /*fExactSize*/, true /*fGso*/);
4568 else if (pDesc->data.cmd.fTSE)
4569 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4570 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4571 else
4572 rc = e1kXmitAllocBuf(pThis, pDesc->data.cmd.u20DTALEN + cbVTag,
4573 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4574
4575 /**
4576 * @todo: Perhaps it is not that simple for GSO packets! We may
4577 * need to unwind some changes.
4578 */
4579 if (RT_FAILURE(rc))
4580 {
4581 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4582 break;
4583 }
4584            /** @todo Is there any way of indicating errors other than collisions? Like
4585 * VERR_NET_DOWN. */
4586 }
4587
4588 /*
4589 * Add the descriptor data to the frame. If the frame is complete,
4590 * transmit it and reset the u16TxPktLen field.
4591 */
4592 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4593 {
4594 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4595 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4596 if (pDesc->data.cmd.fEOP)
4597 {
4598 if ( fRc
4599 && pThis->CTX_SUFF(pTxSg)
4600 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4601 {
4602 e1kTransmitFrame(pThis, fOnWorkerThread);
4603 E1K_INC_CNT32(TSCTC);
4604 }
4605 else
4606 {
4607 if (fRc)
4608 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4609 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4610 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4611 e1kXmitFreeBuf(pThis);
4612 E1K_INC_CNT32(TSCTFC);
4613 }
4614 pThis->u16TxPktLen = 0;
4615 }
4616 }
4617 else if (!pDesc->data.cmd.fTSE)
4618 {
4619 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4620 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4621 if (pDesc->data.cmd.fEOP)
4622 {
4623 if (fRc && pThis->CTX_SUFF(pTxSg))
4624 {
4625 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4626 if (pThis->fIPcsum)
4627 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4628 pThis->contextNormal.ip.u8CSO,
4629 pThis->contextNormal.ip.u8CSS,
4630 pThis->contextNormal.ip.u16CSE);
4631 if (pThis->fTCPcsum)
4632 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4633 pThis->contextNormal.tu.u8CSO,
4634 pThis->contextNormal.tu.u8CSS,
4635 pThis->contextNormal.tu.u16CSE);
4636 e1kTransmitFrame(pThis, fOnWorkerThread);
4637 }
4638 else
4639 e1kXmitFreeBuf(pThis);
4640 pThis->u16TxPktLen = 0;
4641 }
4642 }
4643 else
4644 {
4645 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4646 e1kFallbackAddToFrame(pThis, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4647 }
4648
4649 e1kDescReport(pThis, pDesc, addr);
4650 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4651 break;
4652 }
4653
4654 case E1K_DTYP_LEGACY:
4655 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4656 {
4657 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4658 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4659 break;
4660 }
4661 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4662 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4663
4664 /* First fragment: allocate new buffer. */
4665 if (pThis->u16TxPktLen == 0)
4666 {
4667 if (pDesc->legacy.cmd.fEOP)
4668 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4669 else
4670 cbVTag = 4;
4671 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4672 /** @todo reset status bits? */
4673            rc = e1kXmitAllocBuf(pThis, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4674 if (RT_FAILURE(rc))
4675 {
4676 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4677 break;
4678 }
4679
4680            /** @todo Is there any way of indicating errors other than collisions? Like
4681 * VERR_NET_DOWN. */
4682 }
4683
4684 /* Add fragment to frame. */
4685 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4686 {
4687 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4688
4689 /* Last fragment: Transmit and reset the packet storage counter. */
4690 if (pDesc->legacy.cmd.fEOP)
4691 {
4692 pThis->fVTag = pDesc->legacy.cmd.fVLE;
4693 pThis->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4694 /** @todo Offload processing goes here. */
4695 e1kTransmitFrame(pThis, fOnWorkerThread);
4696 pThis->u16TxPktLen = 0;
4697 }
4698 }
4699 /* Last fragment + failure: free the buffer and reset the storage counter. */
4700 else if (pDesc->legacy.cmd.fEOP)
4701 {
4702 e1kXmitFreeBuf(pThis);
4703 pThis->u16TxPktLen = 0;
4704 }
4705
4706 e1kDescReport(pThis, pDesc, addr);
4707 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4708 break;
4709
4710 default:
4711 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4712 pThis->szPrf, e1kGetDescType(pDesc)));
4713 break;
4714 }
4715
4716 return rc;
4717}
4718
4719#else /* E1K_WITH_TXD_CACHE */
4720
4721/**
4722 * Process Transmit Descriptor.
4723 *
4724 * E1000 supports three types of transmit descriptors:
4725 * - legacy data descriptors of older format (context-less).
4726 * - data the same as legacy but providing new offloading capabilities.
4727 * - context sets up the context for following data descriptors.
4728 *
4729 * @param pThis The device state structure.
4730 * @param pDesc Pointer to descriptor union.
4731 * @param addr Physical address of descriptor in guest memory.
4732 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4734 * @thread E1000_TX
4735 */
4736static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC* pDesc, RTGCPHYS addr,
4737 bool fOnWorkerThread)
4738{
4739 int rc = VINF_SUCCESS;
4740 uint32_t cbVTag = 0;
4741
4742 e1kPrintTDesc(pThis, pDesc, "vvv");
4743
4744#ifdef E1K_USE_TX_TIMERS
4745 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
4746#endif /* E1K_USE_TX_TIMERS */
4747
4748 switch (e1kGetDescType(pDesc))
4749 {
4750 case E1K_DTYP_CONTEXT:
4751            /* The caller has already updated the context. */
4752 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4753 e1kDescReport(pThis, pDesc, addr);
4754 break;
4755
4756 case E1K_DTYP_DATA:
4757 {
4758 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4759 &pThis->StatTxDescTSEData:
4760 &pThis->StatTxDescData);
4761 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4762 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4763 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4764 {
4765                E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4766 }
4767 else
4768 {
4769 /*
4770 * Add the descriptor data to the frame. If the frame is complete,
4771 * transmit it and reset the u16TxPktLen field.
4772 */
4773 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4774 {
4775 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4776 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4777 if (pDesc->data.cmd.fEOP)
4778 {
4779 if ( fRc
4780 && pThis->CTX_SUFF(pTxSg)
4781 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4782 {
4783 e1kTransmitFrame(pThis, fOnWorkerThread);
4784 E1K_INC_CNT32(TSCTC);
4785 }
4786 else
4787 {
4788 if (fRc)
4789 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4790 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4791 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4792 e1kXmitFreeBuf(pThis);
4793 E1K_INC_CNT32(TSCTFC);
4794 }
4795 pThis->u16TxPktLen = 0;
4796 }
4797 }
4798 else if (!pDesc->data.cmd.fTSE)
4799 {
4800 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4801 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4802 if (pDesc->data.cmd.fEOP)
4803 {
4804 if (fRc && pThis->CTX_SUFF(pTxSg))
4805 {
4806 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4807 if (pThis->fIPcsum)
4808 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4809 pThis->contextNormal.ip.u8CSO,
4810 pThis->contextNormal.ip.u8CSS,
4811 pThis->contextNormal.ip.u16CSE);
4812 if (pThis->fTCPcsum)
4813 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4814 pThis->contextNormal.tu.u8CSO,
4815 pThis->contextNormal.tu.u8CSS,
4816 pThis->contextNormal.tu.u16CSE);
4817 e1kTransmitFrame(pThis, fOnWorkerThread);
4818 }
4819 else
4820 e1kXmitFreeBuf(pThis);
4821 pThis->u16TxPktLen = 0;
4822 }
4823 }
4824 else
4825 {
4826 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4827 rc = e1kFallbackAddToFrame(pThis, pDesc, fOnWorkerThread);
4828 }
4829 }
4830 e1kDescReport(pThis, pDesc, addr);
4831 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4832 break;
4833 }
4834
4835 case E1K_DTYP_LEGACY:
4836 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4837 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4838 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4839 {
4840 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4841 }
4842 else
4843 {
4844 /* Add fragment to frame. */
4845 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4846 {
4847 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4848
4849 /* Last fragment: Transmit and reset the packet storage counter. */
4850 if (pDesc->legacy.cmd.fEOP)
4851 {
4852 if (pDesc->legacy.cmd.fIC)
4853 {
4854 e1kInsertChecksum(pThis,
4855 (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
4856 pThis->u16TxPktLen,
4857 pDesc->legacy.cmd.u8CSO,
4858 pDesc->legacy.dw3.u8CSS,
4859 0);
4860 }
4861 e1kTransmitFrame(pThis, fOnWorkerThread);
4862 pThis->u16TxPktLen = 0;
4863 }
4864 }
4865 /* Last fragment + failure: free the buffer and reset the storage counter. */
4866 else if (pDesc->legacy.cmd.fEOP)
4867 {
4868 e1kXmitFreeBuf(pThis);
4869 pThis->u16TxPktLen = 0;
4870 }
4871 }
4872 e1kDescReport(pThis, pDesc, addr);
4873 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4874 break;
4875
4876 default:
4877 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4878 pThis->szPrf, e1kGetDescType(pDesc)));
4879 break;
4880 }
4881
4882 return rc;
4883}
4884
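/**
 * Update the transmission context from a context descriptor.
 *
 * Stores the descriptor in either the TSE or the normal context of the device
 * state (depending on the TSE bit) and, for TSE contexts, initializes the
 * remaining payload/header counters and the GSO context.
 *
 * @param   pThis       The device state structure.
 * @param   pDesc       Pointer to the context descriptor.
 * @thread  E1000_TX
 */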
4885DECLINLINE(void) e1kUpdateTxContext(PE1KSTATE pThis, E1KTXDESC* pDesc)
4886{
4887 if (pDesc->context.dw2.fTSE)
4888 {
4889 pThis->contextTSE = pDesc->context;
4890 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4891 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4892 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4893 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4894 }
4895 else
4896 {
4897 pThis->contextNormal = pDesc->context;
4898 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4899 }
4900 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4901 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4902 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4903 pDesc->context.ip.u8CSS,
4904 pDesc->context.ip.u8CSO,
4905 pDesc->context.ip.u16CSE,
4906 pDesc->context.tu.u8CSS,
4907 pDesc->context.tu.u8CSO,
4908 pDesc->context.tu.u16CSE));
4909}
4910
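/**
 * Locate a complete packet in the transmit descriptor cache.
 *
 * Walks the fetched descriptors starting at iTxDCurrent, applying context
 * descriptors along the way, until a descriptor with EOP set is found. On
 * success the required transmit buffer size is stored in cbTxAlloc, taking
 * VLAN tagging and TSE segmentation into account.
 *
 * @returns true if a complete packet (or a run of empty descriptors) was
 *          found or had been located earlier, false if more descriptors need
 *          to be fetched.
 * @param   pThis       The device state structure.
 * @thread  E1000_TX
 */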
4911static bool e1kLocateTxPacket(PE1KSTATE pThis)
4912{
4913 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
4914 pThis->szPrf, pThis->cbTxAlloc));
4915 /* Check if we have located the packet already. */
4916 if (pThis->cbTxAlloc)
4917 {
4918 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
4919 pThis->szPrf, pThis->cbTxAlloc));
4920 return true;
4921 }
4922
4923 bool fTSE = false;
4924 uint32_t cbPacket = 0;
4925
4926 for (int i = pThis->iTxDCurrent; i < pThis->nTxDFetched; ++i)
4927 {
4928 E1KTXDESC *pDesc = &pThis->aTxDescriptors[i];
4929 switch (e1kGetDescType(pDesc))
4930 {
4931 case E1K_DTYP_CONTEXT:
4932 e1kUpdateTxContext(pThis, pDesc);
4933 continue;
4934 case E1K_DTYP_LEGACY:
4935 /* Skip empty descriptors. */
4936 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
4937 break;
4938 cbPacket += pDesc->legacy.cmd.u16Length;
4939 pThis->fGSO = false;
4940 break;
4941 case E1K_DTYP_DATA:
4942 /* Skip empty descriptors. */
4943 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
4944 break;
4945 if (cbPacket == 0)
4946 {
4947 /*
4948 * The first fragment: save IXSM and TXSM options
4949 * as these are only valid in the first fragment.
4950 */
4951 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4952 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4953 fTSE = pDesc->data.cmd.fTSE;
4954 /*
4955 * TSE descriptors have VLE bit properly set in
4956 * the first fragment.
4957 */
4958 if (fTSE)
4959 {
4960 pThis->fVTag = pDesc->data.cmd.fVLE;
4961 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4962 }
4963 pThis->fGSO = e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE);
4964 }
4965 cbPacket += pDesc->data.cmd.u20DTALEN;
4966 break;
4967 default:
4968 AssertMsgFailed(("Impossible descriptor type!"));
4969 }
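        /* Note: EOP occupies the same bit position in legacy and data
         * descriptors, so checking the legacy layout covers both types. */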
4970 if (pDesc->legacy.cmd.fEOP)
4971 {
4972 /*
4973 * Non-TSE descriptors have VLE bit properly set in
4974 * the last fragment.
4975 */
4976 if (!fTSE)
4977 {
4978 pThis->fVTag = pDesc->data.cmd.fVLE;
4979 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4980 }
4981 /*
4982 * Compute the required buffer size. If we cannot do GSO but still
4983 * have to do segmentation we allocate the first segment only.
4984 */
4985 pThis->cbTxAlloc = (!fTSE || pThis->fGSO) ?
4986 cbPacket :
4987 RT_MIN(cbPacket, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN);
4988 if (pThis->fVTag)
4989 pThis->cbTxAlloc += 4;
4990 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
4991 pThis->szPrf, pThis->cbTxAlloc));
4992 return true;
4993 }
4994 }
4995
4996 if (cbPacket == 0 && pThis->nTxDFetched - pThis->iTxDCurrent > 0)
4997 {
4998        /* All descriptors were empty; we need to process them as a dummy packet. */
4999 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
5000 pThis->szPrf, pThis->cbTxAlloc));
5001 return true;
5002 }
5003 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d\n",
5004 pThis->szPrf, pThis->cbTxAlloc));
5005 return false;
5006}
5007
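/**
 * Process cached transmit descriptors up to the end of the current packet.
 *
 * Feeds descriptors from the cache to e1kXmitDesc(), advancing TDH with
 * wrap-around and raising ICR.TXD_LOW when the ring drops below the low
 * threshold. Stops after the end-of-packet descriptor or on failure.
 *
 * @returns VBox status code.
 * @param   pThis            The device state structure.
 * @param   fOnWorkerThread  Whether we're on a worker thread or an EMT.
 * @thread  E1000_TX
 */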
5008static int e1kXmitPacket(PE1KSTATE pThis, bool fOnWorkerThread)
5009{
5010 int rc = VINF_SUCCESS;
5011
5012 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
5013 pThis->szPrf, pThis->iTxDCurrent, pThis->nTxDFetched));
5014
5015 while (pThis->iTxDCurrent < pThis->nTxDFetched)
5016 {
5017 E1KTXDESC *pDesc = &pThis->aTxDescriptors[pThis->iTxDCurrent];
5018 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5019 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
5020 rc = e1kXmitDesc(pThis, pDesc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5021 if (RT_FAILURE(rc))
5022 break;
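        /* Advance the head pointer, wrapping around at the end of the descriptor ring. */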
5023 if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
5024 TDH = 0;
5025 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
5026 if (uLowThreshold != 0 && e1kGetTxLen(pThis) <= uLowThreshold)
5027 {
5028 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5029 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5030 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5031 }
5032 ++pThis->iTxDCurrent;
5033 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
5034 break;
5035 }
5036
5037 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
5038 pThis->szPrf, rc, pThis->iTxDCurrent, pThis->nTxDFetched));
5039 return rc;
5040}
5041
5042#endif /* E1K_WITH_TXD_CACHE */
5043#ifndef E1K_WITH_TXD_CACHE
5044
5045/**
5046 * Transmit pending descriptors.
5047 *
5048 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5049 *
5050 * @param pThis The E1000 state.
5051 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5052 */
5053static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5054{
5055 int rc = VINF_SUCCESS;
5056
5057 /* Check if transmitter is enabled. */
5058 if (!(TCTL & TCTL_EN))
5059 return VINF_SUCCESS;
5060 /*
5061 * Grab the xmit lock of the driver as well as the E1K device state.
5062 */
5063 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5064 if (RT_LIKELY(rc == VINF_SUCCESS))
5065 {
5066 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5067 if (pDrv)
5068 {
5069 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5070 if (RT_FAILURE(rc))
5071 {
5072 e1kCsTxLeave(pThis);
5073 return rc;
5074 }
5075 }
5076 /*
5077 * Process all pending descriptors.
5078 * Note! Do not process descriptors in locked state
5079 */
5080 while (TDH != TDT && !pThis->fLocked)
5081 {
5082 E1KTXDESC desc;
5083 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5084 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
5085
5086 e1kLoadDesc(pThis, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
5087 rc = e1kXmitDesc(pThis, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5088            /* If we failed to transmit the descriptor we will try it again later. */
5089 if (RT_FAILURE(rc))
5090 break;
5091 if (++TDH * sizeof(desc) >= TDLEN)
5092 TDH = 0;
5093
5094 if (e1kGetTxLen(pThis) <= GET_BITS(TXDCTL, LWTHRESH)*8)
5095 {
5096 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5097 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5098 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5099 }
5100
5101 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5102 }
5103
5104 /// @todo: uncomment: pThis->uStatIntTXQE++;
5105 /// @todo: uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5106 /*
5107 * Release the lock.
5108 */
5109 if (pDrv)
5110 pDrv->pfnEndXmit(pDrv);
5111 e1kCsTxLeave(pThis);
5112 }
5113
5114 return rc;
5115}
5116
5117#else /* E1K_WITH_TXD_CACHE */
5118
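/**
 * Dump the transmit descriptor ring and the internal descriptor cache to the
 * release log.
 *
 * Used when no complete packet can be located in the cache, to aid debugging.
 *
 * @param   pThis       The device state structure.
 */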
5119static void e1kDumpTxDCache(PE1KSTATE pThis)
5120{
5121 unsigned i, cDescs = TDLEN / sizeof(E1KTXDESC);
5122 uint32_t tdh = TDH;
5123 LogRel(("-- Transmit Descriptors (%d total) --\n", cDescs));
5124 for (i = 0; i < cDescs; ++i)
5125 {
5126 E1KTXDESC desc;
5127 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(TDBAH, TDBAL, i),
5128 &desc, sizeof(desc));
5129 if (i == tdh)
5130 LogRel((">>> "));
5131 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5132 }
5133 LogRel(("-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5134 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE));
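    /* The descriptor at cache index iTxDCurrent corresponds to ring index TDH,
     * so compute the ring index of the first cached descriptor. */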
5135 if (tdh > pThis->iTxDCurrent)
5136 tdh -= pThis->iTxDCurrent;
5137 else
5138 tdh = cDescs + tdh - pThis->iTxDCurrent;
5139 for (i = 0; i < pThis->nTxDFetched; ++i)
5140 {
5141 if (i == pThis->iTxDCurrent)
5142 LogRel((">>> "));
5143 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
5144 }
5145}
5146
5147/**
5148 * Transmit pending descriptors.
5149 *
5150 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5151 *
5152 * @param pThis The E1000 state.
5153 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5154 */
5155static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5156{
5157 int rc = VINF_SUCCESS;
5158
5159 /* Check if transmitter is enabled. */
5160 if (!(TCTL & TCTL_EN))
5161 return VINF_SUCCESS;
5162 /*
5163 * Grab the xmit lock of the driver as well as the E1K device state.
5164 */
5165 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5166 if (pDrv)
5167 {
5168 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5169 if (RT_FAILURE(rc))
5170 return rc;
5171 }
5172
5173 /*
5174 * Process all pending descriptors.
5175 * Note! Do not process descriptors in locked state
5176 */
5177 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5178 if (RT_LIKELY(rc == VINF_SUCCESS))
5179 {
5180 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5181 /*
5182 * fIncomplete is set whenever we try to fetch additional descriptors
5183         * for an incomplete packet. If we fail to locate a complete packet on
5184         * the next iteration we need to reset the cache or we risk getting
5185 * stuck in this loop forever.
5186 */
5187 bool fIncomplete = false;
5188 while (!pThis->fLocked && e1kTxDLazyLoad(pThis))
5189 {
5190 while (e1kLocateTxPacket(pThis))
5191 {
5192 fIncomplete = false;
5193 /* Found a complete packet, allocate it. */
5194 rc = e1kXmitAllocBuf(pThis, pThis->fGSO);
5195 /* If we're out of bandwidth we'll come back later. */
5196 if (RT_FAILURE(rc))
5197 goto out;
5198 /* Copy the packet to allocated buffer and send it. */
5199 rc = e1kXmitPacket(pThis, fOnWorkerThread);
5200 /* If we're out of bandwidth we'll come back later. */
5201 if (RT_FAILURE(rc))
5202 goto out;
5203 }
5204 uint8_t u8Remain = pThis->nTxDFetched - pThis->iTxDCurrent;
5205 if (RT_UNLIKELY(fIncomplete))
5206 {
5207 static bool fTxDCacheDumped = false;
5208 /*
5209 * The descriptor cache is full, but we were unable to find
5210 * a complete packet in it. Drop the cache and hope that
5211             * the guest driver can recover from the network card error.
5212 */
5213 LogRel(("%s No complete packets in%s TxD cache! "
5214 "Fetched=%d, current=%d, TX len=%d.\n",
5215 pThis->szPrf,
5216 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5217 pThis->nTxDFetched, pThis->iTxDCurrent,
5218 e1kGetTxLen(pThis)));
5219 if (!fTxDCacheDumped)
5220 {
5221 fTxDCacheDumped = true;
5222 e1kDumpTxDCache(pThis);
5223 }
5224 pThis->iTxDCurrent = pThis->nTxDFetched = 0;
5225 /*
5226 * Returning an error at this point means Guru in R0
5227 * (see @bugref{6428}).
5228 */
5229# ifdef IN_RING3
5230 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5231# else /* !IN_RING3 */
5232 rc = VINF_IOM_R3_IOPORT_WRITE;
5233# endif /* !IN_RING3 */
5234 goto out;
5235 }
5236 if (u8Remain > 0)
5237 {
5238 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5239 "%d more are available\n",
5240 pThis->szPrf, pThis->iTxDCurrent, u8Remain,
5241 e1kGetTxLen(pThis) - u8Remain));
5242
5243 /*
5244 * A packet was partially fetched. Move incomplete packet to
5245 * the beginning of cache buffer, then load more descriptors.
5246 */
5247 memmove(pThis->aTxDescriptors,
5248 &pThis->aTxDescriptors[pThis->iTxDCurrent],
5249 u8Remain * sizeof(E1KTXDESC));
5250 pThis->iTxDCurrent = 0;
5251 pThis->nTxDFetched = u8Remain;
5252 e1kTxDLoadMore(pThis);
5253 fIncomplete = true;
5254 }
5255 else
5256 pThis->nTxDFetched = 0;
5257 pThis->iTxDCurrent = 0;
5258 }
5259 if (!pThis->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5260 {
5261 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5262 pThis->szPrf));
5263 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5264 }
5265out:
5266 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5267
5268 /// @todo: uncomment: pThis->uStatIntTXQE++;
5269 /// @todo: uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5270
5271 e1kCsTxLeave(pThis);
5272 }
5273
5274
5275 /*
5276 * Release the lock.
5277 */
5278 if (pDrv)
5279 pDrv->pfnEndXmit(pDrv);
5280 return rc;
5281}
5282
5283#endif /* E1K_WITH_TXD_CACHE */
5284#ifdef IN_RING3
5285
5286/**
5287 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5288 */
5289static DECLCALLBACK(void) e1kR3NetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5290{
5291 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
5292 /* Resume suspended transmission */
5293 STATUS &= ~STATUS_TXOFF;
5294 e1kXmitPending(pThis, true /*fOnWorkerThread*/);
5295}
5296
5297/**
5298 * Callback for consuming from transmit queue. It gets called in R3 whenever
5299 * we enqueue something in R0/GC.
5300 *
5301 * @returns true
5302 * @param pDevIns Pointer to device instance structure.
5303 * @param pItem Pointer to the element being dequeued (not used).
5304 * @thread ???
5305 */
5306static DECLCALLBACK(bool) e1kTxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5307{
5308 NOREF(pItem);
5309 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5310 E1kLog2(("%s e1kTxQueueConsumer:\n", pThis->szPrf));
5311
5312 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
5313 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
5314
5315 return true;
5316}
5317
5318/**
5319 * Handler for the wakeup signaller queue.
5320 */
5321static DECLCALLBACK(bool) e1kCanRxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5322{
5323 e1kWakeupReceive(pDevIns);
5324 return true;
5325}
5326
5327#endif /* IN_RING3 */
5328
5329/**
5330 * Write handler for Transmit Descriptor Tail register.
5331 *
5332 * @param pThis The device state structure.
5333 * @param offset Register offset in memory-mapped frame.
5334 * @param index Register index in register array.
5335 * @param value The value to store.
5337 * @thread EMT
5338 */
5339static int e1kRegWriteTDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5340{
5341 int rc = e1kRegWriteDefault(pThis, offset, index, value);
5342
5343 /* All descriptors starting with head and not including tail belong to us. */
5344 /* Process them. */
5345 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5346 pThis->szPrf, TDBAL, TDBAH, TDLEN, TDH, TDT));
5347
5348 /* Ignore TDT writes when the link is down. */
5349 if (TDH != TDT && (STATUS & STATUS_LU))
5350 {
5351 Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", TDH, TDT, e1kGetTxLen(pThis)));
5352 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5353 pThis->szPrf, e1kGetTxLen(pThis)));
5354
5355 /* Transmit pending packets if possible, defer it if we cannot do it
5356 in the current context. */
5357#ifdef E1K_TX_DELAY
5358 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5359 if (RT_LIKELY(rc == VINF_SUCCESS))
5360 {
5361 if (!TMTimerIsActive(pThis->CTX_SUFF(pTXDTimer)))
5362 {
5363#ifdef E1K_INT_STATS
5364 pThis->u64ArmedAt = RTTimeNanoTS();
5365#endif
5366 e1kArmTimer(pThis, pThis->CTX_SUFF(pTXDTimer), E1K_TX_DELAY);
5367 }
5368 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayed);
5369 e1kCsTxLeave(pThis);
5370 return rc;
5371 }
5372 /* We failed to enter the TX critical section -- transmit as usual. */
5373#endif /* E1K_TX_DELAY */
5374#ifndef IN_RING3
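        /* The driver is not accessible from this context (if attached at all);
         * hand the transmit over to ring-3 via the TX queue. */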
5375 if (!pThis->CTX_SUFF(pDrv))
5376 {
5377 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pTxQueue));
5378            if (RT_LIKELY(pItem))
5379 PDMQueueInsert(pThis->CTX_SUFF(pTxQueue), pItem);
5380 }
5381 else
5382#endif
5383 {
5384 rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
5385 if (rc == VERR_TRY_AGAIN)
5386 rc = VINF_SUCCESS;
5387 else if (rc == VERR_SEM_BUSY)
5388 rc = VINF_IOM_R3_IOPORT_WRITE;
5389 AssertRC(rc);
5390 }
5391 }
5392
5393 return rc;
5394}
5395
5396/**
5397 * Write handler for Multicast Table Array registers.
5398 *
5399 * @param pThis The device state structure.
5400 * @param offset Register offset in memory-mapped frame.
5401 * @param index Register index in register array.
5402 * @param value The value to store.
5403 * @thread EMT
5404 */
5405static int e1kRegWriteMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5406{
5407 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5408 pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])] = value;
5409
5410 return VINF_SUCCESS;
5411}
5412
5413/**
5414 * Read handler for Multicast Table Array registers.
5415 *
5416 * @returns VBox status code.
5417 *
5418 * @param pThis The device state structure.
5419 * @param offset Register offset in memory-mapped frame.
5420 * @param index Register index in register array.
5421 * @thread EMT
5422 */
5423static int e1kRegReadMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5424{
5425    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5426 *pu32Value = pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])];
5427
5428 return VINF_SUCCESS;
5429}
5430
5431/**
5432 * Write handler for Receive Address registers.
5433 *
5434 * @param pThis The device state structure.
5435 * @param offset Register offset in memory-mapped frame.
5436 * @param index Register index in register array.
5437 * @param value The value to store.
5438 * @thread EMT
5439 */
5440static int e1kRegWriteRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5441{
5442 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5443 pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
5444
5445 return VINF_SUCCESS;
5446}
5447
5448/**
5449 * Read handler for Receive Address registers.
5450 *
5451 * @returns VBox status code.
5452 *
5453 * @param pThis The device state structure.
5454 * @param offset Register offset in memory-mapped frame.
5455 * @param index Register index in register array.
5456 * @thread EMT
5457 */
5458static int e1kRegReadRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5459{
5460    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5461 *pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
5462
5463 return VINF_SUCCESS;
5464}
5465
5466/**
5467 * Write handler for VLAN Filter Table Array registers.
5468 *
5469 * @param pThis The device state structure.
5470 * @param offset Register offset in memory-mapped frame.
5471 * @param index Register index in register array.
5472 * @param value The value to store.
5473 * @thread EMT
5474 */
5475static int e1kRegWriteVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5476{
5477 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VINF_SUCCESS);
5478 pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])] = value;
5479
5480 return VINF_SUCCESS;
5481}
5482
5483/**
5484 * Read handler for VLAN Filter Table Array registers.
5485 *
5486 * @returns VBox status code.
5487 *
5488 * @param pThis The device state structure.
5489 * @param offset Register offset in memory-mapped frame.
5490 * @param index Register index in register array.
5491 * @thread EMT
5492 */
5493static int e1kRegReadVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5494{
5495    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VERR_DEV_IO_ERROR);
5496 *pu32Value = pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])];
5497
5498 return VINF_SUCCESS;
5499}
5500
5501/**
5502 * Read handler for unimplemented registers.
5503 *
5504 * Merely reports reads from unimplemented registers.
5505 *
5506 * @returns VBox status code.
5507 *
5508 * @param pThis The device state structure.
5509 * @param offset Register offset in memory-mapped frame.
5510 * @param index Register index in register array.
5511 * @thread EMT
5512 */
5513static int e1kRegReadUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5514{
5515 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5516 pThis->szPrf, offset, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5517 *pu32Value = 0;
5518
5519 return VINF_SUCCESS;
5520}
5521
5522/**
5523 * Default register read handler with automatic clear operation.
5524 *
5525 * Retrieves the value of the register from the register array in the device
5526 * state structure, then clears the register (read-to-clear semantics).
5527 *
5528 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5529 * done in the caller.
5530 *
5531 * @returns VBox status code.
5532 *
5533 * @param pThis The device state structure.
5534 * @param offset Register offset in memory-mapped frame.
5535 * @param index Register index in register array.
5536 * @thread EMT
5537 */
5538static int e1kRegReadAutoClear(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5539{
5540 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5541 int rc = e1kRegReadDefault(pThis, offset, index, pu32Value);
5542 pThis->auRegs[index] = 0;
5543
5544 return rc;
5545}
5546
5547/**
5548 * Default register read handler.
5549 *
5550 * Retrieves the value of the register from the register array in the device state structure.
5551 * Bits corresponding to 0s in 'readable' mask will always read as 0s.
5552 *
5553 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5554 * done in the caller.
5555 *
5556 * @returns VBox status code.
5557 *
5558 * @param pThis The device state structure.
5559 * @param offset Register offset in memory-mapped frame.
5560 * @param index Register index in register array.
5561 * @thread EMT
5562 */
5563static int e1kRegReadDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5564{
5565 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5566 *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
5567
5568 return VINF_SUCCESS;
5569}
5570
5571/**
5572 * Write handler for unimplemented registers.
5573 *
5574 * Merely reports writes to unimplemented registers.
5575 *
5576 * @param pThis The device state structure.
5577 * @param offset Register offset in memory-mapped frame.
5578 * @param index Register index in register array.
5579 * @param value The value to store.
5580 * @thread EMT
5581 */
5582
5583static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5584{
5585 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
5586 pThis->szPrf, offset, value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5587
5588 return VINF_SUCCESS;
5589}
5590
5591/**
5592 * Default register write handler.
5593 *
5594 * Stores the value in the register array of the device state structure. Only
5595 * bits corresponding to 1s in the 'writable' mask will be stored.
5596 *
5597 * @returns VBox status code.
5598 *
5599 * @param pThis The device state structure.
5600 * @param offset Register offset in memory-mapped frame.
5601 * @param index Register index in register array.
5602 * @param value The value to store.
5604 * @thread EMT
5605 */
5606
5607static int e1kRegWriteDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5608{
5609 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5610 pThis->auRegs[index] = (value & g_aE1kRegMap[index].writable)
5611 | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
5612
5613 return VINF_SUCCESS;
5614}
5615
5616/**
5617 * Search register table for matching register.
5618 *
5619 * @returns Index in the register table or -1 if not found.
5620 *
5621 * @param pThis The device state structure.
5622 * @param offReg Register offset in memory-mapped region.
5623 * @thread EMT
5624 */
5625static int e1kRegLookup(PE1KSTATE pThis, uint32_t offReg)
5626{
5627#if 0
5628 int index;
5629
5630 for (index = 0; index < E1K_NUM_OF_REGS; index++)
5631 {
5632 if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
5633 {
5634 return index;
5635 }
5636 }
5637#else
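    /* Binary search over the offset-sorted part of the register table; the
     * remaining entries are scanned linearly below. */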
5638 int iStart = 0;
5639 int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
5640 for (;;)
5641 {
5642 int i = (iEnd - iStart) / 2 + iStart;
5643 uint32_t offCur = g_aE1kRegMap[i].offset;
5644 if (offReg < offCur)
5645 {
5646 if (i == iStart)
5647 break;
5648 iEnd = i;
5649 }
5650 else if (offReg >= offCur + g_aE1kRegMap[i].size)
5651 {
5652 i++;
5653 if (i == iEnd)
5654 break;
5655 iStart = i;
5656 }
5657 else
5658 return i;
5659 Assert(iEnd > iStart);
5660 }
5661
5662 for (unsigned i = E1K_NUM_OF_BINARY_SEARCHABLE; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5663 if (offReg - g_aE1kRegMap[i].offset < g_aE1kRegMap[i].size)
5664 return i;
5665
5666# ifdef VBOX_STRICT
5667 for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5668 Assert(offReg - g_aE1kRegMap[i].offset >= g_aE1kRegMap[i].size);
5669# endif
5670
5671#endif
5672
5673 return -1;
5674}
5675
5676/**
5677 * Handle unaligned register read operation.
5678 *
5679 * Looks up and calls appropriate handler.
5680 *
5681 * @returns VBox status code.
5682 *
5683 * @param pThis The device state structure.
5684 * @param offReg Register offset in memory-mapped frame.
5685 * @param pv Where to store the result.
5686 * @param cb Number of bytes to read.
5687 * @thread EMT
5688 * @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
5689 * accesses we have to take care of that ourselves.
5690 */
5691static int e1kRegReadUnaligned(PE1KSTATE pThis, uint32_t offReg, void *pv, uint32_t cb)
5692{
5693 uint32_t u32 = 0;
5694 uint32_t shift;
5695 int rc = VINF_SUCCESS;
5696 int index = e1kRegLookup(pThis, offReg);
5697#ifdef DEBUG
5698 char buf[9];
5699#endif
5700
5701 /*
5702 * From the spec:
5703 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5704     * double word) are ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5705 */
5706
5707 /*
5708     * To be able to read bytes and short words we convert them to properly
5709 * shifted 32-bit words and masks. The idea is to keep register-specific
5710 * handlers simple. Most accesses will be 32-bit anyway.
5711 */
5712 uint32_t mask;
5713 switch (cb)
5714 {
5715 case 4: mask = 0xFFFFFFFF; break;
5716 case 2: mask = 0x0000FFFF; break;
5717 case 1: mask = 0x000000FF; break;
5718 default:
5719 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
5720 "unsupported op size: offset=%#10x cb=%#10x\n", offReg, cb);
5721 }
5722 if (index != -1)
5723 {
5724 if (g_aE1kRegMap[index].readable)
5725 {
5726 /* Make the mask correspond to the bits we are about to read. */
5727 shift = (offReg - g_aE1kRegMap[index].offset) % sizeof(uint32_t) * 8;
5728 mask <<= shift;
5729 if (!mask)
5730 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
5731 /*
5732 * Read it. Pass the mask so the handler knows what has to be read.
5733 * Mask out irrelevant bits.
5734 */
5735 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5736 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5737 return rc;
5738 //pThis->fDelayInts = false;
5739 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5740 //pThis->iStatIntLostOne = 0;
5741 rc = g_aE1kRegMap[index].pfnRead(pThis, offReg & 0xFFFFFFFC, index, &u32);
5742 u32 &= mask;
5743 //e1kCsLeave(pThis);
5744 E1kLog2(("%s At %08X read %s from %s (%s)\n",
5745 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5746 Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
5747 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5748 /* Shift back the result. */
5749 u32 >>= shift;
5750 }
5751 else
5752 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
5753 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5754 if (IOM_SUCCESS(rc))
5755 STAM_COUNTER_INC(&pThis->aStatRegReads[index]);
5756 }
5757 else
5758 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
5759 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf)));
5760
5761 memcpy(pv, &u32, cb);
5762 return rc;
5763}
5764
5765/**
5766 * Handle 4 byte aligned and sized read operation.
5767 *
5768 * Looks up and calls appropriate handler.
5769 *
5770 * @returns VBox status code.
5771 *
5772 * @param pThis The device state structure.
5773 * @param offReg Register offset in memory-mapped frame.
5774 * @param pu32 Where to store the result.
5775 * @thread EMT
5776 */
5777static int e1kRegReadAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t *pu32)
5778{
5779 Assert(!(offReg & 3));
5780
5781 /*
5782 * Lookup the register and check that it's readable.
5783 */
5784 int rc = VINF_SUCCESS;
5785 int idxReg = e1kRegLookup(pThis, offReg);
5786 if (RT_LIKELY(idxReg != -1))
5787 {
5788        if (RT_LIKELY(g_aE1kRegMap[idxReg].readable))
5789 {
5790 /*
5791 * Read it. Pass the mask so the handler knows what has to be read.
5792 * Mask out irrelevant bits.
5793 */
5794 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5795 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
5796 // return rc;
5797 //pThis->fDelayInts = false;
5798 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5799 //pThis->iStatIntLostOne = 0;
5800 rc = g_aE1kRegMap[idxReg].pfnRead(pThis, offReg & 0xFFFFFFFC, idxReg, pu32);
5801 //e1kCsLeave(pThis);
5802 Log6(("%s At %08X read %08X from %s (%s)\n",
5803 pThis->szPrf, offReg, *pu32, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5804 if (IOM_SUCCESS(rc))
5805 STAM_COUNTER_INC(&pThis->aStatRegReads[idxReg]);
5806 }
5807 else
5808            E1kLog(("%s At %08X read attempt from write-only register %s (%s)\n", pThis->szPrf, offReg, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5809 }
5810 else
5811 E1kLog(("%s At %08X read attempt from non-existing register\n", pThis->szPrf, offReg));
5812 return rc;
5813}
5814
5815/**
5816 * Handle 4 byte sized and aligned register write operation.
5817 *
5818 * Looks up and calls appropriate handler.
5819 *
5820 * @returns VBox status code.
5821 *
5822 * @param pThis The device state structure.
5823 * @param offReg Register offset in memory-mapped frame.
5824 * @param u32Value The value to write.
5825 * @thread EMT
5826 */
5827static int e1kRegWriteAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
5828{
5829 int rc = VINF_SUCCESS;
5830 int index = e1kRegLookup(pThis, offReg);
5831 if (RT_LIKELY(index != -1))
5832 {
5833 if (RT_LIKELY(g_aE1kRegMap[index].writable))
5834 {
5835 /*
5836 * Write it. Pass the mask so the handler knows what has to be written.
5837 * Mask out irrelevant bits.
5838 */
5839 Log6(("%s At %08X write %08X to %s (%s)\n",
5840 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5841 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5842 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
5843 // return rc;
5844 //pThis->fDelayInts = false;
5845 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5846 //pThis->iStatIntLostOne = 0;
5847 rc = g_aE1kRegMap[index].pfnWrite(pThis, offReg, index, u32Value);
5848 //e1kCsLeave(pThis);
5849 }
5850 else
5851 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
5852 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5853 if (IOM_SUCCESS(rc))
5854 STAM_COUNTER_INC(&pThis->aStatRegWrites[index]);
5855 }
5856 else
5857 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
5858 pThis->szPrf, offReg, u32Value));
5859 return rc;
5860}
5861
5862
5863/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
5864
5865/**
5866 * @callback_method_impl{FNIOMMMIOREAD}
5867 */
5868PDMBOTHCBDECL(int) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
5869{
5870 NOREF(pvUser);
5871 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5872 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIORead), a);
5873
5874 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
5875 Assert(offReg < E1K_MM_SIZE);
5876 Assert(cb == 4);
5877 Assert(!(GCPhysAddr & 3));
5878
5879 int rc = e1kRegReadAlignedU32(pThis, offReg, (uint32_t *)pv);
5880
5881 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIORead), a);
5882 return rc;
5883}
5884
5885/**
5886 * @callback_method_impl{FNIOMMMIOWRITE}
5887 */
5888PDMBOTHCBDECL(int) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
5889{
5890 NOREF(pvUser);
5891 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5892 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
5893
5894 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
5895 Assert(offReg < E1K_MM_SIZE);
5896 Assert(cb == 4);
5897 Assert(!(GCPhysAddr & 3));
5898
5899 int rc = e1kRegWriteAlignedU32(pThis, offReg, *(uint32_t const *)pv);
5900
5901 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
5902 return rc;
5903}
5904
5905/**
5906 * @callback_method_impl{FNIOMIOPORTIN}
5907 */
5908PDMBOTHCBDECL(int) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t *pu32, unsigned cb)
5909{
5910 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5911 int rc;
5912 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIORead), a);
5913
5914 uPort -= pThis->IOPortBase;
5915 if (RT_LIKELY(cb == 4))
5916 switch (uPort)
5917 {
5918 case 0x00: /* IOADDR */
5919 *pu32 = pThis->uSelectedReg;
5920 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
5921 rc = VINF_SUCCESS;
5922 break;
5923
5924 case 0x04: /* IODATA */
5925 if (!(pThis->uSelectedReg & 3))
5926 rc = e1kRegReadAlignedU32(pThis, pThis->uSelectedReg, pu32);
5927 else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
5928 rc = e1kRegReadUnaligned(pThis, pThis->uSelectedReg, pu32, cb);
5929 if (rc == VINF_IOM_R3_MMIO_READ)
5930 rc = VINF_IOM_R3_IOPORT_READ;
5931 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
5932 break;
5933
5934 default:
5935 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", pThis->szPrf, uPort));
5936 //rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
5937 rc = VINF_SUCCESS;
5938 }
5939 else
5940 {
5941        E1kLog(("%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb));
5942 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb);
5943 }
5944 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIORead), a);
5945 return rc;
5946}
5947
5948
5949/**
5950 * @callback_method_impl{FNIOMIOPORTOUT}
5951 */
5952PDMBOTHCBDECL(int) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t u32, unsigned cb)
5953{
5954 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5955 int rc;
5956 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIOWrite), a);
5957
5958 E1kLog2(("%s e1kIOPortOut: uPort=%RTiop value=%08x\n", pThis->szPrf, uPort, u32));
5959 if (RT_LIKELY(cb == 4))
5960 {
5961 uPort -= pThis->IOPortBase;
5962 switch (uPort)
5963 {
5964 case 0x00: /* IOADDR */
5965 pThis->uSelectedReg = u32;
5966 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
5967 rc = VINF_SUCCESS;
5968 break;
5969
5970 case 0x04: /* IODATA */
5971 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
5972 if (RT_LIKELY(!(pThis->uSelectedReg & 3)))
5973 {
5974 rc = e1kRegWriteAlignedU32(pThis, pThis->uSelectedReg, u32);
5975 if (rc == VINF_IOM_R3_MMIO_WRITE)
5976 rc = VINF_IOM_R3_IOPORT_WRITE;
5977 }
5978 else
5979 rc = PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
5980 "Spec violation: misaligned offset: %#10x, ignored.\n", pThis->uSelectedReg);
5981 break;
5982
5983 default:
5984 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", pThis->szPrf, uPort));
5985 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", uPort);
5986 }
5987 }
5988 else
5989 {
5990 E1kLog(("%s e1kIOPortOut: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb));
5991        rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortOut: invalid op size: uPort=%RTiop cb=%#x\n", pThis->szPrf, uPort, cb);
5992 }
5993
5994 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIOWrite), a);
5995 return rc;
5996}
5997
5998#ifdef IN_RING3
5999
6000/**
6001 * Dump complete device state to log.
6002 *
6003 * @param pThis Pointer to device state.
6004 */
6005static void e1kDumpState(PE1KSTATE pThis)
6006{
6007 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6008 {
6009 E1kLog2(("%s %8.8s = %08x\n", pThis->szPrf,
6010 g_aE1kRegMap[i].abbrev, pThis->auRegs[i]));
6011 }
6012# ifdef E1K_INT_STATS
6013 LogRel(("%s Interrupt attempts: %d\n", pThis->szPrf, pThis->uStatIntTry));
6014 LogRel(("%s Interrupts raised : %d\n", pThis->szPrf, pThis->uStatInt));
6015 LogRel(("%s Interrupts lowered: %d\n", pThis->szPrf, pThis->uStatIntLower));
6016 LogRel(("%s Interrupts delayed: %d\n", pThis->szPrf, pThis->uStatIntDly));
6017 LogRel(("%s Disabled delayed: %d\n", pThis->szPrf, pThis->uStatDisDly));
6018 LogRel(("%s Interrupts skipped: %d\n", pThis->szPrf, pThis->uStatIntSkip));
6019 LogRel(("%s Masked interrupts : %d\n", pThis->szPrf, pThis->uStatIntMasked));
6020 LogRel(("%s Early interrupts : %d\n", pThis->szPrf, pThis->uStatIntEarly));
6021 LogRel(("%s Late interrupts : %d\n", pThis->szPrf, pThis->uStatIntLate));
6022 LogRel(("%s Lost interrupts : %d\n", pThis->szPrf, pThis->iStatIntLost));
6023 LogRel(("%s Interrupts by RX : %d\n", pThis->szPrf, pThis->uStatIntRx));
6024 LogRel(("%s Interrupts by TX : %d\n", pThis->szPrf, pThis->uStatIntTx));
6025 LogRel(("%s Interrupts by ICS : %d\n", pThis->szPrf, pThis->uStatIntICS));
6026 LogRel(("%s Interrupts by RDTR: %d\n", pThis->szPrf, pThis->uStatIntRDTR));
6027 LogRel(("%s Interrupts by RDMT: %d\n", pThis->szPrf, pThis->uStatIntRXDMT0));
6028 LogRel(("%s Interrupts by TXQE: %d\n", pThis->szPrf, pThis->uStatIntTXQE));
6029 LogRel(("%s TX int delay asked: %d\n", pThis->szPrf, pThis->uStatTxIDE));
6030 LogRel(("%s TX delayed: %d\n", pThis->szPrf, pThis->uStatTxDelayed));
6031 LogRel(("%s TX delay expired: %d\n", pThis->szPrf, pThis->uStatTxDelayExp));
6032 LogRel(("%s TX no report asked: %d\n", pThis->szPrf, pThis->uStatTxNoRS));
6033 LogRel(("%s TX abs timer expd : %d\n", pThis->szPrf, pThis->uStatTAD));
6034 LogRel(("%s TX int timer expd : %d\n", pThis->szPrf, pThis->uStatTID));
6035 LogRel(("%s RX abs timer expd : %d\n", pThis->szPrf, pThis->uStatRAD));
6036 LogRel(("%s RX int timer expd : %d\n", pThis->szPrf, pThis->uStatRID));
6037 LogRel(("%s TX CTX descriptors: %d\n", pThis->szPrf, pThis->uStatDescCtx));
6038 LogRel(("%s TX DAT descriptors: %d\n", pThis->szPrf, pThis->uStatDescDat));
6039 LogRel(("%s TX LEG descriptors: %d\n", pThis->szPrf, pThis->uStatDescLeg));
6040 LogRel(("%s Received frames : %d\n", pThis->szPrf, pThis->uStatRxFrm));
6041 LogRel(("%s Transmitted frames: %d\n", pThis->szPrf, pThis->uStatTxFrm));
6042 LogRel(("%s TX frames up to 1514: %d\n", pThis->szPrf, pThis->uStatTx1514));
6043 LogRel(("%s TX frames up to 2962: %d\n", pThis->szPrf, pThis->uStatTx2962));
6044 LogRel(("%s TX frames up to 4410: %d\n", pThis->szPrf, pThis->uStatTx4410));
6045 LogRel(("%s TX frames up to 5858: %d\n", pThis->szPrf, pThis->uStatTx5858));
6046 LogRel(("%s TX frames up to 7306: %d\n", pThis->szPrf, pThis->uStatTx7306));
6047 LogRel(("%s TX frames up to 8754: %d\n", pThis->szPrf, pThis->uStatTx8754));
6048 LogRel(("%s TX frames up to 16384: %d\n", pThis->szPrf, pThis->uStatTx16384));
6049 LogRel(("%s TX frames up to 32768: %d\n", pThis->szPrf, pThis->uStatTx32768));
6050 LogRel(("%s Larger TX frames : %d\n", pThis->szPrf, pThis->uStatTxLarge));
6051 LogRel(("%s Max TX Delay : %lld\n", pThis->szPrf, pThis->uStatMaxTxDelay));
6052# endif /* E1K_INT_STATS */
6053}
6054
6055/**
6056 * @callback_method_impl{FNPCIIOREGIONMAP}
6057 */
6058static DECLCALLBACK(int) e1kMap(PPCIDEVICE pPciDev, int iRegion, RTGCPHYS GCPhysAddress, uint32_t cb, PCIADDRESSSPACE enmType)
6059{
6060 PE1KSTATE pThis = PDMINS_2_DATA(pPciDev->pDevIns, E1KSTATE*);
6061 int rc;
6062
6063 switch (enmType)
6064 {
6065 case PCI_ADDRESS_SPACE_IO:
6066 pThis->IOPortBase = (RTIOPORT)GCPhysAddress;
6067 rc = PDMDevHlpIOPortRegister(pPciDev->pDevIns, pThis->IOPortBase, cb, NULL /*pvUser*/,
6068 e1kIOPortOut, e1kIOPortIn, NULL, NULL, "E1000");
6069 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6070 rc = PDMDevHlpIOPortRegisterR0(pPciDev->pDevIns, pThis->IOPortBase, cb, NIL_RTR0PTR /*pvUser*/,
6071 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6072 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6073 rc = PDMDevHlpIOPortRegisterRC(pPciDev->pDevIns, pThis->IOPortBase, cb, NIL_RTRCPTR /*pvUser*/,
6074 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6075 break;
6076
6077 case PCI_ADDRESS_SPACE_MEM:
6078 /*
6079 * From the spec:
6080 * For registers that should be accessed as 32-bit double words,
6081             * partial writes (less than a 32-bit double word) are ignored.
6082 * Partial reads return all 32 bits of data regardless of the
6083 * byte enables.
6084 */
6085 pThis->addrMMReg = GCPhysAddress; Assert(!(GCPhysAddress & 7));
6086 rc = PDMDevHlpMMIORegister(pPciDev->pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
6087 IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD,
6088 e1kMMIOWrite, e1kMMIORead, "E1000");
6089 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6090 rc = PDMDevHlpMMIORegisterR0(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTR0PTR /*pvUser*/,
6091 "e1kMMIOWrite", "e1kMMIORead");
6092 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6093 rc = PDMDevHlpMMIORegisterRC(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTRCPTR /*pvUser*/,
6094 "e1kMMIOWrite", "e1kMMIORead");
6095 break;
6096
6097 default:
6098 /* We should never get here */
6099 AssertMsgFailed(("Invalid PCI address space param in map callback"));
6100 rc = VERR_INTERNAL_ERROR;
6101 break;
6102 }
6103 return rc;
6104}
6105
6106
6107/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
6108
6109/**
6110 * Check if the device can receive data now.
6111 * This must be called before the pfnReceive() method is called.
6112 *
6113 * @returns VBox status code: VINF_SUCCESS if the device can receive data, VERR_NET_NO_BUFFER_SPACE otherwise.
6114 * @param   pThis       The device state structure.
6115 * @thread EMT
6116 */
6117static int e1kCanReceive(PE1KSTATE pThis)
6118{
6119#ifndef E1K_WITH_RXD_CACHE
6120 size_t cb;
6121
6122 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6123 return VERR_NET_NO_BUFFER_SPACE;
6124
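    /* Estimate how much data we can still receive: a single-descriptor ring is
     * handled specially, otherwise count the free descriptors between head and
     * tail (accounting for wrap-around) and multiply by the buffer size. */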
6125 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6126 {
6127 E1KRXDESC desc;
6128 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6129 &desc, sizeof(desc));
6130 if (desc.status.fDD)
6131 cb = 0;
6132 else
6133 cb = pThis->u16RxBSize;
6134 }
6135 else if (RDH < RDT)
6136 cb = (RDT - RDH) * pThis->u16RxBSize;
6137 else if (RDH > RDT)
6138 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pThis->u16RxBSize;
6139 else
6140 {
6141 cb = 0;
6142 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6143 }
6144 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6145 pThis->szPrf, RDH, RDT, RDLEN, pThis->u16RxBSize, cb));
6146
6147 e1kCsRxLeave(pThis);
6148 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6149#else /* E1K_WITH_RXD_CACHE */
6150 int rc = VINF_SUCCESS;
6151
6152 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6153 return VERR_NET_NO_BUFFER_SPACE;
6154
6155 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6156 {
6157 E1KRXDESC desc;
6158 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6159 &desc, sizeof(desc));
6160 if (desc.status.fDD)
6161 rc = VERR_NET_NO_BUFFER_SPACE;
6162 }
6163 else if (e1kRxDIsCacheEmpty(pThis) && RDH == RDT)
6164 {
6165 /* Cache is empty, so is the RX ring. */
6166 rc = VERR_NET_NO_BUFFER_SPACE;
6167 }
6168 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
6169 " u16RxBSize=%d rc=%Rrc\n", pThis->szPrf,
6170 e1kRxDInCache(pThis), RDH, RDT, RDLEN, pThis->u16RxBSize, rc));
6171
6172 e1kCsRxLeave(pThis);
6173 return rc;
6174#endif /* E1K_WITH_RXD_CACHE */
6175}
6176
6177/**
6178 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6179 */
6180static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6181{
6182 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6183 int rc = e1kCanReceive(pThis);
6184
6185 if (RT_SUCCESS(rc))
6186 return VINF_SUCCESS;
6187 if (RT_UNLIKELY(cMillies == 0))
6188 return VERR_NET_NO_BUFFER_SPACE;
6189
6190 rc = VERR_INTERRUPTED;
6191 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, true);
6192 STAM_PROFILE_START(&pThis->StatRxOverflow, a);
6193 VMSTATE enmVMState;
6194 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pThis->CTX_SUFF(pDevIns))) == VMSTATE_RUNNING
6195 || enmVMState == VMSTATE_RUNNING_LS))
6196 {
6197 int rc2 = e1kCanReceive(pThis);
6198 if (RT_SUCCESS(rc2))
6199 {
6200 rc = VINF_SUCCESS;
6201 break;
6202 }
6203 E1kLogRel(("E1000 e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6204 E1kLog(("%s e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", pThis->szPrf, cMillies));
6205 RTSemEventWait(pThis->hEventMoreRxDescAvail, cMillies);
6206 }
6207 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
6208 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, false);
6209
6210 return rc;
6211}
6212
6213
6214/**
6215 * Matches the packet addresses against the Receive Address table. Looks for
6216 * exact matches only.
6217 *
6218 * @returns true if address matches.
6219 * @param pThis Pointer to the state structure.
6220 * @param pvBuf The ethernet packet.
6221 * @param cb Number of bytes available in the packet.
6222 * @thread EMT
6223 */
6224static bool e1kPerfectMatch(PE1KSTATE pThis, const void *pvBuf)
6225{
6226 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
6227 {
6228 E1KRAELEM* ra = pThis->aRecAddr.array + i;
6229
6230 /* Valid address? */
6231 if (ra->ctl & RA_CTL_AV)
6232 {
6233 Assert((ra->ctl & RA_CTL_AS) < 2);
6234 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6235 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6236 // pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6237 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6238 /*
6239 * Address Select:
6240 * 00b = Destination address
6241 * 01b = Source address
6242 * 10b = Reserved
6243 * 11b = Reserved
6244 * Since ethernet header is (DA, SA, len) we can use address
6245 * select as index.
6246 */
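            /*
             * Illustrative: sizeof(ra->addr) is 6, so AS=00b compares the six
             * bytes at pvBuf + 0 (destination MAC) and AS=01b the six bytes at
             * pvBuf + 6 (source MAC) against this RA entry.
             */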
6247 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6248 ra->addr, sizeof(ra->addr)) == 0)
6249 return true;
6250 }
6251 }
6252
6253 return false;
6254}
6255
6256/**
6257 * Matches the packet addresses against Multicast Table Array.
6258 *
6259 * @remarks This is an imperfect match since it matches not an exact address
6260 * but a subset of addresses.
6261 *
6262 * @returns true if address matches.
6263 * @param pThis Pointer to the state structure.
6264 * @param pvBuf The ethernet packet.
6265 * @param cb Number of bytes available in the packet.
6266 * @thread EMT
6267 */
6268static bool e1kImperfectMatch(PE1KSTATE pThis, const void *pvBuf)
6269{
6270 /* Get bits 32..47 of destination address */
6271 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6272
6273 unsigned offset = GET_BITS(RCTL, MO);
6274 /*
6275 * offset means:
6276 * 00b = bits 36..47
6277 * 01b = bits 35..46
6278 * 10b = bits 34..45
6279 * 11b = bits 32..43
6280 */
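    /*
     * Worked example (illustrative only): for MO=00b we need bits 36..47 of the
     * destination address. Since u16Bit already holds bits 32..47, the shift
     * below is (4 - 0) = 4, and the low 12 bits of the result select one of the
     * 4096 bits in the MTA (hence the & 0xFFF).
     */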
6281 if (offset < 3)
6282 u16Bit = u16Bit >> (4 - offset);
6283 return ASMBitTest(pThis->auMTA, u16Bit & 0xFFF);
6284}
6285
6286/**
6287 * Determines if the packet is to be delivered to the upper layer.
6288 *
6289 * The following filters are supported:
6290 * - Exact Unicast/Multicast
6291 * - Promiscuous Unicast/Multicast
6292 * - Multicast
6293 * - VLAN
6294 *
6295 * @returns true if packet is intended for this node.
6296 * @param pThis Pointer to the state structure.
6297 * @param pvBuf The ethernet packet.
6298 * @param cb Number of bytes available in the packet.
6299 * @param pStatus Bit field to store status bits.
6300 * @thread EMT
6301 */
6302static bool e1kAddressFilter(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6303{
6304 Assert(cb > 14);
6305 /* Assume that we fail to pass exact filter. */
6306 pStatus->fPIF = false;
6307 pStatus->fVP = false;
6308 /* Discard oversized packets */
6309 if (cb > E1K_MAX_RX_PKT_SIZE)
6310 {
6311 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6312 pThis->szPrf, cb, E1K_MAX_RX_PKT_SIZE));
6313 E1K_INC_CNT32(ROC);
6314 return false;
6315 }
6316 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6317 {
6318 /* When long packet reception is disabled, packets longer than 1522 bytes are discarded */
6319 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6320 pThis->szPrf, cb));
6321 E1K_INC_CNT32(ROC);
6322 return false;
6323 }
6324
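    /*
     * Frame layout assumed below (standard Ethernet, for illustration): bytes
     * 0..5 hold the DA, 6..11 the SA; for 802.1q frames bytes 12..13 hold the
     * TPID (compared against VET) and bytes 14..15 the TCI, which is why the
     * code accesses u16Ptr[6] and u16Ptr[7].
     */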
6325 uint16_t *u16Ptr = (uint16_t*)pvBuf;
6326 /* Compare TPID with VLAN Ether Type */
6327 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6328 {
6329 pStatus->fVP = true;
6330 /* Is VLAN filtering enabled? */
6331 if (RCTL & RCTL_VFE)
6332 {
6333 /* It is 802.1q packet indeed, let's filter by VID */
6334 if (RCTL & RCTL_CFIEN)
6335 {
6336 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", pThis->szPrf,
6337 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6338 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6339 !!(RCTL & RCTL_CFI)));
6340 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6341 {
6342 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6343 pThis->szPrf, E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6344 return false;
6345 }
6346 }
6347 else
6348 E1kLog3(("%s VLAN filter: VLAN=%d\n", pThis->szPrf,
6349 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6350 if (!ASMBitTest(pThis->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6351 {
6352 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6353 pThis->szPrf, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6354 return false;
6355 }
6356 }
6357 }
6358 /* Broadcast filtering */
6359 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6360 return true;
6361 E1kLog2(("%s Packet filter: not a broadcast\n", pThis->szPrf));
6362 if (e1kIsMulticast(pvBuf))
6363 {
6364 /* Is multicast promiscuous enabled? */
6365 if (RCTL & RCTL_MPE)
6366 return true;
6367 E1kLog2(("%s Packet filter: no promiscuous multicast\n", pThis->szPrf));
6368 /* Try perfect matches first */
6369 if (e1kPerfectMatch(pThis, pvBuf))
6370 {
6371 pStatus->fPIF = true;
6372 return true;
6373 }
6374 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6375 if (e1kImperfectMatch(pThis, pvBuf))
6376 return true;
6377 E1kLog2(("%s Packet filter: no imperfect match\n", pThis->szPrf));
6378 }
6379 else
    {
6380 /* Is unicast promiscuous enabled? */
6381 if (RCTL & RCTL_UPE)
6382 return true;
6383 E1kLog2(("%s Packet filter: no promiscuous unicast\n", pThis->szPrf));
6384 if (e1kPerfectMatch(pThis, pvBuf))
6385 {
6386 pStatus->fPIF = true;
6387 return true;
6388 }
6389 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6390 }
6391 E1kLog2(("%s Packet filter: packet discarded\n", pThis->szPrf));
6392 return false;
6393}
6394
6395/**
6396 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6397 */
6398static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6399{
6400 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6401 int rc = VINF_SUCCESS;
6402
6403 /*
6404 * Drop packets if the VM is not running yet/anymore.
6405 */
6406 VMSTATE enmVMState = PDMDevHlpVMState(STATE_TO_DEVINS(pThis));
6407 if ( enmVMState != VMSTATE_RUNNING
6408 && enmVMState != VMSTATE_RUNNING_LS)
6409 {
6410 E1kLog(("%s Dropping incoming packet as VM is not running.\n", pThis->szPrf));
6411 return VINF_SUCCESS;
6412 }
6413
6414 /* Discard incoming packets in locked state */
6415 if (!(RCTL & RCTL_EN) || pThis->fLocked || !(STATUS & STATUS_LU))
6416 {
6417 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", pThis->szPrf));
6418 return VINF_SUCCESS;
6419 }
6420
6421 STAM_PROFILE_ADV_START(&pThis->StatReceive, a);
6422
6423 //if (!e1kCsEnter(pThis, RT_SRC_POS))
6424 // return VERR_PERMISSION_DENIED;
6425
6426 e1kPacketDump(pThis, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6427
6428 /* Update stats */
6429 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
6430 {
6431 E1K_INC_CNT32(TPR);
6432 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6433 e1kCsLeave(pThis);
6434 }
6435 STAM_PROFILE_ADV_START(&pThis->StatReceiveFilter, a);
6436 E1KRXDST status;
6437 RT_ZERO(status);
6438 bool fPassed = e1kAddressFilter(pThis, pvBuf, cb, &status);
6439 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveFilter, a);
6440 if (fPassed)
6441 {
6442 rc = e1kHandleRxPacket(pThis, pvBuf, cb, status);
6443 }
6444 //e1kCsLeave(pThis);
6445 STAM_PROFILE_ADV_STOP(&pThis->StatReceive, a);
6446
6447 return rc;
6448}
6449
6450
6451/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
6452
6453/**
6454 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
6455 */
6456static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6457{
6458 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, ILeds);
6459 int rc = VERR_PDM_LUN_NOT_FOUND;
6460
6461 if (iLUN == 0)
6462 {
6463 *ppLed = &pThis->led;
6464 rc = VINF_SUCCESS;
6465 }
6466 return rc;
6467}
6468
6469
6470/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
6471
6472/**
6473 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
6474 */
6475static DECLCALLBACK(int) e1kR3GetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6476{
6477 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6478 pThis->eeprom.getMac(pMac);
6479 return VINF_SUCCESS;
6480}
6481
6482/**
6483 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
6484 */
6485static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kR3GetLinkState(PPDMINETWORKCONFIG pInterface)
6486{
6487 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6488 if (STATUS & STATUS_LU)
6489 return PDMNETWORKLINKSTATE_UP;
6490 return PDMNETWORKLINKSTATE_DOWN;
6491}
6492
6493/**
6494 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
6495 */
6496static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6497{
6498 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6499
6500 E1kLog(("%s e1kR3SetLinkState: enmState=%d\n", pThis->szPrf, enmState));
6501 switch (enmState)
6502 {
6503 case PDMNETWORKLINKSTATE_UP:
6504 pThis->fCableConnected = true;
6505 /* If link was down, bring it up after a while. */
6506 if (!(STATUS & STATUS_LU))
6507 e1kBringLinkUpDelayed(pThis);
6508 break;
6509 case PDMNETWORKLINKSTATE_DOWN:
6510 pThis->fCableConnected = false;
6511 /* Always set the phy link state to down, regardless of the STATUS_LU bit.
6512 * We might have to set the link state before the driver initializes us. */
6513 Phy::setLinkStatus(&pThis->phy, false);
6514 /* If link was up, bring it down. */
6515 if (STATUS & STATUS_LU)
6516 e1kR3LinkDown(pThis);
6517 break;
6518 case PDMNETWORKLINKSTATE_DOWN_RESUME:
6519 /*
6520 * There is not much sense in bringing down the link if it has not come up yet.
6521 * If it is up though, we bring it down temporarily, then bring it up again.
6522 */
6523 if (STATUS & STATUS_LU)
6524 e1kR3LinkDownTemp(pThis);
6525 break;
6526 default:
6527 ;
6528 }
6529 return VINF_SUCCESS;
6530}
6531
6532
6533/* -=-=-=-=- PDMIBASE -=-=-=-=- */
6534
6535/**
6536 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6537 */
6538static DECLCALLBACK(void *) e1kR3QueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6539{
6540 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, IBase);
6541 Assert(&pThis->IBase == pInterface);
6542
6543 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
6544 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThis->INetworkDown);
6545 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThis->INetworkConfig);
6546 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThis->ILeds);
6547 return NULL;
6548}
6549
6550
6551/* -=-=-=-=- Saved State -=-=-=-=- */
6552
6553/**
6554 * Saves the configuration.
6555 *
6556 * @param pThis The E1K state.
6557 * @param pSSM The handle to the saved state.
6558 */
6559static void e1kSaveConfig(PE1KSTATE pThis, PSSMHANDLE pSSM)
6560{
6561 SSMR3PutMem(pSSM, &pThis->macConfigured, sizeof(pThis->macConfigured));
6562 SSMR3PutU32(pSSM, pThis->eChip);
6563}
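/*
 * Note: e1kSaveConfig() is shared by the live pass (e1kLiveExec) and the start
 * of e1kSaveExec(), so e1kLoadExec() can verify the MAC address and chip type
 * in any pass (see the uVersion/uPass checks there).
 */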
6564
6565/**
6566 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
6567 */
6568static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
6569{
6570 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6571 e1kSaveConfig(pThis, pSSM);
6572 return VINF_SSM_DONT_CALL_AGAIN;
6573}
6574
6575/**
6576 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
6577 */
6578static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6579{
6580 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6581
6582 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6583 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6584 return rc;
6585 e1kCsLeave(pThis);
6586 return VINF_SUCCESS;
6587#if 0
6588 /* 1) Prevent all threads from modifying the state and memory */
6589 //pThis->fLocked = true;
6590 /* 2) Cancel all timers */
6591#ifdef E1K_TX_DELAY
6592 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
6593#endif /* E1K_TX_DELAY */
6594#ifdef E1K_USE_TX_TIMERS
6595 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
6596#ifndef E1K_NO_TAD
6597 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
6598#endif /* E1K_NO_TAD */
6599#endif /* E1K_USE_TX_TIMERS */
6600#ifdef E1K_USE_RX_TIMERS
6601 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
6602 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
6603#endif /* E1K_USE_RX_TIMERS */
6604 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
6605 /* 3) Did I forget anything? */
6606 E1kLog(("%s Locked\n", pThis->szPrf));
6607 return VINF_SUCCESS;
6608#endif
6609}
6610
6611/**
6612 * @callback_method_impl{FNSSMDEVSAVEEXEC}
6613 */
6614static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6615{
6616 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6617
6618 e1kSaveConfig(pThis, pSSM);
6619 pThis->eeprom.save(pSSM);
6620 e1kDumpState(pThis);
6621 SSMR3PutMem(pSSM, pThis->auRegs, sizeof(pThis->auRegs));
6622 SSMR3PutBool(pSSM, pThis->fIntRaised);
6623 Phy::saveState(pSSM, &pThis->phy);
6624 SSMR3PutU32(pSSM, pThis->uSelectedReg);
6625 SSMR3PutMem(pSSM, pThis->auMTA, sizeof(pThis->auMTA));
6626 SSMR3PutMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6627 SSMR3PutMem(pSSM, pThis->auVFTA, sizeof(pThis->auVFTA));
6628 SSMR3PutU64(pSSM, pThis->u64AckedAt);
6629 SSMR3PutU16(pSSM, pThis->u16RxBSize);
6630 //SSMR3PutBool(pSSM, pThis->fDelayInts);
6631 //SSMR3PutBool(pSSM, pThis->fIntMaskUsed);
6632 SSMR3PutU16(pSSM, pThis->u16TxPktLen);
6633/** @todo State wrt the TSE buffer is incomplete, so there is little point in
6634 * saving it, actually. */
6635 SSMR3PutMem(pSSM, pThis->aTxPacketFallback, pThis->u16TxPktLen);
6636 SSMR3PutBool(pSSM, pThis->fIPcsum);
6637 SSMR3PutBool(pSSM, pThis->fTCPcsum);
6638 SSMR3PutMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6639 SSMR3PutMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6640 SSMR3PutBool(pSSM, pThis->fVTag);
6641 SSMR3PutU16(pSSM, pThis->u16VTagTCI);
6642#ifdef E1K_WITH_TXD_CACHE
6643#if 0
6644 SSMR3PutU8(pSSM, pThis->nTxDFetched);
6645 SSMR3PutMem(pSSM, pThis->aTxDescriptors,
6646 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6647#else
6648 /*
6649 * There is no point in storing TX descriptor cache entries as we can simply
6650 * fetch them again. Moreover, normally the cache is always empty when we
6651 * save the state. Store zero entries for compatibility.
6652 */
6653 SSMR3PutU8(pSSM, 0);
6654#endif
6655#endif /* E1K_WITH_TXD_CACHE */
6656/**@todo GSO requires some more state here. */
6657 E1kLog(("%s State has been saved\n", pThis->szPrf));
6658 return VINF_SUCCESS;
6659}
6660
6661#if 0
6662/**
6663 * @callback_method_impl{FNSSMDEVSAVEDONE}
6664 */
6665static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6666{
6667 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6668
6669 /* If VM is being powered off unlocking will result in assertions in PGM */
6670 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
6671 pThis->fLocked = false;
6672 else
6673 E1kLog(("%s VM is not running -- remain locked\n", pThis->szPrf));
6674 E1kLog(("%s Unlocked\n", pThis->szPrf));
6675 return VINF_SUCCESS;
6676}
6677#endif
6678
6679/**
6680 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
6681 */
6682static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6683{
6684 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6685
6686 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6687 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6688 return rc;
6689 e1kCsLeave(pThis);
6690 return VINF_SUCCESS;
6691}
6692
6693/**
6694 * @callback_method_impl{FNSSMDEVLOADEXEC}
6695 */
6696static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
6697{
6698 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6699 int rc;
6700
6701 if ( uVersion != E1K_SAVEDSTATE_VERSION
6702#ifdef E1K_WITH_TXD_CACHE
6703 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
6704#endif /* E1K_WITH_TXD_CACHE */
6705 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
6706 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
6707 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
6708
6709 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
6710 || uPass != SSM_PASS_FINAL)
6711 {
6712 /* config checks */
6713 RTMAC macConfigured;
6714 rc = SSMR3GetMem(pSSM, &macConfigured, sizeof(macConfigured));
6715 AssertRCReturn(rc, rc);
6716 if ( memcmp(&macConfigured, &pThis->macConfigured, sizeof(macConfigured))
6717 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
6718 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
6719
6720 E1KCHIP eChip;
6721 rc = SSMR3GetU32(pSSM, &eChip);
6722 AssertRCReturn(rc, rc);
6723 if (eChip != pThis->eChip)
6724 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
6725 }
6726
6727 if (uPass == SSM_PASS_FINAL)
6728 {
6729 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
6730 {
6731 rc = pThis->eeprom.load(pSSM);
6732 AssertRCReturn(rc, rc);
6733 }
6734 /* the state */
6735 SSMR3GetMem(pSSM, &pThis->auRegs, sizeof(pThis->auRegs));
6736 SSMR3GetBool(pSSM, &pThis->fIntRaised);
6737 /** @todo: PHY could be made a separate device with its own versioning */
6738 Phy::loadState(pSSM, &pThis->phy);
6739 SSMR3GetU32(pSSM, &pThis->uSelectedReg);
6740 SSMR3GetMem(pSSM, &pThis->auMTA, sizeof(pThis->auMTA));
6741 SSMR3GetMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6742 SSMR3GetMem(pSSM, &pThis->auVFTA, sizeof(pThis->auVFTA));
6743 SSMR3GetU64(pSSM, &pThis->u64AckedAt);
6744 SSMR3GetU16(pSSM, &pThis->u16RxBSize);
6745 //SSMR3GetBool(pSSM, pThis->fDelayInts);
6746 //SSMR3GetBool(pSSM, pThis->fIntMaskUsed);
6747 SSMR3GetU16(pSSM, &pThis->u16TxPktLen);
6748 SSMR3GetMem(pSSM, &pThis->aTxPacketFallback[0], pThis->u16TxPktLen);
6749 SSMR3GetBool(pSSM, &pThis->fIPcsum);
6750 SSMR3GetBool(pSSM, &pThis->fTCPcsum);
6751 SSMR3GetMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6752 rc = SSMR3GetMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6753 AssertRCReturn(rc, rc);
6754 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
6755 {
6756 SSMR3GetBool(pSSM, &pThis->fVTag);
6757 rc = SSMR3GetU16(pSSM, &pThis->u16VTagTCI);
6758 AssertRCReturn(rc, rc);
6759 }
6760 else
6761 {
6762 pThis->fVTag = false;
6763 pThis->u16VTagTCI = 0;
6764 }
6765#ifdef E1K_WITH_TXD_CACHE
6766 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
6767 {
6768 rc = SSMR3GetU8(pSSM, &pThis->nTxDFetched);
6769 AssertRCReturn(rc, rc);
6770 if (pThis->nTxDFetched)
6771 SSMR3GetMem(pSSM, pThis->aTxDescriptors,
6772 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6773 }
6774 else
6775 pThis->nTxDFetched = 0;
6776 /*
6777 * @todo: Perhaps we should not store the TXD cache at all, as the entries
6778 * can simply be fetched again from the guest's memory. Or can't they?
6779 */
6780#endif /* E1K_WITH_TXD_CACHE */
6781#ifdef E1K_WITH_RXD_CACHE
6782 /*
6783 * There is no point in storing the RX descriptor cache in the saved
6784 * state, we just need to make sure it is empty.
6785 */
6786 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
6787#endif /* E1K_WITH_RXD_CACHE */
6788 /* derived state */
6789 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
6790
6791 E1kLog(("%s State has been restored\n", pThis->szPrf));
6792 e1kDumpState(pThis);
6793 }
6794 return VINF_SUCCESS;
6795}
6796
6797/**
6798 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
6799 */
6800static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6801{
6802 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6803
6804 /* Update promiscuous mode */
6805 if (pThis->pDrvR3)
6806 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3,
6807 !!(RCTL & (RCTL_UPE | RCTL_MPE)));
6808
6809 /*
6810 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
6811 * passed to us. We go through all this stuff if the link was up and we
6812 * weren't teleported.
6813 */
6814 if ( (STATUS & STATUS_LU)
6815 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
6816 && pThis->cMsLinkUpDelay)
6817 {
6818 e1kR3LinkDownTemp(pThis);
6819 }
6820 return VINF_SUCCESS;
6821}
6822
6823
6824
6825/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
6826
6827/**
6828 * @callback_method_impl{FNRTSTRFORMATTYPE}
6829 */
6830static DECLCALLBACK(size_t) e1kFmtRxDesc(PFNRTSTROUTPUT pfnOutput,
6831 void *pvArgOutput,
6832 const char *pszType,
6833 void const *pvValue,
6834 int cchWidth,
6835 int cchPrecision,
6836 unsigned fFlags,
6837 void *pvUser)
6838{
6839 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
6840 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
6841 if (!pDesc)
6842 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
6843
6844 size_t cbPrintf = 0;
6845 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
6846 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
6847 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
6848 pDesc->status.fPIF ? "PIF" : "pif",
6849 pDesc->status.fIPCS ? "IPCS" : "ipcs",
6850 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
6851 pDesc->status.fVP ? "VP" : "vp",
6852 pDesc->status.fIXSM ? "IXSM" : "ixsm",
6853 pDesc->status.fEOP ? "EOP" : "eop",
6854 pDesc->status.fDD ? "DD" : "dd",
6855 pDesc->status.fRXE ? "RXE" : "rxe",
6856 pDesc->status.fIPE ? "IPE" : "ipe",
6857 pDesc->status.fTCPE ? "TCPE" : "tcpe",
6858 pDesc->status.fCE ? "CE" : "ce",
6859 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
6860 E1K_SPEC_VLAN(pDesc->status.u16Special),
6861 E1K_SPEC_PRI(pDesc->status.u16Special));
6862 return cbPrintf;
6863}
6864
6865/**
6866 * @callback_method_impl{FNRTSTRFORMATTYPE}
6867 */
6868static DECLCALLBACK(size_t) e1kFmtTxDesc(PFNRTSTROUTPUT pfnOutput,
6869 void *pvArgOutput,
6870 const char *pszType,
6871 void const *pvValue,
6872 int cchWidth,
6873 int cchPrecision,
6874 unsigned fFlags,
6875 void *pvUser)
6876{
6877 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
6878 E1KTXDESC* pDesc = (E1KTXDESC*)pvValue;
6879 if (!pDesc)
6880 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
6881
6882 size_t cbPrintf = 0;
6883 switch (e1kGetDescType(pDesc))
6884 {
6885 case E1K_DTYP_CONTEXT:
6886 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
6887 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
6888 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
6889 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
6890 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
6891 pDesc->context.dw2.fIDE ? " IDE":"",
6892 pDesc->context.dw2.fRS ? " RS" :"",
6893 pDesc->context.dw2.fTSE ? " TSE":"",
6894 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
6895 pDesc->context.dw2.fTCP ? "TCP":"UDP",
6896 pDesc->context.dw2.u20PAYLEN,
6897 pDesc->context.dw3.u8HDRLEN,
6898 pDesc->context.dw3.u16MSS,
6899 pDesc->context.dw3.fDD?"DD":"");
6900 break;
6901 case E1K_DTYP_DATA:
6902 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
6903 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
6904 pDesc->data.u64BufAddr,
6905 pDesc->data.cmd.u20DTALEN,
6906 pDesc->data.cmd.fIDE ? " IDE" :"",
6907 pDesc->data.cmd.fVLE ? " VLE" :"",
6908 pDesc->data.cmd.fRPS ? " RPS" :"",
6909 pDesc->data.cmd.fRS ? " RS" :"",
6910 pDesc->data.cmd.fTSE ? " TSE" :"",
6911 pDesc->data.cmd.fIFCS? " IFCS":"",
6912 pDesc->data.cmd.fEOP ? " EOP" :"",
6913 pDesc->data.dw3.fDD ? " DD" :"",
6914 pDesc->data.dw3.fEC ? " EC" :"",
6915 pDesc->data.dw3.fLC ? " LC" :"",
6916 pDesc->data.dw3.fTXSM? " TXSM":"",
6917 pDesc->data.dw3.fIXSM? " IXSM":"",
6918 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
6919 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
6920 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
6921 break;
6922 case E1K_DTYP_LEGACY:
6923 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
6924 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
6925 pDesc->data.u64BufAddr,
6926 pDesc->legacy.cmd.u16Length,
6927 pDesc->legacy.cmd.fIDE ? " IDE" :"",
6928 pDesc->legacy.cmd.fVLE ? " VLE" :"",
6929 pDesc->legacy.cmd.fRPS ? " RPS" :"",
6930 pDesc->legacy.cmd.fRS ? " RS" :"",
6931 pDesc->legacy.cmd.fIC ? " IC" :"",
6932 pDesc->legacy.cmd.fIFCS? " IFCS":"",
6933 pDesc->legacy.cmd.fEOP ? " EOP" :"",
6934 pDesc->legacy.dw3.fDD ? " DD" :"",
6935 pDesc->legacy.dw3.fEC ? " EC" :"",
6936 pDesc->legacy.dw3.fLC ? " LC" :"",
6937 pDesc->legacy.cmd.u8CSO,
6938 pDesc->legacy.dw3.u8CSS,
6939 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
6940 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
6941 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
6942 break;
6943 default:
6944 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
6945 break;
6946 }
6947
6948 return cbPrintf;
6949}
6950
6951/** Initializes debug helpers (logging format types). */
6952static int e1kInitDebugHelpers(void)
6953{
6954 int rc = VINF_SUCCESS;
6955 static bool s_fHelpersRegistered = false;
6956 if (!s_fHelpersRegistered)
6957 {
6958 s_fHelpersRegistered = true;
6959 rc = RTStrFormatTypeRegister("e1krxd", e1kFmtRxDesc, NULL);
6960 AssertRCReturn(rc, rc);
6961 rc = RTStrFormatTypeRegister("e1ktxd", e1kFmtTxDesc, NULL);
6962 AssertRCReturn(rc, rc);
6963 }
6964 return rc;
6965}
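/*
 * Once registered, the custom format types can be used by any RTStrFormat
 * based output, e.g. (illustrative):
 *
 *   E1KRXDESC rxd;
 *   ...
 *   E1kLog(("%s %R[e1krxd]\n", pThis->szPrf, &rxd));
 *
 * e1kInfo() below relies on exactly this to dump the descriptor rings.
 */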
6966
6967/**
6968 * Status info callback.
6969 *
6970 * @param pDevIns The device instance.
6971 * @param pHlp The output helpers.
6972 * @param pszArgs The arguments.
6973 */
6974static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
6975{
6976 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6977 unsigned i;
6978 // bool fRcvRing = false;
6979 // bool fXmtRing = false;
6980
6981 /*
6982 * Parse args.
6983 if (pszArgs)
6984 {
6985 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
6986 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
6987 }
6988 */
6989
6990 /*
6991 * Show info.
6992 */
6993 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%RTiop mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
6994 pDevIns->iInstance, pThis->IOPortBase, pThis->addrMMReg,
6995 &pThis->macConfigured, g_Chips[pThis->eChip].pcszName,
6996 pThis->fRCEnabled ? " GC" : "", pThis->fR0Enabled ? " R0" : "");
6997
6998 e1kCsEnter(pThis, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
6999
7000 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7001 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", g_aE1kRegMap[i].abbrev, pThis->auRegs[i]);
7002
7003 for (i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
7004 {
7005 E1KRAELEM* ra = pThis->aRecAddr.array + i;
7006 if (ra->ctl & RA_CTL_AV)
7007 {
7008 const char *pcszTmp;
7009 switch (ra->ctl & RA_CTL_AS)
7010 {
7011 case 0: pcszTmp = "DST"; break;
7012 case 1: pcszTmp = "SRC"; break;
7013 default: pcszTmp = "reserved";
7014 }
7015 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7016 }
7017 }
7018 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7019 uint32_t rdh = RDH;
7020 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7021 for (i = 0; i < cDescs; ++i)
7022 {
7023 E1KRXDESC desc;
7024 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7025 &desc, sizeof(desc));
7026 if (i == rdh)
7027 pHlp->pfnPrintf(pHlp, ">>> ");
7028 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7029 }
7030#ifdef E1K_WITH_RXD_CACHE
7031 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7032 pThis->iRxDCurrent, RDH, pThis->nRxDFetched, E1K_RXD_CACHE_SIZE);
7033 if (rdh > pThis->iRxDCurrent)
7034 rdh -= pThis->iRxDCurrent;
7035 else
7036 rdh = cDescs + rdh - pThis->iRxDCurrent;
7037 for (i = 0; i < pThis->nRxDFetched; ++i)
7038 {
7039 if (i == pThis->iRxDCurrent)
7040 pHlp->pfnPrintf(pHlp, ">>> ");
7041 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7042 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7043 &pThis->aRxDescriptors[i]);
7044 }
7045#endif /* E1K_WITH_RXD_CACHE */
7046
7047 cDescs = TDLEN / sizeof(E1KTXDESC);
7048 uint32_t tdh = TDH;
7049 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7050 for (i = 0; i < cDescs; ++i)
7051 {
7052 E1KTXDESC desc;
7053 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7054 &desc, sizeof(desc));
7055 if (i == tdh)
7056 pHlp->pfnPrintf(pHlp, ">>> ");
7057 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7058 }
7059#ifdef E1K_WITH_TXD_CACHE
7060 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7061 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE);
7062 if (tdh > pThis->iTxDCurrent)
7063 tdh -= pThis->iTxDCurrent;
7064 else
7065 tdh = cDescs + tdh - pThis->iTxDCurrent;
7066 for (i = 0; i < pThis->nTxDFetched; ++i)
7067 {
7068 if (i == pThis->iTxDCurrent)
7069 pHlp->pfnPrintf(pHlp, ">>> ");
7070 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7071 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7072 &pThis->aTxDescriptors[i]);
7073 }
7074#endif /* E1K_WITH_TXD_CACHE */
7075
7076
7077#ifdef E1K_INT_STATS
7078 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pThis->uStatIntTry);
7079 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pThis->uStatInt);
7080 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pThis->uStatIntLower);
7081 pHlp->pfnPrintf(pHlp, "Interrupts delayed: %d\n", pThis->uStatIntDly);
7082 pHlp->pfnPrintf(pHlp, "Disabled delayed: %d\n", pThis->uStatDisDly);
7083 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pThis->uStatIntSkip);
7084 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pThis->uStatIntMasked);
7085 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pThis->uStatIntEarly);
7086 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pThis->uStatIntLate);
7087 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pThis->iStatIntLost);
7088 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pThis->uStatIntRx);
7089 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pThis->uStatIntTx);
7090 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pThis->uStatIntICS);
7091 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pThis->uStatIntRDTR);
7092 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pThis->uStatIntRXDMT0);
7093 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pThis->uStatIntTXQE);
7094 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pThis->uStatTxIDE);
7095 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pThis->uStatTxDelayed);
7096 pHlp->pfnPrintf(pHlp, "TX delayed expired: %d\n", pThis->uStatTxDelayExp);
7097 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pThis->uStatTxNoRS);
7098 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pThis->uStatTAD);
7099 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pThis->uStatTID);
7100 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pThis->uStatRAD);
7101 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pThis->uStatRID);
7102 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pThis->uStatDescCtx);
7103 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pThis->uStatDescDat);
7104 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pThis->uStatDescLeg);
7105 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pThis->uStatRxFrm);
7106 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pThis->uStatTxFrm);
7107 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pThis->uStatTx1514);
7108 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pThis->uStatTx2962);
7109 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pThis->uStatTx4410);
7110 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pThis->uStatTx5858);
7111 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pThis->uStatTx7306);
7112 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pThis->uStatTx8754);
7113 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pThis->uStatTx16384);
7114 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pThis->uStatTx32768);
7115 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pThis->uStatTxLarge);
7116#endif /* E1K_INT_STATS */
7117
7118 e1kCsLeave(pThis);
7119}
7120
7121
7122
7123/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
7124
7125/**
7126 * Detach notification.
7127 *
7128 * One port on the network card has been disconnected from the network.
7129 *
7130 * @param pDevIns The device instance.
7131 * @param iLUN The logical unit which is being detached.
7132 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7133 */
7134static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7135{
7136 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7137 Log(("%s e1kR3Detach:\n", pThis->szPrf));
7138
7139 AssertLogRelReturnVoid(iLUN == 0);
7140
7141 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7142
7143 /** @todo: r=pritesh still need to check if I missed
7144 * cleaning anything up in this function
7145 */
7146
7147 /*
7148 * Zero some important members.
7149 */
7150 pThis->pDrvBase = NULL;
7151 pThis->pDrvR3 = NULL;
7152 pThis->pDrvR0 = NIL_RTR0PTR;
7153 pThis->pDrvRC = NIL_RTRCPTR;
7154
7155 PDMCritSectLeave(&pThis->cs);
7156}
7157
7158/**
7159 * Attach notification.
7160 *
7161 * One port on the network card has been connected to a network.
7162 *
7163 * @returns VBox status code.
7164 * @param pDevIns The device instance.
7165 * @param iLUN The logical unit which is being attached.
7166 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7167 *
7168 * @remarks This code path is not used during construction.
7169 */
7170static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7171{
7172 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7173 LogFlow(("%s e1kR3Attach:\n", pThis->szPrf));
7174
7175 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7176
7177 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7178
7179 /*
7180 * Attach the driver.
7181 */
7182 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7183 if (RT_SUCCESS(rc))
7184 {
7185 if (rc == VINF_NAT_DNS)
7186 {
7187#ifdef RT_OS_LINUX
7188 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7189 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7190#else
7191 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7192 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7193#endif
7194 }
7195 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7196 AssertMsgStmt(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7197 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7198 if (RT_SUCCESS(rc))
7199 {
7200 PPDMIBASER0 pBaseR0 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0);
7201 pThis->pDrvR0 = pBaseR0 ? pBaseR0->pfnQueryInterface(pBaseR0, PDMINETWORKUP_IID) : NIL_RTR0PTR;
7202
7203 PPDMIBASERC pBaseRC = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC);
7204 pThis->pDrvRC = pBaseRC ? pBaseRC->pfnQueryInterface(pBaseRC, PDMINETWORKUP_IID) : NIL_RTRCPTR;
7205 }
7206 }
7207 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7208 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7209 {
7210 /* This should never happen because this function is not called
7211 * if there is no driver to attach! */
7212 Log(("%s No attached driver!\n", pThis->szPrf));
7213 }
7214
7215 /*
7216 * Temporarily set the link down if it was up, so that the guest
7217 * will know that we have changed the configuration of the
7218 * network card.
7219 */
7220 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7221 e1kR3LinkDownTemp(pThis);
7222
7223 PDMCritSectLeave(&pThis->cs);
7224 return rc;
7225
7226}
7227
7228/**
7229 * @copydoc FNPDMDEVPOWEROFF
7230 */
7231static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7232{
7233 /* Poke thread waiting for buffer space. */
7234 e1kWakeupReceive(pDevIns);
7235}
7236
7237/**
7238 * @copydoc FNPDMDEVRESET
7239 */
7240static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7241{
7242 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7243#ifdef E1K_TX_DELAY
7244 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
7245#endif /* E1K_TX_DELAY */
7246 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
7247 e1kCancelTimer(pThis, pThis->CTX_SUFF(pLUTimer));
7248 e1kXmitFreeBuf(pThis);
7249 pThis->u16TxPktLen = 0;
7250 pThis->fIPcsum = false;
7251 pThis->fTCPcsum = false;
7252 pThis->fIntMaskUsed = false;
7253 pThis->fDelayInts = false;
7254 pThis->fLocked = false;
7255 pThis->u64AckedAt = 0;
7256 e1kHardReset(pThis);
7257}
7258
7259/**
7260 * @copydoc FNPDMDEVSUSPEND
7261 */
7262static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7263{
7264 /* Poke thread waiting for buffer space. */
7265 e1kWakeupReceive(pDevIns);
7266}
7267
7268/**
7269 * Device relocation callback.
7270 *
7271 * When this callback is called the device instance data, and the GC
7272 * component if the device has one, is being relocated and/or the selectors
7273 * have been changed. The device must use this chance to perform the
7274 * necessary pointer relocations and data updates.
7275 *
7276 * Before the GC code is executed for the first time, this function will be
7277 * called with a 0 delta so that GC pointer calculations can be done in one place.
7278 *
7279 * @param pDevIns Pointer to the device instance.
7280 * @param offDelta The relocation delta relative to the old location.
7281 *
7282 * @remark A relocation CANNOT fail.
7283 */
7284static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7285{
7286 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7287 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7288 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7289 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7290#ifdef E1K_USE_RX_TIMERS
7291 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7292 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7293#endif /* E1K_USE_RX_TIMERS */
7294#ifdef E1K_USE_TX_TIMERS
7295 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7296# ifndef E1K_NO_TAD
7297 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7298# endif /* E1K_NO_TAD */
7299#endif /* E1K_USE_TX_TIMERS */
7300#ifdef E1K_TX_DELAY
7301 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7302#endif /* E1K_TX_DELAY */
7303 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7304 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7305}
7306
7307/**
7308 * Destruct a device instance.
7309 *
7310 * We need to free non-VM resources only.
7311 *
7312 * @returns VBox status.
7313 * @param pDevIns The device instance data.
7314 * @thread EMT
7315 */
7316static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7317{
7318 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7319 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7320
7321 e1kDumpState(pThis);
7322 E1kLog(("%s Destroying instance\n", pThis->szPrf));
7323 if (PDMCritSectIsInitialized(&pThis->cs))
7324 {
7325 if (pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
7326 {
7327 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
7328 RTSemEventDestroy(pThis->hEventMoreRxDescAvail);
7329 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7330 }
7331#ifdef E1K_WITH_TX_CS
7332 PDMR3CritSectDelete(&pThis->csTx);
7333#endif /* E1K_WITH_TX_CS */
7334 PDMR3CritSectDelete(&pThis->csRx);
7335 PDMR3CritSectDelete(&pThis->cs);
7336 }
7337 return VINF_SUCCESS;
7338}
7339
7340
7341/**
7342 * Set PCI configuration space registers.
7343 *
7344 * @param pPciDev Pointer to the PCI device structure.
 * @param eChip   The chip type (selects the PCI IDs from g_Chips).
7345 * @thread EMT
7346 */
7347static DECLCALLBACK(void) e1kConfigurePciDev(PPCIDEVICE pPciDev, E1KCHIP eChip)
7348{
7349 Assert(eChip < RT_ELEMENTS(g_Chips));
7350 /* Configure PCI Device, assume 32-bit mode ******************************/
7351 PCIDevSetVendorId(pPciDev, g_Chips[eChip].uPCIVendorId);
7352 PCIDevSetDeviceId(pPciDev, g_Chips[eChip].uPCIDeviceId);
7353 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_Chips[eChip].uPCISubsystemVendorId);
7354 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_Chips[eChip].uPCISubsystemId);
7355
7356 PCIDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7357 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7358 PCIDevSetWord( pPciDev, VBOX_PCI_STATUS,
7359 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7360 /* Stepping A2 */
7361 PCIDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7362 /* Ethernet adapter */
7363 PCIDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7364 PCIDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7365 /* normal single function Ethernet controller */
7366 PCIDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7367 /* Memory Register Base Address */
7368 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7369 /* Memory Flash Base Address */
7370 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7371 /* IO Register Base Address */
7372 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7373 /* Expansion ROM Base Address */
7374 PCIDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7375 /* Capabilities Pointer */
7376 PCIDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7377 /* Interrupt Pin: INTA# */
7378 PCIDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7379 /* Max_Lat/Min_Gnt: very high priority and time slice */
7380 PCIDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7381 PCIDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7382
7383 /* PCI Power Management Registers ****************************************/
7384 /* Capability ID: PCI Power Management Registers */
7385 PCIDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7386 /* Next Item Pointer: PCI-X */
7387 PCIDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7388 /* Power Management Capabilities: PM disabled, DSI */
7389 PCIDevSetWord( pPciDev, 0xDC + 2,
7390 0x0002 | VBOX_PCI_PM_CAP_DSI);
7391 /* Power Management Control / Status Register: PM disabled */
7392 PCIDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7393 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7394 PCIDevSetByte( pPciDev, 0xDC + 6, 0x00);
7395 /* Data Register: PM disabled, always 0 */
7396 PCIDevSetByte( pPciDev, 0xDC + 7, 0x00);
7397
7398 /* PCI-X Configuration Registers *****************************************/
7399 /* Capability ID: PCI-X Configuration Registers */
7400 PCIDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7401#ifdef E1K_WITH_MSI
7402 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x80);
7403#else
7404 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7405 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7406#endif
7407 /* PCI-X Command: Enable Relaxed Ordering */
7408 PCIDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7409 /* PCI-X Status: 32-bit, 66MHz */
7410 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7411 PCIDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
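    /*
     * Resulting capability chain (sketch of the writes above):
     *   Capabilities Pointer (0x34) -> 0xDC PCI Power Management
     *                                  0xE4 PCI-X
     *                                  0x80 MSI (only with E1K_WITH_MSI)
     */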
7412}
7413
7414/**
7415 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7416 */
7417static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7418{
7419 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7420 int rc;
7421 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7422
7423 /*
7424 * Initialize the instance data (state).
7425 * Note! Caller has initialized it to ZERO already.
7426 */
7427 RTStrPrintf(pThis->szPrf, sizeof(pThis->szPrf), "E1000#%d", iInstance);
7428 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", pThis->szPrf, sizeof(E1KRXDESC)));
7429 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7430 pThis->pDevInsR3 = pDevIns;
7431 pThis->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
7432 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7433 pThis->u16TxPktLen = 0;
7434 pThis->fIPcsum = false;
7435 pThis->fTCPcsum = false;
7436 pThis->fIntMaskUsed = false;
7437 pThis->fDelayInts = false;
7438 pThis->fLocked = false;
7439 pThis->u64AckedAt = 0;
7440 pThis->led.u32Magic = PDMLED_MAGIC;
7441 pThis->u32PktNo = 1;
7442
7443 /* Interfaces */
7444 pThis->IBase.pfnQueryInterface = e1kR3QueryInterface;
7445
7446 pThis->INetworkDown.pfnWaitReceiveAvail = e1kR3NetworkDown_WaitReceiveAvail;
7447 pThis->INetworkDown.pfnReceive = e1kR3NetworkDown_Receive;
7448 pThis->INetworkDown.pfnXmitPending = e1kR3NetworkDown_XmitPending;
7449
7450 pThis->ILeds.pfnQueryStatusLed = e1kR3QueryStatusLed;
7451
7452 pThis->INetworkConfig.pfnGetMac = e1kR3GetMac;
7453 pThis->INetworkConfig.pfnGetLinkState = e1kR3GetLinkState;
7454 pThis->INetworkConfig.pfnSetLinkState = e1kR3SetLinkState;
7455
7456 /*
7457 * Internal validations.
7458 */
7459 for (uint32_t iReg = 1; iReg < E1K_NUM_OF_BINARY_SEARCHABLE; iReg++)
7460 AssertLogRelMsgReturn( g_aE1kRegMap[iReg].offset > g_aE1kRegMap[iReg - 1].offset
7461 && g_aE1kRegMap[iReg].offset + g_aE1kRegMap[iReg].size
7462 >= g_aE1kRegMap[iReg - 1].offset + g_aE1kRegMap[iReg - 1].size,
7463 ("%s@%#xLB%#x vs %s@%#xLB%#x\n",
7464 g_aE1kRegMap[iReg].abbrev, g_aE1kRegMap[iReg].offset, g_aE1kRegMap[iReg].size,
7465 g_aE1kRegMap[iReg - 1].abbrev, g_aE1kRegMap[iReg - 1].offset, g_aE1kRegMap[iReg - 1].size),
7466 VERR_INTERNAL_ERROR_4);
7467
7468 /*
7469 * Validate configuration.
7470 */
7471 if (!CFGMR3AreValuesValid(pCfg, "MAC\0" "CableConnected\0" "AdapterType\0"
7472 "LineSpeed\0" "GCEnabled\0" "R0Enabled\0"
7473 "EthernetCRC\0" "GSOEnabled\0" "LinkUpDelay\0"))
7474 return PDMDEV_SET_ERROR(pDevIns, VERR_PDM_DEVINS_UNKNOWN_CFG_VALUES,
7475 N_("Invalid configuration for E1000 device"));
7476
7477 /** @todo: LineSpeed unused! */
7478
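    /*
     * Illustrative CFGM layout consumed below (values are examples only):
     *   MAC            = <6 bytes, e.g. 08 00 27 xx xx xx>
     *   CableConnected = true
     *   AdapterType    = 0        ; E1KCHIP value, see g_Chips
     *   GCEnabled      = true
     *   R0Enabled      = true
     *   EthernetCRC    = true
     *   GSOEnabled     = true
     *   LinkUpDelay    = 5000     ; milliseconds
     *   LineSpeed        ; accepted but currently unused (see @todo above)
     */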
7479 pThis->fR0Enabled = true;
7480 pThis->fRCEnabled = true;
7481 pThis->fEthernetCRC = true;
7482 pThis->fGSOEnabled = true;
7483
7484 /* Get config params */
7485 rc = CFGMR3QueryBytes(pCfg, "MAC", pThis->macConfigured.au8, sizeof(pThis->macConfigured.au8));
7486 if (RT_FAILURE(rc))
7487 return PDMDEV_SET_ERROR(pDevIns, rc,
7488 N_("Configuration error: Failed to get MAC address"));
7489 rc = CFGMR3QueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
7490 if (RT_FAILURE(rc))
7491 return PDMDEV_SET_ERROR(pDevIns, rc,
7492 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7493 rc = CFGMR3QueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
7494 if (RT_FAILURE(rc))
7495 return PDMDEV_SET_ERROR(pDevIns, rc,
7496 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7497 Assert(pThis->eChip <= E1K_CHIP_82545EM);
7498 rc = CFGMR3QueryBoolDef(pCfg, "GCEnabled", &pThis->fRCEnabled, true);
7499 if (RT_FAILURE(rc))
7500 return PDMDEV_SET_ERROR(pDevIns, rc,
7501 N_("Configuration error: Failed to get the value of 'GCEnabled'"));
7502
7503 rc = CFGMR3QueryBoolDef(pCfg, "R0Enabled", &pThis->fR0Enabled, true);
7504 if (RT_FAILURE(rc))
7505 return PDMDEV_SET_ERROR(pDevIns, rc,
7506 N_("Configuration error: Failed to get the value of 'R0Enabled'"));
7507
7508 rc = CFGMR3QueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
7509 if (RT_FAILURE(rc))
7510 return PDMDEV_SET_ERROR(pDevIns, rc,
7511 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7512
7513 rc = CFGMR3QueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
7514 if (RT_FAILURE(rc))
7515 return PDMDEV_SET_ERROR(pDevIns, rc,
7516 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
7517
7518 rc = CFGMR3QueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 5000); /* ms */
7519 if (RT_FAILURE(rc))
7520 return PDMDEV_SET_ERROR(pDevIns, rc,
7521 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
7522 Assert(pThis->cMsLinkUpDelay <= 300000); /* less than 5 minutes */
7523 if (pThis->cMsLinkUpDelay > 5000)
7524 LogRel(("%s WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
7525 else if (pThis->cMsLinkUpDelay == 0)
7526 LogRel(("%s WARNING! Link up delay is disabled!\n", pThis->szPrf));
7527
7528 E1kLog(("%s Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s R0=%s GC=%s\n", pThis->szPrf,
7529 g_Chips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
7530 pThis->fEthernetCRC ? "on" : "off",
7531 pThis->fGSOEnabled ? "enabled" : "disabled",
7532 pThis->fR0Enabled ? "enabled" : "disabled",
7533 pThis->fRCEnabled ? "enabled" : "disabled"));
7534
7535 /* Initialize the EEPROM. */
7536 pThis->eeprom.init(pThis->macConfigured);
7537
7538 /* Initialize internal PHY. */
7539 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
7540 Phy::setLinkStatus(&pThis->phy, pThis->fCableConnected);
7541
7542 /* Initialize critical sections. We do our own locking. */
7543 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
7544 AssertRCReturn(rc, rc);
7545
7546 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "E1000#%d", iInstance);
7547 if (RT_FAILURE(rc))
7548 return rc;
7549 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "E1000#%dRX", iInstance);
7550 if (RT_FAILURE(rc))
7551 return rc;
7552#ifdef E1K_WITH_TX_CS
7553 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "E1000#%dTX", iInstance);
7554 if (RT_FAILURE(rc))
7555 return rc;
7556#endif /* E1K_WITH_TX_CS */
7557
7558 /* Saved state registration. */
7559 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
7560 NULL, e1kLiveExec, NULL,
7561 e1kSavePrep, e1kSaveExec, NULL,
7562 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
7563 if (RT_FAILURE(rc))
7564 return rc;
7565
7566 /* Set PCI config registers and register ourselves with the PCI bus. */
7567 e1kConfigurePciDev(&pThis->pciDevice, pThis->eChip);
7568 rc = PDMDevHlpPCIRegister(pDevIns, &pThis->pciDevice);
7569 if (RT_FAILURE(rc))
7570 return rc;
7571
7572#ifdef E1K_WITH_MSI
7573 PDMMSIREG MsiReg;
7574 RT_ZERO(MsiReg);
7575 MsiReg.cMsiVectors = 1;
7576 MsiReg.iMsiCapOffset = 0x80;
7577 MsiReg.iMsiNextOffset = 0x0;
7578 MsiReg.fMsi64bit = false;
7579 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
7580 AssertRCReturn(rc, rc);
7581#endif
7582
7583
7584 /* Map our registers to memory space (region 0, see e1kConfigurePciDev) */
7585 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, e1kMap);
7586 if (RT_FAILURE(rc))
7587 return rc;
7588 /* Map our registers to IO space (region 2, see e1kConfigurePciDev) */
7589 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, E1K_IOPORT_SIZE, PCI_ADDRESS_SPACE_IO, e1kMap);
7590 if (RT_FAILURE(rc))
7591 return rc;
7592
7593 /* Create transmit queue */
7594 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7595 e1kTxQueueConsumer, true, "E1000-Xmit", &pThis->pTxQueueR3);
7596 if (RT_FAILURE(rc))
7597 return rc;
7598 pThis->pTxQueueR0 = PDMQueueR0Ptr(pThis->pTxQueueR3);
7599 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7600
7601 /* Create the RX notifier signaller. */
7602 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7603 e1kCanRxQueueConsumer, true, "E1000-Rcv", &pThis->pCanRxQueueR3);
7604 if (RT_FAILURE(rc))
7605 return rc;
7606 pThis->pCanRxQueueR0 = PDMQueueR0Ptr(pThis->pCanRxQueueR3);
7607 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7608
7609#ifdef E1K_TX_DELAY
7610 /* Create Transmit Delay Timer */
7611 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxDelayTimer, pThis,
7612 TMTIMER_FLAGS_NO_CRIT_SECT,
7613 "E1000 Transmit Delay Timer", &pThis->pTXDTimerR3);
7614 if (RT_FAILURE(rc))
7615 return rc;
7616 pThis->pTXDTimerR0 = TMTimerR0Ptr(pThis->pTXDTimerR3);
7617 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7618 TMR3TimerSetCritSect(pThis->pTXDTimerR3, &pThis->csTx);
7619#endif /* E1K_TX_DELAY */
7620
7621#ifdef E1K_USE_TX_TIMERS
7622 /* Create Transmit Interrupt Delay Timer */
7623 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxIntDelayTimer, pThis,
7624 TMTIMER_FLAGS_NO_CRIT_SECT,
7625 "E1000 Transmit Interrupt Delay Timer", &pThis->pTIDTimerR3);
7626 if (RT_FAILURE(rc))
7627 return rc;
7628 pThis->pTIDTimerR0 = TMTimerR0Ptr(pThis->pTIDTimerR3);
7629 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7630
7631# ifndef E1K_NO_TAD
7632 /* Create Transmit Absolute Delay Timer */
7633 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxAbsDelayTimer, pThis,
7634 TMTIMER_FLAGS_NO_CRIT_SECT,
7635 "E1000 Transmit Absolute Delay Timer", &pThis->pTADTimerR3);
7636 if (RT_FAILURE(rc))
7637 return rc;
7638 pThis->pTADTimerR0 = TMTimerR0Ptr(pThis->pTADTimerR3);
7639 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7640# endif /* E1K_NO_TAD */
7641#endif /* E1K_USE_TX_TIMERS */
7642
7643#ifdef E1K_USE_RX_TIMERS
7644 /* Create Receive Interrupt Delay Timer */
7645 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxIntDelayTimer, pThis,
7646 TMTIMER_FLAGS_NO_CRIT_SECT,
7647 "E1000 Receive Interrupt Delay Timer", &pThis->pRIDTimerR3);
7648 if (RT_FAILURE(rc))
7649 return rc;
7650 pThis->pRIDTimerR0 = TMTimerR0Ptr(pThis->pRIDTimerR3);
7651 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7652
7653 /* Create Receive Absolute Delay Timer */
7654 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxAbsDelayTimer, pThis,
7655 TMTIMER_FLAGS_NO_CRIT_SECT,
7656 "E1000 Receive Absolute Delay Timer", &pThis->pRADTimerR3);
7657 if (RT_FAILURE(rc))
7658 return rc;
7659 pThis->pRADTimerR0 = TMTimerR0Ptr(pThis->pRADTimerR3);
7660 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7661#endif /* E1K_USE_RX_TIMERS */
7662
7663 /* Create Late Interrupt Timer */
7664 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLateIntTimer, pThis,
7665 TMTIMER_FLAGS_NO_CRIT_SECT,
7666 "E1000 Late Interrupt Timer", &pThis->pIntTimerR3);
7667 if (RT_FAILURE(rc))
7668 return rc;
7669 pThis->pIntTimerR0 = TMTimerR0Ptr(pThis->pIntTimerR3);
7670 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7671
7672 /* Create Link Up Timer */
7673 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLinkUpTimer, pThis,
7674 TMTIMER_FLAGS_NO_CRIT_SECT,
7675 "E1000 Link Up Timer", &pThis->pLUTimerR3);
7676 if (RT_FAILURE(rc))
7677 return rc;
7678 pThis->pLUTimerR0 = TMTimerR0Ptr(pThis->pLUTimerR3);
7679 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7680
7681 /* Register the info item */
7682 char szTmp[20];
7683 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
7684 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
7685
7686 /* Status driver */
7687 PPDMIBASE pBase;
7688 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThis->IBase, &pBase, "Status Port");
7689 if (RT_FAILURE(rc))
7690 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
7691 pThis->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
7692
7693 /* Network driver */
7694 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7695 if (RT_SUCCESS(rc))
7696 {
7697 if (rc == VINF_NAT_DNS)
7698 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7699                                       N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning, the guest will not be able to perform nameserver lookups and will probably observe delays when trying to do so"));
7700 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7701 AssertMsgReturn(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
7702
7703 pThis->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7704 pThis->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7705 }
7706 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7707 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7708 {
7709 /* No error! */
7710 E1kLog(("%s This adapter is not attached to any network!\n", pThis->szPrf));
7711 }
7712 else
7713 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
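    /*
     * LUN 0 carries the actual network attachment (NAT, bridged, ...), while
     * the status LUN above only exposes the LED connectors.  Running without
     * a driver on LUN 0 is a valid "not attached" configuration, which is why
     * the two error codes above are deliberately not treated as failures.
     */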
7714
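    /* Signalled when the guest makes more RX descriptors available; the
     * receive path blocks on this event while it is out of buffers (see the
     * RX overflow handling elsewhere in this file). */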
7715 rc = RTSemEventCreate(&pThis->hEventMoreRxDescAvail);
7716 if (RT_FAILURE(rc))
7717 return rc;
7718
7719 rc = e1kInitDebugHelpers();
7720 if (RT_FAILURE(rc))
7721 return rc;
7722
7723 e1kHardReset(pThis);
7724
7725 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Public/Net/E1k%u/BytesReceived", iInstance);
7726 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Public/Net/E1k%u/BytesTransmitted", iInstance);
7727
7728 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
7729 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
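    /* The byte counters above are registered twice on purpose: once under
     * /Public/Net/ (presumably the tree consumed by the frontends for network
     * metrics) and once under /Devices/ next to the other device statistics. */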
7730
7731#if defined(VBOX_WITH_STATISTICS)
7732 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
7733 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
7734 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
7735 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
7736 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
7737 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
7738 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
7739 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
7740 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
7741 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
7742 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
7743    PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occurred", iInstance);
7744 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
7745 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
7746 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
7747 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
7748 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
7749 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
7750 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
7751 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflowWakeup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups", "/Devices/E1k%d/RxOverflowWakeup", iInstance);
7752 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
7753 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
7754 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
7755 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
7756
7757    PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors", "/Devices/E1k%d/TxDesc/ContextNormal", iInstance);
7758 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
7759 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
7760 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
7761 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
7762 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
7763 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
7764 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
7765 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
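    /* One read and one write counter per device register, keyed by the
     * register abbreviations from g_aE1kRegMap (e.g. .../Regs/CTRL-Reads). */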
7766 for (unsigned iReg = 0; iReg < E1K_NUM_OF_REGS; iReg++)
7767 {
7768 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7769 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Reads", iInstance, g_aE1kRegMap[iReg].abbrev);
7770 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7771 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Writes", iInstance, g_aE1kRegMap[iReg].abbrev);
7772 }
7773#endif /* VBOX_WITH_STATISTICS */
7774
7775#ifdef E1K_INT_STATS
7776 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "u64ArmedAt", "/Devices/E1k%d/u64ArmedAt", iInstance);
7777 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatMaxTxDelay", "/Devices/E1k%d/uStatMaxTxDelay", iInstance);
7778 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatInt, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatInt", "/Devices/E1k%d/uStatInt", iInstance);
7779 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTry", "/Devices/E1k%d/uStatIntTry", iInstance);
7780 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLower", "/Devices/E1k%d/uStatIntLower", iInstance);
7781 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntDly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntDly", "/Devices/E1k%d/uStatIntDly", iInstance);
7782 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLost", "/Devices/E1k%d/iStatIntLost", iInstance);
7783 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLostOne", "/Devices/E1k%d/iStatIntLostOne", iInstance);
7784 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDisDly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDisDly", "/Devices/E1k%d/uStatDisDly", iInstance);
7785 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntSkip", "/Devices/E1k%d/uStatIntSkip", iInstance);
7786 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLate", "/Devices/E1k%d/uStatIntLate", iInstance);
7787 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntMasked", "/Devices/E1k%d/uStatIntMasked", iInstance);
7788 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntEarly", "/Devices/E1k%d/uStatIntEarly", iInstance);
7789 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRx", "/Devices/E1k%d/uStatIntRx", iInstance);
7790 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTx", "/Devices/E1k%d/uStatIntTx", iInstance);
7791 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntICS", "/Devices/E1k%d/uStatIntICS", iInstance);
7792 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRDTR", "/Devices/E1k%d/uStatIntRDTR", iInstance);
7793 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRXDMT0", "/Devices/E1k%d/uStatIntRXDMT0", iInstance);
7794 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTXQE", "/Devices/E1k%d/uStatIntTXQE", iInstance);
7795 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxNoRS", "/Devices/E1k%d/uStatTxNoRS", iInstance);
7796 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxIDE", "/Devices/E1k%d/uStatTxIDE", iInstance);
7797 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayed", "/Devices/E1k%d/uStatTxDelayed", iInstance);
7798 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayExp", "/Devices/E1k%d/uStatTxDelayExp", iInstance);
7799 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTAD", "/Devices/E1k%d/uStatTAD", iInstance);
7800 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTID", "/Devices/E1k%d/uStatTID", iInstance);
7801 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRAD", "/Devices/E1k%d/uStatRAD", iInstance);
7802 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRID", "/Devices/E1k%d/uStatRID", iInstance);
7803 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRxFrm", "/Devices/E1k%d/uStatRxFrm", iInstance);
7804 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxFrm", "/Devices/E1k%d/uStatTxFrm", iInstance);
7805 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescCtx", "/Devices/E1k%d/uStatDescCtx", iInstance);
7806 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescDat", "/Devices/E1k%d/uStatDescDat", iInstance);
7807 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescLeg", "/Devices/E1k%d/uStatDescLeg", iInstance);
7808 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx1514", "/Devices/E1k%d/uStatTx1514", iInstance);
7809 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx2962", "/Devices/E1k%d/uStatTx2962", iInstance);
7810 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx4410", "/Devices/E1k%d/uStatTx4410", iInstance);
7811 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx5858", "/Devices/E1k%d/uStatTx5858", iInstance);
7812 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx7306", "/Devices/E1k%d/uStatTx7306", iInstance);
7813 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx8754", "/Devices/E1k%d/uStatTx8754", iInstance);
7814 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx16384", "/Devices/E1k%d/uStatTx16384", iInstance);
7815 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx32768", "/Devices/E1k%d/uStatTx32768", iInstance);
7816 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxLarge", "/Devices/E1k%d/uStatTxLarge", iInstance);
7817#endif /* E1K_INT_STATS */
7818
7819 return VINF_SUCCESS;
7820}
7821
7822/**
7823 * The device registration structure.
7824 */
7825const PDMDEVREG g_DeviceE1000 =
7826{
7827 /* Structure version. PDM_DEVREG_VERSION defines the current version. */
7828 PDM_DEVREG_VERSION,
7829 /* Device name. */
7830 "e1000",
7831    /* Name of guest context module (no path).
7832     * Only evaluated if PDM_DEVREG_FLAGS_RC is set. */
7833    "VBoxDDGC.gc",
7834    /* Name of ring-0 module (no path).
7835     * Only evaluated if PDM_DEVREG_FLAGS_R0 is set. */
7836    "VBoxDDR0.r0",
7837 /* The description of the device. The UTF-8 string pointed to shall, like this structure,
7838 * remain unchanged from registration till VM destruction. */
7839 "Intel PRO/1000 MT Desktop Ethernet.\n",
7840
7841 /* Flags, combination of the PDM_DEVREG_FLAGS_* \#defines. */
7842 PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0,
7843 /* Device class(es), combination of the PDM_DEVREG_CLASS_* \#defines. */
7844 PDM_DEVREG_CLASS_NETWORK,
7845 /* Maximum number of instances (per VM). */
7846 ~0U,
7847 /* Size of the instance data. */
7848 sizeof(E1KSTATE),
7849
7850 /* pfnConstruct */
7851 e1kR3Construct,
7852 /* pfnDestruct */
7853 e1kR3Destruct,
7854 /* pfnRelocate */
7855 e1kR3Relocate,
7856 /* pfnMemSetup */
7857 NULL,
7858 /* pfnPowerOn */
7859 NULL,
7860 /* pfnReset */
7861 e1kR3Reset,
7862 /* pfnSuspend */
7863 e1kR3Suspend,
7864 /* pfnResume */
7865 NULL,
7866 /* pfnAttach */
7867 e1kR3Attach,
7868    /* pfnDetach */
7869 e1kR3Detach,
7870 /* pfnQueryInterface */
7871 NULL,
7872 /* pfnInitComplete */
7873 NULL,
7874 /* pfnPowerOff */
7875 e1kR3PowerOff,
7876 /* pfnSoftReset */
7877 NULL,
7878
7879 /* u32VersionEnd */
7880 PDM_DEVREG_VERSION
7881};
7882
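/*
 * Usage sketch (not part of this file): the guest-visible adapter type is
 * selected by the frontend, e.g.
 *
 *   VBoxManage modifyvm "MyVM" --nictype1 82540EM
 *
 * after which PDM instantiates the "e1000" device described by the
 * registration structure above.
 */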
7883#endif /* IN_RING3 */
7884#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */