VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp @ 43092

Last change on this file since 43092 was 43090, checked in by vboxsync on 2012-08-30

Network/e1000: OS/2 regression fix (#6217)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 301.9 KB
1/* $Id: DevE1000.cpp 43090 2012-08-30 09:11:57Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2011 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28#define LOG_GROUP LOG_GROUP_DEV_E1000
29
30//#define E1kLogRel(a) LogRel(a)
31#define E1kLogRel(a)
32
33/* Options *******************************************************************/
34/*
35 * E1K_INIT_RA0 forces E1000 to set the first entry of the Receive Address
36 * filter table to the MAC address obtained from CFGM. Most guests read the
37 * MAC address from the EEPROM and write it to RA[0] explicitly, but Mac OS X
38 * seems to depend on it being set already (see @bugref{4657}).
39 */
40#define E1K_INIT_RA0
41/*
42 * E1K_LSC_ON_SLU causes E1000 to generate a Link Status Change interrupt when
43 * the guest driver brings up the link via the STATUS.LU bit. Again, the only
44 * guest that requires it is Mac OS X (see @bugref{4657}).
45 */
46#define E1K_LSC_ON_SLU
47/*
48 * E1K_ITR_ENABLED reduces the number of interrupts generated by E1000 if a
49 * guest driver requests it by writing a non-zero value to the Interrupt
50 * Throttling Register (see section 13.4.18 in "8254x Family of Gigabit
51 * Ethernet Controllers Software Developer’s Manual").
52 */
53//#define E1K_ITR_ENABLED
54/*
55 * E1K_TX_DELAY aims to improve the guest-to-host transfer rate for TCP streams
56 * by preventing packets from being sent immediately. It allows several packets
57 * to be sent in one batch, reducing the number of acknowledgments. Note that it
58 * effectively disables the R0 TX path, forcing all sending to happen in R3.
59 */
60//#define E1K_TX_DELAY 150
61/*
62 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
63 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
64 * register. Enabling it showed no positive effect on existing guests, so it
65 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
66 * Ethernet Controllers Software Developer’s Manual" for more detailed
67 * explanation.
68 */
69//#define E1K_USE_TX_TIMERS
70/*
71 * E1K_NO_TAD disables one of two timers enabled by E1K_USE_TX_TIMERS, the
72 * Transmit Absolute Delay time. This timer sets the maximum time interval
73 * during which TX interrupts can be postponed (delayed). It has no effect
74 * if E1K_USE_TX_TIMERS is not defined.
75 */
76//#define E1K_NO_TAD
77/*
78 * E1K_REL_DEBUG enables debug logging at levels 1, 2 and 3 (E1kLog, E1kLog2, E1kLog3) in release builds.
79 */
80//#define E1K_REL_DEBUG
81/*
82 * E1K_INT_STATS enables collection of internal statistics used for
83 * debugging of delayed interrupts, etc.
84 */
85//#define E1K_INT_STATS
86/*
87 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
88 */
89//#define E1K_WITH_MSI
90/*
91 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
92 */
93#define E1K_WITH_TX_CS
94/*
95 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
96 * single physical memory read (or two if the fetch wraps around the end of the
97 * TX descriptor ring). It is required for proper functioning of bandwidth
98 * resource control, as it allows the exact sizes of packets to be computed
99 * prior to allocating their buffers (see @bugref{5582}).
100 */
101#define E1K_WITH_TXD_CACHE
102/*
103 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
104 * single physical memory read (or two if the fetch wraps around the end of the
105 * RX descriptor ring). Intel's packet driver for DOS needs this option in
106 * order to work properly (see @bugref{6217}).
107 */
108#define E1K_WITH_RXD_CACHE
109/* End of Options ************************************************************/
110
111#ifdef E1K_WITH_TXD_CACHE
112/*
113 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
114 * in the state structure. It limits the number of descriptors loaded in one
115 * batch read. For example, a Linux guest may use up to 20 descriptors per
116 * TSE packet.
117 */
118#define E1K_TXD_CACHE_SIZE 32u
119#endif /* E1K_WITH_TXD_CACHE */
120
121#ifdef E1K_WITH_RXD_CACHE
122/*
123 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
124 * in the state structure. It limits the number of descriptors loaded in one
125 * batch read. For example, a Windows XP guest adds 15 RX descriptors at a time.
126 */
127#define E1K_RXD_CACHE_SIZE 16u
128#endif /* E1K_WITH_RXD_CACHE */
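/*
 * Editorial sketch (not part of the original source): the TX/RX descriptor
 * caches above are filled with one guest-physical read, or two reads when the
 * fetch wraps around the end of the descriptor ring. The split is plain
 * ring-buffer arithmetic; the helper below only illustrates it and its name
 * is hypothetical.
 */
static inline unsigned e1kExampleFirstChunk(unsigned iHead, unsigned cRingSize, unsigned cToFetch)
{
    /* Number of descriptors left before the ring wraps back to index 0. */
    unsigned cTillWrap = cRingSize - iHead;
    /* The first read covers at most that many; any remainder requires a
     * second read starting at ring index 0. */
    return cToFetch <= cTillWrap ? cToFetch : cTillWrap;
}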
129
130#include <iprt/crc.h>
131#include <iprt/ctype.h>
132#include <iprt/net.h>
133#include <iprt/semaphore.h>
134#include <iprt/string.h>
135#include <iprt/time.h>
136#include <iprt/uuid.h>
137#include <VBox/vmm/pdmdev.h>
138#include <VBox/vmm/pdmnetifs.h>
139#include <VBox/vmm/pdmnetinline.h>
140#include <VBox/param.h>
141#include "VBoxDD.h"
142
143#include "DevEEPROM.h"
144#include "DevE1000Phy.h"
145
146/* Little helpers ************************************************************/
147#undef htons
148#undef ntohs
149#undef htonl
150#undef ntohl
151#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
152#define ntohs(x) htons(x)
153#define htonl(x) ASMByteSwapU32(x)
154#define ntohl(x) htonl(x)
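/*
 * Editorial example (not part of the original source): these local helpers
 * unconditionally swap byte order, i.e. they assume a little-endian host.
 * For instance the IPv4 EtherType 0x0800 becomes 0x0008 when converted to
 * network byte order and read back as a 16-bit integer.
 */
AssertCompile(htons(0x0800) == 0x0008);
AssertCompile(ntohs(0x0008) == 0x0800);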
155
156#ifndef DEBUG
157# ifdef E1K_REL_DEBUG
158# define DEBUG
159# define E1kLog(a) LogRel(a)
160# define E1kLog2(a) LogRel(a)
161# define E1kLog3(a) LogRel(a)
162# define E1kLogX(x, a) LogRel(a)
163//# define E1kLog3(a) do {} while (0)
164# else
165# define E1kLog(a) do {} while (0)
166# define E1kLog2(a) do {} while (0)
167# define E1kLog3(a) do {} while (0)
168# define E1kLogX(x, a) do {} while (0)
169# endif
170#else
171# define E1kLog(a) Log(a)
172# define E1kLog2(a) Log2(a)
173# define E1kLog3(a) Log3(a)
174# define E1kLogX(x, a) LogIt(LOG_INSTANCE, x, LOG_GROUP, a)
175//# define E1kLog(a) do {} while (0)
176//# define E1kLog2(a) do {} while (0)
177//# define E1kLog3(a) do {} while (0)
178#endif
179
180//#undef DEBUG
181
182#define INSTANCE(pState) pState->szInstance
183#define STATE_TO_DEVINS(pState) (((E1KSTATE *)pState)->CTX_SUFF(pDevIns))
184#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
185
186#define E1K_INC_CNT32(cnt) \
187do { \
188 if (cnt < UINT32_MAX) \
189 cnt++; \
190} while (0)
191
192#define E1K_ADD_CNT64(cntLo, cntHi, val) \
193do { \
194 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
195 uint64_t tmp = u64Cnt; \
196 u64Cnt += val; \
197 if (tmp > u64Cnt ) \
198 u64Cnt = UINT64_MAX; \
199 cntLo = (uint32_t)u64Cnt; \
200 cntHi = (uint32_t)(u64Cnt >> 32); \
201} while (0)
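/*
 * Editorial sketch (not part of the original source): E1K_INC_CNT32 saturates
 * a 32-bit statistics counter at UINT32_MAX, and E1K_ADD_CNT64 maintains a
 * 64-bit counter split into low/high 32-bit halves (GORCL/GORCH-style pairs),
 * clamping at UINT64_MAX instead of wrapping. The helper below is purely
 * illustrative and its name is hypothetical.
 */
static inline void e1kExampleAddOctets(uint32_t *pu32CntLo, uint32_t *pu32CntHi, uint32_t cbFrame)
{
    /* Combine the halves, add, clamp on overflow, split back into 32-bit halves. */
    E1K_ADD_CNT64(*pu32CntLo, *pu32CntHi, cbFrame);
}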
202
203#ifdef E1K_INT_STATS
204# define E1K_INC_ISTAT_CNT(cnt) ++cnt
205#else /* E1K_INT_STATS */
206# define E1K_INC_ISTAT_CNT(cnt)
207#endif /* E1K_INT_STATS */
208
209
210/*****************************************************************************/
211
212typedef uint32_t E1KCHIP;
213#define E1K_CHIP_82540EM 0
214#define E1K_CHIP_82543GC 1
215#define E1K_CHIP_82545EM 2
216
217struct E1kChips
218{
219 uint16_t uPCIVendorId;
220 uint16_t uPCIDeviceId;
221 uint16_t uPCISubsystemVendorId;
222 uint16_t uPCISubsystemId;
223 const char *pcszName;
224} g_Chips[] =
225{
226 /* Vendor Device SSVendor SubSys Name */
227 { 0x8086,
228 /* Temporary code, as MSI-aware drivers dislike 0x100E. How should this be done properly? */
229#ifdef E1K_WITH_MSI
230 0x105E,
231#else
232 0x100E,
233#endif
234 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
235 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
236 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
237};
238
239
240/* The size of register area mapped to I/O space */
241#define E1K_IOPORT_SIZE 0x8
242/* The size of memory-mapped register area */
243#define E1K_MM_SIZE 0x20000
244
245#define E1K_MAX_TX_PKT_SIZE 16288
246#define E1K_MAX_RX_PKT_SIZE 16384
247
248/*****************************************************************************/
249
250/** Gets the specified bits from the register. */
251#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
252#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
253#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
254#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
255#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
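/*
 * Editorial example (not part of the original source): these helpers rely on
 * the <REG>_<FIELD>_MASK / <REG>_<FIELD>_SHIFT naming convention used for the
 * bit definitions below. With a hypothetical field FOO.BAR in bits 8..11:
 */
#define FOO_BAR_MASK  0x00000F00
#define FOO_BAR_SHIFT 8
/* GET_BITS_V() extracts the field value, SET_BITS_V() stores one back without
 * disturbing the surrounding bits. */
AssertCompile(GET_BITS_V(0x00000A00, FOO, BAR) == 0xA);
#undef FOO_BAR_MASK
#undef FOO_BAR_SHIFT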
256
257#define CTRL_SLU 0x00000040
258#define CTRL_MDIO 0x00100000
259#define CTRL_MDC 0x00200000
260#define CTRL_MDIO_DIR 0x01000000
261#define CTRL_MDC_DIR 0x02000000
262#define CTRL_RESET 0x04000000
263#define CTRL_VME 0x40000000
264
265#define STATUS_LU 0x00000002
266#define STATUS_TXOFF 0x00000010
267
268#define EECD_EE_WIRES 0x0F
269#define EECD_EE_REQ 0x40
270#define EECD_EE_GNT 0x80
271
272#define EERD_START 0x00000001
273#define EERD_DONE 0x00000010
274#define EERD_DATA_MASK 0xFFFF0000
275#define EERD_DATA_SHIFT 16
276#define EERD_ADDR_MASK 0x0000FF00
277#define EERD_ADDR_SHIFT 8
278
279#define MDIC_DATA_MASK 0x0000FFFF
280#define MDIC_DATA_SHIFT 0
281#define MDIC_REG_MASK 0x001F0000
282#define MDIC_REG_SHIFT 16
283#define MDIC_PHY_MASK 0x03E00000
284#define MDIC_PHY_SHIFT 21
285#define MDIC_OP_WRITE 0x04000000
286#define MDIC_OP_READ 0x08000000
287#define MDIC_READY 0x10000000
288#define MDIC_INT_EN 0x20000000
289#define MDIC_ERROR 0x40000000
290
291#define TCTL_EN 0x00000002
292#define TCTL_PSP 0x00000008
293
294#define RCTL_EN 0x00000002
295#define RCTL_UPE 0x00000008
296#define RCTL_MPE 0x00000010
297#define RCTL_LPE 0x00000020
298#define RCTL_LBM_MASK 0x000000C0
299#define RCTL_LBM_SHIFT 6
300#define RCTL_RDMTS_MASK 0x00000300
301#define RCTL_RDMTS_SHIFT 8
302#define RCTL_LBM_TCVR 3 /**< PHY or external SerDes loopback. */
303#define RCTL_MO_MASK 0x00003000
304#define RCTL_MO_SHIFT 12
305#define RCTL_BAM 0x00008000
306#define RCTL_BSIZE_MASK 0x00030000
307#define RCTL_BSIZE_SHIFT 16
308#define RCTL_VFE 0x00040000
309#define RCTL_CFIEN 0x00080000
310#define RCTL_CFI 0x00100000
311#define RCTL_BSEX 0x02000000
312#define RCTL_SECRC 0x04000000
313
314#define ICR_TXDW 0x00000001
315#define ICR_TXQE 0x00000002
316#define ICR_LSC 0x00000004
317#define ICR_RXDMT0 0x00000010
318#define ICR_RXT0 0x00000080
319#define ICR_TXD_LOW 0x00008000
320#define RDTR_FPD 0x80000000
321
322#define PBA_st ((PBAST*)(pState->auRegs + PBA_IDX))
323typedef struct
324{
325 unsigned rxa : 7;
326 unsigned rxa_r : 9;
327 unsigned txa : 16;
328} PBAST;
329AssertCompileSize(PBAST, 4);
330
331#define TXDCTL_WTHRESH_MASK 0x003F0000
332#define TXDCTL_WTHRESH_SHIFT 16
333#define TXDCTL_LWTHRESH_MASK 0xFE000000
334#define TXDCTL_LWTHRESH_SHIFT 25
335
336#define RXCSUM_PCSS_MASK 0x000000FF
337#define RXCSUM_PCSS_SHIFT 0
338
339/* Register access macros ****************************************************/
340#define CTRL pState->auRegs[CTRL_IDX]
341#define STATUS pState->auRegs[STATUS_IDX]
342#define EECD pState->auRegs[EECD_IDX]
343#define EERD pState->auRegs[EERD_IDX]
344#define CTRL_EXT pState->auRegs[CTRL_EXT_IDX]
345#define FLA pState->auRegs[FLA_IDX]
346#define MDIC pState->auRegs[MDIC_IDX]
347#define FCAL pState->auRegs[FCAL_IDX]
348#define FCAH pState->auRegs[FCAH_IDX]
349#define FCT pState->auRegs[FCT_IDX]
350#define VET pState->auRegs[VET_IDX]
351#define ICR pState->auRegs[ICR_IDX]
352#define ITR pState->auRegs[ITR_IDX]
353#define ICS pState->auRegs[ICS_IDX]
354#define IMS pState->auRegs[IMS_IDX]
355#define IMC pState->auRegs[IMC_IDX]
356#define RCTL pState->auRegs[RCTL_IDX]
357#define FCTTV pState->auRegs[FCTTV_IDX]
358#define TXCW pState->auRegs[TXCW_IDX]
359#define RXCW pState->auRegs[RXCW_IDX]
360#define TCTL pState->auRegs[TCTL_IDX]
361#define TIPG pState->auRegs[TIPG_IDX]
362#define AIFS pState->auRegs[AIFS_IDX]
363#define LEDCTL pState->auRegs[LEDCTL_IDX]
364#define PBA pState->auRegs[PBA_IDX]
365#define FCRTL pState->auRegs[FCRTL_IDX]
366#define FCRTH pState->auRegs[FCRTH_IDX]
367#define RDFH pState->auRegs[RDFH_IDX]
368#define RDFT pState->auRegs[RDFT_IDX]
369#define RDFHS pState->auRegs[RDFHS_IDX]
370#define RDFTS pState->auRegs[RDFTS_IDX]
371#define RDFPC pState->auRegs[RDFPC_IDX]
372#define RDBAL pState->auRegs[RDBAL_IDX]
373#define RDBAH pState->auRegs[RDBAH_IDX]
374#define RDLEN pState->auRegs[RDLEN_IDX]
375#define RDH pState->auRegs[RDH_IDX]
376#define RDT pState->auRegs[RDT_IDX]
377#define RDTR pState->auRegs[RDTR_IDX]
378#define RXDCTL pState->auRegs[RXDCTL_IDX]
379#define RADV pState->auRegs[RADV_IDX]
380#define RSRPD pState->auRegs[RSRPD_IDX]
381#define TXDMAC pState->auRegs[TXDMAC_IDX]
382#define TDFH pState->auRegs[TDFH_IDX]
383#define TDFT pState->auRegs[TDFT_IDX]
384#define TDFHS pState->auRegs[TDFHS_IDX]
385#define TDFTS pState->auRegs[TDFTS_IDX]
386#define TDFPC pState->auRegs[TDFPC_IDX]
387#define TDBAL pState->auRegs[TDBAL_IDX]
388#define TDBAH pState->auRegs[TDBAH_IDX]
389#define TDLEN pState->auRegs[TDLEN_IDX]
390#define TDH pState->auRegs[TDH_IDX]
391#define TDT pState->auRegs[TDT_IDX]
392#define TIDV pState->auRegs[TIDV_IDX]
393#define TXDCTL pState->auRegs[TXDCTL_IDX]
394#define TADV pState->auRegs[TADV_IDX]
395#define TSPMT pState->auRegs[TSPMT_IDX]
396#define CRCERRS pState->auRegs[CRCERRS_IDX]
397#define ALGNERRC pState->auRegs[ALGNERRC_IDX]
398#define SYMERRS pState->auRegs[SYMERRS_IDX]
399#define RXERRC pState->auRegs[RXERRC_IDX]
400#define MPC pState->auRegs[MPC_IDX]
401#define SCC pState->auRegs[SCC_IDX]
402#define ECOL pState->auRegs[ECOL_IDX]
403#define MCC pState->auRegs[MCC_IDX]
404#define LATECOL pState->auRegs[LATECOL_IDX]
405#define COLC pState->auRegs[COLC_IDX]
406#define DC pState->auRegs[DC_IDX]
407#define TNCRS pState->auRegs[TNCRS_IDX]
408#define SEC pState->auRegs[SEC_IDX]
409#define CEXTERR pState->auRegs[CEXTERR_IDX]
410#define RLEC pState->auRegs[RLEC_IDX]
411#define XONRXC pState->auRegs[XONRXC_IDX]
412#define XONTXC pState->auRegs[XONTXC_IDX]
413#define XOFFRXC pState->auRegs[XOFFRXC_IDX]
414#define XOFFTXC pState->auRegs[XOFFTXC_IDX]
415#define FCRUC pState->auRegs[FCRUC_IDX]
416#define PRC64 pState->auRegs[PRC64_IDX]
417#define PRC127 pState->auRegs[PRC127_IDX]
418#define PRC255 pState->auRegs[PRC255_IDX]
419#define PRC511 pState->auRegs[PRC511_IDX]
420#define PRC1023 pState->auRegs[PRC1023_IDX]
421#define PRC1522 pState->auRegs[PRC1522_IDX]
422#define GPRC pState->auRegs[GPRC_IDX]
423#define BPRC pState->auRegs[BPRC_IDX]
424#define MPRC pState->auRegs[MPRC_IDX]
425#define GPTC pState->auRegs[GPTC_IDX]
426#define GORCL pState->auRegs[GORCL_IDX]
427#define GORCH pState->auRegs[GORCH_IDX]
428#define GOTCL pState->auRegs[GOTCL_IDX]
429#define GOTCH pState->auRegs[GOTCH_IDX]
430#define RNBC pState->auRegs[RNBC_IDX]
431#define RUC pState->auRegs[RUC_IDX]
432#define RFC pState->auRegs[RFC_IDX]
433#define ROC pState->auRegs[ROC_IDX]
434#define RJC pState->auRegs[RJC_IDX]
435#define MGTPRC pState->auRegs[MGTPRC_IDX]
436#define MGTPDC pState->auRegs[MGTPDC_IDX]
437#define MGTPTC pState->auRegs[MGTPTC_IDX]
438#define TORL pState->auRegs[TORL_IDX]
439#define TORH pState->auRegs[TORH_IDX]
440#define TOTL pState->auRegs[TOTL_IDX]
441#define TOTH pState->auRegs[TOTH_IDX]
442#define TPR pState->auRegs[TPR_IDX]
443#define TPT pState->auRegs[TPT_IDX]
444#define PTC64 pState->auRegs[PTC64_IDX]
445#define PTC127 pState->auRegs[PTC127_IDX]
446#define PTC255 pState->auRegs[PTC255_IDX]
447#define PTC511 pState->auRegs[PTC511_IDX]
448#define PTC1023 pState->auRegs[PTC1023_IDX]
449#define PTC1522 pState->auRegs[PTC1522_IDX]
450#define MPTC pState->auRegs[MPTC_IDX]
451#define BPTC pState->auRegs[BPTC_IDX]
452#define TSCTC pState->auRegs[TSCTC_IDX]
453#define TSCTFC pState->auRegs[TSCTFC_IDX]
454#define RXCSUM pState->auRegs[RXCSUM_IDX]
455#define WUC pState->auRegs[WUC_IDX]
456#define WUFC pState->auRegs[WUFC_IDX]
457#define WUS pState->auRegs[WUS_IDX]
458#define MANC pState->auRegs[MANC_IDX]
459#define IPAV pState->auRegs[IPAV_IDX]
460#define WUPL pState->auRegs[WUPL_IDX]
461
462/**
463 * Indices of memory-mapped registers in register table
464 */
465typedef enum
466{
467 CTRL_IDX,
468 STATUS_IDX,
469 EECD_IDX,
470 EERD_IDX,
471 CTRL_EXT_IDX,
472 FLA_IDX,
473 MDIC_IDX,
474 FCAL_IDX,
475 FCAH_IDX,
476 FCT_IDX,
477 VET_IDX,
478 ICR_IDX,
479 ITR_IDX,
480 ICS_IDX,
481 IMS_IDX,
482 IMC_IDX,
483 RCTL_IDX,
484 FCTTV_IDX,
485 TXCW_IDX,
486 RXCW_IDX,
487 TCTL_IDX,
488 TIPG_IDX,
489 AIFS_IDX,
490 LEDCTL_IDX,
491 PBA_IDX,
492 FCRTL_IDX,
493 FCRTH_IDX,
494 RDFH_IDX,
495 RDFT_IDX,
496 RDFHS_IDX,
497 RDFTS_IDX,
498 RDFPC_IDX,
499 RDBAL_IDX,
500 RDBAH_IDX,
501 RDLEN_IDX,
502 RDH_IDX,
503 RDT_IDX,
504 RDTR_IDX,
505 RXDCTL_IDX,
506 RADV_IDX,
507 RSRPD_IDX,
508 TXDMAC_IDX,
509 TDFH_IDX,
510 TDFT_IDX,
511 TDFHS_IDX,
512 TDFTS_IDX,
513 TDFPC_IDX,
514 TDBAL_IDX,
515 TDBAH_IDX,
516 TDLEN_IDX,
517 TDH_IDX,
518 TDT_IDX,
519 TIDV_IDX,
520 TXDCTL_IDX,
521 TADV_IDX,
522 TSPMT_IDX,
523 CRCERRS_IDX,
524 ALGNERRC_IDX,
525 SYMERRS_IDX,
526 RXERRC_IDX,
527 MPC_IDX,
528 SCC_IDX,
529 ECOL_IDX,
530 MCC_IDX,
531 LATECOL_IDX,
532 COLC_IDX,
533 DC_IDX,
534 TNCRS_IDX,
535 SEC_IDX,
536 CEXTERR_IDX,
537 RLEC_IDX,
538 XONRXC_IDX,
539 XONTXC_IDX,
540 XOFFRXC_IDX,
541 XOFFTXC_IDX,
542 FCRUC_IDX,
543 PRC64_IDX,
544 PRC127_IDX,
545 PRC255_IDX,
546 PRC511_IDX,
547 PRC1023_IDX,
548 PRC1522_IDX,
549 GPRC_IDX,
550 BPRC_IDX,
551 MPRC_IDX,
552 GPTC_IDX,
553 GORCL_IDX,
554 GORCH_IDX,
555 GOTCL_IDX,
556 GOTCH_IDX,
557 RNBC_IDX,
558 RUC_IDX,
559 RFC_IDX,
560 ROC_IDX,
561 RJC_IDX,
562 MGTPRC_IDX,
563 MGTPDC_IDX,
564 MGTPTC_IDX,
565 TORL_IDX,
566 TORH_IDX,
567 TOTL_IDX,
568 TOTH_IDX,
569 TPR_IDX,
570 TPT_IDX,
571 PTC64_IDX,
572 PTC127_IDX,
573 PTC255_IDX,
574 PTC511_IDX,
575 PTC1023_IDX,
576 PTC1522_IDX,
577 MPTC_IDX,
578 BPTC_IDX,
579 TSCTC_IDX,
580 TSCTFC_IDX,
581 RXCSUM_IDX,
582 WUC_IDX,
583 WUFC_IDX,
584 WUS_IDX,
585 MANC_IDX,
586 IPAV_IDX,
587 WUPL_IDX,
588 MTA_IDX,
589 RA_IDX,
590 VFTA_IDX,
591 IP4AT_IDX,
592 IP6AT_IDX,
593 WUPM_IDX,
594 FFLT_IDX,
595 FFMT_IDX,
596 FFVT_IDX,
597 PBM_IDX,
598 RA_82542_IDX,
599 MTA_82542_IDX,
600 VFTA_82542_IDX,
601 E1K_NUM_OF_REGS
602} E1kRegIndex;
603
604#define E1K_NUM_OF_32BIT_REGS MTA_IDX
605
606
607/**
608 * Define E1000-specific EEPROM layout.
609 */
610class E1kEEPROM
611{
612 public:
613 EEPROM93C46 eeprom;
614
615#ifdef IN_RING3
616 /**
617 * Initialize EEPROM content.
618 *
619 * @param macAddr MAC address of E1000.
620 */
621 void init(RTMAC &macAddr)
622 {
623 eeprom.init();
624 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
625 eeprom.m_au16Data[0x04] = 0xFFFF;
626 /*
627 * bit 3 - full support for power management
628 * bit 10 - full duplex
629 */
630 eeprom.m_au16Data[0x0A] = 0x4408;
631 eeprom.m_au16Data[0x0B] = 0x001E;
632 eeprom.m_au16Data[0x0C] = 0x8086;
633 eeprom.m_au16Data[0x0D] = 0x100E;
634 eeprom.m_au16Data[0x0E] = 0x8086;
635 eeprom.m_au16Data[0x0F] = 0x3040;
636 eeprom.m_au16Data[0x21] = 0x7061;
637 eeprom.m_au16Data[0x22] = 0x280C;
638 eeprom.m_au16Data[0x23] = 0x00C8;
639 eeprom.m_au16Data[0x24] = 0x00C8;
640 eeprom.m_au16Data[0x2F] = 0x0602;
641 updateChecksum();
642 };
643
644 /**
645 * Compute the checksum as required by E1000 and store it
646 * in the last word.
647 */
648 void updateChecksum()
649 {
650 uint16_t u16Checksum = 0;
651
652 for (int i = 0; i < eeprom.SIZE-1; i++)
653 u16Checksum += eeprom.m_au16Data[i];
654 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
655 };
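    /**
     * Editorial sketch (not part of the original source): the invariant
     * established by updateChecksum() is that the 16-bit sum of all EEPROM
     * words, including the checksum word itself, equals 0xBABA. This
     * hypothetical helper merely re-checks that invariant.
     */
    bool verifyChecksum()
    {
        uint16_t u16Sum = 0;

        for (int i = 0; i < eeprom.SIZE; i++)
            u16Sum += eeprom.m_au16Data[i];
        return u16Sum == 0xBABA;
    };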
656
657 /**
658 * The first 6 bytes of the EEPROM contain the MAC address.
659 *
660 * @param pMac Where to store the MAC address of E1000.
661 */
662 void getMac(PRTMAC pMac)
663 {
664 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
665 };
666
667 uint32_t read()
668 {
669 return eeprom.read();
670 }
671
672 void write(uint32_t u32Wires)
673 {
674 eeprom.write(u32Wires);
675 }
676
677 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
678 {
679 return eeprom.readWord(u32Addr, pu16Value);
680 }
681
682 int load(PSSMHANDLE pSSM)
683 {
684 return eeprom.load(pSSM);
685 }
686
687 void save(PSSMHANDLE pSSM)
688 {
689 eeprom.save(pSSM);
690 }
691#endif /* IN_RING3 */
692};
693
694
695#define E1K_SPEC_VLAN(s) (s & 0xFFF)
696#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
697#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
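/*
 * Editorial example (not part of the original source): the "special" field of
 * the descriptors below carries an 802.1Q tag control word. For s = 0xA123 the
 * macros above yield VLAN ID 0x123, CFI 0 and priority 5.
 */
AssertCompile(E1K_SPEC_VLAN(0xA123) == 0x123);
AssertCompile(E1K_SPEC_CFI(0xA123)  == 0);
AssertCompile(E1K_SPEC_PRI(0xA123)  == 5);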
698
699struct E1kRxDStatus
700{
701 /** @name Descriptor Status field (3.2.3.1)
702 * @{ */
703 unsigned fDD : 1; /**< Descriptor Done. */
704 unsigned fEOP : 1; /**< End of packet. */
705 unsigned fIXSM : 1; /**< Ignore checksum indication. */
706 unsigned fVP : 1; /**< VLAN, matches VET. */
707 unsigned : 1;
708 unsigned fTCPCS : 1; /**< TCP Checksum calculated on the packet. */
709 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
710 unsigned fPIF : 1; /**< Passed in-exact filter */
711 /** @} */
712 /** @name Descriptor Errors field (3.2.3.2)
713 * (Only valid when fEOP and fDD are set.)
714 * @{ */
715 unsigned fCE : 1; /**< CRC or alignment error. */
716 unsigned : 4; /**< Reserved, varies with different models... */
717 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
718 unsigned fIPE : 1; /**< IP Checksum error. */
719 unsigned fRXE : 1; /**< RX Data error. */
720 /** @} */
721 /** @name Descriptor Special field (3.2.3.3)
722 * @{ */
723 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
724 /** @} */
725};
726typedef struct E1kRxDStatus E1KRXDST;
727
728struct E1kRxDesc_st
729{
730 uint64_t u64BufAddr; /**< Address of data buffer */
731 uint16_t u16Length; /**< Length of data in buffer */
732 uint16_t u16Checksum; /**< Packet checksum */
733 E1KRXDST status;
734};
735typedef struct E1kRxDesc_st E1KRXDESC;
736AssertCompileSize(E1KRXDESC, 16);
737
738#define E1K_DTYP_LEGACY -1
739#define E1K_DTYP_CONTEXT 0
740#define E1K_DTYP_DATA 1
741
742struct E1kTDLegacy
743{
744 uint64_t u64BufAddr; /**< Address of data buffer */
745 struct TDLCmd_st
746 {
747 unsigned u16Length : 16;
748 unsigned u8CSO : 8;
749 /* CMD field : 8 */
750 unsigned fEOP : 1;
751 unsigned fIFCS : 1;
752 unsigned fIC : 1;
753 unsigned fRS : 1;
754 unsigned fRPS : 1;
755 unsigned fDEXT : 1;
756 unsigned fVLE : 1;
757 unsigned fIDE : 1;
758 } cmd;
759 struct TDLDw3_st
760 {
761 /* STA field */
762 unsigned fDD : 1;
763 unsigned fEC : 1;
764 unsigned fLC : 1;
765 unsigned fTURSV : 1;
766 /* RSV field */
767 unsigned u4RSV : 4;
768 /* CSS field */
769 unsigned u8CSS : 8;
770 /* Special field*/
771 unsigned u16Special: 16;
772 } dw3;
773};
774
775/**
776 * TCP/IP Context Transmit Descriptor, section 3.3.6.
777 */
778struct E1kTDContext
779{
780 struct CheckSum_st
781 {
782 /** TSE: Header start. !TSE: Checksum start. */
783 unsigned u8CSS : 8;
784 /** Checksum offset - where to store it. */
785 unsigned u8CSO : 8;
786 /** Checksum ending (inclusive) offset, 0 = end of packet. */
787 unsigned u16CSE : 16;
788 } ip;
789 struct CheckSum_st tu;
790 struct TDCDw2_st
791 {
792 /** TSE: The total number of payload bytes for this context. Sans header. */
793 unsigned u20PAYLEN : 20;
794 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
795 unsigned u4DTYP : 4;
796 /** TUCMD field, 8 bits
797 * @{ */
798 /** TSE: TCP (set) or UDP (clear). */
799 unsigned fTCP : 1;
800 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
801 * the IP header. Does not affect the checksumming.
802 * @remarks 82544GC/EI interprets a cleared field differently. */
803 unsigned fIP : 1;
804 /** TSE: TCP segmentation enable. When clear, the context describes checksum offloading only. */
805 unsigned fTSE : 1;
806 /** Report status (here it only applies to dw3.fDD). */
807 unsigned fRS : 1;
808 /** Reserved, MBZ. */
809 unsigned fRSV1 : 1;
810 /** Descriptor extension, must be set for this descriptor type. */
811 unsigned fDEXT : 1;
812 /** Reserved, MBZ. */
813 unsigned fRSV2 : 1;
814 /** Interrupt delay enable. */
815 unsigned fIDE : 1;
816 /** @} */
817 } dw2;
818 struct TDCDw3_st
819 {
820 /** Descriptor Done. */
821 unsigned fDD : 1;
822 /** Reserved, MBZ. */
823 unsigned u7RSV : 7;
824 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
825 unsigned u8HDRLEN : 8;
826 /** TSO: Maximum segment size. */
827 unsigned u16MSS : 16;
828 } dw3;
829};
830typedef struct E1kTDContext E1KTXCTX;
831
832/**
833 * TCP/IP Data Transmit Descriptor, section 3.3.7.
834 */
835struct E1kTDData
836{
837 uint64_t u64BufAddr; /**< Address of data buffer */
838 struct TDDCmd_st
839 {
840 /** The total length of data pointed to by this descriptor. */
841 unsigned u20DTALEN : 20;
842 /** The descriptor type - E1K_DTYP_DATA (1). */
843 unsigned u4DTYP : 4;
844 /** @name DCMD field, 8 bits (3.3.7.1).
845 * @{ */
846 /** End of packet. Note TSCTFC update. */
847 unsigned fEOP : 1;
848 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
849 unsigned fIFCS : 1;
850 /** Use the TSE context when set and the normal when clear. */
851 unsigned fTSE : 1;
852 /** Report status (dw3.STA). */
853 unsigned fRS : 1;
854 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
855 unsigned fRPS : 1;
856 /** Descriptor extension, must be set for this descriptor type. */
857 unsigned fDEXT : 1;
858 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
859 * Insert dw3.SPECIAL after ethernet header. */
860 unsigned fVLE : 1;
861 /** Interrupt delay enable. */
862 unsigned fIDE : 1;
863 /** @} */
864 } cmd;
865 struct TDDDw3_st
866 {
867 /** @name STA field (3.3.7.2)
868 * @{ */
869 unsigned fDD : 1; /**< Descriptor done. */
870 unsigned fEC : 1; /**< Excess collision. */
871 unsigned fLC : 1; /**< Late collision. */
872 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
873 unsigned fTURSV : 1;
874 /** @} */
875 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
876 /** @name POPTS (Packet Option) field (3.3.7.3)
877 * @{ */
878 unsigned fIXSM : 1; /**< Insert IP checksum. */
879 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
880 unsigned u6RSV : 6; /**< Reserved, MBZ. */
881 /** @} */
882 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
883 * Requires fEOP, fVLE and CTRL.VME to be set.
884 * @{ */
885 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
886 /** @} */
887 } dw3;
888};
889typedef struct E1kTDData E1KTXDAT;
890
891union E1kTxDesc
892{
893 struct E1kTDLegacy legacy;
894 struct E1kTDContext context;
895 struct E1kTDData data;
896};
897typedef union E1kTxDesc E1KTXDESC;
898AssertCompileSize(E1KTXDESC, 16);
899
900#define RA_CTL_AS 0x0003
901#define RA_CTL_AV 0x8000
902
903union E1kRecAddr
904{
905 uint32_t au32[32];
906 struct RAArray
907 {
908 uint8_t addr[6];
909 uint16_t ctl;
910 } array[16];
911};
912typedef struct E1kRecAddr::RAArray E1KRAELEM;
913typedef union E1kRecAddr E1KRA;
914AssertCompileSize(E1KRA, 8*16);
915
916#define E1K_IP_RF 0x8000 /* reserved fragment flag */
917#define E1K_IP_DF 0x4000 /* don't fragment flag */
918#define E1K_IP_MF 0x2000 /* more fragments flag */
919#define E1K_IP_OFFMASK 0x1fff /* mask for fragmenting bits */
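/*
 * Editorial example (not part of the original source): in the IPv4 header the
 * fragment field combines the RF/DF/MF flags above with a 13-bit fragment
 * offset expressed in 8-byte units. E.g. a host-order value of 0x2064 has MF
 * set and places the fragment 800 bytes into the original payload.
 */
AssertCompile((0x2064 & E1K_IP_MF) != 0);
AssertCompile((0x2064 & E1K_IP_OFFMASK) * 8 == 800);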
920
921/** @todo use+extend RTNETIPV4 */
922struct E1kIpHeader
923{
924 /* type of service / version / header length */
925 uint16_t tos_ver_hl;
926 /* total length */
927 uint16_t total_len;
928 /* identification */
929 uint16_t ident;
930 /* fragment offset field */
931 uint16_t offset;
932 /* time to live / protocol*/
933 uint16_t ttl_proto;
934 /* checksum */
935 uint16_t chksum;
936 /* source IP address */
937 uint32_t src;
938 /* destination IP address */
939 uint32_t dest;
940};
941AssertCompileSize(struct E1kIpHeader, 20);
942
943#define E1K_TCP_FIN 0x01U
944#define E1K_TCP_SYN 0x02U
945#define E1K_TCP_RST 0x04U
946#define E1K_TCP_PSH 0x08U
947#define E1K_TCP_ACK 0x10U
948#define E1K_TCP_URG 0x20U
949#define E1K_TCP_ECE 0x40U
950#define E1K_TCP_CWR 0x80U
951
952#define E1K_TCP_FLAGS 0x3fU
953
954/** @todo use+extend RTNETTCP */
955struct E1kTcpHeader
956{
957 uint16_t src;
958 uint16_t dest;
959 uint32_t seqno;
960 uint32_t ackno;
961 uint16_t hdrlen_flags;
962 uint16_t wnd;
963 uint16_t chksum;
964 uint16_t urgp;
965};
966AssertCompileSize(struct E1kTcpHeader, 20);
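/*
 * Editorial example (not part of the original source): hdrlen_flags packs the
 * TCP data offset (upper 4 bits, in 32-bit words) together with the flag bits
 * defined above. In host byte order, 0x5012 describes a plain 20-byte header
 * with SYN and ACK set.
 */
AssertCompile((0x5012 >> 12) * 4 == 20);
AssertCompile((0x5012 & E1K_TCP_FLAGS) == (E1K_TCP_SYN | E1K_TCP_ACK));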
967
968
969#ifdef E1K_WITH_TXD_CACHE
970/** The current Saved state version. */
971#define E1K_SAVEDSTATE_VERSION 4
972/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
973#define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
974#else /* !E1K_WITH_TXD_CACHE */
975/** The current Saved state version. */
976#define E1K_SAVEDSTATE_VERSION 3
977#endif /* !E1K_WITH_TXD_CACHE */
978/** Saved state version for VirtualBox 4.1 and earlier.
979 * These did not include VLAN tag fields. */
980#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
981/** Saved state version for VirtualBox 3.0 and earlier.
982 * This did not include the configuration part nor the E1kEEPROM. */
983#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
984
985/**
986 * Device state structure. Holds the current state of the device.
987 *
988 * @implements PDMINETWORKDOWN
989 * @implements PDMINETWORKCONFIG
990 * @implements PDMILEDPORTS
991 */
992struct E1kState_st
993{
994 char szInstance[8]; /**< Instance name, e.g. E1000#1. */
995 PDMIBASE IBase;
996 PDMINETWORKDOWN INetworkDown;
997 PDMINETWORKCONFIG INetworkConfig;
998 PDMILEDPORTS ILeds; /**< LED interface */
999 R3PTRTYPE(PPDMIBASE) pDrvBase; /**< Attached network driver. */
1000 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1001
1002 PPDMDEVINSR3 pDevInsR3; /**< Device instance - R3. */
1003 R3PTRTYPE(PPDMQUEUE) pTxQueueR3; /**< Transmit queue - R3. */
1004 R3PTRTYPE(PPDMQUEUE) pCanRxQueueR3; /**< Rx wakeup signaller - R3. */
1005 PPDMINETWORKUPR3 pDrvR3; /**< Attached network driver - R3. */
1006 PTMTIMERR3 pRIDTimerR3; /**< Receive Interrupt Delay Timer - R3. */
1007 PTMTIMERR3 pRADTimerR3; /**< Receive Absolute Delay Timer - R3. */
1008 PTMTIMERR3 pTIDTimerR3; /**< Transmit Interrupt Delay Timer - R3. */
1009 PTMTIMERR3 pTADTimerR3; /**< Transmit Absolute Delay Timer - R3. */
1010 PTMTIMERR3 pTXDTimerR3; /**< Transmit Delay Timer - R3. */
1011 PTMTIMERR3 pIntTimerR3; /**< Late Interrupt Timer - R3. */
1012 PTMTIMERR3 pLUTimerR3; /**< Link Up(/Restore) Timer. */
1013 /** The scatter / gather buffer used for the current outgoing packet - R3. */
1014 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1015
1016 PPDMDEVINSR0 pDevInsR0; /**< Device instance - R0. */
1017 R0PTRTYPE(PPDMQUEUE) pTxQueueR0; /**< Transmit queue - R0. */
1018 R0PTRTYPE(PPDMQUEUE) pCanRxQueueR0; /**< Rx wakeup signaller - R0. */
1019 PPDMINETWORKUPR0 pDrvR0; /**< Attached network driver - R0. */
1020 PTMTIMERR0 pRIDTimerR0; /**< Receive Interrupt Delay Timer - R0. */
1021 PTMTIMERR0 pRADTimerR0; /**< Receive Absolute Delay Timer - R0. */
1022 PTMTIMERR0 pTIDTimerR0; /**< Transmit Interrupt Delay Timer - R0. */
1023 PTMTIMERR0 pTADTimerR0; /**< Transmit Absolute Delay Timer - R0. */
1024 PTMTIMERR0 pTXDTimerR0; /**< Transmit Delay Timer - R0. */
1025 PTMTIMERR0 pIntTimerR0; /**< Late Interrupt Timer - R0. */
1026 PTMTIMERR0 pLUTimerR0; /**< Link Up(/Restore) Timer - R0. */
1027 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1028 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1029
1030 PPDMDEVINSRC pDevInsRC; /**< Device instance - RC. */
1031 RCPTRTYPE(PPDMQUEUE) pTxQueueRC; /**< Transmit queue - RC. */
1032 RCPTRTYPE(PPDMQUEUE) pCanRxQueueRC; /**< Rx wakeup signaller - RC. */
1033 PPDMINETWORKUPRC pDrvRC; /**< Attached network driver - RC. */
1034 PTMTIMERRC pRIDTimerRC; /**< Receive Interrupt Delay Timer - RC. */
1035 PTMTIMERRC pRADTimerRC; /**< Receive Absolute Delay Timer - RC. */
1036 PTMTIMERRC pTIDTimerRC; /**< Transmit Interrupt Delay Timer - RC. */
1037 PTMTIMERRC pTADTimerRC; /**< Transmit Absolute Delay Timer - RC. */
1038 PTMTIMERRC pTXDTimerRC; /**< Transmit Delay Timer - RC. */
1039 PTMTIMERRC pIntTimerRC; /**< Late Interrupt Timer - RC. */
1040 PTMTIMERRC pLUTimerRC; /**< Link Up(/Restore) Timer - RC. */
1041 /** The scatter / gather buffer used for the current outgoing packet - RC. */
1042 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1043 RTRCPTR RCPtrAlignment;
1044
1045#if HC_ARCH_BITS != 32
1046 uint32_t Alignment1;
1047#endif
1048 PDMCRITSECT cs; /**< Critical section - what is it protecting? */
1049 PDMCRITSECT csRx; /**< RX Critical section. */
1050#ifdef E1K_WITH_TX_CS
1051 PDMCRITSECT csTx; /**< TX Critical section. */
1052#endif /* E1K_WITH_TX_CS */
1053 /** Base address of memory-mapped registers. */
1054 RTGCPHYS addrMMReg;
1055 /** MAC address obtained from the configuration. */
1056 RTMAC macConfigured;
1057 /** Base port of I/O space region. */
1058 RTIOPORT addrIOPort;
1059 /** EMT: */
1060 PCIDEVICE pciDevice;
1061 /** EMT: Last time the interrupt was acknowledged. */
1062 uint64_t u64AckedAt;
1063 /** All: Used for eliminating spurious interrupts. */
1064 bool fIntRaised;
1065 /** EMT: false if the cable is disconnected by the GUI. */
1066 bool fCableConnected;
1067 /** EMT: */
1068 bool fR0Enabled;
1069 /** EMT: */
1070 bool fGCEnabled;
1071 /** EMT: Compute Ethernet CRC for RX packets. */
1072 bool fEthernetCRC;
1073
1074 bool Alignment2[3];
1075 /** Link up delay (in milliseconds). */
1076 uint32_t cMsLinkUpDelay;
1077
1078 /** All: Device register storage. */
1079 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1080 /** TX/RX: Status LED. */
1081 PDMLED led;
1082 /** TX/RX: Number of the packet being sent/received, to show in the debug log. */
1083 uint32_t u32PktNo;
1084
1085 /** EMT: Offset of the register to be read via IO. */
1086 uint32_t uSelectedReg;
1087 /** EMT: Multicast Table Array. */
1088 uint32_t auMTA[128];
1089 /** EMT: Receive Address registers. */
1090 E1KRA aRecAddr;
1091 /** EMT: VLAN filter table array. */
1092 uint32_t auVFTA[128];
1093 /** EMT: Receive buffer size. */
1094 uint16_t u16RxBSize;
1095 /** EMT: Locked state -- no state alteration possible. */
1096 bool fLocked;
1097 /** EMT: */
1098 bool fDelayInts;
1099 /** All: */
1100 bool fIntMaskUsed;
1101
1102 /** N/A: */
1103 bool volatile fMaybeOutOfSpace;
1104 /** EMT: Gets signalled when more RX descriptors become available. */
1105 RTSEMEVENT hEventMoreRxDescAvail;
1106#ifdef E1K_WITH_RXD_CACHE
1107 /** RX: Fetched RX descriptors. */
1108 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1109 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1110 /** RX: Actual number of fetched RX descriptors. */
1111 uint32_t nRxDFetched;
1112 /** RX: Index in cache of RX descriptor being processed. */
1113 uint32_t iRxDCurrent;
1114#endif /* E1K_WITH_RXD_CACHE */
1115
1116 /** TX: Context used for TCP segmentation packets. */
1117 E1KTXCTX contextTSE;
1118 /** TX: Context used for ordinary packets. */
1119 E1KTXCTX contextNormal;
1120#ifdef E1K_WITH_TXD_CACHE
1121 /** TX: Fetched TX descriptors. */
1122 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1123 /** TX: Actual number of fetched TX descriptors. */
1124 uint8_t nTxDFetched;
1125 /** TX: Index in cache of TX descriptor being processed. */
1126 uint8_t iTxDCurrent;
1127 /** TX: Will this frame be sent as GSO. */
1128 bool fGSO;
1129 /** TX: Number of bytes in next packet. */
1130 uint32_t cbTxAlloc;
1131
1132#endif /* E1K_WITH_TXD_CACHE */
1133 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1134 * applicable to the current TSE mode. */
1135 PDMNETWORKGSO GsoCtx;
1136 /** Scratch space for holding the loopback / fallback scatter / gather
1137 * descriptor. */
1138 union
1139 {
1140 PDMSCATTERGATHER Sg;
1141 uint8_t padding[8 * sizeof(RTUINTPTR)];
1142 } uTxFallback;
1143 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1144 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1145 /** TX: Number of bytes assembled in TX packet buffer. */
1146 uint16_t u16TxPktLen;
1147 /** TX: IP checksum has to be inserted if true. */
1148 bool fIPcsum;
1149 /** TX: TCP/UDP checksum has to be inserted if true. */
1150 bool fTCPcsum;
1151 /** TX: VLAN tag has to be inserted if true. */
1152 bool fVTag;
1153 /** TX: TCI part of VLAN tag to be inserted. */
1154 uint16_t u16VTagTCI;
1155 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1156 uint32_t u32PayRemain;
1157 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1158 uint16_t u16HdrRemain;
1159 /** TX TSE fallback: Flags from template header. */
1160 uint16_t u16SavedFlags;
1161 /** TX TSE fallback: Partial checksum from template header. */
1162 uint32_t u32SavedCsum;
1163 /** ?: Emulated controller type. */
1164 E1KCHIP eChip;
1165
1166 /** EMT: EEPROM emulation */
1167 E1kEEPROM eeprom;
1168 /** EMT: Physical interface emulation. */
1169 PHY phy;
1170
1171#if 0
1172 /** Alignment padding. */
1173 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1174#endif
1175
1176 STAMCOUNTER StatReceiveBytes;
1177 STAMCOUNTER StatTransmitBytes;
1178#if defined(VBOX_WITH_STATISTICS)
1179 STAMPROFILEADV StatMMIOReadRZ;
1180 STAMPROFILEADV StatMMIOReadR3;
1181 STAMPROFILEADV StatMMIOWriteRZ;
1182 STAMPROFILEADV StatMMIOWriteR3;
1183 STAMPROFILEADV StatEEPROMRead;
1184 STAMPROFILEADV StatEEPROMWrite;
1185 STAMPROFILEADV StatIOReadRZ;
1186 STAMPROFILEADV StatIOReadR3;
1187 STAMPROFILEADV StatIOWriteRZ;
1188 STAMPROFILEADV StatIOWriteR3;
1189 STAMPROFILEADV StatLateIntTimer;
1190 STAMCOUNTER StatLateInts;
1191 STAMCOUNTER StatIntsRaised;
1192 STAMCOUNTER StatIntsPrevented;
1193 STAMPROFILEADV StatReceive;
1194 STAMPROFILEADV StatReceiveCRC;
1195 STAMPROFILEADV StatReceiveFilter;
1196 STAMPROFILEADV StatReceiveStore;
1197 STAMPROFILEADV StatTransmitRZ;
1198 STAMPROFILEADV StatTransmitR3;
1199 STAMPROFILE StatTransmitSendRZ;
1200 STAMPROFILE StatTransmitSendR3;
1201 STAMPROFILE StatRxOverflow;
1202 STAMCOUNTER StatRxOverflowWakeup;
1203 STAMCOUNTER StatTxDescCtxNormal;
1204 STAMCOUNTER StatTxDescCtxTSE;
1205 STAMCOUNTER StatTxDescLegacy;
1206 STAMCOUNTER StatTxDescData;
1207 STAMCOUNTER StatTxDescTSEData;
1208 STAMCOUNTER StatTxPathFallback;
1209 STAMCOUNTER StatTxPathGSO;
1210 STAMCOUNTER StatTxPathRegular;
1211 STAMCOUNTER StatPHYAccesses;
1212
1213#endif /* VBOX_WITH_STATISTICS */
1214
1215#ifdef E1K_INT_STATS
1216 /* Internal stats */
1217 uint64_t u64ArmedAt;
1218 uint64_t uStatMaxTxDelay;
1219 uint32_t uStatInt;
1220 uint32_t uStatIntTry;
1221 int32_t uStatIntLower;
1222 uint32_t uStatIntDly;
1223 int32_t iStatIntLost;
1224 int32_t iStatIntLostOne;
1225 uint32_t uStatDisDly;
1226 uint32_t uStatIntSkip;
1227 uint32_t uStatIntLate;
1228 uint32_t uStatIntMasked;
1229 uint32_t uStatIntEarly;
1230 uint32_t uStatIntRx;
1231 uint32_t uStatIntTx;
1232 uint32_t uStatIntICS;
1233 uint32_t uStatIntRDTR;
1234 uint32_t uStatIntRXDMT0;
1235 uint32_t uStatIntTXQE;
1236 uint32_t uStatTxNoRS;
1237 uint32_t uStatTxIDE;
1238 uint32_t uStatTxDelayed;
1239 uint32_t uStatTxDelayExp;
1240 uint32_t uStatTAD;
1241 uint32_t uStatTID;
1242 uint32_t uStatRAD;
1243 uint32_t uStatRID;
1244 uint32_t uStatRxFrm;
1245 uint32_t uStatTxFrm;
1246 uint32_t uStatDescCtx;
1247 uint32_t uStatDescDat;
1248 uint32_t uStatDescLeg;
1249 uint32_t uStatTx1514;
1250 uint32_t uStatTx2962;
1251 uint32_t uStatTx4410;
1252 uint32_t uStatTx5858;
1253 uint32_t uStatTx7306;
1254 uint32_t uStatTx8754;
1255 uint32_t uStatTx16384;
1256 uint32_t uStatTx32768;
1257 uint32_t uStatTxLarge;
1258 uint32_t uStatAlign;
1259#endif /* E1K_INT_STATS */
1260};
1261typedef struct E1kState_st E1KSTATE;
1262
1263#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1264
1265/* Forward declarations ******************************************************/
1266static int e1kXmitPending(E1KSTATE *pState, bool fOnWorkerThread);
1267
1268static int e1kRegReadUnimplemented (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1269static int e1kRegWriteUnimplemented(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1270static int e1kRegReadAutoClear (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1271static int e1kRegReadDefault (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1272static int e1kRegWriteDefault (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1273#if 0 /* unused */
1274static int e1kRegReadCTRL (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1275#endif
1276static int e1kRegWriteCTRL (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1277static int e1kRegReadEECD (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1278static int e1kRegWriteEECD (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1279static int e1kRegWriteEERD (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1280static int e1kRegWriteMDIC (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1281static int e1kRegReadICR (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1282static int e1kRegWriteICR (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1283static int e1kRegWriteICS (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1284static int e1kRegWriteIMS (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1285static int e1kRegWriteIMC (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1286static int e1kRegWriteRCTL (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1287static int e1kRegWritePBA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1288static int e1kRegWriteRDT (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1289static int e1kRegWriteRDTR (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1290static int e1kRegWriteTDT (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1291static int e1kRegReadMTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1292static int e1kRegWriteMTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1293static int e1kRegReadRA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1294static int e1kRegWriteRA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1295static int e1kRegReadVFTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1296static int e1kRegWriteVFTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1297
1298/**
1299 * Register map table.
1300 *
1301 * Override fn_read and fn_write to get register-specific behavior.
1302 */
1303const static struct E1kRegMap_st
1304{
1305 /** Register offset in the register space. */
1306 uint32_t offset;
1307 /** Size in bytes. Registers of size > 4 are in fact tables. */
1308 uint32_t size;
1309 /** Readable bits. */
1310 uint32_t readable;
1311 /** Writable bits. */
1312 uint32_t writable;
1313 /** Read callback. */
1314 int (*pfnRead)(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1315 /** Write callback. */
1316 int (*pfnWrite)(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1317 /** Abbreviated name. */
1318 const char *abbrev;
1319 /** Full name. */
1320 const char *name;
1321} s_e1kRegMap[E1K_NUM_OF_REGS] =
1322{
1323 /* offset size read mask write mask read callback write callback abbrev full name */
1324 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1325 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1326 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1327 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1328 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1329 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1330 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1331 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1332 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1333 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1334 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1335 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1336 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1337 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1338 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1339 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1340 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1341 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1342 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1343 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1344 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1345 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1346 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1347 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1348 { 0x00e00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LEDCTL" , "LED Control" },
1349 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1350 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1351 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1352 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1353 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1354 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1355 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1356 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1357 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1358 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1359 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1360 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1361 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1362 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1363 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1364 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1365 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1366 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1367 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1368 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1369 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1370 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1371 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1372 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1373 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1374 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1375 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1376 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1377 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1378 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1379 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1380 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1381 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1382 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1383 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1384 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1385 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1386 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1387 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1388 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1389 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1390 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1391 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1392 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1393 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1394 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1395 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1396 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1397 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1398 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1399 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1400 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1401 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1402 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1403 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1404 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1405 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1406 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1407 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1408 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1409 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1410 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1411 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1412 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1413 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1414 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1415 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1416 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1417 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1418 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1419 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1420 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1421 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1422 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1423 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1424 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1425 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1426 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1427 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1428 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1429 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1430 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1431 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1432 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1433 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1434 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1435 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1436 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1437 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1438 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1439 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1440 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1441 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1442 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1443 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1444 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1445 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1446 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1447 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1448 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1449 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1450 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1451 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1452 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1453 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1454 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1455 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1456 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n) (82542)" },
1457 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n) (82542)" },
1458 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n) (82542)" }
1459};
1460
1461#ifdef DEBUG
1462
1463/**
1464 * Convert U32 value to hex string. Masked bytes are replaced with dots.
1465 *
1466 * @remarks The mask has nibble (hex digit), not bit, granularity (e.g. 000000FF).
1467 *
1468 * @returns The buffer.
1469 *
1470 * @param u32 The word to convert into string.
1471 * @param mask Selects which bytes to convert.
1472 * @param buf Where to put the result.
1473 */
1474static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1475{
1476 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1477 {
1478 if (mask & 0xF)
1479 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1480 else
1481 *ptr = '.';
1482 }
1483 buf[8] = 0;
1484 return buf;
1485}
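
/*
 * Illustrative usage of e1kU32toHex() above (hypothetical values): each hex
 * digit of the mask selects whether the corresponding digit of the value is
 * printed or replaced with a dot.
 *
 * @code
 *     char szBuf[9];
 *     e1kU32toHex(0x8086100EU, 0xFFFF0000U, szBuf); // szBuf == "8086...."
 *     e1kU32toHex(0x8086100EU, 0x000000FFU, szBuf); // szBuf == "......0E"
 * @endcode
 */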
1486
1487/**
1488 * Returns timer name for debug purposes.
1489 *
1490 * @returns The timer name.
1491 *
1492 * @param pState The device state structure.
1493 * @param pTimer The timer to get the name for.
1494 */
1495DECLINLINE(const char *) e1kGetTimerName(E1KSTATE *pState, PTMTIMER pTimer)
1496{
1497 if (pTimer == pState->CTX_SUFF(pTIDTimer))
1498 return "TID";
1499 if (pTimer == pState->CTX_SUFF(pTADTimer))
1500 return "TAD";
1501 if (pTimer == pState->CTX_SUFF(pRIDTimer))
1502 return "RID";
1503 if (pTimer == pState->CTX_SUFF(pRADTimer))
1504 return "RAD";
1505 if (pTimer == pState->CTX_SUFF(pIntTimer))
1506 return "Int";
1507 if (pTimer == pState->CTX_SUFF(pTXDTimer))
1508 return "TXD";
1509 return "unknown";
1510}
1511
1512#endif /* DEBUG */
1513
1514/**
1515 * Arm a timer.
1516 *
1517 * @param pState Pointer to the device state structure.
1518 * @param pTimer Pointer to the timer.
1519 * @param uExpireIn Expiration interval in microseconds.
1520 */
1521DECLINLINE(void) e1kArmTimer(E1KSTATE *pState, PTMTIMER pTimer, uint32_t uExpireIn)
1522{
1523 if (pState->fLocked)
1524 return;
1525
1526 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1527 INSTANCE(pState), e1kGetTimerName(pState, pTimer), uExpireIn));
1528 TMTimerSetMicro(pTimer, uExpireIn);
1529}
1530
1531/**
1532 * Cancel a timer.
1533 *
1534 * @param pState Pointer to the device state structure.
1535 * @param pTimer Pointer to the timer.
1536 */
1537DECLINLINE(void) e1kCancelTimer(E1KSTATE *pState, PTMTIMER pTimer)
1538{
1539 E1kLog2(("%s Stopping %s timer...\n",
1540 INSTANCE(pState), e1kGetTimerName(pState, pTimer)));
1541 int rc = TMTimerStop(pTimer);
1542 if (RT_FAILURE(rc))
1543 {
1544 E1kLog2(("%s e1kCancelTimer: TMTimerStop() failed with %Rrc\n",
1545 INSTANCE(pState), rc));
1546 }
1547}
1548
1549#define e1kCsEnter(ps, rc) PDMCritSectEnter(&ps->cs, rc)
1550#define e1kCsLeave(ps) PDMCritSectLeave(&ps->cs)
1551
1552#define e1kCsRxEnter(ps, rc) PDMCritSectEnter(&ps->csRx, rc)
1553#define e1kCsRxLeave(ps) PDMCritSectLeave(&ps->csRx)
1554#define e1kCsRxIsOwner(ps) PDMCritSectIsOwner(&ps->csRx)
1555
1556#ifndef E1K_WITH_TX_CS
1557# define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1558# define e1kCsTxLeave(ps) do { } while (0)
1559# define e1kCsIsOwner(cs) true
1560#else /* E1K_WITH_TX_CS */
1561# define e1kCsTxEnter(ps, rc) PDMCritSectEnter(&ps->csTx, rc)
1562# define e1kCsTxLeave(ps) PDMCritSectLeave(&ps->csTx)
1563# define e1kCsIsOwner(cs) PDMCritSectIsOwner(cs)
1564#endif /* E1K_WITH_TX_CS */
1565
1566#ifdef IN_RING3
1567
1568/**
1569 * Wakeup the RX thread.
1570 */
1571static void e1kWakeupReceive(PPDMDEVINS pDevIns)
1572{
1573 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
1574 if ( pState->fMaybeOutOfSpace
1575 && pState->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
1576 {
1577 STAM_COUNTER_INC(&pState->StatRxOverflowWakeup);
1578 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", INSTANCE(pState)));
1579 RTSemEventSignal(pState->hEventMoreRxDescAvail);
1580 }
1581}
1582
1583/**
1584 * Hardware reset. Revert all registers to initial values.
1585 *
1586 * @param pState The device state structure.
1587 */
1588static void e1kHardReset(E1KSTATE *pState)
1589{
1590 E1kLog(("%s Hard reset triggered\n", INSTANCE(pState)));
1591 memset(pState->auRegs, 0, sizeof(pState->auRegs));
1592 memset(pState->aRecAddr.au32, 0, sizeof(pState->aRecAddr.au32));
1593#ifdef E1K_INIT_RA0
1594 memcpy(pState->aRecAddr.au32, pState->macConfigured.au8,
1595 sizeof(pState->macConfigured.au8));
1596 pState->aRecAddr.array[0].ctl |= RA_CTL_AV;
1597#endif /* E1K_INIT_RA0 */
1598 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1599 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1600 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1601 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1602 Assert(GET_BITS(RCTL, BSIZE) == 0);
1603 pState->u16RxBSize = 2048;
1604
1605 /* Reset promiscuous mode */
1606 if (pState->pDrvR3)
1607 pState->pDrvR3->pfnSetPromiscuousMode(pState->pDrvR3, false);
1608
1609#ifdef E1K_WITH_TXD_CACHE
1610 int rc = e1kCsTxEnter(pState, VERR_SEM_BUSY);
1611 if (RT_LIKELY(rc == VINF_SUCCESS))
1612 {
1613 pState->nTxDFetched = 0;
1614 pState->iTxDCurrent = 0;
1615 pState->fGSO = false;
1616 pState->cbTxAlloc = 0;
1617 e1kCsTxLeave(pState);
1618 }
1619#endif /* E1K_WITH_TXD_CACHE */
1620#ifdef E1K_WITH_RXD_CACHE
1621 if (RT_LIKELY(e1kCsRxEnter(pState, VERR_SEM_BUSY) == VINF_SUCCESS))
1622 {
1623 pState->iRxDCurrent = pState->nRxDFetched = 0;
1624 e1kCsRxLeave(pState);
1625 }
1626#endif /* E1K_WITH_RXD_CACHE */
1627}
1628
1629#endif /* IN_RING3 */
1630
1631/**
1632 * Compute Internet checksum.
1633 *
1634 * @remarks Refer to http://www.netfor2.com/checksum.html for short intro.
1635 *
1636 * @param pvBuf The buffer containing the data to checksum.
1637 * @param cb The size of the buffer in bytes.
1640 *
1641 * @return The 1's complement of the 1's complement sum.
1642 *
1643 * @thread E1000_TX
1644 */
1645static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1646{
1647 uint32_t csum = 0;
1648 uint16_t *pu16 = (uint16_t *)pvBuf;
1649
1650 while (cb > 1)
1651 {
1652 csum += *pu16++;
1653 cb -= 2;
1654 }
1655 if (cb)
1656 csum += *(uint8_t*)pu16;
1657 while (csum >> 16)
1658 csum = (csum >> 16) + (csum & 0xFFFF);
1659 return ~csum;
1660}
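
/*
 * Minimal sketch of e1kCSum16() usage (hypothetical values): it illustrates
 * the one's complement property the checksum relies on -- 16-bit words that
 * sum to 0xFFFF produce a zero checksum, while an all-zero buffer yields
 * 0xFFFF.
 *
 * @code
 *     uint16_t const au16Words[2] = { 0x1234, 0xEDCB };            // 0x1234 + 0xEDCB = 0xFFFF
 *     uint16_t u16Csum = e1kCSum16(au16Words, sizeof(au16Words));  // == 0x0000
 *     uint16_t const au16Zeros[4] = { 0, 0, 0, 0 };
 *     u16Csum = e1kCSum16(au16Zeros, sizeof(au16Zeros));           // == 0xFFFF
 * @endcode
 */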
1661
1662/**
1663 * Dump a packet to debug log.
1664 *
1665 * @param pState The device state structure.
1666 * @param cpPacket The packet.
1667 * @param cb The size of the packet.
1668 * @param cszText A string denoting direction of packet transfer.
1669 * @thread E1000_TX
1670 */
1671DECLINLINE(void) e1kPacketDump(E1KSTATE* pState, const uint8_t *cpPacket, size_t cb, const char *cszText)
1672{
1673#ifdef DEBUG
1674 if (RT_LIKELY(e1kCsEnter(pState, VERR_SEM_BUSY) == VINF_SUCCESS))
1675 {
1676 E1kLog(("%s --- %s packet #%d: ---\n",
1677 INSTANCE(pState), cszText, ++pState->u32PktNo));
1678 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1679 e1kCsLeave(pState);
1680 }
1681#else
1682 if (RT_LIKELY(e1kCsEnter(pState, VERR_SEM_BUSY) == VINF_SUCCESS))
1683 {
1684 E1kLogRel(("E1000: %s packet #%d, seq=%x ack=%x\n", cszText, pState->u32PktNo++, ntohl(*(uint32_t*)(cpPacket+0x26)), ntohl(*(uint32_t*)(cpPacket+0x2A))));
1685 e1kCsLeave(pState);
1686 }
1687#endif
1688}
1689
1690/**
1691 * Determine the type of transmit descriptor.
1692 *
1693 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1694 *
1695 * @param pDesc Pointer to descriptor union.
1696 * @thread E1000_TX
1697 */
1698DECLINLINE(int) e1kGetDescType(E1KTXDESC* pDesc)
1699{
1700 if (pDesc->legacy.cmd.fDEXT)
1701 return pDesc->context.dw2.u4DTYP;
1702 return E1K_DTYP_LEGACY;
1703}
1704
1705/**
1706 * Dump receive descriptor to debug log.
1707 *
1708 * @param pState The device state structure.
1709 * @param pDesc Pointer to the descriptor.
1710 * @thread E1000_RX
1711 */
1712static void e1kPrintRDesc(E1KSTATE* pState, E1KRXDESC* pDesc)
1713{
1714 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", INSTANCE(pState), pDesc->u16Length));
1715 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1716 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1717 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1718 pDesc->status.fPIF ? "PIF" : "pif",
1719 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1720 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1721 pDesc->status.fVP ? "VP" : "vp",
1722 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1723 pDesc->status.fEOP ? "EOP" : "eop",
1724 pDesc->status.fDD ? "DD" : "dd",
1725 pDesc->status.fRXE ? "RXE" : "rxe",
1726 pDesc->status.fIPE ? "IPE" : "ipe",
1727 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1728 pDesc->status.fCE ? "CE" : "ce",
1729 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1730 E1K_SPEC_VLAN(pDesc->status.u16Special),
1731 E1K_SPEC_PRI(pDesc->status.u16Special)));
1732}
1733
1734/**
1735 * Dump transmit descriptor to debug log.
1736 *
1737 * @param pState The device state structure.
1738 * @param pDesc Pointer to descriptor union.
1739 * @param cszDir A string denoting direction of descriptor transfer
1740 * @thread E1000_TX
1741 */
1742static void e1kPrintTDesc(E1KSTATE* pState, E1KTXDESC* pDesc, const char* cszDir,
1743 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
1744{
1745 switch (e1kGetDescType(pDesc))
1746 {
1747 case E1K_DTYP_CONTEXT:
1748 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
1749 INSTANCE(pState), cszDir, cszDir));
1750 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
1751 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
1752 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
1753 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
1754 pDesc->context.dw2.fIDE ? " IDE":"",
1755 pDesc->context.dw2.fRS ? " RS" :"",
1756 pDesc->context.dw2.fTSE ? " TSE":"",
1757 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
1758 pDesc->context.dw2.fTCP ? "TCP":"UDP",
1759 pDesc->context.dw2.u20PAYLEN,
1760 pDesc->context.dw3.u8HDRLEN,
1761 pDesc->context.dw3.u16MSS,
1762 pDesc->context.dw3.fDD?"DD":""));
1763 break;
1764 case E1K_DTYP_DATA:
1765 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
1766 INSTANCE(pState), cszDir, pDesc->data.cmd.u20DTALEN, cszDir));
1767 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1768 pDesc->data.u64BufAddr,
1769 pDesc->data.cmd.u20DTALEN));
1770 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
1771 pDesc->data.cmd.fIDE ? " IDE" :"",
1772 pDesc->data.cmd.fVLE ? " VLE" :"",
1773 pDesc->data.cmd.fRPS ? " RPS" :"",
1774 pDesc->data.cmd.fRS ? " RS" :"",
1775 pDesc->data.cmd.fTSE ? " TSE" :"",
1776 pDesc->data.cmd.fIFCS? " IFCS":"",
1777 pDesc->data.cmd.fEOP ? " EOP" :"",
1778 pDesc->data.dw3.fDD ? " DD" :"",
1779 pDesc->data.dw3.fEC ? " EC" :"",
1780 pDesc->data.dw3.fLC ? " LC" :"",
1781 pDesc->data.dw3.fTXSM? " TXSM":"",
1782 pDesc->data.dw3.fIXSM? " IXSM":"",
1783 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
1784 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
1785 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
1786 break;
1787 case E1K_DTYP_LEGACY:
1788 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
1789 INSTANCE(pState), cszDir, pDesc->legacy.cmd.u16Length, cszDir));
1790 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1791 pDesc->data.u64BufAddr,
1792 pDesc->legacy.cmd.u16Length));
1793 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
1794 pDesc->legacy.cmd.fIDE ? " IDE" :"",
1795 pDesc->legacy.cmd.fVLE ? " VLE" :"",
1796 pDesc->legacy.cmd.fRPS ? " RPS" :"",
1797 pDesc->legacy.cmd.fRS ? " RS" :"",
1798 pDesc->legacy.cmd.fIC ? " IC" :"",
1799 pDesc->legacy.cmd.fIFCS? " IFCS":"",
1800 pDesc->legacy.cmd.fEOP ? " EOP" :"",
1801 pDesc->legacy.dw3.fDD ? " DD" :"",
1802 pDesc->legacy.dw3.fEC ? " EC" :"",
1803 pDesc->legacy.dw3.fLC ? " LC" :"",
1804 pDesc->legacy.cmd.u8CSO,
1805 pDesc->legacy.dw3.u8CSS,
1806 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
1807 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
1808 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
1809 break;
1810 default:
1811 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
1812 INSTANCE(pState), cszDir, cszDir));
1813 break;
1814 }
1815}
1816
1817/**
1818 * Raise interrupt if not masked.
1819 *
1820 * @param pState The device state structure.
1821 */
1822static int e1kRaiseInterrupt(E1KSTATE *pState, int rcBusy, uint32_t u32IntCause = 0)
1823{
1824 int rc = e1kCsEnter(pState, rcBusy);
1825 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1826 return rc;
1827
1828 E1K_INC_ISTAT_CNT(pState->uStatIntTry);
1829 ICR |= u32IntCause;
1830 if (ICR & IMS)
1831 {
1832#if 0
1833 if (pState->fDelayInts)
1834 {
1835 E1K_INC_ISTAT_CNT(pState->uStatIntDly);
1836 pState->iStatIntLostOne = 1;
1837 E1kLog2(("%s e1kRaiseInterrupt: Delayed. ICR=%08x\n",
1838 INSTANCE(pState), ICR));
1839#define E1K_LOST_IRQ_THRSLD 20
1840//#define E1K_LOST_IRQ_THRSLD 200000000
1841 if (pState->iStatIntLost >= E1K_LOST_IRQ_THRSLD)
1842 {
1843 E1kLog2(("%s WARNING! Disabling delayed interrupt logic: delayed=%d, delivered=%d\n",
1844 INSTANCE(pState), pState->uStatIntDly, pState->uStatIntLate));
1845 pState->fIntMaskUsed = false;
1846 pState->uStatDisDly++;
1847 }
1848 }
1849 else
1850#endif
1851 if (pState->fIntRaised)
1852 {
1853 E1K_INC_ISTAT_CNT(pState->uStatIntSkip);
1854 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
1855 INSTANCE(pState), ICR & IMS));
1856 }
1857 else
1858 {
1859#ifdef E1K_ITR_ENABLED
1860 uint64_t tstamp = TMTimerGet(pState->CTX_SUFF(pIntTimer));
1861 /* interrupts/sec = 1 / (256 * 1E-9 * ITR) */
1862 E1kLog2(("%s e1kRaiseInterrupt: tstamp - pState->u64AckedAt = %d, ITR * 256 = %d\n",
1863 INSTANCE(pState), (uint32_t)(tstamp - pState->u64AckedAt), ITR * 256));
1864 //if (!!ITR && pState->fIntMaskUsed && tstamp - pState->u64AckedAt < ITR * 256)
1865 if (!!ITR && tstamp - pState->u64AckedAt < ITR * 256 && !(ICR & ICR_RXT0))
1866 {
1867 E1K_INC_ISTAT_CNT(pState->uStatIntEarly);
1868 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
1869 INSTANCE(pState), (uint32_t)(tstamp - pState->u64AckedAt), ITR * 256));
1870 }
1871 else
1872#endif
1873 {
1874
1875 /* Since we are delivering the interrupt now
1876 * there is no need to do it later -- stop the timer.
1877 */
1878 TMTimerStop(pState->CTX_SUFF(pIntTimer));
1879 E1K_INC_ISTAT_CNT(pState->uStatInt);
1880 STAM_COUNTER_INC(&pState->StatIntsRaised);
1881 /* Got at least one unmasked interrupt cause */
1882 pState->fIntRaised = true;
1883 /* Raise(1) INTA(0) */
1884 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
1885 PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 1);
1886 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
1887 INSTANCE(pState), ICR & IMS));
1888 }
1889 }
1890 }
1891 else
1892 {
1893 E1K_INC_ISTAT_CNT(pState->uStatIntMasked);
1894 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
1895 INSTANCE(pState), ICR, IMS));
1896 }
1897 e1kCsLeave(pState);
1898 return VINF_SUCCESS;
1899}
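
/*
 * Worked example for the ITR throttling formula in e1kRaiseInterrupt() above
 * (hypothetical value): ITR = 500 gives a minimum inter-interrupt interval of
 * 500 * 256 ns = 128 us, i.e. at most roughly 7800 interrupts per second.
 */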
1900
1901/**
1902 * Compute the physical address of the descriptor.
1903 *
1904 * @returns the physical address of the descriptor.
1905 *
1906 * @param baseHigh High-order 32 bits of descriptor table address.
1907 * @param baseLow Low-order 32 bits of descriptor table address.
1908 * @param idxDesc The descriptor index in the table.
1909 */
1910DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
1911{
1912 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
1913 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
1914}
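
/*
 * Worked example for e1kDescAddr() above (hypothetical addresses, assuming
 * the usual 16-byte descriptor size): with the ring based at
 * 0x0000000120003000, descriptor index 2 resolves to base + 2 * 16.
 *
 * @code
 *     RTGCPHYS GCPhys = e1kDescAddr(0x00000001, 0x20003000, 2); // == 0x0000000120003020
 * @endcode
 */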
1915
1916/**
1917 * Advance the head pointer of the receive descriptor queue.
1918 *
1919 * @remarks RDH always points to the next available RX descriptor.
1920 *
1921 * @param pState The device state structure.
1922 */
1923DECLINLINE(void) e1kAdvanceRDH(E1KSTATE *pState)
1924{
1925 Assert(e1kCsRxIsOwner(pState));
1926 //e1kCsEnter(pState, RT_SRC_POS);
1927 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
1928 RDH = 0;
1929 /*
1930 * Compute current receive queue length and fire RXDMT0 interrupt
1931 * if we are low on receive buffers
1932 */
1933 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
1934 /*
1935 * The minimum threshold is controlled by RDMTS bits of RCTL:
1936 * 00 = 1/2 of RDLEN
1937 * 01 = 1/4 of RDLEN
1938 * 10 = 1/8 of RDLEN
1939 * 11 = reserved
1940 */
1941 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
1942 if (uRQueueLen <= uMinRQThreshold)
1943 {
1944 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
1945 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
1946 INSTANCE(pState), RDH, RDT, uRQueueLen, uMinRQThreshold));
1947 E1K_INC_ISTAT_CNT(pState->uStatIntRXDMT0);
1948 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXDMT0);
1949 }
1950 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
1951 INSTANCE(pState), RDH, RDT, uRQueueLen));
1952 //e1kCsLeave(pState);
1953}
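
/*
 * Worked example for the RDMTS threshold computed in e1kAdvanceRDH() above
 * (hypothetical register values, assuming 16-byte descriptors): with RDLEN
 * describing 256 descriptors (4096 bytes) and RCTL.RDMTS = 01b, the divisor
 * is (2 << 1) = 4, so RXDMT0 is raised once at most 256 / 4 = 64 descriptors
 * remain available to the hardware.
 */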
1954
1955#ifdef E1K_WITH_RXD_CACHE
1956/**
1957 * Return the number of RX descriptors that belong to the hardware.
1958 *
1959 * @returns the number of available descriptors in RX ring.
1960 * @param pState The device state structure.
1961 * @thread ???
1962 */
1963DECLINLINE(uint32_t) e1kGetRxLen(E1KSTATE* pState)
1964{
1965 /**
1966 * Make sure RDT won't change during computation. EMT may modify RDT at
1967 * any moment.
1968 */
1969 uint32_t rdt = RDT;
1970 return (RDH > rdt ? RDLEN/sizeof(E1KRXDESC) : 0) + rdt - RDH;
1971}
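
/*
 * Worked example for e1kGetRxLen() above (hypothetical values, assuming
 * 16-byte descriptors): with an 8-descriptor ring (RDLEN = 128), RDH = 5 and
 * RDT = 3 the tail has wrapped, so the hardware owns (8 - 5) + 3 = 6
 * descriptors; with RDH = 2 and RDT = 6 it owns 6 - 2 = 4.
 */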
1972
1973DECLINLINE(unsigned) e1kRxDInCache(E1KSTATE* pState)
1974{
1975 return pState->nRxDFetched > pState->iRxDCurrent ?
1976 pState->nRxDFetched - pState->iRxDCurrent : 0;
1977}
1978
1979DECLINLINE(unsigned) e1kRxDIsCacheEmpty(E1KSTATE* pState)
1980{
1981 return pState->iRxDCurrent >= pState->nRxDFetched;
1982}
1983
1984/**
1985 * Load receive descriptors from guest memory. The caller needs to be in Rx
1986 * critical section.
1987 *
1988 * We need two physical reads in case the tail wrapped around the end of RX
1989 * descriptor ring.
1990 *
1991 * @returns the actual number of descriptors fetched.
1992 * @param pState The device state structure.
1993 * @param pDesc Pointer to descriptor union.
1994 * @param addr Physical address in guest context.
1995 * @thread EMT, RX
1996 */
1997DECLINLINE(unsigned) e1kRxDPrefetch(E1KSTATE* pState)
1998{
1999 /* We've already loaded pState->nRxDFetched descriptors past RDH. */
2000 unsigned nDescsAvailable = e1kGetRxLen(pState) - e1kRxDInCache(pState);
2001 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pState->nRxDFetched);
2002 unsigned nDescsTotal = RDLEN / sizeof(E1KRXDESC);
2003 Assert(nDescsTotal != 0);
2004 if (nDescsTotal == 0)
2005 return 0;
2006 unsigned nFirstNotLoaded = (RDH + e1kRxDInCache(pState)) % nDescsTotal;
2007 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
2008 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
2009 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
2010 INSTANCE(pState), nDescsAvailable, nDescsToFetch, nDescsTotal,
2011 nFirstNotLoaded, nDescsInSingleRead));
2012 if (nDescsToFetch == 0)
2013 return 0;
2014 E1KRXDESC* pFirstEmptyDesc = &pState->aRxDescriptors[pState->nRxDFetched];
2015 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
2016 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
2017 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
2018 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
2019 // unsigned i, j;
2020 // for (i = pState->nRxDFetched; i < pState->nRxDFetched + nDescsInSingleRead; ++i)
2021 // {
2022 // pState->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pState->nRxDFetched) * sizeof(E1KRXDESC);
2023 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", INSTANCE(pState), i, pState->aRxDescAddr[i]));
2024 // }
2025 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
2026 INSTANCE(pState), nDescsInSingleRead,
2027 RDBAH, RDBAL + RDH * sizeof(E1KRXDESC),
2028 nFirstNotLoaded, RDLEN, RDH, RDT));
2029 if (nDescsToFetch > nDescsInSingleRead)
2030 {
2031 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
2032 ((uint64_t)RDBAH << 32) + RDBAL,
2033 pFirstEmptyDesc + nDescsInSingleRead,
2034 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
2035 // Assert(i == pState->nRxDFetched + nDescsInSingleRead);
2036 // for (j = 0; i < pState->nRxDFetched + nDescsToFetch; ++i, ++j)
2037 // {
2038 // pState->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
2039 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", INSTANCE(pState), i, pState->aRxDescAddr[i]));
2040 // }
2041 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
2042 INSTANCE(pState), nDescsToFetch - nDescsInSingleRead,
2043 RDBAH, RDBAL));
2044 }
2045 pState->nRxDFetched += nDescsToFetch;
2046 return nDescsToFetch;
2047}
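
/*
 * Worked example for e1kRxDPrefetch() above (hypothetical values, assuming
 * the cache can hold at least four descriptors): with an 8-descriptor ring,
 * RDH = 6, RDT = 2 and an empty cache, four descriptors are available. The
 * first physical read fetches descriptors 6 and 7 (up to the end of the
 * ring); the second read wraps around and fetches descriptors 0 and 1 from
 * the ring base.
 */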
2048
2049/**
2050 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2051 * RX ring if the cache is empty.
2052 *
2053 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2054 * go out of sync with RDH which will cause trouble when EMT checks if the
2055 * cache is empty to do pre-fetch (see @bugref{6217}).
2056 *
2057 * @param pState The device state structure.
2058 * @thread RX
2059 */
2060DECLINLINE(E1KRXDESC*) e1kRxDGet(E1KSTATE* pState)
2061{
2062 Assert(e1kCsRxIsOwner(pState));
2063 /* Check the cache first. */
2064 if (pState->iRxDCurrent < pState->nRxDFetched)
2065 return &pState->aRxDescriptors[pState->iRxDCurrent];
2066 /* Cache is empty, reset it and check if we can fetch more. */
2067 pState->iRxDCurrent = pState->nRxDFetched = 0;
2068 if (e1kRxDPrefetch(pState))
2069 return &pState->aRxDescriptors[pState->iRxDCurrent];
2070 /* Out of Rx descriptors. */
2071 return NULL;
2072}
2073
2074/**
2075 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2076 * pointer. The descriptor gets written back to the RXD ring.
2077 *
2078 * @param pState The device state structure.
2079 * @param pDesc The descriptor being "returned" to the RX ring.
2080 * @thread RX
2081 */
2082DECLINLINE(void) e1kRxDPut(E1KSTATE* pState, E1KRXDESC* pDesc)
2083{
2084 Assert(e1kCsRxIsOwner(pState));
2085 pState->iRxDCurrent++;
2086 // Assert(pDesc >= pState->aRxDescriptors);
2087 // Assert(pDesc < pState->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2088 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2089 // uint32_t rdh = RDH;
2090 // Assert(pState->aRxDescAddr[pDesc - pState->aRxDescriptors] == addr);
2091 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns),
2092 e1kDescAddr(RDBAH, RDBAL, RDH),
2093 pDesc, sizeof(E1KRXDESC));
2094 e1kAdvanceRDH(pState);
2095 e1kPrintRDesc(pState, pDesc);
2096}
2097
2098/**
2099 * Store a fragment of a received packet at the specified address.
2100 *
2101 * @param pState The device state structure.
2102 * @param pDesc The next available RX descriptor.
2103 * @param pvBuf The fragment.
2104 * @param cb The size of the fragment.
2105 */
2106static DECLCALLBACK(void) e1kStoreRxFragment(E1KSTATE *pState, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2107{
2108 STAM_PROFILE_ADV_START(&pState->StatReceiveStore, a);
2109 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2110 INSTANCE(pState), cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2111 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2112 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2113 STAM_PROFILE_ADV_STOP(&pState->StatReceiveStore, a);
2114}
2115
2116#else /* !E1K_WITH_RXD_CACHE */
2117
2118/**
2119 * Store a fragment of a received packet that fits into the next available RX
2120 * buffer.
2121 *
2122 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2123 *
2124 * @param pState The device state structure.
2125 * @param pDesc The next available RX descriptor.
2126 * @param pvBuf The fragment.
2127 * @param cb The size of the fragment.
2128 */
2129static DECLCALLBACK(void) e1kStoreRxFragment(E1KSTATE *pState, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2130{
2131 STAM_PROFILE_ADV_START(&pState->StatReceiveStore, a);
2132 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pState->szInstance, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2133 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2134 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2135 /* Write back the descriptor */
2136 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2137 e1kPrintRDesc(pState, pDesc);
2138 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2139 /* Advance head */
2140 e1kAdvanceRDH(pState);
2141 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", INSTANCE(pState), pDesc->fEOP, RDTR, RADV));
2142 if (pDesc->status.fEOP)
2143 {
2144 /* Complete packet has been stored -- it is time to let the guest know. */
2145#ifdef E1K_USE_RX_TIMERS
2146 if (RDTR)
2147 {
2148 /* Arm the timer to fire in RDTR usec (discard .024) */
2149 e1kArmTimer(pState, pState->CTX_SUFF(pRIDTimer), RDTR);
2150 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2151 if (RADV != 0 && !TMTimerIsActive(pState->CTX_SUFF(pRADTimer)))
2152 e1kArmTimer(pState, pState->CTX_SUFF(pRADTimer), RADV);
2153 }
2154 else
2155 {
2156#endif
2157 /* 0 delay means immediate interrupt */
2158 E1K_INC_ISTAT_CNT(pState->uStatIntRx);
2159 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXT0);
2160#ifdef E1K_USE_RX_TIMERS
2161 }
2162#endif
2163 }
2164 STAM_PROFILE_ADV_STOP(&pState->StatReceiveStore, a);
2165}
2166#endif /* !E1K_WITH_RXD_CACHE */
2167
2168/**
2169 * Returns true if it is a broadcast packet.
2170 *
2171 * @returns true if destination address indicates broadcast.
2172 * @param pvBuf The ethernet packet.
2173 */
2174DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2175{
2176 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2177 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2178}
2179
2180/**
2181 * Returns true if it is a multicast packet.
2182 *
2183 * @remarks returns true for broadcast packets as well.
2184 * @returns true if destination address indicates multicast.
2185 * @param pvBuf The ethernet packet.
2186 */
2187DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2188{
2189 return (*(char*)pvBuf) & 1;
2190}
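
/*
 * Note on e1kIsMulticast() above: the test checks the I/G bit, i.e. the least
 * significant bit of the first destination address byte. For example,
 * 01:00:5E:00:00:01 (IPv4 multicast) has it set, and so does
 * FF:FF:FF:FF:FF:FF, which is why broadcast frames match as well.
 */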
2191
2192/**
2193 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2194 *
2195 * @remarks We emulate checksum offloading for major packet types only.
2196 *
2197 * @returns VBox status code.
2198 * @param pState The device state structure.
2199 * @param pFrame The available data.
2200 * @param cb Number of bytes available in the buffer.
2201 * @param pStatus Bit fields containing status info.
2202 */
2203static int e1kRxChecksumOffload(E1KSTATE* pState, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2204{
2205 /** @todo
2206 * It is not safe to bypass checksum verification for packets coming
2207 * from the real wire. We are currently unable to tell where packets are
2208 * coming from, so we tell the driver to ignore our checksum flags
2209 * and do the verification in software.
2210 */
2211#if 0
2212 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2213
2214 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", INSTANCE(pState), uEtherType));
2215
2216 switch (uEtherType)
2217 {
2218 case 0x800: /* IPv4 */
2219 {
2220 pStatus->fIXSM = false;
2221 pStatus->fIPCS = true;
2222 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2223 /* TCP/UDP checksum offloading works with TCP and UDP only */
2224 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2225 break;
2226 }
2227 case 0x86DD: /* IPv6 */
2228 pStatus->fIXSM = false;
2229 pStatus->fIPCS = false;
2230 pStatus->fTCPCS = true;
2231 break;
2232 default: /* ARP, VLAN, etc. */
2233 pStatus->fIXSM = true;
2234 break;
2235 }
2236#else
2237 pStatus->fIXSM = true;
2238#endif
2239 return VINF_SUCCESS;
2240}
2241
2242/**
2243 * Pad and store received packet.
2244 *
2245 * @remarks Make sure that the packet appears to upper layer as one coming
2246 * from real Ethernet: pad it and insert FCS.
2247 *
2248 * @returns VBox status code.
2249 * @param pState The device state structure.
2250 * @param pvBuf The available data.
2251 * @param cb Number of bytes available in the buffer.
2252 * @param status Bit fields containing status info.
2253 */
2254static int e1kHandleRxPacket(E1KSTATE* pState, const void *pvBuf, size_t cb, E1KRXDST status)
2255{
2256#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2257 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2258 uint8_t *ptr = rxPacket;
2259
2260 int rc = e1kCsRxEnter(pState, VERR_SEM_BUSY);
2261 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2262 return rc;
2263
2264 if (cb > 70) /* unqualified guess */
2265 pState->led.Asserted.s.fReading = pState->led.Actual.s.fReading = 1;
2266
2267 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2268 Assert(cb > 16);
2269 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2270 E1kLog3(("%s Max RX packet size is %u\n", INSTANCE(pState), cbMax));
2271 if (status.fVP)
2272 {
2273 /* VLAN packet -- strip VLAN tag in VLAN mode */
2274 if ((CTRL & CTRL_VME) && cb > 16)
2275 {
2276 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2277 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2278 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2279 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2280 cb -= 4;
2281 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2282 INSTANCE(pState), status.u16Special, cb));
2283 }
2284 else
2285 status.fVP = false; /* Set VP only if we stripped the tag */
2286 }
2287 else
2288 memcpy(rxPacket, pvBuf, cb);
2289 /* Pad short packets */
2290 if (cb < 60)
2291 {
2292 memset(rxPacket + cb, 0, 60 - cb);
2293 cb = 60;
2294 }
2295 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2296 {
2297 STAM_PROFILE_ADV_START(&pState->StatReceiveCRC, a);
2298 /*
2299 * Add FCS if CRC stripping is not enabled. Since the value of CRC
2300 * is ignored by most of drivers we may as well save us the trouble
2301 * of calculating it (see EthernetCRC CFGM parameter).
2302 */
2303 if (pState->fEthernetCRC)
2304 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2305 cb += sizeof(uint32_t);
2306 STAM_PROFILE_ADV_STOP(&pState->StatReceiveCRC, a);
2307 E1kLog3(("%s Added FCS (cb=%u)\n", INSTANCE(pState), cb));
2308 }
2309 /* Compute checksum of complete packet */
2310 uint16_t checksum = e1kCSum16(rxPacket + GET_BITS(RXCSUM, PCSS), cb);
2311 e1kRxChecksumOffload(pState, rxPacket, cb, &status);
2312
2313 /* Update stats */
2314 E1K_INC_CNT32(GPRC);
2315 if (e1kIsBroadcast(pvBuf))
2316 E1K_INC_CNT32(BPRC);
2317 else if (e1kIsMulticast(pvBuf))
2318 E1K_INC_CNT32(MPRC);
2319 /* Update octet receive counter */
2320 E1K_ADD_CNT64(GORCL, GORCH, cb);
2321 STAM_REL_COUNTER_ADD(&pState->StatReceiveBytes, cb);
2322 if (cb == 64)
2323 E1K_INC_CNT32(PRC64);
2324 else if (cb < 128)
2325 E1K_INC_CNT32(PRC127);
2326 else if (cb < 256)
2327 E1K_INC_CNT32(PRC255);
2328 else if (cb < 512)
2329 E1K_INC_CNT32(PRC511);
2330 else if (cb < 1024)
2331 E1K_INC_CNT32(PRC1023);
2332 else
2333 E1K_INC_CNT32(PRC1522);
2334
2335 E1K_INC_ISTAT_CNT(pState->uStatRxFrm);
2336
2337#ifdef E1K_WITH_RXD_CACHE
2338 while (cb > 0)
2339 {
2340 E1KRXDESC *pDesc = e1kRxDGet(pState);
2341
2342 if (pDesc == NULL)
2343 {
2344 E1kLog(("%s Out of receive buffers, dropping the packet "
2345 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2346 INSTANCE(pState), cb, e1kRxDInCache(pState), RDH, RDT));
2347 break;
2348 }
2349#else /* !E1K_WITH_RXD_CACHE */
2350 if (RDH == RDT)
2351 {
2352 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2353 INSTANCE(pState)));
2354 }
2355 /* Store the packet to receive buffers */
2356 while (RDH != RDT)
2357 {
2358 /* Load the descriptor pointed by head */
2359 E1KRXDESC desc, *pDesc = &desc;
2360 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
2361 &desc, sizeof(desc));
2362#endif /* !E1K_WITH_RXD_CACHE */
2363 if (pDesc->u64BufAddr)
2364 {
2365 /* Update descriptor */
2366 pDesc->status = status;
2367 pDesc->u16Checksum = checksum;
2368 pDesc->status.fDD = true;
2369
2370 /*
2371 * We need to leave Rx critical section here or we risk deadlocking
2372 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2373 * page or has an access handler associated with it.
2374 * Note that it is safe to leave the critical section here since
2375 * e1kRegWriteRDT() never modifies RDH. It never touches already
2376 * fetched RxD cache entries either.
2377 */
2378 if (cb > pState->u16RxBSize)
2379 {
2380 pDesc->status.fEOP = false;
2381 e1kCsRxLeave(pState);
2382 e1kStoreRxFragment(pState, pDesc, ptr, pState->u16RxBSize);
2383 rc = e1kCsRxEnter(pState, VERR_SEM_BUSY);
2384 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2385 return rc;
2386 ptr += pState->u16RxBSize;
2387 cb -= pState->u16RxBSize;
2388 }
2389 else
2390 {
2391 pDesc->status.fEOP = true;
2392 e1kCsRxLeave(pState);
2393 e1kStoreRxFragment(pState, pDesc, ptr, cb);
2394#ifdef E1K_WITH_RXD_CACHE
2395 rc = e1kCsRxEnter(pState, VERR_SEM_BUSY);
2396 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2397 return rc;
2398 cb = 0;
2399#else /* !E1K_WITH_RXD_CACHE */
2400 pState->led.Actual.s.fReading = 0;
2401 return VINF_SUCCESS;
2402#endif /* !E1K_WITH_RXD_CACHE */
2403 }
2404 /*
2405 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2406 * is not defined.
2407 */
2408 }
2409#ifndef E1K_WITH_RXD_CACHE
2410 else
2411 {
2412#endif /* !E1K_WITH_RXD_CACHE */
2413 /* Write back the descriptor. */
2414 pDesc->status.fDD = true;
2415 e1kRxDPut(pState, pDesc);
2416#ifndef E1K_WITH_RXD_CACHE
2417 }
2418#endif /* !E1K_WITH_RXD_CACHE */
2419 }
2420
2421 if (cb > 0)
2422 E1kLog(("%s Out of receive buffers, dropping %u bytes\n", INSTANCE(pState), cb));
2423
2424 pState->led.Actual.s.fReading = 0;
2425
2426 e1kCsRxLeave(pState);
2427#ifdef E1K_WITH_RXD_CACHE
2428 /* Complete packet has been stored -- it is time to let the guest know. */
2429# ifdef E1K_USE_RX_TIMERS
2430 if (RDTR)
2431 {
2432 /* Arm the timer to fire in RDTR usec (discard .024) */
2433 e1kArmTimer(pState, pState->CTX_SUFF(pRIDTimer), RDTR);
2434 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2435 if (RADV != 0 && !TMTimerIsActive(pState->CTX_SUFF(pRADTimer)))
2436 e1kArmTimer(pState, pState->CTX_SUFF(pRADTimer), RADV);
2437 }
2438 else
2439 {
2440# endif /* E1K_USE_RX_TIMERS */
2441 /* 0 delay means immediate interrupt */
2442 E1K_INC_ISTAT_CNT(pState->uStatIntRx);
2443 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXT0);
2444# ifdef E1K_USE_RX_TIMERS
2445 }
2446# endif /* E1K_USE_RX_TIMERS */
2447#endif /* E1K_WITH_RXD_CACHE */
2448
2449 return VINF_SUCCESS;
2450#else
2451 return VERR_INTERNAL_ERROR_2;
2452#endif
2453}
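
/*
 * Worked example for the padding/FCS logic in e1kHandleRxPacket() above
 * (hypothetical frame, assuming CRC stripping is disabled): a 42-byte ARP
 * request is padded to the 60-byte minimum and a 4-byte FCS is appended,
 * so it is stored as a 64-byte frame and counted in PRC64.
 */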
2454
2455
2456/**
2457 * Bring the link up after the configured delay, 5 seconds by default.
2458 *
2459 * @param pState The device state structure.
2460 * @thread any
2461 */
2462DECLINLINE(void) e1kBringLinkUpDelayed(E1KSTATE* pState)
2463{
2464 E1kLog(("%s Will bring up the link in %d seconds...\n",
2465 INSTANCE(pState), pState->cMsLinkUpDelay / 1000));
2466 e1kArmTimer(pState, pState->CTX_SUFF(pLUTimer), pState->cMsLinkUpDelay * 1000);
2467}
2468
2469#if 0 /* unused */
2470/**
2471 * Read handler for Device Control register.
2472 *
2473 * Gets the MDIO pin state from the PHY when MDIO is configured as input.
2474 *
2475 * @returns VBox status code.
2476 *
2477 * @param pState The device state structure.
2478 * @param offset Register offset in memory-mapped frame.
2479 * @param index Register index in register array.
2480 * @param mask Used to implement partial reads (8 and 16-bit).
2481 */
2482static int e1kRegReadCTRL(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2483{
2484 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2485 INSTANCE(pState), (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2486 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2487 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2488 {
2489 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2490 if (Phy::readMDIO(&pState->phy))
2491 *pu32Value = CTRL | CTRL_MDIO;
2492 else
2493 *pu32Value = CTRL & ~CTRL_MDIO;
2494 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2495 INSTANCE(pState), !!(*pu32Value & CTRL_MDIO)));
2496 }
2497 else
2498 {
2499 /* MDIO pin is used for output, ignore it */
2500 *pu32Value = CTRL;
2501 }
2502 return VINF_SUCCESS;
2503}
2504#endif /* unused */
2505
2506/**
2507 * Write handler for Device Control register.
2508 *
2509 * Handles reset.
2510 *
2511 * @param pState The device state structure.
2512 * @param offset Register offset in memory-mapped frame.
2513 * @param index Register index in register array.
2514 * @param value The value to store.
2515 * @param mask Used to implement partial writes (8 and 16-bit).
2516 * @thread EMT
2517 */
2518static int e1kRegWriteCTRL(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2519{
2520 int rc = VINF_SUCCESS;
2521
2522 if (value & CTRL_RESET)
2523 { /* RST */
2524#ifndef IN_RING3
2525 return VINF_IOM_R3_IOPORT_WRITE;
2526#else
2527 e1kHardReset(pState);
2528#endif
2529 }
2530 else
2531 {
2532 if ( (value & CTRL_SLU)
2533 && pState->fCableConnected
2534 && !(STATUS & STATUS_LU))
2535 {
2536 /* The driver indicates that we should bring up the link */
2537 /* Do so in 5 seconds (by default). */
2538 e1kBringLinkUpDelayed(pState);
2539 /*
2540 * Change the status (but not PHY status) anyway as Windows expects
2541 * it for 82543GC.
2542 */
2543 STATUS |= STATUS_LU;
2544 }
2545 if (value & CTRL_VME)
2546 {
2547 E1kLog(("%s VLAN Mode Enabled\n", INSTANCE(pState)));
2548 }
2549 E1kLog(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2550 INSTANCE(pState), (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2551 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2552 if (value & CTRL_MDC)
2553 {
2554 if (value & CTRL_MDIO_DIR)
2555 {
2556 E1kLog(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", INSTANCE(pState), !!(value & CTRL_MDIO)));
2557 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2558 Phy::writeMDIO(&pState->phy, !!(value & CTRL_MDIO));
2559 }
2560 else
2561 {
2562 if (Phy::readMDIO(&pState->phy))
2563 value |= CTRL_MDIO;
2564 else
2565 value &= ~CTRL_MDIO;
2566 E1kLog(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n",
2567 INSTANCE(pState), !!(value & CTRL_MDIO)));
2568 }
2569 }
2570 rc = e1kRegWriteDefault(pState, offset, index, value);
2571 }
2572
2573 return rc;
2574}
2575
2576/**
2577 * Write handler for EEPROM/Flash Control/Data register.
2578 *
2579 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2580 *
2581 * @param pState The device state structure.
2582 * @param offset Register offset in memory-mapped frame.
2583 * @param index Register index in register array.
2584 * @param value The value to store.
2585 * @param mask Used to implement partial writes (8 and 16-bit).
2586 * @thread EMT
2587 */
2588static int e1kRegWriteEECD(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2589{
2590#ifdef IN_RING3
2591 /* So far we are concerned with lower byte only */
2592 if ((EECD & EECD_EE_GNT) || pState->eChip == E1K_CHIP_82543GC)
2593 {
2594 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2595 /* Note: 82543GC does not need to request EEPROM access */
2596 STAM_PROFILE_ADV_START(&pState->StatEEPROMWrite, a);
2597 pState->eeprom.write(value & EECD_EE_WIRES);
2598 STAM_PROFILE_ADV_STOP(&pState->StatEEPROMWrite, a);
2599 }
2600 if (value & EECD_EE_REQ)
2601 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2602 else
2603 EECD &= ~EECD_EE_GNT;
2604 //e1kRegWriteDefault(pState, offset, index, value );
2605
2606 return VINF_SUCCESS;
2607#else /* !IN_RING3 */
2608 return VINF_IOM_R3_MMIO_WRITE;
2609#endif /* !IN_RING3 */
2610}
2611
2612/**
2613 * Read handler for EEPROM/Flash Control/Data register.
2614 *
2615 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2616 *
2617 * @returns VBox status code.
2618 *
2619 * @param pState The device state structure.
2620 * @param offset Register offset in memory-mapped frame.
2621 * @param index Register index in register array.
2622 * @param mask Used to implement partial reads (8 and 16-bit).
2623 * @thread EMT
2624 */
2625static int e1kRegReadEECD(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2626{
2627#ifdef IN_RING3
2628 uint32_t value;
2629 int rc = e1kRegReadDefault(pState, offset, index, &value);
2630 if (RT_SUCCESS(rc))
2631 {
2632 if ((value & EECD_EE_GNT) || pState->eChip == E1K_CHIP_82543GC)
2633 {
2634 /* Note: 82543GC does not need to request EEPROM access */
2635 /* Access to EEPROM granted -- get 4-wire bits to EEPROM device */
2636 STAM_PROFILE_ADV_START(&pState->StatEEPROMRead, a);
2637 value |= pState->eeprom.read();
2638 STAM_PROFILE_ADV_STOP(&pState->StatEEPROMRead, a);
2639 }
2640 *pu32Value = value;
2641 }
2642
2643 return rc;
2644#else /* !IN_RING3 */
2645 return VINF_IOM_R3_MMIO_READ;
2646#endif /* !IN_RING3 */
2647}
2648
2649/**
2650 * Write handler for EEPROM Read register.
2651 *
2652 * Handles EEPROM word access requests, reads EEPROM and stores the result
2653 * into DATA field.
2654 *
2655 * @param pState The device state structure.
2656 * @param offset Register offset in memory-mapped frame.
2657 * @param index Register index in register array.
2658 * @param value The value to store.
2659 * @param mask Used to implement partial writes (8 and 16-bit).
2660 * @thread EMT
2661 */
2662static int e1kRegWriteEERD(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2663{
2664#ifdef IN_RING3
2665 /* Make use of 'writable' and 'readable' masks. */
2666 e1kRegWriteDefault(pState, offset, index, value);
2667 /* DONE and DATA are set only if read was triggered by START. */
2668 if (value & EERD_START)
2669 {
2670 uint16_t tmp;
2671 STAM_PROFILE_ADV_START(&pState->StatEEPROMRead, a);
2672 if (pState->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2673 SET_BITS(EERD, DATA, tmp);
2674 EERD |= EERD_DONE;
2675 STAM_PROFILE_ADV_STOP(&pState->StatEEPROMRead, a);
2676 }
2677
2678 return VINF_SUCCESS;
2679#else /* !IN_RING3 */
2680 return VINF_IOM_R3_MMIO_WRITE;
2681#endif /* !IN_RING3 */
2682}
2683
2684
2685/**
2686 * Write handler for MDI Control register.
2687 *
2688 * Handles PHY read/write requests; forwards requests to internal PHY device.
2689 *
2690 * @param pState The device state structure.
2691 * @param offset Register offset in memory-mapped frame.
2692 * @param index Register index in register array.
2693 * @param value The value to store.
2694 * @param mask Used to implement partial writes (8 and 16-bit).
2695 * @thread EMT
2696 */
2697static int e1kRegWriteMDIC(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2698{
2699 if (value & MDIC_INT_EN)
2700 {
2701 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
2702 INSTANCE(pState)));
2703 }
2704 else if (value & MDIC_READY)
2705 {
2706 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
2707 INSTANCE(pState)));
2708 }
2709 else if (GET_BITS_V(value, MDIC, PHY) != 1)
2710 {
2711 E1kLog(("%s ERROR! Access to invalid PHY detected, phy=%d.\n",
2712 INSTANCE(pState), GET_BITS_V(value, MDIC, PHY)));
2713 }
2714 else
2715 {
2716 /* Store the value */
2717 e1kRegWriteDefault(pState, offset, index, value);
2718 STAM_COUNTER_INC(&pState->StatPHYAccesses);
2719 /* Forward op to PHY */
2720 if (value & MDIC_OP_READ)
2721 SET_BITS(MDIC, DATA, Phy::readRegister(&pState->phy, GET_BITS_V(value, MDIC, REG)));
2722 else
2723 Phy::writeRegister(&pState->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK);
2724 /* Let software know that we are done */
2725 MDIC |= MDIC_READY;
2726 }
2727
2728 return VINF_SUCCESS;
2729}
2730
2731/**
2732 * Write handler for Interrupt Cause Read register.
2733 *
2734 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
2735 *
2736 * @param pState The device state structure.
2737 * @param offset Register offset in memory-mapped frame.
2738 * @param index Register index in register array.
2739 * @param value The value to store.
2740 * @param mask Used to implement partial writes (8 and 16-bit).
2741 * @thread EMT
2742 */
2743static int e1kRegWriteICR(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2744{
2745 ICR &= ~value;
2746
2747 return VINF_SUCCESS;
2748}
2749
2750/**
2751 * Read handler for Interrupt Cause Read register.
2752 *
2753 * Reading this register acknowledges all interrupts.
2754 *
2755 * @returns VBox status code.
2756 *
2757 * @param pState The device state structure.
2758 * @param offset Register offset in memory-mapped frame.
2759 * @param index Register index in register array.
2760 * @param mask Not used.
2761 * @thread EMT
2762 */
2763static int e1kRegReadICR(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2764{
2765 int rc = e1kCsEnter(pState, VINF_IOM_R3_MMIO_READ);
2766 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2767 return rc;
2768
2769 uint32_t value = 0;
2770 rc = e1kRegReadDefault(pState, offset, index, &value);
2771 if (RT_SUCCESS(rc))
2772 {
2773 if (value)
2774 {
2775 /*
2776 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
2777 * with disabled interrupts.
2778 */
2779 //if (IMS)
2780 if (1)
2781 {
2782 /*
2783 * Interrupts were enabled -- we are supposedly at the very
2784 * beginning of interrupt handler
2785 */
2786 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
2787 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", INSTANCE(pState), ICR));
2788 /* Clear all pending interrupts */
2789 ICR = 0;
2790 pState->fIntRaised = false;
2791 /* Lower(0) INTA(0) */
2792 PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 0);
2793
2794 pState->u64AckedAt = TMTimerGet(pState->CTX_SUFF(pIntTimer));
2795 if (pState->fIntMaskUsed)
2796 pState->fDelayInts = true;
2797 }
2798 else
2799 {
2800 /*
2801 * Interrupts are disabled -- in Windows guests the ICR read is done
2802 * just before re-enabling interrupts.
2803 */
2804 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", INSTANCE(pState), ICR));
2805 }
2806 }
2807 *pu32Value = value;
2808 }
2809 e1kCsLeave(pState);
2810
2811 return rc;
2812}
2813
2814/**
2815 * Write handler for Interrupt Cause Set register.
2816 *
2817 * Bits corresponding to 1s in 'value' will be set in ICR register.
2818 *
2819 * @param pState The device state structure.
2820 * @param offset Register offset in memory-mapped frame.
2821 * @param index Register index in register array.
2822 * @param value The value to store.
2823 * @param mask Used to implement partial writes (8 and 16-bit).
2824 * @thread EMT
2825 */
2826static int e1kRegWriteICS(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2827{
2828 E1K_INC_ISTAT_CNT(pState->uStatIntICS);
2829 return e1kRaiseInterrupt(pState, VINF_IOM_R3_MMIO_WRITE, value & s_e1kRegMap[ICS_IDX].writable);
2830}
2831
2832/**
2833 * Write handler for Interrupt Mask Set register.
2834 *
2835 * Will trigger pending interrupts.
2836 *
2837 * @param pState The device state structure.
2838 * @param offset Register offset in memory-mapped frame.
2839 * @param index Register index in register array.
2840 * @param value The value to store.
2841 * @param mask Used to implement partial writes (8 and 16-bit).
2842 * @thread EMT
2843 */
2844static int e1kRegWriteIMS(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2845{
2846 IMS |= value;
2847 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
2848 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", INSTANCE(pState)));
2849 /* Mask changes, we need to raise pending interrupts. */
2850 if ((ICR & IMS) && !pState->fLocked)
2851 {
2852 E1kLog2(("%s e1kRegWriteIMS: IRQ pending (%08x), arming late int timer...\n",
2853 INSTANCE(pState), ICR));
2854 /* Raising an interrupt immediately causes win7 to hang upon NIC reconfiguration, see @bugref{5023}. */
2855 TMTimerSet(pState->CTX_SUFF(pIntTimer), TMTimerFromNano(pState->CTX_SUFF(pIntTimer), ITR * 256) +
2856 TMTimerGet(pState->CTX_SUFF(pIntTimer)));
2857 }
2858
2859 return VINF_SUCCESS;
2860}
2861
2862/**
2863 * Write handler for Interrupt Mask Clear register.
2864 *
2865 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
2866 *
2867 * @param pState The device state structure.
2868 * @param offset Register offset in memory-mapped frame.
2869 * @param index Register index in register array.
2870 * @param value The value to store.
2871 * @param mask Used to implement partial writes (8 and 16-bit).
2872 * @thread EMT
2873 */
2874static int e1kRegWriteIMC(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2875{
2876 int rc = e1kCsEnter(pState, VINF_IOM_R3_MMIO_WRITE);
2877 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2878 return rc;
2879 if (pState->fIntRaised)
2880 {
2881 /*
2882 * Technically we should reset fIntRaised in ICR read handler, but it will cause
2883 * Windows to freeze since it may receive an interrupt while still in the very beginning
2884 * of interrupt handler.
2885 */
2886 E1K_INC_ISTAT_CNT(pState->uStatIntLower);
2887 STAM_COUNTER_INC(&pState->StatIntsPrevented);
2888 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
2889 /* Lower(0) INTA(0) */
2890 PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 0);
2891 pState->fIntRaised = false;
2892 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", INSTANCE(pState), ICR));
2893 }
2894 IMS &= ~value;
2895 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", INSTANCE(pState)));
2896 e1kCsLeave(pState);
2897
2898 return VINF_SUCCESS;
2899}
2900
2901/**
2902 * Write handler for Receive Control register.
2903 *
2904 * @param pState The device state structure.
2905 * @param offset Register offset in memory-mapped frame.
2906 * @param index Register index in register array.
2907 * @param value The value to store.
2908 * @param mask Used to implement partial writes (8 and 16-bit).
2909 * @thread EMT
2910 */
2911static int e1kRegWriteRCTL(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2912{
2913 /* Update promiscuous mode */
2914 bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
2915 if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
2916 {
2917 /* Promiscuity has changed, pass the knowledge on. */
2918#ifndef IN_RING3
2919 return VINF_IOM_R3_IOPORT_WRITE;
2920#else
2921 if (pState->pDrvR3)
2922 pState->pDrvR3->pfnSetPromiscuousMode(pState->pDrvR3, fBecomePromiscous);
2923#endif
2924 }
2925
2926 /* Adjust receive buffer size */
2927 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
2928 if (value & RCTL_BSEX)
2929 cbRxBuf *= 16;
2930 if (cbRxBuf != pState->u16RxBSize)
2931 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
2932 INSTANCE(pState), cbRxBuf, pState->u16RxBSize));
2933 pState->u16RxBSize = cbRxBuf;
2934
2935 /* Update the register */
2936 e1kRegWriteDefault(pState, offset, index, value);
2937
2938 return VINF_SUCCESS;
2939}
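/*
 * Illustrative sketch (not part of the device code): the receive buffer size
 * is derived from RCTL.BSIZE and RCTL.BSEX exactly as in the handler above,
 * i.e. 2048 bytes shifted right by BSIZE and multiplied by 16 when BSEX is
 * set. The helper name below is hypothetical.
 */
#if 0 /* illustrative only, never compiled */
# include <stdint.h>

static unsigned e1kSketchRxBufSize(unsigned uBSize /* 0..3 */, int fBSex)
{
    unsigned cb = 2048u >> uBSize;  /* 2048, 1024, 512 or 256 bytes */
    return fBSex ? cb * 16 : cb;    /* extended buffer sizes */
}
#endif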
2940
2941/**
2942 * Write handler for Packet Buffer Allocation register.
2943 *
2944 * TXA = 64 - RXA.
2945 *
2946 * @param pState The device state structure.
2947 * @param offset Register offset in memory-mapped frame.
2948 * @param index Register index in register array.
2949 * @param value The value to store.
2950 * @param mask Used to implement partial writes (8 and 16-bit).
2951 * @thread EMT
2952 */
2953static int e1kRegWritePBA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2954{
2955 e1kRegWriteDefault(pState, offset, index, value);
2956 PBA_st->txa = 64 - PBA_st->rxa;
2957
2958 return VINF_SUCCESS;
2959}
2960
2961/**
2962 * Write handler for Receive Descriptor Tail register.
2963 *
2964 * @remarks Writing to RDT forces a switch to HC and signals
2965 * e1kNetworkDown_WaitReceiveAvail().
2966 *
2967 * @returns VBox status code.
2968 *
2969 * @param pState The device state structure.
2970 * @param offset Register offset in memory-mapped frame.
2971 * @param index Register index in register array.
2972 * @param value The value to store.
2973 * @param mask Used to implement partial writes (8 and 16-bit).
2974 * @thread EMT
2975 */
2976static int e1kRegWriteRDT(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2977{
2978#ifndef IN_RING3
2979 /* XXX */
2980// return VINF_IOM_R3_MMIO_WRITE;
2981#endif
2982 int rc = e1kCsRxEnter(pState, VINF_IOM_R3_MMIO_WRITE);
2983 if (RT_LIKELY(rc == VINF_SUCCESS))
2984 {
2985 E1kLog(("%s e1kRegWriteRDT\n", INSTANCE(pState)));
2986 rc = e1kRegWriteDefault(pState, offset, index, value);
2987#ifdef E1K_WITH_RXD_CACHE
2988 /*
2989 * We need to fetch descriptors now as RDT may go a whole circle
2990 * before we attempt to store a received packet. For example,
2991 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
2992 * size being only 8 descriptors! Note that we fetch descriptors
2993 * only when the cache is empty to reduce the number of memory reads
2994 * in case of frequent RDT writes. Don't fetch anything when the
2995 * receiver is disabled either as RDH, RDT, RDLEN can be in some
2996 * messed up state.
2997 * Note that even though the cache may seem empty, meaning that there are
2998 * no more available descriptors in it, it may still be in use by the RX
2999 * thread which has not yet written the last descriptor back but has
3000 * temporarily released the RX lock in order to write the packet body
3001 * to the descriptor's buffer. At this point we are still going to prefetch,
3002 * but it won't actually fetch anything if there are no unused slots in
3003 * our "empty" cache (nRxDFetched==E1K_RXD_CACHE_SIZE). We must not
3004 * reset the cache here even if it appears empty. It will be reset at
3005 * a later point in e1kRxDGet().
3006 */
3007 if (e1kRxDIsCacheEmpty(pState) && (RCTL & RCTL_EN))
3008 e1kRxDPrefetch(pState);
3009#endif /* E1K_WITH_RXD_CACHE */
3010 e1kCsRxLeave(pState);
3011 if (RT_SUCCESS(rc))
3012 {
3013/** @todo bird: Use SUPSem* for this so we can signal it in ring-0 as well
3014 * without requiring any context switches. We should also check the
3015 * wait condition before bothering to queue the item as we're currently
3016 * queuing thousands of items per second here in a normal transmit
3017 * scenario. Expect performance changes when fixing this! */
3018#ifdef IN_RING3
3019 /* Signal that we have more receive descriptors available. */
3020 e1kWakeupReceive(pState->CTX_SUFF(pDevIns));
3021#else
3022 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pState->CTX_SUFF(pCanRxQueue));
3023 if (pItem)
3024 PDMQueueInsert(pState->CTX_SUFF(pCanRxQueue), pItem);
3025#endif
3026 }
3027 }
3028 return rc;
3029}
3030
3031/**
3032 * Write handler for Receive Delay Timer register.
3033 *
3034 * @param pState The device state structure.
3035 * @param offset Register offset in memory-mapped frame.
3036 * @param index Register index in register array.
3037 * @param value The value to store.
3038 * @param mask Used to implement partial writes (8 and 16-bit).
3039 * @thread EMT
3040 */
3041static int e1kRegWriteRDTR(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
3042{
3043 e1kRegWriteDefault(pState, offset, index, value);
3044 if (value & RDTR_FPD)
3045 {
3046 /* Flush requested, cancel both timers and raise interrupt */
3047#ifdef E1K_USE_RX_TIMERS
3048 e1kCancelTimer(pState, pState->CTX_SUFF(pRIDTimer));
3049 e1kCancelTimer(pState, pState->CTX_SUFF(pRADTimer));
3050#endif
3051 E1K_INC_ISTAT_CNT(pState->uStatIntRDTR);
3052 return e1kRaiseInterrupt(pState, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3053 }
3054
3055 return VINF_SUCCESS;
3056}
3057
3058DECLINLINE(uint32_t) e1kGetTxLen(E1KSTATE* pState)
3059{
3060 /*
3061 * Make sure TDT won't change during computation. EMT may modify TDT at
3062 * any moment.
3063 */
3064 uint32_t tdt = TDT;
3065 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
3066}
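/*
 * Illustrative sketch (not part of the device code): e1kGetTxLen() computes
 * how many descriptors the guest has made available in the circular TX ring,
 * i.e. the distance from head to tail with wrap-around. A minimal standalone
 * version of the same formula, with hypothetical names:
 */
#if 0 /* illustrative only, never compiled */
static unsigned e1kSketchRingOccupancy(unsigned iHead, unsigned iTail, unsigned cTotal)
{
    /* Descriptors from head up to (but not including) tail, wrapping around
       the end of the ring whenever tail is behind head. */
    return (iHead > iTail ? cTotal : 0) + iTail - iHead;
}
#endif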
3067
3068#ifdef IN_RING3
3069#ifdef E1K_TX_DELAY
3070
3071/**
3072 * Transmit Delay Timer handler.
3073 *
3074 * @remarks We only get here when the timer expires.
3075 *
3076 * @param pDevIns Pointer to device instance structure.
3077 * @param pTimer Pointer to the timer.
3078 * @param pvUser NULL.
3079 * @thread EMT
3080 */
3081static DECLCALLBACK(void) e1kTxDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3082{
3083 E1KSTATE *pState = (E1KSTATE *)pvUser;
3084 Assert(e1kCsIsOwner(&pState->csTx));
3085
3086 E1K_INC_ISTAT_CNT(pState->uStatTxDelayExp);
3087#ifdef E1K_INT_STATS
3088 uint64_t u64Elapsed = RTTimeNanoTS() - pState->u64ArmedAt;
3089 if (u64Elapsed > pState->uStatMaxTxDelay)
3090 pState->uStatMaxTxDelay = u64Elapsed;
3091#endif /* E1K_INT_STATS */
3092 int rc = e1kXmitPending(pState, false /*fOnWorkerThread*/);
3093 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3094}
3095#endif /* E1K_TX_DELAY */
3096
3097#ifdef E1K_USE_TX_TIMERS
3098
3099/**
3100 * Transmit Interrupt Delay Timer handler.
3101 *
3102 * @remarks We only get here when the timer expires.
3103 *
3104 * @param pDevIns Pointer to device instance structure.
3105 * @param pTimer Pointer to the timer.
3106 * @param pvUser NULL.
3107 * @thread EMT
3108 */
3109static DECLCALLBACK(void) e1kTxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3110{
3111 E1KSTATE *pState = (E1KSTATE *)pvUser;
3112
3113 E1K_INC_ISTAT_CNT(pState->uStatTID);
3114 /* Cancel absolute delay timer as we have already got attention */
3115#ifndef E1K_NO_TAD
3116 e1kCancelTimer(pState, pState->CTX_SUFF(pTADTimer));
3117#endif /* E1K_NO_TAD */
3118 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXDW);
3119}
3120
3121/**
3122 * Transmit Absolute Delay Timer handler.
3123 *
3124 * @remarks We only get here when the timer expires.
3125 *
3126 * @param pDevIns Pointer to device instance structure.
3127 * @param pTimer Pointer to the timer.
3128 * @param pvUser NULL.
3129 * @thread EMT
3130 */
3131static DECLCALLBACK(void) e1kTxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3132{
3133 E1KSTATE *pState = (E1KSTATE *)pvUser;
3134
3135 E1K_INC_ISTAT_CNT(pState->uStatTAD);
3136 /* Cancel interrupt delay timer as we have already got attention */
3137 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
3138 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXDW);
3139}
3140
3141#endif /* E1K_USE_TX_TIMERS */
3142#ifdef E1K_USE_RX_TIMERS
3143
3144/**
3145 * Receive Interrupt Delay Timer handler.
3146 *
3147 * @remarks We only get here when the timer expires.
3148 *
3149 * @param pDevIns Pointer to device instance structure.
3150 * @param pTimer Pointer to the timer.
3151 * @param pvUser NULL.
3152 * @thread EMT
3153 */
3154static DECLCALLBACK(void) e1kRxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3155{
3156 E1KSTATE *pState = (E1KSTATE *)pvUser;
3157
3158 E1K_INC_ISTAT_CNT(pState->uStatRID);
3159 /* Cancel absolute delay timer as we have already got attention */
3160 e1kCancelTimer(pState, pState->CTX_SUFF(pRADTimer));
3161 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXT0);
3162}
3163
3164/**
3165 * Receive Absolute Delay Timer handler.
3166 *
3167 * @remarks We only get here when the timer expires.
3168 *
3169 * @param pDevIns Pointer to device instance structure.
3170 * @param pTimer Pointer to the timer.
3171 * @param pvUser NULL.
3172 * @thread EMT
3173 */
3174static DECLCALLBACK(void) e1kRxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3175{
3176 E1KSTATE *pState = (E1KSTATE *)pvUser;
3177
3178 E1K_INC_ISTAT_CNT(pState->uStatRAD);
3179 /* Cancel interrupt delay timer as we have already got attention */
3180 e1kCancelTimer(pState, pState->CTX_SUFF(pRIDTimer));
3181 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXT0);
3182}
3183
3184#endif /* E1K_USE_RX_TIMERS */
3185
3186/**
3187 * Late Interrupt Timer handler.
3188 *
3189 * @param pDevIns Pointer to device instance structure.
3190 * @param pTimer Pointer to the timer.
3191 * @param pvUser NULL.
3192 * @thread EMT
3193 */
3194static DECLCALLBACK(void) e1kLateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3195{
3196 E1KSTATE *pState = (E1KSTATE *)pvUser;
3197
3198 STAM_PROFILE_ADV_START(&pState->StatLateIntTimer, a);
3199 STAM_COUNTER_INC(&pState->StatLateInts);
3200 E1K_INC_ISTAT_CNT(pState->uStatIntLate);
3201#if 0
3202 if (pState->iStatIntLost > -100)
3203 pState->iStatIntLost--;
3204#endif
3205 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, 0);
3206 STAM_PROFILE_ADV_STOP(&pState->StatLateIntTimer, a);
3207}
3208
3209/**
3210 * Link Up Timer handler.
3211 *
3212 * @param pDevIns Pointer to device instance structure.
3213 * @param pTimer Pointer to the timer.
3214 * @param pvUser NULL.
3215 * @thread EMT
3216 */
3217static DECLCALLBACK(void) e1kLinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3218{
3219 E1KSTATE *pState = (E1KSTATE *)pvUser;
3220
3221 /*
3222 * This can happen if the link status was set to down while the link-up timer was
3223 * already armed (shortly after e1kLoadDone(), or when the cable was disconnected
3224 * and then reconnected very quickly).
3225 */
3226 if (!pState->fCableConnected)
3227 return;
3228
3229 E1kLog(("%s e1kLinkUpTimer: Link is up\n", INSTANCE(pState)));
3230 STATUS |= STATUS_LU;
3231 Phy::setLinkStatus(&pState->phy, true);
3232 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
3233}
3234
3235#endif /* IN_RING3 */
3236
3237/**
3238 * Sets up the GSO context according to the TSE new context descriptor.
3239 *
3240 * @param pGso The GSO context to setup.
3241 * @param pCtx The context descriptor.
3242 */
3243DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3244{
3245 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3246
3247 /*
3248 * See if the context descriptor describes something that could be TCP or
3249 * UDP over IPv[46].
3250 */
3251 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3252 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3253 {
3254 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3255 return;
3256 }
3257 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3258 {
3259 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3260 return;
3261 }
3262 if (RT_UNLIKELY( pCtx->dw2.fTCP
3263 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3264 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3265 {
3266 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3267 return;
3268 }
3269
3270 /* The end of the TCP/UDP checksum should stop at the end of the packet or at least after the headers. */
3271 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3272 {
3273 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3274 return;
3275 }
3276
3277 /* IPv4 checksum offset. */
3278 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3279 {
3280 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3281 return;
3282 }
3283
3284 /* TCP/UDP checksum offsets. */
3285 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3286 != ( pCtx->dw2.fTCP
3287 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3288 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3289 {
3290 E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->tu.u8CSO, pCtx->tu.u8CSS, pCtx->dw2.fTCP));
3291 return;
3292 }
3293
3294 /*
3295 * Because of internal networking using a 16-bit size field for GSO context
3296 * plus frame, we have to make sure we don't exceed this.
3297 */
3298 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3299 {
3300 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3301 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3302 return;
3303 }
3304
3305 /*
3306 * We're good for now - we'll do more checks when seeing the data.
3307 * So, figure the type of offloading and setup the context.
3308 */
3309 if (pCtx->dw2.fIP)
3310 {
3311 if (pCtx->dw2.fTCP)
3312 {
3313 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3314 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3315 }
3316 else
3317 {
3318 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3319 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3320 }
3321 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3322 * this yet it seems)... */
3323 }
3324 else
3325 {
3326 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /* @todo IPv6 UFO */
3327 if (pCtx->dw2.fTCP)
3328 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3329 else
3330 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3331 }
3332 pGso->offHdr1 = pCtx->ip.u8CSS;
3333 pGso->offHdr2 = pCtx->tu.u8CSS;
3334 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3335 pGso->cbMaxSeg = pCtx->dw3.u16MSS;
3336 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3337 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3338 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3339}
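/*
 * Illustrative sketch (not part of the device code): the checks above expect
 * the usual header ordering Ethernet -> IP -> TCP/UDP, with IPCSS, TUCSS and
 * HDRLEN marking where each layer starts/ends. For a plain Ethernet + IPv4 +
 * TCP frame without options a guest would typically program the values below;
 * the structure and function names are hypothetical.
 */
#if 0 /* illustrative only, never compiled */
# include <stdint.h>

struct E1kSketchTsoOffsets
{
    uint8_t ipcss;   /* offset of the IP header = end of the Ethernet header */
    uint8_t tucss;   /* offset of the TCP/UDP header = end of the IP header  */
    uint8_t hdrlen;  /* total header length = end of the TCP/UDP header      */
};

static struct E1kSketchTsoOffsets e1kSketchPlainTcpIp4Offsets(void)
{
    struct E1kSketchTsoOffsets Off;
    Off.ipcss  = 14;            /* Ethernet header              */
    Off.tucss  = 14 + 20;       /* + minimal IPv4 header = 34   */
    Off.hdrlen = 14 + 20 + 20;  /* + minimal TCP header  = 54   */
    return Off;
}
#endif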
3340
3341/**
3342 * Checks if we can use GSO processing for the current TSE frame.
3343 *
3344 * @param pGso The GSO context.
3345 * @param pData The first data descriptor of the frame.
3346 * @param pCtx The TSO context descriptor.
3347 */
3348DECLINLINE(bool) e1kCanDoGso(PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3349{
3350 if (!pData->cmd.fTSE)
3351 {
3352 E1kLog2(("e1kCanDoGso: !TSE\n"));
3353 return false;
3354 }
3355 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3356 {
3357 E1kLog(("e1kCanDoGso: VLE\n"));
3358 return false;
3359 }
3360
3361 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3362 {
3363 case PDMNETWORKGSOTYPE_IPV4_TCP:
3364 case PDMNETWORKGSOTYPE_IPV4_UDP:
3365 if (!pData->dw3.fIXSM)
3366 {
3367 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3368 return false;
3369 }
3370 if (!pData->dw3.fTXSM)
3371 {
3372 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3373 return false;
3374 }
3375 /** @todo what more check should we perform here? Ethernet frame type? */
3376 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3377 return true;
3378
3379 case PDMNETWORKGSOTYPE_IPV6_TCP:
3380 case PDMNETWORKGSOTYPE_IPV6_UDP:
3381 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3382 {
3383 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3384 return false;
3385 }
3386 if (!pData->dw3.fTXSM)
3387 {
3388 E1kLog(("e1kCanDoGso: !TXSM (IPv6)\n"));
3389 return false;
3390 }
3391 /** @todo what more check should we perform here? Ethernet frame type? */
3392 E1kLog2(("e1kCanDoGso: OK, IPv6\n"));
3393 return true;
3394
3395 default:
3396 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3397 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3398 return false;
3399 }
3400}
3401
3402/**
3403 * Frees the current xmit buffer.
3404 *
3405 * @param pState The device state structure.
3406 */
3407static void e1kXmitFreeBuf(E1KSTATE *pState)
3408{
3409 PPDMSCATTERGATHER pSg = pState->CTX_SUFF(pTxSg);
3410 if (pSg)
3411 {
3412 pState->CTX_SUFF(pTxSg) = NULL;
3413
3414 if (pSg->pvAllocator != pState)
3415 {
3416 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
3417 if (pDrv)
3418 pDrv->pfnFreeBuf(pDrv, pSg);
3419 }
3420 else
3421 {
3422 /* loopback */
3423 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3424 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3425 pSg->fFlags = 0;
3426 pSg->pvAllocator = NULL;
3427 }
3428 }
3429}
3430
3431#ifndef E1K_WITH_TXD_CACHE
3432/**
3433 * Allocates an xmit buffer.
3434 *
3435 * @returns See PDMINETWORKUP::pfnAllocBuf.
3436 * @param pState The device state structure.
3437 * @param cbMin The minimum frame size.
3438 * @param fExactSize Whether cbMin is exact or if we have to max it
3439 * out to the max MTU size.
3440 * @param fGso Whether this is a GSO frame or not.
3441 */
3442DECLINLINE(int) e1kXmitAllocBuf(E1KSTATE *pState, size_t cbMin, bool fExactSize, bool fGso)
3443{
3444 /* Adjust cbMin if necessary. */
3445 if (!fExactSize)
3446 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3447
3448 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3449 if (RT_UNLIKELY(pState->CTX_SUFF(pTxSg)))
3450 e1kXmitFreeBuf(pState);
3451 Assert(pState->CTX_SUFF(pTxSg) == NULL);
3452
3453 /*
3454 * Allocate the buffer.
3455 */
3456 PPDMSCATTERGATHER pSg;
3457 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3458 {
3459 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
3460 if (RT_UNLIKELY(!pDrv))
3461 return VERR_NET_DOWN;
3462 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pState->GsoCtx : NULL, &pSg);
3463 if (RT_FAILURE(rc))
3464 {
3465 /* Suspend TX as we are out of buffers atm */
3466 STATUS |= STATUS_TXOFF;
3467 return rc;
3468 }
3469 }
3470 else
3471 {
3472 /* Create a loopback using the fallback buffer and preallocated SG. */
3473 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3474 pSg = &pState->uTxFallback.Sg;
3475 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3476 pSg->cbUsed = 0;
3477 pSg->cbAvailable = 0;
3478 pSg->pvAllocator = pState;
3479 pSg->pvUser = NULL; /* No GSO here. */
3480 pSg->cSegs = 1;
3481 pSg->aSegs[0].pvSeg = pState->aTxPacketFallback;
3482 pSg->aSegs[0].cbSeg = sizeof(pState->aTxPacketFallback);
3483 }
3484
3485 pState->CTX_SUFF(pTxSg) = pSg;
3486 return VINF_SUCCESS;
3487}
3488#else /* E1K_WITH_TXD_CACHE */
3489/**
3490 * Allocates an xmit buffer.
3491 *
3492 * @returns See PDMINETWORKUP::pfnAllocBuf.
3493 * @param pState The device state structure.
3494 * @param cbMin The minimum frame size.
3495 * @param fExactSize Whether cbMin is exact or if we have to max it
3496 * out to the max MTU size.
3497 * @param fGso Whether this is a GSO frame or not.
3498 */
3499DECLINLINE(int) e1kXmitAllocBuf(E1KSTATE *pState, bool fGso)
3500{
3501 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3502 if (RT_UNLIKELY(pState->CTX_SUFF(pTxSg)))
3503 e1kXmitFreeBuf(pState);
3504 Assert(pState->CTX_SUFF(pTxSg) == NULL);
3505
3506 /*
3507 * Allocate the buffer.
3508 */
3509 PPDMSCATTERGATHER pSg;
3510 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3511 {
3512 if (pState->cbTxAlloc == 0)
3513 {
3514 /* Zero packet, no need for the buffer */
3515 return VINF_SUCCESS;
3516 }
3517
3518 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
3519 if (RT_UNLIKELY(!pDrv))
3520 return VERR_NET_DOWN;
3521 int rc = pDrv->pfnAllocBuf(pDrv, pState->cbTxAlloc, fGso ? &pState->GsoCtx : NULL, &pSg);
3522 if (RT_FAILURE(rc))
3523 {
3524 /* Suspend TX as we are out of buffers atm */
3525 STATUS |= STATUS_TXOFF;
3526 return rc;
3527 }
3528 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3529 INSTANCE(pState), pState->cbTxAlloc,
3530 pState->fVTag ? "VLAN " : "",
3531 pState->fGSO ? "GSO " : ""));
3532 pState->cbTxAlloc = 0;
3533 }
3534 else
3535 {
3536 /* Create a loopback using the fallback buffer and preallocated SG. */
3537 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3538 pSg = &pState->uTxFallback.Sg;
3539 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3540 pSg->cbUsed = 0;
3541 pSg->cbAvailable = 0;
3542 pSg->pvAllocator = pState;
3543 pSg->pvUser = NULL; /* No GSO here. */
3544 pSg->cSegs = 1;
3545 pSg->aSegs[0].pvSeg = pState->aTxPacketFallback;
3546 pSg->aSegs[0].cbSeg = sizeof(pState->aTxPacketFallback);
3547 }
3548
3549 pState->CTX_SUFF(pTxSg) = pSg;
3550 return VINF_SUCCESS;
3551}
3552#endif /* E1K_WITH_TXD_CACHE */
3553
3554/**
3555 * Checks if it's a GSO buffer or not.
3556 *
3557 * @returns true / false.
3558 * @param pTxSg The scatter / gather buffer.
3559 */
3560DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3561{
3562#if 0
3563 if (!pTxSg)
3564 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
3565 if (pTxSg && !pTxSg->pvUser)
3566 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
3567#endif
3568 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3569}
3570
3571#ifndef E1K_WITH_TXD_CACHE
3572/**
3573 * Load transmit descriptor from guest memory.
3574 *
3575 * @param pState The device state structure.
3576 * @param pDesc Pointer to descriptor union.
3577 * @param addr Physical address in guest context.
3578 * @thread E1000_TX
3579 */
3580DECLINLINE(void) e1kLoadDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr)
3581{
3582 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3583}
3584#else /* E1K_WITH_TXD_CACHE */
3585/**
3586 * Load transmit descriptors from guest memory.
3587 *
3588 * We need two physical reads in case the tail wrapped around the end of TX
3589 * descriptor ring.
3590 *
3591 * @returns the actual number of descriptors fetched.
3592 * @param pState The device state structure.
3595 * @thread E1000_TX
3596 */
3597DECLINLINE(unsigned) e1kTxDLoadMore(E1KSTATE* pState)
3598{
3599 Assert(pState->iTxDCurrent == 0);
3600 /* We've already loaded pState->nTxDFetched descriptors past TDH. */
3601 unsigned nDescsAvailable = e1kGetTxLen(pState) - pState->nTxDFetched;
3602 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pState->nTxDFetched);
3603 unsigned nDescsTotal = TDLEN / sizeof(E1KTXDESC);
3604 unsigned nFirstNotLoaded = (TDH + pState->nTxDFetched) % nDescsTotal;
3605 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
3606 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u "
3607 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
3608 INSTANCE(pState), nDescsAvailable, nDescsToFetch, nDescsTotal,
3609 nFirstNotLoaded, nDescsInSingleRead));
3610 if (nDescsToFetch == 0)
3611 return 0;
3612 E1KTXDESC* pFirstEmptyDesc = &pState->aTxDescriptors[pState->nTxDFetched];
3613 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
3614 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3615 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
3616 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3617 INSTANCE(pState), nDescsInSingleRead,
3618 TDBAH, TDBAL + TDH * sizeof(E1KTXDESC),
3619 nFirstNotLoaded, TDLEN, TDH, TDT));
3620 if (nDescsToFetch > nDescsInSingleRead)
3621 {
3622 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
3623 ((uint64_t)TDBAH << 32) + TDBAL,
3624 pFirstEmptyDesc + nDescsInSingleRead,
3625 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
3626 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
3627 INSTANCE(pState), nDescsToFetch - nDescsInSingleRead,
3628 TDBAH, TDBAL));
3629 }
3630 pState->nTxDFetched += nDescsToFetch;
3631 return nDescsToFetch;
3632}
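/*
 * Illustrative sketch (not part of the device code): the two physical reads
 * above implement a read from a circular descriptor ring, split into at most
 * two linear chunks when the requested range wraps past the end of the ring.
 * A minimal standalone version of the same idea, with hypothetical names:
 */
#if 0 /* illustrative only, never compiled */
# include <stddef.h>
# include <string.h>

static void e1kSketchRingRead(void *pvDst, const void *pvRing, size_t cbItem,
                              size_t cTotal, size_t iFirst, size_t cToRead)
{
    size_t cFirst = cToRead < cTotal - iFirst ? cToRead : cTotal - iFirst;
    memcpy(pvDst, (const char *)pvRing + iFirst * cbItem, cFirst * cbItem);
    if (cToRead > cFirst) /* wrapped around: continue from the ring base */
        memcpy((char *)pvDst + cFirst * cbItem, pvRing,
               (cToRead - cFirst) * cbItem);
}
#endif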
3633
3634/**
3635 * Load transmit descriptors from guest memory only if there are no loaded
3636 * descriptors.
3637 *
3638 * @returns true if there are descriptors in cache.
3639 * @param pState The device state structure.
3642 * @thread E1000_TX
3643 */
3644DECLINLINE(bool) e1kTxDLazyLoad(E1KSTATE* pState)
3645{
3646 if (pState->nTxDFetched == 0)
3647 return e1kTxDLoadMore(pState) != 0;
3648 return true;
3649}
3650#endif /* E1K_WITH_TXD_CACHE */
3651
3652/**
3653 * Write back transmit descriptor to guest memory.
3654 *
3655 * @param pState The device state structure.
3656 * @param pDesc Pointer to descriptor union.
3657 * @param addr Physical address in guest context.
3658 * @thread E1000_TX
3659 */
3660DECLINLINE(void) e1kWriteBackDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr)
3661{
3662 /* Only the last half of the descriptor has to be written back. */
3663 e1kPrintTDesc(pState, pDesc, "^^^");
3664 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3665}
3666
3667/**
3668 * Transmit complete frame.
3669 *
3670 * @remarks We skip the FCS since we're not responsible for sending anything to
3671 * a real ethernet wire.
3672 *
3673 * @param pState The device state structure.
3674 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3675 * @thread E1000_TX
3676 */
3677static void e1kTransmitFrame(E1KSTATE* pState, bool fOnWorkerThread)
3678{
3679 PPDMSCATTERGATHER pSg = pState->CTX_SUFF(pTxSg);
3680 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
3681 Assert(!pSg || pSg->cSegs == 1);
3682
3683 if (cbFrame > 70) /* unqualified guess */
3684 pState->led.Asserted.s.fWriting = pState->led.Actual.s.fWriting = 1;
3685
3686#ifdef E1K_INT_STATS
3687 if (cbFrame <= 1514)
3688 E1K_INC_ISTAT_CNT(pState->uStatTx1514);
3689 else if (cbFrame <= 2962)
3690 E1K_INC_ISTAT_CNT(pState->uStatTx2962);
3691 else if (cbFrame <= 4410)
3692 E1K_INC_ISTAT_CNT(pState->uStatTx4410);
3693 else if (cbFrame <= 5858)
3694 E1K_INC_ISTAT_CNT(pState->uStatTx5858);
3695 else if (cbFrame <= 7306)
3696 E1K_INC_ISTAT_CNT(pState->uStatTx7306);
3697 else if (cbFrame <= 8754)
3698 E1K_INC_ISTAT_CNT(pState->uStatTx8754);
3699 else if (cbFrame <= 16384)
3700 E1K_INC_ISTAT_CNT(pState->uStatTx16384);
3701 else if (cbFrame <= 32768)
3702 E1K_INC_ISTAT_CNT(pState->uStatTx32768);
3703 else
3704 E1K_INC_ISTAT_CNT(pState->uStatTxLarge);
3705#endif /* E1K_INT_STATS */
3706
3707 /* Add VLAN tag */
3708 if (cbFrame > 12 && pState->fVTag)
3709 {
3710 E1kLog3(("%s Inserting VLAN tag %08x\n",
3711 INSTANCE(pState), RT_BE2H_U16(VET) | (RT_BE2H_U16(pState->u16VTagTCI) << 16)));
3712 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
3713 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16(VET) | (RT_BE2H_U16(pState->u16VTagTCI) << 16);
3714 pSg->cbUsed += 4;
3715 cbFrame += 4;
3716 Assert(pSg->cbUsed == cbFrame);
3717 Assert(pSg->cbUsed <= pSg->cbAvailable);
3718 }
3719/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
3720 "%.*Rhxd\n"
3721 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
3722 INSTANCE(pState), cbFrame, pSg->aSegs[0].pvSeg, INSTANCE(pState)));*/
3723
3724 /* Update the stats */
3725 E1K_INC_CNT32(TPT);
3726 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
3727 E1K_INC_CNT32(GPTC);
3728 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
3729 E1K_INC_CNT32(BPTC);
3730 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
3731 E1K_INC_CNT32(MPTC);
3732 /* Update octet transmit counter */
3733 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
3734 if (pState->CTX_SUFF(pDrv))
3735 STAM_REL_COUNTER_ADD(&pState->StatTransmitBytes, cbFrame);
3736 if (cbFrame == 64)
3737 E1K_INC_CNT32(PTC64);
3738 else if (cbFrame < 128)
3739 E1K_INC_CNT32(PTC127);
3740 else if (cbFrame < 256)
3741 E1K_INC_CNT32(PTC255);
3742 else if (cbFrame < 512)
3743 E1K_INC_CNT32(PTC511);
3744 else if (cbFrame < 1024)
3745 E1K_INC_CNT32(PTC1023);
3746 else
3747 E1K_INC_CNT32(PTC1522);
3748
3749 E1K_INC_ISTAT_CNT(pState->uStatTxFrm);
3750
3751 /*
3752 * Dump and send the packet.
3753 */
3754 int rc = VERR_NET_DOWN;
3755 if (pSg && pSg->pvAllocator != pState)
3756 {
3757 e1kPacketDump(pState, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
3758
3759 pState->CTX_SUFF(pTxSg) = NULL;
3760 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
3761 if (pDrv)
3762 {
3763 /* Release critical section to avoid deadlock in CanReceive */
3764 //e1kCsLeave(pState);
3765 STAM_PROFILE_START(&pState->CTX_SUFF_Z(StatTransmitSend), a);
3766 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
3767 STAM_PROFILE_STOP(&pState->CTX_SUFF_Z(StatTransmitSend), a);
3768 //e1kCsEnter(pState, RT_SRC_POS);
3769 }
3770 }
3771 else if (pSg)
3772 {
3773 Assert(pSg->aSegs[0].pvSeg == pState->aTxPacketFallback);
3774 e1kPacketDump(pState, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
3775
3776 /** @todo do we actually need to check that we're in loopback mode here? */
3777 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
3778 {
3779 E1KRXDST status;
3780 RT_ZERO(status);
3781 status.fPIF = true;
3782 e1kHandleRxPacket(pState, pSg->aSegs[0].pvSeg, cbFrame, status);
3783 rc = VINF_SUCCESS;
3784 }
3785 e1kXmitFreeBuf(pState);
3786 }
3787 else
3788 rc = VERR_NET_DOWN;
3789 if (RT_FAILURE(rc))
3790 {
3791 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
3792 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
3793 }
3794
3795 pState->led.Actual.s.fWriting = 0;
3796}
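/*
 * Illustrative sketch (not part of the device code): the VLAN insertion above
 * moves everything following the two MAC addresses (12 bytes) up by 4 bytes
 * and drops the 802.1Q tag (TPID followed by TCI, both big-endian on the
 * wire) into the gap. A simplified standalone sketch, assuming the caller
 * passes TPID/TCI as host-order values and the buffer has 4 spare bytes; the
 * function name is hypothetical.
 */
#if 0 /* illustrative only, never compiled */
# include <stdint.h>
# include <string.h>

static uint32_t e1kSketchInsertVlanTag(uint8_t *pbFrame, uint32_t cbFrame,
                                       uint16_t uTpid, uint16_t uTci)
{
    memmove(pbFrame + 16, pbFrame + 12, cbFrame - 12);
    pbFrame[12] = (uint8_t)(uTpid >> 8);   /* TPID, e.g. 0x8100 */
    pbFrame[13] = (uint8_t)(uTpid & 0xff);
    pbFrame[14] = (uint8_t)(uTci >> 8);    /* PCP/DEI/VID       */
    pbFrame[15] = (uint8_t)(uTci & 0xff);
    return cbFrame + 4;
}
#endif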
3797
3798/**
3799 * Compute and write internet checksum (e1kCSum16) at the specified offset.
3800 *
3801 * @param pState The device state structure.
3802 * @param pPkt Pointer to the packet.
3803 * @param u16PktLen Total length of the packet.
3804 * @param cso Offset in packet to write checksum at.
3805 * @param css Offset in packet to start computing
3806 * checksum from.
3807 * @param cse Offset in packet to stop computing
3808 * checksum at.
3809 * @thread E1000_TX
3810 */
3811static void e1kInsertChecksum(E1KSTATE* pState, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
3812{
3813 if (css >= u16PktLen)
3814 {
3815 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
3816 INSTANCE(pState), css, u16PktLen));
3817 return;
3818 }
3819
3820 if (cso >= u16PktLen - 1)
3821 {
3822 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
3823 INSTANCE(pState), cso, u16PktLen));
3824 return;
3825 }
3826
3827 if (cse == 0)
3828 cse = u16PktLen - 1;
3829 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
3830 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", INSTANCE(pState),
3831 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
3832 *(uint16_t*)(pPkt + cso) = u16ChkSum;
3833}
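/*
 * Illustrative sketch (not part of the device code): e1kCSum16, defined
 * elsewhere in this file, computes the standard Internet (one's complement)
 * checksum used above. A minimal standalone sketch of that algorithm follows;
 * details such as the byte order of the stored result may differ from the
 * actual implementation, and the function name is hypothetical.
 */
#if 0 /* illustrative only, never compiled */
# include <stdint.h>
# include <stddef.h>

static uint16_t e1kSketchCSum16(const uint8_t *pb, size_t cb)
{
    uint32_t sum = 0;
    while (cb > 1)
    {
        sum += (uint32_t)((pb[0] << 8) | pb[1]); /* 16-bit big-endian words */
        pb += 2;
        cb -= 2;
    }
    if (cb)                                      /* odd trailing byte, zero-padded */
        sum += (uint32_t)pb[0] << 8;
    while (sum >> 16)                            /* fold carries into 16 bits */
        sum = (sum >> 16) + (sum & 0xffff);
    return (uint16_t)~sum;
}
#endif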
3834
3835/**
3836 * Add a part of descriptor's buffer to transmit frame.
3837 *
3838 * @remarks data.u64BufAddr is used unconditionally for both data
3839 * and legacy descriptors since it is identical to
3840 * legacy.u64BufAddr.
3841 *
3842 * @param pState The device state structure.
3843 * @param PhysAddr Physical address of the descriptor's data buffer.
3844 * @param u16Len Length of buffer to the end of segment.
3845 * @param fSend Force packet sending.
3846 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3847 * @thread E1000_TX
3848 */
3849#ifndef E1K_WITH_TXD_CACHE
3850static void e1kFallbackAddSegment(E1KSTATE* pState, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
3851{
3852 /* TCP header being transmitted */
3853 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
3854 (pState->aTxPacketFallback + pState->contextTSE.tu.u8CSS);
3855 /* IP header being transmitted */
3856 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
3857 (pState->aTxPacketFallback + pState->contextTSE.ip.u8CSS);
3858
3859 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
3860 INSTANCE(pState), u16Len, pState->u32PayRemain, pState->u16HdrRemain, fSend));
3861 Assert(pState->u32PayRemain + pState->u16HdrRemain > 0);
3862
3863 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), PhysAddr,
3864 pState->aTxPacketFallback + pState->u16TxPktLen, u16Len);
3865 E1kLog3(("%s Dump of the segment:\n"
3866 "%.*Rhxd\n"
3867 "%s --- End of dump ---\n",
3868 INSTANCE(pState), u16Len, pState->aTxPacketFallback + pState->u16TxPktLen, INSTANCE(pState)));
3869 pState->u16TxPktLen += u16Len;
3870 E1kLog3(("%s e1kFallbackAddSegment: pState->u16TxPktLen=%x\n",
3871 INSTANCE(pState), pState->u16TxPktLen));
3872 if (pState->u16HdrRemain > 0)
3873 {
3874 /* The header was not complete, check if it is now */
3875 if (u16Len >= pState->u16HdrRemain)
3876 {
3877 /* The rest is payload */
3878 u16Len -= pState->u16HdrRemain;
3879 pState->u16HdrRemain = 0;
3880 /* Save partial checksum and flags */
3881 pState->u32SavedCsum = pTcpHdr->chksum;
3882 pState->u16SavedFlags = pTcpHdr->hdrlen_flags;
3883 /* Clear FIN and PSH flags now and set them only in the last segment */
3884 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
3885 }
3886 else
3887 {
3888 /* Still not */
3889 pState->u16HdrRemain -= u16Len;
3890 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
3891 INSTANCE(pState), pState->u16HdrRemain));
3892 return;
3893 }
3894 }
3895
3896 pState->u32PayRemain -= u16Len;
3897
3898 if (fSend)
3899 {
3900 /* Leave ethernet header intact */
3901 /* IP Total Length = payload + headers - ethernet header */
3902 pIpHdr->total_len = htons(pState->u16TxPktLen - pState->contextTSE.ip.u8CSS);
3903 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
3904 INSTANCE(pState), ntohs(pIpHdr->total_len)));
3905 /* Update IP Checksum */
3906 pIpHdr->chksum = 0;
3907 e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
3908 pState->contextTSE.ip.u8CSO,
3909 pState->contextTSE.ip.u8CSS,
3910 pState->contextTSE.ip.u16CSE);
3911
3912 /* Update TCP flags */
3913 /* Restore original FIN and PSH flags for the last segment */
3914 if (pState->u32PayRemain == 0)
3915 {
3916 pTcpHdr->hdrlen_flags = pState->u16SavedFlags;
3917 E1K_INC_CNT32(TSCTC);
3918 }
3919 /* Add TCP length to partial pseudo header sum */
3920 uint32_t csum = pState->u32SavedCsum
3921 + htons(pState->u16TxPktLen - pState->contextTSE.tu.u8CSS);
3922 while (csum >> 16)
3923 csum = (csum >> 16) + (csum & 0xFFFF);
3924 pTcpHdr->chksum = csum;
3925 /* Compute final checksum */
3926 e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
3927 pState->contextTSE.tu.u8CSO,
3928 pState->contextTSE.tu.u8CSS,
3929 pState->contextTSE.tu.u16CSE);
3930
3931 /*
3932 * Transmit it. If we've used the SG already, allocate a new one before
3933 * we copy the data.
3934 */
3935 if (!pState->CTX_SUFF(pTxSg))
3936 e1kXmitAllocBuf(pState, pState->u16TxPktLen + (pState->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
3937 if (pState->CTX_SUFF(pTxSg))
3938 {
3939 Assert(pState->u16TxPktLen <= pState->CTX_SUFF(pTxSg)->cbAvailable);
3940 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
3941 if (pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pState->aTxPacketFallback)
3942 memcpy(pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->aTxPacketFallback, pState->u16TxPktLen);
3943 pState->CTX_SUFF(pTxSg)->cbUsed = pState->u16TxPktLen;
3944 pState->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pState->u16TxPktLen;
3945 }
3946 e1kTransmitFrame(pState, fOnWorkerThread);
3947
3948 /* Update Sequence Number */
3949 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pState->u16TxPktLen
3950 - pState->contextTSE.dw3.u8HDRLEN);
3951 /* Increment IP identification */
3952 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
3953 }
3954}
3955#else /* E1K_WITH_TXD_CACHE */
3956static int e1kFallbackAddSegment(E1KSTATE* pState, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
3957{
3958 int rc = VINF_SUCCESS;
3959 /* TCP header being transmitted */
3960 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
3961 (pState->aTxPacketFallback + pState->contextTSE.tu.u8CSS);
3962 /* IP header being transmitted */
3963 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
3964 (pState->aTxPacketFallback + pState->contextTSE.ip.u8CSS);
3965
3966 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
3967 INSTANCE(pState), u16Len, pState->u32PayRemain, pState->u16HdrRemain, fSend));
3968 Assert(pState->u32PayRemain + pState->u16HdrRemain > 0);
3969
3970 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), PhysAddr,
3971 pState->aTxPacketFallback + pState->u16TxPktLen, u16Len);
3972 E1kLog3(("%s Dump of the segment:\n"
3973 "%.*Rhxd\n"
3974 "%s --- End of dump ---\n",
3975 INSTANCE(pState), u16Len, pState->aTxPacketFallback + pState->u16TxPktLen, INSTANCE(pState)));
3976 pState->u16TxPktLen += u16Len;
3977 E1kLog3(("%s e1kFallbackAddSegment: pState->u16TxPktLen=%x\n",
3978 INSTANCE(pState), pState->u16TxPktLen));
3979 if (pState->u16HdrRemain > 0)
3980 {
3981 /* The header was not complete, check if it is now */
3982 if (u16Len >= pState->u16HdrRemain)
3983 {
3984 /* The rest is payload */
3985 u16Len -= pState->u16HdrRemain;
3986 pState->u16HdrRemain = 0;
3987 /* Save partial checksum and flags */
3988 pState->u32SavedCsum = pTcpHdr->chksum;
3989 pState->u16SavedFlags = pTcpHdr->hdrlen_flags;
3990 /* Clear FIN and PSH flags now and set them only in the last segment */
3991 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
3992 }
3993 else
3994 {
3995 /* Still not */
3996 pState->u16HdrRemain -= u16Len;
3997 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
3998 INSTANCE(pState), pState->u16HdrRemain));
3999 return rc;
4000 }
4001 }
4002
4003 pState->u32PayRemain -= u16Len;
4004
4005 if (fSend)
4006 {
4007 /* Leave ethernet header intact */
4008 /* IP Total Length = payload + headers - ethernet header */
4009 pIpHdr->total_len = htons(pState->u16TxPktLen - pState->contextTSE.ip.u8CSS);
4010 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4011 INSTANCE(pState), ntohs(pIpHdr->total_len)));
4012 /* Update IP Checksum */
4013 pIpHdr->chksum = 0;
4014 e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
4015 pState->contextTSE.ip.u8CSO,
4016 pState->contextTSE.ip.u8CSS,
4017 pState->contextTSE.ip.u16CSE);
4018
4019 /* Update TCP flags */
4020 /* Restore original FIN and PSH flags for the last segment */
4021 if (pState->u32PayRemain == 0)
4022 {
4023 pTcpHdr->hdrlen_flags = pState->u16SavedFlags;
4024 E1K_INC_CNT32(TSCTC);
4025 }
4026 /* Add TCP length to partial pseudo header sum */
4027 uint32_t csum = pState->u32SavedCsum
4028 + htons(pState->u16TxPktLen - pState->contextTSE.tu.u8CSS);
4029 while (csum >> 16)
4030 csum = (csum >> 16) + (csum & 0xFFFF);
4031 pTcpHdr->chksum = csum;
4032 /* Compute final checksum */
4033 e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
4034 pState->contextTSE.tu.u8CSO,
4035 pState->contextTSE.tu.u8CSS,
4036 pState->contextTSE.tu.u16CSE);
4037
4038 /*
4039 * Transmit it.
4040 */
4041 if (pState->CTX_SUFF(pTxSg))
4042 {
4043 Assert(pState->u16TxPktLen <= pState->CTX_SUFF(pTxSg)->cbAvailable);
4044 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
4045 if (pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pState->aTxPacketFallback)
4046 memcpy(pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->aTxPacketFallback, pState->u16TxPktLen);
4047 pState->CTX_SUFF(pTxSg)->cbUsed = pState->u16TxPktLen;
4048 pState->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pState->u16TxPktLen;
4049 }
4050 e1kTransmitFrame(pState, fOnWorkerThread);
4051
4052 /* Update Sequence Number */
4053 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pState->u16TxPktLen
4054 - pState->contextTSE.dw3.u8HDRLEN);
4055 /* Increment IP identification */
4056 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4057
4058 /* Allocate new buffer for the next segment. */
4059 if (pState->u32PayRemain)
4060 {
4061 pState->cbTxAlloc = RT_MIN(pState->u32PayRemain,
4062 pState->contextTSE.dw3.u16MSS)
4063 + pState->contextTSE.dw3.u8HDRLEN
4064 + (pState->fVTag ? 4 : 0);
4065 rc = e1kXmitAllocBuf(pState, false /* fGSO */);
4066 }
4067 }
4068
4069 return rc;
4070}
4071#endif /* E1K_WITH_TXD_CACHE */
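/*
 * Illustrative sketch (not part of the device code): both variants of
 * e1kFallbackAddSegment() above add the TCP segment length to the partial
 * pseudo-header checksum and fold the carries back into 16 bits before the
 * final checksum pass. A minimal standalone version of that folding step,
 * with a hypothetical name:
 */
#if 0 /* illustrative only, never compiled */
# include <stdint.h>

static uint16_t e1kSketchCsumFoldAdd(uint16_t uPartialSum, uint16_t uValue)
{
    uint32_t sum = (uint32_t)uPartialSum + uValue;
    while (sum >> 16)
        sum = (sum >> 16) + (sum & 0xffff);
    return (uint16_t)sum;
}
#endif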
4072
4073#ifndef E1K_WITH_TXD_CACHE
4074/**
4075 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4076 * frame.
4077 *
4078 * We construct the frame in the fallback buffer first and then copy it to the SG
4079 * buffer before passing it down to the network driver code.
4080 *
4081 * @returns true if the frame should be transmitted, false if not.
4082 *
4083 * @param pState The device state structure.
4084 * @param pDesc Pointer to the descriptor to transmit.
4085 * @param cbFragment Length of descriptor's buffer.
4086 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4087 * @thread E1000_TX
4088 */
4089static bool e1kFallbackAddToFrame(E1KSTATE* pState, E1KTXDESC* pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4090{
4091 PPDMSCATTERGATHER pTxSg = pState->CTX_SUFF(pTxSg);
4092 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4093 Assert(pDesc->data.cmd.fTSE);
4094 Assert(!e1kXmitIsGsoBuf(pTxSg));
4095
4096 uint16_t u16MaxPktLen = pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw3.u16MSS;
4097 Assert(u16MaxPktLen != 0);
4098 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4099
4100 /*
4101 * Carve out segments.
4102 */
4103 do
4104 {
4105 /* Calculate how many bytes we have left in this TCP segment */
4106 uint32_t cb = u16MaxPktLen - pState->u16TxPktLen;
4107 if (cb > cbFragment)
4108 {
4109 /* This descriptor fits completely into current segment */
4110 cb = cbFragment;
4111 e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4112 }
4113 else
4114 {
4115 e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4116 /*
4117 * Rewind the packet tail pointer to the beginning of payload,
4118 * so we continue writing right beyond the header.
4119 */
4120 pState->u16TxPktLen = pState->contextTSE.dw3.u8HDRLEN;
4121 }
4122
4123 pDesc->data.u64BufAddr += cb;
4124 cbFragment -= cb;
4125 } while (cbFragment > 0);
4126
4127 if (pDesc->data.cmd.fEOP)
4128 {
4129 /* End of packet, next segment will contain header. */
4130 if (pState->u32PayRemain != 0)
4131 E1K_INC_CNT32(TSCTFC);
4132 pState->u16TxPktLen = 0;
4133 e1kXmitFreeBuf(pState);
4134 }
4135
4136 return false;
4137}
4138#else /* E1K_WITH_TXD_CACHE */
4139/**
4140 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4141 * frame.
4142 *
4143 * We construct the frame in the fallback buffer first and then copy it to the SG
4144 * buffer before passing it down to the network driver code.
4145 *
4146 * @returns error code
4147 *
4148 * @param pState The device state structure.
4149 * @param pDesc Pointer to the descriptor to transmit.
4151 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4152 * @thread E1000_TX
4153 */
4154static int e1kFallbackAddToFrame(E1KSTATE* pState, E1KTXDESC* pDesc, bool fOnWorkerThread)
4155{
4156 int rc = VINF_SUCCESS;
4157 PPDMSCATTERGATHER pTxSg = pState->CTX_SUFF(pTxSg);
4158 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4159 Assert(pDesc->data.cmd.fTSE);
4160 Assert(!e1kXmitIsGsoBuf(pTxSg));
4161
4162 uint16_t u16MaxPktLen = pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw3.u16MSS;
4163 Assert(u16MaxPktLen != 0);
4164 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4165
4166 /*
4167 * Carve out segments.
4168 */
4169 do
4170 {
4171 /* Calculate how many bytes we have left in this TCP segment */
4172 uint32_t cb = u16MaxPktLen - pState->u16TxPktLen;
4173 if (cb > pDesc->data.cmd.u20DTALEN)
4174 {
4175 /* This descriptor fits completely into current segment */
4176 cb = pDesc->data.cmd.u20DTALEN;
4177 rc = e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4178 }
4179 else
4180 {
4181 rc = e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4182 /*
4183 * Rewind the packet tail pointer to the beginning of payload,
4184 * so we continue writing right beyond the header.
4185 */
4186 pState->u16TxPktLen = pState->contextTSE.dw3.u8HDRLEN;
4187 }
4188
4189 pDesc->data.u64BufAddr += cb;
4190 pDesc->data.cmd.u20DTALEN -= cb;
4191 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4192
4193 if (pDesc->data.cmd.fEOP)
4194 {
4195 /* End of packet, next segment will contain header. */
4196 if (pState->u32PayRemain != 0)
4197 E1K_INC_CNT32(TSCTFC);
4198 pState->u16TxPktLen = 0;
4199 e1kXmitFreeBuf(pState);
4200 }
4201
4202 return rc;
4203}
4204#endif /* E1K_WITH_TXD_CACHE */
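/*
 * Illustrative sketch (not part of the device code): the carving loops above
 * decide how much of the current descriptor's buffer still fits into the TCP
 * segment being assembled (header length + MSS). A minimal standalone version
 * of that decision, with hypothetical names:
 */
#if 0 /* illustrative only, never compiled */
# include <stdint.h>

static uint32_t e1kSketchBytesForSegment(uint32_t cbMaxSegment,
                                         uint32_t cbAlreadyInSegment,
                                         uint32_t cbLeftInDescriptor)
{
    uint32_t cbRoom = cbMaxSegment - cbAlreadyInSegment;
    return cbRoom < cbLeftInDescriptor ? cbRoom : cbLeftInDescriptor;
}
#endif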
4205
4206
4207/**
4208 * Add descriptor's buffer to transmit frame.
4209 *
4210 * This deals with GSO and normal frames; e1kFallbackAddToFrame deals with the
4211 * TSE frames we cannot handle as GSO.
4212 *
4213 * @returns true on success, false on failure.
4214 *
4215 * @param pThis The device state structure.
4216 * @param PhysAddr The physical address of the descriptor buffer.
4217 * @param cbFragment Length of descriptor's buffer.
4218 * @thread E1000_TX
4219 */
4220static bool e1kAddToFrame(E1KSTATE *pThis, RTGCPHYS PhysAddr, uint32_t cbFragment)
4221{
4222 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4223 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4224 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4225
4226 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4227 {
4228 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", INSTANCE(pThis), cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4229 return false;
4230 }
4231 if (RT_UNLIKELY( fGso && cbNewPkt > pTxSg->cbAvailable ))
4232 {
4233 E1kLog(("%s Transmit packet is too large: %u > %u(max)/GSO\n", INSTANCE(pThis), cbNewPkt, pTxSg->cbAvailable));
4234 return false;
4235 }
4236
4237 if (RT_LIKELY(pTxSg))
4238 {
4239 Assert(pTxSg->cSegs == 1);
4240 Assert(pTxSg->cbUsed == pThis->u16TxPktLen);
4241
4242 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4243 (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4244
4245 pTxSg->cbUsed = cbNewPkt;
4246 }
4247 pThis->u16TxPktLen = cbNewPkt;
4248
4249 return true;
4250}
4251
4252
4253/**
4254 * Write the descriptor back to guest memory and notify the guest.
4255 *
4256 * @param pState The device state structure.
4257 * @param pDesc Pointer to the descriptor that has been transmitted.
4258 * @param addr Physical address of the descriptor in guest memory.
4259 * @thread E1000_TX
4260 */
4261static void e1kDescReport(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr)
4262{
4263 /*
4264 * We fake descriptor write-back bursting. Descriptors are written back as they are
4265 * processed.
4266 */
4267 /* Let's pretend we process descriptors. Write back with DD set. */
4268 /*
4269 * Prior to r71586 we tried to accommodate the case when write-back bursts
4270 * are enabled without actually implementing bursting, by writing back all
4271 * descriptors, even the ones that do not have RS set. This caused kernel
4272 * panics with Linux SMP kernels, as the e1000 driver tried to free the skb
4273 * associated with a written-back descriptor even if it happened to be a context
4274 * descriptor, and context descriptors have no skb associated with them.
4275 * Starting from r71586 we write back only the descriptors with RS set,
4276 * which is a little bit different from what the real hardware does in
4277 * case there is a chain of data descriptors where some of them have RS set
4278 * and others do not. That is a very uncommon scenario, though.
4279 * We need to check RPS as well, since some legacy drivers use it instead of
4280 * RS even with newer cards.
4281 */
4282 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4283 {
4284 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4285 e1kWriteBackDesc(pState, pDesc, addr);
4286 if (pDesc->legacy.cmd.fEOP)
4287 {
4288#ifdef E1K_USE_TX_TIMERS
4289 if (pDesc->legacy.cmd.fIDE)
4290 {
4291 E1K_INC_ISTAT_CNT(pState->uStatTxIDE);
4292 //if (pState->fIntRaised)
4293 //{
4294 // /* Interrupt is already pending, no need for timers */
4295 // ICR |= ICR_TXDW;
4296 //}
4297 //else {
4298 /* Arm the timer to fire in TIVD usec (discard .024) */
4299 e1kArmTimer(pState, pState->CTX_SUFF(pTIDTimer), TIDV);
4300# ifndef E1K_NO_TAD
4301 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4302 E1kLog2(("%s Checking if TAD timer is running\n",
4303 INSTANCE(pState)));
4304 if (TADV != 0 && !TMTimerIsActive(pState->CTX_SUFF(pTADTimer)))
4305 e1kArmTimer(pState, pState->CTX_SUFF(pTADTimer), TADV);
4306# endif /* E1K_NO_TAD */
4307 }
4308 else
4309 {
4310 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4311 INSTANCE(pState)));
4312# ifndef E1K_NO_TAD
4313 /* Cancel both timers if armed and fire immediately. */
4314 e1kCancelTimer(pState, pState->CTX_SUFF(pTADTimer));
4315# endif /* E1K_NO_TAD */
4316#endif /* E1K_USE_TX_TIMERS */
4317 E1K_INC_ISTAT_CNT(pState->uStatIntTx);
4318 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXDW);
4319#ifdef E1K_USE_TX_TIMERS
4320 }
4321#endif /* E1K_USE_TX_TIMERS */
4322 }
4323 }
4324 else
4325 {
4326 E1K_INC_ISTAT_CNT(pState->uStatTxNoRS);
4327 }
4328}
4329
4330#ifndef E1K_WITH_TXD_CACHE
4331/**
4332 * Process Transmit Descriptor.
4333 *
4334 * E1000 supports three types of transmit descriptors:
4335 * - legacy data descriptors of older format (context-less).
4336 * - data the same as legacy but providing new offloading capabilities.
4337 * - context sets up the context for following data descriptors.
4338 *
4339 * @param pState The device state structure.
4340 * @param pDesc Pointer to descriptor union.
4341 * @param addr Physical address of descriptor in guest memory.
4342 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4343 * @thread E1000_TX
4344 */
4345static int e1kXmitDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr, bool fOnWorkerThread)
4346{
4347 int rc = VINF_SUCCESS;
4348 uint32_t cbVTag = 0;
4349
4350 e1kPrintTDesc(pState, pDesc, "vvv");
4351
4352#ifdef E1K_USE_TX_TIMERS
4353 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
4354#endif /* E1K_USE_TX_TIMERS */
4355
4356 switch (e1kGetDescType(pDesc))
4357 {
4358 case E1K_DTYP_CONTEXT:
4359 if (pDesc->context.dw2.fTSE)
4360 {
4361 pState->contextTSE = pDesc->context;
4362 pState->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4363 pState->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4364 e1kSetupGsoCtx(&pState->GsoCtx, &pDesc->context);
4365 STAM_COUNTER_INC(&pState->StatTxDescCtxTSE);
4366 }
4367 else
4368 {
4369 pState->contextNormal = pDesc->context;
4370 STAM_COUNTER_INC(&pState->StatTxDescCtxNormal);
4371 }
4372 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4373 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", INSTANCE(pState),
4374 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4375 pDesc->context.ip.u8CSS,
4376 pDesc->context.ip.u8CSO,
4377 pDesc->context.ip.u16CSE,
4378 pDesc->context.tu.u8CSS,
4379 pDesc->context.tu.u8CSO,
4380 pDesc->context.tu.u16CSE));
4381 E1K_INC_ISTAT_CNT(pState->uStatDescCtx);
4382 e1kDescReport(pState, pDesc, addr);
4383 break;
4384
4385 case E1K_DTYP_DATA:
4386 {
4387 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4388 {
4389 E1kLog2(("%s Empty data descriptor, skipped.\n", INSTANCE(pState)));
4390 /** @todo Same as legacy when !TSE. See below. */
4391 break;
4392 }
4393 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4394 &pState->StatTxDescTSEData:
4395 &pState->StatTxDescData);
4396 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4397 E1K_INC_ISTAT_CNT(pState->uStatDescDat);
4398
4399 /*
4400 * The last descriptor of a non-TSE packet must contain the VLE flag.
4401 * TSE packets have the VLE flag in the first descriptor. The latter
4402 * case is taken care of a bit later, when cbVTag gets assigned.
4403 *
4404 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4405 */
4406 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4407 {
4408 pState->fVTag = pDesc->data.cmd.fVLE;
4409 pState->u16VTagTCI = pDesc->data.dw3.u16Special;
4410 }
4411 /*
4412 * First fragment: Allocate new buffer and save the IXSM and TXSM
4413 * packet options as these are only valid in the first fragment.
4414 */
4415 if (pState->u16TxPktLen == 0)
4416 {
4417 pState->fIPcsum = pDesc->data.dw3.fIXSM;
4418 pState->fTCPcsum = pDesc->data.dw3.fTXSM;
4419 E1kLog2(("%s Saving checksum flags:%s%s; \n", INSTANCE(pState),
4420 pState->fIPcsum ? " IP" : "",
4421 pState->fTCPcsum ? " TCP/UDP" : ""));
4422 if (pDesc->data.cmd.fTSE)
4423 {
4424 /* 2) pDesc->data.cmd.fTSE && pState->u16TxPktLen == 0 */
4425 pState->fVTag = pDesc->data.cmd.fVLE;
4426 pState->u16VTagTCI = pDesc->data.dw3.u16Special;
4427 cbVTag = pState->fVTag ? 4 : 0;
4428 }
4429 else if (pDesc->data.cmd.fEOP)
4430 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4431 else
4432 cbVTag = 4;
4433 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", INSTANCE(pState), cbVTag));
4434 if (e1kCanDoGso(&pState->GsoCtx, &pDesc->data, &pState->contextTSE))
4435 rc = e1kXmitAllocBuf(pState, pState->contextTSE.dw2.u20PAYLEN + pState->contextTSE.dw3.u8HDRLEN + cbVTag,
4436 true /*fExactSize*/, true /*fGso*/);
4437 else if (pDesc->data.cmd.fTSE)
4438 rc = e1kXmitAllocBuf(pState, pState->contextTSE.dw3.u16MSS + pState->contextTSE.dw3.u8HDRLEN + cbVTag,
4439 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4440 else
4441 rc = e1kXmitAllocBuf(pState, pDesc->data.cmd.u20DTALEN + cbVTag,
4442 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4443
4444 /**
4445 * @todo: Perhaps it is not that simple for GSO packets! We may
4446 * need to unwind some changes.
4447 */
4448 if (RT_FAILURE(rc))
4449 {
4450 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4451 break;
4452 }
4453 /** @todo Is there any way of indicating errors other than collisions? Like
4454 * VERR_NET_DOWN. */
4455 }
4456
4457 /*
4458 * Add the descriptor data to the frame. If the frame is complete,
4459 * transmit it and reset the u16TxPktLen field.
4460 */
4461 if (e1kXmitIsGsoBuf(pState->CTX_SUFF(pTxSg)))
4462 {
4463 STAM_COUNTER_INC(&pState->StatTxPathGSO);
4464 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4465 if (pDesc->data.cmd.fEOP)
4466 {
4467 if ( fRc
4468 && pState->CTX_SUFF(pTxSg)
4469 && pState->CTX_SUFF(pTxSg)->cbUsed == (size_t)pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN)
4470 {
4471 e1kTransmitFrame(pState, fOnWorkerThread);
4472 E1K_INC_CNT32(TSCTC);
4473 }
4474 else
4475 {
4476 if (fRc)
4477 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , INSTANCE(pState),
4478 pState->CTX_SUFF(pTxSg), pState->CTX_SUFF(pTxSg) ? pState->CTX_SUFF(pTxSg)->cbUsed : 0,
4479 pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN));
4480 e1kXmitFreeBuf(pState);
4481 E1K_INC_CNT32(TSCTFC);
4482 }
4483 pState->u16TxPktLen = 0;
4484 }
4485 }
4486 else if (!pDesc->data.cmd.fTSE)
4487 {
4488 STAM_COUNTER_INC(&pState->StatTxPathRegular);
4489 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4490 if (pDesc->data.cmd.fEOP)
4491 {
4492 if (fRc && pState->CTX_SUFF(pTxSg))
4493 {
4494 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
4495 if (pState->fIPcsum)
4496 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4497 pState->contextNormal.ip.u8CSO,
4498 pState->contextNormal.ip.u8CSS,
4499 pState->contextNormal.ip.u16CSE);
4500 if (pState->fTCPcsum)
4501 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4502 pState->contextNormal.tu.u8CSO,
4503 pState->contextNormal.tu.u8CSS,
4504 pState->contextNormal.tu.u16CSE);
4505 e1kTransmitFrame(pState, fOnWorkerThread);
4506 }
4507 else
4508 e1kXmitFreeBuf(pState);
4509 pState->u16TxPktLen = 0;
4510 }
4511 }
4512 else
4513 {
4514 STAM_COUNTER_INC(&pState->StatTxPathFallback);
4515 e1kFallbackAddToFrame(pState, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4516 }
4517
4518 e1kDescReport(pState, pDesc, addr);
4519 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4520 break;
4521 }
4522
4523 case E1K_DTYP_LEGACY:
4524 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4525 {
4526 E1kLog(("%s Empty legacy descriptor, skipped.\n", INSTANCE(pState)));
4527 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4528 break;
4529 }
4530 STAM_COUNTER_INC(&pState->StatTxDescLegacy);
4531 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4532
4533 /* First fragment: allocate new buffer. */
4534 if (pState->u16TxPktLen == 0)
4535 {
4536 if (pDesc->legacy.cmd.fEOP)
4537 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4538 else
4539 cbVTag = 4;
4540 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", INSTANCE(pState), cbVTag));
4541 /** @todo reset status bits? */
4542 rc = e1kXmitAllocBuf(pState, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
4543 if (RT_FAILURE(rc))
4544 {
4545 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4546 break;
4547 }
4548
4549 /** @todo Is there any way of indicating errors other than collisions? Like
4550 * VERR_NET_DOWN. */
4551 }
4552
4553 /* Add fragment to frame. */
4554 if (e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4555 {
4556 E1K_INC_ISTAT_CNT(pState->uStatDescLeg);
4557
4558 /* Last fragment: Transmit and reset the packet storage counter. */
4559 if (pDesc->legacy.cmd.fEOP)
4560 {
4561 pState->fVTag = pDesc->legacy.cmd.fVLE;
4562 pState->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4563 /** @todo Offload processing goes here. */
4564 e1kTransmitFrame(pState, fOnWorkerThread);
4565 pState->u16TxPktLen = 0;
4566 }
4567 }
4568 /* Last fragment + failure: free the buffer and reset the storage counter. */
4569 else if (pDesc->legacy.cmd.fEOP)
4570 {
4571 e1kXmitFreeBuf(pState);
4572 pState->u16TxPktLen = 0;
4573 }
4574
4575 e1kDescReport(pState, pDesc, addr);
4576 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4577 break;
4578
4579 default:
4580 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4581 INSTANCE(pState), e1kGetDescType(pDesc)));
4582 break;
4583 }
4584
4585 return rc;
4586}
4587#else /* E1K_WITH_TXD_CACHE */
4588/**
4589 * Process Transmit Descriptor.
4590 *
4591 * E1000 supports three types of transmit descriptors:
4592 * - legacy -- data descriptors in the older, context-less format.
4593 * - data -- same as legacy but with new offloading capabilities.
4594 * - context -- sets up the offload context for the following data descriptors.
4595 *
4596 * @param pState The device state structure.
4597 * @param pDesc Pointer to descriptor union.
4598 * @param addr Physical address of descriptor in guest memory.
4599 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4601 * @thread E1000_TX
4602 */
4603static int e1kXmitDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr,
4604 bool fOnWorkerThread)
4605{
4606 int rc = VINF_SUCCESS;
4607 uint32_t cbVTag = 0;
4608
4609 e1kPrintTDesc(pState, pDesc, "vvv");
4610
4611#ifdef E1K_USE_TX_TIMERS
4612 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
4613#endif /* E1K_USE_TX_TIMERS */
4614
4615 switch (e1kGetDescType(pDesc))
4616 {
4617 case E1K_DTYP_CONTEXT:
4618 /* The caller has already updated the context. */
4619 E1K_INC_ISTAT_CNT(pState->uStatDescCtx);
4620 e1kDescReport(pState, pDesc, addr);
4621 break;
4622
4623 case E1K_DTYP_DATA:
4624 {
4625 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4626 &pState->StatTxDescTSEData:
4627 &pState->StatTxDescData);
4628 E1K_INC_ISTAT_CNT(pState->uStatDescDat);
4629 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4630 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4631 {
4632 E1kLog2(("% Empty data descriptor, skipped.\n", INSTANCE(pState)));
4633 }
4634 else
4635 {
4636 /*
4637 * Add the descriptor data to the frame. If the frame is complete,
4638 * transmit it and reset the u16TxPktLen field.
4639 */
4640 if (e1kXmitIsGsoBuf(pState->CTX_SUFF(pTxSg)))
4641 {
4642 STAM_COUNTER_INC(&pState->StatTxPathGSO);
4643 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4644 if (pDesc->data.cmd.fEOP)
4645 {
4646 if ( fRc
4647 && pState->CTX_SUFF(pTxSg)
4648 && pState->CTX_SUFF(pTxSg)->cbUsed == (size_t)pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN)
4649 {
4650 e1kTransmitFrame(pState, fOnWorkerThread);
4651 E1K_INC_CNT32(TSCTC);
4652 }
4653 else
4654 {
4655 if (fRc)
4656 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , INSTANCE(pState),
4657 pState->CTX_SUFF(pTxSg), pState->CTX_SUFF(pTxSg) ? pState->CTX_SUFF(pTxSg)->cbUsed : 0,
4658 pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN));
4659 e1kXmitFreeBuf(pState);
4660 E1K_INC_CNT32(TSCTFC);
4661 }
4662 pState->u16TxPktLen = 0;
4663 }
4664 }
4665 else if (!pDesc->data.cmd.fTSE)
4666 {
4667 STAM_COUNTER_INC(&pState->StatTxPathRegular);
4668 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4669 if (pDesc->data.cmd.fEOP)
4670 {
4671 if (fRc && pState->CTX_SUFF(pTxSg))
4672 {
4673 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
4674 if (pState->fIPcsum)
4675 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4676 pState->contextNormal.ip.u8CSO,
4677 pState->contextNormal.ip.u8CSS,
4678 pState->contextNormal.ip.u16CSE);
4679 if (pState->fTCPcsum)
4680 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4681 pState->contextNormal.tu.u8CSO,
4682 pState->contextNormal.tu.u8CSS,
4683 pState->contextNormal.tu.u16CSE);
4684 e1kTransmitFrame(pState, fOnWorkerThread);
4685 }
4686 else
4687 e1kXmitFreeBuf(pState);
4688 pState->u16TxPktLen = 0;
4689 }
4690 }
4691 else
4692 {
4693 STAM_COUNTER_INC(&pState->StatTxPathFallback);
4694 rc = e1kFallbackAddToFrame(pState, pDesc, fOnWorkerThread);
4695 }
4696 }
4697 e1kDescReport(pState, pDesc, addr);
4698 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4699 break;
4700 }
4701
4702 case E1K_DTYP_LEGACY:
4703 STAM_COUNTER_INC(&pState->StatTxDescLegacy);
4704 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4705 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4706 {
4707 E1kLog(("%s Empty legacy descriptor, skipped.\n", INSTANCE(pState)));
4708 }
4709 else
4710 {
4711 /* Add fragment to frame. */
4712 if (e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4713 {
4714 E1K_INC_ISTAT_CNT(pState->uStatDescLeg);
4715
4716 /* Last fragment: Transmit and reset the packet storage counter. */
4717 if (pDesc->legacy.cmd.fEOP)
4718 {
4719 if (pDesc->legacy.cmd.fIC)
4720 {
4721 e1kInsertChecksum(pState,
4722 (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
4723 pState->u16TxPktLen,
4724 pDesc->legacy.cmd.u8CSO,
4725 pDesc->legacy.dw3.u8CSS,
4726 0);
4727 }
4728 e1kTransmitFrame(pState, fOnWorkerThread);
4729 pState->u16TxPktLen = 0;
4730 }
4731 }
4732 /* Last fragment + failure: free the buffer and reset the storage counter. */
4733 else if (pDesc->legacy.cmd.fEOP)
4734 {
4735 e1kXmitFreeBuf(pState);
4736 pState->u16TxPktLen = 0;
4737 }
4738 }
4739 e1kDescReport(pState, pDesc, addr);
4740 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4741 break;
4742
4743 default:
4744 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4745 INSTANCE(pState), e1kGetDescType(pDesc)));
4746 break;
4747 }
4748
4749 return rc;
4750}
4751
4752
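/**
 * Load the transmit offload context from a context descriptor.
 *
 * Stores the descriptor either as the TSE context (also priming the remaining
 * payload/header counters and the GSO context) or as the normal offload
 * context, and updates the corresponding statistics counter.
 *
 * @param   pState      The device state structure.
 * @param   pDesc       Pointer to the context descriptor.
 */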
4753DECLINLINE(void) e1kUpdateTxContext(E1KSTATE* pState, E1KTXDESC* pDesc)
4754{
4755 if (pDesc->context.dw2.fTSE)
4756 {
4757 pState->contextTSE = pDesc->context;
4758 pState->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4759 pState->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4760 e1kSetupGsoCtx(&pState->GsoCtx, &pDesc->context);
4761 STAM_COUNTER_INC(&pState->StatTxDescCtxTSE);
4762 }
4763 else
4764 {
4765 pState->contextNormal = pDesc->context;
4766 STAM_COUNTER_INC(&pState->StatTxDescCtxNormal);
4767 }
4768 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4769 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", INSTANCE(pState),
4770 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4771 pDesc->context.ip.u8CSS,
4772 pDesc->context.ip.u8CSO,
4773 pDesc->context.ip.u16CSE,
4774 pDesc->context.tu.u8CSS,
4775 pDesc->context.tu.u8CSO,
4776 pDesc->context.tu.u16CSE));
4777}
4778
4779
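/**
 * Locate a complete packet in the TX descriptor cache.
 *
 * Walks the fetched descriptors starting at iTxDCurrent, applying context
 * descriptors on the way, and for the packet terminated by an EOP descriptor
 * computes the required buffer size (cbTxAlloc) as well as the VLAN tagging
 * and GSO parameters.
 *
 * @returns true if a complete packet (or a run of empty descriptors) was found
 *          in the cache, false if more descriptors need to be fetched.
 * @param   pState      The device state structure.
 */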
4780static bool e1kLocateTxPacket(E1KSTATE *pState)
4781{
4782 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
4783 INSTANCE(pState), pState->cbTxAlloc));
4784 /* Check if we have located the packet already. */
4785 if (pState->cbTxAlloc)
4786 {
4787 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
4788 INSTANCE(pState), pState->cbTxAlloc));
4789 return true;
4790 }
4791
4792 bool fTSE = false;
4793 uint32_t cbPacket = 0;
4794
4795 for (int i = pState->iTxDCurrent; i < pState->nTxDFetched; ++i)
4796 {
4797 E1KTXDESC *pDesc = &pState->aTxDescriptors[i];
4798 switch (e1kGetDescType(pDesc))
4799 {
4800 case E1K_DTYP_CONTEXT:
4801 e1kUpdateTxContext(pState, pDesc);
4802 continue;
4803 case E1K_DTYP_LEGACY:
4804 /* Skip empty descriptors. */
4805 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
4806 break;
4807 cbPacket += pDesc->legacy.cmd.u16Length;
4808 pState->fGSO = false;
4809 break;
4810 case E1K_DTYP_DATA:
4811 /* Skip empty descriptors. */
4812 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
4813 break;
4814 if (cbPacket == 0)
4815 {
4816 /*
4817 * The first fragment: save IXSM and TXSM options
4818 * as these are only valid in the first fragment.
4819 */
4820 pState->fIPcsum = pDesc->data.dw3.fIXSM;
4821 pState->fTCPcsum = pDesc->data.dw3.fTXSM;
4822 fTSE = pDesc->data.cmd.fTSE;
4823 /*
4824 * TSE descriptors have VLE bit properly set in
4825 * the first fragment.
4826 */
4827 if (fTSE)
4828 {
4829 pState->fVTag = pDesc->data.cmd.fVLE;
4830 pState->u16VTagTCI = pDesc->data.dw3.u16Special;
4831 }
4832 pState->fGSO = e1kCanDoGso(&pState->GsoCtx, &pDesc->data, &pState->contextTSE);
4833 }
4834 cbPacket += pDesc->data.cmd.u20DTALEN;
4835 break;
4836 default:
4837 AssertMsgFailed(("Impossible descriptor type!"));
4838 }
4839 if (pDesc->legacy.cmd.fEOP)
4840 {
4841 /*
4842 * Non-TSE descriptors have VLE bit properly set in
4843 * the last fragment.
4844 */
4845 if (!fTSE)
4846 {
4847 pState->fVTag = pDesc->data.cmd.fVLE;
4848 pState->u16VTagTCI = pDesc->data.dw3.u16Special;
4849 }
4850 /*
4851 * Compute the required buffer size. If we cannot do GSO but still
4852 * have to do segmentation we allocate the first segment only.
4853 */
4854 pState->cbTxAlloc = (!fTSE || pState->fGSO) ?
4855 cbPacket :
4856 RT_MIN(cbPacket, pState->contextTSE.dw3.u16MSS + pState->contextTSE.dw3.u8HDRLEN);
4857 if (pState->fVTag)
4858 pState->cbTxAlloc += 4;
4859 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
4860 INSTANCE(pState), pState->cbTxAlloc));
4861 return true;
4862 }
4863 }
4864
4865 if (cbPacket == 0 && pState->nTxDFetched - pState->iTxDCurrent > 0)
4866 {
4867 /* All descriptors were empty, we need to process them as a dummy packet */
4868 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
4869 INSTANCE(pState), pState->cbTxAlloc));
4870 return true;
4871 }
4872 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d\n",
4873 INSTANCE(pState), pState->cbTxAlloc));
4874 return false;
4875}
4876
4877
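/**
 * Process the descriptors of the currently located packet.
 *
 * Feeds the cached descriptors to e1kXmitDesc() one by one, advancing TDH
 * (with wrap-around) and raising ICR.TXD_LOW when the ring falls below the
 * LWTHRESH threshold. Stops after the EOP descriptor or on failure.
 *
 * @returns VBox status code.
 * @param   pState              The device state structure.
 * @param   fOnWorkerThread     Whether we're on a worker thread or on an EMT.
 */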
4878static int e1kXmitPacket(E1KSTATE *pState, bool fOnWorkerThread)
4879{
4880 int rc = VINF_SUCCESS;
4881
4882 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
4883 INSTANCE(pState), pState->iTxDCurrent, pState->nTxDFetched));
4884
4885 while (pState->iTxDCurrent < pState->nTxDFetched)
4886 {
4887 E1KTXDESC *pDesc = &pState->aTxDescriptors[pState->iTxDCurrent];
4888 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
4889 INSTANCE(pState), TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
4890 rc = e1kXmitDesc(pState, pDesc,
4891 ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(E1KTXDESC),
4892 fOnWorkerThread);
4893 if (RT_FAILURE(rc))
4894 break;
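 /* Advance the head pointer; TDLEN is in bytes, so wrap TDH to zero once it walks past the end of the ring. */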
4895 if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
4896 TDH = 0;
4897 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
4898 if (uLowThreshold != 0 && e1kGetTxLen(pState) <= uLowThreshold)
4899 {
4900 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
4901 INSTANCE(pState), e1kGetTxLen(pState), GET_BITS(TXDCTL, LWTHRESH)*8));
4902 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXD_LOW);
4903 }
4904 ++pState->iTxDCurrent;
4905 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
4906 break;
4907 }
4908
4909 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
4910 INSTANCE(pState), rc, pState->iTxDCurrent, pState->nTxDFetched));
4911 return rc;
4912}
4913#endif /* E1K_WITH_TXD_CACHE */
4914
4915#ifndef E1K_WITH_TXD_CACHE
4916/**
4917 * Transmit pending descriptors.
4918 *
4919 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
4920 *
4921 * @param pState The E1000 state.
4922 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
4923 */
4924static int e1kXmitPending(E1KSTATE *pState, bool fOnWorkerThread)
4925{
4926 int rc = VINF_SUCCESS;
4927
4928 /* Check if transmitter is enabled. */
4929 if (!(TCTL & TCTL_EN))
4930 return VINF_SUCCESS;
4931 /*
4932 * Grab the xmit lock of the driver as well as the E1K device state.
4933 */
4934 rc = e1kCsTxEnter(pState, VERR_SEM_BUSY);
4935 if (RT_LIKELY(rc == VINF_SUCCESS))
4936 {
4937 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
4938 if (pDrv)
4939 {
4940 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
4941 if (RT_FAILURE(rc))
4942 {
4943 e1kCsTxLeave(pState);
4944 return rc;
4945 }
4946 }
4947 /*
4948 * Process all pending descriptors.
4949 * Note! Do not process descriptors in locked state
4950 */
4951 while (TDH != TDT && !pState->fLocked)
4952 {
4953 E1KTXDESC desc;
4954 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
4955 INSTANCE(pState), TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
4956
4957 e1kLoadDesc(pState, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
4958 rc = e1kXmitDesc(pState, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc), fOnWorkerThread);
4959 /* If we failed to transmit descriptor we will try it again later */
4960 if (RT_FAILURE(rc))
4961 break;
4962 if (++TDH * sizeof(desc) >= TDLEN)
4963 TDH = 0;
4964
4965 if (e1kGetTxLen(pState) <= GET_BITS(TXDCTL, LWTHRESH)*8)
4966 {
4967 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
4968 INSTANCE(pState), e1kGetTxLen(pState), GET_BITS(TXDCTL, LWTHRESH)*8));
4969 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXD_LOW);
4970 }
4971
4972 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4973 }
4974
4975 /// @todo: uncomment: pState->uStatIntTXQE++;
4976 /// @todo: uncomment: e1kRaiseInterrupt(pState, ICR_TXQE);
4977 /*
4978 * Release the lock.
4979 */
4980 if (pDrv)
4981 pDrv->pfnEndXmit(pDrv);
4982 e1kCsTxLeave(pState);
4983 }
4984
4985 return rc;
4986}
4987#else /* E1K_WITH_TXD_CACHE */
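/**
 * Dump the contents of the TX descriptor cache to the log (level 4).
 *
 * @param   pState      The device state structure.
 */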
4988static void e1kDumpTxDCache(E1KSTATE *pState)
4989{
4990 for (int i = 0; i < pState->nTxDFetched; ++i)
4991 e1kPrintTDesc(pState, &pState->aTxDescriptors[i], "***", RTLOGGRPFLAGS_LEVEL_4);
4992}
4993
4994/**
4995 * Transmit pending descriptors.
4996 *
4997 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
4998 *
4999 * @param pState The E1000 state.
5000 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5001 */
5002static int e1kXmitPending(E1KSTATE *pState, bool fOnWorkerThread)
5003{
5004 int rc = VINF_SUCCESS;
5005
5006 /* Check if transmitter is enabled. */
5007 if (!(TCTL & TCTL_EN))
5008 return VINF_SUCCESS;
5009 /*
5010 * Grab the xmit lock of the driver as well as the E1K device state.
5011 */
5012 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
5013 if (pDrv)
5014 {
5015 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5016 if (RT_FAILURE(rc))
5017 return rc;
5018 }
5019
5020 /*
5021 * Process all pending descriptors.
5022 * Note! Do not process descriptors in locked state
5023 */
5024 rc = e1kCsTxEnter(pState, VERR_SEM_BUSY);
5025 if (RT_LIKELY(rc == VINF_SUCCESS))
5026 {
5027 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
5028 /*
5029 * fIncomplete is set whenever we try to fetch additional descriptors
5030 * for an incomplete packet. If we fail to locate a complete packet on
5031 * the next iteration, we need to reset the cache or we risk getting
5032 * stuck in this loop forever.
5033 */
5034 bool fIncomplete = false;
5035 while (!pState->fLocked && e1kTxDLazyLoad(pState))
5036 {
5037 while (e1kLocateTxPacket(pState))
5038 {
5039 fIncomplete = false;
5040 /* Found a complete packet, allocate it. */
5041 rc = e1kXmitAllocBuf(pState, pState->fGSO);
5042 /* If we're out of bandwidth we'll come back later. */
5043 if (RT_FAILURE(rc))
5044 goto out;
5045 /* Copy the packet to allocated buffer and send it. */
5046 rc = e1kXmitPacket(pState, fOnWorkerThread);
5047 /* If we're out of bandwidth we'll come back later. */
5048 if (RT_FAILURE(rc))
5049 goto out;
5050 }
5051 uint8_t u8Remain = pState->nTxDFetched - pState->iTxDCurrent;
5052 if (RT_UNLIKELY(fIncomplete))
5053 {
5054 /*
5055 * The descriptor cache is full, but we were unable to find
5056 * a complete packet in it. Drop the cache and hope that
5057 * the guest driver can recover from the network card error.
5058 */
5059 LogRel(("%s No complete packets in%s TxD cache! "
5060 "Fetched=%d, current=%d, TX len=%d.\n",
5061 INSTANCE(pState),
5062 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5063 pState->nTxDFetched, pState->iTxDCurrent,
5064 e1kGetTxLen(pState)));
5065 Log4(("%s No complete packets in%s TxD cache! "
5066 "Fetched=%d, current=%d, TX len=%d. Dump follows:\n",
5067 INSTANCE(pState),
5068 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5069 pState->nTxDFetched, pState->iTxDCurrent,
5070 e1kGetTxLen(pState)));
5071 e1kDumpTxDCache(pState);
5072 pState->iTxDCurrent = pState->nTxDFetched = 0;
5073 rc = VERR_NET_IO_ERROR;
5074 goto out;
5075 }
5076 if (u8Remain > 0)
5077 {
5078 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5079 "%d more are available\n",
5080 INSTANCE(pState), pState->iTxDCurrent, u8Remain,
5081 e1kGetTxLen(pState) - u8Remain));
5082
5083 /*
5084 * A packet was partially fetched. Move the incomplete packet to
5085 * the beginning of the cache buffer, then load more descriptors.
5086 */
5087 memmove(pState->aTxDescriptors,
5088 &pState->aTxDescriptors[pState->iTxDCurrent],
5089 u8Remain * sizeof(E1KTXDESC));
5090 pState->iTxDCurrent = 0;
5091 pState->nTxDFetched = u8Remain;
5092 e1kTxDLoadMore(pState);
5093 fIncomplete = true;
5094 }
5095 else
5096 pState->nTxDFetched = 0;
5097 pState->iTxDCurrent = 0;
5098 }
5099 if (!pState->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5100 {
5101 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5102 INSTANCE(pState)));
5103 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXD_LOW);
5104 }
5105out:
5106 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
5107
5108 /// @todo: uncomment: pState->uStatIntTXQE++;
5109 /// @todo: uncomment: e1kRaiseInterrupt(pState, ICR_TXQE);
5110
5111 e1kCsTxLeave(pState);
5112 }
5113
5114
5115 /*
5116 * Release the lock.
5117 */
5118 if (pDrv)
5119 pDrv->pfnEndXmit(pDrv);
5120 return rc;
5121}
5122#endif /* E1K_WITH_TXD_CACHE */
5123
5124#ifdef IN_RING3
5125
5126/**
5127 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5128 */
5129static DECLCALLBACK(void) e1kNetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5130{
5131 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
5132 /* Resume suspended transmission */
5133 STATUS &= ~STATUS_TXOFF;
5134 e1kXmitPending(pState, true /*fOnWorkerThread*/);
5135}
5136
5137/**
5138 * Callback for consuming from transmit queue. It gets called in R3 whenever
5139 * we enqueue something in R0/GC.
5140 *
5141 * @returns true
5142 * @param pDevIns Pointer to device instance structure.
5143 * @param pItem Pointer to the element being dequeued (not used).
5144 * @thread ???
5145 */
5146static DECLCALLBACK(bool) e1kTxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5147{
5148 NOREF(pItem);
5149 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5150 E1kLog2(("%s e1kTxQueueConsumer:\n", INSTANCE(pState)));
5151
5152 int rc = e1kXmitPending(pState, false /*fOnWorkerThread*/);
5153 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
5154
5155 return true;
5156}
5157
5158/**
5159 * Handler for the wakeup signaller queue.
5160 */
5161static DECLCALLBACK(bool) e1kCanRxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5162{
5163 e1kWakeupReceive(pDevIns);
5164 return true;
5165}
5166
5167#endif /* IN_RING3 */
5168
5169/**
5170 * Write handler for Transmit Descriptor Tail register.
5171 *
5172 * @param pState The device state structure.
5173 * @param offset Register offset in memory-mapped frame.
5174 * @param index Register index in register array.
5175 * @param value The value to store.
5177 * @thread EMT
5178 */
5179static int e1kRegWriteTDT(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5180{
5181 int rc = e1kRegWriteDefault(pState, offset, index, value);
5182
5183 /* All descriptors starting with the head up to, but not including, the tail belong to us. */
5184 /* Process them. */
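 /* E.g. with TDH=3 and TDT=7 descriptors 3, 4, 5 and 6 are ours to process. */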
5185 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5186 INSTANCE(pState), TDBAL, TDBAH, TDLEN, TDH, TDT));
5187
5188 /* Ignore TDT writes when the link is down. */
5189 if (TDH != TDT && (STATUS & STATUS_LU))
5190 {
5191 E1kLogRel(("E1000: TDT write: %d descriptors to process\n", e1kGetTxLen(pState)));
5192 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5193 INSTANCE(pState), e1kGetTxLen(pState)));
5194
5195 /* Transmit pending packets if possible, defer it if we cannot do it
5196 in the current context. */
5197#ifdef E1K_TX_DELAY
5198 rc = e1kCsTxEnter(pState, VERR_SEM_BUSY);
5199 if (RT_LIKELY(rc == VINF_SUCCESS))
5200 {
5201 if (!TMTimerIsActive(pState->CTX_SUFF(pTXDTimer)))
5202 {
5203#ifdef E1K_INT_STATS
5204 pState->u64ArmedAt = RTTimeNanoTS();
5205#endif /* E1K_INT_STATS */
5206 e1kArmTimer(pState, pState->CTX_SUFF(pTXDTimer), E1K_TX_DELAY);
5207 }
5208 E1K_INC_ISTAT_CNT(pState->uStatTxDelayed);
5209 e1kCsTxLeave(pState);
5210 return rc;
5211 }
5212 /* We failed to enter the TX critical section -- transmit as usual. */
5213#endif /* E1K_TX_DELAY */
5214# ifndef IN_RING3
5215 if (!pState->CTX_SUFF(pDrv))
5216 {
5217 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pState->CTX_SUFF(pTxQueue));
5218 if (RT_UNLIKELY(pItem))
5219 PDMQueueInsert(pState->CTX_SUFF(pTxQueue), pItem);
5220 }
5221 else
5222# endif
5223 {
5224 rc = e1kXmitPending(pState, false /*fOnWorkerThread*/);
5225 if (rc == VERR_TRY_AGAIN)
5226 rc = VINF_SUCCESS;
5227 else if (rc == VERR_SEM_BUSY)
5228 rc = VINF_IOM_R3_IOPORT_WRITE;
5229 AssertRC(rc);
5230 }
5231 }
5232
5233 return rc;
5234}
5235
5236/**
5237 * Write handler for Multicast Table Array registers.
5238 *
5239 * @param pState The device state structure.
5240 * @param offset Register offset in memory-mapped frame.
5241 * @param index Register index in register array.
5242 * @param value The value to store.
5243 * @thread EMT
5244 */
5245static int e1kRegWriteMTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5246{
5247 AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->auMTA), VERR_DEV_IO_ERROR);
5248 pState->auMTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auMTA[0])] = value;
5249
5250 return VINF_SUCCESS;
5251}
5252
5253/**
5254 * Read handler for Multicast Table Array registers.
5255 *
5256 * @returns VBox status code.
5257 *
5258 * @param pState The device state structure.
5259 * @param offset Register offset in memory-mapped frame.
5260 * @param index Register index in register array.
5261 * @thread EMT
5262 */
5263static int e1kRegReadMTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5264{
5265 AssertReturn(offset - s_e1kRegMap[index].offset< sizeof(pState->auMTA), VERR_DEV_IO_ERROR);
5266 *pu32Value = pState->auMTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auMTA[0])];
5267
5268 return VINF_SUCCESS;
5269}
5270
5271/**
5272 * Write handler for Receive Address registers.
5273 *
5274 * @param pState The device state structure.
5275 * @param offset Register offset in memory-mapped frame.
5276 * @param index Register index in register array.
5277 * @param value The value to store.
5278 * @thread EMT
5279 */
5280static int e1kRegWriteRA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5281{
5282 AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->aRecAddr.au32), VERR_DEV_IO_ERROR);
5283 pState->aRecAddr.au32[(offset - s_e1kRegMap[index].offset)/sizeof(pState->aRecAddr.au32[0])] = value;
5284
5285 return VINF_SUCCESS;
5286}
5287
5288/**
5289 * Read handler for Receive Address registers.
5290 *
5291 * @returns VBox status code.
5292 *
5293 * @param pState The device state structure.
5294 * @param offset Register offset in memory-mapped frame.
5295 * @param index Register index in register array.
5296 * @thread EMT
5297 */
5298static int e1kRegReadRA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5299{
5300 AssertReturn(offset - s_e1kRegMap[index].offset< sizeof(pState->aRecAddr.au32), VERR_DEV_IO_ERROR);
5301 *pu32Value = pState->aRecAddr.au32[(offset - s_e1kRegMap[index].offset)/sizeof(pState->aRecAddr.au32[0])];
5302
5303 return VINF_SUCCESS;
5304}
5305
5306/**
5307 * Write handler for VLAN Filter Table Array registers.
5308 *
5309 * @param pState The device state structure.
5310 * @param offset Register offset in memory-mapped frame.
5311 * @param index Register index in register array.
5312 * @param value The value to store.
5313 * @thread EMT
5314 */
5315static int e1kRegWriteVFTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5316{
5317 AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->auVFTA), VINF_SUCCESS);
5318 pState->auVFTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auVFTA[0])] = value;
5319
5320 return VINF_SUCCESS;
5321}
5322
5323/**
5324 * Read handler for VLAN Filter Table Array registers.
5325 *
5326 * @returns VBox status code.
5327 *
5328 * @param pState The device state structure.
5329 * @param offset Register offset in memory-mapped frame.
5330 * @param index Register index in register array.
5331 * @thread EMT
5332 */
5333static int e1kRegReadVFTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5334{
5335 AssertReturn(offset - s_e1kRegMap[index].offset< sizeof(pState->auVFTA), VERR_DEV_IO_ERROR);
5336 *pu32Value = pState->auVFTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auVFTA[0])];
5337
5338 return VINF_SUCCESS;
5339}
5340
5341/**
5342 * Read handler for unimplemented registers.
5343 *
5344 * Merely reports reads from unimplemented registers.
5345 *
5346 * @returns VBox status code.
5347 *
5348 * @param pState The device state structure.
5349 * @param offset Register offset in memory-mapped frame.
5350 * @param index Register index in register array.
5351 * @thread EMT
5352 */
5353
5354static int e1kRegReadUnimplemented(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5355{
5356 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5357 INSTANCE(pState), offset, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5358 *pu32Value = 0;
5359
5360 return VINF_SUCCESS;
5361}
5362
5363/**
5364 * Default register read handler with automatic clear operation.
5365 *
5366 * Retrieves the value of the register from the register array in the device state
5367 * structure, then clears all bits of the register (read-to-clear).
5368 *
5369 * @remarks Masking and shifting of the result is done by the caller (e1kRegRead),
5370 * so no mask parameter is needed here.
5371 *
5372 * @returns VBox status code.
5373 *
5374 * @param pState The device state structure.
5375 * @param offset Register offset in memory-mapped frame.
5376 * @param index Register index in register array.
5377 * @thread EMT
5378 */
5379
5380static int e1kRegReadAutoClear(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5381{
5382 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5383 int rc = e1kRegReadDefault(pState, offset, index, pu32Value);
5384 pState->auRegs[index] = 0;
5385
5386 return rc;
5387}
5388
5389/**
5390 * Default register read handler.
5391 *
5392 * Retrieves the value of the register from the register array in the device state
5393 * structure. Bits corresponding to 0s in the 'readable' mask always read as 0s.
5394 *
5395 * @remarks Masking and shifting of the result is done by the caller (e1kRegRead),
5396 * so no mask parameter is needed here.
5397 *
5398 * @returns VBox status code.
5399 *
5400 * @param pState The device state structure.
5401 * @param offset Register offset in memory-mapped frame.
5402 * @param index Register index in register array.
5403 * @thread EMT
5404 */
5405
5406static int e1kRegReadDefault(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5407{
5408 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5409 *pu32Value = pState->auRegs[index] & s_e1kRegMap[index].readable;
5410
5411 return VINF_SUCCESS;
5412}
5413
5414/**
5415 * Write handler for unimplemented registers.
5416 *
5417 * Merely reports writes to unimplemented registers.
5418 *
5419 * @param pState The device state structure.
5420 * @param offset Register offset in memory-mapped frame.
5421 * @param index Register index in register array.
5422 * @param value The value to store.
5423 * @thread EMT
5424 */
5425
5426 static int e1kRegWriteUnimplemented(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5427{
5428 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
5429 INSTANCE(pState), offset, value, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5430
5431 return VINF_SUCCESS;
5432}
5433
5434/**
5435 * Default register write handler.
5436 *
5437 * Stores the value to the register array in the device state structure. Only bits
5438 * corresponding to 1s in the 'writable' mask will be stored.
5439 *
5440 * @returns VBox status code.
5441 *
5442 * @param pState The device state structure.
5443 * @param offset Register offset in memory-mapped frame.
5444 * @param index Register index in register array.
5445 * @param value The value to store.
5447 * @thread EMT
5448 */
5449
5450static int e1kRegWriteDefault(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5451{
5452 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5453 pState->auRegs[index] = (value & s_e1kRegMap[index].writable) |
5454 (pState->auRegs[index] & ~s_e1kRegMap[index].writable);
5455
5456 return VINF_SUCCESS;
5457}
5458
5459/**
5460 * Search register table for matching register.
5461 *
5462 * @returns Index in the register table or -1 if not found.
5463 *
5464 * @param pState The device state structure.
5465 * @param uOffset Register offset in memory-mapped region.
5466 * @thread EMT
5467 */
5468static int e1kRegLookup(E1KSTATE *pState, uint32_t uOffset)
5469{
5470 int index;
5471
5472 for (index = 0; index < E1K_NUM_OF_REGS; index++)
5473 {
5474 if (s_e1kRegMap[index].offset <= uOffset && uOffset < s_e1kRegMap[index].offset + s_e1kRegMap[index].size)
5475 {
5476 return index;
5477 }
5478 }
5479
5480 return -1;
5481}
5482
5483/**
5484 * Handle register read operation.
5485 *
5486 * Looks up and calls appropriate handler.
5487 *
5488 * @returns VBox status code.
5489 *
5490 * @param pState The device state structure.
5491 * @param uOffset Register offset in memory-mapped frame.
5492 * @param pv Where to store the result.
5493 * @param cb Number of bytes to read.
5494 * @thread EMT
5495 */
5496static int e1kRegRead(E1KSTATE *pState, uint32_t uOffset, void *pv, uint32_t cb)
5497{
5498 uint32_t u32 = 0;
5499 uint32_t mask = 0;
5500 uint32_t shift;
5501 int rc = VINF_SUCCESS;
5502 int index = e1kRegLookup(pState, uOffset);
5503 const char *szInst = INSTANCE(pState);
5504#ifdef DEBUG
5505 char buf[9];
5506#endif
5507
5508 /*
5509 * From the spec:
5510 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5511 * double word) are ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5512 */
5513
5514 /*
5515 * To be able to handle byte and short-word accesses we convert them
5516 * to properly shifted 32-bit words and masks. The idea is
5517 * to keep register-specific handlers simple. Most accesses
5518 * will be 32-bit anyway.
5519 */
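 /*
 * For instance, a 2-byte read at register offset + 2 gives shift = 16 and
 * mask = 0xFFFF0000: the handler still returns the full 32-bit value, which
 * is then masked and shifted back into the low 16 bits below.
 */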
5520 switch (cb)
5521 {
5522 case 1: mask = 0x000000FF; break;
5523 case 2: mask = 0x0000FFFF; break;
5524 case 4: mask = 0xFFFFFFFF; break;
5525 default:
5526 return PDMDevHlpDBGFStop(pState->CTX_SUFF(pDevIns), RT_SRC_POS,
5527 "%s e1kRegRead: unsupported op size: offset=%#10x cb=%#10x\n",
5528 szInst, uOffset, cb);
5529 }
5530 if (index != -1)
5531 {
5532 if (s_e1kRegMap[index].readable)
5533 {
5534 /* Make the mask correspond to the bits we are about to read. */
5535 shift = (uOffset - s_e1kRegMap[index].offset) % sizeof(uint32_t) * 8;
5536 mask <<= shift;
5537 if (!mask)
5538 return PDMDevHlpDBGFStop(pState->CTX_SUFF(pDevIns), RT_SRC_POS,
5539 "%s e1kRegRead: Zero mask: offset=%#10x cb=%#10x\n",
5540 szInst, uOffset, cb);
5541 /*
5542 * Read it. Pass the mask so the handler knows what has to be read.
5543 * Mask out irrelevant bits.
5544 */
5545 //rc = e1kCsEnter(pState, VERR_SEM_BUSY, RT_SRC_POS);
5546 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5547 return rc;
5548 //pState->fDelayInts = false;
5549 //pState->iStatIntLost += pState->iStatIntLostOne;
5550 //pState->iStatIntLostOne = 0;
5551 rc = s_e1kRegMap[index].pfnRead(pState, uOffset & 0xFFFFFFFC, index, &u32);
5552 u32 &= mask;
5553 //e1kCsLeave(pState);
5554 E1kLog2(("%s At %08X read %s from %s (%s)\n",
5555 szInst, uOffset, e1kU32toHex(u32, mask, buf), s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5556 /* Shift back the result. */
5557 u32 >>= shift;
5558 }
5559 else
5560 {
5561 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
5562 szInst, uOffset, e1kU32toHex(u32, mask, buf), s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5563 }
5564 }
5565 else
5566 {
5567 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
5568 szInst, uOffset, e1kU32toHex(u32, mask, buf)));
5569 }
5570
5571 memcpy(pv, &u32, cb);
5572 return rc;
5573}
5574
5575/**
5576 * Handle register write operation.
5577 *
5578 * Looks up and calls appropriate handler.
5579 *
5580 * @returns VBox status code.
5581 *
5582 * @param pState The device state structure.
5583 * @param uOffset Register offset in memory-mapped frame.
5584 * @param pv Where to fetch the value.
5585 * @param cb Number of bytes to write.
5586 * @thread EMT
5587 */
5588static int e1kRegWrite(E1KSTATE *pState, uint32_t uOffset, void const *pv, unsigned cb)
5589{
5590 int rc = VINF_SUCCESS;
5591 int index = e1kRegLookup(pState, uOffset);
5592 uint32_t u32;
5593
5594 /*
5595 * From the spec:
5596 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5597 * double word) are ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5598 */
5599
5600 if (cb != 4)
5601 {
5602 E1kLog(("%s e1kRegWrite: Spec violation: unsupported op size: offset=%#10x cb=%#10x, ignored.\n",
5603 INSTANCE(pState), uOffset, cb));
5604 return VINF_SUCCESS;
5605 }
5606 if (uOffset & 3)
5607 {
5608 E1kLog(("%s e1kRegWrite: Spec violation: misaligned offset: %#10x cb=%#10x, ignored.\n",
5609 INSTANCE(pState), uOffset, cb));
5610 return VINF_SUCCESS;
5611 }
5612 u32 = *(uint32_t*)pv;
5613 if (index != -1)
5614 {
5615 if (s_e1kRegMap[index].writable)
5616 {
5617 /*
5618 * Write it. Pass the mask so the handler knows what has to be written.
5619 * Mask out irrelevant bits.
5620 */
5621 E1kLog2(("%s At %08X write %08X to %s (%s)\n",
5622 INSTANCE(pState), uOffset, u32, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5623 //rc = e1kCsEnter(pState, VERR_SEM_BUSY, RT_SRC_POS);
5624 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5625 return rc;
5626 //pState->fDelayInts = false;
5627 //pState->iStatIntLost += pState->iStatIntLostOne;
5628 //pState->iStatIntLostOne = 0;
5629 rc = s_e1kRegMap[index].pfnWrite(pState, uOffset, index, u32);
5630 //e1kCsLeave(pState);
5631 }
5632 else
5633 {
5634 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
5635 INSTANCE(pState), uOffset, u32, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5636 }
5637 }
5638 else
5639 {
5640 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
5641 INSTANCE(pState), uOffset, u32));
5642 }
5643 return rc;
5644}
5645
5646/**
5647 * I/O handler for memory-mapped read operations.
5648 *
5649 * @returns VBox status code.
5650 *
5651 * @param pDevIns The device instance.
5652 * @param pvUser User argument.
5653 * @param GCPhysAddr Physical address (in GC) where the read starts.
5654 * @param pv Where to store the result.
5655 * @param cb Number of bytes read.
5656 * @thread EMT
5657 */
5658PDMBOTHCBDECL(int) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser,
5659 RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
5660{
5661 NOREF(pvUser);
5662 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5663 uint32_t uOffset = GCPhysAddr - pState->addrMMReg;
5664 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatMMIORead), a);
5665
5666 Assert(uOffset < E1K_MM_SIZE);
5667
5668 int rc = e1kRegRead(pState, uOffset, pv, cb);
5669 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatMMIORead), a);
5670 return rc;
5671}
5672
5673/**
5674 * Memory mapped I/O Handler for write operations.
5675 *
5676 * @returns VBox status code.
5677 *
5678 * @param pDevIns The device instance.
5679 * @param pvUser User argument.
5680 * @param GCPhysAddr Physical address (in GC) where the write starts.
5681 * @param pv Where to fetch the value.
5682 * @param cb Number of bytes to write.
5683 * @thread EMT
5684 */
5685PDMBOTHCBDECL(int) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser,
5686 RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
5687{
5688 NOREF(pvUser);
5689 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5690 uint32_t uOffset = GCPhysAddr - pState->addrMMReg;
5691 int rc;
5692 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatMMIOWrite), a);
5693
5694 Assert(uOffset < E1K_MM_SIZE);
5695 if (cb != 4)
5696 {
5697 E1kLog(("%s e1kMMIOWrite: invalid op size: offset=%#10x cb=%#10x", pDevIns, uOffset, cb));
5698 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "e1kMMIOWrite: invalid op size: offset=%#10x cb=%#10x\n", uOffset, cb);
5699 }
5700 else
5701 rc = e1kRegWrite(pState, uOffset, pv, cb);
5702
5703 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatMMIOWrite), a);
5704 return rc;
5705}
5706
5707/**
5708 * Port I/O Handler for IN operations.
5709 *
5710 * @returns VBox status code.
5711 *
5712 * @param pDevIns The device instance.
5713 * @param pvUser Pointer to the device state structure.
5714 * @param port Port number used for the IN operation.
5715 * @param pu32 Where to store the result.
5716 * @param cb Number of bytes read.
5717 * @thread EMT
5718 */
5719PDMBOTHCBDECL(int) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser,
5720 RTIOPORT port, uint32_t *pu32, unsigned cb)
5721{
5722 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5723 int rc = VINF_SUCCESS;
5724 const char *szInst = INSTANCE(pState);
5725 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatIORead), a);
5726
5727 port -= pState->addrIOPort;
5728 if (cb != 4)
5729 {
5730 E1kLog(("%s e1kIOPortIn: invalid op size: port=%RTiop cb=%08x", szInst, port, cb));
5731 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: port=%RTiop cb=%08x\n", szInst, port, cb);
5732 }
5733 else
5734 switch (port)
5735 {
5736 case 0x00: /* IOADDR */
5737 *pu32 = pState->uSelectedReg;
5738 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", szInst, pState->uSelectedReg, *pu32));
5739 break;
5740 case 0x04: /* IODATA */
5741 rc = e1kRegRead(pState, pState->uSelectedReg, pu32, cb);
5742 /** @todo wrong return code triggers assertions in the debug build; fix please */
5743 if (rc == VINF_IOM_R3_MMIO_READ)
5744 rc = VINF_IOM_R3_IOPORT_READ;
5745
5746 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", szInst, pState->uSelectedReg, *pu32));
5747 break;
5748 default:
5749 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", szInst, port));
5750 //*pRC = VERR_IOM_IOPORT_UNUSED;
5751 }
5752
5753 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatIORead), a);
5754 return rc;
5755}
5756
5757
5758/**
5759 * Port I/O Handler for OUT operations.
5760 *
5761 * @returns VBox status code.
5762 *
5763 * @param pDevIns The device instance.
5764 * @param pvUser User argument.
5765 * @param port Port number used for the OUT operation.
5766 * @param u32 The value to output.
5767 * @param cb The value size in bytes.
5768 * @thread EMT
5769 */
5770PDMBOTHCBDECL(int) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser,
5771 RTIOPORT port, uint32_t u32, unsigned cb)
5772{
5773 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5774 int rc = VINF_SUCCESS;
5775 const char *szInst = INSTANCE(pState);
5776 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatIOWrite), a);
5777
5778 E1kLog2(("%s e1kIOPortOut: port=%RTiop value=%08x\n", szInst, port, u32));
5779 if (cb != 4)
5780 {
5781 E1kLog(("%s e1kIOPortOut: invalid op size: port=%RTiop cb=%08x\n", szInst, port, cb));
5782 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortOut: invalid op size: port=%RTiop cb=%08x\n", szInst, port, cb);
5783 }
5784 else
5785 {
5786 port -= pState->addrIOPort;
5787 switch (port)
5788 {
5789 case 0x00: /* IOADDR */
5790 pState->uSelectedReg = u32;
5791 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", szInst, pState->uSelectedReg));
5792 break;
5793 case 0x04: /* IODATA */
5794 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", szInst, pState->uSelectedReg, u32));
5795 rc = e1kRegWrite(pState, pState->uSelectedReg, &u32, cb);
5796 /** @todo wrong return code triggers assertions in the debug build; fix please */
5797 if (rc == VINF_IOM_R3_MMIO_WRITE)
5798 rc = VINF_IOM_R3_IOPORT_WRITE;
5799 break;
5800 default:
5801 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", szInst, port));
5802 /** @todo Do we need to return an error here?
5803 * bird: VINF_SUCCESS is fine for unhandled cases of an OUT handler. (If you're curious
5804 * about the guest code and a bit adventuresome, try rc = PDMDeviceDBGFStop(...);) */
5805 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "e1kIOPortOut: invalid port %#010x\n", port);
5806 }
5807 }
5808
5809 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatIOWrite), a);
5810 return rc;
5811}
5812
5813#ifdef IN_RING3
5814/**
5815 * Dump complete device state to log.
5816 *
5817 * @param pState Pointer to device state.
5818 */
5819static void e1kDumpState(E1KSTATE *pState)
5820{
5821 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
5822 {
5823 E1kLog2(("%s %8.8s = %08x\n", INSTANCE(pState),
5824 s_e1kRegMap[i].abbrev, pState->auRegs[i]));
5825 }
5826#ifdef E1K_INT_STATS
5827 LogRel(("%s Interrupt attempts: %d\n", INSTANCE(pState), pState->uStatIntTry));
5828 LogRel(("%s Interrupts raised : %d\n", INSTANCE(pState), pState->uStatInt));
5829 LogRel(("%s Interrupts lowered: %d\n", INSTANCE(pState), pState->uStatIntLower));
5830 LogRel(("%s Interrupts delayed: %d\n", INSTANCE(pState), pState->uStatIntDly));
5831 LogRel(("%s Disabled delayed: %d\n", INSTANCE(pState), pState->uStatDisDly));
5832 LogRel(("%s Interrupts skipped: %d\n", INSTANCE(pState), pState->uStatIntSkip));
5833 LogRel(("%s Masked interrupts : %d\n", INSTANCE(pState), pState->uStatIntMasked));
5834 LogRel(("%s Early interrupts : %d\n", INSTANCE(pState), pState->uStatIntEarly));
5835 LogRel(("%s Late interrupts : %d\n", INSTANCE(pState), pState->uStatIntLate));
5836 LogRel(("%s Lost interrupts : %d\n", INSTANCE(pState), pState->iStatIntLost));
5837 LogRel(("%s Interrupts by RX : %d\n", INSTANCE(pState), pState->uStatIntRx));
5838 LogRel(("%s Interrupts by TX : %d\n", INSTANCE(pState), pState->uStatIntTx));
5839 LogRel(("%s Interrupts by ICS : %d\n", INSTANCE(pState), pState->uStatIntICS));
5840 LogRel(("%s Interrupts by RDTR: %d\n", INSTANCE(pState), pState->uStatIntRDTR));
5841 LogRel(("%s Interrupts by RDMT: %d\n", INSTANCE(pState), pState->uStatIntRXDMT0));
5842 LogRel(("%s Interrupts by TXQE: %d\n", INSTANCE(pState), pState->uStatIntTXQE));
5843 LogRel(("%s TX int delay asked: %d\n", INSTANCE(pState), pState->uStatTxIDE));
5844 LogRel(("%s TX delayed: %d\n", INSTANCE(pState), pState->uStatTxDelayed));
5845 LogRel(("%s TX delay expired: %d\n", INSTANCE(pState), pState->uStatTxDelayExp));
5846 LogRel(("%s TX no report asked: %d\n", INSTANCE(pState), pState->uStatTxNoRS));
5847 LogRel(("%s TX abs timer expd : %d\n", INSTANCE(pState), pState->uStatTAD));
5848 LogRel(("%s TX int timer expd : %d\n", INSTANCE(pState), pState->uStatTID));
5849 LogRel(("%s RX abs timer expd : %d\n", INSTANCE(pState), pState->uStatRAD));
5850 LogRel(("%s RX int timer expd : %d\n", INSTANCE(pState), pState->uStatRID));
5851 LogRel(("%s TX CTX descriptors: %d\n", INSTANCE(pState), pState->uStatDescCtx));
5852 LogRel(("%s TX DAT descriptors: %d\n", INSTANCE(pState), pState->uStatDescDat));
5853 LogRel(("%s TX LEG descriptors: %d\n", INSTANCE(pState), pState->uStatDescLeg));
5854 LogRel(("%s Received frames : %d\n", INSTANCE(pState), pState->uStatRxFrm));
5855 LogRel(("%s Transmitted frames: %d\n", INSTANCE(pState), pState->uStatTxFrm));
5856 LogRel(("%s TX frames up to 1514: %d\n", INSTANCE(pState), pState->uStatTx1514));
5857 LogRel(("%s TX frames up to 2962: %d\n", INSTANCE(pState), pState->uStatTx2962));
5858 LogRel(("%s TX frames up to 4410: %d\n", INSTANCE(pState), pState->uStatTx4410));
5859 LogRel(("%s TX frames up to 5858: %d\n", INSTANCE(pState), pState->uStatTx5858));
5860 LogRel(("%s TX frames up to 7306: %d\n", INSTANCE(pState), pState->uStatTx7306));
5861 LogRel(("%s TX frames up to 8754: %d\n", INSTANCE(pState), pState->uStatTx8754));
5862 LogRel(("%s TX frames up to 16384: %d\n", INSTANCE(pState), pState->uStatTx16384));
5863 LogRel(("%s TX frames up to 32768: %d\n", INSTANCE(pState), pState->uStatTx32768));
5864 LogRel(("%s Larger TX frames : %d\n", INSTANCE(pState), pState->uStatTxLarge));
5865 LogRel(("%s Max TX Delay : %lld\n", INSTANCE(pState), pState->uStatMaxTxDelay));
5866#endif /* E1K_INT_STATS */
5867}
5868
5869/**
5870 * Map PCI I/O region.
5871 *
5872 * @return VBox status code.
5873 * @param pPciDev Pointer to PCI device. Use pPciDev->pDevIns to get the device instance.
5874 * @param iRegion The region number.
5875 * @param GCPhysAddress Physical address of the region. If enmType is PCI_ADDRESS_SPACE_IO, this is an
5876 * I/O port, else it's a physical address.
5877 * This address is *NOT* relative to pci_mem_base like earlier!
5878 * @param cb Region size.
5879 * @param enmType One of the PCI_ADDRESS_SPACE_* values.
5880 * @thread EMT
5881 */
5882static DECLCALLBACK(int) e1kMap(PPCIDEVICE pPciDev, int iRegion,
5883 RTGCPHYS GCPhysAddress, uint32_t cb, PCIADDRESSSPACE enmType)
5884{
5885 int rc;
5886 E1KSTATE *pState = PDMINS_2_DATA(pPciDev->pDevIns, E1KSTATE*);
5887
5888 switch (enmType)
5889 {
5890 case PCI_ADDRESS_SPACE_IO:
5891 pState->addrIOPort = (RTIOPORT)GCPhysAddress;
5892 rc = PDMDevHlpIOPortRegister(pPciDev->pDevIns, pState->addrIOPort, cb, 0,
5893 e1kIOPortOut, e1kIOPortIn, NULL, NULL, "E1000");
5894 if (RT_FAILURE(rc))
5895 break;
5896 if (pState->fR0Enabled)
5897 {
5898 rc = PDMDevHlpIOPortRegisterR0(pPciDev->pDevIns, pState->addrIOPort, cb, 0,
5899 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
5900 if (RT_FAILURE(rc))
5901 break;
5902 }
5903 if (pState->fGCEnabled)
5904 {
5905 rc = PDMDevHlpIOPortRegisterRC(pPciDev->pDevIns, pState->addrIOPort, cb, 0,
5906 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
5907 }
5908 break;
5909 case PCI_ADDRESS_SPACE_MEM:
5910 pState->addrMMReg = GCPhysAddress;
5911 rc = PDMDevHlpMMIORegister(pPciDev->pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
5912 IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
5913 e1kMMIOWrite, e1kMMIORead, "E1000");
 if (RT_FAILURE(rc))
 break;
5914 if (pState->fR0Enabled)
5915 {
5916 rc = PDMDevHlpMMIORegisterR0(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTR0PTR /*pvUser*/,
5917 "e1kMMIOWrite", "e1kMMIORead");
5918 if (RT_FAILURE(rc))
5919 break;
5920 }
5921 if (pState->fGCEnabled)
5922 {
5923 rc = PDMDevHlpMMIORegisterRC(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTRCPTR /*pvUser*/,
5924 "e1kMMIOWrite", "e1kMMIORead");
5925 }
5926 break;
5927 default:
5928 /* We should never get here */
5929 AssertMsgFailed(("Invalid PCI address space param in map callback"));
5930 rc = VERR_INTERNAL_ERROR;
5931 break;
5932 }
5933 return rc;
5934}
5935
5936/**
5937 * Check if the device can receive data now.
5938 * This must be called before the pfnReceive() method is called.
5939 *
5940 * @returns VINF_SUCCESS if receive buffer space is available, VERR_NET_NO_BUFFER_SPACE otherwise.
5941 * @param pState The device state structure.
5942 * @thread EMT
5943 */
5944static int e1kCanReceive(E1KSTATE *pState)
5945{
5946#ifndef E1K_WITH_RXD_CACHE
5947 size_t cb;
5948
5949 if (RT_UNLIKELY(e1kCsRxEnter(pState, VERR_SEM_BUSY) != VINF_SUCCESS))
5950 return VERR_NET_NO_BUFFER_SPACE;
5951
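 /*
 * Free buffer space is estimated from the ring indices. E.g. with a ring of
 * 8 descriptors (RDLEN / sizeof(E1KRXDESC) == 8), RDH=6 and RDT=2 leave
 * (8 - 6 + 2) = 4 descriptors, i.e. 4 * u16RxBSize bytes.
 */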
5952 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
5953 {
5954 E1KRXDESC desc;
5955 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
5956 &desc, sizeof(desc));
5957 if (desc.status.fDD)
5958 cb = 0;
5959 else
5960 cb = pState->u16RxBSize;
5961 }
5962 else if (RDH < RDT)
5963 cb = (RDT - RDH) * pState->u16RxBSize;
5964 else if (RDH > RDT)
5965 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pState->u16RxBSize;
5966 else
5967 {
5968 cb = 0;
5969 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
5970 }
5971 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
5972 INSTANCE(pState), RDH, RDT, RDLEN, pState->u16RxBSize, cb));
5973
5974 e1kCsRxLeave(pState);
5975 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
5976#else /* E1K_WITH_RXD_CACHE */
5977 int rc = VINF_SUCCESS;
5978
5979 if (RT_UNLIKELY(e1kCsRxEnter(pState, VERR_SEM_BUSY) != VINF_SUCCESS))
5980 return VERR_NET_NO_BUFFER_SPACE;
5981
5982 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
5983 {
5984 E1KRXDESC desc;
5985 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
5986 &desc, sizeof(desc));
5987 if (desc.status.fDD)
5988 rc = VERR_NET_NO_BUFFER_SPACE;
5989 }
5990 else if (e1kRxDIsCacheEmpty(pState) && RDH == RDT)
5991 {
5992 /* Cache is empty, so is the RX ring. */
5993 rc = VERR_NET_NO_BUFFER_SPACE;
5994 }
5995 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
5996 " u16RxBSize=%d rc=%Rrc\n", INSTANCE(pState),
5997 e1kRxDInCache(pState), RDH, RDT, RDLEN, pState->u16RxBSize, rc));
5998
5999 e1kCsRxLeave(pState);
6000 return rc;
6001#endif /* E1K_WITH_RXD_CACHE */
6002}
6003
6004/**
6005 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6006 */
6007static DECLCALLBACK(int) e1kNetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6008{
6009 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6010 int rc = e1kCanReceive(pState);
6011
6012 if (RT_SUCCESS(rc))
6013 return VINF_SUCCESS;
6014 if (RT_UNLIKELY(cMillies == 0))
6015 return VERR_NET_NO_BUFFER_SPACE;
6016
6017 rc = VERR_INTERRUPTED;
6018 ASMAtomicXchgBool(&pState->fMaybeOutOfSpace, true);
6019 STAM_PROFILE_START(&pState->StatRxOverflow, a);
6020 VMSTATE enmVMState;
6021 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pState->CTX_SUFF(pDevIns))) == VMSTATE_RUNNING
6022 || enmVMState == VMSTATE_RUNNING_LS))
6023 {
6024 int rc2 = e1kCanReceive(pState);
6025 if (RT_SUCCESS(rc2))
6026 {
6027 rc = VINF_SUCCESS;
6028 break;
6029 }
6030 E1kLogRel(("E1000 e1kNetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n",
6031 cMillies));
6032 E1kLog(("%s e1kNetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n",
6033 INSTANCE(pState), cMillies));
6034 RTSemEventWait(pState->hEventMoreRxDescAvail, cMillies);
6035 }
6036 STAM_PROFILE_STOP(&pState->StatRxOverflow, a);
6037 ASMAtomicXchgBool(&pState->fMaybeOutOfSpace, false);
6038
6039 return rc;
6040}
6041
6042
6043/**
6044 * Matches the packet addresses against the Receive Address table. Looks for
6045 * exact matches only.
6046 *
6047 * @returns true if address matches.
6048 * @param pState Pointer to the state structure.
6049 * @param pvBuf The ethernet packet.
6051 * @thread EMT
6052 */
6053static bool e1kPerfectMatch(E1KSTATE *pState, const void *pvBuf)
6054{
6055 for (unsigned i = 0; i < RT_ELEMENTS(pState->aRecAddr.array); i++)
6056 {
6057 E1KRAELEM* ra = pState->aRecAddr.array + i;
6058
6059 /* Valid address? */
6060 if (ra->ctl & RA_CTL_AV)
6061 {
6062 Assert((ra->ctl & RA_CTL_AS) < 2);
6063 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6064 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6065 // INSTANCE(pState), pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6066 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6067 /*
6068 * Address Select:
6069 * 00b = Destination address
6070 * 01b = Source address
6071 * 10b = Reserved
6072 * 11b = Reserved
6073 * Since ethernet header is (DA, SA, len) we can use address
6074 * select as index.
6075 */
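 /* E.g. with AS=00b the first 6 bytes of the frame (the destination address) are compared with ra->addr. */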
6076 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6077 ra->addr, sizeof(ra->addr)) == 0)
6078 return true;
6079 }
6080 }
6081
6082 return false;
6083}
6084
6085/**
6086 * Matches the packet addresses against Multicast Table Array.
6087 *
6088 * @remarks This is an imperfect match since it does not match the exact address
6089 * but only a subset of the address bits.
6090 *
6091 * @returns true if address matches.
6092 * @param pState Pointer to the state structure.
6093 * @param pvBuf The ethernet packet.
6095 * @thread EMT
6096 */
6097static bool e1kImperfectMatch(E1KSTATE *pState, const void *pvBuf)
6098{
6099 /* Get bits 32..47 of destination address */
6100 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6101
6102 unsigned offset = GET_BITS(RCTL, MO);
6103 /*
6104 * offset means:
6105 * 00b = bits 36..47
6106 * 01b = bits 35..46
6107 * 10b = bits 34..45
6108 * 11b = bits 32..43
6109 */
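 /* E.g. with MO=00b the 12 bits 36..47 of the destination address (u16Bit >> 4,
 * masked to 12 bits below) select one of the 4096 bits in the multicast table. */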
6110 if (offset < 3)
6111 u16Bit = u16Bit >> (4 - offset);
6112 return ASMBitTest(pState->auMTA, u16Bit & 0xFFF);
6113}
6114
6115/**
6116 * Determines if the packet is to be delivered to the upper layer. The following
6117 * filters are supported:
6118 * - Exact Unicast/Multicast
6119 * - Promiscuous Unicast/Multicast
6120 * - Multicast
6121 * - VLAN
6122 *
6123 * @returns true if packet is intended for this node.
6124 * @param pState Pointer to the state structure.
6125 * @param pvBuf The ethernet packet.
6126 * @param cb Number of bytes available in the packet.
6127 * @param pStatus Bit field to store status bits.
6128 * @thread EMT
6129 */
6130static bool e1kAddressFilter(E1KSTATE *pState, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6131{
6132 Assert(cb > 14);
6133 /* Assume that we fail to pass exact filter. */
6134 pStatus->fPIF = false;
6135 pStatus->fVP = false;
6136 /* Discard oversized packets */
6137 if (cb > E1K_MAX_RX_PKT_SIZE)
6138 {
6139 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6140 INSTANCE(pState), cb, E1K_MAX_RX_PKT_SIZE));
6141 E1K_INC_CNT32(ROC);
6142 return false;
6143 }
6144 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6145 {
6146 /* When long packet reception is disabled packets over 1522 are discarded */
6147 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6148 INSTANCE(pState), cb));
6149 E1K_INC_CNT32(ROC);
6150 return false;
6151 }
6152
6153 uint16_t *u16Ptr = (uint16_t*)pvBuf;
6154 /* Compare TPID with VLAN Ether Type */
6155 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6156 {
6157 pStatus->fVP = true;
6158 /* Is VLAN filtering enabled? */
6159 if (RCTL & RCTL_VFE)
6160 {
6161 /* It is 802.1q packet indeed, let's filter by VID */
6162 if (RCTL & RCTL_CFIEN)
6163 {
6164 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", INSTANCE(pState),
6165 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6166 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6167 !!(RCTL & RCTL_CFI)));
6168 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6169 {
6170 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6171 INSTANCE(pState), E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6172 return false;
6173 }
6174 }
6175 else
6176 E1kLog3(("%s VLAN filter: VLAN=%d\n", INSTANCE(pState),
6177 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6178 if (!ASMBitTest(pState->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6179 {
6180 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6181 INSTANCE(pState), E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6182 return false;
6183 }
6184 }
6185 }
6186 /* Broadcast filtering */
6187 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6188 return true;
6189 E1kLog2(("%s Packet filter: not a broadcast\n", INSTANCE(pState)));
6190 if (e1kIsMulticast(pvBuf))
6191 {
6192 /* Is multicast promiscuous enabled? */
6193 if (RCTL & RCTL_MPE)
6194 return true;
6195 E1kLog2(("%s Packet filter: no promiscuous multicast\n", INSTANCE(pState)));
6196 /* Try perfect matches first */
6197 if (e1kPerfectMatch(pState, pvBuf))
6198 {
6199 pStatus->fPIF = true;
6200 return true;
6201 }
6202 E1kLog2(("%s Packet filter: no perfect match\n", INSTANCE(pState)));
6203 if (e1kImperfectMatch(pState, pvBuf))
6204 return true;
6205 E1kLog2(("%s Packet filter: no imperfect match\n", INSTANCE(pState)));
6206 }
6207 else {
6208 /* Is unicast promiscuous enabled? */
6209 if (RCTL & RCTL_UPE)
6210 return true;
6211 E1kLog2(("%s Packet filter: no promiscuous unicast\n", INSTANCE(pState)));
6212 if (e1kPerfectMatch(pState, pvBuf))
6213 {
6214 pStatus->fPIF = true;
6215 return true;
6216 }
6217 E1kLog2(("%s Packet filter: no perfect match\n", INSTANCE(pState)));
6218 }
6219 E1kLog2(("%s Packet filter: packet discarded\n", INSTANCE(pState)));
6220 return false;
6221}
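/*
 * Illustrative example (not exhaustive): an incoming 802.1Q-tagged multicast
 * frame with RCTL.MPE=0 that misses the Receive Address table but hits a
 * Multicast Table Array bit is accepted with pStatus->fVP set and
 * pStatus->fPIF clear, assuming VLAN filtering is disabled or the VID passes
 * the VFTA check.
 */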
6222
6223/**
6224 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6225 */
6226static DECLCALLBACK(int) e1kNetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6227{
6228 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6229 int rc = VINF_SUCCESS;
6230
6231 /*
6232 * Drop packets if the VM is not running yet/anymore.
6233 */
6234 VMSTATE enmVMState = PDMDevHlpVMState(STATE_TO_DEVINS(pState));
6235 if ( enmVMState != VMSTATE_RUNNING
6236 && enmVMState != VMSTATE_RUNNING_LS)
6237 {
6238 E1kLog(("%s Dropping incoming packet as VM is not running.\n", INSTANCE(pState)));
6239 return VINF_SUCCESS;
6240 }
6241
6242 /* Discard incoming packets in locked state */
6243 if (!(RCTL & RCTL_EN) || pState->fLocked || !(STATUS & STATUS_LU))
6244 {
6245 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", INSTANCE(pState)));
6246 return VINF_SUCCESS;
6247 }
6248
6249 STAM_PROFILE_ADV_START(&pState->StatReceive, a);
6250
6251 //if (!e1kCsEnter(pState, RT_SRC_POS))
6252 // return VERR_PERMISSION_DENIED;
6253
6254 e1kPacketDump(pState, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6255
6256 /* Update stats */
6257 if (RT_LIKELY(e1kCsEnter(pState, VERR_SEM_BUSY) == VINF_SUCCESS))
6258 {
6259 E1K_INC_CNT32(TPR);
6260 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6261 e1kCsLeave(pState);
6262 }
6263 STAM_PROFILE_ADV_START(&pState->StatReceiveFilter, a);
6264 E1KRXDST status;
6265 RT_ZERO(status);
6266 bool fPassed = e1kAddressFilter(pState, pvBuf, cb, &status);
6267 STAM_PROFILE_ADV_STOP(&pState->StatReceiveFilter, a);
6268 if (fPassed)
6269 {
6270 rc = e1kHandleRxPacket(pState, pvBuf, cb, status);
6271 }
6272 //e1kCsLeave(pState);
6273 STAM_PROFILE_ADV_STOP(&pState->StatReceive, a);
6274
6275 return rc;
6276}
6277
6278/**
6279 * Gets the pointer to the status LED of a unit.
6280 *
6281 * @returns VBox status code.
6282 * @param pInterface Pointer to the interface structure.
6283 * @param iLUN The unit which status LED we desire.
6284 * @param ppLed Where to store the LED pointer.
6285 * @thread EMT
6286 */
6287static DECLCALLBACK(int) e1kQueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6288{
6289 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, ILeds);
6290 int rc = VERR_PDM_LUN_NOT_FOUND;
6291
6292 if (iLUN == 0)
6293 {
6294 *ppLed = &pState->led;
6295 rc = VINF_SUCCESS;
6296 }
6297 return rc;
6298}
6299
6300/**
6301 * Gets the current Media Access Control (MAC) address.
6302 *
6303 * @returns VBox status code.
6304 * @param pInterface Pointer to the interface structure containing the called function pointer.
6305 * @param pMac Where to store the MAC address.
6306 * @thread EMT
6307 */
6308static DECLCALLBACK(int) e1kGetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6309{
6310 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6311 pState->eeprom.getMac(pMac);
6312 return VINF_SUCCESS;
6313}
6314
6315
6316/**
6317 * Gets the current link state.
6318 *
6319 * @returns The current link state.
6320 * @param pInterface Pointer to the interface structure containing the called function pointer.
6321 * @thread EMT
6322 */
6323static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kGetLinkState(PPDMINETWORKCONFIG pInterface)
6324{
6325 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6326 if (STATUS & STATUS_LU)
6327 return PDMNETWORKLINKSTATE_UP;
6328 return PDMNETWORKLINKSTATE_DOWN;
6329}
6330
6331
6332/**
6333 * Sets the new link state.
6334 *
6335 * @returns VBox status code.
6336 * @param pInterface Pointer to the interface structure containing the called function pointer.
6337 * @param enmState The new link state
6338 * @thread EMT
6339 */
6340static DECLCALLBACK(int) e1kSetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6341{
6342 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6343 bool fOldUp = !!(STATUS & STATUS_LU);
6344 bool fNewUp = enmState == PDMNETWORKLINKSTATE_UP;
6345
6346 if ( fNewUp != fOldUp
6347 || (!fNewUp && pState->fCableConnected)) /* old state was connected but STATUS not
6348 * yet written by guest */
6349 {
6350 if (fNewUp)
6351 {
6352 E1kLog(("%s Link will be up in approximately %d secs\n",
6353 INSTANCE(pState), pState->cMsLinkUpDelay / 1000));
6354 pState->fCableConnected = true;
6355 STATUS &= ~STATUS_LU;
6356 Phy::setLinkStatus(&pState->phy, false);
6357 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
6358 /* Restore the link back in 5 seconds (by default). */
6359 e1kBringLinkUpDelayed(pState);
6360 }
6361 else
6362 {
6363 E1kLog(("%s Link is down\n", INSTANCE(pState)));
6364 pState->fCableConnected = false;
6365 STATUS &= ~STATUS_LU;
6366 Phy::setLinkStatus(&pState->phy, false);
6367 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
6368 }
6369 if (pState->pDrvR3)
6370 pState->pDrvR3->pfnNotifyLinkChanged(pState->pDrvR3, enmState);
6371 }
6372 return VINF_SUCCESS;
6373}
6374
6375/**
6376 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6377 */
6378static DECLCALLBACK(void *) e1kQueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6379{
6380 E1KSTATE *pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, IBase);
6381 Assert(&pThis->IBase == pInterface);
6382
6383 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
6384 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThis->INetworkDown);
6385 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThis->INetworkConfig);
6386 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThis->ILeds);
6387 return NULL;
6388}
6389
6390/**
6391 * Saves the configuration.
6392 *
6393 * @param pState The E1K state.
6394 * @param pSSM The handle to the saved state.
6395 */
6396static void e1kSaveConfig(E1KSTATE *pState, PSSMHANDLE pSSM)
6397{
6398 SSMR3PutMem(pSSM, &pState->macConfigured, sizeof(pState->macConfigured));
6399 SSMR3PutU32(pSSM, pState->eChip);
6400}
6401
6402/**
6403 * Live save - save basic configuration.
6404 *
6405 * @returns VBox status code.
6406 * @param pDevIns The device instance.
6407 * @param pSSM The handle to the saved state.
6408 * @param uPass The data pass.
6409 */
6410static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
6411{
6412 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6413 e1kSaveConfig(pState, pSSM);
6414 return VINF_SSM_DONT_CALL_AGAIN;
6415}
6416
6417/**
6418 * Prepares for state saving.
6419 *
6420 * @returns VBox status code.
6421 * @param pDevIns The device instance.
6422 * @param pSSM The handle to the saved state.
6423 */
6424static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6425{
6426 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6427
6428 int rc = e1kCsEnter(pState, VERR_SEM_BUSY);
6429 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6430 return rc;
6431 e1kCsLeave(pState);
6432 return VINF_SUCCESS;
6433#if 0
6434 /* 1) Prevent all threads from modifying the state and memory */
6435 //pState->fLocked = true;
6436 /* 2) Cancel all timers */
6437#ifdef E1K_TX_DELAY
6438 e1kCancelTimer(pState, pState->CTX_SUFF(pTXDTimer));
6439#endif /* E1K_TX_DELAY */
6440#ifdef E1K_USE_TX_TIMERS
6441 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
6442#ifndef E1K_NO_TAD
6443 e1kCancelTimer(pState, pState->CTX_SUFF(pTADTimer));
6444#endif /* E1K_NO_TAD */
6445#endif /* E1K_USE_TX_TIMERS */
6446#ifdef E1K_USE_RX_TIMERS
6447 e1kCancelTimer(pState, pState->CTX_SUFF(pRIDTimer));
6448 e1kCancelTimer(pState, pState->CTX_SUFF(pRADTimer));
6449#endif /* E1K_USE_RX_TIMERS */
6450 e1kCancelTimer(pState, pState->CTX_SUFF(pIntTimer));
6451 /* 3) Did I forget anything? */
6452 E1kLog(("%s Locked\n", INSTANCE(pState)));
6453 return VINF_SUCCESS;
6454#endif
6455}
6456
6457
6458/**
6459 * Saves the state of device.
6460 *
6461 * @returns VBox status code.
6462 * @param pDevIns The device instance.
6463 * @param pSSM The handle to the saved state.
6464 */
6465static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6466{
6467 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6468
6469 e1kSaveConfig(pState, pSSM);
6470 pState->eeprom.save(pSSM);
6471 e1kDumpState(pState);
6472 SSMR3PutMem(pSSM, pState->auRegs, sizeof(pState->auRegs));
6473 SSMR3PutBool(pSSM, pState->fIntRaised);
6474 Phy::saveState(pSSM, &pState->phy);
6475 SSMR3PutU32(pSSM, pState->uSelectedReg);
6476 SSMR3PutMem(pSSM, pState->auMTA, sizeof(pState->auMTA));
6477 SSMR3PutMem(pSSM, &pState->aRecAddr, sizeof(pState->aRecAddr));
6478 SSMR3PutMem(pSSM, pState->auVFTA, sizeof(pState->auVFTA));
6479 SSMR3PutU64(pSSM, pState->u64AckedAt);
6480 SSMR3PutU16(pSSM, pState->u16RxBSize);
6481 //SSMR3PutBool(pSSM, pState->fDelayInts);
6482 //SSMR3PutBool(pSSM, pState->fIntMaskUsed);
6483 SSMR3PutU16(pSSM, pState->u16TxPktLen);
6484/** @todo State w.r.t. the TSE buffer is incomplete, so there is little point
6485 * in saving it actually. */
6486 SSMR3PutMem(pSSM, pState->aTxPacketFallback, pState->u16TxPktLen);
6487 SSMR3PutBool(pSSM, pState->fIPcsum);
6488 SSMR3PutBool(pSSM, pState->fTCPcsum);
6489 SSMR3PutMem(pSSM, &pState->contextTSE, sizeof(pState->contextTSE));
6490 SSMR3PutMem(pSSM, &pState->contextNormal, sizeof(pState->contextNormal));
6491 SSMR3PutBool(pSSM, pState->fVTag);
6492 SSMR3PutU16(pSSM, pState->u16VTagTCI);
6493#ifdef E1K_WITH_TXD_CACHE
6494 SSMR3PutU8(pSSM, pState->nTxDFetched);
6495 SSMR3PutMem(pSSM, pState->aTxDescriptors,
6496 pState->nTxDFetched * sizeof(pState->aTxDescriptors[0]));
6497#endif /* E1K_WITH_TXD_CACHE */
6498/**@todo GSO requires some more state here. */
6499 E1kLog(("%s State has been saved\n", INSTANCE(pState)));
6500 return VINF_SUCCESS;
6501}
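/*
 * Note: e1kLoadExec() below must read these fields back in exactly the same
 * order and with the same sizes; any change to this layout normally requires
 * bumping E1K_SAVEDSTATE_VERSION.
 */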
6502
6503#if 0
6504/**
6505 * Cleanup after saving.
6506 *
6507 * @returns VBox status code.
6508 * @param pDevIns The device instance.
6509 * @param pSSM The handle to the saved state.
6510 */
6511static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6512{
6513 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6514
6515 /* If VM is being powered off unlocking will result in assertions in PGM */
6516 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
6517 pState->fLocked = false;
6518 else
6519 E1kLog(("%s VM is not running -- remain locked\n", INSTANCE(pState)));
6520 E1kLog(("%s Unlocked\n", INSTANCE(pState)));
6521 return VINF_SUCCESS;
6522}
6523#endif
6524
6525/**
6526 * Prepares for loading the state by serializing with other threads via the device critical section.
6527 *
6528 * @returns VBox status code.
6529 * @param pDevIns The device instance.
6530 * @param pSSM The handle to the saved state.
6531 */
6532static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6533{
6534 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6535
6536 int rc = e1kCsEnter(pState, VERR_SEM_BUSY);
6537 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6538 return rc;
6539 e1kCsLeave(pState);
6540 return VINF_SUCCESS;
6541}
6542
6543/**
6544 * Restore previously saved state of device.
6545 *
6546 * @returns VBox status code.
6547 * @param pDevIns The device instance.
6548 * @param pSSM The handle to the saved state.
6549 * @param uVersion The data unit version number.
6550 * @param uPass The data pass.
6551 */
6552static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
6553{
6554 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6555 int rc;
6556
6557 if ( uVersion != E1K_SAVEDSTATE_VERSION
6558#ifdef E1K_WITH_TXD_CACHE
6559 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
6560#endif /* E1K_WITH_TXD_CACHE */
6561 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
6562 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
6563 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
6564
6565 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
6566 || uPass != SSM_PASS_FINAL)
6567 {
6568 /* config checks */
6569 RTMAC macConfigured;
6570 rc = SSMR3GetMem(pSSM, &macConfigured, sizeof(macConfigured));
6571 AssertRCReturn(rc, rc);
6572 if ( memcmp(&macConfigured, &pState->macConfigured, sizeof(macConfigured))
6573 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
6574 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", INSTANCE(pState), &pState->macConfigured, &macConfigured));
6575
6576 E1KCHIP eChip;
6577 rc = SSMR3GetU32(pSSM, &eChip);
6578 AssertRCReturn(rc, rc);
6579 if (eChip != pState->eChip)
6580 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pState->eChip, eChip);
6581 }
6582
6583 if (uPass == SSM_PASS_FINAL)
6584 {
6585 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
6586 {
6587 rc = pState->eeprom.load(pSSM);
6588 AssertRCReturn(rc, rc);
6589 }
6590 /* the state */
6591 SSMR3GetMem(pSSM, &pState->auRegs, sizeof(pState->auRegs));
6592 SSMR3GetBool(pSSM, &pState->fIntRaised);
6593 /** @todo: PHY could be made a separate device with its own versioning */
6594 Phy::loadState(pSSM, &pState->phy);
6595 SSMR3GetU32(pSSM, &pState->uSelectedReg);
6596 SSMR3GetMem(pSSM, &pState->auMTA, sizeof(pState->auMTA));
6597 SSMR3GetMem(pSSM, &pState->aRecAddr, sizeof(pState->aRecAddr));
6598 SSMR3GetMem(pSSM, &pState->auVFTA, sizeof(pState->auVFTA));
6599 SSMR3GetU64(pSSM, &pState->u64AckedAt);
6600 SSMR3GetU16(pSSM, &pState->u16RxBSize);
6601 //SSMR3GetBool(pSSM, pState->fDelayInts);
6602 //SSMR3GetBool(pSSM, pState->fIntMaskUsed);
6603 SSMR3GetU16(pSSM, &pState->u16TxPktLen);
6604 SSMR3GetMem(pSSM, &pState->aTxPacketFallback[0], pState->u16TxPktLen);
6605 SSMR3GetBool(pSSM, &pState->fIPcsum);
6606 SSMR3GetBool(pSSM, &pState->fTCPcsum);
6607 SSMR3GetMem(pSSM, &pState->contextTSE, sizeof(pState->contextTSE));
6608 rc = SSMR3GetMem(pSSM, &pState->contextNormal, sizeof(pState->contextNormal));
6609 AssertRCReturn(rc, rc);
6610 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
6611 {
6612 SSMR3GetBool(pSSM, &pState->fVTag);
6613 rc = SSMR3GetU16(pSSM, &pState->u16VTagTCI);
6614 AssertRCReturn(rc, rc);
6615 }
6616 else
6617 {
6618 pState->fVTag = false;
6619 pState->u16VTagTCI = 0;
6620 }
6621#ifdef E1K_WITH_TXD_CACHE
6622 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
6623 {
6624 rc = SSMR3GetU8(pSSM, &pState->nTxDFetched);
6625 AssertRCReturn(rc, rc);
6626 SSMR3GetMem(pSSM, pState->aTxDescriptors,
6627 pState->nTxDFetched * sizeof(pState->aTxDescriptors[0]));
6628 }
6629 else
6630 pState->nTxDFetched = 0;
6631 /*
6632 * @todo: Perhaps we should not store TXD cache as the entries can be
6633 * simply fetched again from guest's memory. Or can't they?
6634 */
6635#endif /* E1K_WITH_TXD_CACHE */
6636#ifdef E1K_WITH_RXD_CACHE
6637 /*
6638 * There is no point in storing the RX descriptor cache in the saved
6639 * state, we just need to make sure it is empty.
6640 */
6641 pState->iRxDCurrent = pState->nRxDFetched = 0;
6642#endif /* E1K_WITH_RXD_CACHE */
6643 /* derived state */
6644 e1kSetupGsoCtx(&pState->GsoCtx, &pState->contextTSE);
6645
6646 E1kLog(("%s State has been restored\n", INSTANCE(pState)));
6647 e1kDumpState(pState);
6648 }
6649 return VINF_SUCCESS;
6650}
6651
6652/**
6653 * Link status adjustments after loading.
6654 *
6655 * @returns VBox status code.
6656 * @param pDevIns The device instance.
6657 * @param pSSM The handle to the saved state.
6658 */
6659static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6660{
6661 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6662
6663 /* Update promiscuous mode */
6664 if (pState->pDrvR3)
6665 pState->pDrvR3->pfnSetPromiscuousMode(pState->pDrvR3,
6666 !!(RCTL & (RCTL_UPE | RCTL_MPE)));
6667
6668 /*
6669 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
6670 * passed to us. We go through all this stuff if the link was up and we
6671 * weren't teleported.
6672 */
6673 if ( (STATUS & STATUS_LU)
6674 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
6675 && pState->cMsLinkUpDelay)
6676 {
6677 E1kLog(("%s Link is down temporarily\n", INSTANCE(pState)));
6678 STATUS &= ~STATUS_LU;
6679 Phy::setLinkStatus(&pState->phy, false);
6680 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
6681 /* Restore the link back in five seconds (default). */
6682 e1kBringLinkUpDelayed(pState);
6683 }
6684 return VINF_SUCCESS;
6685}
6686
6687
6688/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
6689
6690/**
6691 * Detach notification.
6692 *
6693 * One port on the network card has been disconnected from the network.
6694 *
6695 * @param pDevIns The device instance.
6696 * @param iLUN The logical unit which is being detached.
6697 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
6698 */
6699static DECLCALLBACK(void) e1kDetach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
6700{
6701 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6702 Log(("%s e1kDetach:\n", INSTANCE(pState)));
6703
6704 AssertLogRelReturnVoid(iLUN == 0);
6705
6706 PDMCritSectEnter(&pState->cs, VERR_SEM_BUSY);
6707
6708 /** @todo: r=pritesh still need to check if i missed
6709 * to clean something in this function
6710 */
6711
6712 /*
6713 * Zero some important members.
6714 */
6715 pState->pDrvBase = NULL;
6716 pState->pDrvR3 = NULL;
6717 pState->pDrvR0 = NIL_RTR0PTR;
6718 pState->pDrvRC = NIL_RTRCPTR;
6719
6720 PDMCritSectLeave(&pState->cs);
6721}
6722
6723/**
6724 * Attach notification.
6725 *
6726 * One port on the network card has been connected to a network.
6727 *
6728 * @returns VBox status code.
6729 * @param pDevIns The device instance.
6730 * @param iLUN The logical unit which is being attached.
6731 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
6732 *
6733 * @remarks This code path is not used during construction.
6734 */
6735static DECLCALLBACK(int) e1kAttach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
6736{
6737 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6738 LogFlow(("%s e1kAttach:\n", INSTANCE(pState)));
6739
6740 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
6741
6742 PDMCritSectEnter(&pState->cs, VERR_SEM_BUSY);
6743
6744 /*
6745 * Attach the driver.
6746 */
6747 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pState->IBase, &pState->pDrvBase, "Network Port");
6748 if (RT_SUCCESS(rc))
6749 {
6750 if (rc == VINF_NAT_DNS)
6751 {
6752#ifdef RT_OS_LINUX
6753 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
6754 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
6755#else
6756 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
6757 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
6758#endif
6759 }
6760 pState->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMINETWORKUP);
6761 AssertMsgStmt(pState->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
6762 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
6763 if (RT_SUCCESS(rc))
6764 {
6765 PPDMIBASER0 pBaseR0 = PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMIBASER0);
6766 pState->pDrvR0 = pBaseR0 ? pBaseR0->pfnQueryInterface(pBaseR0, PDMINETWORKUP_IID) : NIL_RTR0PTR;
6767
6768 PPDMIBASERC pBaseRC = PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMIBASERC);
6769 pState->pDrvRC = pBaseRC ? pBaseRC->pfnQueryInterface(pBaseRC, PDMINETWORKUP_IID) : NIL_RTRCPTR;
6770 }
6771 }
6772 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
6773 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
6774 {
6775 /* This should never happen because this function is not called
6776 * if there is no driver to attach! */
6777 Log(("%s No attached driver!\n", INSTANCE(pState)));
6778 }
6779
6780 /*
6781 * Temporarily set the link down if it was up, so that the guest
6782 * will know that we have changed the configuration of the
6783 * network card.
6784 */
6785 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
6786 {
6787 STATUS &= ~STATUS_LU;
6788 Phy::setLinkStatus(&pState->phy, false);
6789 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
6790 /* Restore the link back in 5 seconds (default). */
6791 e1kBringLinkUpDelayed(pState);
6792 }
6793
6794 PDMCritSectLeave(&pState->cs);
6795 return rc;
6796
6797}
6798
6799/**
6800 * @copydoc FNPDMDEVPOWEROFF
6801 */
6802static DECLCALLBACK(void) e1kPowerOff(PPDMDEVINS pDevIns)
6803{
6804 /* Poke thread waiting for buffer space. */
6805 e1kWakeupReceive(pDevIns);
6806}
6807
6808/**
6809 * @copydoc FNPDMDEVRESET
6810 */
6811static DECLCALLBACK(void) e1kReset(PPDMDEVINS pDevIns)
6812{
6813 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6814#ifdef E1K_TX_DELAY
6815 e1kCancelTimer(pState, pState->CTX_SUFF(pTXDTimer));
6816#endif /* E1K_TX_DELAY */
6817 e1kCancelTimer(pState, pState->CTX_SUFF(pIntTimer));
6818 e1kCancelTimer(pState, pState->CTX_SUFF(pLUTimer));
6819 e1kXmitFreeBuf(pState);
6820 pState->u16TxPktLen = 0;
6821 pState->fIPcsum = false;
6822 pState->fTCPcsum = false;
6823 pState->fIntMaskUsed = false;
6824 pState->fDelayInts = false;
6825 pState->fLocked = false;
6826 pState->u64AckedAt = 0;
6827 e1kHardReset(pState);
6828}
6829
6830/**
6831 * @copydoc FNPDMDEVSUSPEND
6832 */
6833static DECLCALLBACK(void) e1kSuspend(PPDMDEVINS pDevIns)
6834{
6835 /* Poke thread waiting for buffer space. */
6836 e1kWakeupReceive(pDevIns);
6837}
6838
6839/**
6840 * Device relocation callback.
6841 *
6842 * When this callback is called, the device instance data (and, if the
6843 * device has a GC component, that component) is being relocated and/or the
6844 * selectors have been changed. The device must use this chance to perform
6845 * the necessary pointer relocations and data updates.
6846 *
6847 * Before the GC code is executed the first time, this function will be
6848 * called with a 0 delta so GC pointer calculations can be done in one place.
6849 *
6850 * @param pDevIns Pointer to the device instance.
6851 * @param offDelta The relocation delta relative to the old location.
6852 *
6853 * @remark A relocation CANNOT fail.
6854 */
6855static DECLCALLBACK(void) e1kRelocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
6856{
6857 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6858 pState->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
6859 pState->pTxQueueRC = PDMQueueRCPtr(pState->pTxQueueR3);
6860 pState->pCanRxQueueRC = PDMQueueRCPtr(pState->pCanRxQueueR3);
6861#ifdef E1K_USE_RX_TIMERS
6862 pState->pRIDTimerRC = TMTimerRCPtr(pState->pRIDTimerR3);
6863 pState->pRADTimerRC = TMTimerRCPtr(pState->pRADTimerR3);
6864#endif /* E1K_USE_RX_TIMERS */
6865#ifdef E1K_USE_TX_TIMERS
6866 pState->pTIDTimerRC = TMTimerRCPtr(pState->pTIDTimerR3);
6867# ifndef E1K_NO_TAD
6868 pState->pTADTimerRC = TMTimerRCPtr(pState->pTADTimerR3);
6869# endif /* E1K_NO_TAD */
6870#endif /* E1K_USE_TX_TIMERS */
6871#ifdef E1K_TX_DELAY
6872 pState->pTXDTimerRC = TMTimerRCPtr(pState->pTXDTimerR3);
6873#endif /* E1K_TX_DELAY */
6874 pState->pIntTimerRC = TMTimerRCPtr(pState->pIntTimerR3);
6875 pState->pLUTimerRC = TMTimerRCPtr(pState->pLUTimerR3);
6876}
6877
6878/**
6879 * Destruct a device instance.
6880 *
6881 * We need to free non-VM resources only.
6882 *
6883 * @returns VBox status.
6884 * @param pDevIns The device instance data.
6885 * @thread EMT
6886 */
6887static DECLCALLBACK(int) e1kDestruct(PPDMDEVINS pDevIns)
6888{
6889 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6890 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
6891
6892 e1kDumpState(pState);
6893 E1kLog(("%s Destroying instance\n", INSTANCE(pState)));
6894 if (PDMCritSectIsInitialized(&pState->cs))
6895 {
6896 if (pState->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
6897 {
6898 RTSemEventSignal(pState->hEventMoreRxDescAvail);
6899 RTSemEventDestroy(pState->hEventMoreRxDescAvail);
6900 pState->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
6901 }
6902#ifdef E1K_WITH_TX_CS
6903 PDMR3CritSectDelete(&pState->csTx);
6904#endif /* E1K_WITH_TX_CS */
6905 PDMR3CritSectDelete(&pState->csRx);
6906 PDMR3CritSectDelete(&pState->cs);
6907 }
6908 return VINF_SUCCESS;
6909}
6910
6911/**
6912 * Dump receive descriptor to debugger info buffer.
6913 *
6914 * @param pState The device state structure.
6915 * @param pHlp The output helpers.
6916 * @param addr Physical address of the descriptor in guest context.
6917 * @param pDesc Pointer to the descriptor.
6918 */
6919static void e1kRDescInfo(E1KSTATE* pState, PCDBGFINFOHLP pHlp, RTGCPHYS addr, E1KRXDESC* pDesc)
6920{
6921 pHlp->pfnPrintf(pHlp, "%RGp: Address=%16LX Length=%04X Csum=%04X\n",
6922 addr, pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
6923 pHlp->pfnPrintf(pHlp, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
6924 pDesc->status.fPIF ? "PIF" : "pif",
6925 pDesc->status.fIPCS ? "IPCS" : "ipcs",
6926 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
6927 pDesc->status.fVP ? "VP" : "vp",
6928 pDesc->status.fIXSM ? "IXSM" : "ixsm",
6929 pDesc->status.fEOP ? "EOP" : "eop",
6930 pDesc->status.fDD ? "DD" : "dd",
6931 pDesc->status.fRXE ? "RXE" : "rxe",
6932 pDesc->status.fIPE ? "IPE" : "ipe",
6933 pDesc->status.fTCPE ? "TCPE" : "tcpe",
6934 pDesc->status.fCE ? "CE" : "ce",
6935 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
6936 E1K_SPEC_VLAN(pDesc->status.u16Special),
6937 E1K_SPEC_PRI(pDesc->status.u16Special));
6938}
6939
6940/**
6941 * Dump transmit descriptor to debugger info buffer.
6942 *
6943 * @param pState The device state structure.
6944 * @param pHlp The output helpers.
6945 * @param addr Physical address of the descriptor in guest context.
6946 * @param pDesc Pointer to descriptor union.
6947 */
6948static void e1kTDescInfo(E1KSTATE* pState, PCDBGFINFOHLP pHlp, RTGCPHYS addr, E1KTXDESC* pDesc)
6949{
6950 switch (e1kGetDescType(pDesc))
6951 {
6952 case E1K_DTYP_CONTEXT:
6953 pHlp->pfnPrintf(pHlp, "%RGp: Type=Context\n", addr);
6954 pHlp->pfnPrintf(pHlp, " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
6955 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
6956 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE);
6957 pHlp->pfnPrintf(pHlp, " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
6958 pDesc->context.dw2.fIDE ? " IDE":"",
6959 pDesc->context.dw2.fRS ? " RS" :"",
6960 pDesc->context.dw2.fTSE ? " TSE":"",
6961 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
6962 pDesc->context.dw2.fTCP ? "TCP":"UDP",
6963 pDesc->context.dw2.u20PAYLEN,
6964 pDesc->context.dw3.u8HDRLEN,
6965 pDesc->context.dw3.u16MSS,
6966 pDesc->context.dw3.fDD?"DD":"");
6967 break;
6968 case E1K_DTYP_DATA:
6969 pHlp->pfnPrintf(pHlp, "%RGp: Type=Data Address=%16LX DTALEN=%05X\n",
6970 addr,
6971 pDesc->data.u64BufAddr,
6972 pDesc->data.cmd.u20DTALEN);
6973 pHlp->pfnPrintf(pHlp, " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
6974 pDesc->data.cmd.fIDE ? " IDE" :"",
6975 pDesc->data.cmd.fVLE ? " VLE" :"",
6976 pDesc->data.cmd.fRPS ? " RPS" :"",
6977 pDesc->data.cmd.fRS ? " RS" :"",
6978 pDesc->data.cmd.fTSE ? " TSE" :"",
6979 pDesc->data.cmd.fIFCS? " IFCS":"",
6980 pDesc->data.cmd.fEOP ? " EOP" :"",
6981 pDesc->data.dw3.fDD ? " DD" :"",
6982 pDesc->data.dw3.fEC ? " EC" :"",
6983 pDesc->data.dw3.fLC ? " LC" :"",
6984 pDesc->data.dw3.fTXSM? " TXSM":"",
6985 pDesc->data.dw3.fIXSM? " IXSM":"",
6986 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
6987 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
6988 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
6989 break;
6990 case E1K_DTYP_LEGACY:
6991 pHlp->pfnPrintf(pHlp, "%RGp: Type=Legacy Address=%16LX DTALEN=%05X\n",
6992 addr,
6993 pDesc->data.u64BufAddr,
6994 pDesc->legacy.cmd.u16Length);
6995 pHlp->pfnPrintf(pHlp, " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
6996 pDesc->legacy.cmd.fIDE ? " IDE" :"",
6997 pDesc->legacy.cmd.fVLE ? " VLE" :"",
6998 pDesc->legacy.cmd.fRPS ? " RPS" :"",
6999 pDesc->legacy.cmd.fRS ? " RS" :"",
7000 pDesc->legacy.cmd.fIC ? " IC" :"",
7001 pDesc->legacy.cmd.fIFCS? " IFCS":"",
7002 pDesc->legacy.cmd.fEOP ? " EOP" :"",
7003 pDesc->legacy.dw3.fDD ? " DD" :"",
7004 pDesc->legacy.dw3.fEC ? " EC" :"",
7005 pDesc->legacy.dw3.fLC ? " LC" :"",
7006 pDesc->legacy.cmd.u8CSO,
7007 pDesc->legacy.dw3.u8CSS,
7008 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
7009 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
7010 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
7011 break;
7012 default:
7013 pHlp->pfnPrintf(pHlp, "%RGp: Invalid Transmit Descriptor\n", addr);
7014 break;
7015 }
7016}
7017
7018/**
7019 * Status info callback.
7020 *
7021 * @param pDevIns The device instance.
7022 * @param pHlp The output helpers.
7023 * @param pszArgs The arguments.
7024 */
7025static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
7026{
7027 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7028 unsigned i;
7029 // bool fRcvRing = false;
7030 // bool fXmtRing = false;
7031
7032 /*
7033 * Parse args.
7034 if (pszArgs)
7035 {
7036 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
7037 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
7038 }
7039 */
7040
7041 /*
7042 * Show info.
7043 */
7044 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%RTiop mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7045 pDevIns->iInstance, pState->addrIOPort, pState->addrMMReg,
7046 &pState->macConfigured, g_Chips[pState->eChip].pcszName,
7047 pState->fGCEnabled ? " GC" : "", pState->fR0Enabled ? " R0" : "");
7048
7049 e1kCsEnter(pState, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
7050
7051 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7052 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", s_e1kRegMap[i].abbrev, pState->auRegs[i]);
7053
7054 for (i = 0; i < RT_ELEMENTS(pState->aRecAddr.array); i++)
7055 {
7056 E1KRAELEM* ra = pState->aRecAddr.array + i;
7057 if (ra->ctl & RA_CTL_AV)
7058 {
7059 const char *pcszTmp;
7060 switch (ra->ctl & RA_CTL_AS)
7061 {
7062 case 0: pcszTmp = "DST"; break;
7063 case 1: pcszTmp = "SRC"; break;
7064 default: pcszTmp = "reserved";
7065 }
7066 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7067 }
7068 }
7069 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7070 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7071 for (i = 0; i < cDescs; ++i)
7072 {
7073 E1KRXDESC desc;
7074 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7075 &desc, sizeof(desc));
7076 e1kRDescInfo(pState, pHlp, e1kDescAddr(RDBAH, RDBAL, i), &desc);
7077 }
7078 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7079 pState->iRxDCurrent, RDH, pState->nRxDFetched, E1K_RXD_CACHE_SIZE);
7080 int rdh = RDH;
7081 for (i = pState->iRxDCurrent; i < pState->nRxDFetched; ++i)
7082 e1kRDescInfo(pState, pHlp, e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs), &pState->aRxDescriptors[i]);
7083
7084 cDescs = TDLEN / sizeof(E1KTXDESC);
7085 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7086 for (i = 0; i < cDescs; ++i)
7087 {
7088 E1KTXDESC desc;
7089 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7090 &desc, sizeof(desc));
7091 e1kTDescInfo(pState, pHlp, e1kDescAddr(TDBAH, TDBAL, i), &desc);
7092 }
7093
7094
7095#ifdef E1K_INT_STATS
7096 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pState->uStatIntTry);
7097 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pState->uStatInt);
7098 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pState->uStatIntLower);
7099 pHlp->pfnPrintf(pHlp, "Interrupts delayed: %d\n", pState->uStatIntDly);
7100 pHlp->pfnPrintf(pHlp, "Disabled delayed: %d\n", pState->uStatDisDly);
7101 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pState->uStatIntSkip);
7102 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pState->uStatIntMasked);
7103 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pState->uStatIntEarly);
7104 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pState->uStatIntLate);
7105 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pState->iStatIntLost);
7106 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pState->uStatIntRx);
7107 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pState->uStatIntTx);
7108 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pState->uStatIntICS);
7109 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pState->uStatIntRDTR);
7110 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pState->uStatIntRXDMT0);
7111 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pState->uStatIntTXQE);
7112 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pState->uStatTxIDE);
7113 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pState->uStatTxDelayed);
7114 pHlp->pfnPrintf(pHlp, "TX delayed expired: %d\n", pState->uStatTxDelayExp);
7115 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pState->uStatTxNoRS);
7116 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pState->uStatTAD);
7117 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pState->uStatTID);
7118 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pState->uStatRAD);
7119 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pState->uStatRID);
7120 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pState->uStatDescCtx);
7121 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pState->uStatDescDat);
7122 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pState->uStatDescLeg);
7123 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pState->uStatRxFrm);
7124 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pState->uStatTxFrm);
7125 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pState->uStatTx1514);
7126 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pState->uStatTx2962);
7127 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pState->uStatTx4410);
7128 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pState->uStatTx5858);
7129 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pState->uStatTx7306);
7130 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pState->uStatTx8754);
7131 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pState->uStatTx16384);
7132 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pState->uStatTx32768);
7133 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pState->uStatTxLarge);
7134#endif /* E1K_INT_STATS */
7135
7136 e1kCsLeave(pState);
7137}
7138
7139/**
7140 * Sets 8-bit register in PCI configuration space.
7141 * @param refPciDev The PCI device.
7142 * @param uOffset The register offset.
7143 * @param u8Value The value to store in the register.
7144 * @thread EMT
7145 */
7146DECLINLINE(void) e1kPCICfgSetU8(PCIDEVICE& refPciDev, uint32_t uOffset, uint8_t u8Value)
7147{
7148 Assert(uOffset < sizeof(refPciDev.config));
7149 refPciDev.config[uOffset] = u8Value;
7150}
7151
7152/**
7153 * Sets 16-bit register in PCI configuration space.
7154 * @param refPciDev The PCI device.
7155 * @param uOffset The register offset.
7156 * @param u16Value The value to store in the register.
7157 * @thread EMT
7158 */
7159DECLINLINE(void) e1kPCICfgSetU16(PCIDEVICE& refPciDev, uint32_t uOffset, uint16_t u16Value)
7160{
7161 Assert(uOffset+sizeof(u16Value) <= sizeof(refPciDev.config));
7162 *(uint16_t*)&refPciDev.config[uOffset] = u16Value;
7163}
7164
7165/**
7166 * Sets 32-bit register in PCI configuration space.
7167 * @param refPciDev The PCI device.
7168 * @param uOffset The register offset.
7169 * @param u32Value The value to store in the register.
7170 * @thread EMT
7171 */
7172DECLINLINE(void) e1kPCICfgSetU32(PCIDEVICE& refPciDev, uint32_t uOffset, uint32_t u32Value)
7173{
7174 Assert(uOffset+sizeof(u32Value) <= sizeof(refPciDev.config));
7175 *(uint32_t*)&refPciDev.config[uOffset] = u32Value;
7176}
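/*
 * These helpers poke values directly into the device's shadow PCI
 * configuration space; they are used by e1kConfigurePCI() below to establish
 * the initial (power-on) register contents.
 */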
7177
7178/**
7179 * Set PCI configuration space registers.
7180 *
7181 * @param pci Reference to PCI device structure.
 * @param eChip The emulated chip type, used to select the PCI identification registers.
7182 * @thread EMT
7183 */
7184static DECLCALLBACK(void) e1kConfigurePCI(PCIDEVICE& pci, E1KCHIP eChip)
7185{
7186 Assert(eChip < RT_ELEMENTS(g_Chips));
7187 /* Configure PCI Device, assume 32-bit mode ******************************/
7188 PCIDevSetVendorId(&pci, g_Chips[eChip].uPCIVendorId);
7189 PCIDevSetDeviceId(&pci, g_Chips[eChip].uPCIDeviceId);
7190 e1kPCICfgSetU16(pci, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_Chips[eChip].uPCISubsystemVendorId);
7191 e1kPCICfgSetU16(pci, VBOX_PCI_SUBSYSTEM_ID, g_Chips[eChip].uPCISubsystemId);
7192
7193 e1kPCICfgSetU16(pci, VBOX_PCI_COMMAND, 0x0000);
7194 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7195 e1kPCICfgSetU16(pci, VBOX_PCI_STATUS,
7196 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7197 /* Stepping A2 */
7198 e1kPCICfgSetU8( pci, VBOX_PCI_REVISION_ID, 0x02);
7199 /* Ethernet adapter */
7200 e1kPCICfgSetU8( pci, VBOX_PCI_CLASS_PROG, 0x00);
7201 e1kPCICfgSetU16(pci, VBOX_PCI_CLASS_DEVICE, 0x0200);
7202 /* normal single function Ethernet controller */
7203 e1kPCICfgSetU8( pci, VBOX_PCI_HEADER_TYPE, 0x00);
7204 /* Memory Register Base Address */
7205 e1kPCICfgSetU32(pci, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7206 /* Memory Flash Base Address */
7207 e1kPCICfgSetU32(pci, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7208 /* IO Register Base Address */
7209 e1kPCICfgSetU32(pci, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7210 /* Expansion ROM Base Address */
7211 e1kPCICfgSetU32(pci, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7212 /* Capabilities Pointer */
7213 e1kPCICfgSetU8( pci, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7214 /* Interrupt Pin: INTA# */
7215 e1kPCICfgSetU8( pci, VBOX_PCI_INTERRUPT_PIN, 0x01);
7216 /* Max_Lat/Min_Gnt: very high priority and time slice */
7217 e1kPCICfgSetU8( pci, VBOX_PCI_MIN_GNT, 0xFF);
7218 e1kPCICfgSetU8( pci, VBOX_PCI_MAX_LAT, 0x00);
7219
7220 /* PCI Power Management Registers ****************************************/
7221 /* Capability ID: PCI Power Management Registers */
7222 e1kPCICfgSetU8( pci, 0xDC, VBOX_PCI_CAP_ID_PM);
7223 /* Next Item Pointer: PCI-X */
7224 e1kPCICfgSetU8( pci, 0xDC + 1, 0xE4);
7225 /* Power Management Capabilities: PM disabled, DSI */
7226 e1kPCICfgSetU16(pci, 0xDC + 2,
7227 0x0002 | VBOX_PCI_PM_CAP_DSI);
7228 /* Power Management Control / Status Register: PM disabled */
7229 e1kPCICfgSetU16(pci, 0xDC + 4, 0x0000);
7230 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7231 e1kPCICfgSetU8( pci, 0xDC + 6, 0x00);
7232 /* Data Register: PM disabled, always 0 */
7233 e1kPCICfgSetU8( pci, 0xDC + 7, 0x00);
7234
7235 /* PCI-X Configuration Registers *****************************************/
7236 /* Capability ID: PCI-X Configuration Registers */
7237 e1kPCICfgSetU8( pci, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7238#ifdef E1K_WITH_MSI
7239 e1kPCICfgSetU8( pci, 0xE4 + 1, 0x80);
7240#else
7241 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7242 e1kPCICfgSetU8( pci, 0xE4 + 1, 0x00);
7243#endif
7244 /* PCI-X Command: Enable Relaxed Ordering */
7245 e1kPCICfgSetU16(pci, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7246 /* PCI-X Status: 32-bit, 66MHz*/
7247 /// @todo: is this value really correct? fff8 doesn't look like actual PCI address
7248 e1kPCICfgSetU32(pci, 0xE4 + 4, 0x0040FFF8);
7249}
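/*
 * The resulting capability chain starts at VBOX_PCI_CAPABILITY_LIST (0x34),
 * which points to the PM capability at 0xDC, which in turn points to PCI-X at
 * 0xE4. With E1K_WITH_MSI the PCI-X next pointer leads to the MSI capability
 * at 0x80, registered later in e1kConstruct(); otherwise the chain ends there.
 */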
7250
7251/**
7252 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7253 */
7254static DECLCALLBACK(int) e1kConstruct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7255{
7256 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7257 int rc;
7258 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7259
7260 /* Init handles and log related stuff. */
7261 RTStrPrintf(pState->szInstance, sizeof(pState->szInstance), "E1000#%d", iInstance);
7262 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", INSTANCE(pState), sizeof(E1KRXDESC)));
7263 pState->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7264
7265 /*
7266 * Validate configuration.
7267 */
7268 if (!CFGMR3AreValuesValid(pCfg, "MAC\0" "CableConnected\0" "AdapterType\0"
7269 "LineSpeed\0" "GCEnabled\0" "R0Enabled\0"
7270 "EthernetCRC\0" "LinkUpDelay\0"))
7271 return PDMDEV_SET_ERROR(pDevIns, VERR_PDM_DEVINS_UNKNOWN_CFG_VALUES,
7272 N_("Invalid configuration for E1000 device"));
7273
7274 /** @todo: LineSpeed unused! */
7275
7276 pState->fR0Enabled = true;
7277 pState->fGCEnabled = true;
7278 pState->fEthernetCRC = true;
7279
7280 /* Get config params */
7281 rc = CFGMR3QueryBytes(pCfg, "MAC", pState->macConfigured.au8,
7282 sizeof(pState->macConfigured.au8));
7283 if (RT_FAILURE(rc))
7284 return PDMDEV_SET_ERROR(pDevIns, rc,
7285 N_("Configuration error: Failed to get MAC address"));
7286 rc = CFGMR3QueryBool(pCfg, "CableConnected", &pState->fCableConnected);
7287 if (RT_FAILURE(rc))
7288 return PDMDEV_SET_ERROR(pDevIns, rc,
7289 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7290 rc = CFGMR3QueryU32(pCfg, "AdapterType", (uint32_t*)&pState->eChip);
7291 if (RT_FAILURE(rc))
7292 return PDMDEV_SET_ERROR(pDevIns, rc,
7293 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7294 Assert(pState->eChip <= E1K_CHIP_82545EM);
7295 rc = CFGMR3QueryBoolDef(pCfg, "GCEnabled", &pState->fGCEnabled, true);
7296 if (RT_FAILURE(rc))
7297 return PDMDEV_SET_ERROR(pDevIns, rc,
7298 N_("Configuration error: Failed to get the value of 'GCEnabled'"));
7299
7300 rc = CFGMR3QueryBoolDef(pCfg, "R0Enabled", &pState->fR0Enabled, true);
7301 if (RT_FAILURE(rc))
7302 return PDMDEV_SET_ERROR(pDevIns, rc,
7303 N_("Configuration error: Failed to get the value of 'R0Enabled'"));
7304
7305 rc = CFGMR3QueryBoolDef(pCfg, "EthernetCRC", &pState->fEthernetCRC, true);
7306 if (RT_FAILURE(rc))
7307 return PDMDEV_SET_ERROR(pDevIns, rc,
7308 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7309 rc = CFGMR3QueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pState->cMsLinkUpDelay, 5000); /* ms */
7310 if (RT_FAILURE(rc))
7311 return PDMDEV_SET_ERROR(pDevIns, rc,
7312 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
7313 Assert(pState->cMsLinkUpDelay <= 300000); /* less than 5 minutes */
7314 if (pState->cMsLinkUpDelay > 5000)
7315 {
7316 LogRel(("%s WARNING! Link up delay is set to %u seconds!\n",
7317 INSTANCE(pState), pState->cMsLinkUpDelay / 1000));
7318 }
7319 else if (pState->cMsLinkUpDelay == 0)
7320 {
7321 LogRel(("%s WARNING! Link up delay is disabled!\n", INSTANCE(pState)));
7322 }
7323
7324 E1kLog(("%s Chip=%s LinkUpDelay=%ums EthernetCRC=%s\n", INSTANCE(pState),
7325 g_Chips[pState->eChip].pcszName, pState->cMsLinkUpDelay,
7326 pState->fEthernetCRC ? "on" : "off"));
7327
7328 /* Initialize state structure */
7329 pState->pDevInsR3 = pDevIns;
7330 pState->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
7331 pState->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7332 pState->u16TxPktLen = 0;
7333 pState->fIPcsum = false;
7334 pState->fTCPcsum = false;
7335 pState->fIntMaskUsed = false;
7336 pState->fDelayInts = false;
7337 pState->fLocked = false;
7338 pState->u64AckedAt = 0;
7339 pState->led.u32Magic = PDMLED_MAGIC;
7340 pState->u32PktNo = 1;
7341
7342#ifdef E1K_INT_STATS
7343 pState->uStatInt = 0;
7344 pState->uStatIntTry = 0;
7345 pState->uStatIntLower = 0;
7346 pState->uStatIntDly = 0;
7347 pState->uStatDisDly = 0;
7348 pState->iStatIntLost = 0;
7349 pState->iStatIntLostOne = 0;
7350 pState->uStatIntLate = 0;
7351 pState->uStatIntMasked = 0;
7352 pState->uStatIntEarly = 0;
7353 pState->uStatIntRx = 0;
7354 pState->uStatIntTx = 0;
7355 pState->uStatIntICS = 0;
7356 pState->uStatIntRDTR = 0;
7357 pState->uStatIntRXDMT0 = 0;
7358 pState->uStatIntTXQE = 0;
7359 pState->uStatTxNoRS = 0;
7360 pState->uStatTxIDE = 0;
7361 pState->uStatTxDelayed = 0;
7362 pState->uStatTxDelayExp = 0;
7363 pState->uStatTAD = 0;
7364 pState->uStatTID = 0;
7365 pState->uStatRAD = 0;
7366 pState->uStatRID = 0;
7367 pState->uStatRxFrm = 0;
7368 pState->uStatTxFrm = 0;
7369 pState->uStatDescCtx = 0;
7370 pState->uStatDescDat = 0;
7371 pState->uStatDescLeg = 0;
7372 pState->uStatTx1514 = 0;
7373 pState->uStatTx2962 = 0;
7374 pState->uStatTx4410 = 0;
7375 pState->uStatTx5858 = 0;
7376 pState->uStatTx7306 = 0;
7377 pState->uStatTx8754 = 0;
7378 pState->uStatTx16384 = 0;
7379 pState->uStatTx32768 = 0;
7380 pState->uStatTxLarge = 0;
7381 pState->uStatMaxTxDelay = 0;
7382#endif /* E1K_INT_STATS */
7383
7384 /* Interfaces */
7385 pState->IBase.pfnQueryInterface = e1kQueryInterface;
7386
7387 pState->INetworkDown.pfnWaitReceiveAvail = e1kNetworkDown_WaitReceiveAvail;
7388 pState->INetworkDown.pfnReceive = e1kNetworkDown_Receive;
7389 pState->INetworkDown.pfnXmitPending = e1kNetworkDown_XmitPending;
7390
7391 pState->ILeds.pfnQueryStatusLed = e1kQueryStatusLed;
7392
7393 pState->INetworkConfig.pfnGetMac = e1kGetMac;
7394 pState->INetworkConfig.pfnGetLinkState = e1kGetLinkState;
7395 pState->INetworkConfig.pfnSetLinkState = e1kSetLinkState;
7396
7397 /* Initialize the EEPROM */
7398 pState->eeprom.init(pState->macConfigured);
7399
7400 /* Initialize internal PHY */
7401 Phy::init(&pState->phy, iInstance,
7402 pState->eChip == E1K_CHIP_82543GC?
7403 PHY_EPID_M881000 : PHY_EPID_M881011);
7404 Phy::setLinkStatus(&pState->phy, pState->fCableConnected);
7405
7406 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
7407 NULL, e1kLiveExec, NULL,
7408 e1kSavePrep, e1kSaveExec, NULL,
7409 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
7410 if (RT_FAILURE(rc))
7411 return rc;
7412
7413 /* Initialize critical section */
7414 rc = PDMDevHlpCritSectInit(pDevIns, &pState->cs, RT_SRC_POS, "%s", pState->szInstance);
7415 if (RT_FAILURE(rc))
7416 return rc;
7417 rc = PDMDevHlpCritSectInit(pDevIns, &pState->csRx, RT_SRC_POS, "%sRX", pState->szInstance);
7418 if (RT_FAILURE(rc))
7419 return rc;
7420#ifdef E1K_WITH_TX_CS
7421 rc = PDMDevHlpCritSectInit(pDevIns, &pState->csTx, RT_SRC_POS, "%sTX", pState->szInstance);
7422 if (RT_FAILURE(rc))
7423 return rc;
7424#endif /* E1K_WITH_TX_CS */
7425
7426 /* Set PCI config registers */
7427 e1kConfigurePCI(pState->pciDevice, pState->eChip);
7428 /* Register PCI device */
7429 rc = PDMDevHlpPCIRegister(pDevIns, &pState->pciDevice);
7430 if (RT_FAILURE(rc))
7431 return rc;
7432
7433#ifdef E1K_WITH_MSI
7434 PDMMSIREG aMsiReg;
7435 aMsiReg.cMsiVectors = 1;
7436 aMsiReg.iMsiCapOffset = 0x80;
7437 aMsiReg.iMsiNextOffset = 0x0;
7438 aMsiReg.fMsi64bit = false;
7439 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg);
7440 AssertRC(rc);
7441 if (RT_FAILURE (rc))
7442 return rc;
7443#endif
7444
7445
7446 /* Map our registers to memory space (region 0, see e1kConfigurePCI)*/
7447 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, E1K_MM_SIZE,
7448 PCI_ADDRESS_SPACE_MEM, e1kMap);
7449 if (RT_FAILURE(rc))
7450 return rc;
7451 /* Map our registers to IO space (region 2, see e1kConfigurePCI) */
7452 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, E1K_IOPORT_SIZE,
7453 PCI_ADDRESS_SPACE_IO, e1kMap);
7454 if (RT_FAILURE(rc))
7455 return rc;
7456
7457 /* Create transmit queue */
7458 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7459 e1kTxQueueConsumer, true, "E1000-Xmit", &pState->pTxQueueR3);
7460 if (RT_FAILURE(rc))
7461 return rc;
7462 pState->pTxQueueR0 = PDMQueueR0Ptr(pState->pTxQueueR3);
7463 pState->pTxQueueRC = PDMQueueRCPtr(pState->pTxQueueR3);
7464
7465 /* Create the RX notifier signaller. */
7466 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7467 e1kCanRxQueueConsumer, true, "E1000-Rcv", &pState->pCanRxQueueR3);
7468 if (RT_FAILURE(rc))
7469 return rc;
7470 pState->pCanRxQueueR0 = PDMQueueR0Ptr(pState->pCanRxQueueR3);
7471 pState->pCanRxQueueRC = PDMQueueRCPtr(pState->pCanRxQueueR3);
7472
7473#ifdef E1K_TX_DELAY
7474 /* Create Transmit Delay Timer */
7475 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxDelayTimer, pState,
7476 TMTIMER_FLAGS_NO_CRIT_SECT,
7477 "E1000 Transmit Delay Timer", &pState->pTXDTimerR3);
7478 if (RT_FAILURE(rc))
7479 return rc;
7480 pState->pTXDTimerR0 = TMTimerR0Ptr(pState->pTXDTimerR3);
7481 pState->pTXDTimerRC = TMTimerRCPtr(pState->pTXDTimerR3);
7482 TMR3TimerSetCritSect(pState->pTXDTimerR3, &pState->csTx);
7483#endif /* E1K_TX_DELAY */
7484
7485#ifdef E1K_USE_TX_TIMERS
7486 /* Create Transmit Interrupt Delay Timer */
7487 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxIntDelayTimer, pState,
7488 TMTIMER_FLAGS_NO_CRIT_SECT,
7489 "E1000 Transmit Interrupt Delay Timer", &pState->pTIDTimerR3);
7490 if (RT_FAILURE(rc))
7491 return rc;
7492 pState->pTIDTimerR0 = TMTimerR0Ptr(pState->pTIDTimerR3);
7493 pState->pTIDTimerRC = TMTimerRCPtr(pState->pTIDTimerR3);
7494
7495# ifndef E1K_NO_TAD
7496 /* Create Transmit Absolute Delay Timer */
7497 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxAbsDelayTimer, pState,
7498 TMTIMER_FLAGS_NO_CRIT_SECT,
7499 "E1000 Transmit Absolute Delay Timer", &pState->pTADTimerR3);
7500 if (RT_FAILURE(rc))
7501 return rc;
7502 pState->pTADTimerR0 = TMTimerR0Ptr(pState->pTADTimerR3);
7503 pState->pTADTimerRC = TMTimerRCPtr(pState->pTADTimerR3);
7504# endif /* E1K_NO_TAD */
7505#endif /* E1K_USE_TX_TIMERS */
7506
7507#ifdef E1K_USE_RX_TIMERS
7508 /* Create Receive Interrupt Delay Timer */
7509 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxIntDelayTimer, pState,
7510 TMTIMER_FLAGS_NO_CRIT_SECT,
7511 "E1000 Receive Interrupt Delay Timer", &pState->pRIDTimerR3);
7512 if (RT_FAILURE(rc))
7513 return rc;
7514 pState->pRIDTimerR0 = TMTimerR0Ptr(pState->pRIDTimerR3);
7515 pState->pRIDTimerRC = TMTimerRCPtr(pState->pRIDTimerR3);
7516
7517 /* Create Receive Absolute Delay Timer */
7518 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxAbsDelayTimer, pState,
7519 TMTIMER_FLAGS_NO_CRIT_SECT,
7520 "E1000 Receive Absolute Delay Timer", &pState->pRADTimerR3);
7521 if (RT_FAILURE(rc))
7522 return rc;
7523 pState->pRADTimerR0 = TMTimerR0Ptr(pState->pRADTimerR3);
7524 pState->pRADTimerRC = TMTimerRCPtr(pState->pRADTimerR3);
7525#endif /* E1K_USE_RX_TIMERS */
7526
7527 /* Create Late Interrupt Timer */
7528 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLateIntTimer, pState,
7529 TMTIMER_FLAGS_NO_CRIT_SECT,
7530 "E1000 Late Interrupt Timer", &pState->pIntTimerR3);
7531 if (RT_FAILURE(rc))
7532 return rc;
7533 pState->pIntTimerR0 = TMTimerR0Ptr(pState->pIntTimerR3);
7534 pState->pIntTimerRC = TMTimerRCPtr(pState->pIntTimerR3);
7535
7536 /* Create Link Up Timer */
7537 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLinkUpTimer, pState,
7538 TMTIMER_FLAGS_NO_CRIT_SECT,
7539 "E1000 Link Up Timer", &pState->pLUTimerR3);
7540 if (RT_FAILURE(rc))
7541 return rc;
7542 pState->pLUTimerR0 = TMTimerR0Ptr(pState->pLUTimerR3);
7543 pState->pLUTimerRC = TMTimerRCPtr(pState->pLUTimerR3);
7544
7545 /* Register the info item */
7546 char szTmp[20];
7547 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
7548 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
7549
7550 /* Status driver */
7551 PPDMIBASE pBase;
7552 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pState->IBase, &pBase, "Status Port");
7553 if (RT_FAILURE(rc))
7554 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
7555 pState->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
7556
7557 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pState->IBase, &pState->pDrvBase, "Network Port");
7558 if (RT_SUCCESS(rc))
7559 {
7560 if (rc == VINF_NAT_DNS)
7561 {
7562 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7563 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7564 }
7565 pState->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMINETWORKUP);
7566 AssertMsgReturn(pState->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7567 VERR_PDM_MISSING_INTERFACE_BELOW);
7568
7569 pState->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7570 pState->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7571 }
7572 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7573 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7574 {
7575 /* No error! */
7576 E1kLog(("%s This adapter is not attached to any network!\n", INSTANCE(pState)));
7577 }
7578 else
7579 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
7580
7581 rc = RTSemEventCreate(&pState->hEventMoreRxDescAvail);
7582 if (RT_FAILURE(rc))
7583 return rc;
7584
7585 e1kHardReset(pState);
7586
7587#if defined(VBOX_WITH_STATISTICS)
7588 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
7589 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
7590 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
7591 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
7592 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
7593 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
7594 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
7595 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
7596 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
7597 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
7598 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
7599 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occured", iInstance);
7600 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
7601 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
7602 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
7603 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
7604 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
7605 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
7606 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
7607 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatRxOverflowWakeup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups", "/Devices/E1k%d/RxOverflowWakeup", iInstance);
7608#endif /* VBOX_WITH_STATISTICS */
7609 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
7610#if defined(VBOX_WITH_STATISTICS)
7611 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
7612 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
7613#endif /* VBOX_WITH_STATISTICS */
7614 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
7615#if defined(VBOX_WITH_STATISTICS)
7616 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
7617 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
7618
7619 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors", "/Devices/E1k%d/TxDesc/ContextNormal", iInstance);
7620 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
7621 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
7622 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
7623 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
7624 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
7625 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
7626 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
7627 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
7628#endif /* VBOX_WITH_STATISTICS */
7629
7630 return VINF_SUCCESS;
7631}
7632
7633/**
7634 * The device registration structure.
7635 */
7636const PDMDEVREG g_DeviceE1000 =
7637{
7638 /* Structure version. PDM_DEVREG_VERSION defines the current version. */
7639 PDM_DEVREG_VERSION,
7640 /* Device name. */
7641 "e1000",
7642 /* Name of guest context module (no path).
7643 * Only evaluated if PDM_DEVREG_FLAGS_RC is set. */
7644 "VBoxDDGC.gc",
7645 /* Name of ring-0 module (no path).
7646 * Only evaluated if PDM_DEVREG_FLAGS_R0 is set. */
7647 "VBoxDDR0.r0",
7648 /* The description of the device. The UTF-8 string pointed to shall, like this structure,
7649 * remain unchanged from registration till VM destruction. */
7650 "Intel PRO/1000 MT Desktop Ethernet.\n",
7651
7652 /* Flags, combination of the PDM_DEVREG_FLAGS_* \#defines. */
7653 PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0,
7654 /* Device class(es), combination of the PDM_DEVREG_CLASS_* \#defines. */
7655 PDM_DEVREG_CLASS_NETWORK,
7656 /* Maximum number of instances (per VM). */
7657 ~0U,
7658 /* Size of the instance data. */
7659 sizeof(E1KSTATE),
7660
7661 /* Construct instance - required. */
7662 e1kConstruct,
7663 /* Destruct instance - optional. */
7664 e1kDestruct,
7665 /* Relocation command - optional. */
7666 e1kRelocate,
7667 /* I/O Control interface - optional. */
7668 NULL,
7669 /* Power on notification - optional. */
7670 NULL,
7671 /* Reset notification - optional. */
7672 e1kReset,
7673 /* Suspend notification - optional. */
7674 e1kSuspend,
7675 /* Resume notification - optional. */
7676 NULL,
7677 /* Attach command - optional. */
7678 e1kAttach,
7679 /* Detach notification - optional. */
7680 e1kDetach,
7681 /* Query a LUN base interface - optional. */
7682 NULL,
7683 /* Init complete notification - optional. */
7684 NULL,
7685 /* Power off notification - optional. */
7686 e1kPowerOff,
7687 /* pfnSoftReset */
7688 NULL,
7689 /* u32VersionEnd */
7690 PDM_DEVREG_VERSION
7691};
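
/*
 * This table is consumed by PDM through the module's device registration entry
 * point rather than by the device code itself. A minimal sketch of that pattern,
 * assuming the usual VBoxDD.cpp layout (version checks and the registration of
 * the other devices elided):
 *
 *     extern "C" DECLEXPORT(int) VBoxDevicesRegister(PPDMDEVREGCB pCallbacks, uint32_t u32Version)
 *     {
 *         // ... interface version check and other device registrations ...
 *         int rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceE1000);
 *         if (RT_FAILURE(rc))
 *             return rc;
 *         return VINF_SUCCESS;
 *     }
 */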
7692
7693#endif /* IN_RING3 */
7694#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */